Compare commits
8 Commits (7b9f8b6773 ... mdeval_dev)

SHA1:
adae920527
11793d7960
55b06fa61d
3aa91d7482
7d8c5d849d
c09549902a
b7bb8cb379
33c4756e34
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "mdevaluate"
-version = "24.02"
+version = "24.06"
 dependencies = [
     "mdanalysis",
     "pandas",
correlation.py

@@ -13,9 +13,95 @@ from .pbc import pbc_diff, pbc_points
 from .coordinates import Coordinates, CoordinateFrame, displacements_without_drift


-def log_indices(first: int, last: int, num: int = 100) -> np.ndarray:
-    ls = np.logspace(0, np.log10(last - first + 1), num=num)
-    return np.unique(np.int_(ls) - 1 + first)
+def _is_multi_selector(selection):
+    if len(selection) == 0:
+        return False
+    elif (
+        isinstance(selection[0], int)
+        or isinstance(selection[0], bool)
+        or isinstance(selection[0], np.integer)
+        or isinstance(selection[0], np.bool_)
+    ):
+        return False
+    else:
+        for indices in selection:
+            if len(indices) == 0:
+                continue
+            elif (
+                isinstance(indices[0], int)
+                or isinstance(indices[0], bool)
+                or isinstance(indices[0], np.integer)
+                or isinstance(indices[0], np.bool_)
+            ):
+                return True
+            else:
+                raise ValueError(
+                    "selector has more than two dimensions or does not "
+                    "contain int or bool types"
+                )
+
+
+def _calc_correlation(
+    frames: Coordinates,
+    start_frame: CoordinateFrame,
+    function: Callable,
+    selection: np.ndarray,
+    shifted_idx: np.ndarray,
+) -> np.ndarray:
+    if len(selection) == 0:
+        correlation = np.zeros(len(shifted_idx))
+    else:
+        start = start_frame[selection]
+        correlation = np.array(
+            [
+                function(start, frames[frame_index][selection])
+                for frame_index in shifted_idx
+            ]
+        )
+    return correlation
+
+
+def _calc_correlation_multi(
+    frames: Coordinates,
+    start_frame: CoordinateFrame,
+    function: Callable,
+    selection: np.ndarray,
+    shifted_idx: np.ndarray,
+) -> np.ndarray:
+    correlations = np.zeros((len(selection), len(shifted_idx)))
+    for i, frame_index in enumerate(shifted_idx):
+        frame = frames[frame_index]
+        for j, current_selection in enumerate(selection):
+            if len(selection) == 0:
+                correlations[j, i] = 0
+            else:
+                correlations[j, i] = function(
+                    start_frame[current_selection], frame[current_selection]
+                )
+    return correlations
+
+
+def _average_correlation(result):
+    averaged_result = []
+    for n in range(result.shape[1]):
+        clean_result = []
+        for entry in result[:, n]:
+            if np.all(entry == 0):
+                continue
+            else:
+                clean_result.append(entry)
+        averaged_result.append(np.average(np.array(clean_result), axis=0))
+    return np.array(averaged_result)
+
+
+def _average_correlation_multi(result):
+    clean_result = []
+    for entry in result:
+        if np.all(entry == 0):
+            continue
+        else:
+            clean_result.append(entry)
+    return np.average(np.array(clean_result), axis=0)


 @autosave_data(2)
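A note on the new helpers: _is_multi_selector only inspects the first element of the selection (and of each nested selection set), so the shape of what a selector returns decides the code path. A minimal sketch of the two accepted forms, using a plain NumPy array in place of a real CoordinateFrame:

import numpy as np

# Hypothetical x coordinates standing in for a CoordinateFrame.
x = np.array([0.2, 1.4, 0.7, 2.9, 1.1])

# Single selector result: one flat array of integer indices
# -> _is_multi_selector(...) is False and _calc_correlation is used.
single = np.argwhere((x >= 0) & (x < 1)).flatten()

# Multi selector result: a list of index arrays, one selection set per bin
# -> _is_multi_selector(...) is True and each set is correlated separately
#    by _calc_correlation_multi.
multi = [np.argwhere((x >= i) & (x < i + 1)).flatten() for i in range(3)]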
@@ -28,113 +114,82 @@ def shifted_correlation(
     window: float = 0.5,
     average: bool = True,
     points: int = 100,
-) -> (np.ndarray, np.ndarray):
-    """
-    Calculate the time series for a correlation function.
-
-    The times at which the correlation is calculated are determined by
-    a logarithmic distribution.
-
-    Args:
-        function: The function that should be correlated
-        frames: The coordinates of the simulation data
-        selector (opt.):
-            A function that returns the indices depending on
-            the staring frame for which particles the
-            correlation should be calculated.
-        segments (int, opt.):
-            The number of segments the time window will be
-            shifted
-        skip (float, opt.):
-            The fraction of the trajectory that will be skipped
-            at the beginning, if this is None the start index
-            of the frames slice will be used, which defaults
-            to 0.1.
-        window (float, opt.):
-            The fraction of the simulation the time series will
-            cover
-        average (bool, opt.):
-            If True, returns averaged correlation function
-        points (int, opt.):
-            The number of timeshifts for which the correlation
-            should be calculated
-    Returns:
-        tuple:
-            A list of length N that contains the timeshiftes of the frames at which
-            the time series was calculated and a numpy array of shape (segments, N)
-            that holds the (non-avaraged) correlation data
-
-    Example:
-        Calculating the mean square displacement of a coordinate object
-        named ``coords``:
-
-        >>> time, data = shifted_correlation(msd, coords)
-    """
-
-    def get_correlation(
-        frames: CoordinateFrame,
-        start_frame: CoordinateFrame,
-        index: np.ndarray,
-        shifted_idx: np.ndarray,
-    ) -> np.ndarray:
-        if len(index) == 0:
-            correlation = np.zeros(len(shifted_idx))
-        else:
-            start = frames[start_frame][index]
-            correlation = np.array(
-                [function(start, frames[frame][index]) for frame in shifted_idx]
-            )
-        return correlation
-
-    def apply_selector(
-        start_frame: CoordinateFrame,
-        frames: CoordinateFrame,
-        idx: np.ndarray,
-        selector: Optional[Callable] = None,
-    ):
-        shifted_idx = idx + start_frame
-
-        if selector is None:
-            index = np.arange(len(frames[start_frame]))
-            return get_correlation(frames, start_frame, index, shifted_idx)
-        else:
-            index = selector(frames[start_frame])
-            if len(index) == 0:
-                return np.zeros(len(shifted_idx))
-
-            elif (
-                isinstance(index[0], int)
-                or isinstance(index[0], bool)
-                or isinstance(index[0], np.integer)
-                or isinstance(index[0], np.bool_)
-            ):
-                return get_correlation(frames, start_frame, index, shifted_idx)
-            else:
-                correlations = []
-                for ind in index:
-                    if len(ind) == 0:
-                        correlations.append(np.zeros(len(shifted_idx)))
-
-                    elif (
-                        isinstance(ind[0], int)
-                        or isinstance(ind[0], bool)
-                        or isinstance(ind[0], np.integer)
-                        or isinstance(ind[0], np.bool_)
-                    ):
-                        correlations.append(
-                            get_correlation(frames, start_frame, ind, shifted_idx)
-                        )
-                    else:
-                        raise ValueError(
-                            "selector has more than two dimensions or does not "
-                            "contain int or bool types"
-                        )
-                return correlations
+) -> tuple[np.ndarray, np.ndarray]:
+    """Compute a time-dependent correlation function for a given trajectory.
+
+    To improve statistics, multiple (possibly overlapping) windows will be
+    laid over the whole trajectory and the correlation is computed for them separately.
+    The start frames of the windows are spaced linearly over the valid region of
+    the trajectory (skipping frames in the beginning given by the skip parameter).
+
+    The points within each window are spaced logarithmically.
+
+    Only a certain subset of the given atoms may be selected for each window
+    individually using a selector function.
+
+    Note that this function is specifically optimized for multi selectors, which select
+    multiple selection sets per window, for which the correlation is to be computed
+    separately.
+
+
+    Arguments
+    ---------
+    function:
+        The (correlation) function to evaluate.
+        Should be of the form (CoordinateFrame, CoordinateFrame) -> float
+    frames:
+        Trajectory to evaluate on
+    selector: (optional)
+        Selection function to select only certain selection sets for each start frame.
+        Should be of the form
+        (CoordinateFrame) -> list[A]
+        where A is something you can index an ndarray with.
+        For example a list of indices or a bool array.
+        Must return the same number of selection sets for every frame.
+    segments:
+        Number of start frames
+    skip:
+        Percentage of trajectory to skip from the start
+    window:
+        Length of each segment given as percentage of trajectory
+    average:
+        Whether to return averaged results.
+        See below for details on the returned ndarray.
+    points:
+        Number of points per segment
+
+
+    Returns
+    -------
+    times: ndarray
+        1d array of time differences to start frame
+    result: ndarray
+        2d ndarray of averaged (or non-averaged) correlations.
+
+        When average==True (default) the returned array will be of the shape (S, P)
+        where S is the number of selection sets and P the number of points per window.
+        For selection sets that were empty for all start frames all data points will be
+        zero.
+
+        When average==False the returned array will be of shape (W, S) with
+        dtype=object. The elements are either ndarrays of shape (P,) containing the
+        correlation data for the specific window and selection set or None if the
+        corresponding selection set was empty.
+        W is the number of segments (windows).
+        S and P are the same as for average==True.
+    """
     if 1 - skip < window:
         window = 1 - skip

-    start_frames = np.unique(
+    start_frame_indices = np.unique(
         np.linspace(
             len(frames) * skip,
             len(frames) * (1 - window),
@@ -144,28 +199,44 @@ def shifted_correlation(
         )
     )

-    num_frames = int(len(frames) * window)
-    ls = np.logspace(0, np.log10(num_frames + 1), num=points)
-    idx = np.unique(np.int_(ls) - 1)
-    t = np.array([frames[i].time for i in idx]) - frames[0].time
-    result = np.array(
-        [
-            apply_selector(start_frame, frames=frames, idx=idx, selector=selector)
-            for start_frame in start_frames
-        ]
-    )
-
-    if average:
-        clean_result = []
-        for entry in result:
-            if np.all(entry == 0):
-                continue
-            else:
-                clean_result.append(entry)
-        result = np.array(clean_result)
-        result = np.average(result, axis=0)
-    return t, result
+    num_frames_per_window = int(len(frames) * window)
+    logspaced_indices = np.logspace(0, np.log10(num_frames_per_window + 1), num=points)
+    logspaced_indices = np.unique(np.int_(logspaced_indices) - 1)
+    logspaced_time = (
+        np.array([frames[i].time for i in logspaced_indices]) - frames[0].time
+    )
+
+    if selector is None:
+        multi_selector = False
+    else:
+        selection = selector(frames[0])
+        multi_selector = _is_multi_selector(selection)
+
+    result = []
+    for start_frame_index in start_frame_indices:
+        shifted_idx = logspaced_indices + start_frame_index
+        start_frame = frames[start_frame_index]
+        if selector is None:
+            selection = np.arange(len(start_frame))
+        else:
+            selection = selector(start_frame)
+        if multi_selector:
+            result_segment = _calc_correlation_multi(
+                frames, start_frame, function, selection, shifted_idx
+            )
+        else:
+            result_segment = _calc_correlation(
+                frames, start_frame, function, selection, shifted_idx
+            )
+        result.append(result_segment)
+    result = np.array(result)
+
+    if average:
+        if multi_selector:
+            result = _average_correlation_multi(result)
+        else:
+            result = _average_correlation(result)
+    return logspaced_time, result


 def msd(
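The rewritten body keeps the logspace trick of the removed log_indices helper for spacing the points within each window. A standalone sketch of how the window indices come out, assuming 1000 frames per window and 10 points:

import numpy as np

num_frames_per_window = 1000  # assumed window length in frames
points = 10

# np.logspace(0, log10(N + 1)) spans [1, N + 1]; casting to int and
# subtracting 1 maps this onto [0, N], and np.unique drops duplicates
# produced by the integer cast (which is why asking for 10 points can
# return fewer, as in the tests further below).
ls = np.logspace(0, np.log10(num_frames_per_window + 1), num=points)
idx = np.unique(np.int_(ls) - 1)
print(idx)  # [0 1 3 9 20 45 99 214 463 1000]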
@@ -184,11 +255,11 @@ def msd(
     if axis == "all":
         return (displacements**2).sum(axis=1).mean()
     elif axis == "xy" or axis == "yx":
-        return (displacements[:, [0, 1]]**2).sum(axis=1).mean()
+        return (displacements[:, [0, 1]] ** 2).sum(axis=1).mean()
     elif axis == "xz" or axis == "zx":
-        return (displacements[:, [0, 2]]**2).sum(axis=1).mean()
+        return (displacements[:, [0, 2]] ** 2).sum(axis=1).mean()
     elif axis == "yz" or axis == "zy":
-        return (displacements[:, [1, 2]]**2).sum(axis=1).mean()
+        return (displacements[:, [1, 2]] ** 2).sum(axis=1).mean()
     elif axis == "x":
         return (displacements[:, 0] ** 2).mean()
     elif axis == "y":
@@ -218,13 +289,13 @@ def isf(
         distance = (displacements**2).sum(axis=1) ** 0.5
         return np.sinc(distance * q / np.pi).mean()
     elif axis == "xy" or axis == "yx":
-        distance = (displacements[:, [0, 1]]**2).sum(axis=1) ** 0.5
+        distance = (displacements[:, [0, 1]] ** 2).sum(axis=1) ** 0.5
         return np.real(jn(0, distance * q)).mean()
     elif axis == "xz" or axis == "zx":
-        distance = (displacements[:, [0, 2]]**2).sum(axis=1) ** 0.5
+        distance = (displacements[:, [0, 2]] ** 2).sum(axis=1) ** 0.5
         return np.real(jn(0, distance * q)).mean()
     elif axis == "yz" or axis == "zy":
-        distance = (displacements[:, [1, 2]]**2).sum(axis=1) ** 0.5
+        distance = (displacements[:, [1, 2]] ** 2).sum(axis=1) ** 0.5
         return np.real(jn(0, distance * q)).mean()
     elif axis == "x":
         distance = np.abs(displacements[:, 0])
@@ -278,11 +349,11 @@ def van_hove_self(
     if axis == "all":
         delta_r = (vectors**2).sum(axis=1) ** 0.5
     elif axis == "xy" or axis == "yx":
-        delta_r = (vectors[:, [0, 1]]**2).sum(axis=1) ** 0.5
+        delta_r = (vectors[:, [0, 1]] ** 2).sum(axis=1) ** 0.5
     elif axis == "xz" or axis == "zx":
-        delta_r = (vectors[:, [0, 2]]**2).sum(axis=1) ** 0.5
+        delta_r = (vectors[:, [0, 2]] ** 2).sum(axis=1) ** 0.5
     elif axis == "yz" or axis == "zy":
-        delta_r = (vectors[:, [1, 2]]**2).sum(axis=1) ** 0.5
+        delta_r = (vectors[:, [1, 2]] ** 2).sum(axis=1) ** 0.5
     elif axis == "x":
         delta_r = np.abs(vectors[:, 0])
     elif axis == "y":
@@ -445,13 +516,13 @@ def non_gaussian_parameter(
         r = (vectors**2).sum(axis=1)
         dimensions = 3
     elif axis == "xy" or axis == "yx":
-        r = (vectors[:, [0, 1]]**2).sum(axis=1)
+        r = (vectors[:, [0, 1]] ** 2).sum(axis=1)
         dimensions = 2
     elif axis == "xz" or axis == "zx":
-        r = (vectors[:, [0, 2]]**2).sum(axis=1)
+        r = (vectors[:, [0, 2]] ** 2).sum(axis=1)
         dimensions = 2
     elif axis == "yz" or axis == "zy":
-        r = (vectors[:, [1, 2]]**2).sum(axis=1)
+        r = (vectors[:, [1, 2]] ** 2).sum(axis=1)
         dimensions = 2
     elif axis == "x":
         r = vectors[:, 0] ** 2
@@ -4,7 +4,6 @@ from typing import Optional
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
 from numpy.polynomial.polynomial import Polynomial as Poly
-import math
 from scipy.spatial import KDTree
 import pandas as pd
 import multiprocessing as mp
@@ -49,7 +48,7 @@ def _pbc_points_reduced(

 def _build_tree(points, box, r_max, pore_geometry):
     if np.all(np.diag(np.diag(box)) == box):
-        tree = KDTree(points, boxsize=box)
+        tree = KDTree(points % box, boxsize=box)
         points_pbc_index = None
     else:
         points_pbc, points_pbc_index = _pbc_points_reduced(
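Context for the points % box change: SciPy's KDTree raises a ValueError when boxsize is given and any coordinate lies outside [0, boxsize), so wrapping the points into the box first makes the periodic tree robust against slightly out-of-bounds coordinates. A minimal sketch, assuming an orthorhombic box given as 1-d edge lengths:

import numpy as np
from scipy.spatial import KDTree

box = np.array([10.0, 10.0, 10.0])  # assumed box edge lengths
points = np.array([[-0.5, 2.0, 3.0], [10.2, 9.0, 1.0]])  # slightly outside the box

# KDTree(points, boxsize=box) would raise ValueError here; the modulo
# wraps every coordinate back into [0, box) before building the tree.
tree = KDTree(points % box, boxsize=box)
print(tree.query([9.4, 2.1, 3.0])[1])  # 0: nearest neighbour is the wrapped first point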
@@ -79,11 +78,7 @@ def occupation_matrix(
     z_bins = np.arange(0, box[2][2] + edge_length, edge_length)
     bins = [x_bins, y_bins, z_bins]
     # Trajectory is split for parallel computing
-    size = math.ceil(len(frame_indices) / nodes)
-    indices = [
-        np.arange(len(frame_indices))[i : i + size]
-        for i in range(0, len(frame_indices), size)
-    ]
+    indices = np.array_split(frame_indices, nodes)
     pool = mp.Pool(nodes)
     results = pool.map(
         partial(_calc_histogram, trajectory=trajectory, bins=bins), indices
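np.array_split is the idiomatic replacement for the removed manual chunking: unlike np.split it tolerates a chunk count that does not evenly divide the array, and note that the new code splits frame_indices itself rather than positions into it. A small sketch with assumed values:

import numpy as np

frame_indices = np.arange(100, 110)  # 10 assumed frame indices
nodes = 4                            # assumed worker count

# Uneven chunks are fine: the first chunks get one extra element.
indices = np.array_split(frame_indices, nodes)
print([chunk.tolist() for chunk in indices])
# [[100, 101, 102], [103, 104, 105], [106, 107], [108, 109]]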
@@ -277,7 +272,11 @@ def distance_resolved_energies(


 def find_energy_maxima(
-    energy_df: pd.DataFrame, r_min: float, r_max: float
+    energy_df: pd.DataFrame,
+    r_min: float,
+    r_max: float,
+    r_eval: float = None,
+    degree: int = 2,
 ) -> pd.DataFrame:
     distances = []
     energies = []
@@ -286,6 +285,9 @@ def find_energy_maxima(
         x = np.array(data_d["r"])
         y = np.array(data_d["energy"])
         mask = (x >= r_min) * (x <= r_max)
-        p3 = Poly.fit(x[mask], y[mask], deg=2)
-        energies.append(np.max(p3(np.linspace(r_min, r_max, 1000))))
+        p3 = Poly.fit(x[mask], y[mask], deg=degree)
+        if r_eval is None:
+            energies.append(np.max(p3(np.linspace(r_min, r_max, 1000))))
+        else:
+            energies.append(p3(r_eval))
     return pd.DataFrame({"d": distances, "energy": energies})
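The two new parameters generalize how each distance slice is reduced to a single energy value: degree sets the order of the polynomial fit (previously hard-coded to 2), and r_eval switches from taking the fit's maximum over [r_min, r_max] to evaluating the fit at one fixed radius. A self-contained sketch on synthetic data, all values assumed:

import numpy as np
from numpy.polynomial.polynomial import Polynomial as Poly

r = np.linspace(0.1, 1.0, 50)   # assumed radii
energy = 4.0 * r * (1.0 - r)    # synthetic profile with maximum 1.0 at r = 0.5
r_min, r_max = 0.3, 0.7

mask = (r >= r_min) * (r <= r_max)
p3 = Poly.fit(r[mask], energy[mask], deg=2)

# Default (r_eval=None): maximum of the fitted polynomial over the window.
e_max = np.max(p3(np.linspace(r_min, r_max, 1000)))
# With r_eval set: the fit evaluated at one fixed radius instead.
e_at = p3(0.5)
print(round(float(e_max), 3), round(float(e_at), 3))  # both 1.0 for this profile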
@@ -177,7 +177,7 @@ def coherent_sum(
     func: Callable[[ArrayLike, ArrayLike], float],
     coord_a: ArrayLike,
     coord_b: ArrayLike,
-) -> float:
+) -> NDArray:
     """
     Perform a coherent sum over two arrays :math:`A, B`.
test/test_correlation.py (new file, 57 lines)

@@ -0,0 +1,57 @@
+import os
+import pytest
+
+import mdevaluate
+from mdevaluate import correlation
+import numpy as np
+
+
+@pytest.fixture
+def trajectory(request):
+    return mdevaluate.open(os.path.join(os.path.dirname(__file__), "data/water"))
+
+
+def test_shifted_correlation(trajectory):
+    test_array = np.array([100, 82, 65, 49, 39, 29, 20, 13, 7])
+    OW = trajectory.subset(atom_name="OW")
+    t, result = correlation.shifted_correlation(
+        correlation.isf, OW, segments=10, skip=0.1, points=10
+    )
+    assert (np.array(result * 100, dtype=int) == test_array).all()
+
+
+def test_shifted_correlation_no_average(trajectory):
+    t, result = correlation.shifted_correlation(
+        correlation.isf, trajectory, segments=10, skip=0.1, points=5, average=False
+    )
+    assert result.shape == (10, 5)
+
+
+def test_shifted_correlation_selector(trajectory):
+    test_array = np.array([100, 82, 64, 48, 37, 28, 19, 11, 5])
+
+    def selector(frame):
+        index = np.argwhere((frame[:, 0] >= 0) * (frame[:, 0] < 1))
+        return index.flatten()
+
+    OW = trajectory.subset(atom_name="OW")
+    t, result = correlation.shifted_correlation(
+        correlation.isf, OW, segments=10, skip=0.1, points=10, selector=selector
+    )
+    assert (np.array(result * 100, dtype=int) == test_array).all()
+
+
+def test_shifted_correlation_multi_selector(trajectory):
+    def selector(frame):
+        indices = []
+        for i in range(3):
+            x = frame[:, 0].flatten()
+            index = np.argwhere((x >= i) * (x < i + 1))
+            indices.append(index.flatten())
+        return indices
+
+    OW = trajectory.subset(atom_name="OW")
+    t, result = correlation.shifted_correlation(
+        correlation.isf, OW, segments=10, skip=0.1, points=10, selector=selector
+    )
+    assert result.shape == (3, 9)
@@ -13,19 +13,7 @@ def trajectory(request):


 def test_get_fel(trajectory):
-    test_array = np.array(
-        [
-            184.4909136,
-            233.79320471,
-            223.12003988,
-            228.49746397,
-            200.5626769,
-            212.82484221,
-            165.10818396,
-            170.74123681,
-            175.86672931,
-        ]
-    )
+    test_array = np.array([210., 214., 209., 192., 200., 193., 230., 218., 266.])

     OW = trajectory.subset(atom_name="OW")
     box = trajectory[0].box