Used black for formatting.

sebastiankloth 2023-06-27 10:26:23 +02:00
parent a2164507d5
commit ef125c2a89
14 changed files with 860 additions and 553 deletions

View File

@@ -11,11 +11,19 @@ from . import autosave
 from . import reader
 from .logging import logger
 
-__version__ = '23.4'
+__version__ = "23.6"
 
-def open(directory='', topology='*.tpr', trajectory='*.xtc', cached=False,
-         nojump=False, index_file=None, charges=None, masses=None):
+def open(
+    directory="",
+    topology="*.tpr",
+    trajectory="*.xtc",
+    cached=False,
+    nojump=False,
+    index_file=None,
+    charges=None,
+    masses=None,
+):
     """
     Open a simulation from a directory.
@@ -51,8 +59,8 @@ def open(directory='', topology='*.tpr', trajectory='*.xtc', cached=False,
     """
     top_glob = glob(os.path.join(directory, topology), recursive=True)
     if top_glob is not None and len(top_glob) == 1:
-        top_file, = top_glob
-        logger.info('Loading topology: {}'.format(top_file))
+        (top_file,) = top_glob
+        logger.info("Loading topology: {}".format(top_file))
         if index_file is not None:
             index_glob = glob(os.path.join(directory, index_file), recursive=True)
             if index_glob is not None:
@@ -60,19 +68,23 @@ def open(directory='', topology='*.tpr', trajectory='*.xtc', cached=False,
             else:
                 index_file = None
     else:
-        raise FileNotFoundError('Topology file could not be identified.')
+        raise FileNotFoundError("Topology file could not be identified.")
     traj_glob = glob(os.path.join(directory, trajectory), recursive=True)
     if traj_glob is not None and len(traj_glob) == 1:
         traj_file = traj_glob[0]
-        logger.info('Loading trajectory: {}'.format(traj_file))
+        logger.info("Loading trajectory: {}".format(traj_file))
     else:
-        raise FileNotFoundError('Trajectory file could not be identified.')
+        raise FileNotFoundError("Trajectory file could not be identified.")
 
     atom_set, frames = reader.open_with_mdanalysis(
-        top_file, traj_file, cached=cached, index_file=index_file,
-        charges=charges, masses=masses
+        top_file,
+        traj_file,
+        cached=cached,
+        index_file=index_file,
+        charges=charges,
+        masses=masses,
     )
     coords = coordinates.Coordinates(frames, atom_subset=atom_set)
     if nojump:
         try:
@@ -81,11 +93,12 @@ def open(directory='', topology='*.tpr', trajectory='*.xtc', cached=False,
             reader.generate_nojump_matrixes(coords)
     return coords
 
 
 def open_energy(file, energies=None):
     """Reads a GROMACS energy file and outputs the data as a pandas DataFrame.
 
     Args:
         file: Filename of the energy file
         energies (opt.): Specify energies to extract from the energy file
     """
     df = reader.energy_reader(file, energies=energies)
     return df
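For orientation, a minimal usage sketch of the reformatted open() API, assuming the package is imported as mdevaluate; the directory path is hypothetical and each glob pattern must resolve to exactly one file:

import mdevaluate as md

traj = md.open("/data/sim", topology="*.tpr", trajectory="*.xtc", nojump=True)
energy = md.open_energy("/data/sim/energy.edr")  # pandas DataFrame of selected energies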

View File

@@ -6,20 +6,23 @@ from .checksum import checksum
 import numpy as np
 import scipy
 
-if scipy.version.version >= '0.17.0':
+if scipy.version.version >= "0.17.0":
     from scipy.spatial import cKDTree as KDTree
 else:
     from scipy.spatial import KDTree
 
 
 def compare_regex(list, exp):
     """
     Compare a list of strings with a regular expression.
     """
-    if not exp.endswith('$'):
-        exp += '$'
+    if not exp.endswith("$"):
+        exp += "$"
     regex = re.compile(exp)
     return np.array([regex.match(s) is not None for s in list])
 
 
 class Atoms:
     """
     Basic container class for atom information.
@@ -62,8 +65,7 @@ class AtomMismatch(Exception):
 
 class AtomSubset:
-    def __init__(self, atoms, selection=None, description=''):
+    def __init__(self, atoms, selection=None, description=""):
         """
         Args:
             atoms: Base atom object
@@ -71,7 +73,7 @@ class AtomSubset:
             description (opt.): Descriptive string of the subset.
         """
         if selection is None:
-            selection = np.ones(len(atoms), dtype='bool')
+            selection = np.ones(len(atoms), dtype="bool")
         self.selection = selection
         self.atoms = atoms
         self.description = description
@@ -92,26 +94,28 @@ class AtomSubset:
             new_subset &= AtomSubset(
                 self.atoms,
                 selection=compare_regex(self.atoms.atom_names, atom_name),
-                description=atom_name
+                description=atom_name,
             )
         if residue_name is not None:
             new_subset &= AtomSubset(
                 self.atoms,
                 selection=compare_regex(self.atoms.residue_names, residue_name),
-                description=residue_name
+                description=residue_name,
             )
         if residue_id is not None:
             if np.iterable(residue_id):
-                selection = np.zeros(len(self.selection), dtype='bool')
+                selection = np.zeros(len(self.selection), dtype="bool")
                 selection[np.in1d(self.atoms.residue_ids, residue_id)] = True
                 new_subset &= AtomSubset(self.atoms, selection)
             else:
-                new_subset &= AtomSubset(self.atoms, self.atoms.residue_ids == residue_id)
+                new_subset &= AtomSubset(
+                    self.atoms, self.atoms.residue_ids == residue_id
+                )
         if indices is not None:
-            selection = np.zeros(len(self.selection), dtype='bool')
+            selection = np.zeros(len(self.selection), dtype="bool")
             selection[indices] = True
             new_subset &= AtomSubset(self.atoms, selection)
         return new_subset
@@ -142,15 +146,15 @@ class AtomSubset:
     def __and__(self, other):
         if self.atoms != other.atoms:
             raise AtomMismatch
-        selection = (self.selection & other.selection)
-        description = '{}_{}'.format(self.description, other.description).strip('_')
+        selection = self.selection & other.selection
+        description = "{}_{}".format(self.description, other.description).strip("_")
         return AtomSubset(self.atoms, selection, description)
 
     def __or__(self, other):
         if self.atoms != other.atoms:
             raise AtomMismatch
-        selection = (self.selection | other.selection)
-        description = '{}_{}'.format(self.description, other.description).strip('_')
+        selection = self.selection | other.selection
+        description = "{}_{}".format(self.description, other.description).strip("_")
         return AtomSubset(self.atoms, selection, description)
 
     def __invert__(self):
@@ -158,14 +162,20 @@ class AtomSubset:
         return AtomSubset(self.atoms, selection, self.description)
 
     def __repr__(self):
-        return 'Subset of Atoms ({} of {})'.format(len(self.atoms.residue_names[self.selection]),
-                                                   len(self.atoms))
+        return "Subset of Atoms ({} of {})".format(
+            len(self.atoms.residue_names[self.selection]), len(self.atoms)
+        )
 
     @property
     def summary(self):
-        return "\n".join(["{}{} {}".format(resid, resname, atom_names)
-                          for resid, resname, atom_names in zip(self.residue_ids, self.residue_names, self.atom_names)
-                          ])
+        return "\n".join(
+            [
+                "{}{} {}".format(resid, resname, atom_names)
+                for resid, resname, atom_names in zip(
+                    self.residue_ids, self.residue_names, self.atom_names
+                )
+            ]
+        )
 
     def __checksum__(self):
         return checksum(self.description)
@@ -182,35 +192,32 @@ def gyration_radius(position):
     r"""
     Calculates the radius of gyration of each molecule in the coordinate frame,
     weighted with the masses of the individual atoms.
 
     Args:
         position: Coordinate frame object
 
     .. math::
        R_G = \left(\frac{\sum_{i=1}^{n} m_i |\vec{r_i} - \vec{r_{COM}}|^2}{\sum_{i=1}^{n} m_i}\right)^{\frac{1}{2}}
     """
     gyration_radii = np.array([])
 
     for resid in np.unique(position.residue_ids):
-        pos = position.whole[position.residue_ids==resid]
-        mass = position.masses[position.residue_ids==resid][:,np.newaxis]
-        COM = center_of_mass(pos,mass)
-        r_sq = ((pbc_diff(pos,COM,pos.box.diagonal()))**2).sum(1)[:,np.newaxis]
-        g_radius = ((r_sq*mass).sum()/mass.sum())**(0.5)
-        gyration_radii = np.append(gyration_radii,g_radius)
+        pos = position.whole[position.residue_ids == resid]
+        mass = position.masses[position.residue_ids == resid][:, np.newaxis]
+        COM = center_of_mass(pos, mass)
+        r_sq = ((pbc_diff(pos, COM, pos.box.diagonal())) ** 2).sum(1)[:, np.newaxis]
+        g_radius = ((r_sq * mass).sum() / mass.sum()) ** (0.5)
+        gyration_radii = np.append(gyration_radii, g_radius)
 
     return gyration_radii
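A hedged usage sketch: applied to a single frame of a Coordinates object (traj as in the open() sketch above), the function yields one radius of gyration per residue id:

frame = traj[0]
radii = gyration_radius(frame)  # ndarray with one R_G per molecule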
-def layer_of_atoms(atoms,
-                   thickness,
-                   plane_offset=np.array([0, 0, 0]),
-                   plane_normal=np.array([1, 0, 0])):
+def layer_of_atoms(
+    atoms, thickness, plane_offset=np.array([0, 0, 0]), plane_normal=np.array([1, 0, 0])
+):
     p_ = atoms - plane_offset
     distance = np.dot(p_, plane_normal)
@@ -227,6 +234,7 @@ def distance_to_atoms(ref, atoms, box=None):
             out[i] = np.sqrt(diff)
     return out
 
 
 def distance_to_atoms_cKDtree(ref, atoms, box=None, thickness=None):
     """
     Get the minimal distance from atoms to ref.
@@ -236,19 +244,25 @@ def distance_to_atoms_cKDtree(ref, atoms, box=None, thickness=None):
     If box is not given then periodic boundary conditions are not applied!
     """
     if thickness == None:
-        thickness = box/5
+        thickness = box / 5
     if box is not None:
-        start_coords = np.copy(atoms)%box
-        all_frame_coords = pbc_points(ref, box, thickness = thickness)
+        start_coords = np.copy(atoms) % box
+        all_frame_coords = pbc_points(ref, box, thickness=thickness)
     else:
         start_coords = atoms
         all_frame_coords = ref
 
     tree = spatial.cKDTree(all_frame_coords)
     return tree.query(start_coords)[0]
 
 
-def next_neighbors(atoms, query_atoms=None, number_of_neighbors=1, distance_upper_bound=np.inf, distinct=False):
+def next_neighbors(
+    atoms,
+    query_atoms=None,
+    number_of_neighbors=1,
+    distance_upper_bound=np.inf,
+    distinct=False,
+):
     """
     Find the N next neighbors of a set of atoms.
@@ -265,6 +279,9 @@ def next_neighbors(atoms, query_atoms=None, number_of_neighbors=1, distance_uppe
         query_atoms = atoms
     elif not distinct:
         dnn = 1
-    dist, indices = tree.query(query_atoms, number_of_neighbors + dnn,
-                               distance_upper_bound=distance_upper_bound)
+    dist, indices = tree.query(
+        query_atoms,
+        number_of_neighbors + dnn,
+        distance_upper_bound=distance_upper_bound,
+    )
     return indices[:, dnn:]
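Two usage sketches for this module (atom and residue names are hypothetical). compare_regex appends "$" when it is missing, so patterns are anchored and must match the complete name; subset combines such selections:

compare_regex(["OW", "HW1", "HW2"], "HW.")  # -> array([False, True, True])
water_oxygen = traj.subset(residue_name="SOL", atom_name="OW")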

View File

@@ -9,7 +9,7 @@ from .logging import logger
 autosave_directory = None
 load_autosave_data = False
 verbose_print = True
-user_autosave_directory = os.path.join(os.environ['HOME'], '.mdevaluate/autosave')
+user_autosave_directory = os.path.join(os.environ["HOME"], ".mdevaluate/autosave")
 
 
 def notify(msg):
@@ -33,7 +33,7 @@ def enable(dir, load_data=True, verbose=True):
     #     os.makedirs(absolute, exist_ok=True)
     autosave_directory = dir
     load_autosave_data = load_data
-    notify('Enabled autosave in directory: {}'.format(autosave_directory))
+    notify("Enabled autosave in directory: {}".format(autosave_directory))
 
 
 def disable():
@@ -58,6 +58,7 @@ class disabled:
     #     After the context is exited, autosave will work as before.
     """
+
     def __enter__(self):
         self._autosave_directory = autosave_directory
         disable()
@@ -76,8 +77,12 @@ def get_directory(reader):
     except PermissionError:
         pass
     if not os.access(savedir, os.W_OK):
-        savedir = os.path.join(user_autosave_directory, savedir.lstrip('/'))
-        logger.info('Switched autosave directory to {}, since original location is not writeable.'.format(savedir))
+        savedir = os.path.join(user_autosave_directory, savedir.lstrip("/"))
+        logger.info(
+            "Switched autosave directory to {}, since original location is not writeable.".format(
                savedir
+            )
+        )
         os.makedirs(savedir, exist_ok=True)
     return savedir
@@ -86,17 +91,17 @@ def get_filename(function, checksum, description, *args):
     """Get the autosave filename for a specific function call."""
     func_desc = function.__name__
     for arg in args:
-        if hasattr(arg, '__name__'):
-            func_desc += '_{}'.format(arg.__name__)
+        if hasattr(arg, "__name__"):
+            func_desc += "_{}".format(arg.__name__)
         elif isinstance(arg, functools.partial):
-            func_desc += '_{}'.format(arg.func.__name__)
+            func_desc += "_{}".format(arg.func.__name__)
 
-        if hasattr(arg, 'frames'):
+        if hasattr(arg, "frames"):
             savedir = get_directory(arg.frames)
-        if hasattr(arg, 'description') and arg.description != '':
-            description += '_{}'.format(arg.description)
-    filename = '{}_{}.npz'.format(func_desc.strip('_'), description.strip('_'))
+        if hasattr(arg, "description") and arg.description != "":
+            description += "_{}".format(arg.description)
+    filename = "{}_{}.npz".format(func_desc.strip("_"), description.strip("_"))
     return os.path.join(savedir, filename)
@@ -105,14 +110,14 @@ def verify_file(filename, checksum):
     file_checksum = 0
     if os.path.exists(filename):
         data = np.load(filename, allow_pickle=True)
-        if 'checksum' in data:
-            file_checksum = data['checksum']
+        if "checksum" in data:
+            file_checksum = data["checksum"]
     return file_checksum == checksum
 
 
 def save_data(filename, checksum, data):
     """Save data and checksum to a file."""
-    notify('Saving result to file: {}'.format(filename))
+    notify("Saving result to file: {}".format(filename))
     try:
         data = np.array(data)
     except ValueError:
@@ -125,13 +130,13 @@ def save_data(filename, checksum, data):
 
 def load_data(filename):
     """Load data from a npz file."""
-    notify('Loading result from file: {}'.format(filename))
+    notify("Loading result from file: {}".format(filename))
     fdata = np.load(filename, allow_pickle=True)
-    if 'data' in fdata:
-        return fdata['data']
+    if "data" in fdata:
+        return fdata["data"]
     else:
-        data = tuple(fdata[k] for k in sorted(fdata) if ('arr' in k))
-        save_data(filename, fdata['checksum'], data)
+        data = tuple(fdata[k] for k in sorted(fdata) if ("arr" in k))
+        save_data(filename, fdata["checksum"], data)
         return data
@@ -142,10 +147,11 @@ def autosave_data(nargs, kwargs_keys=None, version=None):
     Args:
         nargs: Number of args which are relevant for the calculation.
         kwargs_keys (opt.): List of keyword arguments which are relevant for the calculation.
         version (opt.):
             An optional version number of the decorated function, which replaces the checksum of
             the function code, hence the checksum does not depend on the function code.
     """
+
     def decorator_function(function):
         # make sure to include names of positional arguments in kwargs_keys,
         # since otherwise they will be ignored if passed via keyword.
@@ -154,8 +160,8 @@ def autosave_data(nargs, kwargs_keys=None, version=None):
         @functools.wraps(function)
         def autosave(*args, **kwargs):
-            description = kwargs.pop('description', '')
-            autoload = kwargs.pop('autoload', True) and load_autosave_data
+            description = kwargs.pop("description", "")
+            autoload = kwargs.pop("autoload", True) and load_autosave_data
             if autosave_directory is not None:
                 relevant_args = list(args[:nargs])
                 if kwargs_keys is not None:
@@ -170,7 +176,9 @@ def autosave_data(nargs, kwargs_keys=None, version=None):
                 legacy_csum = checksum(function, *relevant_args)
 
                 filename = get_filename(function, csum, description, *relevant_args)
-                if autoload and (verify_file(filename, csum) or verify_file(filename, legacy_csum)):
+                if autoload and (
+                    verify_file(filename, csum) or verify_file(filename, legacy_csum)
+                ):
                     result = load_data(filename)
                 else:
                     result = function(*args, **kwargs)
@@ -179,5 +187,7 @@ def autosave_data(nargs, kwargs_keys=None, version=None):
             else:
                 result = function(*args, **kwargs)
             return result
+
         return autosave
+
     return decorator_function
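A sketch of applying the decorator, assuming the module is importable as mdevaluate.autosave. The first nargs positional arguments plus the listed keywords enter the cache checksum; "description" and "autoload" are consumed by the wrapper itself, not passed through:

from mdevaluate.autosave import autosave_data

@autosave_data(nargs=2, kwargs_keys=("bins",))
def averaged_histogram(function, coordinates, bins=None):
    return function(coordinates, bins)

# averaged_histogram(func, coords, bins=10, description="runA", autoload=False)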

View File

@@ -1,4 +1,3 @@
 import functools
 import hashlib
 from .logging import logger
@@ -14,6 +13,7 @@ SALT = 42
 
 def version(version_nr, calls=[]):
     """Function decorator that assigns a custom checksum to a function."""
+
     def decorator(func):
         cs = checksum(func.__name__, version_nr, *calls)
         func.__checksum__ = lambda: cs
@@ -23,18 +23,19 @@ def version(version_nr, calls=[]):
             return func(*args, **kwargs)
 
         return wrapped
+
     return decorator
 
 
 def strip_comments(s):
     """Strips comment lines and docstring from Python source string."""
-    o = ''
+    o = ""
     in_docstring = False
-    for l in s.split('\n'):
-        if l.strip().startswith(('#', '"', "'")) or in_docstring:
+    for l in s.split("\n"):
+        if l.strip().startswith(("#", '"', "'")) or in_docstring:
             in_docstring = l.strip().startswith(('"""', "'''")) + in_docstring == 1
             continue
-        o += l + '\n'
+        o += l + "\n"
     return o
@@ -42,7 +43,7 @@ def checksum(*args, csum=None):
     """
     Calculate a checksum of any object, by sha1 hash.
 
     Input for the hash are some salt bytes and the byte encoding of a string
     that depends on the object and its type:
 
     - If a method __checksum__ is available, its return value is converted to bytes
@@ -58,8 +59,8 @@ def checksum(*args, csum=None):
         csum.update(str(SALT).encode())
 
     for arg in args:
-        if hasattr(arg, '__checksum__'):
-            logger.debug('Checksum via __checksum__: %s', str(arg))
+        if hasattr(arg, "__checksum__"):
+            logger.debug("Checksum via __checksum__: %s", str(arg))
             csum.update(str(arg.__checksum__()).encode())
         elif isinstance(arg, bytes):
             csum.update(arg)
@@ -74,7 +75,7 @@ def checksum(*args, csum=None):
                 if v is not arg:
                     checksum(v, csum=csum)
         elif isinstance(arg, functools.partial):
-            logger.debug('Checksum via partial for %s', str(arg))
+            logger.debug("Checksum via partial for %s", str(arg))
             checksum(arg.func, csum=csum)
             for x in arg.args:
                 checksum(x, csum=csum)
@@ -84,8 +85,7 @@ def checksum(*args, csum=None):
         elif isinstance(arg, np.ndarray):
             csum.update(arg.tobytes())
         else:
-            logger.debug('Checksum via str for %s', str(arg))
+            logger.debug("Checksum via str for %s", str(arg))
             csum.update(str(arg).encode())
 
-    return int.from_bytes(csum.digest(), 'big')
+    return int.from_bytes(csum.digest(), "big")
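A hedged sketch of the version decorator defined above: it pins the function's checksum to its name and a version number, so autosaved results survive cosmetic edits to the body (import path assumed):

from mdevaluate.checksum import checksum, version

@version(2)
def observable(frame):
    return (frame**2).sum()

csum = checksum(observable)  # depends on name and version number, not on the code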

View File

@@ -6,30 +6,32 @@ from . import open as md_open
 
 def run(*args, **kwargs):
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        'xtcfile',
-        help='The xtc file to index.',
+        "xtcfile",
+        help="The xtc file to index.",
     )
     parser.add_argument(
-        '--tpr',
-        help='The tprfile of the trajectory.',
-        dest='tpr', default=None
+        "--tpr", help="The tprfile of the trajectory.", dest="tpr", default=None
     )
     parser.add_argument(
-        '--nojump',
-        help='Generate Nojump Matrices, requires a tpr file.',
-        dest='nojump', action='store_true', default=False
+        "--nojump",
+        help="Generate Nojump Matrices, requires a tpr file.",
+        dest="nojump",
+        action="store_true",
+        default=False,
     )
     parser.add_argument(
-        '--debug',
-        help='Set logging level to debug.',
-        dest='debug', action='store_true', default=False
+        "--debug",
+        help="Set logging level to debug.",
+        dest="debug",
+        action="store_true",
+        default=False,
     )
     args = parser.parse_args()
     if args.debug:
-        logging.setlevel('DEBUG')
-    md_open('', trajectory=args.xtcfile, topology=args.tpr, nojump=args.nojump)
+        logging.setlevel("DEBUG")
+    md_open("", trajectory=args.xtcfile, topology=args.tpr, nojump=args.nojump)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     run()
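For reference, a hypothetical invocation of this indexing CLI; the entry-point name is an assumption, only the flags are taken from the parser above:

# index-xtc trajectory.xtc --tpr topol.tpr --nojump --debug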

View File

@@ -31,14 +31,14 @@ def rotate_axis(coords, axis):
     # return theta/pi, rotation_axis
 
     ux, uy, uz = rotation_axis
-    cross_matrix = np.array([
-        [0, -uz, uy],
-        [uz, 0, -ux],
-        [-uy, ux, 0]
-    ])
-    rotation_matrix = np.cos(theta) * np.identity(len(axis)) \
-        + (1 - np.cos(theta)) * rotation_axis.reshape(-1, 1) @ rotation_axis.reshape(1, -1) \
-        + np.sin(theta) * cross_matrix
+    cross_matrix = np.array([[0, -uz, uy], [uz, 0, -ux], [-uy, ux, 0]])
+    rotation_matrix = (
+        np.cos(theta) * np.identity(len(axis))
+        + (1 - np.cos(theta))
+        * rotation_axis.reshape(-1, 1)
+        @ rotation_axis.reshape(1, -1)
+        + np.sin(theta) * cross_matrix
+    )
 
     if len(coords.shape) == 2:
         rotated = np.array([rotation_matrix @ xyz for xyz in coords])
@@ -54,12 +54,12 @@ def spherical_radius(frame, origin=None):
     """
     if origin is None:
         origin = frame.box.diagonal() / 2
-    return ((frame - origin)**2).sum(axis=-1)**0.5
+    return ((frame - origin) ** 2).sum(axis=-1) ** 0.5
 
 
 def polar_coordinates(x, y):
     """Convert cartesian to polar coordinates."""
-    radius = (x**2 + y**2)**0.5
+    radius = (x**2 + y**2) ** 0.5
     phi = np.arctan2(y, x)
     return radius, phi
@@ -67,11 +67,11 @@ def polar_coordinates(x, y):
 
 def spherical_coordinates(x, y, z):
     """Convert cartesian to spherical coordinates."""
     xy, phi = polar_coordinates(x, y)
-    radius = (x**2 + y**2 + z**2)**0.5
+    radius = (x**2 + y**2 + z**2) ** 0.5
     theta = np.arccos(z / radius)
     return radius, phi, theta
 
 
 def radial_selector(frame, coordinates, rmin, rmax):
     """
     Return a selection of all atoms with radius in the interval [rmin, rmax].
@@ -103,8 +103,7 @@ def spatial_selector(frame, transform, rmin, rmax):
 
 class CoordinateFrame(np.ndarray):
-
-    _known_modes = ('pbc', 'whole', 'nojump')
+    _known_modes = ("pbc", "whole", "nojump")
 
     @property
     def box(self):
@@ -144,33 +143,46 @@ class CoordinateFrame(np.ndarray):
     @property
     def selection(self):
         return self.coordinates.atom_subset.selection
 
     @property
     def whole(self):
         frame = whole(self)
-        frame.mode = 'whole'
+        frame.mode = "whole"
         return frame
 
     @property
     def pbc(self):
         frame = self % self.box.diagonal()
-        frame.mode = 'pbc'
+        frame.mode = "pbc"
         return frame
 
     @property
     def nojump(self):
-        if self.mode != 'nojump':
+        if self.mode != "nojump":
             if self.mode is not None:
-                logger.warn('Combining Nojump with other Coordinate modes is not supported and may cause unexpected results.')
+                logger.warn(
+                    "Combining Nojump with other Coordinate modes is not supported and may cause unexpected results."
+                )
             frame = nojump(self)
-            frame.mode = 'nojump'
+            frame.mode = "nojump"
             return frame
         else:
             return self
 
-    def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None,
-                coordinates=None, step=None, box=None, mode=None):
+    def __new__(
+        subtype,
+        shape,
+        dtype=float,
+        buffer=None,
+        offset=0,
+        strides=None,
+        order=None,
+        coordinates=None,
+        step=None,
+        box=None,
+        mode=None,
+    ):
         obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides)
 
         obj.coordinates = coordinates
@@ -182,11 +194,11 @@ class CoordinateFrame(np.ndarray):
         if obj is None:
             return
 
-        self.coordinates = getattr(obj, 'coordinates', None)
-        self.step = getattr(obj, 'step', None)
-        self.mode = getattr(obj, 'mode', None)
-        if hasattr(obj, 'reference'):
-            self.reference = getattr(obj, 'reference')
+        self.coordinates = getattr(obj, "coordinates", None)
+        self.step = getattr(obj, "step", None)
+        self.mode = getattr(obj, "mode", None)
+        if hasattr(obj, "reference"):
+            self.reference = getattr(obj, "reference")
 
 
 class Coordinates:
@@ -198,22 +210,26 @@ class Coordinates:
 
     def get_mode(self, mode):
         if self.atom_subset is not None:
-            return Coordinates(frames=self.frames, atom_subset=self.atom_subset, mode=mode)[self._slice]
+            return Coordinates(
+                frames=self.frames, atom_subset=self.atom_subset, mode=mode
+            )[self._slice]
         else:
-            return Coordinates(frames=self.frames, atom_filter=self.atom_filter, mode=mode)[self._slice]
+            return Coordinates(
+                frames=self.frames, atom_filter=self.atom_filter, mode=mode
+            )[self._slice]
 
     @property
     def pbc(self):
-        return self.get_mode('pbc')
+        return self.get_mode("pbc")
 
     @property
     def whole(self):
-        return self.get_mode('whole')
+        return self.get_mode("whole")
 
     @property
     def nojump(self):
-        return self.get_mode('nojump')
+        return self.get_mode("nojump")
 
     @property
     def mode(self):
         return self._mode
@@ -221,12 +237,17 @@ class Coordinates:
     @mode.setter
     def mode(self, val):
         if val in CoordinateFrame._known_modes:
-            logger.warn('Changing the Coordinates mode directly is deprecated. Use Coordinates.%s instead, which returns a copy.', val)
+            logger.warn(
+                "Changing the Coordinates mode directly is deprecated. Use Coordinates.%s instead, which returns a copy.",
+                val,
+            )
             self._mode = val
         else:
-            raise UnknownCoordinatesMode('No such mode: {}'.format(val))
+            raise UnknownCoordinatesMode("No such mode: {}".format(val))
 
-    def __init__(self, frames, atom_filter=None, atom_subset: AtomSubset=None, mode=None):
+    def __init__(
+        self, frames, atom_filter=None, atom_subset: AtomSubset = None, mode=None
+    ):
         """
         Args:
             frames: The trajectory reader
@@ -241,7 +262,9 @@ class Coordinates:
         self._mode = mode
         self.frames = frames
         self._slice = slice(None)
-        assert atom_filter is None or atom_subset is None, "Cannot use both: subset and filter"
+        assert (
+            atom_filter is None or atom_subset is None
+        ), "Cannot use both: subset and filter"
 
         if atom_filter is not None:
             self.atom_filter = atom_filter
@@ -258,7 +281,9 @@ class Coordinates:
         """Returns the fnr-th frame."""
         try:
             if self.atom_filter is not None:
-                frame = self.frames[fnr].positions[self.atom_filter].view(CoordinateFrame)
+                frame = (
+                    self.frames[fnr].positions[self.atom_filter].view(CoordinateFrame)
+                )
             else:
                 frame = self.frames.__getitem__(fnr).positions.view(CoordinateFrame)
             frame.coordinates = self
@@ -272,7 +297,7 @@ class Coordinates:
 
     def clear_cache(self):
         """Clears the frame cache, if it is enabled."""
-        if hasattr(self.get_frame, 'clear_cache'):
+        if hasattr(self.get_frame, "clear_cache"):
             self.get_frame.clear_cache()
 
     def __iter__(self):
@@ -300,7 +325,9 @@ class Coordinates:
     @wraps(AtomSubset.subset)
     def subset(self, **kwargs):
-        return Coordinates(self.frames, atom_subset=self.atom_subset.subset(**kwargs), mode=self._mode)
+        return Coordinates(
+            self.frames, atom_subset=self.atom_subset.subset(**kwargs), mode=self._mode
+        )
 
     @property
     def description(self):
@@ -312,7 +339,6 @@ class Coordinates:
 
 class MeanCoordinates(Coordinates):
-
     def __init__(self, frames, atom_filter=None, mean=1):
         super().__init__(frames, atom_filter)
         self.mean = mean
@@ -330,7 +356,6 @@ class MeanCoordinates(Coordinates):
 
 class CoordinatesMap:
-
     def __init__(self, coordinates, function):
         self.coordinates = coordinates
         self.frames = self.coordinates.frames
@@ -374,7 +399,7 @@ class CoordinatesMap:
     @property
     def description(self):
-        return '{}_{}'.format(self._description, self.coordinates.description)
+        return "{}_{}".format(self._description, self.coordinates.description)
 
     @description.setter
     def description(self, desc):
@@ -392,8 +417,8 @@ class CoordinatesMap:
     def pbc(self):
         return CoordinatesMap(self.coordinates.pbc, self.function)
 
 
 class CoordinatesFilter:
     @property
     def atom_subset(self):
         pass
@@ -465,6 +490,7 @@ def map_coordinates(func):
     @wraps(func)
     def wrapped(coordinates, **kwargs):
         return CoordinatesMap(coordinates, partial(func, **kwargs))
+
     return wrapped
@@ -496,13 +522,15 @@ def centers_of_mass(c, *, masses=None):
     number_of_masses = len(masses)
     number_of_coordinates, number_of_dimensions = c.shape
     number_of_new_coordinates = number_of_coordinates // number_of_masses
-    grouped_masses = c.reshape(number_of_new_coordinates, number_of_masses, number_of_dimensions)
+    grouped_masses = c.reshape(
+        number_of_new_coordinates, number_of_masses, number_of_dimensions
+    )
 
     return np.average(grouped_masses, axis=1, weights=masses)
 
 
 @map_coordinates
-def pore_coordinates(coordinates, origin, sym_axis='z'):
+def pore_coordinates(coordinates, origin, sym_axis="z"):
     """
     Map coordinates of a pore simulation so the pore has cylindrical symmetry.
@@ -512,9 +540,9 @@ def pore_coordinates(coordinates, origin, sym_axis='z'):
         sym_axis (opt.): Symmetry axis of the pore, may be a literal direction
             'x', 'y' or 'z' or an array of shape (3,)
     """
-    if sym_axis in ('x', 'y', 'z'):
+    if sym_axis in ("x", "y", "z"):
         rot_axis = np.zeros(shape=(3,))
-        rot_axis[['x', 'y', 'z'].index(sym_axis)] = 1
+        rot_axis[["x", "y", "z"].index(sym_axis)] = 1
     else:
         rot_axis = sym_axis
@@ -560,4 +588,4 @@ def vectors(coordinates, atoms_a, atoms_b, normed=False, box=None):
     vectors = pbc_diff(coords_a, coords_b, box=box)
     norm = np.linalg.norm(vectors, axis=-1).reshape(-1, 1) if normed else 1
     vectors.reference = coords_a
     return vectors / norm
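The rotation_matrix assembled in rotate_axis above is Rodrigues' formula, R = cos(t) I + (1 - cos(t)) u u^T + sin(t) [u]_x. As a hedged sketch of the mode properties (traj as in earlier sketches): each property returns a copy in the requested mode instead of mutating in place, which is what the deprecation warning in the mode setter asks for:

folded = traj.pbc         # coordinates wrapped back into the box
unwrapped = traj.nojump   # jump-corrected coordinates; not to be combined with other modes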

View File

@@ -34,91 +34,110 @@ def subensemble_correlation(selector_function, correlation_function=correlation)
             selector = selector_function(start_frame)
             subensemble = map(lambda f: f[selector], chain([start_frame], iterator))
             return correlation_function(function, subensemble)
 
     return c
 
 
 def multi_subensemble_correlation(selector_function):
     """
     selector_function has to expect a frame and to
     return either valid indices (as with subensemble_correlation)
     or a multidimensional array whose entries are valid indices
 
     e.g. slice(10,100,2)
     e.g. [1,2,3,4,5]
     e.g. [[[0,1],[2],[3]],[[4],[5],[6]]] -> shape: 2,3 with
          list of indices of varying length
     e.g. [slice(1653),slice(1653,None,3)]
     e.g. [np.ones(len_of_frames, bool)]
 
     In general, using slices is the most efficient.
     If the selections are small subsets of a frame or when many subsets are empty,
     using indices will be more efficient than using masks.
     """
 
     @set_has_counter
     def cmulti(function, frames):
         iterator = iter(frames)
         start_frame = next(iterator)
         selectors = np.asarray(selector_function(start_frame))
         sel_shape = selectors.shape
-        if sel_shape[-1] == 0: selectors = np.asarray(selectors,int)
-        if (selectors.dtype != object): sel_shape = sel_shape[:-1]
-        f_values = np.zeros(sel_shape + function(start_frame,start_frame).shape,)
-        count = np.zeros(sel_shape, dtype=int)
+        if sel_shape[-1] == 0:
+            selectors = np.asarray(selectors, int)
+        if selectors.dtype != object:
+            sel_shape = sel_shape[:-1]
+        f_values = np.zeros(
+            sel_shape + function(start_frame, start_frame).shape,
+        )
+        count = np.zeros(sel_shape, dtype=int)
         is_first_frame_loop = True
 
         def cc(act_frame):
             nonlocal is_first_frame_loop
             for index in np.ndindex(sel_shape):
                 sel = selectors[index]
                 sf_sel = start_frame[sel]
                 if is_first_frame_loop:
                     count[index] = len(sf_sel)
-                f_values[index] = function(sf_sel, act_frame[sel]) if count[index] != 0 else 0
+                f_values[index] = (
+                    function(sf_sel, act_frame[sel]) if count[index] != 0 else 0
+                )
             is_first_frame_loop = False
             return np.asarray(f_values.copy())
 
         return map(cc, chain([start_frame], iterator)), count
 
     return cmulti
 
 
 @autosave_data(2)
-def shifted_correlation(function, frames, selector=None, segments=10,
-                        skip=0.1, window=0.5, average=True, points=100,
-                        nodes=8):
+def shifted_correlation(
+    function,
+    frames,
+    selector=None,
+    segments=10,
+    skip=0.1,
+    window=0.5,
+    average=True,
+    points=100,
+    nodes=8,
+):
     """
     Calculate the time series for a correlation function.
 
     The times at which the correlation is calculated are determined by
     a logarithmic distribution.
 
     Args:
         function: The function that should be correlated
         frames: The coordinates of the simulation data
         selector (opt.):
            A function that returns the indices, depending on
            the starting frame, of the particles for which the
            correlation should be calculated
        segments (int, opt.):
            The number of segments the time window will be
            shifted
        skip (float, opt.):
            The fraction of the trajectory that will be skipped
            at the beginning; if this is None the start index
            of the frames slice will be used, which defaults
            to 0.1.
        window (float, opt.):
            The fraction of the simulation the time series will
            cover
        average (bool, opt.):
            If True, returns the averaged correlation function
        points (int, opt.):
            The number of time shifts for which the correlation
            should be calculated
        nodes (int, opt.):
            Number of nodes used for parallelization
 
     Returns:
         tuple:
            A list of length N that contains the time shifts of the frames at which
@@ -130,6 +149,7 @@ def shifted_correlation(function, frames, selector=None, segments=10,
         >>> time, data = shifted_correlation(msd, coords)
     """
 
     def get_correlation(start_frame, idx, selector=None):
         shifted_idx = idx + start_frame
         if selector:
@@ -140,42 +160,51 @@ def shifted_correlation(function, frames, selector=None, segments=10,
             return np.zeros(len(idx))
         else:
             start = frames[start_frame][index]
-            correlation = np.array([ function(start, frames[frame][index])
-                                     for frame in shifted_idx ])
+            correlation = np.array(
+                [function(start, frames[frame][index]) for frame in shifted_idx]
+            )
             return correlation
 
-    if 1-skip < window:
-        window = 1-skip
-    start_frames = np.unique(np.linspace(len(frames)*skip,
-                                         len(frames)*(1-window),
-                                         num=segments, endpoint=False,
-                                         dtype=int))
+    if 1 - skip < window:
+        window = 1 - skip
+    start_frames = np.unique(
+        np.linspace(
+            len(frames) * skip,
+            len(frames) * (1 - window),
+            num=segments,
+            endpoint=False,
+            dtype=int,
+        )
+    )
 
     num_frames = int(len(frames) * window)
     ls = np.logspace(0, np.log10(num_frames + 1), num=points)
     idx = np.unique(np.int_(ls) - 1)
     t = np.array([frames[i].time for i in idx]) - frames[0].time
 
     if nodes == 1:
-        result = np.array([get_correlation(start_frame, idx=idx,
-                                           selector=selector)
-                           for start_frame in start_frames])
+        result = np.array(
+            [
+                get_correlation(start_frame, idx=idx, selector=selector)
+                for start_frame in start_frames
+            ]
+        )
     else:
         pool = ProcessPool(nodes=nodes)
-        result = np.array(pool.map(partial(get_correlation, idx=idx,
-                                           selector=selector),
-                                   start_frames))
+        result = np.array(
+            pool.map(partial(get_correlation, idx=idx, selector=selector), start_frames)
+        )
         pool.terminate()
         pool.restart()
 
     if average == True:
         clean_result = []
         for entry in result:
             if np.all(entry == 0):
                 continue
             else:
                 clean_result.append(entry)
         result = np.array(clean_result)
         result = np.average(result, axis=0)
     return t, result
@@ -186,7 +215,7 @@ def msd(start, frame):
     Mean square displacement
     """
     vec = start - frame
-    return (vec ** 2).sum(axis=1).mean()
+    return (vec**2).sum(axis=1).mean()
 
 
 def isf(start, frame, q, box=None):
@@ -197,7 +226,7 @@ def isf(start, frame, q, box=None):
     :param q: length of scattering vector
     """
     vec = start - frame
-    distance = (vec ** 2).sum(axis=1) ** .5
+    distance = (vec**2).sum(axis=1) ** 0.5
     return np.sinc(distance * q / np.pi).mean()
@@ -226,11 +255,13 @@ def van_hove_self(start, end, bins):
         G(r, t) = \sum_i \delta(|\vec r_i(0) - \vec r_i(t)| - r)
     """
     vec = start - end
-    delta_r = ((vec)**2).sum(axis=-1)**.5
+    delta_r = ((vec) ** 2).sum(axis=-1) ** 0.5
     return 1 / len(start) * histogram(delta_r, bins)[0]
 
 
-def van_hove_distinct(onset, frame, bins, box=None, use_dask=True, comp=False, bincount=True):
+def van_hove_distinct(
+    onset, frame, bins, box=None, use_dask=True, comp=False, bincount=True
+):
     r"""
     Compute the distinct part of the Van Hove autocorrelation function.
@@ -242,9 +273,13 @@ def van_hove_distinct(onset, frame, bins, box=None, use_dask=True, comp=False, b
     dimension = len(box)
     N = len(onset)
     if use_dask:
-        onset = darray.from_array(onset, chunks=(500, dimension)).reshape(1, N, dimension)
-        frame = darray.from_array(frame, chunks=(500, dimension)).reshape(N, 1, dimension)
-        dist = ((pbc_diff(onset, frame, box)**2).sum(axis=-1)**0.5).ravel()
+        onset = darray.from_array(onset, chunks=(500, dimension)).reshape(
+            1, N, dimension
+        )
+        frame = darray.from_array(frame, chunks=(500, dimension)).reshape(
+            N, 1, dimension
+        )
+        dist = ((pbc_diff(onset, frame, box) ** 2).sum(axis=-1) ** 0.5).ravel()
         if np.diff(bins).std() < 1e6:
             dx = bins[0] - bins[1]
             hist = darray.bincount((dist // dx).astype(int), minlength=(len(bins) - 1))
@@ -253,16 +288,20 @@ def van_hove_distinct(onset, frame, bins, box=None, use_dask=True, comp=False, b
         return hist.compute() / N
     else:
         if comp:
             dx = bins[1] - bins[0]
             minlength = len(bins) - 1
 
             def f(x):
-                d = (pbc_diff(x, frame, box)**2).sum(axis=-1)**0.5
-                return np.bincount((d // dx).astype(int), minlength=minlength)[:minlength]
+                d = (pbc_diff(x, frame, box) ** 2).sum(axis=-1) ** 0.5
+                return np.bincount((d // dx).astype(int), minlength=minlength)[
+                    :minlength
+                ]
 
             hist = sum(f(x) for x in onset)
         else:
-            dist = (pbc_diff(onset.reshape(1, -1, 3), frame.reshape(-1, 1, 3), box)**2).sum(axis=-1)**0.5
+            dist = (
+                pbc_diff(onset.reshape(1, -1, 3), frame.reshape(-1, 1, 3), box) ** 2
+            ).sum(axis=-1) ** 0.5
             hist = histogram(dist, bins=bins)[0]
         return hist / N
@@ -304,9 +343,12 @@ def susceptibility(time, correlation, **kwargs):
         **kwargs (opt.):
             Additional keyword arguments will be passed to :func:`filon_fourier_transformation`.
     """
-    frequencies, fourier = filon_fourier_transformation(time, correlation, imag=False, **kwargs)
+    frequencies, fourier = filon_fourier_transformation(
+        time, correlation, imag=False, **kwargs
+    )
     return frequencies, frequencies * fourier
 
 
 def coherent_scattering_function(onset, frame, q):
     """
     Calculate the coherent scattering function.
@@ -331,11 +373,12 @@ def coherent_scattering_function(onset, frame, q):
     return coherent_sum(scfunc, onset.pbc, frame.pbc) / len(onset)
 
 
 def non_gaussian(onset, frame):
     r"""
     Calculate the Non-Gaussian parameter:
 
     .. math::
        \alpha_2 (t) = \frac{3}{5}\frac{\langle r_i^4(t)\rangle}{\langle r_i^2(t)\rangle^2} - 1
     """
-    r_2 = ((frame - onset)**2).sum(axis=-1)
-    return 3 / 5 * (r_2**2).mean() / r_2.mean()**2 - 1
+    r_2 = ((frame - onset) ** 2).sum(axis=-1)
+    return 3 / 5 * (r_2**2).mean() / r_2.mean() ** 2 - 1
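Complementing the msd doctest in the shifted_correlation docstring, a hedged sketch of an incoherent scattering function computed with the functions above; the q value is arbitrary and traj is as in earlier sketches:

from functools import partial

t, fqt = shifted_correlation(partial(isf, q=22.7), traj, segments=20, nodes=1)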

View File

@@ -9,7 +9,7 @@ from .logging import logger
 from scipy import spatial
 
 
-@autosave_data(nargs=2, kwargs_keys=('coordinates_b',), version='time_average-1')
+@autosave_data(nargs=2, kwargs_keys=("coordinates_b",), version="time_average-1")
 def time_average(function, coordinates, coordinates_b=None, pool=None):
     """
     Compute the time average of a function.
@@ -45,7 +45,7 @@ def time_average(function, coordinates, coordinates_b=None, pool=None):
         number_of_averages += 1
         result += ev
         if number_of_averages % 100 == 0:
-            logger.debug('time_average: %d', number_of_averages)
+            logger.debug("time_average: %d", number_of_averages)
     return result / number_of_averages
@@ -78,7 +78,16 @@ def time_histogram(function, coordinates, bins, hist_range, pool=None):
     return hist_results
 
 
-def rdf(atoms_a, atoms_b=None, bins=None, box=None, kind=None, chunksize=50000, returnx=False, **kwargs):
+def rdf(
+    atoms_a,
+    atoms_b=None,
+    bins=None,
+    box=None,
+    kind=None,
+    chunksize=50000,
+    returnx=False,
+    **kwargs
+):
     r"""
     Compute the radial pair distribution of one or two sets of atoms.
@@ -102,8 +111,12 @@ def rdf(atoms_a, atoms_b=None, bins=None, box=None, kind=None, chunksize=50000,
             as large as possible, depending on the available memory.
         returnx (opt.): If True the x ordinate of the histogram is returned.
     """
-    assert bins is not None, 'Bins of the pair distribution have to be defined.'
-    assert kind in ['intra', 'inter', None], 'Argument kind must be one of the following: intra, inter, None.'
+    assert bins is not None, "Bins of the pair distribution have to be defined."
+    assert kind in [
+        "intra",
+        "inter",
+        None,
+    ], "Argument kind must be one of the following: intra, inter, None."
     if box is None:
         box = atoms_a.box.diagonal()
     if atoms_b is None:
@@ -121,18 +134,24 @@ def rdf(atoms_a, atoms_b=None, bins=None, box=None, kind=None, chunksize=50000,
     for chunk in range(0, len(indices[0]), chunksize):
         sl = slice(chunk, chunk + chunksize)
         diff = pbc_diff(atoms_a[indices[0][sl]], atoms_b[indices[1][sl]], box)
-        dist = (diff**2).sum(axis=1)**0.5
-        if kind == 'intra':
-            mask = atoms_a.residue_ids[indices[0][sl]] == atoms_b.residue_ids[indices[1][sl]]
+        dist = (diff**2).sum(axis=1) ** 0.5
+        if kind == "intra":
+            mask = (
+                atoms_a.residue_ids[indices[0][sl]]
+                == atoms_b.residue_ids[indices[1][sl]]
+            )
             dist = dist[mask]
-        elif kind == 'inter':
-            mask = atoms_a.residue_ids[indices[0][sl]] != atoms_b.residue_ids[indices[1][sl]]
+        elif kind == "inter":
+            mask = (
+                atoms_a.residue_ids[indices[0][sl]]
+                != atoms_b.residue_ids[indices[1][sl]]
+            )
             dist = dist[mask]
 
         nr_of_samples += len(dist)
         hist += np.histogram(dist, bins)[0]
 
-    volume = 4 / 3 * np.pi * (bins[1:]**3 - bins[:-1]**3)
+    volume = 4 / 3 * np.pi * (bins[1:] ** 3 - bins[:-1] ** 3)
     density = nr_of_samples / np.prod(box)
     res = hist / volume / density
     if returnx:
@@ -141,87 +160,117 @@ def rdf(atoms_a, atoms_b=None, bins=None, box=None, kind=None, chunksize=50000,
     return res
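A hedged usage sketch of rdf; bins share the length unit of the trajectory (nm for GROMACS data), and water_oxygen is the hypothetical subset from the atoms sketch above:

import numpy as np

bins = np.arange(0.0, 2.0, 0.01)
g_oo = rdf(water_oxygen[0], bins=bins, kind="inter")  # intermolecular O-O pairs of one frame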
def pbc_tree_rdf(atoms_a, atoms_b=None, bins=None, box=None, exclude=0, returnx=False, **kwargs): def pbc_tree_rdf(
atoms_a, atoms_b=None, bins=None, box=None, exclude=0, returnx=False, **kwargs
):
if box is None: if box is None:
box = atoms_a.box.diagonal() box = atoms_a.box.diagonal()
all_coords = pbc_points(atoms_b, box, thickness=np.amax(bins)+0.1) all_coords = pbc_points(atoms_b, box, thickness=np.amax(bins) + 0.1)
to_tree = spatial.cKDTree(all_coords) to_tree = spatial.cKDTree(all_coords)
dist = to_tree.query(pbc_diff(atoms_a,box=box),k=len(atoms_b), distance_upper_bound=np.amax(bins)+0.1)[0].flatten() dist = to_tree.query(
pbc_diff(atoms_a, box=box),
k=len(atoms_b),
distance_upper_bound=np.amax(bins) + 0.1,
)[0].flatten()
dist = dist[dist < np.inf] dist = dist[dist < np.inf]
hist = np.histogram(dist, bins)[0] hist = np.histogram(dist, bins)[0]
volume = 4/3*np.pi*(bins[1:]**3-bins[:-1]**3) volume = 4 / 3 * np.pi * (bins[1:] ** 3 - bins[:-1] ** 3)
res = (hist) * np.prod(box) / volume / len(atoms_a) / (len(atoms_b)-exclude) res = (hist) * np.prod(box) / volume / len(atoms_a) / (len(atoms_b) - exclude)
if returnx: if returnx:
return np.vstack((runningmean(bins, 2), res)) return np.vstack((runningmean(bins, 2), res))
else: else:
return res return res
def pbc_spm_rdf(atoms_a, atoms_b=None, bins=None, box=None, exclude=0, returnx=False, **kwargs): def pbc_spm_rdf(
atoms_a, atoms_b=None, bins=None, box=None, exclude=0, returnx=False, **kwargs
):
if box is None: if box is None:
box = atoms_a.box box = atoms_a.box
all_coords = pbc_points(atoms_b, box, thickness=np.amax(bins)+0.1) all_coords = pbc_points(atoms_b, box, thickness=np.amax(bins) + 0.1)
to_tree = spatial.cKDTree(all_coords) to_tree = spatial.cKDTree(all_coords)
if all_coords.nbytes/1024**3 * len(atoms_a) < 2: if all_coords.nbytes / 1024**3 * len(atoms_a) < 2:
from_tree = spatial.cKDTree(pbc_diff(atoms_a,box=box)) from_tree = spatial.cKDTree(pbc_diff(atoms_a, box=box))
dist = to_tree.sparse_distance_matrix(from_tree, max_distance=np.amax(bins)+0.1, output_type='ndarray') dist = to_tree.sparse_distance_matrix(
dist = np.asarray(dist.tolist())[:,2] from_tree, max_distance=np.amax(bins) + 0.1, output_type="ndarray"
)
dist = np.asarray(dist.tolist())[:, 2]
hist = np.histogram(dist, bins)[0] hist = np.histogram(dist, bins)[0]
else: else:
chunksize = int(2 * len(atoms_a) / (all_coords.nbytes/1024**3 * len(atoms_a))) chunksize = int(
2 * len(atoms_a) / (all_coords.nbytes / 1024**3 * len(atoms_a))
)
hist = 0 hist = 0
for chunk in range(0, len(atoms_a), chunksize): for chunk in range(0, len(atoms_a), chunksize):
sl = slice(chunk, chunk + chunksize) sl = slice(chunk, chunk + chunksize)
from_tree = spatial.cKDTree(pbc_diff(atoms_a[sl],box=box)) from_tree = spatial.cKDTree(pbc_diff(atoms_a[sl], box=box))
dist = to_tree.sparse_distance_matrix(from_tree, max_distance=np.amax(bins)+0.1, output_type='ndarray') dist = to_tree.sparse_distance_matrix(
dist = np.asarray(dist.tolist())[:,2] from_tree, max_distance=np.amax(bins) + 0.1, output_type="ndarray"
)
dist = np.asarray(dist.tolist())[:, 2]
hist += np.histogram(dist, bins)[0] hist += np.histogram(dist, bins)[0]
volume = 4/3*np.pi*(bins[1:]**3-bins[:-1]**3) volume = 4 / 3 * np.pi * (bins[1:] ** 3 - bins[:-1] ** 3)
res = (hist) * np.prod(box) / volume / len(atoms_a) / (len(atoms_b)-exclude) res = (hist) * np.prod(box) / volume / len(atoms_a) / (len(atoms_b) - exclude)
if returnx: if returnx:
return np.vstack((runningmean(bins, 2), res)) return np.vstack((runningmean(bins, 2), res))
else: else:
return res return res
@autosave_data(nargs=2, kwargs_keys=("to_coords", "times"))
def fast_averaged_rdf(from_coords, bins, to_coords=None, times=10, exclude=0, **kwargs):
    if to_coords is None:
        to_coords = from_coords
        exclude = 1
    # first find timings for the different rdf functions
    import time

    # only consider the sparse matrix variant under this condition
    if (len(from_coords[0]) * len(to_coords[0]) <= 3000 * 2000) & (
        len(from_coords[0]) / len(to_coords[0]) > 5
    ):
        funcs = [rdf, pbc_tree_rdf, pbc_spm_rdf]
    else:
        funcs = [rdf, pbc_tree_rdf]
    timings = []
    for f in funcs:
        start = time.time()
        f(
            from_coords[0],
            atoms_b=to_coords[0],
            bins=bins,
            box=np.diag(from_coords[0].box),
        )
        end = time.time()
        timings.append(end - start)
    timings = np.array(timings)
    timings[0] = (
        2 * timings[0]
    )  # statistics for the other functions are twice as good per frame
    logger.debug("rdf function timings: " + str(timings))
    rdffunc = funcs[np.argmin(timings)]
    logger.debug("rdf function used: " + str(rdffunc))
    if rdffunc == rdf:
        times = times * 2  # duplicate times for the same statistics
    frames = np.array(range(0, len(from_coords), int(len(from_coords) / times)))[:times]
    out = np.zeros(len(bins) - 1)
    for j, i in enumerate(frames):
        logger.debug("multi_radial_pair_distribution: %d/%d", j, len(frames))
        out += rdffunc(
            from_coords[i],
            to_coords[i],
            bins,
            box=np.diag(from_coords[i].box),
            exclude=exclude,
        )
    return out / len(frames)
def distance_distribution(atoms, bins):
    connection_vectors = atoms[:-1, :] - atoms[1:, :]
    connection_lengths = (connection_vectors**2).sum(axis=1) ** 0.5
    return np.histogram(connection_lengths, bins)[0]
@@ -230,8 +279,12 @@ def tetrahedral_order(atoms, reference_atoms=None):
        reference_atoms = atoms
    indices = next_neighbors(reference_atoms, query_atoms=atoms, number_of_neighbors=4)
    neighbors = reference_atoms[indices]
    neighbors_1, neighbors_2, neighbors_3, neighbors_4 = (
        neighbors[:, 0, :],
        neighbors[:, 1, :],
        neighbors[:, 2, :],
        neighbors[:, 3, :],
    )

    # Connection vectors
    neighbors_1 -= atoms
@@ -245,14 +298,14 @@ def tetrahedral_order(atoms, reference_atoms=None):
    neighbors_3 /= np.linalg.norm(neighbors_3, axis=-1).reshape(-1, 1)
    neighbors_4 /= np.linalg.norm(neighbors_4, axis=-1).reshape(-1, 1)

    a_1_2 = ((neighbors_1 * neighbors_2).sum(axis=1) + 1 / 3) ** 2
    a_1_3 = ((neighbors_1 * neighbors_3).sum(axis=1) + 1 / 3) ** 2
    a_1_4 = ((neighbors_1 * neighbors_4).sum(axis=1) + 1 / 3) ** 2
    a_2_3 = ((neighbors_2 * neighbors_3).sum(axis=1) + 1 / 3) ** 2
    a_2_4 = ((neighbors_2 * neighbors_4).sum(axis=1) + 1 / 3) ** 2
    a_3_4 = ((neighbors_3 * neighbors_4).sum(axis=1) + 1 / 3) ** 2

    q = 1 - 3 / 8 * (a_1_2 + a_1_3 + a_1_4 + a_2_3 + a_2_4 + a_3_4)
@@ -260,12 +313,14 @@ def tetrahedral_order(atoms, reference_atoms=None):
def tetrahedral_order_distribution(atoms, reference_atoms=None, bins=None):
    assert bins is not None, "Bin edges of the distribution have to be specified."
    Q = tetrahedral_order(atoms, reference_atoms=reference_atoms)
    return np.histogram(Q, bins=bins)[0]
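As a quick plausibility check of the order parameter q = 1 - 3/8 Σ (cos θ + 1/3)² computed above (standalone sketch, numpy only): the four ideal tetrahedral directions have pairwise cosines of exactly -1/3, so every term vanishes and q = 1.

import numpy as np

vertices = np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=float)
vertices /= np.linalg.norm(vertices, axis=1, keepdims=True)
pair_terms = [
    ((vertices[i] * vertices[j]).sum() + 1 / 3) ** 2
    for i in range(4)
    for j in range(i + 1, 4)
]
print(1 - 3 / 8 * sum(pair_terms))  # 1.0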
def radial_density(
    atoms, bins, symmetry_axis=(0, 0, 1), origin=(0, 0, 0), height=1, returnx=False
):
    """
    Calculate the radial density distribution.
@@ -290,7 +345,7 @@ def radial_density(atoms, bins, symmetry_axis=(0, 0, 1), origin=(0, 0, 0), heigh
    cartesian = rotate_axis(atoms - origin, symmetry_axis)
    radius, _ = polar_coordinates(cartesian[:, 0], cartesian[:, 1])
    hist = np.histogram(radius, bins=bins)[0]
    volume = np.pi * (bins[1:] ** 2 - bins[:-1] ** 2) * height
    res = hist / volume
    if returnx:
        return np.vstack((runningmean(bins, 2), res))
@@ -298,8 +353,14 @@ def radial_density(atoms, bins, symmetry_axis=(0, 0, 1), origin=(0, 0, 0), heigh
        return res
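A standalone check of the cylindrical-shell normalization pi * (bins[1:]**2 - bins[:-1]**2) * height used here (sketch, numpy only): points uniform over a disc of radius 1 and height 1 give a flat radial profile.

import numpy as np

rng = np.random.default_rng(1)
n = 100_000
r = np.sqrt(rng.uniform(0, 1, n))  # sqrt makes the disc area uniformly filled
bins = np.linspace(0, 1, 11)
hist = np.histogram(r, bins=bins)[0]
volume = np.pi * (bins[1:] ** 2 - bins[:-1] ** 2) * 1.0
print((hist / volume).round(0))  # ~n/pi in every bin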
def shell_density(
    atoms,
    shell_radius,
    bins,
    shell_thickness=0.5,
    symmetry_axis=(0, 0, 1),
    origin=(0, 0, 0),
):
    """
    Compute the density distribution on a cylindrical shell.
@@ -316,9 +377,11 @@ def shell_density(atoms, shell_radius, bins, shell_thickness=0.5,
    Returns:
        Two-dimensional density distribution of the atoms in the defined shell.
    """
    cartesian = rotate_axis(atoms - origin, symmetry_axis)
    radius, theta = polar_coordinates(cartesian[:, 0], cartesian[:, 1])
    shell_indices = (shell_radius <= radius) & (
        radius <= shell_radius + shell_thickness
    )
    hist = np.histogram2d(theta[shell_indices], cartesian[shell_indices, 2], bins)[0]
    return hist
@@ -332,90 +395,121 @@ def spatial_density(atoms, bins, weights=None):
    return density
def mixing_ratio_distribution(
    atoms_a,
    atoms_b,
    bins_ratio,
    bins_density,
    weights_a=None,
    weights_b=None,
    weights_ratio=None,
):
    """
    Compute the distribution of the mixing ratio of two sets of atoms.
    """
    density_a, _ = np.histogramdd(atoms_a, bins=bins_density, weights=weights_a)
    density_b, _ = np.histogramdd(atoms_b, bins=bins_density, weights=weights_b)
    mixing_ratio = density_a / (density_a + density_b)
    good_inds = (density_a != 0) & (density_b != 0)
    hist, _ = np.histogram(
        mixing_ratio[good_inds], bins=bins_ratio, weights=weights_ratio
    )
    return hist
def next_neighbor_distribution(
    atoms, reference=None, number_of_neighbors=4, bins=None, normed=True
):
    """
    Compute the distribution of next neighbors with the same residue name.
    """
    assert bins is not None, "Bins have to be specified."
    if reference is None:
        reference = atoms
    nn = next_neighbors(
        reference, query_atoms=atoms, number_of_neighbors=number_of_neighbors
    )
    resname_nn = reference.residue_names[nn]
    count_nn = (resname_nn == atoms.residue_names.reshape(-1, 1)).sum(axis=1)
    return np.histogram(count_nn, bins=bins, normed=normed)[0]
def hbonds(
    D,
    H,
    A,
    box,
    DA_lim=0.35,
    HA_lim=0.35,
    min_cos=np.cos(30 * np.pi / 180),
    full_output=False,
):
    """
    Compute h-bond pairs

    Args:
        D: Set of coordinates for donors.
        H: Set of coordinates for hydrogen atoms. Should have the same
            length as D.
        A: Set of coordinates for acceptors.
        DA_lim (opt.): Maximum distance between donor and acceptor.
        HA_lim (opt.): Maximum distance between hydrogen and acceptor.
        min_cos (opt.): Minimum cosine for the HDA angle. Default is
            equivalent to a maximum angle of 30 degrees.
        full_output (opt.): Additionally return the cosines of the
            angles and the DA distances.

    Return:
        List of (D, A)-pairs in hbonds.
    """

    def dist_DltA(D, H, A, box, max_dist=0.35):
        ppoints, pind = pbc_points(D, box, thickness=max_dist + 0.1, index=True)
        Dtree = spatial.cKDTree(ppoints)
        Atree = spatial.cKDTree(A)
        pairs = Dtree.sparse_distance_matrix(Atree, max_dist, output_type="ndarray")
        pairs = np.asarray(pairs.tolist())
        pairs = np.int_(pairs[pairs[:, 2] > 0][:, :2])
        pairs[:, 0] = pind[pairs[:, 0]]
        return pairs

    def dist_AltD(D, H, A, box, max_dist=0.35):
        ppoints, pind = pbc_points(A, box, thickness=max_dist + 0.1, index=True)
        Atree = spatial.cKDTree(ppoints)
        Dtree = spatial.cKDTree(D)
        pairs = Atree.sparse_distance_matrix(Dtree, max_dist, output_type="ndarray")
        pairs = np.asarray(pairs.tolist())
        pairs = np.int_(pairs[pairs[:, 2] > 0][:, :2])
        pairs = pairs[:, ::-1]
        pairs[:, 1] = pind[pairs[:, 1]]
        return pairs

    if len(D) <= len(A):
        pairs = dist_DltA(D, H, A, box, DA_lim)
    else:
        pairs = dist_AltD(D, H, A, box, DA_lim)

    vDH = pbc_diff(D[pairs[:, 0]], H[pairs[:, 0]], box)
    vDA = pbc_diff(D[pairs[:, 0]], A[pairs[:, 1]], box)
    vHA = pbc_diff(H[pairs[:, 0]], A[pairs[:, 1]], box)
    angles_cos = np.clip(
        np.einsum("ij,ij->i", vDH, vDA)
        / np.linalg.norm(vDH, axis=1)
        / np.linalg.norm(vDA, axis=1),
        -1,
        1,
    )
    is_bond = (
        (angles_cos >= min_cos)
        & (np.sum(vHA**2, axis=-1) <= HA_lim**2)
        & (np.sum(vDA**2, axis=-1) <= DA_lim**2)
    )
    if full_output:
        return (
            pairs[is_bond],
            angles_cos[is_bond],
            np.sum(vDA[is_bond] ** 2, axis=-1) ** 0.5,
        )
    else:
        return pairs[is_bond]
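A minimal synthetic sketch of the geometric criterion (assuming hbonds and its pbc helpers are importable from this module; coordinates in nm): a collinear O-H...O arrangement 0.3 nm apart satisfies both distance cutoffs and has an HDA angle of zero, so exactly one (D, A) pair is returned.

import numpy as np

box = np.diag([3.0, 3.0, 3.0])
D = np.array([[1.0, 1.0, 1.0]])  # donor
H = np.array([[1.1, 1.0, 1.0]])  # hydrogen on the D->A axis
A = np.array([[1.3, 1.0, 1.0]])  # acceptor, 0.3 nm from the donor
print(hbonds(D, H, A, box))      # [[0 0]]: donor 0 bonded to acceptor 0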

View File

@@ -2,20 +2,25 @@ import numpy as np
def kww(t, A, τ, β):
    return A * np.exp(-((t / τ) ** β))


def kww_1e(A, τ, β):
    return τ * (-np.log(1 / (np.e * A))) ** (1 / β)


def cole_davidson(w, A, b, t0):
    P = np.arctan(w * t0)
    return A * np.cos(P) ** b * np.sin(b * P)


def cole_cole(w, A, b, t0):
    return (
        A
        * (w * t0) ** b
        * np.sin(np.pi * b / 2)
        / (1 + 2 * (w * t0) ** b * np.cos(np.pi * b / 2) + (w * t0) ** (2 * b))
    )


def havriliak_negami(ω, A, β, α, τ):
@@ -25,14 +30,14 @@ def havriliak_negami(ω, A, β, α, τ):
    .. math::
        \chi_{HN}(\omega) = \Im\left(\frac{A}{(1 + (i\omega\tau)^\alpha)^\beta}\right)
    """
    return -(A / (1 + (1j * ω * τ) ** α) ** β).imag


# fits decay of correlation times, e.g. with distance to pore walls
def colen(d, X, t8, A):
    return t8 * np.exp(A * np.exp(-d / X))


# fits decay of the plateau height of the overlap function, e.g. with distance to pore walls
def colenQ(d, X, Qb, g):
    return (1 - Qb) * np.exp(-((d / X) ** g)) + Qb
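kww_1e is the analytic 1/e decay time of kww: setting A·exp(-(t/τ)^β) = 1/e gives t = τ·ln(eA)^(1/β). A standalone consistency check (sketch; functions restated with ASCII names so it runs on its own):

import numpy as np

def kww(t, A, tau, beta):
    return A * np.exp(-((t / tau) ** beta))

def kww_1e(A, tau, beta):
    return tau * (-np.log(1 / (np.e * A))) ** (1 / beta)

t_1e = kww_1e(A=0.95, tau=2.0, beta=0.6)
print(kww(t_1e, 0.95, 2.0, 0.6), 1 / np.e)  # both ~0.3679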

View File

@@ -1,12 +1,15 @@
import logging

logger = logging.getLogger("mdevaluate")
logger.setLevel(logging.INFO)

stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
formatter = logging.Formatter(
    "{levelname[0]}{levelname[1]}{levelname[2]}[{asctime}]:{funcName}: {message}",
    style="{",
)
stream_handler.setFormatter(formatter)

View File

@@ -8,6 +8,7 @@ from itertools import product
from .logging import logger


def pbc_diff_old(v1, v2, box):
    """
    Calculate the difference of two vectors, considering optional boundary conditions.
@@ -21,16 +22,19 @@ def pbc_diff_old(v1, v2, box):
    return v


def pbc_diff(v1, v2=None, box=None):
    if box is None:
        out = v1 - v2
    elif len(getattr(box, "shape", [])) == 1:
        out = pbc_diff_rect(v1, v2, box)
    elif len(getattr(box, "shape", [])) == 2:
        out = pbc_diff_tric(v1, v2, box)
    else:
        raise NotImplementedError("cannot handle box")
    return out


def pbc_diff_rect(v1, v2, box):
    """
    Calculate the difference of two vectors, considering periodic boundary conditions.
@@ -38,8 +42,8 @@ def pbc_diff_rect(v1, v2, box):
    if v2 is None:
        v = v1
    else:
        v = v1 - v2

    s = v / box
    v = box * (s - s.round())
    return v
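The two lines at the heart of pbc_diff_rect implement the minimum image convention: each component of the difference is folded into (essentially) [-L/2, L/2). A standalone numeric example:

import numpy as np

box = np.array([10.0, 10.0, 10.0])
v = np.array([0.1, 5.0, 0.0]) - np.array([9.9, 4.0, 0.0])  # naive diff: [-9.8, 1, 0]
s = v / box
print(box * (s - s.round()))  # [0.2 1.  0. ]: the nearest-image difference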
@@ -48,94 +52,117 @@ def pbc_diff_rect(v1, v2, box):
def pbc_diff_tric(v1, v2=None, box=None):
    """
    difference vector for arbitrary pbc

    Args:
        box_matrix: CoordinateFrame.box
    """
    if len(box.shape) == 1:
        box = np.diag(box)
    if v1.shape == (3,):
        v1 = v1.reshape((1, 3))  # quick 'n dirty
    if v2.shape == (3,):
        v2 = v2.reshape((1, 3))
    if box is not None:
        r3 = np.subtract(v1, v2)
        r2 = np.subtract(
            r3,
            (np.rint(np.divide(r3[:, 2], box[2][2])))[:, np.newaxis]
            * box[2][np.newaxis, :],
        )
        r1 = np.subtract(
            r2,
            (np.rint(np.divide(r2[:, 1], box[1][1])))[:, np.newaxis]
            * box[1][np.newaxis, :],
        )
        v = np.subtract(
            r1,
            (np.rint(np.divide(r1[:, 0], box[0][0])))[:, np.newaxis]
            * box[0][np.newaxis, :],
        )
    else:
        v = v1 - v2
    return v
def pbc_dist(a1, a2, box=None):
    return ((pbc_diff(a1, a2, box) ** 2).sum(axis=1)) ** 0.5
def pbc_extend(c, box):
    """
    in: c is frame, box is frame.box
    out: all atoms in frame and their periodic images (shape => array(len(c)*27, 3))
    """
    c = np.asarray(c)
    if c.shape == (3,):
        c = c.reshape((1, 3))  # quick 'n dirty
    comb = np.array(
        [np.asarray(i) for i in product([0, -1, 1], [0, -1, 1], [0, -1, 1])]
    )
    b_matrices = comb[:, :, np.newaxis] * box[np.newaxis, :, :]
    b_vectors = b_matrices.sum(axis=1)[np.newaxis, :, :]
    return c[:, np.newaxis, :] + b_vectors
def pbc_kdtree(v1, box, leafsize=32, compact_nodes=False, balanced_tree=False):
    """
    kd_tree with periodic images

    box - whole matrix
    rest: optional optimization
    """
    r0 = cKDTree(
        pbc_extend(v1, box).reshape((-1, 3)), leafsize, compact_nodes, balanced_tree
    )
    return r0


def pbc_kdtree_query(v1, v2, box, n=1):
    """
    kd_tree query with periodic images
    """
    r0, r1 = pbc_kdtree(v1, box).query(v2, n)
    r1 = r1 // 27
    return r0, r1
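The floor division r1 // 27 works because pbc_extend returns shape (N, 27, 3); after reshape((-1, 3)) the flat index of atom i, image m is 27*i + m. A standalone illustration of a periodic nearest-neighbor query built the same way (numpy/scipy only):

import numpy as np
from itertools import product
from scipy.spatial import cKDTree

box = np.diag([4.0, 4.0, 4.0])
pts = np.array([[0.1, 0.1, 0.1], [3.9, 3.9, 3.9]])
shifts = np.array(list(product([0, -1, 1], repeat=3))) @ box
images = (pts[:, None, :] + shifts[None, :, :]).reshape(-1, 3)  # flat index 27*i + m
dist, idx = cKDTree(images).query([[-0.05, 0.0, 0.0]], 1)
print(dist, idx // 27)  # atom 1 is nearest, via its (-1, -1, -1) image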
def pbc_backfold_rect(act_frame, box_matrix):
    """
    mimics "trjconv ... -pbc atom -ur rect"

    folds coords of act_frame into the cuboid box
    """
    af = np.asarray(act_frame)
    if af.shape == (3,):
        act_frame = act_frame.reshape((1, 3))  # quick 'n dirty
    b = box_matrix
    c = np.diag(b) / 2
    af = pbc_diff(np.zeros((1, 3)), af - c, b)
    return af + c


def pbc_backfold_compact(act_frame, box_matrix):
    """
    mimics "trjconv ... -pbc atom -ur compact"

    folds coords of act_frame into the Wigner-Seitz cell (e.g. dodecahedron)
    """
    c = act_frame
    box = box_matrix
    ctr = box.sum(0) / 2
    c = np.asarray(c)
    shape = c.shape
    if shape == (3,):
        c = c.reshape((1, 3))
        shape = (1, 3)  # quick 'n dirty
    comb = np.array(
        [np.asarray(i) for i in product([0, -1, 1], [0, -1, 1], [0, -1, 1])]
    )
    b_matrices = comb[:, :, np.newaxis] * box[np.newaxis, :, :]
    b_vectors = b_matrices.sum(axis=1)[np.newaxis, :, :]
    sc = c[:, np.newaxis, :] + b_vectors
    w = np.argsort(((sc - ctr) ** 2).sum(2), 1)[:, 0]
    return sc[range(shape[0]), w]
def whole(frame):

@@ -147,11 +174,11 @@ def whole(frame):
    # make sure residue_ids are sorted, then determine indices at which the res id changes
    # kind='stable' assures that any existing ordering is preserved
    logger.debug("Using first atom as reference for whole.")
    sort_ind = residue_ids.argsort(kind="stable")
    i = np.concatenate([[0], np.where(np.diff(residue_ids[sort_ind]) > 0)[0] + 1])
    coms = frame[sort_ind[i]][residue_ids - 1]

    cor = np.zeros_like(frame)
    cd = frame - coms
    n, d = np.where(cd > box / 2 * 0.9)
@@ -172,7 +199,7 @@ def nojump(frame, usecache=True):
    selection = frame.selection
    reader = frame.coordinates.frames
    if usecache:
        if not hasattr(reader, "_nojump_cache"):
            reader._nojump_cache = OrderedDict()
        # make sure to use an absolute (non-negative) index
        abstep = frame.step % len(frame.coordinates)
@@ -185,27 +212,42 @@ def nojump(frame, usecache=True):
            i0 = 0
            delta = 0

        delta = (
            delta
            + np.array(
                np.vstack(
                    [m[i0 : abstep + 1].sum(axis=0) for m in reader.nojump_matrixes]
                ).T
            )
            * frame.box.diagonal()
        )

        reader._nojump_cache[abstep] = delta
        while len(reader._nojump_cache) > NOJUMP_CACHESIZE:
            reader._nojump_cache.popitem(last=False)
        delta = delta[selection, :]
    else:
        delta = (
            np.array(
                np.vstack(
                    [
                        m[: frame.step + 1, selection].sum(axis=0)
                        for m in reader.nojump_matrixes
                    ]
                ).T
            )
            * frame.box.diagonal()
        )
    return frame - delta
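The idea behind the delta term, reduced to one dimension (standalone sketch, numpy only): detect integer box crossings from frame-to-frame displacements, accumulate them, and subtract the accumulated offset to obtain a continuous trajectory.

import numpy as np

box_length = 3.0
wrapped = np.array([2.8, 0.1, 0.4, 2.9])         # particle crosses +L, later -L
jumps = np.round(np.diff(wrapped) / box_length)   # [-1.  0.  1.]: detected crossings
delta = np.concatenate([[0], np.cumsum(jumps)]) * box_length
print(wrapped - delta)                            # [2.8 3.1 3.4 2.9]: continuous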
def pbc_points(
    coordinates, box, thickness=0, index=False, shear=False, extra_image=False
):
    """
    Returns the points and their first periodic images. Does not fold
    them back into the box.
    Thickness 0 means all 27 boxes. Positive means the box+thickness.
    Negative values mean that less than the box is returned.
    index=True also returns the indices, with indices of images being their
    original values.
    """
@@ -213,21 +255,28 @@ def pbc_points(coordinates, box, thickness=0, index=False, shear=False,
    if shear:
        box[2, 0] = box[2, 0] % box[0, 0]
    if shear or extra_image:
        grid = np.array(
            [
                [i, j, k]
                for k in [-2, -1, 0, 1, 2]
                for j in [2, 1, 0, -1, -2]
                for i in [-2, -1, 0, 1, 2]
            ]
        )
        indices = np.tile(np.arange(len(coordinates)), 125)
    else:
        grid = np.array(
            [[i, j, k] for k in [-1, 0, 1] for j in [1, 0, -1] for i in [-1, 0, 1]]
        )
        indices = np.tile(np.arange(len(coordinates)), 27)
    coordinates_pbc = np.concatenate([coordinates + v @ box for v in grid], axis=0)
    size = np.diag(box)
    if thickness != 0:
        mask = np.all(coordinates_pbc > -thickness, axis=1)
        coordinates_pbc = coordinates_pbc[mask]
        indices = indices[mask]
        mask = np.all(coordinates_pbc < size + thickness, axis=1)
        coordinates_pbc = coordinates_pbc[mask]
        indices = indices[mask]
    if index:

View File

@@ -16,7 +16,7 @@ from array import array
from zipfile import BadZipFile
import builtins
import warnings
import subprocess
import re
import itertools
@@ -31,11 +31,14 @@ import re
class NojumpError(Exception):
    pass


class WrongTopologyError(Exception):
    pass


def open_with_mdanalysis(
    topology, trajectory, cached=False, index_file=None, charges=None, masses=None
):
    """Open the topology and trajectory with mdanalysis."""
    uni = mdanalysis.Universe(topology, trajectory, convert_units=False)
    if cached is not False:
@@ -47,10 +50,10 @@ def open_with_mdanalysis(topology, trajectory, cached=False, index_file=None,
    else:
        reader = BaseReader(uni.trajectory)
    reader.universe = uni
    if topology.endswith(".tpr"):
        charges = uni.atoms.charges
        masses = uni.atoms.masses
    elif topology.endswith(".gro"):
        charges = charges
        masses = masses
    else:
@@ -58,15 +61,19 @@ def open_with_mdanalysis(topology, trajectory, cached=False, index_file=None,
    indices = None
    if index_file:
        indices = load_indices(index_file)

    atms = atoms.Atoms(
        np.stack((uni.atoms.resids, uni.atoms.resnames, uni.atoms.names), axis=1),
        charges=charges,
        masses=masses,
        indices=indices,
    ).subset()

    return atms, reader
group_re = re.compile(r"\[ ([-+\w]+) \]")


def load_indices(index_file):
    indices = {}
@@ -79,13 +86,14 @@ def load_indices(index_file):
            index_array = indices.get(group_name, [])
            indices[group_name] = index_array
        else:
            elements = line.strip().split("\t")
            elements = [x.split(" ") for x in elements]
            elements = itertools.chain(*elements)  # make a flat iterator
            elements = [x for x in elements if x != ""]
            index_array += [int(x) - 1 for x in elements]
    return indices
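For illustration, a minimal index file as parsed by this function (hypothetical content; assuming load_indices is imported from this module, note that the one-based GROMACS indices are converted to zero-based):

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".ndx", delete=False) as f:
    f.write("[ Water ]\n1 2 3\n4 5\n")
    path = f.name
print(load_indices(path))  # {'Water': [0, 1, 2, 3, 4]}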
def is_writeable(fname):
    """Test if a directory is actually writeable, by writing a temporary file."""
    fdir = os.path.dirname(fname)
@@ -95,7 +103,7 @@ def is_writeable(fname):
    if os.access(fdir, os.W_OK):
        try:
            with builtins.open(ftmp, "w"):
                pass
            os.remove(ftmp)
            return True
@@ -106,68 +114,70 @@ def is_writeable(fname):
def nojump_load_filename(reader):
    directory, fname = path.split(reader.filename)
    full_path = path.join(directory, ".{}.nojump.npz".format(fname))
    if not is_writeable(directory):
        user_data_dir = os.path.join("/data/", os.environ["HOME"].split("/")[-1])
        full_path_fallback = os.path.join(
            os.path.join(user_data_dir, ".mdevaluate/nojump"),
            directory.lstrip("/"),
            ".{}.nojump.npz".format(fname),
        )
        if os.path.exists(full_path_fallback):
            return full_path_fallback
    if os.path.exists(fname) or is_writeable(directory):
        return full_path
    else:
        user_data_dir = os.path.join("/data/", os.environ["HOME"].split("/")[-1])
        full_path_fallback = os.path.join(
            os.path.join(user_data_dir, ".mdevaluate/nojump"),
            directory.lstrip("/"),
            ".{}.nojump.npz".format(fname),
        )
        return full_path_fallback
def nojump_save_filename(reader):
    directory, fname = path.split(reader.filename)
    full_path = path.join(directory, ".{}.nojump.npz".format(fname))
    if is_writeable(directory):
        return full_path
    else:
        user_data_dir = os.path.join("/data/", os.environ["HOME"].split("/")[-1])
        full_path_fallback = os.path.join(
            os.path.join(user_data_dir, ".mdevaluate/nojump"),
            directory.lstrip("/"),
            ".{}.nojump.npz".format(fname),
        )
        logger.info(
            "Saving nojump to {}, since original location is not writeable.".format(
                full_path_fallback
            )
        )
        os.makedirs(os.path.dirname(full_path_fallback), exist_ok=True)
        return full_path_fallback
CSR_ATTRS = ("data", "indices", "indptr")
NOJUMP_MAGIC = 2016
def parse_jumps(trajectory):
    prev = trajectory[0].whole
    box = prev.box.diagonal()
    SparseData = namedtuple("SparseData", ["data", "row", "col"])
    jump_data = (
        SparseData(data=array("b"), row=array("l"), col=array("l")),
        SparseData(data=array("b"), row=array("l"), col=array("l")),
        SparseData(data=array("b"), row=array("l"), col=array("l")),
    )

    for i, curr in enumerate(trajectory):
        if i % 500 == 0:
            logger.debug("Parse jumps Step: %d", i)
        delta = ((curr - prev) / box).round().astype(np.int8)
        prev = curr
        for d in range(3):
            (col,) = np.where(delta[:, d] != 0)
            jump_data[d].col.extend(col)
            jump_data[d].row.extend([i] * len(col))
            jump_data[d].data.extend(delta[col, d])
@@ -179,14 +189,15 @@ def generate_nojump_matrixes(trajectory):
    """
    Create the matrices with pbc jumps for a trajectory.
    """
    logger.info("generate Nojump Matrixes for: {}".format(trajectory))
    jump_data = parse_jumps(trajectory)
    N = len(trajectory)
    M = len(trajectory[0])

    trajectory.frames.nojump_matrixes = tuple(
        sparse.csr_matrix((np.array(m.data), (m.row, m.col)), shape=(N, M))
        for m in jump_data
    )
    save_nojump_matrixes(trajectory.frames)
@@ -194,11 +205,11 @@ def generate_nojump_matrixes(trajectory):
def save_nojump_matrixes(reader, matrixes=None):
    if matrixes is None:
        matrixes = reader.nojump_matrixes
    data = {"checksum": checksum(NOJUMP_MAGIC, checksum(reader))}
    for d, mat in enumerate(matrixes):
        data["shape"] = mat.shape
        for attr in CSR_ATTRS:
            data["{}_{}".format(attr, d)] = getattr(mat, attr)

    np.savez(nojump_save_filename(reader), **data)
@@ -209,23 +220,25 @@ def load_nojump_matrixes(reader):
        data = np.load(zipname, allow_pickle=True)
    except (AttributeError, BadZipFile, OSError):
        # npz-files can be corrupted, probably a bug for big arrays saved with savez_compressed?
        logger.info("Removing zip-File: %s", zipname)
        os.remove(nojump_load_filename(reader))
        return
    try:
        if data["checksum"] == checksum(NOJUMP_MAGIC, checksum(reader)):
            reader.nojump_matrixes = tuple(
                sparse.csr_matrix(
                    tuple(data["{}_{}".format(attr, d)] for attr in CSR_ATTRS),
                    shape=data["shape"],
                )
                for d in range(3)
            )
            logger.info(
                "Loaded Nojump Matrixes: {}".format(nojump_load_filename(reader))
            )
        else:
            logger.info("Invalid Nojump Data: {}".format(nojump_load_filename(reader)))
    except KeyError:
        logger.info("Removing zip-File: %s", zipname)
        os.remove(nojump_load_filename(reader))
        return
@@ -238,22 +251,32 @@ def correct_nojump_matrixes_for_whole(trajectory):
    for d in range(3):
        reader.nojump_matrixes[d][0] = cor[:, d]
    save_nojump_matrixes(reader)
def energy_reader(file, energies=None):
    """Reads a GROMACS energy file and outputs the data in a pandas DataFrame.

    Args:
        file: Filename of the energy file
        energies (opt.): Specify energies to extract from the energy file
    """
    if energies is None:
        energies = np.arange(1, 100).astype("str")
    directory = file.rsplit("/", 1)[0]
    ps = subprocess.Popen(("echo", *energies), stdout=subprocess.PIPE)
    try:
        subprocess.run(
            (
                "gmx",
                "energy",
                "-f",
                file,
                "-o",
                f"{directory}/tmp.xvg",
                "-quiet",
                "-nobackup",
            ),
            stdin=ps.stdout,
        )
    except FileNotFoundError:
        print("No GROMACS found!")
    ps.wait()
@@ -271,9 +294,9 @@ def energy_reader(file, energies=None):
    data = np.loadtxt(f"{directory}/tmp.xvg", skiprows=header)
    df = pd.DataFrame({"Time": data[:, 0]})
    for i, label in enumerate(labels):
        tmp_df = pd.DataFrame({label: data[:, i + 1]})
        df = pd.concat([df, tmp_df], axis=1)
    subprocess.run(("rm", f"{directory}/tmp.xvg"))
    return df
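Hypothetical usage (the path is a placeholder and the term names depend on the .edr file; a working gmx binary on PATH is required, since the selection is piped into gmx energy):

df = energy_reader("/path/to/ener.edr", energies=("Potential", "Temperature"))
print(df.columns)  # "Time" plus one column per extracted energy term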
@@ -289,7 +312,7 @@ class BaseReader:
    @property
    def nojump_matrixes(self):
        if self._nojump_matrixes is None:
            raise NojumpError("Nojump Data not available: {}".format(self.filename))
        return self._nojump_matrixes

    @nojump_matrixes.setter
@@ -314,12 +337,12 @@ class BaseReader:
        return len(self.rd)

    def __checksum__(self):
        if hasattr(self.rd, "cache"):
            # Has a pygmx reader
            return checksum(self.filename, str(self.rd.cache))
        elif hasattr(self.rd, "_xdr"):
            # Has an mdanalysis reader
            cache = array("L", self.rd._xdr.offsets.tobytes())
            return checksum(self.filename, str(cache))
@@ -353,27 +376,25 @@ class CachedReader(BaseReader):
class DelayedReader(BaseReader):
    @property
    def filename(self):
        if self.rd is not None:
            return self.rd.filename
        else:
            return self._filename

    def __init__(self, filename, reindex=False, ignore_index_timestamps=False):
        super().__init__(filename, reindex=False, ignore_index_timestamps=False)
        self.natoms = len(self.rd[0].coordinates)
        self.cache = self.rd.cache
        self._filename = self.rd.filename
        self.rd = None

    def __len__(self):
        return len(self.cache)

    def _get_item(self, frame):
        return read_xtcframe_delayed(self.filename, self.cache[frame], self.natoms)

    def __getitem__(self, frame):
        return self._get_item(frame)

View File

@@ -33,14 +33,18 @@ def five_point_stencil(xdata, ydata):
    See: https://en.wikipedia.org/wiki/Five-point_stencil
    """
    return xdata[2:-2], (
        (-ydata[4:] + 8 * ydata[3:-1] - 8 * ydata[1:-3] + ydata[:-4])
        / (3 * (xdata[4:] - xdata[:-4]))
    )
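On a uniform grid the denominator 3·(x[4:] − x[:-4]) equals 12h, the standard five-point first-derivative weight, so the stencil is exact for polynomials up to fourth order. A standalone check on f(x) = x² (sketch, assuming five_point_stencil is imported from this module):

import numpy as np

x = np.linspace(0, 1, 11)
xs, dy = five_point_stencil(x, x**2)
print(np.allclose(dy, 2 * xs))  # True: recovers d/dx x^2 on the interior points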
def filon_fourier_transformation(
    time,
    correlation,
    frequencies=None,
    derivative="linear",
    imag=True,
):
    """
    Fourier transformation for slowly varying functions. The Filon algorithm is
    described in detail in ref [Blochowicz]_, ch. 3.2.3.
@@ -68,17 +72,15 @@ def filon_fourier_transformation(
    """
    if frequencies is None:
        f_min = 1 / max(time)
        f_max = 0.05 ** (1.2 - max(correlation)) / min(time[time > 0])
        frequencies = 2 * np.pi * np.logspace(np.log10(f_min), np.log10(f_max), num=60)
    frequencies.reshape(1, -1)

    if derivative == "linear":
        derivative = (np.diff(correlation) / np.diff(time)).reshape(-1, 1)
    elif derivative == "stencil":
        _, derivative = five_point_stencil(time, correlation)
        time = ((time[2:-1] * time[1:-2]) ** 0.5).reshape(-1, 1)
        derivative = derivative.reshape(-1, 1)
    elif np.iterable(derivative) and len(time) == len(derivative):
        derivative.reshape(-1, 1)
@@ -88,14 +90,29 @@ def filon_fourier_transformation(
    )
    time = time.reshape(-1, 1)

    integral = (
        np.cos(frequencies * time[1:]) - np.cos(frequencies * time[:-1])
    ) / frequencies**2
    fourier = (derivative * integral).sum(axis=0)

    if imag:
        integral = (
            1j
            * (np.sin(frequencies * time[1:]) - np.sin(frequencies * time[:-1]))
            / frequencies**2
        )
        fourier = (
            fourier
            + (derivative * integral).sum(axis=0)
            + 1j * correlation[0] / frequencies
        )

    return (
        frequencies.reshape(
            -1,
        ),
        fourier,
    )
def mask2indices(mask):
@@ -127,22 +144,24 @@ def superpose(x1, y1, x2, y2, N=100, damping=1.0):
    x_ol = np.logspace(
        np.log10(max(x1[~reg1][0], x2[~reg2][0]) + 0.001),
        np.log10(min(x1[~reg1][-1], x2[~reg2][-1]) - 0.001),
        (sum(~reg1) + sum(~reg2)) / 2,
    )

    def w(x):
        A = x_ol.min()
        B = x_ol.max()
        return (np.log10(B / x) / np.log10(B / A)) ** damping

    xdata = np.concatenate((x1[reg1], x_ol, x2[reg2]))
    y1_interp = interp1d(x1[~reg1], y1[~reg1])
    y2_interp = interp1d(x2[~reg2], y2[~reg2])
    ydata = np.concatenate(
        (
            y1[x1 < x2.min()],
            w(x_ol) * y1_interp(x_ol) + (1 - w(x_ol)) * y2_interp(x_ol),
            y2[x2 > x1.max()],
        )
    )
    return xdata, ydata
@@ -157,9 +176,10 @@ def runningmean(data, nav):
    Returns:
        Array of shape (N - (nav - 1),)
    """
    return np.convolve(data, np.ones((nav,)) / nav, mode="valid")


def moving_average(A, n=3):
    """
    Compute the running mean of an array.
    Uses the second axis if it is of higher dimensionality.
@@ -174,15 +194,15 @@ def moving_average(A,n=3):
    Supports 2D-Arrays.
    Slower than runningmean for small n but faster for large n.
    """
    k1 = int(n / 2)
    k2 = int((n - 1) / 2)
    if k2 == 0:
        if A.ndim > 1:
            return uniform_filter1d(A, n)[:, k1:]
        return uniform_filter1d(A, n)[k1:]
    if A.ndim > 1:
        return uniform_filter1d(A, n)[:, k1:-k2]
    return uniform_filter1d(A, n)[k1:-k2]
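The two implementations agree on the interior of a signal and both return N − (n − 1) values for 1D input. A quick standalone comparison built from the same primitives (numpy/scipy only):

import numpy as np
from scipy.ndimage import uniform_filter1d

data = np.arange(10.0)
print(np.convolve(data, np.ones(3) / 3, mode="valid"))  # [1. 2. ... 8.]
print(uniform_filter1d(data, 3)[1:-1])                  # identical interior values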
def coherent_sum(func, coord_a, coord_b):
@@ -235,7 +255,9 @@ def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
    if isinstance(func, FunctionType):
        func = numba.jit(func, nopython=True, cache=True)

    assert np.isclose(
        np.diff(bins).mean(), np.diff(bins)
    ).all(), "A regular distribution of bins is required."
    hmin = bins[0]
    hmax = bins[-1]
    N = len(bins) - 1
@@ -297,11 +319,11 @@ def Fqt_from_Grt(data, q):
    if isinstance(data, pd.DataFrame):
        df = data.copy()
    else:
        df = pd.DataFrame(data, columns=["r", "time", "G"])
    df["isf"] = df["G"] * np.sinc(q / np.pi * df["r"])
    isf = df.groupby("time")["isf"].sum()
    if isinstance(data, pd.DataFrame):
        return pd.DataFrame({"time": isf.index, "isf": isf.values, "q": q})
    else:
        return isf.index, isf.values
@@ -312,6 +334,7 @@ def singledispatchmethod(func):
    def wrapper(*args, **kw):
        return dispatcher.dispatch(args[1].__class__)(*args, **kw)

    wrapper.register = dispatcher.register
    functools.update_wrapper(wrapper, func)
    return wrapper
@@ -323,7 +346,7 @@ def histogram(data, bins):
    dx = dbins.mean()
    if bins.min() == 0 and dbins.std() < 1e-6:
        logger.debug("Using numpy.bincount for histogram computation.")
        hist = np.bincount((data // dx).astype(int), minlength=len(dbins))[: len(dbins)]
    else:
        hist = np.histogram(data, bins=bins)[0]
@@ -341,20 +364,22 @@ def quick1etau(t, C, n=7):
    n is the minimum number of points around 1/e required
    """
    # first rough estimate, the closest time. This is returned if the interpolation fails!
    tau_est = t[np.argmin(np.fabs(C - np.exp(-1)))]
    # reduce the data to points around 1/e
    k = 0.1
    mask = (C < np.exp(-1) + k) & (C > np.exp(-1) - k)
    while np.sum(mask) < n:
        k += 0.01
        mask = (C < np.exp(-1) + k) & (C > np.exp(-1) - k)
        if k + np.exp(-1) > 1.0:
            break
    # if enough points are found, try a curve fit; in case of failure keep using the estimate
    if np.sum(mask) >= n:
        try:
            with np.errstate(invalid="ignore"):
                fit, _ = curve_fit(
                    kww, t[mask], C[mask], p0=[0.9, tau_est, 0.9], maxfev=100000
                )
            tau_est = kww_1e(*fit)
        except:
            pass
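For a clean stretched-exponential decay the estimate converges to the analytic 1/e time (sketch, assuming quick1etau and the kww helpers are importable from this module):

import numpy as np

t = np.logspace(-2, 2, 100)
C = np.exp(-((t / 5.0) ** 0.8))
print(quick1etau(t, C))  # ~5.0, since kww_1e(1, 5, 0.8) = 5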

View File

@@ -2,26 +2,23 @@ from setuptools import setup
def get_version(module):
    version = ""
    with open(module) as f:
        for line in f:
            if "__version__" in line:
                version = line.split("=")[-1].strip("'\" \n\t")
                break
    return version


setup(
    name="mdevaluate",
    description="Collection of python utilities for md simulations",
    author_email="niels.mueller@physik.tu-darmstadt.de",
    packages=[
        "mdevaluate",
    ],
    entry_points={"console_scripts": ["index-xtc = mdevaluate.cli:run"]},
    version="23.6",
    requires=["numpy", "scipy"],
)