Compare commits
69 Commits
cc08f5ae50...mdeval_dev
Commits (SHA1):

adae920527, 11793d7960, 55b06fa61d, 3aa91d7482, 7d8c5d849d, c09549902a,
b7bb8cb379, 33c4756e34, 7b9f8b6773, c89cead81c, 31eb145a13, af3758cbef,
93d020a4de, b5395098ce, 5e80701562, 363e420cd8, 6b77ef78e1, 0c940115af,
b0f29907df, 37bf496b21, befaef2dfa, 8ea7da5d2f, b405842452, f5cf453d61,
4394f70530, 298da3818d, d9278eed83, 787882810c, f527d25864, 77771738ab,
3e8fd04726, 02fed343f0, 5c17e04b38, 16233e2f2c, 87ffa1e67e, 5fdc9c8698,
95b46c43be, 9200d31af6, 5d16c7de5e, cf865930a5, 13532b3db1, 25cb3d38b3,
9e9c865cf9, ba893881a3, a559f72221, 0870a94b44, 7dd2074ee5, 949ed877fb,
4f17cfe876, b39dd3c45f, 76db85d628, b1cb9d99cf, 0968ff214c, 9d8965ace9,
ec3f6b1c67, c9c4ffcc59, 62d05423c5, f48dff2ede, 218574d50c, dffd06d3c0,
4ace860436, 608cdb12eb, f2806ca3ca, 4a6c02627a, b4f9b98876, 0dd6daa941,
94d67496ba, 62705da6f3, b4486ff265
doc/Makefile (195 lines removed)

@@ -1,195 +0,0 @@
```make
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  applehelp  to make an Apple Help Book"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  coverage   to run coverage check of the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/mdevaluate.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/mdevaluate.qhc"

applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/mdevaluate"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/mdevaluate"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

deploy: html
	rsync -r _build/html/ /autohome/niels/public_html/mdevaluate/
```
doc/conf.py (317 lines removed)

@@ -1,317 +0,0 @@
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mdevaluate documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 10 11:46:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import shlex

sys.path.insert(0, os.path.abspath('..'))
import mdevaluate

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    # 'sphinx.ext.autosummary',
    # 'sphinx.ext.inheritance_diagram',
    'sphinx_gallery.gen_gallery',
    'sphinxcontrib.github_ribbon'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'mdevaluate'
copyright = '2017, Niels Müller'
author = 'Niels Müller'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mdevaluate.__version__
# The full version, including alpha/beta/rc tags.
release = mdevaluate.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = "python3"

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'mdevaluatedoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'mdevaluate.tex', 'mdevaluate Documentation',
     'mbartelm', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'mdevaluate', 'mdevaluate Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'mdevaluate', 'mdevaluate Documentation',
     author, 'mdevaluate', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

intersphinx_mapping = {
    'python': ('http://docs.python.org/3/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'ipython': ('http://ipython.org/ipython-doc/dev/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}

sphinx_gallery_conf = {
    # path to your examples scripts
    'examples_dirs': '../examples',
    # path where to save gallery generated examples
    'gallery_dirs': 'gallery'}


github_ribbon_repo = 'mdevaluate/mdevaluate'
github_ribbon_color = 'green'
```
doc/contributing.rst (244 lines removed)

@@ -1,244 +0,0 @@
Contributing
============

This document aims to lay out the basics of contributing code to the ``mdevaluate`` package.
The code is managed through a git repository, hence this guide gives basic information on the usage of `git <https://git-scm.com>`_.
In this document the prefix ``$`` indicates commands which should be run in a shell.
For a brief 15 min interactive tutorial visit `try.github.io <https://try.github.io>`_.

Let's start with a short introduction to the terminology.
Python code is organized in *packages* and *modules*:

Modules:
  Any Python file (e.g. ``test.py``) is called a module. A module can be imported (``import test``) and then used
  in other Python code, provided it is on the Python path, for example in the working directory.
  In principle, importing a module means executing the code inside the file.
  All definitions, like variables or functions, are then available under the module's name.

Packages:
  Python modules can be grouped into packages. A Python package is basically a folder
  which contains one mandatory file, ``__init__.py``. This file is the entry
  point into the package: it is the module that is imported when the package is imported.
  All modules in the folder are treated as submodules, which can be accessed via
  a dot syntax, e.g. ``import package.test``. Packages can also contain subpackages.

A more `detailed explanation <https://docs.python.org/3/tutorial/modules.html>`_ can be found in the official Python documentation.
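
For illustration, a minimal package with one submodule could look like this on disk (hypothetical names)::

    mypackage/
        __init__.py    # executed on "import mypackage"
        test.py        # imported as "import mypackage.test"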

Extending the documentation
+++++++++++++++++++++++++++

One of the most important parts of software is its documentation.
For modular packages like ``mdevaluate`` it's crucial to have good coverage of the API,
since users need to know which functions are provided and how they are used.
Helping others by extending the documentation is therefore a good way of contributing to mdevaluate.

The documentation is generated with a third-party tool named `Sphinx <http://www.sphinx-doc.org/en/stable/>`_.
The contents of the documentation are based on the source code (for the reference guide)
and on documents written in the markup language *reStructuredText* (rst).
The source of every page can be viewed in the browser through the *View page source* link in the upper right of the page.
The name of the rst file can also be derived from the page URL.
The rst files are placed in the ``doc`` directory of the repository.

Extending the documentation can be done in different ways, e.g.

- Correct, clarify or extend existing sections
- Add new sections about the general use of mdevaluate
- Add use cases to the special topics section.

To add a new section to the special topics, first create a new file for this guide in ``doc/special``.
Then add the name of this file (without the .rst extension) to the toctree in the file ``special-topics.rst``.
Now write the guide in the newly created file.

Building the docs
-----------------

When you have made changes to the docs, first re-build them locally.
You will need to have the ``sphinx`` Python package installed and of course a working environment for ``mdevaluate``.
When those requirements are fulfilled, build the docs as follows:

1. Navigate to the ``doc`` directory
2. Run ``make html`` in the shell
3. View the produced html files in the browser: ``firefox _build/html/index.html``

Organization of the code
++++++++++++++++++++++++

The code for the evaluation software is organized in two Python packages:

- ``pygmx``: This package provides a Python wrapper for the Gromacs library and
  thereby functionality to read file formats used within Gromacs.
- ``mdevaluate``: This package provides functionality for the evaluation of molecular
  dynamics simulations. It uses the ``pygmx`` package to read files, but is
  (in theory) not limited to Gromacs data.

Submodules
----------

Below, the contents of the package's submodules are described.

atoms.py
........

Definition of the ``Atom`` class and related functions for atom selection and information.

autosave.py
...........

Experimental functionality for automatic saving and loading of evaluated data,
like correlation functions. For each function call a checksum is calculated
from the input, which changes if the output of the function changes.

coordinates.py
..............

Definition of the ``Coordinates`` class and ``CoordinatesMap`` for coordinate
transformations, plus related functions.

correlation.py
..............

Functionality to calculate correlation functions.

distribution.py
...............

Functionality to calculate distribution functions.

reader.py
.........

Defines reader classes that handle trajectory reading and caching.

utils.py
........

A collection of utility functions.

Set up a development environment
++++++++++++++++++++++++++++++++

.. code-block:: console

    $ git clone https://github.com/mdevaluate/mdevaluate.git

Organization of the repository
------------------------------

The repository is organized through git branches.
At the moment there exist two branches in the remote repository: *master* and *dev*.

Adding code to the repository
+++++++++++++++++++++++++++++

All changes to the code are done in your local clone of the repository.
If a feature is complete, or at least works, the code can be pushed to the remote
to make it accessible for others.

A standard workflow to submit new code is the following:

1. Fork the main repository on GitHub and clone your fork to your local machine.
2. Create a new branch locally and apply the desired changes.
3. If the master branch was updated, merge it into the local branch.
4. Push the changes to GitHub and create a pull request for your fork.

Pulling updates from remote
---------------------------

Before working with the code, the latest updates should be pulled for the master branch

.. code-block:: console

    $ git checkout master
    $ git pull

Create a new branch
-------------------

Before changing any code, create a new branch in your local repository.
This helps to keep an overview of all the changes and simplifies merging.
To create a new branch locally, enter the following commands

.. code-block:: console

    $ git checkout master
    $ git branch my-feature
    $ git checkout my-feature

First switch to the master branch to make sure the new branch is based on it.
Then create the new branch, called ``my-feature``, and switch to it.
Now you can start making changes in the code.

Committing changes
------------------

A bundle of changes in the code is called a *commit*.
These changes can happen in different files and should be associated with each other.
Let's assume two files have been changed (``atoms.py`` and ``utils.py``).
The command

.. code-block:: console

    $ git diff atoms.py

will show you all changes that were made in the file since the latest commit.
Before committing, changes have to be *staged*, which is done by

.. code-block:: console

    $ git add atoms.py utils.py

This may be repeated as often as necessary.
When all changes for a commit are staged, the commit can actually be created

.. code-block:: console

    $ git commit

This will open up an editor where a commit message has to be entered.
After writing the commit message, save & close the file, which will create the commit.

Create a pull request
---------------------

When all changes are made and the new feature should be made public, you can open a new pull request on GitHub.
Most of the time, the master branch will have been updated, so first pull any updates

.. code-block:: console

    $ git checkout master
    $ git pull

When the master branch is up to date, it can be merged into the feature branch

.. code-block:: console

    $ git checkout my-feature
    $ git merge master

If no conflicting changes were made, merging works automatically.
If, for example, the same line was modified in a commit in master and in your commits, a merge conflict will occur.
Git tells you which files have conflicts and asks you to resolve them.
The respective lines will be marked with conflict-resolution markers in the files.
The most basic way of resolving a conflict is to edit these files and choose the appropriate version of the code.
See the `git documentation <https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging#Basic-Merge-Conflicts>`_ for an explanation.
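
Schematically, the conflict markers in an affected file look like this (hypothetical code)::

    <<<<<<< HEAD
    result = compute(x)
    =======
    result = compute(x, fast=True)
    >>>>>>> master

The lines between ``<<<<<<<`` and ``=======`` are your version, the rest is the version from master; keep the desired lines and delete the markers.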

After resolving the conflict, the files need to be staged and the merge has to be committed

.. code-block:: console

    $ git add utils.py
    $ git commit

The commit message will be generated automatically, indicating the merge.

After merging, the changes can be pushed to the remote

.. code-block:: console

    $ git push

The new code is now available in the remote.
doc/dynamic-evaluation.rst (50 lines removed)

@@ -1,50 +0,0 @@
Evaluation of dynamic properties
================================

Dynamic properties like the mean square displacement are calculated with the
function :func:`mdevaluate.correlation.shifted_correlation`.
This function takes a correlation function and calculates its averaged
time series by shifting a time interval over the trajectory.

::

    from mdevaluate import correlation

    time, msd_amim = correlation.shifted_correlation(correlation.msd, com_amim, average=True)
    plot(time, msd_amim)

The result of :func:`shifted_correlation` is two lists: the first one (``time``)
contains the times of the frames that have been used for the correlation.
The second list, ``msd_amim``, is the correlation function at these times.
If the keyword ``average=False`` is given, the correlation function for each shifted
time window will be returned.

Arguments of ``shifted_correlation``
------------------------------------

The function :func:`mdevaluate.correlation.shifted_correlation` accepts several keyword arguments.
With those arguments, the calculation of the correlation function may be controlled in detail.
The mathematical expression for a correlation function is the following:

.. math:: S(t) = \frac{1}{N} \sum_{i=1}^N C(f, R, t_i, t)

Here :math:`S(t)` denotes the correlation function at time :math:`t`, :math:`R` are the coordinates of all atoms
and :math:`t_i` are the onset times (:math:`N` is the number of onset times or time windows).
Note that the outer sum and the division by :math:`N` are only carried out if ``average=True``.
The onset times are defined by the keywords ``segments`` and ``window``, with
:math:`N = segments` and :math:`t_i = \frac{(1 - window) \cdot t_{max}}{N} (i - 1)`, where :math:`t_{max}` is the total simulation time.
As can be seen, ``segments`` gives the number of onset times and ``window`` defines the fraction of the simulation time the correlation is calculated for;
hence ``1 - window`` is the fraction of the simulation the onset times are distributed over.
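
To make the definition concrete, here is the onset-time formula as a small NumPy sketch (plain illustration, not the package's internal implementation)::

    import numpy as np

    def onset_times(t_max, segments, window):
        """t_i = (1 - window) * t_max / N * (i - 1) for i = 1, ..., N."""
        i = np.arange(1, segments + 1)
        return (1 - window) * t_max / segments * (i - 1)

    # e.g. a 100 ns trajectory, 10 segments, correlating over half the run:
    onset_times(100.0, 10, 0.5)  # -> [0., 5., 10., ..., 45.]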

:math:`C(f, R, t_0, t)` is the function that actually correlates the function :math:`f`.
For standard correlations the functions :math:`C(...)` and :math:`f` are defined as:

.. math:: C(f, R, t_0, t) = f(R(t_0), R(t_0 + t))

.. math:: f(r_0, r) = \langle s(r_0, r) \rangle

Here the brackets denote an ensemble average, small :math:`r` are the coordinates of one frame and :math:`s(r_0, r)` is the value that is correlated,
e.g. for the MSD :math:`s(r_0, r) = (r - r_0)^2`.

The function :math:`C(f, R, t_0, t)` is specified by the keyword ``correlation``; the function :math:`f(r_0, r)` is given by ``function``.
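
As an illustration of such a function :math:`f`, the MSD case from above can be written as a plain function of two coordinate frames (a sketch; the package provides its own :func:`mdevaluate.correlation.msd`)::

    import numpy as np

    def msd(r0, r):
        # <(r - r0)^2>: squared displacement, summed over x, y, z and averaged over atoms
        return ((r - r0) ** 2).sum(axis=-1).mean()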
doc/general-hints.rst (80 lines removed)

@@ -1,80 +0,0 @@
General Hints for Python Programming
====================================

This page collects some general hints for data-centered programming with Python.
Some resources for tutorials on the topics can be found here:

* http://www.scipy-lectures.org/
* The `Python Data Science Handbook <https://jakevdp.github.io/PythonDataScienceHandbook/>`_, by Jake VanderPlas
* PyCon talk on Numpy arrays: `Losing Your Loops, by Jake VanderPlas <https://www.youtube.com/watch?v=EEUXKG97YRw>`_

Programming Environments
------------------------

There exist different environments for Python programming, each with their pros and cons.
Some examples are:

* **IPython Console**: The most basic way to use Python is on the interactive console; the ipython console is a sophisticated Python console. After the mdevaluate module is loaded, ipython can be started with the command ``ipython``.
* **Jupyter Notebook**: Provides a Mathematica-style notebook, which is accessed through a web browser. After the mdevaluate module is loaded, a (local) notebook server can be started with the command ``jupyter-notebook``. See the help menu in the notebook for a short introduction and http://jupyter.org/ for a detailed user guide.
* **Atom Editor**: When developing more complex code, like modules, an editor comes in handy. Besides basic preinstalled editors (e.g. Gedit), the `atom editor <https://atom.io>`_ is a nice option. Recommended Atom packages for Python development are: language-python, autocomplete-python and linter-flake8.

Common Pitfalls
---------------

* **For-Loops**: The biggest pitfall of data-intensive Python programming is the ``for``-loop. Those loops perform badly in Python, but can be avoided in most cases through Numpy arrays; see the mentioned talk by Jake VanderPlas and the sketch below.
* **Non-Portable Code**: Most non-programmers tend to write complex scripts. It's always advisable to move your code into separate Python modules (i.e. separate files) and split the code into reusable functions. Since these modules can be imported from any Python code, this will save time in the long run and often reduces errors.
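
To illustrate the first pitfall, here is the same reduction once with an explicit Python loop and once vectorized with NumPy (random data, just to show the pattern)::

    import numpy as np

    r = np.random.rand(100000, 3)  # hypothetical coordinates

    # slow: an explicit Python loop over all rows
    sq_loop = [x @ x for x in r]

    # fast: one vectorized operation on the whole array
    sq_vec = (r ** 2).sum(axis=1)

    assert np.allclose(sq_loop, sq_vec)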

Pandas Dataframes
-----------------

Most data in mdevaluate is handled as Numpy arrays.
For example, the function :func:`~mdevaluate.correlation.shifted_correlation` returns a multidimensional array, which contains the time steps and the values of the correlation function.
As pointed out above, those arrays are good for computation and can be used to plot data with, e.g., matplotlib.
But often there is metadata associated with this data, for example the temperature or the specific subset of atoms that were analyzed.
This is the point where `pandas dataframes <http://pandas.pydata.org/>`_ come in handy.
Dataframes are, most basically, tables of samples with named columns.
The dataframe class allows easy access to columns by label and complex operations, like grouping by columns or merging different datasets.

As an example, say we have simulations at several temperatures and want to calculate the ISF and do a KWW fit for each of these trajectories.
Details of the analysis will be explained at a later point of this document, therefore they are omitted here::

    import pandas
    datasets = []

    for T in [250, 260, 270, 280, 290, 300]:
        # calculate the isf for this temperature
        t, Sqt = ...

        # DataFrames can be created from dictionaries
        datasets.append(pandas.DataFrame({'time': t, 'Sqt': Sqt, 'T': T}))

    # join the individual dataframes into one
    isf_data = pandas.concat(datasets)

    # Now calculate the KWW fits for each temperature
    from scipy.optimize import curve_fit
    from mdevaluate.functions import kww
    kww_datasets = []
    # The isf data is grouped by temperature, that is the loop iterates over
    # all T values and the part of the data where isf_data['T'] == T
    for T, data in isf_data.groupby('T'):
        fit, cov = curve_fit(kww, data['time'], data['Sqt'])
        # DataFrames can also be created from arrays and a definition of columns;
        # note the extra brackets: the fit parameters form one row of the new dataframe
        df = pandas.DataFrame([fit], columns=['A', 'τ', 'β'])
        # columns can be added dynamically
        df['T'] = T
        kww_datasets.append(df)
    kww_data = pandas.concat(kww_datasets)

    # We have two dataframes now, one with the time series of the ISF at each
    # temperature and one with the fit parameters of the KWW for each temperature

    # We can merge this data into one dataframe, by the overlapping columns (i.e. 'T' in this example)
    data = pandas.merge(isf_data, kww_data)
    # We can now compute the kww fit value of each sample point of the isf in one line:
    data['kww_fit'] = kww(data['time'], data['A'], data['τ'], data['β'])
    # And plot the data, resolved by temperature.
    for T, df in data.groupby('T'):
        plot(df['time'], df['Sqt'], 'o')      # the actual correlation value
        plot(df['time'], df['kww_fit'], '-')  # the kww fit
doc/guide.rst (11 lines removed)

@@ -1,11 +0,0 @@
User Guide
==========

.. toctree::
    :maxdepth: 2

    loading
    static-evaluation
    dynamic-evaluation
    special-topics
doc/index.rst (29 lines removed)

@@ -1,29 +0,0 @@
.. mdevaluate documentation master file, created by
   sphinx-quickstart on Tue Nov 10 11:46:41 2015.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Documentation of mdevaluate
===========================

A Python package for the evaluation of molecular dynamics simulation data.

Contents
--------

.. toctree::
    :maxdepth: 1

    installation
    general-hints
    guide
    gallery/index
    contributing
    modules

Indices and tables
------------------

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
doc/installation.rst (44 lines removed)

@@ -1,44 +0,0 @@
Installation
============

Mdevaluate itself is a pure Python package and can be imported directly from the source directory, if needed.
The Gromacs dependency pygmx has to be installed into the Python distribution,
since parts of it are compiled with Cython.

Requirements
------------

The package depends on some Python packages that can all be installed via pip or conda:

- Python 3.5 (or higher)
- NumPy
- SciPy

Install pygmx & mdevaluate
--------------------------

To install pygmx, first get the source from its repository, https://github.com/mdevaluate/pygmx.
Installation instructions are given in the respective readme file.
Two steps have to be performed:

1. Install Gromacs 2016
2. Install pygmx

When this requirement is met, installing mdevaluate simply means getting the source code from the repository and running

.. code-block:: console

    $ python setup.py install

from within the source directory.

Running Tests
-------------

Some tests are included with the source that can be used to test the installation.
The test suite requires `pytest <https://pytest.org>`_.
To run the tests simply execute

.. code-block:: console

    $ pytest

in the source directory.
doc/loading.rst (115 lines removed)

@@ -1,115 +0,0 @@
Loading of simulation data
==========================

Mdevaluate provides a convenient function :func:`mdevaluate.load_simulation`
that loads a simulation more or less automatically.
It takes a path as input and looks for all files it needs in this directory.

For information about the topology either a ``tpr`` or a ``gro`` file is read,
where the former is the preferred choice.
Trajectory data will be read from an ``xtc`` file.
If the directory contains more than one file of either type, the desired file
has to be specified with the appropriate keyword argument.
For details see :func:`mdevaluate.open`.

The function will return a coordinates object for the whole system.
A subset of the system may be obtained directly from the coordinates object by
calling its :func:`~mdevaluate.coordinates.Coordinates.subset` method.
This function accepts the same input as :func:`mdevaluate.atoms.AtomSubset.subset`.
A new feature that was introduced in the function is the possibility to choose
atoms with regular expressions.

Example
-------

The following code loads the example trajectory and selects a subset of all CW atoms.
Since there are two CW atoms in each molecule (CW1 and CW2), a regular expression is
used when selecting the subset.

::

    import mdevaluate as md

    trajectory = md.open('/data/niels/tutorial')
    CW_atoms = trajectory.subset(atom_name='CW.')

And that's it, now one can evaluate stuff for this subset of atoms.

Selecting a subset
------------------

As shown in the example above, it is often necessary to select a subset of the system for analysis.
This can be a special group of atoms (e.g. all C atoms) or a whole residue for which the center of mass should be computed.
Subsets are selected with the :func:`~mdevaluate.Coordinates.subset` method of Coordinates objects.

This method accepts four keyword arguments, with which the atom name, residue name, residue id or atom indices can be specified.
The two name arguments accept regular expressions, which allows one to include several different names in one subset.
Some examples:

- All carbon atoms (which are named CW1, CT1, CA, ...): ``tr.subset(atom_name='C.*')``
- Atoms NA1, NA2 and OW: ``tr.subset(atom_name='NA.|OW')``
- All oxygen atoms of residue EG: ``tr.subset(atom_name='O.*', residue_name='EG')``

Specifying data files
---------------------

The above example only works if the directory contains exactly one tpr file and
one xtc file.
If your data files are located in subdirectories, or multiple files of these types exist,
they can be specified by the keywords ``topology`` and ``trajectory``.
Those filenames can be a relative path to the simulation directory and can also make
use of *shell globbing*. For example::

    traj = md.open('/path/to/sim', topology='atoms.gro', trajectory='out/traj_*.xtc')

Note that the topology can be specified as a gro file, with the limitation that
only atom and residue names will be read from those files.
Information about atom masses and charges, for example, will only be read from tpr files;
therefore it is generally recommended to use the latter as topologies.

The trajectory above is specified through shell globbing, meaning the ``*`` may be
expanded to any string (not containing a forward slash).
If more than one file matches this pattern, an error will be raised,
since the trajectory cannot be identified unambiguously.

Caching of frames
-----------------

One bottleneck in the analysis of MD data is the reading speed of the trajectory.
In many cases frames will be needed repeatedly, and hence the amount of time spent reading
data from disk (or, worse, over the network) is huge.
Therefore the mdevaluate package implements a simple caching mechanism, which holds
on to a number of read frames.
The downside of this is increased memory usage, which may slow down the computation as well.

Caching is done on the level of the trajectory readers, so that all ``Coordinate`` and
``CoordinateMap`` objects working on the same trajectory will share a cache.
Caching has to be activated when opening a trajectory::

    traj = md.open('/path/to/sim', cached=True)

The ``cached`` keyword takes either a boolean, an integer or ``None`` as input value.
The value of ``cached`` controls the size of the cache and thereby the additional memory usage.
Setting it to True will activate the caching with a maximum size of 128 frames;
with an integer, any other maximum size may be set.
The special value ``None`` will set the cache size to infinite, so all frames will be cached.
This prevents frames from being loaded twice but can also consume a whole lot of memory,
since a single frame can easily take 1 MB of memory.

Clearing cached frames
++++++++++++++++++++++

In some scenarios it may be advisable to free cached frames which are no longer needed.
For this case the reader has a function ``clear_cache()``.
The current state of the cache can be displayed with the ``cache_info`` property::

    >>> traj.frames.cache_info
    CacheInfo(hits=12, misses=20, maxsize=128, currsize=20)
    >>> traj.frames.clear_cache()
    >>> traj.frames.cache_info
    CacheInfo(hits=0, misses=0, maxsize=128, currsize=0)

Clearing the cache when it is not needed anymore is advisable, since this will help the
Python interpreter to reuse the memory.
doc/make.bat (263 lines removed)

@@ -1,263 +0,0 @@
```bat
@ECHO OFF

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html       to make standalone HTML files
	echo.  dirhtml    to make HTML files named index.html in directories
	echo.  singlehtml to make a single large HTML file
	echo.  pickle     to make pickle files
	echo.  json       to make JSON files
	echo.  htmlhelp   to make HTML files and a HTML help project
	echo.  qthelp     to make HTML files and a qthelp project
	echo.  devhelp    to make HTML files and a Devhelp project
	echo.  epub       to make an epub
	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  text       to make text files
	echo.  man        to make manual pages
	echo.  texinfo    to make Texinfo files
	echo.  gettext    to make PO message catalogs
	echo.  changes    to make an overview over all changed/added/deprecated items
	echo.  xml        to make Docutils-native XML files
	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
	echo.  linkcheck  to check all external links for integrity
	echo.  doctest    to run all doctests embedded in the documentation if enabled
	echo.  coverage   to run coverage check of the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)


REM Check if sphinx-build is available and fallback to Python version if any
%SPHINXBUILD% 2> nul
if errorlevel 9009 goto sphinx_python
goto sphinx_ok

:sphinx_python

set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

:sphinx_ok


if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\mdevaluate.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\mdevaluate.ghc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdf" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdfja" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf-ja
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

if "%1" == "coverage" (
	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
	goto end
)

if "%1" == "xml" (
	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The XML files are in %BUILDDIR%/xml.
	goto end
)

if "%1" == "pseudoxml" (
	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
	goto end
)

:end
```
@ -1,83 +0,0 @@
|
||||
|
||||
Module contents
---------------

.. automodule:: mdevaluate
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.autosave
...................

.. automodule:: mdevaluate.autosave
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.atoms
................

.. automodule:: mdevaluate.atoms
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.coordinates
......................

.. automodule:: mdevaluate.coordinates
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.correlation
......................

.. automodule:: mdevaluate.correlation
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.distribution
.......................

.. automodule:: mdevaluate.distribution
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.evaluation
.....................

mdevaluate.functions
....................

.. automodule:: mdevaluate.functions
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.pbc
..............

.. automodule:: mdevaluate.pbc
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.reader
.................

.. automodule:: mdevaluate.reader
    :members:
    :undoc-members:
    :show-inheritance:

mdevaluate.utils
................

.. automodule:: mdevaluate.utils
    :members:
    :undoc-members:
    :show-inheritance:
|
@ -1,9 +0,0 @@
|
||||
.. _reference-guide:
|
||||
|
||||
Reference Guide
===============

.. toctree::
   :maxdepth: 4

   mdevaluate
|
@ -1,12 +0,0 @@
|
||||
|
||||
Special Topics
==============

This part of the documentation describes advanced ways of using mdevaluate.

.. toctree::

   special/autosave
   special/spatial
   special/overlap
   special/energies
|
@ -1,93 +0,0 @@
|
||||
Automatic Saving of Analysis Data
=================================

Mdevaluate provides functionality to save the results of analysis functions automatically.
The data is saved to a file after it has been computed.
If an analysis was done in the exact same way before, the result is loaded from this file.

This behavior is activated by calling :func:`mdevaluate.autosave.enable`, which takes a directory as input.
If this directory is a relative path (i.e. it does not start with a slash) the results will be saved in a location
relative to the directory of the trajectory file.
If the output files of your simulations are located in a subdirectory, like ``/path/to/sim/Output``, it is possible
to specify the autosave location as ``../data`` so that the result files will be placed under ``/path/to/sim/data``.

At the moment, the two functions that use this behavior are:

- :func:`~mdevaluate.correlation.shifted_correlation`
- :func:`~mdevaluate.distribution.time_average`

Any other function can make use of the autosave mechanism by decorating it with :func:`mdevaluate.autosave.autosave_data`, as sketched below.
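
A minimal sketch of such a decorated function (the analysis function itself is hypothetical;
``nargs=2`` mirrors how the decorator is applied to the built-in functions and gives the number
of leading positional arguments that identify the analysis)::

    from mdevaluate.autosave import autosave_data

    @autosave_data(nargs=2)
    def mean_of(function, coordinates):
        # hypothetical analysis; its result is saved automatically and, on an
        # identical second call, loaded from the data file instead of recomputed
        return function(coordinates)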
|
||||
|
||||
A full example
--------------

This is how it works; for a detailed explanation see below::

    import mdevaluate as md

    md.autosave.enable('data')
    water = md.open('/path/to/sim').subset(atom_name='OW')
    md.correlation.shifted_correlation(
        md.correlation.msd,
        water,
        description='test'
    )
    # The result will be saved to the file:
    # /path/to/sim/data/shifted_correlation_msd_OW_test.npz

Checksum of the Analysis Call
-----------------------------

The autosave module calculates a checksum for each call of an analysis function,
which is used to validate a present data file.
This way the result is only loaded from file if the analysis is exactly the same.
This includes the function code that is evaluated, so the result will be recomputed if any bit of the code changes.
There is always the possibility that checksums coincide accidentally,
by chance or due to a bug in the code, which should be kept in mind when using this functionality.

Special Keyword Arguments
-------------------------

The autosave module introduces two special keyword arguments to the decorated functions:

- ``autoload``: Controls whether previously calculated data is loaded; setting it to ``False`` prevents loading even if a valid data file was found.
- ``description``: A descriptive string of the specific analysis, see below.

These keywords may be passed to the functions named above (``shifted_correlation``, ``time_average``) like any other keyword argument.
If autosave was not enabled, they will be ignored.
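
For example, to force a recomputation of the analysis from the full example above
(``water`` as defined there; the assumed default of ``autoload`` is ``True``)::

    md.correlation.shifted_correlation(
        md.correlation.msd,
        water,
        description='test',
        autoload=False  # recompute even though a matching data file exists
    )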
|
||||
|
||||
File names and Analysis Descriptions
------------------------------------

The evaluated data is saved to human readable files, whose name is derived from the function call
and the automatic description of the subset.
The latter is assigned based on the ``atom_name`` and ``residue_name`` of the :func:`~mdevaluate.atoms.AtomSubset.subset` method.

In some cases this is not enough, for example if the same subset is analyzed spatially resolved,
which would lead to identical filenames that would be overwritten.
Therefore a more detailed description of each specific analysis call needs to be provided.
For this reason the autosave module introduces the aforementioned keyword argument ``description``.
The value of this keyword is appended to the filename, and in addition, if any of
the other arguments of the function call has an attribute ``description``, it will be appended as well.
For example, this (pseudo) code will lead to the filename ``shifted_correlation_isf_OW_1-2nm_nice.npz``::

    OW = traj.subset(atom_name='OW')

    corr = subensemble_correlation(spatial_selector)
    corr.description = '1-2nm'

    shifted_correlation(
        isf,
        OW,
        correlation=corr,
        description='nice'
    )

Reusing the autosaved data
--------------------------

The results of the functions are saved in NumPy's npz format, see :func:`numpy.savez`.
If the result should be used in a different place, it can either be loaded with
:func:`numpy.load` or with :func:`mdevaluate.autosave.load_data`.
The latter function returns the result of the function call directly; the former
returns a mapping with the keys ``checksum`` and ``data``, the latter key yielding the result data.
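
A minimal sketch of reading such a file directly with NumPy (the file name is the one from
the full example above; ``allow_pickle`` may be needed if the stored result is an object array)::

    import numpy as np

    archive = np.load('/path/to/sim/data/shifted_correlation_msd_OW_test.npz',
                      allow_pickle=True)
    time, result = archive['data']  # the stored function result, here (time, msd)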
|
@ -1,18 +0,0 @@
|
||||
Gromacs Energy Files
====================

It is possible to read the energy files (.edr) written out by Gromacs with mdevaluate.
Those files contain thermodynamic properties of the system, like temperature or pressure.
The exact contents of an energy file depend on the type of ensemble that was simulated;
an NVT simulation's energy file, for example, will not contain information about the box size.

To open these files use the function :func:`mdevaluate.open_energy`, which takes the filename of an energy file.
The types of energies stored in the file can be shown with the :attr:`types` attribute of the class :class:`~mdevaluate.reader.EnergyReader`;
the :attr:`units` attribute gives the units of these energy types.
The timesteps at which those energies were written out are accessible through the :attr:`~mdevaluate.reader.EnergyReader.time` property.
The time series of one of these energies can be accessed through the named index, comparable to Python dictionaries.
::

    import matplotlib.pyplot as plt
    import mdevaluate as md

    edr = md.open_energy('/path/to/energy.edr')
    # plot the evolution of temperature
    plt.plot(edr.time, edr['Temperature'])
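
To see what a given file actually contains, the attributes mentioned above can be inspected
directly (a sketch, assuming ``types`` and ``units`` are parallel sequences; the printed
names depend on the simulated ensemble)::

    for name, unit in zip(edr.types, edr.units):
        print(name, unit)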
|
@ -1,76 +0,0 @@
|
||||
Computing the Overlap Function
==============================

The overlap function is defined as the portion of particles of a given set
whose positions *overlap* after a given time :math:`t` with the reference configuration at :math:`t=0`.
This is calculated as follows:
the initial positions define spheres of a given radius :math:`r`, which are then used
to test how many of the particles at a later time are found within those spheres.
Normalized by the number of spheres this gives the correlation of the configurational overlap.

.. math::

    Q(t) = \frac{1}{N} \left\langle \sum\limits_{i=1}^N n_i(t) \right\rangle

Here :math:`n_i(t)` refers to the :math:`N` spheres, with :math:`n_i(t)=1` if a particle
is found within sphere :math:`i` at time :math:`t`, and :math:`n_i(0) = 1` for all :math:`1\leq i \leq N`.

Evaluation with mdevaluate
--------------------------

Computation of the overlap requires the relatively expensive computation of next neighbor distances,
which scales with the order of :math:`\mathcal{O}(N^2)`.
There are more efficient approaches to this problem; the one used here is
the so-called :class:`~scipy.spatial.cKDTree`.
This is much more efficient and makes it possible to compute the overlap relatively fast::

    from functools import partial
    import mdevaluate as md

    OW = md.open('/path/to/sim').subset(atom_name='OW')
    tree = md.coordinates.CoordinatesKDTree(OW)
    Qol = md.correlation.shifted_correlation(
        partial(md.correlation.overlap, crds_tree=tree, radius=0.11),
        OW
    )

As seen above, mdevaluate provides the function :func:`~mdevaluate.correlation.overlap`
for this evaluation, which uses a special object of type :class:`~mdevaluate.coordinates.CoordinatesKDTree`
for the neighbor search.
The latter provides two features necessary for the computation:
first, it computes a :class:`~scipy.spatial.cKDTree` for each necessary frame of the trajectory;
second, it caches those trees, since assembly of KDTrees is expensive.
The size of the cache can be controlled with the keyword argument ``maxsize`` of the CoordinatesKDTree initialization.

Note that this class uses the C version (hence the lowercase c) rather than
the pure Python version :class:`~scipy.spatial.KDTree`, since the latter is significantly slower.
The only downside is that the C version had a memory leak before SciPy 0.17,
but as long as a recent version of SciPy is used this shouldn't be a problem.
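
For long trajectories the cache size can be tuned when constructing the tree; a sketch
(the default value of ``maxsize`` is not specified here)::

    tree = md.coordinates.CoordinatesKDTree(OW, maxsize=128)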
|
||||
|
||||
Overlap of a Subsystem
----------------------

In many cases the overlap of a subsystem, e.g. a spatial region, should be computed.
This is done by selecting a subset of the initial configuration before defining the spheres.
The overlap is then probed with the whole system.
This has two benefits:

1. It yields the correct results
2. The KDTree structures are smaller and thereby cheaper in computation and memory

An example of a spatially resolved analysis, where ``OW`` is loaded as above::

    selector = partial(
        md.coordinates.spatial_selector,
        transform=md.coordinates.spherical_radius,
        rmin=1.0,
        rmax=1.5
    )
    tree = md.coordinates.CoordinatesKDTree(OW, selector=selector)
    Qol = md.correlation.shifted_correlation(
        partial(md.correlation.overlap, crds_tree=tree, radius=0.11),
        OW
    )

This computes the overlap of OW atoms in the region :math:`1.0 \leq r \leq 1.5`.
This method can of course be used to probe the overlap of any subsystem, which is selected by the given selector function.
The selector should return a viable index for an (m, 3) sized NumPy array when called with the original frame of size (N, 3)::

    subset = frame[selector(frame)]
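
A custom selector is therefore just a function of one frame; for example a hypothetical
slab selector along the z axis::

    def slab_selector(frame, zmin=1.0, zmax=1.5):
        # boolean mask selecting atoms whose z coordinate lies inside the slab
        z = frame[:, 2]
        return (z >= zmin) & (z < zmax)

    tree = md.coordinates.CoordinatesKDTree(OW, selector=slab_selector)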
|
@ -1,38 +0,0 @@
|
||||
Spatially Resolved Analysis
===========================

This section describes how spatially resolved correlations can be analyzed with mdevaluate.
This guide assumes that the variable ``traj`` holds a trajectory where the subset of atoms that should be analyzed is selected.
For example::

    traj = md.open('/path/to/sim', cached=1000).subset(atom_name='OW')

This loads a simulation from the directory ``/path/to/sim`` and selects all ``OW`` atoms.
Note that for this use case the caching is quite useful, since it enables us to iterate over spatial regions
without a significant time penalty.
Moving on, let's calculate the ISF of water oxygens with spherical radius between 0.5 and 0.7 nm::

    from functools import partial

    func = partial(md.correlation.isf, q=22.7)
    selector = partial(
        md.coordinates.spatial_selector,
        transform=md.coordinates.spherical_radius,
        rmin=0.5, rmax=0.7
    )
    t, S = md.correlation.shifted_correlation(
        func, traj,
        correlation=md.correlation.subensemble_correlation(selector)
    )

To explain how this works, let's go through the code from bottom to top.
The spatial filtering is done inside the shifted_correlation by the function
:func:`mdevaluate.correlation.subensemble_correlation`.
This function takes a selector function as an argument, which should take a frame as input
and return the indices of the coordinates that should be selected.
A new selection is taken for the starting frame of each shifted time segment.

In this case the selection is done with the function :func:`mdevaluate.coordinates.spatial_selector`.
This function takes four arguments, the first being the frame of coordinates, which is handed over by :func:`subensemble_correlation`.
The second argument is a transformation function, which transforms the input coordinates to the quantity that will be filtered,
in this case the spherical radius.
The last two arguments define the minimum and maximum value of this quantity.
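
Thanks to the caching mentioned above, several shells can be scanned in a loop without
rereading the trajectory. A sketch reusing ``func`` and ``traj`` from above (the bin
edges are arbitrary here)::

    import numpy as np

    edges = np.arange(0.0, 2.1, 0.3)
    results = []
    for rmin, rmax in zip(edges[:-1], edges[1:]):
        selector = partial(
            md.coordinates.spatial_selector,
            transform=md.coordinates.spherical_radius,
            rmin=rmin, rmax=rmax
        )
        t, S = md.correlation.shifted_correlation(
            func, traj,
            correlation=md.correlation.subensemble_correlation(selector)
        )
        results.append(S)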
|
@ -1,76 +0,0 @@
|
||||
|
||||
Evaluation of static properties
===============================

.. note::
    All examples in this section assume that the package has been imported and a trajectory was loaded::

        import mdevaluate
        import mdevaluate.distribution as dist

        coords = mdevaluate.open('/path/to/simulation')

Static properties of the system, like the density distribution or the pair correlation function,
can be evaluated with the :mod:`mdevaluate.distribution` module.
It provides the function :func:`mdevaluate.distribution.time_average`
that computes the average of a property over the whole trajectory.
An example call of this function is::

    tetra = dist.time_average(dist.tetrahedral_order, coords)

This will calculate the average of the tetrahedral order parameter for each atom.
The first argument of :func:`time_average` is a function that takes one argument.
It will be called for each frame in the trajectory and the output of this function
is then averaged over all these frames.
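
Any function with this one-argument signature works; for example a hypothetical mean
z coordinate per frame::

    def mean_z(frame):
        # average z position of all atoms in this frame
        return frame[:, 2].mean()

    avg_z = dist.time_average(mean_z, coords)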
|
||||
|
||||
Slicing of the trajectory
-------------------------

In most cases averaging every frame of the trajectory is not necessary,
since the conformation of the atoms doesn't change significantly between two frames.
Hence it is often sufficient to skip frames without a significant loss of statistics.
The exact number of frames that can be skipped before the statistics suffer depends strongly
on the calculated property; therefore it has to be chosen manually.
For this purpose, Coordinates objects can be sliced like any Python list::

    tetra = dist.time_average(dist.tetrahedral_order, coords[1000::50])

This makes it possible to skip a number of frames at the start (or end) and with every step.
The above call would start with frame 1000 of the trajectory and evaluate every 50th frame until the end.
Since the number of frames read and evaluated is reduced by about a factor of 50, the computational cost decreases accordingly.

Calculating distributions
-------------------------

In many cases the static distribution of a property is of interest.
For example, the tetrahedral order parameter is often wanted as a distribution.
This too can be calculated with ``time_average``, but the bins of the distribution have to be specified::

    import numpy as np
    from functools import partial

    func = partial(dist.tetrahedral_order_distribution, bins=np.linspace(-3, 1, 401))
    tetra_dist = dist.time_average(func, coords)

The bins (which are ultimately used with the function :func:`numpy.histogram`) are specified
by partially evaluating the evaluation function with :func:`functools.partial`.
See the documentation of :func:`numpy.histogram` for details on bin specification.

.. note::
    If :func:`numpy.histogram` is used with :func:`time_average`, the bins have to be given explicitly.
    When not specified, the bins will be chosen automatically for each call of ``histogram``, leading to
    different bins for each frame and hence an incorrect average.

Advanced evaluations
--------------------

The function that will be evaluated by ``time_average`` can return NumPy arrays of arbitrary shape.
It is for example possible to calculate the distribution of a property for several subsets of the system at once::

    def subset_tetra(frame, bins):
        tetra = dist.tetrahedral_order(frame)
        # np.histogram returns (counts, edges); only the counts are averaged
        return np.array([np.histogram(tetra[0::2], bins=bins)[0],
                         np.histogram(tetra[1::2], bins=bins)[0]])

    bins = np.linspace(-1, 1, 201)
    func = partial(subset_tetra, bins=bins)
    tetra_subdist = dist.time_average(func, coords)

In this example the tetrahedral order parameter is first calculated for each atom of the system.
Then the distribution is calculated for two subsets, containing atoms (0, 2, 4, 6, ...) and (1, 3, 5, 7, ...).
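
The averaged result then has shape ``(2, len(bins) - 1)`` and can, for instance, be plotted
against the bin centers::

    import matplotlib.pyplot as plt

    centers = (bins[:-1] + bins[1:]) / 2
    plt.plot(centers, tetra_subdist[0], label='even atoms')
    plt.plot(centers, tetra_subdist[1], label='odd atoms')
    plt.legend()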
|
@ -1,2 +0,0 @@
|
||||
Example Gallery
|
||||
===============
|
@ -1,47 +0,0 @@
|
||||
r"""
|
||||
Four-Point susceptibility
|
||||
=========================
|
||||
|
||||
The dynamic four-point susceptibility :math:`\chi_4(t)` is a measure for heterogenous dynamics. [Berthier]_
|
||||
It can be calculated from the variance of the incoherent intermediate scattering function
|
||||
:math:`F_q(t)`.
|
||||
|
||||
.. math::
|
||||
\chi_4 (t) = N\cdot\left( \left\langle F_q^2(t) \right\rangle - \left\langle F_q(t) \right\rangle^2 \right)
|
||||
|
||||
This is astraight forward calculation in mdevaluate.
|
||||
First calculate the ISF without time average and then take the variance along the first axis of this data.
|
||||
Note that this quantity requires good statistics, hence it is adviced to use a small time window
|
||||
and a sufficient number of segments for the analysis.
|
||||
Another way to reduce scatter is to smooth the data with a running mean,
|
||||
calling :func:`~mdevaluate.utils.runningmean` as shown below.
|
||||
|
||||
.. [Berthier] http://link.aps.org/doi/10.1103/Physics.4.42
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
import matplotlib.pyplot as plt
|
||||
import mdevaluate as md
|
||||
import tudplot
|
||||
|
||||
OW = md.open('/data/niels/sim/water/bulk/260K', trajectory='out/*.xtc').subset(atom_name='OW')
|
||||
|
||||
t, Fqt = md.correlation.shifted_correlation(
|
||||
partial(md.correlation.isf, q=22.7),
|
||||
OW,
|
||||
average=False,
|
||||
window=0.2,
|
||||
skip=0.1,
|
||||
segments=20
|
||||
)
|
||||
chi4 = len(OW[0]) * Fqt.var(axis=0)
|
||||
|
||||
tudplot.activate()
|
||||
|
||||
plt.plot(t, chi4, 'h', label=r'$\chi_4$')
|
||||
plt.plot(t[2:-2], md.utils.runningmean(chi4, 5), '-', label='smoothed')
|
||||
|
||||
plt.semilogx()
|
||||
plt.xlabel('time / ps')
|
||||
plt.ylabel('$\\chi_4$')
|
||||
plt.legend(loc='best')
|
@ -1,30 +0,0 @@
|
||||
"""
|
||||
Calculating the ISF of Water
|
||||
=======================================================
|
||||
|
||||
In this example the ISF of water oxygens is calculated for a bulk simulation.
|
||||
Additionally a KWW function is fitted to the results.
|
||||
"""
|
||||
from functools import partial
|
||||
import matplotlib.pyplot as plt
|
||||
from scipy.optimize import curve_fit
|
||||
import mdevaluate as md
|
||||
import tudplot
|
||||
|
||||
OW = md.open('/data/niels/sim/water/bulk/260K', trajectory='out/*.xtc').subset(atom_name='OW')
|
||||
t, S = md.correlation.shifted_correlation(
|
||||
partial(md.correlation.isf, q=22.7),
|
||||
OW,
|
||||
average=True
|
||||
)
|
||||
# Only include data-points of the alpha-relaxation for the fit
|
||||
mask = t > 3e-1
|
||||
fit, cov = curve_fit(md.functions.kww, t[mask], S[mask])
|
||||
tau = md.functions.kww_1e(*fit)
|
||||
|
||||
tudplot.activate()
|
||||
plt.figure()
|
||||
plt.plot(t, S, '.', label='ISF of Bulk Water')
|
||||
plt.plot(t, md.functions.kww(t, *fit), '-', label=r'KWW, $\tau$={:.2f}ps'.format(tau))
|
||||
plt.xscale('log')
|
||||
plt.legend()
|
@ -1,121 +0,0 @@
|
||||
"""
|
||||
Spatially resolved analysis in a cylindrical pore
|
||||
=======================================================
|
||||
|
||||
Calculate the spatially resolved ISF inside a cylindrical neutral water pore
|
||||
In this case the bins describe the shortest distance of an oxygen atom to any wall atom
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
import mdevaluate as md
|
||||
import tudplot
|
||||
from scipy import spatial
|
||||
from scipy.optimize import curve_fit
|
||||
|
||||
#trajectory with index file
|
||||
#TODO eine allgemeinere stelle?
|
||||
traj = md.open('/data/robin/sim/nvt/12kwater/240_r25_0_NVT',
|
||||
trajectory='nojump.xtc', index_file='indexSL.ndx',topology='*.gro')
|
||||
#Liquid oxygens
|
||||
LO = traj.subset(indices= traj.atoms.indices['LH2O'])
|
||||
#Solid oxygens
|
||||
SO = traj.subset(indices= traj.atoms.indices['SH2O'])
|
||||
#Solid oxygens and bonded hydrogens
|
||||
SW = traj.subset(residue_id = SO.atom_subset.residue_ids)
|
||||
|
||||
#TODO die folgenden beiden zusammen sind nochmal deutlich schneller als
|
||||
#md.atom.distance_to_atoms, kannst du entweder in irgendeiner weise einbauen
|
||||
#oder hier lassen, man muss aber auf thickness achten, dass das sinn macht
|
||||
#adds periodic layers of the atoms
|
||||
def pbc_points(points, box_vector, thickness=0, index=False, inclusive=True):
|
||||
coordinates = np.copy(points)%box_vector
|
||||
allcoordinates = np.copy(coordinates)
|
||||
indices = np.tile(np.arange(len(points)),(27))
|
||||
for x in range(-1, 2, 1):
|
||||
for y in range(-1, 2, 1):
|
||||
for z in range(-1, 2, 1):
|
||||
vv = np.array([x, y, z], dtype=float)
|
||||
if not (vv == 0).all() :
|
||||
allcoordinates = np.concatenate((allcoordinates, coordinates + vv*box_vector), axis=0)
|
||||
|
||||
if thickness != 0:
|
||||
mask = np.all(allcoordinates < box_vector+thickness, axis=1)
|
||||
allcoordinates = allcoordinates[mask]
|
||||
indices = indices[mask]
|
||||
mask = np.all(allcoordinates > -thickness, axis=1)
|
||||
allcoordinates = allcoordinates[mask]
|
||||
indices = indices[mask]
|
||||
if not inclusive:
|
||||
allcoordinates = allcoordinates[len(points):]
|
||||
indices = indices[len(points):]
|
||||
if index:
|
||||
return (allcoordinates, indices)
|
||||
return allcoordinates
|
||||
|
||||
#fast calculation of shortest distance from one subset to another, uses pbc_points
|
||||
def distance_to_atoms(ref, observed_atoms, box=None, thickness=0.5):
|
||||
if box is not None:
|
||||
start_coords = np.copy(observed_atoms)%box
|
||||
all_frame_coords = pbc_points(ref, box, thickness = thickness)
|
||||
else:
|
||||
start_coords = np.copy(observed_atoms)
|
||||
all_frame_coords = np.copy(ref)
|
||||
|
||||
tree = spatial.cKDTree(all_frame_coords)
|
||||
first_neighbors = tree.query(start_coords)[0]
|
||||
return first_neighbors
|
||||
|
||||
#this is used to reduce the number of wall atoms to those relevant, speeds up the rest
|
||||
dist = distance_to_atoms(LO[0], SW[0], np.diag(LO[0].box))
|
||||
wall_atoms = SW.atom_subset.indices[0]
|
||||
wall_atoms = wall_atoms[dist < 0.35]
|
||||
SW = traj.subset(indices = wall_atoms)
|
||||
|
||||
from functools import partial
|
||||
func = partial(md.correlation.isf, q=22.7)
|
||||
|
||||
#selector function to choose liquid oxygens with a certain distance to wall atoms
|
||||
def selector_func(coords, lindices, windices, dmin, dmax):
|
||||
lcoords = coords[lindices]
|
||||
wcoords = coords[windices]
|
||||
dist = distance_to_atoms(wcoords, lcoords,box=np.diag(coords.box))
|
||||
#radial distance to pore center to ignore molecules that entered the wall
|
||||
rad = np.sum((lcoords[:,:2]-np.diag(coords.box)[:2]/2)**2,axis=1)**.5
|
||||
return lindices[(dist >= dmin) & (dist < dmax) & (rad < 2.7)]
|
||||
|
||||
#calculate the shifted correlation for several bins
|
||||
#bin positions are roughly the average of the limits
|
||||
bins = np.array([0.15,0.2,0.3,0.4,0.5,0.8,1.0,1.4,1.8,2.3])
|
||||
binpos = (bins[1:]+bins[:-1])/2
|
||||
S = np.empty(len(bins)-1, dtype='object')
|
||||
for i in range(len(bins)-1):
|
||||
selector = partial(selector_func,lindices=LO.atom_subset.indices[0],
|
||||
windices=SW.atom_subset.indices[0],dmin=bins[i],
|
||||
dmax = bins[i+1])
|
||||
t, S[i] = md.correlation.shifted_correlation(
|
||||
func, traj,segments=50, skip=0.1,average=True,
|
||||
correlation=md.correlation.subensemble_correlation(selector),
|
||||
description=str(bins[i])+','+str(bins[i+1]))
|
||||
|
||||
taus = np.zeros(len(S))
|
||||
tudplot.activate()
|
||||
plt.figure()
|
||||
for i,s in enumerate(S):
|
||||
pl = plt.plot(t, s, '.', label='d = ' + str(binpos[i]) + ' nm')
|
||||
#only includes the relevant data for 1/e fitting
|
||||
mask = s < 0.6
|
||||
fit, cov = curve_fit(md.functions.kww, t[mask], s[mask],
|
||||
p0=[1.0,t[t>1/np.e][-1],0.5])
|
||||
taus[i] = md.functions.kww_1e(*fit)
|
||||
plt.plot(t, md.functions.kww(t, *fit), c=pl[0].get_color())
|
||||
plt.xscale('log')
|
||||
plt.legend()
|
||||
#plt.show()
|
||||
|
||||
tudplot.activate()
|
||||
plt.figure()
|
||||
plt.plot(binpos, taus,'.',label=r'$\tau$(d)')
|
||||
plt.yscale('log')
|
||||
plt.legend()
|
||||
#plt.show()
|
@ -1,17 +0,0 @@
|
||||
"""
|
||||
Plotting the Temperature from an Energy File
|
||||
============================================
|
||||
|
||||
This example reads an Gromacs energy file and plots the evolultion and mean of the temperature.
|
||||
"""
|
||||
|
||||
from matplotlib import pyplot as plt
|
||||
import mdevaluate as md
|
||||
import tudplot
|
||||
|
||||
tudplot.activate()
|
||||
|
||||
edr = md.open_energy('/data/niels/sim/water/bulk/300K/out/energy_water1000bulk300.edr')
|
||||
T = edr['Temperature']
|
||||
plt.plot(edr.time, T)
|
||||
plt.plot(edr.time[[0, -1]], [T.mean(), T.mean()])
|
@ -4,10 +4,12 @@ build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "mdevaluate"
|
||||
version = "23.12"
|
||||
version = "24.06"
|
||||
dependencies = [
|
||||
"mdanalysis",
|
||||
"pandas",
|
||||
"dask",
|
||||
"pathos",
|
||||
"tables",
|
||||
"pyedr"
|
||||
]
|
||||
|
@ -2,3 +2,6 @@ mdanalysis
|
||||
pandas
|
||||
dask
|
||||
pathos
|
||||
tables
|
||||
pytest
|
||||
pyedr
|
@ -5,15 +5,17 @@ from typing import Optional
|
||||
import pandas as pd
|
||||
|
||||
from . import atoms
|
||||
from . import autosave
|
||||
from . import checksum
|
||||
from . import coordinates
|
||||
from . import correlation
|
||||
from . import distribution
|
||||
from . import functions
|
||||
from . import pbc
|
||||
from . import autosave
|
||||
from . import reader
|
||||
from . import system
|
||||
from .extra import free_energy_landscape, chill
|
||||
from . import utils
|
||||
from . import extra
|
||||
from .logging import logger
|
||||
|
||||
|
||||
@ -36,7 +38,7 @@ def open(
|
||||
used, if there is exactly one in the directory.
|
||||
trajectory (opt.): Descriptor of the trajectory (xtc or trr file).
|
||||
nojump (opt.):
|
||||
If nojump matrixes should be generated. They will alwyas be loaded
|
||||
If nojump matrices should be generated. They will always be loaded
|
||||
if present
|
||||
index_file (opt.): Descriptor of the index file (ndx file).
|
||||
charges (opt.):
|
||||
@ -95,9 +97,9 @@ def open(
|
||||
coords = coordinates.Coordinates(frames, atom_subset=atom_set)
|
||||
if nojump:
|
||||
try:
|
||||
frames.nojump_matrixes
|
||||
frames.nojump_matrices
|
||||
except reader.NojumpError:
|
||||
reader.generate_nojump_matrixes(coords)
|
||||
reader.generate_nojump_matrices(coords)
|
||||
return coords
|
||||
|
||||
|
||||
|
@ -1,10 +1,10 @@
|
||||
from functools import partial, wraps
|
||||
from copy import copy
|
||||
from .logging import logger
|
||||
from typing import Optional, Callable
|
||||
from typing import Optional, Callable, List, Tuple
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
from numpy.typing import ArrayLike, NDArray
|
||||
from scipy.spatial import KDTree
|
||||
|
||||
from .atoms import AtomSubset
|
||||
@ -17,7 +17,7 @@ class UnknownCoordinatesMode(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CoordinateFrame(np.ndarray):
|
||||
class CoordinateFrame(NDArray):
|
||||
_known_modes = ("pbc", "whole", "nojump")
|
||||
|
||||
@property
|
||||
@ -99,7 +99,7 @@ class CoordinateFrame(np.ndarray):
|
||||
box=None,
|
||||
mode=None,
|
||||
):
|
||||
obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides)
|
||||
obj = NDArray.__new__(subtype, shape, dtype, buffer, offset, strides)
|
||||
|
||||
obj.coordinates = coordinates
|
||||
obj.step = step
|
||||
@ -218,7 +218,7 @@ class Coordinates:
|
||||
self.get_frame.clear_cache()
|
||||
|
||||
def __iter__(self):
|
||||
for i in range(len(self))[self._slice]:
|
||||
for i in range(len(self.frames))[self._slice]:
|
||||
yield self[i]
|
||||
|
||||
@singledispatchmethod
|
||||
@ -232,7 +232,7 @@ class Coordinates:
|
||||
return sliced
|
||||
|
||||
def __len__(self):
|
||||
return len(self.frames)
|
||||
return len(self.frames[self._slice])
|
||||
|
||||
def __checksum__(self):
|
||||
return checksum(self.frames, self.atom_filter, self._slice, self.mode)
|
||||
@ -261,6 +261,7 @@ class CoordinatesMap:
|
||||
self.frames = self.coordinates.frames
|
||||
self.atom_subset = self.coordinates.atom_subset
|
||||
self.function = function
|
||||
self._slice = slice(None)
|
||||
if isinstance(function, partial):
|
||||
self._description = self.function.func.__name__
|
||||
else:
|
||||
@ -319,7 +320,7 @@ class CoordinatesMap:
|
||||
return CoordinatesMap(self.coordinates.pbc, self.function)
|
||||
|
||||
|
||||
def rotate_axis(coords: npt.ArrayLike, axis: npt.ArrayLike) -> np.ndarray:
|
||||
def rotate_axis(coords: ArrayLike, axis: ArrayLike) -> NDArray:
|
||||
"""
|
||||
Rotate a set of coordinates to a given axis.
|
||||
"""
|
||||
@ -352,8 +353,8 @@ def rotate_axis(coords: npt.ArrayLike, axis: npt.ArrayLike) -> np.ndarray:
|
||||
|
||||
|
||||
def spherical_radius(
|
||||
frame: CoordinateFrame, origin: Optional[npt.ArrayLike] = None
|
||||
) -> np.ndarray:
|
||||
frame: CoordinateFrame, origin: Optional[ArrayLike] = None
|
||||
) -> NDArray:
|
||||
"""
|
||||
Transform a frame of cartesian coordinates into the spherical radius.
|
||||
If origin=None, the center of the box is taken as the coordinates' origin.
|
||||
@ -363,7 +364,7 @@ def spherical_radius(
|
||||
return ((frame - origin) ** 2).sum(axis=-1) ** 0.5
|
||||
|
||||
|
||||
def polar_coordinates(x: npt.ArrayLike, y: npt.ArrayLike) -> (np.ndarray, np.ndarray):
|
||||
def polar_coordinates(x: ArrayLike, y: ArrayLike) -> (NDArray, NDArray):
|
||||
"""Convert cartesian to polar coordinates."""
|
||||
radius = (x**2 + y**2) ** 0.5
|
||||
phi = np.arctan2(y, x)
|
||||
@ -371,8 +372,8 @@ def polar_coordinates(x: npt.ArrayLike, y: npt.ArrayLike) -> (np.ndarray, np.nda
|
||||
|
||||
|
||||
def spherical_coordinates(
|
||||
x: npt.ArrayLike, y: npt.ArrayLike, z: npt.ArrayLike
|
||||
) -> (np.ndarray, np.ndarray, np.ndarray):
|
||||
x: ArrayLike, y: ArrayLike, z: ArrayLike
|
||||
) -> (NDArray, NDArray, NDArray):
|
||||
"""Convert cartesian to spherical coordinates."""
|
||||
xy, phi = polar_coordinates(x, y)
|
||||
radius = (x**2 + y**2 + z**2) ** 0.5
|
||||
@ -384,8 +385,8 @@ def selector_radial_cylindrical(
|
||||
atoms: CoordinateFrame,
|
||||
r_min: float,
|
||||
r_max: float,
|
||||
origin: Optional[npt.ArrayLike] = None,
|
||||
) -> np.ndarray:
|
||||
origin: Optional[ArrayLike] = None,
|
||||
) -> NDArray:
|
||||
box = atoms.box
|
||||
atoms = atoms % np.diag(box)
|
||||
if origin is None:
|
||||
@ -397,7 +398,7 @@ def selector_radial_cylindrical(
|
||||
|
||||
|
||||
def map_coordinates(
|
||||
func: Callable[[CoordinateFrame, ...], np.ndarray]
|
||||
func: Callable[[CoordinateFrame, ...], NDArray]
|
||||
) -> Callable[..., CoordinatesMap]:
|
||||
@wraps(func)
|
||||
def wrapped(coordinates: Coordinates, **kwargs) -> CoordinatesMap:
|
||||
@ -408,14 +409,14 @@ def map_coordinates(
|
||||
|
||||
@map_coordinates
|
||||
def center_of_masses(
|
||||
frame: CoordinateFrame, atoms=None, shear: bool = False
|
||||
) -> np.ndarray:
|
||||
if atoms is None:
|
||||
atoms = list(range(len(frame)))
|
||||
res_ids = frame.residue_ids[atoms]
|
||||
masses = frame.masses[atoms]
|
||||
frame: CoordinateFrame, atom_indices=None, shear: bool = False
|
||||
) -> NDArray:
|
||||
if atom_indices is None:
|
||||
atom_indices = list(range(len(frame)))
|
||||
res_ids = frame.residue_ids[atom_indices]
|
||||
masses = frame.masses[atom_indices]
|
||||
if shear:
|
||||
coords = frame[atoms]
|
||||
coords = frame[atom_indices]
|
||||
box = frame.box
|
||||
sort_ind = res_ids.argsort(kind="stable")
|
||||
i = np.concatenate([[0], np.where(np.diff(res_ids[sort_ind]) > 0)[0] + 1])
|
||||
@ -423,7 +424,7 @@ def center_of_masses(
|
||||
cor = pbc_diff(coords, coms, box)
|
||||
coords = coms + cor
|
||||
else:
|
||||
coords = frame.whole[atoms]
|
||||
coords = frame.whole[atom_indices]
|
||||
mask = np.bincount(res_ids)[1:] != 0
|
||||
positions = np.array(
|
||||
[
|
||||
@ -437,8 +438,8 @@ def center_of_masses(
|
||||
|
||||
@map_coordinates
|
||||
def pore_coordinates(
|
||||
frame: CoordinateFrame, origin: npt.ArrayLike, sym_axis: str = "z"
|
||||
) -> np.ndarray:
|
||||
frame: CoordinateFrame, origin: ArrayLike, sym_axis: str = "z"
|
||||
) -> NDArray:
|
||||
"""
|
||||
Map coordinates of a pore simulation so the pore has cylindrical symmetry.
|
||||
|
||||
@ -459,17 +460,17 @@ def pore_coordinates(
|
||||
@map_coordinates
|
||||
def vectors(
|
||||
frame: CoordinateFrame,
|
||||
atoms_indices_a: npt.ArrayLike,
|
||||
atoms_indices_b: npt.ArrayLike,
|
||||
atom_indices_a: ArrayLike,
|
||||
atom_indices_b: ArrayLike,
|
||||
normed: bool = False,
|
||||
) -> np.ndarray:
|
||||
) -> NDArray:
|
||||
"""
|
||||
Compute the vectors between the atoms of two subsets.
|
||||
|
||||
Args:
|
||||
frame: The Coordinates object the atoms will be taken from
|
||||
atoms_indices_a: Mask or indices of the first atom subset
|
||||
atoms_indices_b: Mask or indices of the second atom subset
|
||||
atom_indices_a: Mask or indices of the first atom subset
|
||||
atom_indices_b: Mask or indices of the second atom subset
|
||||
normed (opt.): If the vectors should be normed
|
||||
|
||||
The definition of atoms_a/b can be any possible subscript of a numpy array.
|
||||
@ -492,10 +493,10 @@ def vectors(
|
||||
])
|
||||
"""
|
||||
box = frame.box
|
||||
coords_a = frame[atoms_indices_a]
|
||||
coords_a = frame[atom_indices_a]
|
||||
if len(coords_a.shape) > 2:
|
||||
coords_a = coords_a.mean(axis=0)
|
||||
coords_b = frame[atoms_indices_b]
|
||||
coords_b = frame[atom_indices_b]
|
||||
if len(coords_b.shape) > 2:
|
||||
coords_b = coords_b.mean(axis=0)
|
||||
vec = pbc_diff(coords_a, coords_b, box=box)
|
||||
@ -507,8 +508,8 @@ def vectors(
|
||||
|
||||
@map_coordinates
|
||||
def dipole_vector(
|
||||
frame: CoordinateFrame, atom_indices: npt.ArrayLike, normed: bool = None
|
||||
) -> np.ndarray:
|
||||
frame: CoordinateFrame, atom_indices: ArrayLike, normed: bool = None
|
||||
) -> NDArray:
|
||||
coords = frame.whole[atom_indices]
|
||||
res_ids = frame.residue_ids[atom_indices]
|
||||
charges = frame.charges[atom_indices]
|
||||
@ -525,9 +526,9 @@ def dipole_vector(
|
||||
@map_coordinates
|
||||
def sum_dipole_vector(
|
||||
coordinates: CoordinateFrame,
|
||||
atom_indices: npt.ArrayLike,
|
||||
atom_indices: ArrayLike,
|
||||
normed: bool = True,
|
||||
) -> np.ndarray:
|
||||
) -> NDArray:
|
||||
coords = coordinates.whole[atom_indices]
|
||||
charges = coordinates.charges[atom_indices]
|
||||
dipole = np.array([c * charges for c in coords.T]).T
|
||||
@ -539,11 +540,11 @@ def sum_dipole_vector(
|
||||
@map_coordinates
|
||||
def normal_vectors(
|
||||
frame: CoordinateFrame,
|
||||
atom_indices_a: npt.ArrayLike,
|
||||
atom_indices_b: npt.ArrayLike,
|
||||
atom_indices_c: npt.ArrayLike,
|
||||
atom_indices_a: ArrayLike,
|
||||
atom_indices_b: ArrayLike,
|
||||
atom_indices_c: ArrayLike,
|
||||
normed: bool = True,
|
||||
) -> np.ndarray:
|
||||
) -> NDArray:
|
||||
coords_a = frame[atom_indices_a]
|
||||
coords_b = frame[atom_indices_b]
|
||||
coords_c = frame[atom_indices_c]
|
||||
@ -571,8 +572,8 @@ def displacements_without_drift(
|
||||
|
||||
@map_coordinates
|
||||
def cylindrical_coordinates(
|
||||
frame: CoordinateFrame, origin: npt.ArrayLike = None
|
||||
) -> np.ndarray:
|
||||
frame: CoordinateFrame, origin: ArrayLike = None
|
||||
) -> NDArray:
|
||||
if origin is None:
|
||||
origin = np.diag(frame.box) / 2
|
||||
x = frame[:, 0] - origin[0]
|
||||
@ -586,8 +587,8 @@ def cylindrical_coordinates(
|
||||
def layer_of_atoms(
|
||||
atoms: CoordinateFrame,
|
||||
thickness: float,
|
||||
plane_normal: npt.ArrayLike,
|
||||
plane_offset: Optional[npt.ArrayLike] = np.array([0, 0, 0]),
|
||||
plane_normal: ArrayLike,
|
||||
plane_offset: Optional[ArrayLike] = np.array([0, 0, 0]),
|
||||
) -> np.array:
|
||||
if plane_offset is None:
|
||||
np.array([0, 0, 0])
|
||||
@ -603,7 +604,7 @@ def next_neighbors(
|
||||
distance_upper_bound: float = np.inf,
|
||||
distinct: bool = False,
|
||||
**kwargs
|
||||
) -> (np.ndarray, np.ndarray):
|
||||
) -> Tuple[List, List]:
|
||||
"""
|
||||
Find the N next neighbors of a set of atoms.
|
||||
|
||||
@ -635,9 +636,17 @@ def next_neighbors(
|
||||
number_of_neighbors + dnn,
|
||||
distance_upper_bound=distance_upper_bound,
|
||||
)
|
||||
distances = distances[:, dnn:]
|
||||
indices = indices[:, dnn:]
|
||||
distances_new = []
|
||||
indices_new = []
|
||||
for dist, ind in zip(distances, indices):
|
||||
distances_new.append(dist[dist <= distance_upper_bound])
|
||||
indices_new.append(ind[dist <= distance_upper_bound])
|
||||
return distances_new, indices_new
|
||||
else:
|
||||
atoms_pbc, atoms_pbc_index = pbc_points(
|
||||
query_atoms, box, thickness=distance_upper_bound + 0.1, index=True, **kwargs
|
||||
atoms, box, thickness=distance_upper_bound + 0.1, index=True, **kwargs
|
||||
)
|
||||
tree = KDTree(atoms_pbc)
|
||||
distances, indices = tree.query(
|
||||
@ -645,6 +654,51 @@ def next_neighbors(
|
||||
number_of_neighbors + dnn,
|
||||
distance_upper_bound=distance_upper_bound,
|
||||
)
|
||||
indices = atoms_pbc_index[indices]
|
||||
distances = distances[:, dnn:]
|
||||
indices = indices[:, dnn:]
|
||||
distances_new = []
|
||||
indices_new = []
|
||||
for dist, ind in zip(distances, indices):
|
||||
distances_new.append(dist[dist <= distance_upper_bound])
|
||||
indices_new.append(atoms_pbc_index[ind[dist <= distance_upper_bound]])
|
||||
return distances_new, indices_new
|
||||
|
||||
return distances[:, dnn:], indices[:, dnn:]
|
||||
|
||||
def number_of_neighbors(
|
||||
atoms: CoordinateFrame,
|
||||
query_atoms: Optional[CoordinateFrame] = None,
|
||||
r_max: float = 1,
|
||||
distinct: bool = False,
|
||||
**kwargs
|
||||
) -> NDArray:
|
||||
"""
|
||||
Count the number of neighbors of a set of atoms within the distance r_max.
|
||||
|
||||
Args:
|
||||
atoms:
|
||||
The reference atoms and also the atoms which are queried if `query_atoms`
|
||||
is not provided
|
||||
query_atoms (opt.): If this is not None, these atoms will be queried
|
||||
r_max (float, opt.):
|
||||
Upper bound of the distance between neighbors
|
||||
distinct (bool, opt.):
|
||||
If this is true, the atoms and query atoms are taken as distinct sets of
|
||||
atoms
|
||||
"""
|
||||
dnn = 0
|
||||
if query_atoms is None:
|
||||
query_atoms = atoms
|
||||
dnn = 1
|
||||
elif not distinct:
|
||||
dnn = 1
|
||||
|
||||
box = atoms.box
|
||||
if np.all(np.diag(np.diag(box)) == box):
|
||||
atoms = atoms % np.diag(box)
|
||||
tree = KDTree(atoms, boxsize=np.diag(box))
|
||||
else:
|
||||
atoms_pbc = pbc_points(atoms, box, thickness=r_max + 0.1, **kwargs)
|
||||
tree = KDTree(atoms_pbc)
|
||||
|
||||
num_of_neighbors = tree.query_ball_point(query_atoms, r_max, return_length=True)
|
||||
return num_of_neighbors - dnn
|
||||
|
@ -2,122 +2,194 @@ from typing import Callable, Optional
|
||||
|
||||
import numpy as np
|
||||
from numpy.typing import ArrayLike
|
||||
from scipy.special import legendre
|
||||
from scipy.special import legendre, jn
|
||||
import dask.array as darray
|
||||
from functools import partial
|
||||
from scipy.spatial import KDTree
|
||||
|
||||
from .autosave import autosave_data
|
||||
from .utils import coherent_sum, histogram
|
||||
from .utils import coherent_sum
|
||||
from .pbc import pbc_diff, pbc_points
|
||||
from .coordinates import Coordinates, CoordinateFrame, displacements_without_drift
|
||||
|
||||
|
||||
def log_indices(first: int, last: int, num: int = 100) -> np.ndarray:
|
||||
ls = np.logspace(0, np.log10(last - first + 1), num=num)
|
||||
return np.unique(np.int_(ls) - 1 + first)
|
||||
def _is_multi_selector(selection):
|
||||
if len(selection) == 0:
|
||||
return False
|
||||
elif (
|
||||
isinstance(selection[0], int)
|
||||
or isinstance(selection[0], bool)
|
||||
or isinstance(selection[0], np.integer)
|
||||
or isinstance(selection[0], np.bool_)
|
||||
):
|
||||
return False
|
||||
else:
|
||||
for indices in selection:
|
||||
if len(indices) == 0:
|
||||
continue
|
||||
elif (
|
||||
isinstance(indices[0], int)
|
||||
or isinstance(indices[0], bool)
|
||||
or isinstance(indices[0], np.integer)
|
||||
or isinstance(indices[0], np.bool_)
|
||||
):
|
||||
return True
|
||||
else:
|
||||
raise ValueError(
|
||||
"selector has more than two dimensions or does not "
|
||||
"contain int or bool types"
|
||||
)
|
||||
|
||||
|
||||
def _calc_correlation(
|
||||
frames: Coordinates,
|
||||
start_frame: CoordinateFrame,
|
||||
function: Callable,
|
||||
selection: np.ndarray,
|
||||
shifted_idx: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
if len(selection) == 0:
|
||||
correlation = np.zeros(len(shifted_idx))
|
||||
else:
|
||||
start = start_frame[selection]
|
||||
correlation = np.array(
|
||||
[
|
||||
function(start, frames[frame_index][selection])
|
||||
for frame_index in shifted_idx
|
||||
]
|
||||
)
|
||||
return correlation
|
||||
|
||||
|
||||
def _calc_correlation_multi(
|
||||
frames: Coordinates,
|
||||
start_frame: CoordinateFrame,
|
||||
function: Callable,
|
||||
selection: np.ndarray,
|
||||
shifted_idx: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
correlations = np.zeros((len(selection), len(shifted_idx)))
|
||||
for i, frame_index in enumerate(shifted_idx):
|
||||
frame = frames[frame_index]
|
||||
for j, current_selection in enumerate(selection):
|
||||
if len(current_selection) == 0:
|
||||
correlations[j, i] = 0
|
||||
else:
|
||||
correlations[j, i] = function(
|
||||
start_frame[current_selection], frame[current_selection]
|
||||
)
|
||||
return correlations
|
||||
|
||||
|
||||
def _average_correlation(result):
|
||||
averaged_result = []
|
||||
for n in range(result.shape[1]):
|
||||
clean_result = []
|
||||
for entry in result[:, n]:
|
||||
if np.all(entry == 0):
|
||||
continue
|
||||
else:
|
||||
clean_result.append(entry)
|
||||
averaged_result.append(np.average(np.array(clean_result), axis=0))
|
||||
return np.array(averaged_result)
|
||||
|
||||
|
||||
def _average_correlation_multi(result):
|
||||
clean_result = []
|
||||
for entry in result:
|
||||
if np.all(entry == 0):
|
||||
continue
|
||||
else:
|
||||
clean_result.append(entry)
|
||||
return np.average(np.array(clean_result), axis=0)
|
||||
|
||||
|
||||
@autosave_data(2)
|
||||
def shifted_correlation(
|
||||
function: Callable,
|
||||
frames: Coordinates,
|
||||
selector: ArrayLike = None,
|
||||
selector: Optional[Callable] = None,
|
||||
segments: int = 10,
|
||||
skip: float = 0.1,
|
||||
window: float = 0.5,
|
||||
average: bool = True,
|
||||
points: int = 100,
|
||||
) -> (np.ndarray, np.ndarray):
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
"""Compute a time-dependent correlation function for a given trajectory.
|
||||
|
||||
To improve statistics, multiple (possibly overlapping) windows will be
|
||||
layed over the whole trajectory and the correlation is computed for them separately.
|
||||
The start frames of the windows are spaced linearly over the valid region of
|
||||
the trajectory (skipping frames in the beginning given by skip parameter).
|
||||
|
||||
The points within each window are spaced logarithmically.
|
||||
|
||||
Only a certain subset of the given atoms may be selected for each window
|
||||
individually using a selector function.
|
||||
|
||||
Note that this function is specifically optimized for multi selectors, which select
|
||||
multiple selection sets per window, for which the correlation is to be computed
|
||||
separately.
|
||||
|
||||
|
||||
Arguments
|
||||
---------
|
||||
function:
|
||||
The (correlation) function to evaluate.
|
||||
Should be of the form (CoordinateFrame, CoordinateFrame) -> float
|
||||
|
||||
frames:
|
||||
Trajectory to evaluate on
|
||||
|
||||
selector: (optional)
|
||||
Selection function to select only certain selection sets for each start frame.
|
||||
Should be of the form
|
||||
(CoordinateFrame) -> list[A]
|
||||
where A is something you can index an ndarray with.
|
||||
For example a list of indices or a bool array.
|
||||
Must return the same number of selection sets for every frame.
|
||||
|
||||
segments:
|
||||
Number of start frames
|
||||
|
||||
skip:
|
||||
Fraction of the trajectory to skip at the start
|
||||
|
||||
window:
|
||||
Length of each segment, given as a fraction of the trajectory
|
||||
|
||||
average:
|
||||
Whether to return averaged results.
|
||||
See below for details on the returned ndarray.
|
||||
|
||||
points:
|
||||
Number of points per segment
|
||||
|
||||
|
||||
Returns
|
||||
-------
|
||||
times: ndarray
|
||||
1d array of time differences to start frame
|
||||
result: ndarray
|
||||
2d ndarray of averaged (or non-averaged) correlations.
|
||||
|
||||
When average==True (default) the returned array will be of the shape (S, P)
|
||||
where S is the number of selection sets and P the number of points per window.
|
||||
For selection sets that were empty for all start frames all data points will be
|
||||
zero.
|
||||
|
||||
When average==False the returned array will be of shape (W, S) with
|
||||
dtype=object. The elements are either ndarrays of shape (P,) containing the
|
||||
correlation data for the specific window and selection set or None if the
|
||||
corresponding selection set was empty.
|
||||
W is the number of segments (windows).
|
||||
S and P are the same as for average==True.
|
||||
|
||||
"""
|
||||
Calculate the time series for a correlation function.
|
||||
|
||||
The times at which the correlation is calculated are determined by
|
||||
a logarithmic distribution.
|
||||
|
||||
Args:
|
||||
function: The function that should be correlated
|
||||
frames: The coordinates of the simulation data
|
||||
selector (opt.):
|
||||
A function that returns the indices depending on
|
||||
the starting frame for which particles the
|
||||
correlation should be calculated.
|
||||
segments (int, opt.):
|
||||
The number of segments the time window will be
|
||||
shifted
|
||||
skip (float, opt.):
|
||||
The fraction of the trajectory that will be skipped
|
||||
at the beginning, if this is None the start index
|
||||
of the frames slice will be used, which defaults
|
||||
to 0.1.
|
||||
window (float, opt.):
|
||||
The fraction of the simulation the time series will
|
||||
cover
|
||||
average (bool, opt.):
|
||||
If True, returns averaged correlation function
|
||||
points (int, opt.):
|
||||
The number of timeshifts for which the correlation
|
||||
should be calculated
|
||||
Returns:
|
||||
tuple:
|
||||
A list of length N that contains the time shifts of the frames at which
the time series was calculated and a numpy array of shape (segments, N)
that holds the (non-averaged) correlation data
|
||||
|
||||
Example:
|
||||
Calculating the mean square displacement of a coordinate object
|
||||
named ``coords``:
|
||||
|
||||
>>> time, data = shifted_correlation(msd, coords)
|
||||
"""
|
||||
|
||||
def get_correlation(
|
||||
frames: CoordinateFrame,
|
||||
start_frame: CoordinateFrame,
|
||||
index: np.ndarray,
|
||||
shifted_idx: np.ndarray,
|
||||
) -> np.ndarray:
|
||||
if len(index) == 0:
|
||||
correlation = np.zeros(len(shifted_idx))
|
||||
else:
|
||||
start = frames[start_frame][index]
|
||||
correlation = np.array(
|
||||
[function(start, frames[frame][index]) for frame in shifted_idx]
|
||||
)
|
||||
return correlation
|
||||
|
||||
def apply_selector(
|
||||
start_frame: CoordinateFrame,
|
||||
frames: CoordinateFrame,
|
||||
idx: np.ndarray,
|
||||
selector: Optional[Callable] = None,
|
||||
):
|
||||
shifted_idx = idx + start_frame
|
||||
|
||||
if selector is None:
|
||||
index = np.arange(len(frames[start_frame]))
|
||||
return get_correlation(frames, start_frame, index, shifted_idx)
|
||||
else:
|
||||
index = selector(frames[start_frame])
|
||||
if len(index.shape) == 1:
|
||||
return get_correlation(frames, start_frame, index, shifted_idx)
|
||||
elif len(index.shape) == 2:
|
||||
correlations = []
|
||||
for ind in index:
|
||||
correlations.append(
|
||||
get_correlation(frames, start_frame, ind, shifted_idx)
|
||||
)
|
||||
return correlations
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Index list of selector has {len(index.shape)} dimensions, "
|
||||
"but should have 1 or 2"
|
||||
)
|
||||
|
||||
if 1 - skip < window:
|
||||
window = 1 - skip
|
||||
|
||||
start_frames = np.unique(
|
||||
start_frame_indices = np.unique(
|
||||
np.linspace(
|
||||
len(frames) * skip,
|
||||
len(frames) * (1 - window),
|
||||
@ -127,28 +199,44 @@ def shifted_correlation(
|
||||
)
|
||||
)
|
||||
|
||||
num_frames = int(len(frames) * window)
|
||||
ls = np.logspace(0, np.log10(num_frames + 1), num=points)
|
||||
idx = np.unique(np.int_(ls) - 1)
|
||||
t = np.array([frames[i].time for i in idx]) - frames[0].time
|
||||
|
||||
result = np.array(
|
||||
[
|
||||
apply_selector(start_frame, frames=frames, idx=idx, selector=selector)
|
||||
for start_frame in start_frames
|
||||
]
|
||||
num_frames_per_window = int(len(frames) * window)
|
||||
logspaced_indices = np.logspace(0, np.log10(num_frames_per_window + 1), num=points)
|
||||
logspaced_indices = np.unique(np.int_(logspaced_indices) - 1)
|
||||
logspaced_time = (
|
||||
np.array([frames[i].time for i in logspaced_indices]) - frames[0].time
|
||||
)
|
||||
|
||||
if average:
|
||||
clean_result = []
|
||||
for entry in result:
|
||||
if np.all(entry == 0):
|
||||
continue
|
||||
if selector is None:
|
||||
multi_selector = False
|
||||
else:
|
||||
clean_result.append(entry)
|
||||
result = np.array(clean_result)
|
||||
result = np.average(result, axis=0)
|
||||
return t, result
|
||||
selection = selector(frames[0])
|
||||
multi_selector = _is_multi_selector(selection)
|
||||
|
||||
result = []
|
||||
for start_frame_index in start_frame_indices:
|
||||
shifted_idx = logspaced_indices + start_frame_index
|
||||
start_frame = frames[start_frame_index]
|
||||
if selector is None:
|
||||
selection = np.arange(len(start_frame))
|
||||
else:
|
||||
selection = selector(start_frame)
|
||||
if multi_selector:
|
||||
result_segment = _calc_correlation_multi(
|
||||
frames, start_frame, function, selection, shifted_idx
|
||||
)
|
||||
else:
|
||||
result_segment = _calc_correlation(
|
||||
frames, start_frame, function, selection, shifted_idx
|
||||
)
|
||||
result.append(result_segment)
|
||||
result = np.array(result)
|
||||
|
||||
if average:
|
||||
if multi_selector:
|
||||
result = _average_correlation_multi(result)
|
||||
else:
|
||||
result = _average_correlation(result)
|
||||
return logspaced_time, result
|
||||
|
||||
|
||||
def msd(
|
||||
@ -166,6 +254,12 @@ def msd(
|
||||
displacements = displacements_without_drift(start_frame, end_frame, trajectory)
|
||||
if axis == "all":
|
||||
return (displacements**2).sum(axis=1).mean()
|
||||
elif axis == "xy" or axis == "yx":
|
||||
return (displacements[:, [0, 1]] ** 2).sum(axis=1).mean()
|
||||
elif axis == "xz" or axis == "zx":
|
||||
return (displacements[:, [0, 2]] ** 2).sum(axis=1).mean()
|
||||
elif axis == "yz" or axis == "zy":
|
||||
return (displacements[:, [1, 2]] ** 2).sum(axis=1).mean()
|
||||
elif axis == "x":
|
||||
return (displacements[:, 0] ** 2).mean()
|
||||
elif axis == "y":
|
||||
@ -194,6 +288,15 @@ def isf(
|
||||
if axis == "all":
|
||||
distance = (displacements**2).sum(axis=1) ** 0.5
|
||||
return np.sinc(distance * q / np.pi).mean()
|
||||
elif axis == "xy" or axis == "yx":
|
||||
distance = (displacements[:, [0, 1]] ** 2).sum(axis=1) ** 0.5
|
||||
return np.real(jn(0, distance * q)).mean()
|
||||
elif axis == "xz" or axis == "zx":
|
||||
distance = (displacements[:, [0, 2]] ** 2).sum(axis=1) ** 0.5
|
||||
return np.real(jn(0, distance * q)).mean()
|
||||
elif axis == "yz" or axis == "zy":
|
||||
distance = (displacements[:, [1, 2]] ** 2).sum(axis=1) ** 0.5
|
||||
return np.real(jn(0, distance * q)).mean()
|
||||
elif axis == "x":
|
||||
distance = np.abs(displacements[:, 0])
|
||||
return np.mean(np.cos(np.abs(q * distance)))
|
||||
@ -245,6 +348,12 @@ def van_hove_self(
|
||||
vectors = displacements_without_drift(start_frame, end_frame, trajectory)
|
||||
if axis == "all":
|
||||
delta_r = (vectors**2).sum(axis=1) ** 0.5
|
||||
elif axis == "xy" or axis == "yx":
|
||||
delta_r = (vectors[:, [0, 1]] ** 2).sum(axis=1) ** 0.5
|
||||
elif axis == "xz" or axis == "zx":
|
||||
delta_r = (vectors[:, [0, 2]] ** 2).sum(axis=1) ** 0.5
|
||||
elif axis == "yz" or axis == "zy":
|
||||
delta_r = (vectors[:, [1, 2]] ** 2).sum(axis=1) ** 0.5
|
||||
elif axis == "x":
|
||||
delta_r = np.abs(vectors[:, 0])
|
||||
elif axis == "y":
|
||||
@ -311,7 +420,7 @@ def van_hove_distinct(
|
||||
)
|
||||
** 2
|
||||
).sum(axis=-1) ** 0.5
|
||||
hist = histogram(dist, bins=bins)[0]
|
||||
hist = np.histogram(dist, bins=bins)[0]
|
||||
return hist / N
|
||||
|
||||
|
||||
@@ -406,6 +515,15 @@ def non_gaussian_parameter(
    if axis == "all":
        r = (vectors**2).sum(axis=1)
        dimensions = 3
    elif axis == "xy" or axis == "yx":
        r = (vectors[:, [0, 1]] ** 2).sum(axis=1)
        dimensions = 2
    elif axis == "xz" or axis == "zx":
        r = (vectors[:, [0, 2]] ** 2).sum(axis=1)
        dimensions = 2
    elif axis == "yz" or axis == "zy":
        r = (vectors[:, [1, 2]] ** 2).sum(axis=1)
        dimensions = 2
    elif axis == "x":
        r = vectors[:, 0] ** 2
        dimensions = 1

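The dimensions value presumably feeds the dimension-dependent prefactor of the non-Gaussian parameter (the hunk is truncated before the return); for displacements that are Gaussian in d dimensions the parameter vanishes:

\alpha_2(t) = \frac{d}{d + 2} \, \frac{\langle r^4(t) \rangle}{\langle r^2(t) \rangle^2} - 1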
@@ -1,4 +1,4 @@
from typing import Callable, Optional, Union
from typing import Callable, Optional, Union, Tuple, List

import numpy as np
from numpy.typing import ArrayLike, NDArray
@@ -12,6 +12,7 @@ from .coordinates import (
    Coordinates,
    CoordinateFrame,
    next_neighbors,
    number_of_neighbors,
)
from .autosave import autosave_data
from .pbc import pbc_diff, pbc_points
@@ -52,7 +53,43 @@ def time_average(
    return np.mean(result, axis=0)


def gr(
@autosave_data(nargs=2, kwargs_keys=("coordinates_b",))
def time_distribution(
    function: Callable,
    coordinates: Coordinates,
    coordinates_b: Optional[Coordinates] = None,
    skip: float = 0,
    segments: int = 100,
) -> Tuple[NDArray, List]:
    """
    Compute the time distribution of a function.

    Args:
        function:
            The function that will be averaged; it has to accept exactly one
            argument, which is the current atom set (or two if coordinates_b
            is provided).
        coordinates: The coordinates object of the simulation.
        coordinates_b: Additional coordinates object of the simulation.
        skip: Fraction of the trajectory to skip at the beginning.
        segments: Number of frames at which the function is evaluated.
    """
    frame_indices = np.unique(
        np.int_(
            np.linspace(len(coordinates) * skip, len(coordinates) - 1, num=segments)
        )
    )
    times = np.array([coordinates[frame_index].time for frame_index in frame_indices])
    if coordinates_b is None:
        result = [function(coordinates[frame_index]) for frame_index in frame_indices]
    else:
        result = [
            function(coordinates[frame_index], coordinates_b[frame_index])
            for frame_index in frame_indices
        ]
    return times, result

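A hypothetical call, tracking a scalar observable over the run (water_traj and the lambda are assumptions, not part of the library):

import numpy as np

times, mean_z = time_distribution(
    lambda frame: np.mean(frame[:, 2]),  # assumed observable: mean z-coordinate
    water_traj,                          # assumed Coordinates object
    skip=0.1,
    segments=200,
)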
def rdf(
    atoms_a: CoordinateFrame,
    atoms_b: Optional[CoordinateFrame] = None,
    bins: Optional[ArrayLike] = None,
@@ -86,10 +123,9 @@ def gr(
    if bins is None:
        bins = np.arange(0, 1, 0.01)

    box = atoms_b.box
    n = len(atoms_a) / np.prod(np.diag(box))
    V = 4 / 3 * np.pi * bins[-1] ** 3
    particles_in_volume = int(n * V * 1.1)
    particles_in_volume = int(
        np.max(number_of_neighbors(atoms_a, query_atoms=atoms_b, r_max=bins[-1])) * 1.1
    )
    distances, indices = next_neighbors(
        atoms_a,
        atoms_b,
@@ -108,19 +144,19 @@ def gr(
        )
        distances = np.concatenate(new_distances)
    else:
        distances = distances.flatten()
        distances = [d for dist in distances for d in dist]

    hist, bins = np.histogram(distances, bins=bins, range=(0, bins[-1]), density=False)
    hist = hist / len(atoms_a)
    hist = hist / len(atoms_b)
    hist = hist / (4 / 3 * np.pi * bins[1:] ** 3 - 4 / 3 * np.pi * bins[:-1] ** 3)
    n = len(atoms_b) / np.prod(np.diag(atoms_b.box))
    n = len(atoms_a) / np.prod(np.diag(atoms_a.box))
    hist = hist / n

    return hist


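The normalization above follows the usual pair-correlation definition: the neighbor histogram is divided by the number of query atoms, by the volume of each spherical shell, and by the bulk density of the reference atoms, so that g(r) tends to 1 at large r. The shell-volume expression is just the difference of sphere volumes:

import numpy as np

bins = np.arange(0, 1, 0.01)
shell_volumes = 4 / 3 * np.pi * (bins[1:] ** 3 - bins[:-1] ** 3)
assert np.allclose(
    shell_volumes,
    4 / 3 * np.pi * bins[1:] ** 3 - 4 / 3 * np.pi * bins[:-1] ** 3,
)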
def distance_distribution(
    atoms: CoordinateFrame, bins: Optional[int, ArrayLike]
    atoms: CoordinateFrame, bins: Union[int, ArrayLike]
) -> NDArray:
    connection_vectors = atoms[:-1, :] - atoms[1:, :]
    connection_lengths = (connection_vectors**2).sum(axis=1) ** 0.5
@@ -270,30 +306,31 @@ def next_neighbor_distribution(
    )[1]
    resname_nn = reference.residue_names[nn]
    count_nn = (resname_nn == atoms.residue_names.reshape(-1, 1)).sum(axis=1)
    return np.histogram(count_nn, bins=bins, normed=normed)[0]
    return np.histogram(count_nn, bins=bins, density=normed)[0]


def hbonds(
    D: CoordinateFrame,
    H: CoordinateFrame,
    A: CoordinateFrame,
    atoms: CoordinateFrame,
    donator_indices: ArrayLike,
    hydrogen_indices: ArrayLike,
    acceptor_indices: ArrayLike,
    DA_lim: float = 0.35,
    HA_lim: float = 0.35,
    min_cos: float = np.cos(30 * np.pi / 180),
    max_angle_deg: float = 30,
    full_output: bool = False,
) -> Union[NDArray, tuple[NDArray, NDArray, NDArray]]:
    """
    Compute h-bond pairs.

    Args:
        D: Set of coordinates for donators.
        H: Set of coordinates for hydrogen atoms. Should have the same
        atoms: Set of all coordinates for a frame.
        donator_indices: Set of indices for donators.
        hydrogen_indices: Set of indices for hydrogen atoms. Should have the same
            length as D.
        A: Set of coordinates for acceptors.
        DA_lim (opt.): Minimum distance between donator and acceptor.
        HA_lim (opt.): Minimum distance between hydrogen and acceptor.
        min_cos (opt.): Minimum cosine for the HDA angle. Default is
            equivalent to a maximum angle of 30 degrees.
        acceptor_indices: Set of indices for acceptors.
        DA_lim (opt.): Minimum distance between donator and acceptor.
        HA_lim (opt.): Minimum distance between hydrogen and acceptor.
        max_angle_deg (opt.): Maximum angle in degrees for the HDA angle.
        full_output (opt.): Additionally returns the cosine of the
            angles and the DA distances.

@@ -326,6 +363,10 @@ def hbonds(
        pairs[:, 1] = pind[pairs[:, 1]]
        return pairs

    D = atoms[donator_indices]
    H = atoms[hydrogen_indices]
    A = atoms[acceptor_indices]
    min_cos = np.cos(max_angle_deg * np.pi / 180)
    box = D.box
    if len(D) <= len(A):
        pairs = dist_DltA(D, A, DA_lim)

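With the new index-based signature, a call might look like the following sketch (the frame object and its atom_names attribute are assumptions about the surrounding code, not a confirmed API):

import numpy as np

O = np.where(frame.atom_names == "OW")[0]                   # assumed attribute
H = np.where(np.char.startswith(frame.atom_names, "HW"))[0]
donators = np.repeat(O, 2)                                  # one donor slot per hydrogen

pairs = hbonds(frame, donators, H, O, DA_lim=0.35, HA_lim=0.35, max_angle_deg=30)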
@@ -0,0 +1,3 @@
from . import chill
from . import free_energy_landscape
from . import water

@@ -1,9 +1,17 @@
from typing import Tuple, Callable

import numpy as np
from numpy.typing import ArrayLike, NDArray
import pandas as pd
from scipy import sparse
from scipy.spatial import KDTree
from scipy.special import sph_harm

from mdevaluate.coordinates import CoordinateFrame, Coordinates
from mdevaluate.pbc import pbc_points


def a_ij(atoms, N=4, l=3):

def calc_aij(atoms: ArrayLike, N: int = 4, l: int = 3) -> tuple[NDArray, NDArray]:
    tree = KDTree(atoms)

    dist, indices = tree.query(atoms, N + 1)
@@ -30,13 +38,9 @@ def a_ij(atoms, N=4, l=3):
    return np.real(aij), indices


def number_of_neighbors(atoms):
    tree = KDTree(atoms)
    dist, _ = tree.query(atoms, 10, distance_upper_bound=0.35)
    return np.array([len(distance[distance < 0.4]) - 1 for distance in dist])


def classify_ice(aij, indices, neighbors, indexSOL):
def classify_ice(
    aij: NDArray, indices: NDArray, neighbors: NDArray, indexSOL: NDArray
) -> NDArray:
    staggerdBonds = np.sum(aij <= -0.8, axis=1)
    eclipsedBonds = np.sum((aij >= -0.35) & (aij <= 0.25), axis=1)

@@ -67,7 +71,7 @@ def classify_ice(aij, indices, neighbors, indexSOL):
    return iceTypes


def ice_parts(iceTypes):
def count_ice_types(iceTypes: NDArray) -> NDArray:
    cubic = len(iceTypes[iceTypes == 0])
    hexagonal = len(iceTypes[iceTypes == 1])
    interface = len(iceTypes[iceTypes == 2])
@@ -77,3 +81,119 @@ def ice_parts(iceTypes):
    return np.array(
        [cubic, hexagonal, interface, clathrate, clathrate_interface, liquid]
    )


def selector_ice(
    oxygen_atoms_water: CoordinateFrame,
    chosen_ice_types: ArrayLike,
    combined: bool = True,
    next_neighbor_distance: float = 0.35,
) -> NDArray:
    atoms = oxygen_atoms_water
    atoms_PBC = pbc_points(atoms, thickness=next_neighbor_distance * 2.2)
    aij, indices = calc_aij(atoms_PBC)
    tree = KDTree(atoms_PBC)
    neighbors = tree.query_ball_point(
        atoms_PBC, next_neighbor_distance, return_length=True
    ) - 1
    index_SOL = atoms_PBC.tolist().index(atoms[0].tolist())
    index_SOL = np.arange(index_SOL, index_SOL + len(atoms))
    ice_Types = classify_ice(aij, indices, neighbors, index_SOL)
    index = []
    if combined is True:
        for i, ice_Type in enumerate(ice_Types):
            if ice_Type in chosen_ice_types:
                index.append(i)
    else:
        for entry in chosen_ice_types:
            index_entry = []
            for i, ice_Type in enumerate(ice_Types):
                if ice_Type == entry:
                    index_entry.append(i)
            index.append(np.array(index_entry))
    return np.array(index)


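A hypothetical use of selector_ice, picking all ice-like molecules (types 0-2, i.e. cubic, hexagonal and interface) from the oxygen subset of a frame (oxygens is an assumption):

frame = oxygens[0]                        # assumed: trajectory.subset(atom_name="OW")
ice_like = selector_ice(frame, [0, 1, 2])
ice_coords = frame[ice_like]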
def ice_types(trajectory: Coordinates, segments: int = 10000) -> pd.DataFrame:
    def ice_types_distribution(frame: CoordinateFrame, selector: Callable) -> NDArray:
        atoms_PBC = pbc_points(frame, thickness=1)
        aij, indices = calc_aij(atoms_PBC)
        tree = KDTree(atoms_PBC)
        neighbors = tree.query_ball_point(atoms_PBC, 0.35, return_length=True) - 1
        index = selector(frame, atoms_PBC)
        ice_types_data = classify_ice(aij, indices, neighbors, index)
        ice_parts_data = count_ice_types(ice_types_data)
        return ice_parts_data

    def selector(frame: CoordinateFrame, atoms_PBC: ArrayLike) -> NDArray:
        atoms_SOL = traj.subset(residue_name="SOL")[frame.step]
        index = atoms_PBC.tolist().index(atoms_SOL[0].tolist())
        index = np.arange(index, index + len(atoms_SOL))
        return np.array(index)

    traj = trajectory.subset(atom_name="OW")

    frame_indices = np.unique(np.int_(np.linspace(0, len(traj) - 1, num=segments)))

    result = np.array(
        [
            [
                traj[frame_index].time,
                *ice_types_distribution(traj[frame_index], selector),
            ]
            for frame_index in frame_indices
        ]
    )

    return pd.DataFrame(
        {
            "time": result[:, 0],
            "cubic": result[:, 1],
            "hexagonal": result[:, 2],
            "ice_interface": result[:, 3],
            "clathrate": result[:, 4],
            "clathrate_interface": result[:, 5],
            "liquid": result[:, 6],
        }
    )


def ice_clusters_traj(
    traj: Coordinates, segments: int = 10000, skip: float = 0.1
) -> list:
    def ice_clusters(frame: CoordinateFrame) -> Tuple[float, list]:
        selection = selector_ice(frame, [0, 1, 2])
        if len(selection) == 0:
            return frame.time, []
        else:
            ice = frame[selection]
            ice_PBC, indices_PBC = pbc_points(
                ice, box=frame.box, thickness=0.5, index=True
            )
            ice_tree = KDTree(ice_PBC)
            ice_matrix = ice_tree.sparse_distance_matrix(
                ice_tree, 0.35, output_type="ndarray"
            )
            new_ice_matrix = np.zeros((len(ice), len(ice)))
            for entry in ice_matrix:
                if entry[2] > 0:
                    new_ice_matrix[indices_PBC[entry[0]], indices_PBC[entry[1]]] = 1
            n_components, labels = sparse.csgraph.connected_components(
                new_ice_matrix, directed=False
            )
            clusters = []
            selection = np.array(selection)
            for i in range(0, np.max(labels) + 1):
                if len(ice[labels == i]) > 1:
                    clusters.append(
                        list(zip(selection[labels == i], ice[labels == i].tolist()))
                    )
            return frame.time, clusters

    frame_indices = np.unique(
        np.int_(np.linspace(len(traj) * skip, len(traj) - 1, num=segments))
    )
    all_clusters = [
        ice_clusters(traj[frame_index]) for frame_index in frame_indices
    ]
    return all_clusters

@@ -1,18 +1,73 @@
from functools import partial
import os.path
from typing import Optional

import numpy as np
import math
import scipy
from numpy.typing import ArrayLike, NDArray
from numpy.polynomial.polynomial import Polynomial as Poly
from scipy.spatial import KDTree
import cmath
import pandas as pd
import multiprocessing as mp

VALID_GEOMETRY = {"cylindrical", "slab"}
from ..coordinates import Coordinates


def occupation_matrix(trajectory, edge_length=0.05, segments=1000, skip=0.1, nodes=8):
def _pbc_points_reduced(
    coordinates: ArrayLike,
    pore_geometry: str,
    box: Optional[NDArray] = None,
    thickness: Optional[float] = None,
) -> tuple[NDArray, NDArray]:
    if box is None:
        box = coordinates.box
    if pore_geometry == "cylindrical":
        grid = np.array([[i, j, k] for k in [-1, 0, 1] for j in [0] for i in [0]])
        indices = np.tile(np.arange(len(coordinates)), 3)
    elif pore_geometry == "slit":
        grid = np.array(
            [[i, j, k] for k in [0] for j in [1, 0, -1] for i in [-1, 0, 1]]
        )
        indices = np.tile(np.arange(len(coordinates)), 9)
    else:
        raise ValueError(
            f"pore_geometry is {pore_geometry}, should either be "
            f"'cylindrical' or 'slit'"
        )
    coordinates_pbc = np.concatenate([coordinates + v @ box for v in grid], axis=0)
    size = np.diag(box)

    if thickness is not None:
        mask = np.all(coordinates_pbc > -thickness, axis=1)
        coordinates_pbc = coordinates_pbc[mask]
        indices = indices[mask]
        mask = np.all(coordinates_pbc < size + thickness, axis=1)
        coordinates_pbc = coordinates_pbc[mask]
        indices = indices[mask]

    return coordinates_pbc, indices


def _build_tree(points, box, r_max, pore_geometry):
    if np.all(np.diag(np.diag(box)) == box):
        # box is diagonal here; scipy expects the edge-length vector, not the matrix
        tree = KDTree(points % np.diag(box), boxsize=np.diag(box))
        points_pbc_index = None
    else:
        points_pbc, points_pbc_index = _pbc_points_reduced(
            points,
            pore_geometry,
            box,
            thickness=r_max + 0.01,
        )
        tree = KDTree(points_pbc)
    return tree, points_pbc_index


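_build_tree needs this branch because scipy's KDTree only supports periodic boundaries for rectangular boxes; for anything else the reduced periodic images are built explicitly. A minimal sketch under that assumption (the 10-particle configuration is made up):

import numpy as np

rng = np.random.default_rng(1)
box = np.diag([3.0, 3.0, 6.0])           # rectangular box -> boxsize branch
points = rng.uniform(0.0, 3.0, size=(10, 3))

tree, pbc_index = _build_tree(points, box, r_max=0.5, pore_geometry="cylindrical")
# pbc_index is None here, since the KDTree handled the periodicity itself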
def occupation_matrix(
    trajectory: Coordinates,
    edge_length: float = 0.05,
    segments: int = 1000,
    skip: float = 0.1,
    nodes: int = 8,
) -> pd.DataFrame:
    frame_indices = np.unique(
        np.int_(np.linspace(len(trajectory) * skip, len(trajectory) - 1, num=segments))
    )
@@ -23,11 +78,7 @@ def occupation_matrix(trajectory, edge_length=0.05, segments=1000, skip=0.1, nodes=8):
    z_bins = np.arange(0, box[2][2] + edge_length, edge_length)
    bins = [x_bins, y_bins, z_bins]
    # Trajectory is split for parallel computing
    size = math.ceil(len(frame_indices) / nodes)
    indices = [
        np.arange(len(frame_indices))[i : i + size]
        for i in range(0, len(frame_indices), size)
    ]
    indices = np.array_split(frame_indices, nodes)
    pool = mp.Pool(nodes)
    results = pool.map(
        partial(_calc_histogram, trajectory=trajectory, bins=bins), indices
@@ -44,18 +95,20 @@ def occupation_matrix(trajectory, edge_length=0.05, segments=1000, skip=0.1, nodes=8):
    occupation_df = pd.DataFrame(
        {"x": coords[0], "y": coords[1], "z": coords[2], "occupation": matbin_new}
    )
    occupation_df = occupation_df.query("occupation != 0")
    occupation_df = occupation_df.query("occupation != 0").reset_index(drop=True)
    return occupation_df


def _calc_histogram(numberlist, trajectory, bins):
def _calc_histogram(
    indices: ArrayLike, trajectory: Coordinates, bins: ArrayLike
) -> NDArray:
    matbin = None
    for index in range(0, len(numberlist), 1000):
    for index in range(0, len(indices), 1000):
        try:
            indices = numberlist[index : index + 1000]
            current_indices = indices[index : index + 1000]
        except IndexError:
            indices = numberlist[index:]
        frames = np.concatenate(np.array([trajectory.pbc[i] for i in indices]))
            current_indices = indices[index:]
        frames = np.concatenate(np.array([trajectory.pbc[i] for i in current_indices]))
        hist, _ = np.histogramdd(frames, bins=bins)
        if matbin is None:
            matbin = hist
@@ -64,424 +117,177 @@ def _calc_histogram(numberlist, trajectory, bins):
    return matbin


def get_fel(
    traj,
    path,
    geometry,
    temperature,
    edge=0.05,
    radiusmin=0.05,
    radiusmax=2.05,
    z=[-np.inf, np.inf],
    overwrite=False,
def find_maxima(
    occupation_df: pd.DataFrame, box: ArrayLike, radius: float, pore_geometry: str
) -> pd.DataFrame:
    maxima_df = occupation_df.copy()
    maxima_df["maxima"] = None
    points = np.array(maxima_df[["x", "y", "z"]])
    tree, points_pbc_index = _build_tree(points, box, radius, pore_geometry)
    for i in range(len(maxima_df)):
        if maxima_df.loc[i, "maxima"] is not None:
            continue
        maxima_pos = maxima_df.loc[i, ["x", "y", "z"]]
        neighbors = np.array(tree.query_ball_point(maxima_pos, radius))
        if points_pbc_index is not None:
            neighbors = points_pbc_index[neighbors]
        neighbors = neighbors[neighbors != i]
        if len(neighbors) == 0:
            maxima_df.loc[i, "maxima"] = True
        elif (
            maxima_df.loc[neighbors, "occupation"].max()
            < maxima_df.loc[i, "occupation"]
        ):
    """
    The main method of this script. Calculates the energy difference as a function
    of radius.

    This method calculates the relative energy of different minima based on the
    radius to the pore center.
    It saves those results in a .npy file at the location given by the
    "path" parameter and will also try to load from there.


    Parameters:
        traj : The trajectory of the system to be evaluated
        path : The save and load location of the files
        geometry : Either "cylindrical" or "slab". The geometry of your system.
            Other types are currently not supported.
        temperature: The temperature of your system. Needed for the energy difference
        edge (opt.) : The length of the cubes in which your system will be divided
        radiusmin (opt.) : The radius where the calculation begins. Will create a bin of
            +- 0.05 of that number.
        radiusmax (opt.) : The radius where the calculation ends. Will create a bin of
            +- 0.05 of that number.
        z (opt.) : The evaluated slice of the trajectory for the energy landscape.

    Returns:
        list: A list of the energy difference as a function of radius

    """
    if geometry not in VALID_GEOMETRY:
        raise ValueError("results: status must be one of %r." % VALID_GEOMETRY)

    if (os.path.exists(f"{path}/radiiData.npy")) & (not overwrite):
        data = np.load(f"{path}/radiiData.npy")
        bins = np.load(f"{path}/radiiBins.npy")
    # Here the different standard geometries are inserted
            maxima_df.loc[neighbors, "maxima"] = False
            maxima_df.loc[i, "maxima"] = True
    else:
        if geometry == "cylindrical":
            bins, data = short_all_radii(
                traj, path, edge=edge, radiusmin=radiusmin, radiusmax=radiusmax, z=z
            maxima_df.loc[i, "maxima"] = False
    return maxima_df


def _calc_energies(
    maxima_indices: ArrayLike,
    maxima_df: pd.DataFrame,
    bins: ArrayLike,
    box: NDArray,
    pore_geometry: str,
    T: float,
    nodes: int = 8,
) -> NDArray:
    points = np.array(maxima_df[["x", "y", "z"]])
    tree, points_pbc_index = _build_tree(points, box, bins[-1], pore_geometry)
    maxima = maxima_df.loc[maxima_indices, ["x", "y", "z"]]
    maxima_occupations = np.array(maxima_df.loc[maxima_indices, "occupation"])
    num_of_neighbors = np.max(
        tree.query_ball_point(maxima, bins[-1], return_length=True)
    )
        elif geometry == "slab":
            bins, data = short_all_radii_slab(
                traj, path, edge=edge, radiusmin=radiusmin, radiusmax=radiusmax, z=z
    split_maxima = []
    for i in range(0, len(maxima), 1000):
        split_maxima.append(maxima[i : i + 1000])

    distances = []
    indices = []
    for maxima in split_maxima:
        distances_step, indices_step = tree.query(
            maxima, k=num_of_neighbors, distance_upper_bound=bins[-1], workers=nodes
        )
        np.save(f"{path}/radiiData", data)
        np.save(f"{path}/radiiBins", bins)

    energy_differences = np.array(calculate_energy_difference(data, bins, temperature))
    r = bins[1:] - (bins[1] - bins[0]) / 2
    return r, energy_differences
        distances.append(distances_step)
        indices.append(indices_step)
    distances = np.concatenate(distances)
    indices = np.concatenate(indices)
    all_energy_hist = []
    all_occupied_bins_hist = []
    if distances.ndim == 1:
        current_distances = distances[1:][distances[1:] <= bins[-1]]
        if points_pbc_index is None:
            current_indices = indices[1:][distances[1:] <= bins[-1]]
        else:
            current_indices = points_pbc_index[indices[1:][distances[1:] <= bins[-1]]]
        energy = (
            -np.log(maxima_df.loc[current_indices, "occupation"] / maxima_occupations)
            * T
        )
        energy_hist = np.histogram(current_distances, bins=bins, weights=energy)[0]
        occupied_bins_hist = np.histogram(current_distances, bins=bins)[0]
        result = energy_hist / occupied_bins_hist
        return result
    for i, maxima_occupation in enumerate(maxima_occupations):
        current_distances = distances[i, 1:][distances[i, 1:] <= bins[-1]]
        if points_pbc_index is None:
            current_indices = indices[i, 1:][distances[i, 1:] <= bins[-1]]
        else:
            current_indices = points_pbc_index[
                indices[i, 1:][distances[i, 1:] <= bins[-1]]
            ]
        energy = (
            -np.log(maxima_df.loc[current_indices, "occupation"] / maxima_occupation)
            * T
        )
        energy_hist = np.histogram(current_distances, bins=bins, weights=energy)[0]
        occupied_bins_hist = np.histogram(current_distances, bins=bins)[0]
        all_energy_hist.append(energy_hist)
        all_occupied_bins_hist.append(occupied_bins_hist)
    result = np.sum(all_energy_hist, axis=0) / np.sum(all_occupied_bins_hist, axis=0)
    return result


def fill_bins(traj, path, edge=0.05):
    # If available, the matrix is loaded directly
    if os.path.exists(f"{path}/Matrix{edge}.npy"):
        matbin = np.load(f"{path}/Matrix{edge}.npy")
        return matbin

    pool = mp.Pool(8)
    size = math.ceil(len(traj) / 8)

    # Trajectory is split for parallel computing
    indices = list(chunksplit(np.arange(0, len(traj), 1), size))
    fill = partial(help_fill, traj=traj)
    results = pool.map(fill, indices)
    boxmat = traj[0].box

    a = math.ceil(boxmat[0][0] / 0.05)
    b = math.ceil(boxmat[1][1] / 0.05)
    c = math.ceil(boxmat[2][2] / 0.05)
    matbin = np.zeros((a, b, c))

    pool.close()

    for mat in results:
        matbin = matbin + mat
    np.save(file=f"{path}/Matrix{edge}", arr=matbin)
    return matbin
def add_distances(
    occupation_df: pd.DataFrame, pore_geometry: str, origin: ArrayLike
) -> pd.DataFrame:
    distance_df = occupation_df.copy()
    if pore_geometry == "cylindrical":
        distance_df["distance"] = (
            (distance_df["x"] - origin[0]) ** 2 + (distance_df["y"] - origin[1]) ** 2
        ) ** (1 / 2)
    elif pore_geometry == "slit":
        distance_df["distance"] = np.abs(distance_df["z"] - origin[2])
    else:
        raise ValueError(
            f"pore_geometry is {pore_geometry}, should either be "
            f"'cylindrical' or 'slit'"
        )
    return distance_df


def help_fill(numberlist, traj, edge=0.05):
    boxmat = traj[0].box

    a = math.ceil(boxmat[0][0] / edge)
    b = math.ceil(boxmat[1][1] / edge)
    c = math.ceil(boxmat[2][2] / edge)
    matbin = np.zeros((a, b, c))
    temp = np.array([[]]).reshape(0, 3)

    # Trajectory is split into chunks of 1000 frames to increase efficiency while
    # keeping RAM usage low
    h = 1000
    while h < len(numberlist):
        temp = np.array([[]]).reshape(0, 3)
        x = numberlist[h - 1000]
        y = numberlist[h]
        for j in traj.pbc[x:y]:
            l = np.floor(j / edge).astype("int32")
            temp = np.concatenate((temp, np.array(l)))
        # Positions are counted for the whole chunk
        unique, counts = np.unique(temp, return_counts=True, axis=0)
        m = 0
        # Counts are combined into a matrix whose entries correspond to
        # positions in the system
        for z in unique.astype("int"):
            a = z[0]
            b = z[1]
            c = z[2]
            matbin[a][b][c] += counts[m]
            m += 1
        h += 1000
    # The last few frames of the system are calculated separately
    x = numberlist[h - 1000]
    y = numberlist[-1]
    temp = np.array([[]]).reshape(0, 3)
    for j in traj.pbc[x : y + 1]:
        l = np.floor(j / edge).astype("int32")
        temp = np.concatenate((temp, np.array(l)))

    unique, counts = np.unique(temp, return_counts=True, axis=0)
    m = 0
    for z in unique.astype("int"):
        a = z[0]
        b = z[1]
        c = z[2]
        matbin[a][b][c] += counts[m]
        m += 1
    return matbin


def calculate_maxima(matbin):
    maxima = []

    z = [-1, 0, 1]

    max_i = matbin.shape[0]
    max_j = matbin.shape[1]
    max_k = matbin.shape[2]

    # For each element in the matrix, all 26 surrounding elements are compared to
    # determine whether it is a local maximum.
    # The algorithm can definitely be improved, but it is fast enough as it is.

    for i in range(max_i):
        for j in range(max_j):
            for k in range(max_k):
                a = matbin[i][j][k]
                b = True
                for l in z:
                    for m in z:
                        for n in z:
                            if (l != 0) or (m != 0) or (n != 0):
                                b = b and (
                                    a
                                    > matbin[(i + l) % max_i][(j + m) % max_j][
                                        (k + n) % max_k
def distance_resolved_energies(
    maxima_df: pd.DataFrame,
    distance_bins: ArrayLike,
    r_bins: ArrayLike,
    box: NDArray,
    pore_geometry: str,
    temperature: float,
    nodes: int = 8,
) -> pd.DataFrame:
    results = []
    distances = []
    for i in range(len(distance_bins) - 1):
        maxima_indices = np.array(
            maxima_df.index[
                (maxima_df["distance"] >= distance_bins[i])
                * (maxima_df["distance"] < distance_bins[i + 1])
                * (maxima_df["maxima"] == True)
            ]
        )

                                if b:
                    maxima.append([i, j, k])
    return maxima


def list_of_coordinates(matbin):
    # Matrix elements are translated back into position vectors
    max_i = matbin.shape[0]
    max_j = matbin.shape[1]
    max_k = matbin.shape[2]

    coord = np.zeros((max_i, max_j, max_k, 3))

    for i in range(max_i):
        for j in range(max_j):
            for k in range(max_k):
                coord[i][j][k] = [i, j, k]

    return coord


def sphere_quotient(
    matbin,
    maxima,
    coordlist,
    tree,
    radius,
        try:
            results.append(
                _calc_energies(
                    maxima_indices,
                    maxima_df,
                    r_bins,
                    box,
    edge,
    unitdist,
    z=np.array([-np.inf, np.inf]),
):
    # Here the pore center is assumed to be the system center
    diff = np.diag(box)[0:2] / 2

    # Distance to the z-axis
    distance = np.linalg.norm(np.array(maxima)[:, 0:2] * edge - diff, axis=1)

    # Selection with the given parameters
    mask = (
        (distance > radius - 0.05)
        & (distance < radius + 0.05)
        & (np.array(maxima)[:, 2] * edge > z[0])
        & (np.array(maxima)[:, 2] * edge < z[1])
                    pore_geometry,
                    temperature,
                    nodes,
                )
    )

    maxima_masked = np.array(maxima)[mask]

    # Distances between maxima and other cubes in the system
    coordlist = coordlist.reshape(-1, 3)
    numOfNeigbour = tree.query_ball_point(maxima[0], unitdist, return_length=True)
    d, neighbourlist = tree.query(maxima_masked, k=numOfNeigbour, workers=-1)

    i = 0

    average = 0

    y = []
    num = []
    for neighbours in neighbourlist:
        current = maxima_masked[i]

        # The energy between the minimum and all surrounding cubes is calculated
        energy = -np.log(
            matbin[current[0], current[1], current[2]]
            / matbin.flatten()[neighbours[1:]]
        )
            distances.append((distance_bins[i] + distance_bins[i + 1]) / 2)
        except ValueError:
            pass

        v, b = np.histogram(
            d[i, 1:].flatten()[(energy < np.Inf) & (energy > -np.Inf)],
            bins=np.arange(1, 40.5, 0.5),
            weights=energy[(energy < np.Inf) & (energy > -np.Inf)],
        )

        # For averaging purposes, the number-weighted histogram is also calculated
        k, l = np.histogram(
            d[i, 1:].flatten()[(energy < np.Inf) & (energy > -np.Inf)],
            bins=np.arange(1, 40.5, 0.5),
        )
        y.append(v)
        num.append(k)
        i += 1

    # The energy is averaged over the minima
    quotient = np.sum(y, axis=0) / np.sum(num, axis=0)
    if len(neighbourlist) == 0:
        return np.arange(1, 40.5, 0.5) * edge, np.arange(1, 40, 0.5) * 0
    return b * edge, quotient
    radii = (r_bins[:-1] + r_bins[1:]) / 2
    d = np.array([d for d in distances for r in radii])
    r = np.array([r for d in distances for r in radii])
    result = np.array(results).flatten()
    return pd.DataFrame({"d": d, "r": r, "energy": result})


def sphere_quotient_slab(
    matbin, maxima, coordlist, tree, radius, box, edge, unitdist, z=[-np.inf, np.inf]
):
    # Same as sphere_quotient, but for slabs
    diff = box[2, 2] / 2
    distance = abs(np.array(maxima)[:, 2] * edge - diff)
    mask = (
        (distance > radius - 0.05)
        & (distance < radius + 0.05)
        & (np.array(maxima)[:, 2] > z[0])
        & (np.array(maxima)[:, 2] < z[1])
    )

    maxima_masked = np.array(maxima)[mask]

    coordlist = coordlist.reshape(-1, 3)
    numOfNeigbour = tree.query_ball_point(maxima[0], unitdist, return_length=True)
    d, neighbourlist = tree.query(maxima_masked, k=numOfNeigbour, workers=-1)

    i = 0

    average = 0

    y = []
    num = []
    for neighbours in neighbourlist:
        current = maxima_masked[i]

        energy = -np.log(
            matbin[current[0], current[1], current[2]]
            / matbin.flatten()[neighbours[1:]]
        )

        # Energy is binned according to the distance to the minimum
        v, z = np.histogram(
            d[i, 1:].flatten()[(energy < np.Inf) & (energy > -np.Inf)],
            bins=unitdist * 2 - 2,
            weights=energy[(energy < np.Inf) & (energy > -np.Inf)],
        )

        k, l = np.histogram(
            d[i, 1:].flatten()[(energy < np.Inf) & (energy > -np.Inf)],
            bins=unitdist * 2 - 2,
        )

        y.append(v)

        num.append(k)
        i += 1

    quotient = np.sum(y, axis=0) / np.sum(num, axis=0)
    if len(neighbourlist) == 0:
        return np.arange(1, 40.5, 0.5), np.arange(1, 40, 0.5) * 0
    return z * edge, quotient


def short_all_radii(
    traj, path, edge=0.05, radiusmin=0.05, radiusmax=2.05, z=[-np.inf, np.inf]
):
    # Shorthand function for cylindrical systems
    matbin = fill_bins(traj, path, edge)
    maxima = calculate_maxima(matbin)
    coordinates = list_of_coordinates(matbin)
    tree = KDTree(np.reshape(coordinates, (-1, 3)), boxsize=matbin.shape)
    bins = []
    data = []
    for radius in np.arange(radiusmin, radiusmax, 0.1):
        b, d = sphere_quotient(
            matbin,
            maxima,
            coordinates,
            tree,
            radius=radius,
            box=traj[0].box,
            edge=edge,
            unitdist=40,
            z=z,
        )
        bins.append(b)
        data.append(d)
    return bins, data


def short_all_radii_slab(
    traj, path, edge=0.05, radiusmin=0.05, radiusmax=2.05, z=[-np.inf, np.inf]
):
    # Shorthand function for slab systems
    matbin = fill_bins(traj, path, edge)
    maxima = calculate_maxima(matbin)
    coordinates = list_of_coordinates(matbin)
    tree = KDTree(np.reshape(coordinates, (-1, 3)), boxsize=matbin.shape)
    bins = []
    data = []
    for radius in np.arange(radiusmin, radiusmax, 0.1):
        c, d = sphere_quotient_slab(
            matbin,
            maxima,
            coordinates,
            tree,
            radius=radius,
            box=traj[0].box,
            edge=edge,
            unitdist=40,
            z=z,
        )
        bins.append(c)
        data.append(d)
    return bins, data


def calculate_energy_difference(data, bins, temperature):
    """
    Calculates the energy difference between each local energy minimum and the
    minimum in the center.
    """
    difference = []
    i = 0
    while i < len(data):
        q = (
            get_minimum(data[0], bins[0])[1] - get_minimum(data[i], bins[0])[1]
        ) * temperature
        difference.append(q)
        i += 1
    return difference


def chunksplit(list_a, chunk_size):
    for i in range(0, len(list_a), chunk_size):
        yield list_a[i : i + chunk_size]


def get_minimum(data, bins):
    # Fits a polynomial of order 3 to determine the energy minimum and analytically
    # calculates the location of the minimum
    y = data[:10]
    x = bins[1:11]

    popt, _ = scipy.optimize.curve_fit(pol3, x, y, maxfev=80000)

    a, b = solve_quadratic(3 * popt[0], 2 * popt[1], popt[2])

    if 6 * popt[0] * a + 2 * popt[1] > 0:
        if (np.real(a) < 0) or (np.real(a) > 0.3):
            return np.real(a), np.average(y)
        return np.real(a), np.real(pol3(a, *popt))
def find_energy_maxima(
    energy_df: pd.DataFrame,
    r_min: float,
    r_max: float,
    r_eval: float = None,
    degree: int = 2,
) -> pd.DataFrame:
    distances = []
    energies = []
    for d, data_d in energy_df.groupby("d"):
        distances.append(d)
        x = np.array(data_d["r"])
        y = np.array(data_d["energy"])
        mask = (x >= r_min) * (x <= r_max)
        p3 = Poly.fit(x[mask], y[mask], deg=degree)
        if r_eval is None:
            energies.append(np.max(p3(np.linspace(r_min, r_max, 1000))))
        else:
    if (np.real(b) < 0) or (np.real(b) > 0.3):
        return np.real(b), np.average(y)
    return np.real(b), np.real(pol3(b, *popt))


def solve_quadratic(a, b, c):
    d = (b**2) - (4 * a * c)

    sol1 = (-b - cmath.sqrt(d)) / (2 * a)
    sol2 = (-b + cmath.sqrt(d)) / (2 * a)
    return sol1, sol2


def pol3(x, a, b, c, d):
    return a * x**3 + b * x**2 + c * x + d
            energies.append(p3(r_eval))
    return pd.DataFrame({"d": distances, "energy": energies})

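The new functions chain into a pipeline; a hypothetical end-to-end sketch (trajectory, the geometry and all bin choices are assumptions):

import numpy as np

occ = occupation_matrix(trajectory, edge_length=0.05, segments=1000, skip=0.1)
box = trajectory[0].box
occ = add_distances(occ, "cylindrical", origin=np.diag(box) / 2)
maxima = find_maxima(occ, box, radius=0.05, pore_geometry="cylindrical")
energies = distance_resolved_energies(
    maxima,
    distance_bins=np.arange(0.0, 2.0, 0.1),
    r_bins=np.arange(0.0, 0.5, 0.02),
    box=box,
    pore_geometry="cylindrical",
    temperature=300,
)
barriers = find_energy_maxima(energies, r_min=0.05, r_max=0.3)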
419
src/mdevaluate/extra/water.py
Normal file
@@ -0,0 +1,419 @@
from functools import partial
from typing import Tuple, Callable, Optional

import numpy as np
from numpy.typing import NDArray, ArrayLike
import pandas as pd
from scipy.spatial import KDTree

from ..distribution import hbonds
from ..pbc import pbc_points
from ..correlation import shifted_correlation, overlap
from ..coordinates import Coordinates, CoordinateFrame


def tanaka_zeta(
    trajectory: Coordinates, angle: float = 30, segments: int = 100, skip: float = 0.1
) -> pd.DataFrame:
    frame_indices = np.unique(
        np.int_(np.linspace(len(trajectory) * skip, len(trajectory) - 1, num=segments))
    )
    sel = trajectory.atom_subset.selection
    A = np.where(
        trajectory.subset(atom_name="OW", residue_name="SOL").atom_subset.selection[sel]
    )[0]
    D = np.vstack([A] * 2).T.reshape((-1,))
    H = np.where(
        trajectory.subset(atom_name="HW.", residue_name="SOL").atom_subset.selection[
            sel
        ]
    )[0]

    zeta_dist = []
    zeta_cg_dist = []
    for frame_index in frame_indices:
        D_frame = trajectory[frame_index][D]
        H_frame = trajectory[frame_index][H]
        A_frame = trajectory[frame_index][A]
        box = trajectory[frame_index].box
        pairs = hbonds(
            D_frame, H_frame, A_frame, box, min_cos=np.cos(angle / 180 * np.pi)
        )
        pairs[:, 0] = np.int_((pairs[:, 0] / 2))
        pairs = np.sort(pairs, axis=1)
        pairs = np.unique(pairs, axis=0)
        pairs = pairs.tolist()

        A_PBC, A_index = pbc_points(A_frame, box, thickness=0.7, index=True)
        A_tree = KDTree(A_PBC)
        dist, dist_index = A_tree.query(A_frame, 16, distance_upper_bound=0.7)

        dist_index = A_index[dist_index]
        zeta = []
        for i, indices in enumerate(dist_index):
            dist_hbond = []
            dist_non_hbond = []
            for j, index in enumerate(indices):
                if j != 0:
                    if np.sort([indices[0], index]).tolist() in pairs:
                        dist_hbond.append(dist[i, j])
                    else:
                        dist_non_hbond.append(dist[i, j])
            try:
                zeta.append(np.min(dist_non_hbond) - np.max(dist_hbond))
            except ValueError:
                zeta.append(0)

        zeta = np.array(zeta)

        dist, dist_index = A_tree.query(A_frame, 16, distance_upper_bound=0.7)
        dist_index = A_index[dist_index]
        dist_index = np.array(
            [indices[dist[i] <= 0.35] for i, indices in enumerate(dist_index)]
        )
        zeta_cg = np.array([np.mean(zeta[indices]) for indices in dist_index])

        bins = np.linspace(-0.1, 0.2, 301)
        zeta_dist.append(np.histogram(zeta, bins=bins)[0])
        zeta_cg_dist.append(np.histogram(zeta_cg, bins=bins)[0])
    z = bins[1:] - (bins[1] - bins[0]) / 2

    zeta_dist = np.mean(zeta_dist, axis=0)
    zeta_dist = zeta_dist / np.mean(zeta_dist)

    zeta_cg_dist = np.mean(zeta_cg_dist, axis=0)
    zeta_cg_dist = zeta_cg_dist / np.mean(zeta_cg_dist)

    return pd.DataFrame({"zeta": z, "result": zeta_dist, "result_cg": zeta_cg_dist})


def chi_four_trans(
    trajectory: Coordinates, skip: float = 0.1, segments: int = 10000
) -> pd.DataFrame:
    traj = trajectory.nojump
    N = len(trajectory[0])
    t, S = shifted_correlation(
        partial(overlap, radius=0.1), traj, skip=skip, segments=segments, average=False
    )
    chi = 1 / N * S.var(axis=0)[1:]
    return pd.DataFrame({"time": t[1:], "chi": chi})

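chi_four_trans returns the variance of the self-overlap across time origins, scaled by 1/N; its peak position is commonly read off as the four-point correlation time. A hypothetical usage sketch (water_traj is an assumption):

data = chi_four_trans(water_traj, skip=0.1, segments=500)
tau_4 = data["time"][data["chi"].idxmax()]   # peak of chi_4 as an estimate of tau_4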
def tanaka_correlation_map(
    trajectory: Coordinates,
    data_chi_four_trans: pd.DataFrame,
    angle: float = 30,
    segments: int = 100,
    skip: float = 0.1,
) -> pd.DataFrame:
    def tanaka_zeta_cg(
        trajectory: Coordinates,
        angle: float = 30,
        segments: int = 1000,
        skip: float = 0.1,
    ) -> Tuple[NDArray, NDArray]:
        frame_indices = np.unique(
            np.int_(
                np.linspace(len(trajectory) * skip, len(trajectory) - 1, num=segments)
            )
        )
        sel = trajectory.atom_subset.selection
        A = np.where(
            trajectory.subset(atom_name="OW", residue_name="SOL").atom_subset.selection[
                sel
            ]
        )[0]
        D = np.vstack([A] * 2).T.reshape((-1,))
        H = np.where(
            trajectory.subset(
                atom_name="HW.", residue_name="SOL"
            ).atom_subset.selection[sel]
        )[0]

        zeta_cg = []
        times = []
        for frame_index in frame_indices:
            D_frame = trajectory[frame_index][D]
            H_frame = trajectory[frame_index][H]
            A_frame = trajectory[frame_index][A]
            box = trajectory[frame_index].box
            pairs = hbonds(
                D_frame, H_frame, A_frame, box, min_cos=np.cos(angle / 180 * np.pi)
            )
            pairs[:, 0] = np.int_((pairs[:, 0] / 2))
            pairs = np.sort(pairs, axis=1)
            pairs = np.unique(pairs, axis=0)
            pairs = pairs.tolist()

            A_PBC, A_index = pbc_points(A_frame, box, thickness=0.7, index=True)
            A_tree = KDTree(A_PBC)
            dist, dist_index = A_tree.query(A_frame, 16, distance_upper_bound=0.7)

            dist_index = A_index[dist_index]
            zeta = []
            for i, indices in enumerate(dist_index):
                dist_hbond = []
                dist_non_hbond = []
                for j, index in enumerate(indices):
                    if j != 0:
                        if np.sort([indices[0], index]).tolist() in pairs:
                            dist_hbond.append(dist[i, j])
                        else:
                            dist_non_hbond.append(dist[i, j])
                try:
                    zeta.append(np.min(dist_non_hbond) - np.max(dist_hbond))
                except ValueError:
                    zeta.append(0)
            zeta = np.array(zeta)
            dist_index = np.array(
                [indices[dist[i] <= 0.35] for i, indices in enumerate(dist_index)]
            )
            zeta_cg.append(np.array([np.mean(zeta[indices]) for indices in dist_index]))
            times.append(trajectory[frame_index].time)
        return np.array(times), np.array(zeta_cg)

    def delta_r_max(
        trajectory: Coordinates, frame: CoordinateFrame, tau_4: float
    ) -> NDArray:
        dt = trajectory[1].time - trajectory[0].time
        index_start = frame.step
        index_end = index_start + int(tau_4 / dt) + 1
        frame_indices = np.arange(index_start, index_end + 1)
        end_cords = np.array([trajectory[frame_index] for frame_index in frame_indices])
        vectors = trajectory[index_start] - end_cords

        delta_r = np.linalg.norm(vectors, axis=-1)
        delta_r = np.max(delta_r, axis=0)
        return delta_r

    d = np.array(data_chi_four_trans[["time", "chi"]])
    mask = d[:, 1] >= 0.7 * np.max(d[:, 1])
    fit = np.polyfit(d[mask, 0], d[mask, 1], 4)
    p = np.poly1d(fit)
    x_inter = np.linspace(d[mask, 0][0], d[mask, 0][-1], int(1e6))
    y_inter = p(x_inter)
    tau_4 = x_inter[y_inter == np.max(y_inter)]

    oxygens = trajectory.nojump.subset(atom_name="OW")
    window = tau_4 / trajectory[-1].time
    start_frames = np.unique(
        np.linspace(
            len(trajectory) * skip,
            len(trajectory) * (1 - window),
            num=segments,
            endpoint=False,
            dtype=int,
        )
    )

    times, zeta_cg = tanaka_zeta_cg(trajectory, angle=angle)

    zeta_cg_mean = np.array(
        [
            np.mean(
                zeta_cg[
                    (times >= trajectory[start_frame].time)
                    * (times <= (trajectory[start_frame].time + tau_4))
                ],
                axis=0,
            )
            for start_frame in start_frames
        ]
    ).flatten()
    delta_r = np.array(
        [
            delta_r_max(oxygens, oxygens[start_frame], tau_4)
            for start_frame in start_frames
        ]
    ).flatten()
    return pd.DataFrame({"zeta_cg": zeta_cg_mean, "delta_r": delta_r})


def LSI_atom(distances: ArrayLike) -> NDArray:
    r_j = distances[distances <= 0.37]
    r_j = r_j.tolist()
    r_j.append(distances[len(r_j)])
    delta_ji = [r_j[i + 1] - r_j[i] for i in range(0, len(r_j) - 1)]
    mean_delta_i = np.mean(delta_ji)
    I = 1 / len(delta_ji) * np.sum((mean_delta_i - delta_ji) ** 2)
    return I

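LSI_atom implements the local structure index: take all neighbor distances up to 0.37 nm plus the first one beyond, and measure how unevenly the consecutive gaps are spaced. A self-contained check with made-up distances:

import numpy as np

distances = np.array([0.27, 0.28, 0.30, 0.33, 0.36, 0.41, 0.45])  # sorted ascending
r_j = distances[distances <= 0.37].tolist()
r_j.append(distances[len(r_j)])          # first neighbor beyond the cutoff
gaps = np.diff(r_j)
I = np.mean((gaps - gaps.mean()) ** 2)   # same value LSI_atom would return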
def LSI(
    trajectory: Coordinates, segments: int = 10000, skip: float = 0
) -> pd.DataFrame:
    def LSI_distribution(
        frame: CoordinateFrame, bins: NDArray, selector: Optional[Callable] = None
    ) -> NDArray:
        atoms_PBC = pbc_points(frame, frame.box, thickness=0.7)
        atoms_tree = KDTree(atoms_PBC)
        if selector:
            index = selector(frame)
        else:
            index = np.arange(len(frame))
        dist, _ = atoms_tree.query(frame[index], 50, distance_upper_bound=0.6)
        distances = dist[:, 1:]
        LSI_values = np.array([LSI_atom(distance) for distance in distances])
        dist = np.histogram(LSI_values, bins=bins, density=True)[0]
        return dist

    bins = np.linspace(0, 0.007, 201)
    I = bins[1:] - (bins[1] - bins[0]) / 2

    frame_indices = np.unique(
        np.int_(np.linspace(len(trajectory) * skip, len(trajectory) - 1, num=segments))
    )
    distributions = np.array(
        [
            LSI_distribution(trajectory[frame_index], bins, selector=None)
            for frame_index in frame_indices
        ]
    )
    P = np.mean(distributions, axis=0)
    return pd.DataFrame({"I": I, "P": P})


def HDL_LDL_positions(
    frame: CoordinateFrame, selector: Optional[Callable] = None
) -> Tuple[NDArray, NDArray]:
    atoms_PBC = pbc_points(frame, frame.box, thickness=0.7)
    atoms_tree = KDTree(atoms_PBC)
    if selector:
        index = selector(frame)
    else:
        index = range(len(frame))
    dist = atoms_tree.query(frame[index], 50, distance_upper_bound=0.6)[0]
    distances = dist[:, 1:]
    LSI_values = np.array([LSI_atom(distance) for distance in distances])
    LDL = LSI_values >= 0.0013
    HDL = LSI_values < 0.0013
    pos_HDL = frame[index][HDL]
    pos_LDL = frame[index][LDL]
    return pos_HDL, pos_LDL


def HDL_LDL_gr(
    trajectory: Coordinates, segments: int = 10000, skip: float = 0.1
) -> pd.DataFrame:
    def gr_frame(
        frame: CoordinateFrame, trajectory: Coordinates, bins: ArrayLike
    ) -> NDArray:
        atoms_ALL = frame
        atoms_HDL, atoms_LDL = HDL_LDL_positions(frame)

        atoms_PBC_ALL = pbc_points(atoms_ALL, frame.box)
        atoms_PBC_LDL = pbc_points(atoms_LDL, frame.box)
        atoms_PBC_HDL = pbc_points(atoms_HDL, frame.box)

        tree_ALL = KDTree(atoms_PBC_ALL)
        tree_LDL = KDTree(atoms_PBC_LDL)
        tree_HDL = KDTree(atoms_PBC_HDL)

        dist_ALL_ALL, _ = tree_ALL.query(
            atoms_ALL, len(frame) // 2, distance_upper_bound=bins[-1] + 0.1
        )
        dist_HDL_HDL, _ = tree_HDL.query(
            atoms_HDL, len(frame) // 2, distance_upper_bound=bins[-1] + 0.1
        )
        dist_LDL_LDL, _ = tree_LDL.query(
            atoms_LDL, len(frame) // 2, distance_upper_bound=bins[-1] + 0.1
        )
        dist_HDL_LDL, _ = tree_LDL.query(
            atoms_HDL, len(frame) // 2, distance_upper_bound=bins[-1] + 0.1
        )

        dist_ALL_ALL = dist_ALL_ALL[:, 1:].flatten()
        dist_HDL_HDL = dist_HDL_HDL[:, 1:].flatten()
        dist_LDL_LDL = dist_LDL_LDL[:, 1:].flatten()
        dist_HDL_LDL = dist_HDL_LDL.flatten()

        hist_ALL_ALL = np.histogram(
            dist_ALL_ALL, bins=bins, range=(0, bins[-1]), density=False
        )[0]
        hist_HDL_HDL = np.histogram(
            dist_HDL_HDL, bins=bins, range=(0, bins[-1]), density=False
        )[0]
        hist_LDL_LDL = np.histogram(
            dist_LDL_LDL, bins=bins, range=(0, bins[-1]), density=False
        )[0]
        hist_HDL_LDL = np.histogram(
            dist_HDL_LDL, bins=bins, range=(0, bins[-1]), density=False
        )[0]

        return np.array(
            [
                hist_ALL_ALL / len(atoms_ALL),
                hist_HDL_HDL / len(atoms_HDL),
                hist_LDL_LDL / len(atoms_LDL),
                hist_HDL_LDL / len(atoms_HDL),
            ]
        )

    start_frame = trajectory[int(len(trajectory) * skip)]
    upper_bound = round(np.min(np.diag(start_frame.box)) / 2 - 0.05, 1)
    bins = np.linspace(0, upper_bound, int(upper_bound * 500) + 1)
    frame_indices = np.unique(
        np.int_(np.linspace(len(trajectory) * skip, len(trajectory) - 1, num=segments))
    )

    gr = []
    for frame_index in frame_indices:
        hists = gr_frame(trajectory[frame_index], trajectory, bins)
        gr.append(hists)

    gr = np.mean(gr, axis=0)
    gr = gr / (4 / 3 * np.pi * bins[1:] ** 3 - 4 / 3 * np.pi * bins[:-1] ** 3)
    r = bins[1:] - (bins[1] - bins[0]) / 2

    return pd.DataFrame(
        {"r": r, "gr_ALL": gr[0], "gr_HDL": gr[1], "gr_LDL": gr[2], "gr_MIX": gr[3]}
    )


def HDL_LDL_concentration(
    trajectory: Coordinates, segments: int = 10000, skip: float = 0.1
) -> pd.DataFrame:
    def HDL_LDL_concentration_frame(
        frame: CoordinateFrame, bins: ArrayLike
    ) -> Tuple[NDArray, NDArray]:
        atoms_HDL, atoms_LDL = HDL_LDL_positions(frame)
        atoms_PBC_HDL = pbc_points(atoms_HDL, frame.box, thickness=0.61)
        atoms_PBC_LDL = pbc_points(atoms_LDL, frame.box, thickness=0.61)
        tree_LDL = KDTree(atoms_PBC_LDL)
        tree_HDL = KDTree(atoms_PBC_HDL)
        dist_HDL_HDL, _ = tree_HDL.query(atoms_HDL, 31, distance_upper_bound=0.6)
        dist_HDL_LDL, _ = tree_LDL.query(atoms_HDL, 30, distance_upper_bound=0.6)
        HDL_near_HDL = np.sum(
            dist_HDL_HDL <= 0.5, axis=-1
        )  # counts the central particle itself
        LDL_near_HDL = np.sum(dist_HDL_LDL <= 0.5, axis=-1)
        x_HDL = HDL_near_HDL / (HDL_near_HDL + LDL_near_HDL)
        x_HDL_dist = np.histogram(x_HDL, bins=bins, range=(0, bins[-1]), density=True)[
            0
        ]
        dist_LDL_LDL, _ = tree_LDL.query(atoms_LDL, 31, distance_upper_bound=0.6)
        dist_LDL_HDL, _ = tree_HDL.query(atoms_LDL, 30, distance_upper_bound=0.6)
        LDL_near_LDL = np.sum(
            dist_LDL_LDL <= 0.5, axis=-1
        )  # counts the central particle itself
        HDL_near_LDL = np.sum(dist_LDL_HDL <= 0.5, axis=-1)
        x_LDL = LDL_near_LDL / (LDL_near_LDL + HDL_near_LDL)
        x_LDL_dist = np.histogram(x_LDL, bins=bins, range=(0, bins[-1]), density=True)[
            0
        ]
        return x_HDL_dist, x_LDL_dist

    bins = np.linspace(0, 1, 21)
    x = bins[1:] - (bins[1] - bins[0]) / 2
    frame_indices = np.unique(
        np.int_(np.linspace(len(trajectory) * skip, len(trajectory) - 1, num=segments))
    )
    local_concentration_dist = np.array(
        [
            HDL_LDL_concentration_frame(trajectory[frame_index], bins)
            for frame_index in frame_indices
        ]
    )
    x_HDL = np.mean(local_concentration_dist[:, 0], axis=0)
    x_LDL = np.mean(local_concentration_dist[:, 1], axis=0)
    return pd.DataFrame({"x": x, "x_HDL": x_HDL, "x_LDL": x_LDL})
@@ -1,54 +1,57 @@
from __future__ import annotations
from collections import OrderedDict
from typing import Optional, Union, TYPE_CHECKING

import numpy as np
from numpy.typing import ArrayLike, NDArray

from scipy.spatial import cKDTree
from itertools import product

from .logging import logger

if TYPE_CHECKING:
    from mdevaluate.coordinates import CoordinateFrame


def pbc_diff(v1, v2=None, box=None):

def pbc_diff(
    coords_a: NDArray, coords_b: NDArray, box: Optional[NDArray] = None
) -> NDArray:
    if box is None:
        out = v1 - v2
        out = coords_a - coords_b
    elif len(getattr(box, "shape", [])) == 1:
        out = pbc_diff_rect(v1, v2, box)
        out = pbc_diff_rect(coords_a, coords_b, box)
    elif len(getattr(box, "shape", [])) == 2:
        out = pbc_diff_tric(v1, v2, box)
        out = pbc_diff_tric(coords_a, coords_b, box)
    else:
        raise NotImplementedError("cannot handle box")
    return out


def pbc_diff_rect(v1, v2, box):
def pbc_diff_rect(coords_a: NDArray, coords_b: NDArray, box: NDArray) -> NDArray:
    """
    Calculate the difference of two vectors, considering periodic boundary conditions.
    """
    if v2 is None:
        v = v1
    else:
        v = v1 - v2

    v = coords_a - coords_b
    s = v / box
    v = box * (s - s.round())
    v = box * (s - np.round(s))
    return v


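The rectangular minimum-image convention above maps every component of the difference vector into [-L/2, L/2). A small self-contained check (the coordinates are illustrative):

import numpy as np

box = np.array([2.0, 2.0, 2.0])
a = np.array([[1.9, 0.1, 0.0]])
b = np.array([[0.1, 1.9, 0.0]])
s = (a - b) / box
v = box * (s - np.round(s))              # same folding as pbc_diff_rect
assert np.allclose(v, [[-0.2, 0.2, 0.0]])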
def pbc_diff_tric(v1, v2=None, box=None):
|
||||
def pbc_diff_tric(coords_a: NDArray, coords_b: NDArray, box: NDArray) -> NDArray:
|
||||
"""
|
||||
difference vector for arbitrary pbc
|
||||
Difference vector for arbitrary pbc
|
||||
|
||||
Args:
|
||||
box_matrix: CoordinateFrame.box
|
||||
"""
|
||||
if len(box.shape) == 1:
|
||||
box = np.diag(box)
|
||||
if v1.shape == (3,):
|
||||
v1 = v1.reshape((1, 3)) # quick 'n dirty
|
||||
if v2.shape == (3,):
|
||||
v2 = v2.reshape((1, 3))
|
||||
if coords_a.shape == (3,):
|
||||
coords_a = coords_a.reshape((1, 3)) # quick 'n dirty
|
||||
if coords_b.shape == (3,):
|
||||
coords_b = coords_b.reshape((1, 3))
|
||||
if box is not None:
|
||||
r3 = np.subtract(v1, v2)
|
||||
r3 = np.subtract(coords_a, coords_b)
|
||||
r2 = np.subtract(
|
||||
r3,
|
||||
(np.rint(np.divide(r3[:, 2], box[2][2])))[:, np.newaxis]
|
||||
@ -65,68 +68,17 @@ def pbc_diff_tric(v1, v2=None, box=None):
|
||||
* box[0][np.newaxis, :],
|
||||
)
|
||||
else:
|
||||
v = v1 - v2
|
||||
v = coords_a - coords_b
|
||||
return v
|
||||
|
||||
|
||||
def pbc_dist(a1, a2, box=None):
|
||||
return ((pbc_diff(a1, a2, box) ** 2).sum(axis=1)) ** 0.5
|
||||
def pbc_dist(
|
||||
atoms_a: NDArray, atoms_b: NDArray, box: Optional[NDArray] = None
|
||||
) -> ArrayLike:
|
||||
return ((pbc_diff(atoms_a, atoms_b, box) ** 2).sum(axis=1)) ** 0.5
|
||||
|
||||
|
||||
def pbc_extend(c, box):
|
||||
"""
|
||||
in: c is frame, box is frame.box
|
||||
out: all atoms in frame and their perio. image (shape => array(len(c)*27,3))
|
||||
"""
|
||||
c = np.asarray(c)
|
||||
if c.shape == (3,):
|
||||
c = c.reshape((1, 3)) # quick 'n dirty
|
||||
comb = np.array(
|
||||
[np.asarray(i) for i in product([0, -1, 1], [0, -1, 1], [0, -1, 1])]
|
||||
)
|
||||
b_matrices = comb[:, :, np.newaxis] * box[np.newaxis, :, :]
|
||||
b_vectors = b_matrices.sum(axis=1)[np.newaxis, :, :]
|
||||
return c[:, np.newaxis, :] + b_vectors
|
||||
|
||||
|
||||
def pbc_kdtree(v1, box, leafsize=32, compact_nodes=False, balanced_tree=False):
|
||||
"""
|
||||
kd_tree with periodic images
|
||||
box - whole matrix
|
||||
rest: optional optimization
|
||||
"""
|
||||
r0 = cKDTree(
|
||||
pbc_extend(v1, box).reshape((-1, 3)), leafsize, compact_nodes, balanced_tree
|
||||
)
|
||||
return r0
|
||||
|
||||
|
||||
def pbc_kdtree_query(v1, v2, box, n=1):
|
||||
"""
|
||||
kd_tree query with periodic images
|
||||
"""
|
||||
r0, r1 = pbc_kdtree(v1, box).query(v2, n)
|
||||
r1 = r1 // 27
|
||||
return r0, r1
|
||||
|
||||
|
||||
def pbc_backfold_rect(act_frame, box_matrix):
|
||||
"""
|
||||
mimics "trjconv ... -pbc atom -ur rect"
|
||||
|
||||
folds coords of act_frame in cuboid
|
||||
|
||||
"""
|
||||
af = np.asarray(act_frame)
|
||||
if af.shape == (3,):
|
||||
act_frame = act_frame.reshape((1, 3)) # quick 'n dirty
|
||||
b = box_matrix
|
||||
c = np.diag(b) / 2
|
||||
af = pbc_diff(np.zeros((1, 3)), af - c, b)
|
||||
return af + c
|
||||
|
||||
|
||||
def pbc_backfold_compact(act_frame, box_matrix):
|
||||
def pbc_backfold_compact(act_frame: NDArray, box_matrix: NDArray) -> NDArray:
|
||||
"""
|
||||
mimics "trjconv ... -pbc atom -ur compact"
|
||||
|
||||
@ -146,11 +98,11 @@ def pbc_backfold_compact(act_frame, box_matrix):
|
||||
b_matrices = comb[:, :, np.newaxis] * box[np.newaxis, :, :]
|
||||
b_vectors = b_matrices.sum(axis=1)[np.newaxis, :, :]
|
||||
sc = c[:, np.newaxis, :] + b_vectors
|
||||
w = np.argsort((((sc) - ctr) ** 2).sum(2), 1)[:, 0]
|
||||
w = np.argsort(((sc - ctr) ** 2).sum(2), 1)[:, 0]
|
||||
return sc[range(shape[0]), w]
|
||||
|
||||
|
||||
def whole(frame):
|
||||
def whole(frame: CoordinateFrame) -> CoordinateFrame:
|
||||
"""
|
||||
Apply ``-pbc whole`` to a CoordinateFrame.
|
||||
"""
|
||||
@ -177,7 +129,7 @@ def whole(frame):
|
||||
NOJUMP_CACHESIZE = 128
|
||||
|
||||
|
||||
def nojump(frame, usecache=True):
|
||||
def nojump(frame: CoordinateFrame, usecache: bool = True) -> CoordinateFrame:
|
||||
"""
|
||||
Return the nojump coordinates of a frame, based on a jump matrix.
|
||||
"""
|
||||
@ -201,10 +153,10 @@ def nojump(frame, usecache=True):
|
||||
delta
|
||||
+ np.array(
|
||||
np.vstack(
|
||||
[m[i0 : abstep + 1].sum(axis=0) for m in reader.nojump_matrixes]
|
||||
[m[i0 : abstep + 1].sum(axis=0) for m in reader.nojump_matrices]
|
||||
).T
|
||||
)
|
||||
* frame.box.diagonal()
|
||||
@ frame.box
|
||||
)
|
||||
|
||||
reader._nojump_cache[abstep] = delta
|
||||
@@ -217,24 +169,32 @@ def nojump(frame, usecache=True):
             np.vstack(
                 [
                     m[: frame.step + 1, selection].sum(axis=0)
-                    for m in reader.nojump_matrixes
+                    for m in reader.nojump_matrices
                 ]
             ).T
         )
-        * frame.box.diagonal()
+        @ frame.box
     )
     return frame - delta


-def pbc_points(coordinates, box, thickness=0, index=False, shear=False):
+def pbc_points(
+    coordinates: ArrayLike,
+    box: Optional[NDArray] = None,
+    thickness: Optional[float] = None,
+    index: bool = False,
+    shear: bool = False,
+) -> Union[NDArray, tuple[NDArray, NDArray]]:
     """
     Returns the points their first periodic images. Does not fold
     them back into the box.
     Thickness 0 means all 27 boxes. Positive means the box+thickness.
     Negative values mean that less than the box is returned.
     index=True also returns the indices with indices of images being their
-    originals values.
+    original values.
     """
+    if box is None:
+        box = coordinates.box
     if shear:
         box[2, 0] = box[2, 0] % box[0, 0]
         # Shifts the box images in the other directions if they moved more than
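The 27-image construction behind pbc_points can be sketched standalone as follows; the grid/concatenate lines mirror the hunk below, while the index bookkeeping is an illustrative assumption:

    import numpy as np
    from itertools import product

    coords = np.random.rand(10, 3)
    box = np.diag([1.0, 1.0, 1.0])
    grid = np.array(list(product([0, -1, 1], repeat=3)))  # 27 integer image shifts
    images = np.concatenate([coords + v @ box for v in grid], axis=0)
    indices = np.tile(np.arange(len(coords)), 27)  # image row -> original atom
    assert images.shape == (27 * len(coords), 3)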
@@ -249,7 +209,7 @@ def pbc_points(coordinates, box, thickness=0, index=False, shear=False):
     coordinates_pbc = np.concatenate([coordinates + v @ box for v in grid], axis=0)
     size = np.diag(box)
 
-    if thickness != 0:
+    if thickness is not None:
         mask = np.all(coordinates_pbc > -thickness, axis=1)
         coordinates_pbc = coordinates_pbc[mask]
         indices = indices[mask]
@@ -152,7 +152,7 @@ def nojump_load_filename(reader: BaseReader):
     )
     if os.path.exists(full_path_fallback):
         return full_path_fallback
-    if os.path.exists(fname) or is_writeable(directory):
+    if os.path.exists(full_path) or is_writeable(directory):
         return full_path
     else:
         user_data_dir = os.path.join("/data/", os.environ["HOME"].split("/")[-1])
@@ -187,19 +187,33 @@ def nojump_save_filename(reader: BaseReader):

 def parse_jumps(trajectory: Coordinates):
     prev = trajectory[0].whole
-    box = prev.box.diagonal()
+    box = prev.box
     SparseData = namedtuple("SparseData", ["data", "row", "col"])
     jump_data = (
         SparseData(data=array("b"), row=array("l"), col=array("l")),
         SparseData(data=array("b"), row=array("l"), col=array("l")),
         SparseData(data=array("b"), row=array("l"), col=array("l")),
     )
 
     for i, curr in enumerate(trajectory):
         if i % 500 == 0:
             logger.debug("Parse jumps Step: %d", i)
-        delta = ((curr - prev) / box).round().astype(np.int8)
+        r3 = np.subtract(curr, prev)
+        delta_z = np.array(np.rint(np.divide(r3[:, 2], box[2][2])), dtype=np.int8)
+        r2 = np.subtract(
+            r3,
+            (np.rint(np.divide(r3[:, 2], box[2][2])))[:, np.newaxis]
+            * box[2][np.newaxis, :],
+        )
+        delta_y = np.array(np.rint(np.divide(r2[:, 1], box[1][1])), dtype=np.int8)
+        r1 = np.subtract(
+            r2,
+            (np.rint(np.divide(r2[:, 1], box[1][1])))[:, np.newaxis]
+            * box[1][np.newaxis, :],
+        )
+        delta_x = np.array(np.rint(np.divide(r1[:, 0], box[0][0])), dtype=np.int8)
+        delta = np.array([delta_x, delta_y, delta_z]).T
         prev = curr
+        box = prev.box
         for d in range(3):
             (col,) = np.where(delta[:, d] != 0)
             jump_data[d].col.extend(col)
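The rewritten parse_jumps removes box-vector multiples one axis at a time (z, then y, then x), which also works for triclinic boxes where a single division by the diagonal does not. The same logic, condensed into a standalone sketch (names illustrative):

    import numpy as np

    def jump_vector(curr, prev, box):
        # box is the full 3x3 box matrix; GROMACS boxes are lower-triangular,
        # so only box[2] has a z component and only box[1], box[2] a y component.
        r = np.asarray(curr, dtype=float) - np.asarray(prev, dtype=float)
        delta = np.zeros((len(r), 3), dtype=np.int8)
        for d in (2, 1, 0):
            n = np.rint(r[:, d] / box[d, d])
            r -= n[:, np.newaxis] * box[d][np.newaxis, :]
            delta[:, d] = n.astype(np.int8)
        return delta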
@@ -4,20 +4,21 @@ Collection of utility functions.
 import functools
 from time import time as pytime
 from subprocess import run
 from types import FunctionType
+from typing import Callable, Optional, Union
 
 import numpy as np
+from numpy.typing import ArrayLike, NDArray
 import pandas as pd
-from .functions import kww, kww_1e
 from scipy.ndimage import uniform_filter1d
-
 from scipy.interpolate import interp1d
 from scipy.optimize import curve_fit
 
 from .logging import logger
+from .functions import kww, kww_1e
 
 
-def five_point_stencil(xdata, ydata):
+def five_point_stencil(xdata: ArrayLike, ydata: ArrayLike) -> ArrayLike:
     """
     Calculate the derivative dy/dx with a five point stencil.
     This algorith is only valid for equally distributed x values.
@@ -42,28 +43,28 @@ def five_point_stencil(xdata, ydata):


 def filon_fourier_transformation(
-    time,
-    correlation,
-    frequencies=None,
-    derivative="linear",
-    imag=True,
-):
+    time: NDArray,
+    correlation: NDArray,
+    frequencies: Optional[NDArray] = None,
+    derivative: Union[str, NDArray] = "linear",
+    imag: bool = True,
+) -> tuple[NDArray, NDArray]:
     """
-    Fourier-transformation for slow varrying functions. The filon algorithmus is
+    Fourier-transformation for slow varying functions. The filon algorithm is
     described in detail in ref [Blochowicz]_, ch. 3.2.3.
 
     Args:
-        time: List of times where the correlation function was sampled.
+        time: List of times when the correlation function was sampled.
         correlation: Values of the correlation function.
         frequencies (opt.):
             List of frequencies where the fourier transformation will be calculated.
-            If None the frequencies will be choosen based on the input times.
+            If None the frequencies will be chosen based on the input times.
         derivative (opt.):
-            Approximation algorithmus for the derivative of the correlation function.
+            Approximation algorithm for the derivative of the correlation function.
             Possible values are: 'linear', 'stencil' or a list of derivatives.
         imag (opt.): If imaginary part of the integral should be calculated.
 
-    If frequencies are not explicitly given they will be evenly placed on a log scale
+    If frequencies are not explicitly given, they will be evenly placed on a log scale
     in the interval [1/tmax, 0.1/tmin] where tmin and tmax are the smallest respectively
     the biggest time (greater than 0) of the provided times. The frequencies are cut off
     at high values by one decade, since the fourier transformation deviates quite
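A small sketch of the default frequency grid the docstring describes, i.e. log-spaced points on [1/tmax, 0.1/tmin] (the number of points here is an assumption):

    import numpy as np

    time = np.logspace(-1, 3, 50)  # sample times, illustrative
    t_pos = time[time > 0]
    tmin, tmax = t_pos.min(), t_pos.max()
    frequencies = np.logspace(np.log10(1 / tmax), np.log10(0.1 / tmin), num=60)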
@@ -85,7 +86,7 @@ def filon_fourier_transformation(
         _, derivative = five_point_stencil(time, correlation)
         time = ((time[2:-1] * time[1:-2]) ** 0.5).reshape(-1, 1)
         derivative = derivative.reshape(-1, 1)
-    elif np.iterable(derivative) and len(time) is len(derivative):
+    elif isinstance(derivative, NDArray) and len(time) is len(derivative):
         derivative.reshape(-1, 1)
     else:
         raise NotImplementedError(
@@ -111,15 +112,12 @@ def filon_fourier_transformation(
         + 1j * correlation[0] / frequencies
     )
 
-    return (
-        frequencies.reshape(
-            -1,
-        ),
-        fourier,
-    )
+    return frequencies.reshape(-1), fourier


-def superpose(x1, y1, x2, y2, N=100, damping=1.0):
+def superpose(
+    x1: NDArray, y1: NDArray, x2: NDArray, y2: NDArray, damping: float = 1.0
+) -> tuple[NDArray, NDArray]:
     if x2[0] == 0:
         x2 = x2[1:]
         y2 = y2[1:]
@@ -127,12 +125,12 @@ def superpose(x1, y1, x2, y2, N=100, damping=1.0):
     reg1 = x1 < x2[0]
     reg2 = x2 > x1[-1]
     x_ol = np.logspace(
-        np.log10(max(x1[~reg1][0], x2[~reg2][0]) + 0.001),
-        np.log10(min(x1[~reg1][-1], x2[~reg2][-1]) - 0.001),
-        (sum(~reg1) + sum(~reg2)) / 2,
+        np.log10(np.max(x1[~reg1][0], x2[~reg2][0]) + 0.001),
+        np.log10(np.min(x1[~reg1][-1], x2[~reg2][-1]) - 0.001),
+        (np.sum(~reg1) + np.sum(~reg2)) / 2,
     )
 
-    def w(x):
+    def w(x: NDArray) -> NDArray:
         A = x_ol.min()
         B = x_ol.max()
         return (np.log10(B / x) / np.log10(B / A)) ** damping
@@ -150,21 +148,7 @@ def superpose(x1, y1, x2, y2, N=100, damping=1.0):
     return xdata, ydata


-def runningmean(data, nav):
-    """
-    Compute the running mean of a 1-dimenional array.
-
-    Args:
-        data: Input data of shape (N, )
-        nav: Number of points over which the data will be averaged
-
-    Returns:
-        Array of shape (N-(nav-1), )
-    """
-    return np.convolve(data, np.ones((nav,)) / nav, mode="valid")
-
-
-def moving_average(A, n=3):
+def moving_average(data: NDArray, n: int = 3) -> NDArray:
     """
     Compute the running mean of an array.
     Uses the second axis if it is of higher dimensionality.
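The deleted runningmean and the retained moving_average agree on 1-d input away from the boundaries; a quick standalone check of that equivalence (n odd, as the simple case):

    import numpy as np
    from scipy.ndimage import uniform_filter1d

    data, n = np.random.rand(100), 5
    conv = np.convolve(data, np.ones(n) / n, mode="valid")  # old runningmean
    k1, k2 = int(n / 2), int((n - 1) / 2)
    filt = uniform_filter1d(data, n)[k1:-k2]                # new moving_average
    assert np.allclose(conv, filt)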
@@ -177,27 +161,30 @@ def moving_average(A, n=3):
         Array of shape (N-(n-1), )
 
     Supports 2D-Arrays.
-    Slower than runningmean for small n but faster for large n.
     """
     k1 = int(n / 2)
     k2 = int((n - 1) / 2)
     if k2 == 0:
-        if A.ndim > 1:
-            return uniform_filter1d(A, n)[:, k1:]
-        return uniform_filter1d(A, n)[k1:]
-    if A.ndim > 1:
-        return uniform_filter1d(A, n)[:, k1:-k2]
-    return uniform_filter1d(A, n)[k1:-k2]
+        if data.ndim > 1:
+            return uniform_filter1d(data, n)[:, k1:]
+        return uniform_filter1d(data, n)[k1:]
+    if data.ndim > 1:
+        return uniform_filter1d(data, n)[:, k1:-k2]
+    return uniform_filter1d(data, n)[k1:-k2]


-def coherent_sum(func, coord_a, coord_b):
+def coherent_sum(
+    func: Callable[[ArrayLike, ArrayLike], float],
+    coord_a: ArrayLike,
+    coord_b: ArrayLike,
+) -> NDArray:
     """
     Perform a coherent sum over two arrays :math:`A, B`.
 
     .. math::
         \\frac{1}{N_A N_B}\\sum_i\\sum_j f(A_i, B_j)
 
-    For numpy arrays this is equal to::
+    For numpy arrays, this is equal to::
 
         N, d = x.shape
         M, d = y.shape
@@ -206,24 +193,27 @@ def coherent_sum(func, coord_a, coord_b):
     Args:
         func: The function is called for each two items in both arrays, this should
             return a scalar value.
-        coord_a, coord_b: The two arrays.
+        coord_a: First array.
+        coord_b: Second array.
 
     """
 
     def cohsum(coord_a, coord_b):
         res = 0
         for i in range(len(coord_a)):
             for j in range(len(coord_b)):
                 res += func(coord_a[i], coord_b[j])
         return res
 
     return cohsum(coord_a, coord_b)


-def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
+def coherent_histogram(
+    func: Callable[[ArrayLike, ArrayLike], float],
+    coord_a: ArrayLike,
+    coord_b: ArrayLike,
+    bins: ArrayLike,
+    distinct: bool = False,
+) -> NDArray:
     """
     Compute a coherent histogram over two arrays, equivalent to coherent_sum.
-    For numpy arrays ofthis is equal to::
+    For numpy arrays, this is equal to::
 
         N, d = x.shape
         M, d = y.shape
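For illustration, the double loop that cohsum implements can be driven by any scalar kernel; a hypothetical standalone example:

    import numpy as np

    def cohsum(func, coord_a, coord_b):
        # Same plain double loop as in the function body above.
        res = 0
        for a in coord_a:
            for b in coord_b:
                res += func(a, b)
        return res

    x, y = np.random.rand(4, 3), np.random.rand(5, 3)
    total = cohsum(lambda a, b: float(np.exp(-np.linalg.norm(a - b))), x, y)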
@@ -235,9 +225,11 @@ def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
     Args:
         func: The function is called for each two items in both arrays, this should
             return a scalar value.
-        coord_a, coord_b: The two arrays.
-        bins: The bins used for the histogram must be distributed regular on a linear
+        coord_a: First array.
+        coord_b: Second array.
+        bins: The bins used for the histogram must be distributed regularly on a linear
             scale.
         distinct: Only calculate distinct part.
 
     """
     assert np.isclose(
@@ -248,7 +240,6 @@ def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
     N = len(bins) - 1
     dh = (hmax - hmin) / N
 
-
     def cohsum(coord_a, coord_b):
         res = np.zeros((N,))
         for i in range(len(coord_a)):
             for j in range(len(coord_b)):
@@ -258,10 +249,8 @@ def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
                     res[int((h - hmin) / dh)] += 1
         return res
 
     return cohsum(coord_a, coord_b)


-def Sq_from_gr(r, gr, q, ρ):
+def Sq_from_gr(r: NDArray, gr: NDArray, q: NDArray, n: float) -> NDArray:
     r"""
     Compute the static structure factor as fourier transform of the pair correlation
     function. [Yarnell]_
@@ -273,7 +262,7 @@ def Sq_from_gr(r, gr, q, ρ):
         r: Radii of the pair correlation function
         gr: Values of the pair correlation function
         q: List of q values
-        ρ: Average number density
+        n: Average number density
 
     .. [Yarnell]
         Yarnell, J. L., Katz, M. J., Wenzel, R. G., & Koenig, S. H. (1973). Physical
@@ -282,10 +271,12 @@ def Sq_from_gr(r, gr, q, ρ):
 
     """
     ydata = ((gr - 1) * r).reshape(-1, 1) * np.sin(r.reshape(-1, 1) * q.reshape(1, -1))
-    return np.trapz(x=r, y=ydata, axis=0) * (4 * np.pi * ρ / q) + 1
+    return np.trapz(x=r, y=ydata, axis=0) * (4 * np.pi * n / q) + 1


-def Fqt_from_Grt(data, q):
+def Fqt_from_Grt(
+    data: Union[pd.DataFrame, ArrayLike], q: ArrayLike
+) -> Union[pd.DataFrame, tuple[NDArray, NDArray]]:
     """
     Calculate the ISF from the van Hove function for a given q value by fourier
     transform.
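The return line above evaluates S(q) = 1 + (4*pi*n/q) * Integral dr r (g(r) - 1) sin(qr) on a q grid; applied to toy data it reads (synthetic g(r), purely illustrative):

    import numpy as np

    r = np.linspace(0.01, 2.0, 400)
    gr = 1 + np.exp(-((r - 0.3) ** 2) / 0.005)  # toy pair-correlation peak
    q = np.linspace(1.0, 50.0, 100)
    n = 33.0                                     # number density, illustrative
    ydata = ((gr - 1) * r).reshape(-1, 1) * np.sin(r.reshape(-1, 1) * q.reshape(1, -1))
    sq = np.trapz(x=r, y=ydata, axis=0) * (4 * np.pi * n / q) + 1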
@@ -317,7 +308,7 @@ def Fqt_from_Grt(data, q):
         return isf.index, isf.values


-def singledispatchmethod(func):
+def singledispatchmethod(func: Callable) -> Callable:
     """
     A decorator to define a genric instance method, analogue to
     functools.singledispatch.
@@ -332,22 +323,7 @@ def singledispatchmethod(func):
     return wrapper


-def histogram(data, bins):
-    """
-    Compute the histogram of the given data. Uses numpy.bincount function, if possible.
-    """
-    dbins = np.diff(bins)
-    dx = dbins.mean()
-    if bins.min() == 0 and dbins.std() < 1e-6:
-        logger.debug("Using numpy.bincount for histogramm compuation.")
-        hist = np.bincount((data // dx).astype(int), minlength=len(dbins))[: len(dbins)]
-    else:
-        hist = np.histogram(data, bins=bins)[0]
-
-    return hist, runningmean(bins, 2)
-
-
-def quick1etau(t, C, n=7):
+def quick1etau(t: ArrayLike, C: ArrayLike, n: int = 7) -> float:
     """
     Estimate the time for a correlation function that goes from 1 to 0 to decay to 1/e.
 
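Per the docstring, the estimator targets the time where C(t) first reaches 1/e; a crude standalone approximation via interpolation at the first crossing (the actual function additionally uses the kww fits imported above, so this sketch is not its exact procedure):

    import numpy as np

    def tau_1e_rough(t, C):
        i = int(np.argmax(np.asarray(C) < 1 / np.e))  # first index below 1/e
        x0, x1, y0, y1 = t[i - 1], t[i], C[i - 1], C[i]
        return x0 + (y0 - 1 / np.e) * (x1 - x0) / (y0 - y1)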
@@ -381,15 +357,15 @@ def quick1etau(t, C, n=7):
     return tau_est


-def susceptibility(time, correlation, **kwargs):
+def susceptibility(
+    time: NDArray, correlation: NDArray, **kwargs
+) -> tuple[NDArray, NDArray]:
     """
     Calculate the susceptibility of a correlation function.
 
     Args:
         time: Timesteps of the correlation data
         correlation: Value of the correlation function
         **kwargs (opt.):
             Additional keyword arguments will be passed to :func:`filon_fourier_transformation`.
     """
     frequencies, fourier = filon_fourier_transformation(
         time, correlation, imag=False, **kwargs
@@ -397,7 +373,7 @@ def susceptibility(time, correlation, **kwargs):
     return frequencies, frequencies * fourier


-def read_gro(file):
+def read_gro(file: str) -> tuple[pd.DataFrame, NDArray, str]:
     with open(file, "r") as f:
         lines = f.readlines()
     description = lines[0].splitlines()[0]
@@ -438,7 +414,9 @@ def read_gro(file):
     return atoms_DF, box, description


-def write_gro(file, atoms_DF, box, description):
+def write_gro(
+    file: str, atoms_DF: pd.DataFrame, box: NDArray, description: str
+) -> None:
     with open(file, "w") as f:
         f.write(f"{description} \n")
         f.write(f"{len(atoms_DF)}\n")
@@ -456,7 +434,7 @@ def write_gro(file, atoms_DF, box, description):
     )


-def fibonacci_sphere(samples=1000):
+def fibonacci_sphere(samples: int = 1000) -> NDArray:
     points = []
     phi = np.pi * (np.sqrt(5.0) - 1.0)  # golden angle in radians
 
@@ -471,7 +449,7 @@ def fibonacci_sphere(samples=1000):
     return np.array(points)


-def timing(function):
+def timing(function: Callable) -> Callable:
     @functools.wraps(function)
     def wrap(*args, **kw):
         start_time = pytime()
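Completed into a standalone sketch, the golden-angle construction that begins in the hunk above typically continues like this (the loop body is inferred, consistent with the phi definition shown):

    import numpy as np

    def fibonacci_sphere_sketch(samples=1000):
        phi = np.pi * (np.sqrt(5.0) - 1.0)  # golden angle in radians
        i = np.arange(samples)
        y = 1 - 2 * i / (samples - 1)       # heights from 1 down to -1
        radius = np.sqrt(1 - y * y)         # circle radius at height y
        theta = phi * i
        return np.stack(
            [radius * np.cos(theta), y, radius * np.sin(theta)], axis=1
        )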
@@ -483,7 +461,8 @@ def timing(function):
 
     return wrap
 
-def cleanup_h5(hdf5_file) -> None:
+
+def cleanup_h5(hdf5_file: str) -> None:
     hdf5_temp_file = f"{hdf5_file[:-3]}_temp.h5"
     run(
         [
@@ -5,11 +5,11 @@ import numpy as np
 
 def test_checksum():
     salt = checksum.SALT
-    checksum.SALT = ''
+    checksum.SALT = ""
     assert checksum.checksum(1) == 304942582444936629325699363757435820077590259883
-    assert checksum.checksum('42') == checksum.checksum(42)
+    assert checksum.checksum("42") == checksum.checksum(42)
     cs1 = checksum.checksum(999)
-    checksum.SALT = '999'
+    checksum.SALT = "999"
     assert cs1 != checksum.checksum(999)
 
     a = np.array([1, 2, 3])
@@ -19,7 +19,6 @@ def test_checksum():


 def test_version():
-
     @checksum.version(1)
     def f1():
         pass
@@ -7,7 +7,7 @@ from mdevaluate import coordinates

 @pytest.fixture
 def trajectory(request):
-    return mdevaluate.open(os.path.join(os.path.dirname(__file__), 'data/water'))
+    return mdevaluate.open(os.path.join(os.path.dirname(__file__), "data/water"))


 def test_coordinates_getitem(trajectory):
test/test_correlation.py (new file, 57 lines)
@@ -0,0 +1,57 @@
+import os
+import pytest
+
+import mdevaluate
+from mdevaluate import correlation
+import numpy as np
+
+
+@pytest.fixture
+def trajectory(request):
+    return mdevaluate.open(os.path.join(os.path.dirname(__file__), "data/water"))
+
+
+def test_shifted_correlation(trajectory):
+    test_array = np.array([100, 82, 65, 49, 39, 29, 20, 13, 7])
+    OW = trajectory.subset(atom_name="OW")
+    t, result = correlation.shifted_correlation(
+        correlation.isf, OW, segments=10, skip=0.1, points=10
+    )
+    assert (np.array(result * 100, dtype=int) == test_array).all()
+
+
+def test_shifted_correlation_no_average(trajectory):
+    t, result = correlation.shifted_correlation(
+        correlation.isf, trajectory, segments=10, skip=0.1, points=5, average=False
+    )
+    assert result.shape == (10, 5)
+
+
+def test_shifted_correlation_selector(trajectory):
+    test_array = np.array([100, 82, 64, 48, 37, 28, 19, 11, 5])
+
+    def selector(frame):
+        index = np.argwhere((frame[:, 0] >= 0) * (frame[:, 0] < 1))
+        return index.flatten()
+
+    OW = trajectory.subset(atom_name="OW")
+    t, result = correlation.shifted_correlation(
+        correlation.isf, OW, segments=10, skip=0.1, points=10, selector=selector
+    )
+    assert (np.array(result * 100, dtype=int) == test_array).all()
+
+
+def test_shifted_correlation_multi_selector(trajectory):
+    def selector(frame):
+        indices = []
+        for i in range(3):
+            x = frame[:, 0].flatten()
+            index = np.argwhere((x >= i) * (x < i + 1))
+            indices.append(index.flatten())
+        return indices
+
+    OW = trajectory.subset(atom_name="OW")
+    t, result = correlation.shifted_correlation(
+        correlation.isf, OW, segments=10, skip=0.1, points=10, selector=selector
+    )
+    assert result.shape == (3, 9)
@@ -4,7 +4,7 @@ import pytest
 import numpy as np
 
 import mdevaluate
-from mdevaluate import free_energy_landscape as fel
+import mdevaluate.extra.free_energy_landscape as fel
 
 
 @pytest.fixture
@@ -13,42 +13,24 @@ def trajectory(request):


 def test_get_fel(trajectory):
-    test_array = np.array(
-        [
-            0.0,
-            12.87438176,
-            4.95868203,
-            11.02055197,
-            5.44195534,
-            6.73933442,
-            3.30971789,
-            6.10424055,
-            8.56153733,
-            5.45777331,
-            5.64545817,
-            8.42100423,
-            6.28132121,
-            7.4777172,
-            11.64839354,
-            4.52566354,
-            40.84730838,
-            93.86241602,
-            140.3039937,
-            173.55970021,
-        ]
-    )
+    test_array = np.array([210., 214., 209., 192., 200., 193., 230., 218., 266.])
 
-    oxygens_water = trajectory.subset(atom_name="OW", residue_name="SOL")
-    r, energy_differences = fel.get_fel(
-        oxygens_water,
-        os.path.join(os.path.dirname(__file__), "data/pore"),
-        "cylindrical",
-        225,
-        edge=0.05,
-        radiusmin=0.05,
-        radiusmax=2.05,
-        z=[-np.inf, np.inf],
-        overwrite=True,
+    OW = trajectory.subset(atom_name="OW")
+    box = trajectory[0].box
+    box_voxels = (np.diag(box) // [0.05, 0.05, 0.05] + [1, 1, 1]) * [0.05, 0.05, 0.05]
+    occupation_matrix = fel.occupation_matrix(OW, skip=0, segments=10)
+    radius_maxima = 0.05 * 3 ** (1 / 2) + 0.05 / 100
+    maxima_matrix = fel.find_maxima(
+        occupation_matrix,
+        box=box_voxels,
+        radius=radius_maxima,
+        pore_geometry="cylindrical",
     )
 
-    assert (np.round(energy_differences) == np.round(test_array)).all()
+    maxima_matrix = fel.add_distances(maxima_matrix, "cylindrical", np.diag(box) / 2)
+    r_bins = np.arange(0, 0.5, 0.02)
+    distance_bins = np.arange(1.8, 1.9, 0.01)
+    energy_df = fel.distance_resolved_energies(
+        maxima_matrix, distance_bins, r_bins, box, "cylindrical", 225
+    )
+    result = fel.find_energy_maxima(energy_df, r_min=0.05, r_max=0.15)
+    assert (np.round(np.array(result["energy"])) == np.round(test_array)).all()
@@ -9,6 +9,6 @@ def test_pbc_diff():
     y = np.random.rand(10, 3)
     box = np.ones((3,))
 
-    assert (pbc.pbc_diff(x, x, box) == approx(0))
+    assert pbc.pbc_diff(x, x, box) == approx(0)
     dxy = (pbc.pbc_diff(x, y, box) ** 2).sum(axis=1) ** 0.5
     assert (dxy <= 0.75**0.5).all()
@@ -8,7 +8,7 @@ from mdevaluate import utils
 @pytest.fixture
 def logdata(request):
     xdata = np.logspace(-1, 3, 50)
-    ydata = np.exp(- (xdata)**0.7)
+    ydata = np.exp(-((xdata) ** 0.7))
     return xdata, ydata


@@ -18,29 +18,16 @@ def test_filon_fourier_transformation(logdata):
     xdata_zero = copy(xdata)
     xdata_zero[0] = 0
     _, filon = utils.filon_fourier_transformation(xdata_zero, ydata)
-    assert not np.isnan(filon).any(), 'There are NaN values in the filon result!'
+    assert not np.isnan(filon).any(), "There are NaN values in the filon result!"
 
     freqs = np.logspace(-4, 1)
     filon_freqs, filon_imag = utils.filon_fourier_transformation(
-        xdata, xdata, frequencies=freqs, derivative='linear', imag=True
+        xdata, xdata, frequencies=freqs, derivative="linear", imag=True
     )
 
     assert (freqs == filon_freqs).all()
 
     freqs, filon_real = utils.filon_fourier_transformation(
-        xdata, xdata, frequencies=freqs, derivative='linear', imag=False
+        xdata, xdata, frequencies=freqs, derivative="linear", imag=False
     )
     assert np.isclose(filon_imag.real, filon_real).all()
-
-
-def test_histogram():
-    data = np.random.rand(100)
-    bins = np.linspace(0, 1)
-    np_hist = np.histogram(data, bins=bins)[0]
-    ut_hist = utils.histogram(data, bins=bins)[0]
-    assert (np_hist == ut_hist).all()
-
-    bins = np.linspace(0.3, 1.5)
-    np_hist = np.histogram(data, bins=bins)[0]
-    ut_hist = utils.histogram(data, bins=bins)[0]
-    assert (np_hist == ut_hist).all()