Changed energy_reader to use MDAnalysis instead of Gromacs energy tool.
parent 578d3638a4
commit f6416756a3
@@ -24,7 +24,6 @@ import numpy as np
 import MDAnalysis as mdanalysis
 from scipy import sparse
 from dask import delayed, __version__ as DASK_VERSION
-import pandas as pd
 import re
 
 
@@ -253,53 +252,12 @@ def correct_nojump_matrixes_for_whole(trajectory):
     save_nojump_matrixes(reader)
 
 
-def energy_reader(file, energies=None):
-    """Reads an gromacs energy file and output the data in a pandas DataFrame.
+def energy_reader(file):
+    """Reads a gromacs energy file with mdanalysis and returns an auxiliary file.
     Args:
         file: Filename of the energy file
-        energies (opt.): Specify energies to extract from the energy file
     """
-    if energies is None:
-        energies = np.arange(1, 100).astype("str")
-    directory = file.rsplit("/", 1)[0]
-    ps = subprocess.Popen(("echo", *energies), stdout=subprocess.PIPE)
-    try:
-        subprocess.run(
-            (
-                "gmx",
-                "energy",
-                "-f",
-                file,
-                "-o",
-                f"{directory}/tmp.xvg",
-                "-quiet",
-                "-nobackup",
-            ),
-            stdin=ps.stdout,
-        )
-    except FileNotFoundError:
-        print("No GROMACS found!")
-    ps.wait()
-    labels = []
-    is_legend = False
-    with open(f"{directory}/tmp.xvg") as f:
-        for i, line in enumerate(f):
-            if line.split(" ")[0] == "@":
-                if re.search("s\d+", line.split()[1]):
-                    is_legend = True
-                    labels.append(line.split('"')[1])
-            elif is_legend:
-                header = i
-                break
-
-    data = np.loadtxt(f"{directory}/tmp.xvg", skiprows=header)
-
-    df = pd.DataFrame({"Time": data[:, 0]})
-    for i, label in enumerate(labels):
-        tmp_df = pd.DataFrame({label: data[:, i + 1]})
-        df = pd.concat([df, tmp_df], axis=1)
-    subprocess.run(("rm", f"{directory}/tmp.xvg"))
-    return df
+    return mdanalysis.auxiliary.EDR.EDRReader(file)
 
 
 class BaseReader:
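For reference, a minimal usage sketch of the new return value (not part of the commit): with this change, energy_reader gives back an MDAnalysis EDRReader instead of a pandas DataFrame. The sketch assumes MDAnalysis >= 2.4 with its optional pyedr dependency installed, and the file name "ener.edr" and the terms "Potential" and "Temperature" are placeholders that depend on the actual run.

import pandas as pd

# energy_reader() from this module now returns an MDAnalysis EDRReader
# (placeholder file name, just for illustration).
aux = energy_reader("ener.edr")

# The reader lists the energy terms stored in the .edr file.
print(aux.terms)

# get_data() returns a dict of numpy arrays, always including "Time";
# wrapping it in a DataFrame roughly reproduces the old return value
# (the selected terms here are only examples).
data = aux.get_data(["Potential", "Temperature"])
df = pd.DataFrame(data)

Compared with the removed implementation, this reads the .edr file directly instead of shelling out to gmx energy and parsing a temporary tmp.xvg, and it drops the pandas import from this module.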