Used black for formatting.

sebastiankloth
2023-06-27 10:26:23 +02:00
parent a2164507d5
commit ef125c2a89
14 changed files with 860 additions and 553 deletions


@@ -33,14 +33,18 @@ def five_point_stencil(xdata, ydata):
     See: https://en.wikipedia.org/wiki/Five-point_stencil
     """
     return xdata[2:-2], (
-        (-ydata[4:] + 8 * ydata[3:-1] - 8 * ydata[1:-3] + ydata[:-4]) /
-        (3 * (xdata[4:] - xdata[:-4]))
+        (-ydata[4:] + 8 * ydata[3:-1] - 8 * ydata[1:-3] + ydata[:-4])
+        / (3 * (xdata[4:] - xdata[:-4]))
     )
 
 
-def filon_fourier_transformation(time, correlation,
-                                 frequencies=None, derivative='linear', imag=True,
-                                 ):
+def filon_fourier_transformation(
+    time,
+    correlation,
+    frequencies=None,
+    derivative="linear",
+    imag=True,
+):
     """
     Fourier transformation for slowly varying functions. The Filon algorithm is
     described in detail in ref [Blochowicz]_, ch. 3.2.3.
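Only whitespace moved here: the stencil still evaluates the standard five-point first derivative f'(x) ≈ (-f(x+2h) + 8f(x+h) - 8f(x-h) + f(x-2h)) / (12h), since 3 * (xdata[4:] - xdata[:-4]) is 12h on a uniform grid. A minimal sanity check (hypothetical, not part of this commit):

    import numpy as np

    # d/dx sin(x) = cos(x); the five-point stencil is accurate to O(h**4)
    x = np.linspace(0, 2 * np.pi, 201)
    xs, dy = five_point_stencil(x, np.sin(x))
    assert np.allclose(dy, np.cos(xs), atol=1e-5)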
@@ -68,17 +72,15 @@ def filon_fourier_transformation(time, correlation,
     """
     if frequencies is None:
         f_min = 1 / max(time)
-        f_max = 0.05**(1.2 - max(correlation)) / min(time[time > 0])
-        frequencies = 2 * np.pi * np.logspace(
-            np.log10(f_min), np.log10(f_max), num=60
-        )
+        f_max = 0.05 ** (1.2 - max(correlation)) / min(time[time > 0])
+        frequencies = 2 * np.pi * np.logspace(np.log10(f_min), np.log10(f_max), num=60)
     frequencies.reshape(1, -1)
 
-    if derivative == 'linear':
+    if derivative == "linear":
         derivative = (np.diff(correlation) / np.diff(time)).reshape(-1, 1)
-    elif derivative == 'stencil':
+    elif derivative == "stencil":
         _, derivative = five_point_stencil(time, correlation)
-        time = ((time[2:-1] * time[1:-2])**.5).reshape(-1, 1)
+        time = ((time[2:-1] * time[1:-2]) ** 0.5).reshape(-1, 1)
         derivative = derivative.reshape(-1, 1)
     elif np.iterable(derivative) and len(time) == len(derivative):
         derivative = derivative.reshape(-1, 1)
@@ -88,14 +90,29 @@ def filon_fourier_transformation(time, correlation,
         )
     time = time.reshape(-1, 1)
 
-    integral = (np.cos(frequencies * time[1:]) - np.cos(frequencies * time[:-1])) / frequencies**2
+    integral = (
+        np.cos(frequencies * time[1:]) - np.cos(frequencies * time[:-1])
+    ) / frequencies**2
     fourier = (derivative * integral).sum(axis=0)
 
     if imag:
-        integral = 1j * (np.sin(frequencies * time[1:]) - np.sin(frequencies * time[:-1])) / frequencies**2
-        fourier = fourier + (derivative * integral).sum(axis=0) + 1j * correlation[0] / frequencies
+        integral = (
+            1j
+            * (np.sin(frequencies * time[1:]) - np.sin(frequencies * time[:-1]))
+            / frequencies**2
+        )
+        fourier = (
+            fourier
+            + (derivative * integral).sum(axis=0)
+            + 1j * correlation[0] / frequencies
+        )
 
-    return frequencies.reshape(-1,), fourier
+    return (
+        frequencies.reshape(
+            -1,
+        ),
+        fourier,
+    )
 
 
 def mask2indices(mask):
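A hypothetical call of the reformatted function (illustrative, not part of this commit), using a toy exponential correlation function:

    import numpy as np

    t = np.logspace(-3, 2, 500)  # log-spaced times suit the log-spaced frequency grid
    C = np.exp(-t)               # toy correlation function with tau = 1
    w, S = filon_fourier_transformation(t, C, derivative="stencil")
    # w: angular frequencies, S: complex Fourier transform of C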
@@ -127,22 +144,24 @@ def superpose(x1, y1, x2, y2, N=100, damping=1.0):
     x_ol = np.logspace(
         np.log10(max(x1[~reg1][0], x2[~reg2][0]) + 0.001),
         np.log10(min(x1[~reg1][-1], x2[~reg2][-1]) - 0.001),
-        (sum(~reg1) + sum(~reg2)) / 2
+        (sum(~reg1) + sum(~reg2)) / 2,
     )
 
     def w(x):
         A = x_ol.min()
         B = x_ol.max()
-        return (np.log10(B / x) / np.log10(B / A))**damping
+        return (np.log10(B / x) / np.log10(B / A)) ** damping
 
     xdata = np.concatenate((x1[reg1], x_ol, x2[reg2]))
     y1_interp = interp1d(x1[~reg1], y1[~reg1])
     y2_interp = interp1d(x2[~reg2], y2[~reg2])
-    ydata = np.concatenate((
-        y1[x1 < x2.min()],
-        w(x_ol) * y1_interp(x_ol) + (1 - w(x_ol)) * y2_interp(x_ol),
-        y2[x2 > x1.max()]
-    ))
+    ydata = np.concatenate(
+        (
+            y1[x1 < x2.min()],
+            w(x_ol) * y1_interp(x_ol) + (1 - w(x_ol)) * y2_interp(x_ol),
+            y2[x2 > x1.max()],
+        )
+    )
     return xdata, ydata
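The weight w(x) used above falls from 1 at the lower overlap edge A to 0 at the upper edge B, so the blend hands over smoothly from y1 to y2 across the overlap. The weight in isolation (hypothetical values, not from this commit):

    import numpy as np

    A, B, damping = 1.0, 10.0, 1.0
    x = np.logspace(np.log10(A), np.log10(B), 5)
    w = (np.log10(B / x) / np.log10(B / A)) ** damping
    # w == [1.0, 0.75, 0.5, 0.25, 0.0]: full weight on y1 at A, on y2 at B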
@@ -157,9 +176,10 @@ def runningmean(data, nav):
     Returns:
         Array of shape (N-(nav-1), )
     """
-    return np.convolve(data, np.ones((nav,)) / nav, mode='valid')
+    return np.convolve(data, np.ones((nav,)) / nav, mode="valid")
 
-def moving_average(A,n=3):
+
+def moving_average(A, n=3):
     """
     Compute the running mean of an array.
     Uses the second axis if it is of higher dimensionality.
@@ -174,15 +194,15 @@ def moving_average(A,n=3):
     Supports 2D-Arrays.
     Slower than runningmean for small n but faster for large n.
     """
-    k1 = int(n/2)
-    k2 = int((n-1)/2)
+    k1 = int(n / 2)
+    k2 = int((n - 1) / 2)
     if k2 == 0:
         if A.ndim > 1:
-            return uniform_filter1d(A,n)[:,k1:]
-        return uniform_filter1d(A,n)[k1:]
+            return uniform_filter1d(A, n)[:, k1:]
+        return uniform_filter1d(A, n)[k1:]
     if A.ndim > 1:
-        return uniform_filter1d(A,n)[:,k1:-k2]
-    return uniform_filter1d(A,n)[k1:-k2]
+        return uniform_filter1d(A, n)[:, k1:-k2]
+    return uniform_filter1d(A, n)[k1:-k2]
 
 
 def coherent_sum(func, coord_a, coord_b):
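Both helpers compute a centered running mean; moving_average trims the reflected edges of scipy's uniform_filter1d so that, for 1D input, it agrees with runningmean's 'valid' convolution. A quick check (hypothetical, not part of this commit):

    import numpy as np

    data = np.arange(10, dtype=float)
    a = runningmean(data, 3)       # convolution based, 1D only
    b = moving_average(data, n=3)  # uniform_filter1d based, also handles 2D
    assert np.allclose(a, b)       # both give [1, 2, 3, 4, 5, 6, 7, 8]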
@@ -235,7 +255,9 @@ def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
     if isinstance(func, FunctionType):
         func = numba.jit(func, nopython=True, cache=True)
 
-    assert np.isclose(np.diff(bins).mean(), np.diff(bins)).all(), 'A regular distribution of bins is required.'
+    assert np.isclose(
+        np.diff(bins).mean(), np.diff(bins)
+    ).all(), "A regular distribution of bins is required."
     hmin = bins[0]
     hmax = bins[-1]
     N = len(bins) - 1
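The restructured assert states the same precondition as before: the bins must be evenly spaced, presumably so the jitted inner loop can map a value to its bin by arithmetic instead of a search. Illustrative only; the index formula below is an assumption, not code from this commit:

    import numpy as np

    bins = np.linspace(0.0, 10.0, 101)  # regular grid: passes the assert
    dx = np.diff(bins).mean()           # 0.1
    i = int((3.75 - bins[0]) / dx)      # -> bin 37, no search needed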
@@ -297,11 +319,11 @@ def Fqt_from_Grt(data, q):
     if isinstance(data, pd.DataFrame):
         df = data.copy()
     else:
-        df = pd.DataFrame(data, columns=['r', 'time', 'G'])
-    df['isf'] = df['G'] * np.sinc(q / np.pi * df['r'])
-    isf = df.groupby('time')['isf'].sum()
+        df = pd.DataFrame(data, columns=["r", "time", "G"])
+    df["isf"] = df["G"] * np.sinc(q / np.pi * df["r"])
+    isf = df.groupby("time")["isf"].sum()
 
     if isinstance(data, pd.DataFrame):
-        return pd.DataFrame({'time': isf.index, 'isf': isf.values, 'q': q})
+        return pd.DataFrame({"time": isf.index, "isf": isf.values, "q": q})
     else:
         return isf.index, isf.values
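The quote style changes nothing in the math: np.sinc(x) is sin(pi*x)/(pi*x), so np.sinc(q / np.pi * r) equals sin(q*r)/(q*r), the isotropic average of exp(i q·r), and summing over r turns G(r, t) into F(q, t). A one-line check (not from this commit):

    import numpy as np

    q, r = 2.0, 1.5
    assert np.isclose(np.sinc(q / np.pi * r), np.sin(q * r) / (q * r))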
@@ -312,6 +334,7 @@ def singledispatchmethod(func):
     def wrapper(*args, **kw):
         return dispatcher.dispatch(args[1].__class__)(*args, **kw)
 
     wrapper.register = dispatcher.register
+
     functools.update_wrapper(wrapper, func)
     return wrapper
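The functools.update_wrapper call makes the returned wrapper keep the decorated method's name and docstring. A hypothetical use of this helper, which dispatches on the type of the first argument after self:

    class Loader:
        @singledispatchmethod
        def load(self, source):
            raise NotImplementedError(type(source))

        @load.register(str)
        def _(self, source):
            with open(source) as f:
                return f.read()

    # Loader().load("data.txt") reads a file; unregistered types hit the default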
@@ -323,7 +346,7 @@ def histogram(data, bins):
     dx = dbins.mean()
     if bins.min() == 0 and dbins.std() < 1e-6:
         logger.debug("Using numpy.bincount for histogram computation.")
-        hist = np.bincount((data // dx).astype(int), minlength=len(dbins))[:len(dbins)]
+        hist = np.bincount((data // dx).astype(int), minlength=len(dbins))[: len(dbins)]
     else:
         hist = np.histogram(data, bins=bins)[0]
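The fast path applies only when the bins start at zero and are uniform: data // dx is then already the bin index, and a single np.bincount pass replaces the generic histogram. A rough equivalence check (hypothetical, not from this commit; exact ties on bin edges can differ):

    import numpy as np

    data = np.random.default_rng(0).uniform(0, 10, 1000)
    bins = np.linspace(0, 10, 11)
    dx = np.diff(bins).mean()
    fast = np.bincount((data // dx).astype(int), minlength=10)[:10]
    assert (fast == np.histogram(data, bins=bins)[0]).all()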
@@ -341,20 +364,22 @@ def quick1etau(t, C, n=7):
     n is the minimum number of points around 1/e required
     """
     # first rough estimate: the closest time. This is returned if the fit fails!
-    tau_est = t[np.argmin(np.fabs(C-np.exp(-1)))]
+    tau_est = t[np.argmin(np.fabs(C - np.exp(-1)))]
     # reduce the data to points around 1/e
     k = 0.1
-    mask = (C < np.exp(-1)+k) & (C > np.exp(-1)-k)
+    mask = (C < np.exp(-1) + k) & (C > np.exp(-1) - k)
     while np.sum(mask) < n:
         k += 0.01
-        mask = (C < np.exp(-1)+k) & (C > np.exp(-1)-k)
+        mask = (C < np.exp(-1) + k) & (C > np.exp(-1) - k)
         if k + np.exp(-1) > 1.0:
             break
     # if enough points were found, try a curve fit; otherwise, or if the fit fails, keep the estimate
     if np.sum(mask) >= n:
         try:
-            with np.errstate(invalid='ignore'):
-                fit, _ = curve_fit(kww, t[mask], C[mask], p0=[0.9, tau_est, 0.9], maxfev=100000)
+            with np.errstate(invalid="ignore"):
+                fit, _ = curve_fit(
+                    kww, t[mask], C[mask], p0=[0.9, tau_est, 0.9], maxfev=100000
+                )
             tau_est = kww_1e(*fit)
         except Exception:
             pass
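A hypothetical call (not from this commit), assuming kww is the stretched exponential A * exp(-(t / tau)**beta) and kww_1e returns its 1/e time, both defined elsewhere in this module:

    import numpy as np

    t = np.logspace(-2, 2, 200)
    C = np.exp(-t / 5.0)    # plain exponential decay, tau = 5
    tau = quick1etau(t, C)  # should come out close to 5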