def correlation(self):
    # correlate the sampled timeseries; if a reference group was given,
    # normalize each lag by the number of valid (masked) pairs that
    # contribute to it
    corr = utilities.correlate(self.timeseries)
    if self.reference is not None:
        mask = utilities.correlate(self.maskseries)
        if self.shape[1] > 1:
            # replicate the mask to match the number of columns in corr
            # (e.g., one per Cartesian component)
            tmp_mask = mask.copy()
            for i in range(1, self.shape[1]):
                mask = np.hstack((mask, tmp_mask))
        corr[mask > 0] = corr[mask > 0] / mask[mask > 0]
    if self.reduced:
        corr = np.sum(corr, axis=1)
    if self.normalize:
        corr = corr / corr[0]
    return corr
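# Note on the mask division above (a sketch of the reasoning; it assumes,
# consistently with the rest of this module, that utilities.correlate(x)
# returns, per column, sum_t x[t]*x[t+tau] / (N - tau)):
# since corr = <v(0)v(tau)> and mask = <h(0)h(tau)> carry the same
# 1/(N - tau) factor, the ratio reduces to
#
#     sum_t v(t)v(t+tau) / sum_t h(t)h(t+tau),
#
# i.e. the average restricted to pairs of frames in which the particle
# was a member of the reference group (h = 1).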
def _autocorrelation_intermittent(self, ts, ms):
    dim = self.dim
    corr = ts.copy()
    # correlate each Cartesian component separately; multiplying by the
    # mask zeroes the contributions of frames outside the reference group
    for xyz in range(dim):
        corr[:, xyz::dim] = utilities.correlate(
            ts[:, xyz::dim] * ms, _normalize=False)
    # normalize each lag by the total number of valid time origins
    corr = np.sum(corr, axis=1) / np.sum(
        np.cumsum(ms, axis=0), axis=1)[::-1]
    return corr
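# Worked numbers for the intermittent autocorrelation, using the fake
# trajectory of the correlation() docstring below (dim=1, two particles;
# this sketch assumes utilities.correlate(..., _normalize=False) returns
# the plain sliding sum sum_t x[t]*x[t+tau]):
#
#   ts = [[1, 1], [1, 1], [1, 0], [0.5, 0.5]]
#   ms = [[1, 1], [1, 1], [1, 0], [1,   1  ]]
#
#   numerator,   sum over particles of correlate(ts * ms): [5.5, 3.5, 2, 1]
#   denominator, sum over particles of cumsum(ms)[::-1]:   [7, 5, 4, 2]
#   result: [5.5/7, 3.5/5, 2/4, 1/2], the values tested in the docstring.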
def _autocorrelation_continuous(self, ts, ms):
    dim = self.dim
    n_part = len(ms[0])
    corr = np.zeros((int(ts.shape[0]), int(ts.shape[1]) // dim))
    for part in range(n_part):
        # find the rising/falling edges of the mask, i.e. the boundaries
        # of the intervals spent contiguously in the reference group
        edges = self._find_edges(ms[::, part])
        deltat = edges[1::2] - edges[0::2]
        # for each of the disconnected segments
        for n, dt in enumerate(deltat):
            t1, t2 = edges[2 * n], edges[2 * n + 1]
            i1, i2 = dim * part, dim * (part + 1)
            corr[0:dt, part] += np.sum(
                utilities.correlate(ts[t1:t2, i1:i2], _normalize=False),
                axis=1)
    return np.sum(corr, axis=1) / np.sum(
        np.cumsum(self.maskseries, axis=0)[::-1], axis=1)
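# Sketch of the segment decomposition on the same fake trajectory (the False
# rows padded around the mask in correlation() guarantee that every
# contiguous stretch of True values yields one rising and one falling edge;
# the exact return format of _find_edges() is assumed here):
#
#   particle 2 mask, padded: [0, 1, 1, 0, 1, 0]
#   edges -> [0, 2, 3, 4], i.e. segments ts[0:2] (dt=2) and ts[3:4] (dt=1)
#
# Each segment is correlated independently, so a pair of frames separated by
# tau contributes only if the particle never left the group in between.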
def _survival_intermittent(self, ms):
    # <h(0)h(tau)>, summed over particles and divided, at each lag, by
    # the number of valid time origins
    corr = np.sum(utilities.correlate(ms, _normalize=False), axis=1)
    return corr / np.sum(np.cumsum(self.timeseries, axis=0), axis=1)[::-1]
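# Worked numbers for the intermittent survival probability of the docstring
# example (when no observable is set, self.timeseries holds the same boolean
# mask as ms):
#
#   ms = [[1, 1], [1, 1], [1, 0], [1, 1]]
#   numerator,   sum over particles of correlate(ms):    [7, 4, 3, 2]
#   denominator, sum over particles of cumsum(ms)[::-1]: [7, 5, 4, 2]
#   result: [7/7, 4/5, 3/4, 2/2], as tested in the docstring below.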
def correlation(self, normalized=True, continuous=True):
    """ Calculate the autocorrelation from the sampled data

        :parameter bool normalized: normalize the correlation function:
                                    to its zero-time value for regular
                                    correlations, and to the average of
                                    the characteristic function for the
                                    survival probability.
        :parameter bool continuous: applies only when a reference group
                                    has been specified: if True (default),
                                    the contribution of a particle at time
                                    lag :math:`\\tau=t_1-t_0` is considered
                                    only if the particle did not leave the
                                    reference group between :math:`t_0` and
                                    :math:`t_1`. If False, the intermittent
                                    correlation is calculated, and the above
                                    restriction is released.

        Example:

        >>> # We build a fake trajectory to test the various options:
        >>> import MDAnalysis as mda
        >>> import pytim
        >>> import numpy as np
        >>> from pytim.datafiles import WATER_GRO
        >>> from pytim.observables import Correlator, Velocity
        >>> np.set_printoptions(suppress=True, precision=3)
        >>>
        >>> u = mda.Universe(WATER_GRO)
        >>> g = u.atoms[0:2]
        >>> g.velocities *= 0.0
        >>> g.velocities += 1.0
        >>>
        >>> # velocity autocorrelation along x, variable group
        >>> vv = Correlator(observable=Velocity('x'), reference=g)
        >>> nn = Correlator(reference=g)  # survival probability in group g
        >>>
        >>> for c in [vv, nn]:
        ...     c.sample(g)         # t=0
        ...     c.sample(g)         # t=1
        ...     c.sample(g[:1])     # t=2, exclude the second particle
        ...     g.velocities /= 2.  # from now on v=0.5
        ...     c.sample(g)         # t=3
        >>>

        The sampled timeseries can be accessed using:

        >>> print(vv.timeseries)  # rows refer to time, columns to particles
        [[1.0, 1.0], [1.0, 1.0], [1.0, 0.0], [0.5, 0.5]]
        >>>
        >>> print(nn.timeseries)
        [[True, True], [True, True], [True, False], [True, True]]
        >>>

        Note that the average of the characteristic function :math:`h(t)`
        is taken over all trajectories, including those that start with
        :math:`h=0`. The correlation :math:`\\langle h(t)h(0)\\rangle` is
        divided by the average :math:`\\langle h\\rangle` computed over all
        trajectories that extend up to a time lag :math:`t`. The
        `normalized` switch has no effect here.

        >>> # normalized, continuous
        >>> corr = nn.correlation()
        >>> print(np.allclose(corr, [7./7, 4./5, 2./4, 1./2]))
        True
        >>> # normalized, intermittent
        >>> corr = nn.correlation(continuous=False)
        >>> print(np.allclose(corr, [7./7, 4./5, 3./4, 2./2]))
        True

        The autocorrelation functions are calculated by taking into account
        in the average only those trajectories that start with :math:`h=1`
        (i.e., that start within the reference group). The normalization is
        done by dividing the correlation at time lag :math:`t` by its value
        at time lag 0, computed over all trajectories that extend up to time
        lag :math:`t` and do not start with :math:`h=0`.
        >>> # not normalized, intermittent
        >>> corr = vv.correlation(normalized=False, continuous=False)
        >>> c0 = (1+1+1+0.25+1+1+0.25)/7
        >>> c1 = (1+1+0.5+1)/5 ; c2 = (1+0.5+0.5)/4 ; c3 = (0.5+0.5)/2
        >>> print(np.allclose(corr, [c0, c1, c2, c3]))
        True
        >>> # check normalization
        >>> np.all(vv.correlation(continuous=False) == corr/corr[0])
        True
        >>> # not normalized, continuous
        >>> corr = vv.correlation(normalized=False, continuous=True)
        >>> c0 = (1+1+1+0.25+1+1+0.25)/7
        >>> c1 = (1+1+0.5+1)/5 ; c2 = (1+0.5)/4 ; c3 = (0.5+0.)/2
        >>> print(np.allclose(corr, [c0, c1, c2, c3]))
        True
        >>> # check normalization
        >>> np.all(vv.correlation(continuous=True) == corr/corr[0])
        True

    """
    intermittent = not continuous
    self.dim = self._determine_dimension()

    # the standard correlation
    if self.reference is None:
        ts = np.asarray(self.timeseries)
        corr = utilities.correlate(ts)
        corr = np.average(corr, axis=1)
        if normalized is True:
            corr /= corr[0]
        return corr

    # prepare the mask for the intermittent/continuous cases
    if intermittent is True:
        ms = np.asarray(self.maskseries, dtype=np.double)
    else:
        # we add False rows at the beginning and at the end to ease the
        # splitting into sub-trajectories
        falses = [[False] * len(self.maskseries[0])]
        ms = np.asarray(falses + self.maskseries + falses)

    # compute the survival probability
    if self.observable is None:
        return self._survival_probability(ms, normalized, intermittent)
    # compute the autocorrelation function
    else:
        ts = np.asarray(self.timeseries)
        return self._autocorrelation(ts, ms, normalized, intermittent)
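# A naive, loop-based reference implementation is useful to cross-check the
# FFT-based machinery above. The helper below is a hypothetical sketch, not
# part of the class API:
#
#   def survival_intermittent_naive(ms):
#       ms = np.asarray(ms, dtype=np.double)
#       n = len(ms)
#       num = np.array([np.sum(ms[:n - tau] * ms[tau:]) for tau in range(n)])
#       den = np.array([np.sum(ms[:n - tau]) for tau in range(n)])
#       return num / den
#
#   survival_intermittent_naive([[1, 1], [1, 1], [1, 0], [1, 1]])
#   # -> [1., 0.8, 0.75, 1.], matching nn.correlation(continuous=False)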