Example 1
 def _makeMapObjects(self):
     if self.map is None:
         self.map = InterpolatingFunction(
             (self.x_axis, self.y_axis, self.z_axis), self.data, 0.)
         self.map_gx = self.map.derivative(0)
         self.map_gy = self.map.derivative(1)
         self.map_gz = self.map.derivative(2)
Example 2
def smooth_function_from_coordinates(
        x,
        y,  # coordinates of some curve
        sample_fraction=1.0,  # fraction of x,y points used for resampling
        spline_smoothing=4,  # degree of spline (1 gives piecewise linear)
):
    """
    Given a set of coordinates in the `x` and `y` arrays, create a
    smooth function from these coordinates by 1) resampling n
    uniformly distributed points by linear interpolation of the `x`
    and `y` coordinates, where n is given by `sample_fraction` times
    the length of `x`; and 2) interpolating the resampled points by
    a smooth spline, where `spline_smoothing` is an integer holding
    the degree of the piecewise polynomial pieces of the spline
    (0 and 1 gives a piecewise linear function, 2 and higher gives
    splines of that order). Return the smooth function as a
    Python function of x, together with the (uniformly distributed)
    resampled points on which the smooth function is based.
    """
    # Construct linear interpolator of data points
    import numpy as np
    from Scientific.Functions.Interpolation \
         import InterpolatingFunction
    linear = InterpolatingFunction([x], y)
    # Resample
    xp = np.linspace(x[0], x[-1], int(sample_fraction * len(x)))  # num must be an int
    yp = np.array([linear(xi) for xi in xp])
    # Spline smoothing or linear interpolation, based on (xp,yp)
    if spline_smoothing >= 2:
        from scipy.interpolate import UnivariateSpline as Spline
        function = Spline(xp, yp, s=0, k=spline_smoothing)
    else:
        function = InterpolatingFunction([xp], yp)
    return function, xp, yp
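
A minimal usage sketch of smooth_function_from_coordinates; the noisy test
data are hypothetical, and numpy/scipy are assumed to be available:

import numpy as np
x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x) + 0.05 * np.random.randn(len(x))
f, xp, yp = smooth_function_from_coordinates(x, y, sample_fraction=0.5,
                                             spline_smoothing=3)
print f(1.0), np.sin(1.0)  # smoothed value vs. exact value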
Example 3
 def findAlphaBeta(self):
     if self.res_shells is None:
         return
     p = self.model_amplitudes*self.exp_amplitudes/self.epsilon
     t = None
     alpha = []
     beta = []
     for rsc, rsa in self.res_shells:
         a = b = c = d = 0.
         tw = len(rsc) + 2.*len(rsa)
         for ri in rsc:
             a += self.model_amplitudes[ri]**2/self.epsilon[ri]
             b += self.exp_amplitudes[ri]**2/self.epsilon[ri]
             c += p[ri]
             d += p[ri]*p[ri]
         for ri in rsa:
             a += 2.*self.model_amplitudes[ri]**2/self.epsilon[ri]
             b += 2.*self.exp_amplitudes[ri]**2/self.epsilon[ri]
             c += 2.*p[ri]
             d += 2.*p[ri]*p[ri]
         a /= tw
         b /= tw
         c /= tw
         d /= tw
         if d < a*b:
             t = 0.
         else:
             def g(t):
                 return N.sqrt(1.+4.*a*b*t*t)-2.*t*_l(t, p, rsc, rsa)-1.
             if t is None:
                 t = 1.
             while g(t) > 0.:
                 t = t/2.
             t1 = t
             while g(t) < 0.:
                 t = 2.*t
             t2 = t
             g1 = g(t1)
             g2 = g(t2)
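          # t1 and t2 now bracket the root of g; refine the bracket with
          # regula-falsi (secant) steps until it is tight relative to t1.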
             while t2-t1 > 1.e-3*t1:
                 t = t1-g1*(t2-t1)/(g2-g1)
                 gt = g(t)
                 if gt == 0.:
                     break
                 elif gt < 0:
                     t1 = t
                     g1 = gt
                 else:
                     t2 = t
                     g2 = gt
         s = N.sqrt(1.+4.*a*b*t*t)
         v = N.sqrt((s-1)/(2*a))
         u = N.sqrt((s+1)/(2*b))
         alpha.append(v/u)
         beta.append(1./(u*u))
     self.alpha = InterpolatingFunction((self.ssq_av_shell,),
                                        N.array([alpha[0]]+alpha+[alpha[-1]]))
     self.beta = InterpolatingFunction((self.ssq_av_shell,),
                                       N.array([beta[0]]+beta+[beta[-1]]))
Example 4
def Integrate(xdata, ydata):
    x_array = Numeric.array(xdata)
    y_array = Numeric.array(ydata)

    # Create the interpolating function
    f = InterpolatingFunction((x_array,), y_array)

    f_integral = f.definiteIntegral()

    return f_integral
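
A quick check of Integrate (a minimal sketch; Numeric and
InterpolatingFunction are assumed to be imported at module level as above):

xdata = [0., 0.5, 1.0, 1.5, 2.0]
ydata = [x * x for x in xdata]
print Integrate(xdata, ydata)  # trapezoidal estimate 2.75 of the exact 8/3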
Example 5
 def correlation(self, nsteps):
     """Return the autocorrelation function of the process (as estimated
     from the AR model) up to |nsteps| times the sampling interval.
     """
     poles = self.poles()
     cpoles = N.conjugate(poles)
     x = 0.
     exponents = N.arange(self.order - 1, nsteps + self.order - 1)
     for i in range(len(poles)):
         pole = poles[i]
         factor = N.multiply.reduce((pole-poles)[:i]) * \
                  N.multiply.reduce((pole-poles)[i+1:]) * \
                  N.multiply.reduce((pole-1./cpoles))
         try:
             x = x + pole**exponents / factor
         except OverflowError:
             # happens with some Python versions on some systems
             power = N.zeros(exponents.shape, N.Complex)
             for i in range(len(exponents)):
                 try:
                     power[i] = pole**exponents[i]
                 except ValueError:
                     pass
             x = x + power / factor
     cf = -self.sigsq * x / N.conjugate(self.coeff[0])
     if not isComplex(self.coeff):
         cf = realPart(cf)
     return InterpolatingFunction((self.delta_t * N.arange(nsteps), ), cf)
Example 6
    def staticStructureFactor(self,
                              q_range=(1., 15.),
                              subset=None,
                              weights=None,
                              random_vectors=15,
                              first_mode=6):
        """
        :param q_range: the range of angular wavenumber values
        :type q_range: tuple
        :param subset: the subset of the universe used in the calculation
                       (default: the whole universe)
        :type subset: :class:`~MMTK.Collections.GroupOfAtoms`
        :param weights: the weight to be given to each atom in the average
                        (default: coherent scattering lengths)
        :type weights: :class:`~MMTK.ParticleProperties.ParticleScalar`
        :param random_vectors: the number of random direction vectors
                               used in the orientational average
        :type random_vectors: int
        :param first_mode: the first mode to be taken into account for
                           the fluctuation calculation. The default value
                           of 6 is right for molecules in vacuum.
        :type first_mode: int
        :returns: the Static Structure Factor as a
                  function of angular wavenumber
        :rtype: Scientific.Functions.Interpolation.InterpolatingFunction
        """
        if subset is None:
            subset = self.universe
        if weights is None:
            weights = self.universe.getParticleScalar('b_coherent')
        mask = subset.booleanMask()
        weights = N.repeat(weights.array, mask.array)
        weights = weights / N.sqrt(N.add.reduce(weights * weights))
        friction = N.repeat(self.friction.array, mask.array)
        r = N.repeat(self.universe.configuration().array, mask.array)

        first, last, step = (q_range + (None, ))[:3]
        if step is None:
            step = (last - first) / 50.
        q = N.arange(first, last, step)

        kT = Units.k_B * self.temperature
        natoms = subset.numberOfAtoms()
        sq = 0.
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            sab = N.zeros((natoms, natoms), N.Float)
            for i in range(first_mode, self.nmodes):
                irt = self.rawMode(i).inv_relaxation_time
                d = N.repeat((self.rawMode(i)*v).array, mask.array) \
                       / N.sqrt(friction)
                sab = sab + (d[N.NewAxis, :] - d[:, N.NewAxis])**2 / irt
            sab = sab[N.NewAxis, :, :] * q[:, N.NewAxis, N.NewAxis]**2
            phase = N.exp(-1.j*q[:, N.NewAxis]
                          * N.dot(r, v.array)[N.NewAxis, :]) \
                    * weights[N.NewAxis, :]
            temp = N.sum(phase[:, :, N.NewAxis] * N.exp(-0.5 * kT * sab), 1)
            temp = N.sum(N.conjugate(phase) * temp, 1)
            sq = sq + temp.real
        return InterpolatingFunction((q, ), sq / len(random_vectors))
Example 7
    def EISF(self,
             q_range=(0., 15.),
             subset=None,
             weights=None,
             random_vectors=15,
             first_mode=6):
        if subset is None:
            subset = self.universe
        if weights is None:
            weights = self.universe.getParticleScalar('b_incoherent')
            weights = weights * weights
        weights = weights * subset.booleanMask()
        total = weights.sumOverParticles()
        weights = weights / total

        first, last, step = (q_range + (None, ))[:3]
        if step is None:
            step = (last - first) / 50.
        q = N.arange(first, last, step)

        f = ParticleProperties.ParticleTensor(self.universe)
        for i in range(first_mode, self.nmodes):
            mode = self.rawMode(i)
            f = f + (1. / mode.inv_relaxation_time) * mode.dyadicProduct(mode)
        f = Units.k_B * self.temperature * f / self.friction

        eisf = N.zeros(q.shape, N.Float)
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            for a in subset.atomList():
                exp = N.exp(-v * (f[a] * v))
                N.add(eisf, weights[a] * exp**(q * q), eisf)
        return InterpolatingFunction((q, ), eisf / len(random_vectors))
Example 8
 def __init__(self, data):
     self.data = data  # (x,y,f) data for an f(x,y) function
     from Scientific.Functions.Interpolation \
          import InterpolatingFunction # from ScientificPython
     self.interpolating_function = \
          InterpolatingFunction(self.data[:-1], self.data[-1])
     self.ndims = len(self.data[:-1])  # no of spatial dim.
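
A hedged usage sketch; the enclosing class is not shown above, so the
hypothetical name Table2D stands in for it here:

import numpy as np
x = np.array([0., 1., 2.])
y = np.array([0., 1., 2.])
f = np.add.outer(x, y)      # tabulated f(x,y) = x + y
table = Table2D((x, y, f))  # data = (x, y, f)
print table.interpolating_function(0.5, 1.5)  # exactly 2.0 for linear data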
Example 9
 def correlation(self, nsteps):
     """
     @param nsteps: the number of time steps for which the autocorrelation
     function is to be evaluated
     @type nsteps: C{int}
     @returns: the autocorrelation function of the process as estimated
     from the AR model
     @rtype: L{Scientific.Functions.Interpolation.InterpolatingFunction}
     """
     poles = self.poles()
     cpoles = N.conjugate(poles)
     x = 0.
     exponents = N.arange(self.order - 1, nsteps + self.order - 1)
     for i in range(len(poles)):
         pole = poles[i]
         factor = N.multiply.reduce((pole-poles)[:i]) * \
                  N.multiply.reduce((pole-poles)[i+1:]) * \
                  N.multiply.reduce((pole-1./cpoles))
         try:
             x = x + pole**exponents / factor
         except OverflowError:
             # happens with some Python versions on some systems
             power = N.zeros(exponents.shape, N.Complex)
             for i in range(len(exponents)):
                 try:
                     power[i] = pole**exponents[i]
                 except ValueError:
                     pass
             x = x + power / factor
     cf = -self.sigsq * x / N.conjugate(self.coeff[0])
     if not _isComplex(self.coeff):
         cf = _realPart(cf)
     return InterpolatingFunction((self.delta_t * N.arange(nsteps), ), cf)
Example 10
 def meanSquareDisplacement(self,
                            subset=None,
                            weights=None,
                            time_range=(0., None, None),
                            first_mode=6):
     """Returns the averaged mean-square displacement of the
     atoms in |subset| (default: all atoms) at time points
     defined by |time_range| using |weights| in the average
     (default: masses). |time_range| is a three element tuple
     (first, last, step). The defaults are first=0., last=
     three times the longest relaxation time, and step defined
     such that 300 points are used in total.
     """
     if subset is None:
         subset = self.universe
     if weights is None:
         weights = self.universe.masses()
     weights = weights * subset.booleanMask()
     total = weights.sumOverParticles()
     weights = weights / (total * self.friction)
     first, last, step = (time_range + (None, None))[:3]
     if last is None:
         last = 3. / self.rawMode(first_mode).inv_relaxation_time
     if step is None:
         step = (last - first) / 300.
     time = N.arange(first, last, step)
     msd = N.zeros(time.shape, N.Float)
     for i in range(first_mode, self.nmodes):
         mode = self.rawMode(i)
         rt = mode.inv_relaxation_time
         d = (weights * (mode * mode)).sumOverParticles()
         N.add(msd, d * (1. - N.exp(-rt * time)) / rt, msd)
     N.multiply(msd, 2. * Units.k_B * self.temperature, msd)
     return InterpolatingFunction((time, ), msd)
Example 11
 def spectrum(self, omega):
     """Return the frequency spectrum of the process at the
     angular frequencies |omega| (an array).
     """
     sum = 1.
     for i in range(1, len(self.coeff) + 1):
         sum = sum - self.coeff[-i] * N.exp(-1j * i * self.delta_t * omega)
     s = 0.5 * self.delta_t * self.sigsq / (sum * N.conjugate(sum)).real
     return InterpolatingFunction((omega, ), s)
Example 12
 def memoryFunction(self, nsteps):
     """Return the memory function corresponding to the autocorrelation
     function of the process up to |nsteps| times the sampling interval.
     """
     mz = self.memoryFunctionZapprox(nsteps + self.order)
     mem = mz.divide(nsteps - 1)[0].coeff[::-1]
     if len(mem) == nsteps + 1:
         mem = mem[1:]
     mem[0] = 2. * realPart(mem[0])
     time = self.delta_t * N.arange(nsteps)
     return InterpolatingFunction((time, ), mem)
Example 13
    def EISF(self,
             q_range=(0., 15.),
             subset=None,
             weights=None,
             random_vectors=15,
             first_mode=6):
        """
        :param q_range: the range of angular wavenumber values
        :type q_range: tuple
        :param subset: the subset of the universe used in the calculation
                       (default: the whole universe)
        :type subset: :class:`~MMTK.Collections.GroupOfAtoms`
        :param weights: the weight to be given to each atom in the average
                        (default: incoherent scattering lengths)
        :type weights: :class:`~MMTK.ParticleProperties.ParticleScalar`
        :param random_vectors: the number of random direction vectors
                               used in the orientational average
        :type random_vectors: int
        :param first_mode: the first mode to be taken into account for
                           the fluctuation calculation. The default value
                           of 6 is right for molecules in vacuum.
        :type first_mode: int
        :returns: the Elastic Incoherent Structure Factor (EISF) as a
                  function of angular wavenumber
        :rtype: Scientific.Functions.Interpolation.InterpolatingFunction
        """
        if subset is None:
            subset = self.universe
        if weights is None:
            weights = self.universe.getParticleScalar('b_incoherent')
            weights = weights * weights
        weights = weights * subset.booleanMask()
        total = weights.sumOverParticles()
        weights = weights / total

        first, last, step = (q_range + (None, ))[:3]
        if step is None:
            step = (last - first) / 50.
        q = N.arange(first, last, step)

        f = ParticleProperties.ParticleTensor(self.universe)
        for i in range(first_mode, self.nmodes):
            mode = self.rawMode(i)
            f = f + (1. / mode.inv_relaxation_time) * mode.dyadicProduct(mode)
        f = Units.k_B * self.temperature * f / self.friction

        eisf = N.zeros(q.shape, N.Float)
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            for a in subset.atomList():
                exp = N.exp(-v * (f[a] * v))
                N.add(eisf, weights[a] * exp**(q * q), eisf)
        return InterpolatingFunction((q, ), eisf / len(random_vectors))
Example 14
 def spectrum(self, omega):
     """
     @param omega: the angular frequencies at which the spectrum
     is to be evaluated
     @type omega: C{Numeric.array} of C{float}
     @returns: the frequency spectrum of the process
     @rtype: C{Numeric.array} of C{float}
     """
     sum = 1.
     for i in range(1, len(self.coeff) + 1):
         sum = sum - self.coeff[-i] * N.exp(-1j * i * self.delta_t * omega)
     s = 0.5 * self.delta_t * self.sigsq / (sum * N.conjugate(sum)).real
     return InterpolatingFunction((omega, ), s)
Example 15
 def memoryFunction(self, nsteps):
     mz = self.memoryFunctionZ()
     mem = mz.divide(nsteps - 1)[0].coeff[:]
     mem.reverse()
     if len(mem) == nsteps + 1:
         mem = mem[1:]
     if isComplex(self.coeff[0]):
         mem = N.array([complex(m.real, m.imag) for m in mem])
     else:
         mem = N.array([float(m) for m in mem])
     mem[0] = 2. * realPart(mem[0])
     time = self.delta_t * N.arange(nsteps)
     return InterpolatingFunction((time, ), mem)
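
A hedged usage sketch for these AR-model methods, assuming model is a
fitted AutoRegressiveModel instance as in the test driver of Example 21:

c = model.correlation(200)                  # autocorrelation vs. time
s = model.spectrum(N.arange(0., 5., 0.01))  # spectrum vs. angular frequency
m = model.memoryFunction(200)
print c(10 * model.delta_t), m.definiteIntegral()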
Example 16
 def meanSquareDisplacement(self,
                            subset=None,
                            weights=None,
                            time_range=(0., None, None),
                            first_mode=6):
     """
     :param subset: the subset of the universe used in the calculation
                    (default: the whole universe)
      :type subset: :class:`~MMTK.Collections.GroupOfAtoms`
     :param weights: the weight to be given to each atom in the average
                     (default: atomic masses)
      :type weights: :class:`~MMTK.ParticleProperties.ParticleScalar`
     :param time_range: the time values at which the mean-square
                        displacement is evaluated, specified as a
                        range tuple (first, last, step).
                        The defaults are first=0, last=
                         three times the longest relaxation time,
                        and step defined such that 300 points are
                        used in total.
     :type time_range: tuple
     :param first_mode: the first mode to be taken into account for
                        the fluctuation calculation. The default value
                        of 6 is right for molecules in vacuum.
     :type first_mode: int
     :returns: the averaged mean-square displacement of the
               atoms in subset as a function of time
     :rtype: Scientific.Functions.Interpolation.InterpolatingFunction
     """
     if subset is None:
         subset = self.universe
     if weights is None:
         weights = self.universe.masses()
     weights = weights * subset.booleanMask()
     total = weights.sumOverParticles()
     weights = weights / (total * self.friction)
     first, last, step = (time_range + (None, None))[:3]
     if last is None:
         last = 3. / self.rawMode(first_mode).inv_relaxation_time
     if step is None:
         step = (last - first) / 300.
     time = N.arange(first, last, step)
     msd = N.zeros(time.shape, N.Float)
     for i in range(first_mode, self.nmodes):
         mode = self.rawMode(i)
         rt = mode.inv_relaxation_time
         d = (weights * (mode * mode)).sumOverParticles()
         N.add(msd, d * (1. - N.exp(-rt * time)) / rt, msd)
     N.multiply(msd, 2. * Units.k_B * self.temperature, msd)
     return InterpolatingFunction((time, ), msd)
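
A hedged usage sketch, assuming bmodes is the Brownian-modes object
(providing rawMode, friction, temperature, etc.) on which this method is
defined:

msd = bmodes.meanSquareDisplacement(time_range=(0., 100., 0.5))
print msd(50.)  # mean-square displacement at t = 50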
Example 17
 def memoryFunction(self, nsteps):
     """
     @param nsteps: the number of time steps for which the memory
     function is to be evaluated
     @type nsteps: C{int}
     @returns: the memory function of the process as estimated
     from the AR model
     @rtype: L{Scientific.Functions.Interpolation.InterpolatingFunction}
     """
     mz = self.memoryFunctionZapprox(nsteps + self.order)
     mem = mz.divide(nsteps - 1)[0].coeff[::-1]
     if len(mem) == nsteps + 1:
         mem = mem[1:]
     mem[0] = 2. * _realPart(mem[0])
     time = self.delta_t * N.arange(nsteps)
     return InterpolatingFunction((time, ), mem)
Example 18
    def coherentScatteringFunction(self,
                                   q,
                                   time_range=(0., None, None),
                                   subset=None,
                                   weights=None,
                                   random_vectors=15,
                                   first_mode=6):
        if subset is None:
            subset = self.universe
        if weights is None:
            weights = self.universe.getParticleScalar('b_coherent')
        mask = subset.booleanMask()
        weights = N.repeat(weights.array, mask.array)
        weights = weights / N.sqrt(N.add.reduce(weights * weights))
        friction = N.repeat(self.friction.array, mask.array)
        r = N.repeat(self.universe.configuration().array, mask.array)

        first, last, step = (time_range + (None, None))[:3]
        if last is None:
            last = 3. / self.rawMode(first_mode).inv_relaxation_time
        if step is None:
            step = (last - first) / 300.
        time = N.arange(first, last, step)

        natoms = subset.numberOfAtoms()
        kT = Units.k_B * self.temperature
        fcoh = N.zeros((len(time), ), N.Complex)
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            phase = N.exp(-1.j * q * N.dot(r, v.array))
            for ai in range(natoms):
                fbt = N.zeros((natoms, len(time)), N.Float)
                for i in range(first_mode, self.nmodes):
                    irt = self.rawMode(i).inv_relaxation_time
                    d = q * N.repeat((self.rawMode(i)*v).array, mask.array) \
                        / N.sqrt(friction)
                    ft = N.exp(-irt * time) / irt
                    N.add(fbt, d[ai] * d[:, N.NewAxis] * ft[N.NewAxis, :], fbt)
                    N.add(fbt, (-0.5 / irt) * (d[ai]**2 + d[:, N.NewAxis]**2),
                          fbt)
                N.add(
                    fcoh, weights[ai] * phase[ai] *
                    N.dot(weights * N.conjugate(phase), N.exp(kT * fbt)), fcoh)
        return InterpolatingFunction((time, ), fcoh.real / len(random_vectors))
Example 19
    def incoherentScatteringFunction(self,
                                     q,
                                     time_range=(0., None, None),
                                     subset=None,
                                     random_vectors=15,
                                     first_mode=6):
        if subset is None:
            subset = self.universe
        mask = subset.booleanMask()
        weights_inc = self.universe.getParticleScalar('b_incoherent')
        weights_inc = N.repeat(weights_inc.array**2, mask.array)
        weights_inc = weights_inc / N.add.reduce(weights_inc)
        friction = N.repeat(self.friction.array, mask.array)
        mass = N.repeat(self.universe.masses().array, mask.array)
        r = N.repeat(self.universe.configuration().array, mask.array)

        first, last, step = (time_range + (None, None))[:3]
        if last is None:
            last = 3. / self.weighedMode(first_mode).inv_relaxation_time
        if step is None:
            step = (last - first) / 300.
        time = N.arange(first, last, step)

        natoms = subset.numberOfAtoms()
        kT = Units.k_B * self.temperature
        finc = N.zeros((len(time), ), N.Float)
        eisf = 0.
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            phase = N.exp(-1.j * q * N.dot(r, v.array))
            faat = N.zeros((natoms, len(time)), N.Float)
            eisf_sum = N.zeros((natoms, ), N.Float)
            for i in range(first_mode, self.nmodes):
                irt = self.rawMode(i).inv_relaxation_time
                d = q * N.repeat((self.rawMode(i)*v).array, mask.array) \
                    / N.sqrt(friction)
                ft = (N.exp(-irt * time) - 1.) / irt
                N.add(faat, d[:, N.NewAxis]**2 * ft[N.NewAxis, :], faat)
                N.add(eisf_sum, -d**2 / irt, eisf_sum)
            N.add(finc, N.sum(weights_inc[:, N.NewAxis] * N.exp(kT * faat), 0),
                  finc)
            eisf = eisf + N.sum(weights_inc * N.exp(kT * eisf_sum))
        return InterpolatingFunction((time, ), finc / len(random_vectors))
Example 20
    def staticStructureFactor(self,
                              q_range=(1., 15.),
                              subset=None,
                              weights=None,
                              random_vectors=15,
                              first_mode=6):
        if subset is None:
            subset = self.universe
        if weights is None:
            weights = self.universe.getParticleScalar('b_coherent')
        mask = subset.booleanMask()
        weights = N.repeat(weights.array, mask.array)
        weights = weights / N.sqrt(N.add.reduce(weights * weights))
        friction = N.repeat(self.friction.array, mask.array)
        r = N.repeat(self.universe.configuration().array, mask.array)

        first, last, step = (q_range + (None, ))[:3]
        if step is None:
            step = (last - first) / 50.
        q = N.arange(first, last, step)

        kT = Units.k_B * self.temperature
        natoms = subset.numberOfAtoms()
        sq = 0.
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            sab = N.zeros((natoms, natoms), N.Float)
            for i in range(first_mode, self.nmodes):
                irt = self.rawMode(i).inv_relaxation_time
                d = N.repeat((self.rawMode(i)*v).array, mask.array) \
                       / N.sqrt(friction)
                sab = sab + (d[N.NewAxis, :] - d[:, N.NewAxis])**2 / irt
            sab = sab[N.NewAxis, :, :] * q[:, N.NewAxis, N.NewAxis]**2
            phase = N.exp(-1.j*q[:, N.NewAxis]
                          * N.dot(r, v.array)[N.NewAxis, :]) \
                    * weights[N.NewAxis, :]
            temp = N.sum(phase[:, :, N.NewAxis] * N.exp(-0.5 * kT * sab), 1)
            temp = N.sum(N.conjugate(phase) * temp, 1)
            sq = sq + temp.real
        return InterpolatingFunction((q, ), sq / len(random_vectors))
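
A hedged usage sketch for the scattering methods above, again assuming a
Brownian-modes object bmodes:

sq = bmodes.staticStructureFactor(q_range=(1., 15., 0.1))
fcoh = bmodes.coherentScatteringFunction(5., time_range=(0., 10., 0.1))
print sq(5.), fcoh(1.)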
Example 21
    from Gnuplot import plot
    from RandomArray import random
    dt = 1.
    t = dt * N.arange(500)
    if 1:
        data = N.sin(t) + N.cos(3. * t) + 0.1 * (random(len(t)) - 0.5)
        data = data + 0.1j * (random(len(t)) - 0.5)
    if 0:
        data = [0.]
        for i in range(500 + len(t) - 1):
            data.append(mean(data[-500:]) + gaussian(0., 0.1))
        data = N.exp(1j * N.array(data[500:]))

    if 0:
        #data = readArray('~/scientific/Test/data')
        string = open('/users1/hinsen/scientific/Test/data').read()[4:]
        data = N.array(eval(string))
        data = data[:, 0]
    model = AutoRegressiveModel(20, data, dt)
    print model.coeff
    print model.poles()
    c = model.correlation(200)
    cref = InterpolatingFunction((t, ), AutoCorrelationFunction(data))[:200]
    m = model.memoryFunction(200)
    s = model.spectrum(N.arange(0., 5., 0.01))
    #plot(c.real, cref.real); plot(c.imag, cref.imag)
    print model.frictionConstant(), \
          model.memoryFunctionZ()(1.), m.definiteIntegral()
    #plot(m.real, m.imag)
    plot(m)
Example 22
x = D(10, index=0, order=2)
y = D(0, index=1, order=2)
z = D(1, index=2, order=2)
r = somefunc(x, y, z)
print r
# (40, [3, -1, 20], [[0, 0, 0], [0, 0, 0], [0, 0, 20]])
print "d^2(somefunc)/dzdx:", r[2][2][0]  # 0
print "d^2(somefunc)/dz^2:", r[2][2][2]  # 20

print "\n\ntesting interpolation:"
from Scientific.Functions.Interpolation \
     import InterpolatingFunction as Ip
t = sequence(0, 10, 0.1)
v = sin(t)
vi = Ip((t, ), v)
# interpolate and compare with exact result:
print "interpolated:", vi(5.05), " exact:", sin(5.05)
# interpolate the derivative of v:
vid = vi.derivative()
print "interpolated derivative:", vid(5.05), " exact:", cos(5.05)
# compute the integral of v over all t values:
print "definite integral:", vi.definiteIntegral(), \
      " exact:", -cos(t[-1]) - (-cos(t[0]))

# add path to Grid2D:
sys.path.insert(0,
                os.path.join(os.environ['scripting'], 'src', 'py', 'examples'))
from Grid2D import Grid2D
g = Grid2D(dx=0.1, dy=0.2)
f = g('sin(pi*x)*sin(pi*y)')
Example 23
class EventDamageModel:
    """
    Object for working out the damage and cost

    """
    STRUCT_LOSS_TITLE = "STRUCT_LOSS_$"  #"Structure Loss ($)"
    CONTENTS_LOSS_TITLE = "CONTENTS_LOSS_$"  #"Contents Loss ($)"
    CONTENTS_DAMAGE_TITLE = "CONTENTS_DAMAGE_fraction"  #"Contents damaged (fraction)"
    STRUCT_DAMAGE_TITLE = "STRUCT_DAMAGE_fraction"  #"Structure damaged (fraction)"
    COLLAPSE_CSV_INFO_TITLE = "COLLAPSE_CSV_INFO"  #"Calculation notes"
    MAX_DEPTH_TITLE = "MAX_DEPTH_m"  #"Inundation height above ground floor (m)"
    STRUCT_COLLAPSED_TITLE = "STRUCT_COLLAPSED"  #"collapsed structure if 1"
    STRUCT_INUNDATED_TITLE = "STRUCT_INUNDATED"  #"inundated structure if 1"
    double_brick_damage_array = num.array(
        [  #[-kinds.default_float_kind.MAX, 0.0],
            [-1000.0, 0.0], [0.0 - depth_epsilon, 0.0], [0.0, 0.016],
            [0.1, 0.150], [0.3, 0.425], [0.5, 0.449], [1.0, 0.572],
            [1.5, 0.582], [2.0, 0.587], [2.5, 0.647], [1000.0, 64.7]
            #[kinds.default_float_kind.MAX,64.7]
        ])

    if scipy_available:
        double_brick_damage_curve = interp1d(double_brick_damage_array[:, 0],
                                             double_brick_damage_array[:, 1])
    else:
        double_brick_damage_curve = InterpolatingFunction( \
             (num.ravel(double_brick_damage_array[:,0:1]),),
              num.ravel(double_brick_damage_array[:,1:]))

    brick_veeer_damage_array = num.array(
        [  #[-kinds.default_float_kind.MAX, 0.0],
            [-1000.0, 0.0], [0.0 - depth_epsilon, 0.0], [0.0, 0.016],
            [0.1, 0.169], [0.3, 0.445], [0.5, 0.472], [1.0, 0.618],
            [1.5, 0.629], [2.0, 0.633], [2.5, 0.694], [1000.0, 69.4]
            #[kinds.default_float_kind.MAX,69.4]
        ])

    if scipy_available:
        brick_veeer_damage_curve = interp1d(brick_veeer_damage_array[:, 0],
                                            brick_veeer_damage_array[:, 1])
    else:
        brick_veeer_damage_curve = InterpolatingFunction( \
                                 (num.ravel(brick_veeer_damage_array[:,0:1]),),
                                  num.ravel(brick_veeer_damage_array[:,1:]))

    struct_damage_curve = {
        'Double Brick': double_brick_damage_curve,
        'Brick Veneer': brick_veeer_damage_curve
    }
    default_struct_damage_curve = brick_veeer_damage_curve

    contents_damage_array = num.array([  #[-kinds.default_float_kind.MAX, 0.0],
        [-1000.0, 0.0], [0.0 - depth_epsilon, 0.0], [0.0, 0.013], [0.1, 0.102],
        [0.3, 0.381], [0.5, 0.500], [1.0, 0.970], [1.5, 0.976], [2.0, 0.986],
        [1000.0, 98.6]
        #[kinds.default_float_kind.MAX,98.6]
    ])

    if scipy_available:
        contents_damage_curve = interp1d(contents_damage_array[:, 0],
                                         contents_damage_array[:, 1])
    else:
        contents_damage_curve = InterpolatingFunction( \
             (num.ravel(contents_damage_array[:,0:1]),),
              num.ravel(contents_damage_array[:,1:]))

    #building collapse probability
    # inundation depth above ground floor, m
    depth_upper_limits = [
        depth_epsilon, 1.0, 2.0, 3.0, 5.0, kinds.default_float_kind.MAX
    ]
    # shore distance, m
    shore_upper_limits = [125, 200, 250, kinds.default_float_kind.MAX]
    # Building collapse probability
    collapse_probability = [
        [0.0, 0.0, 0.0, 0.0],  #Code below assumes 0.0
        [0.05, 0.02, 0.01, 0.0],
        [0.6, 0.3, 0.1, 0.05],
        [0.8, 0.4, 0.25, 0.15],
        [0.95, 0.7, 0.5, 0.3],
        [0.99, 0.9, 0.65, 0.45]
    ]

    def __init__(self, max_depths, shore_distances, walls, struct_costs,
                 content_costs):
        """
        max depth is Inundation height above ground floor (m), so
                  the ground floor has been taken into account.
        """
        self.max_depths = [float(x) for x in max_depths]
        self.shore_distances = [float(x) for x in shore_distances]
        self.walls = walls
        self.struct_costs = [float(x) for x in struct_costs]
        self.content_costs = [float(x) for x in content_costs]

        self.structure_count = len(self.max_depths)
        #Fixme expand
        assert self.structure_count == len(self.shore_distances)
        assert self.structure_count == len(self.walls)
        assert self.structure_count == len(self.struct_costs)
        assert self.structure_count == len(self.content_costs)
        #assert  self.structure_count == len(self.)

    def calc_damage_and_costs(self, verbose_csv=False, verbose=False):
        """
        This is an overall method to calculate the % damage and collapsed
        structures and then the $ loss.
        """
        self.calc_damage_percentages()
        collapse_probability = self.calc_collapse_probability()
        self._calc_collapse_structures(collapse_probability,
                                       verbose_csv=verbose_csv)
        self.calc_cost()
        results_dict = {
            self.STRUCT_LOSS_TITLE: self.struct_loss,
            self.STRUCT_DAMAGE_TITLE: self.struct_damage,
            self.CONTENTS_LOSS_TITLE: self.contents_loss,
            self.CONTENTS_DAMAGE_TITLE: self.contents_damage,
            self.MAX_DEPTH_TITLE: self.max_depths,
            self.STRUCT_COLLAPSED_TITLE: self.struct_collapsed,
            self.STRUCT_INUNDATED_TITLE: self.struct_inundated
        }
        if verbose_csv:
            results_dict[self.COLLAPSE_CSV_INFO_TITLE] = self.collapse_csv_info
        return results_dict

    def calc_damage_percentages(self):
        """
        Using stage curves calc the damage to structures and contents
        """

        # the data being created
        struct_damage = num.zeros(self.structure_count, num.float)
        contents_damage = num.zeros(self.structure_count, num.float)
        self.struct_inundated = [''] * self.structure_count

        for i, max_depth, shore_distance, wall in map(
                None, range(self.structure_count), self.max_depths,
                self.shore_distances, self.walls):
            ## WARNING SKIP IF DEPTH < 0.0
            if 0.0 > max_depth:
                continue

            # The definition of inundated is if the max_depth is > 0.0
            self.struct_inundated[i] = 1.0

            #calc structural damage %
            damage_curve = self.struct_damage_curve.get(
                wall, self.default_struct_damage_curve)
            struct_damage[i] = damage_curve(max_depth)
            contents_damage[i] = self.contents_damage_curve(max_depth)

        self.struct_damage = struct_damage
        self.contents_damage = contents_damage

    def calc_cost(self):
        """
        Once the damage has been calculated, determine the $ cost.
        """
        # ensure_numeric does not cut it.
        self.struct_loss = self.struct_damage * \
                           ensure_numeric(self.struct_costs)
        self.contents_loss = self.contents_damage * \
                           ensure_numeric(self.content_costs)

    def calc_collapse_probability(self):
        """
        return a dict of which structures have x probability of collapse.
             key is collapse probability
             value is list of struct indexes with key probability of collapse 
        """
        # This could have been done in calc_damage_percentages with a
        # single loop, but for ease of testing and bug finding the loops
        # are kept separate. The outer loop is the same in both, though,
        # so this loop can easily be folded into the other one.

        # dict of which structures have x probability of collapse.
        # key of collapse probability
        # value of list of struct indexes
        struct_coll_prob = {}

        for i, max_depth, shore_distance, wall in map(
                None, range(self.structure_count), self.max_depths,
                self.shore_distances, self.walls):
            # WARNING: assuming the first bin of depths gives a zero probability
            depth_upper_limits = self.depth_upper_limits
            shore_upper_limits = self.shore_upper_limits
            collapse_probability = self.collapse_probability
            if max_depth <= depth_upper_limits[0]:
                continue
            start = 1
            for i_depth, depth_limit in enumerate(depth_upper_limits[start:]):
                #Have to change i_depth so it indexes into the lists correctly
                i_depth += start
                if max_depth <= depth_limit:
                    for i_shore, shore_limit in enumerate(shore_upper_limits):
                        if shore_distance <= shore_limit:
                            coll_prob = collapse_probability[i_depth][i_shore]
                            if 0.0 == collapse_probability[i_depth][i_shore]:
                                break
                            struct_coll_prob.setdefault(coll_prob,
                                                        []).append(i)
                            break
                    break

        return struct_coll_prob

    def _calc_collapse_structures(self,
                                  collapse_probability,
                                  verbose_csv=False):
        """
        Given the collapse probabilities, throw the dice
        and collapse some houses
        """

        self.struct_collapsed = [''] * self.structure_count
        if verbose_csv:
            self.collapse_csv_info = [''] * self.structure_count
        #for a given 'bin', work out how many houses will collapse
        for probability, house_indexes in collapse_probability.iteritems():
            collapse_count = round(len(house_indexes) * probability)

            if verbose_csv:
                for i in house_indexes:
                    # This could be sped up I think
                    self.collapse_csv_info[i] = str(probability) + ' prob.( ' \
                           + str(int(collapse_count)) + ' collapsed out of ' \
                           + str(len(house_indexes)) + ')'
            for _ in range(int(collapse_count)):
                house_index = choice(house_indexes)
                self.struct_damage[house_index] = 1.0
                self.contents_damage[house_index] = 1.0
                house_indexes.remove(house_index)
                self.struct_collapsed[house_index] = 1
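
A hedged usage sketch with two hypothetical structures (the module-level
imports num, ensure_numeric and choice are assumed as above):

model = EventDamageModel(max_depths=[0.5, 2.0],
                         shore_distances=[100., 300.],
                         walls=['Double Brick', 'Brick Veneer'],
                         struct_costs=[200000., 150000.],
                         content_costs=[60000., 40000.])
results = model.calc_damage_and_costs()
print results[EventDamageModel.STRUCT_LOSS_TITLE]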
Example 24
class DensityMap:
    def __init__(self, filename):
        if filename is None:
            return
        filetype = os.path.splitext(filename)[1]
        if filetype.lower() == '.ezd':
            self.readEZD(filename)
        elif filetype.lower() == '.ccp4':
            self.readCCP4(filename)
        else:
            raise ValueError("Unknown file type %s" % filetype)
        self.map = None
        self.box = Box(
            Vector(self.x_axis[0], self.y_axis[0], self.z_axis[0]),
            Vector(self.x_axis[-1], self.y_axis[-1], self.z_axis[-1]))
        self.normalize()

    def readEZD(self, filename):
        file = open(filename)
        while 1:
            line = file.readline()
            if not line:
                raise IOError, "unexpected end of file"
            words = string.split(line)
            if words[0] == 'MAP':
                break
            if words[0] == 'CELL':
                cell_x, cell_y, cell_z, alpha, beta, gamma = \
                        map(float, words[1:])
                if alpha != 90. or beta != 90. or gamma != 90.:
                    raise ValueError, "cell must be rectangular"
            if words[0] == 'EXTENT':
                self.nx, self.ny, self.nz = map(int, words[1:])  # grid counts must be ints
            if words[0] == 'GRID':
                gnx, gny, gnz = map(float, words[1:])

        data = []
        while 1:
            line = file.readline()
            if not line:
                raise IOError, "unexpected end of file"
            line = string.join(string.split(line, '.-'), '. -')
            words = string.split(line)
            if words[0] == 'END':
                break
            for value in map(float, words):
                data.append(value)

        data = N.array(data)
        data.shape = (self.nz, self.ny, self.nx)
        self.data = N.transpose(data)
        self.x_axis = N.arange(self.nx) * (cell_x / gnx) * Units.Ang
        self.y_axis = N.arange(self.ny) * (cell_y / gny) * Units.Ang
        self.z_axis = N.arange(self.nz) * (cell_z / gnz) * Units.Ang

    def readCCP4(self, filename):
        mapfile = file(filename, 'rb')  # binary mode for the packed map data
        header_data = mapfile.read(1024)
        NC, NR, NS, MODE, NCSTART, NRSTART, NSSTART, NX, NY, NZ, X, Y, Z, \
            ALPHA, BETA, GAMMA, MAPC, MAPR, MAPS, AMIN, AMAX, AMEAN, \
            ISPG, NSYMBT, LSKFLG = struct.unpack('=10l6f3l3f3l',
                                                 header_data[:4*25])
        if MODE == 2:
            byte_order = '='
        elif MODE == 33554432:
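            # MODE reads as byte-swapped 2 (0x02000000): re-read the header
            # assuming big-endian, then little-endian, byte order.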
            NC, NR, NS, MODE, NCSTART, NRSTART, NSSTART, NX, NY, NZ, X, Y, Z, \
                ALPHA, BETA, GAMMA, MAPC, MAPR, MAPS, AMIN, AMAX, AMEAN, \
                ISPG, NSYMBT, LSKFLG = struct.unpack('>10l6f3l3f3l',
                                                     header_data[:4*25])
            byte_order = '>'
            if MODE == 33554432:
                NC, NR, NS, MODE, NCSTART, NRSTART, NSSTART, NX, NY, NZ, \
                    X, Y, Z, ALPHA, BETA, GAMMA, MAPC, MAPR, MAPS, \
                    AMIN, AMAX, AMEAN, ISPG, NSYMBT, LSKFLG \
                    = struct.unpack('<10l6f3l3f3l', header_data[:4*25])
                byte_order = '<'

        else:
            raise IOError("Not a mode 2 CCP4 map file")

        symmetry_data = mapfile.read(NSYMBT)
        map_data = mapfile.read(4 * NS * NR * NC)

        if byte_order == '=':
            array = N.fromstring(map_data, N.Float32, NC * NR * NS)
        else:
            array = N.zeros((NS * NR * NC, ), N.Float32)
            index = 0
            while len(map_data) >= 4 * 10000:
                values = struct.unpack(byte_order + '10000f',
                                       map_data[:4 * 10000])
                array[index:index + 10000] = N.array(values, N.Float32)
                index += 10000
                map_data = map_data[4 * 10000:]
            values = struct.unpack(byte_order + '%df' % (len(map_data) / 4),
                                   map_data)
            array[index:] = N.array(values, N.Float32)

        del map_data

        array.shape = (NS, NR, NC)
        self.data = N.transpose(array)

        resolution_x = X * Units.Ang / NX
        resolution_y = Y * Units.Ang / NY
        resolution_z = Z * Units.Ang / NZ

        self.x_axis = (NCSTART + N.arange(NC)) * resolution_x
        self.y_axis = (NRSTART + N.arange(NR)) * resolution_y
        self.z_axis = (NSSTART + N.arange(NS)) * resolution_z

    def __getitem__(self, item):
        if not isinstance(item, tuple) or len(item) != 3:
            raise ValueError("indexation requires three slices")
        sx, sy, sz = item
        if not (isinstance(sx, slice) and isinstance(sy, slice) \
                and isinstance(sz, slice)):
            raise ValueError("indexation requires three slices")
        new_map = DensityMap(None)
        new_map.data = self.data[sx, sy, sz]
        new_map.x_axis = self.x_axis[sx]
        new_map.y_axis = self.y_axis[sy]
        new_map.z_axis = self.z_axis[sz]
        new_map.map = None
        new_map.box = Box(
            Vector(new_map.x_axis[0], new_map.y_axis[0], new_map.z_axis[0]),
            Vector(new_map.x_axis[-1], new_map.y_axis[-1], new_map.z_axis[-1]))
        return new_map

    def normalize(self):
        self.data /= N.sum(N.ravel(self.data))

    def makePositive(self):
        min = N.minimum.reduce(N.ravel(self.data))
        if min < 0:
            nonzero_mask = self.data != 0
            self.data = (self.data - min) * nonzero_mask

    def _makeMapObjects(self):
        if self.map is None:
            self.map = InterpolatingFunction(
                (self.x_axis, self.y_axis, self.z_axis), self.data, 0.)
            self.map_gx = self.map.derivative(0)
            self.map_gy = self.map.derivative(1)
            self.map_gz = self.map.derivative(2)

    def center(self):
        self.map = None
        x_center = N.sum(
            N.ravel(self.x_axis[:, N.NewAxis, N.NewAxis] * self.data))
        y_center = N.sum(
            N.ravel(self.y_axis[N.NewAxis, :, N.NewAxis] * self.data))
        z_center = N.sum(
            N.ravel(self.z_axis[N.NewAxis, N.NewAxis, :] * self.data))
        self.x_axis = self.x_axis - x_center
        self.y_axis = self.y_axis - y_center
        self.z_axis = self.z_axis - z_center
        self.box = Box(
            Vector(self.x_axis[0], self.y_axis[0], self.z_axis[0]),
            Vector(self.x_axis[-1], self.y_axis[-1], self.z_axis[-1]))

    def principalAxes(self):
        r = self._rGrid()
        cm = N.sum(N.sum(N.sum(self.data[..., N.NewAxis] * r)))
        r = r - cm[N.NewAxis, N.NewAxis, N.NewAxis, :]
        nx, ny, nz = self.data.shape
        t = 0.
        for i in range(nx):  # make loops explicit to conserve memory
            for j in range(ny):
                for k in range(nz):
                    t = t + self.data[i, j, k] * r[i, j, k, N.NewAxis, :] * \
                        r[i, j, k, :, N.NewAxis]
        ev, axes = eigenvectors(t)
        return map(lambda a, b: (Vector(a), b), axes, ev)

    def principalPoints(self):
        (ex, vx), (ey, vy), (ez, vz) = self.principalAxes()
        axes = N.array([ex.array, ey.array, ez.array])
        r = self._rGrid()
        cm = N.sum(N.sum(N.sum(self.data[..., N.NewAxis] * r)))
        r = r - cm[N.NewAxis, N.NewAxis, N.NewAxis, :]
        regions = N.greater(N.dot(r, N.transpose(axes)), 0)
        regions = N.sum(N.array([[[[1, 2, 4]]]]) * regions, -1)
        points = []
        for i in range(8):
            mask = N.equal(regions, i)
            weight = N.sum(N.sum(N.sum(mask * self.data)))
            cmr = N.sum(N.sum(N.sum((mask*self.data)[..., N.NewAxis]*r))) \
                  / weight + cm
            points.append(Vector(cmr))
        return points

    def overlap(self, object):
        self._makeMapObjects()
        sum = 0.
        for a in object.atomList():
            x, y, z = a.position()
            sum = sum + self.map(x, y, z)
        return sum

    def gradient(self, object):
        self._makeMapObjects()
        g = ParticleVector(object.universe())
        for a in object.atomList():
            x, y, z = a.position()
            g[a] = Vector(self.map_gx(x, y, z), self.map_gy(x, y, z),
                          self.map_gz(x, y, z))
        return g

    def atomMap(self, object, r0=0.3):
        r = self._rGrid()
        atom_map = N.zeros(self.data.shape, N.Float)
        cutoff = 4. * r0
        for a in object.atomList():
            # An over-eager optimization: it should use
            # an enlarged box
            #if not self.box.enclosesPoint(a.position()):
            #    continue
            ra = a.position().array
            xi1 = N.sum(self.x_axis < ra[0] - cutoff)
            xi2 = N.sum(self.x_axis < ra[0] + cutoff)
            yi1 = N.sum(self.y_axis < ra[1] - cutoff)
            yi2 = N.sum(self.y_axis < ra[1] + cutoff)
            zi1 = N.sum(self.z_axis < ra[2] - cutoff)
            zi2 = N.sum(self.z_axis < ra[2] + cutoff)
            if xi2 > xi1 and yi2 > yi1 and zi2 > zi1:
                dr = r[xi1:xi2, yi1:yi2, zi1:zi2] - \
                     ra[N.NewAxis, N.NewAxis, N.NewAxis, :]
                w = N.exp(-0.5 * N.sum(dr**2, axis=-1) / r0**2)
                lmap = atom_map[xi1:xi2, yi1:yi2, zi1:zi2]
                N.add(lmap, w, lmap)
        N.divide(atom_map, N.sum(N.sum(N.sum(atom_map))), atom_map)
        return atom_map

    def fit(self, object, r0=0.4, w_neg=1.):
        diff = self.atomMap(object, r0) - self.data
        N.multiply(diff, 1. + (w_neg - 1.) * N.less(diff, 0.), diff)
        return N.sum(N.sum(N.sum(diff**2)))

    def fitWithGradient(self, object, r0=0.4, w_neg=1.):
        r = self._rGrid()
        atom_map = N.zeros(self.data.shape, N.Float)
        cutoff = 4. * r0
        for a in object.atomList():
            ra = a.position().array
            xi1 = N.sum(self.x_axis < ra[0] - cutoff)
            xi2 = N.sum(self.x_axis < ra[0] + cutoff)
            yi1 = N.sum(self.y_axis < ra[1] - cutoff)
            yi2 = N.sum(self.y_axis < ra[1] + cutoff)
            zi1 = N.sum(self.z_axis < ra[2] - cutoff)
            zi2 = N.sum(self.z_axis < ra[2] + cutoff)
            if xi2 > xi1 and yi2 > yi1 and zi2 > zi1:
                dr = r[xi1:xi2, yi1:yi2, zi1:zi2] - \
                     ra[N.NewAxis, N.NewAxis, N.NewAxis, :]
                w = N.exp(-0.5 * N.sum(dr**2, axis=-1) / r0**2)
                lmap = atom_map[xi1:xi2, yi1:yi2, zi1:zi2]
                N.add(lmap, w, lmap)
        norm_factor = 1. / N.sum(N.sum(N.sum(atom_map)))
        N.multiply(atom_map, norm_factor, atom_map)
        N.subtract(atom_map, self.data, atom_map)
        weight = 1. + (w_neg - 1.) * N.less(atom_map, 0.)
        N.multiply(atom_map, weight, atom_map)
        g = ParticleVector(object.universe())
        for a in object.atomList():
            ra = a.position().array
            xi1 = N.sum(self.x_axis < ra[0] - cutoff)
            xi2 = N.sum(self.x_axis < ra[0] + cutoff)
            yi1 = N.sum(self.y_axis < ra[1] - cutoff)
            yi2 = N.sum(self.y_axis < ra[1] + cutoff)
            zi1 = N.sum(self.z_axis < ra[2] - cutoff)
            zi2 = N.sum(self.z_axis < ra[2] + cutoff)
            if xi2 > xi1 and yi2 > yi1 and zi2 > zi1:
                dr = r[xi1:xi2, yi1:yi2, zi1:zi2] - \
                     ra[N.NewAxis, N.NewAxis, N.NewAxis, :]
                lmap = atom_map[xi1:xi2, yi1:yi2, zi1:zi2]
                lw = weight[xi1:xi2, yi1:yi2, zi1:zi2]
                w = N.exp(-0.5 * N.sum(dr**2, axis=-1) / r0**2)
                v = N.sum(
                    N.sum(
                        N.sum(lmap[..., N.NewAxis] * lw[..., N.NewAxis] * dr *
                              w[..., N.NewAxis])))
                g[a] = Vector(v)
        return N.sum(N.sum(N.sum(atom_map**2))), -2 * norm_factor * g / r0**2

    def _rGrid(self):
        x = N.add.outer(N.add.outer(self.x_axis, 0 * self.y_axis),
                        0 * self.z_axis)[..., N.NewAxis]
        y = N.add.outer(N.add.outer(0 * self.x_axis, self.y_axis),
                        0 * self.z_axis)[..., N.NewAxis]
        z = N.add.outer(N.add.outer(0 * self.x_axis, 0 * self.y_axis),
                        self.z_axis)[..., N.NewAxis]
        return N.concatenate((x, y, z), axis=-1)

    def clipBelow(self, cutoff):
        mask = self.clipMask(cutoff)
        self.data *= mask
        self.data /= N.add.reduce(N.ravel(self.data))
        self.map = None

    def clipAbove(self, cutoff):
        mask = self.clipMask(cutoff)
        self.data *= (1 - mask)
        self.data /= N.add.reduce(N.ravel(self.data))
        self.map = None

    def clipMask(self, cutoff):
        max_value = N.maximum.reduce(N.ravel(self.data))
        mask = N.greater_equal(self.data, cutoff * max_value)
        gradient_masks = []
        for i in range(len(mask.shape)):
            upper_index = i * index_expression[::] + index_expression[1::]
            lower_index = i * index_expression[::] + index_expression[:-1:]
            gmask1 = N.greater(self.data[upper_index], self.data[lower_index])
            gmask2 = N.greater(self.data[lower_index], self.data[upper_index])
            gradient_masks.append((gmask1, gmask2))
        while 1:
            new_mask = 0 * mask
            for i in range(len(mask.shape)):
                upper_index = i * index_expression[::] + index_expression[1::]
                lower_index = i * index_expression[::] + index_expression[:-1:]
                upper_mask, lower_mask = gradient_masks[i]
                N.logical_or(new_mask[upper_index],
                             N.logical_and(mask[lower_index], lower_mask),
                             new_mask[upper_index])
                N.logical_or(new_mask[lower_index],
                             N.logical_and(mask[upper_index], upper_mask),
                             new_mask[lower_index])
            N.logical_and(new_mask, N.logical_not(mask), new_mask)
            N.logical_or(mask, new_mask, mask)
            if N.sum(N.ravel(new_mask)) == 0:
                break
        return mask

    def dataDump(self, filename):
        outfile = file(filename, 'w')
        data = N.ravel(N.transpose(self.data))
        data = 10000. * data / N.maximum.reduce(data)
        data.shape = (len(data) / 7, 7)
        for line in data:
            for number in line:
                outfile.write('%.1f ' % number)
            outfile.write('\n')
        outfile.close()

    def writeVTK(self, filename):
        import pyvtk
        origin = N.array([self.x_axis[0], self.y_axis[0], self.z_axis[0]])
        spacing = N.array([self.x_axis[1], self.y_axis[1], self.z_axis[1]]) \
                  - origin
        values = pyvtk.Scalars(N.ravel(N.transpose(self.data)),
                               'electron density')
        data = pyvtk.VtkData(
            pyvtk.StructuredPoints(self.data.shape, origin, spacing),
            'Density map', pyvtk.PointData(values))
        data.tofile(filename, format='binary')

    def view(self, gmodule, lower, upper, file=None):
        scene = gmodule.Scene()
        scale = gmodule.ColorScale(1.)
        size = 0.5 * min(self.x_axis[1], self.y_axis[1], self.z_axis[1])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    value = self.data[i, j, k]
                    if value > lower and value < upper:
                        center = Vector(self.x_axis[i], self.y_axis[j],
                                        self.z_axis[k])
                        m = gmodule.Material(diffuse_color=scale(value))
                        object = gmodule.Sphere(center, size, material=m)
                        scene.addObject(object)
        if file is None:
            scene.view()
        else:
            scene.writeToFile(file)

    def viewVMD(self, filename=None, pdb_filename=None):
        run_vmd = 0
        if filename is None:
            filename = tempfile.mktemp()
            run_vmd = 1
        file = open(filename, 'w')
        if pdb_filename is not None:
            file.write('mol load pdb %s\n' % pdb_filename)
        file.write('mol volume top "Electron Density" \\\n')
        x_origin = self.x_axis[0] / Units.Ang
        y_origin = self.y_axis[0] / Units.Ang
        z_origin = self.z_axis[0] / Units.Ang
        x_length = self.x_axis[-1] / Units.Ang - x_origin
        y_length = self.y_axis[-1] / Units.Ang - y_origin
        z_length = self.z_axis[-1] / Units.Ang - z_origin
        file.write('  {%f %f %f} \\\n' % (x_origin, y_origin, z_origin))
        file.write('  {%f 0. 0.} \\\n' % x_length)
        file.write('  {0. %f 0.} \\\n' % y_length)
        file.write('  {0. 0. %f} \\\n' % z_length)
        file.write('  %d %d %d \\\n' % self.data.shape)
        file.write('  {')
        factor = 1. / N.maximum.reduce(N.ravel(self.data))
        for iz in range(self.data.shape[2]):
            for iy in range(self.data.shape[1]):
                for ix in range(self.data.shape[0]):
                    file.write(str(factor * self.data[ix, iy, iz]) + ' ')
        file.write('}\n')
        file.write('mol addrep top\nmol modstyle 1 top isosurface\n')
        if run_vmd:
            file.write('file delete %s\n' % filename)
        file.close()
        if run_vmd:
            os.system('vmd -e ' + filename + ' 1> /dev/null 2>&1')

    def writeXPlor(self, filename):
        from Scientific.IO.FortranFormat import FortranFormat, FortranLine
        file = open(filename, 'w')
        file.write('\n       1 !NTITLE\n')
        file.write('REMARKS Electronic density map\n')
        data = [
            self.data.shape[0], 1, self.data.shape[0], self.data.shape[1], 1,
            self.data.shape[1], self.data.shape[2], 1, self.data.shape[2]
        ]
        file.write(str(FortranLine(data, '9I8')) + '\n')
        x = (self.x_axis[-1] - self.x_axis[0]) / Units.Ang
        y = (self.y_axis[-1] - self.y_axis[0]) / Units.Ang
        z = (self.z_axis[-1] - self.z_axis[0]) / Units.Ang
        data = [x, y, z] + 3 * [90.]
        file.write(str(FortranLine(data, '6E12.5')) + '\n')
        file.write('ZYX\n')
        map_data = N.ravel(self.data)
        map_data = map_data / N.maximum.reduce(map_data)
        average = N.sum(map_data) / len(map_data)
        sd = N.sqrt(N.sum((map_data - average)**2) / len(map_data))
        map_data.shape = self.data.shape
        for i in range(self.data.shape[2]):
            file.write(str(FortranLine([i], 'I8')) + '\n')
            data = list(N.ravel(N.transpose(map_data[:, :, i])))
            while data:
                file.write(str(FortranLine(data[:6],
                                           '%dE12.5' % min(6, len(data)))) \
                           + '\n')
                data = data[6:]
        file.write(str(FortranLine([-9999], 'I8')) + '\n')
        file.write(str(FortranLine([average, sd], '2(E12.4,1X)')) + '\n')
        file.close()
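A hedged usage sketch for the export methods above, assuming `emap` is an instance of the density-map class they belong to (the name is illustrative):

# Hypothetical usage; `emap` stands for an instance of the class above.
emap.writeVTK('density.vtk')            # binary VTK structured-points file
emap.writeXPlor('density.xplor')        # formatted X-PLOR map
emap.viewVMD(pdb_filename='model.pdb')  # generate and run a VMD script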
Example n. 26
def get_ld_grid(photband, **kwargs):
    """
    Retrieve an interpolating grid for the LD coefficients
    
    Check outcome:
    
    #>>> bands = ['GENEVA.U', 'GENEVA.B', 'GENEVA.G', 'GENEVA.V']
    #>>> f_ld_grid = get_ld_grid(bands)
    #>>> ff = pyfits.open(_atmos['file'])
    #>>> all(ff['GENEVA.U'].data[257][2:]==f_ld_grid(ff['GENEVA.U'].data[257][0],ff['GENEVA.U'].data[257][1])[0:5])
    #True
    #>>> all(ff['GENEVA.G'].data[257][2:]==f_ld_grid(ff['GENEVA.G'].data[257][0],ff['GENEVA.G'].data[257][1])[10:15])
    #True
    #>>> ff.close()
    
    #Make some plots:
    
    #>>> photband = ['GENEVA.V']
    #>>> f_ld = get_ld_grid(photband)
    #>>> logg = 4.0
    #>>> mu = linspace(0,1,100)
    #>>> p = figure()
    #>>> p = gcf().canvas.set_window_title('test of function <get_ld_grid>')
    #>>> for teff in linspace(9000,12000,19):
    #...    out = f_ld(teff,logg)
    #...    a1x,a2x,a3x,a4x, I_x1 = out.reshape((len(photband),5)).T
    #...    p = subplot(221);p = title('Interpolation of absolute intensities')
    #...    p = plot(teff,I_x1,'ko')
    #...    p = subplot(222);p = title('Interpolation of LD coefficients')
    #...    p = scatter(4*[teff],[a1x,a2x,a3x,a4x],c=range(4),vmin=0,vmax=3,cmap=cm.spectral,edgecolors='none')
    #...    p = subplot(223);p = title('Without absolute intensity')
    #...    p = plot(mu,ld_eval(mu,[a1x,a2x,a3x,a4x]),'-')
    #...    p = subplot(224);p = title('With absolute intensity')
    #...    p = plot(mu,I_x1*ld_eval(mu,[a1x,a2x,a3x,a4x]),'-')    
    
    """
    #-- retrieve the grid points (unique values)
    teffs, loggs = get_ld_grid_dimensions(**kwargs)
    #-- np.unique1d is the pre-NumPy-1.4 name for np.unique
    teffs_grid = np.sort(np.unique1d(teffs))
    loggs_grid = np.sort(np.unique1d(loggs))
    coeff_grid = np.zeros(
        (len(teffs_grid), len(loggs_grid), 5 * len(photband)))

    #-- get the FITS-file containing the tables
    gridfile = get_file(**kwargs)
    #-- fill the grid
    ff = pyfits.open(gridfile)
    for pp, iband in enumerate(photband):
        teffs = ff[iband].data.field('Teff')
        loggs = ff[iband].data.field('logg')
        for ii, (iteff, ilogg) in enumerate(zip(teffs, loggs)):
            indext = np.searchsorted(teffs_grid, iteff)
            indexg = np.searchsorted(loggs_grid, ilogg)
            #-- array and list are added for backwards compatibility with some
            #   pyfits versions
            coeff_grid[indext, indexg, 5 * pp:5 * (pp + 1)] = np.array(
                list(ff[iband].data[ii]))[2:]
    ff.close()
    #-- make an interpolating function
    f_ld_grid = InterpolatingFunction([teffs_grid, loggs_grid], coeff_grid)
    return f_ld_grid
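A minimal usage sketch, reusing the band names from the commented-out doctest above; the grid kwargs are left at their defaults and the evaluation point is illustrative:

bands = ['GENEVA.U', 'GENEVA.B', 'GENEVA.G', 'GENEVA.V']
f_ld = get_ld_grid(bands)
coeffs = f_ld(10000., 4.0)       # evaluate at Teff = 10000 K, logg = 4.0
a1, a2, a3, a4, I0 = coeffs[:5]  # four LD coefficients + intensity, first band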
Example n. 27
import pandas as pd
import numpy as np
from Scientific.Functions.Interpolation import InterpolatingFunction

df_EC = pd.read_csv("EC.csv")

df_DL = pd.read_csv("DL.csv")

# grid axes: three points along each dimension
gt = np.arange(1, 4, 1)       # 1, 2, 3
g1 = np.arange(20, 55, 15)    # 20, 35, 50
g2 = np.arange(20, 55, 15)    # 20, 35, 50
WCU = np.arange(2, 7, 2)      # defined but not used below
temp = df_EC.iloc[0:27]
# .as_matrix() was removed in pandas 1.0; .values is the equivalent
temp2 = temp.values
# the 27 rows fill the 3x3x3 (gt, g1, g2) mesh
data = temp2.reshape(len(gt), len(g1), len(g2))

axes = (gt,g1,g2)
print(axes)
print(data)
f = InterpolatingFunction(axes, data)

print(f(2.5,25.5,40.5))
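If Scientific (a Python 2-era package) is not available, the same regular-grid lookup can be reproduced with SciPy's RegularGridInterpolator; a self-contained sketch with synthetic data standing in for EC.csv:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

gt = np.arange(1, 4, 1)
g1 = np.arange(20, 55, 15)
g2 = np.arange(20, 55, 15)
data = np.arange(27.0).reshape(len(gt), len(g1), len(g2))  # stand-in values

f = RegularGridInterpolator((gt, g1, g2), data)
print(f([2.5, 25.5, 40.5]))  # one point, passed as a sequence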
Example n. 28
    def incoherentScatteringFunction(self,
                                     q,
                                     time_range=(0., None, None),
                                     subset=None,
                                     random_vectors=15,
                                     first_mode=6):
        """
        :param q: the angular wavenumber
        :type q: float
        :param time_range: the time values at which the scattering
                           function is evaluated, specified as a
                           range tuple (first, last, step).
                           The defaults are first=0, last = three times
                           the longest relaxation time, and step defined
                           such that 300 points are used in total.
        :type time_range: tuple
        :param subset: the subset of the universe used in the calculation
                       (default: the whole universe)
        :type subset: :class:`~MMTK.Collections.GroupOfAtoms`
        :param random_vectors: the number of random direction vectors
                               used in the orientational average
        :type random_vectors: int
        :param first_mode: the first mode to be taken into account for
                           the fluctuation calculation. The default value
                           of 6 is right for molecules in vacuum.
        :type first_mode: int
        :returns: the Incoherent Scattering Function as a function of time
        :rtype: Scientific.Functions.Interpolation.InterpolatingFunction
        """
        if subset is None:
            subset = self.universe
        mask = subset.booleanMask()
        weights_inc = self.universe.getParticleScalar('b_incoherent')
        weights_inc = N.repeat(weights_inc.array**2, mask.array)
        weights_inc = weights_inc / N.add.reduce(weights_inc)
        friction = N.repeat(self.friction.array, mask.array)
        mass = N.repeat(self.universe.masses().array, mask.array)
        r = N.repeat(self.universe.configuration().array, mask.array)

        first, last, step = (time_range + (None, None))[:3]
        if last is None:
            last = 3. / self.weighedMode(first_mode).inv_relaxation_time
        if step is None:
            step = (last - first) / 300.
        time = N.arange(first, last, step)

        natoms = subset.numberOfAtoms()
        kT = Units.k_B * self.temperature
        finc = N.zeros((len(time), ), N.Float)
        eisf = 0.
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            phase = N.exp(-1.j * q * N.dot(r, v.array))
            faat = N.zeros((natoms, len(time)), N.Float)
            eisf_sum = N.zeros((natoms, ), N.Float)
            for i in range(first_mode, self.nmodes):
                irt = self.rawMode(i).inv_relaxation_time
                d = q * N.repeat((self.rawMode(i)*v).array, mask.array) \
                    / N.sqrt(friction)
                ft = (N.exp(-irt * time) - 1.) / irt
                N.add(faat, d[:, N.NewAxis]**2 * ft[N.NewAxis, :], faat)
                N.add(eisf_sum, -d**2 / irt, eisf_sum)
            N.add(finc, N.sum(weights_inc[:, N.NewAxis] * N.exp(kT * faat), 0),
                  finc)
            eisf = eisf + N.sum(weights_inc * N.exp(kT * eisf_sum))
        return InterpolatingFunction((time, ), finc / len(random_vectors))
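A hypothetical sketch of the calling pattern, assuming `modes` is an MMTK Brownian-modes object exposing the method above; the q value and time range are illustrative:

# Hypothetical usage; `modes` is assumed to be built elsewhere with MMTK.
from MMTK import Units
fqt = modes.incoherentScatteringFunction(5./Units.nm,
                                         time_range=(0., 10.*Units.ps, None))
print fqt(5.*Units.ps)  # evaluate F_inc(q, t) at t = 5 ps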
Example n. 29
    def coherentScatteringFunction(self,
                                   q,
                                   time_range=(0., None, None),
                                   subset=None,
                                   weights=None,
                                   random_vectors=15,
                                   first_mode=6):
        """
        :param q: the angular wavenumber
        :type q: float
        :param time_range: the time values at which the scattering
                           function is evaluated, specified as a
                           range tuple (first, last, step).
                           The defaults are first=0, last = three times
                           the longest relaxation time, and step defined
                           such that 300 points are used in total.
        :type time_range: tuple
        :param subset: the subset of the universe used in the calculation
                       (default: the whole universe)
        :type subset: :class:`~MMTK.Collections.GroupOfAtoms`
        :param weights: the weight to be given to each atom in the average
                        (default: coherent scattering lengths)
        :type weights: :class:`~MMTK.ParticleProperties.ParticleScalar`
        :param random_vectors: the number of random direction vectors
                               used in the orientational average
        :type random_vectors: int
        :param first_mode: the first mode to be taken into account for
                           the fluctuation calculation. The default value
                           of 6 is right for molecules in vacuum.
        :type first_mode: int
        :returns: the Coherent Scattering Function as a function of time
        :rtype: Scientific.Functions.Interpolation.InterpolatingFunction
        """
        if subset is None:
            subset = self.universe
        if weights is None:
            weights = self.universe.getParticleScalar('b_coherent')
        mask = subset.booleanMask()
        weights = N.repeat(weights.array, mask.array)
        weights = weights / N.sqrt(N.add.reduce(weights * weights))
        friction = N.repeat(self.friction.array, mask.array)
        r = N.repeat(self.universe.configuration().array, mask.array)

        first, last, step = (time_range + (None, None))[:3]
        if last is None:
            last = 3. / self.rawMode(first_mode).inv_relaxation_time
        if step is None:
            step = (last - first) / 300.
        time = N.arange(first, last, step)

        natoms = subset.numberOfAtoms()
        kT = Units.k_B * self.temperature
        fcoh = N.zeros((len(time), ), N.Complex)
        random_vectors = Random.randomDirections(random_vectors)
        for v in random_vectors:
            phase = N.exp(-1.j * q * N.dot(r, v.array))
            for ai in range(natoms):
                fbt = N.zeros((natoms, len(time)), N.Float)
                for i in range(first_mode, self.nmodes):
                    irt = self.rawMode(i).inv_relaxation_time
                    d = q * N.repeat((self.rawMode(i)*v).array, mask.array) \
                        / N.sqrt(friction)
                    ft = N.exp(-irt * time) / irt
                    N.add(fbt, d[ai] * d[:, N.NewAxis] * ft[N.NewAxis, :], fbt)
                    N.add(fbt, (-0.5 / irt) * (d[ai]**2 + d[:, N.NewAxis]**2),
                          fbt)
                N.add(
                    fcoh, weights[ai] * phase[ai] *
                    N.dot(weights * N.conjugate(phase), N.exp(kT * fbt)), fcoh)
        return InterpolatingFunction((time, ), fcoh.real / len(random_vectors))
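The returned object is again an InterpolatingFunction, so it supports the same operations as the 1-D examples elsewhere in this document; a hypothetical follow-up, with `modes` and Units as in the previous sketch:

fcoh = modes.coherentScatteringFunction(5./Units.nm,
                                        time_range=(0., 10.*Units.ps, None))
print fcoh(0.)                 # F_coh(q, 0)
print fcoh.definiteIntegral()  # integral over the whole time grid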
Example n. 30
# D (a second-order automatic-differentiation number) and somefunc are
# defined earlier in the full script this example was extracted from.
x = D(10, index=0, order=2)
y = D(0, index=1, order=2)
z = D(1, index=2, order=2)
r = somefunc(x, y, z)
print r
# (40, [3, -1, 20], [[0, 0, 0], [0, 0, 0], [0, 0, 20]])
print "d^2(somefunc)/dzdx:", r[2][2][0]  # 0
print "d^2(somefunc)/dz^2:", r[2][2][2]  # 20

print "\n\ntesting interpolation:"
from Scientific.Functions.Interpolation import InterpolatingFunction as Ip
from numpy import linspace, sin, cos, pi
import os, sys

t = linspace(0, 10, 101)
v = sin(t)
vi = Ip((t,), v)
# interpolate and compare with exact result:
print "interpolated:", vi(5.05), " exact:", sin(5.05)
# interpolate the derivative of v:
vid = vi.derivative()
print "interpolated derivative:", vid(5.05), " exact:", cos(5.05)
# compute the integral of v over all t values:
print "definite integral:", vi.definiteIntegral(), " exact:", -cos(t[-1]) - (-cos(t[0]))

# add path to Grid2D (for testing interpolation on a 2D grid):
sys.path.insert(0, os.path.join(os.environ["scripting"], "src", "py", "examples"))
from Grid2D import Grid2D

g = Grid2D(dx=0.1, dy=0.2)
f = g(lambda x, y: sin(pi * x) * sin(pi * y))
fi = Ip((g.xcoor, g.ycoor), f)
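The 2-D interpolant built on the last line is evaluated like the 1-D one, with one argument per axis; a short continuation, where the point is arbitrary and assumes the grid covers the unit square:

x, y = 0.51, 0.42
print 'interpolated:', fi(x, y), ' exact:', sin(pi*x)*sin(pi*y)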
Example n. 31
def get_grid_mesh(wave=None, teffrange=None, loggrange=None, **kwargs):
    """
    Return InterpolatingFunction spanning the available grid of spectrum models.
    
    WARNING: the grid must be entirely defined on a mesh, but it does not
    need to be equidistant.
    
    It is thus the user's responsibility to know whether the grid is evenly
    spaced in logg and teff.
    
    You can supply your own wavelength range, since the resolution of the
    grid models is not necessarily homogeneous. If you do not, the first
    wavelength array found in the grid is used as a template.
        
    It may take a long time and a lot of memory to load the entire grid, so
    you can also restrict the range of temperature and gravity.
    
    @param wave: wavelength to define the grid on
    @type wave: ndarray
    @param teffrange: starting and ending of the grid in teff
    @type teffrange: tuple of floats
    @param loggrange: starting and ending of the grid in logg
    @type loggrange: tuple of floats
    @return: wavelengths, teffs, loggs and fluxes of the grid, plus the
    interpolating functions for the flux and the continuum
    @rtype: (1Darray,1Darray,1Darray,3Darray,InterpolatingFunction,InterpolatingFunction)
    """
    """
    #-- get the dimensions of the grid
    teffs, loggs = get_grid_dimensions(**kwargs)
    #-- build flux grid, assuming a perfectly sampled grid (needs not to be
    #   equidistant)
    if teffrange is not None:
        sa = (teffrange[0] <= teffs) & (teffs <= teffrange[1])
        teffs = teffs[sa]
    if loggrange is not None:
        sa = (loggrange[0] <= loggs) & (loggs <= loggrange[1])
        loggs = loggs[sa]
    #-- keep the unique grid values, sorted
    teffs = np.sort(list(set(teffs)))
    loggs = np.sort(list(set(loggs)))
    if wave is not None:
        flux = np.ones((len(teffs), len(loggs), len(wave)))
        cont = np.ones((len(teffs), len(loggs), len(wave)))
    #-- run over teff and logg, and interpolate the models onto the supplied
    #   wavelength range
    gridfile = get_file(**kwargs)
    ff = pf.open(gridfile)
    for i, teff in enumerate(teffs):
        for j, logg in enumerate(loggs):
            try:
                mod_name = "T%05d_logg%01.02f" % (teff, logg)
                mod = ff[mod_name]
                wave_ = mod.data.field(
                    'wavelength')  #array(mod.data.tolist())[:,0]
                flux_ = mod.data.field('flux')  #array(mod.data.tolist())[:,1]
                cont_ = mod.data.field('cont')  #array(mod.data.tolist())[:,1]
                #-- if there is no wavelength range given, we assume that
                #   the whole grid has the same resolution, and the first
                #   wave-array will be used as a template
                if wave is None:
                    wave = wave_
                    flux = np.ones((len(teffs), len(loggs), len(wave)))
                    cont = np.ones((len(teffs), len(loggs), len(wave)))
            except KeyError:
                continue
            #-- it could be that we're lucky and the grid is completely
            #   homogeneous. In that case, there is no need for interpolation
            try:
                flux[i, j, :] = flux_
                cont[i, j, :] = cont_
            except ValueError:
                #-- shape mismatch: resample onto the template wavelength grid
                flux[i, j, :] = np.interp(wave, wave_, flux_)
                cont[i, j, :] = np.interp(wave, wave_, cont_)
    flux_grid = InterpolatingFunction([np.log10(teffs), loggs], flux)
    cont_grid = InterpolatingFunction([np.log10(teffs), loggs], cont)
    #logger.info('Constructed spectrum interpolation grid')
    return wave, teffs, loggs, flux, flux_grid, cont_grid
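Note that the interpolating functions are built on log10(teff), so they must be evaluated on that scale; a hedged usage sketch with illustrative values, assuming the default grid kwargs and numpy imported as np, as in the function body:

wave, teffs, loggs, flux, flux_grid, cont_grid = get_grid_mesh()
spectrum = flux_grid(np.log10(10000.), 4.0)   # axes are (log10 Teff, logg)
continuum = cont_grid(np.log10(10000.), 4.0)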