Example #1
def mk_image(galaxy):
    base = './../../images_v5/GS_2.5as_matched/gs_all_'

    i_img = pyf.getdata(base+str(galaxy)+'_I.fits')
    j_img = pyf.getdata(base+str(galaxy)+'_J.fits')
    h_img = pyf.getdata(base+str(galaxy)+'_H.fits')

    #include 99% of pixels
    x = pyl.hstack(i_img)
    i_lim = scoreatpercentile(x,99)
    x = pyl.hstack(j_img)
    j_lim = scoreatpercentile(x,99)
    x = pyl.hstack(h_img)
    h_lim = scoreatpercentile(x,99)

    print(galaxy, i_lim, j_lim, h_lim)

    img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
    img[:,:,0] = img_scale.asinh(h_img, scale_min=-0.1*h_lim, scale_max=h_lim,
            non_linear=0.5)
    img[:,:,1] = img_scale.asinh(j_img, scale_min=-0.1*j_lim, scale_max=j_lim,
            non_linear=0.5)
    img[:,:,2] = img_scale.asinh(i_img, scale_min=-0.1*i_lim, scale_max=i_lim,
            non_linear=0.5)

    return img
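The snippet above assumes several imports that the excerpt does not show. A minimal sketch of what they would plausibly be (the aliases pyf and pyl, and the img_scale module, are guesses based on common astronomy conventions, not confirmed by the source):

import pylab as pyl
from astropy.io import fits as pyf       # older code: import pyfits as pyf
from scipy.stats import scoreatpercentile
import img_scale                         # hypothetical: a module providing an asinh() scaler

rgb = mk_image('12345')                  # hypothetical galaxy ID
pyl.imshow(rgb, origin='lower')
pyl.show()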
Example #2
def mk_image(galaxy):
    base = './../../images_v5/GS_2.5as_matched/gs_all_'

    i_img = pyf.getdata(base + str(galaxy) + '_I.fits')
    j_img = pyf.getdata(base + str(galaxy) + '_J.fits')
    h_img = pyf.getdata(base + str(galaxy) + '_H.fits')

    #include 99% of pixels
    x = pyl.hstack(i_img)
    i_lim = scoreatpercentile(x, 99)
    x = pyl.hstack(j_img)
    j_lim = scoreatpercentile(x, 99)
    x = pyl.hstack(h_img)
    h_lim = scoreatpercentile(x, 99)

    print(galaxy, i_lim, j_lim, h_lim)

    img = pyl.zeros((h_img.shape[0], h_img.shape[1], 3), dtype=float)
    img[:, :, 0] = img_scale.asinh(h_img,
                                   scale_min=-0.1 * h_lim,
                                   scale_max=h_lim,
                                   non_linear=0.5)
    img[:, :, 1] = img_scale.asinh(j_img,
                                   scale_min=-0.1 * j_lim,
                                   scale_max=j_lim,
                                   non_linear=0.5)
    img[:, :, 2] = img_scale.asinh(i_img,
                                   scale_min=-0.1 * i_lim,
                                   scale_max=i_lim,
                                   non_linear=0.5)

    return img
Example #3
    def _shapeStim(self,
                   isi=1,
                   variation=0,
                   width=0.05,
                   weight=10,
                   start=0,
                   finish=1,
                   stimshape='gaussian'):
        from pylab import r_, convolve, shape, exp, zeros, hstack, array, rand

        # Create event times
        timeres = 0.001  # Time resolution = 1 ms = 1000 Hz
        pulselength = 10  # Length of pulse in units of width
        currenttime = 0
        timewindow = finish - start
        allpts = int(timewindow / timeres)
        output = []
        while currenttime < timewindow:
            # Note: The timeres/2 subtraction acts as an eps to avoid later int rounding errors.
            if currenttime >= 0 and currenttime < timewindow - timeres / 2:
                output.append(currenttime)
            currenttime = currenttime + isi + variation * (rand() - 0.5)

        # Create single pulse
        npts = int(pulselength * width / timeres)
        x = (r_[0:npts] - npts / 2 + 1) * timeres
        if stimshape == 'gaussian':
            pulse = exp(-2 * (2 * x / width - 1)**
                        2)  # Offset by 2 standard deviations from start
            pulse = pulse / max(pulse)
        elif stimshape == 'square':
            pulse = zeros(shape(x))
            pulse[int(npts / 2):int(npts / 2) +
                  int(width / timeres)] = 1  # Start exactly on time
        else:
            raise Exception('Stimulus shape "%s" not recognized' % stimshape)

        # Create full stimulus
        events = zeros((allpts))
        events[array(array(output) / timeres, dtype=int)] = 1
        fulloutput = convolve(
            events, pulse, mode='full'
        ) * weight  # Calculate the convolved input signal, scaled by rate
        fulloutput = fulloutput[npts // 2 - 1:-npts // 2]
        # Slice out where the convolved pulse train extends before and after the sequence of allpts.
        fulltime = (r_[0:allpts] * timeres +
                    start) * 1e3  # Create time vector and convert to ms

        fulltime = hstack(
            (0, fulltime, fulltime[-1] + timeres *
             1e3))  # Create "bookends" so always starts and finishes at zero
        fulloutput = hstack(
            (0, fulloutput,
             0))  # Set weight to zero at either end of the stimulus period
        events = hstack((0, events, 0))  # Ditto
        stimvecs = deepcopy([fulltime, fulloutput,
                             events])  # Combine vectors into a matrix

        return stimvecs
Example #4
	def sigma_vectors(self,x,P):

		"""
		generates sigma vectors

		Arguments
		----------
		x : matrix
			state at time instant t
		P:  matrix
			state covariance matrix at time instant t

		Returns
		----------
		Chi : matrix
			matrix of sigma points
		"""
		State_covariance_cholesky=sp.linalg.cholesky(P).T
		State_covariance_cholesky_product=self.gamma_sigma_points*State_covariance_cholesky		
		chi_plus=[]
		chi_minus=[]
		for i in range(self.L):
			chi_plus.append(x+State_covariance_cholesky_product[:,i].reshape(self.L,1)) 
			chi_minus.append(x-State_covariance_cholesky_product[:,i].reshape(self.L,1)) 

		Chi=pb.hstack((x,pb.hstack((pb.hstack(chi_plus),pb.hstack(chi_minus))))) 
		return pb.matrix(Chi)
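The loop above builds the 2L+1 sigma points of the unscented transform: the state itself plus the state shifted by each scaled column of the covariance Cholesky factor. A standalone NumPy sketch of the same construction (gamma and the dimension L stand in for the class attributes; an assumption, not the original class):

import numpy as np

def sigma_points(x, P, gamma):
    # x: (L, 1) state, P: (L, L) covariance, gamma: sigma-point scaling
    S = gamma * np.linalg.cholesky(P)       # lower-triangular factor of P
    return np.hstack([x, x + S, x - S])     # (L, 2L + 1): [x, x+, x-]

Chi = sigma_points(np.zeros((3, 1)), np.eye(3), gamma=np.sqrt(3.0))
print(Chi.shape)                            # (3, 7)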
Example #5
 def dy_Stance(self, t, y, pars, return_force = False):
     """
     This is the ode function that is passed to the solver. Internally, it calls:
         legfunc1 - force of leg 1 (overwrite for new models)
         legfunc2 - force of leg 2 (overwrite for new models)
     
     :args:
         t (float): simulation time
         y (6x float): CoM state
         pars (dict): parameters, will be passed to legfunc1 and legfunc2.
             must also include 'foot1' (3x float), 'foot2' (3x float), 'm' (float)
             and 'g' (3x float) indicating the feet positions, mass and direction of
             gravity, respectively.
         return_force (bool, default: False): return [F_leg1, F_leg2] (6x
             float) instead of dy/dt.
     """
     
     f1 = max(self.legfunc1(t, y, pars), 0) # only push
     l1 = norm(array(y[:3]) - array(pars['foot1']))
     f1_vec = (array(y[:3]) - array(pars['foot1'])) / l1 * f1
     f2 = max(self.legfunc2(t, y, pars), 0) # only push
     l2 = norm(array(y[:3]) - array(pars['foot2']))
     f2_vec = (array(y[:3]) - array(pars['foot2'])) / l2 * f2
     if return_force:
         return hstack([f1_vec, f2_vec])
     return hstack([y[3:], (f1_vec + f2_vec) / pars['m'] + pars['g']])
Example #6
    def dy_Stance(self, t, y, pars, return_force=False):
        """
        This is the ode function that is passed to the solver. Internally, it calls:
            legfunc1 - force of leg 1 (overwrite for new models)
            legfunc2 - force of leg 2 (overwrite for new models)
        
        :args:
            t (float): simulation time
            y (6x float): CoM state
            pars (dict): parameters, will be passed to legfunc1 and legfunc2.
                must also include 'foot1' (3x float), 'foot2' (3x float), 'm' (float)
                and 'g' (3x float) indicating the feet positions, mass and direction of
                gravity, respectively.
            return_force (bool, default: False): return [F_leg1, F_leg2] (6x
                float) instead of dy/dt.
        """

        f1 = max(self.legfunc1(t, y, pars), 0)  # only push
        l1 = norm(array(y[:3]) - array(pars['foot1']))
        f1_vec = (array(y[:3]) - array(pars['foot1'])) / l1 * f1
        f2 = max(self.legfunc2(t, y, pars), 0)  # only push
        l2 = norm(array(y[:3]) - array(pars['foot2']))
        f2_vec = (array(y[:3]) - array(pars['foot2'])) / l2 * f2
        if return_force:
            return hstack([f1_vec, f2_vec])
        return hstack([y[3:], (f1_vec + f2_vec) / pars['m'] + pars['g']])
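A hedged usage sketch: dy_Stance has a (t, y, ...) signature, while scipy's odeint expects f(y, t, ...), so the arguments need swapping. The model object, foot positions, and parameter values below are placeholders, not values from the source:

from numpy import array, linspace
from scipy.integrate import odeint

pars = {'foot1': array([0.0, 0.0, -0.1]),
        'foot2': array([0.5, 0.0, 0.1]),
        'm': 80.0,
        'g': array([0.0, -9.81, 0.0])}
y0 = array([0.0, 1.0, 0.0, 3.0, 0.0, 0.0])   # CoM position and velocity
t = linspace(0.0, 0.2, 201)
traj = odeint(lambda y, t: model.dy_Stance(t, y, pars), y0, t)  # model: hypothetical instance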
Example #7
    def extrude_mesh(self, l, z_offset):
        # accepts the number of layers and the length of extrusion

        mesh = self.mesh

        # Extrude vertices
        all_coords = []
        for i in linspace(0, z_offset, l):
            all_coords.append(
                hstack((mesh.coordinates(), i * ones((self.n_v2, 1)))))
        self.global_vertices = vstack(all_coords)

        # Extrude cells (tris to tetrahedra)
        for i in range(l - 1):
            for c in self.mesh.cells():
                # Make a prism out of 2 stacked triangles
                vertices = hstack((c + i * self.n_v2, c + (i + 1) * self.n_v2))

                # Determine prism orientation
                smallest_vertex_index = argmin(vertices)

                # Map to I-ordering of Dompierre et al.
                mapping = self.indirection_table[smallest_vertex_index]

                # Determine which subdivision scheme to use.
                if min(vertices[mapping][[1, 5]]) < min(
                        vertices[mapping][[2, 4]]):
                    local_tets = vstack((vertices[mapping][[0,1,2,5]],\
                                         vertices[mapping][[0,1,5,4]],\
                                         vertices[mapping][[0,4,5,3]]))
                else:
                    local_tets = vstack((vertices[mapping][[0,1,2,4]],\
                                         vertices[mapping][[0,4,2,5]],\
                                         vertices[mapping][[0,4,5,3]]))
                # Concatenate local tet to cell array
                self.global_tets = vstack((self.global_tets, local_tets))

        # Eliminate phantom initialization tet
        self.global_tets = self.global_tets[1:, :]

        # Query number of vertices and tets in new mesh
        self.n_verts = self.global_vertices.shape[0]
        self.n_tets = self.global_tets.shape[0]

        # Initialize new dolfin mesh of dimension 3
        self.new_mesh = Mesh()
        m = MeshEditor()
        m.open(self.new_mesh, 3, 3)
        m.init_vertices(self.n_verts, self.n_verts)
        m.init_cells(self.n_tets, self.n_tets)

        # Copy vertex data into new mesh
        for i, v in enumerate(self.global_vertices):
            m.add_vertex(i, Point(*v))

        # Copy cell data into new mesh
        for j, c in enumerate(self.global_tets):
            m.add_cell(j, *c)

        m.close()
Example #8
	def sigma_vectors(self,x,P):

		"""
		generator for the sigma vectors

		Arguments
		----------
		x : ndarray
			state at time instant t
		P:  ndarray
			state covariance matrix at time instant t

		Returns
		----------
		Xi : ndarray
			matrix of sigma points, each column is a sigma vector: [x0 x0+ x0-];nx by 2nx+1
		"""
		Pc=sp.linalg.cholesky(P,lower=1)
		Weighted_Pc=self.gamma_sigma_points*Pc		
		Xi_plus=[]
		Xi_minus=[]
		for i in range(self.nx):
			Xi_plus.append(x+Weighted_Pc[:,i].reshape(self.nx,1)) #list of ndarray with length nx
			Xi_minus.append(x-Weighted_Pc[:,i].reshape(self.nx,1)) #list of ndarray with length nx

		Xi=pb.hstack((x,pb.hstack((pb.hstack(Xi_plus),pb.hstack(Xi_minus))))) 
		return Xi
Example #9
def simCSLIP_xp(x0, x0R, x0L, p0R, p0L, AR, AL, SLIP_param0, n=50):
    """
    simulates the controlled 2step-SLIP, using [x,p]-referenced control
    input:
        x0 - initial (augmented) state, e.g. [x0L, p0R].T
        x0R - reference right apex (y, vx, vz)
        x0L - reference left apex     -"-
        p0R - reference right parameters
        p0L - reference left parameters
        AR - parameter control right leg
        AL - parameter control left leg
        SLIP_param0: dict, containing {'m': ..., 'g': ... }
        n - number of strides to simulate at most
    """
    res = []
    refStateL = hstack([x0L, squeeze(sp_d2a(p0R))])[:,newaxis]
    refStateR = hstack([x0R, squeeze(sp_d2a(p0L))])[:,newaxis]
    currState = array(x0)
    slip_params = copy.deepcopy(SLIP_param0)
    if currState.ndim == 1:
        currState = currState[:,newaxis]
    elif currState.shape[0] == 1:
        currState = currState.T
    for step in range(n):
        #print 'AL: ', AL.shape, 'p0L: ', sp_d2a(p0L).shape
        pL = sp_d2a(p0L) + dot(AL, currState - refStateL)
        #print 'pL changed:', not allclose(pL,sp_d2a(p0L))
        slip_params.update(sp_a2d(pL))
        try:
            resL = sl.SLIP_step3D(currState[:3,0], slip_params)
        except ValueError:
            print('simulation aborted (l1)\n')
            break
        if resL['sim_fail']:
            print('simulation aborted (l2)\n')
            break
        res.append(resL)
        currState = hstack([resL['y'][-1],
                            resL['vx'][-1],
                            resL['vz'][-1],
                            squeeze(pL)])[:,newaxis]
        pR = sp_d2a(p0R) + dot(AR, currState - refStateR)
        #print 'pR changed:', not allclose(pR,sp_d2a(p0R))
        slip_params.update(sp_a2d(pR))
        try:
            resR = sl.SLIP_step3D(currState[:3,0], slip_params)
        except ValueError:
            print('simulation aborted (r1)\n')
            break
        if resR['sim_fail']:
            print('simulation aborted (r2)\n')
            break
        res.append(resR)
        currState = hstack([resR['y'][-1],
                            resR['vx'][-1],
                            resR['vz'][-1],
                            squeeze(pR)])[:,newaxis]
    return res
Example #10
def int_f(a, fs=1.):
    """
    A fourier-based integrator.

    ===========
    Parameters:
    ===========
    a : *array* (1D)
        The array which should be integrated
    fs : *float*
        sampling time of the data

    ========
    Returns:
    ========
    y : *array* (1D)
        The integrated array

    """

    if False:
        # version with "mirrored" code
        xp = hstack([a, a[::-1]])
        int_fluc = int_f0(xp, float(fs))[:len(a)]
        baseline = mean(a) * arange(len(a)) / float(fs)
        return int_fluc + baseline - int_fluc[0]

    # old version
    baseline = mean(a) * arange(len(a)) / float(fs)
    int_fluc = int_f0(a, float(fs))
    return int_fluc + baseline - int_fluc[0]

    # old code - remove eventually (comment on 02/2014)
    # periodify
    if False:
        baseline = linspace(a[0], a[-1], len(a))
        a0 = a - baseline
        m = a0[-1] - a0[-2]
        b2 = linspace(0, -.5 * m, len(a))
        baseline -= b2
        a0 += b2
        a2 = hstack([a0, -1. * a0[1:][::-1]])  # "smooth" periodic signal

        dbase = baseline[1] - baseline[0]
        t_vec = arange(len(a)) / float(fs)
        baseint = baseline[0] * t_vec + .5 * dbase * t_vec**2

        # define frequencies
        T = len(a2) / float(fs)
        freqs = 1. / T * arange(len(a2))
        freqs[len(freqs) // 2 + 1:] -= float(fs)

        spec = fft.fft(a2)
        spec_i = zeros_like(spec, dtype=complex)
        spec_i[1:] = spec[1:] / (2j * pi * freqs[1:])
        res_int = fft.ifft(spec_i).real[:len(a0)] + baseint
        return res_int - res_int[0]
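int_f0 is referenced but never shown in these excerpts. Consistent with the commented-out block above (which divides the spectrum by 2j*pi*f), a plausible minimal implementation might look like the following; this is an assumption, not the original function:

from numpy import fft, pi, zeros_like

def int_f0(a, fs=1.):
    # integrate a (roughly zero-mean) signal in the Fourier domain
    freqs = fft.fftfreq(len(a), d=1. / fs)
    spec = fft.fft(a)
    spec_i = zeros_like(spec, dtype=complex)
    spec_i[1:] = spec[1:] / (2j * pi * freqs[1:])   # skip the DC bin
    return fft.ifft(spec_i).real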
Example #11
def int_f(a, fs=1.):
    """
    A fourier-based integrator.

    ===========
    Parameters:
    ===========
    a : *array* (1D)
        The array which should be integrated
    fs : *float*
        sampling time of the data

    ========
    Returns:
    ========
    y : *array* (1D)
        The integrated array

    """

    if False:
        # version with "mirrored" code
        xp = hstack([a, a[::-1]])
        int_fluc = int_f0(xp, float(fs))[:len(a)]
        baseline = mean(a) * arange(len(a)) / float(fs)
        return int_fluc + baseline - int_fluc[0]
    
    # old version
    baseline = mean(a) * arange(len(a)) / float(fs)
    int_fluc = int_f0(a, float(fs))
    return int_fluc + baseline - int_fluc[0]

    # old code - remove eventually (comment on 02/2014)
    # periodify
    if False:
        baseline = linspace(a[0], a[-1], len(a))
        a0 = a - baseline
        m = a0[-1] - a0[-2]
        b2 = linspace(0, -.5 * m, len(a))
        baseline -= b2
        a0 += b2
        a2 = hstack([a0, -1. * a0[1:][::-1]]) # "smooth" periodic signal  

        dbase = baseline[1] - baseline[0]
        t_vec = arange(len(a)) / float(fs)
        baseint = baseline[0] * t_vec + .5 * dbase * t_vec ** 2
        
        # define frequencies
        T = len(a2) / float(fs)
        freqs = 1. / T * arange(len(a2))
        freqs[len(freqs) // 2 + 1 :] -= float(fs)

        spec = fft.fft(a2)
        spec_i = zeros_like(spec, dtype=complex)
        spec_i[1:] = spec[1:] / (2j * pi* freqs[1:])
        res_int = fft.ifft(spec_i).real[:len(a0)] + baseint
        return res_int - res_int[0]
Example #12
  def extrude_mesh(self,l,z_offset):
    # accepts the number of layers and the length of extrusion

    # Extrude vertices
    all_coords = []
    for i in linspace(0,z_offset,l):
      all_coords.append(hstack((mesh.coordinates(),i*ones((self.n_v2,1)))))
    self.global_vertices = vstack(all_coords)

    # Extrude cells (tris to tetrahedra)
    for i in range(l-1):
      for c in self.mesh.cells():
        # Make a prism out of 2 stacked triangles
        vertices = hstack((c+i*self.n_v2,c+(i+1)*self.n_v2))

        # Determine prism orientation
        smallest_vertex_index = argmin(vertices)

        # Map to I-ordering of Dompierre et al.
        mapping = self.indirection_table[smallest_vertex_index]

        # Determine which subdivision scheme to use.
        if min(vertices[mapping][[1,5]]) < min(vertices[mapping][[2,4]]):
          local_tets = vstack((vertices[mapping][[0,1,2,5]],\
                               vertices[mapping][[0,1,5,4]],\
                               vertices[mapping][[0,4,5,3]]))
        else:
          local_tets = vstack((vertices[mapping][[0,1,2,4]],\
                               vertices[mapping][[0,4,2,5]],\
                               vertices[mapping][[0,4,5,3]]))
        # Concatenate local tet to cell array
        self.global_tets = vstack((self.global_tets,local_tets))

    # Eliminate phantom initialization tet
    self.global_tets = self.global_tets[1:,:]

    # Query number of vertices and tets in new mesh
    self.n_verts = self.global_vertices.shape[0]
    self.n_tets = self.global_tets.shape[0]

    # Initialize new dolfin mesh of dimension 3
    self.new_mesh = Mesh()
    m = MeshEditor()
    m.open(self.new_mesh,3,3)
    m.init_vertices(self.n_verts,self.n_verts)
    m.init_cells(self.n_tets,self.n_tets)

    # Copy vertex data into new mesh
    for i,v in enumerate(self.global_vertices):
      m.add_vertex(i,Point(*v))

    # Copy cell data into new mesh
    for j,c in enumerate(self.global_tets):
      m.add_cell(j,*c)

    m.close()
Example #13
def simCSLIP_xp(x0, x0R, x0L, p0R, p0L, AR, AL, SLIP_param0, n=50):
    """
    simulates the controlled 2step-SLIP, using [x,p]-referenced control
    input:
        x0 - initial (augmented) state, e.g. [x0L, p0R].T
        x0R - reference right apex (y, vx, vz)
        x0L - reference left apex     -"-
        p0R - reference right parameters
        p0L - reference left parameters
        AR - parameter control right leg
        AL - parameter control left leg
        SLIP_param0: dict, containing {'m': ..., 'g': ... }
        n - number of strides to simulate at most
    """
    res = []
    refStateL = hstack([x0L, squeeze(sp_d2a(p0R))])[:, newaxis]
    refStateR = hstack([x0R, squeeze(sp_d2a(p0L))])[:, newaxis]
    currState = array(x0)
    slip_params = copy.deepcopy(SLIP_param0)
    if currState.ndim == 1:
        currState = currState[:, newaxis]
    elif currState.shape[0] == 1:
        currState = currState.T
    for step in range(n):
        #print 'AL: ', AL.shape, 'p0L: ', sp_d2a(p0L).shape
        pL = sp_d2a(p0L) + dot(AL, currState - refStateL)
        #print 'pL changed:', not allclose(pL,sp_d2a(p0L))
        slip_params.update(sp_a2d(pL))
        try:
            resL = sl.SLIP_step3D(currState[:3, 0], slip_params)
        except ValueError:
            print('simulation aborted (l1)\n')
            break
        if resL['sim_fail']:
            print('simulation aborted (l2)\n')
            break
        res.append(resL)
        currState = hstack(
            [resL['y'][-1], resL['vx'][-1], resL['vz'][-1],
             squeeze(pL)])[:, newaxis]
        pR = sp_d2a(p0R) + dot(AR, currState - refStateR)
        #print 'pR changed:', not allclose(pR,sp_d2a(p0R))
        slip_params.update(sp_a2d(pR))
        try:
            resR = sl.SLIP_step3D(currState[:3, 0], slip_params)
        except ValueError:
            print('simulation aborted (r1)\n')
            break
        if resR['sim_fail']:
            print('simulation aborted (r2)\n')
            break
        res.append(resR)
        currState = hstack(
            [resR['y'][-1], resR['vx'][-1], resR['vz'][-1],
             squeeze(pR)])[:, newaxis]
    return res
Example #14
def makestim(isi=1,
             variation=0,
             width=0.05,
             weight=10,
             start=0,
             finish=1,
             stimshape='gaussian'):
    from pylab import r_, convolve, shape, exp, zeros, hstack, array, rand

    # Create event times
    timeres = 0.005  # Time resolution = 5 ms = 200 Hz
    pulselength = 10  # Length of pulse in units of width
    currenttime = 0
    timewindow = finish - start
    allpts = int(timewindow / timeres)
    output = []
    while currenttime < timewindow:
        if currenttime >= 0 and currenttime < timewindow:
            output.append(currenttime)
        currenttime = currenttime + isi + variation * (rand() - 0.5)

    # Create single pulse
    npts = min(pulselength * width / timeres,
               allpts)  # Calculate the number of points to use
    x = (r_[0:npts] - npts / 2 + 1) * timeres
    if stimshape == 'gaussian':
        pulse = exp(-(x / width * 2 - 2)**
                    2)  # Offset by 2 standard deviations from start
        pulse = pulse / max(pulse)
    elif stimshape == 'square':
        pulse = zeros(shape(x))
        pulse[int(npts / 2):int(npts / 2) +
              int(width / timeres)] = 1  # Start exactly on time
    else:
        raise Exception('Stimulus shape "%s" not recognized' % stimshape)

    # Create full stimulus
    events = zeros((allpts))
    events[array(array(output) / timeres, dtype=int)] = 1
    fulloutput = convolve(
        events, pulse, mode='same'
    ) * weight  # Calculate the convolved input signal, scaled by rate
    fulltime = (r_[0:allpts] * timeres +
                start) * 1e3  # Create time vector and convert to ms
    fulltime = hstack(
        (0, fulltime, fulltime[-1] + timeres *
         1e3))  # Create "bookends" so always starts and finishes at zero
    fulloutput = hstack(
        (0, fulloutput,
         0))  # Set weight to zero at either end of the stimulus period
    events = hstack((0, events, 0))  # Ditto
    stimvecs = [fulltime, fulloutput, events]  # Combine vectors into a matrix

    return stimvecs
Example #15
def getPeriodicOrbit(statesL,
                     T_L,
                     ymin_L,
                     statesR,
                     T_R,
                     ymin_R,
                     baseParams,
                     startParams=[14000, 1.16, 1, 0.]):
    """
    returns a tuple of SLIP parameters that result in the two-step periodic
    solution defined by <statesL> -> <statesR> -> <statesL>,
    with step time left (right) = <T_L> (<T_R>)
    minimal vertical position left (right) = <ymin_L> (<ymin_R>)
    statesL/R: a list of (left/right) apex states y, vx, vz
    baseParams: dict of base SLIP parameters: g, m (gravity acceleration, mass)
    
    returns: [SL, paramsL, dEL], [SR, paramsR, dER] 
             two tuples of initial apex states and corresponding SLIP
             parameters that yield the two-step periodic solution
             (dE: energy fluctuation)
        
    """
    SL = mean(vstack(statesL), axis=0) if len(statesL) > 1 else statesL
    SR = mean(vstack(statesR), axis=0) if len(statesR) > 1 else statesR
    tr = mean(hstack(T_R))
    tl = mean(hstack(T_L))
    yminl = mean(hstack(ymin_L))
    yminr = mean(hstack(ymin_R))
    m = baseParams['m']
    g = baseParams['g']
    # energy input right (left) step
    dER = (SL[0] - SR[0]) * m * abs(g) + .5 * m * (SL[1]**2 + SL[2]**2 -
                                                   SR[1]**2 - SR[2]**2)
    dEL = -dER

    # initialize parameters
    PR = copy.deepcopy(baseParams)
    PL = copy.deepcopy(baseParams)
    PL['IC'] = SL
    PL['dE'] = dEL
    PR['IC'] = SR
    PR['dE'] = dER

    # define step params: (y_apex2, T, y_min, vz_apex2)
    spL = (SR[0], tl, yminl, SR[2])
    spR = (SL[0], tr, yminr, SL[2])

    # compute necessary model parameters
    paramsL = fl.calcSlipParams3D(spL, PL, startParams)
    paramsR = fl.calcSlipParams3D(spR, PR, startParams)

    return ([SL, paramsL, dEL], [SR, paramsR, dER])
Example #16
def getPeriodicOrbit(statesL, T_L, ymin_L,
                     statesR, T_R, ymin_R,
                     baseParams ,
                     startParams=[14000, 1.16, 1, 0.] ):
    """
    returns a tuple of SLIP parameters that result in the two-step periodic
    solution defined by <statesL> -> <statesR> -> <statesL>,
    with step time left (right) = <T_L> (<T_R>)
    minimal vertical position left (right) = <ymin_L> (<ymin_R>)
    statesL/R: a list of (left/right) apex states y, vx, vz
    baseParams: dict of base SLIP parameters: g, m (gravity acceleration, mass)
    
    returns: [SL, paramsL, dEL], [SR, paramsR, dER] 
             two tuples of initial apex states and corresponding SLIP
             parameters that yield the two-step periodic solution
             (dE: energy fluctuation)
        
    """    
    SL = mean(vstack(statesL), axis=0) if len(statesL) > 1 else statesL
    SR = mean(vstack(statesR), axis=0) if len(statesR) > 1 else statesR
    tr = mean(hstack(T_R))
    tl = mean(hstack(T_L))
    yminl = mean(hstack(ymin_L))
    yminr = mean(hstack(ymin_R))
    m = baseParams['m']
    g = baseParams['g']
    # energy input right (left) step
    dER = (SL[0]-SR[0])*m*abs(g) + .5*m*(SL[1]**2 + SL[2]**2 
                                       - SR[1]**2 - SR[2]**2)
    dEL = -dER

    # initialize parameters
    PR = copy.deepcopy( baseParams )
    PL = copy.deepcopy( baseParams )
    PL['IC'] = SL    
    PL['dE'] = dEL
    PR['IC'] = SR
    PR['dE'] = dER
    
    # define step params: (y_apex2, T, y_min, vz_apex2)
    spL = (SR[0], tl, yminl, SR[2])
    spR = (SL[0], tr, yminr, SL[2])
    
    # compute necessary model parameters
    paramsL = fl.calcSlipParams3D(spL, PL, startParams)
    paramsR = fl.calcSlipParams3D(spR, PR, startParams)
    
    
    return ([SL, paramsL, dEL],[SR, paramsR, dER])
Example #17
def stackSimRes(simRes):
    """
    input: a *list* of single steps
    returns: an array that contains the complete gait (consecutive time & way)
    """
    resDat = []
    res_t = []
    for part in simRes:
        if len(resDat) == 0:
            res_t.append(part['t'])
            resDat.append(
                vstack([
                    part['x'],
                    part['y'],
                    part['z'],
                    part['vx'],
                    part['vy'],
                    part['vz'],
                ]).T)
        else:
            res_t.append(part['t'][1:] + res_t[-1][-1])
            # compensate x and z translation
            resDat.append(
                vstack([
                    part['x'][1:] + resDat[-1][-1, 0],
                    part['y'][1:],
                    part['z'][1:] + resDat[-1][-1, 2],
                    part['vx'][1:],
                    part['vy'][1:],
                    part['vz'][1:],
                ]).T)
    return hstack(res_t), vstack(resDat)
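A hedged usage example with two fabricated step dictionaries, only to show how consecutive steps are stitched together (the keys follow exactly what the function reads; the numbers are arbitrary):

from numpy import hstack, vstack, linspace, zeros, ones

step = {'t': linspace(0, 0.4, 5), 'x': linspace(0, 1, 5),
        'y': linspace(1, 1.1, 5), 'z': zeros(5),
        'vx': 2.5 * ones(5), 'vy': zeros(5), 'vz': zeros(5)}
t, dat = stackSimRes([step, step])
print(t.shape, dat.shape)   # (9,) (9, 6): the second step drops its duplicated first frame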
Example #18
File: misc.py Project: MMaus/mutils
 def my_medfilt(data, tailLength):
     """
     returns the median-filtered data; edges are "extrapolated" (constant)
     """
     data1 = hstack([data[tailLength:0:-1], data, data[-tailLength:]])
     out = medfilt(data1, 2 * tailLength + 1)
     return out[tailLength:-tailLength]
Example #19
File: misc.py Project: MMaus/mutils
 def my_medfilt(data, tailLength):
     """
     returns the median-filtered data; edges are "extrapolated" (constant)
     """
     data1 = hstack([data[tailLength:0:-1], data, data[-tailLength:]])
     out = medfilt(data1, 2 * tailLength + 1)
     return out[tailLength:-tailLength]
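A short usage sketch, assuming hstack comes from numpy/pylab and medfilt from scipy.signal (where a function with this name and signature lives):

from numpy import hstack, linspace, sin
from scipy.signal import medfilt

noisy = sin(linspace(0, 6.28, 100))
noisy[::10] += 1.0                        # inject outlier spikes
smooth = my_medfilt(noisy, tailLength=3)  # kernel size 2*3 + 1 = 7
print(smooth.shape)                       # (100,): same length, spikes suppressed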
Example #20
def createSimilarAR(data):
    """
    creates an AR-process that is similar to a given data set.
    data must be given in n x d-format
    """
    # step 1: get "average" fit matrix
    l_A = []
    for rep in arange(100):
        idx = randint(0,data.shape[0]-1,data.shape[0]-1)
        idat = data[idx,:]
        odat = data[idx+1,:]
        l_A.append(lstsq(idat,odat)[0])

    sysmat = meanMat(l_A).T
    # idea: get "dynamic noise" from input data as difference of
    # expected vs. predicted data:
    # eta_i = (sysmat*(data[:,i-1]).T - data[:,i])
    # however, in order to destroy any possible correlations in the
    # input noise (they would also occur in the output), the
    # noise per section has to be permuted.
    prediction = dot(sysmat,data[:-1,:].T)
    dynNoise = data[1:,:].T - prediction
    res = [zeros((dynNoise.shape[0],1)), ]
    for nidx in permutation(dynNoise.shape[1]):
        res.append( dot(sysmat,res[-1]) + dynNoise[:,nidx][:,newaxis] )
    
    return hstack(res).T
Example #21
 def generatePath(self, T):
     """
     T: time period
     r: rate of return
     sigma: standard deviation
     dt: time steps
     drift: mean movement price
     zn: array of random numbers with dimension(nPaths, nSteps)
     """
     assert (T > 0), 'Time needs to be a positive number'
     try:
         S0 = self.initialPrice
         r = self.rateOfReturn
         sigma = self.stdev
         nPaths = self.nPaths
         nSteps = self.nSteps
         dt = T/float(nSteps)
         drift = r-0.5*(sigma**2)
         zn = pylab.randn(nPaths, nSteps)
         zn = np.vstack((zn, -zn))
         S = pylab.zeros((nPaths, nSteps))
         start = S0*pylab.ones((2*nPaths, 1))
         next = S0*pylab.cumprod(pylab.exp(drift*dt +
                                 sigma*pylab.sqrt(dt)*zn), 1)
     except ValueError:
         return 'Please check the value of the initial parameters.'
     return pylab.hstack((start, next))
Example #22
def createSimilarAR(data):
    """
    creates an AR-process that is similar to a given data set.
    data must be given in n x d-format
    """
    # step 1: get "average" fit matrix
    l_A = []
    for rep in arange(100):
        idx = randint(0, data.shape[0] - 1, data.shape[0] - 1)
        idat = data[idx, :]
        odat = data[idx + 1, :]
        l_A.append(lstsq(idat, odat)[0])

    sysmat = meanMat(l_A).T
    # idea: get "dynamic noise" from input data as difference of
    # expected vs. predicted data:
    # eta_i = (sysmat*(data[:,i-1]).T - data[:,i])
    # however, in order to destroy any possible correlations in the
    # input noise (they would also occur in the output), the
    # noise per section has to be permuted.
    prediction = dot(sysmat, data[:-1, :].T)
    dynNoise = data[1:, :].T - prediction
    res = [
        zeros((dynNoise.shape[0], 1)),
    ]
    for nidx in permutation(dynNoise.shape[1]):
        res.append(dot(sysmat, res[-1]) + dynNoise[:, nidx][:, newaxis])

    return hstack(res).T
Example #23
    def SLIP_ode(y,t,params):
        """
        defines the ODE of the SLIP, under stance condition
        state: 
            [x
             y
             z
             vx
             vy
             vz]
        params:
            {'L0' : leg rest length
             'x0' : leg touchdown position
             'k'  : spring stiffness
             'm'  : mass
             'xF' : anterior foot position
             'zF' : lateral foot position
             'g'  : gravitational acceleration }
        """

        dy0 = y[3]
        dy1 = y[4]
        dy2 = y[5]
        L = sqrt((y[0]-params['xF'])**2 + y[1]**2 + (y[2]-params['zF'])**2)
        F = params['k']*(params['L0']-L)
        Fx = F*(y[0]-params['xF'])/L
        Fy = F*y[1]/L
        Fz = F*(y[2]-params['zF'])/L
        dy3 = Fx/params['m']  # mass comes from params, as the docstring specifies
        dy4 = Fy/params['m'] + params['g']
        dy5 = Fz/params['m']
        return hstack([dy0,dy1,dy2,dy3,dy4,dy5])
Example #24
    def SLIP_ode(y, t, params):
        """
        defines the ODE of the SLIP, under stance condition
        state: 
            [x
             y
             z
             vx
             vy
             vz]
        params:
            {'L0' : leg rest length
             'x0' : leg touchdown position
             'k'  : spring stiffness
             'm'  : mass
             'xF' : anterior foot position
             'zF' : lateral foot position
             'g'  : gravitational acceleration }
        """

        dy0 = y[3]
        dy1 = y[4]
        dy2 = y[5]
        L = sqrt((y[0] - params['xF'])**2 + y[1]**2 + (y[2] - params['zF'])**2)
        F = params['k'] * (params['L0'] - L)
        Fx = F * (y[0] - params['xF']) / L
        Fy = F * y[1] / L
        Fz = F * (y[2] - params['zF']) / L
        dy3 = Fx / params['m']  # mass comes from params, as the docstring specifies
        dy4 = Fy / params['m'] + params['g']
        dy5 = Fz / params['m']
        return hstack([dy0, dy1, dy2, dy3, dy4, dy5])
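Since this version takes (y, t, params) in exactly the order scipy.integrate.odeint expects, a hedged usage sketch is straightforward; the parameter values below are placeholders:

from numpy import hstack, sqrt, linspace
from scipy.integrate import odeint

params = {'L0': 1.0, 'x0': 0.0, 'k': 14000.0, 'm': 80.0,
          'g': -9.81, 'xF': 0.0, 'zF': 0.0}
y0 = [0.0, 0.95, 0.0, 3.0, 0.0, 0.0]   # slightly compressed leg at stance onset
t = linspace(0.0, 0.05, 51)
traj = odeint(SLIP_ode, y0, t, args=(params,))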
Example #25
def stackSimRes(simRes):
    """
    input: a *list* of single steps
    returns: an array that contains the complete gait (consecutive time & way)
    """
    resDat = []
    res_t = []
    for part in simRes:
        if len(resDat) == 0:
            res_t.append(part['t'])
            resDat.append(vstack( [ part['x'],
                                    part['y'],
                                    part['z'],
                                    part['vx'],
                                    part['vy'],
                                    part['vz'],
                                    ]).T)
        else:
            res_t.append(part['t'][1:] + res_t[-1][-1])
            # compensate x and z translation
            resDat.append(vstack( [ part['x'][1:] + resDat[-1][-1,0],
                                    part['y'][1:],
                                    part['z'][1:] + resDat[-1][-1,2],
                                    part['vx'][1:],
                                    part['vy'][1:],
                                    part['vz'][1:],
                                    ]).T)
    return hstack(res_t), vstack(resDat)
Example #26
    def SVMAF(self,freq,n,l):
        #Apply the SVMAF filter to the material parameters
        runningMean=lambda x,N: py.hstack((x[:N-1],
            py.convolve(x,py.ones((N,))/N,mode='same')[N-1:-N+1],
            x[(-N+1):]))
        #calculate the moving average of 3 points
        n_smoothed=runningMean(n,3)
        #evaluate H_smoothed from n_smoothed
        H_smoothed=self.H_theory(freq,[n_smoothed.real,n_smoothed.imag],l)
        
        H_r=H_smoothed.real
        H_i=H_smoothed.imag
        f=1
        #the uncertainty margins
        lb_r=self.H.getFReal()-self.H.getFRealUnc()*f
        lb_i=self.H.getFImag()-self.H.getFImagUnc()*f
        ub_r=self.H.getFReal()+self.H.getFRealUnc()*f
        ub_i=self.H.getFImag()+self.H.getFImagUnc()*f
        
        #ix = all indices for which, after smoothing n, H is still in between the bounds
        ix=py.all([H_r>=lb_r,H_r<ub_r,H_i>=lb_i,H_i<ub_i],axis=0)
        #don't have a good idea at the moment, so manually:
        for i in range(len(n_smoothed)):
            if ix[i]==0:
                n_smoothed[i]=n[i]
        print("SVMAF changed the refractive index at " + str(sum(ix)) + " frequencies")
        return n_smoothed      
Example #27
 def getDiff(t, y):
     """ returns the difference in desired params """
     delta = [T - t[-1],
              ymin - min(y[:,1]),
              #FS - y[-1,[1,3,5]],
              FS[[0,2]] - y[-1,[1,5]]
              ]
     return hstack(delta)
Example #28
def sp_d2a(params):
    """
    transforms a given SLIP parameter set (dict) into an array
    """
    return hstack([
        params['k'], params['alpha'], params['L0'], params['beta'],
        params['dE']
    ])[:, newaxis]
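sp_a2d, used as the inverse mapping elsewhere in these examples (e.g. in simCSLIP_xp), is not shown. A sketch consistent with the key order above, offered as an assumption rather than the original:

from numpy import squeeze

def sp_a2d(arr):
    # inverse of sp_d2a: map a (5, 1) parameter array back to a dict
    keys = ['k', 'alpha', 'L0', 'beta', 'dE']
    return dict(zip(keys, squeeze(arr)))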
Example #29
    def _sample_posteriors(self,true_N_A,p_A,N_samples):
        true_N_B = self.N_u-true_N_A  
        N_values = pl.shape(true_N_A)[0]
        posteriors = pl.zeros((N_samples,N_values))
        for i in range(N_samples):
            for (j,(t_N_A,t_N_B)) in enumerate(zip(true_N_A,true_N_B)):
                A_given_A = pl.ones(t_N_A)*self.p_uA_given_A
                A_given_B = pl.ones(t_N_B)*self.p_uA_given_B
                A_probs = pl.hstack((A_given_A,A_given_B))
                B_given_A = pl.ones(t_N_A)*self.p_uB_given_A
                B_given_B = pl.ones(t_N_B)*self.p_uB_given_B
                B_probs = pl.hstack((B_given_A,B_given_B))

                N_A = pl.sum(A_probs>pl.rand(self.N_u))
                N_B = pl.sum(B_probs>pl.rand(self.N_u))

                posteriors[i,j] = self._p_A_given_N_A(N_A,N_B)
         
        return pl.mean(posteriors,0)
Example #30
    def _sample_posteriors(self, true_N_A, p_A, N_samples):
        true_N_B = self.N_u - true_N_A
        N_values = pl.shape(true_N_A)[0]
        posteriors = pl.zeros((N_samples, N_values))
        for i in range(N_samples):
            for (j, (t_N_A, t_N_B)) in enumerate(zip(true_N_A, true_N_B)):
                A_given_A = pl.ones(t_N_A) * self.p_uA_given_A
                A_given_B = pl.ones(t_N_B) * self.p_uA_given_B
                A_probs = pl.hstack((A_given_A, A_given_B))
                B_given_A = pl.ones(t_N_A) * self.p_uB_given_A
                B_given_B = pl.ones(t_N_B) * self.p_uB_given_B
                B_probs = pl.hstack((B_given_A, B_given_B))

                N_A = pl.sum(A_probs > pl.rand(self.N_u))
                N_B = pl.sum(B_probs > pl.rand(self.N_u))

                posteriors[i, j] = self._p_A_given_N_A(N_A, N_B)

        return pl.mean(posteriors, 0)
Example #31
def pos_from_xml(mesh, results):

    # read files into dolfin format
    mesh = dolf.Mesh(mesh)
    Q = dolf.FunctionSpace(mesh, 'CG', 1)
    field = dolf.Function(Q)
    dolf.File(results) >> field
    results = results.rstrip("xml") + "pos"
    output = open(results, 'w')
    cell_type = mesh.type().cell_type()
    nodes = mesh.coordinates()
    n_nodes = mesh.num_vertices()
    nodes = p.hstack(
        (nodes, p.zeros((n_nodes, 3 - p.shape(mesh.coordinates())[1]))))
    f = lambda a: [field(a[0], a[1])]
    nodes = p.hstack((nodes, list(map(f, nodes))))
    cells = mesh.cells()
    n_cells = mesh.num_cells()

    # write each triangular element and the values at each corner
    output.write("View \"NodalValues\" { \n")
    for ii, cell in enumerate(cells):

        # ST(first vertex of the triangular element, second, third){value at first, second, third};\n
        output.write(
            "ST({0:g},{1:g},{2:g},{3:g},{4:g},{5:g},{6:g},{7:g},{8:g}){{{9:g},{10:g},{11:g}}};\n"
            .format(nodes[cell[0]][0], nodes[cell[0]][1], nodes[cell[0]][2],
                    nodes[cell[1]][0], nodes[cell[1]][1], nodes[cell[1]][2],
                    nodes[cell[2]][0], nodes[cell[2]][1], nodes[cell[2]][2],
                    nodes[cell[0]][3], nodes[cell[1]][3], nodes[cell[2]][3]))

    output.write("};")

    output.close()

    # to get the rounding on the numbers right we must open it and re-save in gmsh
    output = open('temp.geo', 'w')
    output.write("Include '{0}'; \n".format(results))
    output.close()
    print "please click on post processing view and hit save"
    s.call(['gmsh', 'temp.geo'])
    os.remove('temp.geo')
Example #32
def block_hankel(data, f):
    """
    Create a block hankel matrix.
    f : number of rows
    """
    data = pl.matrix(data)
    assert len(data.shape) == 2
    n = data.shape[1] - f
    return pl.matrix(pl.hstack([
        pl.vstack([data[:, i+j] for i in range(f)])
        for j in range(n)]))
Example #33
def block_hankel(data, f):
    """
    Create a block hankel matrix.
    f : number of rows
    """
    data = np.matrix(data)
    assert len(data.shape) == 2
    n = data.shape[1] - f
    return np.matrix(pl.hstack([
        pl.vstack([data[:, i+j] for i in range(f)])
        for j in range(n)]))
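A quick usage example showing the resulting shape, assuming the snippet's np/pl imports are in scope:

import numpy as np

data = np.arange(10).reshape(1, 10)   # one signal row, ten samples
H = block_hankel(data, f=3)
print(H.shape)    # (3, 7): f stacked shifts of the signal, n = 10 - 3 columns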
Example #34
 def __insert_used_time(self, utcdatetime):
     """
     Inserts the given UTCDateTime at the right position in the list keeping
     the order intact.
     Add sorting of self.psd and self.spikes arrays
     """
     if len(self.times_used) > 1:
         i = hstack((array(self.times_used), utcdatetime)).argsort()
         self.psd = self.psd[i]
         self.spikes = self.spikes[i]
     bisect.insort(self.times_used, utcdatetime)
Example #35
 def getWindowedData(self,windowlength_time=1e-12):
     #returns the blackmanwindowed tdData
     N=int(windowlength_time/self.dt)
     w=py.blackman(N*2)
     w=py.hstack((w[0:N],py.ones((self.getLength()-N*2),),w[N:]))
     windowedData=self.tdData
     #this could be written more elegantly?!
     windowedData[:,1]*=w
     windowedData[:,2]*=w
     windowedData[:,3]*=w
     windowedData[:,4]*=w
     return windowedData
Example #36
def computeDistances(positions, ranked_bonds_ids):
    """Compute all the distances using the map
    Argument(s):
        positions {np.ndarray} -- Array of the positions of the atoms of the molecules. Dimension(s) should be in (n_frames, n_molecules, n_atoms_per_molecule, 3).
        ranked_bonds_ids {np.ndarray} -- Array containing all the distance pairs at a specific rank. Dimension(s) should be in (n_distances, 2).
    Output(s):
        distances {np.ndarray} -- Array of the distances of the atoms of the molecules. Dimension(s) are in (n_frames, n_molecules, n_distances).
    """

    # Reshape the position array
    moleculeNbr = positions.shape[1]
    atomNbr = positions.shape[2]
    positions = np.reshape(positions,
                           (positions.shape[0], moleculeNbr * atomNbr, 3))

    # Loop over all the frames
    all_distances = []
    for frame in tqdm(range(positions.shape[0]),
                      desc='Computing distances...'):

        # Loop over all the distance pairs
        a = arange(0)
        b = arange(0)
        for (i, j) in ranked_bonds_ids:
            a = hstack((a, (i + arange(moleculeNbr) * atomNbr)))
            b = hstack((b, (j + arange(moleculeNbr) * atomNbr)))

        # Calculate the distances and return the resulting array
        vectdist = (positions[frame][a] - positions[frame][b])

        all_distances.append((vectdist**2).sum(axis=1)**0.5)

    # Reshape the distance array
    all_distances = np.array(all_distances)
    all_distances = np.reshape(
        all_distances,
        (all_distances.shape[0], ranked_bonds_ids.shape[0], moleculeNbr))
    all_distances = np.swapaxes(all_distances, 1, 2)

    return all_distances
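A hedged usage sketch with random coordinates, mainly to document the expected shapes (the function itself assumes numpy, tqdm, and pylab-style arange/hstack in scope):

import numpy as np

pos = np.random.rand(5, 4, 3, 3)      # 5 frames, 4 molecules, 3 atoms each, xyz
pairs = np.array([[0, 1], [0, 2]])    # two intramolecular atom pairs
d = computeDistances(pos, pairs)
print(d.shape)                        # (5, 4, 2): frames, molecules, distances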
Example #37
def jetWoGn(reverse=False):
    """
    jetWoGn(reverse=False)
       - returning a colormap similar to cm.jet, but without green.
         if reverse=True, the map starts with red instead of blue.
    """
    m=18 # magic number, which works fine
    m0=int(m*0.0)
    m1=int(m*0.2)
    m2=int(m*0.2)
    m3=int(m/2)-m2-m1

    b_ = pylab.hstack( (0.4*pylab.arange(m1)/(m1-1.)+0.6, pylab.ones((m2+m3,)) ) )
    g_ = pylab.hstack( (pylab.zeros((m1,)),pylab.arange(m2)/(m2-1.),pylab.ones((m3,))) )
    r_ = pylab.hstack( (pylab.zeros((m1,)),pylab.zeros((m2,)),pylab.arange(m3)/(m3-1.)))

    r = pylab.hstack((r_,pylab.flipud(b_)))
    g = pylab.hstack((g_,pylab.flipud(g_)))
    b = pylab.hstack((b_,pylab.flipud(r_)))

    if reverse:
        r = pylab.flipud(r)
        g = pylab.flipud(g)
        b = pylab.flipud(b)

    ra = pylab.linspace(0.0,1.0,m)

    cdict = {'red': list(zip(ra,r,r)),
            'green': list(zip(ra,g,g)),
            'blue': list(zip(ra,b,b))}

    return LinearSegmentedColormap('new_RdBl',cdict,256)
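A minimal usage sketch (the function assumes pylab and matplotlib's LinearSegmentedColormap are already imported, as in the project it was taken from):

import pylab
from numpy.random import rand

cmap = jetWoGn(reverse=False)
pylab.imshow(rand(32, 32), cmap=cmap)
pylab.colorbar()
pylab.show()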
Example #38
File: misc.py Project: MMaus/mutils
def twoD_oneD(data2D,nps):
    """
    transforms the 2D format into the 1D-format used here
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]
    """    
    data1D = vstack([hstack(data2D[:,x*nps:(x+1)*nps]) for x in range(data2D.shape[1]//nps)])
    return data1D
Example #39
File: misc.py Project: MMaus/mutils
def oneD_twoD(data1D,nps):
    """
    transforms the 1D-format used here into the 2D-format
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]    
    """   
    ncoords = data1D.shape[1]//nps
    data2D = vstack([hstack(data1D[:,nps*x:nps*(x+1)]) for x in range(ncoords)])
    return data2D
Example #40
File: helper.py Project: pf4d/cslvr
def write_gmsh(mesh,path):
	"""
	This function iterates through the mesh and writes a file to the specified
	path

	Args:

	  :mesh: Mesh that is to be written to a file
	  :path: Path to write the mesh file to

	"""
	output = open(path,'w')

	cell_type = mesh.type().cell_type()

	nodes = mesh.coordinates()
	n_nodes = mesh.num_vertices()

	nodes = pl.hstack((nodes,pl.zeros((n_nodes,3 - pl.shape(mesh.coordinates())[1]))))

	cells = mesh.cells()
	n_cells = mesh.num_cells()

	output.write("$MeshFormat\n" +
	      "2.2 0 8\n" +
	       "$EndMeshFormat\n" +
	       "$Nodes \n" +
	       "{0:d}\n".format(n_nodes))

	for ii,node in enumerate(nodes):
		output.write("{0:d} {1:g} {2:g} {3:g}\n".format(ii+1,node[0],node[1],node[2]))

	output.write("$EndNodes\n")

	output.write("$Elements\n" +
	        "{0:d}\n".format(n_cells))

	for ii,cell in enumerate(cells):
		#if cell_type == 1:
		#  output.write("{0:d} 1 0 {1:d} {2:d}\n".format(ii+1,int(cell[0]+1),int(cell[1]+1)))
		if cell_type == 2:
			output.write("{0:d} 2 0 {1:d} {2:d} {3:d}\n".format(ii+1,int(cell[0]+1),int(cell[1]+1),int(cell[2]+1)))
		#elif cell_type == 3:
		#  output.write("{0:d} 4 0 {1:d} {2:d} {3:d} {4:d}\n".format(ii+1,int(cell[0]+1),int(cell[1]+1),int(cell[2]+1),int(cell[3]+1)))
		else:
			print("Unknown cell type")

	output.write("$EndElements\n")
	output.close()
Example #41
def gen_cities_avg(climate, multi_cities, years):
    """
    Compute the average annual temperature over multiple cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to average over (list of str)
        years: the range of years of the yearly averaged temperature (list of
            int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the average annual temperature over the given
        cities for a given year.
    """
    yearly_avgs = pylab.array([])
    for year in years:
        cities_temps = pylab.array([])
        for city in multi_cities:
            city_temps = climate.get_yearly_temp(city, year)
            cities_temps = pylab.hstack((cities_temps, city_temps))
        yearly_avg = pylab.average(cities_temps)
        yearly_avgs = pylab.hstack((yearly_avgs, yearly_avg))
    return yearly_avgs
Example #42
File: misc.py Project: MMaus/mutils
def oneD_twoD(data1D, nps):
    """
    transforms the 1D-format used here into the 2D-format
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]    
    """
    ncoords = data1D.shape[1] // nps
    data2D = vstack(
        [hstack(data1D[:, nps * x:nps * (x + 1)]) for x in range(ncoords)])
    return data2D
Example #43
def pos_from_xml(mesh,results):
    
    # read files into dolfin format
    mesh = dolf.Mesh(mesh)
    Q    = dolf.FunctionSpace(mesh,'CG',1)
    field= dolf.Function(Q)
    dolf.File(results) >> field
    results = results.rstrip("xml")+"pos"
    output = open(results,'w')
    cell_type = mesh.type().cell_type()
    nodes = mesh.coordinates()
    n_nodes = mesh.num_vertices()
    nodes = p.hstack((nodes,p.zeros((n_nodes,3 - p.shape(mesh.coordinates())[1]))))
    f = lambda a: [field(a[0],a[1])]
    nodes = p.hstack((nodes,list(map(f,nodes))))
    cells = mesh.cells()
    n_cells = mesh.num_cells()

    # write each triangular element and the values at each corner
    output.write("View \"NodalValues\" { \n")
    for ii,cell in enumerate(cells):

        # ST(first vertex of the triangular element, second, third){value at first, second, third};\n
        output.write("ST({0:g},{1:g},{2:g},{3:g},{4:g},{5:g},{6:g},{7:g},{8:g}){{{9:g},{10:g},{11:g}}};\n".format(nodes[cell[0]][0],  nodes[cell[0]][1],  nodes[cell[0]][2],  nodes[cell[1]][0],  nodes[cell[1]][1],  nodes[cell[1]][2],  nodes[cell[2]][0],  nodes[cell[2]][1],  nodes[cell[2]][2],  nodes[cell[0]][3],  nodes[cell[1]][3],  nodes[cell[2]][3]))
   
    output.write("};")

    output.close()
    
    # to get the rounding on the numbers right we must open it and re-save in gmsh
    output = open('temp.geo','w')
    output.write("Include '{0}'; \n".format(results))
    output.close()
    print "please click on post processing view and hit save"
    s.call(['gmsh','temp.geo'])
    os.remove('temp.geo')
Example #44
def write_gmsh(mesh, path):
    """
  This function iterates through the mesh and writes a file to the specified
  path

  Args:
  
    :mesh: Mesh that is to be written to a file
    :path: Path to write the mesh file to

  """
    output = open(path, 'w')

    cell_type = mesh.type().cell_type()

    nodes = mesh.coordinates()
    n_nodes = mesh.num_vertices()

    nodes = pl.hstack(
        (nodes, pl.zeros((n_nodes, 3 - pl.shape(mesh.coordinates())[1]))))

    cells = mesh.cells()
    n_cells = mesh.num_cells()

    output.write("$MeshFormat\n" + "2.2 0 8\n" + "$EndMeshFormat\n" +
                 "$Nodes \n" + "{0:d}\n".format(n_nodes))

    for ii, node in enumerate(nodes):
        output.write("{0:d} {1:g} {2:g} {3:g}\n".format(
            ii + 1, node[0], node[1], node[2]))

    output.write("$EndNodes\n")

    output.write("$Elements\n" + "{0:d}\n".format(n_cells))

    for ii, cell in enumerate(cells):
        #if cell_type == 1:
        #  output.write("{0:d} 1 0 {1:d} {2:d}\n".format(ii+1,int(cell[0]+1),int(cell[1]+1)))
        if cell_type == 2:
            output.write("{0:d} 2 0 {1:d} {2:d} {3:d}\n".format(
                ii + 1, int(cell[0] + 1), int(cell[1] + 1), int(cell[2] + 1)))
        #elif cell_type == 3:
        #  output.write("{0:d} 4 0 {1:d} {2:d} {3:d} {4:d}\n".format(ii+1,int(cell[0]+1),int(cell[1]+1),int(cell[2]+1),int(cell[3]+1)))
        else:
            print "Unknown cell type"

    output.write("$EndElements\n")
    output.close()
Example #45
File: misc.py Project: MMaus/mutils
def twoD_oneD(data2D, nps):
    """
    transforms the 2D format into the 1D-format used here
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]
    """
    data1D = vstack([
        hstack(data2D[:, x * nps:(x + 1) * nps])
        for x in range(data2D.shape[1] // nps)
    ])
    return data1D
Example #46
 def _generate_labeled_correlation_matrix(self, label):
     """ Concatenates the feature names to the actual correlation matrices.
         This is for better overview in stored txt files later on."""
     labeled_corr_matrix = pylab.array([])
     for i in pylab.array(self.corr_important_feats[label]):
         if len(labeled_corr_matrix) == 0:
             labeled_corr_matrix = [[('% .2f' % j).rjust(10) for j in i]]
         else:
             labeled_corr_matrix = pylab.vstack((labeled_corr_matrix,
                                 [[('% .2f' % j).rjust(10) for j in i]]))
     
     labeled_corr_matrix = pylab.c_[self.corr_important_feat_names,
                                    labeled_corr_matrix]
     labeled_corr_matrix = pylab.vstack((pylab.hstack(('          ',
                                        self.corr_important_feat_names)),
                                        labeled_corr_matrix))
     
     return labeled_corr_matrix
Example #47
File: ism.py Project: peterhm/gbd
    def mu_age_p(logit_C0=logit_C0, i=rate["i"]["mu_age"], r=rate["r"]["mu_age"], f=rate["f"]["mu_age"]):

        # for acute conditions, it is silly to use ODE solver to
        # derive prevalence, and it can be approximated with a simple
        # transformation of incidence
        if r.min() > 5.99:
            return i / (r + m_all + f)

        C0 = mc.invlogit(logit_C0)

        x = pl.hstack((i, r, f, 1 - C0, C0))
        y = fun.forward(0, x)

        susceptible = y[:N]
        condition = y[N:]

        p = condition / (susceptible + condition)
        p[pl.isnan(p)] = 0.0
        return p
Example #48
def augStates(allStates, nAug, start=0, nStride=2):
    """
    returns the augmented states
    allStates: list of subsequent apex states
    nAug: numbers of consecutive states to augment
    start: if start=0 -> return even states, start=1: odd states
    nStride: number of consecutive states that build a stride
    
    the first (start + nAug) indices of allStates are skipped.
    (internal comment: you might want to use paramsL1[nAug:,:], or
     paramsR1[nAug:,:])
    """
    aug_states = []
    startState = start + nStride * nAug
    for rep in range(startState, len(allStates), nStride):
        lastIdx = rep - nAug - 1 if rep > nAug else None
        aug_states.append(hstack(allStates[rep:lastIdx:-1]))

    return vstack(aug_states)
Example #49
    def _generate_labeled_correlation_matrix(self, label):
        """ Concatenates the feature names to the actual correlation matrices.
            This is for better overview in stored txt files later on."""
        labeled_corr_matrix = pylab.array([])
        for i in pylab.array(self.corr_important_feats[label]):
            if len(labeled_corr_matrix) == 0:
                labeled_corr_matrix = [[('% .2f' % j).rjust(10) for j in i]]
            else:
                labeled_corr_matrix = pylab.vstack(
                    (labeled_corr_matrix, [[('% .2f' % j).rjust(10)
                                            for j in i]]))

        labeled_corr_matrix = pylab.c_[self.corr_important_feat_names,
                                       labeled_corr_matrix]
        labeled_corr_matrix = pylab.vstack((pylab.hstack(
            ('          ', self.corr_important_feat_names)),
                                            labeled_corr_matrix))

        return labeled_corr_matrix
Example #50
def augStates(allStates, nAug, start=0, nStride=2):
    """
    returns the augmented states
    allStates: list of subsequent apex states
    nAug: numbers of consecutive states to augment
    start: if start=0 -> return even states, start=1: odd states
    nStride: number of consecutive states that build a stride
    
    the first (start + nAug) indices of allStates are skipped.
    (internal comment: you might want to use paramsL1[nAug:,:], or
     paramsR1[nAug:,:])
    """
    aug_states = []
    startState = start + nStride * nAug
    for rep in range(startState, len(allStates), nStride):
        lastIdx = rep - nAug - 1 if rep > nAug else None
        aug_states.append(hstack(allStates[rep:lastIdx:-1]))
    
    return vstack(aug_states)
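A small worked example of the indexing (with the fabricated toy states below, each returned row is a state concatenated with its predecessor):

from numpy import array, hstack, vstack

states = [array([i, 10 * i]) for i in range(6)]
aug = augStates(states, nAug=1)
print(aug)   # rows: [2 20 1 10] and [4 40 3 30]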
Example #51
	def LS(self,X):

		"""
			estimate the connectivity kernel parameters and the time constant parameter using the Least Squares method
	
			Arguments
			----------
			X: list of matrix
				state vectors

			Returns
			---------
			Least Squares estimation of the connectivity kernel parameters and the time constant parameter
		"""
		q=self.q_calc(X)
		Z=pb.vstack(X[1:])
		X_t_1=pb.vstack(X[:-1])
		q_t_1=pb.vstack(q[:-1])
		X_ls=pb.hstack((q_t_1,X_t_1))
		W=(X_ls.T*X_ls).I*X_ls.T*Z
		return [float( W[0]),float(W[1]),float(W[2]),float(W[3])]
Example #52
    def CalculateRates(self, times, levels):
        N = len(levels)
        t_mat = pylab.matrix(times).T

        # normalize the cell_count data by its minimum
        count_matrix = pylab.matrix(levels).T
        norm_counts = count_matrix - min(levels)
        c_mat = pylab.matrix(norm_counts)
        if c_mat[-1, 0] == 0:
            # boolean mask on a plain array replaces the removed pylab.find
            ge_zero = pylab.array(c_mat)[pylab.array(c_mat) > 0]
            if ge_zero.any():
                c_mat[-1, 0] = ge_zero.min()

        for i in pylab.arange(N - 1, 0, -1):
            if c_mat[i - 1, 0] <= 0:
                c_mat[i - 1, 0] = c_mat[i, 0]

        c_mat = pylab.log(c_mat)

        res_mat = pylab.zeros(
            (N, 5))  # columns are: slope, offset, error, avg_value, max_value
        for i in xrange(N - self.window_size):
            i_range = range(i, i + self.window_size)
            x = pylab.hstack(
                [t_mat[i_range, 0],
                 pylab.ones((len(i_range), 1))])
            y = c_mat[i_range, 0]

            # Measurements in window must all be above the min.
            if min(pylab.exp(y)) < self.minimum_level:
                continue

            (a, residues) = pylab.lstsq(x, y)[0:2]
            res_mat[i, 0] = a[0]
            res_mat[i, 1] = a[1]
            res_mat[i, 2] = residues
            res_mat[i, 3] = pylab.mean(count_matrix[i_range, 0])
            res_mat[i, 4] = max(pylab.exp(y))

        return res_mat
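# --- Standalone sketch (not from the original source): the same
# --- sliding-window log-linear fit on synthetic exponential growth.
import numpy as np

times = np.linspace(0, 10, 50)
levels = 0.05 * np.exp(0.3 * times)             # toy growth at rate 0.3

window = 10
slopes = np.zeros(len(times))
for i in range(len(times) - window):
    t = times[i:i + window]
    y = np.log(levels[i:i + window])
    A = np.column_stack((t, np.ones(window)))   # design matrix [t, 1]
    slopes[i] = np.linalg.lstsq(A, y, rcond=None)[0][0]
print(slopes[:5])                               # ~0.3 in every window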
Example #53
0
    def generatePath(self, T):
        """
        r: rate of return
        sigma: standard deviation
        dt: time step
        drift: mean price drift
        zn: array of random numbers with dimension (nPaths, nSteps)
        ld: Poisson arrival rate
        a: mean drift of jump
        d: standard deviation of jump
        k: expected value of jump
        """
        assert (T > 0), 'Time needs to be a positive number'
        try:
            S0 = self.initialPrice
            r = self.rateOfReturn
            sigma = self.stdev
            ld = self.jumpParameters[0]
            a = self.jumpParameters[1]
            d = self.jumpParameters[2]
            k = pylab.exp(a+0.5*(d**2))-1
            nPaths = self.nPaths
            nSteps = self.nSteps
            dt = T/float(nSteps)
            drift = r-ld*k-0.5*(sigma**2)
            zn = pylab.randn(nPaths, nSteps)
            zn = np.vstack((zn, -zn))
            zp = pylab.randn(nPaths, nSteps)
            zp = np.vstack((zp, -zp))
            S = pylab.zeros((2*nPaths, nSteps))
            p = pylab.poisson(ld*dt, (2*nPaths, nSteps))
            j = a*p+d*pylab.sqrt(p)*zp
            start = S0*pylab.ones((2*nPaths, 1))
            next = S0*pylab.cumprod(pylab.exp(drift*dt +
                                              sigma*pylab.sqrt(dt)*zn + j), 1)
        except ValueError:
            return 'Please check the values ' + \
                   'of the properties.'
        return pylab.hstack((start, next))
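# --- Standalone sketch (not from the original source): the same Merton
# --- jump-diffusion recipe with invented parameters, minus the antithetic
# --- (-zn, -zp) pairs that generatePath stacks on for variance reduction.
import numpy as np

S0, r, sigma = 100.0, 0.05, 0.2        # spot, rate of return, volatility
ld, a, d = 0.5, -0.1, 0.15             # jump intensity, mean, stdev
T, nPaths, nSteps = 1.0, 4, 252
dt = T / nSteps

k = np.exp(a + 0.5 * d**2) - 1         # expected relative jump size
drift = r - ld * k - 0.5 * sigma**2    # jump-compensated drift

zn = np.random.randn(nPaths, nSteps)              # diffusion shocks
p = np.random.poisson(ld * dt, (nPaths, nSteps))  # jump counts per step
zp = np.random.randn(nPaths, nSteps)              # jump-size shocks
j = a * p + d * np.sqrt(p) * zp                   # total log-jump per step

S = S0 * np.cumprod(np.exp(drift * dt + sigma * np.sqrt(dt) * zn + j), axis=1)
paths = np.hstack((S0 * np.ones((nPaths, 1)), S))
print(paths.shape)                     # (4, 253): start column plus nSteps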
Example #54
0
    def CalculateGrowthInternal(self, times, levels):
        res_mat = self.CalculateRates(times, levels)
        max_i = self.FindMaximumGrowthRate(res_mat)        
        
        t_mat = pylab.matrix(times).T
        count_matrix = pylab.matrix(levels).T
        norm_counts = count_matrix - min(levels)

        abs_res_mat = pylab.array(res_mat)
        abs_res_mat[:,0] = pylab.absolute(res_mat[:,0])
        order = abs_res_mat[:,0].argsort(axis=0)
        stationary_indices = filter(lambda x: x >= max_i, order)
        stationary_indices = pylab.array(filter(lambda x: res_mat[x,3] > 0,
                                                stationary_indices))
        
        stationary_level = 0.0
        if stationary_indices.any():
            stationary_level = res_mat[stationary_indices[0], 3]
        
        
        pylab.hold(True)
        pylab.plot(times, norm_counts)
        pylab.plot(times, res_mat[:,0])
        pylab.plot([0, times.max()], [self.minimum_level, self.minimum_level], 'r--')
        pylab.plot([0, times.max()], [self.maximum_level, self.maximum_level], 'r--')
        i_range = range(max_i, max_i+self.window_size)
        
        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T
        pylab.plot(x[:,0], pylab.exp(y), 'k:', linewidth=4)
                
        #pylab.plot([0, max(times)], [stationary_level, stationary_level], 'k-')
        
        pylab.yscale('log')
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit'])
        #, 'stationary'])
        
        
        return res_mat[max_i, 0], stationary_level
Example #55
0
    def CalculateGrowthInternal(self, times, levels):
        res_mat = self.CalculateRates(times, levels)
        max_i = self.FindMaximumGrowthRate(res_mat)

        t_mat = pylab.matrix(times).T
        count_matrix = pylab.matrix(levels).T
        norm_counts = count_matrix - min(levels)

        abs_res_mat = pylab.array(res_mat)
        abs_res_mat[:, 0] = pylab.absolute(res_mat[:, 0])
        order = abs_res_mat[:, 0].argsort(axis=0)
        stationary_indices = filter(lambda x: x >= max_i, order)
        stationary_indices = pylab.array(
            filter(lambda x: res_mat[x, 3] > 0, stationary_indices))

        stationary_level = 0.0
        if stationary_indices.any():
            stationary_level = res_mat[stationary_indices[0], 3]

        pylab.hold(True)
        pylab.plot(times, norm_counts)
        pylab.plot(times, res_mat[:, 0])
        pylab.plot([0, times.max()], [self.minimum_level, self.minimum_level],
                   'r--')
        pylab.plot([0, times.max()], [self.maximum_level, self.maximum_level],
                   'r--')
        i_range = range(max_i, max_i + self.window_size)

        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T
        pylab.plot(x[:, 0], pylab.exp(y), 'k:', linewidth=4)

        #pylab.plot([0, max(times)], [stationary_level, stationary_level], 'k-')

        pylab.yscale('log')
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit'])
        #, 'stationary'])

        return res_mat[max_i, 0], stationary_level
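# --- FindMaximumGrowthRate is not shown in this excerpt. Given the res_mat
# --- layout above (slope in column 0), a plausible minimal version is the
# --- following hypothetical sketch:
import numpy as np

def FindMaximumGrowthRate(res_mat):
    # index of the fitting window with the steepest log-linear slope
    return int(np.argmax(res_mat[:, 0]))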
Example #56
0
 def ReverseTransformWithT(self):
     """Transform preserving temperature data."""
     logging.info("Reverse transforming the NIST data")
     nist_rows = self.nist.SelectRowsFromNist()
     nist_rows_normalized = [row.Clone() for row in nist_rows]               
     data = self.GetDissociation().ReverseTransformNistRows(nist_rows_normalized)
     stoichiometric_matrix = data['S']
     cids_to_estimate = data['cids_to_estimate']
     
     logging.info("%d out of %d NIST measurements can be used" %
                  (stoichiometric_matrix.shape[0], len(nist_rows_normalized)))
     
     # for every unique row, calculate the average dG0_r of all the rows that
     # are the same reaction
     dG0_r = data['dG0_r']
     temps = data['T']
     stoich_temps = stoichiometric_matrix * temps
     stoich_and_temps = pylab.hstack((stoichiometric_matrix, stoich_temps))
     
     print stoich_and_temps.shape
     
     return stoich_and_temps, dG0_r, cids_to_estimate
Example #57
0
    def mu_age_p(logit_C0=logit_C0,
                 i=rate['i']['mu_age'],
                 r=rate['r']['mu_age'],
                 f=rate['f']['mu_age']):

        # for acute conditions, it is silly to use ODE solver to
        # derive prevalence, and it can be approximated with a simple
        # transformation of incidence
        if r.min() > 5.99:
            return i / (r + m_all + f)

        C0 = mc.invlogit(logit_C0)

        x = pl.hstack((i, r, f, 1 - C0, C0))
        y = fun.forward(0, x)

        susceptible = y[:N]
        condition = y[N:]

        p = condition / (susceptible + condition)
        p[pl.isnan(p)] = 0.
        return p
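# --- Numeric sanity check (values invented, not from the original source):
# --- the fast-remission branch above is the standard "prevalence ~=
# --- incidence x duration" approximation, with mean duration 1/(r + m_all + f).
i, r, f, m_all = 0.02, 10.0, 0.1, 0.01   # toy rates, remission >> incidence
duration = 1.0 / (r + m_all + f)         # mean time spent in the condition
print(i * duration)                      # ~0.002, matching i / (r + m_all + f)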
Example #58
0
def lfilter_zi(b,a):
    #compute the zi state from the filter parameters. see [Gust96].

    #Based on:
    # [Gust96] Fredrik Gustafsson, Determining the initial states in forward-backward 
    # filtering, IEEE Transactions on Signal Processing, pp. 988--992, April 1996, 
    # Volume 44, Issue 4

    n = max(len(a), len(b))

    zin = (pylab.eye(n - 1) - pylab.hstack((-a[1:n, pylab.newaxis],
                                            pylab.vstack((pylab.eye(n - 2),
                                                          pylab.zeros(n - 2))))))

    zid = b[1:n] - a[1:n] * b[0]

    zi_matrix = pylab.linalg.inv(zin) * (pylab.matrix(zid).transpose())
    zi_return = []

    #convert the result into a regular array (not a matrix)
    for i in range(len(zi_matrix)):
      zi_return.append(float(zi_matrix[i][0]))

    return pylab.array(zi_return)
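# --- Usage sketch (not from the original source): modern SciPy ships this
# --- computation as scipy.signal.lfilter_zi; the filter here is invented.
import numpy as np
from scipy import signal

b, a = signal.butter(3, 0.1)      # toy low-pass filter
zi = signal.lfilter_zi(b, a)      # built-in equivalent of lfilter_zi above

x = np.ones(50)                   # step input
# scale zi by the first sample so the filter starts in steady state
y, _ = signal.lfilter(b, a, x, zi=zi * x[0])
print(y[:3])                      # ~1.0 from the first sample, no transient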
Example #59
0
	def estimate_kernel(self,X):

		"""
			estimate the ide parameters using least squares method
	
			Arguments
			----------
			X: list of ndarray
				state vectors

			Returns
			---------
			least squares estimation of the IDE parameters
		"""

		Q=self.Q_calc(X)
		Z=pb.vstack(X[1:])
		X_t_1=pb.vstack(X[:-1])
		Q_t_1=pb.vstack(Q[:-1])
		X_ls=pb.hstack((Q_t_1,X_t_1))
		theta=dots(pb.inv(pb.dot(X_ls.T,X_ls)),X_ls.T,Z)
		parameters=[float(theta[i]) for i in range(theta.shape[0])]
		return parameters
Example #60
0
 def CalculateRates(self, times, levels):
     N = len(levels)    
     t_mat = pylab.matrix(times).T
     
     # shift the cell_count data by its minimum so the smallest value is zero
     count_matrix = pylab.matrix(levels).T
     norm_counts = count_matrix - min(levels)
     c_mat = pylab.matrix(norm_counts)
     if c_mat[-1, 0] == 0:
         ge_zero = c_mat[pylab.find(c_mat > 0)]
         if ge_zero.any():
             c_mat[-1, 0] = min(ge_zero)                
 
     for i in pylab.arange(N-1, 0, -1):
         if c_mat[i-1, 0] <= 0:
             c_mat[i-1, 0] = c_mat[i, 0]
 
     c_mat = pylab.log(c_mat)
     
     res_mat = pylab.zeros((N, 5)) # columns are: slope, offset, error, avg_value, max_value
     for i in xrange(N-self.window_size):
         i_range = range(i, i+self.window_size)
         x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
         y = c_mat[i_range, 0]
         
         # Measurements in window must all be above the min.
         if min(pylab.exp(y)) < self.minimum_level:
             continue
                     
         (a, residues) = pylab.lstsq(x, y)[0:2]
         res_mat[i, 0] = a[0]
         res_mat[i, 1] = a[1]
         res_mat[i, 2] = residues
         res_mat[i, 3] = pylab.mean(count_matrix[i_range,0])
         res_mat[i, 4] = max(pylab.exp(y))
     
     return res_mat