Example #1
def stackSimRes(simRes):
    """
    input: a *list* of single steps
    returns: an array that contains the complete gait (consecutive time & way)
    """
    resDat = []
    res_t = []
    for part in simRes:
        if len(resDat) == 0:
            res_t.append(part['t'])
            resDat.append(vstack( [ part['x'],
                                    part['y'],
                                    part['z'],
                                    part['vx'],
                                    part['vy'],
                                    part['vz'],
                                    ]).T)
        else:
            res_t.append(part['t'][1:] + res_t[-1][-1])
            # compensate x and z translation
            resDat.append(vstack( [ part['x'][1:] + resDat[-1][-1,0],
                                    part['y'][1:],
                                    part['z'][1:] + resDat[-1][-1,2],
                                    part['vx'][1:],
                                    part['vy'][1:],
                                    part['vz'][1:],
                                    ]).T)
    return hstack(res_t), vstack(resDat)
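
A minimal usage sketch (assuming stackSimRes was defined with NumPy's names in scope, e.g. via "from numpy import *"; the two fake steps below are made up for illustration):

from numpy import array, hstack, vstack, linspace

step = {'t': linspace(0., 1., 5), 'x': linspace(0., .5, 5),
        'y': array([1., 1.1, 1.2, 1.1, 1.]), 'z': linspace(0., .1, 5),
        'vx': array([1.] * 5), 'vy': array([0.] * 5), 'vz': array([.1] * 5)}
t, dat = stackSimRes([step, dict(step)])  # two identical single steps
print(t.shape, dat.shape)                 # (9,) (9, 6): the shared sample is dropped, offsets chained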
Example #2
def stackSimRes(simRes):
    """
    input: a *list* of single steps
    returns: an array that contains the complete gait (consecutive time & way)
    """
    resDat = []
    res_t = []
    for part in simRes:
        if len(resDat) == 0:
            res_t.append(part['t'])
            resDat.append(
                vstack([
                    part['x'],
                    part['y'],
                    part['z'],
                    part['vx'],
                    part['vy'],
                    part['vz'],
                ]).T)
        else:
            res_t.append(part['t'][1:] + res_t[-1][-1])
            # compensate x and z translation
            resDat.append(
                vstack([
                    part['x'][1:] + resDat[-1][-1, 0],
                    part['y'][1:],
                    part['z'][1:] + resDat[-1][-1, 2],
                    part['vx'][1:],
                    part['vy'][1:],
                    part['vz'][1:],
                ]).T)
    return hstack(res_t), vstack(resDat)
Example #3
def RecalcPhase(data):
    """
    this function re-computes the phase of a given artificial Floquet system
    analogously to the way the phases are computed in the original system.
    
    Data must be given in a D x N-format, 
        D: number of Dimensions, 
        N: number of samples
    """

    p1 = -1. * data[0, :]  # R_Kne_y - L_kne_y
    p2 = data[5, :] - data[2, :]  # R_Trc_y - R_Anl_y
    p3 = data[6, :] - data[1, :]  # L_trc_y - L_Anl_y
    p4 = data[2, :] - data[1, :] - data[0, :]  # R_anl_y - L_anl_y

    allPhrIn_TDR = [
        vstack((p1, p2, p3, p4)),
    ]
    psec_TDR = [
        p4.copy(),
    ]
    phrIn = vstack((p1, p2, p3, p4))
    psecIn = p4.copy()

    print('building phaser ...\n')
    Phaser = phaser2.Phaser2(y=allPhrIn_TDR, psecData=psec_TDR)
    print('computing phases ...\n')
    return Phaser.phaserEval(phrIn, psecIn).squeeze()
Example #4
 def __init__(self, scanList, stateArray):
     if len(scanList)!=stateArray.shape[0]:
         raise Exception('number of scan and state should be the same')
     times = [scan.timestamp for scan in scanList]
     self.avgTime = times[int(pl.floor(len(times)/2))]
     #self.avgTime = pl.mean([scan.timestamp for scan in scanList])
     
     #transform the 3d coordinates of each scan
     #and numpy.vstack all the output m*3 array together
     
     #find average Lidar frame
     avgBodyState = stateArray[int(pl.floor(len(stateArray)/2))]
     #avgBodyState=pl.mean(stateArray, 0)
     
     
     w_R_avg_L, w_T_avg_L = self._bodyState2LidarState(avgBodyState)
     self.avgLidarState = self._matrix2State(w_R_avg_L, w_T_avg_L)
     
     transform = self._transformPointsFromBodyToAvgLidar                
     #from data points with transformation to avgState
     self.dataPoints = pl.vstack([transform(scan.dataArray, state, w_R_avg_L, w_T_avg_L)  for scan, state in zip(scanList, stateArray) if scan.hasValidData()])
     self.intensity = pl.vstack([scan.intensityArray for scan in scanList if scan.hasValidData()]).flatten()
     
     if self.dataPoints.shape[0]!=self.intensity.shape[0]:
         raise Exception('dist and intensity have different size')
Example #5
    def extrude_mesh(self, l, z_offset):
        # accepts the number of layers and the length of extrusion

        mesh = self.mesh

        # Extrude vertices
        all_coords = []
        for i in linspace(0, z_offset, l):
            all_coords.append(
                hstack((mesh.coordinates(), i * ones((self.n_v2, 1)))))
        self.global_vertices = vstack(all_coords)

        # Extrude cells (tris to tetrahedra)
        for i in range(l - 1):
            for c in self.mesh.cells():
                # Make a prism out of 2 stacked triangles
                vertices = hstack((c + i * self.n_v2, c + (i + 1) * self.n_v2))

                # Determine prism orientation
                smallest_vertex_index = argmin(vertices)

                # Map to I-ordering of Dompierre et al.
                mapping = self.indirection_table[smallest_vertex_index]

                # Determine which subdivision scheme to use.
                if min(vertices[mapping][[1, 5]]) < min(
                        vertices[mapping][[2, 4]]):
                    local_tets = vstack((vertices[mapping][[0,1,2,5]],\
                                         vertices[mapping][[0,1,5,4]],\
                                         vertices[mapping][[0,4,5,3]]))
                else:
                    local_tets = vstack((vertices[mapping][[0,1,2,4]],\
                                         vertices[mapping][[0,4,2,5]],\
                                         vertices[mapping][[0,4,5,3]]))
                # Concatenate local tet to cell array
                self.global_tets = vstack((self.global_tets, local_tets))

        # Eliminate phantom initialization tet
        self.global_tets = self.global_tets[1:, :]

        # Query number of vertices and tets in new mesh
        self.n_verts = self.global_vertices.shape[0]
        self.n_tets = self.global_tets.shape[0]

        # Initialize new dolfin mesh of dimension 3
        self.new_mesh = Mesh()
        m = MeshEditor()
        m.open(self.new_mesh, 3, 3)
        m.init_vertices(self.n_verts, self.n_verts)
        m.init_cells(self.n_tets, self.n_tets)

        # Copy vertex data into new mesh
        for i, v in enumerate(self.global_vertices):
            m.add_vertex(i, Point(*v))

        # Copy cell data into new mesh
        for j, c in enumerate(self.global_tets):
            m.add_cell(j, *c)

        m.close()
Example #6
    def __init__(self, scanList, stateArray):
        if len(scanList) != stateArray.shape[0]:
            raise Exception('number of scan and state should be the same')
        times = [scan.timestamp for scan in scanList]
        self.avgTime = times[int(pl.floor(len(times) / 2))]
        #self.avgTime = pl.mean([scan.timestamp for scan in scanList])

        #transform the 3d coordinates of each scan
        #and numpy.vstack all the output m*3 array together

        #find average Lidar frame
        avgBodyState = stateArray[int(pl.floor(len(stateArray) / 2))]
        #avgBodyState=pl.mean(stateArray, 0)

        w_R_avg_L, w_T_avg_L = self._bodyState2LidarState(avgBodyState)
        self.avgLidarState = self._matrix2State(w_R_avg_L, w_T_avg_L)

        transform = self._transformPointsFromBodyToAvgLidar
        #from data points with transformation to avgState
        self.dataPoints = pl.vstack([
            transform(scan.dataArray, state, w_R_avg_L, w_T_avg_L)
            for scan, state in zip(scanList, stateArray)
            if scan.hasValidData()
        ])
        self.intensity = pl.vstack([
            scan.intensityArray for scan in scanList if scan.hasValidData()
        ]).flatten()

        if self.dataPoints.shape[0] != self.intensity.shape[0]:
            raise Exception('dist and intensity have different size')
Example #7
  def extrude_mesh(self,l,z_offset):
    # accepts the number of layers and the length of extrusion

    # Extrude vertices
    all_coords = []
    for i in linspace(0,z_offset,l):
      all_coords.append(hstack((self.mesh.coordinates(),i*ones((self.n_v2,1)))))
    self.global_vertices = vstack(all_coords)

    # Extrude cells (tris to tetrahedra)
    for i in range(l-1):
      for c in self.mesh.cells():
        # Make a prism out of 2 stacked triangles
        vertices = hstack((c+i*self.n_v2,c+(i+1)*self.n_v2))

        # Determine prism orientation
        smallest_vertex_index = argmin(vertices)

        # Map to I-ordering of Dompierre et al.
        mapping = self.indirection_table[smallest_vertex_index]

        # Determine which subdivision scheme to use.
        if min(vertices[mapping][[1,5]]) < min(vertices[mapping][[2,4]]):
          local_tets = vstack((vertices[mapping][[0,1,2,5]],\
                               vertices[mapping][[0,1,5,4]],\
                               vertices[mapping][[0,4,5,3]]))
        else:
          local_tets = vstack((vertices[mapping][[0,1,2,4]],\
                               vertices[mapping][[0,4,2,5]],\
                               vertices[mapping][[0,4,5,3]]))
        # Concatenate local tet to cell array
        self.global_tets = vstack((self.global_tets,local_tets))

    # Eliminate phantom initialization tet
    self.global_tets = self.global_tets[1:,:]

    # Query number of vertices and tets in new mesh
    self.n_verts = self.global_vertices.shape[0]
    self.n_tets = self.global_tets.shape[0]

    # Initialize new dolfin mesh of dimension 3
    self.new_mesh = Mesh()
    m = MeshEditor()
    m.open(self.new_mesh,3,3)
    m.init_vertices(self.n_verts,self.n_verts)
    m.init_cells(self.n_tets,self.n_tets)

    # Copy vertex data into new mesh
    for i,v in enumerate(self.global_vertices):
      m.add_vertex(i,Point(*v))

    # Copy cell data into new mesh
    for j,c in enumerate(self.global_tets):
      m.add_cell(j,*c)

    m.close()
Example #8
 def color_by_level(current_data):
     from pylab import vstack, contourf, plot, ones, arange, colorbar
     fs = current_data.framesoln
     pout, level = gridtools1.grid_output_1d(fs, 0, xout, return_level=True)
     Xout = vstack((xout, xout))
     Yout = vstack((-1.1 * ones(xout.shape), 1.1 * ones(xout.shape)))
     L = vstack((level, level))
     contourf(Xout, Yout, L, v_levels, colors=c_levels)
     cb = colorbar(ticks=range(1, maxlevels + 1))
     cb.set_label('AMR Level')
     plot(xout, pout, 'k')
Example #9
def getPeriodicOrbit(statesL,
                     T_L,
                     ymin_L,
                     statesR,
                     T_R,
                     ymin_R,
                     baseParams,
                     startParams=[14000, 1.16, 1, 0.]):
    """
    returns a tuple of SLIP parameters that result in the two-step periodic
    solution defined by <statesL> -> <statesR> -> <statesL>,
    with step time left (right) = <T_L> (<T_R>)
    minimal vertical position left (right) = <ymin_L> (<ymin_R>)
    statesL/R: a list of (left/right) apex states y, vx, vz
    baseParams: dict of base SLIP parameters: g, m (gravity acceleration, mass)
    
    returns: [SL, paramsL, dEL], [SR, paramsR, dER] 
             two tuples of initial apex states and corresponding SLIP
             parameters that yield the two-step periodic solution
             (dE: energy fluctuation)
        
    """
    SL = mean(vstack(statesL), axis=0) if len(statesL) > 1 else statesL
    SR = mean(vstack(statesR), axis=0) if len(statesR) > 1 else statesR
    tr = mean(hstack(T_R))
    tl = mean(hstack(T_L))
    yminl = mean(hstack(ymin_L))
    yminr = mean(hstack(ymin_R))
    m = baseParams['m']
    g = baseParams['g']
    # energy input right (left) step
    dER = (SL[0] - SR[0]) * m * abs(g) + .5 * m * (SL[1]**2 + SL[2]**2 -
                                                   SR[1]**2 - SR[2]**2)
    dEL = -dER

    # initialize parameters
    PR = copy.deepcopy(baseParams)
    PL = copy.deepcopy(baseParams)
    PL['IC'] = SL
    PL['dE'] = dEL
    PR['IC'] = SR
    PR['dE'] = dER

    # define step params: (y_apex2, T, y_min, vz_apex2)
    spL = (SR[0], tl, yminl, SR[2])
    spR = (SL[0], tr, yminr, SL[2])

    # compute necessary model parameters
    paramsL = fl.calcSlipParams3D(spL, PL, startParams)
    paramsR = fl.calcSlipParams3D(spR, PR, startParams)

    return ([SL, paramsL, dEL], [SR, paramsR, dER])
Example #10
    def plot_finest(current_data):
        from pylab import vstack,contourf,plot,ones,arange,colorbar,\
                          xlim,ylim,semilogy,figure,title,clf,subplot,show,draw,\
                          tight_layout,ylabel,grid

        fs = current_data.framesoln
        t = current_data.t
        print('+++ plot_finest at t = %.4f' % t)
        pout, level = gridtools1.grid_output_1d(fs, 0, xout, return_level=True)
        err = abs(pout - p_true_fcn(xout, t))
        Xout = vstack((xout, xout))
        L = vstack((level, level))
        figure(3, figsize=(12, 8))
        clf()

        subplot(311)
        Yout = vstack((-1.1 * ones(xout.shape), 1.1 * ones(xout.shape)))
        contourf(Xout, Yout, L, v_levels, colors=c_levels)
        cb = colorbar(ticks=range(1, maxlevels + 1))
        cb.set_label('AMR Level')
        plot(xout, pout, 'k')
        xlim(xlimits)
        ylim(-1.1, 1.1)
        title('Pressure at t = %.4f' % t)

        subplot(312)
        Yout = vstack((ylimits_error[0] * ones(xout.shape),
                       ylimits_error[1] * ones(xout.shape)))
        contourf(Xout, Yout, L, v_levels, colors=c_levels)
        cb = colorbar(ticks=range(1, maxlevels + 1))
        cb.set_label('AMR Level')
        semilogy(xout, err, 'k')
        if tolerance is not None:
            plot(xout, tolerance * ones(xout.shape), 'r--')
        xlim(xlimits)
        ylim(ylimits_error)
        ylabel('abs(error)')
        grid(True)

        subplot(313)
        Yout = vstack(
            (0 * ones(xout.shape), (maxlevels + 1) * ones(xout.shape)))
        contourf(Xout, Yout, L, v_levels, colors=c_levels)
        cb = colorbar(ticks=range(1, maxlevels + 1))
        cb.set_label('AMR Level')
        plot(xout, level, 'k')
        xlim(xlimits)
        ylim(0, maxlevels + 1)
        ylabel('AMR Level')
        tight_layout()
        grid(True)
        draw()
Example #11
def getPeriodicOrbit(statesL, T_L, ymin_L,
                     statesR, T_R, ymin_R,
                     baseParams ,
                     startParams=[14000, 1.16, 1, 0.] ):
    """
    returns a tuple of SLIP parameters that result in the two-step periodic
    solution defined by <statesL> -> <statesR> -> <statesL>,
    with step time left (right) = <T_L> (<T_R>)
    minimal vertical position left (right) = <ymin_L> (<ymin_R>)
    statesL/R: a list of (left/right) apex states y, vx, vz
    baseParams: dict of base SLIP parameters: g, m (gravity acceleration, mass)
    
    returns: [SL, paramsL, dEL], [SR, paramsR, dER] 
             two tuples of initial apex states and corresponding SLIP
             parameters that yield the two-step periodic solution
             (dE: energy fluctuation)
        
    """    
    SL = mean(vstack(statesL), axis=0) if len(statesL) > 1 else statesL
    SR = mean(vstack(statesR), axis=0) if len(statesR) > 1 else statesR
    tr = mean(hstack(T_R))
    tl = mean(hstack(T_L))
    yminl = mean(hstack(ymin_L))
    yminr = mean(hstack(ymin_R))
    m = baseParams['m']
    g = baseParams['g']
    # energy input right (left) step
    dER = (SL[0]-SR[0])*m*abs(g) + .5*m*(SL[1]**2 + SL[2]**2 
                                       - SR[1]**2 - SR[2]**2)
    dEL = -dER

    # initialize parameters
    PR = copy.deepcopy( baseParams )
    PL = copy.deepcopy( baseParams )
    PL['IC'] = SL    
    PL['dE'] = dEL
    PR['IC'] = SR
    PR['dE'] = dER
    
    # define step params: (y_apex2, T, y_min, vz_apex2)
    spL = (SR[0], tl, yminl, SR[2])
    spR = (SL[0], tr, yminr, SL[2])
    
    # compute necessary model parameters
    paramsL = fl.calcSlipParams3D(spL, PL, startParams)
    paramsR = fl.calcSlipParams3D(spR, PR, startParams)
    
    
    return ([SL, paramsL, dEL],[SR, paramsR, dER])
Example #12
def homog2D(xPrime, x):
    """
    
    Compute the 3x3 homography matrix mapping a set of N 2D homogeneous 
    points (3xN) to another set (3xN)

    """

    numPoints = xPrime.shape[1]
    assert numPoints >= 4

    A = None
    for i in range(0, numPoints):
        xiPrime = xPrime[:, i]
        xi = x[:, i]

        Ai_row0 = pl.concatenate((pl.zeros(3), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(3), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))

        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))

    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    H = pl.reshape(h, (3, 3))
    return H
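
A quick self-check for homog2D (a standard DLT construction), assuming pylab is imported as pl as in the example above: map points through a known homography and recover it up to scale.

import pylab as pl

H_true = pl.array([[1., .2, 3.], [0., 1.1, -2.], [0., 0., 1.]])
x = pl.vstack((pl.rand(2, 8), pl.ones(8)))   # 8 homogeneous 2D points
xPrime = pl.dot(H_true, x)
H = homog2D(xPrime, x)
print(pl.allclose(H / H[2, 2], H_true))      # True: recovered up to scale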
Example #13
def homog3D(points2d, points3d):
    """
    
    Compute a matrix relating homogeneous 3D points (4xN) to homogeneous
    2D points (3xN)

    Not sure why anyone would do this.  Note that the returned transformation
    is *NOT* an isometry.  But it's here... so deal with it.

    """

    numPoints = points2d.shape[1]
    assert numPoints >= 4

    A = None
    for i in range(0, numPoints):
        xiPrime = points2d[:, i]
        xi = points3d[:, i]

        Ai_row0 = pl.concatenate((pl.zeros(4), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(4), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))

        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))

    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    P = pl.reshape(h, (3, 4))
    return P
Example #14
def main():
	shifts = [
		[-1,  1], [0,  1], [1,  1],
		[-1,  0],          [1,  0],
		[-1, -1], [0, -1], [1, -1]
	]

	num_atoms = 100
	num_dims = 2 # dimensions
	coords = pl.random((num_atoms, num_dims))
	chosen = pl.random_integers(num_atoms) # from 1 to num_atoms
	chosen -= 1 # from 0 to num_atoms - 1

	for i in range(len(shifts)):
		coords = pl.vstack((coords, coords[:num_atoms] + shifts[i]))
	num_atoms *= 9 # after 8 shifts added

	max_distance = 0.9
	for i in range(num_atoms):
		if i != chosen:
			dx = coords[chosen, 0] - coords[i, 0]
			dy = coords[chosen, 1] - coords[i, 1]
			distance = pl.sqrt(dx*dx + dy*dy)
			if distance < max_distance:
				pl.plot([coords[i, 0]], [coords[i, 1]], "bo")
			else:
				pl.plot([coords[i, 0]], [coords[i, 1]], "ko")

	# plot last for visibility
	pl.plot([coords[chosen, 0]], [coords[chosen, 1]], "ro")
	pl.grid(True)
	pl.show()
Example #15
def homog3D (points2d, points3d):
    """

    Compute a matrix relating homogeneous 3D points (4xN) to homogeneous
    2D points (3xN)

    Not sure why anyone would do this.  Note that the returned transformation
    is *NOT* an isometry.  But it's here... so deal with it.

    """

    numPoints = points2d.shape[1]
    assert (numPoints >= 4)

    A = None
    for i in range (0, numPoints):
        xiPrime = points2d[:,i]
        xi = points3d[:,i]

        Ai_row0 = pl.concatenate ((pl.zeros (4,), -xiPrime[2]*xi, xiPrime[1]*xi))
        Ai_row1 = pl.concatenate ((xiPrime[2]*xi, pl.zeros (4,), -xiPrime[0]*xi))
        Ai = pl.row_stack ((Ai_row0, Ai_row1))

        if A is None:
            A = Ai
        else:
            A = pl.vstack ((A, Ai))

    U, S, V = pl.svd (A)
    V = V.T
    h = V[:,-1]
    P = pl.reshape (h, (3, 4))
    return P
Example #16
File: misc.py Project: MMaus/mutils
def calcJacobian(fun, x0, h=.0001):
    """
    calculates the jacobian of a given function fun with respect to its
    parameters at the point (array or list) x0.

    :args:
        fun (function): the function to calculate the jacobian from
        x0 (iterable, e.g. array): position to evaluate the jacobian at
        h (float): step size 

    :returns:
        J (n-by-n array): the jacobian of f at x0
    """
    J = []
    x = array(x0)
    for elem, val in enumerate(x0):

        ICp = x.copy()
        ICp[elem] += h
        resp = fun(ICp)
        ICn = x.copy()
        ICn[elem] -= h
        resn = fun(ICn)
        J.append((resp - resn) / (2. * h))

    J = vstack(J).T
    return J
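
A small sanity check for calcJacobian, assuming array and vstack come from NumPy (e.g. via "from numpy import *"): compare against the analytic Jacobian of a toy function.

from numpy import array, vstack, allclose

f = lambda v: array([v[0]**2, v[0] * v[1]])
J = calcJacobian(f, [1., 2.])
print(allclose(J, [[2., 0.], [2., 1.]], atol=1e-4))   # analytic Jacobian at (1, 2)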
Example #17
def dS_dP(x0, PR, keys = [('k',750.),('alpha',0.05),('L0',0.05),
                                ('beta',0.05), ('dE', 7.5) ], r_mag = .005):
    """
    calculates the SLIP derivative with respect to 'keys'
    keys is a list of tuples with the keys of PR that should be changed,
    and the order of magnitude of deviation (i.e. something like std(x))
    
    -- only for a single step --
    """
    df = []
    # r_mag = .005 # here: relative magnitude of disturbance in standard deviations
    
    for elem,mag in keys:
        h = r_mag*mag
        # positive direction
        PRp = copy.deepcopy(PR)
        PRp[elem] += h
        resR = sl.SLIP_step3D(x0, PRp)
        SRp = array([resR['y'][-1], resR['vx'][-1], resR['vz'][-1]])
        #fhp = array(SR2 - x0)
        # negative direction
        PRn = copy.deepcopy(PR)            
        PRn[elem] -= h
        resR = sl.SLIP_step3D(x0, PRn)
        SRn = array([resR['y'][-1], resR['vx'][-1], resR['vz'][-1]])
        #fhn = array(SR2 - x0)
        # derivative: difference quotient
        df.append( (SRp - SRn)/(2.*h) )
    
    return vstack(df).T
Example #18
def gen_std_devs(climate, multi_cities, years):
    """
    For each year in years, compute the standard deviation over the averaged yearly
    temperatures for each city in multi_cities.

    Args:
        climate: instance of Climate
        multi_cities: the names of cities we want to use in our std dev calculation (list of str)
        years: the range of years to calculate standard deviation for (list of int)

    Returns:
        a pylab 1-d array of floats with length = len(years). Each element in
        this array corresponds to the standard deviation of the average annual
        city temperatures for the given cities in a given year.
    """
    std_dev = []
    for year in years:
        yearly_temps = None
        for city in multi_cities:
            if yearly_temps is None:
                yearly_temps = climate.get_yearly_temp(city, year)
            else:
                yearly_temp = climate.get_yearly_temp(city, year)
                yearly_temps = pylab.vstack((yearly_temps, yearly_temp))
        if yearly_temps.ndim > 1:
            yearly_temps = pylab.average(yearly_temps, axis=0)
        std_dev.append(pylab.std(yearly_temps))
    return pylab.array(std_dev)
Example #19
def homog2D (xPrime, x):
    """

    Compute the 3x3 homography matrix mapping a set of N 2D homogeneous
    points (3xN) to another set (3xN)

    """

    numPoints = xPrime.shape[1]
    assert (numPoints >= 4)

    A = None
    for i in range (0, numPoints):
        xiPrime = xPrime[:,i]
        xi = x[:,i]

        Ai_row0 = pl.concatenate ((pl.zeros (3,), -xiPrime[2]*xi, xiPrime[1]*xi))
        Ai_row1 = pl.concatenate ((xiPrime[2]*xi, pl.zeros (3,), -xiPrime[0]*xi))
        Ai = pl.row_stack ((Ai_row0, Ai_row1))

        if A is None:
            A = Ai
        else:
            A = pl.vstack ((A, Ai))

    U, S, V = pl.svd (A)
    V = V.T
    h = V[:,-1]
    H = pl.reshape (h, (3, 3))
    return H
Example #20
def plot_matches(im1, im2, locs1, locs2, matchscores, show_below=True):
    """
    Show a figure with lines joining the accepted matches
    input: im1, im2 (images as arrays), locs1, locs2 (feature locations),
    matchscores (as output from the match() method)
    show_below (if images should be shown below matches).
    :param im1:
    :param im2:
    :param locs1:
    :param locs2:
    :param matchscores:
    :param show_below:
    :return:
    """

    im3 = appendImages(im1, im2)
    if show_below:
        im3 = vstack((im3, im3))
    imshow(im3)

    cols1 = im1.shape[1]
    for i, m in enumerate(matchscores):
        if m > 0:
            plot([locs1[i][1], locs2[m][1] + cols1], [locs1[i][0], locs2[m][0]], 'c')
    axis('off')
Example #21
def dS_dX(x0, PR, h_mag = .0005):
    """
    calculates the Jacobian of the SLIP at the given point x0,
    with PR being the parameters for that step
    coordinates under consideration are:
        y
        vx
        vz
    only for a single step!
    """
    df = []
    for dim in range(len(x0)):
        delta = zeros_like(x0)
        delta[dim] = 1.            
        h = h_mag * delta      
        # in positive direction           
        resRp = sl.SLIP_step3D(x0 + h, PR)
        SRp = array([resRp['y'][-1], resRp['vx'][-1], resRp['vz'][-1]])
        #fhp = array(SR2 - x0)
        # in negative direction
        resRn = sl.SLIP_step3D(x0 - h, PR)
        SRn = array([resRn['y'][-1], resRn['vx'][-1], resRn['vz'][-1]])
        #fhn = array(SR2 - x0)
        # derivative: difference quotient
        df.append( (SRp - SRn)/(2.*h_mag) )
    
    return vstack(df).T
Example #22
def dS_dP(x0,
          PR,
          keys=[('k', 750.), ('alpha', 0.05), ('L0', 0.05), ('beta', 0.05),
                ('dE', 7.5)],
          r_mag=.005):
    """
    calculates the SLIP derivative with respect to 'keys'
    keys is a list of tuples with the keys of PR that should be changed,
    and the order of magnitude of deviation (i.e. something like std(x))
    
    -- only for a single step --
    """
    df = []
    # r_mag = .005 # here: relative magnitude of disturbance in standard deviations

    for elem, mag in keys:
        h = r_mag * mag
        # positive direction
        PRp = copy.deepcopy(PR)
        PRp[elem] += h
        resR = sl.SLIP_step3D(x0, PRp)
        SRp = array([resR['y'][-1], resR['vx'][-1], resR['vz'][-1]])
        #fhp = array(SR2 - x0)
        # negative direction
        PRn = copy.deepcopy(PR)
        PRn[elem] -= h
        resR = sl.SLIP_step3D(x0, PRn)
        SRn = array([resR['y'][-1], resR['vx'][-1], resR['vz'][-1]])
        #fhn = array(SR2 - x0)
        # derivative: difference quotient
        df.append((SRp - SRn) / (2. * h))

    return vstack(df).T
Example #23
def main():
    shifts = [[-1, 1], [0, 1], [1, 1], [-1, 0], [1, 0], [-1, -1], [0, -1],
              [1, -1]]

    num_atoms = 100
    num_dims = 2  # dimensions
    coords = pl.random((num_atoms, num_dims))
    chosen = pl.random_integers(num_atoms)  # from 1 to num_atoms
    chosen -= 1  # from 0 to num_atoms - 1

    for i in range(len(shifts)):
        coords = pl.vstack((coords, coords[:num_atoms] + shifts[i]))
    num_atoms *= 9  # after 8 shifts added

    max_distance = 0.9
    for i in range(num_atoms):
        if i != chosen:
            dx = coords[chosen, 0] - coords[i, 0]
            dy = coords[chosen, 1] - coords[i, 1]
            distance = pl.sqrt(dx * dx + dy * dy)
            if distance < max_distance:
                pl.plot([coords[i, 0]], [coords[i, 1]], "bo")
            else:
                pl.plot([coords[i, 0]], [coords[i, 1]], "ko")

    # plot last for visibility
    pl.plot([coords[chosen, 0]], [coords[chosen, 1]], "ro")
    pl.grid(True)
    pl.show()
Example #24
def dS_dX(x0, PR, h_mag=.0005):
    """
    calculates the Jacobian of the SLIP at the given point x0,
    with PR being the parameters for that step
    coordinates under consideration are:
        y
        vx
        vz
    only for a single step!
    """
    df = []
    for dim in range(len(x0)):
        delta = zeros_like(x0)
        delta[dim] = 1.
        h = h_mag * delta
        # in positive direction
        resRp = sl.SLIP_step3D(x0 + h, PR)
        SRp = array([resRp['y'][-1], resRp['vx'][-1], resRp['vz'][-1]])
        #fhp = array(SR2 - x0)
        # in negative direction
        resRn = sl.SLIP_step3D(x0 - h, PR)
        SRn = array([resRn['y'][-1], resRn['vx'][-1], resRn['vz'][-1]])
        #fhn = array(SR2 - x0)
        # derivative: difference quotient
        df.append((SRp - SRn) / (2. * h_mag))

    return vstack(df).T
Example #25
File: misc.py Project: MMaus/mutils
def calcJacobian(fun, x0, h=.0001):
    """
    calculates the jacobian of a given function fun with respect to its
    parameters at the point (array or list) x0.

    :args:
        fun (function): the function to calculate the jacobian from
        x0 (iterable, e.g. array): position to evaluate the jacobian at
        h (float): step size 

    :returns:
        J (n-by-n array): the jacobian of f at x0
    """
    J = []
    x = array(x0)
    for elem, val in enumerate(x0):
        
        ICp = x.copy()
        ICp[elem] += h
        resp = fun(ICp)
        ICn = x.copy()
        ICn[elem] -= h
        resn = fun(ICn)
        J.append((resp - resn)  / (2. * h))
        
    J = vstack(J).T    
    return J
Example #26
def perspectiveTransform(xy, xaya):
    M = array([], dtype=float64).reshape(0, 9)
    # Build matrix M for an arbitrary number of points
    for i in range(len(xy)):
        xi = xy[i][0]
        yi = xy[i][1]
        xi_ = xaya[i][0]
        yi_ = xaya[i][1]
        M = vstack([M, array([xi, yi, 1, 0, 0, 0, -xi_*xi, -xi_*yi, -xi_])])
        M = vstack([M, array([0, 0, 0, xi, yi, 1, -yi_*xi, -yi_*yi, -yi_])])
    # Take SVD
    U, S, Vt = sclin.svd(M, full_matrices=True)
    # p = last column of V, and because Vt is V transposed, the last row is
    # taken.
    p = Vt[-1]
    return p.reshape((3, 3))
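
perspectiveTransform builds the same DLT system row by row; here is a round-trip check, assuming sclin is scipy.linalg and the bare NumPy names (array, float64, vstack) are in scope:

import numpy as np

H_true = np.array([[1., 0., 2.], [0., 1., -1.], [0., .1, 1.]])
xy = np.random.rand(6, 2)
proj = np.dot(np.hstack([xy, np.ones((6, 1))]), H_true.T)
xaya = proj[:, :2] / proj[:, 2:]
H = perspectiveTransform(xy, xaya)
print(np.allclose(H / H[2, 2], H_true))   # True: recovered up to scale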
Example #27
def prepare_line(line, pad=16):
    line = line * 1.0 / amax(line)
    line = amax(line) - line
    line = line.T
    if pad > 0:
        w = line.shape[1]
        line = vstack([zeros((pad, w)), line, zeros((pad, w))])
    return line
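
For reference, a shape check for prepare_line, assuming the NumPy names (amax, vstack, zeros) are in scope:

from numpy import amax, vstack, zeros
from numpy.random import rand

line = rand(8, 30)     # fake grayscale text line, height 8, width 30
out = prepare_line(line)
print(out.shape)       # (62, 8): transposed, padded by 16 rows top and bottom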
Example #28
File: io.py Project: MMaus/mutils
    def get_kin(self, fps=250.):
        """
        returns a list of the selected kinematics (one list item for each repetition)

        :args:
            self: kin object
            fps (float, default 250): sampling frequency. Required to correctly compute the velocities.

        :returns:
            a list. Each element contains the selected (-> self.selection) data with corresponding
            velocities (i.e. 2d x n elements per item)
        """
        # walk through each element of "selection"
        all_pos = []
        all_vel = []
        for raw in self.raw_dat:
            curr_pos = []
            curr_vel = []
            for elem in self.selection:
                items = [x.strip() for x in elem.split('-')]  # 1 item if no "-" present
                dims = []
                markers = []
                for item in items:
                    if item.endswith('_x'):
                        dims.append(0)
                    elif item.endswith('_y'):
                        dims.append(1)
                    elif item.endswith('_z'):
                        dims.append(2)
                    else:
                        print("invalid marker suffix: ", item)
                        continue
                    markers.append(item[:-2])

                if len(items) == 1:  # single marker
                    curr_pos.append(raw[markers[0]][:, dims[0]])
                    curr_vel.append(gradient(raw[markers[0]][:, dims[0]]) * fps)
                else:  # difference between two markers
                    curr_pos.append(raw[markers[0]][:, dims[0]] - raw[markers[1]][:, dims[1]])
                    curr_vel.append(gradient(raw[markers[0]][:, dims[0]] - raw[markers[1]][:, dims[1]]) * fps)

            all_pos.append(vstack(curr_pos + curr_vel))
            all_vel.append(vstack(curr_vel))

        return all_pos
Example #29
 def error_color_by_level(current_data):
     from pylab import vstack,contourf,plot,ones,arange,colorbar,\
                       ylim,semilogy
     fs = current_data.framesoln
     t = current_data.t
     pout, level = gridtools1.grid_output_1d(fs, 0, xout, return_level=True)
     err = abs(pout - p_true_fcn(xout, t))
     Xout = vstack((xout, xout))
     Yout = vstack((ylimits_error[0] * ones(xout.shape),
                    ylimits_error[1] * ones(xout.shape)))
     L = vstack((level, level))
     contourf(Xout, Yout, L, v_levels, colors=c_levels)
     cb = colorbar(ticks=range(1, maxlevels + 1))
     cb.set_label('AMR Level')
     semilogy(xout, err, 'k')
     #semilogy(xout,level,'k')
     if tolerance is not None:
         plot(xout, tolerance * ones(xout.shape), 'r--')
Example #30
 def _generate_labeled_correlation_matrix(self, label):
     """ Concatenates the feature names to the actual correlation matrices.
         This is for better overview in stored txt files later on."""
     labeled_corr_matrix = pylab.array([])
     for i in pylab.array(self.corr_important_feats[label]):
         if len(labeled_corr_matrix) == 0:
             labeled_corr_matrix = [[('% .2f' % j).rjust(10) for j in i]]
         else:
             labeled_corr_matrix = pylab.vstack((labeled_corr_matrix,
                                 [[('% .2f' % j).rjust(10) for j in i]]))
     
     labeled_corr_matrix = pylab.c_[self.corr_important_feat_names,
                                    labeled_corr_matrix]
     labeled_corr_matrix = pylab.vstack((pylab.hstack(('          ',
                                        self.corr_important_feat_names)),
                                        labeled_corr_matrix))
     
     return labeled_corr_matrix
Example #31
 def getAllPrecNoise(self,timePreceedingSignal=-1):
      #returns the concatenated preceding noise
     precNoise=py.array([])
     for tdData in self._thzdata_raw:
         tN=self.getPreceedingNoise(tdData,timePreceedingSignal)
         if precNoise.shape[0]==0:
             precNoise=tN
         else:
             precNoise=py.vstack((precNoise,tN))
     return precNoise
Example #32
    def _generate_labeled_correlation_matrix(self, label):
        """ Concatenates the feature names to the actual correlation matrices.
            This is for better overview in stored txt files later on."""
        labeled_corr_matrix = pylab.array([])
        for i in pylab.array(self.corr_important_feats[label]):
            if len(labeled_corr_matrix) == 0:
                labeled_corr_matrix = [[('% .2f' % j).rjust(10) for j in i]]
            else:
                labeled_corr_matrix = pylab.vstack(
                    (labeled_corr_matrix, [[('% .2f' % j).rjust(10)
                                            for j in i]]))

        labeled_corr_matrix = pylab.c_[self.corr_important_feat_names,
                                       labeled_corr_matrix]
        labeled_corr_matrix = pylab.vstack((pylab.hstack(
            ('          ', self.corr_important_feat_names)),
                                            labeled_corr_matrix))

        return labeled_corr_matrix
Example #33
File: io.py Project: MMaus/mutils
    def get_kin_apex(self, phases, return_times = False):
        """
        returns the kinematic state at the apices which are close to the given phases. Apex is re-calculated.
        
        :args:
            self: kin object (-> later: "self")
            phases (list): list of lists of apex phases. Must match the length of "kin.raw_dat".
               The n'th list of apex phases will be assigned to the n'th "<object>.raw_dat" element.
            return_times (bool): if true, return only the times at which apex occurred.
    
        :returns:
           if lr_split is True:
              [[r_apices], [l_apices]]
           else:
              [[apices], ]
              where apices is the kinematic state (from <object>.selection) at the apices *around* the given phases.
              *NOTE* The apices themselves are re-computed for higher accuracy.
    
        """
        
        all_kin = []
        all_kin_orig = self.get_kin()
        all_apex_times = []
        if len(self.raw_dat) != len(phases):
            raise ValueError("length of phases list does not match number of datasets")
        for raw, phase, kin_orig in zip(self.raw_dat, phases, all_kin_orig):
            kin_apex = []
            kin_time = arange(len(raw['phi2'].squeeze()), dtype=float) / 250.
            # 1st: compute apex *times*
            apex_times = []
            for phi_apex in phase:
                # 1a: get "rough" estimate
                idx_apex = argmin(abs(raw['phi2'].squeeze() - phi_apex))
                # 1b: fit quadratic function to com_y
                idx_low = max(0, idx_apex - 4)
                idx_high = min(raw['com'].shape[0] - 1, idx_apex + 4)
                com_y_pt = raw['com'][idx_low:idx_high + 1, 2]            
                tp = arange(idx_high - idx_low + 1) # improve numerical accuracy: do not take absolute time
                p = polyfit(tp, com_y_pt, 2) # p: polynomial, highest power coeff first
                t0 = -p[1] / (2.*p[0]) # "real" index of apex (a value of 4
                           # indicates that apex is exactly at the measured frame)
                t_apex = kin_time[idx_apex] + (t0 - 4.) / 250.
                apex_times.append(t_apex)
            
            if return_times:
                all_apex_times.append(array(apex_times))
            else:
                # 2nd: interpolate data
                dat = vstack([interp(apex_times, kin_time, kin_orig[row, :]) for row in arange(kin_orig.shape[0])])
                all_kin.append(dat)

        if return_times:
            return all_apex_times

        return all_kin
Example #34
File: misc.py Project: MMaus/mutils
def sinDistort(data,twisting=1.):
    """
    a distortion of the data:
        x will be mapped to sin(x/max(abs(x))), for every coordinate
    this is to distort a lower dimensional system so that it is not
    restricted to a lower-dimensional subspace any longer
    data must be given in N x D format
    the optional twisting factor increases the twisting strength
    """
    return vstack([sin( data[:,x]*twisting/max(abs(data[:,x])))*max(abs(data[:,x]))
                   for x in range(data.shape[1])]).T
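
A shape-preserving check for sinDistort, assuming vstack and sin come from NumPy; each column keeps its original amplitude while being nonlinearly warped:

from numpy import vstack, sin
from numpy.random import randn

data = randn(100, 3)               # N x D format
out = sinDistort(data, twisting=2.)
print(out.shape)                   # (100, 3)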
Example #35
 def test_sim_data_2(self): 
     sims = 10000 
     return # skip for now
     test1 = pl.zeros(3, dtype='f').view(pl.recarray)
     for i in range(sims): 
         temp = data.sim_data(1, [0.1,0.1,0.8], [0.01,0.01,0.01])
         test1 = pl.vstack((test1, temp))
     test1 = test1[1:,]
     test2 = data.sim_data(sims, [0.1,0.1,0.8], [0.01, 0.01, 0.01])
     diff = (test1.mean(0) - test2.mean(0))/test1.mean(0)
     assert pl.allclose(diff, 0, atol=0.01), 'should be close to zero, (%s found)' % str(diff)
Example #36
def block_hankel(data, f):
    """
    Create a block Hankel matrix.
    f : number of rows
    """
    data = pl.matrix(data)
    assert len(data.shape) == 2
    n = data.shape[1] - f
    return pl.matrix(pl.hstack([
        pl.vstack([data[:, i+j] for i in range(f)])
        for j in range(n)]))
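
A tiny demonstration of the Hankel structure, assuming block_hankel as defined above with pylab imported as pl:

import pylab as pl

data = pl.matrix(pl.arange(10.))   # one signal, ten samples
H = block_hankel(data, 3)
print(H.shape)                     # (3, 7): each column is a length-3 sliding window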
Example #37
def block_hankel(data, f):
    """
    Create a block Hankel matrix.
    f : number of rows
    """
    data = np.matrix(data)
    assert len(data.shape) == 2
    n = data.shape[1] - f
    return np.matrix(pl.hstack([
        pl.vstack([data[:, i+j] for i in range(f)])
        for j in range(n)]))
Example #38
	def LS(self,X):

		"""
			estimate the connectivity kernel parameters and the time constant parameter using the least squares method
	
			Arguments
			----------
			X: list of matrix
				state vectors

			Returns
			---------
			least squares estimation of the connectivity kernel parameters and the time constant parameter
		"""
		q=self.q_calc(X)
		Z=pb.vstack(X[1:])
		X_t_1=pb.vstack(X[:-1])
		q_t_1=pb.vstack(q[:-1])
		X_ls=pb.hstack((q_t_1,X_t_1))
		W=(X_ls.T*X_ls).I*X_ls.T*Z
		return [float( W[0]),float(W[1]),float(W[2]),float(W[3])]
Example #39
 def save_rsj_params(self):
     mat = pl.vstack((pl.array(self.idx), pl.transpose(self.c),\
       self.icarr, self.rnarr, self.ioarr, self.voarr,\
       self.ic_err_arr, self.rn_err_arr, self.io_err_arr, self.vo_err_arr))
     
     header_c = ''
     for n in range(self.cdim):
         header_c = header_c + 'Control%d '%(n+1)
     header_ = 'Index ' + header_c + \
       'Ic Rn Io Vo Ic_err Rn_err Io_err Vo_err'
     pl.savetxt(self.get_fullpath(self.fn_rsjparams), pl.transpose(mat),\
       header=header_)
Example #40
 def dewarp(self, img, cval=0, dtype=dtype('f')):
     assert img.shape == self.shape
     h, w = img.shape
     hpadding = self.r
     padded = vstack(
         [cval * ones((hpadding, w)), img, cval * ones((hpadding, w))])
     center = self.center + hpadding
     dewarped = [
         padded[center[i] - self.r:center[i] + self.r, i] for i in range(w)
     ]
     dewarped = array(dewarped, dtype=dtype).T
     return dewarped
Example #41
def approximate(x,y):
    """
    Linear approximation of y=f(x) using least square estimator.
    In:
        x : ndarray
        y : ndarray
    Out:
        a, b : float, as in a*x+b=y
    """
    assert pl.shape(x) == pl.shape(y)
    A = pl.vstack([x, pl.ones(len(x))]).T
    a, b = pl.lstsq(A, y)[0]
    return a, b
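
A usage sketch for approximate, assuming pylab is imported as pl and exports lstsq (as older pylab versions did):

import pylab as pl

x = pl.arange(10.)
y = 2. * x + 1. + .01 * pl.randn(10)
a, b = approximate(x, y)
print(round(a, 2), round(b, 2))    # close to 2.0 and 1.0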
Example #42
File: misc.py Project: MMaus/mutils
def sinDistort(data, twisting=1.):
    """
    a distortion of the data:
        x will be mapped to sin(x/max(abs(x))), for every coordinate
    this is to distort a lower dimensional system so that it is not
    restricted to a lower-dimensional subspace any longer
    data must be given in N x D format
    the optional twisting factor increases the twisting strength
    """
    return vstack([
        sin(data[:, x] * twisting / max(abs(data[:, x]))) *
        max(abs(data[:, x])) for x in range(data.shape[1])
    ]).T
Example #43
 def test_sim_data_2(self):
     sims = 10000
     return  # skip for now
     test1 = pl.zeros(3, dtype='f').view(pl.recarray)
     for i in range(sims):
         temp = data.sim_data(1, [0.1, 0.1, 0.8], [0.01, 0.01, 0.01])
         test1 = pl.vstack((test1, temp))
     test1 = test1[1:, ]
     test2 = data.sim_data(sims, [0.1, 0.1, 0.8], [0.01, 0.01, 0.01])
     diff = (test1.mean(0) - test2.mean(0)) / test1.mean(0)
     assert pl.allclose(
         diff, 0,
         atol=0.01), 'should be close to zero, (%s found)' % str(diff)
Example #44
File: misc.py Project: MMaus/mutils
def twoD_oneD(data2D,nps):
    """
    transforms the 2D format into the 1D-format used here
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]
    """    
    data1D = vstack([hstack(data2D[:,x*nps:(x+1)*nps]) for x in range(data2D.shape[1]//nps)])
    return data1D
Example #45
    def get_coords_for_frame(self, frame, root_offset=None):
        coords = []

        def draw_line_to_children(bone, ppos, P, rpos, ppindex):
            for child in self.hierarchy[bone]:
                cbone = self.bones[child]
                C = self.__local_matrices[child + '__C']
                Cinv = self.__local_matrices[child + '__Cinv']
                B = self.__local_matrices[child + '__B']

                #Motion matrix
                M = eye(4)
                try:
                    for dof, val in zip(cbone.dof, frame[child]):
                        val = pl.deg2rad(val)
                        R = eye(4)
                        if dof == 'rx':
                            R = Rx(val)
                        elif dof == 'ry':
                            R = Ry(val)
                        elif dof == 'rz':
                            R = Rz(val)

                        M = dot(R, M)
                except:  #We might not have dof data for the current bone
                    pass

                L = C.dot(M).dot(Cinv).dot(B)

                #Full transform
                A = dot(P, L)

                cpos = dot(A, [0, 0, 0, 1]) + rpos

                coords.append(cpos[:3])
                draw_line_to_children(child, cpos, A, rpos, len(coords) - 1)

        #Root orientation and translation
        transf = frame['root'].copy()
        if root_offset is not None:
            transf[0:3] += root_offset

        R = dot(Rz(pl.deg2rad(transf[5])),
                dot(Ry(pl.deg2rad(transf[4])), Rx(pl.deg2rad(transf[3]))))
        B = T(transf[0:3])
        rpos = dot(B, [0, 0, 0, 1]) / 0.45 * Skeleton.SCALE
        coords.append(rpos[:3])

        draw_line_to_children('root', rpos, R, rpos, 0)

        return pl.vstack(coords).T
Example #46
def predTest(idat, odat, out_of_sample=True, nboot=50, totvar=False, rcond=1e-7):
    """
    .. note::

      computes how well odat can be predicted (in terms of variance reduction)
      using idat, using the bootstrap method

    Some formatting test: :py:func:`mutils.statistics.vred`

    Parameters
    ----------
    idat : array_like
        format: n x d , d-dimensional data in n rows
        used to predict odat
    odat : array_like
        format: n x q, q-dimensional data in n rows
        to be predicted from idat
    out_of_sample : bool
        if True, perform an out-of-sample prediction
    nboot : int
        the number of bootstrap repetitions to be performed for prediction
    totvar : bool
        if `True`, the total relative remaining variance will be computed,
        otherwise the relative remaining variance for each coordinate
        to be predicted will be computed

    Returns
    -------
        Return value and format depend on whether or not **totvar** is *True*

        * if **totvar** is *True*:
                returns an array of dimension nboot x 1, which contains
                the relative remaining variance after prediction
                (in nboot bootstrap repetitions)
        * if **totvar** is *False*:
                returns an array of dimension nboot x q, which contains the
                relative remaining variance after prediction for each
                coordinate (in nboot bootstrap repetitions)

    """

    _, mapsT, idcs = fitData(idat, odat, nps=1, nrep=nboot, rcond=rcond)
    maps = [x.T for x in mapsT]
    # indices will be "swapped" to "NOT(indices)" in vRedPartial
    indices = idcs if out_of_sample else [otheridx(x, idat.shape[0]) for x in idcs]
    vaxis = None if totvar else 0
    res = vRedPartial(idat, odat, maps, indices, vaxis)
    return vstack(res)
Example #47
	def estimate_kernel(self,X):

		"""
			estimate the ide parameters using least squares method
	
			Arguments
			----------
			X: list of ndarray
				state vectors

			Returns
			---------
			least squares estimation of the IDE parameters
		"""

		Q=self.Q_calc(X)
		Z=pb.vstack(X[1:])
		X_t_1=pb.vstack(X[:-1])
		Q_t_1=pb.vstack(Q[:-1])
		X_ls=pb.hstack((Q_t_1,X_t_1))
		theta=dots(pb.inv(pb.dot(X_ls.T,X_ls)),X_ls.T,Z)
		parameters=[float(theta[i]) for i in range(theta.shape[0])]
		return parameters
Example #48
File: misc.py Project: MMaus/mutils
def oneD_twoD(data1D,nps):
    """
    transforms the 1D-format used here into the 2D-format
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]    
    """   
    ncoords = data1D.shape[1]//nps
    data2D = vstack([hstack(data1D[:,nps*x:nps*(x+1)]) for x in range(ncoords)])
    return data2D
Example #49
def combine_output(J, T, model, dir, reps, save=False):
    """
    Combine output on absolute error, relative error, csmf_accuracy, and coverage from
    multiple runs of validate_once. Either saves the output to the disk, or returns arrays
    for each.
    """

    cause = pl.zeros(J*T, dtype='f').view(pl.recarray)
    time = pl.zeros(J*T, dtype='f').view(pl.recarray)
    abs_err = pl.zeros(J*T, dtype='f').view(pl.recarray) 
    rel_err = pl.zeros(J*T, dtype='f').view(pl.recarray)
    coverage = pl.zeros(J*T, dtype='f').view(pl.recarray)
    csmf_accuracy = pl.zeros(J*T, dtype='f').view(pl.recarray)

    for i in range(reps): 
        metrics = pl.csv2rec('%s/metrics_%s_%i.csv' % (dir, model, i))
        cause = pl.vstack((cause, metrics.cause))
        time = pl.vstack((time, metrics.time))
        abs_err = pl.vstack((abs_err, metrics.abs_err))
        rel_err = pl.vstack((rel_err, metrics.rel_err))
        coverage = pl.vstack((coverage, metrics.coverage))
        csmf_accuracy = pl.vstack((csmf_accuracy, metrics.csmf_accuracy))

    cause = cause[1:,]
    time = time[1:,]    
    abs_err = abs_err[1:,]
    rel_err = rel_err[1:,]
    coverage = coverage[1:,]
    csmf_accuracy = csmf_accuracy[1:,]

    mean_abs_err = abs_err.mean(0)
    median_abs_err =  pl.median(abs_err, 0)
    mean_rel_err = rel_err.mean(0)
    median_rel_err = pl.median(rel_err, 0)
    mean_csmf_accuracy = csmf_accuracy.mean(0)
    median_csmf_accuracy = pl.median(csmf_accuracy, 0)
    mean_coverage_bycause = coverage.mean(0)
    mean_coverage = coverage.reshape(reps, T, J).mean(0).mean(1)
    percent_total_coverage = (coverage.reshape(reps, T, J).sum(2)==3).mean(0)
    mean_coverage = pl.array([[i for j in range(J)] for i in mean_coverage]).ravel()
    percent_total_coverage = pl.array([[i for j in range(J)] for i in percent_total_coverage]).ravel()

    models = pl.array([[model for j in range(J)] for i in range(T)]).ravel()
    true_cf = metrics.true_cf
    true_std = metrics.true_std
    std_bias = metrics.std_bias

    all = pl.np.core.records.fromarrays([models, cause[0], time[0], true_cf, true_std, std_bias, mean_abs_err, median_abs_err, mean_rel_err, median_rel_err, 
                                         mean_csmf_accuracy, median_csmf_accuracy, mean_coverage_bycause, mean_coverage, percent_total_coverage], 
                                        names=['model', 'cause', 'time', 'true_cf', 'true_std', 'std_bias', 'mean_abs_err', 'median_abs_err', 
                                         'mean_rel_err', 'median_rel_err', 'mean_csmf_accuracy', 'median_csmf_accuracy', 
                                         'mean_coverage_bycause', 'mean_coverage', 'percent_total_coverage'])
    
    if save: 
        pl.rec2csv(all, '%s/%s_summary.csv' % (dir, model)) 
    else: 
        return all
Example #50
File: misc.py Project: MMaus/mutils
def oneD_twoD(data1D, nps):
    """
    transforms the 1D-format used here into the 2D-format
    1D-format: 
        a single row represents one stride; 
        the first (nps) frames represent coordinate 1, the second (nps)
        frames represent coordinate 2, ...
    2D-format: 
        a single row represents one coordinate.
        The k-th stride is represented by the subsection [:,k*(nps):(k+1)*nps]    
    """
    ncoords = data1D.shape[1] // nps
    data2D = vstack(
        [hstack(data1D[:, nps * x:nps * (x + 1)]) for x in range(ncoords)])
    return data2D
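
A round-trip check tying this together with twoD_oneD from Example #44, assuming the NumPy names are in scope and nps divides the stride length:

from numpy import arange, hstack, vstack, allclose

nps = 5
data2D = arange(3 * 2 * nps).reshape(3, 2 * nps)   # 3 coordinates, 2 strides
data1D = twoD_oneD(data2D, nps)                    # shape (2, 15)
print(allclose(oneD_twoD(data1D, nps), data2D))    # True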
Example #51
def my_stats(node):
    """ Convenience function to generate a stats dict even if the pymc.Node has no trace
    
    :Parameters:
      - `node` : pymc.PyMCObjects.Deterministic

    :Results:
      - dictionary of statistics

    """
    try:
        return node.stats()
    except AttributeError:
        return {'mean': node.value,
                '95% HPD interval': pl.vstack((node.value, node.value)).T}
Example #52
def my_stats(node):
    """ Convenience function to generate a stats dict even if the pymc.Node has no trace
    
    :Parameters:
      - `node` : pymc.PyMCObjects.Deterministic

    :Results:
      - dictionary of statistics

    """
    try:
        return node.stats()
    except AttributeError:
        return {'mean': node.value,
                '95% HPD interval': pl.vstack((node.value, node.value)).T}
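
A minimal illustration with a hypothetical stand-in object that has no stats() method, so the AttributeError fallback path is taken:

import pylab as pl

class FakeNode(object):            # hypothetical: mimics a pymc node without a trace
    value = pl.array([1., 2.])

print(my_stats(FakeNode()))        # {'mean': array([1., 2.]), '95% HPD interval': ...}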
Example #53
def RecalcPhase(data):    
    """
    this function re-computes the phase of a given artificial Floquet system
    analogously to the way the phases are computed in the original system.
    
    Data must be given in a D x N-format, 
        D: number of Dimensions, 
        N: number of samples
    """
    
    p1 = -1.*data[0,:]              # R_Kne_y - L_kne_y
    p2 = data[5,:] - data[2,:]      # R_Trc_y - R_Anl_y
    p3 = data[6,:] - data[1,:]      # L_trc_y - L_Anl_y
    p4 = data[2,:] - data[1,:] - data[0,:]   # R_anl_y - L_anl_y   
    
    allPhrIn_TDR = [vstack((p1,p2,p3,p4)),]
    psec_TDR = [p4.copy(),]    
    phrIn = vstack((p1,p2,p3,p4))
    psecIn = p4.copy()  

    print('building phaser ...\n')
    Phaser = phaser2.Phaser2(y = allPhrIn_TDR, psecData = psec_TDR)
    print('computing phases ...\n')
    return Phaser.phaserEval(phrIn,psecIn).squeeze()
Example #54
File: io.py Project: MMaus/mutils
def read_kistler(fname):
    """
    reads the output text files from Kistler force plate data.

    :args:
        fname (str): file name of the data file (typically .txt export from
            Kistler bioware)

    :returns:
        dat (dict): dictionary containing the data ("Matlab(r)"-workspace)
    """
    data = []
    fieldnames = []

    desc_found = False
    n_past_desc = 0
    with open(fname) as f:
        for line in f:
            if desc_found:
                n_past_desc += 1
            elif 'description' in line.lower():
                desc_found = True

            if n_past_desc == 1:
                fieldnames = [fn.strip() for fn in line.split('\t')]
                if 'time' in fieldnames[0].lower(): # remove invalid name
                    fieldnames[0] = 'time'

            elif n_past_desc == 2:
                units = line.split('\t') # actually - this is ignored
            elif n_past_desc > 2:
                try:
                    numbers =[float(elem.replace(',','.')) for elem in
                        line.split('\t')]
                    data.append(numbers)
                except ValueError:
                    pass
    
    if not desc_found:
        raise ValueError('Line with "Description" not found - does not appear'
        ' to be a valid file!')

    data = vstack(data)
    d = {}
    for nr, fn in enumerate(fieldnames):
        d[fn] = data[:, nr]

    return d
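
A smoke test with a made-up two-column export, assuming read_kistler (and NumPy's vstack) as above; note the function expects tab-separated fields, decimal commas, and a line containing "Description":

import tempfile, os

content = "Description\nTime\tFX\n-\tN\n0,001\t1,5\n0,002\t2,5\n"
fname = os.path.join(tempfile.gettempdir(), 'kistler_demo.txt')
with open(fname, 'w') as f:
    f.write(content)
d = read_kistler(fname)
print(sorted(d.keys()), d['time'])   # ['FX', 'time'] [0.001 0.002]
os.remove(fname)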
Example #55
def main():
    pH, pMg, I, T = (7.0, 3, 0.1, 298.15)

    db = SqliteDatabase('../res/gibbs.sqlite')
    kegg = Kegg.getInstance()
    alberty = PsuedoisomerTableThermodynamics(
        '../data/thermodynamics/alberty_pseudoisomers.csv')

    cids = alberty.get_all_cids()
    dG0_f = pylab.zeros((len(cids), 1))

    for i, cid in enumerate(cids):
        dG0_f[i, 0] = alberty.cid2dG0_tag(cid, pH=pH, pMg=pMg, I=I, T=T)

    S = pylab.zeros((0, len(cids)))
    rids = []
    ec_numbers = []

    for rid in kegg.get_all_rids():
        sparse = kegg.rid2sparse_reaction(rid)
        if not set(cids).issuperset(sparse.keys()):
            continue

        rids.append(rid)
        ec_numbers.append(kegg.rid2ec_list(rid))
        S_row = pylab.zeros((1, len(cids)))
        for cid, coeff in sparse.items():
            S_row[0, cids.index(cid)] = coeff
        S = pylab.vstack([S, S_row])

    dG0_r = pylab.dot(S, dG0_f)

    util._mkdir('../res/arren')
    s_writer = csv.writer(open('../res/arren/stoichiomety.csv', 'w'))
    r_writer = csv.writer(open('../res/arren/reactions.csv', 'w'))
    e_writer = csv.writer(open('../res/arren/ec_numbers.csv', 'w'))
    r_writer.writerow(['rid', 'dG0_r'])
    e_writer.writerow(['rid', 'ec0', 'ec1', 'ec2', 'ec3'])
    for i in range(S.shape[0]):
        s_writer.writerow(["%d" % x for x in S[i, :]])
        for ec in ec_numbers[i].split(';'):
            e_writer.writerow(['%d' % rids[i]] + ec.split('.'))
        r_writer.writerow(["%d" % rids[i], '%.1f' % dG0_r[i, 0]])

    c_writer = csv.writer(open('../res/arren/compounds.csv', 'w'))
    c_writer.writerow(['cid', 'dG0_f'])
    for j in range(len(cids)):
        c_writer.writerow(['%d' % cids[j], '%.1f' % dG0_f[j, 0]])
Example #56
def plot_color_gradients(cmap_category, cmap_list):
    global ncolums, number, DBtable
    axes[0][ncolums].set_title(cmap_category + ' Monitoring', fontsize=14)
    for ax, name in zip(axes, cmap_list):
        gradient_vstack = pylab.vstack((value[number], value[number]))
        number += 1
        ax[ncolums].imshow(gradient_vstack, aspect='auto', cmap='Blues')
        pos = list(ax[ncolums].get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3]/2.
        fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
   
        # Turn off *all* ticks & spines, not just the ones with colormaps.
        for ax in axes:
            ax[ncolums].set_axis_off()
    ncolums = ncolums + 1
Example #57
def main():
    pH, pMg, I, T = (7.0, 3, 0.1, 298.15)
    
    db = SqliteDatabase('../res/gibbs.sqlite')
    kegg = Kegg.getInstance()
    alberty = PsuedoisomerTableThermodynamics('../data/thermodynamics/alberty_pseudoisomers.csv')
    
    cids = alberty.get_all_cids()
    dG0_f = pylab.zeros((len(cids), 1))

    for i, cid in enumerate(cids):
        dG0_f[i, 0] = alberty.cid2dG0_tag(cid, pH=pH, pMg=pMg, I=I, T=T)
    
    S = pylab.zeros((0, len(cids)))
    rids = []
    ec_numbers = []
    
    for rid in kegg.get_all_rids():
        sparse = kegg.rid2sparse_reaction(rid)
        if not set(cids).issuperset(sparse.keys()):
            continue
        
        rids.append(rid)
        ec_numbers.append(kegg.rid2ec_list(rid))
        S_row = pylab.zeros((1, len(cids)))
        for cid, coeff in sparse.items():
            S_row[0, cids.index(cid)] = coeff
        S = pylab.vstack([S, S_row])
    
    dG0_r = pylab.dot(S, dG0_f)

    util._mkdir('../res/arren')
    s_writer = csv.writer(open('../res/arren/stoichiomety.csv', 'w'))
    r_writer = csv.writer(open('../res/arren/reactions.csv', 'w'))
    e_writer = csv.writer(open('../res/arren/ec_numbers.csv', 'w'))
    r_writer.writerow(['rid', 'dG0_r'])
    e_writer.writerow(['rid', 'ec0', 'ec1', 'ec2', 'ec3'])
    for i in range(S.shape[0]):
        s_writer.writerow(["%d" % x for x in S[i,:]])
        for ec in ec_numbers[i].split(';'):
            e_writer.writerow(['%d' % rids[i]] + ec.split('.'))
        r_writer.writerow(["%d" % rids[i], '%.1f' % dG0_r[i,0]])
    
    c_writer = csv.writer(open('../res/arren/compounds.csv', 'w'))
    c_writer.writerow(['cid', 'dG0_f'])
    for j in range(len(cids)):
        c_writer.writerow(['%d' % cids[j], '%.1f' % dG0_f[j, 0]])