def reduceDim(fullmat, n=1):
    """
    Reduces the dimension of a d x d matrix to a (d-n) x (d-n) matrix,
    keeping the largest singular values unchanged.
    """
    u, s, v = svd(fullmat)
    return dot(u[:-n, :-n], dot(diag(s[:-n]), v[:-n, :-n]))
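# Usage sketch for reduceDim (not from the original source; assumes the numpy
# names its body uses, e.g. `from numpy import dot, diag` and
# `from numpy.linalg import svd`).
from numpy import dot, diag
from numpy.linalg import svd
from numpy.random import randn

A = randn(5, 5)
A4 = reduceDim(A, n=1)   # 4 x 4; intended to keep the largest singular values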
def createSimilarAR(data):
    """
    Creates an AR process that is similar to a given data set.
    data must be given in n x d format.
    """
    # step 1: get "average" fit matrix
    l_A = []
    for rep in arange(100):
        idx = randint(0, data.shape[0] - 1, data.shape[0] - 1)
        idat = data[idx, :]
        odat = data[idx + 1, :]
        l_A.append(lstsq(idat, odat)[0])
    sysmat = meanMat(l_A).T
    # idea: get "dynamic noise" from input data as difference of
    # expected vs. predicted data:
    #     eta_i = (sysmat * (data[:, i-1]).T - data[:, i])
    # however, in order to destroy any possible correlations in the
    # input noise (they would also occur in the output), the
    # noise per section has to be permuted.
    prediction = dot(sysmat, data[:-1, :].T)
    dynNoise = data[1:, :].T - prediction
    res = [zeros((dynNoise.shape[0], 1)), ]
    for nidx in permutation(dynNoise.shape[1]):
        res.append(dot(sysmat, res[-1]) + dynNoise[:, nidx][:, newaxis])
    return hstack(res).T
def varRed(idat, odat, A, bootstrap=None):
    """
    Computes the variance reduction when using A * idat[:, x].T as predictor
    for odat[:, x].T.
    If bootstrap is an integer > 1, a bootstrap with the given number of
    iterations will be performed.
    Returns tVred, sVred: the total relative variance after prediction (all
    coordinates) and the variance reduction for each coordinate separately.
    These data are a scalar and an array, or lists of scalars and arrays when
    a bootstrap is performed.
    Note: in the bootstrapped results, the first element refers to the "full"
    data variance reduction.
    """
    nBoot = bootstrap if type(bootstrap) is int else 0
    if nBoot < 2:
        nBoot = 0
    odat_pred = dot(A, idat.T)
    rdiff = odat_pred - odat.T                          # remaining difference
    rvar = var(rdiff, axis=1) / var(odat.T, axis=1)     # relative variance
    trvar = var(rdiff.flat) / var(odat.T.flat)          # total relative variance
    if nBoot > 0:
        rvar = [rvar, ]
        trvar = [trvar, ]
        for rep in range(nBoot - 1):
            indices = randint(0, odat.T.shape[1], odat.T.shape[1])
            odat_pred = dot(A, idat[indices, :].T)
            rdiff = odat_pred - odat[indices, :].T      # remaining difference
            rvar.append(var(rdiff, axis=1) / var(odat.T, axis=1))   # relative variance
            trvar.append(var(rdiff.flat) / var(odat.T.flat))        # total relative variance
    return trvar, rvar
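# Usage sketch for varRed (not from the original source; assumes the numpy
# names its body uses, e.g. dot/var/randint via `from pylab import *`).
import numpy as np

idat = np.random.randn(500, 3)                          # n x d input data
A = 0.9 * np.eye(3)
odat = np.dot(A, idat.T).T + 0.1 * np.random.randn(500, 3)
trvar, rvar = varRed(idat, odat, A)                     # scalar and 3-vector
trvars, rvars = varRed(idat, odat, A, bootstrap=100)    # lists; first = full data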
def Global_Stiffness(self):
    '''
    Generates the global stiffness matrix for the plane structure.
    '''
    elem = self.element
    B = py.zeros((6, 6))
    for i in range(0, py.size(elem, 0)):  # for each element, find the stiffness matrix
        K = py.zeros((self.n_nodes * 2, self.n_nodes * 2))
        el = elem[i]
        # nodes formatted for input
        [node1, node2, node3] = el
        node1x = 2 * (node1 - 1)
        node2x = 2 * (node2 - 1)
        node3x = 2 * (node3 - 1)
        node1y = 2 * (node1 - 1) + 1
        node2y = 2 * (node2 - 1) + 1
        node3y = 2 * (node3 - 1) + 1
        # area, strain matrix and E matrix multiplied to get the element stiffness
        [J, B] = self.B(el)
        local_k = 0.5 * abs(J) * py.dot(py.transpose(B), py.dot(self.E_matrix, B))
        if self.debug:
            print('K for elem', el, '\n', local_k)
        # element K-matrix converted into global K-matrix format
        idx = py.ix_([node1x, node1y, node2x, node2y, node3x, node3y],
                     [node1x, node1y, node2x, node2y, node3x, node3y])
        K[idx] = K[idx] + local_k
        # adding the contribution into the global stiffness
        self.k_global = self.k_global + K
    if self.debug:
        print('Global Stiffness', '\n', self.k_global)
def ssc(signal, samplerate=16000, winlen=0.025, winstep=0.01,
        nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97):
    """Compute Spectral Subband Centroid features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
    """
    highfreq = highfreq or samplerate / 2
    signal = sigproc.preemphasis(signal, preemph)
    frames = sigproc.framesig(signal, winlen * samplerate, winstep * samplerate)
    pspec = sigproc.powspec(frames, nfft)
    pspec = pylab.where(pspec == 0, pylab.finfo(float).eps, pspec)  # if things are all zeros we get problems

    fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = pylab.dot(pspec, fb.T)  # compute the filterbank energies
    R = pylab.tile(pylab.linspace(1, samplerate / 2, pylab.size(pspec, 1)),
                   (pylab.size(pspec, 0), 1))

    return pylab.dot(pspec * R, fb.T) / feat
def Strain_stress(self):
    '''
    Strain is obtained by Strain_Matrix (B) x Displacement (u).
    Stress is Stiffness_Matrix (E) x Strain.
    Von Mises stress is also computed to study convergence.
    '''
    # copy displacement vector after reshaping with two columns for x and y
    d = self.displacement.reshape(self.n_nodes, 2)
    # stress-strain calculation for each element
    for i in range(self.n_el):
        el = self.element[i]  # present element
        # displacement formatted for an element
        disp = py.array([d[el[0]][0], d[el[0]][1],
                         d[el[1]][0], d[el[1]][1],
                         d[el[2]][0], d[el[2]][1]])
        # element strain vector = product of strain matrix and displacement
        [J, B] = self.B(el)
        strain = py.dot(B, disp.T)
        self.strain_vector[i] = strain
        # element stress vector = product of E-matrix and strain vector
        stress = py.dot(self.E_matrix, strain)
        self.stress_vector[i] = stress
        # von Mises stress for plotting
        self.von[i] = py.math.sqrt(0.5 * ((stress[0] - stress[1])**2
                                          + stress[0]**2 + stress[1]**2
                                          + 6 * (stress[2])**2))
def Q_calc(self, X):
    """
    Calculates the Q (n_x by n_theta) matrix of the IDE model at each time step.

    Arguments
    ----------
    X: list of ndarray
        state vectors

    Returns
    ---------
    Q : list of ndarray (n_x by n_theta)
    """
    Q = []
    T = len(X)
    Psi = self.model.Gamma_inv_psi_conv_Phi
    Psi_T = pb.transpose(self.model.Gamma_inv_psi_conv_Phi, (0, 2, 1))
    for t in range(T):
        firing_rate_temp = pb.dot(X[t].T, self.model.Phi_values)
        firing_rate = self.model.act_fun.fmax / (
            1. + pb.exp(self.model.act_fun.varsigma
                        * (self.model.act_fun.v0 - firing_rate_temp)))
        # calculate q
        g = pb.dot(firing_rate, Psi_T)
        g *= (self.model.spacestep**2)
        q = self.model.Ts * g
        q = q.reshape(self.model.nx, self.model.n_theta)
        Q.append(q)
    return Q
def renormalize(x_unpurt, x_before, x_purt, epsilon, N):
    # BEFORE ANYTHING: make sure particles near boundaries are shuffled into
    # places where the seam is not between any perturbed and fiducial
    # trajectories.
    x_unpurt, x_purt = shuff(x_unpurt, x_purt, N)
    # The trajectory we are going to return is the new one for the next run;
    # call it x_new. Copy it because we are going to add small amounts to it
    # to perturb it.
    x_new = pl.copy(x_unpurt)
    # Find a vector pointing in the direction of the trajectory's path. For
    # this we need the fiducial point at t - dt, which is given to us as
    # x_before: take the vector between x_before and x_unpurt.
    traj_vec = x_unpurt - x_before
    # normalize it
    traj_vec = traj_vec / pl.sqrt(pl.dot(traj_vec, traj_vec))
    print('traj_vec magnitude (should be 1): ' + str(pl.sqrt(pl.dot(traj_vec, traj_vec))))
    # Now see how close the vector pointing from the fiducial to the perturbed
    # trajectory is to orthogonal with the trajectory. It should get closer to
    # orthogonal the longer we run, because it should be aligning itself with
    # the axis of greatest expansion, which should be orthogonal.
    # First normalize the difference vector.
    diff_vec = x_unpurt - x_purt
    # normalize it
    diff_vec = diff_vec / pl.sqrt(pl.dot(diff_vec, diff_vec))
    print('diff_vec magnitude (should be 1): ' + str(pl.sqrt(pl.dot(diff_vec, diff_vec))))
    print('normalized(x_unpurt-x_purt)dot(traj_vec) (should get close to 0): '
          + str(pl.dot(diff_vec, traj_vec)))
    # For now just return a point moved back along the difference vector; no
    # Gram-Schmidt or anything.
    return x_new + epsilon * diff_vec
def fBM_nd(dims, H, return_mat=False, use_eig_ev=True):
    """
    Creates fractional Brownian motion.

    parameters: dims is a tuple of the shape of the sample path (n x d);
    H: Hurst exponent.
    This is the slow version of fBM. It might, however, be more precise than
    fBM: sometimes the matrix square root has a problem, which might induce
    inaccuracy.
    use_eig_ev: use eigenvalue decomposition for the matrix square root
    computation (faster)
    """
    n = dims[0]
    d = dims[1]
    Gamma = zeros((n, n))
    print('building ...\n')
    for t in arange(n):
        for s in arange(n):
            Gamma[t, s] = .5 * ((s + 1)**(2. * H) + (t + 1)**(2. * H)
                                - abs(t - s)**(2. * H))
    print('rooting ...\n')
    if use_eig_ev:
        ev, ew = eig(Gamma.real)
        Sigma = dot(ew, dot(diag(sqrt(ev)), ew.T))
    else:
        Sigma = sqrtm(Gamma)
    if return_mat:
        return Sigma
    v = randn(n, d)
    return dot(Sigma, v)
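# Usage sketch for fBM_nd (not from the original source; assumes the
# pylab-style imports the function body relies on: zeros, arange, eig, diag,
# sqrt, dot, randn, e.g. via `from pylab import *`, and scipy's sqrtm).
path = fBM_nd((500, 2), H=0.7)                     # a 500 x 2 sample path
Sigma = fBM_nd((500, 2), H=0.7, return_mat=True)   # the 500 x 500 root matrix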
def getApices(y):
    """
    Returns the time (in frames) and position of the initial and final apex
    height from a given trajectory y; these are obtained by fitting a 2nd
    order polynomial to the first and last three samples.

    ==========
    parameter:
    ==========
    y : *array* (1D)
        the trajectory. Should ideally start ~1 frame before an apex and end
        ~1 frame behind an apex

    ========
    returns:
    ========
    [x0, xF], [y0, yF] : location (in frames) and value of the first and
        final apices
    """
    # The math behind this: fit a 2nd order polynomial and find the root of
    # its derivative. Here, only the results are applied; that's why the
    # coefficients appear like "magic" numbers.
    c = dot(array([[.5, -1, .5], [-1.5, 2., -.5], [1., 0., 0.]]), y[:3])
    x0 = -1. * c[1] / (2. * c[0])
    y0 = polyval(c, x0)
    c = dot(array([[.5, -1, .5], [-1.5, 2., -.5], [1., 0., 0.]]), y[-3:])
    xF = -1. * c[1] / (2. * c[0])
    yF = polyval(c, xF)
    xF += len(y) - 3
    return [x0, xF], [y0, yF]
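# Usage sketch for getApices (not from the original source; imports supply
# the numpy names the function body uses).
from numpy import array, dot, polyval, arange

y = -(arange(10) - 4.5)**2             # single hump with its apex at frame 4.5
(x0, xF), (y0, yF) = getApices(y)      # x0 == xF == 4.5, y0 == yF == 0.0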
def rotate_molecule(coords, rotp=m.array((0., 0., 0.)), phi=0.,
                    theta=0., psi=0.):
    """Rotate a molecule via Euler angles.

    See http://mathworld.wolfram.com/EulerAngles.html for definition.
    Input arguments:
    coords: Atom coordinates, as Nx3 2d pylab array.
    rotp: The point to rotate about, as a 1d 3-element pylab array
    phi: The 1st rotation angle around z axis.
    theta: Rotation around x axis.
    psi: 2nd rotation around z axis.
    """
    # First move the molecule to the origin.
    # In contrast to MATLAB, numpy broadcasts the smaller array to the larger
    # row-wise, so there is no need to play with the Kronecker product.
    rcoords = coords - rotp
    # First Euler rotation about z in matrix form
    D = m.array(((m.cos(phi), m.sin(phi), 0.),
                 (-m.sin(phi), m.cos(phi), 0.),
                 (0., 0., 1.)))
    # Second Euler rotation about x:
    C = m.array(((1., 0., 0.),
                 (0., m.cos(theta), m.sin(theta)),
                 (0., -m.sin(theta), m.cos(theta))))
    # Third Euler rotation, 2nd rotation about z:
    B = m.array(((m.cos(psi), m.sin(psi), 0.),
                 (-m.sin(psi), m.cos(psi), 0.),
                 (0., 0., 1.)))
    # Total Euler rotation
    A = m.dot(B, m.dot(C, D))
    # Do the rotation
    rcoords = m.dot(A, m.transpose(rcoords))
    # Move back to the rotation point
    return m.transpose(rcoords) + rotp
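# Usage sketch for rotate_molecule (not from the original source; assumes the
# module alias `import pylab as m` implied by the m.* calls above).
import pylab as m

atom = m.array([[1., 0., 0.]])
# a 90-degree first Euler rotation about z; with this sign convention
# (1, 0, 0) maps to (0, -1, 0)
print(rotate_molecule(atom, phi=m.pi / 2))   # ~ [[0., -1., 0.]]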
def main():
    mu = pl.array([[0], [12], [24], [36]])
    Sigma = pl.array([[3.01602775, 1.02746769, -3.60224613, -2.08792829],
                      [1.02746769, 5.65146472, -3.98616664, 0.48723704],
                      [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
                      [-2.08792829, 0.48723704, -1.59255406, 8.28742469]])

    # The data matrix is created for the above mu and Sigma.
    d, U = pl.eig(Sigma)
    L = pl.diagflat(d)
    A = pl.dot(U, pl.sqrt(L))
    X = pl.randn(4, 1000)

    # Y is the data matrix of random samples.
    Y = pl.dot(A, X) + pl.tile(mu, 1000)

    pl.figure(1)
    pl.clf()
    pl.plot(X[0], Y[1], '+', color='#0000FF', label='i=0,j=1')
    pl.plot(X[0], Y[2], '+', color='#FF0000', label='i=0,j=2')
    pl.plot(X[0], Y[3], '+', color='#00FF00', label='i=0,j=3')
    pl.plot(X[1], Y[0], 'x', color='#FFFF00', label='i=1,j=0')
    pl.plot(X[1], Y[2], 'x', color='#00FFFF', label='i=1,j=2')
    pl.plot(X[1], Y[3], 'x', color='#444444', label='i=1,j=3')
    pl.plot(X[2], Y[0], '.', color='#774411', label='i=2,j=0')
    pl.plot(X[2], Y[1], '.', color='#222222', label='i=2,j=1')
    pl.plot(X[2], Y[3], '.', color='#AAAAAA', label='i=2,j=3')
    pl.plot(X[3], Y[0], '+', color='#FFAA22', label='i=3,j=0')
    pl.plot(X[3], Y[1], '+', color='#22AAFF', label='i=3,j=1')
    pl.plot(X[3], Y[2], '+', color='#FFDD00', label='i=3,j=2')
    pl.legend()
    pl.savefig('fig21.png')
def estimate_kernel(self, X, P, M):
    """Estimate the IDE model's kernel weights from data stored in the ide object."""
    # form Xi variables
    Xi_0 = pb.zeros([self.model.nx, self.model.nx])
    Xi_1 = pb.zeros([self.model.nx, self.model.nx])
    for t in range(1, len(X)):
        Xi_0 += pb.dot(X[t - 1, :].reshape(self.model.nx, 1),
                       X[t, :].reshape(self.model.nx, 1).T) \
                + M[t, :].reshape(self.model.nx, self.model.nx)
        Xi_1 += pb.dot(X[t - 1, :].reshape(self.model.nx, 1),
                       X[t - 1, :].reshape(self.model.nx, 1).T) \
                + P[t - 1, :].reshape(self.model.nx, self.model.nx)

    # form Upsilon and upsilons
    Upsilon = pb.zeros([self.model.ntheta, self.model.ntheta])
    upsilon0 = pb.zeros([1, self.model.ntheta])
    upsilon1 = pb.zeros([1, self.model.ntheta])
    for i in range(self.model.nx):
        for j in range(self.model.nx):
            Upsilon += Xi_1[i, j] * self.model.Delta_Upsilon[j, i]
            upsilon0 += Xi_0[i, j] * self.model.Delta_upsilon[j, i]
            upsilon1 += Xi_1[i, j] * self.model.Delta_upsilon[j, i]
    upsilon1 = upsilon1 * self.model.xi
    Upsilon = Upsilon * self.model.Ts * self.model.varsigma

    weights = pb.dot(pb.inv(Upsilon.T), upsilon0.T - upsilon1.T)
    return weights
def plotEnsemble2D(ens, v1, v2, colordata=None, hess=None,
                   size=50, labelBest=True, ensembleAlpha=0.75, contourAlpha=1.0):
    """
    Plots a 2-dimensional projection of a given parameter ensemble, along
    given directions:
         -- If v1 and v2 are scalars, project onto the plane given by those
            two bare parameter directions.
         -- If v1 and v2 are vectors, project onto those two vectors.

    When given colordata (either a single color, or an array of different
    colors the length of the ensemble size), each point will be assigned a
    color based on the colordata.

    With labelBest set, the first point in the ensemble is plotted larger
    (to show the 'best fit' point for a usual parameter ensemble).

    If a Hessian is given, cost contours will be plotted using
    plotApproxContours2D.
    """
    if pylab.shape(v1) == ():
        xdata = pylab.transpose(ens)[v1]
        ydata = pylab.transpose(ens)[v2]

        # label axes
        param1name, param2name = '', ''
        try:
            paramLabels = ens[0].keys()
        except:
            paramLabels = None
        if paramLabels is not None:
            param1name = ' (' + paramLabels[v1] + ')'
            param2name = ' (' + paramLabels[v2] + ')'
        pylab.xlabel('Parameter ' + str(v1) + param1name)
        pylab.ylabel('Parameter ' + str(v2) + param2name)
    else:
        xdata = pylab.dot(ens, v1)
        ydata = pylab.dot(ens, v2)

    if colordata is None:
        colordata = pylab.ones(len(xdata))

    if labelBest:  # plot first as larger circle
        if pylab.shape(colordata) == ():  # single color
            colordata0 = colordata
            colordataRest = colordata
        else:  # specified colors
            colordata0 = [colordata[0]]
            colordataRest = colordata[1:]
        scatterColors(xdata[1:], ydata[1:], colordataRest,
                      size, alpha=ensembleAlpha)
        scatterColors([xdata[0]], [ydata[0]], colordata0,
                      size * 4, alpha=ensembleAlpha)
    else:
        scatterColors(xdata, ydata, colordata, size, alpha=ensembleAlpha)

    if hess is not None:
        plotApproxContours2D(hess, v1, v2, pylab.array(ens[0]),
                             alpha=contourAlpha)
def brute_force_Z_vis_gauss(W):
    v, h = W.v, W.h
    Z = zeros(2**h)
    for i in range(2**h):
        H = int_to_bin(i, h)
        b = W.T() * H
        Z[i] = .5 * dot(b, b.T) + dot(H, W[2])
    return log_sum_exp(Z)
def simCSLIP_xp(x0, x0R, x0L, p0R, p0L, AR, AL, SLIP_param0, n=50):
    """
    Simulates the controlled 2-step SLIP, using [x, p]-referenced control.

    input:
        x0 - initial (augmented) state, e.g. [x0L, p0R].T
        x0R - reference right apex (y, vx, vz)
        x0L - reference left apex -"-
        p0R - reference right parameters
        p0L - reference left parameters
        AR - parameter control right leg
        AL - parameter control left leg
        SLIP_param0: dict, containing {'m': ..., 'g': ... }
        n - number of strides to simulate at most
    """
    res = []
    refStateL = hstack([x0L, squeeze(sp_d2a(p0R))])[:, newaxis]
    refStateR = hstack([x0R, squeeze(sp_d2a(p0L))])[:, newaxis]
    currState = array(x0)
    slip_params = copy.deepcopy(SLIP_param0)
    if currState.ndim == 1:
        currState = currState[:, newaxis]
    elif currState.shape[0] == 1:
        currState = currState.T
    for step in range(n):
        #print('AL: ', AL.shape, 'p0L: ', sp_d2a(p0L).shape)
        pL = sp_d2a(p0L) + dot(AL, currState - refStateL)
        #print('pL changed:', not allclose(pL, sp_d2a(p0L)))
        slip_params.update(sp_a2d(pL))
        try:
            resL = sl.SLIP_step3D(currState[:3, 0], slip_params)
        except ValueError:
            print('simulation aborted (l1)\n')
            break
        if resL['sim_fail']:
            print('simulation aborted (l2)\n')
            break
        res.append(resL)
        currState = hstack([resL['y'][-1], resL['vx'][-1], resL['vz'][-1],
                            squeeze(pL)])[:, newaxis]
        pR = sp_d2a(p0R) + dot(AR, currState - refStateR)
        #print('pR changed:', not allclose(pR, sp_d2a(p0R)))
        slip_params.update(sp_a2d(pR))
        try:
            resR = sl.SLIP_step3D(currState[:3, 0], slip_params)
        except ValueError:
            print('simulation aborted (r1)\n')
            break
        if resR['sim_fail']:
            print('simulation aborted (r2)\n')
            break
        res.append(resR)
        currState = hstack([resR['y'][-1], resR['vx'][-1], resR['vz'][-1],
                            squeeze(pR)])[:, newaxis]
    return res
def state_equation(self, x):
    '''State equation for sigma point propagation.'''
    firing_rate_temp = pb.dot(x.T, self.Phi_values)
    firing_rate = self.act_fun.fmax / (
        1. + pb.exp(self.act_fun.varsigma
                    * (self.act_fun.v0 - firing_rate_temp))).T
    g = pb.dot(self.Gamma_inv_Psi_conv_Phi, firing_rate)
    g *= (self.spacestep**2)
    x = self.Ts * g + self.xi * x
    return x
def JointValuesFromTransform(robot, Tdesired):
    """
    Find the dummy joint values such that the transform of the base link is
    Tdesired. T0 is the initial transform of the base link.
    """
    R = dot(inv(robot.baselinkinittransform[0:3, 0:3]), Tdesired[0:3, 0:3])
    [h1, h2, h3] = AnglesFromRot(R)
    offset = dot(R, robot.baselinkinittransform[0:3, 3])
    [s1, s2, s3] = Tdesired[0:3, 3] - offset
    return [s1, s2, s3, h1, h2, h3]
def _get_eff(window_positions, window_steps, window_length):
    s2 = 0
    dispVec = window_steps[window_length - 1, :] - window_steps[0, :]
    disp = pl.dot(dispVec, dispVec)
    for i in range(window_length - 1):
        s2 += pl.dot(window_steps[i, :], window_steps[i, :])
    efficiency = disp / (window_length * s2 + epsilon)
    return efficiency
def find_factors(idat, odat, k=None):
    """
    A routine to compute the main predictors (linear combinations of
    coordinates) in idat to predict odat.

    *Parameters*
        idat: d x n data matrix, with n measurements, each of dimension d
        odat: q x n data matrix, with n measurements, each of dimension q

    *Returns*
        **Depending on whether or not** *k* **is provided, the returned
        value is different**

        * if k is given, compute the first k regressors and return an
          orthogonal matrix that contains the regressors in its columns,
          i.e. regs[:, 0] is the first regressor

        * if k is not given or None, return a d-dimensional vector v(k)
          explaining which fraction of the total predictable variance can
          be explained using only k regressors.

    **NOTE**

    #. idat and odat must have zero mean
    #. To interpret the regressors, it is advisable that the columns of
       idat have the same variance
    """
    # transform into z-scores
    u, s, v = svd(idat, full_matrices=False)
    su = dot(diag(1. / s), u.T)
    z = dot(su, idat)
    # ! Note that the covariance of z is *NOT* 1, but 1/n; z*z.T = 1 !
    # least-squares regression:
    A = dot(odat, pinv(z))
    uA, sigma_A, vA = svd(A, full_matrices=False)
    if k is None:
        vk = cumsum(sigma_A**2) / sum(sigma_A**2)
        return vk
    else:
        # choose k predictors
        sigma_A1 = sigma_A.copy()
        sigma_A1[k:] = 0
        A1 = dot(uA, dot(diag(sigma_A1), vA))
        B = dot(A1, su)
        uB, sigma_B, vB = svd(B, full_matrices=False)
        regs = vB[:k, :].T
        return regs
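# Usage sketch for find_factors (not from the original source; assumes the
# numpy names its body uses, e.g. svd/pinv/dot/diag/cumsum via
# `from pylab import *`).
import numpy as np

rng = np.random.default_rng(0)
idat = rng.standard_normal((5, 200))                       # 5-d input, 200 samples
odat = idat[:2, :] + 0.1 * rng.standard_normal((2, 200))   # 2-d output
idat -= idat.mean(axis=1, keepdims=True)                   # zero mean, as required
odat -= odat.mean(axis=1, keepdims=True)
print(find_factors(idat, odat))        # cumulative fraction of predictable variance
regs = find_factors(idat, odat, k=2)   # first two regressors, a 5 x 2 matrix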
def Strain_stress(self, el):
    # displacement formatted for an element
    d = py.array([self.displacement[el[0] - 1][0], self.displacement[el[0] - 1][1],
                  self.displacement[el[1] - 1][0], self.displacement[el[1] - 1][1],
                  self.displacement[el[2] - 1][0], self.displacement[el[2] - 1][1]])
    # element strain vector = product of strain matrix and displacement
    [J, B] = self.B(el)
    strain = py.dot(B, d.T)
    # element stress vector = product of E-matrix and strain vector
    stress = py.dot(self.E_matrix, strain)
    return strain, stress
def new_bad_model(F):
    """ Results in a matrix with shape matching F, but all rows sum to 1 """
    N, T, J = F.shape
    pi = pl.zeros_like(F)
    for t in range(T):
        u = F[:, t, :].var(axis=0)
        u /= pl.sqrt(pl.dot(u, u))
        F_t_par = pl.dot(pl.atleast_2d(pl.dot(F[:, t, :], u)).T, pl.atleast_2d(u))
        F_t_perp = F[:, t, :] - F_t_par
        for n in range(N):
            alpha = (1 - F_t_perp[n].sum()) / F_t_par[n].sum()
            pi[n, t, :] = F_t_perp[n, :] + alpha * F_t_par[n, :]
    return pi
def __init__(self, x, y, cov, deg):
    self.deg = deg
    self.x = x
    self.y = y
    self.cov = cov
    self.icov = pl.linalg.inv(cov)
    self.M = pl.zeros([deg + 1, len(x)])
    for i in range(deg + 1):
        self.M[i, :] = x**(deg - i)
    self.MC = pl.dot(self.M, self.icov)
    self.MCM = pl.dot(self.M, pl.transpose(self.MC))
    self.fit()
def pc_pm_std(data, ndim):
    """
    This is a helper function. It returns the value of +1 * std(x), where x
    is the ndim-th principal component of the data.

    Parameters:
    -----------
    data: `array` (*n*-by-*d*)
        the data on which the principal component analysis is performed.
    ndim: `integer`
        the number of the principal axis on which the analysis is performed.
        **NOTE** this is zero-based, i.e. to compute the first principal
        component, ndim=0

    Returns:
    --------
    std_pc: `array` (1-by-*d*)
        the vector that points in the direction of the *ndim*-th principal
        axis, and has the length of the standard deviation of the scores
        along this axis.
    """
    u, s, v = svd(data.T, full_matrices=False)
    direction = u[:, ndim:ndim + 1]
    scale = std(dot(direction.T, data.T))
    return scale * direction.T
def cov_ar(A, n=200):
    """
    Returns the covariance matrix of an AR(1)-process of the following form:
        x_(n+1) = A * x_n + eta,
    where eta is iid noise.
    The result can be computed by sum_k=0^infinity [A^k A.T^k]

    ==========
    Parameter:
    ==========
    A : *array*
        the system matrix of the AR(1)-process
    n : *integer*
        number of matrix powers to compute before convergence is assumed.

    ========
    Returns:
    ========
    P : *array*
        the positive-semidefinite symmetric covariance matrix of the
        process output
    """
    d = [dot(mpow(A, k), mpow(A.T, k)) for k in arange(n)]
    return sum(d)
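# Usage sketch for cov_ar (not from the original source; assumes `mpow` is a
# matrix power, e.g. numpy.linalg.matrix_power, and dot/arange from numpy).
# For the scalar case A = 0.5, the series sums to 1 / (1 - 0.25):
from numpy import array, arange, dot
from numpy.linalg import matrix_power as mpow

print(cov_ar(array([[0.5]])))   # ~ [[1.3333]]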
def wallCalc(self, wall, container, particle):
    distFromWall, xWall, yWall = wall.wallDist(container.xpos[particle],
                                               container.ypos[particle])
    wallAclX = 0
    wallAclY = 0
    #debug_here()
    if distFromWall <= (2**(1. / 6.)):
        if (container.ypos[particle] >= wall.endpointsY[0]
                and container.ypos[particle] <= wall.endpointsY[1]) or wall.isHor:
            # K1 = (self.sigma/distFromWall)**12
            # K2 = (self.sigma/distFromWall)**6
            # K3 = 2*K1 - K2
            # wallMag = 24*(self.epsilon/distFromWall)*K3
            wallMag = self.forceCalc(distFromWall)
            displacement = array([xWall, yWall])
            unitVector = displacement / (displacement**2)
            nans = isnan(unitVector)
            unitVector[nans] = 0.
            dotProduct = dot(displacement,
                             (container.xvel[particle], container.yvel[particle]))
            #debug_here()
            forceVector = -1 * self.gamma * dotProduct * unitVector
            wallAclX = forceVector[0]
            wallAclX += wallMag * (xWall / distFromWall)
            wallAclY = forceVector[1]
            wallAclY += wallMag * (yWall / distFromWall)
            # xacl[particle] += wallAclX
            # yacl[particle] += wallAclY
    return wallAclX, wallAclY
def find_convex_hull(X, num_iter, num_points=None):
    """
    If num_points is set to None, find_convex_hull will return all the points
    in the convex hull (that have been found) sorted according to their
    sharpness. Otherwise, it will return the N sharpest points.
    """
    (N, D) = X.shape
    if num_points is None:
        num_points = N

    # Randomly choose 'num_iter' directions on the unit sphere. Find the
    # maximal point in each chosen direction, and add 1 to its counter.
    # Only points on the convex hull will be hit, and 'sharp' corners will
    # have more hits than 'smooth' corners.
    hits = p.zeros((N, 1))
    for j in range(num_iter):
        a = p.randn(D)
        a = a / p.norm(a)
        i = p.dot(X, a).argmax()
        hits[i] += 1

    # don't take points with 0 hits
    num_points = min(num_points, len(p.find(hits)))

    # the indices of the n best points
    o = list(p.argsort(hits, 0)[range(-1, -(num_points + 1), -1)].flat)
    return X[o, :]
def test_covariate_model_sim_no_hierarchy():
    # simulate normal data
    model = data.ModelData()
    model.hierarchy, model.output_template = data_simulation.small_output()

    X = mc.rnormal(0., 1.**2, size=(128, 3))

    beta_true = [-.1, .1, .2]
    Y_true = pl.dot(X, beta_true)

    pi_true = pl.exp(Y_true)
    sigma_true = .01 * pl.ones_like(pi_true)

    p = mc.rnormal(pi_true, 1. / sigma_true**2.)

    model.input_data = pandas.DataFrame(dict(value=p, x_0=X[:, 0],
                                             x_1=X[:, 1], x_2=X[:, 2]))
    model.input_data['area'] = 'all'
    model.input_data['sex'] = 'total'
    model.input_data['year_start'] = 2000
    model.input_data['year_end'] = 2000

    # create model and priors
    vars = {}
    vars.update(covariate_model.mean_covariate_model('test', 1, model.input_data,
                                                     {}, model, 'all', 'total', 'all'))
    vars.update(rate_model.normal_model('test', vars['pi'], 0., p, sigma_true))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)
def obs_lb(value=value, N=N,
           Xa=Xa, Xb=Xb,
           alpha=alpha, beta=beta, gamma=gamma,
           bounds_func=vars['bounds_func'],
           delta=delta,
           age_indices=ai,
           age_weights=aw):

    # calculate study-specific rate function
    shifts = pl.exp(pl.dot(Xa, alpha) + pl.dot(Xb, pl.atleast_1d(beta)))
    exp_gamma = pl.exp(gamma)
    mu_i = [pl.dot(weights, bounds_func(s_i * exp_gamma[ages], ages))
            for s_i, ages, weights in zip(shifts, age_indices, age_weights)]
    # TODO: try vectorizing this loop to increase speed

    rate_param = mu_i * N
    violated_bounds = pl.nonzero(rate_param < value)
    logp = mc.negative_binomial_like(value[violated_bounds],
                                     rate_param[violated_bounds],
                                     delta)
    return logp
def regressionPlotDescentBuiltin(X, Y, order, guess):
    pl.plot(X.T.tolist()[0], Y.T.tolist()[0], 'gs')

    # You will need to write the designMatrix and regressionFit functions.
    # Construct the design matrix (Bishop 3.16); the 0th column is just 1s.
    phi = designMatrix(X, order)
    # compute the weight vector
    wo = regressionFit(X, Y, phi)
    print('optimal w', wo)

    def f(w):
        #print(np.matrix(w).T)
        return SSE(X, Y, order, np.matrix(w).T)

    w = fmin_bfgs(f, guess)
    w = np.matrix(w).T
    print('descent w', w)
    print(SSE(X, Y, order, w))
    #print(SSEDer(X, Y, order, w))
    #print(num_gradient(lambda w: SSE(X, Y, order, w), w, 0.001))

    # produce a plot of the values of the function
    pts = [[p] for p in pl.linspace(min(X), max(X), 100)]
    A = np.matrix(pts)
    Yp = pl.dot(w.T, designMatrix(A, order).T)
    pl.plot(pts, Yp.tolist()[0])
def main():
    pH, pMg, I, T = (7.0, 3, 0.1, 298.15)

    db = SqliteDatabase('../res/gibbs.sqlite')
    kegg = Kegg.getInstance()
    alberty = PsuedoisomerTableThermodynamics(
        '../data/thermodynamics/alberty_pseudoisomers.csv')

    cids = alberty.get_all_cids()
    dG0_f = pylab.zeros((len(cids), 1))
    for i, cid in enumerate(cids):
        dG0_f[i, 0] = alberty.cid2dG0_tag(cid, pH=pH, pMg=pMg, I=I, T=T)

    S = pylab.zeros((0, len(cids)))
    rids = []
    ec_numbers = []
    for rid in kegg.get_all_rids():
        sparse = kegg.rid2sparse_reaction(rid)
        if not set(cids).issuperset(sparse.keys()):
            continue
        rids.append(rid)
        ec_numbers.append(kegg.rid2ec_list(rid))
        S_row = pylab.zeros((1, len(cids)))
        for cid, coeff in sparse.items():
            S_row[0, cids.index(cid)] = coeff
        S = pylab.vstack([S, S_row])

    dG0_r = pylab.dot(S, dG0_f)

    util._mkdir('../res/arren')
    s_writer = csv.writer(open('../res/arren/stoichiomety.csv', 'w'))
    r_writer = csv.writer(open('../res/arren/reactions.csv', 'w'))
    e_writer = csv.writer(open('../res/arren/ec_numbers.csv', 'w'))
    r_writer.writerow(['rid', 'dG0_r'])
    e_writer.writerow(['rid', 'ec0', 'ec1', 'ec2', 'ec3'])
    for i in range(S.shape[0]):
        s_writer.writerow(["%d" % x for x in S[i, :]])
        for ec in ec_numbers[i].split(';'):
            e_writer.writerow(['%d' % rids[i]] + ec.split('.'))
        r_writer.writerow(["%d" % rids[i], '%.1f' % dG0_r[i, 0]])

    c_writer = csv.writer(open('../res/arren/compounds.csv', 'w'))
    c_writer.writerow(['cid', 'dG0_f'])
    for j in range(len(cids)):
        c_writer.writerow(['%d' % cids[j], '%.1f' % dG0_f[j, 0]])
def helecho(C, P, K=10000):
    """
    Fern function. Computes a set of points by iterating K times functions of
    the form f(x, y) = Mx + B, where M is a square matrix and x, B are column
    vectors.

    The parameter C is a list of lists, where each element contains the
    entries of the matrices M and B: the first 4 are the entries of M and the
    last two are B.

    The parameter P = [p1, p2, p3, p4] is the set of probabilities with which
    each equation is chosen, such that
    [0, p1) U [p1, p2) U [p2, p3) U [p3, p4] = [0, 1].
    """
    x0, y0 = 0.5, 0.
    X = [x0]
    Y = [y0]
    p1, p2, p3, p4 = P
    C0, C1, C2, C3 = C
    for i in range(K):
        x = X[-1]
        y = Y[-1]
        Pa = array([[x], [y]])
        rand = random.uniform(0, 1)
        if rand < p1:
            a, b, c, d, e, f = C0
        elif (p1 <= rand) and (rand <= p2):
            a, b, c, d, e, f = C1
        elif (p2 < rand) and (rand <= p3):
            a, b, c, d, e, f = C2
        elif (p3 < rand) and (rand <= p4):
            a, b, c, d, e, f = C3
        M = array([[a, b], [c, d]])
        B = array([[e], [f]])
        Pn = dot(M, Pa)
        Pn = Pn + B
        x = Pn[0][0]
        y = Pn[1][0]
        X.append(x)
        Y.append(y)
    return array(X), array(Y)
def distance(p1, pm, p2):
    # returns the distance of pm from the line between p1 and p2
    line = p2 - p1
    linel = norm(line)
    vm = pm - p1
    if linel == 0.0:
        return norm(vm)
    linem = line / linel
    position = pl.dot(vm, linem) / linel
    if position < 0.0:
        return norm(vm)
    elif position > 1.0:
        return norm(pm - p2)
    else:
        return norm(vm - line * position)
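# Usage sketch for distance (not from the original source; assumes numpy
# arrays, `from numpy.linalg import norm`, and `import pylab as pl`).
import pylab as pl
from numpy import array
from numpy.linalg import norm

# point (1, 1) is 1.0 away from the segment from (0, 0) to (2, 0)
print(distance(array([0., 0.]), array([1., 1.]), array([2., 0.])))   # -> 1.0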
def errEval(self, lpamat, Smat, Tmat):
    '''
    This function ...

    Arguments
    --------

    Keyword arguments
    -----------------

    '''
    lpamat_est = pl.dot(Smat, Tmat)
    lpamat_diff = lpamat - lpamat_est
    err = pl.mean(lpamat_diff**2) / pl.mean(lpamat**2)
    return err
def regressionPlot(X, Y, order):
    pl.plot(X.T.tolist()[0], Y.T.tolist()[0], 'gs')

    # You will need to write the designMatrix and regressionFit functions.
    # Construct the design matrix; the 0th column is just 1s.
    phi = designMatrix(X, order)
    # compute the weight vector
    w = regressionFit(Y, phi)
    print('w', w)
    print('SSE: ', SSE(Y, phi, w))

    # produce a plot of the values of the function
    pts = [[p] for p in pl.linspace(min(X), max(X), 100)]
    Yp = pl.dot(w.T, designMatrix(pts, order).T)
    pl.plot(pts, Yp.tolist()[0])
def __init__(self, LatConst):
    self.a0 = array([0.5 * LatConst, 0.5 * LatConst, 0])
    self.a1 = array([0.5 * LatConst, 0, 0.5 * LatConst])
    self.a2 = array([0, 0.5 * LatConst, 0.5 * LatConst])
    Vc = dot(cross(self.a0, self.a1), self.a2)  # volume
    self.Volume = abs(Vc)
    print("Volume is", self.Volume)
    self.b0 = (2 * pi / Vc) * cross(self.a1, self.a2)
    self.b1 = (2 * pi / Vc) * cross(self.a2, self.a0)
    self.b2 = (2 * pi / Vc) * cross(self.a0, self.a1)
    # special points in the Brillouin zone
    brs = 2 * pi / LatConst
    self.GPoint = [0, 0, 0]
    self.LPoint = array([0.5, 0.5, 0.5]) * brs
    self.KPoint = array([0.75, 0.75, 0]) * brs
    self.XPoint = array([1.0, 0.0, 0]) * brs
    self.WPoint = array([1.0, 0.5, 0]) * brs
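# Sketch (not from the original source): the reciprocal vectors built above
# satisfy the duality relation b_i . a_j = 2*pi*delta_ij. Assuming the
# __init__ belongs to a lattice class, hypothetically named FCC, the relation
# can be checked:
from numpy import dot, pi

lat = FCC(1.0)
print(dot(lat.b0, lat.a0) / (2 * pi))   # ~ 1.0
print(dot(lat.b0, lat.a1) / (2 * pi))   # ~ 0.0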
def lfpDecomp(self, L_params, rmat, kernel):
    '''
    This function ...

    Arguments
    --------

    Keyword arguments
    -----------------

    '''
    # look up the module-level kernel function _<kernel> by name
    # (an exec-assignment does not bind a local variable in Python 3)
    h_list = globals()['_' + kernel](L_params, self.dt)
    Rmat = _createRmat(h_list, rmat, self.nstim, self.ntime)
    Lmat = pl.dot(self._lfpmat, pl.linalg.pinv(Rmat))
    return Lmat, Rmat
def plot_nub(a, b, diff_degree=2., amp=1., scale=1.5, steps=100, **addl_plot_params):
    """ Plot a puzzle nub from a to b """
    X, Y = models.gp_puzzle_nub(diff_degree, amp, scale, steps)
    xy = pl.array([X, Y])

    a = pl.array(a)
    b = pl.array(b)
    r = pl.norm(b - a)
    cosT, sinT = (b - a) / r
    R = pl.array([[cosT, -sinT], [sinT, cosT]])
    xy = a + r * pl.dot(R, xy).T
    X = xy[:, 0]
    Y = xy[:, 1]

    my_plot(X, Y, **addl_plot_params)
def regressionPlot(X, Y, order, designMatrix=designMatrix):
    assert (len(X) == len(Y))
    pl.plot(X.T.tolist()[0], Y.T.tolist()[0], 'gs')

    # You will need to write the designMatrix and regressionFit functions.
    # Construct the design matrix (Bishop 3.16); the 0th column is just 1s.
    phi = designMatrix(X, order)
    # compute the weight vector
    w = regressionFit(X, Y, phi)

    # produce a plot of the values of the function
    pts = [[p] for p in pl.linspace(min(X), max(X), 100)]
    Yp = pl.dot(w.T, designMatrix(pts, order).T)
    pl.plot(pts, Yp.tolist()[0])
    return w
def w2x(self, x, t):
    """Evaluate the 2-wave component at (x, t).

    Returns the 2-wave component magnitude.
    """
    # First determine if there is an interface involved
    imat = self.getmat(x)
    iint = self.whichint(x, t, -1)  # reverse 2-characteristics go to the left
    if iint == -100:
        # no interface involved
        x0 = x - self.c[imat] * t
        w2val = pl.dot(self.l2[imat, :], self.q0(x0)) * self.z[imat]
    else:
        # characteristic crosses interface iint
        if iint == -1:
            tnew = t - (x - self.xlower) / self.c[imat]
        else:
            tnew = t - (x - self.interface[iint]) / self.c[imat]
        w2val = (self.w1int(iint, tnew) * self.CR_rl[iint]
                 + self.w2int(iint, tnew) * self.CT_lr[iint])
    return w2val
def evaluate(X, Y, order, l, vdata):
    pl.plot(vdata[0].T.tolist()[0], vdata[1].T.tolist()[0], 'gs')

    # You will need to write the designMatrix and regressionFit functions.
    # Construct the design matrix; the 0th column is just 1s.
    phi = designMatrix(X, order)
    I = np.identity(order + 1)
    # compute the weight vector
    w = ridgeRegressionFit(Y, phi, l, I)
    print('w', w)
    print('SSE: ', SSE(Y, phi, w))

    # produce a plot of the values of the function
    pts = [[p] for p in pl.linspace(min(X), max(X), 100)]
    Yp = pl.dot(w.T, designMatrix(pts, order).T)
    pl.plot(pts, Yp.tolist()[0])
def _create_rmat(Mmat, muamat, muavar, rneg_factor):
    '''
    This function ...

    Arguments
    --------

    Keyword arguments
    -----------------

    '''
    # estimate rmat based on the pseudoinverse of Mmat
    rmat = pl.dot(pl.linalg.pinv(Mmat), muamat)

    # rectification of rmat
    rmat[pl.find(rmat < -rneg_factor * pl.sqrt(muavar))] = \
        -rneg_factor * pl.sqrt(muavar)

    return rmat
def ydot(t, y, p):
    """
    A general example function to use with the ode5.

    :args:
        t (float): simulation time
        y (array or float): state of the system, here: 1x2 matrix
        p (tuple or float): system parameter(s), here: float

    :returns:
        y' (array of float): the derivative of the system state y at time t
    """
    sysmat = array([[0, 1.], [-2., p]])
    return dot(sysmat, y)
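# Usage sketch for ydot (not from the original source): the ode5 solver the
# docstring mentions is not shown here, so this integrates the same system
# with scipy's solve_ivp instead; assumes ydot's own numpy names (array, dot)
# are in scope.
from scipy.integrate import solve_ivp

# y'' = -2*y + p*y' written as a first-order system, with damping p = -0.5
sol = solve_ivp(lambda t, y: ydot(t, y, -0.5), (0., 10.), [1., 0.])
print(sol.y[0, -1])   # position at t = 10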
def regressionPlot(X, Y, order):
    pl.plot(X.T.tolist()[0], Y.T.tolist()[0], 'gs')

    # You will need to write the designMatrix and regressionFit functions.
    # Construct the design matrix (Bishop 3.16); the 0th column is just 1s.
    phi = designMatrix(X, order)

    # compute the weight vectors: plain least squares and ridge regression
    w = regressionFit(X, Y, phi)
    w_o, w_ridge = ridgeRegression(X, Y, phi, LAMBDA)

    # produce a plot of the values of the function
    pts = np.array([[p] for p in pl.linspace(min(X), max(X), 100)])
    Yp = pl.dot(w_ridge.T, designMatrix(pts, order).T)
    pl.plot(pts, Yp.tolist()[0])
    pl.show()
def ridgeRegression(X, Y, order, lam=0, designMatrix=designMatrix, verbose=True):
    phi = designMatrix(X, order)
    phiTphi = np.dot(phi.T, phi)
    # closed-form ridge solution: w = (lam*I + phi^T phi)^-1 phi^T Y
    w = np.dot(np.dot(np.linalg.inv(lam * np.identity(phiTphi.shape[0]) + phiTphi),
                      phi.T), Y)
    if verbose:
        pl.plot(X.T.tolist()[0], Y.T.tolist()[0], 'gs')
        pts = [[p] for p in pl.linspace(min(X), max(X), 100)]
        Yp = pl.dot(w.T, designMatrix(pts, order).T)
        pl.plot(pts, Yp.tolist()[0])
    return w
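# Usage sketch for ridgeRegression above (not from the original source): a
# minimal polynomial design matrix, here called _polyDesign, is supplied so
# the snippet is self-contained; verbose=False skips the pylab plotting.
import numpy as np

def _polyDesign(X, order):
    # columns [1, x, ..., x^order]
    return np.hstack([np.asarray(X)**i for i in range(order + 1)])

X = np.linspace(0., 1., 20)[:, None]
Y = 2. * X + 0.1
w = ridgeRegression(X, Y, order=1, lam=0.1, designMatrix=_polyDesign, verbose=False)
print(w.ravel())   # shrunk toward zero relative to the least-squares fit [0.1, 2.0]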
def _mfcc(self):
    """
    ::

        DCT of the log magnitude CQFT
    """
    fp = self._check_feature_params()
    if not self._cqft():
        return False
    self._make_dct()
    AA = P.log10(P.clip(self.CQFT, 0.0001, self.CQFT.max()))
    self.MFCC = P.dot(self.DCT, AA)
    self._have_mfcc = True
    if self.verbosity:
        print("Extracted MFCC: lcoef=%d, ncoef=%d, intensified=%d"
              % (self.lcoef, self.ncoef, self.intensify))
    n = self.ncoef
    l = self.lcoef
    self.X = self.MFCC[l:l + n, :]
    return True
def check_convergence(vars):
    """ Apply a simple test of convergence to the model: compare the
    autocorrelation at lags 50 to 99 against lag zero, and warn about
    potential non-convergence if it exceeds 50% for any stoch """
    import dismod_mr
    cells, stochs = dismod_mr.plot.tally_stochs(vars)

    for s in sorted(stochs, key=lambda s: s.__name__):
        tr = s.trace()
        if len(tr.shape) == 1:
            tr = tr.reshape((len(tr), 1))
        for d in range(len(pl.atleast_1d(s.value))):
            for k in range(50, 100):
                acorr = pl.dot(tr[:-k, d] - tr[:k, d].mean(),
                               tr[k:, d] - tr[k:, d].mean()) \
                        / pl.dot(tr[k:, d] - tr[k:, d].mean(),
                                 tr[k:, d] - tr[k:, d].mean())
                if abs(acorr) > .5:
                    print('potential non-convergence', s, acorr)
                    return False
    return True
def _hcqft(self):
    """
    ::

        Apply high lifter to MFCC and invert to the CQFT domain
    """
    fp = self._check_feature_params()
    if not self._mfcc():
        return False
    a, b = self.CQFT.shape
    n = self.ncoef
    l = self.lcoef
    AA = self.MFCC[n + l:a, :]  # apply lifter
    self.HCQFT = 10**P.dot(self.DCT[n + l:a, :].T, AA)
    self._have_hcqft = True
    if self.verbosity:
        print("Extracted HCQFT: lcoef=%d, ncoef=%d, intensified=%d"
              % (self.lcoef, self.ncoef, self.intensify))
    self.inverse = self.icqft
    self.X = self.HCQFT
    return True
def ridgeFit(X, Y, phi, params, verbose=False):
    #phi = designMatrix(X, params['order'], includeConstantTerm=False)
    l = params['lambda']
    phi_avg = sum(phi) * 1.0 / len(phi)
    Z = phi - phi_avg
    Y_avg = sum(Y) * 1.0 / len(Y)
    Yc = Y - Y_avg
    if verbose:
        print('phi', phi)
        print('phi_avg', phi_avg)
        print('Z', Z)
        print('Yc', Yc)
    a = pl.dot(Z.T, Z) + l * pl.eye(len(Z.T))
    b = np.linalg.inv(a).dot(Z.T)
    W = b.dot(Yc)
    W_0 = np.array([Y_avg - W.T.dot(phi_avg)])
    if verbose:
        print('W_0', W_0)
        print('W', W)
    return np.hstack((W_0, W.T)).T
def _lcqft(self):
    """
    ::

        Apply low lifter to MFCC and invert to the CQFT domain
    """
    fp = self._check_feature_params()
    if not self._mfcc():
        return False
    a, b = self.CQFT.shape
    a = (a - 1) * 2
    n = self.ncoef
    l = self.lcoef
    AA = self.MFCC[l:l + n, :]  # apply lifter
    self.LCQFT = 10**P.dot(self.DCT[l:l + n, :].T, AA)
    self._have_lcqft = True
    if self.verbosity:
        print("Extracted LCQFT: lcoef=%d, ncoef=%d, intensified=%d"
              % (self.lcoef, self.ncoef, self.intensify))
    self.inverse = self.icqft
    self.X = self.LCQFT
    return True
def branch_angle(G, parent, child1, child2):
    parent_coord = pylab.array(G.node[parent]['coord'])
    child1_coord = pylab.array(G.node[child1]['coord'])
    child2_coord = pylab.array(G.node[child2]['coord'])

    v1 = child1_coord - parent_coord
    v2 = child2_coord - parent_coord

    m1 = ((v1**2).sum())**0.5
    m2 = ((v2**2).sum())**0.5

    dp = pylab.dot(v1, v2)
    cos = dp / (m1 * m2)
    # clamp to [-1, 1] to guard against floating-point round-off
    if cos < -1:
        cos = -1
    elif cos > 1:
        cos = 1

    theta = pylab.arccos(cos)
    return theta
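# Usage sketch for branch_angle (not from the original source; assumes a
# graph whose nodes carry a 'coord' attribute and the pre-2.0 networkx
# G.node accessor used above).
import networkx as nx

G = nx.Graph()
G.add_node('p', coord=(0., 0.))
G.add_node('a', coord=(1., 0.))
G.add_node('b', coord=(0., 1.))
print(branch_angle(G, 'p', 'a', 'b'))   # ~ pi/2 for perpendicular branches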
def SunAngle(PositionVector, SimulationTime):
    """Calculates the angle between a position vector and the position vector of the Sun.

    Simulates a single point in time for the Sun observed from Earth using
    Skyfield and then calculates the angle between the position vector of the
    Sun and the given input position vector. Used to determine the eclipse
    angle of the Sun for the position.

    Arguments:
        PositionVector (array): Position vector.
        SimulationTime (:obj:`ephem.Date`): The time of the simulation.

    Returns:
        (float): The Sun angle [degrees].
    """
    current_time_datetime = ephem.Date(SimulationTime).datetime()
    year = current_time_datetime.year
    month = current_time_datetime.month
    day = current_time_datetime.day
    hour = current_time_datetime.hour
    minute = current_time_datetime.minute
    second = current_time_datetime.second + current_time_datetime.microsecond / 1000000
    current_time_skyfield = timescale_skyfield.utc(
        year, month, day, hour, minute, second
    )

    Sun = database_skyfield["Sun"]
    Earth = database_skyfield["Earth"]

    SunFromEarth = Earth.at(current_time_skyfield).observe(Sun)
    r_SunFromEarth_km = SunFromEarth.position.km

    SunAngle = arccos(
        dot(PositionVector, r_SunFromEarth_km)
        / (norm(r_SunFromEarth_km) * norm(PositionVector))
    )
    SunAngle = SunAngle / pi * 180

    return SunAngle
def assembledev2Mvv(A, B, M, P):
    # A ?
    # B ?
    # M Mesh object
    # P Physics object
    nodes = 4
    sa0, ma0 = localStiffness(M)
    etabflat = M.etab.reshape(M.nelx * M.nelz, nodes)
    Aflat = A.flatten()[pl.newaxis].T
    Bflat = B.flatten()[pl.newaxis].T
    inew = pl.kron(etabflat, pl.ones((1, nodes))).flatten()
    jnew = pl.kron(etabflat, pl.ones((nodes, 1))).flatten()
    dnew = -pl.dot(Bflat, ma0.reshape((1, nodes * nodes))).flatten()
    i = list(inew)
    j = list(jnew)
    d = list(dnew)

    # Periodic Bloch-Floquet BC:
    pen = BCpen
    n1 = list(range(0, (M.nelz + 1) * (M.nelx + 1), M.nelx + 1))
    n2 = list(range(M.nelx, (M.nelz + 1) * (M.nelx + 1) + M.nelx, M.nelx + 1))
    i += n1 + n2 + n1 + n2
    j += n1 + n2 + n2 + n1
    d += [0] * 2 * len(n1) + [
        -0.5 * pen * 1.j * P.kInx * M.lx * 1.j * P.kInx * M.lx
        / P.k0 / P.k0 * exp(1.j * P.kInx * M.lx)
    ] * len(n1) + [
        -0.5 * pen * 1.j * P.kInx * M.lx * 1.j * P.kInx * M.lx
        / P.k0 / P.k0 * exp(1.j * P.kInx * M.lx).conj()
    ] * len(n1)

    # Calculate the matrices given in Eq (43) in Dossou2006. See Fuchi2010
    # for a "nicer" way of writing them. The elements used are the same as
    # in Andreassen2011.
    # Mvv
    Mvv = coo_matrix((d, (i, j)), shape=(M.ndof, M.ndof), dtype='complex').toarray()
    return Mvv
def simulated_age_intervals(data_type, n, a, pi_age_true, sigma_true):
    # choose age intervals to measure
    age_start = pl.array(mc.runiform(0, 100, n), dtype=int)
    age_start.sort()  # sort to make it easy to discard the edges when testing
    age_end = pl.array(mc.runiform(age_start + 1, pl.minimum(age_start + 10, 100)), dtype=int)

    # find truth for the integral across the age intervals
    import scipy.integrate
    pi_interval_true = [scipy.integrate.trapz(pi_age_true[a_0i:(a_1i + 1)]) / (a_1i - a_0i)
                        for a_0i, a_1i in zip(age_start, age_end)]

    # generate covariates that add explained variation
    X = mc.rnormal(0., 1.**2, size=(n, 3))
    beta_true = [-.1, .1, .2]
    beta_true = [0, 0, 0]
    Y_true = pl.dot(X, beta_true)

    # calculate the true value of the rate in each interval
    pi_true = pi_interval_true * pl.exp(Y_true)

    # simulate the noisy measurement of the rate in each interval
    p = pl.maximum(0., mc.rnormal(pi_true, 1. / sigma_true**2.))

    # store the simulated data in a pandas DataFrame
    data = pandas.DataFrame(dict(value=p, age_start=age_start, age_end=age_end,
                                 x_0=X[:, 0], x_1=X[:, 1], x_2=X[:, 2]))
    data['effective_sample_size'] = pl.maximum(p * (1 - p) / sigma_true**2, 1.)

    data['standard_error'] = pl.nan
    data['upper_ci'] = pl.nan
    data['lower_ci'] = pl.nan

    data['year_start'] = 2005.  # TODO: make these vary
    data['year_end'] = 2005.
    data['sex'] = 'total'
    data['area'] = 'all'
    data['data_type'] = data_type

    return data
def regressionPlot(X, Y, order, fitMethod=regressionFit, params={}, verbose=False,
                   plot=True, validationData=None):
    if verbose:
        print('X', X)
        print('Y', Y)
    if plot:
        pl.plot(X.T.tolist()[0], Y.T.tolist()[0], 'gs')
        if validationData is not None:
            X_validate, Y_validate = validationData
            pl.plot(X_validate.T.tolist()[0], Y_validate.T.tolist()[0], 'bo')

    # You will need to write the designMatrix and regressionFit functions.
    # Construct the design matrix (Bishop 3.16); the 0th column is just 1s.
    phi = designMatrix(X, order)
    # compute the weight vector
    params['order'] = order
    w = fitMethod(X, Y, phi, params)
    if verbose:
        print('w', w)
    try:
        Y_estimated = pl.dot(w.T, phi.T)
    except:
        print('matrices not aligned: ', w.T.size, phi.T.size)
    if verbose:
        print('Y_estimated', Y_estimated)

    # produce a plot of the values of the function
    pts = [[p] for p in pl.linspace(min(X), max(X), 100)]
    Yp = applyWeights(pts, order, w)
    if plot:
        pl.plot(pts, Yp.tolist()[0])
        pl.show()

    error = sumOfSquaresErrorGenerator(phi, Y)(w)
    #print('error: %f' % error)
    print('analytical error gradient: %s' % sumOfSquaresErrorGradientGenerator(phi, Y)(w))
    print('numerical error gradient: %s'
          % numericalGradient(sumOfSquaresErrorGenerator(phi, Y), 1e-5)(w))
    return (error, w)
def forward_py(n, N, ni, ns, na, xs, source, gix, gfx, gox, cix,
               gi, gf, go, ci, state, output,
               WGI, WGF, WGO, WCI, WIP, WFP, WOP):
    for t in range(n):
        prev = zeros(ns) if t == 0 else output[t - 1]
        source[t, 0] = 1
        source[t, 1:1 + ni] = xs[t]
        source[t, 1 + ni:] = prev
        dot(WGI, source[t], out=gix[t])
        dot(WGF, source[t], out=gfx[t])
        dot(WGO, source[t], out=gox[t])
        dot(WCI, source[t], out=cix[t])
        if t > 0:
            gix[t] += WIP * state[t - 1]
            gfx[t] += WFP * state[t - 1]
        gi[t] = ffunc(gix[t])
        gf[t] = ffunc(gfx[t])
        ci[t] = gfunc(cix[t])
        state[t] = ci[t] * gi[t]
        if t > 0:
            state[t] += gf[t] * state[t - 1]
        gox[t] += WOP * state[t]
        go[t] = ffunc(gox[t])
        output[t] = hfunc(state[t]) * go[t]
    assert not isnan(output[:n]).any()
def ComputeZMPConfig(robot, q, qd, qdd):
    n = len(robot.GetLinks())
    g = robot.GetEnv().GetPhysicsEngine().GetGravity()
    with robot:
        robot.SetDOFValues(Fill(robot, q))
        robot.SetDOFVelocities(Fill(robot, qd))
        com_pos = array([k.GetGlobalCOM() for k in robot.GetLinks()])
        vel = robot.GetLinkVelocities()
        acc = robot.GetLinkAccelerations(Fill(robot, qdd))
        transforms = [k.GetTransform()[0:3, 0:3] for k in robot.GetLinks()]
        masses = [k.GetMass() for k in robot.GetLinks()]
        localCOM = [k.GetLocalCOM() for k in robot.GetLinks()]
    tau0 = array([0., 0., 0.])
    totalmass = sum(masses)
    if hasattr(robot, 'activelinks'):
        totalmass = 0
        for k in range(n):
            if robot.activelinks[k] > 0.1:
                totalmass += masses[k]
    f02 = totalmass * g[2]
    com = zeros(3)
    for i in range(n):
        if hasattr(robot, 'activelinks') and robot.activelinks[i] < 0.1:
            continue
        # compute the inertia matrix in the global frame
        R = transforms[i]
        ri = dot(R, localCOM[i])
        omegai = vel[i, 3:6]
        omegadi = acc[i, 3:6]
        #com_vel = vel[i, 0:3] + cross(omegai, ri)
        ci = com_pos[i]
        cidd = acc[i, 0:3] + cross(omegai, cross(omegai, ri)) + cross(omegadi, ri)
        tau0 += masses[i] * cross(ci, g - cidd)
        f02 -= masses[i] * cidd[2]
        com += masses[i] * ci
    return -tau0[1] / f02, tau0[0] / f02, com / totalmass
def blogRegression(X, Y, fitMethod=ridgeFit, params={}, verbose=False, validationData=None):
    if verbose:
        print('X', X)
        print('Y', Y)

    phi = linearDesignMatrix(X, includeConstantTerm=False)
    # compute the weight vector
    w = fitMethod(X, Y, phi, params)
    if verbose:
        print('w', w)

    phi = linearDesignMatrix(X, includeConstantTerm=True)
    try:
        Y_estimated = pl.dot(w.T, phi.T)
    except:
        print('matrices not aligned: ', w.T.shape, phi.T.shape)
        #Y_estimated = pl.dot(w.T, phi.T)
    if verbose:
        print('Y_estimated', Y_estimated)

    error = sumOfSquaresErrorGenerator(phi, Y)(w)
    return (error, w)
def distance(p1_, pm_, p2_, ele_weight=1.0):
    # returns the distance of pm from the line between p1 and p2
    p1, pm, p2 = array(p1_), array(pm_), array(p2_)
    h1, hm, h2 = norm(p1), norm(pm), norm(p2)
    if ele_weight != 1.0 and min(h1, hm, h2) > 0.0:
        hmean = (h1 + hm + h2) / 3.0
        p1 *= (ele_weight + (1.0 - ele_weight) * hmean / h1)
        pm *= (ele_weight + (1.0 - ele_weight) * hmean / hm)
        p2 *= (ele_weight + (1.0 - ele_weight) * hmean / h2)
    line = p2 - p1
    linel = norm(line)
    vm = pm - p1
    if linel == 0.0:
        return norm(vm)
    linem = line / linel
    position = pl.dot(vm, linem) / linel
    if position < 0.0:
        return norm(vm)
    elif position > 1.0:
        return norm(pm - p2)
    else:
        return norm(vm - line * position)
def forward(self, ys):
    """Forward propagate activations. This updates the internal
    state for a subsequent call to `backward` and returns the output
    activations."""
    n = len(ys)
    # inputs, zs = [None]*n, [None]*n
    zs = [None] * n
    for i in range(n):
        # inputs[i] = concatenate([ones(1), ys[i]])
        # equivalent to dot(self.W2, inputs[i]) with an explicit bias column
        temp = dot(self.W2[:, 1:], ys[i]) + self.W2[:, 0]
        # softmax with clipping for numerical stability
        temp = exp(clip(temp, -100, 100))
        temp /= sum(temp)
        zs[i] = temp
    # self.state = (inputs, zs)
    return zs