def complex_nk_t_list_from_n_k_t_list(n_list, k_list, t_list):
    nk_list = [complex(n_list[i], k_list[i]) for i in range(0, len(n_list))]
    nk_list = sp.insert(nk_list, 0, 1.0)
    nk_list = sp.append(nk_list, 1.0)
    t_list = sp.insert(t_list, 0, sp.inf)
    t_list = sp.append(t_list, sp.inf)
    return nk_list, t_list
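# Hedged usage sketch (not part of the original source): pad a single 100 nm film with
# n = 1.5, k = 0.01 between semi-infinite vacuum layers, as the function above does.
# Assumes `import scipy as sp` and an older SciPy in which sp.insert / sp.append / sp.inf
# still alias the corresponding NumPy objects.
import scipy as sp

nk_list, t_list = complex_nk_t_list_from_n_k_t_list([1.5], [0.01], sp.array([100e-9]))
# nk_list -> array([1.0+0.j, 1.5+0.01j, 1.0+0.j])   (vacuum | film | vacuum)
# t_list  -> array([inf, 1.e-07, inf])              (semi-infinite ambient thicknesses)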
def sinogramLine(beam, r, z, invessel=True, ds=2.5e-3, pargs=None, pltobj=None, **kwargs):
    try:
        if invessel:
            temp = beam(scipy.mgrid[beam.norm.s[-2]:beam.norm.s[-1]:ds])
        else:
            temp = beam(scipy.mgrid[beam.norm.s[0]:beam.norm.s[-1]:ds])

        # deal with branch cut
        temp0 = temp.t0(r, z)
        temp2 = temp.t2(r, z)
        temp = scipy.arange(temp0.size)[abs(temp2[1:] - temp2[:-1]) > scipy.pi]
        print(temp)
        if len(temp) > 0:
            temp0 = scipy.insert(temp0, temp + 1, None)
            temp2 = scipy.insert(temp2, temp + 1, None)

        if pltobj is None:
            pltobj = plt

        if pargs is not None:
            pltobj.plot(temp2, temp0, pargs, **kwargs)
        else:
            pltobj.plot(temp2, temp0, **kwargs)

    except AttributeError:
        for i in beam:
            sinogramLine(i, r, z, invessel=invessel, pargs=pargs, pltobj=pltobj, **kwargs)
def __init__(self, x, y, z, f, boundary='natural', dx=0, dy=0, dz=0,
             bounds_error=True, fill_value=scipy.nan):
    if dx != 0 or dy != 0 or dz != 0:
        raise NotImplementedError(
            "Trispline derivatives are not implemented, do not use tricubic "
            "interpolation if you need to compute magnetic fields!"
        )

    self._x = scipy.array(x, dtype=float)
    self._y = scipy.array(y, dtype=float)
    self._z = scipy.array(z, dtype=float)

    self._xlim = scipy.array((x.min(), x.max()))
    self._ylim = scipy.array((y.min(), y.max()))
    self._zlim = scipy.array((z.min(), z.max()))
    self.bounds_error = bounds_error
    self.fill_value = fill_value

    if f.shape != (self._x.size, self._y.size, self._z.size):
        raise ValueError("dimensions do not match f")

    if _tricub.ismonotonic(self._x) and _tricub.ismonotonic(self._y) and _tricub.ismonotonic(self._z):
        self._x = scipy.insert(self._x, 0, 2 * self._x[0] - self._x[1])
        self._x = scipy.append(self._x, 2 * self._x[-1] - self._x[-2])
        self._y = scipy.insert(self._y, 0, 2 * self._y[0] - self._y[1])
        self._y = scipy.append(self._y, 2 * self._y[-1] - self._y[-2])
        self._z = scipy.insert(self._z, 0, 2 * self._z[0] - self._z[1])
        self._z = scipy.append(self._z, 2 * self._z[-1] - self._z[-2])

    self._f = scipy.zeros(scipy.array(f.shape) + (2, 2, 2))
    self._f[1:-1, 1:-1, 1:-1] = scipy.array(f)  # place f in center, so that it is padded by unfilled values on all sides

    if boundary == 'clamped':
        # faces
        self._f[(0, -1), 1:-1, 1:-1] = f[(0, -1), :, :]
        self._f[1:-1, (0, -1), 1:-1] = f[:, (0, -1), :]
        self._f[1:-1, 1:-1, (0, -1)] = f[:, :, (0, -1)]
        # vertices
        self._f[(0, 0, -1, -1), (0, -1, 0, -1), 1:-1] = f[(0, 0, -1, -1), (0, -1, 0, -1), :]
        self._f[(0, 0, -1, -1), 1:-1, (0, -1, 0, -1)] = f[(0, 0, -1, -1), :, (0, -1, 0, -1)]
        self._f[1:-1, (0, 0, -1, -1), (0, -1, 0, -1)] = f[:, (0, 0, -1, -1), (0, -1, 0, -1)]
        # corners
        self._f[(0, 0, 0, 0, -1, -1, -1, -1),
                (0, 0, -1, -1, 0, 0, -1, -1),
                (0, -1, 0, -1, 0, -1, 0, -1)] = f[(0, 0, 0, 0, -1, -1, -1, -1),
                                                  (0, 0, -1, -1, 0, 0, -1, -1),
                                                  (0, -1, 0, -1, 0, -1, 0, -1)]
    elif boundary == 'natural':
        # faces
        self._f[(0, -1), 1:-1, 1:-1] = 2 * f[(0, -1), :, :] - f[(1, -2), :, :]
        self._f[1:-1, (0, -1), 1:-1] = 2 * f[:, (0, -1), :] - f[:, (1, -2), :]
        self._f[1:-1, 1:-1, (0, -1)] = 2 * f[:, :, (0, -1)] - f[:, :, (1, -2)]
        # vertices
        self._f[(0, 0, -1, -1), (0, -1, 0, -1), 1:-1] = (
            4 * f[(0, 0, -1, -1), (0, -1, 0, -1), :]
            - f[(1, 1, -2, -2), (0, -1, 0, -1), :]
            - f[(0, 0, -1, -1), (1, -2, 1, -2), :]
            - f[(1, 1, -2, -2), (1, -2, 1, -2), :]
        )
        self._f[(0, 0, -1, -1), 1:-1, (0, -1, 0, -1)] = (
            4 * f[(0, 0, -1, -1), :, (0, -1, 0, -1)]
            - f[(1, 1, -2, -2), :, (0, -1, 0, -1)]
            - f[(0, 0, -1, -1), :, (1, -2, 1, -2)]
            - f[(1, 1, -2, -2), :, (1, -2, 1, -2)]
        )
        self._f[1:-1, (0, 0, -1, -1), (0, -1, 0, -1)] = (
            4 * f[:, (0, 0, -1, -1), (0, -1, 0, -1)]
            - f[:, (1, 1, -2, -2), (0, -1, 0, -1)]
            - f[:, (0, 0, -1, -1), (1, -2, 1, -2)]
            - f[:, (1, 1, -2, -2), (1, -2, 1, -2)]
        )
        # corners
        self._f[(0, 0, 0, 0, -1, -1, -1, -1),
                (0, 0, -1, -1, 0, 0, -1, -1),
                (0, -1, 0, -1, 0, -1, 0, -1)] = (
            8 * f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1, 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)]
            - f[(1, 1, 1, 1, -2, -2, -2, -2), (0, 0, -1, -1, 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)]
            - f[(0, 0, 0, 0, -1, -1, -1, -1), (1, 1, -2, -2, 1, 1, -2, -2), (0, -1, 0, -1, 0, -1, 0, -1)]
            - f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1, 0, 0, -1, -1), (1, -2, 1, -2, 1, -2, 1, -2)]
            - f[(1, 1, 1, 1, -2, -2, -2, -2), (1, 1, -2, -2, 1, 1, -2, -2), (0, -1, 0, -1, 0, -1, 0, -1)]
            - f[(0, 0, 0, 0, -1, -1, -1, -1), (1, 1, -2, -2, 1, 1, -2, -2), (1, -2, 1, -2, 1, -2, 1, -2)]
            - f[(1, 1, 1, 1, -2, -2, -2, -2), (0, 0, -1, -1, 0, 0, -1, -1), (1, -2, 1, -2, 1, -2, 1, -2)]
            - f[(1, 1, 1, 1, -2, -2, -2, -2), (1, 1, -2, -2, 1, 1, -2, -2), (1, -2, 1, -2, 1, -2, 1, -2)]
        )

    self._regular = False
    if _tricub.isregular(self._x) and _tricub.isregular(self._y) and _tricub.isregular(self._z):
        self._regular = True
def train(self, iterations, train_vector, iterative_update=False, grow=True, num_pts_to_grow=3):
    self.iterations = iterations
    for t in range(len(train_vector)):
        train_vector[t] = scipy.array(train_vector[t])
    delta_nodes = scipy.zeros((self.width, self.height, self.FV_size), float)

    for i in range(0, iterations):
        cur_radius = self.radius_decay(i)
        cur_lr = self.learning_rate_decay(i)
        sys.stdout.write("\rTraining Iteration: " + str(i + 1) + "/" + str(iterations))
        sys.stdout.flush()

        # Grow the map where it's doing worst
        if grow and not (i % 20):
            for iik in range(num_pts_to_grow):
                dist_mask = self.build_distance_mask()
                worst_loc = find_indices(scipy.argmax(dist_mask), self.width)
                worst_row = worst_loc[0]
                worst_col = worst_loc[1]

                # Insert the row
                prev_row = worst_row - 1 if worst_row - 1 >= 0 else self.height - 1
                next_row = worst_row + 1 if worst_row + 1 < self.height else 0
                self.nodes = scipy.insert(self.nodes, worst_row, [[0]], axis=0)
                self.height += 1
                # Fill the new row with interpolated values
                for col in range(self.width):
                    self.nodes[worst_row, col] = (self.nodes[prev_row, col] + self.nodes[next_row, col]) / 2

                # Insert the column
                prev_col = worst_col - 1 if worst_col - 1 >= 0 else self.width - 1
                next_col = worst_col + 1 if worst_col + 1 < self.width else 0
                self.nodes = scipy.insert(self.nodes, worst_col, [[0]], axis=1)
                self.width += 1
                # Fill the new column with interpolated values
                for row in range(self.height):
                    self.nodes[row, worst_col] = (self.nodes[row, prev_col] + self.nodes[row, next_col]) / 2

            self.radius = (self.height + self.width) / 4
            delta_nodes = scipy.zeros((self.width, self.height, self.FV_size), float)

        if not iterative_update:
            delta_nodes.fill(0)
        else:
            random.shuffle(train_vector)

        for j in range(len(train_vector)):
            best = self.best_match(train_vector[j])
            # pick out the nodes that are within our decaying radius:
            for loc in self.find_neighborhood(best, cur_radius):
                influence = (-loc[2] + cur_radius) / cur_radius  # linear scaling of influence
                inf_lrd = influence * cur_lr
                delta_nodes[loc[0], loc[1]] += inf_lrd * (train_vector[j] - self.nodes[loc[0], loc[1]])
            if iterative_update:
                self.nodes += delta_nodes
                delta_nodes.fill(0)

        if not iterative_update:
            delta_nodes /= len(train_vector)
            self.nodes += delta_nodes

    sys.stdout.write("\n")
def calc_displ(self):
    self.displ = {}
    for sub in self.model.subcases.values():
        displ = scipy.zeros(0)
        count = 0
        for grid in self.grids:
            # scipy.insert returns a new array, so reassign the result;
            # each grid contributes its 6 DOF at the end of the running vector.
            lst = [count * 6 for i in xrange(6)]
            displ = scipy.insert(displ, lst, grid.displ[sub.id])
            count += 1
        self.displ[sub.id] = self.R2el * displ
def neo_logl_rvpm(theta, paramis, CHECK=False):
    _t, indexer, params = paramis
    AC, B, CV = indexer
    params_rv, params_pm = params[0], params[1]
    kplanets, nins, MOAV = params_rv[6], params_rv[7], params_rv[8]
    MOAV_STAR, totcornum, ACC = params_rv[9], params_rv[10], params_rv[11]

    # THETA CORRECTION FOR FIXED THETAS
    for a in AC:
        theta = sp.insert(theta, a, _t[a].val)

    # count 'em
    # this could be outside!!!!  # DEL
    all_rv = kplanets * 5
    all_rv += (nins + sp.sum(MOAV)) * 2
    all_rv += MOAV_STAR * 2
    all_rv += ACC

    # build the thetas
    theta_rv = theta[:all_rv]  # DEL
    theta_pm = theta  # DEL
    for b in B:
        theta_pm = sp.insert(theta_pm, b[0], theta[b[1]])
    theta_pm = theta_pm[all_rv:]  # DEL

    # t0, per, pr, sma, inc, ecc, w
    # per, amp, pha, ecc, w
    x = sp.array([(True, False, False, True, True) for _ in range(kplanets)]).reshape(-1)
    x1 = sp.arange(len(x))
    xx = x1[x]
    # raise Exception('deb')
    # ndim_rv = 5*kplanets + 2*nins*(MOAV+1) + (1 + PACC) + totcornum
    # theta_rv = theta[:ndim_rv]
    # P = sp.array([theta[5*k] for k in range(kplanets)])
    # theta_pm = theta[ndim_rv:]

    logl_params_rv = [_t, AC, params_rv]
    logl_params_pm = [_t, AC, params_pm]

    LOGL_RV = neo_logl_rv(
        nano_henshin_hou(theta_rv, kplanets, CV, _t.list('val'), AC),
        logl_params_rv)
    if CHECK:
        print('loglrv', LOGL_RV)
        # raise Exception('deb')

    LOGL_PM = neo_logl_pm(theta_pm, logl_params_pm)
    if CHECK:
        print('loglpm', LOGL_PM)
    # print('pass2')
    return LOGL_RV + LOGL_PM
def __init__(self, surf1, surf2):
    """ """
    normal = geometry.pts2Vec(surf1, surf2)

    # orthogonal coordinates based off of connecting normal
    snew = surf1.sagi - normal * ((surf1.sagi * normal) * (surf1.sagi.s / normal.s))
    mnew = surf1.meri - normal * ((surf1.meri * normal) * (surf1.meri.s / normal.s))
    super(Beam, self).__init__(surf1, surf1._origin, vec=[mnew, normal])

    # calculate area at diode.
    self.sagi.s = snew.s
    a1 = surf1.area(snew.s, mnew.s)

    # calculate area at aperture
    a2 = surf2.area((((self.sagi * surf2.sagi) / self.sagi.s) ** 2 +
                     ((self.meri * surf2.sagi) / self.meri.s) ** 2) ** .5,
                    (((self.sagi * surf2.meri) / self.sagi.s) ** 2 +
                     ((self.meri * surf2.meri) / self.meri.s) ** 2) ** .5)

    # generate etendue
    self.etendue = a1 * a2 / (normal.s ** 2)

    # give initial beam, which is two points
    self.norm.s = scipy.atleast_1d(self.norm.s)
    self.norm.s = scipy.insert(self.norm.s, 0, 0.)
def time_history(t, x, realify=True, num_time_points=200):
    r"""Generate refined time history from harmonic balance solution.

    Harmonic balance solutions presume a limited number of harmonics in the
    solution. The result is that the time history is usually a very limited
    number of values. Plotting these results implies that the solution isn't
    actually a continuous one. This function fills in the gaps using the
    harmonics obtained in the solution.

    Parameters
    ----------
    t: array_like
        1 x m array where m is the number of values representing the
        repeating solution.
    x: array_like
        n x m array where n is the number of equations and m is the number
        of values representing the repeating solution.
    realify: boolean
        Force the returned results to be real.
    num_time_points: int
        number of points desired in the "smooth" time history.

    Returns
    -------
    t: array_like
        1 x num_time_points array.
    x: array_like
        n x num_time_points array.

    Examples
    --------
    >>> import numpy as np
    >>> import mousai as ms
    >>> x = np.array([[-0.34996499,  1.36053998, -1.11828552]])
    >>> t = np.array([0.        , 2.991993  , 5.98398601])
    >>> t_full, x_full = ms.time_history(t, x, num_time_points=300)

    Notes
    -----
    The implication of this function is that the higher harmonics that were
    not determined in the solution are zero. This is indeed the assumption
    made when setting up the harmonic balance solution. Whether this is a
    valid assumption is something that the user must judge when obtaining
    the solution.

    """
    dt = t[1]
    t_length = t.size
    t = sp.linspace(0, t_length * dt, num_time_points, endpoint=False)
    x_freq = fftp.fft(x)
    x_zeros = sp.zeros((x.shape[0], t.size - x.shape[1]))
    x_freq = sp.insert(x_freq, [t_length - t_length // 2], x_zeros, axis=1)
    x = fftp.ifft(x_freq) * num_time_points / t_length
    if realify is True:
        x = sp.real(x)
    else:
        print('x was real')
    return t, x
def neo_logl_pm(theta, paramis):
    _t, AC, params = paramis
    time, flux, err = params[0], params[1], params[2]
    ins, kplanets, nins = params[3], params[4], params[5]
    # for linear, linear should be [1, 1]
    ld, batman_m, batman_p = params[6], params[7], params[8]
    gp, gaussian_processor = params[9], params[10]
    ndat = len(time)
    # logl_params = sp.array([self.time_pm, self.rv_pm, self.err_pm,
    #                         self.ins_pm, kplan, self.nins_pm])

    # 0 correct for fixed values
    theta1 = theta.astype(float)
    for a in AC:
        theta1 = sp.insert(theta1, a, _t[a].val)

    # 1 build the model with batman, i.e. call neo_lc
    theta_b = theta1[:-len(gp)]
    theta_g = theta1[-len(gp):]
    params_b = time, kplanets, ld, batman_m, batman_p
    model = neo_lightcurve(theta_b, params_b)

    # 2 compute residuals
    PM_residuals = flux - model  # why some people do the *1e6  # DEL
    # raise Exception('Debug')

    # 3 invoke the likelihood using george (could be another function),
    #   but I'll do it below because why not

    # 4 build the kernel and make GP(kernel); can this be done outside?!
    # theta_gp = theta1[-len(gp):]
    # theta_gp[1] = 10 ** theta_gp[1]  # for k_r in Matern32Kernel
    theta_g[-1] = 10. ** theta_g[-1]
    gp.set_parameter_vector(theta_g)  # last <gp> params, check for fixed shit?
    # raise Exception('debug')

    # should be jitter with err
    # gp.compute(time, sp.sqrt(err**2+theta_gp[0]**2))
    gp.compute(time, err)
    if gaussian_processor == 'george':
        return gp.lnlikelihood(PM_residuals, quiet=True)  # george
    if gaussian_processor == 'celerite':
        try:
            return gp.log_likelihood(PM_residuals)  # celerite
        except:
            return -sp.inf

    # this should go outside
    '''
    kernel = t1 ** 2 * kernels.ExpSquaredKernel(t2 ** 2)
    jitt = george.modeling.ConstantModel(sp.log((1e-4)**2.))
    gp = george.GP(kernel, mean=0.0, fit_mean=False,
                   white_noise=jitt, fit_white_noise=True)
    gp.compute(time)
    # likelihood
    gp.set_parameter_vector(p)
    return gp.lnlikelihood(flux, quiet=True)
    '''
    pass
def rebin(self, xnew):
    """ Rebin the spectrum on a new grid named xnew """
    # Does not need equal spaced bins, but why would you not?
    xnew.sort()

    fbin = sp.zeros(xnew.size)
    efbin = sp.zeros(xnew.size)

    # up sampling is just interpolation
    m = (self.wv >= xnew[0]) * (self.wv <= xnew[-1])
    if self.wv[m].size <= xnew.size - 1:
        fbin, efbin = self.interp(xnew)
    else:
        # down sampling--
        # 1) define bins so that xnew is at the center.
        # 2) interpolate to account for fractional pixel weights
        # 3) take the mean within each bin
        db = 0.5 * sp.diff(xnew)
        b2 = xnew[1::] - db
        b2 = sp.insert(b2, 0, xnew[0])

        insert = sp.searchsorted(self.wv, b2)
        xinsert = sp.insert(self.wv, insert, xnew)
        xinsert = sp.unique(xinsert)
        yinsert, zinsert = self.interp(xinsert)

        i = sp.digitize(xinsert, b2)
        for j in range(b2.size):
            iuse = sp.where(i == j + 1)[0]
            fbin[j] = sp.mean(yinsert[iuse])
            efbin[j] = sp.mean(zinsert[iuse])

    self._wv = xnew
    if self.ef is not None:
        self._ef = efbin
    self.f = fbin
    assert self.wv.size == self.f.size
def processBuffer(cls, buf):
    preEnergy = buf.energy()
    alpha = cls.alpha()
    unmodifiedPreviousSample = buf.samples[0]
    tempSample = None

    first_sample = buf.samples[0]
    buf.samples = buf.samples[1:] + (buf.samples[:-1] * alpha)
    buf.samples = sp.insert(buf.samples, 0, first_sample)
    cls.scaleBuffer(buf, preEnergy, buf.energy())
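# Standalone sketch of the sample update performed by processBuffer above, on a
# hypothetical buffer (not from the original source): y[0] = x[0] and
# y[n] = x[n] + alpha * x[n-1] for n >= 1, before the energy rescaling step.
# Assumes an older SciPy in which sp.insert aliases numpy.insert.
import scipy as sp

x = sp.array([1.0, 2.0, 3.0, 4.0])
alpha = 0.5
y = sp.insert(x[1:] + alpha * x[:-1], 0, x[0])
print(y)  # [1.  2.5 4.  5.5]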
def henshin_hou(thetas, kplanets, tags, fixed_values, anticoor):
    try:
        for t in range(len(thetas)):
            for i in range(len(anticoor)):
                thetas[t] = sp.insert(thetas[t], anticoor[i],
                                      fixed_values[anticoor[i]], axis=1)
    except:  # RAW
        for i in range(len(anticoor)):
            thetas = sp.insert(thetas, anticoor[i],
                               fixed_values[anticoor[i]], axis=2)

    for t in range(len(thetas)):
        for i in range(kplanets):
            if tags[i][0]:
                Pk = sp.exp(thetas[t][:, i * 5])
                thetas[t][:, i * 5] = Pk
                print('changed period! (devs note)')
            if tags[i][1]:
                Ask = thetas[t][:, i * 5 + 1]
                Ack = thetas[t][:, i * 5 + 2]
                Ak = Ask ** 2 + Ack ** 2
                Phasek = sp.where(Ask >= 0,
                                  sp.arccos(Ack / (Ak ** 0.5)),
                                  2 * sp.pi - sp.arccos(Ack / (Ak ** 0.5)))
                thetas[t][:, i * 5 + 1] = Ak
                thetas[t][:, i * 5 + 2] = Phasek
                print('changed amplitude! (devs note)')
            if tags[i][2]:
                Sk = thetas[t][:, i * 5 + 3]
                Ck = thetas[t][:, i * 5 + 4]
                ecck = Sk ** 2 + Ck ** 2
                wk = sp.where(Sk >= 0,
                              sp.arccos(Ck / (ecck ** 0.5)),
                              2 * sp.pi - sp.arccos(Ck / (ecck ** 0.5)))
                thetas[t][:, i * 5 + 3] = ecck
                thetas[t][:, i * 5 + 4] = wk
                print('changed eccentricity! (devs note)')
    return thetas
def __init__(self, pt1, inp2):
    """ """
    try:
        self.norm = geometry.pts2Vec(pt1, inp2)
    except AttributeError:
        self.norm = inp2.copy()

    super(Ray, self).__init__(pt1)
    self.norm.s = scipy.atleast_1d(self.norm.s)
    self.norm.s = scipy.insert(self.norm.s, 0, 0.)
def genCartGrid(x0, x1, x2, edges=False):
    if edges:
        # scipy.insert/scipy.append return new arrays, so collect and reassign
        # the adjusted grids
        centered = []
        for i in (x0, x1, x2):
            i = scipy.insert(i, 0, 2 * i[1] - i[2])
            i = scipy.append(i, 2 * i[-1] - i[-2])
            centered.append((i[1:] + i[:-1]) / 2)
        x0, x1, x2 = centered

    pnts = scipy.empty((x0.size, x1.size, x2.size, 3))
    x0in, x1in, x2in = scipy.meshgrid(x0, x1, x2, indexing='ij')
    pnts[:, :, :, 0] = x0in
    pnts[:, :, :, 1] = x1in
    pnts[:, :, :, 2] = x2in
    return pnts
def genCylGrid(x0, x1, x2, edges=False):
    if edges:
        # scipy.insert/scipy.append return new arrays, so collect and reassign
        # the adjusted grids
        centered = []
        for i in (x0, x1, x2):
            i = scipy.insert(i, 0, 2 * i[1] - i[2])
            i = scipy.append(i, 2 * i[-1] - i[-2])
            centered.append((i[1:] + i[:-1]) / 2)
        x0, x1, x2 = centered

    pnts = scipy.empty((x0.size, x1.size, x2.size, 3))
    xin = scipy.dot(scipy.atleast_2d(x0).T, scipy.atleast_2d(scipy.cos(x1)))
    yin = scipy.dot(scipy.atleast_2d(x0).T, scipy.atleast_2d(scipy.sin(x1)))
    zee = scipy.ones(yin.shape)
    for i in range(x2.size):
        pnts[:, :, i, 0] = xin
        pnts[:, :, i, 1] = yin
        pnts[:, :, i, 2] = x2[i] * zee
    return pnts
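# Hedged usage sketch (not part of the original source): build a small cylindrical grid
# of Cartesian (x, y, z) points with genCylGrid above, 2 radii x 3 angles x 2 heights.
# Assumes `import scipy` with the old top-level NumPy aliases (scipy.array, scipy.pi, ...).
import scipy

pts = genCylGrid(scipy.array([0.5, 1.0]),
                 scipy.linspace(0., scipy.pi, 3),
                 scipy.array([0.0, 1.0]))
print(pts.shape)  # (2, 3, 2, 3)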
def parse_intercept(self):
    """ Parse intercept factor """

    # Sanity checks
    # TO-DO: CHECK THAT MODEL_OPTS AND DATA_OPTS ARE PROPERLY DEFINED
    K = self.dimensionalities["K"]
    M = self.dimensionalities["M"]
    N = self.dimensionalities["N"]

    # If we want to learn the intercept, we add a constant covariate of 1s
    if self.model_opts['learnIntercept']:
        if self.data_opts['covariates'] is not None:
            self.data_opts['covariates'] = s.insert(
                self.data_opts['covariates'], obj=0, values=1., axis=1)
            self.data_opts['scale_covariates'].insert(0, False)
        else:
            self.data_opts['covariates'] = s.ones((N, 1))
            self.data_opts['scale_covariates'] = [False]

        # Parse intercept
        # self.model_opts['factors'] += 1
        # self.dimensionalities["K"] += 1

        # Remove sparsity from the Intercept factor
        # TO-DO: CHECK THAT THE MODEL IS ALREADY NOT SPARSE
        # TO-DO: RECHECK THIS, ITS UGLY
        # stop if not self.model_opts["learnIntercept"] == TRUE
        for m in range(M):
            # Weights
            # if self.model_opts["likelihoods"][m]=="gaussian":
            self.model_opts["initSW"]["mean_S1"][m][:, 0] = s.nanmean(self.data[m], axis=0)
            self.model_opts["initSW"]["var_S1"][m][:, 0] = 1e-10

            # Theta
            self.model_opts['sparsity'][m][0] = 0.
            self.model_opts["initSW"]["Theta"][m][:, 0] = 1.
            self.model_opts["priorTheta"]['a'][m][0] = s.nan
            self.model_opts["priorTheta"]['b'][m][0] = s.nan
            self.model_opts["initTheta"]["a"][m][0] = s.nan
            self.model_opts["initTheta"]["b"][m][0] = s.nan
            self.model_opts["initTheta"]["E"][m][0] = 1.
def calcArealDens(l, te, halfte, rho, tped, tcore, nped, ncore):
    minrho = scipy.argmin(rho)
    spline1 = scipy.interpolate.interp1d(rho[:minrho + 1], l[:minrho + 1],
                                         bounds_error=False, kind='linear')
    spline2 = scipy.interpolate.interp1d(rho[minrho:], l[minrho:],
                                         bounds_error=False, kind='linear')

    # step 3, find te rho locations
    ne = GIWprofiles.ne(GIWprofiles.te2rho2(te, tcore, tped), ncore, nped)
    rhohalfte = GIWprofiles.te2rho2(halfte, tcore, tped)

    bounds = scipy.array([rho[minrho], 1.])
    boundte = GIWprofiles.te(bounds, tcore, tped)
    bndidx = scipy.searchsorted(halfte, boundte)

    # add proper endpoints to the temperature array
    # (assumes that Te is positively increasing)
    rhohalfte = scipy.insert(rhohalfte, bndidx, bounds)

    # step 4, find l location for those 1/2 te locations AND rho=1 for endpoints
    l1 = spline1(rhohalfte)
    deltal1 = abs(l1[:-1] - l1[1:])
    deltal1[scipy.logical_not(scipy.isfinite(deltal1))] = 0.

    l2 = spline2(rhohalfte)
    deltal2 = abs(l2[:-1] - l2[1:])
    deltal2[scipy.logical_not(scipy.isfinite(deltal2))] = 0.

    # plt.semilogx(te, ne*(deltal1*deltal2)/1e19, '.')
    # plt.xlabel('deltal2')
    # plt.show()

    return pow(ne, 2) * (deltal1 + deltal2)
def xargsCyclics(self, x_args):
    """Prepare x_args taking into account cyclics.

    `xargsCyclics` is used inside the function that is passed to the
    `cubature` integrator. Although some coordinates are omitted to account
    for cyclics, the function should still be able to compute values for any
    valid `x_args`. The easiest workaround is to restore the cyclic
    coordinates before using `x_args` for computations.

    Returns:
        NumPy array of cubature `x_args` with restored cyclic coordinates.
    """
    isVec = len(x_args.shape) == 2
    extShape = x_args.shape if isVec else x_args.shape + (1, )
    args = x_args.T if isVec else x_args
    for i in range(len(self.area)):
        if i in self.cyclics.keys():
            args = sp.insert(args, i, sp.tile(self.cyclics[i], extShape[1]), axis=0)
    return args.T if isVec else args
def plot_rc(self, save=False, hand=True, usgs=True, xs=True, xsapprox=True, sprnt=True, ci=True, dist=5000, raw=False, kind='power', alpha=0.05, div=5): """Plot HAND and xs rating curves with confidence intervals 'hand' - plot hand rating curve [T/F] 'xs' - plot xs rating curves [T/F] 'xsapprox' - plot xs rating curve approximation from n-value averages [T/F] 'ci' - plot confidence intervals [T/F] 'alpha' - alpha for confidence intervals [float(0.0,1.0)] 'div' - number of intervals for confidence interval [R]""" with open('results/output_{0}.csv'.format(self.comid), 'w') as f: writer = csv.writer(f) writer.writerow(['COMID:', self.comid]) writer.writerow(['LENGTH:', self.handlen]) if xs: # Plot all linearly-interpolated XS rating curves intervals = scipy.arange(dist, self.handlen + dist, dist) # print 'Intervals:',intervals cutoffub = [i / self.handlen * 100 for i in intervals] cutofflb = scipy.copy(cutoffub) cutofflb = scipy.insert(cutofflb, 0, 0)[:-1] cutoffs = zip(cutofflb, cutoffub) for l, u in cutoffs: idx = scipy.where( scipy.logical_and( scipy.greater_equal(self.xs_profs, l), scipy.less(self.xs_profs, u))) if u > 100: u = 100.00 if len(self.xs_disch[idx]) == 0: continue fig, ax = plt.subplots( ) # get figure and axes for plotting fname = 'results/sprntcompare/rc_comid_{0}_sprnt_compare.png'.format( self.comid, ('%.2f' % l), ('%.2f' % u)) for prof, disch, stage in zip(self.xs_profs[idx], self.xs_disch[idx], self.xs_stage[idx]): # Get interpolation function # print (('%.2f' % prof) + str(disch)) # print (('%.2f' % prof) + str(stage)) f = self.interp(x=disch, y=stage, kind=kind) if raw == True: # Plot raw data (ie. only HEC-RAS points) # interp over discharge writer.writerow(['PROFILE:', prof]) writer.writerow(['DISCHARGE:']) writer.writerow(disch) writer.writerow(['STAGE:']) writer.writerow(f(disch)) ax.plot(disch, f(disch), c='grey', linewidth=2) # interp over stage (switched axes) for testing # f = self.interp(x=stage,y=disch,kind=kind) # ax.plot(f(stage),stage,c='purple',linewidth=1) if raw == False: # Plot interpolated data (ie. 
'div' many interpolated points) interval = disch[-1] / div qvals = scipy.arange(0, (disch[-1] + interval), interval) # [1:] writer.writerow(['PROFILE:', prof]) writer.writerow(['DISCHARGE:']) writer.writerow(qvals) writer.writerow(['STAGE:']) writer.writerow(f(qvals)) ax.plot(qvals, f(qvals), c='grey', linewidth=2) # print '\n------------------------\n' if ci: # Plot confidence interval bounds self.get_ci(alpha=alpha, div=div) # get upper- and lower-bound variables # upper bounds f = self.interp(x=self.ci_vals, y=self.ubounds, kind=kind) writer.writerow(['UPPER CI:']) writer.writerow(['DISCHARGE:']) writer.writerow(self.ci_vals) writer.writerow(['STAGE:']) writer.writerow(f(self.ci_vals)) ax.plot(self.ci_vals, f(self.ci_vals), label='{0}%CI'.format(int((1 - alpha) * 100)), c='orange', linewidth=5) # lower bounds f = self.interp(x=self.ci_vals, y=self.lbounds, kind=kind) writer.writerow(['LOWER CI:']) writer.writerow(['DISCHARGE:']) writer.writerow(self.ci_vals) writer.writerow(['STAGE:']) writer.writerow(f(self.ci_vals)) ax.plot(self.ci_vals, f(self.ci_vals), c='orange', linewidth=5) if xsapprox: # Add approximate rating curve from average n-values qvals, hvals = self.get_xs_q(upto=83) f = self.interp(x=qvals, y=hvals, kind=kind) writer.writerow(['XSAPPROX:']) writer.writerow(['DISCHARGE:']) writer.writerow(qvals) writer.writerow(['STAGE:']) writer.writerow(f(qvals)) ax.plot(qvals, f(qvals), label='Resistance Function', c='red', linewidth=5) if usgs: # Plot interpolated USGS rating curve # Get data try: self.get_usgsrc() # Fetch usgs stage and disch values # Plot curves for q, h in zip(self.usgsq, self.usgsh): if kind == 'cubic': print 'USGS interpolation plotted as power-law fit' f = self.interp(x=q, y=h, kind='power') else: f = self.interp(x=q, y=h, kind=kind) writer.writerow(['USGS:']) writer.writerow(['DISCHARGE:']) writer.writerow(q) writer.writerow(['STAGE:']) writer.writerow(f(q)) ax.plot(q, f(q), label='usgs', c='g', linewidth=5) except IndexError: print 'No USGS rating curve for comid {0}'.format( self.comid) if hand: # Plot interpolated HAND rating curve # Plot curves f = self.interp(x=self.handq, y=self.handh, kind=kind) writer.writerow(['HAND:']) writer.writerow(['DISCHARGE:']) writer.writerow(list(self.handq)) writer.writerow(['STAGE:']) writer.writerow(list(f(self.handq))) ax.plot(self.handq, f(self.handq), label='hand', c='b', linewidth=5) if sprnt: f = self.interp(x=self.sprntq, y=self.sprnth, kind=kind) writer.writerow(['SPRNT:']) writer.writerow(['DISCHARGE:']) writer.writerow(list(self.sprntq)) writer.writerow(['STAGE:']) writer.writerow(list(f(self.sprntq))) ax.plot(self.sprntq, f(self.sprntq), label='sprnt', c='y', linewidth=5) # Add one label for all cross-section curves ax.plot([], [], label='HEC-RAS', c='grey', linewidth=2) # Plot graph fig.set_size_inches(20, 16, forward=True) plt.gca().set_xlim(left=0, right=self.max_disch) plt.gca().set_ylim(bottom=0, top=self.max_stage) ax.set_xticks(ax.get_xticks()[::2]) ax.set_yticks(ax.get_yticks()[::2]) title = 'COMID {0}, ({1},{2})'.format(self.comid, ('%.2f' % l), ('%.2f' % u)) ax.set_title(title, y=1.04, fontsize=56) plt.xlabel('Q (cfs)', fontsize=56) plt.ylabel('H (ft)', fontsize=56) ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) plt.rc('font', size=56) plt.legend(loc='upper left', fontsize=40) plt.tick_params(axis='both', labelsize=56) plt.grid() if save: fig.savefig(fname) plt.clf() if not save: mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.show() plt.clf() writer.writerow('')
def main(): parser = argparse.ArgumentParser( 'clinical classification using GEM TCR burden and NN-distance') parser.add_argument('outbase', type=str, default='classify', help='base name for output files') parser.add_argument('clinical_groups', type=str, help='clinical group file') parser.add_argument('nndist_files', type=str, nargs='+', help='NN-dist files') args = parser.parse_args() # parse raw nndist files df = parse_nndist(args.nndist_files, args.clinical_groups) # first some grouping for the Van Rhijn et al. motifs df_burden = df.loc[df['sorted population'] == 'ex vivo', [ 'subject', 'clinical group', 'locus', 'V', 'J', 'CDR3 length', 'sorted population', 'abundance' ]] grouped = df_burden.groupby(['subject', 'clinical group']) def aggregator(group): match_alpha = ((group.locus == 'alpha') & group.V.str.contains('TRAV1-2\*') & group.J.str.contains('TRAJ9\*') & (group['CDR3 length'] == 13)) match_beta = ((group.locus == 'beta') & group.V.str.contains('TRBV6-2\*') & group.J.str.contains('TRBJ2-') & (group['CDR3 length'] == 14)) total_alpha = sum(group.locus == 'alpha') total_beta = sum(group.locus == 'beta') return pd.Series({ r'GEM-TCR$\alpha$' '\nmotif frequency': match_alpha.sum() / total_alpha, r'GEM-TCR$\beta$' '\nmotif frequency': match_beta.sum() / total_beta }) df_burden = grouped.apply(aggregator).reset_index() df_burden_rows_1052 = df_burden.subject.isin( ['TB-1052', 'TB-1052-2M', 'TB-1052-6M']) print('alpha GEM-TCR motif significance:') print(' latent Vs negative: {}'.format( ttest_ind( df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'IGRA\nnegative'), r'GEM-TCR$\alpha$' '\nmotif frequency'], df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'IGRA\npositive'), r'GEM-TCR$\alpha$' '\nmotif frequency'])[1])) print(' active Vs negative: {}'.format( ttest_ind( df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'IGRA\nnegative'), r'GEM-TCR$\alpha$' '\nmotif frequency'], df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'active\nTB'), r'GEM-TCR$\alpha$' '\nmotif frequency'])[1])) print('beta GEM-TCR motif significance:') print(' latent Vs negative: {}'.format( ttest_ind( df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'IGRA\nnegative'), r'GEM-TCR$\beta$' '\nmotif frequency'], df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'IGRA\npositive'), r'GEM-TCR$\beta$' '\nmotif frequency'])[1])) print(' active Vs negative: {}'.format( ttest_ind( df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'IGRA\nnegative'), r'GEM-TCR$\beta$' '\nmotif frequency'], df_burden.loc[~df_burden_rows_1052 & (df_burden['clinical group'] == 'active\nTB'), r'GEM-TCR$\beta$' '\nmotif frequency'])[1])) plt.figure(figsize=(4, 4)) plt.subplot(2, 1, 1) ax = sns.boxplot(x='clinical group', y=r'GEM-TCR$\alpha$' '\nmotif frequency', data=df_burden[~df_burden_rows_1052], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot(x='clinical group', y=r'GEM-TCR$\alpha$' '\nmotif frequency', data=df_burden[~df_burden_rows_1052], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), dodge=True, edgecolor='k', linewidth=1) plt.ylim([0, None]) sns.despine() plt.tight_layout() plt.subplot(2, 1, 2) ax = sns.boxplot(x='clinical group', y=r'GEM-TCR$\beta$' '\nmotif frequency', data=df_burden[~df_burden_rows_1052], order=('IGRA\nnegative', 
'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot(x='clinical group', y=r'GEM-TCR$\beta$' '\nmotif frequency', data=df_burden[~df_burden_rows_1052], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), dodge=True, edgecolor='k', linewidth=1) plt.ylim([0, None]) sns.despine() plt.tight_layout() plt.savefig(args.outbase + '.canonical.pdf') grouped = df.groupby( ['subject', 'clinical group', 'locus', 'sorted population']) # aggregation function for mean, and total abundance def aggregator(group): total_abundance = group['abundance'].sum() mean_nndist = group['nearest neighbor distance'].mean() mean_cdr3length = group['CDR3 length'].mean() return pd.Series({ 'CD1b-GMM repertoire distance': mean_nndist, 'mean CDR3 length': mean_cdr3length, 'total abundance': total_abundance }) df_mean = grouped.apply(aggregator).reset_index() # need to keep track of 1052 for exclusion df_rows_1052 = df.subject.isin(['TB-1052', 'TB-1052-2M', 'TB-1052-6M']) df_mean_rows_1052 = df_mean.subject.isin( ['TB-1052', 'TB-1052-2M', 'TB-1052-6M']) # mean cdr3 length distributions plt.figure(figsize=(4, 5)) plt.subplot(2, 1, 1) ax = sns.boxplot( x='clinical group', y='mean CDR3 length', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'ex vivo')], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot( x='clinical group', hue='clinical group', y='mean CDR3 length', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'ex vivo')], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), hue_order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), edgecolor='k', linewidth=1) ax.legend_.remove() plt.ylabel(r'TCR$\alpha$' '\nmean CDR3 length') #plt.ylim([0, None]) sns.despine() plt.tight_layout() plt.subplot(2, 1, 2) ax = sns.boxplot( x='clinical group', y='mean CDR3 length', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'ex vivo')], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot(x='clinical group', hue='clinical group', y='mean CDR3 length', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'ex vivo')], order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), hue_order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), edgecolor='k', linewidth=1) ax.legend_.remove() plt.ylabel(r'TCR$\beta$' '\nmean CDR3 length') #plt.ylim([0, None]) sns.despine() plt.tight_layout() plt.savefig(args.outbase + '.cdr3_length.pdf', bbox_inches='tight') # mean NNdists print('alpha TCRdist distance significance:') print('ex vivo') print(' latent Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'IGRA\npositive'), 'CD1b-GMM repertoire distance'])[1])) print(' active Vs negative: {}'.format( ttest_ind( 
df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'active\nTB'), 'CD1b-GMM repertoire distance'])[1])) print('in vitro') print(' latent Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'IGRA\npositive'), 'CD1b-GMM repertoire distance'])[1])) print(' active Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'ex vivo') & (df_mean['clinical group'] == 'active\nTB'), 'CD1b-GMM repertoire distance'])[1])) print('beta TCRdist distance significance:') print('ex vivo') print(' latent Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'IGRA\npositive'), 'CD1b-GMM repertoire distance'])[1])) print(' active Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'alpha') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'active\nTB'), 'CD1b-GMM repertoire distance'])[1])) print('in vitro') print(' latent Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'IGRA\npositive'), 'CD1b-GMM repertoire distance'])[1])) print(' active Vs negative: {}'.format( ttest_ind( df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'IGRA\nnegative'), 'CD1b-GMM repertoire distance'], df_mean.loc[(~df_mean_rows_1052) & (df_mean.locus == 'beta') & (df_mean['sorted population'] == 'in vitro') & (df_mean['clinical group'] == 'active\nTB'), 'CD1b-GMM repertoire distance'])[1])) plt.figure(figsize=(5, 3)) plt.subplot(1, 2, 1) ax = sns.boxplot( x='sorted population', hue='clinical group', y='CD1b-GMM repertoire distance', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'alpha')], hue_order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot(x='sorted population', hue='clinical group', y='CD1b-GMM 
repertoire distance', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'alpha')], hue_order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), dodge=True, edgecolor='k', linewidth=1) ax.legend_.remove() ax.yaxis.set_major_locator(MaxNLocator(integer=True)) sns.despine() plt.tight_layout() plt.subplot(1, 2, 2) ax = sns.boxplot( x='sorted population', hue='clinical group', y='CD1b-GMM repertoire distance', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'beta')], hue_order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot(x='sorted population', hue='clinical group', y='CD1b-GMM repertoire distance', data=df_mean[(~df_mean_rows_1052) & (df_mean.locus == 'beta')], hue_order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), dodge=True, edgecolor='k', linewidth=1) ax.legend_.remove() ax.yaxis.set_major_locator(MaxNLocator(integer=True)) sns.despine() plt.tight_layout() plt.savefig(args.outbase + '.meanNNdist.pdf') df2 = pd.DataFrame( columns=('subject', 'clinical group', 'total alpha abundance', 'total beta abundance', r'mean TCR$\alpha$ CDR3 length', r'mean TCR$\beta$ CDR3 length', r'GEM-TCR$\alpha$' '\nmotif frequency', r'GEM-TCR$\beta$' '\nmotif frequency', 'x1', 'x2', 'x3', 'x4')) for i, (name, group) in enumerate(df_mean.groupby(['subject', 'clinical group'])): df2.loc[i] = list(name) + \ [group['total abundance'][group.locus == 'alpha'].iloc[0], group['total abundance'][group.locus == 'beta'].iloc[0], group['mean CDR3 length'][group.locus == 'alpha'].iloc[0], group['mean CDR3 length'][group.locus == 'beta'].iloc[0], df_burden.loc[df_burden['subject'] == name[0], r'GEM-TCR$\alpha$' '\nmotif frequency'].iloc[0], df_burden.loc[df_burden['subject'] == name[0], r'GEM-TCR$\beta$' '\nmotif frequency'].iloc[0], group['CD1b-GMM repertoire distance'][(group['sorted population'] == 'ex vivo') & (group.locus == 'alpha')].iloc[0], group['CD1b-GMM repertoire distance'][(group['sorted population'] == 'in vitro') & (group.locus == 'alpha')].iloc[0], group['CD1b-GMM repertoire distance'][(group['sorted population'] == 'ex vivo') & (group.locus == 'beta')].iloc[0], group['CD1b-GMM repertoire distance'][(group['sorted population'] == 'in vitro') & (group.locus == 'beta')].iloc[0]] df2_rows_1052 = df2.subject.isin(['TB-1052', 'TB-1052-2M', 'TB-1052-6M']) X_canonical = df2.loc[:, [ r'GEM-TCR$\alpha$' '\nmotif frequency', r'GEM-TCR$\beta$' '\nmotif frequency' ]].values X_nndist = df2.loc[:, ['x1', 'x2', 'x3', 'x4']].values y = df2['clinical group'].values # fit on only active vs negative, and no 1052 df_roc = pd.DataFrame() for method, X in zip(['GEM-TCR motifs', 'TCRdist'], [X_canonical, X_nndist]): clf = LinearDiscriminantAnalysis() clf.fit(X[~df2_rows_1052, :], y[~df2_rows_1052]) df2.loc[df2_rows_1052, 'active TB probability'] = clf.predict_proba( X[df2_rows_1052, :])[:, list(clf.classes_).index('active\nTB')] df2.loc[df2_rows_1052, 'prediction'] = clf.predict(X[df2_rows_1052, :]) cv_probs = cross_val_predict(clf, X[~df2_rows_1052, :], y[~df2_rows_1052], method='predict_proba', cv=10) df2.loc[~df2_rows_1052, 'active TB probability'] = cv_probs[:, 2] df2.loc[~df2_rows_1052, 'prediction'] = cross_val_predict(clf, X[~df2_rows_1052, :], y[~df2_rows_1052], cv=10) print(method) print( confusion_matrix( y[~df2_rows_1052], df2.loc[~df2_rows_1052, 'prediction'].values, labels=['IGRA\nnegative', 'IGRA\npositive', 
'active\nTB'])) df2.loc[df2.subject == 'TB-1052', 'time point'] = 'pre-tx' df2.loc[df2.subject == 'TB-1052-2M', 'time point'] = '2 mo' df2.loc[df2.subject == 'TB-1052-6M', 'time point'] = '6 mo' plt.figure(figsize=(3, 3)) ax = sns.boxplot(data=df2[~df2_rows_1052], x='clinical group', y='active TB probability', order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), color='white') plt.setp(ax.artists, edgecolor='k', facecolor='w') plt.setp(ax.lines, color='k') sns.swarmplot(data=df2[~df2_rows_1052], x='clinical group', y='active TB probability', order=('IGRA\nnegative', 'IGRA\npositive', 'active\nTB'), palette=('gray', 'orange', 'red'), edgecolor='k', linewidth=1, clip_on=False) plt.axhline(y=.5, ls='--', color='k', lw=1) plt.ylim([0, 1]) sns.despine() plt.savefig('{}.{}.decision.pdf'.format(args.outbase, method).replace(' ', '_'), bbox_inches='tight') # cross validated roc (negative and active only) fpr, tpr, _ = roc_curve( y[~df2_rows_1052] == 'active\nTB', df2.loc[~df2_rows_1052, 'active TB probability']) fpr = scipy.insert(fpr, 0, 0.) tpr = scipy.insert(tpr, 0, 0.) df_roc = df_roc.append( pd.DataFrame({ 'false positive rate': fpr, 'true positive rate': tpr, 'method': '{}\n(AUC = {:.2})'.format(method, auc(fpr, tpr)) })) plt.figure() g = sns.FacetGrid(data=df_roc, hue='method', size=3) g = g.map(plt.plot, 'false positive rate', 'true positive rate', ls='-', lw=4, alpha=.8, clip_on=False) plt.legend(loc='lower right', fontsize='xx-small') g.set(xticks=(0., .2, .4, .6, .8, 1), yticks=(0., .2, .4, .6, .8, 1)) plt.plot((0, 1.), (0, 1), ls='--', c='black', lw=.5, zorder=0) plt.xlim([0, 1.02]) plt.ylim([0, 1.02]) plt.tight_layout() plt.savefig(args.outbase + '.roc.pdf')
def update_hull(hull, newx, newhx, newhpx, domain, isDomainFinite):
    """update_hull: update the hull with a new function evaluation

    Input:
       hull           - the current hull (see setup_hull for a definition)
       newx           - a new abscissa
       newhx          - h(newx)
       newhpx         - hp(newx)
       domain         - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?

    Output:
       newhull

    History:
       2009-05-21 - Written - Bovy (NYU)
    """
    # BOVY: Perhaps add a check that newx is sufficiently far from any existing point
    # Find where newx fits in with the other xs
    if newx > hull[1][-1]:
        newxs = sc.append(hull[1], newx)
        newhxs = sc.append(hull[2], newhx)
        newhpxs = sc.append(hull[3], newhpx)
        # new z
        newz = (newhx - hull[2][-1] - newx * newhpx + hull[1][-1] * hull[3][-1]) / (hull[3][-1] - newhpx)
        newzs = sc.append(hull[4], newz)
        # new hu
        newhu = hull[3][-1] * (newz - hull[1][-1]) + hull[2][-1]
        newhus = sc.append(hull[6], newhu)
    else:
        indx = 0
        while newx > hull[1][indx]:
            indx = indx + 1
        newxs = sc.insert(hull[1], indx, newx)
        newhxs = sc.insert(hull[2], indx, newhx)
        newhpxs = sc.insert(hull[3], indx, newhpx)
        # Replace old z with new zs
        if newx < hull[1][0]:
            newz = (hull[2][0] - newhx - hull[1][0] * hull[3][0] + newx * newhpx) / (newhpx - hull[3][0])
            newzs = sc.insert(hull[4], 0, newz)
            # Also add the new hu
            newhu = newhpx * (newz - newx) + newhx
            newhus = sc.insert(hull[6], 0, newhu)
        else:
            newz1 = (newhx - hull[2][indx - 1] - newx * newhpx + hull[1][indx - 1] * hull[3][indx - 1]) / (hull[3][indx - 1] - newhpx)
            newz2 = (hull[2][indx] - newhx - hull[1][indx] * hull[3][indx] + newx * newhpx) / (newhpx - hull[3][indx])
            # Insert newz1 and replace z_old
            newzs = sc.insert(hull[4], indx - 1, newz1)
            newzs[indx] = newz2
            # Update the hus
            newhu1 = hull[3][indx - 1] * (newz1 - hull[1][indx - 1]) + hull[2][indx - 1]
            newhu2 = newhpx * (newz2 - newx) + newhx
            newhus = sc.insert(hull[6], indx - 1, newhu1)
            newhus[indx] = newhu2

    # Recalculate the cumulative sum
    nx = len(newxs)
    newscum = sc.zeros(nx - 1)
    if isDomainFinite[0]:
        newscum[0] = 1. / newhpxs[0] * (m.exp(newhus[0]) - m.exp(newhpxs[0] * (domain[0] - newxs[0]) + newhxs[0]))
    else:
        newscum[0] = 1. / newhpxs[0] * m.exp(newhus[0])
    if nx > 2:
        for jj in range(nx - 2):
            if newhpxs[jj + 1] == 0.:
                newscum[jj + 1] = (newzs[jj + 1] - newzs[jj]) * m.exp(newhxs[jj + 1])
            else:
                newscum[jj + 1] = 1. / newhpxs[jj + 1] * (m.exp(newhus[jj + 1]) - m.exp(newhus[jj]))
    if isDomainFinite[1]:
        newcu = 1. / newhpxs[nx - 1] * (m.exp(newhpxs[nx - 1] * (domain[1] - newxs[nx - 1]) + newhxs[nx - 1]) - m.exp(newhus[nx - 2]))
    else:
        newcu = -1. / newhpxs[nx - 1] * m.exp(newhus[nx - 2])
    newcu = newcu + sc.sum(newscum)
    newscum = sc.cumsum(newscum) / newcu

    newhull = []
    newhull.append(newcu)
    newhull.append(newxs)
    newhull.append(newhxs)
    newhull.append(newhpxs)
    newhull.append(newzs)
    newhull.append(newscum)
    newhull.append(newhus)
    return newhull
def neo_logl_rv(theta, paramis):
    # PARAMS DEFINITIONS
    _t, AC, params = paramis
    time, rv, err = params[0], params[1], params[2]
    ins, staract, starflag = params[3], params[4], params[5]
    kplanets, nins, MOAV = params[6], params[7], params[8]
    totcornum, ACC = params[9], params[10]

    i, lnl = 0, 0
    ndat = len(time)
    jitter, offset = sp.zeros(ndat), sp.ones(ndat) * sp.inf
    macoef = sp.array([sp.zeros(ndat) for i in range(sp.amax(MOAV))])
    timescale = sp.array([sp.zeros(ndat) for i in range(sp.amax(MOAV))])

    model_params = kplanets * 5
    ins_params = (nins + sp.sum(MOAV)) * 2
    acc_params = ACC

    # THETA CORRECTION FOR FIXED THETAS
    for a in AC:
        theta = sp.insert(theta, a, _t[a].val)

    if ACC > 0:  # recheck this at some point  # DEL
        ACC = sp.polyval(sp.r_[0, theta[model_params:model_params + ACC]],
                         (time - sp.amin(time)))

    # SETUP
    residuals = sp.zeros(ndat)
    for i in range(ndat):
        jitpos = int(model_params + acc_params + (ins[i] + sp.sum(MOAV[:int(ins[i])])) * 2)
        jitter[i], offset[i] = theta[jitpos], theta[jitpos + 1]
        for jj in range(MOAV[int(ins[i])]):
            macoef[jj][i] = theta[jitpos + 2 * (jj + 1)]
            timescale[jj][i] = theta[jitpos + 2 * (jj + 1) + 1]

    a1 = theta[:model_params]
    # if kplanets > 0:
    #     raise Exception('destroy')  # CHECK THIS CHECK THIS

    if totcornum:
        # print 'SE ACTIBOY'
        COR = sp.array([sp.array([sp.zeros(ndat) for k in range(len(starflag[i]))])
                        for i in range(len(starflag))])
        SA = theta[model_params + acc_params + ins_params:]
        assert len(SA) == totcornum, 'error in correlations'
        AR = 0.0  # just to remember to add this
        counter = -1
        for i in range(nins):
            for j in range(len(starflag[i])):
                counter += 1
                passer = -1
                for k in range(ndat):
                    if starflag[i][j] == ins[k]:
                        passer += 1
                        COR[i][j][k] = SA[counter] * staract[i][j][passer]
        FMC = 0
        for i in range(len(COR)):
            for j in range(len(COR[i])):
                FMC += COR[i][j]
    else:
        # print 'NO SE AKTIBOY'
        FMC = 0

    MODEL = RV_model(a1, time, kplanets) + offset + ACC + FMC
    # MA = sp.zeros((sp.amax(MOAV), ndat))

    # something awfully weird going out here
    residuals = rv - MODEL
    for i in range(ndat):
        for c in range(MOAV[int(ins[i])]):
            if i > c:
                MA = macoef[c][i] * sp.exp(-sp.fabs(time[i - 1 - c] - time[i]) / timescale[c][i]) * residuals[i - 1 - c]
                residuals[i] -= MA

    # if kplanets > 0:
    #     raise Exception('debug')

    inv_sigma2 = 1.0 / (err ** 2 + jitter ** 2)
    lnl = sp.sum(residuals ** 2 * inv_sigma2 - sp.log(inv_sigma2)) + sp.log(2 * sp.pi) * ndat
    return -0.5 * lnl
def plot_rc(self, save=False, xs=True, xsapprox=True, kind='power', dist=5000,
            raw=False, alpha=0.05, div=5, box=False):
    """Plot HAND and xs rating curves with confidence intervals

    'hand' - plot hand rating curve [T/F]
    'xs' - plot xs rating curves [T/F]
    'xsapprox' - plot xs rating curve approximation from n-value averages [T/F]
    'ci' - plot confidence intervals [T/F]
    'alpha' - alpha for confidence intervals [float(0.0,1.0)]
    'div' - number of intervals for confidence interval [R]"""

    if xs:  # Plot all linearly-interpolated XS rating curves
        intervals = scipy.arange(dist, self.handlen + dist, dist)
        # print 'Intervals:', intervals
        cutoffub = [i / self.handlen * 100 for i in intervals]
        cutofflb = scipy.copy(cutoffub)
        cutofflb = scipy.insert(cutofflb, 0, 0)[:-1]
        cutoffs = zip(cutofflb, cutoffub)

        for l, u in cutoffs:
            idx = scipy.where(
                scipy.logical_and(scipy.greater_equal(self.xs_profs, l),
                                  scipy.less(self.xs_profs, u)))[0]
            if u > 100:
                u = 100.00
            fig, ax = plt.subplots()  # get figure and axes for plotting
            fname = 'results/by5000/{0}/rc__comid_{0}_from_{1}_to_{2}.png'.format(
                self.comid, ('%.2f' % l), ('%.2f' % u))

            for prof, disch, stage in zip(self.xs_profs[idx], self.xs_disch[idx],
                                          self.xs_stage[idx]):
                # Get interpolation function
                # print (('%.2f' % prof) + str(disch))
                # print (('%.2f' % prof) + str(stage))
                f = self.interp(x=disch, y=stage, kind=kind)

                if raw == True:
                    # Plot raw data (ie. only HEC-RAS points)
                    # interp over discharge
                    ax.plot(disch, f(disch), c='grey', linewidth=2)
                    # interp over stage (switched axes) for testing
                    # f = self.interp(x=stage,y=disch,kind=kind)
                    # ax.plot(f(stage),stage,c='purple',linewidth=1)

                if raw == False:
                    # Plot interpolated data (ie. 'div' many interpolated points)
                    interval = disch[-1] / div
                    qvals = scipy.arange(0, (disch[-1] + interval), interval)  # [1:]
                    ax.plot(qvals, f(qvals), c='grey', linewidth=2)

            # Add one label for all cross-section curves
            ax.plot([], [], label='HEC-RAS', c='grey', linewidth=2)

            # Plot graph
            fig.set_size_inches(20, 16, forward=True)
            plt.gca().set_xlim(left=0, right=self.max_disch)
            plt.gca().set_ylim(bottom=0, top=self.max_stage)
            ax.set_xticks(ax.get_xticks()[::2])
            ax.set_yticks(ax.get_yticks()[::2])
            title = 'COMID {0}, ({1},{2})'.format(self.comid, ('%.2f' % l), ('%.2f' % u))
            ax.set_title(title, y=1.04, fontsize=56)
            plt.xlabel('Q (cfs)', fontsize=56)
            plt.ylabel('H (ft)', fontsize=56)
            ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
            plt.rc('font', size=56)
            plt.legend(loc='upper left', fontsize=40)
            plt.tick_params(axis='both', labelsize=56)
            plt.grid()
            # print '\n------------------------\n'

            if xsapprox:
                # Add approximate rating curve from average n-values
                qvals, hvals = self.get_xs_q(low=0, upto=83)
                f = self.interp(x=qvals, y=hvals, kind=kind)
                ax.plot(qvals, f(qvals), label='Resistance Function', c='red', linewidth=5)

                # Add approximate rating curve for these indices
                idxqvals, idxhvals = self.get_xs_q(low=idx[0], upto=idx[-1])
                if len(idxqvals) == 0:
                    print 'No data found for profiles {0} to {1}'.format(('%.2f' % l), ('%.2f' % u))
                    break
                # f = self.interp(x=idxqvals,y=idxhvals,kind=kind)
                # ax.plot(idxqvals,f(idxqvals),label='Resistance Function Local Average',c='orange',linewidth=5)
            else:
                fig, ax = plt.subplots()

            if save:
                fig.savefig(fname)
            else:
                # mng = plt.get_current_fig_manager()
                # mng.resize(*mng.window.maxsize())
                plt.show()
def solver_1D(x, V, units, num_states=15):
    """Returns eigenstates and eigenenergies of one dimensional potentials
    defined on a grid, using a second order finite difference approximation
    for the kinetic energy. Assumes infinite walls at both ends of the
    problem space.

    Input
        x : np.array([x_i])
            The spatial grid points including the endpoints
        V : np.array([V(x_i)])
            The potential function defined on the grid
        units : class
            Class whose attributes are the fundamental constants hbar, e, m, c, etc.
        num_states : int, default 15
            Number of states to be returned. Must be less than the number of
            spatial points - 2.

    Output
        psi : np.array([psi_0(x), ..., psi_N(x)]) where N = num_states
            Eigenfunctions of Hamiltonian
        E : np.array([E_0, ..., E_N])
            Eigenvalues of Hamiltonian
    """
    # Determine number of points in spatial grid
    N = len(x)
    dx = x[1] - x[0]

    # Reset num_states if the resolution of the space is less than the
    # requested number of states
    if num_states > N - 2:
        print("Resolution too poor for requested number of states. "
              + str(N - 1) + " states returned.")
        num_states = N - 1

    # Construct the Hamiltonian in position space
    H = solver_utils.make_hamiltonian(dx, V, units, boundary='hard_wall')

    # Compute eigenvalues and eigenfunctions:
    E, psi = linalg.eigh(H)

    # Truncate to the desired number of states
    E = E[:num_states]
    psi = psi[:, :num_states]

    # Hard walls are assumed at the boundary, so the wavefunction at the boundary
    # must be zero. We enforce this boundary condition:
    psi = sp.insert(psi, 0, sp.zeros(num_states), axis=0)
    psi = sp.insert(psi, len(x) - 1, sp.zeros(num_states), axis=0)

    # Normalize to unity:
    for i in range(num_states):
        psi[:, i] = psi[:, i] / sp.sqrt(sp.trapz(psi[:, i] * psi[:, i], x))

    # Take the transpose so that psi[i] is the ith eigenfunction:
    psi = psi.transpose()

    return psi, E
def neo_logl_pm(theta, paramis):
    _t, AC, params = paramis
    time, flux, err = params[0], params[1], params[2]
    ins, kplanets, nins = params[3], params[4], params[5]
    # for linear, linear should be [1, 1]
    ld, batman_m, batman_p = params[6], params[7], params[8]
    gp, gaussian_processor = params[9], params[10]
    ndat = len(time)
    # logl_params = sp.array([self.time_pm, self.rv_pm, self.err_pm,
    #                         self.ins_pm, kplan, self.nins_pm])

    # 0 correct for fixed values
    for a in AC:
        theta = sp.insert(theta, a, _t[a].val)

    # 1 build the model with batman, i.e. call neo_lc
    model = neo_lightcurve(theta, params)

    # 2 compute residuals
    PM_residuals = flux - model  # why some people do the *1e6

    # 3 invoke the likelihood using george (could be another function),
    #   but I'll do it below because why not

    # 4 build the kernel and make GP(kernel); can this be done outside?!
    theta_gp = theta[-len(gp):]
    # theta_gp[1] = 10 ** theta_gp[1]  # for k_r in Matern32Kernel
    gp.set_parameter_vector(theta_gp)  # last <gp> params, check for fixed shit?

    # should be jitter with err
    # gp.compute(time, sp.sqrt(err**2+theta_gp[0]**2))
    if gaussian_processor == 'george':
        return -gp.lnlikelihood(PM_residuals, quiet=True)  # george
    if gaussian_processor == 'celerite':
        try:
            return -gp.log_likelihood(PM_residuals)  # celerite
        except:
            return -sp.inf

    '''
    try:  # check which is which in gp
        gp.compute(t, sp.sqrt(err**2+theta_gp[0]**2))
        global glob_asd
        print('correct iter= ', glob_asd)
        glob_asd += 1
    except:
        return -sp.inf
    '''
    pass
    # try gp.compute and return gp.like(res)
    # create noise model
    # t1^2*exp(-0.5*r^2/t2)
    # check vals

    # this should go outside
    '''
    kernel = t1 ** 2 * kernels.ExpSquaredKernel(t2 ** 2)
    jitt = george.modeling.ConstantModel(sp.log((1e-4)**2.))
    gp = george.GP(kernel, mean=0.0, fit_mean=False,
                   white_noise=jitt, fit_white_noise=True)
    gp.compute(time)
    # likelihood
    gp.set_parameter_vector(p)
    return gp.lnlikelihood(flux, quiet=True)
    '''
    pass
from scipy.stats import expon
from numpy import histogram
from scipy import mean, insert, cumsum, var, arange

lambd = 1 / 40.0
n_process = 10000
time_intervals = expon.rvs(scale=1 / lambd, size=n_process - 1)
arrival_times = insert(cumsum(time_intervals), 0, 0)
print arrival_times

size_intervals = 50000
bins = arange(0, arrival_times[-1], size_intervals)
print "Intervals:", bins

n_process_per_interval, edges = histogram(arrival_times, bins=bins)
print n_process_per_interval
print "Mean of # of process per interval", mean(n_process_per_interval)
print "Std.Dev of process per interval", var(n_process_per_interval)**0.5

from matplotlib import pyplot
pyplot.hist(arrival_times, bins=bins)
pyplot.show()
print('Reading \'' + args.fil_ll[0] + '\'')
cmd = ['perl', '-lne',
       ('$nu=substr($_,3,12); $S=substr($_,16,9); print "$nu" if /^ 2/ && {0}<$nu && $nu<{1} && $S>'
        + str(args.strength_cutoff[0]) + ';').format(w[0], w[1]),
       args.fil_ll[0]]
#dum=subprocess.check_output(cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
frqs_str, err = p.communicate()
frqs = scipy.array([float(s) for s in frqs_str.split()])
win_halfwidth = scipy.ones(frqs.size) * args.win_halfwidth_dflt[0]

# additional line to block out
if args.band[0] == 'WCO2':
    line = 6250.4
    idx = scipy.where((line < frqs) & (frqs < w[1]))
    if idx[0].size > 0:
        frqs = scipy.insert(frqs, idx[0][0], line)
        win_halfwidth = scipy.insert(win_halfwidth, idx[0][0], 0.15)

print('Copying \'' + args.fil_in[0] + '\' to \'' + args.fil_out[0] + '\'')
shutil.copy2(args.fil_in[0], args.fil_out[0])

fp = h5py.File(args.fil_out[0], 'a')
mw_orig = scipy.array(fp['/Spectral_Window/microwindow'])

Nspec = 3

if args.line_method[0] == 'select':
    Nwin = len(frqs)
    mw_test = scipy.zeros([Nspec, Nwin, 2], dtype=scipy.float64)
    # leftmost window
    mw_test[band, 0, 0] = frqs[0] - win_halfwidth[0]
def running_mean(x, N): cumsum = scipy.cumsum(scipy.insert(x, 0, 0)) return (cumsum[N:] - cumsum[:-N]) / N
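A quick check of running_mean against numpy's convolution-based moving average; assuming running_mean as defined above is in scope (with an old SciPy that still exposes cumsum/insert), both should agree to floating-point precision.

# Usage example: cumulative-sum running mean vs. np.convolve.
import numpy as np

data = np.arange(10, dtype=float)
N = 3
print(running_mean(data, N))                              # [1. 2. 3. 4. 5. 6. 7. 8.]
print(np.convolve(data, np.ones(N) / N, mode='valid'))    # same values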
x0 = scipy.zeros((N + 1, ))
x0_hat = scipy.zeros((N, ))
# x0 = scipy.linspace(2, 0, N+1)
x0 = scipy.linspace(xL, xR, N + 1)
# # x0+= scipy.sin(ts/T*pi)
# x0[0] = xL
# x0[-1] = xR
transform(x0, x0_hat)
# x0_hat = N/scipy.arange(N, dtype=float)
# x0_hat[0] = 0
inverseTransform(x0, x0_hat)
x = x0.copy()
x_hat = x0_hat.copy()
# alg_times = scipy.arange(0, 10.01, 1.)
alg_times = scipy.insert(scipy.power(10.0, scipy.arange(-3, 0, 1)), 0, 0)
n = 0
for s in scipy.arange(0, alg_times[-1] + .5 * upsilon, upsilon):
    if s >= alg_times[n]:
        plt.plot(ts, x, '.')
        n += 1
        print(s)
    x_hat = stepExpEuler(x, x_hat, f, f_hat)
    inverseTransform(x, x_hat)
plt.show()
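The transform, inverseTransform and stepExpEuler helpers above are not shown, so here is only a minimal self-contained sketch of the idea behind an exponential-Euler step in spectral space, assuming a periodic domain and the heat equation u_t = nu*u_xx (the actual problem above uses a different domain and forcing).

# Sketch only: one exponential-Euler step for the heat equation in Fourier space;
# the linear term exp(-nu*k^2*dt) is integrated exactly.
import numpy as np

N, nu, dt = 64, 0.1, 1e-3
xs = np.linspace(0, 2 * np.pi, N, endpoint=False)
u = np.sin(xs)
k = np.fft.fftfreq(N, d=1.0 / N)          # integer wavenumbers

u_hat = np.fft.fft(u)
u_hat *= np.exp(-nu * k**2 * dt)          # exact linear propagation
u = np.fft.ifft(u_hat).real
print(u[:4])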
def neo_logl_rv(theta, paramis):
    # PARAMS DEFINITIONS
    # lock and load 'em
    _t, AC, params = paramis
    time, rv, err = params[0], params[1], params[2]
    ins, staract, starflag = params[3], params[4], params[5]
    kplanets, nins, MOAV = params[6], params[7], params[8]
    MOAV_STAR, totcornum, ACC = params[9], params[10], params[11]
    i, lnl = 0, 0
    ndat = len(time)

    # THETA CORRECTION FOR FIXED THETAS
    for a in AC:
        theta = sp.insert(theta, a, _t[a].val)

    # count 'em  # this could be outside!!!!
    model_params = kplanets * 5
    ins_params = (nins + sp.sum(MOAV)) * 2
    gen_moav_params = MOAV_STAR * 2
    gen_params = ACC + gen_moav_params

    a1 = (theta[:model_params])  # keplerian
    a2 = theta[model_params:model_params + ACC]  # acc
    a3 = theta[model_params + ACC:model_params + gen_params]  # starmoav
    a4 = theta[model_params + gen_params:model_params + gen_params + ins_params]  # instr moav
    a5 = theta[model_params + gen_params + ins_params:]

    # keplerian
    residuals = rv - RV_model(a1, time, kplanets)
    # general
    residuals -= acc_model(a2, time, ACC)

    # instrumental
    jitter, offset = sp.zeros(ndat), sp.ones(ndat) * sp.inf
    macoef, timescale = sp.array([
        sp.zeros(ndat) for i in range(sp.amax(MOAV))
    ]), sp.array([sp.zeros(ndat) for i in range(sp.amax(MOAV))])

    # remove this for loop... vectorize it please; besides, there is no need to loop over ndat
    for i in range(ndat):
        jitpos = int(model_params + gen_params +
                     (ins[i] + sp.sum(MOAV[:int(ins[i])])) * 2)
        jitter[i], offset[i] = theta[jitpos], theta[jitpos + 1]  #
        for jj in range(MOAV[int(ins[i])]):
            macoef[jj][i] = theta[jitpos + 2 * (jj + 1)]
            timescale[jj][i] = theta[jitpos + 2 * (jj + 1) + 1]
    residuals -= offset

    # staract (instrumental)
    #if totcornum:
    for sa in range(totcornum):
        residuals[ins == starflag[sa]] -= a5[sa] * staract[sa]

    residuals = gen_model(a3, time, MOAV_STAR, residuals)

    #MODEL = RV_model(a1, time, kplanets) + offset + ACC + FMC

    # Instrumental MOAV
    for I in range(nins):
        t_I = time[ins == I]
        for i in range(len(t_I)):
            for c in range(MOAV[I]):
                if i > c:
                    MA = macoef[c][i] * sp.exp(
                        -sp.fabs(t_I[i - 1 - c] - t_I[i]) / timescale[c][i]) * residuals[i - 1 - c]
                    residuals[i] -= MA
    #'''
    #if kplanets>0:
    inv_sigma2 = 1.0 / (err**2 + jitter**2)
    lnl = sp.sum(residuals**2 * inv_sigma2 - sp.log(inv_sigma2)) + sp.log(2 * sp.pi) * ndat
    if True:
        if lnl == sp.inf:
            print('like failed')
            #raise Exception('deb')
    return -0.5 * lnl
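The instrumental moving-average (MOAV) correction in the loop above can be read as: each residual is reduced by exponentially damped copies of the previous residuals, MA_c = coef_c * exp(-|t_prev - t| / timescale_c) * r_prev. A small stand-alone sketch of that term follows; here the coefficients and timescales are constants, whereas in the code above they vary per datum and per instrument.

# Sketch only: moving-average damping of radial-velocity residuals.
import numpy as np

def apply_moav(t, resid, coefs, timescales):
    r = resid.copy()
    for i in range(len(t)):
        for c in range(len(coefs)):
            if i > c:
                ma = coefs[c] * np.exp(-abs(t[i - 1 - c] - t[i]) / timescales[c]) * r[i - 1 - c]
                r[i] -= ma
    return r

t = np.linspace(0, 10, 20)
resid = np.random.normal(0, 1.0, t.size)
print(apply_moav(t, resid, coefs=[0.3], timescales=[2.0]))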
def update_hull(hull, newx, newhx, newhpx, domain, isDomainFinite): """update_hull: update the hull with a new function evaluation Input: hull - the current hull (see setup_hull for a definition) newx - a new abcissa newhx - h(newx) newhpx - hp(newx) domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? Output: newhull History: 2009-05-21 - Written - Bovy (NYU) """ #BOVY: Perhaps add a check that newx is sufficiently far from any existing point #Find where newx fits in with the other xs if newx > hull[1][-1]: newxs = sc.append(hull[1], newx) newhxs = sc.append(hull[2], newhx) newhpxs = sc.append(hull[3], newhpx) #new z newz = (newhx - hull[2][-1] - newx * newhpx + hull[1][-1] * hull[3][-1]) / (hull[3][-1] - newhpx) newzs = sc.append(hull[4], newz) #New hu newhu = hull[3][-1] * (newz - hull[1][-1]) + hull[2][-1] newhus = sc.append(hull[6], newhu) else: indx = 0 while newx > hull[1][indx]: indx = indx + 1 newxs = sc.insert(hull[1], indx, newx) newhxs = sc.insert(hull[2], indx, newhx) newhpxs = sc.insert(hull[3], indx, newhpx) #Replace old z with new zs if newx < hull[1][0]: newz = (hull[2][0] - newhx - hull[1][0] * hull[3][0] + newx * newhpx) / (newhpx - hull[3][0]) newzs = sc.insert(hull[4], 0, newz) #Also add the new hu newhu = newhpx * (newz - newx) + newhx newhus = sc.insert(hull[6], 0, newhu) else: newz1 = (newhx - hull[2][indx - 1] - newx * newhpx + hull[1][indx - 1] * hull[3][indx - 1]) / ( hull[3][indx - 1] - newhpx) newz2 = (hull[2][indx] - newhx - hull[1][indx] * hull[3][indx] + newx * newhpx) / (newhpx - hull[3][indx]) #Insert newz1 and replace z_old newzs = sc.insert(hull[4], indx - 1, newz1) newzs[indx] = newz2 #Update the hus newhu1 = hull[3][indx - 1] * (newz1 - hull[1][indx - 1]) + hull[2][ indx - 1] newhu2 = newhpx * (newz2 - newx) + newhx newhus = sc.insert(hull[6], indx - 1, newhu1) newhus[indx] = newhu2 #Recalculate the cumulative sum nx = len(newxs) newscum = sc.zeros(nx - 1) if isDomainFinite[0]: newscum[0] = 1. / newhpxs[0] * ( m.exp(newhus[0]) - m.exp(newhpxs[0] * (domain[0] - newxs[0]) + newhxs[0])) else: newscum[0] = 1. / newhpxs[0] * m.exp(newhus[0]) if nx > 2: for jj in range(nx - 2): if newhpxs[jj + 1] == 0.: newscum[jj + 1] = (newzs[jj + 1] - newzs[jj]) * m.exp( newhxs[jj + 1]) else: newscum[jj + 1] = 1. / newhpxs[jj + 1] * (m.exp(newhus[jj + 1]) - m.exp(newhus[jj])) if isDomainFinite[1]: newcu = 1. / newhpxs[nx - 1] * ( m.exp(newhpxs[nx - 1] * (domain[1] - newxs[nx - 1]) + newhxs[nx - 1]) - m.exp(newhus[nx - 2])) else: newcu = -1. / newhpxs[nx - 1] * m.exp(newhus[nx - 2]) newcu = newcu + sc.sum(newscum) newscum = sc.cumsum(newscum) / newcu newhull = [] newhull.append(newcu) newhull.append(newxs) newhull.append(newhxs) newhull.append(newhpxs) newhull.append(newzs) newhull.append(newscum) newhull.append(newhus) return newhull
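The new z values computed above are intersections of the tangent lines that make up the upper hull in adaptive rejection sampling. A small stand-alone check of that formula, using h(x) = -x^2/2 (the log of a standard normal up to a constant) as an example:

# Sketch only: intersection of two tangent lines of h, as used for the hull z's.
import numpy as np

def tangent_intersection(x1, h1, hp1, x2, h2, hp2):
    """x where h1 + hp1*(x - x1) == h2 + hp2*(x - x2)."""
    return (h2 - h1 - x2 * hp2 + x1 * hp1) / (hp1 - hp2)

h = lambda x: -0.5 * x**2
hp = lambda x: -x
z = tangent_intersection(0.0, h(0.0), hp(0.0), 2.0, h(2.0), hp(2.0))
print(z)   # 1.0: tangents at 0 and 2 to this parabola meet halfway between them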
def solver_mp(x, V, units, num_states=15):
    """Uses finite differences to discretize and solve for the eigenstates and
    energy eigenvalues of one dimensional potentials. The domain fed to this
    routine defines the problem space, allowing non-uniform point density to
    pay close attention to particular parts of the potential. Assumes infinite
    walls at both ends of the problem space.

    Input
        x : np.array([x_i])
            The spatial grid points including the endpoints
        V : np.array([V(x_i)])
            The potential function defined on the grid
        units : class
            Class whose attributes are the fundamental constants hbar, e, m, c, etc.

    Output
        psi : np.array([psi_0(x), ..., psi_N(x)]) where N = num_states
            Eigenfunctions of Hamiltonian
        E : np.array([E_0, ..., E_N])
            Eigenvalues of Hamiltonian

    Optional:
        num_states : int, default 15
            Dictates the number of states to solve for. Must be less than the
            number of spatial points - 2.
    """

    # Determine number of points in spatial grid
    N = len(x)
    dx = x[1] - x[0]

    # Reset num_states if the resolution of the space is too low for the requested
    # number of states (only N - 2 interior eigenstates exist with hard walls)
    if num_states > N - 2:
        print("Resolution too poor for requested number of states. "
              + str(N - 2) + " states returned.")
        num_states = N - 2

    # Construct the Hamiltonian in position space
    H = solver_utils.make_hamiltonian(dx, V, units, boundary='hard_wall', prec=30)

    # Compute eigenvalues and eigenfunctions:
    E, psi = mp.eigh(mp.matrix(H))

    # Truncate to the desired number of states
    E = E[:num_states]
    psi = psi[:, :num_states]

    # Hard walls: enforce psi = 0 at both endpoints
    psi = sp.insert(np.array(psi.tolist()), 0, sp.zeros(num_states), axis=0)
    psi = sp.insert(np.array(psi.tolist()), len(x) - 1, sp.zeros(num_states), axis=0)

    # Normalize to unity:
    for i in range(num_states):
        psi[:, i] = psi[:, i] / sp.sqrt(sp.trapz(psi[:, i] * psi[:, i], x))

    psi = np.array(psi.tolist())
    psi = psi.transpose()
    E = np.array(E.tolist())

    return psi, E
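The reason for routing the eigensolve through mpmath is that nearly degenerate states can lose accuracy in double precision; mp.eigh works at whatever precision mp.dps is set to. A tiny illustration of the same call pattern on an arbitrary 2x2 symmetric matrix:

# Sketch only: high-precision symmetric eigensolve with mpmath.
from mpmath import mp

mp.dps = 30                          # ~30 significant digits
A = mp.matrix([[2, 1], [1, 2]])
E, Q = mp.eigh(A)                    # same call pattern as in solver_mp above
print(E)                             # eigenvalues 1 and 3 to 30 digits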
def train(self, iterations, train_vector, iterative_update=False, grow=True, num_pts_to_grow=3): self.iterations = iterations for t in range(len(train_vector)): train_vector[t] = scipy.array(train_vector[t]) delta_nodes = scipy.zeros((self.width, self.height, self.FV_size), float) for i in range(0, iterations): cur_radius = self.radius_decay(i) cur_lr = self.learning_rate_decay(i) sys.stdout.write("\rTraining Iteration: " + str(i + 1) + "/" + str(iterations)) sys.stdout.flush() # Grow the map where it's doing worst if grow and not (i % 20): for iik in range(num_pts_to_grow): dist_mask = self.build_distance_mask() worst_loc = find_indices(scipy.argmax(dist_mask), self.width) worst_row = worst_loc[0] worst_col = worst_loc[1] # Insert the row prev_row = worst_row - 1 if worst_row - 1 >= 0 else self.height - 1 next_row = worst_row + 1 if worst_row + 1 < self.height else 0 self.nodes = scipy.insert(self.nodes, worst_row, [[0]], axis=0) self.height += 1 # Fill the new row with interpolated values for col in range(self.width): self.nodes[worst_row, col] = (self.nodes[prev_row, col] + self.nodes[next_row, col]) / 2 # Insert the column prev_col = worst_col - 1 if worst_col - 1 >= 0 else self.width - 1 next_col = worst_col + 1 if worst_col + 1 < self.width else 0 self.nodes = scipy.insert(self.nodes, worst_col, [[0]], axis=1) self.width += 1 # Fill the new column with interpolated values for row in range(self.height): self.nodes[row, worst_col] = (self.nodes[row, prev_col] + self.nodes[row, next_col]) / 2 self.radius = (self.height + self.width) / 4 delta_nodes = scipy.zeros( (self.width, self.height, self.FV_size), float) if not iterative_update: delta_nodes.fill(0) else: random.shuffle(train_vector) for j in range(len(train_vector)): best = self.best_match(train_vector[j]) # pick out the nodes that are within our decaying radius: for loc in self.find_neighborhood(best, cur_radius): influence = (-loc[2] + cur_radius ) / cur_radius # linear scaling of influence inf_lrd = influence * cur_lr delta_nodes[loc[0], loc[1]] += inf_lrd * ( train_vector[j] - self.nodes[loc[0], loc[1]]) if iterative_update: self.nodes += delta_nodes delta_nodes.fill(0) if not iterative_update: delta_nodes /= len(train_vector) self.nodes += delta_nodes sys.stdout.write("\n")
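The grow step above inserts a zero row (or column) into the node grid with scipy.insert and then fills it by averaging its neighbours. A minimal stand-alone illustration on a small grid of 2-dimensional feature vectors, using numpy directly:

# Sketch only: grow a (height, width, FV_size) node grid by one interpolated row.
import numpy as np

nodes = np.random.rand(3, 3, 2)        # (height, width, feature_vector)
worst_row = 1

nodes = np.insert(nodes, worst_row, 0.0, axis=0)                 # new empty row
nodes[worst_row] = 0.5 * (nodes[worst_row - 1] + nodes[worst_row + 1])
print(nodes.shape)                                               # (4, 3, 2)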
def BoardFlight(ac): ac.time=0 n_iter=0 time_step=0.1 exit_sum=sci.sum(ac.pass_q) pass_sum=sci.sum(ac.seats) #Imaging definitions ac.img_list=[] iters_per_snap=50 while(pass_sum!=exit_sum): #Try to move passenger inside the plane if passengers are left if(ac.pass_q.size!=0): ac.aisle_q,ac.pass_q,sum_time=MoveToAisle(ac.time,ac.aisle_q,ac.pass_q,ac.sum_time) #Scan the aisle first for non-negative units (passengers) for passg in ac.aisle_q: if(passg!=-1): #Store the row of passenger in aisle row=int(sci.where(ac.aisle_q==passg)[0][0]) #See if move has been assigned to passenger if(ac.moveto_time_dict[passg]!=0): #If move has been assigned check if it is time to move if(ac.time>ac.moveto_time_dict[passg]): #If it is time to move follow the procedure below #Check if move is forward in aisle or to seat if(ac.moveto_loc_dict[passg]=="a"): #If move is in the aisle, check if position ahead is empty if(ac.aisle_q[row+1]==-1): #If position is empty move passenger ahead and free the position behind ac.aisle_q[row+1]=passg ac.aisle_q[row]=-1 #Set moves to 0 again ac.moveto_loc_dict[passg]=0 ac.moveto_time_dict[passg]=0 elif(ac.moveto_loc_dict[passg]=="s"): #If move is to the seat, #Find seat row and column of passenger passg_row=int(ac.pass_dict[passg][0]) passg_col=int(ac.pass_dict[passg][1]) #Set seat matrix position to the passenger number ac.seats[passg_row,passg_col]=passg #Free the aisle ac.aisle_q[row]=-1 elif(ac.moveto_time_dict[passg]==0): #If move hasn't been assgined to passenger #Check passenger seat location passg_row=int(ac.pass_dict[passg][0]) passg_col=int(ac.pass_dict[passg][1]) if(passg_row==row): #If passenger at the row where his/her seat is, #Designate move type as seat ac.moveto_loc_dict[passg]="s" #Check what type of seat: aisle, middle or window #Depending upon seat type, designate when it is time to move if(passg_col==0): if(ac.seats[passg_row,1]!=-1 and ac.seats[passg_row,2]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.aisle_middle_mult*ac.time_dict[passg] elif(ac.seats[passg_row,1]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.middle_mult*ac.time_dict[passg] elif(ac.seats[passg_row,2]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.aisle_mult*ac.time_dict[passg] else: ac.moveto_time_dict[passg]=ac.time+ac.empty_mult*ac.time_dict[passg] elif(passg_col==5): if(ac.seats[passg_row,4]!=-1 and ac.seats[passg_row,3]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.aisle_middle_mult*ac.time_dict[passg] elif(ac.seats[passg_row,4]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.middle_mult*ac.time_dict[passg] elif(ac.seats[passg_row,3]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.aisle_mult*ac.time_dict[passg] else: ac.moveto_time_dict[passg]=ac.time+ac.empty_mult*ac.time_dict[passg] elif(passg_col==1): if(ac.seats[passg_row,2]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.aisle_mult*ac.time_dict[passg] else: ac.moveto_time_dict[passg]=ac.time+ac.empty_mult*ac.time_dict[passg] elif(passg_col==4): if(ac.seats[passg_row,3]!=-1): ac.moveto_time_dict[passg]=ac.time+ac.aisle_mult*ac.time_dict[passg] else: ac.moveto_time_dict[passg]=ac.time+ac.empty_mult*ac.time_dict[passg] elif(passg_col==2 or passg_col==3): ac.moveto_time_dict[passg]=ac.time+ac.empty_mult*ac.time_dict[passg] elif(passg_row!=row): #If passenger is not at the row where his/her seat is, #Designate movement type as aisle ac.moveto_loc_dict[passg]="a" #Designate time to move ac.moveto_time_dict[passg]=ac.time+ac.time_dict[passg] #Imaging if(n_iter%iters_per_snap==0 and ac.repeat==1): snap=ac.seats.copy() snap=sci.insert(snap,3,ac.aisle_q,axis=1) 
ac.img_list.append(snap) #Iteration timekeeping ac.time+=time_step n_iter+=1 pass_sum=sci.sum(ac.seats) #Image final seat matrix if(ac.repeat==1): snap=ac.seats.copy() snap=sci.insert(snap,3,ac.aisle_q,axis=1) ac.img_list.append(snap)
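The imaging step builds each frame by splicing the aisle queue in as a column between the two 3-seat blocks; a tiny stand-alone version of that splice, with made-up seat and aisle contents:

# Sketch only: insert the aisle as column 3 of a seats-plus-aisle snapshot.
import numpy as np

seats = -np.ones((4, 6), dtype=int)      # 4 rows x 6 seats, -1 = empty
aisle_q = np.array([7, -1, 3, -1])       # one entry per row, -1 = empty aisle slot

snap = np.insert(seats, 3, aisle_q, axis=1)   # 4 x 7 frame with the aisle in the middle
print(snap.shape)                             # (4, 7)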