def prepare_kernel(s):
    mod = SourceModule("""
    __global__ void update_src(int idx, int tstep, float *f) {
        f[idx] += sin(0.1*tstep);
    }

    __global__ void update(int nx, int ny, float *c, float *f, float *g) {
        int tx = threadIdx.x;
        int idx = blockIdx.x*blockDim.x + tx;
        extern __shared__ float gs[];

        gs[tx+1] = g[idx];
        int i = idx/ny, j = idx%ny;
        if(j>0 && j<ny-1) {
            if(tx==0) gs[tx] = g[idx-1];
            if(tx==blockDim.x-1) gs[tx+2] = g[idx+1];
        }
        __syncthreads();

        if(i>0 && j>0 && i<nx-1 && j<ny-1) {
            f[idx] = c[idx]*(g[idx+ny]+g[idx-ny]+gs[tx+2]+gs[tx]-4*gs[tx+1])+2*gs[tx+1]-f[idx];
        }
    }
    """)
    s.update_src = mod.get_function("update_src")
    s.update = mod.get_function("update")

    Db, s.Dg = (256,1,1), (s.nx*s.ny/256+1, 1)
    s.nnx, s.nny = sc.int32(s.nx), sc.int32(s.ny)
    s.update_src.prepare("iiP", block=(1,1,1))
    s.update.prepare("iiPPP", block=Db, shared=(256+2)*4)
def __init__(s, Nx, Ny, Nz, dx):
    FdtdSpace.__init__(s, Nx, Ny, Nz, dx)

    s.bytes_f = sc.zeros(1, 'f').nbytes

    s.kNx = sc.int32(s.Nx)
    s.kNy = sc.int32(s.Ny)
    s.kNz = sc.int32(s.Nz)
def initMainArrays(Ntot, devFx, devFy, devFz, initArray):
    TPB = 512
    if Ntot % TPB == 0:
        BPG = Ntot / TPB
    else:
        BPG = Ntot / TPB + 1
    print 'init main arrays: Ntot=%d, TPB=%d, BPG=%d' % (Ntot, TPB, BPG)

    initArray(sc.int32(Ntot), devFx, block=(TPB, 1, 1), grid=(BPG, 1))
    initArray(sc.int32(Ntot), devFy, block=(TPB, 1, 1), grid=(BPG, 1))
    initArray(sc.int32(Ntot), devFz, block=(TPB, 1, 1), grid=(BPG, 1))
def _extract_features_std(self, wav, text):
    wav_pre = audio.preemphasis(wav)
    linear_target = audio.spectrogram(wav_pre).astype(sp.float32)
    mel_target = audio.melspectrogram(wav_pre).astype(sp.float32)
    input_data = sp.asarray(text_to_sequence(str(text, encoding='utf8'),
                                             self._cleaner_names),
                            dtype=sp.int32)
    input_length = sp.int32(len(input_data))
    return (input_data, [input_length], mel_target.T, linear_target.T,
            [sp.int32(len(linear_target.T))])
def differentiate(self):
    Ntot = (self.Nx - 1) * (self.Ny - 1)
    tpb = 512
    if Ntot % tpb == 0:
        bpg = Ntot / tpb
    else:
        bpg = Ntot / tpb + 1
    Db = (tpb, 1, 1)
    Dg = (bpg, 1)
    self.diff(sc.int32(self.Nx), sc.int32(self.Ny), sc.float32(self.dx),
              self.dev_A, self.dev_dA, block=Db, grid=Dg)
def initmem_in_dev(self):
    Ntot = self.Nx * self.Ny
    tpb = 512
    if Ntot % tpb == 0:
        bpg = Ntot / tpb
    else:
        bpg = Ntot / tpb + 1
    Db = (tpb, 1, 1)
    Dg = (bpg, 1)
    self.initmem(sc.int32(Ntot), self.dev_A, block=Db, grid=Dg)
    self.initmem(sc.int32(Ntot), self.dev_dA, block=Db, grid=Dg)
def splitTimeSeries(x, y, spliceTimes, nPtsPerPiece=1):
    """
    Syntax: xPieces, yPieces = splitTimeSeries(x, y, spliceTimes, nPtsPerPiece=1)
    """
    nPieces = len(spliceTimes)
    xPieces = list()
    yPieces = list()
    for n in sc.arange(nPieces - 1):
        a = sc.int32(sc.where(x >= spliceTimes[n])[0].min())
        b = sc.int32(a + nPtsPerPiece)
        xPieces.append(x[a:b])
        yPieces.append(y[a:b])
    return sc.array(xPieces), sc.array(yPieces)
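# Usage sketch for splitTimeSeries on hypothetical data; assumes the
# module-level "import scipy as sc" convention these snippets rely on.
t = sc.arange(0.0, 10.0, 0.01)   # 100 Hz time base, 10 s long
v = sc.sin(2 * sc.pi * t)
# one 100-sample piece per splice time (the last splice time is unused)
xP, yP = splitTimeSeries(t, v, spliceTimes=[2.0, 4.0, 6.0], nPtsPerPiece=100)
print(xP.shape)  # (2, 100)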
def read_pst(pst_path):
    """
    read tillvision based .pst files as uint16.

    note: this func was flagged deprecated ("use the version in gioIO"
    instead), but that one never existed ...

    problematic: does not work on all .pst on my machine
    """
    inf_path = os.path.splitext(pst_path)[0] + '.inf'

    # reading stack size from inf
    meta = {}
    with open(inf_path, 'r') as fh:
        # fh.next()
        for line in fh.readlines():
            try:
                k, v = line.strip().split('=')
                meta[k] = v
            except:
                pass

    shape = sp.int32((meta['Width'], meta['Height'], meta['Frames']))
    raw = sp.fromfile(pst_path, dtype='int16')
    data = sp.reshape(raw, shape, order='F')
    return data.astype('uint16')
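# Usage sketch (hypothetical paths): read_pst derives the metadata file from
# the stack name, so data/measurement.pst must sit next to data/measurement.inf.
# stack = read_pst('data/measurement.pst')
# stack.shape   # (Width, Height, Frames), dtype uint16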
def DiscreteRand(alphabet):
    """
    Returns a symbol from alphabet with a uniform probability
    distribution over alphabet

    alphabet - is a n x 1 numpy vector containing symbols of the
    alphabet [1:n]
    """
    n = len(alphabet)
    U = sp.rand()
    X = sp.int32(n * U) + 1
    return alphabet[X - 1]
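# Usage sketch; assumes "import scipy as sp" (sp.rand being the legacy alias
# of numpy.random.rand used by this snippet).
symbols = sp.array([1, 2, 3, 4, 5, 6])
draws = [DiscreteRand(symbols) for _ in range(6000)]
# with a uniform distribution, each symbol appears roughly 1000 times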
def Hacker(Stats, cards):
    hearts = ['Aheart', '2heart', '3heart', '4heart', '5heart', '6heart',
              '7heart', '8heart', '9heart', '10heart', 'Jheart', 'Qheart',
              'Kheart']
    spades = ['Aspade', '2spade', '3spade', '4spade', '5spade', '6spade',
              '7spade', '8spade', '9spade', '10spade', 'Jspade', 'Qspade',
              'Kspade']
    clubs = ['Aclub', '2club', '3club', '4club', '5club', '6club', '7club',
             '8club', '9club', '10club', 'Jclub', 'Qclub', 'Kclub']
    diamonds = ['Adiamond', '2diamond', '3diamond', '4diamond', '5diamond',
                '6diamond', '7diamond', '8diamond', '9diamond', '10diamond',
                'Jdiamond', 'Qdiamond', 'Kdiamond']
    Deck = hearts + diamonds + clubs + spades
    DeckArray = sp.array(Deck)

    data = Counter(np.where(Stats[:, 0:3] == (convert(cards)))[0].tolist())
    x = 0
    while data.most_common(10)[x][1] == 3:
        index = data.most_common(10)[x][0]
        print index
        print DeckArray[sp.int32(Stats[index, :])]
        print
        x = x + 1
def getParameters(self, key="name", parse=True):
    """return the parameters of an xml model structure
    (key: key of the attributes; parse: True/False -- if True, attributes
    are parsed, i.e. eval expressions are evaluated etc.)"""
    params = self.getElementsByTagName('param', 1)
    rv = {}
    for param in params:
        value = param.getAttribute('value')
        if parse:
            ptype = param.getAttribute('type')
            if param.getAttribute('eval'):
                value = eval(value)
            elif ptype == 'matrix':
                value = self.parseMatrixParameter(value)
            elif ptype == 'double':
                value = S.double(value)
            elif ptype == 'int':
                value = S.int32(value)
            elif ptype == 'str':
                # no action for string
                pass
            else:
                raise Exception(
                    "Invalid Attribute exception: attribute %s has no type or eval!"
                    % param)
        rv[str(param.getAttribute(key))] = value
    return rv
def getFileList(dataDir, prefix, suffix, showNames=0):
    """
    Example: files = getFileList(dataDir, prefix, suffix)
    """
    files = list()
    for f in os.listdir(dataDir):
        a = sc.int32(os.path.isfile(os.path.join(dataDir, f)))
        b = sc.int32(str.find(f, prefix) > -1)
        c = sc.int32(str.find(f, suffix) > 0)
        #print(a, b, c)
        if (a * b * c):
            files.append(f)
    nFiles = len(files)
    if showNames:
        print("Found %d files with the indicated string" % nFiles)
        print(files)
    return files
def createBurstingTrain(maxTime, burstRate=10.0, interBurstRate=30.0,
                        nonBurstProp=0.1):
    nSpikes = sc.int32(maxTime / burstRate)
    nBursts = sc.int32((1 - nonBurstProp) * nSpikes)
    # seed train; interBurstRate is kept for interface compatibility
    b, ibis, ibrs = createNGammaTrains(nPulses=nSpikes, nTrains=1,
                                       meanRate=burstRate, graph=0.0)
    # collect one gamma train per burst
    train = list()
    for n in sc.arange(nBursts):
        tr, isi, ifr = createNGammaTrains(nPulses=nSpikes, nTrains=1,
                                          meanRate=burstRate, graph=0.0)
        train.append(tr)
    return train
def alphaFunction(x, A=1.0, tau=1.0, downAccel=1.0):
    """
    alphaFunction creates an alpha function with amplitude A, time constant
    tau, and downward acceleration downAccel.

    Example: alphaFunction(x, A=1.0, tau=1.0, downAccel=1.0)
    """
    aa = sc.int32(x > 0)
    xovertau = x / tau
    return A * aa * xovertau * sc.exp(downAccel * (1 - xovertau))
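# Usage sketch; assumes "import scipy as sc" as above. The response is zero
# for x <= 0 and peaks at x = tau/downAccel.
t = sc.arange(-10.0, 100.0, 0.1)
g = alphaFunction(t, A=2.0, tau=5.0, downAccel=1.5)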
def importWave(self):
    """Wave file to ndarray"""
    wf = wave.open(self.filename, 'rb')
    waveframes = wf.readframes(wf.getnframes())
    self.framerate = wf.getframerate()
    data = sp.fromstring(waveframes, sp.int16)
    self.duration = float(wf.getnframes()) / self.framerate

    if wf.getnchannels() == 2:
        # downmix to mono: average the channels in int32 so the sum
        # cannot overflow int16
        left = sp.array([data[i] for i in range(0, data.size, 2)])
        right = sp.array([data[i] for i in range(1, data.size, 2)])
        left = sp.int32(left)
        right = sp.int32(right)
        data = sp.int16((left + right) / 2)

    if self.fs == None:
        self.fs = self.framerate
    else:
        #data = self.resample(data, data.size*(self.fs/self.framerate))
        data = ssig.decimate(data, int(self.framerate / self.fs))
    self.duration_list = sp.arange(0, self.duration, 1. / self.fs)
    data = ssig.detrend(data)
    return data
def glmnetSet(opts=None):
    import scipy

    # default options
    options = {
        "weights": scipy.empty([0]),
        "offset": scipy.empty([0]),
        "alpha": scipy.float64(1.0),
        "nlambda": scipy.int32(100),
        "lambda_min": scipy.empty([0]),
        "lambdau": scipy.empty([0]),
        "standardize": True,
        "intr": True,
        "thresh": scipy.float64(1e-7),
        "dfmax": scipy.empty([0]),
        "pmax": scipy.empty([0]),
        "exclude": scipy.empty([0], dtype=scipy.integer),
        "penalty_factor": scipy.empty([0]),
        "cl": scipy.array([[scipy.float64(-scipy.inf)],
                           [scipy.float64(scipy.inf)]]),
        "maxit": scipy.int32(1e5),
        "gtype": [],
        "ltype": 'Newton',
        "standardize_resp": False,
        "mtype": 'ungrouped',
    }

    # quick return if no user opts
    if opts is None:
        print('pdco default options:')
        print(options)
        return options

    # if options are passed in by user, update options with values from opts
    optsInOptions = set(opts.keys()) - set(options.keys())
    if len(optsInOptions) > 0:
        # assert 'opts' keys are subsets of 'options' keys
        print(optsInOptions, ' : unknown option for glmnetSet')
        raise ValueError('attempting to set glmnet options that are not known to glmnetSet')
    else:
        options.update(opts)  # update values

    return options
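# Usage sketch: override two defaults and keep the rest. glmnetSet rejects
# unknown keys, so only names already in the defaults dict are accepted.
# (Assumes a legacy scipy that still exposes the float64/int32 aliases.)
import scipy
opts = glmnetSet({'alpha': scipy.float64(0.5), 'nlambda': scipy.int32(50)})
assert opts['alpha'] == 0.5 and opts['maxit'] == 100000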
def getFileList(dataDir, prefix, suffix, includeDataDir=1):
    """
    getFileList looks for files with a specific prefix and suffix within
    the directory dataDir

    Example: files = getFileList(dataDir, prefix, suffix)
    """
    files = list()
    for f in os.listdir(dataDir):
        a = sc.int32(os.path.isfile(os.path.join(dataDir, f)))
        b = sc.int32(str.find(f, prefix) > -1)
        c = sc.int32(str.find(f, suffix) > 0)
        #print(a, b, c)
        if (a * b * c):
            if includeDataDir:
                files.append(dataDir + f)
            else:
                files.append(f)
    nFiles = len(files)
    print("Found %d files with the indicated string" % nFiles)
    print(files)
    return files
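# Usage sketch (hypothetical directory and patterns): collect all .csv files
# whose names contain "trial" under ./data/, with the directory prepended.
# files = getFileList('./data/', 'trial', '.csv', includeDataDir=1)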
def extractDebleachedData(filePath):
    allData = igor.load(filePath)
    bbb = allData.children[0].userstr[b"S_waveNames"]
    aaa = bbb.decode("UTF-8")
    dataNames = aaa.split(";")[:-1]
    waves = list()
    for m in sc.arange(len(dataNames)):
        waveNum = sc.int32(dataNames[m][1 + str.rfind(dataNames[0], "e"):])
        str1 = "waves.append(allData." + dataNames[m] + "_%d.data)" % (waveNum - 1)
        #print(dataNames[m], str1)
        exec(str1)
    return sc.array(waves)
def initmem_psi_in_dev(s):
    initmem = s.get_kernel_initmem()

    N = sc.int32(s.size_x)
    Db = (s.tpb_x, 1, 1)
    Dg = (s.bpg_x, 1)
    initmem(N, s.psixEyf, block=Db, grid=Dg)
    initmem(N, s.psixEyb, block=Db, grid=Dg)
    initmem(N, s.psixEzf, block=Db, grid=Dg)
    initmem(N, s.psixEzb, block=Db, grid=Dg)
    initmem(N, s.psixHyf, block=Db, grid=Dg)
    initmem(N, s.psixHyb, block=Db, grid=Dg)
    initmem(N, s.psixHzf, block=Db, grid=Dg)
    initmem(N, s.psixHzb, block=Db, grid=Dg)

    N = sc.int32(s.size_y)
    Db = (s.tpb_y, 1, 1)
    Dg = (s.bpg_y, 1)
    initmem(N, s.psiyEzf, block=Db, grid=Dg)
    initmem(N, s.psiyEzb, block=Db, grid=Dg)
    initmem(N, s.psiyExf, block=Db, grid=Dg)
    initmem(N, s.psiyExb, block=Db, grid=Dg)
    initmem(N, s.psiyHzf, block=Db, grid=Dg)
    initmem(N, s.psiyHzb, block=Db, grid=Dg)
    initmem(N, s.psiyHxf, block=Db, grid=Dg)
    initmem(N, s.psiyHxb, block=Db, grid=Dg)

    N = sc.int32(s.size_z)
    Db = (s.tpb_z, 1, 1)
    Dg = (s.bpg_z, 1)
    initmem(N, s.psizExf, block=Db, grid=Dg)
    initmem(N, s.psizExb, block=Db, grid=Dg)
    initmem(N, s.psizEyf, block=Db, grid=Dg)
    initmem(N, s.psizEyb, block=Db, grid=Dg)
    initmem(N, s.psizHxf, block=Db, grid=Dg)
    initmem(N, s.psizHxb, block=Db, grid=Dg)
    initmem(N, s.psizHyf, block=Db, grid=Dg)
    initmem(N, s.psizHyb, block=Db, grid=Dg)
def compute_sample(ysample, xsample, binindex):
    upper_index = sp.int32(sp.ceil(binindex))
    lower_index = sp.int32(sp.floor(binindex))
    ppy_upper = interpolate.interp1d(bimodal_partial_cdf[:, upper_index],
                                     self.y_eval_space)
    ppy_lower = interpolate.interp1d(bimodal_partial_cdf[:, lower_index],
                                     self.y_eval_space)
    a = bimodal_partial_cdf[:, upper_index]
    b = bimodal_partial_cdf[:, lower_index]
    samples_upper = ppy_upper(ysample * (max(a) - min(a)) * 0.9999 + min(a) * 1.001)
    samples_lower = ppy_lower(ysample * (max(b) - min(b)) * 0.9999 + min(b) * 1.001)

    # Lerp over the lower and upper
    a = self.x_eval_space[upper_index]
    b = self.x_eval_space[lower_index]
    return samples_lower + (samples_upper - samples_lower) / (a - b) * (xsample - b)
def load_lst(self):
    """ reads metadata from a .lst file. Needed to generate output in the
    .gloDatamix format """
    lst_path = self.OpenFileDialog(title='load lst',
                                   default_dir=self.Main.Options.general['cwd'],
                                   extension='*.lst')[0]

    # read
    self.Main.Data.Metadata.LSTdata = gio.read_lst(lst_path)
    self.Main.Options.flags['LST_was_read'] = True

    # update labels
    ind_map = self.map_lst_inds_to_path_inds()

    # concentration
    concs = [str(self.Main.Data.Metadata.LSTdata.loc[ind_map[n]]['OConc'])
             for n in range(self.Main.Data.nTrials)]
    new_concs = []
    for conc in concs:
        if sp.int32(conc) > 0:  # info is in dilutions
            new_conc = str(-1 * sp.around(sp.log10(sp.int32(conc))))
            new_concs.append(new_conc)
        else:
            new_concs.append(conc)

    # label
    labels = [self.Main.Data.Metadata.LSTdata.loc[ind_map[n]]['Odour']
              for n in range(self.Main.Data.nTrials)]

    # combine
    new_labels = [labels[i] + new_concs[i] for i in range(len(labels))]
    self.Main.Data.Metadata.trial_labels = new_labels
    self.Main.MainWindow.Front_Control_Panel.Data_Selector.set_current_labels(
        self.Main.Data.Metadata.trial_labels)

    # set stimulus timing
    # cycle time
    pass
def SetProjMatPETSC(cpCorArray, ProjMat, DA, vg):
    m = DA.getSizes()[0]
    dx = 4. / m
    AO = DA.getAO()

    # indices of the four grid points surrounding each control point
    # (bottom-left, bottom-right, upper-left, upper-right)
    psnx = sp.int32(sp.floor_divide(cpCorArray[:, 0] + 2., dx))
    psny = sp.int32(sp.floor_divide(cpCorArray[:, 1] + 2., dx))
    idxbl = psnx + psny * m
    idxbr = psnx + 1 + psny * m
    idxul = psnx + (psny + 1) * m
    idxur = psnx + 1 + (psny + 1) * m
    idxbl = AO.app2petsc(idxbl)
    idxbr = AO.app2petsc(idxbr)
    idxul = AO.app2petsc(idxul)
    idxur = AO.app2petsc(idxur)

    start, end = vg.getOwnershipRange()
    modx = sp.mod(cpCorArray[:, 0] + 2., dx)
    mody = sp.mod(cpCorArray[:, 1] + 2., dx)
    dx2 = dx ** 2
    # bilinear interpolation weights for each surrounding grid point
    for i in sp.arange(end - start):
        ProjMat[i + start, idxbl[i]] = (dx - modx[i]) * (dx - mody[i]) / dx2
        ProjMat[i + start, idxbr[i]] = modx[i] * (dx - mody[i]) / dx2
        ProjMat[i + start, idxul[i]] = (dx - modx[i]) * mody[i] / dx2
        ProjMat[i + start, idxur[i]] = modx[i] * mody[i] / dx2
    return
def get_spike_inds(SpikeTrain):
    """
    get the indices of spike times relative to an AnalogSignal with equal
    sampling rate.

    Args:
        SpikeTrain (neo.core.SpikeTrain): the SpikeTrain

    Returns:
        list: a list of indices
    """
    SpikeTrain = copy.deepcopy(SpikeTrain)
    SpikeTrain -= SpikeTrain.t_start
    SpikeTrain.t_start = 0 * pq.s
    inds = sp.int32((SpikeTrain * SpikeTrain.sampling_rate).simplified)
    return inds
def initmem_main_in_dev(s):
    initmem = s.get_kernel_initmem()
    tpb = 512
    bpg = s.calc_bpg(s.size2, tpb)
    N = sc.int32(s.size2)
    Db = (tpb, 1, 1)
    Dg = (bpg, 1)
    initmem(N, s.devEx, block=Db, grid=Dg)
    initmem(N, s.devEy, block=Db, grid=Dg)
    initmem(N, s.devEz, block=Db, grid=Dg)
    initmem(N, s.devHx, block=Db, grid=Dg)
    initmem(N, s.devHy, block=Db, grid=Dg)
    initmem(N, s.devHz, block=Db, grid=Dg)
def reorder_labels(labels):
    nClusters = sp.int32(sp.amax(labels.flatten()) + 1)
    labels0_vec = sp.zeros((labels.shape[0], nClusters), 'bool')
    labelsi_vec = labels0_vec.copy()
    for i in range(nClusters):
        labels0_vec[:, i] = (labels[:, 0] == i)

    for i in range(labels.shape[1]):
        for j in range(nClusters):
            labelsi_vec[:, j] = (labels[:, i] == j)

        D = pairwise_distances(labelsi_vec.T, labels0_vec.T, metric='dice')
        D[~sp.isfinite(D)] = 1
        ind1 = linear_assignment(D)
        labels[:, i] = ind1[sp.int16(labels[:, i]), 1]
    return labels
def draw_matches(img1, img2, sel_matches, k1, k2):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
    view[:h1, :w1, 0] = img1
    view[:h2, w1:, 0] = img2
    view[:, :, 1] = view[:, :, 0]
    view[:, :, 2] = view[:, :, 0]

    position = None
    if (sel_matches is not None) and (k2 is not None):  # don't use in final production
        for m in sel_matches:
            # draw the keypoint matches
            color = tuple([sp.random.randint(0, 255) for _ in iter(range(3))])
            cv2.line(view,
                     (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1])),
                     (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1])),
                     color)

        kp2 = [k2[m.trainIdx] for m in sel_matches]
        kp1 = [k1[m.queryIdx] for m in sel_matches]
        p1 = cv2.KeyPoint_convert(kp1)
        p2 = cv2.KeyPoint_convert(kp2)
        if sum(1 for _ in sel_matches) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        else:
            H, status = None, None

        if H is not None:
            corners = sp.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
            position = cv2.perspectiveTransform(corners.reshape(1, -1, 2),
                                                H).reshape(-1, 2)
            area = computeArea(position)
            print("Area : " + str(area))
            if area > (w2 * h2 / 150) and area < (w2 * h2 / 1.4):
                corners = sp.int32(position + (w1, 0))
                cv2.polylines(view, [corners], True, (255, 255, 255))
            else:
                position = None

    cv2.imshow("view", view)
    return position
def extractDebleachedFminData(filePath):
    """
    extractDebleachedFminData takes a path to a pxp data file as its only
    argument and extracts all the wave data found there, provided the data
    has the suffix F_min

    Example:
    dataDir = "./microcircuitsNetworks/"
    fileName = "cort76dp1c.pxp"
    waveData, timeStamps = extractDebleachedFminData(dataDir + fileName)
    """
    allData = igor.load(filePath)
    #dataNames = st.digits.split(allData.children[0].userstr["S_waveNames"], ";")[:-1]
    bbb = allData.children[0].userstr[b"S_waveNames"]
    aaa = bbb.decode("UTF-8")
    dataNames = aaa.split(";")[:-1]
    waves = list()
    for m in sc.arange(len(dataNames)):
        waveNum = sc.int32(dataNames[m][1 + str.rfind(dataNames[0], "e"):])
        str1 = "waves.append(allData." + dataNames[m] + "_%dF_min.data)" % (waveNum - 1)
        #print(dataNames[m], str1)
        exec(str1)
    return sc.array(waves), sc.array(allData.sec.data)
def extractPXPData(fName):
    """
    extractPXPData

    Example:
    allData = extractPXPData("microcircuitsNetworks/cort76dp1c.pxp")
    """
    allData = igor.load(fName)
    # Dictionary containing the names of recorded variables during the experiment
    bbb = allData.children[0].userstr[b"S_waveNames"]
    aaa = bbb.decode("UTF-8")
    dataNames = aaa.split(";")[:-1]
    extractedData2 = list()
    for nam in dataNames:
        if len(nam) > 3:
            # append the wave data inside exec so the binding is visible here
            str2 = "extractedData2.append(allData." + nam + ".data)"
            exec(str2)
        else:
            print("Found empty string")
    rawData = sc.array(extractedData2)
    return rawData
def updateE(s, tstep, F):
    s.update_src.prepared_call((s.bpg, 1), s.kNx, s.kNy, s.kNz,
                               sc.int32(tstep), F)
def _calculate_length(self, data):
    """Calculates the length of the next entry in a dbus.ByteArray."""
    # serialize the length as a 4-byte big-endian prefix
    return scipy.int32(len(data)).newbyteorder('B').tostring()
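# Byte-layout sketch for the prefix built above: a 32-bit big-endian length.
# np.array(..., dtype='>i4') mirrors int32(...).newbyteorder('B') without
# relying on the legacy scipy scalar API.
import numpy as np
prefix = np.array(300, dtype='>i4').tobytes()
assert prefix == b'\x00\x00\x01\x2c'  # 300 == 0x0000012C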
def alphaFunction(x, A=1.0, tau=1.0, downAccel=1.0):
    aa = sc.int32(x > 0)  # zero the response for x <= 0
    xovertau = x / tau
    return A * aa * xovertau * sc.exp(downAccel * (1 - xovertau))
def glmnet(*, x, y, family='gaussian', **options):
    # check inputs: make sure x and y are scipy, float64 arrays
    # fortran order is not checked as we force a convert later
    if not isinstance(x, scipy.sparse.csc.csc_matrix):
        if not (isinstance(x, scipy.ndarray) and x.dtype == 'float64'):
            raise ValueError('x input must be a scipy float64 ndarray')
    else:
        if not (x.dtype == 'float64'):
            raise ValueError('x input must be a float64 array')

    if not (isinstance(y, scipy.ndarray) and y.dtype == 'float64'):
        raise ValueError('y input must be a scipy float64 ndarray')

    # create options
    if options is None:
        options = glmnetSet()

    ## match the family, abbreviation allowed
    fambase = ['gaussian', 'binomial', 'poisson', 'multinomial', 'cox', 'mgaussian']
    # find index of family in fambase
    indxtf = [fam.startswith(family.lower()) for fam in fambase]
    famind = [i for i in range(len(indxtf)) if indxtf[i] == True]
    if len(famind) == 0:
        raise ValueError('Family should be one of "gaussian", "binomial", '
                         '"poisson", "multinomial", "cox", "mgaussian"')
    elif len(famind) > 1:
        raise ValueError('Family could not be uniquely determined: use a '
                         'longer description of the family string.')
    else:
        family = fambase[famind[0]]

    ## prepare options
    options = glmnetSet(options)
    #print('glmnet.py options:')
    #print(options)

    ## error check options parameters
    alpha = scipy.float64(options['alpha'])
    if alpha > 1.0:
        print('Warning: alpha > 1.0; setting to 1.0')
        options['alpha'] = scipy.float64(1.0)
    if alpha < 0.0:
        print('Warning: alpha < 0.0; setting to 0.0')
        options['alpha'] = scipy.float64(0.0)

    parm = scipy.float64(options['alpha'])
    nlam = scipy.int32(options['nlambda'])
    nobs, nvars = x.shape

    # check weights length
    weights = options['weights']
    if len(weights) == 0:
        weights = scipy.ones([nobs, 1], dtype=scipy.float64)
    elif len(weights) != nobs:
        raise ValueError('Error: Number of elements in "weights" not equal '
                         'to number of rows of "x"')
    # check if weights are scipy nd array
    if not (isinstance(weights, scipy.ndarray) and weights.dtype == 'float64'):
        raise ValueError('weights input must be a scipy float64 ndarray')

    # check y length
    nrowy = y.shape[0]
    if nrowy != nobs:
        raise ValueError('Error: Number of elements in "y" not equal to '
                         'number of rows of "x"')

    # check ne
    ne = options['dfmax']
    if len(ne) == 0:
        ne = nvars + 1

    # check nx
    nx = options['pmax']
    if len(nx) == 0:
        nx = min(ne * 2 + 20, nvars)

    # check jd
    exclude = options['exclude']
    # TBD: test this
    if not (len(exclude) == 0):
        exclude = scipy.unique(exclude)
        if scipy.any(exclude < 0) or scipy.any(exclude >= nvars):
            raise ValueError('Error: Some excluded variables are out of range')
        else:
            jd = scipy.append(len(exclude), exclude + 1)  # indices are 1-based in fortran
    else:
        jd = scipy.zeros([1, 1], dtype=scipy.integer)

    # check vp
    vp = options['penalty_factor']
    if len(vp) == 0:
        vp = scipy.ones([1, nvars])

    # inparms
    inparms = glmnetControl()

    # cl
    cl = options['cl']
    if any(cl[0, :] > 0):
        raise ValueError('Error: The lower bound on cl must be non-positive')
    if any(cl[1, :] < 0):
        raise ValueError('Error: The upper bound on cl must be non-negative')
    cl[0, cl[0, :] == scipy.float64('-inf')] = -1.0 * inparms['big']
    cl[1, cl[1, :] == scipy.float64('inf')] = 1.0 * inparms['big']

    if cl.shape[1] < nvars:
        if cl.shape[1] == 1:
            cl = cl * scipy.ones([1, nvars])
        else:
            raise ValueError('Error: Require length 1 or nvars lower and upper limits')
    else:
        cl = cl[:, 0:nvars]

    exit_rec = 0
    if scipy.any(cl == 0.0):
        fdev = inparms['fdev']
        if fdev != 0:
            optset = dict()
            optset['fdev'] = 0
            glmnetControl(optset)
            exit_rec = 1

    isd = scipy.int32(options['standardize'])
    intr = scipy.int32(options['intr'])
    if (intr == True) and (family == 'cox'):
        print('Warning: Cox model has no intercept!')

    jsd = scipy.int32(options['standardize_resp'])
    thresh = options['thresh']
    lambdau = options['lambdau']
    lambda_min = options['lambda_min']

    if len(lambda_min) == 0:
        if nobs < nvars:
            lambda_min = 0.01
        else:
            lambda_min = 1e-4

    lempty = (len(lambdau) == 0)
    if lempty:
        if lambda_min >= 1:
            raise ValueError('ERROR: lambda_min should be less than 1')
        flmin = lambda_min
        ulam = scipy.zeros([1, 1], dtype=scipy.float64)
    else:
        flmin = 1.0
        if any(lambdau < 0):
            raise ValueError('ERROR: lambdas should be non-negative')
        ulam = -scipy.sort(-lambdau)  # reverse sort
        nlam = lambdau.size

    maxit = scipy.int32(options['maxit'])
    gtype = options['gtype']
    if len(gtype) == 0:
        if nvars < 500:
            gtype = 'covariance'
        else:
            gtype = 'naive'

    # ltype
    ltype = options['ltype']
    ltypelist = ['newton', 'modified.newton']
    indxtf = [lt.startswith(ltype.lower()) for lt in ltypelist]
    indl = [i for i in range(len(indxtf)) if indxtf[i] == True]
    if len(indl) != 1:
        raise ValueError('ERROR: ltype should be one of "Newton" or "modified.Newton"')
    else:
        kopt = indl[0]

    if family == 'multinomial':
        mtype = options['mtype']
        mtypelist = ['ungrouped', 'grouped']
        indxtf = [mt.startswith(mtype.lower()) for mt in mtypelist]
        indm = [i for i in range(len(indxtf)) if indxtf[i] == True]
        if len(indm) == 0:
            raise ValueError('Error: mtype should be one of "ungrouped" or "grouped"')
        elif indm[0] == 1:  # 'grouped'
            kopt = 2

    #
    offset = options['offset']

    # sparse (if is_sparse, convert to compressed sparse row format)
    is_sparse = False
    if scipy.sparse.issparse(x):
        is_sparse = True
        tx = scipy.sparse.csc_matrix(x, dtype=scipy.float64)
        x = tx.data
        x = x.reshape([len(x), 1])
        irs = tx.indices + 1
        pcs = tx.indptr + 1
        irs = scipy.reshape(irs, [len(irs), ])
        pcs = scipy.reshape(pcs, [len(pcs), ])
    else:
        irs = scipy.empty([0])
        pcs = scipy.empty([0])

    if scipy.sparse.issparse(y):
        y = y.todense()

    ## finally call the appropriate fit code
    if family == 'gaussian':
        # call elnet
        fit = elnet(x, is_sparse, irs, pcs, y, weights, offset, gtype, parm,
                    lempty, nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam,
                    thresh, isd, intr, maxit, family)
    elif (family == 'binomial') or (family == 'multinomial'):
        # call lognet
        fit = lognet(x, is_sparse, irs, pcs, y, weights, offset, parm, nobs,
                     nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, thresh,
                     isd, intr, maxit, kopt, family)
    elif family == 'cox':
        # call coxnet
        fit = coxnet(x, is_sparse, irs, pcs, y, weights, offset, parm, nobs,
                     nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, thresh,
                     isd, maxit, family)
    elif family == 'mgaussian':
        # call mrelnet
        fit = mrelnet(x, is_sparse, irs, pcs, y, weights, offset, parm, nobs,
                      nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, thresh,
                      isd, jsd, intr, maxit, family)
    elif family == 'poisson':
        # call fishnet
        fit = fishnet(x, is_sparse, irs, pcs, y, weights, offset, parm, nobs,
                      nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam, thresh,
                      isd, intr, maxit, family)
    else:
        raise ValueError('calling a family of fits that has not been implemented yet')

    if exit_rec == 1:
        optset['fdev'] = fdev
        # TODO: Call glmnetControl(optset) to set persistent parameters

    # return fit
    return fit
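# Usage sketch (hypothetical data; elnet/lognet/... are the compiled backends
# this wrapper dispatches to). Inputs must be float64 and the call is
# keyword-only:
# X = scipy.random.rand(100, 10)
# yv = scipy.random.rand(100, 1)
# fit = glmnet(x=X, y=yv, family='gaussian', alpha=scipy.float64(0.5))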
synth.setnchannels(1)
synth.setsampwidth(2)
synth.setframerate(samplingrate)

remain = sound.getnframes()
while remain > 0:
    s = min(chunk, remain)
    # read frames
    data_sound = sound.readframes(s)
    data_noise = noise.readframes(s)
    # convert
    ary_sound = sp.fromstring(data_sound, sp.int16)
    ary_noise = sp.fromstring(data_noise, sp.int16)
    int32_ary_sound = sp.int32(ary_sound)
    int32_ary_noise = sp.int32(ary_noise)
    ary2 = sp.int16(int32_ary_sound + int32_ary_noise)
    data2 = ary2.tostring()
    synth.writeframes(data2)
    remain = remain - s

sound.close()
noise.close()
synth.close()

infile = 'tools/sound/noisy.wav'
signal, params = read_signal(infile, WINSIZE)
nf = len(signal) / (WINSIZE / 2) - 1
sig_out = sp.zeros(len(signal), sp.float32)
window = sp.hanning(WINSIZE)
def generate_gloDatamix_Meta(self):
    """ generates a pd.DataFrame with the Metadata ordered in such a way
    that it fits the .gloDatamix definition """

    # preparations
    gloMeta = []
    inds_map = self.map_lst_inds_to_path_inds()

    for n, path in enumerate(self.Main.Data.Metadata.paths):
        for i in range(len(self.Main.ROIs.ROI_list)):
            lst_values = self.Main.Data.Metadata.LSTdata.loc[inds_map[n]]

            # roi centroid
            pos = self.Main.ROIs.ROI_list[i].get_center()

            # stim
            if self.Main.Options.preprocessing['stimuli'].shape[0] == 2:
                stim2on, stim2off = self.Main.Options.preprocessing['stimuli'][1, :]
            else:
                stim2on, stim2off = ['-1', '-1']

            # age
            if lst_values['Age'] != -1:
                try:
                    NAge, NAgeMax = lst_values['Age'].split('-')
                except:
                    NAge = '-1'
                    NAgeMax = '-1'
            else:
                NAge = '-1'
                NAgeMax = '-1'

            row = OrderedDict()
            row['NGloTag'] = str(self.Main.ROIs.ROI_list[i].label)
            row['NOdorNr'] = '-999'
            row['NOConc'] = str(lst_values['OConc'])
            row['NStim_ON'] = str(self.Main.Options.preprocessing['stimuli'][0, 0])
            row['NStim_Off'] = str(self.Main.Options.preprocessing['stimuli'][0, 1])
            row['NNoFrames'] = str(self.Main.Data.nFrames)
            row['NFrameTime'] = str(sp.int32(self.Main.Options.preprocessing['dt'] * 1000))
            row['NRealTime'] = str(lst_values['MTime'])
            row['NPhConc'] = str(lst_values['PhConc'])
            row['NshiftX'] = str(lst_values['ShiftX'])
            row['NshiftY'] = str(lst_values['ShiftY'])
            row['NcontMeasu'] = '0'
            row['NNumMeasu'] = '0'
            row['Nstim_ISI'] = '0'
            row['NodorN'] = '1'
            row['Nstim2ON'] = str(stim2on)
            row['Nstim2OFF'] = str(stim2off)
            row['NAge'] = str(NAge)
            row['NAgeMax'] = str(NAgeMax)
            row['TGloInfo'] = ('Coor' + str(int(sp.around(pos[0], decimals=0)))
                               + ':' + str(int(sp.around(pos[1], decimals=0))))
            row['TOdour'] = str(lst_values['Odour'])
            row['T_dbb1'] = str(lst_values['DBB1'])
            row['Tcomment'] = str(lst_values['Comment'])
            row['TPharma'] = str(lst_values['Pharma'])
            row['TPhtime'] = str(lst_values['PhTime'])
            row['Tos9time'] = str(lst_values['PhTime'])
            row['TLabel'] = lst_values['Label']
            row['Tanimal'] = lst_values['DBB1'].strip().split('\\')[0]
            row['T_dbb2'] = 'noDBB2'

            gloMeta.append(row)

    # make a pd.DataFrame out of it
    gloMetaDF = pd.DataFrame(columns=list(gloMeta[0].keys()))
    for i in range(len(gloMeta)):
        gloMetaDF = gloMetaDF.append(pd.Series(gloMeta[i]), ignore_index=True)

    return gloMetaDF
def propagate(s, tn):
    s.update_src.prepared_call((1, 1), s.src_pt, sc.int32(tn), s.g_gpu)
    s.update.prepared_call(s.Dg, s.nx, s.ny, s.c_gpu, s.f_gpu, s.g_gpu)
    s.update.prepared_call(s.Dg, s.nx, s.ny, s.c_gpu, s.g_gpu, s.f_gpu)
# Get the kernel from the modules
initArray = mod_common.get_function("initArray")
updateE = mod_dielectric.get_function("updateE")
updateH = mod_dielectric.get_function("updateH")
updateSrc = mod_source.get_function("updateSrc")
'''
updateCPMLxE = mod_cpml.get_function("updateCPMLxE")
updateCPMLxH = mod_cpml.get_function("updateCPMLxH")
updateCPMLyE = mod_cpml.get_function("updateCPMLyE")
updateCPMLyH = mod_cpml.get_function("updateCPMLyH")
updateCPMLzE = mod_cpml.get_function("updateCPMLzE")
updateCPMLzH = mod_cpml.get_function("updateCPMLzH")
'''

# Initialize the device arrays
kNtot_devF = sc.int32(Ntot_devF)
initMainArrays(kNtot_devF, devEx, devEy, devEz, initArray)
initMainArrays(kNtot_devF, devHx, devHy, devHz, initArray)
'''
kNtotpmlx = sc.int32(Ntotpmlx)
kNtotpmly = sc.int32(Ntotpmly)
kNtotpmlz = sc.int32(Ntotpmlz)
initPsiArrays(kNtotpmlx, TPBpmlx, BPGpmlx, psixEyf, psixEyb, psixEzf, psixEzb, initArray)
initPsiArrays(kNtotpmlx, TPBpmlx, BPGpmlx, psixHyf, psixHyb, psixHzf, psixHzb, initArray)
initPsiArrays(kNtotpmly, TPBpmly, BPGpmly, psiyEzf, psiyEzb, psiyExf, psiyExb, initArray)
initPsiArrays(kNtotpmly, TPBpmly, BPGpmly, psiyHzf, psiyHzb, psiyHxf, psiyHxb, initArray)
initPsiArrays(kNtotpmlz, TPBpmlz, BPGpmlz, psizExf, psizExb, psizEyf, psizEyb, initArray)
initPsiArrays(kNtotpmlz, TPBpmlz, BPGpmlz, psizHxf, psizHxb, psizHyf, psizHyb, initArray)
'''
""" ion() from matplotlib.patches import Rectangle rect1 = Rectangle((100,0),20,150,facecolor='0.4') rect2 = Rectangle((100,180),20,140,facecolor='0.4') rect3 = Rectangle((100,350),20,150,facecolor='0.4') gca().add_patch(rect1) gca().add_patch(rect2) gca().add_patch(rect3) imsh = imshow(sc.ones((500,500),'f').T, cmap=cm.hot, origin='lower', vmin=0, vmax=0.1) #imsh = imshow(c.T, cmap=cm.hot, origin='lower', vmin=0, vmax=0.1) colorbar() """ Db, Dg = (256,1,1), (nx*ny/256+1,1) nx, ny = sc.int32(nx), sc.int32(ny) src_pt = sc.int32((nx/3)*nx+ny/2) initzero(nx*ny,f_gpu,g_gpu,block=Db,grid=Dg) for tstep in xrange(1001): update_src(src_pt,sc.int32(tstep),g_gpu,block=(1,1,1),grid=(1,1)) update(nx,ny,c_gpu,f_gpu,g_gpu,block=Db,grid=Dg,shared=258*4) update(nx,ny,c_gpu,g_gpu,f_gpu,block=Db,grid=Dg,shared=258*4) """ if tstep>1000: #if tstep%10 == 0: print tstep cuda.memcpy_dtoh(f,f_gpu) imsh.set_array( sc.sqrt(f[nx/3*2-100:nx/3*2+400,ny/2-250:ny/2+250].T**2) ) #imsh.set_array( sc.sqrt(f.T**2) )
# This script checks the number of patches in patch_load.txt.
# It prints the difference between the total number of patches and the
# theoretical number of patches.

import scipy
import matplotlib.pyplot as plt

# User defined parameters
nmpi = 400
npatchx = 256
npatchy = 128
npatchz = 128
###########################

filename = "patch_load.txt"
file = open(filename)
data = file.readlines()

noutput = len(data) / (nmpi + 1)
for iout in range(noutput):
    data_first = data[(nmpi+1)*iout + 1 : (nmpi+1)*iout + nmpi + 1]
    for j in range(nmpi):
        data_first[j] = scipy.int32(data_first[j].split()[-1])
    arrfirst = scipy.array(data_first)
    print arrfirst.sum() - (npatchx*npatchy*npatchz)
class gmshTranslator:
    """
    gmshTranslator

    Class that takes an input gmsh file (.msh) and provides functionality
    to parse and transform the .msh to other formats.
    """

    ################################################################################
    def __init__(self, mshfilename):
        self.mshfilename = mshfilename
        self.mshfid = open(mshfilename, "r")

        # Initially, parse elements to know what nodes are in which physical groups.
        reading_physnames = 0
        reading_nodes = 0
        reading_elements = 0

        self.__inform__("Initializing...")

        self.Nphys = 0
        self.Nnodes = 0
        self.Nelem = 0
        self.physical_groups = []
        self.nodes_in_physical_groups = {}
        self.physical_group_dims = {}
        self.physical_group_names = {}

        linenumber = 1
        for line in self.mshfid:
            #################################################
            # Identify beginning of nodes and elements sections
            if line.find("$PhysicalNames") >= 0:
                reading_physnames = 1
                continue
            if line.find("$Nodes") >= 0:
                reading_nodes = 1
                continue
            if line.find("$Elements") >= 0:
                reading_elements = 1
                continue

            #################################################
            # Identify end of nodes and element sections
            if line.find("$EndPhysicalNames") >= 0:
                reading_physnames = 0
                continue
            if line.find("$EndElements") >= 0:
                reading_elements = 0
                continue
            if line.find("$EndNodes") >= 0:
                reading_nodes = 0
                continue
            #################################################

            # If this is the first line of physical names, read their number.
            if reading_physnames == 1:
                self.Nphys = sp.int32(line)
                self.__inform__("Mesh has " + str(self.Nphys) + " physical groups.")
                reading_physnames = 2
                continue

            # If this is the first line of nodes, read the number of nodes.
            if reading_nodes == 1:
                self.Nnodes = sp.int32(line)
                self.__inform__("Mesh has " + str(self.Nnodes) + " nodes.")
                reading_nodes = 2
                continue

            # If this is the first line of elements, read the number of elements.
            if reading_elements == 1:
                self.Nelem = sp.int32(line)
                self.__inform__("Mesh has " + str(self.Nelem) + " elements.")
                reading_elements = 2
                continue

            if reading_physnames == 2:
                sl = line.split()
                grpdim = sp.int32(sl[0])   # spatial dimension of the physical group (0 = point, 1 = line, 2 = surface, 3 = volume)
                physgrp = sp.int32(sl[1])  # group number
                grpname = (" ".join(sl[2:]))[1:-1]  # strip quotation marks
                self.physical_group_dims[physgrp] = grpdim
                self.physical_group_names[physgrp] = grpname

            # Now parse elements and populate the list of nodes in groups
            if reading_elements == 2:
                sl = sp.array(line.split(), dtype=sp.int32)
                eletag = sl[0]
                eletype = sl[1]
                ntags = sl[2]
                physgrp = 0
                partition = 0
                if ntags >= 2:
                    physgrp = sl[3]
                    nodelist = sl[(3 + ntags)::]
                    # sys.stdout.write(str(nodelist.size) + " ")
                    if physgrp in self.physical_groups:
                        self.nodes_in_physical_groups[physgrp][nodelist] = 1
                    else:
                        self.nodes_in_physical_groups[physgrp] = \
                            -sp.ones(self.Nnodes + 1, dtype=sp.int16)
                        self.nodes_in_physical_groups[physgrp][nodelist] = 1
                        self.physical_groups.append(physgrp)
                else:
                    self.__error__(".msh file has < 2 tags at line " + str(linenumber))

            linenumber += 1
        # end for line

        self.__inform__("Processed " + str(linenumber) + " lines.")
        self.__inform__("There are " + str(len(self.physical_groups)) +
                        " physical groups available: ")
        for g in self.physical_groups:
            self.__inform__("  > %s: \"%s\" (dimension %d)" %
                            (str(g), self.physical_group_names[g],
                             self.physical_group_dims[g]))

        # create inverse mapping from names -> IDs so that the user can refer
        # to physical groups by name
        self.physical_groups_by_name = {}
        for k, v in self.physical_group_names.items():
            self.physical_groups_by_name[v] = k

        self.nodes_rules = []
        self.elements_rules = []

        self.mshfid.close()
    # end def __init__

    ################################################################################
    def __del__(self):
        self.mshfid.close()
        self.__inform__("Ending")

    ################################################################################
    def add_elements_rule(self, condition, action):
        self.elements_rules.append((condition, action))

    ################################################################################
    def add_nodes_rule(self, condition, action):
        self.nodes_rules.append((condition, action))

    ################################################################################
    def clear_rules(self):
        self.nodes_rules = []
        self.elements_rules = []

    ################################################################################
    def parse(self):
        self.mshfid = open(self.mshfilename, 'r')

        # Advance to nodes
        line = self.mshfid.readline()
        while line.find("$Nodes") < 0:
            line = self.mshfid.readline()

        # This line should contain the number of nodes. Check that the number
        # of nodes in the file is still the number of nodes in memory.
        line = self.mshfid.readline()
        if not sp.int32(line) == self.Nnodes:
            self.__error__("Something wrong. Aborting.")
            exit(-1)

        self.__inform__("Parsing nodes")
        if len(self.nodes_rules) == 0:
            self.__inform__("No rules for nodes... skipping nodes.")
            for i in range(self.Nnodes):
                self.mshfid.readline()
        else:
            # Read all nodes and do stuff
            for i in range(self.Nnodes):
                # Parse the line
                sl = self.mshfid.readline().split()
                tag = sp.int32(sl[0])
                x = sp.double(sl[1])
                y = sp.double(sl[2])
                z = sp.double(sl[3])

                # Figure out the groups to which this node belongs
                physgroups = []
                for grp in self.physical_groups:
                    if self.nodes_in_physical_groups[grp][tag] == 1:
                        physgroups.append(grp)

                for condition, action in self.nodes_rules:
                    if condition(tag, x, y, z, physgroups):
                        action(tag, x, y, z)

        # Read another 2 lines after nodes are done. This should be $Elements.
        line = self.mshfid.readline()
        line = self.mshfid.readline()
        if line.find("$Elements") == 0:
            self.__inform__("Parsing elements")
        else:
            self.__error__("Something wrong reading elements. ")
            exit(-1)

        # This line should contain the number of elements. Check that the
        # number of elements in the file is still the number in memory.
        line = self.mshfid.readline()
        if not sp.int32(line) == self.Nelem:
            self.__error__("Something wrong. Aborting.")
            exit(-1)

        if len(self.elements_rules) == 0:
            self.__inform__("No rules for elements... skipping elements.")
            for i in range(self.Nelem):
                self.mshfid.readline()
        else:
            # Read all elements and do stuff
            nodes = []
            for i in range(self.Nelem):
                sl = self.mshfid.readline().split()
                # Parse the line
                eletag = sp.int32(sl[0])
                eletype = sp.int32(sl[1])
                ntags = sp.int32(sl[2])
                if ntags >= 2:
                    physgrp = sp.int32(sl[3])
                    partition = sp.int32(sl[4])
                    nodes = sp.array(sl[(3 + ntags)::], dtype=sp.int32)
                    for condition, action in self.elements_rules:
                        if condition(eletag, eletype, physgrp, nodes):
                            action(eletag, eletype, physgrp, nodes)
                else:
                    self.__error__(".msh file has < 2 tags element with tag " + str(eletag))

    ################################################################################
    # Helper functions to do typical tasks, such as checking if a node or
    # element is in a group
    def is_element_in(self, this_physgrp):
        def is_element_in_physgrp(eletag, eletype, physgrp, nodes):
            if this_physgrp == "!any":
                return True
            return self.physical_groups_by_name[this_physgrp] == physgrp
        return is_element_in_physgrp

    def is_node_in(self, this_physgrp):
        def is_node_in_physgrp(tag, x, y, z, physgroups):
            if this_physgrp == "!any":
                return True
            return self.physical_groups_by_name[this_physgrp] in physgroups
        return is_node_in_physgrp

    ################################################################################
    def __inform__(self, msg):
        print("gmshTranslator: " + msg)

    ################################################################################
    def __error__(self, msg):
        sys.stderr.write("gmshTranslator: ERROR! -> " + msg + "\n")

    # GMSH element definitions
    line_2_node = sp.int32(1)             # 2-node line.
    triangle_3_node = sp.int32(2)         # 3-node triangle.
    quadrangle_4_node = sp.int32(3)       # 4-node quadrangle.
    tetrahedron_4_node = sp.int32(4)      # 4-node tetrahedron.
    hexahedron_8_node = sp.int32(5)       # 8-node hexahedron.
    prism_6_node = sp.int32(6)            # 6-node prism.
    pyramid_5_node = sp.int32(7)          # 5-node pyramid.
    line_3_node = sp.int32(8)             # 3-node second order line (2 nodes associated with the vertices and 1 with the edge).
    triangle_6_node = sp.int32(9)         # 6-node second order triangle (3 nodes associated with the vertices and 3 with the edges).
    quadrangle_9_node = sp.int32(10)      # 9-node second order quadrangle (4 nodes associated with the vertices, 4 with the edges and 1 with the face).
    tetrahedron_10_node = sp.int32(11)    # 10-node second order tetrahedron (4 nodes associated with the vertices and 6 with the edges).
    hexahedron_27_node = sp.int32(12)     # 27-node second order hexahedron (8 nodes associated with the vertices, 12 with the edges, 6 with the faces and 1 with the volume).
    prism_18_node = sp.int32(13)          # 18-node second order prism (6 nodes associated with the vertices, 9 with the edges and 3 with the quadrangular faces).
    pyramid_14_node = sp.int32(14)        # 14-node second order pyramid (5 nodes associated with the vertices, 8 with the edges and 1 with the quadrangular face).
    point_1_node = sp.int32(15)           # 1-node point.
    quadrangle_8_node = sp.int32(16)      # 8-node second order quadrangle (4 nodes associated with the vertices and 4 with the edges).
    hexahedron_20_node = sp.int32(17)     # 20-node second order hexahedron (8 nodes associated with the vertices and 12 with the edges).
    prism_15_node = sp.int32(18)          # 15-node second order prism (6 nodes associated with the vertices and 9 with the edges).
    pyramid_13_node = sp.int32(19)        # 13-node second order pyramid (5 nodes associated with the vertices and 8 with the edges).
    triangle_9_node_incomplete = sp.int32(20)   # 9-node third order incomplete triangle (3 nodes associated with the vertices, 6 with the edges)
    triangle_10_node = sp.int32(21)       # 10-node third order triangle (3 nodes associated with the vertices, 6 with the edges, 1 with the face)
    triangle_12_node_incomplete = sp.int32(22)  # 12-node fourth order incomplete triangle (3 nodes associated with the vertices, 9 with the edges)
    triangle_15_node = sp.int32(23)       # 15-node fourth order triangle (3 nodes associated with the vertices, 9 with the edges, 3 with the face)
    triangle_15_node_incomplete = sp.int32(24)  # 15-node fifth order incomplete triangle (3 nodes associated with the vertices, 12 with the edges)
    triangle_21_node = sp.int32(25)       # 21-node fifth order complete triangle (3 nodes associated with the vertices, 12 with the edges, 6 with the face)
    edge_4_node = sp.int32(26)            # 4-node third order edge (2 nodes associated with the vertices, 2 internal to the edge)
    edge_5_node = sp.int32(27)            # 5-node fourth order edge (2 nodes associated with the vertices, 3 internal to the edge)
    edge_6_node = sp.int32(28)            # 6-node fifth order edge (2 nodes associated with the vertices, 4 internal to the edge)
    tetrahedron_20_node = sp.int32(29)    # 20-node third order tetrahedron (4 nodes associated with the vertices, 12 with the edges, 4 with the faces)
    tetrahedron_35_node = sp.int32(30)    # 35-node fourth order tetrahedron (4 nodes associated with the vertices, 18 with the edges, 12 with the faces, 1 in the volume)
    tetrahedron_56_node = sp.int32(31)    # 56-node fifth order tetrahedron (4 nodes associated with the vertices, 24 with the edges, 24 with the faces, 4 in the volume)
    hexahedron_64_node = sp.int32(92)     # 64-node third order hexahedron (8 nodes associated with the vertices, 24 with the edges, 24 with the faces, 8 in the volume)
    hexahedron_125_node = sp.int32(93)    # 125-node fourth order hexahedron (8 nodes associated with the vertices, 36 with the edges, 54 with the faces, 27 in the volume)
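# Usage sketch (hypothetical mesh file "model.msh"): print every node that
# belongs to a physical group named "inlet", using the rule API defined above.
# gt = gmshTranslator("model.msh")
# def print_node(tag, x, y, z):
#     print("node %d at (%g, %g, %g)" % (tag, x, y, z))
# gt.add_nodes_rule(gt.is_node_in("inlet"), print_node)
# gt.parse()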
theta0 = 10.0
Ntest = 100

# --------------------------------------------
# Load all database
# --------------------------------------------
ttt = time.clock()
if not os.path.exists('qm7.pkl'):
    os.system('wget http://www.quantum-machine.org/data/qm7.pkl')
dataset = pickle.load(open('qm7.pkl', 'r'))

# --------------------------------------------
# Extract training data and test set
# --------------------------------------------
allP = dataset['P'][range(0, split) + range(split+1, 5)].flatten()
print "TIMER load_data", time.clock() - ttt

# logarithmically spaced training-set sizes
nteach = sp.int32(sp.exp(sp.linspace(sp.log(2*Ntest), sp.log(allP.size), 25)))

# --------------------------------------------
# Loop over different training set sizes
# --------------------------------------------
alpha = []
alpha_std = []
mae_error = []
errors = []
for Nteach in nteach:
    # --------------------------------------------
    # First time include the test set to calculate their alpha
    # --------------------------------------------
    print "\n", "-"*60, "\n"
    print "N teach = %d" % Nteach
ion()
from matplotlib.patches import Rectangle
rect1 = Rectangle((100,0), 20, 150, facecolor='0.4')
rect2 = Rectangle((100,180), 20, 140, facecolor='0.4')
rect3 = Rectangle((100,350), 20, 150, facecolor='0.4')
gca().add_patch(rect1)
gca().add_patch(rect2)
gca().add_patch(rect3)
#imsh = imshow(sc.ones((500,500),'f').T, cmap=cm.hot, origin='lower', vmin=0, vmax=0.1)
imsh = imshow(output.T, cmap=cm.hot, origin='lower', vmin=0, vmax=0.1)
colorbar()

# main loop
for tn in xrange(3000):
    if mpi.rank == 1:
        src_pt = sc.int32(100*ny + 1501)
        S.update_src.prepared_call((1,1), src_pt, sc.int32(tn), S.f_gpu)

    S.update.prepared_call(S.Dg, sc.int32(S.nx), sc.int32(ny), S.c_gpu, S.f_gpu, S.g_gpu)
    S.exchange(S.f_gpu)
    S.update.prepared_call(S.Dg, sc.int32(S.nx), sc.int32(ny), S.c_gpu, S.g_gpu, S.f_gpu)
    S.exchange(S.g_gpu)

    if tn > 100 and tn % 100 == 0:
        if mpi.rank == 0:
            cuda.memcpy_dtoh(S.f, S.f_gpu)
            output[:1000, :] = S.f[1:-1, 1:-1]
            output[1000:2000, :] = mpi.world.recv(1, 10)
            output[2000:3000, :] = mpi.world.recv(2, 10)
            #imsh.set_array( sc.sqrt(output[1900:2400,1250:1750].T**2) )
y_start = p_coordinate[1] * (shape[1] - 1)
y_end = y_start + shape[1]
k = 0  # Counts the components
for group in list_group:
    Fields_local[k, x_start:x_end, y_start:y_end] = \
        group._f_getChild(namecycle).read()[:, :, 0]
    k = k + 1
h5proc_file.close()
# End of read_proc loop

# Reassemble data from each processor on procs 0, 1 and 2
for k in range(K):
    if rankproc > 0:
        buffer = Fields_local[k, 1:, :].astype("float32")  # Account for overlapping
    else:
        buffer = Fields_local[k, :, :].astype("float32")
    vec_count = scipy.arange(numproc, dtype='int32')  # Initialize for all procs
    matsize = scipy.int32(buffer.size)
    gridcomm.Allgather([matsize, 1, MPI.INT], [vec_count, 1, MPI.INT])
    vec_stride = [vec_count[:j].sum() for j in range(numproc)]
    gridcomm.Gatherv([buffer, buffer.size, MPI.FLOAT],
                     [Fields_global, vec_count, vec_stride, MPI.FLOAT],
                     root=k)
# End of the field gathering loop

#################################################################
# Plot single frames by procs 0, 1 and 2
if rankproc < K:
    if rankproc == 0 and K == 3:
        indice = 'x'
    elif rankproc == 1 and K == 3:
        indice = 'y'
    elif rankproc == 2 and K == 3:
        indice = 'z'
    #if field == 'B' and grid == 0:
pa = {
    'Flux_Ca': 0.0,
    'tau_Ca': 20.0,
    'ss_Ca': 0.01,
    'alpha_p': 1.0,
    'beta_p': 10.0,
    'ss_x': 1.0,
    'tau_x': 50.0,
    'le_x': 1.0,
}
pa['beta/alpha_p'] = pa['beta_p'] / pa['alpha_p']
pinfty = U0[0] / (U0[0] + pa['beta/alpha_p'])
print('p_infty = %g' % pinfty)

timeStep = 1 / 40.0
timeMax = 550.
nSteps = sc.int32(timeMax / timeStep)
timeSamples = sc.arange(0, timeMax, timeStep)

stimInterval = 50.0
stimTimes = stimInterval * sc.arange(1, 11)
stimIdx = sc.int32(stimTimes / timeStep)
print(stimIdx)
ff = sc.zeros(len(timeSamples))
ff[stimIdx] = 200.0 * timeStep

pa['ic'] = U0
pa['stepSize'] = timeStep
pa['timeMax'] = timeMax
pa['nSteps'] = nSteps
c, p, x, f3d = stsp3d_profile(pa)

# Runs on 2D
U0 = sc.array([0.05, 0.5])
# --------------------------------------------
# Load all database
# --------------------------------------------
ttt = time.clock()
if not os.path.exists('qm7.pkl'):
    os.system('wget http://www.quantum-machine.org/data/qm7.pkl')
dataset = pickle.load(open('qm7.pkl', 'r'))

# --------------------------------------------
# Extract training data and test set
# --------------------------------------------
split = 1
N_models = 1
theta0 = 10.0
Nfixed = 100

allP = dataset['P'][range(0, split) + range(split+1, 5)].flatten()
nteachs = sp.int32(sp.exp(sp.linspace(sp.log(Nfixed + 0.0), sp.log(allP.size), 30)))
Ptest = dataset['P'][split]
Xtest = dataset['X'][Ptest]
Ttest = dataset['T'][Ptest]
print "TIMER load_data", time.clock() - ttt

alpha = []
covmat = []
alpha_std = []
for Nteach in nteachs:
    print "\n", "-"*60, "\n"
    print "N teach = %d" % Nteach

    # Select training data
    P = allP[:Nteach]