def estimated_waiting_time(self):
    deltatime = timedelta(days=7)
    t1 = datetime.now() - deltatime
    possible_hit_jobs_total = Session.query(Job)\
        .filter(Job.date_submitted > t1)\
        .filter(Job.failed == False)\
        .filter(Job.date_completed == None)\
        .filter(Job.batchid == None)\
        .order_by('-id')\
        .limit(100)\
        .all()
    ids = [i.id for i in possible_hit_jobs_total]
    if self.id not in ids:
        return 24*60*60
    else:
        m1 = ids.index(self.id)
        m2 = len(possible_hit_jobs_total)
        N = 12
        if (N - 1) <= m1:
            if m1 >= 50:
                p1 = 0.0
                p2 = 0.0
            else:
                p1 = 1.0/2.0*round(sum([scipy.misc.comb(m1, ii) for ii in range(0, N-1 + 1)]))/scipy.power(2, m1)
                p2 = 0.0
        else:
            if (m2-m1) >= 50:
                p1 = 0.5
                p2 = 0.0
            else:
                p1 = 0.5
                p2 = 1.0/2.0*round(sum([scipy.misc.comb(m2-m1, ii) for ii in range(0, N-1-m1 + 1)]))/scipy.power(2, m2-m1)
        p = p1 + p2
        print m1, m2, p1, p2, p
        return int(ceil((self.n_spacers - self.n_completed_spacers)/2.0) * ceil(1.0/p * 60.0))
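# Hedged sketch (not part of the original module): scipy.misc.comb has been removed
# from recent SciPy releases, so the binomial tail terms p1/p2 above can be computed
# with scipy.special.comb instead. The helper name and the example values below are
# illustrative assumptions, not the author's code.
from scipy.special import comb
import numpy as np

def binomial_tail_prob(m, n):
    """0.5 * P(X <= n-1) for X ~ Binomial(m, 1/2), mirroring the p1/p2 terms above."""
    if m >= 50:        # the original short-circuits long queues to 0
        return 0.0
    return 0.5 * round(sum(comb(m, ii) for ii in range(0, n))) / np.power(2.0, m)

# e.g. the p1 term for a job at queue position m1 = 5 with N = 12 slots:
print(binomial_tail_prob(5, 12))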
def plot_gridsearch_scores_per_metric(self, grid_scores):
    cols = int(sp.ceil(sp.sqrt(len(self.metrics_to_use))))
    # use float division so the row count rounds up
    rows = int(sp.ceil(len(self.metrics_to_use) / float(cols)))
    for i, metric in enumerate(self.metrics_to_use):
        plt.subplot(rows, cols, i + 1)
        self.plot_gridsearch_scores(grid_scores, {self.metric_key: metric})
        plt.title(metric_defs[metric][0])
def cmd_ylim(mu):
    if scipy.ceil(mu) - mu < mu - scipy.floor(mu):
        cmax = scipy.ceil(mu) + 1
    else:
        cmax = scipy.ceil(mu)
    cmin = cmax - 3
    return cmin, cmax
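# Hedged usage check of the rounding rule above (expected values worked out by hand):
# mu = 2.3 -> ceil - mu = 0.7 >= mu - floor = 0.3 -> cmax = 3, cmin = 0
# mu = 2.7 -> ceil - mu = 0.3 <  mu - floor = 0.7 -> cmax = 4, cmin = 1
print(cmd_ylim(2.3))   # (0.0, 3.0)
print(cmd_ylim(2.7))   # (1.0, 4.0)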
def __init__(self, centerFrequency=440.2*1e6, bMag=0.4e-4, nspec=64, sampfreq=50e3,
             collfreqmin=1e-2, alphamax=30.0, dFlag=False, f=None):
    """ Constructor for the class.
        Inputs :
        centerFrequency: The radar center frequency in Hz.
        bMag: The magnetic field magnitude in Teslas.
        nspec: The number of points of the spectrum.
        sampfreq: The sampling frequency of the A/Ds in Hz.
        collfreqmin: (Default 1e-2) The minimum collision frequency needed to
            incorporate it into the Gordeyev integral calculations, in units of
            K*sqrt(Kb*Ts/ms) for each ion species.
        alphamax: (Default 30) The maximum magnetic aspect angle in which the
            B-field will be taken into account.
        dFlag: A debug flag; if set True, debug text will be output. Default is False.
        f: A numpy array of frequency points, in Hz, the spectrum will be formed over.
            Default is None, in which case the frequency vector is formed from the
            number of spectrum points and the sampling frequency as a linearly
            sampled frequency vector."""
    self.bMag = bMag
    self.dFlag = dFlag
    self.collfreqmin = collfreqmin
    self.alphamax = alphamax
    # The Bragg scattering vector, corresponds to half the radar wavelength.
    self.K = 2.0*sp.pi*2*centerFrequency/v_C_0
    if f is None:
        minfreq = -sp.ceil((nspec-1.0)/2.0)
        maxfreq = sp.floor((nspec-1.0)/2.0+1)
        self.f = sp.arange(minfreq, maxfreq)*(sampfreq/(2*sp.ceil((nspec-1.0)/2.0)))
    else:
        self.f = f
    self.omeg = 2.0*sp.pi*self.f
def process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p=None):
    sr = eeg['sample_rate']
    events = eeg['events']
    event_types = events['uniqueLabel']
    ns = int(ceil(EPOCH_LENGTH*sr))

    # Identify artifacts
    artifact_indexes = zeros((y.shape[0], 1))
    artifact_indexes[eeg['artifact_indexes']] = 1
    num_occurances, events = remove_corrupted_events(event_types, events, artifact_indexes, ns)

    # Shift signal to account for negative response
    zpadpre = zeros((int(ceil(EPOCH_OFFSET*sr)), 1))
    zpadpost = zeros((int(ceil((EPOCH_LENGTH-EPOCH_OFFSET)*sr)), 1))
    y = concatenate((zpadpre, y, zpadpost))
    artifact_indexes = concatenate((zpadpre, artifact_indexes, zpadpost))

    result = np.empty((2, NUM_FOLDS, len(event_types), 2))
    if p is not None:
        reg_parent_conn, reg_child_conn = mp.Pipe()
        av_parent_conn, av_child_conn = mp.Pipe()
        these_args = (y, events, artifact_indexes, ns, num_occurances, NUM_FOLDS,)
        res_reg = p.apply_async(cross_validate_regression, these_args)
        res_av = p.apply_async(cross_validate_average, these_args)
        result[0, :, :, :] = res_av.get()
        result[1, :, :, :] = res_reg.get()
    else:
        result[0, :, :, :] = cross_validate_average(y, events, artifact_indexes, ns, num_occurances, NUM_FOLDS)
        result[1, :, :, :] = cross_validate_regression(y, events, artifact_indexes, ns, num_occurances, NUM_FOLDS)
    return result
def rescale_target_superpixel_resolution(E_target):
    '''Rescale the target field to the superpixel resolution (currently only 4x4 superpixels implemented)'''
    superpixelSize = 4

    ny, nx = scipy.shape(E_target)
    maskCenterX = scipy.ceil((nx+1)/2)
    maskCenterY = scipy.ceil((ny+1)/2)
    nSuperpixelX = int(nx/superpixelSize)
    nSuperpixelY = int(ny/superpixelSize)

    FourierMaskSuperpixelResolution = fourier_mask(ny, nx, superpixelSize)
    E_target_ft = fft.fftshift(fft.fft2(fft.ifftshift(E_target)))

    # Apply mask
    E_target_ft = FourierMaskSuperpixelResolution*E_target_ft

    # Remove zeros outside of mask (slice bounds must be integers)
    E_superpixelResolution_ft = E_target_ft[
        int(maskCenterY - scipy.ceil((nSuperpixelY-1)/2) - 1):int(maskCenterY + scipy.floor((nSuperpixelY-1)/2)),
        int(maskCenterX - scipy.ceil((nSuperpixelX-1)/2) - 1):int(maskCenterX + scipy.floor((nSuperpixelX-1)/2))]

    # Add phase gradient to compensate for anomalous 1.5 pixel shift in real plane
    phaseFactor = [[(scipy.exp(2*1j*pi*((k+1)/nSuperpixelY+(j+1)/nSuperpixelX)*3/8))
                    for j in range(nSuperpixelX)] for k in range(nSuperpixelY)]  # QUESTION
    E_superpixelResolution_ft = E_superpixelResolution_ft*phaseFactor

    # Fourier transform back to DMD plane
    E_superpixelResolution = fft.fftshift(fft.ifft2(fft.ifftshift(E_superpixelResolution_ft)))

    return E_superpixelResolution
def getRegion(self, size=3e4, min_nSNPs=1, chrom_i=None, pos_min=None, pos_max=None):
    """
    Sample a region from the piece of genotype X, chrom, pos
    minSNPnum:  minimum number of SNPs contained in the region
    Ichrom:     restrict X to chromosome Ichrom before taking the region
    cis:        bool vector that marks the sorted region
    region:     vector that contains chrom and init and final position of the region
    """
    if (self.chrom is None) or (self.pos is None):
        bim = plink_reader.readBIM(self.bfile, usecols=(0, 1, 2, 3))
        chrom = SP.array(bim[:, 0], dtype=int)
        pos = SP.array(bim[:, 3], dtype=int)
    else:
        chrom = self.chrom
        pos = self.pos

    if chrom_i is None:
        n_chroms = chrom.max()
        chrom_i = int(SP.ceil(SP.rand()*n_chroms))

    pos = pos[chrom == chrom_i]
    chrom = chrom[chrom == chrom_i]

    ipos = SP.ones(len(pos), dtype=bool)
    if pos_min is not None:
        ipos = SP.logical_and(ipos, pos_min < pos)
    if pos_max is not None:
        ipos = SP.logical_and(ipos, pos < pos_max)
    pos = pos[ipos]
    chrom = chrom[ipos]

    if size == 1:
        # select single SNP
        idx = int(SP.ceil(pos.shape[0]*SP.rand()))
        cis = SP.arange(pos.shape[0]) == idx
        region = SP.array([chrom_i, pos[idx], pos[idx]])
    else:
        while 1:
            idx = int(SP.floor(pos.shape[0]*SP.rand()))
            posT1 = pos[idx]
            posT2 = pos[idx] + size
            if posT2 <= pos.max():
                cis = chrom == chrom_i
                cis *= (pos > posT1)*(pos < posT2)
                if cis.sum() > min_nSNPs:
                    break
        region = SP.array([chrom_i, posT1, posT2])

    start = SP.nonzero(cis)[0].min()
    nSNPs = cis.sum()

    if self.X is None:
        rv = plink_reader.readBED(self.bfile, useMAFencoding=True, start=start, nSNPs=nSNPs, bim=bim)
        Xr = rv['snps']
    else:
        Xr = self.X[:, start:start + nSNPs]

    return Xr, region
def plot_benchmark_results(self):
    plt.figure()
    cols = int(sp.ceil(sp.sqrt(len(self.results))))
    rows = int(sp.ceil(float(len(self.results)) / cols))
    for i, m in enumerate(self.results):
        plt.subplot(rows, cols, i + 1)
        self.plot_times(self.results[m])
        plt.title(metrics[m][0])
def fourier_mask(ny, nx, resolution):
    # Create circular aperture around the center
    maskCenterX = int(scipy.ceil((nx+1)/2))
    maskCenterY = int(scipy.ceil((ny+1)/2))

    ### Code optimization purposes
    angle = ny/nx
    nres = (ny/resolution/2)**2
    ###

    return [[((i+1 - maskCenterY)**2 + (angle*(j+1 - maskCenterX))**2 < nres)
             for j in range(nx)] for i in range(ny)]
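# Minimal usage sketch (illustrative sizes, not from the original code): an 8x8
# mask with resolution 4 keeps only the pixels inside the central aperture.
import numpy as np
mask = np.array(fourier_mask(ny=8, nx=8, resolution=4))
print(mask.astype(int))                      # 1s mark pixels inside the aperture
print(int(mask.sum()), 'pixels pass the Fourier mask')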
def get_cb_ticks(values):
    min_tick = sp.nanmin(values)
    max_tick = sp.nanmax(values)
    med_tick = min_tick + (max_tick - min_tick) / 2.0
    if max_tick > 1.0:
        min_tick = sp.ceil(min_tick)
        max_tick = sp.floor(max_tick)
        med_tick = sp.around(med_tick)
    else:
        min_tick = sp.ceil(min_tick * 100.0) / 100.0
        max_tick = sp.floor(max_tick * 100.0) / 100.0
        med_tick = sp.around(med_tick, 2)
    return [min_tick, med_tick, max_tick]
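# Usage sketch (hypothetical values): three colorbar ticks, rounded to two decimals
# when the data stay below 1.0 and to integers otherwise.
import numpy as np
print(get_cb_ticks(np.array([0.013, 0.42, 0.987])))   # -> [0.02, 0.5, 0.98]
print(get_cb_ticks(np.array([1.2, 5.0, 9.7])))        # integer ticks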
def eliminateSmallClusters(self, mskDds, clusterSzThreshFraction):
    """
    Performs a labelling of non-mask-value connected components of the :samp:`mskDds`
    image and eliminates clusters/objects whose number of voxels is less than
    :samp:`clusterSzThreshFraction*mango.count_non_masked(mskDds)`.

    :type mskDds: :obj:`mango.Dds`
    :param mskDds: This image is modified by eliminating small clusters/objects
       (by setting small-cluster voxels to value :samp:`mskDds.mtype.maskValue()`).
    :type clusterSzThreshFraction: :obj:`float`
    :param clusterSzThreshFraction: Value in interval :samp:`[0,1]`. Threshold fraction
       of total non-masked :samp:`mskDds` voxels for eliminating small clusters/objects.
    """
    elimClustersSmallerThan = int(sp.ceil(mango.count_non_masked(mskDds)*clusterSzThreshFraction))
    segDds = mango.ones_like(mskDds, mtype="segmented")
    mango.copy_masked(mskDds, segDds)
    rootLogger.info("eliminateSmallClusters: Labeling mskDds masked connected components...")
    lblDds = mango.image.label(segDds, 1)
    rootLogger.info("eliminateSmallClusters: Done labeling mskDds masked connected components...")
    self.writeIntermediateDds("_111MskDdsLabels", lblDds)
    rootLogger.info("eliminateSmallClusters: Eliminating clusters of size range [%s, %s]..."
                    % (0, elimClustersSmallerThan))
    lblDds = mango.image.eliminate_labels_by_size(lblDds, val=lblDds.mtype.maskValue(),
                                                  minsz=0, maxsz=elimClustersSmallerThan)
    rootLogger.info("eliminateSmallClusters: Done eliminating clusters in size range [%s, %s]."
                    % (0, elimClustersSmallerThan))
    rootLogger.info("eliminateSmallClusters: Copying small-cluster mask to mskDds...")
    mango.copy_masked(lblDds, mskDds)
    rootLogger.info("eliminateSmallClusters: Done copying small-cluster mask to mskDds.")
def stability_selection(X, K, y, mu, n_reps, f_subset, **kwargs):
    """
    run stability selection

    Input:
    X: SNP matrix: n_s x n_f
    y: phenotype:  n_s x 1
    K: kinship matrix: n_s x n_s
    mu: l1-penalty

    n_reps:   number of repetitions
    f_subset: fraction of the dataset that is used for creating one bootstrap

    output:
    selection frequency for all SNPs: n_f x 1
    """
    time_start = time.time()
    [n_s, n_f] = X.shape
    n_subsample = int(scipy.ceil(f_subset * n_s))  # subsample size must be an int index
    freq = scipy.zeros(n_f)

    for i in range(n_reps):
        print 'Iteration %d' % i
        idx = scipy.random.permutation(n_s)[:n_subsample]
        res = train(X[idx], K[idx][:, idx], y[idx], mu, **kwargs)
        snp_idx = (res['weights'] != 0).flatten()
        freq[snp_idx] += 1.

    freq /= n_reps
    time_end = time.time()
    time_diff = time_end - time_start
    print '... finished in %.2fs' % (time_diff)

    return freq
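# Self-contained sketch of the same bootstrap-counting idea (assumptions: a
# hypothetical fit_weights(X, y) stands in for train(), and NumPy replaces the
# deprecated scipy.ceil / scipy.random aliases).
import numpy as np

def selection_frequency(X, y, fit_weights, n_reps=100, f_subset=0.5, seed=0):
    n_s, n_f = X.shape
    n_sub = int(np.ceil(f_subset * n_s))        # size of each bootstrap subsample
    rng = np.random.default_rng(seed)
    freq = np.zeros(n_f)
    for _ in range(n_reps):
        idx = rng.permutation(n_s)[:n_sub]      # random subset of individuals
        w = fit_weights(X[idx], y[idx])         # sparse weight vector of length n_f
        freq[np.asarray(w) != 0] += 1.0         # count selected SNPs
    return freq / n_reps                        # selection frequency per SNP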
def kteo(data, k=1):
    """teager energy operator of range k [TEO]

    The discrete teager energy operator (TEO) of window size k is defined as:
    M{S{Psi}[x(n)] = x^2(n) - x(n-k) x(n+k)}

    :type data: ndarray
    :param data: The signal to operate on. ndim=1
    :type k: int
    :param k: Parameter defining the window size for the TEO.
    :return: ndarray - Array of same shape as the input signal,
        holding the kteo response.
    :except: If inconsistent dims or shapes.
    """

    # checks and inits
    if data.ndim != 1:
        raise ValueError(
            'ndim != 1! ndim=%s with shape=%s' % (data.ndim, data.shape))

    # apply nonlinear energy operator with range k
    # (zero padding restores the input length; pad sizes must be ints)
    rval = data ** 2 - sp.concatenate(([0] * int(sp.ceil(k / 2.0)),
                                       data[:-k] * data[k:],
                                       [0] * int(sp.floor(k / 2.0))))

    # return
    return rval
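# Usage sketch (illustrative signal; `kteo` relies on this module's `sp` import):
import numpy as np
x = np.sin(np.linspace(0, 4 * np.pi, 64))
psi = kteo(x, k=1)
print(psi.shape)   # same length as the input signal; the borders are zero-padded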
def load_network(N=10, k=6, network='Random'):
    netdata = []
    if network != 'Random':
        for line in file(network):
            if line[0] != '#':
                line = line.split()
                if len(line) == 3:
                    netdata.append(line)
        data_array = sp.array(netdata)
        N = max(max(map(int, data_array[:, 0])), max(map(int, data_array[:, 1])))
    else:
        for n1 in range(N):
            for nei in range(k):
                n2 = n1
                nvals = [n1]
                while n2 in nvals:
                    n2 = int(sp.ceil(N*sp.random.random()))
                    nvals.append(n2)
                netdata.append([n1, n2, 1.0])
        data_array = sp.array(netdata)
    A_matrix = sp.mat(sp.zeros((int(N), int(N))))
    for row in data_array:
        A_matrix[int(row[0])-1, int(row[1])-1] = float(row[2])
    return A_matrix
def predict_interval_probabilities(self, prediction_interval, hyperparams,
                                   number_of_gibbs_iterations=10, messages=True):
    """
    Predict the probability for each input point of whether it is more likely
    described by the common or the individual model, respectively.

    **Parameters:**

    hyperparams : dict(['covar':covariance hyperparameters for interval regressor, \
                        'lik': robust likelihood hyperparameters], ...)

    **Returns:**

    [double] : P(input_values correspond to common model, respectively?),\
    [double] : prediction interval
    """
    self._twosample_object.predict_model_likelihoods(messages=False)
    self._twosample_object.bayes_factor()

    probabilities = SP.zeros((number_of_gibbs_iterations, self._input.shape[0]), dtype='bool')

    for iteration in xrange(number_of_gibbs_iterations):
        for interval_index in xrange(self._input.shape[0]):
            self._indicators[interval_index] = self._resample_interval_index(interval_index, hyperparams)
        probabilities[iteration, :] = self._indicators
        # logging.info("Gibbs Iteration: %i"%(iteration))
        # logging.info("Current Indicator: %s"% (self._indicators))

    probabilities = SP.array(probabilities, dtype='bool')
    # get rid of training runs (first half); the slice index must be an int
    probabilities = probabilities[int(SP.ceil(number_of_gibbs_iterations/2)):]
    # logging.info("End: Indicators %s"% (probabilities.mean(0)))
    self._predicted_model_distribution = self._calculate_indicator_mean(probabilities, hyperparams, prediction_interval)
    self._predicted_indicators = probabilities.mean(0) > .5
    return self._predicted_model_distribution
def plot_pairwise_velocities_r(case, color, all_radial_distances, all_radial_velocities):
    dr = 0.3  # Mpc/h
    rmin, rmax = sp.amin(all_radial_distances), sp.amax(all_radial_distances)
    rrange = rmax - rmin
    N = int(sp.ceil(rrange/dr))
    rs = sp.linspace(rmin, rmax, N)
    v12_of_r = [[] for index in range(N)]

    for r, v12 in zip(all_radial_distances, all_radial_velocities):
        index = int(sp.floor((r-rmin)/dr))
        v12_of_r[index].append(v12)

    sigma_12s = sp.zeros(N)
    v12_means = sp.zeros(N)
    for index in range(len(sigma_12s)):
        v12_of_r_index = sp.array(v12_of_r[index])
        print "number of counts in the", index, "th bin:", len(v12_of_r_index)
        sigma_12 = sp.sqrt(sp.mean(v12_of_r_index**2))
        v12_mean = -sp.mean(v12_of_r_index)
        sigma_12s[index] = sigma_12
        v12_means[index] = v12_mean

    plt.plot(rs, sigma_12s, color=color, label='$\sigma_{12}$')
    plt.plot(rs, v12_means, color=color, label='$|v_{12}|$')
    plt.xlabel('r [Mpc/h]')
    plt.ylabel('[km/s]')
    plt.xscale('log')
    plt.axis([0.5, 100, 0, 600])
def getAxis(self, X, Y):
    """
    return the proper axis limits for the plots
    """
    out = []
    mM = [(min(X), max(X)), (min(Y), max(Y))]
    for i, j in mM:
        # YJC: checking if values are negative, if yes, return 0 and break
        if j < 0 or i < 0:
            return 0
        log_i = scipy.log10(i)
        d, I = scipy.modf(log_i)
        if log_i < 0:
            add = 0.5 * (scipy.absolute(d) < 0.5)
        else:
            add = 0.5 * (scipy.absolute(d) > 0.5)
        m = scipy.floor(log_i) + add
        out.append(10**m)
        log_j = scipy.log10(j)
        d, I = scipy.modf(log_j)
        if log_j < 0:
            add = -0.5 * (scipy.absolute(d) > 0.5)
        else:
            add = -0.5 * (scipy.absolute(d) < 0.5)
        m = scipy.ceil(log_j) + add
        out.append(10**m)
    return tuple(out)
def _msge_with_gradient_overdetermined(self, data, delta, xvschema, skipstep):
    """ Calculate the mean squared generalization error and its gradient for
        an overdetermined equation system.
    """
    (l, m, t) = data.shape
    d = None
    l, k = 0, 0
    nt = sp.ceil(t / skipstep)
    for s in range(0, t, skipstep):
        #print(s, drange)
        trainset, testset = xvschema(s, t)

        (a, b) = self._construct_eqns(sp.atleast_3d(data[:, :, trainset]))
        (c, d) = self._construct_eqns(sp.atleast_3d(data[:, :, testset]))

        #e = sp.linalg.inv(np.eye(a.shape[1])*delta**2 + a.transpose().dot(a), overwrite_a=True, check_finite=False)
        e = sp.linalg.inv(sp.eye(a.shape[1]) * delta ** 2 + a.transpose().dot(a))

        ba = b.transpose().dot(a)
        dc = d.transpose().dot(c)
        bae = ba.dot(e)
        baee = bae.dot(e)
        baecc = bae.dot(c.transpose().dot(c))

        l += sp.sum(baecc * bae - 2 * bae * dc) + sp.sum(d ** 2)
        k += sp.sum(baee * dc - baecc * baee) * 4 * delta

    return l / (nt * d.size), k / (nt * d.size)
def _hough_transform(img, angles):
    rows, cols = img.shape

    # determine the number of bins (array shapes must be integers)
    d = sp.ceil(sp.hypot(*img.shape))
    nr_bins = int(2 * d)
    bins = sp.linspace(-d, d, nr_bins)

    # create the accumulator
    out = sp.zeros((nr_bins, len(angles)), dtype=sp.float64)

    # compute the sines/cosines
    cos_theta = sp.cos(angles)
    sin_theta = sp.sin(angles)

    # construct the x and y values
    y = []
    x = []
    for i in xrange(rows):
        y += [i] * cols
        x += range(cols)
    y = sp.array(y)
    x = sp.array(x)

    # flatten image
    flattened_img = img.flatten()

    for i, (c, s) in enumerate(zip(cos_theta, sin_theta)):
        distances = x * c + y * s
        bin_indices = (sp.round_(distances) - bins[0]).astype(sp.uint8)
        bin_sums = sp.bincount(bin_indices, flattened_img)
        out[:len(bin_sums), i] = bin_sums

    return out
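# Usage sketch (tiny illustrative image): accumulate a single bright pixel over a
# few angles; the accumulator has one row per distance bin and one column per angle.
import numpy as np
img = np.zeros((8, 8))
img[3, 5] = 1.0
angles = np.linspace(0, np.pi, 4, endpoint=False)
acc = _hough_transform(img, angles)
print(acc.shape)   # (number of distance bins, number of angles)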
def fileLog(self):
    if self.ui.tabWidget.currentIndex():
        self.oktoLoad()
        return
    else:
        dir = (os.path.dirname(self.filename) if self.filename is not None else ".")
        self.filetuple = QFileDialog.getOpenFileName(self,
                                                     "Open Log File", dir,
                                                     "Data (*.log)\nAll Files (*.*)")
        self.filename = self.filetuple[0]
        fname = self.filename
        if fname:
            self.logProcessor.processLog(fname)
            self.loadFile(fname)
            self.updateStatus('New Log file opened.')
            [self.logProcessor.timeS, self.logProcessor.av, self.logProcessor.error] = \
                self.logProcessor.Allan.allanDevMills(self.logProcessor.offsets)
            self.type = 3
            self.sizeOff = len(self.logProcessor.offsets)
            if (self.sizeOff % 84 != 0):
                self.exceeds = self.sizeOff % 84
            # the spin box range expects an integer tick count
            self.numberOFTicks = int(scipy.ceil(float(self.sizeOff) / 84))
            self.ui.spinBox.setRange(1, self.numberOFTicks)
def _msge_with_gradient_underdetermined(self, data, delta, xvschema, skipstep):
    """ Calculate the mean squared generalization error and its gradient for
        an underdetermined equation system.
    """
    (l, m, t) = data.shape
    d = None
    j, k = 0, 0
    nt = sp.ceil(t / skipstep)
    for s in range(0, t, skipstep):
        trainset, testset = xvschema(s, t)

        (a, b) = self._construct_eqns(sp.atleast_3d(data[:, :, trainset]))
        (c, d) = self._construct_eqns(sp.atleast_3d(data[:, :, testset]))

        e = sp.linalg.inv(sp.eye(a.shape[0]) * delta ** 2 + a.dot(a.transpose()))

        cc = c.transpose().dot(c)

        be = b.transpose().dot(e)
        bee = be.dot(e)
        bea = be.dot(a)
        beea = bee.dot(a)
        beacc = bea.dot(cc)
        dc = d.transpose().dot(c)

        j += sp.sum(beacc * bea - 2 * bea * dc) + sp.sum(d ** 2)
        k += sp.sum(beea * dc - beacc * beea) * 4 * delta

    return j / (nt * d.size), k / (nt * d.size)
def makesumrule(ptype, plen, ts, lagtype='centered'):
    """ This function will return the sum rule.
        Inputs
            ptype - The type of pulse.
            plen - Length of the pulse in seconds.
            ts - Sample time in seconds.
            lagtype - Can be 'centered', 'forward' or 'backward'.
        Output
            sumrule - A 2 x nlags numpy array that holds the summation rule.
    """
    nlags = int(sp.round_(plen/ts))
    if ptype.lower() == 'long':
        if lagtype == 'forward':
            arback = -sp.arange(nlags, dtype=int)
            arforward = sp.zeros(nlags, dtype=int)
        elif lagtype == 'backward':
            arback = sp.zeros(nlags, dtype=int)
            arforward = sp.arange(nlags, dtype=int)
        else:
            arback = -sp.ceil(sp.arange(0, nlags/2.0, 0.5)).astype(int)
            arforward = sp.floor(sp.arange(0, nlags/2.0, 0.5)).astype(int)
        sumrule = sp.array([arback, arforward])
    elif ptype.lower() == 'barker':
        sumrule = sp.array([[0], [0]])
    return sumrule
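# Usage sketch (illustrative numbers): a 280-microsecond long pulse sampled every
# 20 microseconds gives nlags = 14; the default centered rule splits each lag
# between backward and forward sample offsets.
rule = makesumrule('long', 280e-6, 20e-6)
print(rule.shape)   # (2, 14): row 0 holds backward offsets, row 1 forward offsets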
def plotiono(ionoin):
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    (figmplf, axmat) = plt.subplots(1, 2, figsize=(10, 5), facecolor='w', sharey=True)
    species = ionoin.Species
    zvec = ionoin.Cart_Coords[:, 2]
    params = ionoin.Param_List[:, 0]
    maxden = 10**sp.ceil(sp.log10(params[:, -1, 0].max()))
    for iplot, ispec in enumerate(species):
        axmat[0].plot(params[:, iplot, 0], zvec, label=ispec + ' Density')
        axmat[1].plot(params[:, iplot, 1], zvec, label=ispec + ' Temperature')

    axmat[0].set_title('Number Density')
    axmat[0].set_xscale('log')
    axmat[0].set_ylim([50, 1000])
    axmat[0].set_xlim([maxden*1e-5, maxden])
    axmat[0].set_xlabel(r'Densities in m$^{-3}$')
    axmat[0].set_ylabel('Alt in km')
    axmat[0].legend()

    axmat[1].set_title('Temperature')
    axmat[1].set_xlim([100., 2500.])
    axmat[1].set_xlabel(r'Temp in $^{\circ}$ K')
    axmat[1].set_ylabel('Alt in km')
    axmat[1].legend()
    plt.tight_layout()
    return figmplf, axmat
def plotLcurveall(alphaarr, datadif, constdif):
    """ This will plot the L-curve for all of the lags."""
    sns.set_style('whitegrid')
    sns.set_context('notebook')
    Nlag = datadif.shape[-1]
    nlagplot = 4.
    nrows = int(sp.ceil(float(Nlag)/(2*nlagplot)))

    fig, axmat = plt.subplots(nrows=nrows, ncols=2, facecolor='w',
                              figsize=(8, 4*nrows), sharey=True)
    axlist = axmat.flatten()

    for iaxn, iax in enumerate(axlist):
        strlist = []
        handlist = []
        for ilag in range(int(nlagplot)):
            curlag = int(iaxn*nlagplot + ilag)
            if curlag >= Nlag:
                break
            handlist.append(iax.plot(datadif[:, curlag], constdif[:, curlag])[0])
            strlist.append('Lag {0}'.format(curlag))
        iax.set_xscale('log')
        iax.set_yscale('log')
        iax.set_title('L Curve', fontsize=fs)
        iax.set_xlabel(r'$\|Ax-b\|_2$', fontsize=fs)
        iax.set_ylabel(r'$f(x)$', fontsize=fs)
        iax.legend(handlist, strlist, loc='upper right', fontsize='large')
    plt.tight_layout()
    return (fig, axlist)
def plotgpsonly(TEClist, gpslist, plotdir, m, ax, fig, latlim, lonlim):
    """ Makes a set of plots when only GPS data is available."""
    maxplot = len(gpslist)
    strlen = int(sp.ceil(sp.log10(maxplot))+1)
    fmstr = '{0:0>'+str(strlen)+'}_'
    plotnum = 0
    for gps_cur in gpslist:
        gpshands = []
        gpsmin = sp.inf
        gpsmax = -sp.inf
        for igpsn, (igps, igpslist) in enumerate(zip(TEClist, gps_cur)):
            print('Plotting GPS data from rec {0} of {1}'.format(igpsn, len(gps_cur)))
            # check if there's anything to plot
            if len(igpslist) == 0:
                continue
            (sctter, scatercb) = scatterGD(igps, 'alt', 3.5e5, vbounds=[0, 20],
                                           time=igpslist, gkey='vTEC', cmap='plasma',
                                           fig=fig, ax=ax, title='', cbar=True, err=.1, m=m)
            gpsmin = sp.minimum(igps.times[igpslist, 0].min(), gpsmin)
            gpsmax = sp.maximum(igps.times[igpslist, 0].max(), gpsmax)
            gpshands.append(sctter)
        scatercb.set_label('vTEC in TECu')
        # change the z order
        print('Plotting {0} of {1} plots'.format(plotnum, maxplot))
        plt.savefig(os.path.join(plotdir, fmstr.format(plotnum)+'GPSonly.png'))
        plotnum += 1
        for i in reversed(gpshands):
            i.set_zorder(i.get_zorder()+1)
def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p):
    """ Calculate the mean squared generalization error and its gradient for
        an overdetermined equation system.
    """
    (l, m, t) = data.shape
    d = None

    l, k = 0, 0
    nt = sp.ceil(t / skipstep)
    for trainset, testset in xvschema(t, skipstep):

        (a, b) = _construct_var_eqns(sp.atleast_3d(data[:, :, trainset]), p)
        (c, d) = _construct_var_eqns(sp.atleast_3d(data[:, :, testset]), p)

        e = sp.linalg.inv(sp.eye(a.shape[1]) * delta ** 2 + a.transpose().dot(a))

        ba = b.transpose().dot(a)
        dc = d.transpose().dot(c)
        bae = ba.dot(e)
        baee = bae.dot(e)
        baecc = bae.dot(c.transpose().dot(c))

        l += sp.sum(baecc * bae - 2 * bae * dc) + sp.sum(d ** 2)
        k += sp.sum(baee * dc - baecc * baee) * 4 * delta

    return l / (nt * d.size), k / (nt * d.size)
def traj_ensemble_quantiles(traj_set, quantiles=(0.025, 0.5, 0.975)):
    """
    Return a list of trajectories, each one corresponding to a given passed-in
    quantile.
    """
    all_values = scipy.array([traj.values for traj in traj_set])
    sorted_values = scipy.sort(all_values, 0)

    q_trajs = []
    for q in quantiles:
        # Calculate the index corresponding to this quantile. The q is because
        # Python arrays are 0 indexed
        index = q * (len(sorted_values) - 1)
        below = int(scipy.floor(index))
        above = int(scipy.ceil(index))

        if above == below:
            q_values = sorted_values[below]
        else:
            # Linearly interpolate...
            q_below = (1.0*below)/(len(sorted_values)-1)
            q_above = (1.0*above)/(len(sorted_values)-1)
            q_values = sorted_values[below] + (q - q_below)*(sorted_values[above] - sorted_values[below])/(q_above - q_below)
        q_traj = copy.deepcopy(traj_set[0])
        q_traj.values = q_values
        q_trajs.append(q_traj)

    return q_trajs
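# Equivalent interpolation check with plain NumPy (illustrative values): since
# above - below == 1, the (q - q_below)/(q_above - q_below) factor reduces to the
# fractional part of the index.
import numpy as np
sorted_values = np.array([[1.0], [2.0], [4.0], [8.0]])    # already sorted along axis 0
q = 0.5
index = q * (len(sorted_values) - 1)                      # 1.5 -> between members 1 and 2
below, above = int(np.floor(index)), int(np.ceil(index))
frac = index - below
print(sorted_values[below] + frac * (sorted_values[above] - sorted_values[below]))   # [3.]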
def f_PSD_from_file(filename, fLow, fNyq, deltaF):
    """
    Read a detector ascii ASD file and return the PSD and frequency vector
    for use with ffts.
    """
    f_in, S_in = numpy.loadtxt(filename, unpack=True)

    f = numpy.linspace(fLow, fNyq, int(scipy.ceil((fNyq-fLow)/deltaF))+1)
    S = pylab.interp(f, f_in, S_in)

    # packing is of the form:
    # [0 deltaF 2*deltaF ... fNyquist-deltaF fNyquist -fNyquist+deltaF ... -2*deltaF -deltaF]
    PSD = scipy.zeros(2*(fNyq/deltaF), dtype='float') + scipy.inf
    PSD[round(fLow/deltaF):fNyq/deltaF+1] = S**2
    if -round(fLow/deltaF) == 0:
        PSD[fNyq/deltaF+1:] = S[-2:0:-1]**2
    else:
        PSD[fNyq/deltaF+1:-round(fLow/deltaF)] = S[-2:0:-1]**2

    # NOTE: the block below rebuilds f and PSD with explicit bin counts and
    # supersedes the packing above.
    f = f_for_fft(fLow, fNyq, deltaF)

    nNyq = int(round(fNyq/deltaF))
    nLow = int(round(fLow/deltaF))
    PSD = scipy.zeros(2*nNyq) + scipy.inf
    PSD[nLow:nNyq+1] = S**2
    if -nLow == 0:
        PSD[nNyq+1:] = S[-2:0:-1]**2
    else:
        PSD[nNyq+1:-nLow] = S[-2:0:-1]**2

    return f, PSD
def plotopticsonly(allsky_data, plotdir, m, ax, fig, latlim, lonlim):
    """ Make a set of plots when only all-sky data is available."""
    maxplot = len(allsky_data.times)
    strlen = int(sp.ceil(sp.log10(maxplot))+1)
    fmstr = '{0:0>'+str(strlen)+'}_'
    optictimes = allsky_data.times
    plotnum = 0
    firstbar = True
    optbnds = [300, 1100]
    for iop in range(len(optictimes)):
        (slice3, cbar3) = slice2DGD(allsky_data, 'alt', 150, optbnds, title='',
                                    time=iop, cmap='gray', gkey='image', fig=fig,
                                    ax=ax, cbar=True, m=m)
        slice3.set_norm(colors.PowerNorm(gamma=0.6, vmin=optbnds[0], vmax=optbnds[1]))

        if firstbar:
            firstbar = False
            cbaras = plt.colorbar(slice3, ax=ax, orientation='horizontal')
            cbaras.set_label('All Sky Scale')

        plt.title(insertinfo('All Sky $tmdy $thmsehms', posix=allsky_data.times[iop, 0],
                             posixend=allsky_data.times[iop, 1]))
        print('Plotting {0} of {1} plots'.format(plotnum, maxplot))
        plt.savefig(os.path.join(plotdir, fmstr.format(plotnum)+'ASonly.png'))
        plotnum += 1
        slice3.remove()
def plotCircleData(self, plotdir, ax, fig):
    plotdir = Path(plotdir).expanduser()
    timelist = self.Regdict['Time']
    Nt = len(timelist)
    strlen = int(sp.ceil(sp.log10(Nt))+1)
    fmstr = '{0:0>'+str(strlen)+'}_'
    plotnum = 0
    cbarax = []
    for itime in range(Nt):
        # plot the data
        hands, cbarax = self.plotCircle(ax, fig, itime, cbarax)
        ofn = plotdir / (fmstr.format(plotnum)+'ASwGPS.png')
        print('Plotting {} of {} {}'.format(plotnum, Nt-1, ofn))
        plt.savefig(str(ofn))
        plotnum += 1
        for ihand in hands:
            if hasattr(ihand, "__len__"):
                for ihand2 in ihand:
                    ihand2.remove()
            elif hasattr(ihand, 'collections'):
                for ihand2 in ihand.collections:
                    ihand2.remove()
            else:
                ihand.remove()
    # write out ini file to record plot parameters
    ininame = Path(self.inifile).name
    writeini(self.params, plotdir/ininame)
def __init__(self): gr.top_block.__init__(self) self._N = 100000 # number of samples to use self._fs = 2000 # initial sampling rate self._interp = 5 # Interpolation rate for PFB interpolator self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler # Frequencies of the signals we construct freq1 = 100 freq2 = 200 # Create a set of taps for the PFB interpolator # This is based on the post-interpolation sample rate self._taps = gr.firdes.low_pass_2(self._interp, self._interp*self._fs, freq2+50, 50, attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS) # Create a set of taps for the PFB arbitrary resampler # The filter size is the number of filters in the filterbank; 32 will give very low side-lobes, # and larger numbers will reduce these even farther # The taps in this filter are based on a sampling rate of the filter size since it acts # internally as an interpolator. flt_size = 32 self._taps2 = gr.firdes.low_pass_2(flt_size, flt_size*self._fs, freq2+50, 150, attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS) # Calculate the number of taps per channel for our own information tpc = scipy.ceil(float(len(self._taps)) / float(self._interp)) print "Number of taps: ", len(self._taps) print "Number of filters: ", self._interp print "Taps per channel: ", tpc # Create a couple of signals at different frequencies self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5) self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5) self.signal = gr.add_cc() self.head = gr.head(gr.sizeof_gr_complex, self._N) # Construct the PFB interpolator filter self.pfb = blks2.pfb_interpolator_ccf(self._interp, self._taps) # Construct the PFB arbitrary resampler filter self.pfb_ar = blks2.pfb_arb_resampler_ccf(self._ainterp, self._taps2, flt_size) self.snk_i = gr.vector_sink_c() #self.pfb_ar.pfb.print_taps() #self.pfb.pfb.print_taps() # Connect the blocks self.connect(self.signal1, self.head, (self.signal,0)) self.connect(self.signal2, (self.signal,1)) self.connect(self.signal, self.pfb) self.connect(self.signal, self.pfb_ar) self.connect(self.signal, self.snk_i) # Create the sink for the interpolated signals self.snk1 = gr.vector_sink_c() self.snk2 = gr.vector_sink_c() self.connect(self.pfb, self.snk1) self.connect(self.pfb_ar, self.snk2)
def main(wind_angle): # for wind_mag in np.arange(0.4,3.8,0.2): # wind_mag = float(sys.argv[1]) # wind_angle = 13*scipy.pi/8. wind_mag = 1.4 file_name = 'trap_arrival_by_wind_live_coarse_dt' file_name = file_name + '_wind_mag_' + str( wind_mag) + '_wind_angle_' + str(wind_angle)[0:4] output_file = file_name + '.pkl' dt = 0.25 plume_dt = 0.25 frame_rate = 20 times_real_time = 60 # seconds of simulation / sec in video capture_interval = int(scipy.ceil(times_real_time * (1. / frame_rate) / dt)) simulation_time = 50. * 60. #seconds release_delay = 30. * 60 #/(wind_mag) t_start = 0.0 t = 0. - release_delay # Set up figure fig = plt.figure(figsize=(11, 11)) ax = fig.add_subplot(111) #Video FFMpegWriter = animate.writers['ffmpeg'] metadata = { 'title': file_name, } writer = FFMpegWriter(fps=frame_rate, metadata=metadata) writer.setup(fig, file_name + '.mp4', 500) wind_param = { 'speed': wind_mag, 'angle': wind_angle, 'evolving': False, 'wind_dt': None, 'dt': dt } wind_field_noiseless = wind_models.WindField(param=wind_param) #traps number_sources = 8 radius_sources = 1000.0 trap_radius = 0.5 location_list, strength_list = utility.create_circle_of_sources( number_sources, radius_sources, None) trap_param = { 'source_locations': location_list, 'source_strengths': strength_list, 'epsilon': 0.01, 'trap_radius': trap_radius, 'source_radius': radius_sources } traps = trap_models.TrapModel(trap_param) #Wind and plume objects #Odor arena xlim = (-1500., 1500.) ylim = (-1500., 1500.) sim_region = models.Rectangle(xlim[0], ylim[0], xlim[1], ylim[1]) wind_region = models.Rectangle(xlim[0] * 2, ylim[0] * 2, xlim[1] * 2, ylim[1] * 2) source_pos = scipy.array( [scipy.array(tup) for tup in traps.param['source_locations']]).T #wind model setup diff_eq = False constant_wind_angle = wind_angle aspect_ratio = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0]) noise_gain = 3. noise_damp = 0.071 noise_bandwidth = 0.71 wind_grid_density = 200 Kx = Ky = 10000 #highest value observed to not cause explosion: 10000 wind_field = models.WindModel(wind_region, int(wind_grid_density * aspect_ratio), wind_grid_density, noise_gain=noise_gain, noise_damp=noise_damp, noise_bandwidth=noise_bandwidth, Kx=Kx, Ky=Ky, diff_eq=diff_eq, angle=constant_wind_angle, mag=wind_mag) # Set up plume model plume_width_factor = 1. centre_rel_diff_scale = 2. * plume_width_factor # puff_release_rate = 0.001 puff_release_rate = 10 puff_spread_rate = 0.005 puff_init_rad = 0.01 max_num_puffs = int(2e5) # max_num_puffs=100 plume_model = models.PlumeModel( sim_region, source_pos, wind_field, simulation_time + release_delay, plume_dt, plume_cutoff_radius=1500, centre_rel_diff_scale=centre_rel_diff_scale, puff_release_rate=puff_release_rate, puff_init_rad=puff_init_rad, puff_spread_rate=puff_spread_rate, max_num_puffs=max_num_puffs) # Create a concentration array generator array_z = 0.01 array_dim_x = 1000 array_dim_y = array_dim_x puff_mol_amount = 1. array_gen = processors.ConcentrationArrayGenerator(sim_region, array_z, array_dim_x, array_dim_y, puff_mol_amount) #Setup fly swarm wind_slippage = (0., 1.) swarm_size = 2000 use_empirical_release_data = False #Grab wind info to determine heading mean wind_x, wind_y = wind_mag * scipy.cos(wind_angle), wind_mag * scipy.sin( wind_angle) beta = 1. release_times = scipy.random.exponential(beta, (swarm_size, )) kappa = 2. 
heading_data = None swarm_param = { 'swarm_size': swarm_size, 'heading_data': heading_data, 'initial_heading': scipy.radians(scipy.random.uniform(0.0, 360.0, (swarm_size, ))), 'x_start_position': scipy.zeros(swarm_size), 'y_start_position': scipy.zeros(swarm_size), 'flight_speed': scipy.full((swarm_size, ), 1.5), 'release_time': release_times, 'release_delay': release_delay, 'cast_interval': [1, 3], 'wind_slippage': wind_slippage, 'odor_thresholds': { 'lower': 0.0005, 'upper': 0.05 }, 'schmitt_trigger': False, 'low_pass_filter_length': 3, #seconds 'dt_plot': capture_interval * dt, 't_stop': 3000., 'cast_timeout': 20, 'airspeed_saturation': True } swarm = swarm_models.BasicSwarmOfFlies(wind_field_noiseless, traps, param=swarm_param, start_type='fh', track_plume_bouts=False, track_arena_exits=False) # xmin,xmax,ymin,ymax = -1000,1000,-1000,1000 #Initial concentration plotting conc_array = array_gen.generate_single_array(plume_model.puffs) xmin = sim_region.x_min xmax = sim_region.x_max ymin = sim_region.y_min ymax = sim_region.y_max im_extents = (xmin, xmax, ymin, ymax) vmin, vmax = 0., 50. cmap = matplotlib.colors.ListedColormap(['white', 'orange']) conc_im = ax.imshow(conc_array.T[::-1], extent=im_extents, vmin=vmin, vmax=vmax, cmap=cmap) xmin, xmax, ymin, ymax = -1000, 1000, -1000, 1000 #For looking at the distace-bound plumes xmin, xmax, ymin, ymax = -3000, 3000, -3000, 3000 buffr = 100 ax.set_xlim((xmin - buffr, xmax + buffr)) ax.set_ylim((ymin - buffr, ymax + buffr)) #Conc array gen to be used for the flies sim_region_tuple = plume_model.sim_region.as_tuple() box_min, box_max = sim_region_tuple[1], sim_region_tuple[2] #for the plume distance cutoff version, make sure this is at least 2x radius box_min, box_max = -3000., 3000. r_sq_max = 20 epsilon = 0.00001 N = 1e6 array_gen_flies = processors.ConcentrationValueFastCalculator( box_min, box_max, r_sq_max, epsilon, puff_mol_amount, N) #Initial fly plotting #Sub-dictionary for color codes for the fly modes Mode_StartMode = 0 Mode_FlyUpWind = 1 Mode_CastForOdor = 2 Mode_Trapped = 3 edgecolor_dict = { Mode_StartMode: 'blue', Mode_FlyUpWind: 'red', Mode_CastForOdor: 'red', Mode_Trapped: 'black' } facecolor_dict = { Mode_StartMode: 'blue', Mode_FlyUpWind: 'red', Mode_CastForOdor: 'white', Mode_Trapped: 'black' } fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode] fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode] fly_dots = plt.scatter(swarm.x_position, swarm.y_position, edgecolor=fly_edgecolors, facecolor=fly_facecolors, alpha=0.9) #Put the time in the corner (xmin, xmax) = ax.get_xlim() (ymin, ymax) = ax.get_ylim() text = '0 min 0 sec' timer = ax.text(xmax, ymax, text, color='r', horizontalalignment='right') ax.text(1., 1.02, 'time since release:', color='r', transform=ax.transAxes, horizontalalignment='right') #Wind arrow plt.arrow(0.5, 0.5, 0.07, -0.07, transform=ax.transAxes, color='b', width=0.001) ax.text(0.75, 0.9, 'Wind', transform=ax.transAxes, color='b') # #traps for x, y in traps.param['source_locations']: #Black x plt.scatter(x, y, marker='x', s=50, c='k') # Red circles # p = matplotlib.patches.Circle((x, y), 15,color='red') # ax.add_patch(p) #Remove plot edges and add scale bar fig.patch.set_facecolor('white') plt.plot([-900, -800], [900, 900], color='k') #,transform=ax.transData,color='k') ax.text(-900, 820, '100 m') plt.axis('off') #Fly behavior color legend for mode, fly_facecolor, fly_edgecolor, a in zip( ['Dispersing', 'Surging', 'Casting', 'Trapped'], facecolor_dict.values(), edgecolor_dict.values(), 
[0, 50, 100, 150]): plt.scatter([1000], [-600 - a], edgecolor=fly_edgecolor, facecolor=fly_facecolor, s=20) plt.text(1050, -600 - a, mode, verticalalignment='center') # plt.ion() # plt.show() # raw_input() while t < simulation_time: for k in range(capture_interval): #update flies print('t: {0:1.2f}'.format(t)) #update the swarm for j in range(int(dt / plume_dt)): wind_field.update(plume_dt) plume_model.update(plume_dt, verbose=True) # velocity_field = wind_field.velocity_field # u,v = velocity_field[:,:,0],velocity_field[:,:,1] # u,v = u[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor],\ # v[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor] # vector_field.set_UVC(u,v) if t > 0.: swarm.update(t, dt, wind_field_noiseless, array_gen_flies, traps, plumes=plume_model, pre_stored=False) t += dt # time.sleep(0.001) # Update live display # '''plot the flies''' if t > 0: # Update time display release_delay = release_delay / 60. text = '{0} min {1} sec'.format(int(scipy.floor(abs(t / 60.))), int(scipy.floor(abs(t) % 60.))) timer.set_text(text) fly_dots.set_offsets(scipy.c_[swarm.x_position, swarm.y_position]) fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode] fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode] # fly_dots.set_edgecolor(fly_edgecolors) fly_dots.set_facecolor(fly_facecolors) trap_list = [] for trap_num, trap_loc in enumerate( traps.param['source_locations']): mask_trap = swarm.trap_num == trap_num trap_cnt = mask_trap.sum() trap_list.append(trap_cnt) total_cnt = sum(trap_list) conc_array = array_gen.generate_single_array(plume_model.puffs) # non_inf_log = log_im = scipy.log(conc_array.T[::-1]) cutoff_l = scipy.percentile(log_im[~scipy.isinf(log_im)], 10) cutoff_u = scipy.percentile(log_im[~scipy.isinf(log_im)], 99) # im = (log_im>cutoff_l) & (log_im<0.1) # n = matplotlib.colors.Normalize(vmin=0,vmax=1) # image.set_data(im) # image.set_norm(n) conc_im.set_data(log_im) n = matplotlib.colors.Normalize(vmin=cutoff_l, vmax=cutoff_u) conc_im.set_norm(n) # plt.pause(0.0001) writer.grab_frame() writer.finish() with open(output_file, 'w') as f: pickle.dump((wind_field_noiseless, swarm), f) #Trap arrival plot trap_locs = (2 * scipy.pi / swarm.num_traps) * scipy.array( swarm.list_all_traps()) sim_trap_counts = swarm.get_trap_counts() #Set 0s to 1 for plotting purposes sim_trap_counts[sim_trap_counts == 0] = .5 radius_scale = 0.3 plot_size = 1.5 plt.figure(200 + int(10 * wind_mag)) ax = plt.subplot(aspect=1) trap_locs_2d = [(scipy.cos(trap_loc), scipy.sin(trap_loc)) for trap_loc in trap_locs] patches = [ plt.Circle(center, size) for center, size in zip( trap_locs_2d, radius_scale * sim_trap_counts / max(sim_trap_counts)) ] coll = matplotlib.collections.PatchCollection(patches, facecolors='blue', edgecolors='blue') ax.add_collection(coll) ax.set_ylim([-plot_size, plot_size]) ax.set_xlim([-plot_size, plot_size]) ax.set_xticks([]) ax.set_xticklabels('') ax.set_yticks([]) ax.set_yticklabels('') #Wind arrow plt.arrow(0.5, 0.5, 0.1 * scipy.cos(wind_angle), 0.1 * scipy.sin(wind_angle), transform=ax.transAxes, color='b', width=0.001) # ax.text(0.55, 0.5,'Wind',transform=ax.transAxes,color='b') ax.text(0, 1.5, 'N', horizontalalignment='center', verticalalignment='center', fontsize=25) ax.text(0, -1.5, 'S', horizontalalignment='center', verticalalignment='center', fontsize=25) ax.text(1.5, 0, 'E', horizontalalignment='center', verticalalignment='center', fontsize=25) ax.text(-1.5, 0, 'W', horizontalalignment='center', verticalalignment='center', fontsize=25) # 
plt.title('Simulated') fig.patch.set_facecolor('white') plt.axis('off') ax.text(0, 1.7, 'Trap Counts' + ' (Wind Mag: ' + str(wind_mag)[0:3] + ')', horizontalalignment='center', verticalalignment='center', fontsize=20) plt.savefig(file_name + '.png', format='png')
def PatchDenoiseParallel2(net, src, originalLowdoseImg, patchSize=40, stride=32): outputImg = np.zeros(src.shape) outputWeight = np.zeros(src.shape) sz = patchSize fullSz = src.shape[-1] mask1D = scipy.signal.gaussian(sz, sz / 3.0) mask = np.matlib.repmat(mask1D, sz, 1) mask = np.multiply(mask, mask.transpose()) mask = mask.astype(np.float32) nPatches = int(scipy.ceil((fullSz - sz / 2) / float(stride))) # extract patches patches = np.zeros([nPatches * nPatches, 2, patchSize, patchSize], dtype=np.float32) baseCoords = np.zeros([nPatches * nPatches, 2], dtype=np.int) ind = 0 for ix in range(0, nPatches): for iy in range(0, nPatches): basex = ix * stride basey = iy * stride if basex + sz > fullSz: basex = fullSz - sz if basey + sz > fullSz: basey = fullSz - sz patches[ind, 0, ...] = src[basey:basey + sz, basex:basex + sz] patches[ind, 1, ...] = originalLowdoseImg[basey:basey + sz, basex:basex + sz] baseCoords[ind, 0] = basex baseCoords[ind, 1] = basey ind += 1 # put the patches through the network batch by batch batchSize = net.blobs['dataSrc'].data.shape[0] nBatches = int(math.ceil(patches.shape[0] / float(batchSize))) for i in range(0, nBatches - 1): indStart = i * batchSize net.blobs['dataSrc'].data[...] = patches[indStart:indStart + batchSize, ...] net.forward() patches[indStart:indStart + batchSize, 0, ...] = net.blobs[net.outputs[0]].data.squeeze() indStart = (nBatches - 1) * batchSize nLeftPatches = patches.shape[0] - indStart net.blobs['dataSrc'].data[0:nLeftPatches, ...] = patches[indStart:, ...] net.forward() patches[indStart:, 0, ...] = net.blobs[net.outputs[0]].data[0:nLeftPatches, ...].squeeze() # put the patches back together for ind in range(0, baseCoords.shape[0]): basex = baseCoords[ind, 0] basey = baseCoords[ind, 1] outputImg[basey:basey + sz, basex:basex + sz] += np.multiply(patches[ind, 0, ...], mask) outputWeight[basey:basey + sz, basex:basex + sz] += mask outputImg = np.divide(outputImg, outputWeight) return outputImg
def __init__(self, buf, windowWidth):
    milliseconds = settings.frameRate
    self.size = int(sp.ceil(buf.sampleRate / 1e3 * milliseconds))
    self.buf = buf
    self.windowWidth = windowWidth
def create_gabor(rot, RF_siz, Div, plot): count = 0 numFilterSizes = len(RF_siz) numSimpleFilters = len(rot) lamb = (RF_siz * 2) / Div sigma = lamb * 0.8 G = 0.3 phases = [0] # Initialize Filterbank alt_fb = np.zeros((65, 65, 1, 136), dtype=np.float32) # for k in tqdm(range(0,numFilterSizes-1)): for k in tqdm(range(1, numFilterSizes + 1)): for r in tqdm(range(1, numSimpleFilters + 1)): f = np.zeros( [RF_siz[numFilterSizes - 1], RF_siz[numFilterSizes - 1]]) fx = np.zeros( [RF_siz[numFilterSizes - 1], RF_siz[numFilterSizes - 1]]) ## Parameters theta = rot[r - 1] * (np.pi / 180) filtSize = RF_siz[k - 1] img_center = ceil(33.0) ## New center for padding with zeros center = ceil(filtSize / 2.0) ## Old and possibly more accurate center filtSizeL = center - 1 filtSizeR = filtSize - filtSizeL - 1 sigmaq = (sigma[k - 1]) * (sigma[k - 1]) # Compute filter values for iPhi in range(1, 2): for i in range(int(-1 * filtSizeL), int(filtSizeR + 1)): for j in range(int(-1 * filtSizeL), int(filtSizeR + 1)): if (sqrt((i**2) + (j**2)) > (filtSize / 2)): E = 0 else: x = i * np.cos(theta) - j * np.sin(theta) y = i * np.sin(theta) + j * np.cos(theta) E = np.exp((-1 * ((x**2) + (G**2) * (y**2))) / (2 * sigmaq)) * np.cos(2 * np.pi * x / lamb[k - 1] + phases[iPhi - 1]) f[int(j + img_center - 1), int(i + img_center - 1)] = E ## Append to fb (filterbank) f = f - np.mean(np.mean(f)) f = f / sqrt(np.sum(np.sum(f**2))) # Reshaped image alt_fb[:, :, 0, count] = f count += 1 if (plot): plt.imshow(f, cmap='Greys') plt.show() return (np.array(alt_fb))
def plot_sensor_data(ifig, sensor_data, time, initial_door_dist=None, axis = None, \ flux_timestep=1, \ savefig=False, filename='fig.png', cmap='winter'): """ When a sensor line is defined this function allows to draw the \ repartition of the people exit times. Parameters ---------- ifig: int figure number sensor_data : numpy array [time, direction, intersection_point[2]] for each individual time: float time in seconds initial_door_dist: numpy array people initial distance to the door axis: numpy array matplotlib axis : [xmin, xmax, ymin, ymax] flux_timestep: float timestep for the fluxes : number of persons per flux_timestep seconds savefig: boolean writes the figure as a png file if true filename: string png filename used to write the figure cmap: string matplotlib colormap name """ Np = sensor_data.shape[0] tmin = 0 tmax = time fig = plt.figure(ifig) plt.clf() ax1 = fig.add_subplot(211) if (initial_door_dist is None): ax1.plot(sp.arange(Np), sensor_data[:, 0], 'b+') ax1.set_title('Crossing time (s) vs people id') else: ax1.plot(initial_door_dist, sensor_data[:, 0], 'b+') ax1.set_title('Crossing time (s) vs initial door distance (m)') if (axis): ax1.set_xlim(axis[0], axis[1]) ax1.set_ylim(axis[2], axis[3]) #ax1.set_xticks([]) #ax1.set_yticks([]) #ax1.axis('off') tgrid = sp.arange(tmin, tmax, step=flux_timestep) tgrid = sp.append(tgrid, tgrid[-1] + flux_timestep) flux_exits = sp.zeros(tgrid.shape) flux_entries = sp.zeros(tgrid.shape) exits = sp.where(sensor_data[:, 1] == 1)[0] entries = sp.where(sensor_data[:, 1] == -1)[0] t_exits = sp.ceil((sensor_data[exits, 0] - tmin) / flux_timestep) t_entries = sp.ceil((sensor_data[entries, 0] - tmin) / flux_timestep) #t_exits = sp.floor((sensor_data[exits,0]-tmin)/flux_timestep) #t_entries = sp.floor((sensor_data[entries,0]-tmin)/flux_timestep) unique_exits, counts_exits = sp.unique(t_exits, return_counts=True) unique_entries, counts_entries = sp.unique(t_entries, return_counts=True) flux_exits[unique_exits.astype(int)] = counts_exits flux_entries[unique_entries.astype(int)] = counts_entries ax2 = fig.add_subplot(212) ax2.plot(tgrid, flux_entries, ':og', tgrid, flux_exits, ':or') ax2.set_title("Entries (green) and exits (red) per " + str(flux_timestep) + " s") if (axis): ax2.set_xlim(axis[0], axis[1]) ax2.set_ylim(axis[2], axis[3]) #ax2.set_xticks([]) #ax2.set_yticks([]) #ax2.axis('off') # Optionally : adds some histograms # if (exits.shape[0]>0): # ax3 = fig.add_subplot(413) # t_exits_sorted = sp.sort(sensor_data[exits,0]) # #print("t_exits_sorted = ",t_exits_sorted) # tmp = sp.concatenate(([0],t_exits_sorted)) # bins = 0.5*(tmp[:-1]+tmp[1:]) # widths = tmp[1:]-tmp[:-1] # heights = 1/widths # ax3.bar(bins, heights, width=widths,color='r',align='center') # # if (entries.shape[0]>0): # ax4 = fig.add_subplot(414) # t_entries_sorted = sp.sort(sensor_data[entries,0]) # tmp = sp.concatenate(([0],t_entries_sorted)) # bins = 0.5*(tmp[:-1]+tmp[1:]) # widths = tmp[1:]-tmp[:-1] # heights = 1/widths # ax4.bar(bins, heights, width=widths,color='r',align='center') fig.set_tight_layout(True) fig.canvas.draw() if (savefig): fig.savefig(filename, dpi=300)
def doskysub(straight, ylen, xlen, sci, yback, sky2x, sky2y, ccd2wave, disp, mswave, offsets, cutoff, airmass): sci = sci.copy() # If cutoff is not a float, we are using the blueside locutoff = cutoff hicutoff = 10400. nsci = sci.shape[0] width = sci.shape[2] # Perform telluric correction coords = spectools.array_coords(sci[0].shape) x = coords[1].flatten() y = coords[0].flatten() for k in range(nsci): w = genfunc(x, y, ccd2wave[k]) telluric = correct_telluric.correct(w, airmass[k], disp) sci[k] *= telluric.reshape(sci[k].shape) del coords, x, y, telluric # Create arrays for output images outcoords = spectools.array_coords((ylen, xlen)) outcoords[1] *= disp outcoords[1] += mswave - disp * xlen / 2. xout = outcoords[1].flatten() yout = outcoords[0].flatten() out = scipy.zeros((nsci, ylen, xlen)) fudge = scipy.ceil(abs(offsets).max()) bgimage = scipy.zeros((nsci, ylen + fudge, xlen)) varimage = bgimage.copy() bgcoords = spectools.array_coords((ylen + fudge, xlen)) bgcoords[1] *= disp bgcoords[1] += mswave - disp * xlen / 2. # # Cosmic Ray Rejection and Background Subtraction # yfit = yback.flatten() ycond = (yfit > straight - 0.4) & (yfit < straight + ylen - 0.6) coords = spectools.array_coords(yback.shape) xvals = coords[1].flatten() yvals = coords[0].flatten() ap_y = scipy.zeros(0) aper = scipy.zeros(0) for k in range(nsci): xfit = genfunc(xvals, yfit - straight, ccd2wave[k]) zfit = sci[k].flatten() x = xfit[ycond] y = yfit[ycond] z = zfit[ycond] # The plus/minus 20 provides a better solution for the edges wavecond = (x > locutoff - 20.) & (x < hicutoff + 20.) x = x[wavecond] y = y[wavecond] z = z[wavecond] # If only resampling... if RESAMPLE == 1: coords = outcoords.copy() samp_x = genfunc(xout, yout, sky2x[k]) samp_y = genfunc(xout, yout, sky2y[k]) coords[0] = samp_y.reshape(coords[0].shape) coords[1] = samp_x.reshape(coords[1].shape) out[k] = scipy.ndimage.map_coordinates(sci[k], coords, output=scipy.float64, order=5, cval=-32768, prefilter=False) out[k][xout.reshape(coords[1].shape) < locutoff] = scipy.nan out[k][xout.reshape(coords[1].shape) > hicutoff] = scipy.nan out[k][out[k] == -32768] = scipy.nan continue bgfit = skysub.skysub(x, y, z, disp) background = zfit.copy() for indx in range(background.size): x0 = xfit[indx] y0 = yfit[indx] if x0 < locutoff - 10 or x0 > hicutoff + 10: background[indx] = scipy.nan else: background[indx] = interpolate.bisplev(x0, y0, bgfit) sub = zfit - background sub[scipy.isnan(sub)] = 0. sky = sub * 0. sky[ycond] = sub[ycond] sky = sky.reshape(sci[k].shape) sub = sky.copy() background[scipy.isnan(background)] = 0. # Note that 2d filtering may flag very sharp source traces! sub = sub.reshape(sci[k].shape) sky = ndimage.median_filter(sky, 5) diff = sub - sky model = scipy.sqrt(background.reshape(sci[k].shape) + sky) crmask = scipy.where(diff > 4. * model, diff, 0.) 
sub -= crmask sci[k] -= crmask # Create straightened slit coords = outcoords.copy() samp_x = genfunc(xout, yout, sky2x[k]) samp_y = genfunc(xout, yout, sky2y[k]) coords[0] = samp_y.reshape(coords[0].shape) coords[1] = samp_x.reshape(coords[1].shape) out[k] = scipy.ndimage.map_coordinates(sci[k], coords, output=scipy.float64, order=5, cval=magicnum, prefilter=False) out[k][xout.reshape(coords[1].shape) < locutoff] = scipy.nan out[k][xout.reshape(coords[1].shape) > hicutoff] = scipy.nan out[k][out[k] == magicnum] = scipy.nan # Output bgsub image coords = bgcoords.copy() bgy = bgcoords[0].flatten() + offsets[k] bgx = bgcoords[1].flatten() samp_x = genfunc(bgx, bgy, sky2x[k]) samp_y = genfunc(bgx, bgy, sky2y[k]) coords[0] = samp_y.reshape(coords[0].shape) coords[1] = samp_x.reshape(coords[1].shape) varimage[k] = scipy.ndimage.map_coordinates(sci[k], coords, output=scipy.float64, order=5, cval=magicnum, prefilter=False) # Only include good data (ie positive variance, wavelength # greater than dichroic cutoff) cond = (bgcoords[0] + offsets[k] < 0.) | (bgcoords[0] + offsets[k] > ylen) cond = (varimage[k] <= 0) | cond cond = (bgcoords[1] < locutoff) | (bgcoords[1] > hicutoff) | cond varimage[k][cond] = scipy.nan bgimage[k] = scipy.ndimage.map_coordinates(sub, coords, output=scipy.float64, order=5, cval=magicnum, prefilter=False) bgimage[k][cond] = scipy.nan bgimage[k][bgimage[k] == magicnum] = scipy.nan # Shouldn't be # necessary... if RESAMPLE == 1: return out, bgimage, varimage bgimage = fastmed(bgimage) varimage = fastmed(varimage) / nsci return out, bgimage, varimage
def main(x_0, K): #np.arange(0.4,3.8,0.2): file_name = 'logistic_prob_sim_x_0_' + str(x_0) + '_K_' + str(K) output_file = file_name + '.pkl' dt = 0.25 frame_rate = 20 times_real_time = 20 # seconds of simulation / sec in video capture_interval = int(scipy.ceil(times_real_time * (1. / frame_rate) / dt)) simulation_time = 50. * 60. #seconds release_delay = 0. * 60 #/(wind_mag) t_start = 0.0 t = 0. - release_delay # Set up figure fig = plt.figure(figsize=(11, 11)) ax = fig.add_subplot(111) # Video FFMpegWriter = animate.writers['ffmpeg'] metadata = { 'title': file_name, } writer = FFMpegWriter(fps=frame_rate, metadata=metadata) writer.setup(fig, file_name + '.mp4', 500) wind_angle = 7 * scipy.pi / 8. wind_mag = 1.6 # wind_angle = 7*scipy.pi/4. wind_param = { 'speed': wind_mag, 'angle': wind_angle, 'evolving': False, 'wind_dt': None, 'dt': dt } wind_field = wind_models.WindField(param=wind_param) #traps number_sources = 8 radius_sources = 1000.0 trap_radius = 0.5 location_list, strength_list = utility.create_circle_of_sources( number_sources, radius_sources, None) trap_param = { 'source_locations': location_list, 'source_strengths': strength_list, 'epsilon': 0.01, 'trap_radius': trap_radius, 'source_radius': radius_sources } traps = trap_models.TrapModel(trap_param) #Wind and plume objects #Odor arena xlim = (-1500., 1500.) ylim = (-1500., 1500.) im_extents = xlim[0], xlim[1], ylim[0], ylim[1] source_pos = scipy.array( [scipy.array(tup) for tup in traps.param['source_locations']]) # Set up logistic prob plume object logisticPlumes = models.LogisticProbPlume(K, x_0, source_pos, wind_angle) #Setup fly swarm wind_slippage = (0., 1.) # wind_slippage = (0.,0.) swarm_size = 2000 use_empirical_release_data = False #Grab wind info to determine heading mean wind_x, wind_y = wind_mag * scipy.cos(wind_angle), wind_mag * scipy.sin( wind_angle) beta = 1. release_times = scipy.random.exponential(beta, (swarm_size, )) kappa = 2. 
heading_data = None #Flies also use parameters (for schmitt_trigger, detection probabilities) # determined in #fly_behavior_sim/near_plume_simulation_sutton.py swarm_param = { 'swarm_size': swarm_size, 'heading_data': heading_data, 'initial_heading': scipy.radians(scipy.random.uniform(0.0, 360.0, (swarm_size, ))), 'x_start_position': scipy.zeros(swarm_size), 'y_start_position': scipy.zeros(swarm_size), # For testing the 'inside band masks' # 'initial_heading' : (11./8)*scipy.pi*np.ones((swarm_size,)), # 'x_start_position' : scipy.linspace(0,800,swarm_size), # 'y_start_position' : scipy.zeros(swarm_size), 'flight_speed': scipy.full((swarm_size, ), 1.5), 'release_time': release_times, 'release_delay': release_delay, 'cast_interval': [1, 3], 'wind_slippage': wind_slippage, 'odor_thresholds': { 'lower': 0.0005, 'upper': 0.05 }, 'schmitt_trigger': False, 'low_pass_filter_length': 3, #seconds 'dt_plot': capture_interval * dt, 't_stop': 3000., 'cast_timeout': 20, 'airspeed_saturation': True # 'airspeed_saturation':False } swarm = swarm_models.ReducedSwarmOfFlies(wind_field, traps, param=swarm_param, start_type='fh') # xmin,xmax,ymin,ymax = -1000,1000,-1000,1000 #Concentration plotting conc_d = logisticPlumes.conc_im(im_extents) cmap = matplotlib.colors.ListedColormap(['white', 'orange']) cmap = 'YlOrBr' conc_im = plt.imshow(conc_d, extent=im_extents, interpolation='none', cmap=cmap, origin='lower') plt.colorbar() xmin, xmax, ymin, ymax = -1000, 1000, -1000, 1000 buffr = 100 ax.set_xlim((xmin - buffr, xmax + buffr)) ax.set_ylim((ymin - buffr, ymax + buffr)) #Initial fly plotting #Sub-dictionary for color codes for the fly modes Mode_StartMode = 0 Mode_FlyUpWind = 1 Mode_CastForOdor = 2 Mode_Trapped = 3 edgecolor_dict = { Mode_StartMode: 'blue', Mode_FlyUpWind: 'red', Mode_CastForOdor: 'red', Mode_Trapped: 'black' } facecolor_dict = { Mode_StartMode: 'blue', Mode_FlyUpWind: 'red', Mode_CastForOdor: 'white', Mode_Trapped: 'black' } fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode] fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode] fly_dots = plt.scatter(swarm.x_position, swarm.y_position, edgecolor=fly_edgecolors, facecolor=fly_facecolors, alpha=0.9) #Put the time in the corner (xmin, xmax) = ax.get_xlim() (ymin, ymax) = ax.get_ylim() text = '0 min 0 sec' timer = ax.text(xmax, ymax, text, color='r', horizontalalignment='right') ax.text(1., 1.02, 'time since release:', color='r', transform=ax.transAxes, horizontalalignment='right') # #traps for x, y in traps.param['source_locations']: #Black x plt.scatter(x, y, marker='x', s=50, c='k') # Red circles # p = matplotlib.patches.Circle((x, y), 15,color='red') # ax.add_patch(p) #Remove plot edges and add scale bar fig.patch.set_facecolor('white') plt.plot([-900, -800], [900, 900], color='k') #,transform=ax.transData,color='k') ax.text(-900, 820, '100 m') plt.axis('off') #Fly behavior color legend for mode, fly_facecolor, fly_edgecolor, a in zip( ['Dispersing', 'Surging', 'Casting', 'Trapped'], facecolor_dict.values(), edgecolor_dict.values(), [0, 50, 100, 150]): plt.scatter([1000], [-600 - a], edgecolor=fly_edgecolor, facecolor=fly_facecolor, s=20) plt.text(1050, -600 - a, mode, verticalalignment='center') plt.ion() # plt.show() # raw_input() while t < simulation_time: for k in range(capture_interval): #update flies print('t: {0:1.2f}'.format(t)) swarm.update(t, dt, wind_field, logisticPlumes, traps) t += dt # Update time display release_delay = release_delay / 60. 
text = '{0} min {1} sec'.format(int(scipy.floor(abs(t / 60.))), int(scipy.floor(abs(t) % 60.))) timer.set_text(text) # # '''plot the flies''' fly_dots.set_offsets(scipy.c_[swarm.x_position, swarm.y_position]) fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode] fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode] # fly_dots.set_edgecolor(fly_edgecolors) fly_dots.set_facecolor(fly_facecolors) # plt.pause(0.0001) writer.grab_frame() trap_list = [] for trap_num, trap_loc in enumerate(traps.param['source_locations']): mask_trap = swarm.trap_num == trap_num trap_cnt = mask_trap.sum() trap_list.append(trap_cnt) total_cnt = sum(trap_list) writer.finish()
def main(wind_mag,i):#np.arange(0.4,3.8,0.2): random_state = np.random.RandomState(i) file_name = 'test_lazy_plumes_single_plume_wind_mag_'+str(wind_mag) output_file = file_name+'.pkl' dt = 0.25 frame_rate = 20 times_real_time = 20 # seconds of simulation / sec in video capture_interval = int(scipy.ceil(times_real_time*(1./frame_rate)/dt)) simulation_time = 50.*60. #seconds release_delay = 0.*60#/(wind_mag) t_start = 0.0 t = 0. - release_delay # Set up figure fig = plt.figure(figsize=(11, 11)) ax = fig.add_subplot(111) # #Video FFMpegWriter = animate.writers['ffmpeg'] metadata = {'title':file_name,} writer = FFMpegWriter(fps=frame_rate, metadata=metadata) writer.setup(fig, file_name+'.mp4', 500) wind_angle = 0. wind_param = { 'speed': wind_mag, 'angle': wind_angle, 'evolving': False, 'wind_dt': None, 'dt': dt } wind_field_noiseless = wind_models.WindField(param=wind_param) #traps source_locations = [(0.,0.),] source_pos = scipy.array([scipy.array(tup) for tup in source_locations]).T trap_param = { 'source_locations' : [source_pos], 'source_strengths' : [1.], 'epsilon' : 0.01, 'trap_radius' : 1., 'source_radius' : 1000. } traps = trap_models.TrapModel(trap_param) #Odor arena xlim = (0., 1800.) ylim = (-500., 500.) sim_region = models.Rectangle(xlim[0], ylim[0], xlim[1], ylim[1]) wind_region = models.Rectangle(xlim[0]*2,ylim[0]*2, xlim[1]*2,ylim[1]*2) im_extents = xlim[0], xlim[1], ylim[0], ylim[1] #lazy plume parameters puff_mol_amount = 1. r_sq_max=20;epsilon=0.00001;N=1e6 lazyPompyPlumes = models.OnlinePlume(sim_region, source_pos, wind_field_noiseless, simulation_time,dt,r_sq_max,epsilon,puff_mol_amount,N) # Concentration plotting # conc_d = lazyPompyPlumes.conc_im(im_extents) # # cmap = matplotlib.colors.ListedColormap(['white', 'orange']) # cmap = 'YlOrBr' # # conc_im = plt.imshow(conc_d,extent=im_extents, # interpolation='none',cmap = cmap,origin='lower') # # plt.colorbar() # # # # # #traps # for x,y in traps.param['source_locations']: # # #Black x # plt.scatter(x,y,marker='x',s=50,c='k') # # plt.ion() # plt.show() while t<simulation_time: for k in range(capture_interval): #update flies print('t: {0:1.2f}'.format(t)) x_locs,y_locs = np.linspace(0., 1800.,1000),np.random.uniform(-500., 500.,1000) lazyPompyPlumes.value(x_locs,y_locs) raw_input() t+= dt # time.sleep(0.001) # Update live display # Update time display release_delay = release_delay/60. text ='{0} min {1} sec'.format( int(scipy.floor(abs(t/60.))),int(scipy.floor(abs(t)%60.))) timer.set_text(text) # '''plot the flies''' fly_dots.set_offsets(scipy.c_[swarm.x_position,swarm.y_position]) fly_edgecolors = [edgecolor_dict[mode] for mode in swarm.mode] fly_facecolors = [facecolor_dict[mode] for mode in swarm.mode] # fly_dots.set_edgecolor(fly_edgecolors) fly_dots.set_facecolor(fly_facecolors) plt.pause(0.0001) writer.grab_frame() trap_list = [] for trap_num, trap_loc in enumerate(traps.param['source_locations']): mask_trap = swarm.trap_num == trap_num trap_cnt = mask_trap.sum() trap_list.append(trap_cnt) total_cnt = sum(trap_list) # writer.finish() with open(output_file, 'w') as f: pickle.dump((wind_field,swarm),f)
def genphen(y_G0, G1, covDat, options, nInd, K1=None, fracCausal=None, randseed=None): ''' Generate synthetic phenotype with a LMM and linear kernels, using SNPs in G1 for signal, snps in GO for background, and one of two link functions. If genlink=='linear', uses linear LMM. If genlink='logistic', then thresholds to get binary. fracCausal is the fraction of SNPs that are causal (rounding up) when G1 is provided Only one of G1 and K1 can be not None (G1 is good for low rank, K1 for full rank) Returns: y (binary, or real-valued, as dictated by genlink) If y is binary, casefrac are 1s, and the rest 0s (default casefrac=0.5) Notes: uses sp.random.X so that the seed that was set can be used ''' sp.random.seed(int(randseed % 2147483647)) #old maxint if "numBackSnps" in options and options["numBackSnps"] > 0: raise Exception( "I accidentally deleted this move from FastLMmSet to here, see code for FastLmmSet.py from 11/24/2013" ) ## generate from the causal (not background) SNPs--------------- assert not (G1 is not None and K1 is not None), "need to provide only either G1 or K1" fracCausal = options['fracCausal'] if G1 is not None and options["varG"] > 0: if fracCausal > 1.0 or fracCausal < 0.01: raise Exception("fraCausal should be between 0.01 and 1") nSnp = G1.shape[1] if fracCausal != 1.0: nSnpNew = sp.ceil(fracCausal * nSnp) permutationIndex = utilx.generate_permutation( sp.arange(0, nSnp), randseed)[0:nSnpNew] G1new = G1[:, permutationIndex] else: nSnpNew = nSnp G1new = G1 elif K1 is not None: assert (fracCausal == 1.0 or fracCausal is None) pass else: assert options[ 'varG'] == 0, "varG is not zero, but neither G1 nor K1 were provided" stdG = sp.sqrt(options['varG']) if stdG > 0: if G1 is not None: y_G1 = stdG * G1new.dot(sp.random.randn(nSnpNew, 1)) #good for low rank else: K1chol = la.cholesky(K1) y_G1 = stdG * K1chol.dot(sp.random.randn(nInd, 1)) #good for full rank else: y_G1 = 0.0 ##---------------------------------------------------------------- if covDat is not None: nCov = covDat.shape[1] covWeights = sp.random.randn(nCov, 1) * sp.sqrt(options['varCov']) y_beta = covDat.dot(covWeights) else: y_beta = 0.0 y_noise_t = 0 #heavy-tailed noise if options['varET'] > 0: y_noise_t = sp.random.standard_t( df=options['varETd'], size=(nInd, 1)) * sp.sqrt(options['varET']) else: y_noise_t = 0 #gaussian noise y_noise = sp.random.randn(nInd, 1) * sp.sqrt(options['varE']) y = y_noise + y_noise_t + y_G0 + y_beta + y_G1 y = y[:, 0] #y.flatten() if options['link'] == 'linear': return y elif options['link'] == 'logistic': if options['casefrac'] is None: options['casefrac'] = 0.5 ysort = sp.sort(y, axis=None) thresh = ysort[sp.floor(nInd * options['casefrac'])] ybin = sp.array(y > thresh, dtype="float") return ybin else: raise Exception("Invald link function for data generation")
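# A minimal usage sketch for genphen above. All inputs are made-up values and
# the options keys simply mirror the ones the function reads; the legacy scipy
# namespace (sp.random, etc.) used throughout this file is assumed.
import scipy as sp
nInd, nSnp, nCov = 500, 200, 3
G1 = sp.random.randn(nInd, nSnp) / sp.sqrt(nSnp)   # causal SNPs
covDat = sp.random.randn(nInd, nCov)               # fixed-effect covariates
y_G0 = sp.zeros((nInd, 1))                         # no background signal here
options = {'fracCausal': 1.0, 'varG': 0.4, 'varCov': 0.1,
           'varE': 0.5, 'varET': 0.0, 'varETd': 3.0,
           'link': 'linear', 'casefrac': None}
y = genphen(y_G0, G1, covDat, options, nInd, randseed=42)   # real-valued phenotype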
def __init__(self, Ionodict, inifile, outdir, outfilelist=None): """ This function will create an instance of the RadarData class. It will take in the values and create the class and make raw IQ data. Inputs: sensdict - A dictionary of sensor parameters angles - A list of tuples which the first position is the az angle and the second position is the el angle. IPP - The interpulse period in seconds represented as a float. Tint - The integration time in seconds as a float. This will be the integration time of all of the beams. time_lim - The length of time of the simulation the number of time points will be calculated. pulse - A numpy array that represents the pulse shape. rng_lims - A numpy array of length 2 that holds the min and max range that the radar will cover. """ (sensdict, simparams) = readconfigfile(inifile) self.simparams = simparams N_angles = len(self.simparams['angles']) NNs = int(self.simparams['NNs']) self.sensdict = sensdict Npall = sp.floor(self.simparams['TimeLim'] / self.simparams['IPP']) Npall = int(sp.floor(Npall / N_angles) * N_angles) Np = Npall / N_angles print("All spectrums created already") filetimes = Ionodict.keys() filetimes.sort() ftimes = sp.array(filetimes) simdtype = self.simparams['dtype'] pulsetimes = sp.arange(Npall) * self.simparams['IPP'] + ftimes.min() pulsefile = sp.array( [sp.where(itimes - ftimes >= 0)[0][-1] for itimes in pulsetimes]) # differentiate between phased arrays and dish antennas if sensdict['Name'].lower() in ['risr', 'pfisr', 'risr-n']: beams = sp.tile(sp.arange(N_angles), Npall / N_angles) else: # for dish arrays brate = simparams['beamrate'] beams2 = sp.repeat(sp.arange(N_angles), brate) beam3 = sp.concatenate((beams2, beams2[::-1])) ntile = int(sp.ceil(Npall / len(beam3))) leftover = int(Npall - ntile * len(beam3)) if ntile > 0: beams = sp.tile(beam3, ntile) beams = sp.concatenate((beams, beam3[:leftover])) else: beams = beam3[:leftover] pulsen = sp.repeat(sp.arange(Np), N_angles) pt_list = [] pb_list = [] pn_list = [] fname_list = [] self.datadir = outdir self.maindir = outdir.parent self.procdir = self.maindir / 'ACF' Nf = len(filetimes) progstr = 'Data from {:d} of {:d} being processed Name: {:s}.' 
if outfilelist is None: print('\nData Now being created.') Noisepwr = v_Boltz * sensdict['Tsys'] * sensdict['BandWidth'] self.outfilelist = [] for ifn, ifilet in enumerate(filetimes): outdict = {} ifile = Ionodict[ifilet] ifilename = Path(ifile).name update_progress( float(ifn) / Nf, progstr.format(ifn, Nf, ifilename)) curcontainer = IonoContainer.readh5(ifile) if ifn == 0: self.timeoffset = curcontainer.Time_Vector[0, 0] pnts = pulsefile == ifn pt = pulsetimes[pnts] pb = beams[pnts] pn = pulsen[pnts].astype(int) rawdata = self.__makeTime__(pt, curcontainer.Time_Vector, curcontainer.Sphere_Coords, curcontainer.Param_List, pb) d_shape = rawdata.shape n_tempr = sp.random.randn(*d_shape).astype(simdtype) n_tempi = 1j * sp.random.randn(*d_shape).astype(simdtype) noise = sp.sqrt(Noisepwr / 2) * (n_tempr + n_tempi) outdict['AddedNoise'] = noise outdict['RawData'] = rawdata + noise outdict['RawDatanonoise'] = rawdata outdict['NoiseData'] = sp.sqrt(Noisepwr / 2) * ( sp.random.randn(len(pn), NNs).astype(simdtype) + 1j * sp.random.randn(len(pn), NNs).astype(simdtype)) outdict['Pulses'] = pn outdict['Beams'] = pb outdict['Time'] = pt fname = '{0:d} RawData.h5'.format(ifn) newfn = self.datadir / fname self.outfilelist.append(str(newfn)) dict2h5(str(newfn), outdict) #Listing info pt_list.append(pt) pb_list.append(pb) pn_list.append(pn) fname_list.append(fname) infodict = { 'Files': fname_list, 'Time': pt_list, 'Beams': pb_list, 'Pulses': pn_list } dict2h5(str(outdir.joinpath('INFO.h5')), infodict) else: infodict = h52dict(str(outdir.joinpath('INFO.h5'))) alltime = sp.hstack(infodict['Time']) self.timeoffset = alltime.min() self.outfilelist = outfilelist
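# Side check of the complex-noise scaling used above (standalone, made-up
# power level): splitting the total noise power evenly between the real and
# imaginary parts gives samples whose mean squared magnitude is the total power.
import scipy as sp
P = 4.0
n = sp.sqrt(P / 2) * (sp.random.randn(100000) + 1j * sp.random.randn(100000))
print(sp.mean(sp.absolute(n)**2))   # approximately 4.0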
def cov_from_bam(chrm, start, stop, files, subsample=0, verbose=False, bins=None, log=False, ax=None, ymax=0, outfile=None, frm='pdf', xlim=None, title=None, xoff=None, yoff=None, intron_cov=False, intron_cnt=False, marker_pos=None, col_idx=None, color_cov='blue', color_intron_cov='red', color_intron_edge='green', grid=False, strand=None, highlight=None, highlight_color='magenta', highlight_label=None, min_intron_cnt=0, return_legend_handle=False, label=None): """This function takes a list of bam files and a set of coordinates (chrm, start, stop), to plot a coverage overview of that files in that region.""" ### subsampling if subsample > 0 and len(files) > subsample: npr.seed(23) files = sp.array(files) files = npr.choice(files, subsample) ### augment chromosome name #chr_name = 'chr%s' % chrm chr_name = chrm (counts, intron_counts, intron_list) = _get_counts(chr_name, start, stop, files, intron_cov, intron_cnt, verbose, collapsed=True) ### get mean counts over all bam files counts /= len(files) if intron_cov: intron_counts /= len(files) if intron_cnt: for intron in intron_list: intron_list[intron] = math.ceil(intron_list[intron] / float(len(files))) if min_intron_cnt > 0: intron_list = dict([(x, intron_list[x]) for x in intron_list if intron_list[x] >= min_intron_cnt]) if col_idx is not None: counts = counts[col_idx] if intron_cov: intron_counts = intron_counts[col_idx] if intron_cnt: print >> sys.stderr, 'ERROR: column subsetting is currently not implemented for intron edges' sys.exit(1) ### bin counts according to options if bins is None: bins = counts.shape[0] bin_counts = counts bin_intron_counts = intron_counts if col_idx is not None: counts_x = sp.arange(col_idx.shape[0]) else: counts_x = range(start, stop + 1) else: if verbose: print >> sys.stdout, '... binning counts ...' 
bin_counts = sp.zeros((bins, )) bin_intron_counts = sp.zeros((bins, )) binsize = int(sp.ceil(float(counts.shape[0]) / bins)) for ii, i in enumerate(xrange(0, counts.shape[0], binsize)): bin_counts[ii] = sp.sum( counts[i:min(i + binsize, counts.shape[0] - 1)]) / binsize if intron_cov: bin_intron_counts[ii] = sp.sum( intron_counts[i:min(i + binsize, intron_counts.shape[0] - 1)]) / binsize if col_idx is not None: counts_x = sp.linspace(0, col_idx.shape[0], num=bins) else: counts_x = sp.linspace(start, stop, num=bins) ### use log if chosen if log: bin_counts = sp.log10(bin_counts + 1) bin_intron_counts = sp.log10(bin_intron_counts + 1) if intron_cnt: for intron in intron_list: if intron_list[intron] > 0: intron_list[intron] = sp.log10(intron_list[intron] + 1) if ax is None: fig = plt.figure(figsize=(10, 4)) ax = fig.add_subplot(111) if intron_cov: ax.fill_between(counts_x, bin_intron_counts, facecolor=color_intron_cov, edgecolor='none', alpha=0.5) ax.fill_between(counts_x, bin_counts, facecolor=color_cov, edgecolor='none', alpha=0.5) #ax.set_xticklabels([str(int(x)) for x in sp.linspace(start, stop, num = len(ax.get_xticklabels()))]) ax.set_xlabel('Position on contig %s' % chrm) ### draw strand if strand == '+': ax.arrow(0.05, 0.9, 0.2, 0, head_width=0.05, head_length=0.02, fc='#cccccc', ec='#cccccc', transform=ax.transAxes) elif strand == '-': ax.arrow(0.25, 0.9, -0.2, 0, head_width=0.05, head_length=0.02, fc='#cccccc', ec='#cccccc', transform=ax.transAxes) ### draw grid if grid: ax.grid(b=True, which='major', linestyle='--', linewidth=0.2, color='#222222') ax.xaxis.grid(False) if marker_pos is not None: ax.plot(0, marker_pos, 'or') if log: ax.set_ylabel('Read Coverage (log10)') else: ax.set_ylabel('Read Coverage') if ymax > 0: ax.set_ylim([0, ymax]) if highlight is not None: highlight_x(ax, highlight, highlight_color=highlight_color, label=highlight_label) if xlim is not None: ax.set_xlim(xlim) ax.autoscale(axis='y') ylim = ax.get_ylim() ax.set_ylim([0, ylim[1]]) if title is not None: ax.set_title(title) if xoff: ax.axes.get_xaxis().set_visible(False) if yoff: ax.axes.get_yaxis().set_visible(False) if intron_cnt: for intron in intron_list: add_intron_patch2(ax, start + intron[0], start + intron[1] + intron[0], intron_list[intron], color=color_intron_edge) if outfile is not None: plt.savefig(outfile, dpi=1200, format=frm) if return_legend_handle: if label is not None: return mpatches.Patch(color=color_cov, alpha=0.5, label=label) else: return mpatches.Patch(color=color_cov, alpha=0.5, label='Expression')
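# Standalone sketch of the binning step inside cov_from_bam above: coverage
# values are grouped into chunks of ceil(len(counts)/bins) positions and each
# bin stores the chunk sum divided by the bin size (the counts here are made up).
import scipy as sp
counts = sp.arange(10, dtype=float)                      # per-position coverage
bins = 4
binsize = int(sp.ceil(float(counts.shape[0]) / bins))    # -> 3
bin_counts = sp.zeros((bins, ))
for ii, i in enumerate(range(0, counts.shape[0], binsize)):
    bin_counts[ii] = counts[i:i + binsize].sum() / binsize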
def numberOfSegments(self): return int(sp.ceil(float(self.buf.size) / float(self.size)))
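# Quick numeric check of the ceil division above: a 10-sample buffer cut into
# segments of size 4 yields 3 segments, the last one only partially filled.
import scipy as sp
print(int(sp.ceil(float(10) / float(4))))   # -> 3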
V_reg = Vs/N_PT

# d. voltage drop in the compensator impedance is
V_drop = Zd_ohm * I_comp

# e. the voltage across the voltage relay
V_R = V_reg - V_drop

# f. recall that on a 120-V base, one step change on the regulator changes the voltage 0.75 V,
#    so take enough whole steps (ceil) to bring the relay-voltage magnitude back inside the band;
#    the effective ratio is a_R = 1 - 0.00625*Tap when raising and 1 + 0.00625*Tap when lowering
if abs(V_R) < Vset_min:
    Tap = ceil((Vset_min - abs(V_R))/0.75)
    action = ['raise', Tap]
    a_R = 1 - 0.00625*Tap
elif abs(V_R) > Vset_max:
    Tap = ceil((abs(V_R) - Vset_max)/0.75)
    action = ['lower', Tap]
    a_R = 1 + 0.00625*Tap

### g. regulator model coefficients
##
##a= a_R
##b=0
##c=0
##d=1/a_R
##
### 3. Example 7.6 on page 173
###~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Using the results of Examples 7.5, calculate the actual voltage at the load center
### assuming the 2500 kVA at 4.16 kV is measured at the substation transformer
### low-voltage terminals
##
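# Hedged numeric sketch of the tap logic above (all values made up): with a
# 2-V band centred on 120 V (Vset_min = 119, Vset_max = 121) and a compensated
# relay voltage of magnitude 117 V, the regulator needs ceil((119 - 117)/0.75)
# = 3 raise steps, and each step changes the effective ratio by 0.00625.
from scipy import ceil
Vset_min, Vset_max = 119.0, 121.0
V_R_mag = 117.0
Tap = ceil((Vset_min - V_R_mag) / 0.75)   # -> 3.0 steps, raise direction
a_R = 1 - 0.00625 * Tap                   # -> 0.98125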
def WcaretN(x): Delta = T / N prior = scipy.floor(x / Delta).astype(int) subsequent = scipy.ceil(x / Delta).astype(int) return scipy.sqrt(Delta) * (S[prior] + (x / Delta - prior) * (S[subsequent] - S[prior]))
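# A minimal sketch of the globals WcaretN expects (an assumed construction,
# matching the legacy scipy style of this file): S is the partial-sum path of
# N i.i.d. standard normal steps with S[0] = 0 and T is the time horizon, so
# WcaretN returns the piecewise-linear scaled random walk that approximates
# Brownian motion on [0, T].
import scipy
T, N = 1.0, 1000
S = scipy.concatenate(([0.0], scipy.cumsum(scipy.random.randn(N))))
x = scipy.linspace(0.0, T, 201)
W = WcaretN(x)   # approximate Brownian path sampled at the points in x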
def create_gabor(rot, RF_siz, Div, plot, num=10): """ this function creates a series of gabor filters @param rot: number rotations @param RF_siz: receptive field size @param Div: receptive field degree @param num: interval of pixel size to plot """ count = 0 numFilterSizes = len(RF_siz) numSimpleFilters = len(rot) lamb = (RF_siz * 2.) / Div sigma = lamb * 0.8 G = 0.3 phases = [0, np.pi / 2] # initialize filterbank alt_fb = np.zeros((65, 65, 1, 272), dtype=np.float32) # loop through number of filter sizes for k in tqdm(range(1, numFilterSizes + 1)): for r in tqdm(range(1, numSimpleFilters + 1)): f = np.zeros( [RF_siz[numFilterSizes - 1], RF_siz[numFilterSizes - 1]]) fx = np.zeros( [RF_siz[numFilterSizes - 1], RF_siz[numFilterSizes - 1]]) ## Parameters theta = rot[r - 1] * (np.pi / 180) filtSize = RF_siz[k - 1] img_center = ceil(33.0) ## New center for padding with zeros center = ceil(filtSize / 2.0) ## Old and possibly more accurate center filtSizeL = center - 1 filtSizeR = filtSize - filtSizeL - 1 sigmaq = (sigma[k - 1]) * (sigma[k - 1]) # Compute filter values for iPhi in range(1, 3): for i in range(int(-1 * filtSizeL), int(filtSizeR + 1)): for j in range(int(-1 * filtSizeL), int(filtSizeR + 1)): if (sqrt((i**2) + (j**2)) > (filtSize / 2)): E = 0 else: x = i * np.cos(theta) - j * np.sin(theta) y = i * np.sin(theta) + j * np.cos(theta) E = np.exp((-1 * ((x**2) + (G**2) * (y**2))) / (2 * sigmaq)) * np.cos(2 * np.pi * x / lamb[k - 1] + phases[iPhi - 1]) f[int(j + img_center - 1), int(i + img_center - 1)] = E ## Append to fb (filterbank) f = f - np.mean(np.mean(f)) f = f / sqrt(np.sum(np.sum(f**2))) # Reshaped image alt_fb[:, :, 0, count] = f count += 1 if (plot): if count % num == 0: plt.imshow(f, cmap='Greys') plt.show() return (np.array(alt_fb))
def edgebuffer(self, threshold, smooth):
    """
    Return how many coordinates to ignore at the end, computed as the ceiling
    of threshold divided by the smallest value in self._ld, i.e. enough
    coordinates to cover the threshold. The smooth argument is not used in
    this calculation.
    """
    return (int(scipy.ceil(threshold / min(self._ld))))
# plt.show()
target = 3
rumble = np.zeros([1, target * fs])
i = 0
print(len(rumble[0]))
while i < len(rumble[0]) - len(sound):
    # lay down one copy of the sound, then step back so successive copies
    # overlap by roughly one fifteenth of the sound length
    for sample in sound:
        rumble[0, i] += sample
        i += 1
    i -= int(ceil(len(sound) / 15))
# plt.plot(rumble.flatten())
# plt.show()
sd.play(rumble.flatten())
sd.wait()
sd.play(rumble.flatten(), fs * 1.5)
sd.wait()
sd.play(rumble.flatten(), fs * 3)
sd.wait()
def extent(arr, row): t = int(sp.floor(row.y1 / row.height * arr.shape[0])) l = int(sp.floor(row.x1 / row.width * arr.shape[1])) b = int(sp.ceil(row.y2 / row.height * arr.shape[0])) r = int(sp.ceil(row.x2 / row.width * arr.shape[1])) return (slice(t, b), slice(l, r))
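# Hypothetical usage of extent above: row carries a bounding box plus the
# width/height of the original image, arr is an array at a different
# resolution, and the returned slices crop the matching window.
import scipy as sp
from collections import namedtuple
Row = namedtuple('Row', ['x1', 'y1', 'x2', 'y2', 'width', 'height'])
row = Row(x1=10, y1=20, x2=50, y2=60, width=100, height=100)
arr = sp.zeros((200, 300))
window = extent(arr, row)   # (slice(40, 120), slice(30, 150))
patch = arr[window]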
def plotbeamparametersv2(times, configfile, maindir, fitdir='Fitted', params=['Ne'], filetemplate='params', suptitle='Parameter Comparison', werrors=False, nelog=True): """ This function will plot the desired parameters for each beam along range. The values of the input and measured parameters will be plotted Inputs Times - A list of times that will be plotted. configfile - The INI file with the simulation parameters that will be useds. maindir - The directory the images will be saved in. params - List of Parameter names that will be ploted. These need to match in the ionocontainer names. filetemplate - The first part of a the file names. suptitle - The supertitle for the plots. werrors - A bools that determines if the errors will be plotted. """ sns.set_style("whitegrid") sns.set_context("notebook") # rc('text', usetex=True) maindir = Path(maindir) ffit = maindir / fitdir / 'fitteddata.h5' inputfiledir = maindir / 'Origparams' (sensdict, simparams) = readconfigfile(configfile) paramslower = [ip.lower() for ip in params] Nt = len(times) Np = len(params) #Read in fitted data Ionofit = IonoContainer.readh5(str(ffit)) dataloc = Ionofit.Sphere_Coords pnames = Ionofit.Param_Names pnameslower = sp.array([ip.lower() for ip in pnames.flatten()]) p2fit = [ sp.argwhere(ip == pnameslower)[0][0] if ip in pnameslower else None for ip in paramslower ] time2fit = [None] * Nt # Have to fix this because of time offsets if times[0] == 0: times += Ionofit.Time_Vector[0, 0] for itn, itime in enumerate(times): filear = sp.argwhere(Ionofit.Time_Vector[:, 0] >= itime) if len(filear) == 0: filenum = len(Ionofit.Time_Vector) - 1 else: filenum = sp.argmin(sp.absolute(Ionofit.Time_Vector[:, 0] - itime)) time2fit[itn] = filenum times_int = [Ionofit.Time_Vector[i] for i in time2fit] # determine the beams angles = dataloc[:, 1:] rng = sp.unique(dataloc[:, 0]) b_arr = np.ascontiguousarray(angles).view( np.dtype((np.void, angles.dtype.itemsize * angles.shape[1]))) _, idx, invidx = np.unique(b_arr, return_index=True, return_inverse=True) beamlist = angles[idx] Nb = beamlist.shape[0] # Determine which imput files are to be used. dirlist = sorted(inputfiledir.glob('*.h5')) dirliststr = [str(i) for i in dirlist] sortlist, outime, outfilelist, timebeg, timelist_s = IonoContainer.gettimes( dirliststr) timelist = timebeg.copy() time2file = [None] * Nt time2intime = [None] * Nt # go through times find files and then times in files for itn, itime in enumerate(times): filear = sp.argwhere(timelist >= itime) if len(filear) == 0: filenum = [len(timelist) - 1] else: filenum = filear[0] flist1 = [] timeinflist = [] for ifile in filenum: filetimes = timelist_s[ifile] log1 = (filetimes[:, 0] >= times_int[itn][0]) & (filetimes[:, 0] < times_int[itn][1]) log2 = (filetimes[:, 1] > times_int[itn][0]) & (filetimes[:, 1] <= times_int[itn][1]) log3 = (filetimes[:, 0] <= times_int[itn][0]) & (filetimes[:, 1] > times_int[itn][1]) log4 = (filetimes[:, 0] > times_int[itn][0]) & (filetimes[:, 1] < times_int[itn][1]) curtimes1 = sp.where(log1 | log2 | log3 | log4)[0].tolist() flist1 = flist1 + [ifile] * len(curtimes1) timeinflist = timeinflist + curtimes1 time2intime[itn] = timeinflist time2file[itn] = flist1 nfig = int(sp.ceil(Nt * Nb)) imcount = 0 curfilenum = -1 # Loop for the figures for i_fig in range(nfig): lines = [None] * 2 labels = [None] * 2 (figmplf, axmat) = plt.subplots(int(sp.ceil(Np / 2)), 2, figsize=(20, 15), facecolor='w') axvec = axmat.flatten() # loop that goes through each axis loops through each parameter, beam # then time. 
for ax in axvec: if imcount >= Nt * Nb * Np: break imcount_f = float(imcount) itime = int(sp.floor(imcount_f / Nb / Np)) iparam = int(imcount_f / Nb - Np * itime) ibeam = int(imcount_f - (itime * Np * Nb + iparam * Nb)) curbeam = beamlist[ibeam] altlist = sp.sin(curbeam[1] * sp.pi / 180.) * rng curparm = paramslower[iparam] # Use Ne from input to compare the ne derived from the power. if curparm == 'nepow': curparm_in = 'ne' else: curparm_in = curparm curcoord = sp.zeros(3) curcoord[1:] = curbeam for iplot, filenum in enumerate(time2file[itime]): if curfilenum != filenum: curfilenum = filenum datafilename = dirlist[filenum] Ionoin = IonoContainer.readh5(str(datafilename)) if ('ti' in paramslower) or ('vi' in paramslower): Ionoin = maketi(Ionoin) pnames = Ionoin.Param_Names pnameslowerin = sp.array( [ip.lower() for ip in pnames.flatten()]) prmloc = sp.argwhere(curparm_in == pnameslowerin) if prmloc.size != 0: curprm = prmloc[0][0] # build up parameter vector bs the range values by finding the closest point in space in the input curdata = sp.zeros(len(rng)) for irngn, irng in enumerate(rng): curcoord[0] = irng tempin = Ionoin.getclosestsphere(curcoord)[0][ time2intime[itime]] Ntloc = tempin.shape[0] tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin))) curdata[irngn] = tempin[0, curprm] #actual plotting of the input data lines[0] = ax.plot(curdata, altlist, marker='o', c='b', linewidth=2)[0] labels[0] = 'Input Parameters' # Plot fitted data for the axis indxkep = np.argwhere(invidx == ibeam)[:, 0] curfit = Ionofit.Param_List[indxkep, time2fit[itime], p2fit[iparam]] rng_fit = dataloc[indxkep, 0] alt_fit = rng_fit * sp.sin(curbeam[1] * sp.pi / 180.) errorexist = 'n' + paramslower[iparam] in pnameslower if errorexist and werrors: eparam = sp.argwhere('n' + paramslower[iparam] == pnameslower)[0][0] curerror = Ionofit.Param_List[indxkep, time2fit[itime], eparam] lines[1] = ax.errorbar(curfit, alt_fit, xerr=curerror, fmt='-.', c='g', linewidth=2)[0] else: lines[1] = ax.plot(curfit, alt_fit, marker='o', c='g', linewidth=2)[0] labels[1] = 'Fitted Parameters' # get and plot the input data numplots = len(time2file[itime]) # set the limit for the parameter if curparm == 'vi': ax.set(xlim=[ -1.25 * sp.nanmax(sp.absolute(curfit)), 1.25 * sp.nanmax(sp.absolute(curfit)) ]) elif curparm_in != 'ne': ax.set(xlim=[ 0.75 * sp.nanmin(curfit), sp.minimum(1.25 * sp.nanmax(curfit), 8000.) ]) elif (curparm_in == 'ne') and nelog: ax.set_xscale('log') ax.set_xlabel(params[iparam]) ax.set_ylabel('Alt km') ax.set_title( '{0} vs Altitude, Time: {1}s Az: {2}$^o$ El: {3}$^o$'.format( params[iparam], times[itime], *curbeam)) imcount += 1 # save figure figmplf.suptitle(suptitle, fontsize=20) if None in labels: labels.remove(None) lines.remove(None) plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.) fname = filetemplate + '_{0:0>3}.png'.format(i_fig) plt.savefig(fname) plt.close(figmplf)
import odor_tracking_sim.swarm_models as swarm_models import odor_tracking_sim.trap_models as trap_models import odor_tracking_sim.wind_models as wind_models import odor_tracking_sim.utility as utility import odor_tracking_sim.simulation_running_tools as srt from pompy import data_importers, processors, models wind_mag = 0.4 file_name = 'vectorized_plume_debug' output_file = file_name + '.pkl' dt = 0.25 frame_rate = 20 times_real_time = 20 # seconds of simulation / sec in video capture_interval = int(scipy.ceil(times_real_time * (1. / frame_rate) / dt)) simulation_time = 50. * 60. #seconds release_delay = 20. * 60 # Set up figure fig = plt.figure(figsize=(11, 11)) ax = fig.add_subplot(111) #Video FFMpegWriter = animate.writers['ffmpeg'] metadata = { 'title': file_name, } writer = FFMpegWriter(fps=frame_rate, metadata=metadata) writer.setup(fig, file_name + '.mp4', 500)
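# Worked check of the capture_interval arithmetic above: each captured video
# frame represents times_real_time/frame_rate = 1 s of simulated time, so with
# dt = 0.25 s the script advances ceil(1.0/0.25) = 4 integration steps per frame.
steps_per_frame = int(scipy.ceil(times_real_time * (1. / frame_rate) / dt))
assert steps_per_frame == capture_interval == 4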
def plotacfs(coords, times, configfile, maindir, cartcoordsys=True, indisp=True, acfdisp=True, fitdisp=True, filetemplate='acf', suptitle='ACF Comparison', invacf=''): """ This will create a set of images that compare the input ISR acf to the output ISR acfs from the simulator. Inputs coords - An Nx3 numpy array that holds the coordinates of the desired points. times - A numpy list of times in seconds. configfile - The name of the configuration file used. cartcoordsys - (default True)A bool, if true then the coordinates are given in cartisian if false then it is assumed that the coords are given in sphereical coordinates. specsfilename - (default None) The name of the file holding the input spectrum. acfname - (default None) The name of the file holding the estimated ACFs. filetemplate (default 'spec') This is the beginning string used to save the images. """ # indisp = specsfilename is not None # acfdisp = acfname is not None maindir = Path(maindir).expanduser() sns.set_style("whitegrid") sns.set_context("notebook") acfname = maindir.joinpath('ACF', '00lags.h5') ffit = maindir.joinpath('Fitted', 'fitteddata.h5') specsfiledir = maindir.joinpath('Spectrums') (sensdict, simparams) = readconfigfile(configfile) simdtype = simparams['dtype'] npts = simparams['numpoints'] * 3.0 amb_dict = simparams['amb_dict'] if sp.ndim(coords) == 1: coords = coords[sp.newaxis, :] Nt = len(times) Nloc = coords.shape[0] sns.set_style("whitegrid") sns.set_context("notebook") pulse = simparams['Pulse'] ts = sensdict['t_s'] tau1 = sp.arange(pulse.shape[-1]) * ts if indisp: dirlist = [i.name for i in specsfiledir.glob('*.h5')] timelist = sp.array([float(i.split()[0]) for i in dirlist]) for itn, itime in enumerate(times): filear = sp.argwhere(timelist >= itime) if len(filear) == 0: filenum = len(timelist) - 1 else: filenum = filear[0][0] specsfilename = specsfiledir.joinpath(dirlist[filenum]) Ionoin = IonoContainer.readh5(str(specsfilename)) if itn == 0: specin = sp.zeros( (Nloc, Nt, Ionoin.Param_List.shape[-1])).astype( Ionoin.Param_List.dtype) omeg = Ionoin.Param_Names npts = Ionoin.Param_List.shape[-1] for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionoin.getclosest(ic, times)[0] else: tempin = Ionoin.getclosestsphere(ic, times)[0] # if sp.ndim(tempin)==1: # tempin = tempin[sp.newaxis,:] specin[icn, itn] = tempin[0, :] if acfdisp: Ionoacf = IonoContainer.readh5(str(acfname)) ACFin = sp.zeros( (Nloc, Nt, Ionoacf.Param_List.shape[-1])).astype(Ionoacf.Param_List.dtype) omeg = sp.arange(-sp.ceil((npts + 1) / 2), sp.floor( (npts + 1) / 2)) / ts / npts for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionoacf.getclosest(ic, times)[0] else: tempin = Ionoacf.getclosestsphere(ic, times)[0] if sp.ndim(tempin) == 1: tempin = tempin[sp.newaxis, :] ACFin[icn] = tempin # Determine the inverse ACF stuff if len(invacf) == 0: invacfbool = False else: invacfbool = True invfile = maindir.joinpath('ACFInv', '00lags' + invacf + '.h5') Ionoacfinv = IonoContainer.readh5(str(invfile)) ACFinv = sp.zeros((Nloc, Nt, Ionoacfinv.Param_List.shape[-1])).astype( Ionoacfinv.Param_List.dtype) for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionoacfinv.getclosest(ic, times)[0] else: tempin = Ionoacfinv.getclosestsphere(ic, times)[0] if sp.ndim(tempin) == 1: tempin = tempin[sp.newaxis, :] ACFinv[icn] = tempin if fitdisp: Ionofit = IonoContainer.readh5(str(ffit)) (omegfit, outspecsfit) = ISRspecmakeout(Ionofit.Param_List, sensdict['fc'], sensdict['fs'], simparams['species'], npts) Ionofit.Param_List = outspecsfit 
Ionofit.Param_Names = omegfit specfit = sp.zeros((Nloc, Nt, npts)) for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionofit.getclosest(ic, times)[0] else: tempin = Ionofit.getclosestsphere(ic, times)[0] if sp.ndim(tempin) == 1: tempin = tempin[sp.newaxis, :] specfit[icn] = tempin / npts / npts nfig = int(sp.ceil(Nt * Nloc / 3.)) imcount = 0 for i_fig in range(nfig): lines = [None] * 4 labels = [None] * 4 lines_im = [None] * 4 labels_im = [None] * 4 (figmplf, axmat) = plt.subplots(3, 2, figsize=(16, 12), facecolor='w') for ax in axmat: if imcount >= Nt * Nloc: break iloc = int(sp.floor(imcount / Nt)) itime = int(imcount - (iloc * Nt)) maxvec = [] minvec = [] if indisp: # apply ambiguity funciton to spectrum curin = specin[iloc, itime] (tau, acf) = spect2acf(omeg, curin) acf1 = scfft.ifftshift(acf)[:len(pulse)] * len(curin) rcs = acf1[0].real guess_acf = sp.dot(amb_dict['WttMatrix'], acf) guess_acf = guess_acf * rcs / guess_acf[0].real # fit to spectrums maxvec.append(guess_acf.real.max()) maxvec.append(guess_acf.imag.max()) minvec.append(acf1.real.min()) minvec.append(acf1.imag.min()) lines[0] = ax[0].plot(tau1 * 1e6, guess_acf.real, label='Input', linewidth=5)[0] labels[0] = 'Input ACF With Ambiguity Applied' lines_im[0] = ax[1].plot(tau1 * 1e6, guess_acf.imag, label='Input', linewidth=5)[0] labels_im[0] = 'Input ACF With Ambiguity Applied' if fitdisp: curinfit = specfit[iloc, itime] (taufit, acffit) = spect2acf(omegfit, curinfit) rcsfit = curinfit.sum() guess_acffit = sp.dot(amb_dict['WttMatrix'], acffit) guess_acffit = guess_acffit * rcsfit / guess_acffit[0].real lines[1] = ax[0].plot(tau1 * 1e6, guess_acffit.real, label='Input', linewidth=5)[0] labels[1] = 'Fitted ACF' lines_im[1] = ax[1].plot(tau1 * 1e6, guess_acffit.imag, label='Input', linewidth=5)[0] labels_im[1] = 'Fitted ACF' if acfdisp: lines[2] = ax[0].plot(tau1 * 1e6, ACFin[iloc, itime].real, label='Output', linewidth=5)[0] labels[2] = 'Estimated ACF' lines_im[2] = ax[1].plot(tau1 * 1e6, ACFin[iloc, itime].imag, label='Output', linewidth=5)[0] labels_im[2] = 'Estimated ACF' maxvec.append(ACFin[iloc, itime].real.max()) maxvec.append(ACFin[iloc, itime].imag.max()) minvec.append(ACFin[iloc, itime].real.min()) minvec.append(ACFin[iloc, itime].imag.min()) if invacfbool: lines[3] = ax[0].plot(tau1 * 1e6, ACFinv[iloc, itime].real, label='Output', linewidth=5)[0] labels[3] = 'Reconstructed ACF' lines_im[3] = ax[1].plot(tau1 * 1e6, ACFinv[iloc, itime].imag, label='Output', linewidth=5)[0] labels_im[3] = 'Reconstructed ACF' ax[0].set_xlabel(r'$\tau$ in $\mu$s') ax[0].set_ylabel('Amp') ax[0].set_title( 'Real Part' ) # Location {0}, Time {1}'.format(coords[iloc],times[itime])) ax[0].set_ylim(min(minvec), max(maxvec) * 1) ax[0].set_xlim([tau1.min() * 1e6, tau1.max() * 1e6]) ax[1].set_xlabel(r'$\tau$ in $\mu$s') ax[1].set_ylabel('Amp') ax[1].set_title( 'Imag Part' ) # Location {0}, Time {1}'.format(coords[iloc],times[itime])) ax[1].set_ylim(min(minvec), max(maxvec) * 1) ax[1].set_xlim([tau1.min() * 1e6, tau1.max() * 1e6]) imcount = imcount + 1 figmplf.suptitle(suptitle, fontsize=20) if None in labels: labels.remove(None) lines.remove(None) plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.) fname = filetemplate + '_{0:0>3}.png'.format(i_fig) plt.savefig(fname, dpi=300) plt.close(figmplf)
def main(plume_width_factor): file_name = 'plume_width_testing' output_file = file_name+'.pkl' file_name = file_name +'plume_width_factor'+str(plume_width_factor) dt = 0.25 plume_dt = 0.25 frame_rate = 20 times_real_time = 20 # seconds of simulation / sec in video capture_interval = int(scipy.ceil(times_real_time*(1./frame_rate)/dt)) simulation_time = 2.*60. #seconds release_delay = 30.*60#/(wind_mag) t_start = 0.0 t = 0. - release_delay # Set up figure fig = plt.figure(figsize=(11, 11)) ax = fig.add_subplot(111) wind_mag = 1.8 wind_angle = 13*scipy.pi/8. wind_param = { 'speed': wind_mag, 'angle': wind_angle, 'evolving': False, 'wind_dt': None, 'dt': dt } wind_field_noiseless = wind_models.WindField(param=wind_param) #traps number_sources = 8 radius_sources = 1000.0 trap_radius = 0.5 location_list, strength_list = utility.create_circle_of_sources(number_sources, radius_sources,None) trap_param = { 'source_locations' : location_list, 'source_strengths' : strength_list, 'epsilon' : 0.01, 'trap_radius' : trap_radius, 'source_radius' : radius_sources } traps = trap_models.TrapModel(trap_param) #Wind and plume objects #Odor arena xlim = (-1500., 1500.) ylim = (-1500., 1500.) sim_region = models.Rectangle(xlim[0], ylim[0], xlim[1], ylim[1]) wind_region = models.Rectangle(xlim[0]*1.2,ylim[0]*1.2, xlim[1]*1.2,ylim[1]*1.2) source_pos = scipy.array([scipy.array(tup) for tup in traps.param['source_locations']]).T #wind model setup diff_eq = False constant_wind_angle = wind_angle aspect_ratio= (xlim[1]-xlim[0])/(ylim[1]-ylim[0]) noise_gain=3. noise_damp=0.071 noise_bandwidth=0.71 wind_grid_density = 200 Kx = Ky = 10000 #highest value observed to not cause explosion: 10000 wind_field = models.WindModel(wind_region,int(wind_grid_density*aspect_ratio), wind_grid_density,noise_gain=noise_gain,noise_damp=noise_damp, noise_bandwidth=noise_bandwidth,Kx=Kx,Ky=Ky, diff_eq=diff_eq,angle=constant_wind_angle,mag=wind_mag) #Initial wind plotting -- subsampled velocity_field = wind_field.velocity_field u,v = velocity_field[:,:,0],velocity_field[:,:,1] x_origins,y_origins = wind_field._x_points,wind_field._y_points full_size = scipy.shape(u)[0] print(full_size) shrink_factor = 10 u,v = u[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor],\ v[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor] # x_origins,y_origins = x_origins[0:-1:full_size-1],\ # y_origins[0:-1:full_size-1] x_origins,y_origins = x_origins[0:full_size-1:shrink_factor],\ y_origins[0:full_size-1:shrink_factor] coords = scipy.array(list(itertools.product(x_origins, y_origins))) x_coords,y_coords = coords[:,0],coords[:,1] vector_field = ax.quiver(x_coords,y_coords,u,v) # plt.show() # Set up plume model centre_rel_diff_scale = plume_width_factor*2. # puff_release_rate = 0.001 puff_release_rate = 10 puff_spread_rate=0.005 puff_init_rad = 0.01 max_num_puffs=int(2e5) # max_num_puffs=100 plume_model = models.PlumeModel( sim_region, source_pos, wind_field,simulation_time+release_delay,plume_dt, centre_rel_diff_scale=centre_rel_diff_scale, puff_release_rate=puff_release_rate, puff_init_rad=puff_init_rad,puff_spread_rate=puff_spread_rate, max_num_puffs=max_num_puffs) # Create a concentration array generator array_z = 0.01 array_dim_x = 1000 array_dim_y = array_dim_x puff_mol_amount = 1. array_gen = processors.ConcentrationArrayGenerator( sim_region, array_z, array_dim_x, array_dim_y, puff_mol_amount) #Setup fly swarm wind_slippage = (0.,1.) 
swarm_size=2000 use_empirical_release_data = False #Grab wind info to determine heading mean wind_x,wind_y = wind_mag*scipy.cos(wind_angle),wind_mag*scipy.sin(wind_angle) beta = 1. release_times = scipy.random.exponential(beta,(swarm_size,)) kappa = 2. heading_data=None # xmin,xmax,ymin,ymax = -1000,1000,-1000,1000 #Initial concentration plotting conc_array = array_gen.generate_single_array(plume_model.puffs) xmin = sim_region.x_min; xmax = sim_region.x_max ymin = sim_region.y_min; ymax = sim_region.y_max im_extents = (xmin,xmax,ymin,ymax) vmin,vmax = 0.,50. cmap = matplotlib.colors.ListedColormap(['white', 'orange']) conc_im = ax.imshow(conc_array.T[::-1], extent=im_extents, vmin=vmin, vmax=vmax, cmap=cmap) xmin,xmax,ymin,ymax = -1000,1000,-1000,1000 buffr = 100 ax.set_xlim((xmin-buffr,xmax+buffr)) ax.set_ylim((ymin-buffr,ymax+buffr)) #Conc array gen to be used for the flies sim_region_tuple = plume_model.sim_region.as_tuple() box_min,box_max = sim_region_tuple[1],sim_region_tuple[2] #Put the time in the corner (xmin,xmax) = ax.get_xlim();(ymin,ymax) = ax.get_ylim() # text = '0 min 0 sec' # timer= ax.text(xmax,ymax,text,color='r',horizontalalignment='right') # ax.text(1.,1.02,'time since release:',color='r',transform=ax.transAxes, # horizontalalignment='right') # #traps for x,y in traps.param['source_locations']: #Black x plt.scatter(x,y,marker='x',s=50,c='orange') # Red circles # p = matplotlib.patches.Circle((x, y), 15,color='red') # ax.add_patch(p) # #Remove plot edges and add scale bar fig.patch.set_facecolor('white') # plt.plot([-900,-800],[900,900],color='k')#,transform=ax.transData,color='k') # ax.text(-900,820,'100 m') plt.axis('off') # plt.ion() # plt.show() # raw_input() while t<simulation_time: for k in range(capture_interval): #update flies print('t: {0:1.2f}'.format(t)) #update the swarm for j in range(int(dt/plume_dt)): wind_field.update(plume_dt) plume_model.update(plume_dt,verbose=True) t+= dt # plt.show() # Update live display if t>-30.*60.: # if t>-10.*60.: velocity_field = wind_field.velocity_field u,v = velocity_field[:,:,0],velocity_field[:,:,1] u,v = u[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor],\ v[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor] vector_field.set_UVC(u,v) # conc_array = array_gen.generate_single_array(plume_model.puffs) # conc_im.set_data(conc_array.T[::-1]) # # log_im = scipy.log(conc_array.T[::-1]) # cutoff_l = scipy.percentile(log_im[~scipy.isinf(log_im)],10) # cutoff_u = scipy.percentile(log_im[~scipy.isinf(log_im)],99) # # conc_im.set_data(log_im) # n = matplotlib.colors.Normalize(vmin=cutoff_l,vmax=cutoff_u) # conc_im.set_norm(n) plt.savefig(file_name+'.png',format='png') plt.show() t = simulation_time
def computeTreeParameters(my_id, tmpDirName, a, k, N_levels, params_simu): # L computation NB_DIGITS = params_simu.NB_DIGITS L = zeros(N_levels - 1, 'i') # array of poles numbers: 1 number per level for i in range(L.shape[0]): L[i] = L_computation(k, a * (2**i), NB_DIGITS) if (my_id == 0) and (params_simu.VERBOSE == 1): print("L = " + str(L)) # integration and interpolation data octtreeXcosThetas, octtreeWthetas, octtreeNthetas = octtreeXWN_computation( -1.0, 1.0, L, N_levels, params_simu.int_method_theta, params_simu.INCLUDE_BOUNDARIES) octtreeXthetas = zeros(octtreeXcosThetas.shape, 'd') for i in range(octtreeNthetas.shape[0]): Npoints = octtreeNthetas[i] octtreeXthetas[i, :Npoints] = arccos(octtreeXcosThetas[i, Npoints - 1::-1]) octtreeXphis, octtreeWphis, octtreeNphis = octtreeXWN_computation( 0.0, 2.0 * pi, L, N_levels, params_simu.int_method_phi, params_simu.INCLUDE_BOUNDARIES) #if (my_id==0): # print "Nthetas =", octtreeNthetas # print "Nphis =", octtreeNphis # order of interpolation NOrderInterpTheta = L[0] NOrderInterpPhi = L[0] # number of zones per theta #num_proc = MPI.COMM_WORLD.Get_size() #Ntheta_zones, Nphi_zones = directions_zones_calculation(num_proc) # now we write the info to disk writeScalarToDisk( NOrderInterpTheta, os.path.join(tmpDirName, 'octtree_data/NOrderInterpTheta.txt')) writeScalarToDisk( NOrderInterpPhi, os.path.join(tmpDirName, 'octtree_data/NOrderInterpPhi.txt')) #writeScalarToDisk(Ntheta_zones, os.path.join(tmpDirName, 'octtree_data/Ntheta_zones.txt') ) #writeScalarToDisk(Nphi_zones, os.path.join(tmpDirName, 'octtree_data/Nphi_zones.txt') ) writeASCIIBlitzArrayToDisk( L, os.path.join(tmpDirName, 'octtree_data/LExpansion.txt')) writeScalarToDisk( params_simu.alphaTranslation_smoothing_factor, os.path.join(tmpDirName, 'octtree_data/alphaTranslation_smoothing_factor.txt')) writeScalarToDisk( params_simu.alphaTranslation_thresholdRelValueMax, os.path.join(tmpDirName, 'octtree_data/alphaTranslation_thresholdRelValueMax.txt')) writeScalarToDisk( params_simu.alphaTranslation_RelativeCountAboveThreshold, os.path.join( tmpDirName, 'octtree_data/alphaTranslation_RelativeCountAboveThreshold.txt')) writeASCIIBlitzArrayToDisk( octtreeNthetas, os.path.join(tmpDirName, 'octtree_data/octtreeNthetas.txt')) writeASCIIBlitzArrayToDisk( octtreeNphis, os.path.join(tmpDirName, 'octtree_data/octtreeNphis.txt')) writeASCIIBlitzArrayToDisk( octtreeXthetas, os.path.join(tmpDirName, 'octtree_data/octtreeXthetas.txt')) writeASCIIBlitzArrayToDisk( octtreeXphis, os.path.join(tmpDirName, 'octtree_data/octtreeXphis.txt')) writeASCIIBlitzArrayToDisk( octtreeWthetas, os.path.join(tmpDirName, 'octtree_data/octtreeWthetas.txt')) writeASCIIBlitzArrayToDisk( octtreeWphis, os.path.join(tmpDirName, 'octtree_data/octtreeWphis.txt')) A_theta, B_theta, A_phi, B_phi = 0., pi, 0., 2. 
* pi N_theta, N_phi = octtreeNthetas[0], octtreeNphis[0] INCLUDED_THETA_BOUNDARIES, INCLUDED_PHI_BOUNDARIES = 0, 0 if (abs(octtreeXthetas[0, 0] - A_theta) <= 1.e-8) and ( abs(octtreeXthetas[0, N_theta - 1] - B_theta) <= 1.e-8): INCLUDED_THETA_BOUNDARIES = 1 if (abs(octtreeXphis[0, 0] - A_phi) <= 1.e-8) and (abs(octtreeXphis[0, N_phi - 1] - B_phi) <= 1.e-8): INCLUDED_PHI_BOUNDARIES = 1 writeScalarToDisk( INCLUDED_THETA_BOUNDARIES, os.path.join(tmpDirName, 'octtree_data/INCLUDED_THETA_BOUNDARIES.txt')) writeScalarToDisk( INCLUDED_PHI_BOUNDARIES, os.path.join(tmpDirName, 'octtree_data/INCLUDED_PHI_BOUNDARIES.txt')) # we now have to calculate the theta/phi abscissas for the coarsest level # These are needed for far-field computation L_coarsest = L_computation(k, a * (2**N_levels), NB_DIGITS) # theta abscissas NpointsTheta = L_coarsest + 1 DTheta = 0 if not params_simu.AUTOMATIC_THETAS and (params_simu.USER_DEFINED_NB_THETA > 0): NpointsTheta = params_simu.USER_DEFINED_NB_THETA else: NpointsThetaTmp = NpointsTheta * (params_simu.STOP_THETA - params_simu.START_THETA) / pi NpointsTheta = int(ceil(NpointsThetaTmp)) + 1 octtreeXthetas_coarsest = zeros(NpointsTheta, 'd') if NpointsTheta > 1: DTheta = (params_simu.STOP_THETA - params_simu.START_THETA) / (NpointsTheta - 1) for i in range(NpointsTheta): octtreeXthetas_coarsest[i] = params_simu.START_THETA + i * DTheta # make sure the last element is params_simu.STOP_THETA octtreeXthetas_coarsest[-1] = params_simu.STOP_THETA else: octtreeXthetas_coarsest[0] = params_simu.START_THETA # phis abscissas NpointsPhi = 2 * L_coarsest DPhi = 0 if not params_simu.AUTOMATIC_PHIS and (params_simu.USER_DEFINED_NB_PHI > 0): NpointsPhi = params_simu.USER_DEFINED_NB_PHI else: NpointsPhiTmp = NpointsPhi * (params_simu.STOP_PHI - params_simu.START_PHI) / (2.0 * pi) NpointsPhi = int(ceil(NpointsPhiTmp)) + 1 octtreeXphis_coarsest = zeros(NpointsPhi, 'd') if NpointsPhi > 1: DPhi = (params_simu.STOP_PHI - params_simu.START_PHI) / (NpointsPhi - 1) for i in range(NpointsPhi): octtreeXphis_coarsest[i] = params_simu.START_PHI + i * DPhi # make sure the last element is params_simu.STOP_PHI octtreeXphis_coarsest[-1] = params_simu.STOP_PHI else: octtreeXphis_coarsest[0] = params_simu.START_PHI if (my_id == 0): print( "Summary of sampling points at the coarsest level (used for far-field sampling)." ) print("L_coarsest =", L_coarsest) print("For 0 < theta < 180, NpointsTheta = L_coarsest + 1 =", L_coarsest + 1) print("For", params_simu.START_THETA / pi * 180, "< theta <", params_simu.STOP_THETA / pi * 180, ", NpointsTheta =", NpointsTheta, ", DTheta =", DTheta / pi * 180, "degrees") print("For 0 < phi < 360, NpointsPhi = 2 * L_coarsest =", 2 * L_coarsest) print("For", params_simu.START_PHI / pi * 180, "< phi <", params_simu.STOP_PHI / pi * 180, ", NpointsPhi =", NpointsPhi, ", DPhi =", DPhi / pi * 180, "degrees") writeASCIIBlitzArrayToDisk( octtreeXthetas_coarsest, os.path.join(tmpDirName, 'octtree_data/octtreeXthetas_coarsest.txt')) writeASCIIBlitzArrayToDisk( octtreeXphis_coarsest, os.path.join(tmpDirName, 'octtree_data/octtreeXphis_coarsest.txt')) MPI.COMM_WORLD.Barrier()
def plotspecs(coords, times, configfile, maindir, cartcoordsys=True, indisp=True, acfdisp=True, fitdisp=True, filetemplate='spec', suptitle='Spectrum Comparison'): """ This will create a set of images that compare the input ISR spectrum to the output ISR spectrum from the simulator. Inputs coords - An Nx3 numpy array that holds the coordinates of the desired points. times - A numpy list of times in seconds. configfile - The name of the configuration file used. cartcoordsys - (default True)A bool, if true then the coordinates are given in cartisian if false then it is assumed that the coords are given in sphereical coordinates. specsfilename - (default None) The name of the file holding the input spectrum. acfname - (default None) The name of the file holding the estimated ACFs. filetemplate (default 'spec') This is the beginning string used to save the images. """ sns.set_style("whitegrid") sns.set_context("notebook") maindir = Path(maindir).expanduser() acfname = maindir.joinpath('ACF', '00lags.h5') ffit = maindir.joinpath('Fitted', 'fitteddata.h5') specsfiledir = maindir.joinpath('Spectrums') (sensdict, simparams) = readconfigfile(configfile) simdtype = simparams['dtype'] npts = simparams['numpoints'] * 3.0 amb_dict = simparams['amb_dict'] if sp.ndim(coords) == 1: coords = coords[sp.newaxis, :] Nt = len(times) Nloc = coords.shape[0] sns.set_style("whitegrid") sns.set_context("notebook") if indisp: dirlist = [i.name for i in specsfiledir.glob('*.h5')] timelist = sp.array([float(i.split()[0]) for i in dirlist]) for itn, itime in enumerate(times): filear = sp.argwhere(timelist >= itime) if len(filear) == 0: filenum = len(timelist) - 1 else: filenum = filear[0][0] specsfilename = specsfiledir.joinpath(dirlist[filenum]) Ionoin = IonoContainer.readh5(str(specsfilename)) if itn == 0: specin = sp.zeros( (Nloc, Nt, Ionoin.Param_List.shape[-1])).astype( Ionoin.Param_List.dtype) omeg = Ionoin.Param_Names npts = Ionoin.Param_List.shape[-1] for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionoin.getclosest(ic, times)[0] else: tempin = Ionoin.getclosestsphere(ic, times)[0] specin[icn, itn] = tempin[0, :] / npts fs = sensdict['fs'] if acfdisp: Ionoacf = IonoContainer.readh5(str(acfname)) ACFin = sp.zeros( (Nloc, Nt, Ionoacf.Param_List.shape[-1])).astype(Ionoacf.Param_List.dtype) ts = sensdict['t_s'] omeg = sp.arange(-sp.ceil((npts - 1.) / 2.), sp.floor((npts - 1.) / 2.) 
+ 1) / ts / npts for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionoacf.getclosest(ic, times)[0] else: tempin = Ionoacf.getclosestsphere(ic, times)[0] if sp.ndim(tempin) == 1: tempin = tempin[sp.newaxis, :] ACFin[icn] = tempin specout = scfft.fftshift(scfft.fft(ACFin, n=npts, axis=-1), axes=-1) if fitdisp: Ionofit = IonoContainer.readh5(str(ffit)) (omegfit, outspecsfit) = ISRspecmakeout(Ionofit.Param_List, sensdict['fc'], sensdict['fs'], simparams['species'], npts) Ionofit.Param_List = outspecsfit Ionofit.Param_Names = omegfit specfit = sp.zeros((Nloc, Nt, npts)) for icn, ic in enumerate(coords): if cartcoordsys: tempin = Ionofit.getclosest(ic, times)[0] else: tempin = Ionofit.getclosestsphere(ic, times)[0] if sp.ndim(tempin) == 1: tempin = tempin[sp.newaxis, :] specfit[icn] = tempin / npts / npts nfig = int(sp.ceil(Nt * Nloc / 6.0)) imcount = 0 for i_fig in range(nfig): lines = [None] * 3 labels = [None] * 3 (figmplf, axmat) = plt.subplots(2, 3, figsize=(16, 12), facecolor='w') axvec = axmat.flatten() for iax, ax in enumerate(axvec): if imcount >= Nt * Nloc: break iloc = int(sp.floor(imcount / Nt)) itime = int(imcount - (iloc * Nt)) maxvec = [] if fitdisp: curfitspec = specfit[iloc, itime] rcsfit = curfitspec.sum() (taufit, acffit) = spect2acf(omegfit, curfitspec) guess_acffit = sp.dot(amb_dict['WttMatrix'], acffit) guess_acffit = guess_acffit * rcsfit / guess_acffit[0].real spec_intermfit = scfft.fftshift(scfft.fft(guess_acffit, n=npts)) lines[1] = ax.plot(omeg * 1e-3, spec_intermfit.real, label='Fitted Spectrum', linewidth=5)[0] labels[1] = 'Fitted Spectrum' if indisp: # apply ambiguity function to spectrum curin = specin[iloc, itime] rcs = curin.real.sum() (tau, acf) = spect2acf(omeg, curin) guess_acf = sp.dot(amb_dict['WttMatrix'], acf) guess_acf = guess_acf * rcs / guess_acf[0].real # fit to spectrums spec_interm = scfft.fftshift(scfft.fft(guess_acf, n=npts)) maxvec.append(spec_interm.real.max()) lines[0] = ax.plot(omeg * 1e-3, spec_interm.real, label='Input', linewidth=5)[0] labels[0] = 'Input Spectrum With Ambiguity Applied' if acfdisp: lines[2] = ax.plot(omeg * 1e-3, specout[iloc, itime].real, label='Output', linewidth=5)[0] labels[2] = 'Estimated Spectrum' maxvec.append(specout[iloc, itime].real.max()) ax.set_xlabel('f in kHz') ax.set_ylabel('Amp') ax.set_title('Location {0}, Time {1}'.format( coords[iloc], times[itime])) ax.set_ylim(0.0, max(maxvec) * 1) ax.set_xlim([-fs * 5e-4, fs * 5e-4]) imcount = imcount + 1 figmplf.suptitle(suptitle, fontsize=20) if None in labels: labels.remove(None) lines.remove(None) plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.) fname = filetemplate + '_{0:0>3}.png'.format(i_fig) plt.savefig(fname) plt.close(figmplf)
def getRegion(self, size=3e4, min_nSNPs=1, chrom_i=None, pos_min=None, pos_max=None):
    """
    Sample a region from the genotype matrix X (or from the bed file).
    size: size of the region in base pairs (size == 1 selects a single SNP)
    min_nSNPs: minimum number of SNPs the region must contain
    chrom_i: restrict to this chromosome before sampling (random if None)
    pos_min, pos_max: optional position bounds for the candidate SNPs
    Returns:
    Xr: genotypes of the sampled region
    region: array [chrom, start position, end position]
    """
    if (self.chrom is None) or (self.pos is None):
        bim = plink_reader.readBIM(self.bfile, usecols=(0, 1, 2, 3))
        chrom = SP.array(bim[:, 0], dtype=int)
        pos = SP.array(bim[:, 3], dtype=int)
    else:
        chrom = self.chrom
        pos = self.pos

    if chrom_i is None:
        n_chroms = chrom.max()
        chrom_i = int(SP.ceil(SP.rand() * n_chroms))

    pos = pos[chrom == chrom_i]
    chrom = chrom[chrom == chrom_i]

    ipos = SP.ones(len(pos), dtype=bool)
    if pos_min is not None:
        ipos = SP.logical_and(ipos, pos_min < pos)
    if pos_max is not None:
        ipos = SP.logical_and(ipos, pos < pos_max)
    pos = pos[ipos]
    chrom = chrom[ipos]

    if size == 1:
        # select a single SNP (floor keeps the index inside [0, n_snps - 1])
        idx = int(SP.floor(pos.shape[0] * SP.rand()))
        cis = SP.arange(pos.shape[0]) == idx
        region = SP.array([chrom_i, pos[idx], pos[idx]])
    else:
        while True:
            idx = int(SP.floor(pos.shape[0] * SP.rand()))
            posT1 = pos[idx]
            posT2 = pos[idx] + size
            if posT2 <= pos.max():
                cis = chrom == chrom_i
                cis *= (pos > posT1) * (pos < posT2)
                if cis.sum() > min_nSNPs:
                    break
        region = SP.array([chrom_i, posT1, posT2])

    start = SP.nonzero(cis)[0].min()
    nSNPs = cis.sum()

    if self.X is None:
        rv = plink_reader.readBED(self.bfile, useMAFencoding=True,
                                  start=start, nSNPs=nSNPs, bim=bim)
        Xr = rv['snps']
    else:
        Xr = self.X[:, start:start + nSNPs]

    return Xr, region
def f(x_0,K): try: file_name = 'logistic_prob_sim_x_0_'+str(x_0)+'_K_'+str(K) output_file = file_name+'.pkl' dt = 0.25 frame_rate = 20 times_real_time = 20 # seconds of simulation / sec in video capture_interval = int(scipy.ceil(times_real_time*(1./frame_rate)/dt)) simulation_time = 50.*60. #seconds release_delay = 0.*60#/(wind_mag) t_start = 0.0 t = 0. - release_delay wind_angle = 7*scipy.pi/8. wind_mag = 1. # wind_angle = 7*scipy.pi/4. wind_param = { 'speed': wind_mag, 'angle': wind_angle, 'evolving': False, 'wind_dt': None, 'dt': dt } wind_field = wind_models.WindField(param=wind_param) #traps number_sources = 8 radius_sources = 1000.0 trap_radius = 0.5 location_list, strength_list = utility.create_circle_of_sources(number_sources, radius_sources,None) trap_param = { 'source_locations' : location_list, 'source_strengths' : strength_list, 'epsilon' : 0.01, 'trap_radius' : trap_radius, 'source_radius' : radius_sources } traps = trap_models.TrapModel(trap_param) #Wind and plume objects #Odor arena xlim = (-1500., 1500.) ylim = (-1500., 1500.) im_extents = xlim[0], xlim[1], ylim[0], ylim[1] source_pos = scipy.array([scipy.array(tup) for tup in traps.param['source_locations']]) # Set up logistic prob plume object logisticPlumes = models.LogisticProbPlume(K,x_0,source_pos,wind_angle) #To document the plume parameters, save a reference plot of the plume probability curve plt.figure() inputs = np.linspace(0,1000,1000) outputs = logisticPlumes.logistic_1d(inputs) plt.plot(inputs,outputs) plt.title('Logistic Curve with K: '+str(K)+', x_0: '+str(x_0),color='purple') plt.xlim(0,1000.) plt.ylim(-0.02,1.) plt.xlabel('Distance from Trap (m)') plt.ylabel('Trap Arrival Probability') plt.savefig(file_name+'.png',format='png') # Setup fly swarm wind_slippage = (0.,1.) # swarm_size=2000 swarm_size=20000 use_empirical_release_data = False #Grab wind info to determine heading mean wind_x,wind_y = wind_mag*scipy.cos(wind_angle),wind_mag*scipy.sin(wind_angle) beta = 1. release_times = scipy.random.exponential(beta,(swarm_size,)) kappa = 2. heading_data=None #Flies also use parameters (for schmitt_trigger, detection probabilities) # determined in #fly_behavior_sim/near_plume_simulation_sutton.py swarm_param = { 'swarm_size' : swarm_size, 'heading_data' : heading_data, 'initial_heading' : scipy.radians(scipy.random.uniform(0.0,360.0,(swarm_size,))), 'x_start_position' : scipy.zeros(swarm_size), 'y_start_position' : scipy.zeros(swarm_size), 'flight_speed' : scipy.full((swarm_size,), 1.5), 'release_time' : release_times, 'release_delay' : release_delay, 'cast_interval' : [1, 3], 'wind_slippage' : wind_slippage, 'odor_thresholds' : { 'lower': 0.0005, 'upper': 0.05 }, 'schmitt_trigger':False, 'low_pass_filter_length':3, #seconds 'dt_plot': capture_interval*dt, 't_stop':3000., 'cast_timeout':20, 'airspeed_saturation':True } swarm = swarm_models.ReducedSwarmOfFlies(wind_field,traps,param=swarm_param, start_type='fh') # xmin,xmax,ymin,ymax = -1000,1000,-1000,1000 xmin,xmax,ymin,ymax = -1000,1000,-1000,1000 # plt.show() # raw_input() while t<simulation_time: for k in range(capture_interval): #update flies print('t: {0:1.2f}'.format(t)) swarm.update(t,dt,wind_field,logisticPlumes,traps) t+= dt # time.sleep(0.001) with open(output_file, 'w') as f: pickle.dump((wind_field,swarm),f) except(ValueError): print('p>1 error for (x_0,K) pair '+str((x_0,K))) sys.exit()
def _update_canvas(self): """ Update the figure when the user changes an input value :return: """ # Get the parameters from the form bandwidth = float(self.bandwidth.text()) pulsewidth = float(self.pulsewidth.text()) range_window_length = float(self.range_window_length.text()) target_range = self.target_range.text().split(',') target_rcs = self.target_rcs.text().split(',') t_range = [float(r) for r in target_range] t_rcs = [float(r) for r in target_rcs] # Get the selected window from the form window_type = self.window_type.currentText() # Number of samples number_of_samples = int(ceil(4 * bandwidth * range_window_length / c)) if window_type == 'Kaiser': coefficients = kaiser(number_of_samples, 6, True) elif window_type == 'Blackman-Harris': coefficients = blackmanharris(number_of_samples, True) elif window_type == 'Hanning': coefficients = hanning(number_of_samples, True) elif window_type == 'Hamming': coefficients = hamming(number_of_samples, True) elif window_type == 'Rectangular': coefficients = ones(number_of_samples) # Time sampling t, dt = linspace(-0.5 * pulsewidth, 0.5 * pulsewidth, number_of_samples, retstep=True) # Sampled signal after mixing so = zeros(number_of_samples, dtype=complex) for r, rcs in zip(t_range, t_rcs): so += sqrt(rcs) * exp(1j * 2.0 * pi * bandwidth / pulsewidth * (2 * r / c) * t) # Fourier transform so = fftshift(fft(so * coefficients, 4 * number_of_samples)) # FFT frequencies frequencies = fftshift(fftfreq(4 * number_of_samples, dt)) # Range window range_window = 0.5 * frequencies * c * pulsewidth / bandwidth # Clear the axes for the updated plot self.axes1.clear() # Create the line plot self.axes1.plot( range_window, 20.0 * log10(abs(so) / number_of_samples + finfo(float).eps), '') self.axes1.set_xlim(min(t_range) - 5, max(t_range) + 5) self.axes1.set_ylim( -60, max(20.0 * log10(abs(so) / number_of_samples)) + 10) # Set the x and y axis labels self.axes1.set_xlabel("Range (m)", size=12) self.axes1.set_ylabel("Amplitude (dBsm)", size=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Set the plot title and labels self.axes1.set_title('Stretch Processor Range Profile', size=14) # Set the tick label size self.axes1.tick_params(labelsize=12) # Update the canvas self.my_canvas.draw()
def draw_from_gaussian_mix(N, Nx, gaussians, xint=[]): weights = sp.array([g.weight for g in gaussians]) mus = sp.array([g.mu for g in gaussians]) sigmas = sp.array([g.sigma for g in gaussians]) Ns = sp.array( [int(sp.ceil(g.weight * N / sum(weights))) for g in gaussians]) K = len(gaussians) # Draw >= N data points from mixture model xis = [] for k in range(K): s = mus[k] + sigmas[k] * sp.random.randn(Ns[k]) xis.extend(s) # Shuffle xis and keep only N samples sp.random.shuffle(xis) xis = xis[:N] # Determine x interval xint_tight = max(xis) - min(xis) xmin = min(xis) - 0.2 * xint_tight xmax = max(xis) + 0.2 * xint_tight xspan = xmax - xmin xmid = 0.5 * (xmax + xmin) # If xint is specified, move all data so that it falls within this interval if len(xint) == 2: new_xint = xint new_xmid = sp.mean(new_xint) new_xspan = new_xint[1] - new_xint[0] assert (new_xspan > 0) # Shift and rescale xis and mus xis = (new_xspan / xspan) * (xis - xmid) + new_xmid mus = (new_xspan / xspan) * (mus - xmid) + new_xmid # Rescale sigmas sigmas = (new_xspan / xspan) * sigmas # Now set rest of stuff xmin = new_xint[0] xmax = new_xint[1] xspan = new_xspan xmid = new_xmid # Make new gaussians gaussians = make_gaussian_mix(K=K, mus=mus, sigmas=sigmas, weights=weights) # Create a grid of points dx = (xmax - xmin) / (Nx - 1) xgrid = sp.linspace(xmin, xmax, Nx) xint = sp.array([xmin, xmax]) # Compute true distribution at gridpoints R = sp.zeros(Nx) for k in range(K): R = R + (weights[k] / sp.sqrt(2 * sp.pi * sigmas[k]**2)) * sp.exp( -(xgrid - mus[k])**2 / (2 * sigmas[k]**2)) Q = R / sum(dx * R) details = Details() details.extended_Q_star = Q details.xis = xis details.extended_xgrid = xgrid details.xint = xint details.gaussians = gaussians # Compute cubic spline interpolation of true distribution field phi_true_func = interp1d(xgrid, -sp.log(Q), kind='cubic') Q_true_func = lambda x: sp.exp(-phi_true_func(x)) # Return #return [xis, xgrid, Q] return [xis, xint, Q_true_func, details]
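# Hypothetical usage of draw_from_gaussian_mix: make_gaussian_mix and the
# Details class are assumed to come from the same module as the function
# above, and the mixture parameters here are made up.
import scipy as sp
gaussians = make_gaussian_mix(K=2, mus=sp.array([-2.0, 3.0]),
                              sigmas=sp.array([1.0, 0.5]),
                              weights=sp.array([0.3, 0.7]))
xis, xint, Q_true_func, details = draw_from_gaussian_mix(N=1000, Nx=200,
                                                         gaussians=gaussians)
# xis: the samples, xint: the x interval, Q_true_func: callable true density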
def lagdict2ionocont(DataLags, NoiseLags, sensdict, simparams, time_vec): """This function will take the data and noise lags and create an instance of the Ionocontanier class. This function will also apply the summation rule to the lags. Inputs DataLags - A dictionary """ # Pull in Location Data angles = simparams['angles'] ang_data = sp.array([[iout[0], iout[1]] for iout in angles]) rng_vec = simparams['Rangegates'] n_samps = len(rng_vec) # pull in other data pulse = simparams['Pulse'] p_samps = len(pulse) pulsewidth = p_samps * sensdict['t_s'] txpower = sensdict['Pt'] if sensdict['Name'].lower() in ['risr', 'pfisr', 'risr-n']: Ksysvec = sensdict['Ksys'] else: beamlistlist = sp.array(simparams['outangles']).astype(int) inplist = sp.array([i[0] for i in beamlistlist]) Ksysvec = sensdict['Ksys'][inplist] ang_data_temp = ang_data.copy() ang_data = sp.array( [ang_data_temp[i].mean(axis=0) for i in beamlistlist]) sumrule = simparams['SUMRULE'] rng_vec2 = simparams['Rangegatesfinal'] Nrng2 = len(rng_vec2) minrg = p_samps - 1 + sumrule[0].min() maxrg = Nrng2 + minrg # Copy the lags lagsData = DataLags['ACF'].copy() # Set up the constants for the lags so they are now # in terms of density fluxtuations. angtile = sp.tile(ang_data, (Nrng2, 1)) rng_rep = sp.repeat(rng_vec2, ang_data.shape[0], axis=0) coordlist = sp.zeros((len(rng_rep), 3)) [coordlist[:, 0], coordlist[:, 1:]] = [rng_rep, angtile] (Nt, Nbeams, Nrng, Nlags) = lagsData.shape # make a range average to equalize out the conntributions from each gate plen2 = int(sp.floor(float(p_samps - 1) / 2)) samps = sp.arange(0, p_samps, dtype=int) rng_ave = sp.zeros((Nrng, p_samps)) for isamp in range(plen2, Nrng + plen2): for ilag in range(p_samps): toplag = int(sp.floor(float(ilag) / 2)) blag = int(sp.ceil(float(ilag) / 2)) if toplag == 0: sampsred = samps[blag:] else: sampsred = samps[blag:-toplag] cursamps = isamp - sampsred keepsamps = sp.logical_and(cursamps >= 0, cursamps < Nrng) cursamps = cursamps[keepsamps] rng_samps = rng_vec[cursamps]**2 * 1e6 keepsamps2 = rng_samps > 0 if keepsamps2.sum() == 0: continue rng_samps = rng_samps[keepsamps2] rng_ave[isamp - plen2, ilag] = 1. / (sp.mean(1. / (rng_samps))) rng_ave_temp = rng_ave.copy() if simparams['Pulsetype'].lower() is 'barker': rng_ave_temp = rng_ave[:, 0][:, sp.newaxis] # rng_ave = rng_ave[int(sp.floor(plen2)):-int(sp.ceil(plen2))] # rng_ave = rng_ave[minrg:maxrg] rng3d = sp.tile(rng_ave_temp[sp.newaxis, sp.newaxis, :, :], (Nt, Nbeams, 1, 1)) ksys3d = sp.tile(Ksysvec[sp.newaxis, :, sp.newaxis, sp.newaxis], (Nt, 1, Nrng, Nlags)) # rng3d = sp.tile(rng_ave[:, sp.newaxis, sp.newaxis, sp.newaxis], (1, Nlags, Nt, Nbeams)) # ksys3d = sp.tile(Ksysvec[sp.newaxis, sp.newaxis, sp.newaxis, :], (Nrng2, Nlags, Nt, 1)) radar2acfmult = rng3d / (pulsewidth * txpower * ksys3d) pulses = sp.tile(DataLags['Pulses'][:, :, sp.newaxis, sp.newaxis], (1, 1, Nrng, Nlags)) time_vec = time_vec[:Nt] # Divid lags by number of pulses lagsData = lagsData / pulses # Set up the noise lags and divid out the noise. 
lagsNoise = NoiseLags['ACF'].copy() lagsNoise = sp.mean(lagsNoise, axis=2) pulsesnoise = sp.tile(NoiseLags['Pulses'][:, :, sp.newaxis], (1, 1, Nlags)) lagsNoise = lagsNoise / pulsesnoise lagsNoise = sp.tile(lagsNoise[:, :, sp.newaxis, :], (1, 1, Nrng, 1)) # multiply the data and the sigma by inverse of the scaling from the radar lagsData = lagsData * radar2acfmult lagsNoise = lagsNoise * radar2acfmult # Apply summation rule # lags transposed from (time,beams,range,lag)to (range,lag,time,beams) lagsData = sp.transpose(lagsData, axes=(2, 3, 0, 1)) lagsNoise = sp.transpose(lagsNoise, axes=(2, 3, 0, 1)) lagsDatasum = sp.zeros((Nrng2, Nlags, Nt, Nbeams), dtype=lagsData.dtype) lagsNoisesum = sp.zeros((Nrng2, Nlags, Nt, Nbeams), dtype=lagsNoise.dtype) for irngnew, irng in enumerate(sp.arange(minrg, maxrg)): for ilag in range(Nlags): lsumtemp = lagsData[irng + sumrule[0, ilag]:irng + sumrule[1, ilag] + 1, ilag].sum(axis=0) lagsDatasum[irngnew, ilag] = lsumtemp nsumtemp = lagsNoise[irng + sumrule[0, ilag]:irng + sumrule[1, ilag] + 1, ilag].sum(axis=0) lagsNoisesum[irngnew, ilag] = nsumtemp # subtract out noise lags lagsDatasum = lagsDatasum - lagsNoisesum # Put everything in a parameter list Paramdata = sp.zeros((Nbeams * Nrng2, Nt, Nlags), dtype=lagsData.dtype) # Put everything in a parameter list # transpose from (range,lag,time,beams) to (beams,range,time,lag) # lagsDatasum = lagsDatasum*radar2acfmult # lagsNoisesum = lagsNoisesum*radar2acfmult lagsDatasum = sp.transpose(lagsDatasum, axes=(3, 0, 2, 1)) lagsNoisesum = sp.transpose(lagsNoisesum, axes=(3, 0, 2, 1)) # multiply the data and the sigma by inverse of the scaling from the radar # lagsDatasum = lagsDatasum*radar2acfmult # lagsNoisesum = lagsNoisesum*radar2acfmult # Calculate a variance using equation 2 from Hysell's 2008 paper. Done use full covariance matrix because assuming nearly diagonal. # Get the covariance matrix pulses_s = sp.transpose(pulses, axes=(1, 2, 0, 3))[:, :Nrng2] Cttout = makeCovmat(lagsDatasum, lagsNoisesum, pulses_s, Nlags) Paramdatasig = sp.zeros((Nbeams * Nrng2, Nt, Nlags, Nlags), dtype=Cttout.dtype) curloc = 0 for irng in range(Nrng2): for ibeam in range(Nbeams): Paramdata[curloc] = lagsDatasum[ibeam, irng].copy() Paramdatasig[curloc] = Cttout[ibeam, irng].copy() curloc += 1 ionodata = IonoContainer(coordlist, Paramdata, times=time_vec, ver=1, paramnames=sp.arange(Nlags) * sensdict['t_s']) ionosigs = IonoContainer( coordlist, Paramdatasig, times=time_vec, ver=1, paramnames=sp.arange(Nlags**2).reshape(Nlags, Nlags) * sensdict['t_s']) return (ionodata, ionosigs)