def test_plot(expect, ans, options=0):
    xd = pylab.linspace(0, 1, 100)
    expr = '[ %s for x in xd]' % ans
    try:
        yd = eval(expr)
    except Exception as err:
        # HTML-escape '<' so the error message renders safely in the page
        msg = "Sorry, cannot evaluate your expression, err=%s" % str(err).replace('<', '&lt;')
        return dict(ok=False, msg=msg)
    imgdata = StringIO.StringIO()
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    ax.plot(xd, yd, 'ro')
    ax.plot(xd, yd)
    ax.grid()
    fig.savefig(imgdata, format='png')
    pylab.close()
    imgdata.seek(0)  # rewind the data
    uri = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf))
    msg = '<html><img src="%s"/>' % uri
    area = sum(yd) / (1.0 * len(xd))
    msg += '<p>Area=%s, expected area=%s</p></html>' % (area, options)
    ok = abs(area - float(options)) < 0.001
    return dict(ok=bool(ok), msg=msg)
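# A minimal Python 3 sketch of the same PNG-to-data-URI step used above.
# StringIO.StringIO, the .buf attribute, and urllib.quote no longer exist in
# Python 3, so io.BytesIO and urllib.parse.quote stand in for them; the
# function name figure_to_data_uri is hypothetical.
import base64
import io
import urllib.parse

def figure_to_data_uri(fig):
    # Render the figure to an in-memory PNG and wrap it in a data URI.
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    encoded = base64.b64encode(buf.getvalue()).decode('ascii')
    return 'data:image/png;base64,' + urllib.parse.quote(encoded)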
def plotKerasExperimentcifar10():
    index = 5
    for experiment_number in range(1, index + 1):
        outputPath_part_final = os.path.realpath(
            "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/hyperopt_experiment_withoutparam_accuracy"
            + str(experiment_number) + ".txt")
        output_plot = os.path.realpath(
            "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/plotErrorCurve"
            + str(experiment_number) + ".pdf")
        df = pd.read_csv(outputPath_part_final, delimiter='\t', header=None)
        df.drop(df.columns[[600]], axis=1, inplace=True)
        i = 1
        epochnum = []
        while i <= 250:
            epochnum.append(i)
            i = i + 1
        i = 0
        while i < 10:
            df_1 = df[df.columns[0:250]].ix[i]
            np.reshape(df_1, (1, 250))
            plt.plot(epochnum, df_1)
            i = i + 1
        # plt.show()
        plt.savefig(output_plot)
        plt.close()
def Doplots_monthly(mypathforResults, PlottingDF, variable_to_fill, Site_ID, units, item):
    ANN_label = str(item + "_NN")
    # Do Monthly Plots
    print "Doing Monthly plot"
    #t = arange(1, 54, 1)
    NN_label = 'Fc'
    Plottemp = PlottingDF[[NN_label, item]][PlottingDF['day_night'] != 1]
    #Plottemp = PlottingDF[[NN_label, item]].dropna(how='any')
    figure(1)
    pl.title('Nighttime ANN v Tower by year-month for ' + item + ' at ' + Site_ID)
    try:
        xdata1a = Plottemp[item].groupby([lambda x: x.year, lambda x: x.month]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[NN_label].groupby([lambda x: x.year, lambda x: x.month]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    if plotxdata1a == True:
        pl.plot(xdata1a, 'r', label=item)
    if plotxdata1b == True:
        pl.plot(xdata1b, 'b', label=NN_label)
    pl.ylabel('Flux')
    pl.xlabel('Year - Month')
    pl.legend()
    pl.savefig(mypathforResults + '/ANN and Tower plots by year and month for variable '
               + item + ' at ' + Site_ID)
    #pl.show()
    pl.close()
    time.sleep(1)
def plot_elecs_and_neurons(neuron_dict, ext_sim_dict, neural_sim_dict):
    pl.close('all')
    fig_all = pl.figure(figsize=[15, 15])
    ax_all = fig_all.add_axes([0.1, 0.1, 0.8, 0.8], frameon=False)
    for elec in xrange(len(ext_sim_dict['elec_z'])):
        ax_all.plot(ext_sim_dict['elec_z'][elec], ext_sim_dict['elec_y'][elec],
                    color='b', marker='$E%i$' % elec, markersize=20)
    legends = []
    for i, neur in enumerate(neuron_dict):
        folder = os.path.join(neural_sim_dict['output_folder'], neuron_dict[neur]['name'])
        coor = np.load(os.path.join(folder, 'coor.npy'))
        x, y, z = coor
        n_compartments = len(x)
        fig = pl.figure(figsize=[10, 10])
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], frameon=False)
        # Plot the electrodes
        for elec in xrange(len(ext_sim_dict['elec_z'])):
            ax.plot(ext_sim_dict['elec_z'][elec], ext_sim_dict['elec_y'][elec],
                    color='b', marker='$%i$' % elec, markersize=20)
        # Plot the neuron
        xmid, ymid, zmid = np.load(folder + '/coor.npy')
        xstart, ystart, zstart = np.load(folder + '/coor_start.npy')
        xend, yend, zend = np.load(folder + '/coor_end.npy')
        diam = np.load(folder + '/diam.npy')
        length = np.load(folder + '/length.npy')
        n_compartments = len(diam)
        for comp in xrange(n_compartments):
            if comp == 0:
                xcoords = pl.array([xmid[comp]])
                ycoords = pl.array([ymid[comp]])
                zcoords = pl.array([zmid[comp]])
                diams = pl.array([diam[comp]])
            else:
                if zmid[comp] < 0.400 and zmid[comp] > -.400:
                    xcoords = pl.r_[xcoords, pl.linspace(xstart[comp], xend[comp], length[comp] * 3 * 1000)]
                    ycoords = pl.r_[ycoords, pl.linspace(ystart[comp], yend[comp], length[comp] * 3 * 1000)]
                    zcoords = pl.r_[zcoords, pl.linspace(zstart[comp], zend[comp], length[comp] * 3 * 1000)]
                    diams = pl.r_[diams, pl.linspace(diam[comp], diam[comp], length[comp] * 3 * 1000)]
        argsort = pl.argsort(-xcoords)
        ax.scatter(zcoords[argsort], ycoords[argsort], s=20 * (diams[argsort] * 1000)**2,
                   c=xcoords[argsort], edgecolors='none', cmap='gray')
        ax_all.plot(zmid[0], ymid[0], marker='$%i$' % i, markersize=20,
                    label='%i: %s' % (i, neur))
        #legends.append('%i: %s' % (i, neur))
        ax.axis(ext_sim_dict['plot_range'])
        ax.axis('equal')
        ax.axis(ext_sim_dict['plot_range'])
        ax.set_xlabel('z [mm]')
        ax.set_ylabel('y [mm]')
        fig.savefig(os.path.join(neural_sim_dict['output_folder'],
                                 'neuron_figs', '%s.png' % neur))
    ax_all.axis('equal')
    ax.axis(ext_sim_dict['plot_range'])
    ax_all.set_xlabel('z [mm]')
    ax_all.set_ylabel('y [mm]')
    ax_all.legend()
    fig_all.savefig(os.path.join(neural_sim_dict['output_folder'], 'fig.png'))
def plotEventTime(library, num, eventNames, sizes, times, events, filename=None):
    from pylab import close, legend, plot, savefig, show, title, xlabel, ylabel
    import numpy as np
    close()
    arches = sizes.keys()
    bs = events[arches[0]].keys()[0]
    data = []
    names = []
    for event, color in zip(eventNames, ['b', 'g', 'r', 'y']):
        for arch, style in zip(arches, ['-', ':']):
            if event in events[arch][bs]:
                names.append(arch + '-' + str(bs) + ' ' + event)
                data.append(sizes[arch][bs])
                data.append(np.array(events[arch][bs][event])[:, 0])
                data.append(color + style)
            else:
                print 'Could not find %s in %s-%d events' % (event, arch, bs)
    print data
    plot(*data)
    title('Performance on ' + library + ' Example ' + str(num))
    xlabel('Number of Dof')
    ylabel('Time (s)')
    legend(names, 'upper left', shadow=True)
    if filename is None:
        show()
    else:
        savefig(filename)
    return
def manhattonPlot(phenotype_ID, pvalues_lm, ouFprefix, pos, chromBounds):
    for ip, p_ID in enumerate(phenotype_ID):
        pl.figure(figsize=[12, 4])
        plot_manhattan(posCum=pos['pos_cum'], pv=pvalues_lm[p_ID].values,
                       chromBounds=chromBounds, thr_plotting=0.05)
        pl.title(p_ID)
        pl.savefig(ouFprefix + '.' + p_ID + '.pdf')
        pl.close('all')
def testTelescope(self):
    import matplotlib
    matplotlib.use('AGG')
    import matplotlib.mlab as ml
    import pylab as pl
    import time
    w0 = 8.0
    k = 2 * np.pi / 3.0
    gb = GaussianBeam(w0, k)
    lens = ThinLens(150, 150)
    gb2 = lens * gb
    self.assertAlmostEqual(gb2._z0, gb._z0 + 2 * 150.0)
    lens2 = ThinLens(300, 600)
    gb3 = lens2 * gb2
    self.assertAlmostEqual(gb3._z0, gb2._z0 + 2 * 300.0)
    self.assertAlmostEqual(gb._w0, gb3._w0 / 2.0)
    z = np.arange(0, 150)
    z2 = np.arange(150, 600)
    z3 = np.arange(600, 900)
    pl.plot(z, gb.w(z, k), z2, gb2.w(z2, k), z3, gb3.w(z3, k))
    pl.grid()
    pl.xlabel('z')
    pl.ylabel('w')
    pl.savefig('testTelescope1.png')
    time.sleep(0.1)
    pl.close('all')
def plot(self, outputDirectory):
    """
    Plot both the raw kinetics data and the Arrhenius fit versus
    temperature. The plot is saved to the file ``kinetics.pdf`` in the output
    directory. The plot is not generated if ``matplotlib`` is not installed.
    """
    # Skip this step if matplotlib is not installed
    try:
        import pylab
    except ImportError:
        return
    Tlist = 1000.0 / numpy.arange(0.4, 3.35, 0.05)
    klist = numpy.zeros_like(Tlist)
    klist2 = numpy.zeros_like(Tlist)
    for i in range(Tlist.shape[0]):
        klist[i] = self.reaction.calculateTSTRateCoefficient(Tlist[i])
        klist2[i] = self.reaction.kinetics.getRateCoefficient(Tlist[i])
    order = len(self.reaction.reactants)
    klist *= 1e6 ** (order - 1)
    klist2 *= 1e6 ** (order - 1)
    pylab.semilogy(1000.0 / Tlist, klist, 'ok')
    pylab.semilogy(1000.0 / Tlist, klist2, '-k')
    pylab.xlabel('1000 / Temperature (1000/K)')
    pylab.ylabel('Rate coefficient ({0})'.format(self.kunits))
    pylab.savefig(os.path.join(outputDirectory, 'kinetics.pdf'))
    pylab.close()
def log_posterior(self, theta):
    model_g1, model_g2, limit_mask, _, _ = self.draw_model(theta)
    likelihood = self.log_likelihood(model_g1, model_g2, limit_mask)
    prior = self.log_prior(theta)
    if not np.isfinite(prior):
        posterior = -np.inf
    else:
        # use no info from prior for now
        posterior = likelihood
    if logger.level == logging.DEBUG:
        n_progress = 10
    elif logger.level == logging.INFO:
        n_progress = 1000
    if self.n_model_evals % n_progress == 0:
        logger.info('%7d post=% 2.8e like=% 2.8e prior=% 2.4e M200=% 6.3e ' % (
            self.n_model_evals, posterior, likelihood, prior, theta[0]))
    if np.isnan(posterior):
        import pdb; pdb.set_trace()
    if self.save_all_models:
        self.plot_residual_g1g2(model_g1, model_g2, limit_mask)
        pl.suptitle('model post=% 10.8e M200=%5.2e' % (posterior, theta[0]))
        filename_fig = 'models/res2.%04d.png' % self.n_model_evals
        pl.savefig(filename_fig)
        logger.debug('saved %s' % filename_fig)
        pl.close()
    return posterior
def save(self, name=None, format="png", dirc=None):
    """Saves Bloch sphere to file of type ``format`` in directory ``dirc``.

    Parameters
    ----------
    name : str
        Name of saved image. Must include path and format as well.
        i.e. '/Users/Paul/Desktop/bloch.png'
        This overrides the 'format' and 'dirc' arguments.
    format : str
        Format of output image.
    dirc : str
        Directory for output images. Defaults to current working directory.

    Returns
    -------
    File containing plot of Bloch sphere.
    """
    self.make_sphere()
    if dirc:
        if not os.path.isdir(os.getcwd() + "/" + str(dirc)):
            os.makedirs(os.getcwd() + "/" + str(dirc))
    if name == None:
        if dirc:
            savefig(os.getcwd() + "/" + str(dirc) + "/bloch_" + str(self.savenum) + "." + format)
        else:
            savefig(os.getcwd() + "/bloch_" + str(self.savenum) + "." + format)
    else:
        savefig(name)
    self.savenum += 1
    if self.fig:
        close(self.fig)
def plot(self, filesuffix=('.png',)):
    pylab.figure()
    kPluses = 10**numpy.linspace(0, 3, 100)
    kMinuses = 10**numpy.linspace(6, 9, 100)
    figureOfMerits = numpy.zeros((len(kPluses), len(kMinuses), 4), 'd')
    for i, kPlus in enumerate(kPluses):
        for j, kMinus in enumerate(kMinuses):
            figureOfMerits[i, j, :] = self.figureOfMerit(
                self.generateData({'kPlus': kPlus, 'kMinus': kMinus}))
    data = self.generateData({'kPlus': kPluses[0], 'kMinus': kMinuses[0]})
    self._contourf(kMinuses, kPluses, figureOfMerits, data)
    pylab.xticks((10**6, 10**7, 10**8, 10**9), fontsize=self.fontsize)
    pylab.yticks((10**0, 10**1, 10**2, 10**3), fontsize=self.fontsize)
    pylab.xlabel(r'$k^-$ $\left(1\per\metre\right)$', fontsize=self.fontsize)
    pylab.ylabel(r'$k^+$ $\left(\power{\metre}{3}\per\mole\cdot\second\right)$',
                 fontsize=self.fontsize)
    pylab.text(2 * 10**6, 7 * 10**2, r'I', fontsize=self.fontsize)
    pylab.text(3 * 10**7, 7 * 10**2, r'II', fontsize=self.fontsize)
    pylab.text(6 * 10**8, 7 * 10**2, r'III', fontsize=self.fontsize)
    pylab.text(6 * 10**8, 7 * 10**1, r'IV', fontsize=self.fontsize)
    for fP, kPlus, paxeslabel in ((1.143, 3.51e+00, False), (0.975, 9.33e+00, False),
                                  (0.916, 3.51e+01, False), (0.89, 9.33e+01, False),
                                  (0.87, 3.51e+02, True)):
        for fM, kMinus, maxeslabel in ((1.4, 2.48e+06, False), (1.07, 7.05e+6, False),
                                       (0.96, 2.48e+07, False), (0.91, 7.05e+7, False),
                                       (0.88, 2.48e+08, True)):
            xpos = (numpy.log10(kMinus) - 6.) / 3. * fM
            ypos = numpy.log10(kPlus) / 3. * fP
            self.makeBackGroundPlot({'kPlus': kPlus, 'kMinus': kMinus}, xpos, ypos,
                                    axeslabel=paxeslabel and maxeslabel)
    for fs in filesuffix:
        pylab.savefig('kPlusVkMinus' + fs)
    pylab.close('all')
def plot_cumulative_score(smod, seqs, size=(6, 2), fname=None):
    """plot_cumulative_score."""
    sig = cumulative_score(seqs, smod)
    plt.figure(figsize=size)
    sigp = np.copy(sig)
    sigp[sigp < 0] = 0
    plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g')
    sign = np.copy(sig)
    sign[sign >= 0] = 0
    plt.bar(range(len(sign)), sign, alpha=0.3, color='r')
    plt.grid()
    plt.xlabel('Position')
    plt.ylabel('Importance score')
    if fname:
        plt.draw()
        figname = '%s_importance.png' % (fname)
        plt.savefig(figname, bbox_inches='tight', transparent=True, pad_inches=0)
    else:
        figname = None
        plt.show()
    plt.close()
    return figname
def plot_anat(brain):
    import os.path
    import pylab as pl
    from nibabel import load
    from nipy.labs import viz
    import numpy as np
    img = load(brain)
    data = img.get_data()
    data[np.isnan(data)] = 0
    affine = img.get_affine()
    viz.plot_anat(anat=data, anat_affine=affine, draw_cross=False, slicer='x')
    x_view = os.path.abspath('x_view.png')
    y_view = os.path.abspath('y_view.png')
    z_view = os.path.abspath('z_view.png')
    pl.savefig(x_view, bbox_inches='tight')
    viz.plot_anat(anat=data, anat_affine=affine, draw_cross=False, slicer='y')
    pl.savefig(y_view, bbox_inches='tight')
    viz.plot_anat(anat=data, anat_affine=affine, draw_cross=False, slicer='z')
    pl.savefig(z_view, bbox_inches='tight')
    images = [x_view, y_view, z_view]
    pl.close()
    return images
def plot_location(needle, haystack,
                  cluster_id=None, nbins=20, size=(17, 2), fname=None):
    """plot_location."""
    locs = []
    for h, s in haystack:
        for match in re.finditer(needle, s):
            s = match.start()
            e = match.end()
            m = s + (e - s) / 2
            locs.append(m)
    plt.figure(figsize=size)
    n, bins, patches = plt.hist(locs, nbins, normed=0, facecolor='blue', alpha=0.3)
    plt.grid()
    plt.title(needle)
    plt.xlabel('Position')
    plt.ylabel('Num occurrences')
    if fname:
        plt.draw()
        figname = '%s_loc_%d.png' % (fname, cluster_id)
        plt.savefig(figname, bbox_inches='tight', transparent=True, pad_inches=0)
    else:
        figname = None
        plt.show()
    plt.close()
    return figname
def plot_distance(cluster_id_i, cluster_id_j, regex_i, regex_j, distances,
                  nbins=5, size=(6, 2), fname=None):
    """plot_distance."""
    ds = distances[(cluster_id_i, cluster_id_j)]
    plt.figure(figsize=size)
    n, bins, patches = plt.hist(ds, nbins, normed=0, facecolor='green', alpha=0.3)
    plt.grid()
    plt.title('%s vs %s' % (regex_i, regex_j))
    plt.xlabel('Relative position')
    plt.ylabel('Num occurrences')
    if fname:
        plt.draw()
        figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j)
        plt.savefig(figname, bbox_inches='tight', transparent=True, pad_inches=0)
    else:
        figname = None
        plt.show()
    plt.close()
    return figname
def mamPlot(funct, args):
    pl = args[0]
    x = np.array([])
    ymin = np.array([])
    yavg = np.array([])
    ymax = np.array([])
    f = np.array([])
    x = np.append(x, funct.rmsSet[:, 0])
    ymin = np.append(ymin, funct.rmsSet[:, 1])
    ymax = np.append(ymax, funct.rmsSet[:, 2])
    t1 = funct.rmsSet[:, 3]
    t2 = funct.rmsSet[:, 5]
    yavg = np.append(yavg, t1 / t2)
    f = np.append(f, funct.rmsSet[:, 5])
    if centroidP(x, yavg):
        pl.set_yscale('log')
        pl.set_xscale('log')
    else:
        pl.ticklabel_format(axis='both', style='sci', scilimits=(-2, 5),
                            pad=5, direction="bottom")
        pl.axis([0, np.amax(x) + (2 * np.amax(x) / 100),
                 0, np.amax(ymax) + (2 * np.amax(ymax) / 100)])
    pl.set_xlabel('read memory size', fontsize=8)
    pl.set_ylabel("cost", fontsize=8)
    pl.grid(True)
    pl.set_title("Min/Avg/Max Cost", fontsize=14)
    pl.tick_params(axis='x', labelsize=7)
    pl.tick_params(axis='y', labelsize=7)
    sc = pl.scatter(x, ymax, s=7, c='r', marker='o', lw=0.0)
    sc1 = pl.scatter(x, yavg, s=5.5, c='g', marker='o', lw=0.0)
    sc2 = pl.scatter(x, ymin, s=4, c='b', marker='o', lw=0.0)
    pl.legend((sc2, sc1, sc), ("Min", "Avg", "Max"), scatterpoints=1, ncol=3,
              bbox_to_anchor=[0.5, mamAdjust], loc="lower center", fontsize=8)
    pylab.close()
def CostVariancePlot(funct, args):
    pl = args[0]
    x = np.array([])
    y = np.array([])
    f = np.array([])
    z = np.array([])
    x = np.append(x, funct.rmsSet[:, 0])
    y = np.append(y, funct.rmsSet[:, 3])
    f = np.append(f, funct.rmsSet[:, 5])
    z = np.append(z, funct.rmsSet[:, 4])
    v = np.array([])
    v = np.append(v, [0])
    i = 0
    while i < len(x):
        v = np.append(v, (z[i] / f[i]) - (y[i] / f[i]) * (y[i] / f[i]))
        i += 1
    v = np.delete(v, 0)
    if centroidP(x, v):
        pl.set_yscale('log')
        pl.set_xscale('log')
    else:
        pl.ticklabel_format(axis='both', style='sci', scilimits=(-2, 5),
                            pad=5, direction="bottom")
        pl.axis([0, np.amax(x) + (10 * np.amax(x) / 100),
                 0, np.amax(v) + (10 * np.amax(v) / 100)])
    pl.set_xlabel("read memory size", fontsize=8)
    pl.set_ylabel("cost", fontsize=8)
    pl.set_title("Variance Cost", fontsize=14)
    pl.grid(True)
    pl.tick_params(axis='x', labelsize=7)
    pl.tick_params(axis='y', labelsize=7)
    sc = pl.scatter(x, v, c=f, s=6, marker='o', lw=0.0, cmap=cmap, norm=norm)
    pylab.close()
def explore_data(data, images, target):
    # try to determine the type of data...
    print "data_type belonging to key data:"
    try:
        print np.dtype(data)
    except TypeError as err:
        print err
    print "It has dimension", np.shape(data)
    # plot a 3
    # get indices of all threes in target
    threes = np.where(target == 3)
    # assert threes is not empty
    assert(len(threes) > 0)
    # choose the first 3
    three_indx = threes[0]
    # get the image
    img = images[three_indx][0]
    # plot it
    plot.figure()
    plot.gray()
    plot.imshow(img, interpolation="nearest")
    plot.show()
    plot.close()
def plot_worker(jobq, mask, pid, lineshape, range):
    '''
    args[0] = array file name
    args[1] = output figure name
    if mask, where masked==0 is masked
    '''
    if lineshape:
        lines = shapefile.load_shape_list(lineshape)
    else:
        lines = None
    while True:
        #--get some args from the queue
        args = jobq.get()
        #--check if this is a sentinel
        if args == None:
            break
        #--load
        if args[2]:
            arr = np.fromfile(args[0], dtype=np.float32)
            arr.resize(bro.nrow, bro.ncol)
        else:
            arr = np.loadtxt(args[0])
        if mask != None:
            arr = np.ma.masked_where(mask == 0, arr)
        #print args[0], arr.min(), arr.max(), arr.mean()
        #--generic plotting
        fig = pylab.figure()
        ax = pylab.subplot(1, 1, 1, aspect='equal')
        if range:
            vmax = range[1]
            vmin = range[0]
        else:
            vmax = arr.max()
            vmin = arr.min()
        #p = ax.imshow(arr, interpolation='none')
        p = ax.pcolor(bro.X, bro.Y, np.flipud(arr), vmax=vmax, vmin=vmin)
        pylab.colorbar(p)
        if lines:
            for line in lines:
                ax.plot(line[0, :], line[1, :], 'k-', lw=1.0)
                #break
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_xlim(bro.plt_x)
        ax.set_ylim(bro.plt_y)
        ax.set_title(args[0])
        fmt = args[1].split('.')[-1]
        pylab.savefig(args[1], dpi=300, format=fmt)
        pylab.close(fig)
        #--mark this task as done
        jobq.task_done()
        print 'plot worker', pid, ' finished', args[0]
    #--mark the sentinel as done
    jobq.task_done()
    return
def arcRVs(booSave=False, booShow=True, booFit=False):
    arcRVs = np.load('npy/arcRVs.npy')
    MJDs = np.load('npy/JDs.npy')
    colors = ['b', 'g', 'r', 'c']
    for epoch, MJD in enumerate(MJDs):
        for cam in range(4):
            y = arcRVs[:, epoch, cam]
            plt.plot(y, '.' + colors[cam])
            if booFit == True:
                x = np.arange(len(y))
                p = np.polyfit(x[-np.isnan(y)], y[-np.isnan(y)], 1)
                plt.plot(x, x * p[0] + p[1])
        plt.title('MJD=' + str(MJD))
        if booSave == True:
            try:
                plotName = 'plots/arcRVs_' + str(epoch)
                print 'Attempting to save', plotName
                plt.savefig(plotName)
            except Exception, e:
                print str(e)
                print 'FAILED'
        if booShow == True:
            plt.show()
        plt.close()
def generate_glassbrain_image(image_pk):
    from neurovault.apps.statmaps.models import Image
    import neurovault
    import matplotlib as mpl
    mpl.rcParams['savefig.format'] = 'jpg'
    my_dpi = 50
    fig = plt.figure(figsize=(330.0 / my_dpi, 130.0 / my_dpi), dpi=my_dpi)
    img = Image.objects.get(pk=image_pk)
    f = BytesIO()
    try:
        glass_brain = plot_glass_brain(img.file.path, figure=fig)
        glass_brain.savefig(f, dpi=my_dpi)
    except:
        # Glass brains that do not produce will be given dummy image
        this_path = os.path.abspath(os.path.dirname(__file__))
        f = open(os.path.abspath(os.path.join(this_path, "static", "images",
                                              "glass_brain_empty.jpg")))
        raise
    finally:
        plt.close('all')
        f.seek(0)
        content_file = ContentFile(f.read())
        img.thumbnail.save("glass_brain_%s.jpg" % img.pk, content_file)
        img.save()
def plot_quality_scores(pp, data):
    names = data.keys()
    # Plot mean quality
    for name in names:
        mean_quality = data[name][QUALITY_SCORE_NAME]['mean_quality']
        indices = range(0, len(mean_quality))
        pl.plot(indices, mean_quality, linewidth=2)
    pl.ylim([0, 40])
    pl.xlabel("Base position")
    pl.ylabel("Mean Phred Score")
    pl.title("Mean quality score by position")
    pl.legend(names, loc="lower left")
    pl.savefig(pp, format='pdf')
    pl.close()

    # Plot >q30 fraction
    for name in names:
        q30_fraction = data[name][QUALITY_SCORE_NAME]['fraction_q30']
        indices = range(0, len(q30_fraction))
        pl.plot(indices, q30_fraction)
    pl.xlabel("Base position")
    pl.ylabel("Fraction at least Q30")
    pl.title("Fraction of bases at least Q30")
    pl.legend(names, loc="lower left")
    pl.savefig(pp, format='pdf')
    pl.close()
def plot_gc_distribution(pp, data):
    names = data.keys()
    # Plot the 2D histogram of coverage vs gc
    for name in names:
        x = [i * 100 for i in data[name][GC_DISTRIBUTION_NAME]['gc_samples']]
        y = data[name][GC_DISTRIBUTION_NAME]['cov_samples']
        # Use the median to determine the range to show and round
        # to nearest 100 to avoid aliasing artefacts
        m = np.median(y)
        y_limit = math.ceil(2 * m / 100) * 100
        hist, xedges, yedges = np.histogram2d(x, y, bins=[20, 50],
                                              range=[[0, 100.0], [0, y_limit]])
        # draw the plot
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        pl.imshow(hist.T, extent=extent, interpolation='nearest',
                  origin='lower', aspect='auto')
        pl.colorbar()
        pl.title(name + ' GC Bias')
        pl.xlabel("GC %")
        pl.ylabel("k-mer coverage")
        pl.savefig(pp, format='pdf')
        pl.close()
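# The `pp` handle that plot_quality_scores and plot_gc_distribution write to
# looks like a matplotlib PdfPages object (savefig accepts one in place of a
# filename for the pdf backend). A minimal usage sketch under that
# assumption; write_qc_report, 'qc_report.pdf', and the `data` dict layout
# are hypothetical.
from matplotlib.backends.backend_pdf import PdfPages

def write_qc_report(data, filename='qc_report.pdf'):
    pp = PdfPages(filename)           # one file collecting every figure
    plot_quality_scores(pp, data)     # each savefig(pp, ...) appends a page
    plot_gc_distribution(pp, data)
    pp.close()                        # finalize the multi-page PDF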
def test(cv, model, data, user, code, comp):
    test_power = np.array([float(r[2 + comp]) / max_power for r in data])
    times = [datetime.datetime.strptime(r[0], '%Y-%m-%d %H:%M:%S UTC') for r in data]
    features = np.array([d[8:] for d in data], dtype=np.float)
    features[:, 0] = features[:, 0] / time_scale
    jobs = list(set([(r[1], r[2]) for r in data]))
    name_features = cv.transform([d[2] for d in data]).toarray()
    features = np.hstack((features, name_features))
    job_ids = [r[1] for r in data]
    prediction = model.predict(features)
    rmse = math.sqrt(np.average(((prediction - test_power) * max_power)**2))
    nrmse = math.sqrt(np.average(((prediction - test_power) / test_power)**2))
    corr = np.corrcoef(prediction, test_power)[0, 1]
    r2 = 1 - (sum((prediction - test_power)**2) /
              sum((test_power - np.average(test_power))**2))
    pl.figure(figsize=(6, 7))
    pl.subplot(211)
    pl.plot(prediction * max_power, test_power * max_power, '+')
    if math.isnan(corr) or math.isnan(r2) or math.isnan(rmse):
        pl.title("RMSPE=" + str(nrmse) + " RMSE=" + str(rmse) +
                 " Corr=" + str(corr) + " R2=" + str(r2))
    else:
        pl.title("RMSPE=" + str(int(nrmse * 1000) / 1000.0) +
                 " RMSE=" + str(int(rmse * 1000) / 1000.0) +
                 " Corr=" + str(int(corr * 1000) / 1000.0) +
                 " R2=" + str(int(r2 * 1000) / 1000.0))
    pl.xlabel('Predicted power')
    pl.ylabel('Real power')
    pl.plot([max(pl.xlim()[0], pl.ylim()[0]), min(pl.xlim()[1], pl.ylim()[1])],
            [max(pl.xlim()[0], pl.ylim()[0]), min(pl.xlim()[1], pl.ylim()[1])])
    pl.subplot(212)
    pl.plot(test_power * max_power)
    pl.plot(prediction * max_power)
    pl.ylabel('Power')
    pl.xlabel('Data point')
    #pl.legend(('Real power', 'Predicted power'))
    pl.subplots_adjust(hspace=0.35)
    pl.savefig('results' + str(month) + 'global' + str(min_train) + '/' + user + code + '.pdf')
    pl.close()
    pkl.dump((nrmse, rmse, corr, r2, prediction * max_power, test_power * max_power,
              times, job_ids),
             file=gzip.open('results' + str(month) + 'global' + str(min_train) +
                            '/' + user + 'test' + code + '.pkl.gz', 'w'))
    return prediction * max_power
def on_key_press(event):
    global old_t
    new_t = time.time()
    print old_t - new_t
    old_t = new_t
    if event.key == '+':
        a = axis()
        w = a[1] - a[0]
        axis([a[0] + w * .2, a[1] - w * .2, a[2], a[3]])
        draw()
    if event.key in ['-', '\'']:
        a = axis()
        w = a[1] - a[0]
        axis([a[0] - w / 3.0, a[1] + w / 3.0, a[2], a[3]])
        draw()
    if event.key == '.':
        a = axis()
        w = a[1] - a[0]
        axis([a[0] + w * .2, a[1] + w * .2, a[2], a[3]])
        draw()
    if event.key == ',':
        a = axis()
        w = a[1] - a[0]
        axis([a[0] - w * .2, a[1] - w * .2, a[2], a[3]])
        draw()
    if event.key == 'q':
        close()
def plot_values(X, Y, xlabel, ylabel, suffix, ptype='plot'):
    output_filename = constants.ATTRACTIVENESS_FOLDER_NAME + constants.DATASET + '_' + suffix
    X1 = [X[i] for i in range(len(X)) if X[i] > 0 and Y[i] > 0]
    Y1 = [Y[i] for i in range(len(X)) if X[i] > 0 and Y[i] > 0]
    X = X1
    Y = Y1
    pylab.close("all")
    pylab.figure(figsize=(8, 7))
    #pylab.rcParams.update({'font.size': 20})
    pylab.scatter(X, Y)
    #pylab.axis(vis.get_bounds(X, Y, False, False))
    #pylab.xscale('log')
    pylab.yscale('log')
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    #pylab.xlim(0.1, 1)
    #pylab.ylim(ymin=0.01)
    #pylab.tight_layout()
    pylab.savefig(output_filename + '.pdf')
def main():
    base_path = "/caps2/tsupinie/1kmf-control/"
    temp = goshen_1km_temporal(start=14400, end=14400)
    grid = goshen_1km_grid()
    n_ens_members = 40
    np.seterr(all='ignore')
    ens = loadEnsemble(base_path, [11], temp.getTimes(), (['pt', 'p'], computeDensity))
    ens = ens[0, 0]
    zs = decompressVariable(nio.open_file("%s/ena001.hdfgrdbas" % base_path,
                                          mode='r', format='hdf').variables['zp'])
    xs, ys = grid.getXY()
    xs = xs[np.newaxis, ...].repeat(zs.shape[0], axis=0)
    ys = ys[np.newaxis, ...].repeat(zs.shape[0], axis=0)
    eff_buoy = effectiveBuoyancy(ens, (zs, ys, xs), plane={'z': 10})
    print eff_buoy
    pylab.figure()
    pylab.contourf(xs[0], ys[0], eff_buoy[0], cmap=matplotlib.cm.get_cmap('RdBu_r'))
    pylab.colorbar()
    grid.drawPolitical()
    pylab.suptitle("Effective Buoyancy")
    pylab.savefig("eff_buoy.png")
    pylab.close()
    return
def plot_fragment_sizes(pp, data):
    # Trim outliers from the histograms
    names = data.keys()
    for name in names:
        h = data[name][FRAGMENT_SIZE_NAME]['sizes']
        sizes = {}
        for i in h:
            if i not in sizes:
                sizes[i] = 1
            else:
                sizes[i] += 1
        n = len(h)
        x = list()
        y = list()
        sum = 0
        for i, j in sorted(sizes.items()):
            f = float(j) / n
            x.append(i)
            y.append(f)
            sum += f
        pl.plot(x, y)
    pl.xlim([0, 1000])
    pl.xlabel("Fragment Size (bp)")
    pl.ylabel("Proportion")
    pl.title("Estimated Fragment Size Histogram")
    pl.legend(names)
    pl.savefig(pp, format='pdf')
    pl.close()
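# The counting loop in plot_fragment_sizes builds an empirical PMF of
# fragment sizes by hand; a compact equivalent sketch using
# collections.Counter. The function name fragment_size_pmf is hypothetical;
# only the size-list input matches the surrounding code.
from collections import Counter

def fragment_size_pmf(sizes):
    # Map each fragment size to its relative frequency, sorted by size.
    counts = Counter(sizes)
    n = float(len(sizes))
    return [(size, count / n) for size, count in sorted(counts.items())]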
def test_varying_inclination(self):
    #""" Test that the waveform is consistent for changes in inclination """
    sigmas = []
    incs = numpy.arange(0, 21, 1.0) * lal.PI / 10.0
    for inc in incs:
        # WARNING: This does not properly handle the case of SpinTaylor*
        # where the spin orientation is not relative to the inclination
        hp, hc = get_waveform(self.p, inclination=inc)
        s = sigma(hp, low_frequency_cutoff=self.p.f_lower)
        sigmas.append(s)
    f = pylab.figure()
    pylab.axes([.1, .2, 0.8, 0.70])
    pylab.plot(incs, sigmas)
    pylab.title("Vary %s inclination, $\\tilde{h}$+" % self.p.approximant)
    pylab.xlabel("Inclination (radians)")
    pylab.ylabel("sigma (flat PSD)")
    info = self.version_txt
    pylab.figtext(0.05, 0.05, info)
    if self.save_plots:
        pname = self.plot_dir + "/%s-vary-inclination.png" % self.p.approximant
        pylab.savefig(pname)
    if self.show_plots:
        pylab.show()
    else:
        pylab.close(f)
    self.assertAlmostEqual(sigmas[-1], sigmas[0], places=7)
    self.assertAlmostEqual(max(sigmas), sigmas[0], places=7)
    self.assertTrue(sigmas[0] > sigmas[5])
def plot(x, z):
    # plot input x versus convolution z
    pylab.close(1)
    pylab.figure(1)
    pylab.plot(x, 'b', label="sine")
    pylab.plot([abs(i / max(z)) for i in z], 'r', label="convolution")
    pylab.title("convolution of pure sine")
    pylab.legend()
trajectory = []
ideal = []

# Start at the origin.
trajectory.append(movement.cartesian)
ideal.append([ideal_num, ideal_num])

for i in range(300):
    # Move the object to cartesian coordinates (3, 3)
    movement.move(delta)
    # Create a list of points the object visited.
    trajectory.append(movement.cartesian)
    # Create a list of the ideal path.
    ideal_num += ideal_delta
    ideal.append([ideal_num, ideal_num])

# Create numpy arrays out of the coordinate lists.
trajectory = np.array(trajectory)
ideal = np.array(ideal)

# Plot the lists.
plt.close('all')
plt.plot(trajectory[:, 0], trajectory[:, 1], label='polar')
plt.plot(ideal[:, 0], ideal[:, 1], label='ideal')
plt.legend()

print("Final cartesian coordinates: " + str(movement.cartesian))

# --------------------------------------------------------------------------
def manually_refine_components(Y, xxx_todo_changeme, A, C, Cn, thr=0.9,
                               display_numbers=True, max_number=None,
                               cmap=None, **kwargs):
    """Plots contour of spatial components against a background image
    and allows to interactively add novel components by clicking with mouse

    Args:
        Y: ndarray
            movie in 2D

        (dx, dy): tuple
            dimensions of the square used to identify neurons
            (should be set to the value of gsiz)

        A: np.ndarray or sparse matrix
            Matrix of Spatial components (d x K)

        Cn: np.ndarray (2D)
            Background image (e.g. mean, correlation)

        thr: scalar between 0 and 1
            Energy threshold for computing contours (default 0.995)

        display_numbers: Boolean
            Display number of ROIs if checked (default True)

        max_number: int
            Display the number for only the first max_number components
            (default None, display all numbers)

        cmap: string
            User specifies the colormap (default None, default colormap)

    Returns:
        A: np.ndarray
            matrix A of estimated spatial component contributions

        C: np.ndarray
            array of estimated calcium traces
    """
    (dx, dy) = xxx_todo_changeme
    if issparse(A):
        A = np.array(A.todense())
    else:
        A = np.array(A)
    d1, d2 = np.shape(Cn)
    d, nr = np.shape(A)
    if max_number is None:
        max_number = nr
    x, y = np.mgrid[0:d1:1, 0:d2:1]

    pl.imshow(Cn, interpolation=None, cmap=cmap)
    cm = com(A, d1, d2)

    Bmat = np.zeros((np.minimum(nr, max_number), d1, d2))
    for i in range(np.minimum(nr, max_number)):
        indx = np.argsort(A[:, i], axis=None)[::-1]
        cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
        cumEn /= cumEn[-1]
        Bvec = np.zeros(d)
        Bvec[indx] = cumEn
        Bmat[i] = np.reshape(Bvec, np.shape(Cn), order='F')

    T = np.shape(Y)[-1]

    pl.close()
    fig = pl.figure()
    ax = pl.gca()
    ax.imshow(Cn, interpolation=None, cmap=cmap,
              vmin=np.percentile(Cn[~np.isnan(Cn)], 1),
              vmax=np.percentile(Cn[~np.isnan(Cn)], 99))
    for i in range(np.minimum(nr, max_number)):
        pl.contour(y, x, Bmat[i], [thr])
    if display_numbers:
        for i in range(np.minimum(nr, max_number)):
            ax.text(cm[i, 1], cm[i, 0], str(i + 1))

    A3 = np.reshape(A, (d1, d2, nr), order='F')
    while True:
        pts = fig.ginput(1, timeout=0)
        if pts != []:
            print(pts)
            xx, yy = np.round(pts[0]).astype(np.int)
            coords_y = np.array(list(range(yy - dy, yy + dy + 1)))
            coords_x = np.array(list(range(xx - dx, xx + dx + 1)))
            coords_y = coords_y[(coords_y >= 0) & (coords_y < d1)]
            coords_x = coords_x[(coords_x >= 0) & (coords_x < d2)]
            a3_tiny = A3[coords_y[0]:coords_y[-1] + 1, coords_x[0]:coords_x[-1] + 1, :]
            y3_tiny = Y[coords_y[0]:coords_y[-1] + 1, coords_x[0]:coords_x[-1] + 1, :]

            dy_sz, dx_sz = np.shape(a3_tiny)[:-1]
            y2_tiny = np.reshape(y3_tiny, (dx_sz * dy_sz, T), order='F')
            a2_tiny = np.reshape(a3_tiny, (dx_sz * dy_sz, nr), order='F')
            y2_res = y2_tiny - a2_tiny.dot(C)

            y3_res = np.reshape(y2_res, (dy_sz, dx_sz, T), order='F')
            a__, c__, center__, b_in__, f_in__ = greedyROI(
                y3_res, nr=1,
                gSig=[np.floor(old_div(dx_sz, 2)), np.floor(old_div(dy_sz, 2))],
                gSiz=[dx_sz, dy_sz])

            a_f = np.zeros((d, 1))
            idxs = np.meshgrid(coords_y, coords_x)
            a_f[np.ravel_multi_index(idxs, (d1, d2), order='F').flatten()] = a__
            A = np.concatenate([A, a_f], axis=1)
            C = np.concatenate([C, c__], axis=0)

            indx = np.argsort(a_f, axis=None)[::-1]
            cumEn = np.cumsum(a_f.flatten()[indx]**2)
            cumEn /= cumEn[-1]
            Bvec = np.zeros(d)
            Bvec[indx] = cumEn
            bmat = np.reshape(Bvec, np.shape(Cn), order='F')
            pl.contour(y, x, bmat, [thr])
            pl.pause(.01)
        elif pts == []:
            break
        nr += 1
        A3 = np.reshape(A, (d1, d2, nr), order='F')
    return A, C
xpositions = y[:, 0]
ypositions = y[:, 1]

plot.clf()
if LABELS:
    for x, y, nr in zip(xpositions, ypositions, range(len(xpositions))):
        plot.scatter(x, y, 2, marker='*', color='green')
        plot.annotate(nr, xy=(x, y), size=2, color='green')
    out = "{}_{}_labels".format(word, year)
else:
    plot.scatter(xpositions, ypositions, 5, marker='*', color='green')
    out = "{}_{}".format(word, year)
plot.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plot.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
plot.title("{} in {}'s".format(word, year))
plot.savefig(out + '_PCA.png', dpi=300, bbox_inches='tight')
plot.close()
plot.clf()
def plot_seis(stat, filename, label, units, outfile, rrup=None):
    """
    Plots the seismogram for station stat, and outputs a png file outfile
    """
    ts1 = []
    ns1 = []
    ew1 = []
    ver1 = []
    cmt1 = ["", ""]

    # Read input file
    input_file = open(filename, 'r')
    for data in input_file:
        # Remove leading spaces
        data = data.strip()
        # Skip comments
        if data.startswith('#') or data.startswith('%'):
            if cmt1[0] == "":
                cmt1[0] = data
        else:
            tmp = []
            tmp = data.split()
            ts1.append(float(tmp[0]))
            ns1.append(float(tmp[1]))
            ew1.append(float(tmp[2]))
            ver1.append(float(tmp[3]))
    # Don't forget to close the file
    input_file.close()

    min_x, max_x = calculate_x_coords(ts1, rrup)
    min_horiz_y = 1.1 * min([min(ns1), min(ew1)])
    max_horiz_y = 1.1 * max([max(ns1), max(ew1)])
    min_vert_y = 1.1 * min(ver1)
    max_vert_y = 1.1 * max(ver1)

    pylab.clf()
    pylab.suptitle('Seismograms for run %s, station %s' % (label, stat), size=14)
    pylab.subplots_adjust(hspace=0.4)

    pylab.subplot(311, title='N/S')
    pylab.plot(ts1, ns1, lw=plot_config.line_width)
    pylab.xlim(min_x, max_x)
    pylab.ylim(min_horiz_y, max_horiz_y)
    if units == 'vel':
        pylab.ylabel("Velocity (cm/s)")
    elif units == 'acc':
        pylab.ylabel("Acceleration (cm/s/s)")

    pylab.subplot(312, title='E/W')
    pylab.plot(ts1, ew1, lw=plot_config.line_width)
    pylab.xlim(min_x, max_x)
    pylab.ylim(min_horiz_y, max_horiz_y)
    if units == 'vel':
        pylab.ylabel("Velocity (cm/s)")
    elif units == 'acc':
        pylab.ylabel("Acceleration (cm/s/s)")

    pylab.subplot(313, title='Ver')
    pylab.plot(ts1, ver1, lw=plot_config.line_width)
    pylab.xlim(min_x, max_x)
    pylab.ylim(min_vert_y, max_vert_y)
    if units == 'vel':
        pylab.ylabel("Velocity (cm/s)")
    elif units == 'acc':
        pylab.ylabel("Acceleration (cm/s/s)")

    pylab.gcf().set_size_inches(6, 7)
    pylab.savefig(outfile, format="png", dpi=plot_config.dpi)
    pylab.close()
def plot_overlay_with_arias(stat, obs_filename, comp_filename,
                            obs_arias_n_filename, obs_arias_e_filename,
                            obs_arias_z_filename, comp_arias_n_filename,
                            comp_arias_e_filename, comp_arias_z_filename,
                            obs_label, comp_label, outfile, rrup=None,
                            y_label="Velocity (cm/s)",
                            goflabel=None, gofdata=None):
    """
    This function plots observed and computed seismograms side by side
    for easy comparison
    """
    # Initialize variables
    textx = 0.53
    texty = 0.05
    fig = pylab.plt.figure()
    fig.clf()

    # Read all files
    (ts1, ns1, ew1, ver1) = read_seismogram_file(obs_filename)
    (ts2, ns2, ew2, ver2) = read_seismogram_file(comp_filename)
    ta1, tmp1, tmp2, an1 = read_seismogram_file(obs_arias_n_filename)
    ta1, tmp1, tmp2, ae1 = read_seismogram_file(obs_arias_e_filename)
    ta1, tmp1, tmp2, az1 = read_seismogram_file(obs_arias_z_filename)
    ta2, tmp1, tmp2, an2 = read_seismogram_file(comp_arias_n_filename)
    ta2, tmp1, tmp2, ae2 = read_seismogram_file(comp_arias_e_filename)
    ta2, tmp1, tmp2, az2 = read_seismogram_file(comp_arias_z_filename)

    # Determine min and max X and Y for N/S/E/W, and Ver, for scaling
    min_x = 0
    #max_x = min(max([max(ts1), max(ts2)]), 100)
    max_x = max([max(ts1), max(ts2)])
    min_horiz_y = 1.1 * min([min(ns1), min(ns2), min(ew1), min(ew2)])
    max_horiz_y = 1.1 * max([max(ns1), max(ns2), max(ew1), max(ew2)])
    # Adjust so min and max are equal
    if abs(min_horiz_y) > abs(max_horiz_y):
        max_horiz_y = -1 * min_horiz_y
    else:
        min_horiz_y = -1 * max_horiz_y
    min_vert_y = 1.1 * min([min(ver1), min(ver2)])
    max_vert_y = 1.1 * max([max(ver1), max(ver2)])
    if abs(min_vert_y) > abs(max_vert_y):
        max_vert_y = -1 * min_vert_y
    else:
        min_vert_y = -1 * max_vert_y
    # For arias plots, min=0, max=100%
    min_y_arias = 0
    max_y_arias = 100

    if goflabel is None or gofdata is None:
        fig.suptitle('%s vs. %s, station %s' % (obs_label, comp_label, stat), size=14)
    else:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[0])
        fig.suptitle('%s vs. %s, station %s (%s)' % (obs_label, comp_label, stat, txt),
                     size=14)
    fig.subplots_adjust(top=0.85)
    fig.subplots_adjust(left=0.075)
    fig.subplots_adjust(right=0.925)
    fig.subplots_adjust(hspace=0.4)
    fig.subplots_adjust(wspace=0.3)

    # FS: May 2013: for 3-comp plot below is #331
    ax = fig.add_subplot(321, title='%s, N/S' % obs_label)
    ax.plot(ts1, ns1, color='black', label=obs_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    ax.set_ylabel(y_label)

    # FS: May 2013: for 3-comp plot below is #334
    ax = fig.add_subplot(323, title='%s, N/S' % comp_label)
    ax.plot(ts2, ns2, color='red', label=comp_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    ax.set_ylabel(y_label)
    # print "GOFLABEL, GOFDATA", goflabel, gofdata
    if goflabel is not None and gofdata is not None:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[2])
        ax.text(textx, texty, txt, transform=ax.transAxes,
                bbox=dict(facecolor='red', alpha=0.5))
    #legend(prop=matplotlib.font_manager.FontProperties(size=10))

    # FS: May 2013: for 3-comp plot below is #332
    ax = fig.add_subplot(322, title='%s, E/W' % obs_label)
    ax.plot(ts1, ew1, color='black', label=obs_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    #ylabel(y_label)

    # FS: May 2013: for 3-comp plot below is #335
    ax = fig.add_subplot(324, title='%s, E/W' % comp_label)
    ax.plot(ts2, ew2, color='red', label=comp_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    if goflabel is not None and gofdata is not None:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[1])
        ax.text(textx, texty, txt, transform=ax.transAxes,
                bbox=dict(facecolor='red', alpha=0.5))
    #ylabel(y_label)
    #legend(prop=matplotlib.font_manager.FontProperties(size=10))

    # FS: May 2013: Code commented out to remove vertical component
    # ax = fig.add_subplot(333, title='%s, ver' % obs_label)
    # ax.plot(ts1, ver1, color='black', label=obs_label,
    #         lw=plot_config.line_width)
    # ax.set_xlim(min_x, max_x)
    # ax.set_ylim(min_vert_y, max_vert_y)
    # #ylabel(y_label)
    # ax = fig.add_subplot(336, title='%s, ver' % comp_label)
    # ax.plot(ts2, ver2, color='red', label=comp_label,
    #         lw=plot_config.line_width)
    # ax.set_xlim(min_x, max_x)
    # ax.set_ylim(min_vert_y, max_vert_y)
    # if goflabel is not None and gofdata is not None:
    #     txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[3])
    #     ax.text(textx, texty, txt, transform=ax.transAxes,
    #             bbox=dict(facecolor='red', alpha=0.5))
    # #ylabel(y_label)
    # #legend(prop=matplotlib.font_manager.FontProperties(size=10))

    # FS: May 2013: for 3-comp plot below is #337
    ax = fig.add_subplot(325, title='Arias N/S')
    ax.plot(ta1, an1, color='black', lw=plot_config.line_width)
    ax.plot(ta2, an2, color='red', lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_y_arias, max_y_arias)
    ax.set_ylabel("Norm Arias Int (%)")

    # FS: May 2013: for 3-comp plot below is #338
    ax = fig.add_subplot(326, title='Arias E/W')
    ax.plot(ta1, ae1, color='black', lw=plot_config.line_width)
    ax.plot(ta2, ae2, color='red', lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_y_arias, max_y_arias)

    # FS: May 2013: Code commented out to remove vertical component
    # ax = fig.add_subplot(339, title='Arias ver')
    # ax.plot(ta1, az1, color='black', lw=plot_config.line_width)
    # ax.plot(ta2, az2, color='red', lw=plot_config.line_width)
    # ax.set_xlim(min_x, max_x)
    # ax.set_ylim(min_y_arias, max_y_arias)

    pylab.gcf().set_size_inches(10, 7.5)
    pylab.savefig(outfile, format="png", dpi=plot_config.dpi)
    pylab.close()
def plot_overlay(stat, obs_filename, comp_filename, obs_label, comp_label,
                 outfile, y_label="Velocity (cm/s)",
                 goflabel=None, gofdata=None):
    """
    This function plots observed and computed seismograms side by side
    for easy comparison
    """
    # Initialize variables
    textx = 0.53
    texty = 0.05
    fig = pylab.plt.figure()
    fig.clf()

    ts1, ns1, ew1, ver1 = read_seismogram_file(obs_filename)
    ts2, ns2, ew2, ver2 = read_seismogram_file(comp_filename)

    # Determine min and max X and Y for N/S/E/W, and Ver, for scaling
    min_x = 0
    max_x = min(max([max(ts1), max(ts2)]), 100)
    min_horiz_y = 1.1 * min([min(ns1), min(ns2), min(ew1), min(ew2)])
    max_horiz_y = 1.1 * max([max(ns1), max(ns2), max(ew1), max(ew2)])
    # Adjust so min and max are equal
    if abs(min_horiz_y) > abs(max_horiz_y):
        max_horiz_y = -1 * min_horiz_y
    else:
        min_horiz_y = -1 * max_horiz_y
    min_vert_y = 1.1 * min([min(ver1), min(ver2)])
    max_vert_y = 1.1 * max([max(ver1), max(ver2)])
    if abs(min_vert_y) > abs(max_vert_y):
        max_vert_y = -1 * min_vert_y
    else:
        min_vert_y = -1 * max_vert_y

    if goflabel is None or gofdata is None:
        fig.suptitle('%s vs. %s, station %s' % (obs_label, comp_label, stat), size=14)
    else:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[0])
        fig.suptitle('%s vs. %s, station %s (%s)' % (obs_label, comp_label, stat, txt),
                     size=14)
    fig.subplots_adjust(top=0.85)
    fig.subplots_adjust(left=0.075)
    fig.subplots_adjust(right=0.925)
    fig.subplots_adjust(hspace=0.3)
    fig.subplots_adjust(wspace=0.3)

    ax = fig.add_subplot(231, title='%s, N/S' % obs_label)
    ax.plot(ts1, ns1, color='black', label=obs_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    ax.set_ylabel(y_label)

    ax = fig.add_subplot(234, title='%s, N/S' % comp_label)
    ax.plot(ts2, ns2, color='red', label=comp_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    ax.set_ylabel(y_label)
    # print "GOFLABEL, GOFDATA", goflabel, gofdata
    if goflabel is not None and gofdata is not None:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[2])
        ax.text(textx, texty, txt, transform=ax.transAxes,
                bbox=dict(facecolor='red', alpha=0.5))
    #legend(prop=matplotlib.font_manager.FontProperties(size=10))

    ax = fig.add_subplot(232, title='%s, E/W' % obs_label)
    ax.plot(ts1, ew1, color='black', label=obs_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    #ylabel(y_label)

    ax = fig.add_subplot(235, title='%s, E/W' % comp_label)
    ax.plot(ts2, ew2, color='red', label=comp_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_horiz_y, max_horiz_y)
    if goflabel is not None and gofdata is not None:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[1])
        ax.text(textx, texty, txt, transform=ax.transAxes,
                bbox=dict(facecolor='red', alpha=0.5))
    #ylabel(y_label)
    #legend(prop=matplotlib.font_manager.FontProperties(size=10))

    ax = fig.add_subplot(233, title='%s, ver' % obs_label)
    ax.plot(ts1, ver1, color='black', label=obs_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_vert_y, max_vert_y)
    #ylabel(y_label)

    ax = fig.add_subplot(236, title='%s, ver' % comp_label)
    ax.plot(ts2, ver2, color='red', label=comp_label, lw=plot_config.line_width)
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_vert_y, max_vert_y)
    if goflabel is not None and gofdata is not None:
        txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[3])
        ax.text(textx, texty, txt, transform=ax.transAxes,
                bbox=dict(facecolor='red', alpha=0.5))
    #ylabel(y_label)
    #legend(prop=matplotlib.font_manager.FontProperties(size=10))

    pylab.gcf().set_size_inches(10, 5)
    pylab.savefig(outfile, format="png", dpi=plot_config.dpi)
    pylab.close()
for counter, evtid in enumerate(evtids):
    if evt_plotted > max_evts:
        break
    run, subrun, gate, phys_evt = decode_eventid(evtid)
    print('{0} - {1} - {2} - {3}'.format(run, subrun, gate, phys_evt))
    targ = labels[counter]
    evt = []
    if data_x is not None:
        evt.append(data_x[counter])
    if data_u is not None:
        evt.append(data_u[counter])
    if data_v is not None:
        evt.append(data_v[counter])
    fig = pylab.figure(figsize=(9, 3))
    gs = pylab.GridSpec(1, len(evt))
    # print np.where(evt == np.max(evt))
    # print np.max(evt)
    for i in range(len(evt)):
        ax = pylab.subplot(gs[i])
        ax.axis('off')
        # images are normalized such that the max e-dep has val 1, independent
        # of view, so set vmin, vmax here to keep matplotlib from
        # normalizing each view on its own
        ax.imshow(evt[i][0], cmap=pylab.get_cmap('jet'),
                  interpolation='nearest', vmin=0, vmax=1)
    figname = 'evt_%s_%s_%s_%s_targ_%d.pdf' % \
        (run, subrun, gate, phys_evt, targ)
    pylab.savefig(figname)
    pylab.close()
    evt_plotted += 1
def Metallicity(self, G):
    print('Plotting the metallicities')
    seed(2222)
    plt.figure()  # New figure
    ax = plt.subplot(111)  # 1 plot on the figure
    w = np.where((G.Type == 0) & (G.ColdGas / (G.StellarMass + G.ColdGas) > 0.1) &
                 (G.StellarMass > 0.01))[0]
    if (len(w) > dilute):
        w = sample(w, dilute)
    mass = np.log10(G.StellarMass[w] * 1.0e10 / self.Hubble_h)
    Z = np.log10((G.MetalsColdGas[w] / G.ColdGas[w]) / 0.02) + 9.0
    plt.scatter(mass, Z, marker='o', s=1, c='k', alpha=0.5, label='Model galaxies')

    # overplot Tremonti et al. 2003 (h=0.7)
    w = np.arange(7.0, 13.0, 0.1)
    Zobs = -1.492 + 1.847 * w - 0.08026 * w * w
    if (whichimf == 0):
        # Conversion from Kroupa IMF to Salpeter IMF
        plt.plot(np.log10((10**w * 1.5)), Zobs, 'b-', lw=2.0,
                 label='Tremonti et al. 2003')
    elif (whichimf == 1):
        # Conversion from Kroupa IMF to Salpeter IMF to Chabrier IMF
        plt.plot(np.log10((10**w * 1.5 / 1.8)), Zobs, 'b-', lw=2.0,
                 label='Tremonti et al. 2003')

    plt.ylabel(r'$12\ +\ \log_{10}[\mathrm{O/H}]$')  # Set the y...
    plt.xlabel(r'$\log_{10} M_{\mathrm{stars}}\ (M_{\odot})$')  # and the x-axis labels
    # Set the x and y axis minor ticks
    ax.xaxis.set_minor_locator(plt.MultipleLocator(0.05))
    ax.yaxis.set_minor_locator(plt.MultipleLocator(0.25))
    plt.axis([8.0, 12.0, 8.0, 9.5])
    leg = plt.legend(loc='lower right')
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')
    outputFile = OutputDir + '7.Metallicity' + OutputFormat
    plt.savefig(outputFile)  # Save the figure
    print('Saved file to', outputFile)
    plt.close()
    # Add this plot to our output list
    OutputList.append(outputFile)
def BaryonicMassFunction(self, G):
    print('Plotting the baryonic mass function')
    plt.figure()  # New figure
    ax = plt.subplot(111)  # 1 plot on the figure
    binwidth = 0.1  # mass function histogram bin width

    # calculate BMF
    w = np.where(G.StellarMass + G.ColdGas > 0.0)[0]
    mass = np.log10((G.StellarMass[w] + G.ColdGas[w]) * 1.0e10 / self.Hubble_h)
    mi = np.floor(min(mass)) - 2
    ma = np.floor(max(mass)) + 2
    NB = (ma - mi) / binwidth
    (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB)
    # Set the x-axis values to be the centre of the bins
    xaxeshisto = binedges[:-1] + 0.5 * binwidth

    # Bell et al. 2003 BMF (h=1.0 converted to h=0.73)
    M = np.arange(7.0, 13.0, 0.01)
    Mstar = np.log10(5.3 * 1.0e10 / self.Hubble_h / self.Hubble_h)
    alpha = -1.21
    phistar = 0.0108 * self.Hubble_h * self.Hubble_h * self.Hubble_h
    xval = 10.0**(M - Mstar)
    yval = np.log(10.) * phistar * xval**(alpha + 1) * np.exp(-xval)
    if (whichimf == 0):
        # converted diet Salpeter IMF to Salpeter IMF
        plt.plot(np.log10(10.0**M / 0.7), yval, 'b-', lw=2.0,
                 label='Bell et al. 2003')  # Plot the SMF
    elif (whichimf == 1):
        # converted diet Salpeter IMF to Salpeter IMF, then to Chabrier IMF
        plt.plot(np.log10(10.0**M / 0.7 / 1.8), yval, 'g--', lw=1.5,
                 label='Bell et al. 2003')  # Plot the SMF

    # Overplot the model histograms
    plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h * self.Hubble_h *
             self.Hubble_h / binwidth, 'k-', label='Model')

    plt.yscale('log', nonposy='clip')
    plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1])
    # Set the x-axis minor ticks
    ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1))
    plt.ylabel(r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$')  # Set the y...
    plt.xlabel(r'$\log_{10}\ M_{\mathrm{bar}}\ (M_{\odot})$')  # and the x-axis labels
    leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')
    outputFile = OutputDir + '2.BaryonicMassFunction' + OutputFormat
    plt.savefig(outputFile)  # Save the figure
    print('Saved file to', outputFile)
    plt.close()
    # Add this plot to our output list
    OutputList.append(outputFile)
def MassReservoirScatter(self, G):
    print('Plotting the mass in stellar, cold, hot, ejected, ICS reservoirs')
    seed(2222)
    plt.figure()  # New figure
    ax = plt.subplot(111)  # 1 plot on the figure
    w = np.where((G.Type == 0) & (G.Mvir > 1.0) & (G.StellarMass > 0.0))[0]
    if (len(w) > dilute):
        w = sample(w, dilute)
    mvir = np.log10(G.Mvir[w] * 1.0e10)
    plt.scatter(mvir, np.log10(G.StellarMass[w] * 1.0e10), marker='o', s=0.3,
                c='k', alpha=0.5, label='Stars')
    plt.scatter(mvir, np.log10(G.ColdGas[w] * 1.0e10), marker='o', s=0.3,
                color='blue', alpha=0.5, label='Cold gas')
    plt.scatter(mvir, np.log10(G.HotGas[w] * 1.0e10), marker='o', s=0.3,
                color='red', alpha=0.5, label='Hot gas')
    plt.scatter(mvir, np.log10(G.EjectedMass[w] * 1.0e10), marker='o', s=0.3,
                color='green', alpha=0.5, label='Ejected gas')
    plt.scatter(mvir, np.log10(G.IntraClusterStars[w] * 1.0e10), marker='o', s=10,
                color='yellow', alpha=0.5, label='Intracluster stars')
    plt.ylabel(r'$\mathrm{stellar,\ cold,\ hot,\ ejected,\ ICS\ mass}$')  # Set the y...
    plt.xlabel(r'$\log\ M_{\mathrm{vir}}\ (h^{-1}\ M_{\odot})$')  # and the x-axis labels
    plt.axis([10.0, 14.0, 7.5, 12.5])
    leg = plt.legend(loc='upper left')
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')
    plt.text(13.5, 8.0, r'$\mathrm{All}$')
    outputFile = OutputDir + '9.MassReservoirScatter' + OutputFormat
    plt.savefig(outputFile)  # Save the figure
    print('Saved file to', outputFile)
    plt.close()
    # Add this plot to our output list
    OutputList.append(outputFile)
def VelocityDistribution(self, G):
    print('Plotting the velocity distribution of all galaxies')
    seed(2222)
    mi = -40.0
    ma = 40.0
    binwidth = 0.5
    NB = (ma - mi) / binwidth

    # set up figure
    plt.figure()
    ax = plt.subplot(111)

    pos_x = G.Pos[:, 0] / self.Hubble_h
    pos_y = G.Pos[:, 1] / self.Hubble_h
    pos_z = G.Pos[:, 2] / self.Hubble_h
    vel_x = G.Vel[:, 0]
    vel_y = G.Vel[:, 1]
    vel_z = G.Vel[:, 2]
    dist_los = np.sqrt(pos_x * pos_x + pos_y * pos_y + pos_z * pos_z)
    vel_los = (pos_x / dist_los) * vel_x + (pos_y / dist_los) * vel_y + \
              (pos_z / dist_los) * vel_z
    dist_red = dist_los + vel_los / (self.Hubble_h * 100.0)
    tot_gals = len(pos_x)

    (counts, binedges) = np.histogram(vel_los / (self.Hubble_h * 100.0),
                                      range=(mi, ma), bins=NB)
    xaxeshisto = binedges[:-1] + 0.5 * binwidth
    plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'k-', label='los-velocity')

    (counts, binedges) = np.histogram(vel_x / (self.Hubble_h * 100.0),
                                      range=(mi, ma), bins=NB)
    xaxeshisto = binedges[:-1] + 0.5 * binwidth
    plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'r-', label='x-velocity')

    (counts, binedges) = np.histogram(vel_y / (self.Hubble_h * 100.0),
                                      range=(mi, ma), bins=NB)
    xaxeshisto = binedges[:-1] + 0.5 * binwidth
    plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'g-', label='y-velocity')

    (counts, binedges) = np.histogram(vel_z / (self.Hubble_h * 100.0),
                                      range=(mi, ma), bins=NB)
    xaxeshisto = binedges[:-1] + 0.5 * binwidth
    plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'b-', label='z-velocity')

    plt.yscale('log', nonposy='clip')
    plt.axis([mi, ma, 1e-5, 0.5])
    # plt.axis([mi, ma, 0, 0.13])
    plt.ylabel(r'$\mathrm{Box\ Normalised\ Count}$')  # Set the y...
    plt.xlabel(r'$\mathrm{Velocity / H}_{0}$')  # and the x-axis labels
    leg = plt.legend(loc='upper left', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')
    outputFile = OutputDir + '11.VelocityDistribution' + OutputFormat
    plt.savefig(outputFile)  # Save the figure
    print('Saved file to', outputFile)
    plt.close()
    # Add this plot to our output list
    OutputList.append(outputFile)
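# The counts / binwidth / tot_gals normalisation used repeatedly above is
# what np.histogram computes directly with density=True, provided every
# sample falls inside `range`. A small self-contained sketch of that
# equivalence on synthetic velocities (the data here are illustrative only).
import numpy as np

rng = np.random.default_rng(2222)
v = rng.normal(0.0, 5.0, size=10000)    # synthetic velocities

counts, edges = np.histogram(v, range=(-40.0, 40.0), bins=160)
manual = counts / 0.5 / len(v)          # binwidth = 0.5, as in the function
density, _ = np.histogram(v, range=(-40.0, 40.0), bins=160, density=True)
assert np.allclose(manual, density)     # identical when no sample is clipped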
def GasMassFunction(self, G):
    print('Plotting the cold gas mass function')
    plt.figure()  # New figure
    ax = plt.subplot(111)  # 1 plot on the figure
    binwidth = 0.1  # mass function histogram bin width

    # calculate all
    w = np.where(G.ColdGas > 0.0)[0]
    mass = np.log10(G.ColdGas[w] * 1.0e10 / self.Hubble_h)
    sSFR = (G.SfrDisk[w] + G.SfrBulge[w]) / (G.StellarMass[w] * 1.0e10 / self.Hubble_h)
    mi = np.floor(min(mass)) - 2
    ma = np.floor(max(mass)) + 2
    NB = (ma - mi) / binwidth
    (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB)
    # Set the x-axis values to be the centre of the bins
    xaxeshisto = binedges[:-1] + 0.5 * binwidth

    # additionally calculate red
    w = np.where(sSFR < 10.0**sSFRcut)[0]
    massRED = mass[w]
    (countsRED, binedges) = np.histogram(massRED, range=(mi, ma), bins=NB)

    # additionally calculate blue
    w = np.where(sSFR > 10.0**sSFRcut)[0]
    massBLU = mass[w]
    (countsBLU, binedges) = np.histogram(massBLU, range=(mi, ma), bins=NB)

    # Baldry+ 2008 modified data used for the MCMC fitting
    Zwaan = np.array([[6.933, -0.333], [7.057, -0.490], [7.209, -0.698],
                      [7.365, -0.667], [7.528, -0.823], [7.647, -0.958],
                      [7.809, -0.917], [7.971, -0.948], [8.112, -0.927],
                      [8.263, -0.917], [8.404, -1.062], [8.566, -1.177],
                      [8.707, -1.177], [8.853, -1.312], [9.010, -1.344],
                      [9.161, -1.448], [9.302, -1.604], [9.448, -1.792],
                      [9.599, -2.021], [9.740, -2.406], [9.897, -2.615],
                      [10.053, -3.031], [10.178, -3.677], [10.335, -4.448],
                      [10.492, -5.083]], dtype=np.float32)
    ObrRaw = np.array([[7.300, -1.104], [7.576, -1.302], [7.847, -1.250],
                       [8.133, -1.240], [8.409, -1.344], [8.691, -1.479],
                       [8.956, -1.792], [9.231, -2.271], [9.507, -3.198],
                       [9.788, -5.062]], dtype=np.float32)
    ObrCold = np.array([[8.009, -1.042], [8.215, -1.156], [8.409, -0.990],
                        [8.604, -1.156], [8.799, -1.208], [9.020, -1.333],
                        [9.194, -1.385], [9.404, -1.552], [9.599, -1.677],
                        [9.788, -1.812], [9.999, -2.312], [10.172, -2.656],
                        [10.362, -3.500], [10.551, -3.635], [10.740, -5.010]],
                       dtype=np.float32)

    ObrCold_xval = np.log10(10**(ObrCold[:, 0]) / self.Hubble_h / self.Hubble_h)
    ObrCold_yval = (10**(ObrCold[:, 1]) * self.Hubble_h * self.Hubble_h * self.Hubble_h)
    Zwaan_xval = np.log10(10**(Zwaan[:, 0]) / self.Hubble_h / self.Hubble_h)
    Zwaan_yval = (10**(Zwaan[:, 1]) * self.Hubble_h * self.Hubble_h * self.Hubble_h)
    ObrRaw_xval = np.log10(10**(ObrRaw[:, 0]) / self.Hubble_h / self.Hubble_h)
    ObrRaw_yval = (10**(ObrRaw[:, 1]) * self.Hubble_h * self.Hubble_h * self.Hubble_h)

    plt.plot(ObrCold_xval, ObrCold_yval, color='black', lw=7, alpha=0.25,
             label='Obr. \& Raw. 2009 (Cold Gas)')
    plt.plot(Zwaan_xval, Zwaan_yval, color='cyan', lw=7, alpha=0.25,
             label='Zwaan et al. 2005 (HI)')
    plt.plot(ObrRaw_xval, ObrRaw_yval, color='magenta', lw=7, alpha=0.25,
             label='Obr. \& Raw. 2009 (H2)')

    # Overplot the model histograms
    plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h * self.Hubble_h *
             self.Hubble_h / binwidth, 'k-', label='Model - Cold Gas')

    plt.yscale('log', nonposy='clip')
    plt.axis([8.0, 11.5, 1.0e-6, 1.0e-1])
    # Set the x-axis minor ticks
    ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1))
    plt.ylabel(r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$')  # Set the y...
    plt.xlabel(r'$\log_{10} M_{\mathrm{X}}\ (M_{\odot})$')  # and the x-axis labels
    leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')
    outputFile = OutputDir + '3.GasMassFunction' + OutputFormat
    plt.savefig(outputFile)  # Save the figure
    print('Saved file to', outputFile)
    plt.close()
    # Add this plot to our output list
    OutputList.append(outputFile)
                                    'z', 'th', 'u', 'ug', 'v', 'vg'))
for k in range(kmax):
    proffile.write('{0:1.14E} {1:1.14E} {2:1.14E} {3:1.14E} {4:1.14E} {5:1.14E} \n'.format(
        z[k], th[k], u[k], ug[k], v[k], vg[k]))
proffile.close()

# write surface temperature
timefile = open('gabls4s3.time', 'w')
timefile.write('{0:^20s} {1:^20s} \n'.format('t', 'sbot[th]'))
for t in range(s3.t.size):
    timefile.write('{0:1.14E} {1:1.14E} \n'.format(s3.t[t], s3.ths[t]))
timefile.close()

# Plot
pl.close('all')
pl.figure()
pl.subplot(221)
pl.plot(th, z, 'k-', label='mhh')
pl.plot(s3.th, s3.z, 'go', mfc='none', label='s3')
pl.ylim(0, 1100)
pl.xlim(270, 285)
pl.legend(frameon=False, loc=2)
pl.subplot(222)
pl.plot(u, z, 'k-', label='mhh')
pl.plot(s3.u, s3.z, 'go', mfc='none', label='s3')
pl.plot(ug, z, 'k--', label='mhh')
pl.plot(s3.ug, s3.z, 'bo', mfc='none', label='s3')
pl.ylim(0, 1100)
def StellarMassFunction(self, G):

    print('Plotting the stellar mass function')

    plt.figure()  # New figure
    ax = plt.subplot(111)  # 1 plot on the figure

    binwidth = 0.1  # mass function histogram bin width

    # calculate all
    w = np.where(G.StellarMass > 0.0)[0]
    mass = np.log10(G.StellarMass[w] * 1.0e10 / self.Hubble_h)
    sSFR = (G.SfrDisk[w] + G.SfrBulge[w]) / (G.StellarMass[w] * 1.0e10 /
                                             self.Hubble_h)

    mi = np.floor(min(mass)) - 2
    ma = np.floor(max(mass)) + 2
    NB = int((ma - mi) / binwidth)  # np.histogram expects an integer bin count

    (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB)

    # Set the x-axis values to be the centre of the bins
    xaxeshisto = binedges[:-1] + 0.5 * binwidth

    # additionally calculate red
    w = np.where(sSFR < 10.0**sSFRcut)[0]
    massRED = mass[w]
    (countsRED, binedges) = np.histogram(massRED, range=(mi, ma), bins=NB)

    # additionally calculate blue
    w = np.where(sSFR > 10.0**sSFRcut)[0]
    massBLU = mass[w]
    (countsBLU, binedges) = np.histogram(massBLU, range=(mi, ma), bins=NB)

    # Baldry+ 2008 modified data used for the MCMC fitting
    Baldry = np.array([
        [7.05, 1.3531e-01, 6.0741e-02],
        [7.15, 1.3474e-01, 6.0109e-02],
        [7.25, 2.0971e-01, 7.7965e-02],
        [7.35, 1.7161e-01, 3.1841e-02],
        [7.45, 2.1648e-01, 5.7832e-02],
        [7.55, 2.1645e-01, 3.9988e-02],
        [7.65, 2.0837e-01, 4.8713e-02],
        [7.75, 2.0402e-01, 7.0061e-02],
        [7.85, 1.5536e-01, 3.9182e-02],
        [7.95, 1.5232e-01, 2.6824e-02],
        [8.05, 1.5067e-01, 4.8824e-02],
        [8.15, 1.3032e-01, 2.1892e-02],
        [8.25, 1.2545e-01, 3.5526e-02],
        [8.35, 9.8472e-02, 2.7181e-02],
        [8.45, 8.7194e-02, 2.8345e-02],
        [8.55, 7.0758e-02, 2.0808e-02],
        [8.65, 5.8190e-02, 1.3359e-02],
        [8.75, 5.6057e-02, 1.3512e-02],
        [8.85, 5.1380e-02, 1.2815e-02],
        [8.95, 4.4206e-02, 9.6866e-03],
        [9.05, 4.1149e-02, 1.0169e-02],
        [9.15, 3.4959e-02, 6.7898e-03],
        [9.25, 3.3111e-02, 8.3704e-03],
        [9.35, 3.0138e-02, 4.7741e-03],
        [9.45, 2.6692e-02, 5.5029e-03],
        [9.55, 2.4656e-02, 4.4359e-03],
        [9.65, 2.2885e-02, 3.7915e-03],
        [9.75, 2.1849e-02, 3.9812e-03],
        [9.85, 2.0383e-02, 3.2930e-03],
        [9.95, 1.9929e-02, 2.9370e-03],
        [10.05, 1.8865e-02, 2.4624e-03],
        [10.15, 1.8136e-02, 2.5208e-03],
        [10.25, 1.7657e-02, 2.4217e-03],
        [10.35, 1.6616e-02, 2.2784e-03],
        [10.45, 1.6114e-02, 2.1783e-03],
        [10.55, 1.4366e-02, 1.8819e-03],
        [10.65, 1.2588e-02, 1.8249e-03],
        [10.75, 1.1372e-02, 1.4436e-03],
        [10.85, 9.1213e-03, 1.5816e-03],
        [10.95, 6.1125e-03, 9.6735e-04],
        [11.05, 4.3923e-03, 9.6254e-04],
        [11.15, 2.5463e-03, 5.0038e-04],
        [11.25, 1.4298e-03, 4.2816e-04],
        [11.35, 6.4867e-04, 1.6439e-04],
        [11.45, 2.8294e-04, 9.9799e-05],
        [11.55, 1.0617e-04, 4.9085e-05],
        [11.65, 3.2702e-05, 2.4546e-05],
        [11.75, 1.2571e-05, 1.2571e-05],
        [11.85, 8.4589e-06, 8.4589e-06],
        [11.95, 7.4764e-06, 7.4764e-06],
    ], dtype=np.float32)

    # Finally plot the data
    # plt.errorbar(
    #     Baldry[:, 0],
    #     Baldry[:, 1],
    #     yerr=Baldry[:, 2],
    #     color='g',
    #     linestyle=':',
    #     lw=1.5,
    #     label='Baldry et al. 2008',
    # )

    Baldry_xval = np.log10(10**Baldry[:, 0] / self.Hubble_h / self.Hubble_h)
    if whichimf == 1:
        Baldry_xval = Baldry_xval - 0.26  # convert back to Chabrier IMF
    Baldry_yvalU = (Baldry[:, 1] + Baldry[:, 2]) * \
        self.Hubble_h * self.Hubble_h * self.Hubble_h
    Baldry_yvalL = (Baldry[:, 1] - Baldry[:, 2]) * \
        self.Hubble_h * self.Hubble_h * self.Hubble_h

    plt.fill_between(Baldry_xval, Baldry_yvalU, Baldry_yvalL,
                     facecolor='purple', alpha=0.25,
                     label='Baldry et al. 2008 (z=0.1)')

    # This next line is just to get the shaded region to appear correctly
    # in the legend
    plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h *
             self.Hubble_h * self.Hubble_h / binwidth,
             label='Baldry et al. 2008', color='purple', alpha=0.3)

    # # Cole et al. 2001 SMF (h=1.0 converted to h=0.73)
    # M = np.arange(7.0, 13.0, 0.01)
    # Mstar = np.log10(7.07*1.0e10 /self.Hubble_h/self.Hubble_h)
    # alpha = -1.18
    # phistar = 0.009 *self.Hubble_h*self.Hubble_h*self.Hubble_h
    # xval = 10.0 ** (M-Mstar)
    # yval = np.log(10.) * phistar * xval ** (alpha+1) * np.exp(-xval)
    # plt.plot(M, yval, 'g--', lw=1.5, label='Cole et al. 2001')

    # Plot the SMF
    # Overplot the model histograms
    plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h *
             self.Hubble_h * self.Hubble_h / binwidth,
             'k-', label='Model - All')
    plt.plot(xaxeshisto, countsRED / self.volume * self.Hubble_h *
             self.Hubble_h * self.Hubble_h / binwidth,
             'r:', lw=2, label='Model - Red')
    plt.plot(xaxeshisto, countsBLU / self.volume * self.Hubble_h *
             self.Hubble_h * self.Hubble_h / binwidth,
             'b:', lw=2, label='Model - Blue')

    plt.yscale('log', nonposy='clip')
    plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1])

    # Set the x-axis minor ticks
    ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1))

    plt.ylabel(r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$')  # Set the y-axis label
    plt.xlabel(r'$\log_{10} M_{\mathrm{stars}}\ (M_{\odot})$')  # and the x-axis label

    plt.text(12.2, 0.03, whichsimulation, size='large')

    leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')

    outputFile = OutputDir + '1.StellarMassFunction' + OutputFormat
    plt.savefig(outputFile)  # Save the figure
    print('Saved file to', outputFile)
    plt.close()

    # Add this plot to our output list
    OutputList.append(outputFile)
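# The commented-out Cole et al. 2001 overlay above encodes a Schechter
# function in log-mass; here is that form pulled out as a standalone sketch.
# The parameter values are the ones quoted in that comment, with
# Hubble_h = 0.73 assumed for illustration.
import numpy as np

def schechter_logmf(M, Mstar, alpha, phistar):
    # phi(M) per dex: ln(10) * phistar * x**(alpha+1) * exp(-x), x = 10**(M-Mstar)
    x = 10.0 ** (M - Mstar)
    return np.log(10.0) * phistar * x ** (alpha + 1) * np.exp(-x)

h = 0.73
M = np.arange(7.0, 13.0, 0.01)
yval = schechter_logmf(M, np.log10(7.07e10 / h / h), -1.18, 0.009 * h ** 3)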
def generate_subject_stats_report(
        stats_report_filename, contrasts, z_maps, mask,
        design_matrices=None, subject_id=None, anat=None,
        display_mode="z", cut_coords=None, threshold=2.3, cluster_th=15,
        start_time=None, title=None, user_script_name=None,
        progress_logger=None, shutdown_all_reloaders=True, **glm_kwargs):
    """Generates a report summarizing the statistical methods and results

    Parameters
    ----------
    stats_report_filename: string
        html file to which output (generated html) will be written

    contrasts: dict of arrays
        dictionary of contrasts of interest; the keys are the contrast ids,
        the values are contrast values (lists); same keys as 'z_maps'

    z_maps: dict of 3D image objects or strings (image filenames)
        dict with same keys as 'contrasts'; the values are paths of z-maps
        for the respective contrasts

    mask: 'nifti image object'
        brain mask for ROI

    design_matrices: list of 'DesignMatrix', `numpy.ndarray` objects or of
    strings (.png, .npz, etc.) for filenames
        design matrices for the experimental conditions

    subject_id: string (optional)
        subject id; appended to the page title if given

    anat: 3D image object (optional)
        brain image to serve as background onto which activation maps
        will be plotted

    display_mode: string (optional, default "z")
        direction of the cuts in the activation plots

    cut_coords: (optional)
        coordinates of the cuts in the activation plots

    threshold: float (optional)
        threshold to be applied to activation maps voxel-wise

    cluster_th: int (optional)
        minimal voxel count for clusters declared as 'activated'

    start_time: string (optional)
        start time for the stats analysis (useful for the generated
        report page)

    title: string (optional)
        title of the generated report page

    user_script_name: string (optional, default None)
        existing filename, path to user script used in doing the analysis

    progress_logger: ProgressLogger object (optional)
        handle for logging progress

    shutdown_all_reloaders: bool (optional, default True)
        if True, all pages connected to the stats report page will be
        prevented from reloading after the stats report page has been
        completely generated

    **glm_kwargs:
        kwargs used to specify the control parameters used to specify the
        experimental paradigm and the GLM

    """
    # prepare for stats reporting
    if progress_logger is None:
        progress_logger = base_reporter.ProgressReport()

    output_dir = os.path.dirname(stats_report_filename)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # copy css and js stuff to output dir
    base_reporter.copy_web_conf_files(output_dir)

    # initialize gallery of design matrices
    design_thumbs = base_reporter.ResultsGallery(
        loader_filename=os.path.join(output_dir, "design.html"))

    # initialize gallery of activation maps
    activation_thumbs = base_reporter.ResultsGallery(
        loader_filename=os.path.join(output_dir, "activation.html"))

    # get caller module handle from stack-frame
    if user_script_name is None:
        user_script_name = sys.argv[0]
    user_source_code = base_reporter.get_module_source_code(
        user_script_name)

    methods = """
    GLM and Statistical Inference have been done using the <i>%s</i> script, \
powered by <a href="%s">nistats</a>.""" % (user_script_name,
                                           base_reporter.NISTATS_URL)

    # report the control parameters used in the paradigm and analysis
    design_params = ""
    glm_kwargs["contrasts"] = contrasts
    if len(glm_kwargs):
        design_params += ("The following control parameters were used for "
                          " specifying the experimental paradigm and fitting "
                          "the GLM:<br/><ul>")

        # reshape glm_kwargs['paradigm']
        if "paradigm" in glm_kwargs:
            paradigm_ = glm_kwargs['paradigm']
            paradigm = {'name': paradigm_['name'],
                        'onset': paradigm_['onset']}
            if 'duration' in paradigm_.keys():
                paradigm['duration'] = paradigm_['duration']
            paradigm['n_conditions'] = len(set(paradigm['name']))
            paradigm['n_events'] = len(paradigm['name'])
            paradigm['type'] = 'event'
            if 'duration' in paradigm.keys() and paradigm['duration'][0] > 0:
                paradigm['type'] = 'block'
            glm_kwargs['paradigm'] = paradigm

        design_params += base_reporter.dict_to_html_ul(glm_kwargs)

    if start_time is None:
        start_time = base_reporter.pretty_time()

    if title is None:
        title = "GLM and Statistical Inference"
        if subject_id is not None:
            title += " for subject %s" % subject_id

    level1_html_markup = base_reporter.get_subject_report_stats_html_template(
        title=title,
        start_time=start_time,
        subject_id=subject_id,
        # insert source code stub
        source_script_name=user_script_name,
        source_code=user_source_code,
        design_params=design_params,
        methods=methods,
        threshold=threshold)

    with open(stats_report_filename, 'w') as fd:
        fd.write(str(level1_html_markup))

    progress_logger.log("<b>Level 1 statistics</b><br/><br/>")

    # create design matrix thumbs
    if design_matrices is not None:
        if not hasattr(design_matrices, '__len__'):
            design_matrices = [design_matrices]
        for j, design_matrix in enumerate(design_matrices):
            # Nistats: design matrices should be strings or pandas dataframes
            if isinstance(design_matrix, str):
                # XXX should be a DataFrame pickle here ?
                print(design_matrix)
                design_matrix = pd.read_pickle(design_matrix)
            elif not isinstance(design_matrix, pd.DataFrame):
                raise TypeError(
                    "Unsupported design matrix type: %s" % type(
                        design_matrix))

            # plot design_matrix proper
            ax = plot_design_matrix(design_matrix)
            ax.set_position([.05, .25, .9, .65])
            dmat_outfile = os.path.join(output_dir,
                                        'design_matrix_%i.png' % (j + 1))
            pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)
            pl.close()

            thumb = base_reporter.Thumbnail()
            thumb.a = base_reporter.a(href=os.path.basename(dmat_outfile))
            thumb.img = base_reporter.img(src=os.path.basename(dmat_outfile),
                                          height="500px")
            thumb.description = "Design Matrix"
            thumb.description += " %s" % (j + 1) if len(
                design_matrices) > 1 else ""

            # commit design matrix thumbnail into gallery
            design_thumbs.commit_thumbnails(thumb)

    # create activation thumbs
    for contrast_id, contrast_val in contrasts.items():
        z_map = z_maps[contrast_id]

        # load the map
        if isinstance(z_map, str):
            z_map = nibabel.load(z_map)

        # generate level 1 stats table
        title = "Level 1 stats for %s contrast" % contrast_id
        stats_table = os.path.join(output_dir,
                                   "%s_stats_table.html" % contrast_id)
        generate_level1_stats_table(
            z_map, mask, stats_table, cluster_th=cluster_th,
            z_threshold=threshold, title=title)

        # plot activation proper
        # XXX: nilearn's plotting bugs out about rotations in affine, etc.
        z_map = reorder_img(z_map, resample="continuous")
        if anat is not None:
            anat = reorder_img(anat, resample="continuous")
        plot_stat_map(z_map, anat, threshold=threshold,
                      display_mode=display_mode, cut_coords=cut_coords,
                      black_bg=True)

        # store activation plot
        z_map_plot = os.path.join(output_dir, "%s_z_map.png" % contrast_id)
        pl.savefig(z_map_plot, dpi=200, bbox_inches='tight',
                   facecolor="k", edgecolor="k")
        pl.close()

        # create thumbnail for activation
        thumbnail = base_reporter.Thumbnail(
            tooltip="Contrast vector: %s" % contrast_val)
        thumbnail.a = base_reporter.a(href=os.path.basename(stats_table))
        thumbnail.img = base_reporter.img(src=os.path.basename(z_map_plot),
                                          height="150px")
        thumbnail.description = contrast_id
        activation_thumbs.commit_thumbnails(thumbnail)

    # we're done, shut down re-loaders
    progress_logger.log('<hr/>')

    # prevent stats report page from reloading henceforth
    progress_logger.finish(stats_report_filename)

    # prevent any related page from reloading
    if shutdown_all_reloaders:
        progress_logger.finish_dir(output_dir)

    # return generated html
    with open(stats_report_filename, 'r') as fd:
        stats_report = fd.read()

    return stats_report
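# Hypothetical invocation (sketch): every path, key and contrast vector
# below is a placeholder for illustration, not from the original source;
# the z-map and mask paths would have to point at real NIfTI images.
report = generate_subject_stats_report(
    "results/report_stats.html",
    contrasts={"task-rest": [1, 0]},
    z_maps={"task-rest": "results/task-rest_z_map.nii.gz"},
    mask="results/mask.nii.gz",
    design_matrices=["results/design_matrix.pkl"],
    subject_id="sub-01",
    threshold=2.3,
    cluster_th=15)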
def threshold(self, x_train, y_train, x_valid, y_valid, plot_graph=True): """ Obtain optimal threshold using FBeta as parameter using a range (0.1, 1.0, 200) for evaluation """ if self.sampling is None: class_weight = self.class_weight elif self.sampling == 'ALLKNN': x_train, y_train = under_sampling(x_train, y_train) class_weight = None else: x_train, y_train = over_sampling(x_train, y_train, model=self.sampling) class_weight = None if isinstance(x_train, pd.DataFrame): x_train = x_train.values if isinstance(y_train, (pd.DataFrame, pd.Series)): y_train = y_train.values if isinstance(x_valid, pd.DataFrame): x_valid = x_valid.values if isinstance(y_valid, (pd.DataFrame, pd.Series)): y_valid = y_valid.values min_sample_leaf = round(x_train.shape[0] * 0.01) min_sample_split = min_sample_leaf * 10 max_features = None file_model = ensemble.ExtraTreesClassifier(criterion='gini', bootstrap=self.bootstrap, min_samples_leaf=min_sample_leaf, min_samples_split=min_sample_split, n_estimators=self.n_estimators, max_depth=self.max_depth, max_features=max_features, oob_score=self.oob_score, random_state=531, verbose=1, class_weight=class_weight, n_jobs=1) cv = StratifiedKFold(n_splits=10, random_state=None) file_model.fit(x_train, y_train) thresholds = np.linspace(0.1, 1.0, 200) scores = [] y_pred_score = cross_val_predict(file_model, x_valid, y_valid, cv=cv, method='predict_proba') y_pred_score = np.delete(y_pred_score, 0, axis=1) for threshold in thresholds: y_hat = (y_pred_score > threshold).astype(int) y_hat = y_hat.tolist() y_hat = [item for sublist in y_hat for item in sublist] scores.append([ recall_score(y_pred=y_hat, y_true=y_valid), precision_score(y_pred=y_hat, y_true=y_valid), fbeta_score(y_pred=y_hat, y_true=y_valid, beta=self.beta, average=self.metric_weight)]) scores = np.array(scores) if plot_graph: plot.plot(thresholds, scores[:, 0], label='$Recall$') plot.plot(thresholds, scores[:, 1], label='$Precision$') plot.plot(thresholds, scores[:, 2], label='$F_2$') plot.ylabel('Score') plot.xlabel('Threshold') plot.legend(loc='best') plot.close() self.final_threshold = thresholds[scores[:, 2].argmax()] print(self.final_threshold) return self.final_threshold
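# The final threshold pick above, in isolation (sketch): given one
# [recall, precision, f-beta] row per candidate threshold, keep the
# threshold that maximises the f-beta column. The random scores below are
# only a stand-in for the cross-validated scores computed in the method.
import numpy as np

thresholds = np.linspace(0.1, 1.0, 200)
scores = np.random.rand(200, 3)  # placeholder for [recall, precision, fbeta]
best_threshold = thresholds[scores[:, 2].argmax()]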
def sample(ndim, nwalkers, nsteps, burnin, start, ur, sigma_ur, nuvu,
           sigma_nuvu, age, id, ra, dec, get_c_one, use_table, thegrid,
           lu=None, savedir="./"):
    """ Function to implement the emcee EnsembleSampler function for the
        sample of galaxies input. The burn-in is run for the length
        specified before the sampler is reset and then run for the number
        of steps specified.

        :ndim: The number of parameters in the model that emcee must find.
            In this case it is always 2, with tq and tau.
        :nwalkers: The number of walkers that step around the parameter
            space. Must be an even integer number larger than ndim.
        :nsteps: The number of steps to take in the final run of the MCMC
            sampler. Integer.
        :burnin: The number of steps to take in the initial burn-in run of
            the MCMC sampler. Integer.
        :start: The positions in the tq and tau parameter space to start
            for both disc and smooth parameters. An array of shape (1,4).
        :ur: Observed u-r colour of a galaxy; k-corrected. An array of
            shape (N,1) or (N,).
        :sigma_ur: Error on the observed u-r colour of a galaxy. An array
            of shape (N,1) or (N,).
        :nuvu: Observed nuv-u colour of a galaxy; k-corrected. An array of
            shape (N,1) or (N,).
        :sigma_nuvu: Error on the observed nuv-u colour of a galaxy. An
            array of shape (N,1) or (N,).
        :age: Observed age of a galaxy, often calculated from the redshift
            i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An
            array of shape (N,1) or (N,).
        :id: ID number to specify which galaxy this run is for.
        :ra: right ascension of source, used for identification purposes
        :dec: declination of source, used for identification purposes
        :get_c_one: Function passed through to the lnprob likelihood when
            evaluating each set of parameters.
        :use_table: If True, interpolation functions for the predicted
            colours are built from the look-up table values in :lu:.
        :thegrid: Tuple (tq, tau, ages) of grid axes for the look-up table.
        :lu: Look-up table values matching :thegrid: (only used when
            :use_table: is True).
        :savedir: Directory the sample and ln-probability arrays are
            saved to.

        RETURNS:
        :samples: Array of shape (nsteps*nwalkers, 4) containing the
            positions of the walkers at all steps for all 4 parameters.
        :samples_save: Location at which the :samples: array was saved to.
    """
    tq, tau, ages = thegrid
    grid = N.array(list(product(ages, tau, tq)))
    if use_table:
        global u
        global v
        a = N.searchsorted(ages, age)
        b = N.array([a - 1, a])
        print 'interpolating function, bear with...'
        g = grid[N.where(N.logical_or(grid[:, 0] == ages[b[0]],
                                      grid[:, 0] == ages[b[1]]))]
        values = lu[N.where(N.logical_or(grid[:, 0] == ages[b[0]],
                                         grid[:, 0] == ages[b[1]]))]
        f = LinearNDInterpolator(g, values, fill_value=(-N.inf))
        look = f(age, grid[:10000, 1], grid[:10000, 2])
        lunuv = look[:, 0].reshape(100, 100)
        v = interp2d(tq, tau, lunuv)
        luur = look[:, 1].reshape(100, 100)
        u = interp2d(tq, tau, luur)
    else:
        pass
    print 'emcee running...'
    p0 = [start + 1e-4 * N.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=2,
                                    args=(ur, sigma_ur, nuvu, sigma_nuvu,
                                          age, get_c_one))
    """ Burn in run here..."""
    pos, prob, state = sampler.run_mcmc(p0, burnin)
    lnp = sampler.flatlnprobability
    N.save(savedir + 'lnprob_burnin_' + str(int(id)) + '_' + str(ra) +
           '_' + str(dec) + '_' + str(time.strftime('%H_%M_%d_%m_%y')) +
           '.npy', lnp)
    samples = sampler.chain[:, :, :].reshape((-1, ndim))
    samples_save = savedir + 'samples_burn_in_' + str(int(id)) + '_' + \
        str(ra) + '_' + str(dec) + '_' + \
        str(time.strftime('%H_%M_%d_%m_%y')) + '.npy'
    N.save(samples_save, samples)
    sampler.reset()
    print 'Burn in complete...'
""" Main sampler run here...""" sampler.run_mcmc(pos, nsteps) lnpr = sampler.flatlnprobability N.save( savedir + 'lnprob_run_' + str(int(id)) + '_' + str(ra) + '_' + str(dec) + '_' + str(time.strftime('%H_%M_%d_%m_%y')) + '.npy', lnpr) samples = sampler.chain[:, :, :].reshape((-1, ndim)) samples_save = savedir + 'samples_' + str( int(id)) + '_' + str(ra) + '_' + str(dec) + '_' + str( time.strftime('%H_%M_%d_%m_%y')) + '.npy' N.save(samples_save, samples) print 'Main emcee run completed.' P.close('all') P.clf() P.cla() return samples, samples_save
bgr = histogram_equalize_hsv(frame2, size) cv2.imshow("equalizeHist_hsv", bgr) cv2.imwrite("equalizeHist_hsv.jpg", bgr) bgr = histogram_equalize_treat(frame2, size) cv2.imshow("equalize_treat", bgr) cv2.imwrite("equalizeHist_treat.jpg", bgr) fig, ax = plt.subplots(2, 3, figsize=(12, 4)) histOrgY, histLutY = something(frame, frame2, 0) plot_hist(histOrgY, histLutY, 0, 0, "frame", "frame2") histOrgY, histLutY = something(frame, frame2, 1) plot_hist(histOrgY, histLutY, 0, 1, "frame", "frame2") histOrgY, histLutY = something(frame, frame2, 2) plot_hist(histOrgY, histLutY, 0, 2, "frame", "frame2") #plt.pause(1) #plt.close() #fig, ax = plt.subplots(1, 3, figsize=(12, 4)) histOrgY, histLutY = something(frame, bgr, 0) plot_hist(histOrgY, histLutY, 1, 0, "frame", "bgr") histOrgY, histLutY = something(frame, bgr, 1) plot_hist(histOrgY, histLutY, 1, 1, "frame", "bgr") histOrgY, histLutY = something(frame, bgr, 2) plot_hist(histOrgY, histLutY, 1, 2, "frame", "bgr") plt.show() plt.close() k = cv2.waitKey(30) & 0xff if k == 27: break
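# plot_hist is not defined in this excerpt; a minimal call-compatible
# sketch (assumes the 2x3 `ax` grid created by plt.subplots above and
# 1-D histograms returned by `something`):
def plot_hist(hist_org, hist_lut, row, col, label_org, label_lut):
    ax[row, col].plot(hist_org, label=label_org)
    ax[row, col].plot(hist_lut, label=label_lut)
    ax[row, col].legend(loc='upper right', fontsize=6)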
def lin_fit(crvfile, refcrvfile, outname, dump_frequency, Er, Ed, recoil_relaxation_time=10000, start_timeoffset=500): crv = CRV.CRV(crvfile)[0] eps = crv['lz'] x = crv['step'] pyy = crv['pyy'] n_atoms = crv['atoms'][0] crv1 = CRV.CRV(refcrvfile)[0] eps_ref = crv1['lz'] # eps = np.subtract(eps,eps_ref) print eps nrecoils = [] #pressure tensor component pxx = [] ezz = [] ezz_ref = [] dump_factor = int(recoil_relaxation_time / dump_frequency) pressure_conversion = 1e9 #GPa -> Pa #reduce timeaxis to recoil axis for i in range(len(x) / dump_factor): nrecoils.append((x[i * dump_factor] - start_timeoffset) / float(recoil_relaxation_time)) pxx.append(pyy[i * dump_factor] * pressure_conversion) # ezz.append(((eps[i*dump_factor] - eps[0])/eps[0] - (eps_ref[i*dump_factor] - eps_ref[0])/eps_ref[0])) # ezz.append(abs( (eps[i*dump_factor] -eps_ref[i*dump_factor])/ (eps[0] - eps_ref[0]))) ezz.append(((-eps[dump_factor] + eps[i * dump_factor]))) ezz_ref.append(((-eps_ref[dump_factor] + eps_ref[i * dump_factor]))) del nrecoils[0] del pxx[0] del ezz[0] del ezz_ref[0] #number of displacements per target atom ndpa = np.multiply(nrecoils, (Er / (2.5 * Ed * n_atoms))) fit_start = 0 fit_end = 20 dezz = np.multiply(np.subtract(ezz, ezz_ref), 1. / eps[dump_factor]) #fit1 popt1, pcov1 = opt.curve_fit( lambda ndpa, eta, offset: eta_lin(ndpa, pxx[0], eta, offset), ndpa[fit_start:fit_end], dezz[fit_start:fit_end]) #popt1 = [1,1] #fit2 # popt2, pcov2 = opt.curve_fit( lambda ndpa,eta,offset: eta_lin(ndpa,pxx[0],eta,offset), ndpa[0:fit_start], ezz[0:fit_start]) #popt2 = [1,1] #anotate to add value to plot # print 'RIV (lin)= {:.4e}'.format(popt1[0]) # npa interpolated ndpa_interp = np.linspace(0, ndpa[-1] * 1.5, 1000) fig = plt.figure(1, figsize=fsize) plt.xlim(0, ndpa_interp[-1]) # plt.ylim(ezz[-1]*0.8, ezz[-1]*1.1) plt.grid() plt.plot(ndpa[fit_start:fit_end], dezz[fit_start:fit_end], 'bs', markeredgecolor='blue', markerfacecolor='None', markeredgewidth=mew, markersize=ms, label='MD Simulation') # plt.plot(ndpa, ezz_ref, 'bs', markeredgecolor = 'magenta', markerfacecolor= 'None', markeredgewidth=mew, markersize = ms, label = 'MD Simulation Reference') plt.plot(ndpa_interp, eta_lin(ndpa_interp, pxx[0], *popt1), 'r-', linewidth=lw, label='Fit') # plt.plot(ndpa_interp, eta_lin(ndpa_interp,pxx[0],*popt2), '-', color = 'black', linewidth = lw, label='Fit2') plt.xlabel('Number of displacements per atom') plt.ylabel(r'$ \Delta \varepsilon_{zz} $') # legtitle = r'$ \eta_{ri,1} = $' + '{:.4e}'.format(popt1[0]) + ' $ \mathrm{Pa \cdot dpa} $' + '\n' + r'$ \eta_{ri,2} = $' + '{:.4e}'.format(popt2[0]) + ' $ \mathrm{Pa \cdot dpa} $' + '\n'r'$\sigma_0 = $' + '{:.2e}'.format(abs(pxx[0])) + r'$\,\mathrm{Pa}$' + '\n' + r'$E_D = ' + '{:.1e}'.format(Ed) + 'eV $'+ '\n' + r'$ E_R = $' + '{:.1e}'.format(Er) + ' $ eV $' legtitle = r'$ \eta^\prime = $' + '{:.4e}'.format( popt1[0] ) + ' $ \mathrm{Pa \cdot dpa} $' + '\n' r'$\sigma_0 = $' + '{:.2e}'.format( abs(pxx[0]) ) + r'$\,\mathrm{Pa}$' + '\n' + r'$E_D = ' + '{:.1e}'.format( Ed) + '\mathrm{eV} $' + '\n' + r'$ E_R = $' + '{:.1e}'.format( Er) + ' $ \mathrm{eV} $' plt.legend(loc='best', shadow=False, title=legtitle, prop={'size': legpropsize}, numpoints=1) #every other tick label for label in plt.gca().xaxis.get_ticklabels()[::2]: label.set_visible(False) #plt.show() fig.tight_layout() fig.savefig(outname) print "Png file written to " + outname plt.close("all")
def validate(self, epoch): preds = [] dice_val = [] loss_val = [] if self.val_interval and ((epoch + 1) % self.val_interval == 0) and (epoch >= self.start_validation): if self.use_ADC: channel_idx = -2 else: channel_idx = -1 self.is_val = True for (i, idx) in enumerate(self.val_iter._batch_sampler): batch = nd.array(self.val_set[idx], ctx=self.ctx) if Training.use_ADC and Training.use_multi_branches: x1 = batch[:-1, -2] x2 = batch[:-1, [0, 1, -2]] else: x = batch[:-1, :-1] gt = batch[:-1, -1] tmp = batch[-1, -1, 0].asnumpy() sl_idx = np.reshape(tmp[tmp > -999], (x.shape[0], -1)).astype('int') count_ = np.zeros((sl_idx.max() + 1)) pred = np.zeros( (2, sl_idx.max() + 1, x.shape[-2], x.shape[-1])) gt_wp = np.zeros( (1, sl_idx.max() + 1, x.shape[-2], x.shape[-1])) x_wp = np.zeros( (2, sl_idx.max() + 1, x.shape[-2], x.shape[-1])) tt = np.zeros(sl_idx.max() + 1) for ii in range(x.shape[0]): if Training.use_ADC and Training.use_multi_branches: if np.all((-100 < sl_idx[ii]) & (sl_idx[ii] <= 0)): sl_idx[ii] = np.abs(sl_idx[ii]) pred[:, sl_idx[ii]] += np.flip(self.model.net( x1[ii:ii + 1], x2[ii:ii + 1])[0].asnumpy(), axis=-1) elif np.all((-200 < sl_idx[ii]) & (sl_idx[ii] <= -100)): sl_idx[ii] = np.abs(sl_idx[ii] + 100) pred[:, sl_idx[ii]] += np.flip(self.model.net( x1[ii:ii + 1], x2[ii:ii + 1])[0].asnumpy(), axis=-2) # pred_tmp = self.model.net(x[ii:ii + 1])[0].asnumpy() # pred[:, sl_idx[ii]] += ndimage.rotate(pred_tmp, angle=+5, axes=[-2, -1], reshape=False) elif np.all(sl_idx[ii] >= 0): pred[:, sl_idx[ii]] += self.model.net( x1[ii:ii + 1], x2[ii:ii + 1])[0].asnumpy() gt_wp[:, sl_idx[ii]] = gt[ii].asnumpy() x_wp[:, sl_idx[ii]] = x[ii, channel_idx].asnumpy() else: if np.all((-100 < sl_idx[ii]) & (sl_idx[ii] <= 0)): sl_idx[ii] = np.abs(sl_idx[ii]) pred[:, sl_idx[ii]] += np.flip(self.model.net( x[ii:ii + 1])[0].asnumpy(), axis=-1) elif np.all((-200 < sl_idx[ii]) & (sl_idx[ii] <= -100)): sl_idx[ii] = np.abs(sl_idx[ii] + 100) pred[:, sl_idx[ii]] += np.flip(self.model.net( x[ii:ii + 1])[0].asnumpy(), axis=-2) # pred_tmp = self.model.net(x[ii:ii + 1])[0].asnumpy() # pred[:, sl_idx[ii]] += ndimage.rotate(pred_tmp, angle=+5, axes=[-2, -1], reshape=False) elif np.all(sl_idx[ii] >= 0): pred[:, sl_idx[ii]] += self.model.net( x[ii:ii + 1])[0].asnumpy() gt_wp[:, sl_idx[ii]] = gt[ii].asnumpy() x_wp[:, sl_idx[ii]] = x[ii, channel_idx].asnumpy() tt[sl_idx[ii]] += 1 count_[sl_idx[ii]] += 1 # pred /= count_[np.newaxis, :, np.newaxis, np.newaxis] # x_wp /= count_[np.newaxis, :, np.newaxis, np.newaxis] tt /= count_ pred = post_proc(pred[np.newaxis].argmax(axis=1)[0]) # pred = pred[np.newaxis].argmax(axis=1)[0] gt_wp = nd.array(gt_wp) pred = nd.array(pred[np.newaxis]) dice_val.append(dice_wp(pred, gt_wp).expand_dims(0)) loss_val.append(self.loss(pred, gt_wp).expand_dims(0)) if self.show_val: pred = pred[0].asnumpy() fig = plt.figure(0) for jj in range(pred.shape[0]): plt.subplot(4, np.ceil(pred.shape[0] / 4), jj + 1) plt.imshow(x_wp[-1, jj], cmap='gray', vmin=0, vmax=1) if gt_wp[0, jj].sum() > 0: plt.contour(gt_wp[0, jj].asnumpy(), linewidths=.2) if pred[jj].sum() > 0: plt.contour(pred[jj], colors='r', linewidths=.2) plt.axis('off') plt.savefig('{:s}/{:04d}_{:02d}_{:.2f}.png'.format( self.dir_fig, epoch, i, dice_val[-1][0].asscalar()), dpi=350) plt.close('all') if self.display_img: plt.show() # if self.file_suffix is "_full": # pred_ = nd.zeros(shape=(pred.shape[0], pred.shape[1], 41, pred.shape[-1], pred.shape[-2])) # pred_[:, :, :pred.shape[2]] = pred # preds.append(pred_) # else: # preds.append(pred) # preds = 
nd.concat(*preds, dim=0).asnumpy() # dice_val = self.get_dice_wp(preds) logging.info(nd.concat(*dice_val)[0] * 100) logging.info((nd.concat(*dice_val)[0] * 100).mean()) return dice_val, loss_val
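# dice_wp is not defined in this excerpt; a minimal whole-patient Dice
# sketch that is call-compatible with the validation loop above (binary
# masks in, one score out; the epsilon guards against empty masks):
from mxnet import nd

def dice_wp(pred, gt, eps=1e-6):
    # Dice = 2|A n B| / (|A| + |B|), computed over the whole volume
    intersection = nd.sum(pred * gt)
    denominator = nd.sum(pred) + nd.sum(gt)
    return (2.0 * intersection + eps) / (denominator + eps)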
# each line of the classes_txt file
eachline = eachline.strip('\n')
f = open(DATA_JSON + rf"\{eachline}.ndjson", 'r')
for j in range(0, 10):
    line = f.readline()
    setting = json.loads(line)
    for i in range(0, len(setting['drawing'])):
        x = setting['drawing'][i][0]
        y = setting['drawing'][i][1]
        pl.plot(x, y, 'k')
    ax = pl.gca()
    ax.xaxis.set_ticks_position('top')
    ax.invert_yaxis()
    pl.axis('off')
    pl.savefig(rf"{BUTING_PATH}\code\static\dist\img\sp\{eachline}-{j}.png")
    pl.close()
    oldimg = cv2.imread(fr"{BUTING_PATH}\code\static\dist\img\sp\{eachline}-{j}.png",
                        cv2.IMREAD_GRAYSCALE)
    newimg = cv2.resize(oldimg, (200, 200), interpolation=cv2.INTER_CUBIC)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    # eroding the grayscale image grows the dark strokes on the white background
    dil = cv2.erode(newimg, kernel)
    plt.imsave(rf"{BUTING_PATH}\code\static\dist\img\sp\{eachline}-{j}.png",
               dil, cmap='gray')
f.close()
Log(f"{eachline} finished!")
f_data.close()
P.append(pdf(x))
for itr in tqdm(range(nb_iter)):
    px = pdf(x)
    x_ = transition(x)
    px_ = pdf(x_)
    r = px_ / px
    if r > 1 or (np.random.rand() < r):
        x = x_
    if itr > 0.30 * nb_iter:
        X.append(x)
        P.append(px_)
return X, P


if __name__ == "__main__":
    # Set up the parameters.
    max_x = 1
    max_y = 1

    X = []
    for k in range(5):
        # mcmc returns (samples, densities); keep only the samples
        X.extend(mcmc(pdf)[0])
    X = np.array(X)

    plt.ion()
    plt.close("all")
    plt.plot(X[:, 0], X[:, 1], alpha=0.2)
    plt.plot(X[:, 0], X[:, 1], ".")
    plt.xlim([0, 1])
    plt.ylim([0, 1])
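# pdf and transition are defined elsewhere in the script; a minimal pair
# that makes the Metropolis loop above runnable (sketch): a Gaussian bump
# on the unit square as the target and a symmetric random-walk proposal.
import numpy as np

def pdf(x):
    # unnormalised target density centred at (0.5, 0.5)
    return np.exp(-np.sum((np.asarray(x) - 0.5) ** 2) / (2 * 0.1 ** 2))

def transition(x):
    # symmetric Gaussian random-walk proposal
    return np.asarray(x) + 0.05 * np.random.randn(2)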
def PlotOutput(): x = len(LENGTH) y = len(CONTRAST) frameW = 1.5 frameH = 1.5 gapw = 0.05 * np.ones(x + 1) gapw[0] = 0.30 gaph = 0.05 * np.ones(y + 1) #gaph[0] = 0.30 gaph[4] = 0.30 fig = FigArray(frameW, frameH, gapw, gaph) (W, H) = fig.dimensions() plt.figure(1, figsize=(W, H)) objlist, objmaxlist = grab_files() ctr = 0 plt.text(-0.1, 1.09, "L = 1.0 pix", fontsize=14, rotation=0, color='k') plt.text(0.11, 1.09, "L = 1.5 pix", fontsize=14, rotation=0, color='k') plt.text(0.32, 1.09, "L = 2.0 pix", fontsize=14, rotation=0, color='k') plt.text(0.53, 1.09, "L = 2.5 pix", fontsize=14, rotation=0, color='k') plt.text(0.74, 1.09, "L = 3.0 pix", fontsize=14, rotation=0, color='k') plt.text(0.95, 1.09, "L = 4.0 pix", fontsize=14, rotation=0, color='k') plt.text(-0.152, 1, "C = 5 mag", fontsize=14, rotation=90, color='k') plt.text(-0.152, 0.7, "C = 4 mag", fontsize=14, rotation=90, color='k') plt.text(-0.152, 0.4, "C = 3 mag", fontsize=14, rotation=90, color='k') plt.text(-0.152, 0.1, "C = 2 mag", fontsize=14, rotation=90, color='k') #plt.text(0.2,-0.11, "Detector Scale Truth Model", fontsize=16, rotation=0, color='k') plt.axis("off") for i in range(x): for j in range(y): dispim = np.power(objlist[ctr], PWR) dispim = dispim[35:45, 35:45] objmax = np.power(objmaxlist[ctr], PWR) #origin set in top-left corner a = plt.axes(fig.axes(i + 1, j + 1)) #plt.text(0.1,6, "length = %.2f" %(LENGTH[i]), fontsize=5, rotation=0, color='w') #plt.text(0.1,13, "C = %.1f" %(CONTRAST[j]), fontsize=5, rotation=0, color='w') #plt.text(0.1, 20, "i = %d, j = %d"%(i,j), fontsize=5, rotation=0, color='y') #plt.text(0.1, 25, "ctr = %d"%(ctr), fontsize=5, rotation=0, color='y') p = plt.imshow(dispim, vmax=objmax, vmin=0, cmap='gist_heat', interpolation='nearest') a.xaxis.set_major_locator(plt.NullLocator()) a.yaxis.set_major_locator(plt.NullLocator()) plt.gray() # overrides current and sets default ctr += 1 plt.savefig(LOC + object_plot % (), dpi=150) plt.close()
def exp_fit(crvfile, outname, dump_frequency, Er, Ed, Ebi,
            recoil_relaxation_time=30000, start_timeoffset=500):
    ## CRV File - renamed to .crv and manually changed header (crv format)
    ## from extracted data file
    ## PNG output
    ## timestep used in recoil.in during recoil insertion --> datapoint
    ## after each recoil
    crv = CRV.CRV(crvfile)[0]
    #extract mech. stress tensor in x or y
    pyy = crv['pyy']
    x = crv['step']
    n_atoms = crv['atoms'][0]
    #calc timeaxis out of pxx or pyy size
    #x = np.arange(0,len(pyy)*timestep,timestep)
    #number of recoils
    nrecoils = []
    #pressure tensor component
    pxx = []
    dump_factor = int(recoil_relaxation_time / dump_frequency)
    print dump_factor
    #reduce timeaxis to recoil axis
    for i in range(len(x) / dump_factor):
        nrecoils.append((x[i * dump_factor] - start_timeoffset) /
                        float(recoil_relaxation_time))
        pxx.append(abs(pyy[i * dump_factor] / pyy[0]))
    #number of displacements per target atom
    ndpa = np.multiply(nrecoils, (Er / (2.5 * Ed * n_atoms)))
    #fit
    popt, pcov = opt.curve_fit(lambda ndpa, eta: eta_exp(ndpa, Ebi, eta),
                               ndpa, pxx, p0=2e8)
    #annotate to add value to plot
    print 'RIV (exp)= {:.4e}'.format(popt[0])
    # ndpa interpolated
    ndpa_interp = np.linspace(0, ndpa[-1] * 5, 1000)
    fig = plt.figure(1, figsize=fsize)
    plt.xlim(0, ndpa_interp[-1])
    #plt.ylim(0.95*pxx[-1] , 1)
    plt.grid()
    plt.plot(ndpa, pxx, 'bs', markeredgecolor='blue',
             markerfacecolor='None', markeredgewidth=mew, markersize=ms,
             label='MD Simulation')
    plt.plot(ndpa_interp, eta_exp(ndpa_interp, Ebi, *popt), 'r-',
             linewidth=lw, label='Fit')
    plt.xlabel('Number of displacements per atom')
    plt.ylabel(r'$ \frac{\sigma}{\vert \sigma_0 \vert} $')
    legtitle = (r'$ \eta_{ri} = $' + '{:.4e}'.format(popt[0]) +
                ' $ Pa \cdot dpa $' + '\n' + r'$ E_{bi} = $' +
                '{:.3e}'.format(Ebi) + ' $ Pa $' + '\n' + r'$ E_D = $' +
                '{:.1e}'.format(Ed) + ' $ \mathrm{eV} $' + '\n' +
                r'$ E_R = $' + '{:.1e}'.format(Er) + ' $ \mathrm{eV} $')
    plt.legend(loc='best', shadow=False, title=legtitle,
               prop={'size': legpropsize}, numpoints=1)
    #every other tick label
    for label in plt.gca().xaxis.get_ticklabels()[::2]:
        label.set_visible(False)
    #plt.show()
    fig.savefig(outname)
    print "Png file written to " + outname
    plt.close("all")
    return popt[0]
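# Hypothetical call (sketch): the file names, energies and Ebi value below
# are placeholders for illustration, not values from the original source.
eta_ri = exp_fit('stress_run.crv', 'exp_fit.png', dump_frequency=1000,
                 Er=3000.0, Ed=25.0, Ebi=1.5e9)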
summaryPlots[id]['N'] = yc.__len__()
summaryPlots[id]['Litter'] = -1.
summaryPlots[id]['LitterHa'] = -1.
summaryPlots[id]['AgbHa'] = summaryPlots[id]['YcHa']

# write out summary ground truth for each plot
outFileName = r'C:\Data\Development\Projects\PhD GeoInformatics\Code\Results\Baviaans2017FieldTrialAnalysis\Summary - Woody & Litter.csv'
with open(outFileName, 'wb') as outfile:
    writer = DictWriter(outfile, summaryPlots.values()[0].keys())
    writer.writeheader()
    writer.writerows(summaryPlots.values())

# vars = [model['vars'] for model in allometricModels.values()]
# print np.unique(vars)

pylab.close('all')
i = 1
ycTtl = 0.
pylab.figure()
plotSummary = []
for plotKey, plot in plots.iteritems():
    yc = np.array([record['yc'] for record in plot])
    height = np.float64([record['height'] for record in plot])
    ycTtl += yc.sum()
    plotSummary.append({
        'ID': plotKey.replace('-', '_'),
        'Yc': yc.sum(),
        'N': yc.__len__()
    })
    kde = gaussian_kde(height)  # , bw_method=bandwidth / height.std(ddof=1))
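# The per-plot KDE above is typically evaluated on a height grid before
# plotting (sketch; the grid limits are illustrative and this would sit
# inside the loop body):
grid = np.linspace(0., height.max() * 1.2, 200)
pylab.plot(grid, kde(grid), label=plotKey)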
# plots as PDF.
def plot_save_data(proc_data, binned_data, file_type='pdf'):
    for sample in proc_data.keys():
        temp_data = proc_data[sample]
        temp_bin = binned_data[sample]
        PL.plot(proc_data[sample][:, 1], proc_data[sample][:, 3], '.',
                markersize=12, alpha=.05, label="{0}".format(sample))
        PL.plot(temp_bin[:, 0], temp_bin[:, 3], '-k', linewidth=2)
        PL.legend(loc="upper left")
        PL.xlabel("SSC (Volume)")
        PL.ylabel("GFP Intensity (AU)")
        PL.ylim(0, 1050)
        PL.xlim(0, 1050)
        savepath = '{0}.{1}'.format(sample, file_type)
        PL.savefig(savepath)
        PL.close()

# New raw data is processed data
def analyze_data(fsc_thr, ssc_thr, bg_strain):
    print "Analyzing current folder..."
    print "Gating data..."
    data, strains = filter_all_data(fsc_thr, ssc_thr)
    print "Current folder data: {0}".format(strains)
    print "Binning data..."
    bin_data = bin_all(data)
    print "Performing background subtraction..."
    new_bin, new_raw = bg_sub_wrap(data, bin_data, bg_strain, strains)
    print "Plotting all data..."
    plot_save_data(new_raw, new_bin, file_type="tif")
def visualize(self, dp, visual_params): w1, b1 = self.weights[1] w2, b2 = self.weights[-1] if visual_params['save']: if self.cfg.learning == "disc": num_filters = w1.shape[0] nn.show_images(w1, (4, num_filters / 4)) if self.cfg.arch == "dense" and self.cfg.learning == "auto": # plt.figure(num=None, figsize=(30,90), dpi=80, facecolor='w', edgecolor='k') size = (10, 20) if self.cfg.dataset == "mnist": # print size, w2.shape nn.show_images(w2[:size[0] * size[1]], (size[0], size[1]), unit=1, scale=2) elif self.cfg.dataset in ("cifar10", "svhn-ram"): nn.show_images(w2[:size[0] * size[1]].reshape( size[0] * size[1], 3, 32, 32), (size[0], size[1]), unit=1) elif self.cfg.dataset == "faces": nn.show_images(w2[:size[0] * size[1]].reshape( size[0] * size[1], 1, 32, 32), (size[0], size[1]), unit=1) elif self.cfg.dataset == "faces48": nn.show_images(w2[:size[0] * size[1]].reshape( size[0] * size[1], 1, 48, 48), (size[0], size[1]), unit=1) elif self.cfg.dataset == "frey": nn.show_images(w2[:size[0] * size[1]].reshape( size[0] * size[1], 1, 20, 20), (size[0], size[1]), unit=1) elif self.cfg.dataset == "cifar10-patch": #CIFAR10 dense patches size = int(np.sqrt(self.cfg[0].shape / 3)) nn.show_images(w2[:256].reshape(256, 3, size, size), (16, 16), unit=1, scale=2) else: #CIFAR10 dense patches size = int(np.sqrt(self.cfg[0].shape)) # print size nn.show_images(w2[:200].reshape(200, 1, size, size), (10, 20), unit=1, scale=2) # nn.show_images(w2[:64].reshape(64,1,size,size),(8,8),unit=1,scale=2) if self.cfg.learning == "auto" and self.cfg.arch == "conv": # print w2.as_numpy_array()[:num_filters,:,:,:].shape num_filters = w2.shape[0] # print w2.shape nn.show_images(w2.as_numpy_array()[:num_filters, :, :, :], (4, num_filters / 4), unit=1, scale=2) # plt.subplot(212) # num_filters = w1.shape[0] # nn.show_images(w1.as_numpy_array()[:num_filters,:,:,:],(4,num_filters/4),unit=1) # print w1.shape # nn.show_images(np.swapaxes(w2.as_numpy_array(),0,1)[:num_filters,:,:,:],(4,num_filters/4),unit=1) # plt.show() plt.savefig(self.cfg.directory + self.cfg.name + ".png", format="png") plt.close() else: if not nn.is_interactive(): if self.cfg.learning == "auto" and not (self.cfg.dataset in ( "cifar10-second", "svhn-second", "mnist-second")): plt.figure(num=1, figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k') else: plt.figure(num=1, figsize=(15, 5), dpi=80, facecolor='w', edgecolor='k') # X = dp.X_id(0) # x = nn.data_convertor(X,0,1) w1, b1 = self.weights[1] w2, b2 = self.weights[-1] if self.cfg.arch == "dense" and self.cfg.learning == "disc": #dense if nn.is_interactive(): plt.figure(num=1, figsize=(15, 5), dpi=80, facecolor='w', edgecolor='k') plt.figure(1) plt.subplot(131) self.plot_train() plt.subplot(132) self.plot_test() plt.subplot(133) if self.cfg.dataset == "mnist": if w1.shape[1] > 25: nn.show_images(w1[:, :25].T, (5, 5)) #MNIST dense else: nn.show_images(w1[:, :].T, (5, w1.shape[1] / 5)) #MNIST softmax elif self.cfg.dataset in ("cifar10", "svhn-ram"): # print w1.shape if w1.shape[1] > 25: nn.show_images(w1.T[:25, :].reshape(25, 3, 32, 32), (5, 5)) #CIFAR10 dense else: nn.show_images(w1[:, :].reshape(3, 32, 32, 10), (5, 2)) #CIFAR10 softmax elif self.cfg.dataset in ("svhn-torch"): # print w1.shape if w1.shape[1] > 25: nn.show_images(w1.T[:25, :].reshape(25, 3, 32, 32), (5, 5), yuv=True) #CIFAR10 dense else: nn.show_images(w1[:, :].reshape(3, 32, 32, 10), (5, 2), yuv=True) #CIFAR10 softmax elif self.cfg.dataset == "cifar10-patches": #CIFAR10 dense patches if u == None: nn.show_images(w1[:, :25].reshape(3, 8, 8, 
25), (5, 5)) else: nn.show_images(whiten_undo( w1[:, :25].T.as_numpy_array(), u, s).T.reshape(3, 8, 8, 25), (5, 5), unit=True) elif self.cfg.dataset == "mnist-patches": #MNIST dense patches nn.show_images(w1[:, :25].T.as_numpy_array().T.reshape( 1, 8, 8, 16), (4, 4), unit=True) else: channel = self.H[0].shape[1] size = self.H[0].shape[2] if w1.shape[1] > 25: nn.show_images( w1.T[:25, :].reshape(25, channel, size, size), (5, 5)) else: nn.show_images( w1[:, :].reshape(10, channel, size, size), (5, 2)) if self.cfg.arch == "dense" and self.cfg.learning == "auto" and not self.cfg.dataset_extra: #dense if nn.is_interactive(): plt.figure(num=1, figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k') plt.figure(1) plt.subplot2grid((2, 3), (0, 0), colspan=1) self.plot_train() plt.subplot2grid((2, 3), (0, 1), colspan=2) if self.cfg.dataset == "mnist": nn.show_images(w2[:50], (5, 10)) # nn.show_images(w2[:25],(5,5)) elif self.cfg.dataset in ("cifar10", "svhn-ram"): # print w2.shape nn.show_images(w2[:50].reshape(50, 3, 32, 32), (5, 10)) elif self.cfg.dataset == "svhn-torch": #CIFAR10 dense patches nn.show_images(w2[:50].reshape(50, 3, 32, 32), (5, 10), yuv=True) elif self.cfg.dataset == "cifar10-patch": #CIFAR10 dense patches size = int(np.sqrt(self.H[0].shape[1] / 3)) # print size nn.show_images(w2[:50].reshape(50, 3, size, size), (5, 10)) # if u==None: nn.show_images(w1[:,:25].reshape(3,8,8,25),(5,5)) # else: nn.show_images(whiten_undo(w1[:,:25].T.as_numpy_array(),u,s).T.reshape(3,8,8,25),(5,5),unit=True) elif self.cfg.dataset == "mnist-patches": #MNIST dense patches nn.show_images(w1[:, :25].T.as_numpy_array().T.reshape( 1, 8, 8, 16), (4, 4), unit=True) else: #CIFAR10 dense patches size = int(np.sqrt(self.H[0].shape[1])) # print w2[:50].shape,size nn.show_images(w2[:50].reshape(50, 1, size, size), (5, 10)) plt.subplot2grid((2, 3), (1, 0), colspan=1) if self.cfg.dataset in ("natural", "mnist"): nn.show_images(w1[:, :25].T, (5, 5)) # w1,b1 = self.weights[1] # w2,b2 = self.weights[2] # print w1[:5,:5] # print w2[:5,:5].T # print "------" plt.subplot2grid((2, 3), (1, 1), colspan=1) if self.cfg.dataset == "mnist": nn.show_images(self.H[0][0].reshape(1, 1, 28, 28), (1, 1)) elif self.cfg.dataset in ("cifar10", "svhn-ram"): nn.show_images(self.H[0][0].reshape(1, 3, 32, 32), (1, 1)) elif self.cfg.dataset == "svhn-torch": nn.show_images(self.H[0][0].reshape(1, 3, 32, 32), (1, 1), yuv=True) elif self.cfg.dataset == "cifar10-patch": #CIFAR10 dense patches size = int(np.sqrt(self.H[0].shape[1] / 3)) nn.show_images(self.H[0][0].reshape(1, 3, size, size), (1, 1)) else: #CIFAR10 dense patches size = int(np.sqrt(self.H[0].shape[1])) nn.show_images(self.H[0][0].reshape(1, 1, size, size), (1, 1)) plt.subplot2grid((2, 3), (1, 2), colspan=1) if self.cfg.dataset == "mnist": nn.show_images(self.H[-1][0].reshape(1, 1, 28, 28), (1, 1)) elif self.cfg.dataset in ("cifar10", "svhn-ram"): nn.show_images(self.H[-1][0].reshape(1, 3, 32, 32), (1, 1)) elif self.cfg.dataset == "svhn-torch": nn.show_images(self.H[-1][0].reshape(1, 3, 32, 32), (1, 1), yuv=True) elif self.cfg.dataset == "cifar10-patch": #CIFAR10 dense patches size = int(np.sqrt(self.H[0].shape[1] / 3)) nn.show_images(self.H[-1][0].reshape(1, 3, size, size), (1, 1)) else: #CIFAR10 dense patches size = int(np.sqrt(self.H[0].shape[1])) nn.show_images(self.H[-1][0].reshape(1, 1, size, size), (1, 1)) if self.cfg.arch == "conv" and self.cfg.learning == "disc": if nn.is_interactive(): plt.figure(num=1, figsize=(15, 5), dpi=80, facecolor='w', edgecolor='k') plt.figure(1) 
plt.subplot(131) self.plot_train() plt.subplot(132) self.plot_test() plt.subplot(133) nn.show_images(w1[:16, :, :, :], (4, 4)) if self.cfg.arch == "conv" and self.cfg.learning == "auto": if nn.is_interactive(): plt.figure(num=1, figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k') plt.figure(1) # w2,b2 = self.weights[-1] # x=X[:,:,:,:1] # self.feedforward(x) if self.cfg.dataset in ("cifar10-second", "svhn-second", "mnist-second"): #CIFAR10 plt.subplot(131) # print self.H[0].shape,self.H[-1].shape,self.H[-1].max() nn.show_images(np.swapaxes( self.H[0][:1, :16, :, :].as_numpy_array(), 0, 1), (4, 4), bg="white") plt.subplot(132) nn.show_images(np.swapaxes( self.H[-1][:1, :16, :, :].as_numpy_array(), 0, 1), (4, 4), bg="white") plt.subplot(133) nn.show_images(np.swapaxes( self.H[-2][:1, :16, :, :].as_numpy_array(), 0, 1), (4, 4), bg="white") # print self.H[-1] else: plt.subplot(231) nn.show_images( self.H[0][0, :, :, :].reshape(1, self.H[0].shape[1], self.H[0].shape[2], self.H[0].shape[3]), (1, 1)) plt.subplot(232) nn.show_images( self.H[-1][0, :, :, :].reshape(1, self.H[-1].shape[1], self.H[-1].shape[2], self.H[-1].shape[3]), (1, 1)) plt.subplot(233) self.plot_train() plt.subplot(234) # if self.H[1].shape[1]>=16: # H1 = self.H[1].as_numpy_array() # H1 = H1.reshape(16*100,28*28) # print np.nonzero(H1)[0].shape nn.show_images(np.swapaxes( self.H[-2][:1, :16, :, :].as_numpy_array(), 0, 1), (4, 4), bg="white") # else: # nn.show_images(np.swapaxes(self.H[1][:1,:8,:,:].as_numpy_array(),0,1),(2,4),bg="white") plt.subplot(235) # if w1.shape[0]>16: nn.show_images(w1[:16, :, :, :], (4, 4)) # else: # nn.show_images(w1[:8,:,:,:],(2,4)) plt.subplot(236) # if w2.shape[0]>=16: # print w2.shape # if self.cfg.dataset == "svhn-torch": # nn.show_images(np.swapaxes(w2.as_numpy_array(),0,1)[:16,:,:,:],(4,4),unit=1,yuv=1) if self.cfg[-1].type == "convolution": nn.show_images(np.swapaxes(w2.as_numpy_array(), 0, 1)[:16, :, :, :], (4, 4), unit=1) if self.cfg[-1].type == "deconvolution": nn.show_images(w2[:16, :, :, :], (4, 4), unit=1) # else: # nn.show_images(np.swapaxes(w2.as_numpy_array(),0,1)[:,:8,:,:],(2,4),unit=True) if nn.is_interactive(): plt.show() else: plt.draw() plt.pause(.01) if self.cfg.dataset_extra == "generate": #dense for k in self.cfg.index_dense: if self.cfg[k].l2_activity != None: index = k if nn.is_interactive(): plt.figure(num=1, figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k') plt.figure(1) plt.subplot2grid((2, 3), (0, 0), colspan=1) self.plot_train() plt.subplot2grid((2, 3), (0, 1), colspan=2) if self.cfg.dataset == "mnist": nn.show_images(w2[:50], (5, 10)) plt.subplot2grid((2, 3), (1, 1), colspan=1) # if self.cfg.dataset == "mnist": # print self.H[index].shape x = self.H[index][:, 0].as_numpy_array() y = self.H[index][:, 1].as_numpy_array() plt.plot(x, y, 'bo') # plt.grid() x = self.T_sort[:, 0].as_numpy_array() y = self.T_sort[:, 1].as_numpy_array() plt.plot(x, y, 'ro') # plt.grid() # x = self.test_rand[:,0].as_numpy_array() # y = self.test_rand[:,1].as_numpy_array() # plt.plot(x,y,'go') plt.grid() # nn.show_images(self.H[0][0].reshape(1,1,28,28),(1,1)) plt.subplot2grid((2, 3), (1, 2), colspan=1) if self.cfg.dataset == "mnist": nn.show_images(self.H[-1][0].reshape(1, 1, 28, 28), (1, 1)) if self.cfg.dataset == "mnist" and self.cfg.dataset_extra in ( "vae", "generate"): plt.figure(num=5, figsize=(15, 10), dpi=80, facecolor='w', edgecolor='k') temp = nn.randn((64, 784)) self.test_mode = True self.feedforward(temp) self.test_mode = False nn.show_images(self.H[-1].reshape(64, 1, 28, 28), 
(8, 8)) if visual_params['save']: plt.savefig(self.cfg.directory + self.cfg.name + "_samples.png", format="png") else: plt.draw() plt.pause(.01)
def plot_splines(self, pred_f_3d, best_parameters_cv_array, verbose_plot=False): if os.path.exists(self.directory_save_splines): shutil.rmtree(self.directory_save_splines) os.makedirs(self.directory_save_splines) directory_save_dfs = 'dataset/output_data/data_frames/' if os.path.exists(directory_save_dfs): shutil.rmtree(directory_save_dfs) os.makedirs(directory_save_dfs) pred_f_3d_avg = np.nanmean(pred_f_3d, axis=2) pred_f_3d_sd = np.nanstd(pred_f_3d, axis=2) pred_f_3d_max = np.nanmax(pred_f_3d, axis=2) pred_f_3d_min = np.nanmin(pred_f_3d, axis=2) spline_smooth = np.round(np.mean(best_parameters_cv_array[:, 1]), 2) for i in range(len(self.stable_feature_indices)): if verbose_plot: print(("Generating figure %i out of %i" % (i + 1, len(self.stable_feature_indices)))) sel_idx = self.stable_feature_indices[i] pl.figure(figsize=(16, 9), dpi=100, facecolor='w', edgecolor='k') ii = np.argsort(self.X_train[:, sel_idx]) y_avg_splined, dummy_variable = model.train_spline( self.X_train[:, sel_idx][ii], pred_f_3d_avg[:, sel_idx][ii], self.X_train[:, sel_idx][ii], smooth_factor=spline_smooth) y_sd_lower, dummy_variable = model.train_spline( self.X_train[:, sel_idx][ii], y_avg_splined - pred_f_3d_sd[:, sel_idx][ii], self.X_train[:, sel_idx][ii], smooth_factor=spline_smooth) y_sd_upper, dummy_variable = model.train_spline( self.X_train[:, sel_idx][ii], y_avg_splined + pred_f_3d_sd[:, sel_idx][ii], self.X_train[:, sel_idx][ii], smooth_factor=spline_smooth) pl.plot(self.X_train[:, sel_idx][ii], y_avg_splined, "r-") pl.plot(self.X_train[:, sel_idx][ii], pred_f_3d_avg[:, sel_idx][ii], "k.") # pl.errorbar(self.X_train[:, sel_idx][ii],pred_f_3d_avg[:, sel_idx][ii], # yerr=pred_f_3d_sd[:, sel_idx][ii],fmt='go') pl.fill_between(self.X_train[:, sel_idx][ii], y_sd_lower, y_sd_upper, alpha=1, edgecolor='gainsboro', facecolor='gainsboro') pl.xlabel("Feature value") pl.ylabel("Prediction f(x) value") pl.title("Predictions for feature " + self.feature_names[sel_idx]) pl.grid() pl.savefig(self.directory_save_splines + 'spam_feature_' + str(sel_idx) + '.png') pl.close() dataframe_data = np.transpose( np.array([ self.X_train[:, sel_idx][ii], pred_f_3d_avg[:, sel_idx][ii], pred_f_3d_sd[:, sel_idx][ii], pred_f_3d_max[:, sel_idx][ii], pred_f_3d_min[:, sel_idx][ii] ])) dataframe_columns = [ 'feat_values_sorted', 'pred_f_3d_avg_sorted', 'pred_f_3d_sd_sorted', 'pred_f_3d_max_sorted', 'pred_f_3d_min_sorted' ] df_save = pd.DataFrame(data=dataframe_data, columns=dataframe_columns) df_save.to_csv(directory_save_dfs + 'spam_feature_' + str(sel_idx) + '.txt', header=True, index=False, sep='\t', float_format='%.5f') return True
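# Reading one of the per-feature frames written above back in (sketch;
# feature index 0 is illustrative):
df_check = pd.read_csv('dataset/output_data/data_frames/spam_feature_0.txt',
                       sep='\t')
print(df_check.columns.tolist())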
def process_summary(summary_filename): if ('fake' in summary_filename) or \ ('H3' in summary_filename) or \ ('H4' in summary_filename) or \ ('H7' in summary_filename) or \ ('H8' in summary_filename): logging.debug("Skipping %s" % summary_filename) return summary = physio.summary.Summary(summary_filename) logging.debug("Processing %s" % summary._filename) # cull trials by success trials = summary.get_trials() if len(trials) == 0: logging.error("No trails for %s" % summary._filename) return trials = trials[trials['outcome'] == 0] # and gaze gaze = clean_gaze(summary.get_gaze()) if len(gaze) > 0: logging.debug("N Trials before gaze culling: %i" % len(trials)) trials = cull_trials_by_gaze(trials, gaze) logging.debug("N Trials after gaze culling: %i" % len(trials)) for ch in xrange(1, 33): for cl in summary.get_cluster_indices(ch): outdir = '%s/%s_%i_%i' % \ (resultsdir, os.path.basename(summary._filename), ch, cl) info_dict = {} logging.debug("ch: %i, cl: %i" % (ch, cl)) # rate spike_times = summary.get_spike_times(ch, cl) # find start of isolation isolation_start = physio.spikes.times.\ find_isolation_start_by_isi(spike_times) spike_times = spike_times[spike_times >= isolation_start] nspikes = len(spike_times) info_dict['nspikes'] = nspikes if nspikes < min_spikes: logging.warning("\t%i < min_spikes[%i]" % \ (nspikes, min_spikes)) continue trange = (spike_times.min(), spike_times.max()) # trange = summary.get_epoch_range() rate = nspikes / (trange[1] - trange[0]) info_dict['rate'] = rate if rate < min_rate: logging.warning("\t%i < min_rate[%i]" % \ (rate, min_rate)) continue # filter trials dtrials = summary.filter_trials(trials, \ {'name': {'value': 'BlueSquare', 'op': '!='}}, \ timeRange=trange) if len(dtrials) == 0: logging.error("Zero trials for %i %i %s" % \ (ch, cl, summary._filename)) continue # snr TODO # location try: location = summary.get_location(ch) except Exception as E: location = (0, 0, 0) print "Attempt to get location failed: %s" % str(E) info_dict['location'] = list(location) # significant bins #bins = summary.get_significant_bins(ch, cl, attr="name", \ # blacklist="BlueSquare", spike_times=spike_times, \ # timeRange=trange) if default_bins is None: bins = summary.get_significant_bins(ch, cl, trials=dtrials, \ spike_times=spike_times) else: bins = default_bins info_dict['bins'] = bins baseline = summary.get_baseline(ch, cl, prew, trials=trials, \ spike_times=spike_times) info_dict['baseline'] = baseline # selectivity #resps, means, stds, ns = summary.get_binned_response( \ # ch, cl, 'name', bins=bins, spike_times=spike_times, \ # blacklist="BlueSquare", timeRange=trange) resps, means, stds, ns = summary.get_binned_response( \ ch, cl, 'name', bins=bins, spike_times=spike_times, \ trials=dtrials, timeRange=trange) if len(resps) == 0: logging.warning("No responses") continue sel_index = physio.spikes.selectivity.selectivity(resps.values()) #if numpy.isnan(sel_index): # raise Exception("Selectivity is nan") sorted_names = sorted(resps, key=lambda k: resps[k]) info_dict['selectivity'] = sel_index info_dict['sorted_names'] = sorted_names if not os.path.exists(outdir): os.makedirs(outdir) with open(outdir + '/info_dict.p', 'w') as f: pickle.dump(info_dict, f, 2) with open(outdir + '/sel_info.p', 'w') as f: pickle.dump({'resps': resps, 'means': means, 'stds': stds, \ 'ns': ns}, f, 2) x = pylab.arange(len(resps)) y = pylab.zeros(len(resps)) err = pylab.zeros(len(resps)) pylab.figure(1) for (i, name) in enumerate(sorted_names): y[i] = resps[name] # TODO fix this to be something 
reasonable #err[i] = (pylab.sum(stds[name][bins]) / float(len(bins))) / \ # pylab.sqrt(ns[name]) err[i] = 0 pylab.errorbar(x, y, err) xl = pylab.xlim() pylab.xticks(x, sorted_names) pylab.xlim(xl) pylab.ylabel('average binned response') pylab.title('Selectivity: %.2f' % sel_index) pylab.savefig(outdir + '/by_name.png') pylab.close(1) # separability # get stims without bluesquare stims = summary.get_stimuli({'name': \ {'value': 'BlueSquare', 'op': '!='}}) attr_combinations = {} sep_info = {} for (ai, attr1) in enumerate(attrs[:-1]): uniques1 = numpy.unique(stims[attr1]) for attr2 in attrs[ai + 1:]: uniques2 = numpy.unique(stims[attr2]) if attr1 == attr2: continue M = summary.get_response_matrix(ch, cl, attr1, attr2, \ bins=bins, spike_times=spike_times, stims=stims, \ uniques1=uniques1, uniques2=uniques2, \ timeRange=trange, trials=dtrials) if M.shape[0] == 1 or M.shape[1] == 1: logging.warning("M.shape %s, skipping" % \ str(M.shape)) continue sep, spi, ps = physio.spikes.separability.\ separability_permutation(M) if not pylab.any(pylab.isnan(M)): pylab.figure(1) pylab.imshow(M, interpolation='nearest') pylab.colorbar() pylab.xlabel(attr2) xl = pylab.xlim() yl = pylab.ylim() pylab.xticks(range(len(uniques2)), uniques2) pylab.ylabel(attr1) pylab.yticks(range(len(uniques1)), uniques1) pylab.xlim(xl) pylab.ylim(yl) pylab.title('Sep: %s, %.4f, (%.3f, %.3f)' % \ (str(sep), spi, ps[0], ps[1])) pylab.savefig(outdir + '/%s_%s.png' % \ (attr1, attr2)) pylab.close(1) sep_info['_'.join((attr1, attr2))] = { \ 'sep': sep, 'spi': spi, 'ps': ps} with open(outdir + '/sep_info.p', 'w') as f: pickle.dump(sep_info, f, 2) # compute separability at each name name_sep_info = {} for name in sorted_names: stims = summary.get_stimuli({'name': name}) for (ai, attr1) in enumerate(attrs[:-1]): uniques1 = numpy.unique(stims[attr1]) for attr2 in attrs[ai + 1:]: uniques2 = numpy.unique(stims[attr2]) if attr1 == attr2 or \ attr1 == 'name' or attr2 == 'name': continue M = summary.get_response_matrix(ch, cl, attr1, \ attr2, bins=bins, spike_times=spike_times,\ stims=stims, uniques1=uniques1, \ uniques2=uniques2, timeRange=trange, \ trials=dtrials) if M.shape[0] == 1 or M.shape[1] == 1: logging.debug("M.shape incompatible" \ " with separability: %s" % \ str(M.shape)) continue else: sep, spi, ps = physio.spikes.separability.\ separability_permutation(M) if not pylab.any(pylab.isnan(M)): pylab.figure(1) pylab.imshow(M, interpolation='nearest') pylab.colorbar() pylab.xlabel(attr2) xl = pylab.xlim() yl = pylab.ylim() pylab.xticks(range(len(uniques2)), uniques2) pylab.ylabel(attr1) pylab.yticks(range(len(uniques1)), uniques1) pylab.xlim(xl) pylab.ylim(yl) pylab.title('Sep: %s, %.4f, (%.3f, %.3f)' \ % (str(sep), spi, ps[0], ps[1])) pylab.savefig(outdir + '/%s_%s_%s.png' % \ (name, attr1, attr2)) pylab.close(1) name_sep_info['_'.join((name, attr1, attr2))] \ = {'sep': sep, 'spi': spi, 'ps': ps} with open(outdir + '/name_sep_info.p', 'w') as f: pickle.dump(name_sep_info, f, 2)
def plot_station_map(plottitle, plotregion, topo, coastal, border, fault,
                     sta, map_prefix, hypocenter_list=None):
    """
    Generate the station map plot
    """
    # Read in topo data
    topo_points = read_topo(topo, plotregion)

    # Read in fault data
    fault_x, fault_y = read_fault(fault)

    # Read in station data
    sta_x, sta_y = read_stations(sta)

    # Read coastlines
    coast_x, coast_y = read_coastal(coastal, plotregion)

    # Read borders
    bord_x, bord_y = read_coastal(border, plotregion)

    # Set plot dims
    pylab.gcf().set_size_inches(6, 6)
    pylab.gcf().clf()

    # Adjust title y-position
    t = pylab.title(plottitle, size=12)
    t.set_y(1.06)

    # Setup color scale
    cmap = cm.gist_earth
    norm = mcolors.Normalize(vmin=-1000.0, vmax=3000.0)

    # Plot basemap
    pylab.imshow(topo_points, cmap=cmap, norm=norm, extent=plotregion,
                 interpolation='nearest')

    # Freeze the axis extents
    pylab.gca().set_autoscale_on(False)

    # Plot coast lines
    for i in xrange(0, len(coast_x)):
        pylab.plot(coast_x[i], coast_y[i], linestyle='-', color='0.5')

    # Plot borders
    for i in xrange(0, len(bord_x)):
        pylab.plot(bord_x[i], bord_y[i], linestyle='-', color='0.75')

    # Plot fault trace
    pylab.plot(fault_x, fault_y, linestyle='-', color='k')

    # Plot stations
    pylab.plot(sta_x, sta_y, marker='o', color='r', linewidth=0)

    # Plot hypocenter if provided
    if hypocenter_list is not None:
        hypo_lat = []
        hypo_lon = []
        for hypocenter in hypocenter_list:
            hypo_lat.append(hypocenter['lat'])
            hypo_lon.append(hypocenter['lon'])
        pylab.plot(hypo_lon, hypo_lat, marker='*', markersize=12,
                   color='y', linewidth=0)

    # Set degree formatting of tick values
    majorFormatter = FormatStrFormatter(u'%.1f\u00b0')
    pylab.gca().xaxis.set_major_formatter(majorFormatter)
    pylab.gca().yaxis.set_major_formatter(majorFormatter)

    # Turn on ticks for both sides of axis
    for tick in pylab.gca().xaxis.get_major_ticks():
        tick.label1On = True
        tick.label2On = True
    for tick in pylab.gca().yaxis.get_major_ticks():
        tick.label1On = True
        tick.label2On = True

    # Set font size
    for tick in pylab.gca().get_xticklabels():
        tick.set_fontsize(8)
    for tick in pylab.gca().get_yticklabels():
        tick.set_fontsize(8)

    print("==> Creating Plot: %s.png" % (map_prefix))
    pylab.savefig('%s.png' % (map_prefix), format="png",
                  transparent=False, dpi=plot_config.dpi)
    pylab.close()
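# Hypothetical invocation (sketch): every path and coordinate below is a
# placeholder; plotregion is assumed to follow the (west, east, south,
# north) extent convention used by pylab.imshow above.
plot_station_map("Example Station Map", (-120.0, -116.0, 33.0, 36.0),
                 "region.topo", "coastline.dat", "border.dat",
                 "fault_trace.txt", "stations.stl", "station_map",
                 hypocenter_list=[{'lat': 34.2, 'lon': -118.5}])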