def func_plotxy(self, event):

    def get_axis_data(self, axis=1):
        axis_int = axis
        axis = 'y%d' % axis
        y = self.data[prop[axis].pop('var')]
        x = self.data[prop['x']['var']]
        idx = self.get_yidx(axis=axis_int) * \
            (x >= prop['x']['min']) * \
            (x <= prop['x']['max']) * \
            (y >= prop[axis]['min']) * \
            (y <= prop[axis]['max'])
        x = np.array(x[idx])
        y = np.array(y[idx])
        return x, y, prop

    prop = self.get_plot_properties()
    if not prop:
        return None

    x1, y1, prop = get_axis_data(self, axis=1)
    x_lims = prop['x']['min'], prop['x']['max']
    y1_lims = prop['y1'].pop('min'), prop['y1'].pop('max')

    plt.figure()
    if prop['x']['var'] == 'wg_datenum':
        plt.plot_date(x1, y1, **prop['y1'])
        plt.xticks(rotation=45, ha='right')
        plt.subplots_adjust(bottom=0.2)
    else:
        plt.plot(x1, y1, **prop['y1'])
    plt.xlim(x_lims)
    plt.ylim(y1_lims)
    plt.ylabel(prop['y1']['label'], color=prop['y1']['color'], size=14)
    plt.yticks(color=prop['y1']['color'])

    if 'y2' in prop:
        x2, y2, prop = get_axis_data(self, axis=2)
        x_lims = prop['x'].pop('min'), prop['x'].pop('max')
        y2_lims = prop['y2'].pop('min'), prop['y2'].pop('max')
        plt.twinx()
        if prop['x']['var'] == 'wg_datenum':
            plt.plot_date(x2, y2, **prop['y2'])
        else:
            plt.plot(x2, y2, **prop['y2'])
        plt.xlim(x_lims)
        plt.ylim(y2_lims)
        plt.ylabel(prop['y2']['label'], color=prop['y2']['color'], size=14)
        plt.yticks(color=prop['y2']['color'])

    plt.title(self.xy_title.GetValue())
    plt.show()
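# get_plot_properties() is not shown above. A hypothetical example of the prop
# dict that func_plotxy expects, inferred from the keys it reads ('var', 'min',
# 'max', 'label', 'color'); the variable names and values below are illustrative
# only, and 'y2' is optional (when present a second axis is drawn with plt.twinx()).
prop_example = {
    'x':  {'var': 'wg_datenum', 'min': 0.0, 'max': 100.0},
    'y1': {'var': 'speed', 'min': 0.0, 'max': 10.0,
           'label': 'speed', 'color': 'b', 'linestyle': '-'},
    'y2': {'var': 'heading', 'min': 0.0, 'max': 360.0,
           'label': 'heading', 'color': 'r', 'linestyle': '--'},
}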
def plot_validation_cost(train_error, val_error, class_rate=None, savefilename=None):
    epochs = range(len(train_error))
    fig, ax1 = plt.subplots()
    ax1.plot(epochs, train_error, label='train error')
    ax1.plot(epochs, val_error, label='validation error')
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('cost')
    plt.title('Validation Cost')
    lines = ax1.get_lines()
    # Shrink current axis's height by 10% on the bottom
    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0 + box.height * 0.1,
                      box.width, box.height * 0.9])
    if class_rate is not None:
        ax2 = plt.twinx(ax1)
        ax2.plot(epochs, class_rate, label='classification rate', color='r')
        ax2.set_ylabel('classification rate')
        lines.extend(ax2.get_lines())
        ax2.set_position([box.x0, box.y0 + box.height * 0.1,
                          box.width, box.height * 0.9])
    labels = [l.get_label() for l in lines]
    # Put a legend below current axis
    ax1.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.5, -0.05),
               fancybox=False, shadow=False, ncol=5)
    # ax1.legend(lines, labels, loc='lower right')
    if savefilename:
        plt.savefig(savefilename)
    plt.show()
def plot_validation_cost(train_error, val_error, class_rate=None, savefilename=None):
    epochs = range(len(train_error))
    fig, ax1 = plt.subplots()
    ax1.plot(epochs, train_error, label='train error')
    ax1.plot(epochs, val_error, label='validation error')
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('cost')
    plt.title('Validation Cost')
    lines = ax1.get_lines()
    # Shrink current axis's height by 10% on the bottom
    box = ax1.get_position()
    ax1.set_position(
        [box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
    if class_rate is not None:
        ax2 = plt.twinx(ax1)
        ax2.plot(epochs, class_rate, label='classification rate', color='r')
        ax2.set_ylabel('classification rate')
        lines.extend(ax2.get_lines())
        ax2.set_position(
            [box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
    labels = [l.get_label() for l in lines]
    # Put a legend below current axis
    ax1.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.5, -0.05),
               fancybox=False, shadow=False, ncol=5)
    # ax1.legend(lines, labels, loc='lower right')
    if savefilename:
        plt.savefig('{}.png'.format(savefilename))
    plt.show()
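# The two near-identical variants above collect line handles from both axes so
# that a single legend covers the twinned pair. A minimal sketch of the same idea
# with the object-oriented interface (ax1.twinx() is the Axes-method form of
# plt.twinx(ax1)); the data and labels below are illustrative only.
import matplotlib.pyplot as plt

def twin_axes_with_shared_legend():
    epochs = range(10)
    cost = [1.0 / (e + 1) for e in epochs]
    accuracy = [e / 10.0 for e in epochs]

    fig, ax1 = plt.subplots()
    ax1.plot(epochs, cost, 'b-', label='cost')
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('cost')

    ax2 = ax1.twinx()                       # second y-axis sharing the x-axis
    ax2.plot(epochs, accuracy, 'r-', label='classification rate')
    ax2.set_ylabel('classification rate')

    # Merge handles and labels from both axes into one legend.
    h1, l1 = ax1.get_legend_handles_labels()
    h2, l2 = ax2.get_legend_handles_labels()
    ax1.legend(h1 + h2, l1 + l2, loc='lower right')
    plt.show()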
# window size in hours for the plot
tj = 7 * 24  # days x hours
#tj = len(matonda)  # month
#tj = len(dt1)

# figures for the rg, fl, st buoys
pl.subplot(3, 3, ff)
pl.title('PNBOIA - ' + boia + '\n ' + local[ff-1])
pl.plot(dt1, hs, 'b', dt1, hmax, '--r')
pl.xticks(visible=False), pl.grid()
pl.xlim(dt1[-tj], dt1[-1]), pl.ylim(0, 10)
if ff == 1:
    pl.ylabel('Hs, Hmax (m)')
ax = pl.twinx()
ax.plot(dt0, ws1, '.g', dt0, ws2, '.y', markersize=5, alpha=0.9)
pl.xlim(dt1[-tj], dt1[-1])
ax.set_ylim(0, 20)
if ff == 3:
    ax.set_ylabel('WS (m/s)')

pl.subplot(3, 3, ff+3)
pl.plot(dt1, tp, 'ob')
pl.xticks(visible=False), pl.grid()
pl.xlim(dt1[-tj], dt1[-1]), pl.ylim(0, 20)
if ff == 1:
    pl.ylabel('Tp (s)')

pl.subplot(3, 3, ff+6)
pl.plot(dt1, dp, 'ob')
def correlator_online(fileinput='input.txt',dark_file='default',mask_file='default',plot='yes'): global datreg, nq, corf, n, sl, sr, nchannels, nchannels2, index_in_q, lind, dt, norm, srr, sll, nc, oneq, I_avg, I_avg2, lm1, lm2, lm1b, lm2b, ax1,ax1b, ax2, ax2b, ccd_img, ttdata,tcalc_cum, tplot_cum, tread_cum, tdrop_cum, tI_avg,q_2tcf, I_avgs, rch, rcr,totsaxs,input_info,l,y,v, sst, xnq, wtotmask time1=time.time() p.rc('image',origin = 'lower') p.rc('image',interpolation = 'nearest') print 'reading input...' input_info=get_input(fileinput) ##########################processing input file############################### dir= input_info['dir'] dir_dark= input_info['dark dir'] if dir_dark=='none': dir_dark=dir file_prefix=input_info['file_prefix'] ext = input_info['file_suffix'] #####New version has capabilities of reading also .gz files#### if ext == '.edf.gz': dataread=EdfFile.EdfGzipFile else: dataread=EdfFile.EdfFile ################################################## firstfile=int(input_info['n_first_image']) lastfile=int(input_info['n_last_image'])+1 firstdark=input_info['n_first_dark'] if firstdark.lower() != 'none': firstdark=int(input_info['n_first_dark']) lastdark=int(input_info['n_last_dark'])+1 geometry=input_info['geometry'].lower() tolerance=float32(float(input_info['tolerance'])) avgt = input_info['lag time'].lower() dt=find_lagt(avgt,input_info,dataread) print 'lag time =', dt q_2tcf=(input_info['q for TRC']).lower() if q_2tcf!='none': q_2tcf=int(q_2tcf) out_dir=get_dir(input_info['output directory']) out_prefix=get_prefix(input_info['output filename prefix']) out_tot=out_dir+out_prefix print '...done' ##################end processing input file################################### firstname=dir+file_name(file_prefix,ext,firstfile) f=dataread(firstname) ccd_info=f.GetStaticHeader(0) ncol=int(ccd_info['Dim_1']) nrows=int(ccd_info['Dim_2']) static=out_tot+'static.edf' static_data=asfarray(loadedf(static),dtype=float32) if input_info['n_first_dark'].lower()=='none': print 'not using darks' tot_darks=0*static_data else: print 'using darks' if dark_file=='default': dark_file=out_tot+'dark.edf' print 'using dark file:', dark_file tot_darks=asfarray(loadedf(dark_file),dtype=float32) toplot=static_data+.001 #to avoid zeros in plotting logarithm### print '...done' print '...reading mask' if mask_file=='default': mask_file=out_tot+'mask.edf' print 'using mask file:', mask_file totmask=loadedf(mask_file,0)+loadedf(mask_file,1) wtotmask=where(totmask==0) p.ion() fileq=out_tot+'qmask.edf' q=loadedf(fileq) maxval=int(amax(q)+2) detector=input_info['detector'] flatfield_file=input_info['flatfield file'] if detector=='medipix': flat_field=flatfield(detector,flatfield_file) else: flat_field=1.0 print '...done' if geometry=='saxs': print '...correcting static for baseline' xbeam=int(input_info['x direct beam']) ybeam=int(input_info['y direct beam']) static_data=rad_average(static_data,totmask,xbeam,ybeam) qaxis_list=[] npix_per_q=[] oneq=[] index_in_q=[] firstq=float32(float(input_info['first q'])) deltaq=float32(float(input_info['delta q'])) stepq=float32(float(input_info['step q'])) qvalue=firstq+deltaq/2 static_corrected=ones(shape(static_data),dtype=float32) q*=abs(totmask-1) for i in range(2,maxval,2): indices=where(q==i) index_in_q.append(indices)#gives the indices of pixels that are not masked at this q if geometry=='saxs': static_corrected[indices]=mean(static_data[indices])/static_data[indices] npixel=len(static_data[indices]) npix_per_q.append(npixel) oneq.append(ones((1,npixel))) 
qaxis_list.append(qvalue) qvalue+=deltaq+stepq print '...done' nq=len(npix_per_q) xnq=xrange(nq) tmpdat=loadtxt(out_tot+'1Dstatic.dat') qaxis=tmpdat[:,0] I_q=tmpdat[:,1] del tmpdat ##FINISHED INITIALIZING PART OF THE CODE###### ##START MAIN PART FOR CORRELATION##### nchannels=16. nchannels2=nchannels/2 nfile=lastfile-firstfile datregt=[] datreg=[] rch=int(ceil(log(nfile/nchannels)/log(2))+1) ###2time if q_2tcf!='none': ttdata=zeros((nfile,npix_per_q[q_2tcf-1]),dtype=float32) ###2time for ir in xrange(rch): for iq in xnq: datregt.append(zeros((npix_per_q[iq],nchannels),dtype=float32)) datreg.append(datregt) datregt=[] del datregt rcr=nchannels+nchannels2*ceil(log(nfile/nchannels)/log(2)) corf=zeros((nq,rcr),dtype=float32) lag=zeros((1,rcr),dtype=float32) data_shape=p.shape(toplot) smatr=zeros(data_shape,dtype=float32) matr=zeros(data_shape,dtype=float32) sl=zeros((nq,rcr),dtype=float32) sr=zeros((nq,rcr),dtype=float32) norm=zeros((1,rcr),dtype=float32) sll=[] srr=[] sst=[] for ir in xrange(rch): if ir==0: lag[0,:nchannels]=dt*arange(1,nchannels+1,1) norm[0,:nchannels]=1./arange(nfile-2,nfile-nchannels-2,-1) else: lag[0,nchannels2*(ir+1):nchannels2*(ir+2)]=(dt*2**ir)*arange(1+nchannels2,nchannels+1) norm[0,nchannels2*(ir+1):nchannels2*(ir+2)]=1./arange((nfile-1)/(2**ir)-nchannels2-1,(nfile-1)/(2**ir)-nchannels-1,-1) sll.append(zeros((nq,nchannels),dtype=float32)) srr.append(zeros((nq,nchannels),dtype=float32)) sst.append(arange(nq)*0.0) #END of declaring and initializing variables#### #READING FILES filenames=[] for k in xrange(firstfile,lastfile): filenames.append(file_name(file_prefix,ext,k)) n=0 lind=npix_per_q if plot!='no': ax1=p.axes([0.11, 0.08, 0.75, 0.57]) ax1.set_xlabel('t [sec]') ax1.set_ylabel('g^2(q,t)') ax1b=p.twinx(ax1) ax1b.yaxis.tick_right() ax2=p.axes([0.11, 0.73, 0.75, 0.19]) ax2.xaxis.tick_bottom() ax2.set_xlabel('t [sec]') ax2.set_ylabel('I(q,t) [a.u.]') ax2b=p.gcf().add_axes(ax2.get_position(),frameon=False) ax2b.xaxis.tick_top() ax2b.yaxis.tick_right() ax2b.xaxis.set_label_position('top') ax2b.set_xlabel('Image no.') label1='q= %2.1e 1/Ang' % qaxis_list[0] label2='q= %2.1e 1/Ang' % qaxis_list[nq/2] lm1,=ax1.semilogx((1,),(1,),'ro-',label=label1) lm1b,=ax1b.semilogx((1,),(1,),'bo-',label=label2) ax1.legend(loc='lower left') ax1b.legend(loc=(0.02,0.1)) lm2,=ax2.plot((1,),(1,),'r-') lm2b,=ax2b.plot((1,),(1,),'b-') p.setp(ax1.get_yticklabels(), color='r') p.setp(ax1b.get_yticklabels(), color='b') p.setp(ax2.get_yticklabels(), color='r') p.setp(ax2b.get_yticklabels(), color='b') tplot_cum=0 tread_cum=0 tdrop_cum=0 tcalc_cum=0 I_avg=zeros((1,nfile),float32) I_avg2=zeros((1,nfile),float32) I_avgs=zeros((nq,nfile),float32) tI_avg=zeros((1,nfile),float32) mon=zeros((1,nfile),int) totsaxs=0*static_data detector=input_info['detector'].lower() Mythread=Thread #Mythread=Process checkfile=os.path.exists n=0 firstdata=dir+filenames[n] f=dataread(firstdata) dread(f,n,tot_darks,flat_field,static_corrected) goodsize=os.path.getsize(dir+filenames[n]) nnfile=nfile-1 stop=0 if input_info['normalize'].lower()!= 'none': print "normalizing to ", input_info['normalize'] else: print "not normalizing" while n<nnfile: #nc=n+1. nc=n+1 ccd_imgn=ccd_img file=filenames[n+1] tempfile=dir+file wait=0 t0=time.time() while checkfile(tempfile) is False: p.draw() sys.stdout.write(50*'\x08') sys.stdout.write('waiting for file'+ file+'...') sys.stdout.flush() t1=time.time() wait+=t1-t0 time.sleep(1) t0=t1 if wait>10*dt: print nfile ans=raw_input('\n will this file ever arrive? 
(y/N)') if ans.lower()=='y': print '\n keep waiting...\n' time.sleep(3*dt) wait=0 else: stop=1 nfile=n+1 break if stop==1: break if ext=='.edf': #cannot do the check for gz files as their size are not to be equal. filesize=os.path.getsize(tempfile) while filesize!=goodsize: sys.stdout.write(50*'\x08') sys.stdout.write('file '+ file+'still not ready...') sys.stdout.flush() time.sleep(2) filesize=os.path.getsize(tempfile) tmf=dataread(tempfile) thrd=Mythread(target=dread,args=([tmf,n+1,tot_darks,flat_field,static_corrected])) thcor=Mythread(target=correlator,args=([0,ccd_imgn])) mon[0,n]= monitor #for plot. TO be faster, I only updated plot each nchannels files. if nc%nchannels==0: pct=float32(n)/float32(nfile)*100 sys.stdout.write(50*'\x08') sys.stdout.write('read '+str(int(pct))+'% of files'+32*' ') sys.stdout.flush() if plot!='no': ttplot(corf,sr,sl,norm,n+1, I_avg[0,:n+1], I_avg2[0,:n+1],lag) #thplot.join() #thplot=Mythread(target=ttplot,args=([corf,sr,sl,norm,n+1, I_avg[0,:n+1], I_avg2[0,:n+1]])) #thplot.start() thrd.start() thcor.start() thrd.join() thcor.join() n+=1 #END OF MAIN LOOP #calculate 2 times correlation function sys.stdout.write(50*'\x08') sys.stdout.flush() print "read 100% of files" #calculate correlation functions if stop==1: print nfile, shape(I_avgs) tI_avg=tI_avg[:,:nfile] mon=mon[:,:nfile] I_avgs=I_avgs[:,:nfile] rch=int(ceil(log(nfile/nchannels)/log(2))+1) for ir in xrange(rch): if ir==0: norm[0,:nchannels]=1./arange(nfile-2,nfile-nchannels-2,-1) else: norm[0,nchannels2*(ir+1):nchannels2*(ir+2)]=1./arange((nfile-1)/(2**ir)-nchannels2-1,(nfile-1)/(2**ir)-nchannels-1,-1) indt=int(nchannels+nchannels2*log(nfile/nchannels)/log(2))-2 cc=zeros((indt,nq+1),float32) q_title='#q values:' trace_title='#file_no. , time, monitor, q values:' for cindex in xnq: q_title=q_title+' '+str(qaxis_list[cindex]) trace_title=trace_title+' '+str(qaxis_list[cindex]) cc[:,cindex+1]=corf[cindex,:indt]/(sl[cindex,:indt]*sr[cindex,:indt])/\ norm[0,:indt] cc[:,0]=lag[0,:indt] q_title=q_title+'\n' trace_title=trace_title+'\n' del indt f=open(out_tot+'cf.dat','w') f.write(q_title) savetxt(f, cc) f.close() del cc f=open(out_tot+'trace.dat','w') f.write(trace_title) traces=zeros((nfile,nq+3),float32) traces[:,0]=tI_avg/dt+firstfile traces[:,1]=tI_avg traces[:,2]=mon traces[:,3:]=transpose(I_avgs[:,:]) savetxt(f,traces) f.close() del traces static=out_tot+'static.edf' totsaxs=totsaxs/nfile-tot_darks totsaxs[totsaxs<=0]=0 saveedf(static,totsaxs) del static del totsaxs del tot_darks print 'correlation functions are saved to ', out_tot+'cf.dat' print 'traces are saved to ', out_tot+'trace.dat' if input_info['normalize'].lower()!= 'none': print "data normalized to ", input_info['normalize'] else: print "data not normalized" if input_info['dropletize'].lower()=='yes': print "data dropletized: !!!!CAUTION this is valid only for andor 13 micron and dark subtracted images (adu per photon = 1930 +/- 100, zero photon = 0 +/- 200)" #if plot!='no': # p.hold(True) # raw_input('Press enter to close window') # p.close() p.hold(True) p.close() if q_2tcf!='none': print "calculating time resolved cf and chi4..." 
if nfile>6000: #this is for 4 GB RAM PC nfile=6000 n=6000 if q_2tcf!='none': lind2=npix_per_q[q_2tcf-1]/16 l=arange(5)*0 y=[] v=[] for i in range(5): y.append([]) v.append([]) ib=0 for i in xrange(16): sys.stdout.write(50*'\x08') sys.stdout.write('done '+str(int(i/16.*100))+'% of data'+32*' ') sys.stdout.flush() ie=ib+lind2 y[0].append(trc(ttdata[:n-1,ib:ie])) v[0].append(vartrc(y[0][-1])) if l[0]==1: recurf(0) else: l[0]+=1 ib+=lind2 vm=[] for i in range(4,-1,-1): vm.append(mean(v[i],0)) vm=array(vm) del ttdata del v sys.stdout.write(50*'\x08') sys.stdout.flush() file_2times=out_tot+'2times_q_'+str(q_2tcf)+'.edf' ytrc.write(file_2times,y[4][0]) print 'Time resolved CF is saved to '+ out_tot+'2times_q_'+str(q_2tcf)+'.edf' N=array([[1],[2],[4],[8],[16]])/float(npix_per_q[q_2tcf-1]) data=concatenate((N,vm),1).T #print 'number of pixels ',lind[ttcf_par] #print 'q value=', qv[ttcf_par] p0=[0.0,1.0] it=range(len(data[1:,0])) p1=zeros((len(data[1:,0]),len(p0)+1)) p1[:,0]=(asfarray(it)+1.0)*dt xdata=data[0,:] for i in it: ydata=data[i+1,:] p1[i,1:],success=leastsq(errfunc,p0,args=(xdata,ydata)) outfile=out_tot+'fitchi4_q_'+str(q_2tcf)+'.dat' f=open(outfile,'w') f.write("#time chi4 error q value:"+str(qaxis_list[q_2tcf-1])+"\n") savetxt(f,p1) f.close() print 'file is saved to '+outfile print "saving results..." time2=time.time() print 'elapsed time', time2-time1 if plot!='no': print 'elapsed time for plotting', tplot_cum print 'elapsed time for reading', tread_cum-tdrop_cum if input_info['dropletize'].lower()=='yes': print 'elapsed time for dropletizing', tdrop_cum print 'elapsed time for correlating', tcalc_cum print 'calculations have finished:)'
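# The correlator above builds twinned axes (ax1/ax1b for g2(q,t), ax2/ax2b for the
# intensity traces) and periodically calls ttplot(), which is not shown. A minimal,
# hypothetical sketch of the kind of live update such a helper would have to perform
# on those line objects; the signature and names below are assumptions, not the
# original implementation.
import matplotlib.pyplot as plt

def ttplot_sketch(lm1, lm1b, lm2, lm2b, ax1, ax1b, ax2, ax2b,
                  lag, g2_q0, g2_qmid, I_avg, I_avg2):
    lm1.set_data(lag, g2_q0)          # g2(q,t) for the first q, left axis
    lm1b.set_data(lag, g2_qmid)       # g2(q,t) for the middle q, twin axis
    frames = range(1, len(I_avg) + 1)
    lm2.set_data(frames, I_avg)       # intensity trace, first q
    lm2b.set_data(frames, I_avg2)     # intensity trace, middle q
    for ax in (ax1, ax1b, ax2, ax2b):
        ax.relim()                    # rescale each axis to its own data
        ax.autoscale_view()
    plt.draw()
    plt.pause(0.001)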
feature_dir = os.path.join(logdir, 'features')
os.mkdir(feature_dir)

# Visualize parts
#plt.clf()
plt.figure()
ag.plot.images(d.visparts, show=False, zero_to_one=False)
plt.savefig(os.path.join(feature_dir, 'visparts.png'))
plt.close()

# Show scores and counts of parts
if 'scores' in d.extra and 'counts' in d.extra:
    #plt.clf()
    plt.figure()
    plt.plot(d.extra['scores'], label='Scores')
    plt.twinx()
    plt.plot(d.extra['counts'], label='Counts')
    plt.legend(fontsize='xx-small', framealpha=0.2)
    plt.savefig(os.path.join(feature_dir, 'scores-and-counts.png'))
    plt.close()

parts_dir = os.path.join(feature_dir, 'parts')
os.mkdir(parts_dir)
originals = d.extra.get('originals')

for pi in xrange(d.num_features):
    #plt.clf()
    f = plt.figure()
    # Look inside view_bkg_stack.py for code to go here.
    plt.subplot2grid((7, 10), (0, 0), colspan=4, rowspan=4).set_axis_off()
    plt.imshow(d.visparts[pi], interpolation='nearest', cmap=plt.cm.gray)
import matplotlib.pylab as pl

ax1 = pl.subplot(111)
t = [1., 2., 3., 4.]
aa = [11.4, 12.7, 13.1, 14.56]
pl.plot(t, aa, 'b-o', label="aa")
pl.xlabel('no')
pl.ylabel('aa')
#pl.show()

ax2 = pl.twinx()
bb = [0.9, 2.2, 3.54, 4.0]
pl.plot(t, bb, 'r-s', label="bb")
pl.ylabel('bb')
ax2.yaxis.tick_right()
pl.show()
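# The same minimal two-scale plot with the object-oriented interface; a sketch
# only, using ax.twinx() and coloured tick labels so each series can be matched
# to its axis. Data are the same illustrative values as above.
import matplotlib.pyplot as plt

t = [1., 2., 3., 4.]
aa = [11.4, 12.7, 13.1, 14.56]
bb = [0.9, 2.2, 3.54, 4.0]

fig, ax1 = plt.subplots()
ax1.plot(t, aa, 'b-o', label='aa')
ax1.set_xlabel('no')
ax1.set_ylabel('aa', color='b')
ax1.tick_params(axis='y', colors='b')

ax2 = ax1.twinx()                  # second y-axis on the same x-axis
ax2.plot(t, bb, 'r-s', label='bb')
ax2.set_ylabel('bb', color='r')
ax2.tick_params(axis='y', colors='r')

plt.show()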
def plot_ice_cover_eb_simple( ice_cover, energy_balance, observed_ice, date, temp, snotot, filename): """ :param ice_cover: :param energy_balance: :param observed_ice: :param date: :param temp: :param snotot: :param filename: :return: Note: http://matplotlib.org/mpl_examples/color/named_colors.png """ fsize = (16, 16) plt.figure(figsize=fsize) #fig = pplt.figure(figsize=fsize) plt.clf() ############## First subplot plt.subplot2grid((5, 1), (0, 0), rowspan=2) # depending on how many days are in the plot, the line weight of the modelled data should be adjusted modelledLineWeight = 1100/len(ice_cover) # dont need to keep the colunm coordinates, but then again, why not..? Usefull for debuging allColumnCoordinates = [] # plot total snow depth on land plb.plot(date, snotot, "gray") plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover))) # a variable for the lowest point on the ice_cover. It is used for setting the lower left y-limit . lowest_point = 0. # Plot ice_cover for ic in ice_cover: # some idea of progress on the plotting if ic.date.day == 1: print((ic.date).strftime('%Y%m%d')) # make data for plotting. [icelayers.. [fro, too, icetype]]. columncoordinates = [] too = -ic.water_line # water line is on xaxis for i in range(len(ic.column)-1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height columncoordinates.append([fro, too, layer.type]) if fro < lowest_point: lowest_point = fro # add coordinates to a vline plot plb.vlines(ic.date, fro, too, lw=modelledLineWeight, color=layer.get_colour()) #ic.getColour(layer.type)) allColumnCoordinates.append(columncoordinates) # plot observed ice columns for ic in observed_ice: if len(ic.column) == 0: height = 0.05 plb.vlines(ic.date, -height, height, lw=4, color='white') plb.vlines(ic.date, -height, height, lw=2, color='red') else: # some idea of progress on the plotting print("Plotting observations.") # make data for plotting. [ice layers.. [fro, too, icetype]]. too = -ic.water_line # water line is on xaxis for i in range(len(ic.column)-1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height if fro < lowest_point: lowest_point = fro padding = 0. padding_color = 'white' # outline the observations in orange if I have modelled the ice height after observation. if ic.metadata.get('IceHeightAfter') == 'Modeled': padding_color = 'orange' # add coordinates to a vline plot plb.vlines(ic.date, fro-padding, too+padding, lw=6, color=padding_color) plb.vlines(ic.date, fro, too, lw=4, color=layer.get_colour()) # the limits of the left side y-axis is defined relative the lowest point in the ice cover # and the highest point of the observed snow cover. 
plb.ylim(lowest_point*1.1, max(snotot)*1.05) # Plot temperatures on a separate y axis plb.twinx() temp_pluss = [] temp_minus = [] for i in range(0, len(temp), 1): if temp[i] >= 0: temp_pluss.append(temp[i]) temp_minus.append(np.nan) else: temp_minus.append(temp[i]) temp_pluss.append(np.nan) plb.plot(date, temp, "black") plb.plot(date, temp_pluss, "red") plb.plot(date, temp_minus, "blue") plb.ylim(-4*(max(temp)-min(temp)), max(temp)) ######################################## temp_atm = [] temp_surf = [] atm_minus_surf = [] itterations = [] EB = [] S = [] L = [] H = [] LE = [] T = [] R = [] G = [] s_inn = [] albedo = [] SC = [] R_i = [] stability_correction = [] CC = [] SM = [] if energy_balance[0].date > date[0]: i = 0 while energy_balance[0].date > date[i]: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) T.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) i += 1 for eb in energy_balance: if eb.EB is None: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) T.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) else: temp_atm.append(eb.temp_atm) temp_surf.append(eb.temp_surface) atm_minus_surf.append(eb.temp_atm-eb.temp_surface) itterations.append(eb.iterations) EB.append(eb.EB) S.append(eb.S) L.append(eb.L_a+eb.L_t) H.append(eb.H) LE.append(eb.LE) T.append(eb.H+eb.LE) R.append(eb.R) G.append(eb.G) s_inn.append(eb.s_inn) albedo.append(eb.albedo) SC.append(eb.SC) R_i.append(eb.R_i) stability_correction.append(eb.stability_correction) CC.append(eb.CC) SM.append(eb.SM) ############################# plt.subplot2grid((5, 1), (2, 0), rowspan=3) plb.plot(date, SM, "red", lw=2) plb.plot(date, SC, "blue", lw=2) plb.plot(date, [0.]*len(date), "white", lw=2) #plb.plot(date, H, "blue") #plb.plot(date, LE, "navy") #plb.plot(date, T, "blue") plb.plot(date, R, "black") #plb.plot(date, G, "crimson") #plb.plot(date, L, "green", lw=1) #plb.plot(date, S, "gold", lw=1) #plb.plot(date, s_inn, "gold", lw=1) #plb.plot(date, CC, "pink", lw=1) #plb.plot(date, EB, "black") plb.ylim(-5000, 5000) plb.xlim(date[0], date[-1]) #fig.tight_layout() plb.ylabel("Q [kJ/m2/24hrs]") plb.savefig(filename)
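# The temp_pluss / temp_minus loops above (and in the later variants of this
# function) split one temperature series into positive and negative parts by
# padding with np.nan so each part can be drawn in its own colour. A vectorised
# sketch of the same idea; the temp values here are illustrative only.
import numpy as np

temp = np.array([2.3, 0.5, -1.2, -4.0, 1.1])
temp_pluss = np.where(temp >= 0, temp, np.nan)   # >= 0 kept, rest NaN
temp_minus = np.where(temp < 0, temp, np.nan)    # < 0 kept, rest NaN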
def parallel_coordinates(fig, axes, data_sets, colors=None, lws=None, style=None):
    """
    #Function to plot parallel_coordinates
    http://stackoverflow.com/questions/8230638/parallel-coordinates-plot-in-matplotlib
    """
    dims = len(data_sets[0])
    x = range(dims)

    if style is None:
        style = ['r-'] * len(data_sets)
    if colors is None:
        colors = ['r'] * len(data_sets)
    if lws is None:
        lws = [1.0] * len(data_sets)

    # Calculate the limits on the data
    min_max_range = list()
    for m in zip(*data_sets):
        mn = min(m)
        mx = max(m)
        if mn == mx:
            mn -= 0.5
            mx = mn + 1.
        r = float(mx - mn)
        min_max_range.append((mn, mx, r))

    # Normalize the data sets
    norm_data_sets = list()
    for ds in data_sets:
        nds = [(value - min_max_range[dimension][0]) / min_max_range[dimension][2]
               for dimension, value in enumerate(ds)]
        #nds = [value for dimension,value in enumerate(ds)]
        norm_data_sets.append(nds)
    data_sets = norm_data_sets

    # Plot the datasets on all the subplots
    for i, ax in enumerate(axes):
        for dsi, d in enumerate(data_sets):
            ax.plot(x, d, style[dsi], c=colors[dsi], lw=lws[dsi])
        ax.set_xlim([x[i], x[i+1]])

    # Set the x axis ticks
    for dimension, (axx, xx) in enumerate(zip(axes, x[:-1])):
        axx.xaxis.set_major_locator(ticker.FixedLocator([xx]))
        ticks = len(axx.get_yticklabels())
        labels = list()
        step = min_max_range[dimension][2] / (ticks - 1)
        mn = min_max_range[dimension][0]
        for i in xrange(ticks):
            v = mn + i*step
            labels.append('%.d' % v)
        axx.set_yticklabels(labels)

    # Move the final axis' ticks to the right-hand side
    axx = plt.twinx(axes[-1])
    dimension += 1
    axx.xaxis.set_major_locator(ticker.FixedLocator([x[-2], x[-1]]))
    ticks = len(axx.get_yticklabels())
    step = min_max_range[dimension][2] / (ticks - 1)
    mn = min_max_range[dimension][0]
    labels = ['%4.2f' % (mn + i*step) for i in xrange(ticks)]
    axx.set_yticklabels(labels)

    # Stack the subplots
    plt.subplots_adjust(wspace=0)

    return plt
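# A hypothetical usage sketch for parallel_coordinates() above: the caller is
# expected to supply one subplot per adjacent pair of dimensions
# (len(axes) == dims - 1). The data, colours and styles below are made up, and
# the ticker import is the one the function itself relies on.
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

data_sets = [
    [0.1, 4.0, 2.5, 1.0],
    [0.3, 2.0, 3.5, 2.0],
    [0.2, 3.0, 1.5, 3.0],
]
dims = len(data_sets[0])

# One subplot per pair of adjacent axes; the function packs them with wspace=0.
fig, axes = plt.subplots(1, dims - 1, sharey=False)
parallel_coordinates(fig, axes, data_sets,
                     colors=['r', 'g', 'b'],
                     style=['-', '-', '-'],
                     lws=[1.0, 1.0, 1.0])
plt.show()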
def plot_ice_cover_eb(ice_cover, energy_balance, observed_ice, date, temp, snotot, filename, prec=None, wind=None, clouds=None): """ :param ice_cover: :param energy_balance: :param observed_ice: :param date: :param temp: :param snotot: :param filename: :param prec: :param wind: :param clouds: :return: Note: http://matplotlib.org/mpl_examples/color/named_colors.png """ fsize = (16, 16) plt.figure(figsize=fsize) #fig = pplt.figure(figsize=fsize) plt.clf() ############## First subplot plt.subplot2grid((11, 1), (0, 0), rowspan=2) # depending on how many days are in the plot, the line weight of the modelled data should be adjusted modelledLineWeight = 1100 / len(ice_cover) # dont need to keep the colunm coordinates, but then again, why not..? Usefull for debuging allColumnCoordinates = [] # plot total snow depth on land plb.plot(date, snotot, "gray") plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover))) # a variable for the lowest point on the ice_cover. It is used for setting the lower left y-limit . lowest_point = 0. # Plot ice_cover for ic in ice_cover: # some idea of progress on the plotting if ic.date.day == 1: print((ic.date).strftime('%Y%m%d')) # make data for plotting. [icelayers.. [fro, too, icetype]]. columncoordinates = [] too = -ic.water_line # water line is on xaxis for i in range(len(ic.column) - 1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height columncoordinates.append([fro, too, layer.type]) if fro < lowest_point: lowest_point = fro # add coordinates to a vline plot plb.vlines(ic.date, fro, too, lw=modelledLineWeight, color=layer.get_colour()) #ic.getColour(layer.type)) allColumnCoordinates.append(columncoordinates) # plot observed ice columns for ic in observed_ice: if len(ic.column) == 0: height = 0.05 plb.vlines(ic.date, -height, height, lw=4, color='white') plb.vlines(ic.date, -height, height, lw=2, color='red') else: # some idea of progress on the plotting print("Plotting observations.") # make data for plotting. [ice layers.. [fro, too, icetype]]. too = -ic.water_line # water line is on xaxis for i in range(len(ic.column) - 1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height if fro < lowest_point: lowest_point = fro padding = 0. padding_color = 'white' # outline the observations in orange if I have modelled the ice height after observation. if ic.metadata.get('IceHeightAfter') == 'Modeled': padding_color = 'orange' # add coordinates to a vline plot plb.vlines(ic.date, fro - padding, too + padding, lw=6, color=padding_color) plb.vlines(ic.date, fro, too, lw=4, color=layer.get_colour()) # the limits of the left side y-axis is defined relative the lowest point in the ice cover # and the highest point of the observed snow cover. 
plb.ylim(lowest_point * 1.1, max(snotot) * 1.05) # Plot temperatures on a separate y axis plb.twinx() temp_pluss = [] temp_minus = [] for i in range(0, len(temp), 1): if temp[i] >= 0: temp_pluss.append(temp[i]) temp_minus.append(np.nan) else: temp_minus.append(temp[i]) temp_pluss.append(np.nan) plb.plot(date, temp, "black") plb.plot(date, temp_pluss, "red") plb.plot(date, temp_minus, "blue") plb.ylim(-4 * (max(temp) - min(temp)), max(temp)) ######################################## temp_atm = [] temp_surf = [] atm_minus_surf = [] itterations = [] EB = [] S = [] L = [] H = [] LE = [] R = [] G = [] s_inn = [] albedo = [] SC = [] R_i = [] stability_correction = [] CC = [] SM = [] if energy_balance[0].date > date[0]: i = 0 while energy_balance[0].date > date[i]: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) i += 1 for eb in energy_balance: if eb.EB is None: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) else: temp_atm.append(eb.temp_atm) temp_surf.append(eb.temp_surface) atm_minus_surf.append(eb.temp_atm - eb.temp_surface) itterations.append(eb.iterations) EB.append(eb.EB) S.append(eb.S) L.append(eb.L_a + eb.L_t) H.append(eb.H) LE.append(eb.LE) R.append(eb.R) G.append(eb.G) s_inn.append(eb.s_inn) albedo.append(eb.albedo) SC.append(eb.SC) R_i.append(eb.R_i) stability_correction.append(eb.stability_correction) CC.append(eb.CC) SM.append(eb.SM) ############### Second sub plot ########################## plt.subplot2grid((11, 1), (2, 0), rowspan=1) plb.bar(date, itterations, label="Iterations for T_sfc", color="gray") plb.xlim(date[0], date[-1]) plb.xticks([]) plb.ylabel("#") # l = plb.legend() # l.set_zorder(20) ############## CC, wind and prec ########################## plt.subplot2grid((11, 1), (3, 0), rowspan=1) # plot precipitation prec_mm = [p * 1000. 
for p in prec] plb.bar(date, prec_mm, width=1, lw=0.5, label="Precipitation", color="deepskyblue", zorder=10) plb.ylabel("RR [mm]") plb.xlim(date[0], date[-1]) plb.ylim(0, max(prec_mm) * 1.1) plb.xticks([]) # plot cloud cover for i in range(0, len(clouds) - 1, 1): if clouds[i] > 0: plb.hlines(0, date[i], date[i + 1], lw=190, color=str(-(clouds[i] - 1.))) elif clouds[i] == np.nan: plb.hlines(0, date[i], date[i + 1], lw=190, color="pink") else: plb.hlines(0, date[i], date[i + 1], lw=190, color=str(-(clouds[i] - 1.))) plb.twinx() plb.plot(date, wind, color="greenyellow", label="Wind 2m", lw=2, zorder=15) plb.ylabel("FFM [m/s]") ############ Temp diff sfc and atm ############################# plt.subplot2grid((11, 1), (4, 0), rowspan=2) plb.plot(date, temp_atm, "black", zorder=5) plb.plot(date, temp, "blue", zorder=10) plb.plot(date, temp_surf, "green") area = np.minimum(temp_atm, temp_surf) plb.fill_between(date, temp_atm, area, color='red') #, alpha='0.5') plb.fill_between(date, temp_surf, area, color='blue') #, alpha='0.5') plb.ylim(-50, 20) plb.ylabel("[C]") # this plots temperature on separate right side axis plb.twinx() temp_pluss = [] temp_minus = [] for i in range(0, len(atm_minus_surf), 1): if atm_minus_surf[i] >= 0: temp_pluss.append(atm_minus_surf[i]) temp_minus.append(np.nan) else: temp_minus.append(atm_minus_surf[i]) temp_pluss.append(np.nan) plb.plot(date, atm_minus_surf, "black", lw=2) plb.plot(date, temp_pluss, "red", lw=2) plb.plot(date, temp_minus, "blue", lw=2) plb.xlim(date[0], date[-1]) plb.xticks([]) plb.ylim(-1, 15) plb.ylabel("atm minus surf [C]") ################# Richardson no and stability correction of turbulent fluxes ####################### plt.subplot2grid((11, 1), (6, 0), rowspan=1) plb.plot(date, R_i, color="blue", label="Richardson no.", lw=1, zorder=15) plb.ylabel("R_i (b) []") plb.twinx() stable = [] unstable = [] for i in range(0, len(R_i), 1): if R_i[i] > 0: stable.append(stability_correction[i]) unstable.append(np.nan) elif R_i[i] < 0: unstable.append(stability_correction[i]) stable.append(np.nan) else: unstable.append(np.nan) stable.append(np.nan) plb.plot(date, stability_correction, "black", lw=2) plb.plot(date, stable, "green", lw=2) plb.plot(date, unstable, "red", lw=2) plb.xlim(date[0], date[-1]) plb.xticks([]) plb.ylabel("stable(g) unstable(r) []") ############# Energy terms and albedo ################ plt.subplot2grid((11, 1), (7, 0), rowspan=4) # plot surface albedo for i in range(0, len(albedo) - 1, 1): if albedo[i] > 0.: plb.hlines(-11000, date[i], date[i + 1], lw=25, color=str(albedo[i])) elif clouds[i] == np.nan: plb.hlines(-11000, date[i], date[i + 1], lw=25, color="1.0") plb.plot(date, SM, "red", lw=3) plb.plot(date, SC, "blue", lw=3) plb.plot(date, [0.] * len(date), "white", lw=2) plb.plot(date, H, "blue") plb.plot(date, LE, "navy") plb.plot(date, R, "turquoise") plb.plot(date, G, "crimson") plb.plot(date, L, "green", lw=1) plb.plot(date, S, "gold", lw=1) #plb.plot(date, s_inn, "gold", lw=1) plb.plot(date, CC, "pink", lw=1) plb.plot(date, EB, "black") plb.ylim(-12000, 13000) plb.xlim(date[0], date[-1]) #fig.tight_layout() plb.ylabel("Q [kJ/m2/24hrs]") plb.savefig(filename)
def plot_ice_cover_eb_simple(ice_cover, energy_balance, observed_ice, date, temp, snotot, filename): """ :param ice_cover: :param energy_balance: :param observed_ice: :param date: :param temp: :param snotot: :param filename: :return: Note: http://matplotlib.org/mpl_examples/color/named_colors.png """ fsize = (16, 16) plt.figure(figsize=fsize) #fig = pplt.figure(figsize=fsize) plt.clf() ############## First subplot plt.subplot2grid((5, 1), (0, 0), rowspan=2) # depending on how many days are in the plot, the line weight of the modelled data should be adjusted modelledLineWeight = 1100 / len(ice_cover) # dont need to keep the colunm coordinates, but then again, why not..? Usefull for debuging allColumnCoordinates = [] # plot total snow depth on land plb.plot(date, snotot, "gray") plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover))) # a variable for the lowest point on the ice_cover. It is used for setting the lower left y-limit . lowest_point = 0. # Plot ice_cover for ic in ice_cover: # some idea of progress on the plotting if ic.date.day == 1: print((ic.date).strftime('%Y%m%d')) # make data for plotting. [icelayers.. [fro, too, icetype]]. columncoordinates = [] too = -ic.water_line # water line is on xaxis for i in range(len(ic.column) - 1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height columncoordinates.append([fro, too, layer.type]) if fro < lowest_point: lowest_point = fro # add coordinates to a vline plot plb.vlines(ic.date, fro, too, lw=modelledLineWeight, color=layer.get_colour()) #ic.getColour(layer.type)) allColumnCoordinates.append(columncoordinates) # plot observed ice columns for ic in observed_ice: if len(ic.column) == 0: height = 0.05 plb.vlines(ic.date, -height, height, lw=4, color='white') plb.vlines(ic.date, -height, height, lw=2, color='red') else: # some idea of progress on the plotting print("Plotting observations.") # make data for plotting. [ice layers.. [fro, too, icetype]]. too = -ic.water_line # water line is on xaxis for i in range(len(ic.column) - 1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height if fro < lowest_point: lowest_point = fro padding = 0. padding_color = 'white' # outline the observations in orange if I have modelled the ice height after observation. if ic.metadata.get('IceHeightAfter') == 'Modeled': padding_color = 'orange' # add coordinates to a vline plot plb.vlines(ic.date, fro - padding, too + padding, lw=6, color=padding_color) plb.vlines(ic.date, fro, too, lw=4, color=layer.get_colour()) # the limits of the left side y-axis is defined relative the lowest point in the ice cover # and the highest point of the observed snow cover. 
plb.ylim(lowest_point * 1.1, max(snotot) * 1.05) # Plot temperatures on a separate y axis plb.twinx() temp_pluss = [] temp_minus = [] for i in range(0, len(temp), 1): if temp[i] >= 0: temp_pluss.append(temp[i]) temp_minus.append(np.nan) else: temp_minus.append(temp[i]) temp_pluss.append(np.nan) plb.plot(date, temp, "black") plb.plot(date, temp_pluss, "red") plb.plot(date, temp_minus, "blue") plb.ylim(-4 * (max(temp) - min(temp)), max(temp)) ######################################## temp_atm = [] temp_surf = [] atm_minus_surf = [] itterations = [] EB = [] S = [] L = [] H = [] LE = [] T = [] R = [] G = [] s_inn = [] albedo = [] SC = [] R_i = [] stability_correction = [] CC = [] SM = [] if energy_balance[0].date > date[0]: i = 0 while energy_balance[0].date > date[i]: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) T.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) i += 1 for eb in energy_balance: if eb.EB is None: temp_atm.append(np.nan) temp_surf.append(np.nan) atm_minus_surf.append(np.nan) itterations.append(np.nan) EB.append(np.nan) S.append(np.nan) L.append(np.nan) H.append(np.nan) LE.append(np.nan) T.append(np.nan) R.append(np.nan) G.append(np.nan) s_inn.append(np.nan) albedo.append(np.nan) SC.append(np.nan) R_i.append(np.nan) stability_correction.append(np.nan) CC.append(np.nan) SM.append(np.nan) else: temp_atm.append(eb.temp_atm) temp_surf.append(eb.temp_surface) atm_minus_surf.append(eb.temp_atm - eb.temp_surface) itterations.append(eb.iterations) EB.append(eb.EB) S.append(eb.S) L.append(eb.L_a + eb.L_t) H.append(eb.H) LE.append(eb.LE) T.append(eb.H + eb.LE) R.append(eb.R) G.append(eb.G) s_inn.append(eb.s_inn) albedo.append(eb.albedo) SC.append(eb.SC) R_i.append(eb.R_i) stability_correction.append(eb.stability_correction) CC.append(eb.CC) SM.append(eb.SM) ############################# plt.subplot2grid((5, 1), (2, 0), rowspan=3) plb.plot(date, SM, "red", lw=2) plb.plot(date, SC, "blue", lw=2) plb.plot(date, [0.] * len(date), "white", lw=2) #plb.plot(date, H, "blue") #plb.plot(date, LE, "navy") #plb.plot(date, T, "blue") plb.plot(date, R, "black") #plb.plot(date, G, "crimson") #plb.plot(date, L, "green", lw=1) #plb.plot(date, S, "gold", lw=1) #plb.plot(date, s_inn, "gold", lw=1) #plb.plot(date, CC, "pink", lw=1) #plb.plot(date, EB, "black") plb.ylim(-5000, 5000) plb.xlim(date[0], date[-1]) #fig.tight_layout() plb.ylabel("Q [kJ/m2/24hrs]") plb.savefig(filename)
def correlator_online_mp(fileinput='input.txt', dark_file='default', mask_file='default', plot='yes'): global nq, n, chn, chn2, rcr, index_in_q, lag, dt, norm, nc, I_avg, I_avg2, lm1, lm2, lm1b, lm2b, ax1, ax1b, ax2, ax2b, nq, detector, ccd_img, flat_field, tot_darks, totmask, ttdata, tcalc_cum, tplot_cum, tread_cum, tI_avg, static_corrected, firstfile, tolerance, I_avgs, xnq, Mythread, l, y, v, input_info, wtotmask, totsaxs, tI_avg, q_2tcf time1 = time.time() p.rc('image', origin='lower') p.rc('image', interpolation='nearest') p.close() print 'multiprocessor' print 'reading input...' input_info = get_input(fileinput) ##processing input file##### dir = input_info['dir'] dir_dark = input_info['dark dir'] if dir_dark == 'none': dir_dark = dir file_prefix = input_info['file_prefix'] ext = input_info['file_suffix'] # New version has capabilities of reading also .gz files if ext == '.edf.gz': dataread = EdfFile.EdfGzipFile else: dataread = EdfFile.EdfFile firstfile = int(input_info['n_first_image']) lastfile = int(input_info['n_last_image']) + 1 firstdark = input_info['n_first_dark'] if firstdark.lower() != 'none': firstdark = int(input_info['n_first_dark']) lastdark = int(input_info['n_last_dark']) + 1 geometry = input_info['geometry'].lower() tolerance = float32(float(input_info['tolerance'])) avgt = input_info['lag time'].lower() if avgt == 'auto': lagt = [] lagt1 = 0 for k in xrange(firstfile + 40, firstfile + 100): filename = file_name(dir + file_prefix, ext, k) while os.path.exists(filename) is False: sys.stdout.write(50 * '\x08') sys.stdout.write('file ' + filename + 'still not ready') sys.stdout.flush() #rint 'file ' ,filename, 'still not ready' time.sleep(10) f = dataread(filename) params = f.GetHeader(0) if input_info['detector'] == 'medipix': lagt2 = float32(float(params['time_of_frame'])) lagt.append(lagt2 - lagt1) lagt1 = lagt2 # if (input_info['detector']=='princeton' or input_info['detector']=='andor'): else: counters = params['counter_mne'].split(' ') lagt_ind = counters.index('ccdtavg') values = params['counter_pos'].split(' ') lagt.append(float32(float(values[lagt_ind]))) del lagt[0] dt = average(array(lagt, dtype=float32)) print 'lag time =', dt else: dt = float32(float(input_info['lag time'])) q_2tcf = (input_info['q for TRC']).lower() if q_2tcf != 'none': q_2tcf = int(q_2tcf) out_dir = get_dir(input_info['output directory']) out_prefix = get_prefix(input_info['output filename prefix']) out_tot = out_dir + out_prefix ##end processing input file##### firstname = dir + file_name(file_prefix, ext, firstfile) f = dataread(firstname) ccd_info = f.GetStaticHeader(0) ncol = int(ccd_info['Dim_1']) nrows = int(ccd_info['Dim_2']) static = out_tot + 'static.edf' static = EdfFile.EdfFile(static) static_data = asfarray(static.GetData(0), dtype=float32) if input_info['n_first_dark'].lower() == 'none': print 'not using darks' tot_darks = 0 * static_data else: print 'using darks' if dark_file == 'default': dark_file = out_tot + 'dark.edf' print 'using dark file:', dark_file dark = EdfFile.EdfFile(dark_file) tot_darks = asfarray(dark.GetData(0), dtype=float32) toplot = static_data + .001 #to avoid zeros in plotting logarithm### print '...done' print '...reading q mask' if mask_file == 'default': mask_file = out_tot + 'mask.edf' print 'using mask file:', mask_file tot = EdfFile.EdfFile(mask_file) totmask = float32(tot.GetData(0) + tot.GetData(1)) wtotmask = where(totmask == 0) p.ion() fileq = out_tot + 'qmask.edf' file = EdfFile.EdfFile(fileq) q = file.GetData(0) maxval = int(amax(q) + 2) 
detector = input_info['detector'] flatfield_file = input_info['flatfield file'] if detector == 'medipix': flat_field = flatfield(detector, flatfield_file) else: flat_field = 1.0 print '...done' if geometry == 'saxs': print '...correcting static for baseline' xbeam = int(input_info['x direct beam']) ybeam = int(input_info['y direct beam']) static_data = rad_average(static_data, totmask, xbeam, ybeam) qaxis_list = [] npix_per_q = [] oneq = [] index_in_q = [] firstq = float32(float(input_info['first q'])) deltaq = float32(float(input_info['delta q'])) stepq = float32(float(input_info['step q'])) qvalue = firstq + deltaq / 2 static_corrected = ones(shape(static_data), dtype=float32) q *= abs(totmask - 1) total_pixels = 0 for i in range(2, maxval, 2): indices = where(q == i) index_in_q.append( indices ) #gives the indices of pixels that are not masked at this q if geometry == 'saxs': static_corrected[indices] = mean( static_data[indices]) / static_data[indices] npixel = len(static_data[indices]) npix_per_q.append(npixel) oneq.append(ones((1, npixel))) qaxis_list.append(qvalue) qvalue += deltaq + stepq total_pixels += npixel print '...done' nq = len(npix_per_q) xnq = xrange(nq) ncores = 1 ncores = min(ncores, nq) tmp_pix = 0 q_sec = [] if nq == 1: q_sec.append(0) elif ncores >= nq: q_sec = range(1, nq) else: for ii in xnq: if tmp_pix < total_pixels / (ncores): tmp_pix += npix_per_q[ii] if ii == nq - 1: q_sec.append(ii) else: q_sec.append(ii) tmp_pix = 0 + npix_per_q[ii] ncores = len(q_sec) tmpdat = loadtxt(out_tot + '1Dstatic.dat') qaxis = tmpdat[:, 0] I_q = tmpdat[:, 1] del tmpdat ##FINISHED INITIALIZING PART OF THE CODE###### ##START MAIN PART FOR CORRELATION##### chn = 16. chn2 = chn / 2 nfile = lastfile - firstfile rch = int(ceil(log(nfile / chn) / log(2)) + 1) ###2time if q_2tcf != 'none': ttdata = zeros((nfile, npix_per_q[q_2tcf - 1]), dtype=float32) ###2time rcr = chn + chn2 * ceil(log(nfile / chn) / log(2)) lag = zeros((1, rcr), dtype=float32) data_shape = p.shape(toplot) smatr = zeros(data_shape, dtype=float32) matr = zeros(data_shape, dtype=float32) norm = zeros((1, rcr), dtype=float32) for ir in xrange(rch): if ir == 0: lag[0, :chn] = dt * arange(1, chn + 1, 1) norm[0, :chn] = 1. / arange(nfile - 2, nfile - chn - 2, -1) else: lag[0, chn2 * (ir + 1):chn2 * (ir + 2)] = (dt * 2**ir) * arange(1 + chn2, chn + 1) norm[0, chn2 * (ir + 1):chn2 * (ir + 2)] = 1. 
/ arange( (nfile - 1) / (2**ir) - chn2 - 1, (nfile - 1) / (2**ir) - chn - 1, -1) #END of declaring and initializing variables#### #READING FILES filenames = [] for k in xrange(firstfile, lastfile): filenames.append(file_name(file_prefix, ext, k)) n = 0 if plot != 'no': ax1 = p.axes([0.11, 0.08, 0.75, 0.57]) ax1.set_xlabel('t [sec]') ax1.set_ylabel('g^2(q,t)') ax1b = p.twinx(ax1) ax1b.yaxis.tick_right() ax2 = p.axes([0.11, 0.73, 0.75, 0.19]) ax2.xaxis.tick_bottom() ax2.set_xlabel('t [sec]') ax2.set_ylabel('I(q,t) [a.u.]') ax2b = p.gcf().add_axes(ax2.get_position(), frameon=False) ax2b.xaxis.tick_top() ax2b.yaxis.tick_right() ax2b.xaxis.set_label_position('top') ax2b.set_xlabel('Image no.') label1 = 'q= %2.1e 1/Ang' % qaxis_list[0] label2 = 'q= %2.1e 1/Ang' % qaxis_list[nq / 2] lm1, = ax1.semilogx((1, ), (1, ), 'ro-', label=label1) lm1b, = ax1b.semilogx((1, ), (1, ), 'bo-', label=label2) ax1.legend(loc='lower left') ax1b.legend(loc=(0.02, 0.1)) lm2, = ax2.plot((1, ), (1, ), 'r-') lm2b, = ax2b.plot((1, ), (1, ), 'b-') p.setp(ax1.get_yticklabels(), color='r') p.setp(ax1b.get_yticklabels(), color='b') p.setp(ax2.get_yticklabels(), color='r') p.setp(ax2b.get_yticklabels(), color='b') tplot_cum = 0 tread_cum = 0 tcalc_cum = 0 tqueue_cum = 0 I_avg = zeros((1, nfile), float32) I_avg2 = zeros((1, nfile), float32) I_avgs = zeros((nfile, nq), float32) tI_avg = zeros((1, nfile), float32) mon = zeros((1, nfile), int16) detector = input_info['detector'].lower() Mythread = threading.Thread checkfile = os.path.exists n = 0 totsaxs = 0 * static_data goodsize = os.path.getsize(dir + filenames[n]) nnfile = nfile - 1 #if plot!='no': # tmpf=lambda x : True # thplot=Process(target=tmpf,args=([0])) # thplot.start() ######################multiprocessing####################################################### qur = [] qure = [] pcorr = [] for i in xrange(ncores): qur.append(Queue()) qure.append(Queue()) #qur.append(Queue()) quplot = Queue() for i in xrange(ncores): if i == 0: q_beg = 0 else: q_beg = q_sec[i - 1] q_end = q_sec[i] if i == ncores - 1: q_end = nq pcorr.append( Process(target=mp_corr, args=(i, nfile, chn, plot, npix_per_q[q_beg:q_end], index_in_q[q_beg:q_end], qur[i], qure[i], quplot))) for i in xrange(ncores): pcorr[i].start() n = 0 nc = 0 nnfile = nfile - 1 if input_info['normalize'].lower() != 'none': normalize = input_info['normalize'] print "normalizing to ", input_info['normalize'] else: print "not normalizing" while n < nnfile: tread = time.time() nc = n + 1 file = filenames[n] tmf = dir + file wait = 0 t0 = time.time() stop = 0 while checkfile(tmf) is False: p.draw() sys.stdout.write(50 * '\x08') sys.stdout.write('waiting for file' + file + '...') sys.stdout.flush() t1 = time.time() wait += t1 - t0 time.sleep(dt) t0 = t1 if wait > 10 * dt: print nfile ans = raw_input('\n will this file ever arrive? (y/N)') if ans.lower() == 'y': print '\n keep waiting...\n' time.sleep(3 * dt) wait = 0 else: stop = 1 nfile = n + 1 break if stop == 1: break if ext == '.edf': filesize = os.path.getsize(tmf) while filesize != goodsize: sys.stdout.write(50 * '\x08') sys.stdout.write('file ' + file + 'still not ready...') sys.stdout.flush() time.sleep(dt) filesize = os.path.getsize(tmf) f = dataread(tmf) dread(f, n, tot_darks, flat_field, static_corrected) mon[0, n] = monitor #for plot. TO be faster, I only updated plot each chn files. 
jj = 0 tmp_put = [] tqueue = time.time() for i in xnq: if i < q_sec[jj]: tmp_put.append(ccd_img[index_in_q[i]]) elif i == nq - 1: tmp_put.append(ccd_img[index_in_q[i]]) qur[jj].put(tmp_put) else: qur[jj].put(tmp_put) tmp_put = [] tmp_put.append(ccd_img[index_in_q[i]]) jj += 1 tqueue_cum += time.time() - tqueue if nc % chn == 0: pct = 100.0 * n / nfile sys.stdout.write(50 * '\x08') sys.stdout.write('read ' + str(int(pct)) + '% of files' + 32 * ' ') sys.stdout.flush() if plot != 'no': #thplot.join() xx = quplot.get() ttplot(xx[0], xx[1], xx[2], n + 1, I_avg[0, :n + 1], I_avg2[0, :n + 1]) #thplot=Process(target=ttplot,args=([xx[0],xx[1],xx[2],n+1,I_avg[0,:n+1],I_avg2[0,:n+1]])) #thplot.start() #thplot.join() n += 1 #if plot!='no': #thplot.join() sys.stdout.write(50 * '\x08') sys.stdout.flush() print "read 100% of files" ############################################################################################### from_proc = [] for i in xrange(ncores): from_proc.append(qure[i].get()) pcorr[i].join() qure[i].close ############################################################################################# #END OF MAIN LOOP #calculate 2 times correlation function print "saving results..." if stop == 1: tI_avg = tI_avg[:, :nfile] mon = mon[:, :nfile] I_avgs = I_avgs[:nfile, :] rch = int(ceil(log(nfile / nchannels) / log(2)) + 1) for ir in xrange(rch): if ir == 0: norm[0, :nchannels] = 1. / arange(nfile - 2, nfile - nchannels - 2, -1) else: norm[0, nchannels2 * (ir + 1):nchannels2 * (ir + 2)] = 1. / arange( (nfile - 1) / (2**ir) - nchannels2 - 1, (nfile - 1) / (2**ir) - nchannels - 1, -1) #calculate correlation functions corf = from_proc[0][0] sl = from_proc[0][1] sr = from_proc[0][2] tcalc_cum = from_proc[0][3] for i in xrange(1, ncores): corf = concatenate((corf, from_proc[i][0]), axis=0) sl = concatenate((sl, from_proc[i][1]), axis=0) sr = concatenate((sr, from_proc[i][2]), axis=0) tcalc_cum = max(tcalc_cum, from_proc[i][3]) indt = int(chn + chn2 * log(nfile / chn) / log(2)) - 2 cc = zeros((indt, nq + 1), float32) q_title = '#q values:' trace_title = '#file_no. , time, monitor, q values:' for cindex in xnq: q_title = q_title + ' ' + str(qaxis_list[cindex]) trace_title = trace_title + ' ' + str(qaxis_list[cindex]) cc[:,cindex+1]=corf[cindex,:indt]/(sl[cindex,:indt]*sr[cindex,:indt])/\ norm[0,:indt] cc[:, 0] = lag[0, :indt] q_title = q_title + '\n' trace_title = trace_title + '\n' del indt f = open(out_tot + 'cf.dat', 'w') f.write(q_title) savetxt(f, cc) f.close() del cc f = open(out_tot + 'trace.dat', 'w') f.write(trace_title) traces = zeros((nfile, nq + 3), float32) traces[:, 0] = tI_avg / dt + firstfile traces[:, 1] = tI_avg traces[:, 2] = mon traces[:, 3:] = I_avgs savetxt(f, traces) f.close() del traces static = out_tot + 'static.edf' static = EdfFile.EdfFile(static) totsaxs = totsaxs / n - tot_darks totsaxs[totsaxs <= 0] = 0 static.WriteImage({}, totsaxs, 0) del static print 'correlation functions are saved to ', out_tot + 'cf.dat' print 'traces are saved to ', out_tot + 'trace.dat' if plot != 'no': p.hold(True) p.close() if q_2tcf != 'none': print "calculating time resolved cf and chi4..." if nfile > 6000: #this is for 4 GB RAM PC nfile = 6000 n = 6000 lind2 = npix_per_q[q_2tcf - 1] / 16 l = arange(5) * 0 y = [] v = [] for i in range(5): y.append([]) v.append([]) ib = 0 for i in xrange(16): sys.stdout.write(50 * '\x08') sys.stdout.write('done ' + str(int(i / 16. 
* 100)) + '% of data' + 32 * ' ') sys.stdout.flush() ie = ib + lind2 y[0].append(trc(ttdata[:n - 1, ib:ie])) v[0].append(vartrc(y[0][-1])) if l[0] == 1: recurf(0) else: l[0] += 1 ib += lind2 vm = [] for i in range(4, -1, -1): vm.append(mean(v[i], 0)) vm = array(vm) del ttdata del v sys.stdout.write(50 * '\x08') sys.stdout.flush() file_2times = out_tot + '2times_q_' + str(q_2tcf) + '.edf' ytrc.write(file_2times, y[4][0]) print 'Time resolved CF is saved to ' + out_tot + '2times_q_' + str( q_2tcf) + '.edf' N = array([[1], [2], [4], [8], [16]]) / float(npix_per_q[q_2tcf - 1]) data = concatenate((N, vm), 1).T #print 'number of pixels ',lind[ttcf_par] #print 'q value=', qv[ttcf_par] p0 = [0.0, 1.0] it = range(len(data[1:, 0])) p1 = zeros((len(data[1:, 0]), len(p0) + 1)) p1[:, 0] = (asfarray(it) + 1.0) * dt xdata = data[0, :] for i in it: ydata = data[i + 1, :] p1[i, 1:], success = leastsq(errfunc, p0, args=(xdata, ydata)) outfile = out_tot + 'fitchi4_q_' + str(q_2tcf) + '.dat' f = open(outfile, 'w') f.write("#time chi4 error q value:" + str(qaxis_list[q_2tcf - 1]) + "\n") savetxt(f, p1) f.close() print 'file is saved to ' + outfile print "saving results..." time2 = time.time() print 'elapsed time', time2 - time1 print 'elapsed time for plotting', tplot_cum print 'elapsed time for reading', tread_cum print 'elapsed time for correlating', tcalc_cum print 'elapsed time for queueing', tqueue_cum print 'used ncores=', ncores
def meanCurvesT(data, confidence=0.95, plot=True, numPoints=200, epslim=None, stressLevel=0.05, picPrefix=None): expKeys = list(data.keys()) tmax = data[expKeys[0]]['время(мкс)'][-1] tmin = data[expKeys[0]]['время(мкс)'][0] stress0 = stressLevel * max(data[expKeys[0]]['напряжение(МПа)']) for j, s in enumerate(data[expKeys[0]]['напряжение(МПа)']): if s >= stress0: t0 = data[expKeys[0]]['время(мкс)'][j] break for exp in data: for j, s in enumerate(data[exp]['напряжение(МПа)']): if s >= stress0: tt = data[exp]['время(мкс)'][j] break data[exp]['время(мкс)'] = list( map(lambda x: x - tt + t0, data[exp]['время(мкс)'])) tmin = max(tmin, data[exp]['время(мкс)'][0]) tmax = min(tmax, data[exp]['время(мкс)'][-1]) tt = np.linspace(tmin, tmax, numPoints) ee = [] ss = [] dee = [] for d in data: ee.append(np.interp(tt, data[d]['время(мкс)'], data[d]['деформация'])) ss.append( np.interp(tt, data[d]['время(мкс)'], data[d]['напряжение(МПа)'])) dee.append( np.interp(tt, data[d]['время(мкс)'], data[d]['скорость деформации(1/c)'])) e, he = mean_confidence_interval(ee, confidence) s, hs = mean_confidence_interval(ss, confidence) de, hde = mean_confidence_interval(dee, confidence) if plot: c = [] for i, d in enumerate(data): l, = plt.plot(data[d]['деформация'], data[d]['напряжение(МПа)'], label=d) c.append(l.get_color()) plt.grid() plt.legend(bbox_to_anchor=(1.5, 1)) plt.xlabel('деформация') plt.ylabel('напряжение, МПа') plt.twinx() plt.ylabel('скорость деформации, 1/c') for i, d in enumerate(data): l, = plt.plot(data[d]['деформация'], data[d]['скорость деформации(1/c)'], '--', color=c[i], label=d) if epslim: plt.xlim(0, epslim) if picPrefix: plt.gcf().savefig(picPrefix + '-all.png') plt.figure() for d in data: plt.plot(data[d]['деформация'], data[d]['напряжение(МПа)']) plt.errorbar(e, s, yerr=hs, xerr=he, color='k', errorevery=3) plt.grid() plt.xlabel('деформация') plt.ylabel('напряжение, МПа') if picPrefix: plt.gcf().savefig(picPrefix + '-es.png') plt.figure() for d in data: plt.plot(data[d]['время(мкс)'], data[d]['деформация']) plt.grid() plt.errorbar(tt, e, yerr=he, color='k', errorevery=3) plt.xlabel('время, мкс') plt.ylabel('деформация') if picPrefix: plt.gcf().savefig(picPrefix + '-te.png') plt.figure() for d in data: plt.plot(data[d]['время(мкс)'], data[d]['напряжение(МПа)']) plt.grid() plt.errorbar(tt, s, yerr=hs, color='k', errorevery=3) plt.xlabel('время, мкс') plt.ylabel('напряжение, МПа') if picPrefix: plt.gcf().savefig(picPrefix + '-ts.png') plt.figure() for d in data: plt.plot(data[d]['время(мкс)'], data[d]['скорость деформации(1/c)']) plt.grid() plt.errorbar(tt, de, yerr=hde, color='k', errorevery=3) plt.xlabel('время, мкс') plt.ylabel('скорость деформации, 1/c') if picPrefix: plt.gcf().savefig(picPrefix + '-tde.png') plt.show() return { 'et': list(e), 'st': list(s), 'det': list(de), 'he': list(he), 'hs': list(hs), 'hde': list(hde), 't': list(tt) }
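# meanCurvesT() above relies on mean_confidence_interval(), which is not shown.
# A minimal sketch of such a helper, assuming it returns the pointwise mean and
# the half-width of a Student-t confidence interval over the interpolated curves;
# the exact behaviour of the original helper is an assumption.
import numpy as np
import scipy.stats

def mean_confidence_interval(data, confidence=0.95):
    # data: list of equally sampled curves (one per experiment)
    a = np.asarray(data, dtype=float)
    m = a.mean(axis=0)                                   # pointwise mean curve
    se = scipy.stats.sem(a, axis=0)                      # standard error of the mean
    h = se * scipy.stats.t.ppf((1 + confidence) / 2.0,   # half-width of the CI
                               a.shape[0] - 1)
    return m, h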
def plot_ice_cover_eb( ice_cover, energy_balance, observed_ice, date, temp, snotot, filename, prec=None, wind=None, clouds=None): """ :param ice_cover: :param energy_balance: :param observed_ice: :param date: :param temp: :param snotot: :param filename: :param prec: :param wind: :param clouds: :return: Note: http://matplotlib.org/mpl_examples/color/named_colors.png """ fsize = (16, 16) plt.figure(figsize=fsize) #fig = pplt.figure(figsize=fsize) plt.clf() ############## First subplot plt.subplot2grid((11, 1), (0, 0), rowspan=2) # depending on how many days are in the plot, the line weight of the modelled data should be adjusted modelledLineWeight = 1100/len(ice_cover) # dont need to keep the colunm coordinates, but then again, why not..? Usefull for debuging allColumnCoordinates = [] # plot total snow depth on land plb.plot(date, snotot, "gray") plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover))) # a variable for the lowest point on the ice_cover. It is used for setting the lower left y-limit . lowest_point = 0. # Plot ice_cover for ic in ice_cover: # some idea of progress on the plotting if ic.date.day == 1: print((ic.date).strftime('%Y%m%d')) # make data for plotting. [icelayers.. [fro, too, icetype]]. columncoordinates = [] too = -ic.water_line # water line is on xaxis for i in range(len(ic.column)-1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height columncoordinates.append([fro, too, layer.type]) if fro < lowest_point: lowest_point = fro # add coordinates to a vline plot plb.vlines(ic.date, fro, too, lw=modelledLineWeight, color=layer.get_colour()) #ic.getColour(layer.type)) allColumnCoordinates.append(columncoordinates) # plot observed ice columns for ic in observed_ice: if len(ic.column) == 0: height = 0.05 plb.vlines(ic.date, -height, height, lw=4, color='white') plb.vlines(ic.date, -height, height, lw=2, color='red') else: # some idea of progress on the plotting print("Plotting observations.") # make data for plotting. [ice layers.. [fro, too, icetype]]. too = -ic.water_line # water line is on xaxis for i in range(len(ic.column)-1, -1, -1): layer = ic.column[i] fro = too too = too + layer.height if fro < lowest_point: lowest_point = fro padding = 0. padding_color = 'white' # outline the observations in orange if I have modelled the ice height after observation. if ic.metadata.get('IceHeightAfter') == 'Modeled': padding_color = 'orange' # add coordinates to a vline plot plb.vlines(ic.date, fro-padding, too+padding, lw=6, color=padding_color) plb.vlines(ic.date, fro, too, lw=4, color=layer.get_colour()) # the limits of the left side y-axis is defined relative the lowest point in the ice cover # and the highest point of the observed snow cover. 
    plb.ylim(lowest_point * 1.1, max(snotot) * 1.05)

    # Plot temperatures on a separate y-axis
    plb.twinx()
    temp_pluss = []
    temp_minus = []

    for i in range(0, len(temp), 1):
        if temp[i] >= 0:
            temp_pluss.append(temp[i])
            temp_minus.append(np.nan)
        else:
            temp_minus.append(temp[i])
            temp_pluss.append(np.nan)

    plb.plot(date, temp, "black")
    plb.plot(date, temp_pluss, "red")
    plb.plot(date, temp_minus, "blue")
    plb.ylim(-4 * (max(temp) - min(temp)), max(temp))

    ########################################

    temp_atm = []
    temp_surf = []
    atm_minus_surf = []
    itterations = []
    EB = []
    S = []
    L = []
    H = []
    LE = []
    R = []
    G = []
    s_inn = []
    albedo = []
    SC = []
    R_i = []
    stability_correction = []
    CC = []
    SM = []

    if energy_balance[0].date > date[0]:
        i = 0
        while energy_balance[0].date > date[i]:
            temp_atm.append(np.nan)
            temp_surf.append(np.nan)
            atm_minus_surf.append(np.nan)
            itterations.append(np.nan)
            EB.append(np.nan)
            S.append(np.nan)
            L.append(np.nan)
            H.append(np.nan)
            LE.append(np.nan)
            R.append(np.nan)
            G.append(np.nan)
            s_inn.append(np.nan)
            albedo.append(np.nan)
            SC.append(np.nan)
            R_i.append(np.nan)
            stability_correction.append(np.nan)
            CC.append(np.nan)
            SM.append(np.nan)
            i += 1

    for eb in energy_balance:
        if eb.EB is None:
            temp_atm.append(np.nan)
            temp_surf.append(np.nan)
            atm_minus_surf.append(np.nan)
            itterations.append(np.nan)
            EB.append(np.nan)
            S.append(np.nan)
            L.append(np.nan)
            H.append(np.nan)
            LE.append(np.nan)
            R.append(np.nan)
            G.append(np.nan)
            s_inn.append(np.nan)
            albedo.append(np.nan)
            SC.append(np.nan)
            R_i.append(np.nan)
            stability_correction.append(np.nan)
            CC.append(np.nan)
            SM.append(np.nan)
        else:
            temp_atm.append(eb.temp_atm)
            temp_surf.append(eb.temp_surface)
            atm_minus_surf.append(eb.temp_atm - eb.temp_surface)
            itterations.append(eb.iterations)
            EB.append(eb.EB)
            S.append(eb.S)
            L.append(eb.L_a + eb.L_t)
            H.append(eb.H)
            LE.append(eb.LE)
            R.append(eb.R)
            G.append(eb.G)
            s_inn.append(eb.s_inn)
            albedo.append(eb.albedo)
            SC.append(eb.SC)
            R_i.append(eb.R_i)
            stability_correction.append(eb.stability_correction)
            CC.append(eb.CC)
            SM.append(eb.SM)

    ############### Second sub plot ##########################
    plt.subplot2grid((11, 1), (2, 0), rowspan=1)

    plb.bar(date, itterations, label="Iterations for T_sfc", color="gray")
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylabel("#")
    # l = plb.legend()
    # l.set_zorder(20)

    ############## CC, wind and prec ##########################
    plt.subplot2grid((11, 1), (3, 0), rowspan=1)

    # plot precipitation
    prec_mm = [p * 1000. for p in prec]
    plb.bar(date, prec_mm, width=1, lw=0.5, label="Precipitation", color="deepskyblue", zorder=10)
    plb.ylabel("RR [mm]")
    plb.xlim(date[0], date[-1])
    plb.ylim(0, max(prec_mm) * 1.1)
    plb.xticks([])

    # plot cloud cover
    for i in range(0, len(clouds) - 1, 1):
        if clouds[i] > 0:
            plb.hlines(0, date[i], date[i + 1], lw=190, color=str(-(clouds[i] - 1.)))
        elif np.isnan(clouds[i]):
            plb.hlines(0, date[i], date[i + 1], lw=190, color="pink")
        else:
            plb.hlines(0, date[i], date[i + 1], lw=190, color=str(-(clouds[i] - 1.)))

    plb.twinx()
    plb.plot(date, wind, color="greenyellow", label="Wind 2m", lw=2, zorder=15)
    plb.ylabel("FFM [m/s]")

    ############ Temp diff sfc and atm #############################
    plt.subplot2grid((11, 1), (4, 0), rowspan=2)

    plb.plot(date, temp_atm, "black", zorder=5)
    plb.plot(date, temp, "blue", zorder=10)
    plb.plot(date, temp_surf, "green")
    area = np.minimum(temp_atm, temp_surf)

    plb.fill_between(date, temp_atm, area, color='red')    # , alpha='0.5')
    plb.fill_between(date, temp_surf, area, color='blue')  # , alpha='0.5')
    plb.ylim(-50, 20)
    plb.ylabel("[C]")

    # this plots temperature on a separate right-side axis
    plb.twinx()

    temp_pluss = []
    temp_minus = []

    for i in range(0, len(atm_minus_surf), 1):
        if atm_minus_surf[i] >= 0:
            temp_pluss.append(atm_minus_surf[i])
            temp_minus.append(np.nan)
        else:
            temp_minus.append(atm_minus_surf[i])
            temp_pluss.append(np.nan)

    plb.plot(date, atm_minus_surf, "black", lw=2)
    plb.plot(date, temp_pluss, "red", lw=2)
    plb.plot(date, temp_minus, "blue", lw=2)
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylim(-1, 15)
    plb.ylabel("atm minus surf [C]")

    ################# Richardson no and stability correction of turbulent fluxes #######################
    plt.subplot2grid((11, 1), (6, 0), rowspan=1)

    plb.plot(date, R_i, color="blue", label="Richardson no.", lw=1, zorder=15)
    plb.ylabel("R_i (b) []")

    plb.twinx()

    stable = []
    unstable = []
    for i in range(0, len(R_i), 1):
        if R_i[i] > 0:
            stable.append(stability_correction[i])
            unstable.append(np.nan)
        elif R_i[i] < 0:
            unstable.append(stability_correction[i])
            stable.append(np.nan)
        else:
            unstable.append(np.nan)
            stable.append(np.nan)

    plb.plot(date, stability_correction, "black", lw=2)
    plb.plot(date, stable, "green", lw=2)
    plb.plot(date, unstable, "red", lw=2)
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylabel("stable(g) unstable(r) []")

    ############# Energy terms and albedo ################
    plt.subplot2grid((11, 1), (7, 0), rowspan=4)

    # plot surface albedo
    for i in range(0, len(albedo) - 1, 1):
        if albedo[i] > 0.:
            plb.hlines(-11000, date[i], date[i + 1], lw=25, color=str(albedo[i]))
        elif np.isnan(clouds[i]):
            plb.hlines(-11000, date[i], date[i + 1], lw=25, color="1.0")

    plb.plot(date, SM, "red", lw=3)
    plb.plot(date, SC, "blue", lw=3)
    plb.plot(date, [0.] * len(date), "white", lw=2)
    plb.plot(date, H, "blue")
    plb.plot(date, LE, "navy")
    plb.plot(date, R, "turquoise")
    plb.plot(date, G, "crimson")
    plb.plot(date, L, "green", lw=1)
    plb.plot(date, S, "gold", lw=1)
    # plb.plot(date, s_inn, "gold", lw=1)
    plb.plot(date, CC, "pink", lw=1)
    plb.plot(date, EB, "black")

    plb.ylim(-12000, 13000)
    plb.xlim(date[0], date[-1])
    # fig.tight_layout()
    plb.ylabel("Q [kJ/m2/24hrs]")

    plb.savefig(filename)
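# The temperature panels above colour values above and below zero differently by building two
# copies of the series with np.nan in the opposite sign's positions; matplotlib leaves gaps at
# NaN, so the red and blue traces never overlap. A minimal standalone sketch of that trick,
# with made-up data (illustrative only):
import numpy as np
import matplotlib.pyplot as plt

temps = np.array([-3.0, -1.5, 0.5, 2.0, -0.5, 4.0])
x = np.arange(len(temps))

temp_plus = np.where(temps >= 0, temps, np.nan)    # keep only non-negative values
temp_minus = np.where(temps < 0, temps, np.nan)    # keep only negative values

plt.plot(x, temps, "black")
plt.plot(x, temp_plus, "red")
plt.plot(x, temp_minus, "blue")
plt.show()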
        plt.hist(dct['neg_llhs'], alpha=0.5, label='neg', bins=bins, normed=True)
        plt.hist(dct['pos_llhs'], alpha=0.5, label='pos', bins=bins, normed=True)

        neg_N = len(dct['neg_llhs'])
        pos_N = len(dct['pos_llhs'])

        if not adjusted:
            print 'Plotting', i
            plt.twinx()
            info = sinfo[i]
            #x0 = np.asarray([info['start'] + info['step'] * k for k in xrange(len(info['points']))])
            y = info['points']
            x0 = np.arange(y.size) * info['step'] + info['start']
            plt.plot(x0, y, linewidth=2.0, color='red')
            print x0[0], x0[-1], y[0], y[-1]
            #plt.plot(x0, y2, linewidth=1.0, color='blue')

        plt.xlim((mn, mx))
        #plt.ylim((-30, 30))

        if i == L - 1:
            if adjusted:
                label = 'score'
            else:
                label = 'LLH (without const.)'
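# The fragment above overlays two normalised log-likelihood histograms and then switches to a
# second y-axis (plt.twinx()) for a red curve sampled on a regular grid. A self-contained
# sketch of the same pattern with synthetic data; note that recent matplotlib uses
# density=True where the older code above uses normed=True:
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
neg_llhs = rng.normal(-1.0, 1.0, 500)
pos_llhs = rng.normal(1.0, 1.0, 500)
bins = np.linspace(-5, 5, 40)

plt.hist(neg_llhs, alpha=0.5, label='neg', bins=bins, density=True)
plt.hist(pos_llhs, alpha=0.5, label='pos', bins=bins, density=True)
plt.legend()

plt.twinx()                                   # independent y-axis for the curve
x0 = np.linspace(-5, 5, 200)
plt.plot(x0, np.tanh(x0), linewidth=2.0, color='red')
plt.xlim((-5, 5))
plt.show()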
def plot_fit_summary(model, i, fit):
    import numpy
    import scipy.stats
    import scipy.special
    from matplotlib import pylab

    sel_sum = model.model_selection_summary(fit)

    sel_levels = {
        k: p["selection_level"] if p["selection_level"] else 0
        for k, p in list(model.population_data.items())}
    sel_fracs = {
        k: p["P_sel"][i] / p["P_sel"].sum()
        for k, p in list(model.population_data.items())}

    pylab.xticks(list(sel_levels.values()), list(sel_levels.keys()))
    pylab.xlim((-1, 7))

    # order populations by selection level (used for the observed curve below)
    porder = [
        k for k, p in
        sorted(model.population_data.items(), key=lambda kp: kp[1]["selection_level"])]

    pylab.plot(
        [sel_levels[k] for k in porder],
        [sel_fracs[k] for k in porder],
        "-o", color="black", label="observed")

    lbl = False
    for k in sel_sum:
        n = sel_sum[k]["P_sel"].sum()
        p = sel_sum[k]["pop_fraction"][i]
        sel_level = model.population_data[k]["selection_level"]

        counts = sel_sum[k]["P_sel"][i]
        pylab.text(sel_levels[k] + 0.2, sel_fracs[k], '%.0f' % counts)

        if p <= 0:
            continue

        bn = scipy.stats.binom(n=n, p=p)

        parkey = model.population_data[k]["parent"]
        pylab.plot(
            [sel_levels[parkey], sel_levels[k]],
            [sel_fracs[parkey], float(bn.ppf(.5)) / n],
            "--", color="red", alpha=.25)

        for ci in (.68, .95, .99):
            pylab.plot(
                [sel_level] * 2, bn.ppf([ci, 1 - ci]) / n,
                linewidth=10, color="red", alpha=.25,
                label="predicted" if not lbl else None)
            lbl = True

    pylab.legend(fontsize="large", loc="best")

    pylab.twinx()
    xs = numpy.linspace(-2, 8)
    sel_ec50 = fit["sel_ec50"][i]
    sel_k = fit["sel_k"][i] if len(fit["sel_k"]) > 1 else fit["sel_k"]
    pylab.plot(xs, scipy.special.expit(-sel_k * (xs - sel_ec50)), alpha=.75)
    pylab.yticks([], [])

    pylab.title("%s - ec50: %.2f - k: %.2f" % (i, sel_ec50, sel_k))
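# plot_fit_summary draws the predicted selection fractions as stacked binomial quantile bars:
# scipy.stats.binom.ppf at several coverage levels, divided by the number of trials. A minimal
# sketch of that idea in isolation, with invented n, p and x values (illustration only):
import scipy.stats
import matplotlib.pyplot as plt

n, p, x = 1000, 0.3, 2.0                      # trials, predicted fraction, x position
bn = scipy.stats.binom(n=n, p=p)

for ci in (.68, .95, .99):
    lo, hi = bn.ppf([1 - ci, ci]) / n         # quantile band expressed as a fraction
    plt.plot([x, x], [lo, hi], linewidth=10, color="red", alpha=.25)

plt.plot([x], [p], "o", color="black")        # central value for reference
plt.show()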
# test_loss, = ax1.plot(test_log['#Iters'], test_log['TestLoss'], linewidth=2, color='green')
train_loss = plt.plot(train_iter, train_loss, label='Loss', color='red', linewidth=1)
plt.xlabel('Iterations', fontsize=15)
plt.ylabel('Loss', fontsize=15)
plt.yscale('log')
plt.tick_params(labelsize=10)
plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.)

# Plotting Accuracy
ax2 = plt.twinx()
# test_accuracy, = plt.plot(test_log['#Iters'], test_acc, label='Acc. ImageNet', linewidth=3, color='red', linestyle=':')
train_lr, = plt.plot(test_log['#Iters'], train_lr, label='learning rate', linewidth=3, color='blue', linestyle=':')
ax2.set_ylim(ymin=0.00001, ymax=0.1)
ax2.set_ylabel('Accuracy', fontsize=15)
ax2.tick_params(labelsize=10)
plt.legend(bbox_to_anchor=(0.03, 1), loc=2, borderaxespad=0.)
plt.title('Training Curve', fontsize=18)

# Saving learning curve
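# The training-curve fragment above puts the loss on a log-scaled left axis and the learning
# rate on a right-hand axis created with plt.twinx(). A compact, self-contained version of the
# same layout with synthetic values (names such as `iters` and the schedule are illustrative):
import numpy as np
import matplotlib.pyplot as plt

iters = np.arange(1, 1001)
loss = 10.0 / np.sqrt(iters)                  # fake, decreasing loss
lr = np.where(iters < 500, 0.01, 0.001)       # fake step learning-rate schedule

plt.plot(iters, loss, label='Loss', color='red', linewidth=1)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.yscale('log')
plt.legend(loc='upper right')

ax2 = plt.twinx()
ax2.plot(iters, lr, label='learning rate', color='blue', linestyle=':')
ax2.set_ylim(0.00001, 0.1)
ax2.set_ylabel('learning rate')
ax2.legend(loc='lower left')
plt.title('Training Curve')
plt.show()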
# Plot electrode
plt.gca().add_patch(
    plt.Rectangle(((-distance - radius) / 2., plt.ylim()[0]), radius,
                  plt.ylim()[1] - plt.ylim()[0], color="grey"))
plt.gca().add_patch(
    plt.Rectangle(((distance - radius) / 2., plt.ylim()[0]), radius,
                  plt.ylim()[1] - plt.ylim()[0], color="grey"))

plt.xlabel(r'Depth [$\mathrm{\mu m}$]')
h1, l1 = plt.gca().get_legend_handles_labels()

ax2 = plt.twinx(plt.gca())

# Plot planar weighting potential in 1 dim
E_x, E_y = get_weighting_field(x=x_1d, y=np.zeros_like(x_1d), D=distance, S=radius, is_planar=False)
ax2.plot(x_1d, E_x, linestyle='-.', linewidth=2., label=r'$\mathrm{E_{x, w}}$', color='blue')
h2, l2 = ax2.get_legend_handles_labels()
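# The electrode patches above are plain plt.Rectangle objects stretched over the current
# y-range of the axis. A minimal sketch of that pattern with made-up electrode geometry
# (distance and radius here are arbitrary numbers, not the original values):
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-100, 100, 200)
plt.plot(x, np.exp(-(x / 40.0) ** 2))         # some curve to establish the y-limits

distance, radius = 50.0, 5.0
y0, y1 = plt.ylim()
for xc in ((-distance - radius) / 2., (distance - radius) / 2.):
    plt.gca().add_patch(
        plt.Rectangle((xc, y0), radius, y1 - y0, color="grey"))

plt.xlabel(r'Depth [$\mathrm{\mu m}$]')
plt.show()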
def correlator_online_mp(fileinput='input.txt', dark_file='default', mask_file='default', plot='yes'):
    global nq, n, chn, chn2, rcr, index_in_q, lag, dt, norm, nc, I_avg, I_avg2, lm1, lm2, lm1b, lm2b, \
        ax1, ax1b, ax2, ax2b, nq, detector, ccd_img, flat_field, tot_darks, totmask, ttdata, \
        tcalc_cum, tplot_cum, tread_cum, tI_avg, static_corrected, firstfile, tolerance, I_avgs, \
        xnq, Mythread, l, y, v, input_info, wtotmask, totsaxs, tI_avg, q_2tcf
    time1=time.time()
    p.rc('image', origin='lower')
    p.rc('image', interpolation='nearest')
    p.close()
    print 'multiprocessor'
    print 'reading input...'
    input_info=get_input(fileinput)
    ## processing input file #####
    dir= input_info['dir']
    dir_dark= input_info['dark dir']
    if dir_dark=='none':
        dir_dark=dir
    file_prefix=input_info['file_prefix']
    ext = input_info['file_suffix']
    # New version has capabilities of reading also .gz files
    if ext == '.edf.gz':
        dataread=EdfFile.EdfGzipFile
    else:
        dataread=EdfFile.EdfFile
    firstfile=int(input_info['n_first_image'])
    lastfile=int(input_info['n_last_image'])+1
    firstdark=input_info['n_first_dark']
    if firstdark.lower() != 'none':
        firstdark=int(input_info['n_first_dark'])
        lastdark=int(input_info['n_last_dark'])+1
    geometry=input_info['geometry'].lower()
    tolerance=float32(float(input_info['tolerance']))
    avgt = input_info['lag time'].lower()
    if avgt=='auto':
        lagt=[]
        lagt1=0
        for k in xrange(firstfile+40, firstfile+100):
            filename=file_name(dir+file_prefix, ext, k)
            while os.path.exists(filename) is False:
                sys.stdout.write(50*'\x08')
                sys.stdout.write('file '+filename+' still not ready')
                sys.stdout.flush()
                #print 'file ', filename, 'still not ready'
                time.sleep(10)
            f=dataread(filename)
            params=f.GetHeader(0)
            if input_info['detector']=='medipix':
                lagt2=float32(float(params['time_of_frame']))
                lagt.append(lagt2-lagt1)
                lagt1=lagt2
            # if (input_info['detector']=='princeton' or input_info['detector']=='andor'):
            else:
                counters=params['counter_mne'].split(' ')
                lagt_ind=counters.index('ccdtavg')
                values=params['counter_pos'].split(' ')
                lagt.append(float32(float(values[lagt_ind])))
        del lagt[0]
        dt=average(array(lagt, dtype=float32))
        print 'lag time =', dt
    else:
        dt=float32(float(input_info['lag time']))
    q_2tcf=(input_info['q for TRC']).lower()
    if q_2tcf!='none':
        q_2tcf=int(q_2tcf)
    out_dir=get_dir(input_info['output directory'])
    out_prefix=get_prefix(input_info['output filename prefix'])
    out_tot=out_dir+out_prefix
    ## end processing input file #####
    firstname=dir+file_name(file_prefix, ext, firstfile)
    f=dataread(firstname)
    ccd_info=f.GetStaticHeader(0)
    ncol=int(ccd_info['Dim_1'])
    nrows=int(ccd_info['Dim_2'])
    static=out_tot+'static.edf'
    static=EdfFile.EdfFile(static)
    static_data=asfarray(static.GetData(0), dtype=float32)
    if input_info['n_first_dark'].lower()=='none':
        print 'not using darks'
        tot_darks=0*static_data
    else:
        print 'using darks'
        if dark_file=='default':
            dark_file=out_tot+'dark.edf'
        print 'using dark file:', dark_file
        dark=EdfFile.EdfFile(dark_file)
        tot_darks=asfarray(dark.GetData(0), dtype=float32)
    toplot=static_data+.001  # to avoid zeros in plotting logarithm
    print '...done'
    print '...reading q mask'
    if mask_file=='default':
        mask_file=out_tot+'mask.edf'
    print 'using mask file:', mask_file
    tot=EdfFile.EdfFile(mask_file)
    totmask=float32(tot.GetData(0)+tot.GetData(1))
    wtotmask=where(totmask==0)
    p.ion()
    fileq=out_tot+'qmask.edf'
    file=EdfFile.EdfFile(fileq)
    q=file.GetData(0)
    maxval=int(amax(q)+2)
    detector=input_info['detector']
    flatfield_file=input_info['flatfield file']
    if detector=='medipix':
        flat_field=flatfield(detector, flatfield_file)
    else:
        flat_field=1.0
    print '...done'
    if geometry=='saxs':
        print '...correcting static for baseline'
        xbeam=int(input_info['x direct beam'])
        ybeam=int(input_info['y direct beam'])
        static_data=rad_average(static_data, totmask, xbeam, ybeam)
    qaxis_list=[]
    npix_per_q=[]
    oneq=[]
    index_in_q=[]
    firstq=float32(float(input_info['first q']))
    deltaq=float32(float(input_info['delta q']))
    stepq=float32(float(input_info['step q']))
    qvalue=firstq+deltaq/2
    static_corrected=ones(shape(static_data), dtype=float32)
    q*=abs(totmask-1)
    total_pixels=0
    for i in range(2, maxval, 2):
        indices=where(q==i)
        index_in_q.append(indices)  # gives the indices of pixels that are not masked at this q
        if geometry=='saxs':
            static_corrected[indices]=mean(static_data[indices])/static_data[indices]
        npixel=len(static_data[indices])
        npix_per_q.append(npixel)
        oneq.append(ones((1, npixel)))
        qaxis_list.append(qvalue)
        qvalue+=deltaq+stepq
        total_pixels+=npixel
    print '...done'
    nq=len(npix_per_q)
    xnq=xrange(nq)
    ncores=1
    ncores=min(ncores, nq)
    tmp_pix=0
    q_sec=[]
    if nq==1:
        q_sec.append(0)
    elif ncores>=nq:
        q_sec=range(1, nq)
    else:
        for ii in xnq:
            if tmp_pix<total_pixels/(ncores):
                tmp_pix+=npix_per_q[ii]
                if ii==nq-1:
                    q_sec.append(ii)
            else:
                q_sec.append(ii)
                tmp_pix=0+npix_per_q[ii]
    ncores=len(q_sec)
    tmpdat=loadtxt(out_tot+'1Dstatic.dat')
    qaxis=tmpdat[:,0]
    I_q=tmpdat[:,1]
    del tmpdat
    ## FINISHED INITIALIZING PART OF THE CODE ######
    ## START MAIN PART FOR CORRELATION #####
    chn=16.
    chn2=chn/2
    nfile=lastfile-firstfile
    rch=int(ceil(log(nfile/chn)/log(2))+1)
    ### 2time
    if q_2tcf!='none':
        ttdata=zeros((nfile, npix_per_q[q_2tcf-1]), dtype=float32)
    ### 2time
    rcr=chn+chn2*ceil(log(nfile/chn)/log(2))
    lag=zeros((1, rcr), dtype=float32)
    data_shape=p.shape(toplot)
    smatr=zeros(data_shape, dtype=float32)
    matr=zeros(data_shape, dtype=float32)
    norm=zeros((1, rcr), dtype=float32)
    for ir in xrange(rch):
        if ir==0:
            lag[0,:chn]=dt*arange(1, chn+1, 1)
            norm[0,:chn]=1./arange(nfile-2, nfile-chn-2, -1)
        else:
            lag[0,chn2*(ir+1):chn2*(ir+2)]=(dt*2**ir)*arange(1+chn2, chn+1)
            norm[0,chn2*(ir+1):chn2*(ir+2)]=1./arange((nfile-1)/(2**ir)-chn2-1, (nfile-1)/(2**ir)-chn-1, -1)
    # END of declaring and initializing variables ####
    # READING FILES
    filenames=[]
    for k in xrange(firstfile, lastfile):
        filenames.append(file_name(file_prefix, ext, k))
    n=0
    if plot!='no':
        ax1=p.axes([0.11, 0.08, 0.75, 0.57])
        ax1.set_xlabel('t [sec]')
        ax1.set_ylabel('g^2(q,t)')
        ax1b=p.twinx(ax1)
        ax1b.yaxis.tick_right()
        ax2=p.axes([0.11, 0.73, 0.75, 0.19])
        ax2.xaxis.tick_bottom()
        ax2.set_xlabel('t [sec]')
        ax2.set_ylabel('I(q,t) [a.u.]')
        ax2b=p.gcf().add_axes(ax2.get_position(), frameon=False)
        ax2b.xaxis.tick_top()
        ax2b.yaxis.tick_right()
        ax2b.xaxis.set_label_position('top')
        ax2b.set_xlabel('Image no.')
        label1='q= %2.1e 1/Ang' % qaxis_list[0]
        label2='q= %2.1e 1/Ang' % qaxis_list[nq/2]
        lm1,=ax1.semilogx((1,), (1,), 'ro-', label=label1)
        lm1b,=ax1b.semilogx((1,), (1,), 'bo-', label=label2)
        ax1.legend(loc='lower left')
        ax1b.legend(loc=(0.02, 0.1))
        lm2,=ax2.plot((1,), (1,), 'r-')
        lm2b,=ax2b.plot((1,), (1,), 'b-')
        p.setp(ax1.get_yticklabels(), color='r')
        p.setp(ax1b.get_yticklabels(), color='b')
        p.setp(ax2.get_yticklabels(), color='r')
        p.setp(ax2b.get_yticklabels(), color='b')
    tplot_cum=0
    tread_cum=0
    tcalc_cum=0
    tqueue_cum=0
    I_avg=zeros((1, nfile), float32)
    I_avg2=zeros((1, nfile), float32)
    I_avgs=zeros((nfile, nq), float32)
    tI_avg=zeros((1, nfile), float32)
    mon=zeros((1, nfile), int16)
    detector=input_info['detector'].lower()
    Mythread=threading.Thread
    checkfile=os.path.exists
    n=0
    totsaxs=0*static_data
    goodsize=os.path.getsize(dir+filenames[n])
    nnfile=nfile-1
    #if plot!='no':
    #    tmpf=lambda x: True
    #    thplot=Process(target=tmpf, args=([0]))
    #    thplot.start()
    ###################### multiprocessing #######################################################
    qur=[]
    qure=[]
    pcorr=[]
    for i in xrange(ncores):
        qur.append(Queue())
        qure.append(Queue())
    #qur.append(Queue())
    quplot=Queue()
    for i in xrange(ncores):
        if i==0:
            q_beg=0
        else:
            q_beg=q_sec[i-1]
        q_end=q_sec[i]
        if i==ncores-1:
            q_end=nq
        pcorr.append(Process(target=mp_corr, args=(i, nfile, chn, plot, npix_per_q[q_beg:q_end],
                                                   index_in_q[q_beg:q_end], qur[i], qure[i], quplot)))
    for i in xrange(ncores):
        pcorr[i].start()
    n=0
    nc=0
    nnfile=nfile-1
    if input_info['normalize'].lower()!='none':
        normalize=input_info['normalize']
        print "normalizing to ", input_info['normalize']
    else:
        print "not normalizing"
    while n<nnfile:
        tread=time.time()
        nc=n+1
        file=filenames[n]
        tmf=dir+file
        wait=0
        t0=time.time()
        stop=0
        while checkfile(tmf) is False:
            p.draw()
            sys.stdout.write(50*'\x08')
            sys.stdout.write('waiting for file '+file+'...')
            sys.stdout.flush()
            t1=time.time()
            wait+=t1-t0
            time.sleep(dt)
            t0=t1
            if wait>10*dt:
                print nfile
                ans=raw_input('\n will this file ever arrive? (y/N)')
                if ans.lower()=='y':
                    print '\n keep waiting...\n'
                    time.sleep(3*dt)
                    wait=0
                else:
                    stop=1
                    nfile=n+1
                    break
        if stop==1:
            break
        if ext=='.edf':
            filesize=os.path.getsize(tmf)
            while filesize!=goodsize:
                sys.stdout.write(50*'\x08')
                sys.stdout.write('file '+file+' still not ready...')
                sys.stdout.flush()
                time.sleep(dt)
                filesize=os.path.getsize(tmf)
        f=dataread(tmf)
        dread(f, n, tot_darks, flat_field, static_corrected)
        mon[0,n]=monitor
        # For the plot. To be faster, the plot is only updated every chn files.
        jj=0
        tmp_put=[]
        tqueue=time.time()
        for i in xnq:
            if i<q_sec[jj]:
                tmp_put.append(ccd_img[index_in_q[i]])
            elif i==nq-1:
                tmp_put.append(ccd_img[index_in_q[i]])
                qur[jj].put(tmp_put)
            else:
                qur[jj].put(tmp_put)
                tmp_put=[]
                tmp_put.append(ccd_img[index_in_q[i]])
                jj+=1
        tqueue_cum+=time.time()-tqueue
        if nc%chn==0:
            pct=100.0*n/nfile
            sys.stdout.write(50*'\x08')
            sys.stdout.write('read '+str(int(pct))+'% of files'+32*' ')
            sys.stdout.flush()
            if plot!='no':
                #thplot.join()
                xx=quplot.get()
                ttplot(xx[0], xx[1], xx[2], n+1, I_avg[0,:n+1], I_avg2[0,:n+1])
                #thplot=Process(target=ttplot, args=([xx[0], xx[1], xx[2], n+1, I_avg[0,:n+1], I_avg2[0,:n+1]]))
                #thplot.start()
                #thplot.join()
        n+=1
    #if plot!='no':
    #    thplot.join()
    sys.stdout.write(50*'\x08')
    sys.stdout.flush()
    print "read 100% of files"
    ###############################################################################################
    from_proc=[]
    for i in xrange(ncores):
        from_proc.append(qure[i].get())
        pcorr[i].join()
        qure[i].close()
    #############################################################################################
    # END OF MAIN LOOP
    # calculate 2 times correlation function
    print "saving results..."
    if stop==1:
        tI_avg=tI_avg[:,:nfile]
        mon=mon[:,:nfile]
        I_avgs=I_avgs[:nfile,:]
        rch=int(ceil(log(nfile/chn)/log(2))+1)
        for ir in xrange(rch):
            if ir==0:
                norm[0,:chn]=1./arange(nfile-2, nfile-chn-2, -1)
            else:
                norm[0,chn2*(ir+1):chn2*(ir+2)]=1./arange((nfile-1)/(2**ir)-chn2-1, (nfile-1)/(2**ir)-chn-1, -1)
    # calculate correlation functions
    corf=from_proc[0][0]
    sl=from_proc[0][1]
    sr=from_proc[0][2]
    tcalc_cum=from_proc[0][3]
    for i in xrange(1, ncores):
        corf=concatenate((corf, from_proc[i][0]), axis=0)
        sl=concatenate((sl, from_proc[i][1]), axis=0)
        sr=concatenate((sr, from_proc[i][2]), axis=0)
        tcalc_cum=max(tcalc_cum, from_proc[i][3])
    indt=int(chn+chn2*log(nfile/chn)/log(2))-2
    cc=zeros((indt, nq+1), float32)
    q_title='#q values:'
    trace_title='#file_no. , time, monitor, q values:'
    for cindex in xnq:
        q_title=q_title+' '+str(qaxis_list[cindex])
        trace_title=trace_title+' '+str(qaxis_list[cindex])
        cc[:,cindex+1]=corf[cindex,:indt]/(sl[cindex,:indt]*sr[cindex,:indt])/\
            norm[0,:indt]
    cc[:,0]=lag[0,:indt]
    q_title=q_title+'\n'
    trace_title=trace_title+'\n'
    del indt
    f=open(out_tot+'cf.dat', 'w')
    f.write(q_title)
    savetxt(f, cc)
    f.close()
    del cc
    f=open(out_tot+'trace.dat', 'w')
    f.write(trace_title)
    traces=zeros((nfile, nq+3), float32)
    traces[:,0]=tI_avg/dt+firstfile
    traces[:,1]=tI_avg
    traces[:,2]=mon
    traces[:,3:]=I_avgs
    savetxt(f, traces)
    f.close()
    del traces
    static=out_tot+'static.edf'
    static=EdfFile.EdfFile(static)
    totsaxs=totsaxs/n-tot_darks
    totsaxs[totsaxs<=0]=0
    static.WriteImage({}, totsaxs, 0)
    del static
    print 'correlation functions are saved to ', out_tot+'cf.dat'
    print 'traces are saved to ', out_tot+'trace.dat'
    if plot!='no':
        p.hold(True)
        p.close()
    if q_2tcf!='none':
        print "calculating time resolved cf and chi4..."
        if nfile>6000:  # this is for a 4 GB RAM PC
            nfile=6000
            n=6000
        lind2=npix_per_q[q_2tcf-1]/16
        l=arange(5)*0
        y=[]
        v=[]
        for i in range(5):
            y.append([])
            v.append([])
        ib=0
        for i in xrange(16):
            sys.stdout.write(50*'\x08')
            sys.stdout.write('done '+str(int(i/16.*100))+'% of data'+32*' ')
            sys.stdout.flush()
            ie=ib+lind2
            y[0].append(trc(ttdata[:n-1,ib:ie]))
            v[0].append(vartrc(y[0][-1]))
            if l[0]==1:
                recurf(0)
            else:
                l[0]+=1
            ib+=lind2
        vm=[]
        for i in range(4, -1, -1):
            vm.append(mean(v[i], 0))
        vm=array(vm)
        del ttdata
        del v
        sys.stdout.write(50*'\x08')
        sys.stdout.flush()
        file_2times=out_tot+'2times_q_'+str(q_2tcf)+'.edf'
        ytrc.write(file_2times, y[4][0])
        print 'Time resolved CF is saved to '+out_tot+'2times_q_'+str(q_2tcf)+'.edf'
        N=array([[1], [2], [4], [8], [16]])/float(npix_per_q[q_2tcf-1])
        data=concatenate((N, vm), 1).T
        #print 'number of pixels ', lind[ttcf_par]
        #print 'q value=', qv[ttcf_par]
        p0=[0.0, 1.0]
        it=range(len(data[1:,0]))
        p1=zeros((len(data[1:,0]), len(p0)+1))
        p1[:,0]=(asfarray(it)+1.0)*dt
        xdata=data[0,:]
        for i in it:
            ydata=data[i+1,:]
            p1[i,1:], success=leastsq(errfunc, p0, args=(xdata, ydata))
        outfile=out_tot+'fitchi4_q_'+str(q_2tcf)+'.dat'
        f=open(outfile, 'w')
        f.write("#time chi4 error q value:"+str(qaxis_list[q_2tcf-1])+"\n")
        savetxt(f, p1)
        f.close()
        print 'file is saved to '+outfile
    print "saving results..."
    time2=time.time()
    print 'elapsed time', time2-time1
    print 'elapsed time for plotting', tplot_cum
    print 'elapsed time for reading', tread_cum
    print 'elapsed time for correlating', tcalc_cum
    print 'elapsed time for queueing', tqueue_cum
    print 'used ncores=', ncores
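# correlator_online_mp builds its delay axis with a multi-tau scheme: the first `chn` lags are
# linear in the frame time dt, and every further block of chn/2 lags doubles the spacing.
# A small sketch of just that lag-axis construction, written for Python 3 with plain numpy;
# variable names follow the function above, but this is an illustration, not the original code:
import numpy as np


def multitau_lags(nfile, dt, chn=16):
    """Return the multi-tau delay times for `nfile` frames spaced by `dt`."""
    chn2 = chn // 2
    rch = int(np.ceil(np.log(nfile / chn) / np.log(2)) + 1)   # number of doubling levels
    lags = list(dt * np.arange(1, chn + 1))                    # first level: linear lags
    for ir in range(1, rch):
        lags.extend((dt * 2 ** ir) * np.arange(1 + chn2, chn + 1))
    return np.array(lags)


# Example: 1024 frames at 0.01 s per frame gives 64 logarithmically spaced delay times.
print(multitau_lags(1024, 0.01)[:20])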