def makeChart(path, data):
    filepath = path
    # split the data into columns
    size = [x[0] for x in data]
    alloc = [x[1] for x in data]
    free = [x[2] for x in data]
    length = len(size)
    # drop the first row of the data, which holds the column names
    size.pop(0)
    alloc.pop(0)
    free.pop(0)
    try:
        if length > 10:
            # declare a figure object to plot
            fig = plt.figure(figsize=(12, 8))
            # plot chart
            plt.plot(size, label='Total')
            plt.plot(alloc, label='Alloc')
            plt.plot(free, label='Free')
            plt.xlabel(u'次数(单位:20S)', fontproperties='SimHei')
            # plt.ylabel('Memory')
            plt.xlim(0.0, len(data))
            plt.ylabel(u'内存大小(单位:KB)', fontproperties='SimHei')
            plt.grid(True)
            # advanced settings
            plt.title('Native Heap')
            # place the legend outside the axes
            plt.legend(loc=2, bbox_to_anchor=(1.01, 1.00), borderaxespad=0.)
            # save chart
            plt.savefig(os.path.join(filepath, 'meminfo.png'))
            # plt.show()
    except KeyboardInterrupt:
        pass
def draw_ranges_for_parameters(data, title='', save_path='./pictures/'):
    parameters = data.columns.values.tolist()
    # remove the flight name column; only real parameters are plotted
    if 'flight_name' in parameters:
        parameters.remove('flight_name')

    flight_names = np.unique(data['flight_name'])
    print(len(flight_names))

    for parameter in parameters:
        plt.figure()
        axis = plt.gca()
        axis.set_yticks(flight_names)
        axis.tick_params(labelright=True)
        axis.set_ylim([94., 130.])
        plt.grid()
        plt.title(title)
        plt.xlabel(parameter)
        plt.ylabel('flight name')
        colors = iter(cm.rainbow(np.linspace(0, 1, len(flight_names))))
        for flight in flight_names:
            temp = data[data.flight_name == flight][parameter]
            # draw a horizontal segment from the min to the max of this parameter
            plt.plot([np.min(temp), np.max(temp)], [flight, flight],
                     c=next(colors), linewidth=2.0)
        plt.savefig(save_path + title + '_' + parameter + '.jpg')
        plt.close()
def plot(self, nbins=100, range=None):
    # mark the first F-value with a dashed red line, then histogram all values
    plt.plot([self.F_[0], self.F_[0]], [0, 100], '--r', lw=2)
    h = plt.hist(self.F_, nbins, range)
    plt.xlabel('F-value')
    plt.ylabel('Count')
    plt.grid()
    return h
def plotaBarra(nEntradas, nSaidas):
    n_groups = 2
    means_men = (nEntradas, nSaidas)
    fig, ax = plt.subplots()
    index = np.arange(n_groups)
    bar_width = 0.15
    opacity = 0.4
    rects1 = plt.bar(index, means_men, bar_width, alpha=opacity, color='b')
    plt.ylabel(u'Valor absoluto', size=20)
    plt.title(u'Número de passagens', size=20)
    plt.xticks(index + bar_width / 2, ('Entradas', u'Saídas'), size=16)
    plt.grid(True)
    plt.axis((0, 2, 0, nSaidas + nEntradas + 1))
    plt.tight_layout()
    # plt.show()
    plt.ylim(0, nEntradas + nSaidas + 1)
    plt.savefig('grafico.png')
def plotTestData(tree):
    plt.figure()
    plt.axis([0, 1, 0, 1])
    plt.xlabel("X axis")
    plt.ylabel("Y axis")
    plt.title("Green: Class1, Red: Class2, Blue: Class3, Yellow: Class4")
    # successive plot() calls already draw on the same axes, so no hold() is needed
    for value in class1:
        plt.plot(value[0], value[1], 'go')
    for value in class2:
        plt.plot(value[0], value[1], 'ro')
    for value in class3:
        plt.plot(value[0], value[1], 'bo')
    for value in class4:
        plt.plot(value[0], value[1], 'yo')
    plotRegion(tree)
    for value in classPlot1:
        plt.plot(value[0], value[1], 'g.', ms=3.0)
    for value in classPlot2:
        plt.plot(value[0], value[1], 'r.', ms=3.0)
    for value in classPlot3:
        plt.plot(value[0], value[1], 'b.', ms=3.0)
    for value in classPlot4:
        plt.plot(value[0], value[1], 'y.', ms=3.0)
    plt.grid(True)
    plt.show()
def plot_wav_fft(wav_filename, desc=None):
    plt.clf()
    plt.figure(num=None, figsize=(6, 4))
    sample_rate, X = scipy.io.wavfile.read(wav_filename)
    spectrum = np.fft.fft(X)
    freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)

    plt.subplot(211)
    num_samples = 200  # int, so it can also be used as a slice bound below
    plt.xlim(0, num_samples / sample_rate)
    plt.xlabel("time [s]")
    plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
    plt.grid(True)

    plt.subplot(212)
    plt.xlim(0, 5000)
    plt.xlabel("frequency [Hz]")
    plt.xticks(np.arange(5) * 1000)
    if desc:
        desc = desc.strip()
        fft_desc = desc[0].lower() + desc[1:]
    else:
        fft_desc = wav_filename
    plt.title("FFT of %s" % fft_desc)
    plt.plot(freq, abs(spectrum), linewidth=5)
    plt.grid(True)

    plt.tight_layout()
    rel_filename = os.path.split(wav_filename)[1]
    plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
                bbox_inches='tight')
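# Usage sketch for plot_wav_fft above: "some_recording.wav" is a hypothetical
# mono WAV file, and scipy.io.wavfile, numpy (np), os and matplotlib.pyplot (plt)
# are assumed to be imported as the function expects.
plot_wav_fft("some_recording.wav", desc="Example sine sweep")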
def _plot(self, names, title, style, when=0, showLegend=True):
    if isinstance(names, str):
        names = [names]
    assert isinstance(names, list)

    legend = []
    for name in names:
        assert isinstance(name, str)
        legend.append(name)

        # if it's a differential state
        if name in self.xNames:
            index = self.xNames.index(name)
            ys = np.squeeze(self._log['x'])[:, index]
            ts = np.arange(len(ys)) * self.Ts
            plt.plot(ts, ys, style)

        if name in self.outputNames:
            index = self.outputNames.index(name)
            ys = np.squeeze(self._log['outputs'][name])
            ts = np.arange(len(ys)) * self.Ts
            plt.plot(ts, ys, style)

    if title is not None:
        assert isinstance(title, str), "title must be a string"
        plt.title(title)
    plt.xlabel('time [s]')
    if showLegend is True:
        plt.legend(legend)
    plt.grid()
def mlr_val_vseq(RM, yE, v_seq, disp=True, graph=True):
    """
    Validation is performed using vseq indexed values.
    """
    org_seq = list(range(len(yE)))
    t_seq = [x for x in org_seq if x not in v_seq]

    RMt, yEt = RM[t_seq, :], yE[t_seq, 0]
    RMv, yEv = RM[v_seq, :], yE[v_seq, 0]

    clf = linear_model.LinearRegression()
    clf.fit(RMt, yEt)

    print('Weight value')
    plt.plot(clf.coef_.flatten())
    plt.grid()
    plt.xlabel('Tap')
    plt.ylabel('Weight')
    plt.title('Linear Regression Weights')
    plt.show()

    if disp:
        print('Training result')
    mlr_show(clf, RMt, yEt, disp=disp, graph=graph)

    if disp:
        print('Validation result')
    r_sqr, RMSE = mlr_show(clf, RMv, yEv, disp=disp, graph=graph)

    return r_sqr, RMSE
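# Hedged usage sketch for mlr_val_vseq above. It assumes the surrounding module
# provides scikit-learn's linear_model and the mlr_show helper; the data below
# is synthetic and only illustrates the expected shapes (RM: samples x taps,
# yE: samples x 1, v_seq: hold-out row indices).
import numpy as np

rng = np.random.default_rng(0)
RM = rng.normal(size=(100, 8))                                   # hypothetical design matrix
yE = RM @ rng.normal(size=(8, 1)) + 0.1 * rng.normal(size=(100, 1))
v_seq = list(range(0, 100, 5))                                   # hold out every fifth sample
r_sqr, RMSE = mlr_val_vseq(RM, yE, v_seq, disp=True, graph=False)
print(r_sqr, RMSE)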
def mlr_val_vseq_MMSE(RM, yE, v_seq, alpha=.5, disp=True, graph=True):
    """
    Validation is performed using vseq indexed values.
    """
    org_seq = list(range(len(yE)))
    t_seq = [x for x in org_seq if x not in v_seq]

    RMt, yEt = RM[t_seq, :], yE[t_seq, 0]
    RMv, yEv = RM[v_seq, :], yE[v_seq, 0]

    w, RMt_1 = mmse_with_bias(RMt, yEt)
    yEt_c = RMt_1 * w

    print('Weight values')
    plt.plot(w.A1)
    plt.grid()
    plt.xlabel('Tap')
    plt.ylabel('Weight')
    plt.title('Linear Regression Weights')
    plt.show()

    RMv_1 = add_bias_xM(RMv)
    yEv_c = RMv_1 * w

    if disp:
        print('Training result')
    regress_show(yEt, yEt_c, disp=disp, graph=graph)

    if disp:
        print('Validation result')
    r_sqr, RMSE = regress_show(yEv, yEv_c, disp=disp, graph=graph)

    return r_sqr, RMSE
def make_fish(zoom=False):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))
    plt.plot(plot_limits['pitch'], plot_limits['rolldev'], '-g', lw=3)
    plt.plot(plot_limits['pitch'], -plot_limits['rolldev'], '-g', lw=3)
    plt.plot(pitch.midvals, roll.midvals, '.b', ms=1, alpha=0.7)

    p, r = make_ellipse()  # pitch, off nominal roll
    plt.plot(p, r, '-c', lw=2)

    gf = -0.08  # Fudge on pitch value for illustrative purposes
    plt.plot(greta['pitch'] + gf, -greta['roll'], '.r', ms=1, alpha=0.7)
    plt.plot(greta['pitch'][-1] + gf, -greta['roll'][-1], 'xr', ms=10, mew=2)

    if zoom:
        plt.xlim(46.3, 56.1)
        plt.ylim(4.1, 7.3)
    else:
        plt.ylim(-22, 22)
        plt.xlim(40, 180)
    plt.xlabel('Sun pitch angle (deg)')
    plt.ylabel('Sun off-nominal roll angle (deg)')
    plt.title('Mission off-nominal roll vs. pitch (5 minute samples)')
    plt.grid()
    plt.tight_layout()
    plt.savefig('fish{}.png'.format('_zoom' if zoom else ''))
def mlr_val_ridge(RM, yE, rate=2, more_train=True, center=None, alpha=0.5,
                  disp=True, graph=True):
    """
    Validation is performed on as much data as the given ratio allows.
    """
    RMt, yEt, RMv, yEv = jchem.get_valid_mode_data(RM, yE, rate=rate,
                                                   more_train=more_train,
                                                   center=center)
    print("Ridge: alpha = {}".format(alpha))
    clf = linear_model.Ridge(alpha=alpha)
    clf.fit(RMt, yEt)

    print('Weight value')
    plt.plot(clf.coef_.flatten())
    plt.grid()
    plt.xlabel('Tap')
    plt.ylabel('Weight')
    plt.title('Linear Regression Weights')
    plt.show()

    print('Training result')
    mlr_show(clf, RMt, yEt, disp=disp, graph=graph)

    print('Validation result')
    r_sqr, RMSE = mlr_show(clf, RMv, yEv, disp=disp, graph=graph)

    return r_sqr, RMSE
def plot2DLine(x, y, threshold, xlabel="x", ylabel="y", figname="figure",
               title="Track", equal=False):
    fig = plt.figure(figname)
    ax = fig.add_subplot(111)
    start = 0
    # set x and y axis to equal scale
    if equal:
        ax.axis('equal')
    # break the line wherever two consecutive points are farther apart than threshold
    for i in range(len(x) - 1):
        if sqrt((x[i] - x[i + 1])**2 + (y[i] - y[i + 1])**2) > threshold:
            ax.plot(x[start:i + 1], y[start:i + 1], 'r*')
            start = i + 1
    ax.plot(x[start:], y[start:], 'r*')
    # disable scientific notation on both axes
    formatter = ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(formatter)
    ax.xaxis.set_major_formatter(formatter)
    ax.set_title(title, size=20)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid()
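# Small usage sketch for plot2DLine above, with synthetic track data; it assumes
# plt, math.sqrt and matplotlib.ticker.ScalarFormatter are imported as the
# function requires.
import numpy as np

# a deliberate gap so the distance threshold splits the track into two segments
x = np.concatenate([np.linspace(0, 10, 50), np.linspace(30, 40, 50)])
y = np.sin(x)
plot2DLine(x, y, threshold=5.0, xlabel="east [m]", ylabel="north [m]",
           figname="track", title="Synthetic track", equal=True)
plt.show()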
def make_let_im(let_file, dim=16, y_lo=70, y_hi=220, x_lo=10, x_hi=200,
                edge_pix=150, plot_let=False):
    letter = mpimg.imread(let_file)
    letter = letter[y_lo:y_hi, x_lo:x_hi, 0]
    for i in range(letter.shape[1]):
        # remove the dark edge, column by column
        if letter[0:edge_pix, i].any() == 0:
            letter[0:edge_pix, i] = 1

    plt.imshow(letter, cmap='gray')
    plt.grid(False)
    plt.show()

    x = np.arange(letter.shape[1])
    y = np.arange(letter.shape[0])
    f2d = interp2d(x, y, letter)

    x_new = np.linspace(0, letter.shape[1], dim)  # dim = 16
    y_new = np.linspace(0, letter.shape[0], dim)
    letter_new = f2d(x_new, y_new)
    letter_new -= np.mean(letter_new)

    if plot_let:
        plt.imshow(letter_new, cmap='gray')
        plt.grid(False)
        plt.show()

    # letter_flat is a 1-dimensional array containing dim*dim (256) elements
    letter_flat = letter_new.flatten()
    return letter_new, letter_flat
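# Quick call sketch for make_let_im above: "letter_A.png" is a hypothetical
# grayscale letter scan; mpimg, interp2d, numpy and plt are assumed imported
# as in the snippet.
letter_img, letter_vec = make_let_im('letter_A.png', dim=16, plot_let=True)
print(letter_img.shape, letter_vec.shape)   # (16, 16) (256,)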
def png(self, start_timestamp, end_timestamp):
    self.load(start_timestamp, end_timestamp)
    plt.figure(figsize=(10, 7.52))
    plt.rc("axes", labelsize=12, titlesize=14)
    plt.rc("font", size=10)
    plt.rc("legend", fontsize=7)
    plt.rc("xtick", labelsize=8)
    plt.rc("ytick", labelsize=8)
    plt.axes([0.08, 0.08, 1 - 0.27, 1 - 0.15])

    for plot in self.plots:
        plt.plot(self.timestamps, self.plots[plot], self.series_fmt(plot),
                 label=self.series_label(plot))
    plt.axis("tight")

    plt.gca().xaxis.set_major_formatter(
        matplotlib.ticker.FuncFormatter(
            lambda x, pos=None: time.strftime("%H:%M\n%b %d", time.localtime(x))))
    plt.gca().yaxis.set_major_formatter(
        matplotlib.ticker.FuncFormatter(
            lambda x, pos=None: locale.format_string("%.*f", (0, x), True)))

    plt.grid(True)
    plt.legend(loc=(1.003, 0))
    plt.xlabel("Time/Date")
    plt.title(self.description() + "\n%s to %s" % (
        time.strftime("%H:%M %d-%b-%Y", time.localtime(start_timestamp)),
        time.strftime("%H:%M %d-%b-%Y", time.localtime(end_timestamp))))

    # PNG output is binary, so collect it in a BytesIO buffer
    output_buffer = io.BytesIO()
    plt.savefig(output_buffer, format="png")
    return output_buffer.getvalue()
def plotFFT(self):
    # Generates a live plot of the FFT output. To view, run plotFFT.py in a
    # separate terminal.
    figure1 = plt.figure(num=None, figsize=(12, 12), dpi=80,
                         facecolor='w', edgecolor='w')
    plot1 = figure1.add_subplot(111)
    line1, = plot1.plot(np.arange(0, 512, 0.5), np.zeros(1024), 'g-')
    plt.xlabel('freq (MHz)', fontsize=12)
    plt.ylabel('Amplitude', fontsize=12)
    plt.title('Pre-mixer FFT', fontsize=12)
    plt.xticks(np.arange(0, 512, 50))
    plt.xlim((0, 512))
    plt.grid()
    plt.show(block=False)
    count = 0
    stop = 1.0e6
    while count < stop:
        overflow = np.frombuffer(self.fpga.read('overflow', 4), dtype='>B')
        print(overflow)
        self.fpga.write_int('fft_snap_ctrl', 0)
        self.fpga.write_int('fft_snap_ctrl', 1)
        fft_snap = np.frombuffer(self.fpga.read('fft_snap_bram', (2**9) * 8),
                                 dtype='>i2').astype('float')
        I0 = fft_snap[0::4]
        Q0 = fft_snap[1::4]
        I1 = fft_snap[2::4]
        Q1 = fft_snap[3::4]
        mag0 = np.sqrt(I0**2 + Q0**2)
        mag1 = np.sqrt(I1**2 + Q1**2)
        # interleave the two magnitude streams back into FFT bin order
        fft_mags = np.hstack(list(zip(mag0, mag1)))
        plt.ylim((0, np.max(fft_mags) + 300.))
        line1.set_ydata(fft_mags)
        plt.draw()
        count += 1
def testPlots(self):
    import matplotlib.pyplot as plt
    frames = list(self.it)
    print(len(frames))
    # assert(512==list(frames))
    from pprint import pprint
    pprint(list(self.it._consumed.items()))

    x = np.array([(q['start'] - self.start).total_seconds() for q in frames])

    def gety(name, frames=frames):
        return np.array([q[name] for q in frames])

    a = gety('a')
    b = gety('b')
    c = gety('c')

    fig = plt.figure()
    ax = plt.axes()
    ax.plot(x, a, '.')
    ax.plot(x, b, '.')
    ax.plot(x, c, '.')
    plt.grid()
    plt.legend(['a1', 'a2', 'a3', 'b', 'c'])
    plt.title('linear time interpolation test')
    plt.draw()
    fn = '/tmp/resampler.png'
    fig.savefig(fn, dpi=200)
    print("wrote " + fn)
    return None
def exec_transmissions():
    IP, IP_AP, files = parser_reduce()
    plt.figure("GRAPHE_D'EVOLUTION_DES_TRANSMISSIONS")
    ENS_TEMPS_, TRANSMISSION_ = transmissions(files)
    plt.plot(ENS_TEMPS_, TRANSMISSION_, "r.", label="Transmissions: ")
    # sort the addresses numerically (map() must be wrapped under Python 3)
    lot = sorted(map(inet_aton, IP))
    iplist1 = list(map(inet_ntoa, lot))
    for i in iplist1:
        # draw the annotations and check whether the IP address is 9 or 8
        # characters long, to work out the size of the plot window
        if len(i) == 9:
            maxim_ = i[-2:]  # will be used for the plot window size
            plt.annotate(' Machine: ' + i, horizontalalignment='left',
                         xy=(1, float(i[-2:])), xytext=(1, float(i[-2:]) - 0.4),
                         arrowprops=dict(facecolor='black', shrink=0.05),)
        else:
            maxim_ = i[-1:]  # will be used for the plot window size
            plt.annotate(' Machine: ' + i, horizontalalignment='left',
                         xy=(1, float(i[7])), xytext=(1, float(i[7]) - 0.4),
                         arrowprops=dict(facecolor='black', shrink=0.05),)
    for i in IP_AP:  # ACCESS POINT (special case)
        if i[-2:]:
            plt.annotate(' access point: ' + i, xy=(1, i[7]),
                         xytext=(1, float(i[7]) - 0.4),
                         arrowprops=dict(facecolor='black', shrink=0.05),)
    plt.ylim(0, float(maxim_) + 1)  # this is what the sort is for
    plt.xlim(1, 1.1)
    plt.legend(loc='best', prop={'size': 10})
    plt.xlabel('Temps (s)')
    plt.ylabel('IP machines transmettrices')
    plt.grid(True)
    plt.title("GRAPHE_D'EVOLUTION_DES_TRANSMISSIONS")
    plt.show()
def test_prop(self):
    N = 800.0
    V = linspace(5.0, 51.0, 50)
    rho = 1.2255
    beta = 45.0
    J = list()
    CT = list()
    CP = list()
    effy = list()
    for v in V:
        data = self.analyze_prop(beta, N, v, rho)
        J.append(data[2])
        CT.append(data[3])
        CP.append(data[4])
        effy.append(data[5])

    # successive plot() calls draw on the same axes, so no hold() is needed
    plt.figure(1)
    plt.grid(True)
    plt.plot(J, CT, 'o-')
    plt.xlabel('J')
    plt.plot(J, CP, 'ro-')
    plt.axis([0, 2.5, 0, 0.15])

    plt.figure(2)
    plt.plot(J, effy, 'gs-')
    plt.grid(True)
    plt.axis([0, 2.5, 0, 1.0])
    plt.xlabel('advance ratio')
    plt.ylabel('efficiency')
    plt.show()
def visualizeEigenvalues(eVal, verboseLevel):
    real = []
    imag = []
    for z in eVal:
        rp = z.real
        im = z.imag
        if not (rp == np.inf or rp == -np.inf) \
                and not (im == np.inf or im == -np.inf):
            real.append(rp)
            imag.append(im)

    if verboseLevel >= 1:
        print("length of regular real values=" + str(len(real)))
        print("length of regular imag values=" + str(len(imag)))
        print("minimal real part=" + str(min(real)),
              "& maximal real part=" + str(max(real)))
        print("minimal imag part=" + str(min(imag)),
              "& maximal imag part=" + str(max(imag)))
    if verboseLevel == 2:
        print("all real values:", str(real))
        print("all imag values:", str(imag))

    # plt.scatter(real[4:], img[4:])
    plt.scatter(real, imag)
    plt.grid(True)
    plt.xlabel("realpart")
    plt.ylabel("imagpart")
    plt.xlim(-10, 10)
    plt.ylim(-10, 10)
    plt.show()
def run_demo(with_plots=True):
    """
    Distillation2 model
    """
    curr_dir = os.path.dirname(os.path.abspath(__file__))

    fmu_name = compile_fmu("JMExamples.Distillation.Distillation2",
                           curr_dir + "/files/JMExamples.mo")
    dist2 = load_fmu(fmu_name)

    res = dist2.simulate(final_time=7200)

    # Extract variable profiles
    x16 = res['x[16]']
    x32 = res['x[32]']
    t = res['time']

    print("t = ", repr(N.array(t)))
    print("x16 = ", repr(N.array(x16)))
    print("x32 = ", repr(N.array(x32)))

    if with_plots:
        # Plot
        plt.figure(1)
        plt.plot(t, x16, t, x32)
        plt.grid()
        plt.ylabel('x')
        plt.xlabel('time')
        plt.show()
def plotter(fname, fdelimiter, foutput, fargument, fdir):
    """
    This function parses the data in a csv file and then calls different
    plotting functions.
    """
    try:
        fopen = open(fname, 'r')
        csvreader = csv.reader(fopen, delimiter=",")
        fields = next(csvreader)
    except Exception:
        print("Can not open input csv file", fname)
    fopen.close()
    data = np.loadtxt(fname, delimiter=fdelimiter, unpack=True, skiprows=1)
    mpl.plotfile(fname, cols=range(len(fields) // 2), delimiter=fdelimiter,
                 subplots=True, newfig=True)
    print("creating an example figure with", len(fields) // 2 - 1, "subplots")
    allfig = fdir + os.path.sep + "All_fig" + "." + foutput
    mpl.savefig(allfig)
    mpl.clf()
    i = 0
    while i < len(fields):
        if fargument != i:
            mpl.plot(data[fargument], data[i])
            fieldname = (fdir + os.path.sep + fields[fargument] + "_" +
                         fields[i] + "." + foutput)
            mpl.xlabel(fields[fargument])
            mpl.ylabel(fields[i])
            mpl.title(fields[fargument] + " vs " + fields[i])
            # Issues with the transparent option when png is opted; needs fine
            # tuning. Check the Howto section of the matplotlib website.
            # mpl.savefig(fieldname, transparent=True)
            mpl.grid(True)
            mpl.savefig(fieldname)
            print("Saving", fieldname)
            mpl.clf()
        i = i + 1
def PlotIOCurve(stRaster, rasterKey, figPath=[]):
    """ Plot the IO curves for the spikes in stRaster.
    :param stRaster: dict of pandas.DataFrame of spike times for each cycle, for each intensity
    :type stRaster: dict of pandas.DataFrame
    :param rasterKey: Raster key with intensity in dB following '_'
    :type rasterKey: str
    :param figPath: Directory location for plots to be saved
    :type figPath: str
    :returns: tuningCurves: pandas.DataFrame with frequency, intensity, response rate and standard deviation
    """
    tuning = []
    sortedKeys = sorted(stRaster.keys())
    for traceKey in sortedKeys:
        spl = int(traceKey.split('_')[-1])
        raster = stRaster[traceKey]
        res = ResponseStats(raster)
        tuning.append({'intensity': spl, 'response': res[0], 'responseSTD': res[1]})
    tuningCurves = pd.DataFrame(tuning)
    testNum = int(rasterKey.split('_')[-1])
    tuningCurves.plot(x='intensity', y='response', yerr='responseSTD',
                      capthick=1, label='test ' + str(testNum))
    plt.legend(loc='upper left', fontsize=12, frameon=True)
    sns.despine()
    plt.grid(False)
    plt.xlabel('Intensity (dB)', size=14)
    plt.ylabel('Response Rate (Hz)', size=14)
    plt.tick_params(axis='both', which='major', labelsize=14)
    title = rasterKey.split('_')[0] + '_' + rasterKey.split('_')[1] + '_' + rasterKey.split('_')[2]
    plt.title(title, size=14)
    if len(figPath) > 0:
        plt.savefig(figPath + 'ioCurves_' + title + '.png')
    return tuningCurves
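# Hedged usage sketch for PlotIOCurve above. The spike rasters and the
# ResponseStats helper come from the surrounding code base, so the names below
# (stRaster, the key format, the output directory) are illustrative only.
# stRaster is expected to map keys such as 'trace_10' ... 'trace_70'
# (intensity in dB after the last underscore) to pandas DataFrames of spike times.
curves = PlotIOCurve(stRaster, rasterKey='mouse1_site2_test_3', figPath='./figures/')
print(curves.head())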
def makePlot(k, counts, yaxis=None, width=0.8, figsize=[14.0, 8.0], title="",
             ylabel='tmpylabel', xlabel='tmpxlabel', labels=None, show=False,
             grid=True, xticks=None, yticks=None, steps=5, save=False):
    '''Bar plot of counts with optional custom ticks and labels.'''
    if yaxis is None or not list(yaxis):
        yaxis = np.arange(len(counts))
    if not labels:
        labels = yaxis
    if xticks is None:
        xticks = []

    index = np.arange(len(yaxis))
    fig, ax = plt.subplots()
    fig.set_size_inches(figsize[0], figsize[1])
    plt.bar(index, counts, width)
    plt.title(title)

    if not xticks:
        print('Making xticks')
        ticks = makeTicks(yMax=len(yaxis), steps=steps)
        xticks.append(ticks + width / 2.)
        xticks.append(labels)
        print('Done making xticks')
    if yticks:
        print('Making yticks')
        plt.yticks(yticks[0], yticks[1])
        print('Done making yticks')

    plt.xticks(xticks[0] + width / 2., xticks[1])
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    fig.autofmt_xdate()
    plt.axis([0, len(yaxis), 0, max(counts) + (max(counts) / 100)])
    plt.grid(grid)

    location = ROOT_FOLDER + "/../muchBazar/src/image/" + k + "distribution.png"
    if save:
        plt.savefig(location)
        print('Distribution written to: %s' % location)
    if show:
        plt.show()
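# Minimal usage sketch for makePlot above with synthetic counts; it assumes the
# module-level makeTicks helper and ROOT_FOLDER constant exist (ROOT_FOLDER is
# only needed when save=True, which is left off here).
import numpy as np

counts = np.random.default_rng(1).integers(1, 50, size=20)
makePlot('rating', counts, title='Rating distribution',
         ylabel='Occurrences', xlabel='Rating bucket', steps=4, show=True)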
def plot_models(x, y, models, fname, mx=None, ymax=None, xmin=None):
    plt.clf()
    plt.scatter(x, y, s=10)
    plt.title("Web traffic over the last month")
    plt.xlabel("Time")
    plt.ylabel("Hits/hour")
    plt.xticks([w * 7 * 24 for w in range(10)],
               ["week %i" % w for w in range(10)])

    if models:
        if mx is None:
            mx = sp.linspace(0, x[-1], 1000)
        for model, style, color in zip(models, linestyles, colors):
            plt.plot(mx, model(mx), linestyle=style, linewidth=2, c=color)
        plt.legend(["d=%i" % m.order for m in models], loc="upper left")

    plt.autoscale(tight=True)
    # current keyword names for the axis limits are bottom/top/left
    plt.ylim(bottom=0)
    if ymax:
        plt.ylim(top=ymax)
    if xmin:
        plt.xlim(left=xmin)
    plt.grid(True, linestyle="-", color="0.75")
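# Brief usage sketch for plot_models above, assuming the module-level linestyles
# and colors lists it iterates over exist; the traffic data and the degree-1 fit
# are synthetic.
import numpy as np

x = np.arange(1, 743)                                  # roughly one month of hourly samples
y = 100 + 0.05 * x + 20 * np.random.rand(len(x))
model = np.poly1d(np.polyfit(x, y, 1))                 # poly1d exposes .order for the legend
plot_models(x, y, [model], "traffic_fit.png", mx=np.linspace(0, x[-1], 1000))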
def createResponsePlot(dataframe, plotdir):
    mag = dataframe['MAGPDE'].to_numpy()
    response = dataframe['TFIRSTPUB'].to_numpy() / 60.0
    response[response > 60] = 60  # anything over 60 minutes is capped at 60 minutes
    imag5 = (mag >= 5.0).nonzero()[0]
    imag55 = (mag >= 5.5).nonzero()[0]
    fig = plt.figure(figsize=(8, 6))
    n, bins, patches = plt.hist(response[imag5], color='g', bins=60, range=(0, 60))
    plt.hist(response[imag55], color='b', bins=60, range=(0, 60))
    plt.xlabel('Response Time (min)')
    plt.ylabel('Number of earthquakes')
    plt.xticks(np.arange(0, 65, 5))
    ymax = text.ceilToNearest(max(n), 10)
    yinc = ymax / 10
    plt.yticks(np.arange(0, ymax + yinc, yinc))
    plt.grid(True, which='both')
    x = [20, 20]
    y = [0, ymax]
    plt.plot(x, y, 'r', linewidth=2, zorder=10)
    s1 = 'Magnitude 5.0, Events = %i' % (len(imag5))
    s2 = 'Magnitude 5.5, Events = %i' % (len(imag55))
    plt.text(35, .85 * ymax, s1, color='g')
    plt.text(35, .75 * ymax, s2, color='b')
    plt.savefig(os.path.join(plotdir, 'response.pdf'))
    plt.savefig(os.path.join(plotdir, 'response.png'))
    plt.close()
    print('Saving response.pdf')
def plot_data(l):
    fig, ax = plt.subplots()
    counts, bins, patches = ax.hist(l, 30, facecolor='yellow', edgecolor='gray')

    # Set the ticks to be at the edges of the bins.
    # ax.set_xticks(bins)
    # Set the xaxis's tick labels to be formatted with 1 decimal place...
    # ax.xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))

    # Label the raw counts and the percentages below the x-axis...
    bin_centers = 0.5 * np.diff(bins) + bins[:-1]
    for count, x in zip(counts, bin_centers):
        # Label the raw counts
        ax.annotate(str(int(count)), xy=(x, 0), xycoords=('data', 'axes fraction'),
                    xytext=(0, -40), textcoords='offset points', va='top', ha='center')
        # Label the percentages
        percent = '%0.0f%%' % (100 * float(count) / counts.sum())
        ax.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'),
                    xytext=(0, -50), textcoords='offset points', va='top', ha='center')

    # Give ourselves some more room at the bottom of the plot
    plt.subplots_adjust(bottom=0.15)
    plt.grid(True)
    plt.xlabel("total reply")
    plt.ylabel("pages")
    plt.title("2014/10/21-2014/10/22 sina new pages")
    plt.show()
def show_trajectory(target, xc, yc):  # pragma: no cover
    plt.clf()
    plot_arrow(target.x, target.y, target.yaw)
    plt.plot(xc, yc, "-r")
    plt.axis("equal")
    plt.grid(True)
    plt.pause(0.1)
def draw_robot_way_2d(data_1, data_2, label_names, plate_name):
    plt.plot(data_1, data_2)
    plt.xlabel(label_names[0])
    plt.ylabel(label_names[1])
    plt.title(plate_name)
    plt.grid(True)
    plt.show()
def visualize_singular_values(args):
    param_values = load_parameter_values(args.load_path)
    for d in range(args.layers):
        if args.rnn_type == 'lstm':
            ws = param_values["/recurrentstack/lstm_" + str(d) + ".W_state"]
            w_rec = ws[:, 3 * args.state_dim:]
        elif args.rnn_type == 'simple':
            w_rec = param_values["/recurrentstack/simplerecurrent_" + str(d) +
                                 ".W_state"]
        else:
            raise NotImplementedError
        U, s, V = np.linalg.svd(w_rec, full_matrices=True)

        plt.subplot(2, 1, 1)
        plt.plot(np.arange(s.shape[0]), s, label='Layer_' + str(d))
        plt.grid(True)
        plt.legend(loc='upper right')
        plt.title("Singular_values_of_recurrent_weights")

        plt.subplot(2, 1, 2)
        plt.plot(np.arange(s.shape[0]), np.log(s + 1E-15), label='Layer_' + str(d))
        plt.grid(True)
        plt.title("Log_singular_values_of_recurrent_weights")

    plt.tight_layout()
    plt.savefig(args.save_path + "/visualize_singular_values.png")
    logger.info("Figure \"visualize_singular_values"
                ".png\" saved at directory: " + args.save_path)
def DrawFig(filename):
    n1, t1 = ReadList2("ssd_read.txt")
    n2, t2 = ReadList2("hdd_read.txt")
    n3, t3 = ReadList2("ssd_write.txt")
    n4, t4 = ReadList2("hdd_write.txt")

    fig = plt.figure()
    ax = fig.add_subplot(111)
    fig.suptitle("HBase (replica 10)")
    plt.xlabel('# Entries')
    plt.ylabel('time (sec)')
    # ax.set_yscale('log')

    ax.plot(n1, t1, 'r--', label="ssd read")
    ax.plot(n1, t1, 'ro')
    ax.plot(n3, t3, 'r-', label="ssd write")
    ax.plot(n3, t3, 'r^')
    ax.plot(n2, t2, 'b--', label="hdd read")
    ax.plot(n2, t2, 'bo')
    ax.plot(n4, t4, 'b-', label="hdd write")
    ax.plot(n4, t4, 'b^')

    # plt.xlim([0.8, 8])
    plt.ylim([-20, 300])
    plt.grid(visible=True, which='both', color='0.65', linestyle='-')
    ax.legend(bbox_to_anchor=(0.1, 0.9), loc=2, borderaxespad=0.)
    plt.savefig('hbase_read_write_r10.png')
    plt.show()
    return
print("slope = ", slope, "intercept = ", intercept, '\n') x_linreg = np.linspace(0, 1, 500) y_linreg = x_linreg * slope + intercept plt.plot(x_linreg, y_linreg, 'k', label=u"Экспериментальные данные") th_slope = np.linspace(0, 0.045, 500) plt.plot(th_slope, th_slope * 1000, 'k', ls='-.', label=u"Теоретические данные") plt.plot(x, y, '.', color='k') plt.grid(which='major', ls='-', lw=0.5, c='k') plt.grid(which='minor', ls='--', lw=0.5, c='grey') plt.errorbar(x, y, xerr=xErr, yerr=yErr, ecolor='k', capsize=3, elinewidth=1, fmt=' ') plt.xlim(0, 0.045) plt.ylim(0, 40) plt.title(u"A. Зависимость " + r"$\Delta\nu(\frac{1}{\tau})$") plt.ylabel(r'$\Delta\nu$' + ", " + u"кГц", size="x-large") plt.xlabel(r'$\frac{1}{\tau}$' + ", " + u"$мкс^{-1}$", size="x-large")
print(class_names)
print(train_images.shape)
print(len(train_labels))
print(train_labels)
print(test_images.shape)
print(len(test_labels))

plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()

train_images = train_images / 255.0
test_images = test_images / 255.0

plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
# define font properties using a dict
font = {'family': 'serif',
        'color':  'red',
        'weight': 'normal',
        'size':   11,
        }

# import data from a file
data = genfromtxt('data_plot.txt')

# separate data [row : column]
mass = data[:, 0]
radius = data[:, 1]
print(mass)

# plot data
plot(mass, radius, 'r', label=r'data plot')

# add title and axis labels (x is mass, y is radius)
plt.title(r'Contoh Relasi Masa dan Radius Bintang', fontdict=font)
plt.xlabel(r'$M_{\odot}$', fontdict=font)
plt.ylabel(r'$R_{\odot}$', fontdict=font)

plt.legend(loc='upper right')  # add a legend
plt.grid(True)                 # turn on the grid

# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.10)
plt.show()
# Processing
# De-spike
data_in = data_in - mean(data_in)
fcorner = 0.00005
dt = t_in[2] - t_in[1]
# data_fil = highpass(data_in, fcorner, 1/dt, 2)
data_fil = data_in

# Put in SAC file
st = Stream(Trace())
st[0].stats.station = station
st[0].stats.delta = dt
st[0].stats.starttime = t1
st[0].data = data_fil
st[0].trim(starttime=time_epi - timedelta(days=2),
           endtime=time_epi + timedelta(hours=70))
st[0].data = st[0].data - mean(st[0].data[0:20])
st.write(fout, format='SAC')

# Make plot and save
# plt.close("all")
plt.figure(figsize=(10, 3))
plt.plot(st[0].times() - 2 * 86400, st[0].data)
plt.grid()
plt.ylim([-0.12, 0.12])
plt.xlim([-48, 72])
plt.xlabel('Minutes after OT')
plt.ylabel('Sea surface height (m)')
plt.subplots_adjust(bottom=0.2)
plt.title('Station ' + station.upper())
plt.savefig(plot_out)
plt.show()
def train(self, epochs, batch_size=128, save_interval=50):
    data_dir = './data'
    X_train = self.get_batch(glob(os.path.join(data_dir, '*.jpg'))[:5000], 28, 28, 'RGB')

    # Rescale -1 to 1
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    half_batch = int(batch_size / 2)

    # Create lists for logging the losses
    d_loss_logs_r = []
    d_loss_logs_f = []
    g_loss_logs = []

    for epoch in range(epochs):
        # ---------------------
        #  Train Discriminator
        # ---------------------

        # Select a random half batch of images
        idx = np.random.randint(0, X_train.shape[0], half_batch)
        imgs = X_train[idx]
        noise = np.random.normal(0, 1, (half_batch, 100))

        # Generate a half batch of new images
        gen_imgs = self.generator.predict(noise)

        # Train the discriminator
        d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
        d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # ---------------------
        #  Train Generator
        # ---------------------
        noise = np.random.normal(0, 1, (batch_size, 100))

        # The generator wants the discriminator to label the generated samples
        # as valid (ones)
        valid_y = np.array([1] * batch_size)

        # Train the generator
        g_loss = self.combined.train_on_batch(noise, valid_y)

        # Print the progress
        print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
              (epoch, d_loss[0], 100 * d_loss[1], g_loss))

        # Append the logs with the loss values in each training step
        d_loss_logs_r.append([epoch, d_loss[0]])
        d_loss_logs_f.append([epoch, d_loss[1]])
        g_loss_logs.append([epoch, g_loss])

        # If at save interval => save generated image samples
        if epoch % save_interval == 0:
            self.save_imgs(epoch)

    # Convert the log lists to numpy arrays
    d_loss_logs_r_a = np.array(d_loss_logs_r)
    d_loss_logs_f_a = np.array(d_loss_logs_f)
    g_loss_logs_a = np.array(g_loss_logs)

    # Generate the plot at the end of training
    plt.plot(d_loss_logs_r_a[:, 0], d_loss_logs_r_a[:, 1], label="Discriminator Loss - Real")
    plt.plot(d_loss_logs_f_a[:, 0], d_loss_logs_f_a[:, 1], label="Discriminator Loss - Fake")
    plt.plot(g_loss_logs_a[:, 0], g_loss_logs_a[:, 1], label="Generator Loss")
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.title('Variation of losses over epochs')
    plt.grid(True)
    plt.show()
def train(network, train_loader, test_loader, epochs, learning_rate,
          ravel_init=False, device='cpu', tolerate_keyboard_interrupt=True):
    loss = nn.NLLLoss()
    optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)
    train_loss_epochs = []
    test_loss_epochs = []
    train_accuracy_epochs = []
    test_accuracy_epochs = []
    network = network.to(device)
    try:
        for epoch in range(epochs):
            network.train()
            losses, accuracies = _epoch(network, loss, train_loader, True,
                                        optimizer, device, ravel_init)
            train_loss_epochs.append(np.mean(losses))
            train_accuracy_epochs.append(np.mean(accuracies))

            network.eval()
            losses, accuracies = _epoch(network, loss, test_loader, False,
                                        optimizer, device, ravel_init)
            test_loss_epochs.append(np.mean(losses))
            test_accuracy_epochs.append(np.mean(accuracies))

            clear_output(True)
            print('Epoch {0}... (Train/Test) NLL: {1:.3f}/{2:.3f}\tAccuracy: {3:.3f}/{4:.3f}'.format(
                epoch, train_loss_epochs[-1], test_loss_epochs[-1],
                train_accuracy_epochs[-1], test_accuracy_epochs[-1]))

            plt.figure(figsize=(12, 5))
            plt.subplot(1, 2, 1)
            plt.plot(train_loss_epochs, label='Train')
            plt.plot(test_loss_epochs, label='Test')
            plt.xlabel('Epochs', fontsize=16)
            plt.ylabel('Loss', fontsize=16)
            plt.legend(loc=0, fontsize=16)
            plt.grid()
            plt.subplot(1, 2, 2)
            plt.plot(train_accuracy_epochs, label='Train accuracy')
            plt.plot(test_accuracy_epochs, label='Test accuracy')
            plt.xlabel('Epochs', fontsize=16)
            plt.ylabel('Accuracy', fontsize=16)
            plt.legend(loc=0, fontsize=16)
            plt.grid()
            plt.show()
    except KeyboardInterrupt:
        if tolerate_keyboard_interrupt:
            pass
        else:
            raise
    return train_loss_epochs, \
        test_loss_epochs, \
        train_accuracy_epochs, \
        test_accuracy_epochs
T = 100
A[np.where(C < T)] = np.nan
B[np.where(C < T)] = np.nan
C[np.where(C < T)] = np.nan

from pcc import get_my_cmap

plt.errorbar(range(0, len(A)), A, B, fmt='o', zorder=1, color='grey')
plt.scatter(range(0, len(A)), A, c=C, s=300, linewidths=0.001, vmin=T, vmax=1000,
            cmap=get_my_cmap(), zorder=2)
cb = plt.colorbar(shrink=0.5)
cb.set_label("Hits in #", fontsize=ff)
cb.ax.tick_params(labelsize=ff)
plt.ylim(-1, 1)
plt.xlim(0, len(A))
plt.grid()
plt.ylabel('Correlation', fontsize=ff)
plt.xlabel('overpasses with time', fontsize=ff)
plt.title('Overpass statistics between DPR and RADOLAN, \n Threshold: N = ' + str(T),
          fontsize=ff)
plt.show()

dataframes = [df, df2]
names = ['RADOLAN', 'BoXPol']

for j in range(2):
    DF = dataframes[j]
    hits = DF['H'].values.copy()
    th = np.arange(np.nanmax(hits))
    anzahl_overpass = np.zeros(len(th))
def task3(wine): data = pd.read_csv("winequality-" + wine + ".csv", sep=';') edges_dict = {} features_scores = [] for feature in data.columns: print("Now doing "+feature) X = data.drop(feature, axis=1) y = data[feature] feature_list = list(X.columns) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0) rf = RandomForestRegressor(n_estimators=30, random_state=0) rf.fit(X_train, y_train) print("score: ", rf.score(X_test, y_test)) features_scores.append(rf.score(X_test, y_test)) importances = list(rf.feature_importances_) feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)] feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True) [print('Variable: {:24} Importance: {}'.format(*pair)) for pair in feature_importances] importances = [importance for feature, importance in feature_importances] feature_list = [feature for feature, importance in feature_importances] plt.bar(x=range(1, 12), height=importances, tick_label=feature_list) plt.ylabel('ylabel') plt.xlabel('xlabel') plt.title(wine + ' wine '+feature+' Feature Importances') plt.show() used_features = [] scores = [] for f in feature_list: used_features.append(f) used_X = data[used_features] X_train, X_test, y_train, y_test = train_test_split(used_X, y, test_size=0.25, random_state=0) rf = RandomForestRegressor(n_estimators=5, random_state=0) rf.fit(X_train, y_train) scores.append(rf.score(X_test, y_test)) plt.style.use("fivethirtyeight") plt.plot(range(1, 12), scores) plt.xticks(range(1, 12)) plt.xlabel("Number of Features") plt.ylabel("R2 score") plt.title(wine + ' wine ' + feature + ' R2 score') plt.show() edges_dict[feature] = feature_list[:scores.index(max(scores))+1] print() cell_text = [[round(score, 2) for score in features_scores]] col_labels = list(data.columns) plt.clf() col_widths = [len(a) / 120 for a in col_labels] col_widths[8] *= 3 the_table = plt.table(cellText=cell_text, colLabels=col_labels, loc="center", colWidths=col_widths) the_table.auto_set_font_size(False) the_table.set_fontsize(13) plt.title(wine + ' wine R2 Score of Each of 12 attributes') plt.axis("off") plt.grid(False) plt.show() G = nx.DiGraph() edges = [] feature_count_dict = {} for feature, learning_features in edges_dict.items(): for learning_feature in learning_features: if learning_feature not in feature_count_dict.keys(): feature_count_dict[learning_feature] = 0 feature_count_dict[learning_feature] += 1 quantity_add = {feature: str(feature_count_dict[feature]) + "\n" + feature for feature in edges_dict.keys()} for feature, learning_features in edges_dict.items(): for learning_feature in learning_features: edges.append((quantity_add[feature], quantity_add[learning_feature])) G.add_edges_from(edges) pos = nx.circular_layout(G) nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('jet'), node_size=500, node_color="red") nx.draw_networkx_labels(G, pos, font_weight="bold") nx.draw_networkx_edges(G, pos, edgelist=edges, edge_color='blue', arrows=True) plt.title(wine + ' wine Network of Features') plt.show()
scalarname = func.__getattribute__('pyfunc')
name = scalarname.__name__ + '_auto_vector'
fncnames.append(name)

t0 = time.perf_counter()          # perf_counter() replaces the removed time.clock()
CD[name] = func(ReNrs)
exec_times[name] = time.perf_counter() - t0

fnames_sorted = sorted(exec_times, key=exec_times.__getitem__)
exec_time_sorted = sorted(exec_times.values())

for i in range(len(fnames_sorted)):
    print(fnames_sorted[i], '\t execution time = ', exec_time_sorted[i])

# set fontsize prms
fnSz = 16
font = {'size': fnSz}
rc('font', **font)

# plot the result for all functions
for name in fncnames:
    loglog(ReNrs, CD[name])

legend(fncnames)
# xlabel('$Re$', fontsize=fnSz)
ylabel('$C_D$', fontsize=fnSz)
grid('on', 'both', 'both')
# savefig('example_sphere.png')
show()
q2_fit, dq2_fit, gc_fit, dgc_fit = [numpy.hstack(x) for x in [q2, dq2, gc, dgc]]
index_q2 = numpy.argsort(q2_fit)

with open('bin_errors.dat', 'w') as f:
    f.write('106\n')
    for q2_i, dq2_i, gc_i, dgc_i in zip(q2_fit[index_q2], dq2_fit[index_q2],
                                        gc_fit[index_q2], dgc_fit[index_q2]):
        print(f'{q2_i:8.6f} {gc_i:12.6e} {dgc_i:12.6e}')
        f.write(f'{q2_i:8.6f} {gc_i:12.6e} {dgc_i:12.6e}\n')

fitter = RFitter()
fitter.load_data(q2=q2_fit, ge=gc_fit, dge=dgc_fit)
fitter.set_range(0, 1.5)
r, _ = fitter.fit(model=('ratio', 1, 1), method='least_squares', r0=2.094)
print(r)

gc_0, gm_0, gq_0 = form_factors.abbott_2000_1(q2_fit[index_q2])

# use plt.figure() (lower case) so pyplot manages the figure that gca() draws on
fig = plt.figure()
ax = plt.gca()
ax.minorticks_on()
plt.grid(True)
plt.xlabel(r'$q2 / fm^{-2}$')
plt.ylabel(r'$G_C$')
plt.errorbar(q2_fit[:52], gc_fit[:52], yerr=dgc_fit[:52], fmt='r.', label='1.1 GeV')
plt.errorbar(q2_fit[52:], gc_fit[52:], yerr=dgc_fit[52:], fmt='b.', label='2.2 GeV')
plt.plot(q2_fit[index_q2], gc_0, 'k--')
plt.legend()
plt.show()
            label='Full Perfect on CPU')
pyplot.plot(Y2, Y5, color='orange', linestyle='-', linewidth=1.0, marker='o',
            label='7 write, 1 read on CPU')
pyplot.plot(Y2, Y6, color='blue', linestyle='-', linewidth=1.0, marker='o',
            label='1 write, 3 read on CPU')
pyplot.plot(Y2, Y7, color='red', linestyle='-', linewidth=1.0, marker='o',
            label='Compact CPU')
pyplot.grid()
pyplot.legend(loc=2)
pyplot.suptitle("Comparison of Neighbor Finding Algorithms on the CPU",
                fontdict={'fontsize': 16})
pyplot.savefig('neighbors_cpu.pdf', format='pdf')
pyplot.show()
for kb in keepBottom:
    kb = int(kb)
    B = vars()[pic + setpoint]
    b = A[kb]
    vars()['T' + setpoint].append(b)
    if setpoint == '22':
        # label = vars()['Labels' + pic]
        # l = label[kt]
        vars()['Labels'].append('B' + pic + str(kb + 1))
        print('B' + pic + str(kb + 1))

data = vars()['T' + setpoint]
plt.plot(INDEX, data, label=setpoint, color=colors[counter])
counter += 1

# Don't print all the x-labels.
which_labels = [INDEX[i] for i in INDEX if i % 5 == 0]
l = [Labels[i] for i in INDEX if i % 5 == 0]
plt.xticks(which_labels, l, size='small', rotation='60')
plt.grid(alpha=0.5)
plt.ylabel('Temperature in T')
plt.xlim(INDEX[0], INDEX[-1])
plt.tight_layout()
lgd = plt.legend(bbox_to_anchor=(0, 1.02, 1, 0.2), loc="lower left",
                 mode="expand", borderaxespad=0, ncol=3)
plt.savefig('test.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
# plt.show()
def create_plot_cam_axes(cam, fig, form='separate'): """Plots pos, vel, acc and jerk from one or more cam profile objects :param cam: one or more cam profile objects :param fig: A Figure object :param form: 'combine' or 'separate'. 'combine': plots pos, vel, acc and jerk for each cam profile object in one plot with multiple vertical axes 'separate': plots pos, vel, acc and jerk for each cam profile object in separate plots, each with one vertical axes. """ if form == 'combine': # A single axes containing all cam curves; pos, vel, accel and jerk. from mpl_toolkits.axes_grid1 import host_subplot from mpl_toolkits import axisartist host = host_subplot(111, axes_class=axisartist.Axes) plt.subplots_adjust(right=0.65) par1 = host.twinx() par2 = host.twinx() par3 = host.twinx() par2.axis["right"] = par2.new_fixed_axis(loc="right", offset=(55, 0)) par3.axis["right"] = par3.new_fixed_axis(loc="right", offset=(100, 0)) par1.axis["right"].toggle(all=True) par2.axis["right"].toggle(all=True) par3.axis["right"].toggle(all=True) p0, = host.plot(cam.t, cam.pos, label="Density") p1, = par1.plot(cam.t, cam.vel, label="Temperature") p2, = par2.plot(cam.t, cam.acc, label="Velocity") p3, = par3.plot(cam.t, cam.jerk) # host.set_xlim(0, 2) # host.set_ylim(0, 2) # par1.set_ylim(0, 4) # par2.set_ylim(1, 65) host.set_xlabel("time") host.set_ylabel("pos") par1.set_ylabel("vel") par2.set_ylabel("accel") par3.set_ylabel("jerk") # host.legend() host.axis["left"].label.set_color(p0.get_color()) par1.axis["right"].label.set_color(p1.get_color()) par2.axis["right"].label.set_color(p2.get_color()) par3.axis["right"].label.set_color(p3.get_color()) plt.grid() return host elif form == 'separate': # Multiple axes if not isinstance(cam, (list, tuple, np.ndarray)): cam = (cam, ) ax00 = fig.add_subplot(221) ax01 = fig.add_subplot(222) ax10 = fig.add_subplot(223) ax11 = fig.add_subplot(224) ax00.set_title('Pos', fontsize=8) ax01.set_title('Vel', fontsize=8) ax10.set_title('Acc', fontsize=8) ax11.set_title('Jerk', fontsize=8) tick_kw = dict(labelsize=7) ax00.tick_params(**tick_kw) ax01.tick_params(**tick_kw) ax10.tick_params(**tick_kw) ax11.tick_params(**tick_kw) for c, i in zip(cam, range(len(cam))): if c is not None: ax00.plot(c.t, c.pos) ax01.plot(c.t, c.vel) ax10.plot(c.t, c.acc, label=str(i) + ' ' + c.__str__()) ax10.legend(fontsize=7, bbox_to_anchor=(0, -.2), loc='lower left', borderaxespad=0.) if c.jerk is not None: ax11.plot(c.t, c.jerk) return [[ax00, ax01], [ax10, ax11]]
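# Usage sketch for the 'separate' layout of create_plot_cam_axes above. The cam
# profile object here is a hypothetical stand-in (a SimpleNamespace with t, pos,
# vel, acc and jerk arrays); the real cam profile class is not shown in this snippet.
import numpy as np
import matplotlib.pyplot as plt
from types import SimpleNamespace

t = np.linspace(0.0, 1.0, 500)
pos = t - np.sin(2 * np.pi * t) / (2 * np.pi)          # cycloidal rise over one second
cam = SimpleNamespace(t=t, pos=pos,
                      vel=np.gradient(pos, t),
                      acc=np.gradient(np.gradient(pos, t), t),
                      jerk=np.gradient(np.gradient(np.gradient(pos, t), t), t))

fig = plt.figure(figsize=(8, 6))
axes = create_plot_cam_axes(cam, fig, form='separate')
plt.show()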
def main(): ## Create Network net = Net() ## Optimization and Loss #criterion = nn.CrossEntropyLoss() # use a Classification Cross-Entropy loss criterion = nn.MSELoss() #criterion = nn.L1Loss() #criterion = nn.NLLLoss() #criterion = nn.BCELoss() #optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.1) optimizer = optim.Adam(net.parameters(), lr=0.01) trainingdataX, trainingdataY, testdataX, testdataY = get_data() ''' trainingdataX = trainingdataX.cuda() trainingdataY = trainingdataY.cuda() testdataX = testdataX.cuda() testdataY = testdataY.cuda() ''' losses = [] avg_running_loss = 0 #import time for epoch in range(NumEpoches): running_loss = 0.0 for i, data in enumerate(trainingdataX, 0): #time.sleep(3.0) inputs = data labels = trainingdataY[i] inputs = Variable(torch.FloatTensor(inputs)) labels = Variable(torch.FloatTensor(labels)) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.data[0] if ((i > 10) & (i % 20 == 0)): avg_running_loss = running_loss / NumEpoches losses.append(avg_running_loss) #print ("prediction: {} actual: {} loss: {} avg loss per sample: {}".format(outputs.data.numpy(), labels, running_loss, avg_running_loss)) running_loss = 0.0 ''' print(inputs) print(optimizer) print(outputs) print(loss) print() ''' #print ("Finished training...") predictions = [] actual = [] losses = [] for sample in range(len(testdataX)): batch_prediction = net(Variable(torch.FloatTensor(testdataX[sample]))) batch_prediction = batch_prediction.data.numpy() for p in range(len(batch_prediction)): predictions.append(batch_prediction[p]) actual.append(testdataY[sample][p]) losses.append(abs(predictions[-1] - testdataY[sample][p])) from sklearn.metrics import r2_score r2 = r2_score(actual,predictions) #print("r2: {}".format(r2)) #print("Hidden Layers: {} Median Error on Test Data: {} St Dev: {} r2: {}".format(hidden_layers,np.mean(losses),np.std(losses),r2)) #print("hidden_layers {} r2 {} pred: {} actual: {}".format(hidden_layers,r2,predictions[10],actual[10])) h = "{}-".format(input_size) for hid in hidden_layers: h += "{}-".format(hid) h += "{}".format(output_size) print("hid_layers: {} r2: {} pred[10]: {} actl[10]: {} train_size: {} test_size: {} epochs: {} batch_size: {}".format(h,r2,predictions[10],actual[10],training_set_size,test_set_size,NumEpoches,batch_size)) ''' predictions = [] losses = [] for i in range(len(trainingdataX)): test_sample = np.asarray(trainingdataX[i]) test_label = np.asarray(trainingdataY[i]) pred = (net(Variable(torch.FloatTensor(test_sample)))) for z in range(len(pred)): p = pred[z].data.numpy() predictions.append(p) err = 0.0 for p in range(len(pred)): err += pred[p] - (Variable(torch.FloatTensor(test_label[p]))) err /= len(pred) e = err.data.numpy() losses.append(e) print((losses[0])) print(trainingdataY[0]) print("predictions[0]: {}".format(predictions[0])) print("trainingdataY[0]: {}".format(trainingdataY[0])) errors = abs(predictions - trainingdataY) print("errors[0]: {}".format(errors[0])) sq_errors = errors ** 2 sum_sq_errors = np.sum(sq_errors) res_errors = abs(trainingdataY - np.mean(trainingdataY)) ** 2 sum_res_sq = np.sum(res_errors) r2 = 1 / (sum_sq_errors - sum_res_sq) print("r2: {}".format(r2)) ''' plt.figure() plt.plot(actual,predictions,'x',ms=2,mew=3) plt.grid(True) h_lays = "" for i in hidden_layers: h_lays += "{}-".format(i) plt.suptitle("Neural Network {}-{}{} Median Error: {}\nBatch Size: {} Epochs: {} Train Set: {} r2: 
{}".format(input_size,h_lays,output_size,np.median(losses),batch_size,NumEpoches,training_set_size,r2)) plt.ylim([0,-10]) plt.xlim([0,-10]) plt.xlabel("Actual Values") plt.ylabel("Predicted Values (kcal/mol)") #plt.show() '''
# Putting all months together
if i == 1:
    al1 = anomoly
else:
    temp = anomoly
    al1 = pd.concat([al1, temp])

# sorting the entire dataframe according to index
al1 = al1.sort_index()

al1.plot(label='Anomoly')
pyplot.grid(True)
pyplot.legend(loc='best')
pyplot.title('OAKDALE_PM10')
pyplot.show()

"""
The two arrays we have both contain NaN values at various positions.

To do a linear regression on both to show how much the two arrays correlate:
http://glowingpython.blogspot.de/2012/03/linear-regression-with-numpy.html

However, the code line:

    slope, intercept, r_value, p_value, std_err = stats.linregress(varx, vary)

results in NaNs for every output variable.

--> Remove NaNs using a mask:

    mask = ~np.isnan(varx) & ~np.isnan(vary)
    slope, intercept, r_value, p_value, std_err = stats.linregress(varx[mask], vary[mask])

The ~ operator means "is not", only for NumPy arrays (it's an abuse of the
normal meaning, which is the bitwise not operator).
d97 = change['D97'] * 100
town = change['Oak Park Township'] * 100
park = change['Park District'] * 100
village = change['Village of Oak Park'] * 100
year = np.array(pivot.Year).astype(int)

p1 = plt.plot(d200, color='#3366cc')
p2 = plt.plot(d97, color='#dc3912')
p3 = plt.plot(town, color='#ff9900')
p4 = plt.plot(park, color='#109618')
p5 = plt.plot(village, color='#990099')

plt.xticks(np.arange(baseyear, taxyear, step=5))
plt.ylabel("Yearly Percentage Increase")
plt.title('Yearly % Increase Oak Park Taxing Bodies')
plt.grid(axis='y', linewidth=0.5)
plt.ylim(top=30)
plt.legend((p1[0], p2[0], p3[0], p4[0], p5[0]),
           ('D200', 'D97', 'Oak Park Township', 'Park District', 'Village of Oak Park'),
           loc='lower left')
plt.savefig(str(taxyear) + '/charts/percentage change levy by year.png')
plt.close()

plt.figure(figsize=(7, 6), dpi=200)
all = change['All'] * 100
N = 4
rolling = pd.Series(all.values).rolling(window=N).mean().iloc[N - 1:].values
print(taxyear - baseyear - rolling.size)
rolling = np.pad(rolling, (taxyear - baseyear - rolling.size + 1, 0),
def plot(results, experiment_dir, agents, plot_file_name="", conf_intervals=[], use_cost=False, cumulative=False, episodic=True, open_plot=True, track_disc_reward=False): ''' Args: results (list of lists): each element is itself the reward from an episode for an algorithm. experiment_dir (str): path to results. agents (list): each element is an agent that was run in the experiment. plot_file_name (str) conf_intervals (list of floats) [optional]: confidence intervals to display with the chart. use_cost (bool) [optional]: If true, plots are in terms of cost. Otherwise, plots are in terms of reward. cumulative (bool) [optional]: If true, plots are cumulative cost/reward. episodic (bool): If true, labels the x-axis "Episode Number". Otherwise, "Step Number". open_plot (bool) track_disc_reward (bool): If true, plots discounted reward. Summary: Makes (and opens) a single reward chart plotting all of the data in @data. ''' # Set x-axis labels to be integers. from matplotlib.ticker import MaxNLocator ax = pyplot.figure().gca() ax.xaxis.set_major_locator(MaxNLocator(integer=True)) # Some nice markers and colors for plotting. markers = ['o', 's', 'D', '^', '*', 'x', 'p', '+', 'v', '|'] x_axis_unit = "episode" if episodic else "step" # Map them to floats in [0:1]. colors = [[shade / 255.0 for shade in rgb] for rgb in color_ls] # Puts the legend into the best location in the plot and use a tight layout. pyplot.rcParams['legend.loc'] = 'best' # Negate everything if we're plotting cost. if use_cost: results = [[-x for x in alg] for alg in results] agent_colors = _get_agent_colors(experiment_dir, agents) # Make the plot. print_prefix = "\nAvg. cumulative reward" if cumulative else "Avg. reward" # For each agent. for i, agent_name in enumerate(agents): # Add figure for this algorithm. agent_color_index = i if agent_name not in agent_colors else agent_colors[ agent_name] agent_marker_index = agent_color_index # Grab new color/marker if we've gone over. if agent_color_index >= len(colors): agent_color_index = agent_color_index % len(colors) if agent_marker_index >= len(markers): agent_marker_index = agent_marker_index % len(markers) series_color = colors[agent_color_index] series_marker = markers[agent_marker_index] y_axis = results[i] x_axis = list( drange(X_AXIS_START_VAL, X_AXIS_START_VAL + len(y_axis) * X_AXIS_INCREMENT, X_AXIS_INCREMENT)) # Plot Confidence Intervals. if conf_intervals != []: alg_conf_interv = conf_intervals[i] top = np.add(y_axis, alg_conf_interv) bot = np.subtract(y_axis, alg_conf_interv) pyplot.fill_between(x_axis, top, bot, facecolor=series_color, edgecolor=series_color, alpha=0.25) print("\t" + str(agents[i]) + ":", round(y_axis[-1], 5), "(conf_interv:", round(alg_conf_interv[-1], 2), ")") marker_every = max(len(y_axis) / 30, 1) pyplot.plot(x_axis, y_axis, color=series_color, marker=series_marker, markevery=marker_every, label=agent_name) pyplot.legend() print() # Configure plot naming information. unit = "Cost" if use_cost else "Reward" plot_label = "Cumulative" if cumulative else "Average" if "times" in experiment_dir: # If it's a time plot. unit = "Time" disc_ext = "Discounted " if track_disc_reward else "" # Set names. 
exp_dir_split_list = experiment_dir.split("/") if 'results' in exp_dir_split_list: exp_name = exp_dir_split_list[exp_dir_split_list.index('results') + 1] else: exp_name = exp_dir_split_list[0] experiment_dir = experiment_dir + "/" if experiment_dir[ -1] != "/" else experiment_dir plot_file_name = plot_file_name if plot_file_name != "" else experiment_dir + plot_label.lower( ) + "_" + unit.lower() + ".pdf" plot_title = CUSTOM_TITLE if CUSTOM_TITLE is not None else plot_label + " " + disc_ext + unit + ": " + exp_name if CUSTOM_TITLE is None: plot_title = _format_title(plot_title) # Axis labels. x_axis_label = X_AXIS_LABEL if X_AXIS_LABEL is not None else x_axis_unit[ 0].upper() + x_axis_unit[1:] + " Number" y_axis_label = Y_AXIS_LABEL if Y_AXIS_LABEL is not None else plot_label + " " + unit # Pyplot calls. pyplot.xlabel(x_axis_label) pyplot.ylabel(y_axis_label) pyplot.title(plot_title) pyplot.grid(True) pyplot.tight_layout() # Keeps the spacing nice. # Save the plot. pyplot.savefig(plot_file_name, format="pdf") if open_plot: # Open it. open_prefix = "gnome-" if sys.platform == "linux" or sys.platform == "linux2" else "" os.system(open_prefix + "open " + plot_file_name) # Clear and close. pyplot.cla() pyplot.close()
df_review = pd.DataFrame(columns=['polarity', 'subjectivity', 'sentiment'])
df_review[['polarity', 'subjectivity', 'sentiment']] = \
    extracted_data[['polarity', 'subjectivity', 'sentiment']].astype(float)
df_review.index = pd.to_datetime(extracted_data['Date'])
df_review = df_review.resample('D').mean().ffill()
df_review['Date'] = df_review.index
df_review.reset_index(inplace=True, drop=True)

merge_all_reviews = pd.merge(nse_3months, df_review, on='Date')

## single plot
x = merge_all_reviews['Date']
y = merge_all_reviews['polarity']
plt.figure(figsize=(30, 10))
plt.plot(x, y, color='blue')
plt.title('date vs polarity', fontsize=28)
plt.xlabel('stock Date', fontsize=28)
plt.ylabel('polarity', fontsize=28)
plt.xticks(rotation=40)
plt.grid(linewidth=1)
plt.show()

#####
import matplotlib.pyplot as plt

x = merge_all_reviews['Date']
plt.plot(x, merge_all_reviews['ClosePrice'], label='ClosePrice')
plt.plot(x, merge_all_reviews['polarity'], label='Polarity')
plt.legend(loc='best')
plt.show()
def get_thresholds(in_dat, interactive=False, plot_events=False, fig_path=None, prefix=None): """Guess distance threshold for event filtering Analyse the events in the first million of Hi-C pairs in the library, plot the occurrences of each event type according to number of restriction fragments, and ask user interactively for the minimum threshold for uncuts and loops. Parameters ---------- in_dat: str Path to the .pairs file containing Hi-C pairs. interactive: bool If True, plots are diplayed and thresholds are required interactively. plot_events : bool Whether to show the plot fig_path : str Path where the figure will be saved. If None, the figure will be diplayed interactively. prefix : str If the library has a name, it will be shown on plots. Returns ------- dictionary dictionary with keys "uncuts" and "loops" where the values are the corresponding thresholds entered by the user. """ thr_uncut = None thr_loop = None max_sites = 50 # Map of event -> legend name of event for intrachromosomal pairs. legend = { "++": "++ (weird)", "--": "-- (weird)", "+-": "+- (uncuts)", "-+": "-+ (loops)", } colors = {"++": "#222222", "+-": "r", "--": "#666666", "-+": "tab:orange"} n_events = {event: np.zeros(max_sites) for event in legend} i = 0 # open the file for reading (just the first 1 000 000 lines) with open(in_dat, "r") as pairs: for line in pairs: # Skip header lines if line.startswith("#"): continue i += 1 # Only use the first million pairs to estimate thresholds if i == 1000000: break # Process Hi-C pair into a dictionary p = process_read_pair(line) # Type of event and number of restriction site between reads etype = p["type"] nsites = p["nsites"] # Count number of events for intrachrom pairs if etype != "inter" and nsites < max_sites: n_events[etype][nsites] += 1 def plot_event(n_events, legend, name): """Plot the frequency of a given event types over distance.""" plt.xlim([-0.5, 15]) plt.plot( range(n_events[name].shape[0]), n_events[name], "o-", label=legend[name], linewidth=2.0, c=colors[name], ) if interactive: # PLot: try: plt.figure(0) for event in legend: plot_event(n_events, legend, event) plt.grid() plt.xlabel("Number of restriction fragment(s)") plt.ylabel("Number of events") plt.yscale("log") plt.legend() plt.show(block=False) except Exception: logger.error( "Unable to show plots, skipping figure generation. Perhaps " "there is no Xserver running ? (might be due to windows " "environment). 
Try running without the interactive option.") # Asks the user for appropriate thresholds print( "Please enter the number of restriction fragments separating " "reads in a Hi-C pair below or at which loops and " "uncuts events will be excluded\n", file=sys.stderr, ) thr_uncut = int(input("Enter threshold for the uncuts events (+-):")) thr_loop = int(input("Enter threshold for the loops events (-+):")) try: plt.clf() except Exception: pass else: # Estimate thresholds from data for event in n_events: fixed = n_events[event] fixed[fixed == 0] = 1 n_events[event] = fixed all_events = np.log(np.array(list(n_events.values()))) # Compute median occurences at each restriction sites event_med = np.median(all_events, axis=0) # Compute MAD, to have a robust estimator of the expected deviation # from median at long distances mad = np.median(abs(all_events - event_med)) exp_stdev = mad / 0.67449 # Iterate over sites, from furthest to frag+2 for site in range(max_sites)[:1:-1]: # For uncuts and loops, keep the last (closest) site where the # deviation from other events <= expected_stdev if (abs(np.log(n_events["+-"][site]) - event_med[site]) <= exp_stdev): thr_uncut = site if (abs(np.log(n_events["-+"][site]) - event_med[site]) <= exp_stdev): thr_loop = site if thr_uncut is None or thr_loop is None: raise ValueError( "The threshold for loops or uncut could not be estimated. " "Please try running with -i to investigate the problem.") logger.info("Filtering with thresholds: uncuts={0} loops={1}".format( thr_uncut, thr_loop)) if plot_events: try: plt.figure(1) plt.xlim([-0.5, 15]) # Draw colored lines for events to discard plt.plot( range(0, thr_uncut + 1), n_events["+-"][:thr_uncut + 1], "o-", c=colors["+-"], label=legend["+-"], ) plt.plot( range(0, thr_loop + 1), n_events["-+"][:thr_loop + 1], "o-", c=colors["-+"], label=legend["-+"], ) plt.plot( range(0, 2), n_events["--"][:2], "o-", c=colors["--"], label=legend["--"], ) plt.plot( range(0, 2), n_events["++"][:2], "o-", c=colors["++"], label=legend["++"], ) # Draw black lines for events to keep plt.plot( range(thr_uncut, n_events["+-"].shape[0]), n_events["+-"][thr_uncut:], "o-", range(thr_loop, n_events["-+"].shape[0]), n_events["-+"][thr_loop:], "o-", range(1, n_events["--"].shape[0]), n_events["--"][1:], "o-", range(1, n_events["++"].shape[0]), n_events["++"][1:], "o-", label="kept", linewidth=2.0, c="g", ) plt.grid() plt.xlabel("Number of restriction site(s)") plt.ylabel("Number of events") plt.yscale("log") # Remove duplicate "kept" entries in legend handles, labels = plt.gca().get_legend_handles_labels() by_label = OrderedDict(zip(labels, handles)) plt.legend(by_label.values(), by_label.keys()) # Show uncut and loop threshold as vertical lines plt.axvline(x=thr_loop, color=colors["-+"]) plt.axvline(x=thr_uncut, color=colors["+-"]) if prefix: plt.title( "Library events by distance in {}".format(prefix)) plt.tight_layout() if fig_path: plt.savefig(fig_path) else: plt.show(block=False) # plt.clf() except Exception: logger.error( "Unable to show plots, skipping figure generation. Is " "an X server running? (might be due to windows " "environment). Try running without the plot option.") return thr_uncut, thr_loop
def plotHITStatus(savePath='/home/ubuntu/amt_guis/cocoa_depth/plots/', filename='time_info'):
    pdf = PdfPages(savePath + filename + '.pdf')
    fig = plt.figure()
    plt.clf()
    page_size = 100
    ass_time_info_list = []
    mtc = MTurkConnection(host=_host)
    assignments = getReviewableAssignments()
    # hits = getReviewableHITs()
    # for hit in hits:
    #     assignments = mtc.get_assignments(hit.HITId, page_size=page_size)
    for ass in assignments:
        time_info = {
            'AcceptTime': ass.AcceptTime,
            'SubmitTime': ass.SubmitTime,
            'ExecutionTime': [
                question_form_answer.fields[0]
                for question_form_answer in ass.answers[0]
                if question_form_answer.qid == '_hit_rt'
            ][0],
        }
        ass_time_info_list.append(time_info)

    ass_time_info_list.sort(key=lambda x: datetime.datetime.strptime(x['AcceptTime'], '%Y-%m-%dT%H:%M:%SZ'))
    first_assignment = ass_time_info_list[0]
    ass_time_info_list.sort(key=lambda x: datetime.datetime.strptime(x['SubmitTime'], '%Y-%m-%dT%H:%M:%SZ'))
    last_assignment = ass_time_info_list[-1]
    time_since_beginning = int(
        (datetime.datetime.strptime(last_assignment['SubmitTime'], '%Y-%m-%dT%H:%M:%SZ')
         - datetime.datetime.strptime(first_assignment['AcceptTime'], '%Y-%m-%dT%H:%M:%SZ')).total_seconds())

    completed_percentage = []
    # time since beginning in one hour intervals
    time_range = range(0, time_since_beginning + 3600, 3600)
    for s in time_range:
        currently_completed = [
            x for x in ass_time_info_list
            if datetime.datetime.strptime(x['SubmitTime'], '%Y-%m-%dT%H:%M:%SZ')
            < datetime.timedelta(seconds=s) + datetime.datetime.strptime(first_assignment['SubmitTime'], '%Y-%m-%dT%H:%M:%SZ')
        ]
        perc = len(currently_completed) / float(NUMBER_HITS * NUMBER_HIT_ASSIGNMENTS)
        completed_percentage.append(perc)

    per_hour_completion_rate = len(ass_time_info_list) / float(time_since_beginning / 3600)
    # print per_hour_completion_rate
    hours_to_completion = ((NUMBER_HITS * NUMBER_HIT_ASSIGNMENTS) - len(ass_time_info_list)) / per_hour_completion_rate
    # print hours_to_completion

    plt.plot(time_range, completed_percentage)
    rows = ['Completed Assignments', 'Total Assignments', 'Hour Completion Rate', 'Hours to Completion']
    data = [
        ["%d" % (len(ass_time_info_list))],
        ["%d" % (NUMBER_HITS * NUMBER_HIT_ASSIGNMENTS)],
        ["%.2f" % per_hour_completion_rate],
        ["%.2f" % hours_to_completion],
    ]
    plt.table(cellText=data, rowLabels=rows, loc='center', colWidths=[0.1] * 3)
    plt.title('Per hour completion percentage')
    plt.xticks(time_range[0::10], [str(x / 3600) for x in time_range[0::10]])
    plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1], ['0%', '20%', '40%', '60%', '80%', '100%'])
    plt.ylabel('Completion Percentage')
    plt.xlabel('Hours since beginning of task')
    plt.grid()
    pdf.savefig()
    pdf.close()
    plt.close()
def plot_probability_fractions(nicola):
    data_path = "./data/"
    data_files = ["2018-maschile-ledro.xls"]
    #
    times = {
        "total": np.zeros((0, )),
        "swim": np.zeros((0, )),
        "t1": np.zeros((0, )),
        "bike": np.zeros((0, )),
        "t2": np.zeros((0, )),
        "run": np.zeros((0, )),
    }
    #
    matches = {
        "total": "TEMPO_UFFICIALE",
        "swim": "trs",
        "t1": "tr1",
        "bike": "trb",
        "t2": "tr2",
        "run": "trr",
    }
    for df in data_files:
        print(df)
        for key, v in matches.items():
            times[key] = np.hstack(
                (times[key], tls.read_times_xls(data_path + df, v)))

    data_files = ["2016-maschile-ledro.csv", "2017-maschile-ledro.csv"]
    for df in data_files:
        print(df)
        for key, v in matches.items():
            times[key] = np.hstack(
                (times[key], tls.read_ledro_csv_format(data_path + df)[key]))

    keys = ["swim", "t1", "bike", "t2", "run"]
    fig, ax = plt.subplots(2, 5)
    f = tls.get_time_evaluation_formulas()
    d = tls.get_distances()

    # Candidate pace/speed values per leg; f[k] converts them to the times
    # used as histogram bin edges below.
    p = {}
    p["swim"] = np.arange(60, 180, 5)
    p["t1"] = np.arange(60, 300, 20)
    p["bike"] = np.flip(np.arange(25, 34, .5))
    p["t2"] = np.arange(60, 200, 10)
    p["run"] = np.arange(200, 390, 10)

    bins = []
    for k in keys:
        bins.append(f[k](d[k], p[k]))

    nicola_bin_id = {}
    for b, k in zip(bins, keys):
        # Index of the bin containing nicola's time, assuming evenly spaced bins
        nicola_bin_id[k] = int(np.floor((nicola[k] - b[0]) / (b[1] - b[0])))

    for pic in range(5):
        n0, b0, p0 = ax[0, pic].hist(times[keys[pic]], bins[pic])
        n1, b1, p1 = ax[1, pic].hist(times[keys[pic]], bins[pic], density=True, cumulative=True)
        p0[nicola_bin_id[keys[pic]]].set_fc('r')
        plt.sca(ax[0, pic])
        plt.title(keys[pic])
        plt.xticks(bins[pic], [])
        plt.grid(axis='x')
        plt.sca(ax[1, pic])
        xlabels = [str(timedelta(seconds=int(t))) for t in p[keys[pic]]]
        bins_time = [str(timedelta(seconds=int(t))) for t in bins[pic]]
        if keys[pic] == "bike":
            xlabels = [str(t) for t in p[keys[pic]]]
        plt.xticks(bins[pic], xlabels, rotation="vertical")
        plt.grid(axis='x')
        p1[nicola_bin_id[keys[pic]]].set_fc('r')
        data_to_write = {}
        data_to_write["pace-" + keys[pic]] = xlabels[:-1]
        data_to_write["finishers-" + keys[pic]] = n0
        data_to_write["percentile-" + keys[pic]] = n1
        data_to_write["time-" + keys[pic]] = bins_time[:-1]
        df = pandas.DataFrame(data_to_write)
        df.to_csv("./ledroman-partial-" + keys[pic] + ".csv", sep=',', index=False)
    plt.show()
# sky estimation
flux_star = apert_sum[star_ID] - msky * ap_area  # total - sky
flux_err = np.sqrt(apert_sum[star_ID] * gain            # Poissonian (star + sky)
                   + ap_area * ronoise**2                # Gaussian
                   + (ap_area * (gain * sky_std))**2 / nsky)
mag_ann[star_ID], merr_ann[star_ID] = mag_inst(flux_star, flux_err)
# print('{0:7d}: {1:.5f} {2:.5f} {3:4d} {4:3d} {5:.3f} {6:.3f}'.format(
#     i, msky, sky_std, nsky, nrej, mag_ann[i], merr_ann[i]))
apert_result += '{0:04}, {1:.5f}, {2:.5f}, {3:4d}, {4:3d}, {5:.3f}, {6:.3f}\n'\
    .format(star_ID, msky, sky_std, nsky, nrej, mag_ann[star_ID], merr_ann[star_ID])

fig = plt.figure(figsize=(12, 12))

fig.add_subplot(2, 3, 1)
plt.imshow(cutimg, vmin=np.mean(cutimg / 5.0), vmax=np.mean(cutimg * 2.0), origin='lower')
plt.ylabel('pixels')
plt.grid(ls=':')
plt.xlabel('DAO Star area image\n sum: {0} \n mean: {1:.3f}\n std: {2:.3f} \n max: {3} \n min: {4}'
           .format(np.sum(cutimg), np.mean(cutimg), np.std(cutimg), np.max(cutimg), np.min(cutimg)))

fig.add_subplot(2, 3, 2)
plt.imshow(apert_apply, vmin=np.mean(cutimg / 5.0), vmax=np.mean(cutimg * 2.0), origin='lower')
plt.ylabel('pixels')
plt.grid(ls=':')
plt.xlabel('DAO aperture area \n (r=2.0*FWHM)\n sum: {0:.0f} / {1:.3f} \n max: {2:.0f}\n min: {3:.0f} \n mean: {4:.3f} \n std: {5:.3f} \n Number of Pixel: {6} / {7:.5f}'
           .format(np.sum(apert_pixel), apert_sum[star_ID], np.max(apert_pixel), np.min(apert_pixel),
                   np.mean(apert_pixel), np.std(apert_pixel), len(apert_pixel), ap_area))

fig.add_subplot(2, 3, 3)
plt.imshow(sky_apply, vmin=np.mean(cutimg / 5.0), vmax=np.mean(cutimg * 2.0), origin='lower')
plt.ylabel('pixels')
plt.grid(ls=':')
plt.xlabel('DAO annulus area \n (r_in=4*FWHM, r_out=6*FWHM)\n sum: {0:.0f} \n max: {1:.0f} \n min: {2:.0f} \n mean: {3:.3f} / {4:.3f}\n std: {5:.3f} / {6:.3f} \n Number of sky pixels: {7} \n Number of reject pixels: {8}'\
cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
y_show_hat = model.predict(x_show)  # predicted labels on the display grid
print(y_show_hat.shape)
print(y_show_hat)
y_show_hat = y_show_hat.reshape(x1.shape)  # reshape to match the input grid
print(y_show_hat)
plt.figure(facecolor='w')
plt.pcolormesh(x1, x2, y_show_hat, cmap=cm_light)  # show the predicted regions
plt.scatter(x_test[0], x_test[1], c=y_test.ravel(), edgecolors='k', s=100, zorder=10, cmap=cm_dark, marker='*')  # test data
plt.scatter(x[0], x[1], c=y.ravel(), edgecolors='k', s=20, cmap=cm_dark)  # all data
plt.xlabel(iris_feature[0], fontsize=13)
plt.ylabel(iris_feature[1], fontsize=13)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid(b=True, ls=':', color='#606060')
plt.title('鸢尾花数据的决策树分类', fontsize=15)  # "Decision tree classification of the iris data"
plt.show()

# Prediction results on the test set
y_test = y_test.reshape(-1)
print(y_test_hat)
print(y_test)
result = (y_test_hat == y_test)  # True if the prediction is correct, False otherwise
acc = np.mean(result)
print('准确度: %.2f%%' % (100 * acc))  # accuracy

# Overfitting: error rate as a function of tree depth
depth = np.arange(1, 15)
err_list = []
for d in depth:
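    # A hedged completion of the truncated loop above: refit a depth-limited
    # tree and record the test error rate at each depth. The names x_train,
    # y_train, x_test, y_test and the DecisionTreeClassifier (assumed to be
    # imported from sklearn.tree earlier in the script) are assumptions, not
    # taken from the original snippet.
    clf = DecisionTreeClassifier(criterion='entropy', max_depth=d)
    clf.fit(x_train, y_train)
    y_hat = clf.predict(x_test)
    err_list.append(1 - np.mean(y_hat == y_test))  # error rate at this depth

# Plot error rate against depth to visualise overfitting (same assumptions).
plt.figure(facecolor='w')
plt.plot(depth, err_list, 'ro-', lw=2)
plt.xlabel('max_depth')
plt.ylabel('error rate')
plt.grid(True)
plt.show()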
def obs_res(self, write=False, plot=False):
    # Define the problem data
    for j, bruit in enumerate(self.bruits):
        # Initialise the u fields (while loops)
        u, u_nNext = [], []
        u = self.init_u()
        # u = np.sin(2*np.pi/self.L*self.line_x) + bruit
        r = self.dt / self.dx

        # Plot the initial state
        if plot:
            plt.figure("Resolution")
            plt.plot(self.line_x, u)
            plt.title("U vs X iteration 0 noise %d" % (j))
            plt.ylim((-2.5, 2.5))
            plt.pause(0.01)

        # pd_write_csv --->> np.save
        if write:
            filename = osp.join(self.datapath, "u_it0_%d_Nt%d_Nx%d_CFL%s_nu%s_%s.npy" % (
                j, self.Nt, self.Nx, self.CFL_str, self.nu_str, self.type_init))
            np.save(filename, u)

        t = it = 0
        while it <= self.itmax + 1:
            filename = osp.join(self.datapath, "u_it%d_%d_Nt%d_Nx%d_CFL%s_nu%s_%s.npy" % (
                it + 1, j, self.Nt, self.Nx, self.CFL_str, self.nu_str, self.type_init))
            if osp.exists(filename):
                it += 1
                continue

            fu = np.asarray([0.5 * u_x**2 for u_x in u])

            der_sec = [self.fac * (u[k+1] - 2 * u[k] + u[k-1]) for k in range(1, len(u) - 1)]
            der_sec.insert(0, self.fac * (u[1] - 2 * u[0] + u[-1]))
            der_sec.insert(len(der_sec), self.fac * (u[0] - 2 * u[-1] + u[-2]))

            for i in range(1, self.Nx - 1):  # so that the point Nx-2 is included
                u_m, u_p = intermediaires(u, fu, i, r)
                fu_m = 0.5 * u_m**2
                fu_p = 0.5 * u_p**2

                u_nNext.append(u[i] - r * (fu_p - fu_m) + bruit[i] + der_sec[i])

            u[1:self.Nx-1] = u_nNext
            u_nNext = []

            # Boundary conditions (periodic)
            u[0] = u[-2]
            u[-1] = u[1]

            u = np.asarray(u)

            if write:
                np.save(filename, u)

            it += 1
            t += self.dt  # next time step

            if plot:
                if it % 10 == 0:
                    plt.clf()
                    plt.plot(self.line_x[0:self.Nx-1], u[0:self.Nx-1], c='k')
                    plt.grid()
                    plt.title("u vs X, iteration %d noise %d" % (it, j))
                    plt.xticks(np.arange(0, self.L - self.dx, 0.25))
                    # plt.yticks(np.arange(-2.5, 2.5, 0.5))
                    plt.ylim(-2.5, 2.5)
                    plt.pause(0.1)
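# The helper intermediaires(u, fu, i, r) is not shown in this snippet. Below
# is only a plausible sketch, assuming a two-step Lax-Wendroff (Richtmyer)
# scheme for Burgers' equation: the half-step states at i-1/2 and i+1/2 are
# predicted from neighbouring cells before the corrector step above uses
# their fluxes. This is an illustration of the scheme, not the original helper.
def intermediaires(u, fu, i, r):
    """Return the predicted states (u_{i-1/2}, u_{i+1/2}) at the half time step."""
    u_m = 0.5 * (u[i] + u[i - 1]) - 0.5 * r * (fu[i] - fu[i - 1])
    u_p = 0.5 * (u[i + 1] + u[i]) - 0.5 * r * (fu[i + 1] - fu[i])
    return u_m, u_p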
time_list = []
temp1_list = []
temp2_list = []

with open(path) as f:
    data = f.read().strip().split('\n')

# Parse each line (after the header row) into a timestamp and two readings
for line in data[1:]:
    print(line)
    vals = line.split(',')
    print(vals)
    time_list += [float(vals[0])]
    temp1_list += [float(vals[1])]
    temp2_list += [float(vals[2])]

everything = [time_list, temp1_list, temp2_list]

plt.plot(time_list, temp1_list, color='b')
plt.plot(time_list, temp2_list, color='r')
plt.title("Time vs Sound level")
plt.xlabel('time (in seconds)')
plt.ylabel('Sound level (in dB)')
plt.grid('on')
plt.legend(['Sound level A-weighted (dB)', 'Sound level C-weighted (dB)'])
plt.show()
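# A more compact alternative for the same parsing step (a sketch only,
# assuming the file is strictly comma-separated with one header row and three
# numeric columns); np.loadtxt does the splitting and float conversion in one
# call.
import numpy as np

time_arr, level_a, level_c = np.loadtxt(path, delimiter=',', skiprows=1, unpack=True)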
def main():
    country = get_params()
    if not country:
        data = corona.get_data(True, True)
        COUNTRY = None
    else:
        c = r'\b' + re.escape(str(country).lower()) + r'\b'
        data = corona.get_data(True, False, c)
        if not data:
            print(u.error(), 'INVALID COUNTRY')
            quit()
        COUNTRY = 'US' if country == 'Us' else country

    # Get everything for plot & print
    xarr, yarr = build_func_data(data, COUNTRY)
    a, k_e, b, L, k_l, x0 = get_functions(xarr, yarr)
    B = 0  # b-value in exp function
    if COUNTRY is None:
        dates = list(data.keys())
    else:
        dates = list(data[COUNTRY].keys())

    # Print functions and data
    print_functions(a, k_e, B, L, k_l, x0)
    print_forecast(L, k_l, x0, a, k_e, B, dates, yarr, ndays)

    # Print last expected date and estimated number of confirmed cases based
    # on the logistic function
    start = datetime.strptime(dates[0], "%y-%m-%d").date()
    print_last_day(L, k_l, x0, start)

    # Plot graph
    try:
        # Generate dates for x-axis
        x_values = [start + timedelta(days=x) for x in range(len(dates) + ndays)]

        # Generate y-values
        end = len(x_values)
        x = numpy.linspace(0, end, num=end)
        y_values_e = exponential(x, *[a, k_e, B])
        y_values_l = logistic(x, *[L, k_l, x0])

        # Set size
        plt.rcParams['figure.figsize'] = [16, 9]
        plt.rc('font', size=10)

        # Format x-axis & y-axis
        fig, ax = plt.subplots()
        fig.autofmt_xdate()
        formatter = mdate.DateFormatter('%y-%m-%d')
        ax.xaxis.set_major_formatter(formatter)
        ax.get_xaxis().set_major_locator(mdate.DayLocator(interval=7))
        ax.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))

        plt.scatter(x_values[:len(xarr)], yarr, zorder=3, marker='o', s=40,
                    alpha=0.75, edgecolors="dimgrey", color="lightgrey",
                    label="Real")
        plt.plot(x_values, y_values_e, marker='.', markersize=4, linewidth=1,
                 color='firebrick', label="Exponential")
        plt.plot(x_values, y_values_l, marker='.', markersize=4, linewidth=1,
                 color='green', label="Logistic")
        plt.text(x_values[-ndays], yarr[-1], '{:,.0f}'.format(yarr[-1]),
                 color="dimgrey")
        plt.grid()
        plt.legend()
        plt.show()
    except Exception as e:
        print(e.args)
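# The model functions exponential() and logistic() are defined elsewhere in
# the original script; the sketch below shows the conventional forms that the
# parameter names (a, k_e, B) and (L, k_l, x0) above suggest. These bodies
# are assumptions, not the original definitions.
def exponential(x, a, k, b):
    # a * e^(k*x) + b : unconstrained exponential growth
    return a * numpy.exp(k * x) + b

def logistic(x, L, k, x0):
    # L / (1 + e^(-k*(x - x0))) : growth saturating at the plateau L
    return L / (1 + numpy.exp(-k * (x - x0)))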
from matplotlib.pyplot import grid, legend, plot, scatter, show, title, xlabel, ylabel

x = [1, 2, 3, 4, 5, 6]
y = [20, 3, 40, 5, 60, 7]
z = [1, 5, 3, 7, 11, 9]

plot(x, y, color="#003B46", label="x to y")
scatter(x, z, color="#F52549", label="x to z")

title("X vs Y & X vs Z")
xlabel("X data")
ylabel("Y & Z data")
legend()
grid()
show()
def graph(self, save=False):
    for i in range(len(self.names_stations)):
        if np.sum(np.isnan(self.Y_PM25[:, 0, i])) > 100:
            continue
        # Build a readable station name from the masked byte array; the
        # chained replaces strip the "[b'...']" formatting (note that they
        # also remove any literal "b" in the name).
        name_station = str(self.names_stations[i, (np.logical_not(self.names_stations[i].mask))])
        name_station = name_station.replace("]", '')
        name_station = name_station.replace("[", '')
        name_station = name_station.replace("b", '')
        name_station = name_station.replace("'", '')
        name_station = name_station.replace(" ", '')

        plt.figure(figsize=(30, 15))
        plt.title(name_station, fontsize=30)
        plt.plot(self.date_Y[24 * self.ML:],
                 pd.Series(self.Y_PM25[24 * self.ML:, 0, i]).rolling(
                     window=self.window_moving_average, min_periods=1,
                     center=False).mean().values,
                 'r*-', linewidth=3, markersize=10, label='Real Data')
        plt.plot(self.date_DA_ML[24 * self.ML:],
                 pd.Series(self.Xa_PM25_ML[24 * self.ML:len(self.date_DA_ML), 0, i]).rolling(
                     window=self.window_moving_average, min_periods=1,
                     center=False).mean().values,
                 'k', linewidth=4, markersize=10, label='LE-DA')
        plt.plot(self.date_FC_ML[:-1],
                 pd.Series(self.Xa_PM25_FC[:, 0, i]).rolling(
                     window=self.window_moving_average, min_periods=1,
                     center=False).mean().values,
                 'b', linewidth=4, markersize=10, label='LE-FC')
        plt.plot(self.date_FC_ML,
                 pd.Series(self.Xa_PM25_ML[len(self.date_DA_ML):, 0, i]).rolling(
                     window=self.window_moving_average, min_periods=1,
                     center=False).mean().values,
                 'g', linewidth=4, markersize=10, label='LE-ML')
        # plt.plot(self.date_Y[24*self.ML:], pd.Series(self.Xb_PM25_ML[24*self.ML:, 0, i]).rolling(window=self.window_moving_average, min_periods=1, center=False).mean().values, 'k--', linewidth=3, markersize=10, label='LE')
        plt.axvline(self.date_FC_ML[0], linewidth=3, linestyle='--', color=[0.3, 0.3, 0.3])
        ax = plt.gca()
        plt.rcParams['text.usetex'] = True
        plt.yticks(fontsize=30)
        plt.ylabel(r'PM$_{2.5}$ Concentration [$\mu$g/m$^3$]', fontsize=45)
        plt.grid(axis='x')
        plt.legend(fontsize=35)
        plt.xticks(fontsize=30)
        ax.set_xlim(self.date_ML[24 * self.ML], self.date_ML[-1])
        ax.set_ylim(0, 150)
        ax.xaxis.set_major_locator(plt.MaxNLocator(20))
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
        if save:
            plt.savefig('./figures/' + name_station + '_ML_' + str(self.ML) + '.png', format='png')
        plt.show()
for d in data:
    plt.plot(PU, MC[d.name],
             label='mc ' + d.name,
             linewidth=linewidth,
             )
for xsec in XSecs:
    plt.plot(PU, PUdata[xsec][:len(PU)],
             label='data ' + xsec + ' mb',
             linewidth=linewidth,
             )
    # plt.plot(PU, ratioDataMC[xsec]*MC, label='mc '+xsec)

ax = plt.gca()
ax.set_xlim(puLim)
plt.grid(linestyle='--')
plt.title('PileUp ' + era)
plt.xlabel('PU')
plt.ylabel('a.u.')
plt.legend()

############################################
ratioDataMC = {}
figsize = (6, 3)
fig1 = plt.figure(figsize=figsize)
for xsec in XSecs:
    ratioDataMC[xsec] = PUdata[xsec][:len(PU)] / MC['DYJets']
    plt.plot(PU, ratioDataMC[xsec],
             label='mc DYJets ' + xsec,
             linewidth=linewidth,
             )
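# A hedged continuation of the ratio figure above (a sketch only, not the
# original script): the axis cosmetics mirror the first figure so the
# data/MC pileup ratio can be read off per cross-section hypothesis.
ax = plt.gca()
ax.set_xlim(puLim)
plt.grid(linestyle='--')
plt.title('Data/MC pileup ratio ' + era)
plt.xlabel('PU')
plt.ylabel('data / MC')
plt.legend()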
Two classes will be generated from two normal distributions with different means."""

# First class
np.random.seed(7)  # fix the random seed (assigning np.seed = 7 would have no effect)
train_data = np.random.normal(size=(100, 2))
train_labels = np.zeros(100)  # target feature

# Second class
train_data = np.r_[train_data, np.random.normal(size=(100, 2), loc=2)]
train_labels = np.r_[train_labels, np.ones(100)]

# Plotting
plt.rcParams['figure.figsize'] = (10, 8)
plt.scatter(train_data[:, 0], train_data[:, 1], c=train_labels, s=100,
            cmap='autumn', edgecolors='black', linewidths=1.5)
plt.plot(range(-2, 5), range(4, -3, -1))  # straight line
plt.grid()  # grid
plt.show()

"""Let us try to separate these two classes by training a decision tree. The
tree will use the max_depth parameter, which limits the depth of the tree.
We then visualise the resulting class separation boundary."""

def get_grid(data):
    """Return the mesh grid covering the data."""
    x_min, x_max = data[:, 0].min() - 1, data[:, 0].max() + 1
    y_min, y_max = data[:, 1].min() - 1, data[:, 1].max() + 1
    return np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))

# The min_samples_leaf parameter specifies the minimum number of samples
# required in a leaf node
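# A hedged sketch of the step announced above: fit a depth-limited decision
# tree on the toy data and draw its decision boundary on the grid returned by
# get_grid. The use of sklearn's DecisionTreeClassifier and the pcolormesh
# colouring are assumptions consistent with the surrounding code, not the
# original continuation.
from sklearn.tree import DecisionTreeClassifier

clf_tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=17)
clf_tree.fit(train_data, train_labels)

xx, yy = get_grid(train_data)
predicted = clf_tree.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.pcolormesh(xx, yy, predicted, cmap='autumn')
plt.scatter(train_data[:, 0], train_data[:, 1], c=train_labels, s=100,
            cmap='autumn', edgecolors='black', linewidths=1.5)
plt.show()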