def functions_to_perform_onDF(df, stock, F2scores, shift, QUANTILES, date1, date2, out_dir='', xVar="???", yVar="RelRet"):
    print "\n Performing functions on a DF subset with number of observations = ", len(df)
    print stock.name
    print yVar, "is being explored"
    print date1.strftime('%d-%m-%y'), date2.strftime('%d-%m-%y')

    STAT_functions.ols_F2vsYvar(df, stock, F2scores, yVar, QUANTILES)

    """prep stuff for plotting"""
    s_dates = date1.strftime('%d-%m-%y') + "_" + date2.strftime('%d-%m-%y')
    fig_fn = out_dir + stock.name + "_" + xVar + "_vs_" + yVar + "_" + s_dates  # + "_shift("+str(abs(shift))+")"
    suptitle = xVar + " vs " + yVar  # +" (with Shift_Days="+str(abs(shift))+")"  # the super-title above all subplots; can be omitted
    #Plotting.ScatterSubplots_F2vsYvar(df, stock, F2scores, QUANTILES, fig_fn+".jpg", date1, date2, suptitle, yVar, ymin=-0.04, ymax=0.04)

    fig_fn = out_dir + stock.name + "_" + yVar + "_behaviour_" + s_dates + ".jpg"  # was stock_name (undefined); use stock.name
    #Plotting.stacked_TimeSeries(df, stock.name, [yVar], yVar+" behaviour of "+stock.name, fig_fn, date1, date2, mean_per=8, ymin=-0.04, ymax=0.04)

    F2scores = [F2scores[0]]
    for fscore in F2scores:  # F2scores if all are needed
        if fscore.find('8') > -1:
            roll_mean = 8
        else:
            roll_mean = 15
        depVar = "ooRelRet(nextDay)"  # yVar
        fig_fn = out_dir + stock.name + "_behaviour_" + fscore + "_" + depVar + "_" + yVar + "_" + s_dates + ".jpg"
        suptitle = stock.name + ": behaviour of " + fscore
        Plotting.overlays_TimeSeries(df, stock.name, fscore, yVar1=yVar, yVar2=depVar, suptitle=suptitle,
                                     fig_fn=fig_fn, stock=stock, date1=date1, date2=date2, Quantiles=QUANTILES,
                                     mean_per=roll_mean, ymin=-0.04, ymax=0.04)

    fig_fn = out_dir + stock.name + "_" + yVar + "_Hist_" + s_dates + ".jpg"
def plot(self, nbins=None, nperbin=5, hold=False, drawnow=True, **kwargs):
    import Plotting
    pylab = Plotting.load_pylab()
    import matplotlib
    r = numpy.array([min(self.x), max(self.x)])
    r = r.mean() + 1.1 * (r - r.mean())
    b = numpy.linspace(r[0], r[1], num=200, endpoint=True)
    p = self.psi(b)
    binned = []
    if nbins is None:
        # bin a fixed number of trials (nperbin) per bin, in sorted-x order
        xx, yy = zip(*sorted(zip(self.x, self.y)))
        xx, yy = numpy.asarray(xx), numpy.asarray(yy)
        for i in range(0, len(xx), nperbin):
            x = (xx[i:i + nperbin]).mean()
            y = (yy[i:i + nperbin] > 0).mean()
            binned.append((x, y))
    else:
        # bin by nbins equally spaced intervals across the x range
        bins = numpy.linspace(r[0], r[1], num=nbins, endpoint=True)
        bins = zip(bins[:-1], bins[1:])
        for lo, up in bins:
            sel = numpy.logical_and(self.x < up, self.x >= lo)
            x = (self.x[sel]).mean()
            y = (self.y[sel] > 0).mean()
            binned.append((x, y))
    x, y = zip(*binned)
    if not hold:
        pylab.cla()
    lineprops = dict(kwargs, marker='None')
    line = pylab.plot(b, p, **lineprops)
    dotprops = dict({'marker': 'o', 'markersize': 10}, **kwargs)
    dotprops.update({'linestyle': 'None', 'color': line[0].get_color()})
    binned = pylab.plot(x, y, **dotprops)
    pylab.gca().grid(True)
    if drawnow:
        pylab.draw()
    return line, binned
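# Usage sketch for plot() above (hedged: the owning class is not shown in this
# excerpt; it is assumed to expose trial arrays x, y and a fitted curve psi).
# The dots are per-bin proportions of y > 0; the smooth line is psi over x.
#
#   line, dots = model.plot(nperbin=10, color='b')   # fixed trials per bin
#   line, dots = model.plot(nbins=8, hold=True)      # or equally spaced bins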
def main():
    usage = "usage: %prog [options]"
    # Handle the command-line options
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-i", "--input", dest="input", help="pickle with results data")
    parser.add_option("-o", "--output", dest="output", help="output directory")
    parser.add_option("-e", "--extension", dest="extension", default=".svg", help="file extension")
    (options, argv) = parser.parse_args()
    dir_to_save = options.output
    pickle_file = options.input
    if not os.path.isfile(pickle_file):
        raise Exception("invalid pickle_file " + pickle_file, "in main")
    print("pickle_file to read from= " + pickle_file)
    if not os.path.isdir(dir_to_save):
        raise Exception("invalid output directory " + dir_to_save, "in main")
    print("output path= " + dir_to_save)
    (pcl, cam_points, intrin, result, cov_x, infodict) = pickle.load(open(pickle_file, 'rb'))
    trans_laser_cam = convert_vector_rot_trans_to_homogenous(result)

    def depth_func(p):
        return (p[0] ** 2 + p[1] ** 2 + p[2] ** 2) ** 0.5

    print("plotting depth over image row")
    save_name = dir_to_save + "/Row_over_depth" + options.extension
    Plotting.plot_depth_over_row(pcl, cam_points, trans_laser_cam, intrin, save_name, depth_func)
    print("plotting back projection error over depth")
    save_name = dir_to_save + "/BackProjectionErrorWithOutliers" + options.extension
    Plotting.plot_back_projection_error(pcl, cam_points, trans_laser_cam, intrin, save_name, depth_func, 100000, (1., 8.), 20, 2.0, 1.5)
    save_name = dir_to_save + "/BackProjectionErrorWithoutOutliers" + options.extension
    Plotting.plot_back_projection_error(pcl, cam_points, trans_laser_cam, intrin, save_name, depth_func, 5, (1., 8.), 10, 2.0, 1.5)
def __plotGraphRec(self, mind, root, ptchsz, eldiam, h0, h1, dd, W, ax, fig):
    """
    Private function used by plotGraph

    :param mind: index
    :type mind: int
    :param root: center of the ellipse
    :type root: list
    :param ptchsz: patch size
    :type ptchsz: int
    :param eldiam: diameter of the ellipse
    :type eldiam: float
    :param h0: height of branch
    :type h0: float
    :param h1: height of branch
    :type h1: float
    :param dd: distance
    :type dd: float
    :param W: array of matches
    :type W: numpy.ndarray
    :param ax: axis to plot on
    :type ax: matplotlib.pyplot.axis
    :param fig: figure to plot on
    :type fig: matplotlib.pyplot.figure
    """
    # add ellipse for this node
    el = Ellipse(root, eldiam, eldiam)
    ax.add_patch(el)

    # count leaves and inner nodes at this node
    leaves = []
    nodes = []
    for k in range(self.l[mind]):
        if self.n[mind + (k,)] == 1:
            leaves.append(self.iByI[mind + (k,)][0])
        else:
            nodes.append(mind + (k,))

    # do we have leaves? If yes, there will be an extra branch for them
    if len(leaves) > 0:
        l = 1.0 + len(nodes)
    else:
        l = float(len(nodes))

    # compute the height each branch gets
    dh = (h1 - h0) / (l + 1.0)  # one for each node, one for all leaves
    # compute new heights
    newh = linspace(h0 + dh, h1 - dh, l)

    # label the current node with its p
    s = r"$p_{%d} = %.2f$" % (self.pdict[mind], self.p[self.pdict[mind]])
    angle = arctan(((h1 - h0) * .5 - dh) / dd) / (2 * pi) * 360.0
    ax.text(root[0] - 2 * eldiam, root[1] + eldiam, s, rotation=angle,
            horizontalalignment='left', verticalalignment='bottom')

    # draw arrows to nodes and call the plotting routine recursively
    hc = 0
    for no in nodes:
        ax.arrow(root[0] + .5 * eldiam, root[1], dd - eldiam, newh[hc] - root[1])
        newroot = (root[0] + dd, newh[hc])
        self.__plotGraphRec(no, newroot, ptchsz, eldiam,
                            newh[hc] - .5 * dh, newh[hc] + .5 * dh, dd, W, ax, fig)
        hc += 1

    # plot the leaves
    if len(leaves) > 0:
        ax.arrow(root[0] + .5 * eldiam, root[1], dd - eldiam, newh[hc] - root[1])
        ny = floor(sqrt(float(len(leaves))))
        while ny * ptchsz > dh and ny > 1:
            ny = ceil(ny / 2)
        nx = ceil(len(leaves) / ny)
        a = fig.add_axes(ax2fig([root[0] + dd, newh[hc] - .5 * ny * ptchsz, nx * ptchsz, ny * ptchsz], ax),
                         xlim=(0, nx * ptchsz), ylim=(0, ny * ptchsz), autoscale_on=False)
        a.axis('off')
        Plotting.plotPatches(W[:, leaves], (nx, ny), ptchsz, ax=a)
Styles = {
    'mini_gb2': ['k', 'solid'],
    'mini_gb5': ['r', 'solid'],
    'mini_lin': ['g', 'solid'],
    'epsall_gb2': ['k', 'dashed'],
    'epsall_gb5': ['r', 'dashed'],
    'epsall_lin': ['g', 'dashed'],
    'lin': ['b', 'solid']
}

parser = argparse.ArgumentParser()
parser.add_argument('--save', dest='save', action='store_true')
Args = parser.parse_args(sys.argv[1:])

D1 = Plotting.read_dir("../results/mslr30k_T=36000_L=3_e=0.1/")
D2 = Plotting.read_dir("../results/yahoo_T=40000_L=2_e=0.5/")

print(mpl.rcParams['figure.figsize'])
fig = plt.figure(figsize=(mpl.rcParams['figure.figsize'][0] * 2,
                          mpl.rcParams['figure.figsize'][1] - 1))
# invisible host axes: carries the shared x/y labels for the two panels
ax = fig.add_subplot(111, frameon=False)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='none', top='off', bottom='off',
               left='off', right='off')  # assumed completion: the source is truncated mid-call
def setUp(self): self.job = Plotting.Plotting() self.job.add_option(["Samples", "test"]) self.job.add_option(["Input", "test"]) self.job.add_option(["CutFile", "test"])
def laser_fft_plot(): # make a plot of the FFT of a laser optical spectrum # deduce the laser cavity length from the FFT # R. Sheehan 9 - 8 - 2019 FUNC_NAME = ".laser_fft_plot()" # use this in exception handling messages ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME try: DATA_HOME = 'c:/users/robert/Research/FFT_Data/Examples_2019/' if os.path.isdir(DATA_HOME): os.chdir(DATA_HOME) print(os.getcwd()) #wl_file = "ed_laser_wl.csv" #pow_file = "ed_laser_pow.csv" wl_file = "PM_Laser_WL.csv" pow_file = "PM_Laser_Pow.csv" #wl_file = "WL_Meas_Shallow_I_90.txt" #pow_file = "Pow_Meas_Shallow_I_90.txt" #wl_file = "20C_wave.dat" #pow_file = "50mA.dat" # wl_file = "Wavelength.txt" # pow_file = "Spectrum_I_150.txt" #extension = ".txt" extension = ".csv" #extension = ".dat" frq_file = pow_file.replace(extension, "") + "_Frq_data" + extension fft_file = pow_file.replace(extension, "") + "_Abs_FFT_data" + extension if glob.glob(wl_file) and glob.glob(pow_file) and glob.glob( frq_file) and glob.glob(fft_file): # plot the time series wl_data = np.loadtxt(wl_file, unpack=True) spct_data = np.loadtxt(pow_file, unpack=True) args = Plotting.plot_arg_single() args.loud = True args.x_label = 'Wavelength / nm' args.y_label = 'Power / dBm' args.marker = 'r-' args.fig_name = pow_file.replace(extension, "") Plotting.plot_single_curve(wl_data, spct_data, args) del wl_data del spct_data # plot the computed FFT frq_data = np.loadtxt(frq_file, unpack=True) spct_data = np.loadtxt(fft_file, unpack=True) lambda_1 = 1500.0 #n_g = 3.2 # estimate of the material group index n_g = 1.3 # estimate of the material group index for i in range(0, len(frq_data), 1): frq_data[i] = ((lambda_1**2) / (2.0 * n_g)) * frq_data[ i] / 1000.0 # divide by 1000 to convert from nm to um args.loud = True args.x_label = 'Cavity Length / um' args.y_label = 'Signal FFT' args.marker = 'g-' args.fig_name = fft_file.replace(extension, "") args.plt_range = [0, 8e+3, 0, 30e+3] Plotting.plot_single_curve(frq_data, spct_data, args) del frq_data del spct_data else: ERR_STATEMENT = ERR_STATEMENT + "\nCannot locate input files" raise Exception else: raise EnvironmentError except EnvironmentError: print(ERR_STATEMENT) print("Cannot locate directory:", DATA_HOME) except Exception as e: print(ERR_STATEMENT) print(e)
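# Note on the wavelength-to-length conversion in laser_fft_plot() above: the FFT
# of a spectrum sampled in wavelength has conjugate variable x in units of 1/nm,
# and for a Fabry-Perot cavity of length L the fringe spacing (free spectral
# range) is
#
#   \Delta\lambda = \lambda_1^2 / (2 n_g L)   =>   L = (\lambda_1^2 / (2 n_g)) * x
#
# which is exactly the scaling applied to frq_data in the loop, with the final
# factor of 1/1000 converting nm to um on the cavity-length axis.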
plt.plot(composite1.wavelength[lowindex[0]:highindex[0]], composite1.flux[lowindex[0]:highindex[0]])
plt.plot(composite2.wavelength[lowindex[0]:highindex[0]], composite2.flux[lowindex[0]:highindex[0]])
plt.savefig('../plots/' + plot_name + '.png')
plt.show()

# Read whatever you saved the table as
Data = Table.read(composite1.savedname, format='ascii')
# Checking to see how the table reads; right now it has a header that might be screwing things up.
print Data

# To be honest, I'm not sure entirely how this works.
# Can someone who worked on this piece of code work with it?
Relative_Flux = [Data["Wavelength"], Data["Flux"], composite1.name]  # Want to plot a composite of multiple spectra
Residuals = [Data["Wavelength"], Data["Variance"], composite1.name]
Spectra_Bin = []
Age = []
Delta = []
Redshift = []
Show_Data = [Relative_Flux, Residuals]
image_title = "../plots/Composite_Spectrum_plotted.png"  # Name the image (with location)
title = "Composite Spectrum"

# Available Plots: Relative Flux, Residuals, Spectra/Bin, Age, Delta, Redshift, Multiple Spectrum, Stacked Spectrum
#                  0              1          2            3    4      5         6                  7
Plots = [0]  # the plots you want to create

# The following line will plot the data.
# It's commented out until it works...
Plotting.main(Show_Data, Plots, image_title, title)
def main(queries, plot_name, plots, labels):
    num = int(queries[1])
    # The file name of the plot is now labeled by time of creation, but you can
    # personalize it if you want, or rename it once it's been saved.
    #plot_name = str(queries) + '_composite_comparison, ' + (time.strftime("%H,%M,%S"))
    wmin = 3000
    wmax = 10000
    d = {}
    for n in range(num):
        # Dispatch on how many optional arguments were supplied. These must be
        # mutually exclusive (elif) branches: with plain `if`s, the final `else`
        # would overwrite the composite built by the first two branches.
        if len(queries) == num + 3:
            d["composite{0}".format(n + 1)] = composite.main(queries[n + 2], queries[num + 2])
        elif len(queries) == num + 4:
            d["composite{0}".format(n + 1)] = composite.main(queries[n + 2], queries[num + 2], queries[num + 3])
        elif len(queries) == num + 5:
            d["composite{0}".format(n + 1)] = composite.main(queries[n + 2], queries[num + 2], queries[num + 3], queries[num + 4])
        else:
            d["composite{0}".format(n + 1)] = composite.main(queries[n + 2])

    # Read whatever you saved the table as; iterates over however many composites you used.
    # This is how you have to address things if you want to iterate over queries.
    # The n+1 makes the first item composite1, not composite0.
    for n in range(num):
        d["data{0}".format(n + 1)] = Table.read(d["composite{0}".format(n + 1)].savedname, format='ascii')
        d["wavelengths{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Wavelength"]])
        d["fluxes{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Flux"]])
        d["variances{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Variance"]])
        d["ages{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Age"]])
        d["dm15s{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Dm_15"]])
        d["vels{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Velocity"]])
        d["reds{0}".format(n + 1)] = np.array([d["data{0}".format(n + 1)]["Redshift"]])

    # Find the wavelength range common to all composites (non-zero flux only).
    xmin = 0
    xmax = 100000
    for n in range(num):
        wave = d["wavelengths{0}".format(n + 1)][0]
        flux = d["fluxes{0}".format(n + 1)][0]
        good = np.where(flux != 0)
        lo_wave = wave[good][0]
        hi_wave = wave[good][len(wave[good]) - 1]
        print lo_wave, hi_wave
        if lo_wave > xmin:
            xmin = lo_wave
        if hi_wave < xmax:
            xmax = hi_wave

    # From now on, list the data you want to plot as [Xdata, Ydata, Xdata_2, Ydata_2].
    # This chunk creates arrays of the right length for however many queries you used.
    plot_array = []
    name_array = []
    residual_array = []
    variance_array = []
    age_array = []
    dm15_array = []
    vel_array = []
    red_array = []
    for n in range(num):
        plot_array.append(d["wavelengths{0}".format(n + 1)])
        plot_array.append(d["fluxes{0}".format(n + 1)])
        residual_array.append(d["wavelengths{0}".format(n + 1)][0])
        residual_list = np.array([d["fluxes{0}".format(n + 1)] - d["fluxes1"]])
        residual_array.append(residual_list[0][0])
        variance_array.append(d["wavelengths{0}".format(n + 1)][0])
        variance_array.append(d["variances{0}".format(n + 1)][0])
        age_array.append(d["wavelengths{0}".format(n + 1)][0])
        age_array.append(d["ages{0}".format(n + 1)][0])
        dm15_array.append(d["wavelengths{0}".format(n + 1)][0])
        dm15_array.append(d["dm15s{0}".format(n + 1)][0])
        vel_array.append(d["wavelengths{0}".format(n + 1)][0])
        vel_array.append(d["vels{0}".format(n + 1)][0])
        red_array.append(d["wavelengths{0}".format(n + 1)][0])
        red_array.append(d["reds{0}".format(n + 1)][0])
        name_array.append(labels[n])
        name_array.append(" ")
    #print variance_array  # there were some problems with dimensionality, fixed now.
################## #If you want to use custom names for your composites, #fill out and uncomment this next line #name_array = ["composite1name", " ", "composite2name", " ", etc] ################## Relative_Flux = plot_array #plots all given composites Variance = variance_array # Check it out! Variances plot now. Residuals = residual_array # Check it out! Residuals plot now. Spectra_Bin = [] Age = age_array Delta = dm15_array Redshift = red_array ## If you want custom names, uncomment and use line 83, for consistency. ##Otherwise it'll default to just labeling composites in order. Names = name_array Show_Data = [ Relative_Flux, Variance, Residuals, Spectra_Bin, Age, Delta, Redshift ] ## Available Plots: Relative Flux, Residuals, Spectra/Bin, Age, Delta, Redshift, Multiple Spectrum, Stacked Spectrum ## 0 1 2 3 4 5 6, 7 # the plots you want to create # All of these worked for me. Test with your own queries. (Sam, 4/16) # Choose the plot range and plot type! Plots = plots image_title = "../plots/" + plot_name + ".png" title = plot_name Plotting.main(Show_Data=Show_Data, Plots=Plots, image_title=image_title, title=title, Names=Names, xmin=xmin, xmax=xmax)
#!/usr/bin/env python
# encoding: utf-8
"""
FileCollections.py

Created by Morten Dam Jørgensen on 2011-11-06.
Copyright (c) 2011 . All rights reserved.
"""

import sys
import os
from ROOT import *
import re

import Plotting
color = Plotting.new_color()  # new color iterator


class HistCollection(object):
    """Collection of histograms"""
    def __init__(self, hist_array=None):
        super(HistCollection, self).__init__()
        # avoid a shared mutable default argument; each instance gets its own list
        self.hist_array = hist_array if hist_array is not None else []

    def merge(self, i=0, j=None):
        """docstring for merge"""
        if not j:
            j = len(self.hist_array) - 1
        tmpHist = self.hist_array[i].th.Clone("new_%s" % self.hist_array[i].th.GetName())
        for h in self.hist_array[i+1:j]:
def calculatePaths(x, y, z, avgDensity, gdict, allPoints, indsPtsMap, ptsIndsMap, startPoint, endPoint):
    # pick the grid nodes nearest the requested start and end points
    # (lambda argument renamed to `pt` so it no longer shadows the `x` parameter)
    r1 = min(gdict.keys(), key=lambda pt: distancesq(pt, startPoint))
    r2 = min(gdict.keys(), key=lambda pt: distancesq(pt, endPoint))
    start = time.time()
    print(avgDensity)
    sp = PathOpt.AStar(gdict, r1, r2, hScalar=1.0)
    end = time.time()
    print("Best path found (A*) in {0} seconds.".format(end - start))
    fig2 = plt.figure(2)
    pl.setGeo(2)
    ax = pl.plot2dHeatMap(fig2, x, y, z)
    pl.plotPath(ax, sp[1])
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    fig3 = plt.figure(3)
    ax2 = pl.plot3dHeatMap(fig3, x, y, z)
    xp = [i[0] for i in sp[1]]
    yp = [j[1] for j in sp[1]]
    zp = [z[ptsIndsMap[p][0]][ptsIndsMap[p][1]] for p in sp[1]]
    ax2.plot(xp, yp, zp, 'k', lw=2)
    pl.setGeo(3)
    plt.draw()
    start = time.time()
    sp = PathOpt.dijkstra(gdict, r1, r2)
    end = time.time()
    print("Best path found (Dijkstra) in {0} seconds.".format(end - start))
    fig2 = plt.figure(4)
    pl.setGeo(2)
    ax = pl.plot2dHeatMap(fig2, x, y, z)
    pl.plotPath(ax, sp[1])
    fig3 = plt.figure(5)
    ax2 = pl.plot3dHeatMap(fig3, x, y, z)
    xp = [i[0] for i in sp[1]]
    yp = [j[1] for j in sp[1]]
    zp = [z[ptsIndsMap[p][0]][ptsIndsMap[p][1]] for p in sp[1]]
    ax2.plot(xp, yp, zp, 'k', lw=2)
    pl.setGeo(3)
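# distancesq() is defined elsewhere in this codebase; a minimal sketch of what
# it presumably computes (squared Euclidean distance, so the nearest-node
# search above can skip the square root):
def distancesq(a, b):
    # squared distance between two 2-D points (assumed signature)
    return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2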
def Compare_Higher_Temperature(static_device, eam_bias, loud=False): # compare data measured over higher temperatures # read in data # loop over quantity to make all the necessary plots # power data at T = 25, 30 was collected without the need for correction # R. Sheehan 16 - 11 - 2017 DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/Higher_Temperature/" try: if os.path.isdir(DATA_HOME): os.chdir(DATA_HOME) files = glob.glob("TIPS_1_EAM*T*I%(v1)s*VEAM_%(v2)0.2f.txt" % { "v1": static_device, "v2": eam_bias }) if files: # read in all data the_data = [] for i in range(0, len(files), 1): numbers = Common.extract_values_from_string(files[i]) if int(numbers[1]) == 20: correct_power = True else: correct_power = False the_data.append(read_Leak_data(files[i], correct_power)) del numbers # loop over quantity to make the necessary plots quantity = [DFB_VAL, SOA_VAL, EAM_VAL, PWR_VAL] #eam_str = "%(v2)0.2f"%{"v2":eam_bias} for q in quantity: hv_data = [] labels = [] marks = [] for i in range(0, len(the_data), 1): hv_data.append( [the_data[i][1][CURR_VAL], the_data[i][1][q]]) marks.append(Plotting.labs_lins[i]) labels.append('T = %(v1)0.0f C' % {"v1": the_data[i][0].temperature}) arguments = Plotting.plot_arg_multiple() arguments.loud = loud arguments.crv_lab_list = labels arguments.mrk_list = marks arguments.x_label = the_data[0][ 0].sweep_device + ' Current (mA)' arguments.y_label = get_Leak_label(q) #arguments.plt_range = get_Leak_plot_range(q) arguments.plt_title = the_data[0][ 0].static_device + ' Current = ' + str( the_data[0][0].static_device_current) + ' (mA)' arguments.fig_name = get_Leak_name( q) + '_I' + the_data[0][0].static_device + '_' + str( the_data[0][0].static_device_current).replace( '.0', '') + '_VEAM_' + '%(v2)0.2f' % { "v2": eam_bias } + '.png' #if q%2 == 0 and q < PWR_ERR: arguments.log_y = True Plotting.plot_multiple_curves(hv_data, arguments) del hv_data del labels del marks del the_data del files else: raise Exception else: raise EnvironmentError except EnvironmentError: print("Error: Leak_Analysis.Compare_Higher_Temperature()") print("Cannot find", DATA_HOME) except Exception: print("Error: Leak_Analysis.Compare_Higher_Temperature()")
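# Quick illustration of the %-interpolated glob pattern used above; the values
# here are made up for the example, only the formatting behaviour matters:
pattern = "TIPS_1_EAM*T*I%(v1)s*VEAM_%(v2)0.2f.txt" % {"v1": "DFB", "v2": -1.50}
print(pattern)  # -> TIPS_1_EAM*T*IDFB*VEAM_-1.50.txt; glob then matches any T and current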
def main_aircraft_processing(opts):
    """Control routine for processing."""
    sat_dir = opts[0]
    flt_fil = opts[1]
    sensor = opts[2]
    flt_typ = opts[3]
    out_dir = opts[4]
    beg_t = opts[5]
    end_t = opts[6]
    mode = opts[7]
    comp = opts[8]
    lat_bnd = opts[9]
    lon_bnd = opts[10]
    sat_cmap = opts[11]
    bg_col = opts[12]
    ac_se_col = opts[13]
    ac_cmap = opts[14]
    ac_mina = opts[15]
    ac_maxa = opts[16]
    ac_pos_col = opts[17]
    txt_col = opts[18]
    txt_size = opts[19]
    txt_pos = opts[20]
    cache_dir = opts[21]
    res = opts[22]
    tag = opts[23]
    linewid = opts[24]
    dotsiz = opts[25]
    singlep = opts[26]

    print("Beginning processing")
    verbose = True

    if flt_typ == 'CSV':
        ac_traj = indata.read_aircraft_csv(flt_fil, beg_t, end_t)
    elif flt_typ == 'FR24':
        ac_traj = indata.read_aircraft_fr24(flt_fil, beg_t, end_t)
    else:
        print("ERROR: Unsupported flight data type:", flt_typ)
        quit()
    ac_traj2 = utils.interp_ac(ac_traj, '30S')
    if verbose:
        print("\t-\tLoaded aircraft trajectory.")
    if singlep:
        plot_bounds = utils.calc_bounds_sp(ac_traj, lat_bnd, lon_bnd)
    else:
        plot_bounds = utils.calc_bounds_traj(ac_traj, lat_bnd, lon_bnd)
    n_traj_pts2 = len(ac_traj2)
    area = utils.create_area_def(plot_bounds, res)
    start_t, end_t, tot_time = utils.get_startend(ac_traj, sensor, mode)
    prev_time = datetime(1850, 1, 1, 0, 0, 0)
    old_scn = None
    sat_img = None
    for i in range(2, n_traj_pts2):
        outf = out_dir + str(i - 1).zfill(4) + '_' + comp + '_' + tag + '.png'
        if os.path.exists(outf):
            continue
        cur_time = ac_traj2.index[i]
        if verbose:
            print('\t-\tNow processing', cur_time)
        sat_time = utils.get_cur_sat_time(cur_time, sensor, mode)
        if sat_time != prev_time:
            if verbose:
                print('\t-\tLoading satellite data for', sat_time)
            sat_img = indata.load_sat(sat_dir, sat_time, comp, sensor, plot_bounds, cache_dir, mode)
            if sat_img is None and old_scn is not None:
                # fall back to the previous scene if this load failed
                sat_img = old_scn
            elif sat_img is None:
                print("ERROR: No satellite data for", sat_time)
            old_scn = sat_img
            prev_time = sat_time
        else:
            if verbose:
                print('\t-\tSatellite data already loaded for', sat_time)
        if verbose:
            print('\t-\tPlotting and saving results')
        fig = acplot.setup_plot(plot_bounds, bg_col, linewid, sat_img[comp].attrs['area'].to_cartopy_crs())
        if sat_img is not None:
            fig = acplot.overlay_sat(fig, sat_img, comp, sat_cmap)
        # fig = acplot.overlay_startend(fig, ac_traj2, ac_se_col, dotsiz)
        if not singlep:
            fig = acplot.overlay_ac(fig, ac_traj2, i, ac_cmap, ac_mina, ac_maxa, linewid)
        fig = acplot.add_acpos(fig, ac_traj2, i, ac_pos_col, dotsiz)
        fig = acplot.overlay_time(fig, cur_time, txt_col, txt_size, txt_pos)
        acplot.save_output_plot(outf, fig, 90)
        fig.clf()
        plt.close(fig)  # Figure objects have no .close(); assuming matplotlib, close via pyplot
    print("Completed processing")
import Classification import NNC import Plotting import pandas as pd #%% Plotting.Plot() #%% print ("1 feature") l1 = Classification.LogReg(2,3) l2 = Classification.SVM(2,3) l3 = Classification.DTC(2,3) l4 = Classification.KNC(2,3) l5 = Classification.RFC(2,3) l6 = Classification.MLP(2,3) l7 = Classification.ABC(2,3) l8 = Classification.GNB(2,3) l9 = Classification.QDA(2,3) l10 = Classification.SGD(2,3) l11= NNC.NNC(2,3) df1 = pd.DataFrame(data = {"LogReg": l1, "SVM": l2, "DTC": l3, "KNC": l4, "RFC": l5,"MLP": l6, "ABC": l7, "GNB": l8, "QDA": l9, "SGD": l10, "NNC":l11}, index = [".9", ".8", ".5", ".25"]) print(df1) #%% print ("9 features") m1 = Classification.LogReg(2,11) m2 = Classification.SVM(2,11) m3 = Classification.DTC(2,11) m4 = Classification.KNC(2,11)
def multiple_FR_plot(dir_name, file_names, labels, s_param, plot_range, plt_title='', plt_name='', loudness=False): # plot the measured S21 data at multiple biases try: HOME = os.getcwd() if os.path.isdir(dir_name): os.chdir(dir_name) # test inputs for validity c1 = True if file_names is not None else False c2 = True if labels is not None else False c3 = True if len(file_names) == len(labels) else False c4 = True if plot_range is not None else False c5 = True if len(plot_range) == 4 else False c6 = True if c1 and c2 and c3 and c4 and c5 else False if c6: T = 0.0 VEAM = 0.0 name = "" hv_data = [] markers = [] count = 0 for i in range(0, len(file_names), 1): if glob.glob(file_names[i]): s2pdata = read_s2p_file(file_names[i]) # read s2p data from file hv_data.append([s2pdata[1], s2pdata[s_param]]) # store data needed for plot markers.append(Plotting.labs_lins[i % len( Plotting.labs_lins)]) # make a list of markers del s2pdata else: # this will raise an exception below print "\nError: FR_Analysis.multiple_FR_plot()\nCould not locate:", file_names[ i] # Need to have number of data sets equal to number of labels for plotting methods to work if len(hv_data) == len(labels): args = Plotting.plot_arg_multiple() args.loud = loudness args.crv_lab_list = labels args.mrk_list = markers args.x_label = 'Frequency (GHz)' # assign y-axis label based on S-parameter being plotted if s_param == 2: args.y_label = '$S_{11}$ (dB)' elif s_param == 3: args.y_label = '$S_{21}$ (dB)' elif s_param == 4: args.y_label = '$S_{12}$ (dB)' elif s_param == 5: args.y_label = '$S_{22}$ (dB)' else: args.y_label = '$S_{12}$ (dB)' args.plt_range = plot_range args.plt_title = plt_title args.fig_name = plt_name if s_param == 2: args.fig_name += "_S11" elif s_param == 3: args.fig_name += "_S21" elif s_param == 4: args.fig_name += "_S12" elif s_param == 5: args.fig_name += "_S22" else: args.fig_name += "_S21" Plotting.plot_multiple_curves(hv_data, args) del hv_data del markers else: raise Exception os.chdir(HOME) else: raise Exception else: raise EnvironmentError except EnvironmentError: print "\nError: FR_Analysis.multiple_FR_plot()" print "Cannot find", dir_name except Exception: print "\nError: FR_Analysis.multiple_FR_plot()" if c1 == False: print "dir_names not assigned correctly" if c2 == False: print "labels not assigned correctly" if c3 == False: print "dir_names and labels have different lengths" if c4 == False: print "range not assigned correctly" if c5 == False: print "range does not have correct length"
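# A compact alternative to the two if/elif ladders above, sketched with a single
# lookup table. Note that the original's two else-branches disagree (the label
# defaults to S12 but the file suffix defaults to S21); the sketch picks S21 for
# both, matching the fig_name default.
S_PARAM_NAMES = {2: 'S_{11}', 3: 'S_{21}', 4: 'S_{12}', 5: 'S_{22}'}

def s_param_strings(s_param):
    name = S_PARAM_NAMES.get(s_param, 'S_{21}')
    y_label = '$%s$ (dB)' % name
    fig_suffix = '_' + name.replace('_{', '').replace('}', '')
    return y_label, fig_suffix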
def closeSpecificPlot(self, plot): plot.remove() self.scene.removeItem(plot) self.plots.remove(plot) del plot
def main_aircraft_processing(opts):
    sat_dir = opts[0]
    flt_fil = opts[1]
    sensor = opts[2]
    flt_typ = opts[3]
    out_dir = opts[4]
    beg_t = opts[5]
    end_t = opts[6]
    mode = opts[7]
    comp = opts[8]
    lat_bnd = opts[9]
    lon_bnd = opts[10]
    sat_cmap = opts[11]
    bg_col = opts[12]
    ac_se_col = opts[13]
    ac_cmap = opts[14]
    ac_mina = opts[15]
    ac_maxa = opts[16]
    ac_pos_col = opts[17]
    txt_col = opts[18]
    txt_size = opts[19]
    txt_pos = opts[20]
    cache_dir = opts[21]
    res = opts[22]
    linewid = opts[23]
    dotsiz = opts[24]
    singlep = opts[25]

    if singlep:
        print("Beginning processing for single point.")
    else:
        print("Beginning processing for trajectory.")

    if flt_typ == 'CSV':
        ac_traj = indata.read_aircraft_csv(flt_fil, beg_t, end_t)
    ac_traj2 = utils.interp_ac(ac_traj, '30S')
    print("\t-\tLoaded aircraft trajectory.")
    if singlep:
        plot_bounds = utils.calc_bounds_sp(ac_traj, lat_bnd, lon_bnd)
    else:
        plot_bounds = utils.calc_bounds_traj(ac_traj, lat_bnd, lon_bnd)
    n_traj_pts2 = len(ac_traj2)
    area = utils.create_area_def(plot_bounds, res)
    start_t, end_t, tot_time = utils.get_startend(ac_traj, sensor, mode)
    prev_time = datetime(1850, 1, 1, 0, 0, 0)
    old_scn = None
    sat_img = None
    for i in range(2, n_traj_pts2):
        cur_time = ac_traj2.index[i]
        print('\t-\tNow processing', cur_time)
        sat_time = utils.get_cur_sat_time(cur_time, sensor, mode)
        if sat_time != prev_time:
            print('\t-\tLoading satellite data for', sat_time)
            sat_img = indata.load_sat(sat_dir, sat_time, comp, sensor, area, cache_dir, mode)
            if sat_img is None and old_scn is not None:
                # fall back to the previous scene if this load failed
                sat_img = old_scn
            elif sat_img is None:
                print("ERROR: No satellite data for", sat_time)
            old_scn = sat_img
            prev_time = sat_time
        else:
            print('\t-\tSatellite data already loaded for', sat_time)
        print('\t-\tPlotting and saving results')
        fig = acplot.setup_plot(plot_bounds, bg_col, linewid)
        if sat_img is not None:
            fig = acplot.overlay_sat(fig, sat_img, comp, sat_cmap)
        fig = acplot.overlay_startend(fig, ac_traj2, ac_se_col, dotsiz)
        if not singlep:
            fig = acplot.overlay_ac(fig, ac_traj2, i, ac_cmap, ac_mina, ac_maxa, linewid)
        fig = acplot.add_acpos(fig, ac_traj2, i, ac_pos_col, dotsiz)
        fig = acplot.overlay_time(fig, cur_time, txt_col, txt_size, txt_pos)
        acplot.save_output_plot(out_dir + str(i - 1).zfill(4) + '_' + comp + '.png', fig, 600)
        fig.clf()
        plt.close(fig)  # Figure objects have no .close(); assuming matplotlib, close via pyplot
    print("Completed processing")
bounds = [(1, 1000)] * len(initial) a = datetime.datetime.now() result = tuner.tune(initial, bounds, simple_tune=True) b = datetime.datetime.now() print("Total time: ", b - a) print(result) else: df = sim.simulate(Ysp_fun, t_sim, Udv=Udv, save_data="data/cons", live_plot=False) tuner = Tuner.Tuner(sim, Ysp_fun, t_sim, Udv=Udv, error_method="ISE") tuner.df = df print(tuner.ISE()) Plotting.plot_all(df, show=False) import matplotlib.pyplot as plt plt.subplot(2, 1, 1) plt.legend([r"$T_4$", r"$T_{14}$"]) plt.subplot(2, 1, 2) plt.plot(df.ts, df.dv_1, '--') plt.legend([r"$F_R$", r"$F_S$", r"$X_F$"]) plt.ylim(ymin=0) plt.savefig("data/cons") plt.show()
def chgrad(self, plot=False, **kwargs):
    for k, v in kwargs.items():
        self._setfield(k, v)
    pflat = self._reset()
    out = sstruct()
    out.params = self._p0.copy()
    out._setfield('f.exact', self._f(pflat, force1D=True))
    out._setfield('df.exact', self._df(pflat, force2D=True))
    out._setfield('df.approx', out.df.exact * numpy.nan)
    try:
        out._setfield('ddf.exact', self._ddf(pflat, force2D=True))
    except NoSubclassMethod:
        do_ddf = False
    else:
        do_ddf = True
        out._setfield('ddf.approx', out.ddf.exact * numpy.nan)
    try:
        out._setfield('H.exact', self._H(pflat))
    except NoSubclassMethod:
        do_hessian = False
    else:
        do_hessian = True
        out._setfield('H.approx', out.H.exact * numpy.nan)
    fdelta = 1e-4
    e = fdelta * abs(pflat)
    e = numpy.fmax(1e-12, e)
    keepgoing = (e > 0)  # all True to start
    rpt = 0
    while keepgoing.any():
        rpt += 1
        for iparam in range(self._np):
            mask = pflat * 0.0
            mask.flat[iparam] = 0.5
            p_below = pflat - e * mask
            p_above = pflat + e * mask
            f_below = self._f(p_below)
            f_above = self._f(p_above)
            adfj = (f_above - f_below) / e[iparam]
            out.df.approx[:, iparam] = adfj  # TODO(III): store data from multiple rpts?
            if do_hessian or do_ddf:
                df_below = self._df(p_below)
                df_above = self._df(p_above)
                addfj = (df_above - df_below) / e[iparam]
                if do_ddf:
                    out.ddf.approx[:, iparam] = addfj[:, iparam]  # TODO(III): store data from multiple rpts?
                if do_hessian:
                    out.H.approx[:, iparam].flat = addfj.flat  # TODO(III): store data from multiple rpts?
            converged = True  # TODO(III): could check for convergence here. currently, only one iteration is performed
            if converged:
                keepgoing[iparam] = False
        e[keepgoing] /= 2
    unflatten(self, pflat, include=self._free)
    self._reset()
    out.plabels = ['p' + x for x in flatten(self, include=self._free, labels=True)]
    out.flabels = ['f' + x for x in flatten(self.eval(), include=self._free, labels=True)]
    fields = ['df', 'ddf', 'H']
    for k in fields:
        v = out._getfield(k, None)
        if v is None:
            continue
        v.abserr = abs(v.exact - v.approx)
        v.maxabserr = v.abserr.max()
        v.propabserr = v.abserr / numpy.fmax(abs(v.approx), 1e-13)
        v.maxpropabserr = v.propabserr.max()
    if plot:
        import Plotting
        pylab = Plotting.load_pylab()
        fields = ['df', 'ddf', 'H']
        labels = {
            'df': (out.flabels, out.plabels, 'd%s / d%s'),
            'ddf': (out.flabels, out.plabels, 'd^2%s / d%s^2'),
            'H': (out.plabels, out.plabels, 'df/(d%s)(d%s)'),
        }
        got = dict([(k, k in out._fields) for k in fields])
        nplots = sum(got.values())
        for i, k in enumerate(fields):
            v = out._getfield(k, None)
            if v is None:
                continue
            if nplots > 1:
                pylab.subplot(1, nplots, i + 1)
            x = abs(v.approx.flatten())
            y = v.abserr.flatten()
            both0 = numpy.logical_and(x == 0.0, y == 0.0)
            x[both0] = numpy.nan
            y[both0] = numpy.nan
            x[x == 0.0] = 1e-13
            y[y == 0.0] = 1e-13
            x = numpy.log10(x)
            y = numpy.log10(y)
            pylab.cla()
            ax = pylab.gca()
            ax.indicator, = pylab.plot([numpy.nan, numpy.nan], [numpy.nan, numpy.nan], color=(1, 0, 1), linewidth=2)
            v.handles = pylab.plot(numpy.expand_dims(x, 0), numpy.expand_dims(y, 0),
                                   linestyle='None', marker='o', markersize=10, picker=5)
            lab = labels[k]
            for j, h in enumerate(v.handles):
                subs = numpy.unravel_index(j, v.approx.shape)
                h.txt = (lab[2] + '\nprop. abs. err. = %g') % (lab[0][subs[0]], lab[1][subs[1]], v.propabserr.flat[j])
            ax.set_xlabel('log10 of abs approx value')
            ax.set_ylabel('log10 of abs error')
            ax.grid(True)

        def onpick(evt):
            h = evt.artist
            x, y = h.get_xdata(), h.get_ydata()
            ax = h.axes
            ax.set_title(h.txt)
            ind = ax.indicator
            xl, yl = numpy.asarray(ax.get_xlim()), numpy.asarray(ax.get_ylim())
            ind.set_xdata([x.mean(), xl.mean()])
            ind.set_ydata([y.mean(), yl.max() + numpy.diff(yl) * 0.01])
            ind.set_clip_on(False)
            ind.set_color(h.get_color())
            pylab.draw()
        pylab.gcf().canvas.mpl_connect('pick_event', onpick)
        pylab.draw()
    return out
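# For reference, the numerical check performed by chgrad() above is a symmetric
# (central) difference per parameter, with the step e halved between sweeps:
#
#   df/dp_j  ~=  [ f(p + (e_j/2) u_j) - f(p - (e_j/2) u_j) ] / e_j
#
# where u_j is the unit vector in parameter j (the mask value of 0.5 places the
# two evaluation points at p +/- e/2). The same stencil applied to _df yields
# the approximations to ddf and the Hessian columns.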
def plot_layer_lengths_versus_width_ratio(): # make a plot of reflectivity versus waveguide width ratio # choose \lambda = 1590 nm # R. Sheehan 3 - 4 - 2018 FUNC_NAME = ".plot_layer_lengths_versus_width_ratio()" # use this in exception handling messages ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME try: Wh_list = range(500, 2500, 500) Wl = 250; # Extract the data from the files hv_data = []; labels = []; mark_list = []; Nl = 5 wratio = []; rvals = []; l1vals = []; l2vals = []; for Wh in Wh_list: wratio.append(Wh/Wl) data = read_DBR_data_file(Wl, Wh, 'Ey', Nl) rvals.append(data[7][3]); l1vals.append(data[4][3]); l2vals.append(data[6][3]); hv_data.append([wratio, rvals]); hv_data.append([wratio, l1vals]); hv_data.append([wratio, l2vals]); labels.append('$E_{y}$'); labels.append('$E_{y}$ $L_{W1}$'); labels.append('$E_{y}$ $L_{W2}$'); mark_list.append(Plotting.labs[0]); mark_list.append(Plotting.labs[1]); mark_list.append(Plotting.labs[2]); wratio = []; rvals = []; l1vals = []; l2vals = []; for Wh in Wh_list: wratio.append(Wh/Wl) data = read_DBR_data_file(Wl, Wh, 'Ex', Nl) rvals.append(data[7][3]); l1vals.append(data[4][3]); l2vals.append(data[6][3]); hv_data.append([wratio, rvals]); hv_data.append([wratio, l1vals]); hv_data.append([wratio, l2vals]); labels.append('$E_{x}$'); labels.append('$E_{x}$ $L_{W1}$'); labels.append('$E_{x}$ $L_{W2}$'); mark_list.append(Plotting.labs_dashed[0]); mark_list.append(Plotting.labs_dashed[1]); mark_list.append(Plotting.labs_dashed[2]); if len(hv_data) > 0: args = Plotting.plot_arg_multiple() args.loud = True args.x_label = "Waveguide Rib Width Ratio $W_{h}/W_{l}$" args.y_label = "Grating Period $\Lambda$ (nm)" args.mrk_list = mark_list args.crv_lab_list = labels args.plt_range = [2, 8, 100, 500] args.fig_name = 'Grating_Period_W_Ratio' Plotting.plot_multiple_curves(hv_data, args) del data; del hv_data; del labels; del mark_list; del args; else: raise Exception except Exception: print ERR_STATEMENT
# Get the data testlist = datadict[objecttype][objectid] test_data = [objdatapair[1] for objdatapair in testlist] test_obs = [objdatapair[0] for objdatapair in testlist] label = 1 if objecttype == "BH" else 0 test_labels = [label] * len(test_data) train_data, train_labels = dt.gettraindata(datadict, objectid) s = np.arange(train_labels.shape[0]) np.random.shuffle(s) train_data = train_data[s] train_labels = train_labels[s] train_labels = array(train_labels) test_labels = array(test_labels) print("\nTesting: " + objectid + '\n') accuracylist = [] if (sys.argv[1]) == "RF": accuracylist = tr.trainRF(train_data, test_data, train_labels, test_labels, objectid, label, rebinning) if (sys.argv[1]) == "NN": accuracylist = tr.trainNN(train_data, test_data, train_labels, test_labels, objectid, label, rebinning, int(nodes)) # Save data with open(targetdir + learntype + rebinning + '.txt', 'a') as accfile: accfile.write("Accuracy: " + str(objectid) + "\n" + str(accuracylist) + '\n_________________________\n') # Plot 3D colorcolor Plotting.plotccd(objectid, accuracylist, test_obs, objecttype, targetdir, colorpath)
def plot_all_in_dir(dir_to_plot, Test):
    dirCreateClean(dir_to_plot, ["*.pdf"])
    os.chdir(dir_to_plot)
    filenames = glob.glob(os.path.join(dir_to_plot, '*.out'))
    for filename in filenames:
        plot = pl.Plotting()
        plot.read_data(os.path.join(dir_to_plot, filename))
        if 'APT' in Test:
            plot.subplot_spec(0, (0, 'POC F'), title='Frequency', ylabel='Frequency (Hz)', scale=50.0, offset=50.0)
            plot.subplot_spec(1, (0, 'POC V'), title='Voltage', ylabel='Voltage (pu)', scale=1.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 50[ELAINE_WTG1 0.6500]1'), title='Active Power at WTG', ylabel='Active Power(MW)', scale=100.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(3, (0, 'P FLOW FROM POC'), title='Active Power at POC', ylabel='Active Power(MW)', scale=1.0, offset=0.0)
            plot.subplot_spec(3, (0, 'PSET IN PU'), scale=83.6)
            plot.subplot_spec(4, (0, 'VARS 50[ELAINE_WTG1 0.6500]1'), title='Reactive Power at WTG', ylabel='Reactive power(MVar)', scale=100.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(5, (0, 'Q FLOW FROM POC'), title='Reactive Power at POC', ylabel='Reactive power(MVar)', scale=1.0, offset=0.0)
        elif 'RPT' in Test:
            plot.subplot_spec(0, (0, 'POC F'), title='Frequency', ylabel='Frequency (Hz)', scale=50.0, offset=50.0)
            plot.subplot_spec(1, (0, 'POC V'), title='Voltage', ylabel='Voltage (pu)', scale=1.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 50[ELAINE_WTG1 0.6500]1'), title='Active Power at WTG', ylabel='Active Power(MW)', scale=100.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(3, (0, 'P FLOW FROM POC'), title='Active Power at POC', ylabel='Active Power(MW)', scale=1.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 50[ELAINE_WTG1 0.6500]1'), title='Reactive Power at WTG', ylabel='Reactive power(MVar)', scale=100.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(5, (0, 'Q FLOW FROM POC'), title='Reactive Power at POC', ylabel='Reactive power(MVar)', scale=1.0, offset=0.0)
            plot.subplot_spec(5, (0, 'QSET IN PU'), scale=83.6)
            plot.subplot_spec(5, (0, 'Qref after limiter'), scale=83.6)
        elif 'VCT' in Test or 'VDT' in Test:
            # plot.subplot_spec(0, (0, 'POC F'), title='Frequency', ylabel='Frequency (Hz)', scale=50.0, offset=50.0)
            plot.subplot_spec(0, (0, 'VSET IN PU'), title='Voltage', ylabel='Voltage (pu)', scale=1.0, offset=0.0)
            plot.subplot_spec(1, (0, 'POC V'), title='Voltage', ylabel='Voltage (pu)', scale=1.0, offset=0.0,
                              rstime=[50, 59, '', ''])  # , plot_Vdroop=[32, 10, 0.05, 33.022, 0.005])
            # plot_Vdroop = POC_V_ref_Channel_id, Q_POC_Channel_id, QDROOP, SBASE, deadband
            # plot.subplot_spec(0, (0, 'VSET IN PU'))
            plot.subplot_spec(2, (0, 'POWR 50[ELAINE_WTG1 0.6500]1'), title='Active Power at WTG', ylabel='Active Power(MW)', scale=100.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(3, (0, 'P FLOW FROM POC'), title='Active Power at POC', ylabel='Active Power(MW)', scale=1.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 50[ELAINE_WTG1 0.6500]1'), title='Reactive Power at WTG', ylabel='Reactive power(MVar)', scale=100.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(5, (0, 'Q FLOW FROM POC'), title='Reactive Power at POC', ylabel='Reactive power(MVar)', scale=1.0, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+5) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+9) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+25) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+79) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+81) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+94) IN PU'), scale=83.6, offset=0.0)
            # plot.subplot_spec(5, (0, 'QSET(L+96) IN PU'), scale=83.6, offset=0.0)
        elif 'FCT' in Test:
            plot.subplot_spec(0, (0, 'SYSTEM FREQUENCY'), title='Frequency', ylabel='Frequency (Hz)', scale=50.0, offset=50.0)
            plot.subplot_spec(1, (0, 'POC_VOLTAGE'), title='Voltage', ylabel='Voltage (pu)', scale=1.0, offset=0.0)
            plot.subplot_spec(2, (0, 'WTG_PELEC_101'), title='Active Power at WTG', ylabel='Active Power(MW)', scale=100.0, offset=0.0)
            plot.subplot_spec(2, (0, 'WTG_PELEC_102'), scale=100.0)
            plot.subplot_spec(3, (0, 'P_POC'), title='Active Power at POC', ylabel='Active Power(MW)', scale=1.0, offset=0.0)
            plot.subplot_spec(4, (0, 'WTG_QELEC_101'), title='Reactive Power at WTG', ylabel='Reactive power(MVar)', scale=100.0, offset=0.0)
            plot.subplot_spec(4, (0, 'WTG_QELEC_102'), scale=100.0)
            plot.subplot_spec(5, (0, 'Q_POC'), title='Reactive Power at POC', ylabel='Reactive power(MVar)', scale=1.0, offset=0.0)
        else:
            plot.subplot_spec(0, (0, 'POC F'), title='Frequency', ylabel='Frequency (Hz)', scale=50.0, offset=50.0)
            plot.subplot_spec(1, (0, 'POC V'), title='Voltage', ylabel='Voltage (pu)', scale=1.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 50[ELAINE_WTG1 0.6500]1'), title='Active Power at WTG', ylabel='Active Power(MW)', scale=100.0, offset=0.0)
            plot.subplot_spec(2, (0, 'POWR 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(3, (0, 'P FLOW FROM POC'), title='Active Power at POC', ylabel='Active Power(MW)', scale=1.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 50[ELAINE_WTG1 0.6500]1'), title='Reactive Power at WTG', ylabel='Reactive power(MVar)', scale=100.0, offset=0.0)
            plot.subplot_spec(4, (0, 'VARS 52[ELAINE_WTG3 0.6500]1'), scale=100.0)
            plot.subplot_spec(5, (0, 'Q FLOW FROM POC'), title='Reactive Power at POC', ylabel='Reactive power(MVar)', scale=1.0, offset=0.0)
        plot.plot(figname=os.path.splitext(filename)[0], show=0)
    os.chdir(cwd)
def general_fft_plot(): # make a plot of the general FFT that is computed # R. Sheehan 9 - 8 - 2019 FUNC_NAME = ".general_fft_plot()" # use this in exception handling messages ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME try: time_file = "Time_Data.txt" spct_file = "Spec_Data.txt" frq_file = "LLM_FFT_Frq_data.txt" fft_file = "LLM_FFT_Abs_FFT_data.txt" #time_file = "HSSCP_Time.csv" #spct_file = "HSSCP_SPCT.csv" #frq_file = "HSSCP_SPCT_Frq_data.csv" #fft_file = "HSSCP_SPCT_Abs_FFT_data.csv" if glob.glob(time_file) and glob.glob(spct_file) and glob.glob( frq_file) and glob.glob(fft_file): # plot the time series time_data = np.loadtxt(time_file, unpack=True) spct_data = np.loadtxt(spct_file, unpack=True) # scale time values to ns #time_data = 1.0e+9*time_data # scale voltage values to mV #spct_data = 1.0e+3 * spct_data args = Plotting.plot_arg_single() args.loud = True args.x_label = 'Time / ns' args.y_label = 'Signal / mV' args.marker = 'r-' args.fig_name = 'Signal_Data' #Plotting.plot_single_curve(time_data, spct_data, args) del time_data del spct_data # plot the computed FFT frq_data = np.loadtxt(frq_file, unpack=True) spct_data = np.loadtxt(fft_file, unpack=True) # scale frq values to GHz #frq_data = 1.0e-9 * frq_data #frq_data = 1.0e+3 * frq_data args.loud = True #args.x_label = 'Frequency / GHz' args.x_label = 'Time / us' args.y_label = 'Signal FFT' args.marker = 'g-' args.fig_name = 'Signal_FFT' args.plt_range = [0, 1, 0, 2000] Plotting.plot_single_curve(frq_data, spct_data, args) del frq_data del spct_data else: ERR_STATEMENT = ERR_STATEMENT + "\nInput file not found" raise Exception except Exception as e: print(ERR_STATEMENT) print(e)
result = knn.predict(testDataFeatures)
time1 = time()
runningTime = time1 - time0
print("Result Is Ok.")

# sklearn's binary confusion matrix ravels in the order tn, fp, fn, tp
tn, fp, fn, tp = confusion_matrix(test["class"], result).ravel()
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)

# evaluate model
print("________________________________________________")
print('AUC: %.2f' % roc_auc_score(test["class"], result))
print('Accuracy: %.2f' % accuracy_score(test["class"], result))
print('Sensitivity: %.2f' % sensitivity)
print('Specificity: %.2f' % specificity)
print('F-measure: %.2f' % f1_score(test["class"], result, average='binary'))
print("Running Time: %.2f Seconds" % runningTime)
print("________________________________________________")

# visualize the results
Plotting.plot_confusion_matrix(test["class"], result,
                               classes=['Political', 'Non-political'],
                               title='Confusion matrix for KNN Classifier')
Plotting.plot_RUC(test["class"], result,
                  title='ROC curve for KNN Classifier')  # title fixed: this is the ROC plot, not the confusion matrix
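# A minimal self-contained check of the metric arithmetic above: sklearn's
# binary confusion matrix ravels in the order tn, fp, fn, tp.
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 1, 0]
y_pred = [0, 1, 1, 0, 1, 0]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tn, fp, fn, tp)      # 2 1 1 2
print(tp / (tp + fn))      # sensitivity (recall) = 2/3
print(tn / (tn + fp))      # specificity = 2/3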
def setUp(self): self.job = Plotting.Plotting()
def final_data(exp, sequences, mass, runtime): """Output final run statistics""" import Parameters """Write Polymer list to file""" filename = ('%s/%i_run_statistics.txt' % (Parameters.dirname, exp)) file = open(filename, 'w') file.write("Run parameters \n") s = 'Total mass = ' + str(mass) + '\n' file.write(s) s = 'Length of Sequences = ' + str(Parameters.R_L) + '\n' file.write(s) s = 'kr = ' + str(Parameters.kr) + '\n' file.write(s) s = 'kh = ' + str(Parameters.kh) + '\n' file.write(s) s = 'km = ' + str(Parameters.km) + '\n' file.write(s) s = 'kc = ' + str(Parameters.kc) + '\n' file.write(s) s = 'ks = ' + str(Parameters.ks) + '\n' file.write(s) s = 'mu = ' + str(Parameters.mu) + '\n' file.write(s) s = 'k_ch = ' + str(Parameters.k_ch) + '\n' file.write(s) s = 'k_ca = ' + str(Parameters.k_ca) + '\n' file.write(s) s = 'k_cs = ' + str(Parameters.k_cs) + '\n' file.write(s) s = 'Run seed = ' + str(Parameters.run_seed) + '\n' #file.write(s) #s = 'Number of Steps = ' + str(Parameters.tot_step) + '\n' + '\n' file.write("Information about kMC run including run statistics: \n \n") file.write("Total run time = ") s= str(runtime) + ' sec' file.write(s) file.write('\n') file.write('\n') file.write("Number of replication events = ") s = str(Parameters.kr_events) file.write(s) file.write('\n') file.write("Number of error-prone replication events = ") s = str(Parameters.mr_events) file.write(s) file.write('\n') file.write('\n') file.write("Number of degradation events = ") s = str(Parameters.kh_events) file.write(s) file.write('\n') file.write("Number of mutagenic events ") s = str(Parameters.km_events) file.write(s) file.write('\n') file.write('\n') file.write("Number of cross-over/recombination events ") s = str(Parameters.kc_events) file.write(s) file.write('\n') file.write('\n') file.write("Number of spontaneous assembly events ") s = str(Parameters.ks_events) file.write(s) file.write('\n') file.write('\n') file.write("Number of catalyzed recycling events ") s = str(Parameters.kch_events) file.write(s) file.write('\n') file.write('\n') file.write("Number of catalyzed recombination events ") s = str(Parameters.kca_events) file.write(s) file.write('\n') file.write('\n') file.write("Number of catalyzed assembly events ") s = str(Parameters.kcs_events) file.write(s) file.write('\n') file.write('\n') file.write("Final number of monomers = ") s = str(Parameters.Nmono) file.write(s) file.write('\n') file.write("Final number of polymers = ") s = str(Parameters.Npoly) file.write(s) file.write('\n') file.write("Null Replication events = ") s = str(Parameters.null_event) file.write(s) file.write('\n') file.close() ''' Save a list of all initial and final replicator composition at the end of the run ''' initial_composition = [0, 0, 0, 0] surviving_composition = [0, 0, 0, 0] filename = ('%s/%i_surviving_species.dat' % (Parameters.dirname, exp)) file = open(filename, 'w') for ID in range(len(sequences)): if sequences[ID].tot_count !=0: s = sequences[ID].sequence + ' ' +str(sequences[ID].seq_ID) surviving_composition = np.add(sequences[ID].tot_count*np.array(sequences[ID].seq_list), surviving_composition) file.write(s) file.write('\n') file.close() for ID in range(len(Parameters.initial_replicators)): initial_composition = np.add(Parameters.R_N*np.array(sequences[ID].seq_list), initial_composition) compositions = zip(initial_composition, surviving_composition) filename = ('%s/%i_compositions.dat' % (Parameters.dirname, exp)) np.savetxt(filename, compositions) if Parameters.output_plots == True: import Plotting 
Plotting.output_plots(exp, sequences, Parameters.catalysts, Parameters.substrates)
def setUp(self): self.job = Plotting.Plotting() self.job.add_option(["Output", "test"]) self.job.add_option(["Description", "test"]) self.job.add_option(["Input", "test"]) self.job.add_option(["CutFile", "test"])
def get_plot(self, xmin, xmax, ymin=None, ymax=None, lineshape='Lorentz', hw=None, include_linespec=True, scale_linespec=0.2, spectype='General', label_maxima=True, label_minima=None, boxes=None): if label_minima is None: if spectype == 'ROA': label_min = True else: label_min = False else: label_min = label_minima xmi = min(xmin, xmax) xma = max(xmin, xmax) spec = [] style = [] lw = [] if include_linespec: spec.append(self.get_line_spectrum(xmi, xma, scale=scale_linespec)) style.append('g-') lw.append(0.5) else: spec.append((numpy.array([xmi, xma]), numpy.array([0., 0.]))) style.append('g-') lw.append(0.5) if lineshape == 'Lorentz': if hw is None: spec.append(self.get_lorentz_spectrum(xmi, xma)) else: spec.append(self.get_lorentz_spectrum(xmi, xma, halfwidth=hw)) elif lineshape == 'Gaussian': if hw is None: spec.append(self.get_gaussian_spectrum(xmi, xma)) else: spec.append(self.get_gaussian_spectrum(xmi, xma, halfwidth=hw)) style.append('k-') lw.append(2.0) xlims = [xmin, xmax] ylims = [ymin, ymax] if ymin is None: ylims[0] = (spec[0][1]).min() * 1.1 if ymax is None: ylims[1] = (spec[0][1]).max() * 1.1 pl = Plotting.SpectrumPlot() pl.plot(spec, style=style, lw=lw, spectype=spectype, xlims=xlims, ylims=ylims) if label_maxima: maxima = self.get_band_maxima(spec[1][0], spec[1][1]) pl.add_peaklabels(zip(maxima, [1] * len(maxima))) if label_min: minima = self.get_band_minima(spec[1][0], spec[1][1]) pl.add_peaklabels(zip(minima, [-1] * len(minima))) if boxes is not None: bands, names = boxes for b, n in zip(bands, names): fmin = numpy.min(self.freqs[b]) fmax = numpy.max(self.freqs[b]) indices = numpy.where((spec[-1][0] > fmin) & (spec[-1][0] < fmax)) imin = min(0.0, numpy.min(spec[-1][1][indices])) imax = max(0.0, numpy.max(spec[-1][1][indices])) pl.add_box(fmin, fmax, imin, imax, n) return pl
if forshortreads == True:
    vfile = os.getcwd() + '/human_TRBV_region.fasta'
    jfile = os.getcwd() + '/human_TRBJ_region.fasta'
    v_key, j_key, v_regions, j_regions = ShortReads.setup(vfile, jfile)
    #infile = str(inputfile)
    fileid = str(outputfile)
    param_set = [10, 2, 1400, 1.05]
    ShortReads.analyse_file(inputfile, newpath, fileid, v_key, j_key, v_regions, j_regions, param_set)
else:
    f.analysis(
        inputfile,
        outputfile,
        with_reverse_complement_search=revsearch,
        barcode=barcoding,
        barcodestart1=barcodestart1,
        barcodeend1=barcodeend1,
        barcodestart2=barcodestart2,
        barcodeend2=barcodeend2,
        newpath=newpath,
        omitN=True
    )

if include_plots == True:
    print 'Plotting the results of the analysis'
    if forshortreads == True:
        if os.stat(newpath + outputfile + '_beta' + '.txt').st_size != 0:
            # if the file is non-empty, i.e. if TcR chain seqs were found...
            p.plot_v_usage(open(newpath + outputfile + '_beta' + '.txt', "rU"), chain='beta', savefilename=newpath + 'Vusage', order="frequency")
            p.plot_j_usage(open(newpath + outputfile + '_beta' + '.txt', "rU"), chain='beta', savefilename=newpath + 'Jusage', order="frequency")
            p.plot_del_v(open(newpath + outputfile + '_beta' + '.txt', "rU"), savefilename=newpath + 'Vdels')
            p.plot_del_j(open(newpath + outputfile + '_beta' + '.txt', "rU"), savefilename=newpath + 'Jdels')
            # chain is fixed to 'beta' here; the loop variable `chain` only exists in the branch below
            p.plot_vj_joint_dist(open(newpath + outputfile + '_beta' + '.txt', "rU"), chain='beta', savefilename=newpath + 'VJusage')
            p.plot_insert_lengths(open(newpath + outputfile + '_beta' + '.txt', "rU"), savefilename=newpath + 'InsertLengths')
    else:
        chains = ['alpha', 'beta', 'delta', 'gamma']
        for chain in chains:
            if os.stat(newpath + outputfile + '_' + chain + '.txt').st_size != 0:
                # if the file is non-empty, i.e. if TcR chain seqs were found...
                p.plot_v_usage(open(newpath + outputfile + '_' + chain + '.txt', "rU"), chain=chain, savefilename=newpath + 'Vusage' + chain, order="frequency")
                p.plot_j_usage(open(newpath + outputfile + '_' + chain + '.txt', "rU"), chain=chain, savefilename=newpath + 'Jusage' + chain, order="frequency")
                p.plot_del_v(open(newpath + outputfile + '_' + chain + '.txt', "rU"), savefilename=newpath + 'Vdels' + chain)
                p.plot_del_j(open(newpath + outputfile + '_' + chain + '.txt', "rU"), savefilename=newpath + 'Jdels' + chain)
                p.plot_vj_joint_dist(open(newpath + outputfile + '_' + chain + '.txt', "rU"), chain=str(chain), savefilename=newpath + 'VJusage' + chain)
                p.plot_insert_lengths(open(newpath + outputfile + '_' + chain + '.txt', "rU"), savefilename=newpath + 'InsertLengths' + chain)
from tinamit.EnvolturasBF.SAHYSMOD.envoltura import leer_arch_egr import Plotting Path = 'D:\\Mars_new' Haveli_Circle = Plotting.leer_datos(nombre='Haveli', archivo=Path) Dataoes = Haveli_Circle.leer_arch_data(Path, 'Cr4#', 'A#', 'B#', 'Dw#', 'Cqf#') #Result = {} #Data = leer_arch_egr('D:\\Mars_new\\1\\1.out',2,215,1) print(Dataoes)
def multiple_optical_spectrum_plot(dir_name, file_names, labels, plot_range, plt_title = '', plt_name = '', loudness = False): # make a plot of a set of measured optical spectra # R. Sheehan 30 - 8 - 2017 try: HOME = os.getcwd() # Get current directory if os.path.isdir(dir_name): # check if dir_name is valid directory os.chdir(dir_name) # if dir_name is valid location move there # test inputs for validity c1 = True if file_names is not None else False c2 = True if labels is not None else False c3 = True if len(file_names) == len(labels) else False c4 = True if plot_range is not None else False c5 = True if len(plot_range) == 4 else False c6 = True if c1 and c2 and c3 and c4 and c5 else False if c6: delim = '\t' # should include something to check what delimiter is from the file being read hv_data = []; mark_list = [] for i in range(0, len(file_names), 1): if glob.glob(file_names[i]): # ensure that file in list exists #data = Common.read_matrix(file_names[i], delim) #data = Common.transpose_multi_col(data) # read the data from the file as it comes from the OSA data = numpy.loadtxt(file_names[i], delimiter = ',', skiprows = 3, unpack = True, max_rows = 2001) hv_data.append(data); mark_list.append(Plotting.labs_lins[i%len(Plotting.labs_lins)]); else: # this will raise an exception below print("\nError: SpctrmPlt.multiple_optical_spectrum_plot()\nCould not locate:",file_names[i]) # Need to have number of data sets equal to number of labels for plotting methods to work if len(hv_data) == len(labels): arguments = Plotting.plot_arg_multiple() arguments.loud = loudness arguments.x_label = 'Wavelength (nm)' arguments.y_label = 'Spectral Power (dBm/0.01 nm)' arguments.plt_range = plot_range arguments.crv_lab_list = labels arguments.mrk_list = mark_list arguments.plt_title = plt_title arguments.fig_name = plt_name Plotting.plot_multiple_curves(hv_data, arguments) del hv_data; del mark_list; else: raise Exception os.chdir(HOME) else: raise Exception else: raise EnvironmentError except EnvironmentError: print("\nError: SpctrmPlt.multiple_optical_spectrum_plot()") print("Error: Could not find",dir_name) except Exception as e: print("\nError: SpctrmPlt.multiple_optical_spectrum_plot()") if c1 == False: print("dir_names not assigned correctly") if c2 == False: print("labels not assigned correctly") if c3 == False: print("dir_names and labels have different lengths") if c4 == False: print("range not assigned correctly") if c5 == False: print("range does not have correct length") print(e)
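# The delimiter above is hard-coded to ',' even though the function's own
# comment says it should be detected from the file being read. A minimal sketch
# using the standard library's csv.Sniffer (a hypothetical helper, not part of
# the source):
import csv

def detect_delimiter(filename, n_lines=3):
    with open(filename) as f:
        sample = ''.join(f.readline() for _ in range(n_lines))
    try:
        return csv.Sniffer().sniff(sample, delimiters=',;\t').delimiter
    except csv.Error:
        return ','  # fall back to the current hard-coded choice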
[numpy.full(M, 0.468), numpy.full(M, 0.406), numpy.full(M, 0.036)])

# Simulation setup
t_end = 600
t_sim = numpy.linspace(0, t_end, t_end * 10)

sim = Simulate.SimulateMPC(G, N, M, P, dt_model, Q, R, dvs=2, known_dvs=1)

tune = False
if tune:
    tuner = Tuner.Tuner(sim, Ysp_fun, t_sim, error_method="ISE", Udv=Udv)
    initial = [9, 5, 0.1, 1e-8, 1e-8, 1e-8]
    bounds = [(1e-12, 100)] * len(initial)
    a = datetime.datetime.now()
    result = tuner.tune(initial, bounds, simple_tune=True)
    b = datetime.datetime.now()
    print("Total time: ", b - a)
    print(result)
else:
    df = sim.simulate(Ysp_fun, t_sim, save_data="data/temp", live_plot=False, Udv=Udv)
    Plotting.plot_all(df, save_figure='data/temp.pdf')
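# The tuner above is configured with error_method="ISE". As a point of reference,
# the integral of squared error over a simulated horizon can be approximated as
# below. This is a generic sketch of the ISE criterion, not the Tuner class's
# actual implementation.
import numpy

def ise(t, y, ysp):
    """Integral of squared error, approximated with the trapezoidal rule.

    t   : 1-D array of sample times
    y   : 1-D array of measured outputs at those times
    ysp : 1-D array of setpoint values at those times
    """
    err = ysp - y
    return numpy.trapz(err ** 2, t)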
    CPTracker_Eq.append(CP)

#----- Reset time to zero and initialize 'WorkAcc', which accumulates the work done by the control parameter over the protocol -----
time = 0
WorkAcc = 0

while time <= ProtocolDuration:
    time, position, velocity, CP, WorkStep = LangevinPropagator.Langevin(time, position, velocity, CP, CPVel)
    WorkAcc += WorkStep
    PositionTracker.append(position)
    VelocityTracker.append(velocity)
    CPTracker.append(CP)

#----- Plot trajectory data -----
Plotting.PlotSimulationData(PositionTracker_Eq, VelocityTracker_Eq, CPTracker_Eq,
                            PositionTracker, VelocityTracker, CPTracker)

#----- Print the total work done in the protocol -----
print "TotalWork --> " + str(WorkAcc) + "\n"

#----- Write trajectory data to file -----
ReadWrite.WriteTrajectory(WritePath, WriteName_Position, PositionTracker)
ReadWrite.WriteTrajectory(WritePath, WriteName_Velocity, VelocityTracker)
ReadWrite.WriteTrajectory(WritePath, WriteName_CP, CPTracker)
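# LangevinPropagator.Langevin is not shown above. For orientation, here is a minimal
# sketch of one Euler-Maruyama step of underdamped Langevin dynamics under an *assumed*
# harmonic trap U(x, cp) = 0.5*k*(x - cp)**2 whose centre is the control parameter;
# the work increment is the potential-energy change caused by moving cp. All parameter
# names and defaults are illustrative, not the propagator's real interface.
import numpy as np

def langevin_step(time, x, v, cp, cp_vel, dt=1e-3, k=1.0, mass=1.0, gamma=1.0, kT=1.0):
    """Return (time, position, velocity, CP, WorkStep), mirroring the tuple shape above."""
    force = -k * (x - cp)                                   # trap force at the current CP
    noise = np.sqrt(2.0 * gamma * kT * dt) * np.random.randn()
    v = v + (force / mass) * dt - (gamma / mass) * v * dt + noise / mass
    x = x + v * dt
    cp_new = cp + cp_vel * dt                               # drag the control parameter
    # Work done on the system by moving the control parameter at fixed position:
    work_step = 0.5 * k * (x - cp_new) ** 2 - 0.5 * k * (x - cp) ** 2
    return time + dt, x, v, cp_new, work_step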
def run_simulation(filename, save_output, start_time, temp, RH, H2O, PInit, y_cond,
                   input_dict, simulation_time, batch_step):

    from assimulo.solvers import RodasODE, CVode, RungeKutta4, LSODAR  # Choose a solver according to your needs.
    from assimulo.problem import Explicit_Problem

    # In this function, we import functions that have been pre-compiled for use in the ODE solver.
    # The function that calculates the RHS of the ODE is also defined within this function, such
    # that it can be used by the Assimulo solvers.
    # The variables passed to this function are defined as follows:
    #-------------------------------------------------------------------------------------

    #-------------------------------------------------------------------------------------
    # define the ODE function to be called
    def dydt_func(t, y):
        """
        This function defines the right-hand side [RHS] of the ordinary differential equations [ODEs] to be solved.
        input:
        • t - time variable [internal to solver]
        • y - array holding concentrations of all compounds in both gas and particulate phase [molecules/cc]
        output:
        dy_dt - the dy_dt of each compound in both gas and particulate phase [molecules/cc.sec]
        """

        dy_dt = numpy.zeros((total_length_y, 1))

        #pdb.set_trace()
        # Calculate time of day
        time_of_day_seconds = start_time + t

        #pdb.set_trace()
        # make sure the y array is not a list. Assimulo uses lists
        y_asnumpy = numpy.array(y)

        Model_temp = temp
        sat_vap_water = numpy.exp((-0.58002206E4 / Model_temp) + 0.13914993E1 -
                                  (0.48640239E-1 * Model_temp) + (0.41764768E-4 * (Model_temp**2.0E0)) -
                                  (0.14452093E-7 * (Model_temp**3.0E0)) + (0.65459673E1 * numpy.log(Model_temp)))
        sat_vp[-1] = numpy.log10(sat_vap_water * 9.86923E-6)
        Psat = numpy.power(10.0, sat_vp)

        # Convert the concentration of each component in the gas phase into a partial pressure
        # using the ideal gas law. Units are Pascals.
        Pressure_gas = (y_asnumpy[0:num_species, ] / NA) * 8.314E+6 * Model_temp  # [using R]

        core_mass_array = numpy.multiply(ycore_asnumpy / NA, core_molw_asnumpy)

        ####### Calculate the thermal conductivity of gases according to the new temperature ########
        K_water_vapour = (5.69 + 0.017 * (Model_temp - 273.15)) * 1e-3 * 4.187  # [has to be in W/m.K]

        # Use this value for all organics, for now. If you start using a non-zero enthalpy of
        # vapourisation, this needs to change.
        therm_cond_air = K_water_vapour

        #----------------------------------------------------------------------------
        # F2c) Extract the current gas phase concentrations to be used in pressure difference calculations
        C_g_i_t = y_asnumpy[0:num_species, ]
        # Set the values for oxidants etc to 0 as this will force no mass transfer
        #C_g_i_t[ignore_index]=0.0
        C_g_i_t = C_g_i_t[include_index]

        #pdb.set_trace()
        total_SOA_mass, aw_array, size_array, dy_dt_calc = dydt_partition_fortran(
            y_asnumpy, ycore_asnumpy, core_dissociation,
            core_mass_array, y_density_array_asnumpy, core_density_array_asnumpy, ignore_index_fortran, y_mw, Psat,
            DStar_org_asnumpy, alpha_d_org_asnumpy, C_g_i_t, N_perbin, gamma_gas_asnumpy, Latent_heat_asnumpy, GRAV,
            Updraft, sigma, NA, kb, Rv, R_gas, Model_temp, cp, Ra, Lv_water_vapour)

        #pdb.set_trace()
        # Add the calculated gains/losses to the complete dy_dt array
        dy_dt[0:num_species + (num_species_condensed * num_bins), 0] += dy_dt_calc[:]

        #pdb.set_trace()
        #----------------------------------------------------------------------------
        # F4) Now calculate the change in water vapour mixing ratio.
        # To do this we need to know what the index key for the very last element is
        #pdb.set_trace()
        #pdb.set_trace()
        #print "elapsed time=", elapsedTime
        dydt_func.total_SOA_mass = total_SOA_mass
        dydt_func.size_array = size_array
        dydt_func.temp = Model_temp
        dydt_func.RH = Pressure_gas[-1] / (Psat[-1] * 101325.0)
        dydt_func.water_activity = aw_array

        #----------------------------------------------------------------------------
        return dy_dt

    #-------------------------------------------------------------------------------------
    #-------------------------------------------------------------------------------------

    # import static compilation of Fortran functions for use in the ODE solver
    print("Importing pre-compiled Fortran modules")
    from partition_f2py import dydt_partition as dydt_partition_fortran

    # 'Unpack' variables from input_dict
    species_dict = input_dict['species_dict']
    species_dict2array = input_dict['species_dict2array']
    num_species = input_dict['num_species']
    num_species_condensed = input_dict['num_species_condensed']
    y_density_array_asnumpy = input_dict['y_density_array_asnumpy']
    y_mw = input_dict['y_mw']
    sat_vp = input_dict['sat_vp']
    Delta_H = input_dict['Delta_H']
    Latent_heat_asnumpy = input_dict['Latent_heat_asnumpy']
    DStar_org_asnumpy = input_dict['DStar_org_asnumpy']
    alpha_d_org_asnumpy = input_dict['alpha_d_org_asnumpy']
    gamma_gas_asnumpy = input_dict['gamma_gas_asnumpy']
    Updraft = input_dict['Updraft']
    GRAV = input_dict['GRAV']
    Rv = input_dict['Rv']
    Ra = input_dict['Ra']
    R_gas = input_dict['R_gas']
    R_gas_other = input_dict['R_gas_other']
    cp = input_dict['cp']
    sigma = input_dict['sigma']
    NA = input_dict['NA']
    kb = input_dict['kb']
    Lv_water_vapour = input_dict['Lv_water_vapour']
    ignore_index = input_dict['ignore_index']
    ignore_index_fortran = input_dict['ignore_index_fortran']
    ycore_asnumpy = input_dict['ycore_asnumpy']
    core_density_array_asnumpy = input_dict['core_density_array_asnumpy']
    y_cond = input_dict['y_cond_initial']
    num_bins = input_dict['num_bins']
    core_molw_asnumpy = input_dict['core_molw_asnumpy']
    core_dissociation = input_dict['core_dissociation']
    N_perbin = input_dict['N_perbin']
    include_index = input_dict['include_index']
    y_gas = input_dict['y_gas_initial']

    # pdb.set_trace()

    # Specify some starting concentrations [ppt]
    Cfactor = 2.55e+10  # ppb-to-molecules/cc

    # Create variables required to initialise the ODE
    y0 = y_gas + y_cond  # Initial concentrations
    t0 = 0.0  # T0

    # Set the total_time of the simulation to 0 [haven't done anything yet]
    total_time = 0.0

    # Define a 'key' that represents the end of the composition variables to track
    total_length_y = len(y0)
    key = num_species + ((num_bins) * num_species_condensed) - 1

    #pdb.set_trace()

    # Now run through the simulation in batches.
    # I do this to enable testing of coupling processes. Some initial investigations with non-ideality in
    # the condensed phase indicated that even defining a maximum step was not enough to stop the ODE solvers
    # overshooting a stable region. It also helps with in-simulation debugging. It's up to you if you want to
    # keep this. To not run in batches, just define one batch as your total simulation time. This will reduce
    # any overhead with initialising the solvers.
    # Set the total simulation time and batch steps in seconds.
    # Note also that the current module outputs solver information after each batch step.
    # This can be turned off and the batch step changed for increased speed.
    t_array = []
    time_step = 0
    number_steps = int(simulation_time / batch_step)  # Just cycling through 3 steps to get to a solution

    # Define a matrix that stores values as outputs from the end of each batch step. Again, you can remove
    # the need to run in batches. You can tell the Assimulo solvers the frequency of outputs.
    y_matrix = numpy.zeros((int(number_steps), len(y0)))
    # Also define arrays and matrices that hold information such as total SOA mass
    SOA_matrix = numpy.zeros((int(number_steps), 1))
    size_matrix = numpy.zeros((int(number_steps), num_bins))

    print("Starting simulation")

    # In the following, we step through the simulation one batch at a time.
    while total_time < simulation_time:

        if total_time == 0.0:
            # Define an Assimulo problem
            exp_mod = Explicit_Problem(dydt_func, y0, t0, name=filename)
        else:
            # Take the output from the last batch as the start of this one
            exp_mod = Explicit_Problem(dydt_func, y_output[-1], t0, name=filename)

        # Define ODE parameters.
        # Initial steps might be slower than mid-simulation. It varies.
        #exp_mod.jac = dydt_jac

        # Define which ODE solver you want to use
        exp_sim = CVode(exp_mod)
        tol_list = [1.0e-3] * len(y0)
        exp_sim.atol = tol_list   # Default 1e-6
        exp_sim.rtol = 1.0e-6     # Default 1e-6
        exp_sim.inith = 1.0e-6    # Initial step-size
        #exp_sim.discr = 'Adams'
        exp_sim.maxh = 100.0
        # Use of a Jacobian makes a big difference in simulation time. This is relatively
        # easy to define for a gas phase - not sure for an aerosol phase with composition-
        # dependent processes.
        exp_sim.usejac = False  # To be provided as an option in a future update.
        #exp_sim.fac1 = 0.05
        #exp_sim.fac2 = 50.0
        exp_sim.report_continuously = True
        exp_sim.maxncf = 1000

        # Set the parameters and simulate 'batch_step' seconds
        t_output, y_output = exp_sim.simulate(batch_step)
        total_time += batch_step
        t_array.append(total_time)

        # Save the output from the end of the current batch step to a matrix
        y_matrix[time_step, :] = y_output[-1, :]
        SOA_matrix[time_step, 0] = dydt_func.total_SOA_mass
        size_matrix[time_step, :] = dydt_func.size_array

        print("Predicted SOA mass from end of dynamic calculation [microgram/m3] = ", dydt_func.total_SOA_mass)
        print("Predicted size range from end of dynamic calculation = ", dydt_func.size_array)
        print("Predicted temperature from end of dynamic calculation = ", dydt_func.temp)
        print("Predicted RH from end of dynamic calculation = ", dydt_func.RH)
        print("Predicted water activity from end of dynamic calculation = ", dydt_func.water_activity)

        #pdb.set_trace()
        # now save this information into a matrix for later plotting.
        time_step += 1

    if save_output is True:
        print("Saving the model output as a pickled object for later retrieval")
        # save the dictionary to a file for later retrieval - have to do each separately.
        with open(filename + '_y_output.pickle', 'wb') as handle:
            pickle.dump(y_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(filename + '_t_output.pickle', 'wb') as handle:
            pickle.dump(t_array, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(filename + '_SOA_output.pickle', 'wb') as handle:
            pickle.dump(SOA_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(filename + '_size_output.pickle', 'wb') as handle:
            pickle.dump(size_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(filename + 'include_index.pickle', 'wb') as handle:
            pickle.dump(include_index, handle, protocol=pickle.HIGHEST_PROTOCOL)

        print("Saving the model output as CSV files for later retrieval")
        df = pd.DataFrame(y_matrix)
        df.to_csv(filename + "_y_output.csv")
        df = pd.DataFrame(t_array)
        df.to_csv(filename + "_t_output.csv")
        df = pd.DataFrame(SOA_matrix)
        df.to_csv(filename + "_SOA_output.csv")
        df = pd.DataFrame(size_matrix)
        df.to_csv(filename + "_size_output.csv")

    with_plots = True

    #pdb.set_trace()
    # Plot the change in concentration over time for a given species. For the user to change / remove.
    # In a future release I will add this as a separate module.
    if with_plots:
        try:
            Plotting.stacked_bar(t_array, y_matrix, num_species_condensed, num_bins,
                                 numpy.array(y_mw[include_index]), NA)
        except Exception:
            print("There is a problem using Matplotlib in your environment. If using this within a docker "
                  "container, you will need to transfer the data to the host or configure your container "
                  "to enable graphical displays. More information can be found at "
                  "http://wiki.ros.org/docker/Tutorials/GUI")
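# The batch loop above re-initialises CVode on every pass. Here is a stripped-down,
# self-contained sketch of that same Assimulo pattern on a trivial decay ODE
# (dy/dt = -y). It only illustrates the Explicit_Problem/CVode workflow used above
# and is not part of the aerosol model.
import numpy
from assimulo.problem import Explicit_Problem
from assimulo.solvers import CVode

def rhs(t, y):
    # trivial stand-in RHS: exponential decay
    return -y

total_time, batch_step, simulation_time = 0.0, 1.0, 5.0
y_last = numpy.array([1.0])
while total_time < simulation_time:
    exp_mod = Explicit_Problem(rhs, y_last, 0.0, name='decay')  # restart each batch from the last state
    exp_sim = CVode(exp_mod)
    exp_sim.rtol = 1.0e-6
    t_out, y_out = exp_sim.simulate(batch_step)  # integrate one batch
    y_last = y_out[-1]
    total_time += batch_step
    print(total_time, y_last)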
Show_Data = [Relative_Flux, Variance, Residuals, Spectra_Bin, Age, Delta, Redshift,
             Low_Confidence, Up_Confidence]

## Available Plots:  Relative Flux, Variances, Residuals, Spectra/Bin, Age, Delta, Redshift
##                   0              1          2          3            4    5      6
# the plots you want to create

# Choose the plot range and plot type!
xmin = 2500
xmax = 10500
Plots = [0, 1, 2, 3, 4, 5, 6, 7]

#################
# By default the file name of the plot is labeled with its time of creation, but you can
# personalize it if you want, or rename it once it's been saved.
plot_name = str(queries) + '_composite_comparison, ' + (time.strftime("%H,%M,%S"))
image_title = "../plots/" + plot_name + ".png"

# Want a custom saved-image name? Uncomment the next line...but leave the one above alone.
#image_title = "../plots/" + "Custom Title" + ".png"
#print "Plot saved as: " + image_title

# The next line is the header on your plot
title = "Insert Title Here"
################

Plotting.main(Show_Data, Plots, image_title, title, Names, xmin, xmax)
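# To make the index comments above concrete, a small hypothetical lookup from plot
# index to name. The first seven names mirror the comment block; index 7 is inferred
# from the ordering of Show_Data (Low_Confidence) and is an assumption.
PLOT_NAMES = ["Relative Flux", "Variances", "Residuals", "Spectra/Bin",
              "Age", "Delta", "Redshift", "Low Confidence"]

for p in Plots:
    print("Will draw plot %d: %s" % (p, PLOT_NAMES[p]))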
F2scores = ["F2(8,20)","F2(15,40)"] sheets = pd.ExcelFile(DATA_DIR+fn).sheet_names #print sheets df = pd.ExcelFile(DATA_DIR+fn).parse(sheets[0],index_col=0,parse_dates=True) print "number of observations retrieved:",len(df) stock_name = fn.split(" ")[0] Stock_Names.append(stock_name) print stock_name to_feed_anOld_habbit(DF,df,Stocks,stock_name) Date_Stock = {} #keys are dates, values are stocks df,shift,xVar,yVar,F2scores = transform_df_ASneeded(df,F2scores,shift,stock_name) fig_fn = out_dir+ stock_name+"_behaviour.jpg" Plotting.stacked_TimeSeries(df,stock_name,["ClosePrice","EWPoolClose","ccRelRet","barraBeta"],"Behaviour of "+stock_name,fig_fn) #g_xVar = 'barraBeta' #fig_fn = out_dir+ stock_name+"_Prices_vs_"+g_xVar+".jpg" #Plotting.stacked_ScatterPlots(df,stock_name,["ccRawRet","ccPoolRet"],g_xVar,stock_name,fig_fn) #now go thru the segments of dates if Dates != []: for d in range(len(Dates)-1): date1 = Dates[d] date2 = Dates[d+1] curr_df = df[date1.strftime('%d/%m/%Y'):date2.strftime('%d/%m/%Y')] stock = STOCK(stock_name) stock.df = curr_df #utils.simple_operations_on_DataFrame(curr_df,stock)
def plots(self, motionVid):
    sleep(1)
    Plotting.combineplots(motionVid.scale_size, motionVid.frame_results, motionVid.minSIZE,
                          motionVid.file_destination + "/" + "Diagnostics.png", show=True)
def run_plotting_routines(callObject):
    import Plotting
    import PyPostSettings
    import PyPostTools

    ncFile_Name = callObject['filename']
    _pySet = callObject['settings']
    targetDir = callObject['tDir']
    dask_threads = callObject['dask_threads']
    logger = PyPostTools.pyPostLogger()

    daskArray = xarray.open_mfdataset(ncFile_Name, parallel=False, combine='by_coords')

    if(targetDir == ""):
        logger.write("Cannot run plotting routines, could not locate target directory.")
        return -1

    # Draw Plots
    if(_pySet["plot_surface_map"] == '1'):
        Plotting.plot_surface_map(daskArray, targetDir,
                                  withTemperature=_pySet["plot_surface_map_temperature"] == '1',
                                  withWinds=_pySet["plot_surface_map_winds"] == '1',
                                  windScaleFactor=75,
                                  withMSLP=_pySet["plot_surface_map_mslp"] == '1')
    if(_pySet["plot_simulated_reflectivity"] == '1'):
        Plotting.plot_simulated_reflectivity(daskArray, targetDir)
    if(_pySet["plot_precip_type"] == '1'):
        Plotting.plot_precipitation_type(daskArray, targetDir)
    if(_pySet["plot_accumulated_precip"] == '1'):
        Plotting.plot_accumulated_precip(daskArray, targetDir)
    if(_pySet["plot_accumulated_snowfall"] == '1'):
        Plotting.plot_accumulated_snowfall(daskArray, targetDir)
    if(_pySet["plot_precipitable_water"] == '1'):
        Plotting.plot_precipitable_water(daskArray, targetDir,
                                         withMSLP=_pySet["plot_precipitable_water_with_mslp_contours"] == '1')
    if(_pySet["plot_dewpoint_temperature"] == '1'):
        Plotting.plot_dewpoint_temperature(daskArray, targetDir, windScaleFactor=75)
    if(_pySet["plot_surface_omega"] == '1'):
        Plotting.plot_surface_omega(daskArray, targetDir)
    if(_pySet["plot_10m_max_winds"] == '1'):
        Plotting.plot_10m_max_winds(daskArray, targetDir, windScaleFactor=75)
    if(_pySet["plot_upper_lv_winds"] == '1'):
        Plotting.plot_upper_lv_winds(daskArray, targetDir, _pySet["upper_winds_levels"],
                                     windScaleFactor=75,
                                     withHeights=_pySet["plot_upper_lv_winds_withheights"] == '1')
    if(_pySet["plot_theta_e"] == '1'):
        Plotting.plot_theta_e(daskArray, targetDir, _pySet["theta_e_levels"],
                              withHeights=_pySet["plot_theta_e_heights"] == '1',
                              withWinds=_pySet["plot_theta_e_winds"] == '1',
                              windScaleFactor=75)
    return 0
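# A hypothetical driver for run_plotting_routines(). Every key below is inferred from
# the flags the routine reads; the file name, directory, and level lists are placeholders.
flags = ["plot_surface_map", "plot_surface_map_temperature", "plot_surface_map_winds",
         "plot_surface_map_mslp", "plot_simulated_reflectivity", "plot_precip_type",
         "plot_accumulated_precip", "plot_accumulated_snowfall", "plot_precipitable_water",
         "plot_precipitable_water_with_mslp_contours", "plot_dewpoint_temperature",
         "plot_surface_omega", "plot_10m_max_winds", "plot_upper_lv_winds",
         "plot_upper_lv_winds_withheights", "plot_theta_e", "plot_theta_e_heights",
         "plot_theta_e_winds"]
settings = {f: '0' for f in flags}           # disable everything by default
settings.update({"plot_surface_map": '1',    # then switch on a couple of products
                 "plot_surface_map_mslp": '1',
                 "upper_winds_levels": [250],
                 "theta_e_levels": [850]})

callObject = {'filename': 'wrfout_d01.nc',   # placeholder model-output file
              'settings': settings,
              'tDir': './plots',
              'dask_threads': 1}
run_plotting_routines(callObject)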
    'mini_gb5': [colors[1], 'solid'],
    'mini_lin': [colors[0], 'solid'],
    'epsall_gb2': ['k', 'dashed'],
    'epsall_gb5': [colors[1], 'dashed'],
    'epsall_lin': [colors[0], 'dashed'],
    'lin': [colors[3], 'solid']
}

marker = 10
band = False

parser = argparse.ArgumentParser()
parser.add_argument('--save', dest='save', action='store_true')
Args = parser.parse_args(sys.argv[1:])

D1 = Plotting.read_dir("../results/mslr30k_T=36000_L=3_e=0.1/")

fig = plt.figure(figsize=(mpl.rcParams['figure.figsize'][0], mpl.rcParams['figure.figsize'][1] - 1))
ax = fig.add_subplot(111)
plt.rc('font', size=18)
plt.rcParams['text.usetex'] = True
plt.rc('font', family='sans-serif')

ticks = ax.get_yticks()
print(ticks)
ax.set_ylim(2.15, 2.35)
print("Setting ylim to %0.2f, %0.2f" % (ticks[3], ticks[len(ticks) - 2]))
ticks = ax.get_yticks()
print(ticks)
# ticks = ["", "", "2.2", "", "2.3", ""]
# ax.set_yticklabels(ticks, size=16)
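# The commented-out relabelling above sets tick labels without pinning tick positions,
# which can drift if the limits change. A small sketch of the more robust pattern using
# standard matplotlib calls; axis values here are placeholders:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_ylim(2.15, 2.35)
positions = [2.2, 2.3]            # pin the tick positions first...
ax.set_yticks(positions)
ax.set_yticklabels(["2.2", "2.3"], size=16)  # ...then attach the labels to them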
def build_and_train_and_predict(ESN_obj, start_time, train_start_timestep, train_end_timestep,
                                mse_array, list_of_beta_to_test, N_u, N_y, N_x, x_initial,
                                state_target, scaling_W_fb, timesteps_for_prediction, scaling_W_in,
                                system_name, print_timings_boolean, scaling_alpha, scaling_W,
                                save_or_display, state, save_name, param_array):
    # First thing: set the parameters of ESN_obj correctly for this run:
    i, j, k, l, m = param_array
    ESN_obj.W_in = ESN_obj.build_W_in(N_x, N_u, scaling_W_in)
    ESN_obj.W_fb = ESN_obj.build_W_fb(N_x, N_u, scaling_W_fb)
    ESN_obj.alpha_matrix = ESN_obj.build_alpha_matrix(scaling_alpha * np.ones(N_x))

    # Create "echoes" and record the activations.
    # Run the ESN for however many timesteps are necessary to collect enough reservoir activations x.
    for n in range(0, train_end_timestep):
        # Using the teacher-forcing method:
        ESN_obj.update_reservoir(state_target[:, n], n, state_target[:, n + 1])
    print_timing(print_timings_boolean, start_time, "after_ESN_feed_time")

    for l, beta in enumerate(list_of_beta_to_test):
        print("Testing for " + str((scaling_W_in, scaling_W, scaling_alpha, beta, scaling_W_fb)))
        print("Has Indices: " + str((i, j, k, l, 0)))

        # Compute W_out (readout coefficients)
        ESN_obj.calculate_W_out(state_target, N_x, beta, train_start_timestep, train_end_timestep)
        print_timing(print_timings_boolean, start_time, "after_W_out_train_time")

        # Clear activations and state from train_end_timestep onward to make sure they aren't
        # being used improperly during prediction:
        ESN_obj.x[train_end_timestep + 1:] = 0
        state[:, train_end_timestep + 1:] = 0
        # Make the prediction before the end of training identical to the target state
        state[:, 0:train_end_timestep + 1] = state_target[:, 0:train_end_timestep + 1]

        # Predict the state at each next timestep, keeping training progress:
        for n in range(train_end_timestep, train_end_timestep + timesteps_for_prediction - 1):
            state[:, n + 1] = ESN_obj.output_Y(state[:, n], n)
            ESN_obj.update_reservoir(state[:, n], n, state[:, n + 1])
        print_timing(print_timings_boolean, start_time, "after_Y_predict_train_time")

        np.savez(save_name + '.npz', ESN_obj=ESN_obj)

        # Compute the prediction error once and reuse it for both the log line and the grid
        mse = mean_squared_error(
            state_target.transpose()[train_end_timestep:train_end_timestep + timesteps_for_prediction],
            state[:, train_end_timestep:train_end_timestep + timesteps_for_prediction].transpose())
        print("mean_squared_error is: " + str(mse))
        mse_array[i, j, k, l] = mse

        # Count entries of W_out above the 0.5 threshold
        print("Number of large W_out: " + str(np.sum(ESN_obj.W_out > 0.5)))
        print("Max of W_out is " + str(np.max(ESN_obj.W_out)))
        # Plotting.plot_activations(state_target, ESN_obj.x)
        if save_or_display is not None:
            Plotting.plot_orbits(state, state_target, train_start_timestep, train_end_timestep,
                                 system_name, timesteps_for_prediction,
                                 [scaling_W_in, scaling_W, scaling_alpha, beta, scaling_W_fb],
                                 save_or_display)
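# ESN_obj.calculate_W_out is not shown above. For reference, the standard
# ridge-regression readout that the beta parameter typically regularises in echo
# state network training looks like this; a generic sketch, not necessarily the
# ESN_obj implementation.
import numpy as np

def ridge_readout(X, Y, beta):
    """Solve W_out = Y X^T (X X^T + beta*I)^(-1).

    X    : (N_x, T) matrix of reservoir activations over the training window
    Y    : (N_y, T) matrix of target outputs over the same window
    beta : ridge regularisation strength
    """
    N_x = X.shape[0]
    return Y @ X.T @ np.linalg.inv(X @ X.T + beta * np.eye(N_x))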