def GraphSeedsPropertyMean(self, quantity, ax=None, color='m', label=None, err_alpha=0.2):
    # Create figure, ax if not provided
    if not ax:
        fig, ax = plt.subplots(1, 1, figsize=(6, 6))

    # Get timestep
    timestep = self.config['RunConfig']['timeSnap']

    # Data for each seed. Seeds may have different numbers of frames,
    # so pad the shorter rows with NaN and use nan-aware statistics.
    nFrame = max([len(s.frames) for s in self.seeds])
    means = np.full((len(self.seeds), nFrame), np.nan)
    for i, seed in enumerate(self.seeds):
        means[i, :len(seed.frames)], _ = Plots.getSylinderMeanPropertyNorm(seed.frames, quantity)

    # Find mean and std across seeds
    mean = np.nanmean(means, axis=0)
    std = np.nanstd(means, axis=0)
    time = timestep * np.arange(len(mean))

    # Plot the mean with a shaded one-standard-deviation band
    ax.plot(time, mean, color=color, label=label)
    ax.fill_between(time, mean - std, mean + std, color=color, alpha=err_alpha)
    ax.set_xlabel(Plots.getPropertyLabel('time'))
    ax.set_ylabel(Plots.getPropertyLabel(quantity))
def plotter(samples, predictions, Ws, img_x, idx):
    plot_all_filters(Ws, idx)
    shp = (samples.shape[0], 1, img_x, img_x)
    samples = samples.reshape(shp)
    predictions = predictions.reshape(shp)
    # `idx` (not the undefined `i`) indexes the output plots
    Plots.plot_predictions_grid(samples, predictions, idx, shp)
    return
def FitDiffVsTime(Time, Data, Range):
    hour = []
    diff = []
    for i in range(Range[0], Range[1], 1):
        # Fit an exponential to a 20-hour sliding window; Fit[3][1] is the time constant
        Fit = Pl.FitExponential(Time, Data['Oxygen'], Range=[i, i + 20])
        print(i, Fit[3], Fit[3][1])
        hour.append(i)
        # D = L^2 / (pi^2 * tau); the 1E4 factor matches the axis units below
        diff.append(1.0 / (Fit[3][1]) * 0.635**2 / np.pi**2 * 1E4)
        XMin, XMax = np.min(Fit[0]), np.max(Fit[0])
        YMin, YMax = np.min(Fit[1]), np.max(Fit[1])
        print(XMin, XMax, YMin, YMax)
        Pl.PlotBestFitOverTime(Time=[Time], Data=[Data], XRange=[XMin, XMax],
                               YRange=[YMin, YMax], Fit=Fit)
        plt.savefig('rga_fit_%d.pdf' % i)
        # plt.show()
        plt.close()
    Pl.PlotScatter([hour], [diff],
                   Labels=['Time [Hours]',
                           r'Diffusion Coefficient [cm$^2$/h $\times \, 10^{-4}$]'],
                   XRange=[20, 60], YRange=[8, 14], Legend=['Oxygen in Teflon'])
    plt.savefig('diff_vs_time.pdf')
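# The loop body above implements the slab-diffusion relation D = L^2 / (pi^2 * tau):
# tau is the fitted exponential time constant (Fit[3][1]) and L = 0.635 is, by
# assumption, the sample thickness in cm; the 1E4 factor converts to the plot's
# 10^-4 cm^2/h axis units. A quick standalone check of the arithmetic:
import numpy as np

tau = 4.0                                   # hypothetical time constant [hours]
D = 1.0 / tau * 0.635**2 / np.pi**2 * 1E4   # same expression as in the loop above
print(D)                                    # ~102.1, i.e. D ~ 0.0102 cm^2/h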
def test_preprocessing_radiographs(img_idx, skip_amf=True):
    img = cv2.imread("%s%02d.tif" % ("Data/Radiographs/", img_idx))
    directory = "Plots/Preprocessed/"
    img = img.copy()
    Plots.save_image(img, "Original.png", directory)
    if not skip_amf:
        img = task2.adaptive_median(img)
        #Plots.plot_image(img, title="Adaptive median filtered")
        Plots.save_image(img, "amf.png", directory)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = task2.bilateral_filter(img)
    #Plots.plot_image(img, title="Bilateral filtered")
    Plots.save_image(img, "bilateral.png", directory)
    img = task2.mathematical_morphology(img)
    #Plots.plot_image(img, title="Top-hat and bottom-hat combined")
    Plots.save_image(img, "math_morph.png", directory)
    img = task2.clahe(img)
    #Plots.plot_image(img, title="CLAHE")
    Plots.save_image(img, "clahe.png", directory)
def color_ratio(vis, ir, z, maxint):
    output = 1.0
    DZinv = 1.0 / (z[1] - z[0])
    iz_600 = int(round(0.6 * DZinv)) - 1
    iz_3000 = int(round(3.0 * DZinv)) - 1
    sub_vis = vis[iz_600:iz_3000, :]
    sub_ir = ir[iz_600:iz_3000, :]
    threshold = (DZinv * 6E-3) * vis.shape[-1] / 96.0
    with np.errstate(invalid='ignore'):
        cl = (sub_vis >= maxint).sum()
    if cl > threshold:
        with np.errstate(invalid='ignore'):
            x = sub_vis[sub_vis >= maxint]
            y = sub_ir[sub_vis >= maxint]
        params = np.polyfit(x, y, 1)
        output = params[0]
        # x, y and params only exist inside this branch, so plot here too
        if DoPlot:
            if Debug:
                print "Plotting channel relation..."
            Plots.xy(x, y, params, "calibration.png")
    if Debug:
        print "**** Function color_ratio ****"
        print "Number of Cloud points: {} - Color Ratio: {}".format(cl, output)
    return output
def generate(self):
    size = (self.batch_size, 20, 18, 18)
    block = np.random.randn(*size)
    block = block.astype(np.float32)
    out = self.gen(block)
    print(out.shape)
    Plots.plot_testimg(out)
    return out
def fit_model(asm_list, incisor_list, test_img_idx, m, auto_estimate=True, save=False, show=False):
    global save_plots
    global test_idx
    global show_plots
    save_plots = save
    test_idx = test_img_idx
    show_plots = show
    test_img = task2.load([test_img_idx])[0]

    X_init_list = []
    if auto_estimate:
        X_init_list = auto_init.get_estimate(asm_list, incisor_list, test_img_idx)
    else:
        lms_list = []
        for asm in asm_list:
            lms_list.append(asm.sm.mean_shape)
        X_init_list = manual_init.get_estimate(lms_list, incisor_list, test_img)
        Plots.plot_landmarks_on_image(X_init_list, test_img, title="manual_init",
                                      show=False, save=False, wait=True, color=(0, 255, 0))

    with Timer("Fitting Model in Multi Resolution Framework"):
        final_fit = []
        for ind, X in enumerate(X_init_list):
            print("..For incisor %d" % (asm_list[ind].incisor))
            pyramid = task1.gauss_pyramid(test_img, PYRAMID_LEVELS)
            X = X.scaleposition(1.0 / 2**(PYRAMID_LEVELS + 1))
            level = PYRAMID_LEVELS
            for img in reversed(pyramid):
                #print("..Level %d" % (level))
                X = X.scaleposition(2)
                X = fit_one_level(X, img, asm_list[ind], level, m, MAX_ITER)
                level -= 1
            final_fit.append(X)
        print("")  # just for elegant printing on screen

    if save_plots or show_plots:
        directory = "Plots/model_fit/test_img_" + str(test_idx) + "/"
        Plots.plot_landmarks_on_image(final_fit, test_img, directory=directory, title="Final_fit",
                                      show=show_plots, save=save_plots, wait=True, color=(0, 255, 0))
    return final_fit
def test_auto_initial_estimate(incisor_list, test_img_idx, k):
    asm_list = task1.buildASM(incisor_list, test_img_idx, k)
    X_init_list = auto_init.get_estimate(asm_list, incisor_list, test_img_idx,
                                         show_bbox_dist=True, show_app_models=False,
                                         show_finding_bbox=True, show_autoinit_bbox=False,
                                         show_autoinit_lms=False, save=False)
    test_img = task2.load([test_img_idx])[0]
    directory = "Plots/auto_init/test_img_%02d/" % (test_img_idx)
    Plots.plot_landmarks_on_image(X_init_list, test_img, directory=directory, title="auto_init",
                                  show=False, save=True, wait=True, color=(0, 255, 0))
def reconstruct(self, Y):
    print(Y.shape)
    Y = Y.reshape((Y.shape[0], 1, 28, 28))
    Y = Y[:self.batch_size, :]
    Y = Y.astype(np.float32)
    Yout = self.predict(Y)[0]
    print(Y.shape)
    print(Yout.shape)
    Plots.plot_predictions_grey(Y, Yout, 99999999, [self.batch_size, 1, 28, 28])
    return
def polynomialRegressionCV(obj, X, y, numberoffolds, degree, label):
    rmseList = []
    degreeList = []
    # Loop variable renamed so it no longer shadows the `degree` parameter
    for d in range(1, degree + 1):
        model = make_pipeline(PolynomialFeatures(d), obj)
        rmse = callCrossValPoly(model, X, y, numberoffolds, d, label)
        degreeList.append(d)
        rmseList.append(rmse)
    #print('%s (Polynomial) - The best RMSE obtained is: %.4f for Degree: %d' % (label, minRMSE(rmseList), rmseList.index(minRMSE(rmseList)) + 1))
    Plots.linePlot(degreeList, rmseList, 'Degree_of_Polynomial', 'RMSE',
                   'Degree VS RMSE', 'red', 'PolyRegressionCV' + label)
def test_GPA(incisor_list, test_img_idx):
    for incisor in incisor_list:
        train_lms = task1.load_all_landmarks_of(incisor, test_img_idx)
        mean_shape, aligned_shapes = GPA(train_lms)
        Plots.plot_procrustes(mean_shape, aligned_shapes, test_img_idx, incisor,
                              show=False, save=True)
def plot_all_filters(Ws, idx):
    print(len(Ws))
    # Plot the first half of the stored parameter list
    for i in range(int(len(Ws) / 2)):
        w = Ws[i].get_value()
        # Filters are square, so the side length is the square root of the
        # flattened size; cast to int so reshape() accepts it
        dim = int(np.sqrt(w.shape[0]))
        w = np.swapaxes(w, 0, 1)
        w = w.reshape(w.shape[0], 1, dim, dim)
        print(dim)
        print(w.shape)
        Plots.plot_filters(w, 1, idx, "layer" + str(i + 1))
    return
def test_manual_init(incisor_list, test_img_idx, k):
    # k: grey-level profile length forwarded to buildASM (taken as a parameter
    # here; it was undefined in the original snippet)
    asm_list = task1.buildASM(incisor_list, test_img_idx, k)
    lms_list = []
    for asm in asm_list:
        lms_list.append(asm.sm.mean_shape)
    test_img = task2.load([test_img_idx])[0]
    X_init_list = manual_init.get_estimate(lms_list, test_img)
    Plots.plot_landmarks_on_image(X_init_list, test_img, title="manual_init",
                                  show=True, save=False, wait=True, color=(0, 255, 0))
def find_bbox(mean, evecs, test_img, def_width, def_height, is_upper,
              jaw_split, search_region):
    """Finds the bounding box inside the search region with the lowest
    reconstruction error."""
    global plot_finding_bbox
    global save_plots
    global save_dir

    lowest_error = float("inf")
    lowest_error_bbox = [(-1, -1), (-1, -1)]
    current_window = []
    for wscale in np.arange(0.8, 1.3, 0.1):
        for hscale in np.arange(0.7, 1.2, 0.1):
            winW = int(def_width * wscale)
            winH = int(def_height * hscale)
            for (x, y, current_window) in sliding_window(test_img, search_region,
                                                         step_size=20, window_size=(winW, winH)):
                if current_window.shape[0] != winH or current_window.shape[1] != winW:
                    continue
                # Project the window on the appearance model and measure how
                # well the model can reconstruct it
                reCut = cv2.resize(current_window, (def_width, def_height))
                X = reCut.flatten()
                Y = project(evecs, X, mean)
                Xacc = reconstruct(evecs, Y, mean)
                error = np.linalg.norm(Xacc - X)
                if error < lowest_error:
                    lowest_error = error
                    lowest_error_bbox = [(x, y), (x + winW, y + winH)]
                current_window = [(x, y), (x + winW, y + winH)]
                sub_dir = "upper_incisors/" if is_upper else "lower_incisors/"
                directory = save_dir + "finding_bboxes/" + sub_dir
                Plots.plot_autoinit(test_img, jaw_split, current_window, search_region,
                                    lowest_error_bbox, directory=directory,
                                    title="wscale=" + str(wscale) + " hscale=" + str(hscale),
                                    wait=False, show=plot_finding_bbox, save=False)

    # Plot of final chosen window
    title = "upper" if is_upper else "lower"
    if plot_finding_bbox or save_plots:
        Plots.plot_autoinit(test_img, jaw_split, current_window, search_region,
                            lowest_error_bbox, directory=save_dir,
                            title="initial_estimate_bbox_%s" % (title), wait=False,
                            show=plot_finding_bbox, save=save_plots)
    return lowest_error_bbox
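# find_bbox relies on three helpers that are not shown in this snippet. Under the
# usual appearance-model reading of the calls above, they would look roughly like
# the sketch below (the names, the search_region convention, and the generator
# signature are assumptions inferred from the call sites, not the original code).
import numpy as np

def sliding_window(img, search_region, step_size, window_size):
    """Yield (x, y, window) crops of `img` inside search_region = [(x1, y1), (x2, y2)]."""
    (x1, y1), (x2, y2) = search_region
    winW, winH = window_size
    for y in range(y1, y2 - winH + 1, step_size):
        for x in range(x1, x2 - winW + 1, step_size):
            yield (x, y, img[y:y + winH, x:x + winW])

def project(evecs, X, mean):
    # Coordinates of the (flattened) window in the PCA subspace
    return np.dot(evecs.T, X - mean)

def reconstruct(evecs, Y, mean):
    # Back-projection from PCA coordinates to pixel space
    return np.dot(evecs, Y) + mean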
def polynomialRegression(obj, X_train, y_train, X_test, y_test, degree, label):
    rmseList = []
    degreeList = []
    # Loop variable renamed so it no longer shadows the `degree` parameter
    for d in range(1, degree + 1):
        model = make_pipeline(PolynomialFeatures(d), obj)
        model.fit(X_train, y_train)
        pred = model.predict(X_test)
        rmse = rootMeanSquareError(pred, y_test)
        degreeList.append(d)
        rmseList.append(rmse)
        print("%s (Polynomial) - Root Mean Squared Error for degree %d: %.4f" % (label, d, rmse))
        #print('Variance score (Polynomial): %.4f' % model.score(X_test, y_test))
    print('%s (Polynomial) - The best RMSE obtained is %.4f for degree: %d'
          % (label, minRMSE(rmseList), rmseList.index(minRMSE(rmseList)) + 1))
    Plots.linePlot(degreeList, rmseList, 'Degree_of_Polynomial', 'RMSE',
                   'Degree VS RMSE', 'blue', 'PolyRegression' + label)
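# rootMeanSquareError and minRMSE are used by the polynomial-regression helpers
# in this collection but defined elsewhere; the call sites only make sense if
# they are the obvious one-liners, sketched here (implementations are assumptions):
import numpy as np

def rootMeanSquareError(pred, actual):
    return np.sqrt(np.mean((np.asarray(pred) - np.asarray(actual)) ** 2))

def minRMSE(rmse_list):
    return min(rmse_list)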
def GraphProperty(self, quantity, ax=None, color='m', label=None):
    # Create figure, ax if not provided
    if not ax:
        fig, ax = plt.subplots(1, 1, figsize=(6, 6))

    # Get timestep
    timestep = self.config['RunConfig']['timeSnap']
    Plots.graphSylinderMeanPropertyNorm(self.frames, quantity, ax,
                                        color=color, timestep=timestep, label=label)
def get_best_nearby_match(X, asm, img, gimg, glms, m, level):
    """Examines a region of the given image around each point X_i to find
    the best nearby match for that model point."""
    Y = []
    n_close = 0
    profiles = []
    best_pixels = []
    fit_qualities = []
    for ind in range(len(X.points)):
        profile = Profile(img, gimg, X, ind, m)
        profiles.append(profile)
        lowest_costs, best_pixel = np.inf, None
        costs_of_fit = []
        for i in range(asm.k, asm.k + 2 * (m - asm.k) + 1):
            subprofile = profile.samples[i - asm.k:i + asm.k + 1]
            dist = glms[ind].quality_of_fit(subprofile)
            costs_of_fit.append(dist)
            if dist < lowest_costs:
                lowest_costs = dist
                best_pixel = i
        best_pixels.append(best_pixel)
        fit_qualities.append(lowest_costs)
        best_point = [int(c) for c in profile.points[best_pixel, :]]
        if (best_pixel > 3 * m / 4 and best_pixel < 5 * m / 4):
            n_close += 1
        # Plot sample profile for 10th model point for instance
        if save_plots or show_plots:
            global test_idx
            if (n_close > 19):
                Plots.plot_profiles(profile.samples, glms[9].mean_profile, costs_of_fit,
                                    level, test_idx, save=save_plots, show=show_plots)

    # applying a median filter to get smooth boundary
    # (the list is duplicated first, which pads the filter; zip() below
    # truncates back to one entry per profile)
    best_pixels.extend(best_pixels)
    best_pixels = np.rint(medfilt(np.asarray(best_pixels), 5)).astype(int)
    for best, profile in zip(best_pixels, profiles):
        best_point = [int(c) for c in profile.points[best, :]]
        Y.append(best_point)
    fit_quality = np.mean(fit_qualities)
    return Landmarks(np.array(Y)), n_close, fit_quality
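# `quality_of_fit` on the grey-level models (glms) is not shown here. In the
# standard Active Shape Model formulation this is the Mahalanobis distance of a
# sampled sub-profile from the model's mean profile; a minimal sketch under that
# assumption (class and attribute names other than mean_profile are invented):
import numpy as np

class GreyLevelModel(object):
    def __init__(self, profiles):
        profiles = np.asarray(profiles)
        self.mean_profile = profiles.mean(axis=0)
        self.cov_inv = np.linalg.pinv(np.cov(profiles.T))

    def quality_of_fit(self, sample):
        # Mahalanobis distance: (g - mean)^T S^-1 (g - mean)
        d = np.asarray(sample) - self.mean_profile
        return float(d.dot(self.cov_inv).dot(d))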
def depol(par, per, z):
    output = 1.0
    DZinv = 1.0 / (z[1] - z[0])
    iz_10km = int(round(10.0 * DZinv)) - 1  # single vertical level at 10 km
    x = par[iz_10km, :]
    y = per[iz_10km, :]
    # Mask both channels by the parallel channel's NaNs so the fit stays paired
    params = np.polyfit(x[~np.isnan(x)], y[~np.isnan(x)], 1)
    output = params[0]
    if Debug:
        print "**** Function depol ****"
        print "Linear fit: slope={}".format(output)
    if DoPlot:
        Plots.xy(x, y, params, "depol.png")
    return output
def __init__(self, mainWinObj):
    self.transFrame = AccountPageClasses.TransactionFrame(mainWinObj)  # Frame where transactions appear
    self.controlFrame = AccountPageClasses.ControlFrame(mainWinObj)    # Frame with the control buttons
    #self.plotFrame = AccountPageClasses.PlotFrame(mainWinObj)         # Frame for the plots
    mainPlot = Plots.HomeWindowGraph(mainWinObj.accPageFrm, mainWinObj)  # Create Graph Object for the HomePage (row=0, column=1)
def extract_roi_for_appModel(is_upper, test_img_idx):
    """Extracts the region of interest (bounding box) surrounding the four
    upper (or lower) incisors."""
    bbox_list = []
    train_idx = list(range(1, 15))  # list() so .remove() also works on Python 3
    train_idx.remove(test_img_idx)
    for example_nr in train_idx:
        lms = landmarks.load_all_incisors_of_example(example_nr)
        img = cv2.imread('Data/Radiographs/' + str(example_nr).zfill(2) + '.tif')
        if is_upper:
            bbox = Plots.draw_bbox(img, lms[0:4], show=False, return_bbox=True)
        else:
            bbox = Plots.draw_bbox(img, lms[4:8], show=False, return_bbox=True)
        bbox_list.append(bbox)
    return bbox_list
def vis_factor(vis, z):
    DZ = z[1] - z[0]
    DZinv = 1.0 / DZ
    iz_1200 = int(round(1.2 * DZinv)) - 1
    iz_6000 = int(round(6.0 * DZinv)) - 1
    with np.errstate(divide='ignore', invalid='ignore'):
        a = np.where(vis[iz_1200:iz_6000, :] > 0,
                     np.log10(vis[iz_1200:iz_6000, :]), np.nan)
    hist, bin_edges = np.histogram(a[~np.isnan(a)], bins=60, range=[-3, 2])
    maxsub = np.nanargmax(hist)
    factor = 1E4 / 10.**(bin_edges[maxsub])
    if Debug:
        print "**** Function vis_factor ****"
        print "Index of maximum in histogram: {}".format(maxsub)
        print "Maximum histogram: {}".format(bin_edges[maxsub])
    if DoPlot:
        Plots.histo(hist, bin_edges)
    return factor
def analayze_decoderStat(NUMBER_OF_STRINGS_MAX, NUMBER_OF_STRINGS_MIN, strLen,
                         resultForGraph, num_of_mis):
    tempRes = {"Z": [], "X": []}
    tempRes['X'] = resultForGraph['X']
    # Bin the p-values into discrete scores for the plot
    for arr in resultForGraph["Z"]:
        tempRes["Z"].append([])
        for x in arr:
            if x <= 0.01:
                tempRes["Z"][-1].append(100)
            elif x <= 0.05 and x > 0.01:
                tempRes["Z"][-1].append(75)
            elif x > 0.05 and x <= 0.1:
                tempRes["Z"][-1].append(20)
            else:
                tempRes["Z"][-1].append(0)
    Plots.py_plotAll(NUMBER_OF_STRINGS_MAX, NUMBER_OF_STRINGS_MIN, num_of_mis,
                     tempRes, strLen, "Del len:" + str(strLen), False, False, True)
def __init__(self, mainWinObj):
    self.accFrame = HomePageClasses.AccountFrame(mainWinObj)       # Create Frame Object for the account
    self.ccFrame = HomePageClasses.CreditCardFrame(mainWinObj.home)  # Create Frame Object for the Credit Card
    self.bills = HomePageClasses.Bills(mainWinObj.home)            # Create Status Bar Object
    mainPlot = Plots.HomeWindowGraph(mainWinObj.home, mainWinObj)  # Create Graph Object for the HomePage
    self.accFrame.UpdateLabel("Todas")
def evaluate_results(test_img_idx, incisor_list, final_fit_list):
    """Uses the Dice coefficient to evaluate the similarity between the final
    landmarks given by the model and the ground-truth landmarks of the test
    image."""
    test_img = task2.load([test_img_idx])[0]
    test_lms_list = load_landmarks(test_img_idx, incisor_list)
    dice_scores = []
    height, width, _ = test_img.shape
    with Timer("Evaluating Results"):
        print("..Dice similarity score")
        for ind, incisor in enumerate(incisor_list):
            image1 = np.zeros((height, width), np.uint8)
            image2 = np.zeros((height, width), np.uint8)
            X1 = test_lms_list[ind].as_matrix()  # X1 - ground truth
            cv2.fillPoly(image1, np.int32([X1]), 255)
            X2 = final_fit_list[ind].as_matrix()  # X2 - best fit
            cv2.fillPoly(image2, np.int32([X2]), 255)
            dice = np.sum(image1[image2 == 255]) * 2.0 / (np.sum(image1) + np.sum(image2))
            print("....for incisor %02d - %.2f" % (incisor, dice))
            dice_scores.append(dice)
    if save_plots or show_plots:
        Plots.plot_results(np.arange(len(dice_scores)), incisor_list, dice_scores,
                           test_img_idx, show=show_plots, save=save_plots)
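# A quick standalone sanity check of the Dice formula used above, on two
# synthetic 255-valued masks (illustrative only, not part of the pipeline):
import numpy as np

a = np.zeros((10, 10), np.uint8); a[2:8, 2:8] = 255    # |A| = 36 pixels
b = np.zeros((10, 10), np.uint8); b[4:10, 4:10] = 255  # |B| = 36 pixels, overlap 16
dice = np.sum(a[b == 255]) * 2.0 / (np.sum(a) + np.sum(b))
print(dice)  # 2*|A n B| / (|A| + |B|) = 2*16 / (36 + 36) ~ 0.44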
def __new__(cls, NEvt=5, pbeam=5., filename=None, rootfilename=None):
    if cls.__instance is None:
        print('Simulation.__new__: creating the Simulation object')
        print('-------------------')
        cls.__instance = super(Simulation, cls).__new__(cls)
        cls.__Rnd.seed(int(cls.__RandomSeed))
        cls._NEvt = NEvt
        cls._pbeam = pbeam
        cls._nufile = filename
        cls._rootfilename = rootfilename
        cls._nuStrt = nuPrdStrt.nuSTORMPrdStrght(filename)
        cls._plots = plots.Plots()
        # Summarise initialisation
        cls.print(cls)
    return cls.__instance
m3 = M.Mode(600, 500, 1)
m4 = M.Mode(500, 450, 1)
m5 = M.Mode(100, 50, 1)
m6 = M.Mode(600, 250, 1)
m7 = M.Mode(500, 250, 0.5)
m8 = M.Mode(550, 250, 10)
m9 = M.Mode(550, 400, 10)
m10 = M.Mode(550, 450, 20)
m11 = M.Mode(560, 450, 2)
# m1 = M.Mode(501, 499, 1)
# m2 = M.Mode(500.1, 300, 0.1)
# m3 = M.Mode(5000.01, 5000, 0.1)

Modes = [m1, m2, m3, m10, m5, m6]  # , m7, m8, m9, m10, m11]
E_electronic = 0.005

(energies, intensities) = genMultiModePoints(threshold, Modes, E_electronic, 11)

wide = [0.01] * 11
med = [0.005] * 11
skinny = [0.001] * 11

energies.reverse()
intensities.reverse()
points = Plots.genSpectrum(energies, intensities, skinny)
#Plots.plotSpectrum(points[0], points[1], "N Modes")
#raw_input("Press ENTER to exit ")
def get_estimate(asm_list, incisor_list, test_img_idx, show_bbox_dist=False, show_app_models=False,
                 show_finding_bbox=False, show_autoinit_bbox=False, show_autoinit_lms=False, save=False):
    """Finds an initial estimate for all the incisors in the incisor_list."""
    global plot_bbox_dist
    global plot_app_models
    global plot_finding_bbox
    global plot_autoinit_bbox
    global plot_autoinit_lms
    global save_plots
    global save_dir
    global jaw_split
    plot_bbox_dist = show_bbox_dist
    plot_app_models = show_app_models
    plot_finding_bbox = show_finding_bbox
    plot_autoinit_bbox = show_autoinit_bbox
    plot_autoinit_lms = show_autoinit_lms
    save_plots = save
    save_dir = "Plots/auto_init/test_img_%02d/" % (test_img_idx)

    with Timer("Finding Initial Estimate automatically"):
        if any(incisor < 5 for incisor in incisor_list):  # upper incisor
            is_upper = True
            with Timer("..for upper incisors", dots="...."):
                [(w1U, h1U), (w2U, h2U)] = get_big_bbox(is_upper, test_img_idx)
        if any(incisor > 4 for incisor in incisor_list):  # lower incisor
            is_upper = False
            with Timer("..for lower incisors", dots="...."):
                [(w1L, h1L), (w2L, h2L)] = get_big_bbox(is_upper, test_img_idx)
        print("")  # just for elegant printing on screen

        init_list = []
        test_img = task2.load([test_img_idx])[0]
        img_org = test_img.copy()
        test_img = task2.enhance(test_img, skip_amf=True)
        for index, incisor in enumerate(incisor_list):
            # Assume all teeth have more or less the same width
            if incisor < 5:
                ind = incisor
                bbox = [(w1U + (ind - 1) * (w2U - w1U) / 4, h1U),
                        (w1U + ind * (w2U - w1U) / 4, h2U)]
            else:
                ind = incisor - 4
                bbox = [(w1L + (ind - 1) * (w2L - w1L) / 4, h1L),
                        (w1L + ind * (w2L - w1L) / 4, h2L)]
            center = np.mean(bbox, axis=0)
            Plots.plot_autoinit(test_img, jaw_split, lowest_error_bbox=bbox, directory=save_dir,
                                title="initial_estimate_bbox_incisor_%d" % (incisor), wait=True,
                                show=plot_autoinit_bbox, save=False)  # save=save_plots
            init = asm_list[index].sm.mean_shape.scale_to_bbox(bbox).translate(center)
            Plots.plot_landmarks_on_image([init], img_org, directory=save_dir,
                                          title="initial_estimate_lms_incisor_%d" % (incisor),
                                          show=plot_autoinit_lms, save=False, color=(0, 255, 0))  # save=save_plots
            init_list.append(init)
    return init_list
__author__ = 'Pierzchalski'

import Plots as plots
import GetData as gd

params = [key for key in plots.paramsToIndexes.keys()]
plotIndex = 0

def myCmp(a, b):
    return cmp(sorted(plots.paramsToIndexes[a])[0],
               sorted(plots.paramsToIndexes[b])[0])

for param in sorted(params, myCmp):
    print("Plotting %d" % plotIndex)
    #plots.plotFn(param, plotIndex, fn=plots.fnPointwiseAverage(legendLabel="Assessment scores", assessment=True), save=False)
    #plots.plotFn(param, plotIndex, fn=plots.fnPointwiseAverage(legendLabel="Training scores", assessment=False), save=False)
    plots.plotFn(param, plotIndex, fn=plots.fnErrorBar(legendLabel="Assessment scores", assessment=True), save=False)
    plots.plotFn(param, plotIndex, fn=plots.fnErrorBar(legendLabel="Training scores", assessment=False), save=False)
    plots.setTitleAndAxes(plotIndex, title="", xLabel="Score index", yLabel="Score")
    plots.insertLegend(plotIndex, save=True)
    plots.copyReplaceParams(plotIndex, [param])
    plotIndex += 1
m2 = M.Mode(300, 200, 1)
m3 = M.Mode(600, 500, 1)
m4 = M.Mode(500, 450, 1)
m5 = M.Mode(100, 50, 1)
m6 = M.Mode(600, 250, 1)
m7 = M.Mode(500, 250, 0.5)
m8 = M.Mode(550, 250, 10)
m9 = M.Mode(550, 400, 10)
m10 = M.Mode(550, 450, 20)
m11 = M.Mode(560, 450, 2)
# m1 = M.Mode(501, 499, 1)
# m2 = M.Mode(500.1, 300, 0.1)
# m3 = M.Mode(5000.01, 5000, 0.1)

Modes = [m1, m2, m3, m10, m5, m6]  # , m7, m8, m9, m10, m11]
E_electronic = 0.005

(energies, intensities) = genMultiModePoints(threshold, Modes, E_electronic, 11)

wide = [0.01] * 11
med = [0.005] * 11
skinny = [0.001] * 11

energies.reverse()
intensities.reverse()
points = Plots.genSpectrum(energies, intensities, skinny)
#Plots.plotSpectrum(points[0], points[1], "N Modes")
#raw_input("Press ENTER to exit ")
filter_params, fc_params = get_params(img_x, filters, fc)
params = filter_params + fc_params
print(params)

noise_py_x = model(X, img_x, filter_params, fc_params, 0.5, 0.5)
py_x = model(X, img_x, filter_params, fc_params, 0.0, 0.0)
y_x = T.argmax(py_x, axis=1)

cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))
updates = RMSprop(cost, params, lr=0.001)

train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)

for i in range(10000):
    for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
        #print(end)
        #print(trX.shape)
        #if end <= trX.shape[0]:
        cost = train(trX[start:end], trY[start:end])
        print(cost)
        #write(str(i) + ": " + str(start) + ": " + str(cost) + "\n")
        # if end % 3072 == 0:
    Plots.plot_filters(params[0].get_value(), channels, i, "")
    print("Predict........")
    print(np.mean(np.argmax(teY, axis=1) == predict(teX)))
    write(str(i) + ": " + str(np.mean(np.argmax(teY, axis=1) == predict(teX))))
    write("\n")
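# The RMSprop helper used above is not shown; a common Theano formulation that
# matches the call signature RMSprop(cost, params, lr=...) is sketched below
# (hyper-parameter defaults are assumptions):
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
    grads = T.grad(cost=cost, wrt=params)
    updates = []
    for p, g in zip(params, grads):
        acc = theano.shared(p.get_value() * 0.)  # running mean of squared gradients
        acc_new = rho * acc + (1 - rho) * g ** 2
        updates.append((acc, acc_new))
        updates.append((p, p - lr * g / T.sqrt(acc_new + epsilon)))
    return updates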
    category = train_1.target_names[train_1.target[i]]
    if category in category_count:
        counter = category_count[category]
        counter = counter + 1
    else:
        counter = 1
    category_count[category] = counter
    if "comp" in category:
        computer_count = computer_count + 1
    else:
        recreational_count = recreational_count + 1

print("Computer category count= " + str(computer_count))
print("Recreational category count= " + str(recreational_count))
Plots.barPlot(category_count)

####################################################################################
# Removing punctuations
# Tokenize string
stop = stopwords.words('english')
terms = []

def cleanDoc(doc):
    #print("Original\n", doc)
    cleaned = ""
    for i in word_tokenize(doc.lower()):
        if i not in stop:
            root = lmtzr.lemmatize(i)
            if root not in string.punctuation:
                cleaned = cleaned + " " + root
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
from scipy.stats import randint as sp_randint
from sklearn.grid_search import RandomizedSearchCV, GridSearchCV
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

network_data = pandas.read_csv('network_backup_dataset.csv')

# One Hot Encoding
one_hot_data, _, _ = one_hot_dataframe(network_data, ['Day of Week', 'Work-Flow-ID', 'File Name'], replace=True)
one_hot_subset = one_hot_data[one_hot_data['Week #'] <= 3]
for num in range(0, 5):
    Plots.plotWorkFlow(one_hot_subset, num, 'actual')

feature_cols = [col for col in one_hot_data.columns if col not in ['Size of Backup (GB)']]
X = one_hot_data[feature_cols]
y = one_hot_data['Size of Backup (GB)']
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3, random_state=3)

# Linear Regression
model = LinearRegression()
Functions.callClassifierFeatures(model, X_train, y_train, X_test, y_test, feature_cols, 'Linear Regression')
pred = model.predict(X_test)
Plots.scatterPlot(pred, y_test, 'Fitted', 'Actual', 'Fitted VS Actual', 'green', 'NetBkpLRFitvsActual')
Plots.residualPlot(pred, pred - y_test, 'Fitted', 'Residuals', 'Fitted VS Residual', 'green', 'NetBkpLRFitvsResidual')
import Plots as plts

# Creates the histogram to find the most common Pokémon typing.
plts.create_type_histogram()

# Creates the coloured scatter plot to see correlation between Base Total, Base Egg Steps, and Capture Rate.
plts.create_scatter_with_stats()

# Creates the linked plot to see correlation between Base Total, Base Egg Steps, and Capture Rate.
plts.linked_plot()

# Creates the unethical violin plot (no points, no sample size) to see what makes a Pokémon legendary.
plts.create_legend_violin_plot(False, False, "ViolinPlot")

# Creates the violin plot (including jittered points and sample size) to see what makes a Pokémon legendary.
plts.create_legend_violin_plot(True, True, "ViolinPlotWithPoints")

# Creates the boxplot to see what makes a Pokémon legendary.
plts.create_legend_boxplot()
def invert(station_block, cfgfile):
    config = SafeConfigParser()
    config.read(cfgfile)

    # Read parameters from an external file (integer case)
    block = station_block
    wz = config.getint(block, "wz")
    wsmooth = config.getint(block, "wsmooth")
    # Read parameters from an external file (float case)
    s1 = config.getfloat(block, "s1")
    depol = config.getfloat(block, "depol")
    leakrate = config.getfloat(block, "leakrate")
    mdr = config.getfloat(block, "mdr")
    dep_s = config.getfloat(block, "dep_s")
    dep_d = config.getfloat(block, "dep_d")
    maxint = config.getfloat(block, "maxint")
    clgrad = config.getfloat(block, "clgrad")
    clth = config.getfloat(block, "clth")
    rth1 = config.getfloat(block, "rth1")
    rth4 = config.getfloat(block, "rth4")
    rth6 = config.getfloat(block, "rth6")
    pblth = config.getfloat(block, "pblth")
    # Read parameters from an external file (string case)
    station = config.get(block, "prefix")
    ncpath_raw = config.get("Paths", "ncpath_raw")
    ncpath_out = config.get("Paths", "ncpath_out")
    ncfile_raw = config.get("Paths", "ncfile_raw")
    ncfile_out = config.get("Paths", "ncfile_out")
    yrfile_vis = config.get("Paths", "yrfile_vis")
    yrfile_ir = config.get("Paths", "yrfile_ir")
    power_file = config.get("Paths", "power_file")
    ncpath_out = ncpath_out + block + '/'
    # Read parameters from an external file (boolean case)
    PlotRaw = config.getboolean("Output", "PlotRaw")
    PlotCal = config.getboolean("Output", "PlotCal")
    PlotBeta = config.getboolean("Output", "PlotBeta")
    PlotDep = config.getboolean("Output", "PlotDep")
    PlotAlpha = config.getboolean("Output", "PlotAlpha")
    NCDout = config.getboolean("Output", "NCDout")
    NCDmonth = config.getboolean("Output", "NCDmonth")

    if os.path.isfile(ncpath_raw + station + ncfile_raw):
        print "Opening file: ", ncpath_raw + station + ncfile_raw
        ### Read Time, Data (mV), Height (km)
        ds = Dataset(ncpath_raw + station + ncfile_raw)
        ch1 = ds.variables["ch1"][:]
        ch2 = ds.variables["ch2"][:]
        ch3 = ds.variables["ch3"][:]
        z = ds.variables["alt"][:]
        times = ds.variables["time"]
        day_0 = datetime(year=ds.YEAR, month=ds.MONTH, day=ds.DAY)
        height = ds.HEIGHT
        x = num2date(times[:], units=times.units)
        ds.close()
    else:
        print "Unable to open file: ", ncpath_raw + station + ncfile_raw
        exit()

    ch1 = ch1.T
    ch2 = ch2.T
    ch3 = ch3.T

    ### Number of profiles and vertical levels
    NX = len(x)
    NZ = len(z)

    ### Background removed and range-corrected signal intensity
    for it in range(NX):
        coeff = np.polyfit(z[-1000:], ch1[-1000:, it], 1)
        pol = np.poly1d(coeff)
        ch1[:, it] = (ch1[:, it] - pol(z)) * z**2
        ###
        coeff = np.polyfit(z[-1000:], ch2[-1000:, it], 1)
        pol = np.poly1d(coeff)
        ch2[:, it] = (ch2[:, it] - pol(z)) * z**2
        ###
        coeff = np.polyfit(z[-2000:], ch3[-2000:, it], 1)
        pol = np.poly1d(coeff)
        ch3[:, it] = (ch3[:, it] - pol(z)) * z**2

    ### Smoothing for IR channel
    if wsmooth > 1:
        for it in range(NX):
            ch3[:, it] = np.convolve(ch3[:, it], np.ones(wsmooth) / wsmooth, mode='same')
    if Debug:
        print "Max Value for channel 1: {} mV km2".format(np.nanmax(ch1))
        print "Max Value for channel 2: {} mV km2".format(np.nanmax(ch2))
        print "Max Value for channel 3: {} mV km2".format(np.nanmax(ch3))

    ### Power Correction
    if Debug:
        print "Performing power corrections..."
    c1, c2, c3 = Calibration.power(x, ncpath_out + power_file)
    for ix in range(NX):
        ch1[:, ix] = ch1[:, ix] * c1[ix]
        ch2[:, ix] = ch2[:, ix] * c2[ix]
        ch3[:, ix] = ch3[:, ix] * c3[ix]

    ### Polarization correction (Experimental!)
    depol_estimated = Calibration.depol(ch1, ch2, z)
    if depol_estimated > 1.0:
        if Debug:
            print "Using depol={} instead of depol={}".format(depol_estimated, depol)
        depol = depol_estimated
    para = ch1                             # Parallel component - Visible channel
    perp = (ch2 - ch1 * leakrate) / depol  # Perpendicular component - Visible channel
    intvis = para + perp                   # Visible channel (total)
    intir = ch3                            # Infrared channel
    with np.errstate(divide='ignore', invalid='ignore'):
        dep = np.where(para == 0, np.nan, perp / para)  # Volume linear depolarization ratio

    if PlotRaw:
        print "Plotting raw data..."
        Plots.show_raw(x, intvis, z, ncpath_out + "raw_vis.png", zmax=16)
        Plots.show_raw(x, intir, z, ncpath_out + "raw_ir.png", zmax=16)

    ### Overlap Correction
    if Debug:
        print "Performing overlap corrections..."
    yrvis = Calibration.overlap(ncpath_out + yrfile_vis, z)
    yrir = Calibration.overlap(ncpath_out + yrfile_ir, z)
    for ix in range(NX):
        intvis[:, ix] = intvis[:, ix] * yrvis
        intir[:, ix] = intir[:, ix] * yrir

    ### Calibration
    factor = Calibration.vis_factor(intvis, z)
    intvis = intvis * factor  # Attenuated Backscatter Coefficient at 532 nm
    color_r = Calibration.color_ratio(intvis, intir, z, maxint)
    intir = intir / color_r   # Attenuated Backscatter Coefficient at 1064 nm
    if Debug:
        print "Calibration factor - Visible channel: {}".format(factor)
        print "Calibration factor - IR channel: {}".format(1.0 / color_r)
        print "Vis. channel: min={}, max={}".format(np.nanmin(intvis), np.nanmax(intvis))
        print "IR channel: min={}, max={}".format(np.nanmin(intir), np.nanmax(intir))

    ### REBIN function: resizes a vector
    if NZ % wz == 0:
        NZ = NZ / wz
        lvis = np.full((NZ, NX), np.nan)
        lir = np.full((NZ, NX), np.nan)
        ldep = np.full((NZ, NX), np.nan)
        lz = np.mean(z.reshape(-1, wz), axis=1)
        DZ = lz[1] - lz[0]
        DZinv = 1.0 / DZ
        with warnings.catch_warnings():
            # I expect to see RuntimeWarnings in this block
            warnings.simplefilter("ignore", category=RuntimeWarning)
            for it in range(NX):
                if not np.all(np.isnan(intvis[:, it])) and not np.all(np.isnan(intir[:, it])):
                    lvis[:, it] = np.nanmean(intvis[:, it].reshape(-1, wz), axis=1)
                    lir[:, it] = np.nanmean(intir[:, it].reshape(-1, wz), axis=1)
                    ldep[:, it] = np.nanmean(dep[:, it].reshape(-1, wz), axis=1)
        if Debug:
            print "REBIN successful. Vertical resolution: {} m".format(1000 * DZ)
    else:
        raise SystemExit("STOP: Rebin not performed: verify your configuration")

    ### Molecular profiles
    alpha_m, beta_m = Physics.rayleigh(1000.0 * lz, 532.0, height)

    ### Cloud Detection
    zb, zt = CloudDetect.cloud_height(lvis, lir, lz, clgrad, clth)  # Detect Cloud Base above 240 m
    rf, pbl, invtop = CloudDetect.phenomena(lvis, lir, ldep, lz, zb, rth1, rth4, pblth)
    if PlotCal:
        print "Plotting calibrated signal..."
        Plots.show_cal(x, intvis, z, zb, zt, ncpath_out + "cal_vis.png")
        Plots.show_cal(x, intir, z, zb, zt, ncpath_out + "cal_ir.png")
        #Plots.show_cal(x,intvis,z,pbl,invtop,ncpath_out+"cloud_vis.png")
        #Plots.show_cloud(x,intvis,z,pbl,5*rf,ncpath_out+"cloud_vis.png")

    ### Inversion
    ### Vertical indexes
    iz_100 = int(round(0.10 * DZinv)) - 1
    iz_120 = int(round(0.12 * DZinv)) - 1
    iz_150 = int(round(0.15 * DZinv)) - 1
    iz_450 = int(round(0.45 * DZinv)) - 1
    iz_600 = int(round(0.60 * DZinv)) - 1
    iz_9km = int(round(9.00 * DZinv)) - 1
    iz_18km = int(round(18.0 * DZinv)) - 1

    ### Optical properties for aerosols
    ext_vis = np.full((NZ, NX), np.nan)  # Extinction coefficient - Visible
    adr = np.full((NZ, NX), np.nan)      # Particulate depolarization ratio

    ### Use the Fernald's algorithm - High clouds case
    for ix in range(NX):
        if rf[ix] > 0 or invtop[ix] < 0.21:
            continue
        # profile_ir = lir[:, ix]
        profile_vis = 1.0 * lvis[:, ix]
        if np.all(np.isnan(profile_vis)):
            continue
        ### In order to increase the signal-to-noise ratio at inversion height.
        ### This is the boundary condition for the inversion method and
        ### the whole profiles depend on this boundary condition
        iz_inv = int(round(invtop[ix] * DZinv)) - 1
        profile_vis[iz_inv - 1] = np.nanmean(profile_vis[iz_inv - 2:iz_inv + 1])
        ### We assume that the molecular contribution
        ### is dominant at the inversion height
        if zb[ix] < 3.0:
            continue
        else:
            bsc_ini = beta_m[iz_inv - 1] * 1E-4  # Here I made a little modification to the Shimizu code
        alpha, beta = Physics.fernald(profile_vis, lz, alpha_m, beta_m, s1, invtop[ix], bsc_ini)
        extmin = min(alpha[iz_150:1 + max(iz_inv - iz_150, iz_150 + 1)])
        ###
        for itc in range(1, 16):
            if extmin > -1E-5:
                break
            alpha, beta = Physics.fernald(profile_vis, lz, alpha_m, beta_m, s1, invtop[ix], bsc_ini * 2**itc)
            extmin = min(alpha[iz_150:1 + max(iz_inv - iz_150, iz_150 + 1)])
        ###
        sr1 = (beta + beta_m) / beta_m  # Backscatter ratio
        adr1 = (ldep[:, ix] * (sr1 + sr1 * mdr - mdr) - mdr) / (sr1 - 1 + sr1 * mdr - ldep[:, ix])  # Aerosols depolarization ratio
        ext_vis[iz_100:iz_inv, ix] = alpha[iz_100:iz_inv]
        adr[iz_100:iz_inv, ix] = adr1[iz_100:iz_inv]
        ### Is this necessary?
        if zb[ix] < 9:
            iz_zb = int(round(zb[ix] * DZinv)) - 1
            zmax = min(zt[ix], 9)
            iz_zt = int(round(zmax * DZinv)) - 1
            ext_vis[iz_zb:iz_zt, ix] = np.nan

    ### Verify variability?
    for ix in range(NX):
        profile = ext_vis[iz_120:iz_450, ix]
        if np.all(np.isnan(profile)):
            continue
        else:
            extstd = np.nanstd(profile)
            if extstd > rth6:
                rf[ix] = 6
                ext_vis[:, ix] = np.nan

    time_series = lvis[iz_600, :] / (ext_vis[iz_600, :] / s1 + beta_m[iz_600])
    with np.errstate(invalid='ignore'):
        mask = ext_vis[iz_600, :] >= 0
    if mask.sum() > 0:
        ext_int_r = np.median(time_series[mask])
    else:
        ext_int_r = 1e11
    if Debug:
        print "Calibration constant: {}".format(ext_int_r)

    ### Use the Fernald's algorithm - Low clouds case
    for ix in range(NX):
        if np.isnan(zb[ix]) or zb[ix] >= 3 or invtop[ix] < 0.21:
            continue
        profile_vis = 1.0 * lvis[:, ix]
        if np.all(np.isnan(profile_vis)):
            continue
        iz_inv = int(round(invtop[ix] * DZinv)) - 1
        profile_vis[iz_inv - 1] = np.nanmean(profile_vis[iz_inv - 2:iz_inv + 1])
        ### This boundary condition assumes a transmittance = 1 below 1 km.
        ### Above 1 km, we follow the same criterion as Shimizu.
        ### This is a very poor estimate
        if invtop[ix] < 1.0:
            bsc_ini = profile_vis[iz_inv] / ext_int_r - beta_m[iz_inv]
        else:
            bsc_ini = profile_vis[iz_inv] / ext_int_r * 0.8 * np.exp(invtop[ix] / 4.5) - beta_m[iz_inv]
        if bsc_ini < 0:
            if Debug:
                print "Changing boundary condition bsc_ini = {}".format(bsc_ini)
            bsc_ini = beta_m[iz_inv - 1] * 1E-4
        alpha, beta = Physics.fernald(profile_vis, lz, alpha_m, beta_m, s1, invtop[ix], bsc_ini)
        ###
        sr1 = (beta + beta_m) / beta_m
        adr1 = (ldep[:, ix] * (sr1 + sr1 * mdr - mdr) - mdr) / (sr1 - 1 + sr1 * mdr - ldep[:, ix])
        ###
        ext_vis[iz_100:iz_inv, ix] = alpha[iz_100:iz_inv]
        adr[iz_100:iz_inv, ix] = adr1[iz_100:iz_inv]
        ###
        iz_zb = int(round(zb[ix] * DZinv)) - 1
        zmax = min(zt[ix], 9)
        iz_zt = int(round(zmax * DZinv)) - 1
        ext_vis[iz_zb:iz_zt, ix] = np.nan

    absc532 = lvis / ext_int_r
    absc1064 = lir / ext_int_r
    ###
    dr = (adr - dep_s) * (1 + dep_d) / (1 + adr) / (dep_d - dep_s)
    with np.errstate(invalid='ignore'):
        dr[dr < 0] = 0.0
        dr[dr > 1] = 1.0
    dust = ext_vis * dr
    sphere = ext_vis * (1.0 - dr)
    bsc = ext_vis / s1
    if Debug:
        print "Max. Att. Backscatter: {}".format(np.nanmax(absc532))
    if PlotBeta:
        print "Plotting attenuated backscatter coefficients..."
        Plots.show_beta(x, 1000.0 * absc532, lz, ncpath_out + 'absc_vis.png', zmax=18)
        Plots.show_beta(x, 1000.0 * absc1064, lz, ncpath_out + 'absc_ir.png', zmax=18)
    if PlotAlpha:
        print "Plotting extinction coefficients..."
        Plots.show_alpha(x, 1000.0 * dust, lz, zb, zt, invtop, ncpath_out + 'dust.png', zmax=9)
        Plots.show_alpha(x, 1000.0 * sphere, lz, zb, zt, invtop, ncpath_out + 'sphere.png', zmax=9)
    if PlotDep:
        print "Plotting depolarization ratio..."
        with np.errstate(invalid='ignore'):
            ldep[absc532 < 1E-7] = np.nan
        Plots.show_dep(x, ldep, lz, ncpath_out + 'dep.png', zmax=18)
    if NCDout:
        print "Creating NetCDF file: {}".format(ncpath_out + ncfile_out)
        NZ1 = iz_18km + 1
        NZ2 = iz_9km + 1
        InOut.save_ncd(ncpath_out + ncfile_out, station, x, lz, absc532, absc1064,
                       ldep, dust, sphere, zb, zt, invtop, NZ1, NZ2)
    if NCDmonth:
        print "Creating monthly NetCDF files"
        NZ1 = iz_18km + 1
        NZ2 = iz_9km + 1
        InOut.monthly_ncd(ncpath_out, station, x, lz, absc532, absc1064,
                          ldep, dust, sphere, zb, zt, invtop, NZ1, NZ2)
import SimpleOscillator as SF
import matplotlib.pyplot as plt
import Gaussian as G
import numpy as np

def calculateFCsAndEnergies(deltaE, deltaQ, w_wavenumbers, wprime_wavenumbers, widths):
    """
    wprime must be greater than w
    widths is a list of desired width at half height for each peak
    """
    intensities = DF.genIntensities(deltaE, deltaQ, w_wavenumbers, wprime_wavenumbers)
    energies = DF.genEnergies(deltaE, w_wavenumbers, wprime_wavenumbers)
    return [energies, intensities]

dQ = 1
dE = 0.005
w = 501
wprime = 499

wide = [0.01] * 11
med = [0.005] * 11
skinny = [0.001] * 11

[energies, intensities] = calculateFCsAndEnergies(0.005, dQ, w, wprime, skinny)
L = Plots.genSpectrum(energies, intensities, skinny)
Plots.plotSpectrum(L[0], L[1], "DeltaQ = " + str(dQ))
for i in range(0, 11):
    DF.diffFreqOverlap([i, 499], [0, 501], 100)

raw_input("Press ENTER to exit ")
import matplotlib as mpl  # assumed missing from the snippet; required for mpl.use below
mpl.use('Agg')
import matplotlib.pyplot as plt
import os

import load

data = load.load_data("cifar10")
X = data[0]
img_depth = data[4]
img_x = data[5]

import Plots

print(X.shape)
Plots.plot_testimg(X[:10, :])

batch_size = 32
num_f1 = 50
num_f2 = 50
num_f3 = 50
f1 = (num_f1, img_depth, 11, 11)
f2 = (num_f2, num_f1, 2, 2)
f3 = (num_f3, num_f2, 3, 3)
filters = [f1]
image_shape = (batch_size, img_depth, img_x, img_x)

CONV = model.Meta_ConvNet(n_epochs=100, batch_size=batch_size, learning_rate=0.005, momentum=0.9,
def plotStuff(self, X, Ws, num_plotted):
    Plots.plot_filters(Ws[0].get_value(), self.image_shape[1], num_plotted, title="layer1")
    if len(Ws) > 1:
        Plots.plot_filters(Ws[1].get_value(), self.image_shape[1], num_plotted, title="layer2")
    samples = X[:self.batch_size, :, :, :]
    predictions, conv_volume0, conv_volume1, conv_volume2, conv_volume3, conv_volume4, deconv_volume = self.predict(samples)
    Plots.plot_predictions_grid(samples, predictions, num_plotted, self.image_shape)
    Plots.plot_volume(conv_volume0, num_plotted, title="layer0 ")
    Plots.plot_volume(conv_volume1, num_plotted, title="layer1 ")
    Plots.plot_volume(conv_volume2, num_plotted, title="layer2 ")
    Plots.plot_volume(conv_volume3, num_plotted, title="layer3 ")
    Plots.plot_volume(conv_volume4, num_plotted, title="layer4")
    Plots.plot_costs(self.costs)
    return
def fit(self, X, y=None):
    X = X.astype(np.float32)
    self.costs = []
    N = X.shape[0]
    print("Fit %d examples (%d, %d, %d, %d) over %d epochs\n"
          % (N, X.shape[0], X.shape[1], X.shape[2], X.shape[3], self.n_epochs))
    count = 0
    num_plotted = 0
    for epoch in range(self.n_epochs):
        perm = np.random.permutation(N)
        #print("Epoch: %d\n" % epoch)
        for i in range(0, N, self.batch_size):
            if i + self.batch_size <= N:
                # plotting updates
                if count % 50 == 0:
                    self.plotStuff(X, self.Ws, num_plotted)
                    #self.reconstruct(y)
                    num_plotted += 1
                count += 1
                # continue with training
                rowindexes = perm[i:i + self.batch_size]
                minibatch_x = X[rowindexes, :, :, :]
                cost, out, g, outbefore = self.train(minibatch_x)
                print("\n(%d/%d) %d/%d cost: %f" % (epoch + 1, self.n_epochs, i, N, cost))
                print("output (min,max): (%f, %f)" % (np.min(out), np.max(out)))
                print("weights (min,max): (%f, %f)" % (np.min(self.Ws[0].get_value()), np.max(self.Ws[0].get_value())))
                print("grads (min,max): (%f, %f)" % (np.min(g), np.max(g)))
                print("outputshape:", out.shape)
                print("outbefore:", np.max(outbefore))
                with open('costs.txt', 'a') as f:
                    f.write("\n(%d/%d) %d/%d cost: %f \n" % (epoch + 1, self.n_epochs, i, N, cost))
                    f.write("output (min,max): (%f, %f) \n" % (np.min(out), np.max(out)))
                    f.write("weights (min,max): (%f, %f) \n" % (np.min(self.Ws[0].get_value()), np.max(self.Ws[0].get_value())))
                    f.write("grads (min,max): (%f, %f) \n" % (np.min(g), np.max(g)))
        self.costs.append(cost)
        Plots.plot_costs(self.costs)
        print("cost: ", cost)
        with open('costs.txt', 'a') as f:
            f.write('epoch %d) cost: %f \n' % (epoch, cost))

    # do some predictions:
    perm = np.random.permutation(N)
    rowindexes = perm[:self.batch_size]
    minibatch_x = X[rowindexes, :, :, :]
    predictions = self.predict(minibatch_x)
    self.samples = minibatch_x
    self.preds = predictions
    self.W1 = self.Ws[0].get_value()
    if len(self.Ws) > 1:
        self.W2 = self.Ws[1].get_value()
    pickle.dump(self.Ws, open("weights.p", "wb"))
    return self
        # TFT.fireup_tensorboard(logdir='probeview')
        # Plots.line([errors, self.validation_error_history])
        print("\nFinished Training")
        print("Training Cost: " + str(self.training_error_history[-1][1]))
        print("Training Error %: " + str(self.training_error_history[-1][1] * 100) + " %")
        print("Validation Error: " + str(self.validation_error_history[-1][1]))
        print("Validation Error %: " + str(self.validation_error_history[-1][1] * 100) + "%")
        # Plots.scatter([self.training_error_history, self.validation_error_history],
        #               ["Training Error", "Validation Error"])
        # Plots.plotWeights([self.grabbed_weigths_history])
        TFT.viewprep(sess)
        Plots.line([self.training_error_history, self.validation_error_history],
                   ["Training Cost", "Validation Error"])
        print("\nResults for Training Set")
        self.do_testing(self.case_manager.get_training_cases())
        print("\nResults for Testing Set")
        self.do_testing(self.case_manager.get_testing_cases())
        if self.config.mbsize > 0:  # Should run map test
            print("\nRunning Map Tests")
            map_batch_size = self.config.mbsize
            np.random.shuffle(case_list)  # Select random cases for this minibatch
            cases = case_list[:map_batch_size]
            self.do_testing(cases, grabvars=self.grabvars, scenario="mapping")

    def should_run_validation_test(self, step):
    for it in range(NX):
        coeff = np.polyfit(z[-2000:], data[-2000:, it], 1)
        pol = np.poly1d(coeff)
        data[:, it] = (data[:, it] - pol(z)) * z**2
        # data[:,it] = (data[:,it] - np.mean(data[-100:,it])) * z**2
elif varname == 'ch2':
    for it in range(NX):
        coeff = np.polyfit(z[-1000:], data[-1000:, it], 1)
        pol = np.poly1d(coeff)
        data[:, it] = (data[:, it] - pol(z)) * z**2
else:
    for it in range(NX):
        coeff = np.polyfit(z[-1000:], data[-1000:, it], 1)
        pol = np.poly1d(coeff)
        data[:, it] = (data[:, it] - pol(z)) * z**2

### Smoothing
if Smooth:
    print "Performing smoothing with parameter wsmooth:{}".format(wsmooth)
    for it in range(NX):
        data[:, it] = np.convolve(data[:, it], np.ones(wsmooth) / wsmooth, mode='same')

if Plot:
    Plots.show_raw(x, data, z, zmax=18.0, vmax=2)

axarr[1].plot(data[:, n1:n2], z, '-')
dr = SelectPoint(data[:, n1:n2], z)
plt.show()
import numpy as np
import matplotlib.pyplot as plt

from IO import *
import Plots
import Solver

result = Solver.solve()
print('Finished!')

# Plotting
save = 0  # ENTER: 0 for plotting on screen, 1 for saving frames as png

M, a, D, T, u0, v0, zeta0, Ntheta, Nphi, dt, Nt, m1, R1, zhat1, omega1, psi1 = read_data()
Phi, Theta = get_grid()
M = Plots.Mollweide(Theta, Phi)  # note: rebinds M, which read_data() also returned
if save:
    plt.close()
for i in range(Nt):
    data = result['z'][i]  # which data to plot
    # Mask data? suggested to do this for 'z' as currently diverging at poles
    data[:4][:] = np.zeros((1, Nphi))
    data[-4:][:] = np.zeros((1, Nphi))
    M.update(data)
    M.set_time(dt * i)
    # With quiver plot
    M.quiver(result['v'][i], result['u'][i])
    return hratio, hQvsQT, hQvscls, hrvscls

import Plots
import time

print 'running'
t0 = time.time()
hratio, hQvsQT, hQvscls, hrvscls = ChargeStudies(10000)

hratio.GetXaxis().SetTitle('Q_{SiPMs}/Q_{Total}'); hratio.GetYaxis().SetTitle('Entries')
hQvsQT.GetXaxis().SetTitle('Q_{Total}'); hQvsQT.GetYaxis().SetTitle('Q_{SiPMs}')
hQvscls.GetXaxis().SetTitle('Distance to closest SiPM (mm)'); hQvscls.GetYaxis().SetTitle('Q_{SiPMs}')
hrvscls.GetXaxis().SetTitle('Distance to closest SiPM (mm)'); hrvscls.GetYaxis().SetTitle('Q_{SiPMs}/Q_{Total}')

histos = [hratio, hrvscls, hQvsQT, hQvscls.ProfileX()]
c = Plots.PutInCanvas(histos, ['', '', 'zcol', ''])
c.cd(2)
histos[1].ProfileX().Draw('same')

#sim = Simulator( UniformCircle( 4., 5., 5.), 1e7, 0. )
#xy = sim.GetXYDistribution( FullInformation = True )
#xy = Tools.Arrays.FromHistogram( xy )
#xy = NEXT.TrackingPlane.Discretize( xy )
#psf = Tools.Arrays.MakePSF( xy )
#psf.SetMarkerStyle(20)
#psf.SetMarkerSize(1)
#fun = ROOT.TF1('fit','[0]/( 1 + [1]*x^2 )^1.5')
#fun.SetParameters( 1,1e-3 )
#fun.SetParLimits(0,0,10)
#fun.SetParLimits(1,0,10)
#psf.Fit( fun )
import pandas as pd
import Functions
from sklearn.linear_model import LinearRegression
from sklearn import linear_model
from sklearn import cross_validation
import Plots

data = pd.read_csv('housing_data.csv')
feature_cols = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
                'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
X = data[feature_cols]
y = data.MEDV
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3, random_state=3)

# Linear Regression
lm = LinearRegression()
predicted = Functions.callClassifierFeatures(lm, X_train, y_train, X_test, y_test, feature_cols, 'Linear Regression')

# Plotting
Plots.scatterPlot(predicted, y_test, 'Fitted', 'Actual', 'Fitted VS Actual LR', 'green', 'HousingLRScatterPlot')
Plots.residualPlot(predicted, (predicted - y_test), 'Fitted', 'Residual', 'Fitted VS Residual LR', 'blue', 'HousingLRResidualPlot')

# LR - cross val
predicted = Functions.callCrossVal(lm, X, y, 10, 'Linear Regression')
Plots.scatterPlot(predicted, y, 'Fitted', 'Actual', 'Fitted VS Actual LR-CV', 'green', 'HousingLRScatterPlotCV')
Plots.residualPlot(predicted, (predicted - y), 'Fitted', 'Residual', 'Fitted VS Residual LR-CV', 'blue', 'HousingLRResidualPlotCV')

# Polynomial Regression
Functions.polynomialRegression(lm, X_train, y_train, X_test, y_test, 6, 'Linear Regression')
Functions.polynomialRegressionCV(lm, X, y, 10, 6, 'Linear Regression')

# Ridge
ridge = linear_model.RidgeCV(alphas=[0.1, 0.01, 0.001])
Functions.callClassifierFeatures(ridge, X_train, y_train, X_test, y_test, feature_cols, 'Ridge')
print("The tuned alpha value selected for Ridge is: %.4f" % ridge.alpha_)
Functions.callCrossVal(ridge, X, y, 10, 'Ridge')

# Lasso
lasso = linear_model.LassoCV(alphas=[0.1, 0.01, 0.001])
path = ncpath_out + block + '/'
ncfile_out = "06_2017.nc"
if Debug:
    print "Opening file: ", path + ncfile_out

### Read Time, Data (mV), Height (km)
ds = Dataset(path + ncfile_out)
bsc532 = ds.variables["bsc532"][:]
bsc1064 = ds.variables["bsc1064"][:]
dep = ds.variables["dep"][:]
ext_d = ds.variables["ext_d"][:]
ext_s = ds.variables["ext_s"][:]
zb = ds.variables["zb"][:]
zt = ds.variables["zt"][:]
zinv = ds.variables["zinv"][:]
z1 = ds.variables["alt1"][:]
z2 = ds.variables["alt2"][:]
times = ds.variables["time"]
x = num2date(times[:], units=times.units)
ds.close()

### Number of profiles and vertical levels
NX = len(x)
NZ1 = len(z1)
NZ2 = len(z2)

#Plots.show_beta(x,bsc532.T,z1)
#Plots.show_beta(x,bsc1064.T,z1)
#Plots.show_dep(x,dep.T,z1)
#Plots.show_alpha(x,ext_d.T,z2,zb,zt,zinv,zmax=9.)
Plots.show_alpha(x, ext_s.T, z2, zb, zt, zinv, zmax=9.)
        Eground += mode.groundEnergy
    energies = [E_el - Eground] * len(states)
    for i in range(len(states)):
        state = states[i]
        for j in range(len(state)):
            energies[i] += Modes[j].excitedEnergy(state[j])
    return energies

m1 = M.Mode(501, 499, 1)
m2 = M.Mode(500.1, 300, 0.1)
m3 = M.Mode(5000.01, 5000, 0.1)
# m1 = M.Mode(500, 450, 1)
# m2 = M.Mode(300, 200, 20)
# m3 = M.Mode(600, 500, 0.5)

Modes = [m1, m2, m3]
E_electronic = 0.005

[intensities, states] = genMultiModeIntensities(Modes)
energies = genMultiModeEnergies(E_electronic, Modes, states)

wide = [0.01] * 11
med = [0.005] * 11
skinny = [0.001] * 11

points = Plots.genSpectrum(energies, intensities, med)
Plots.plotSpectrum(points[0], points[1], "3 Modes")
raw_input("Press ENTER to exit ")
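# Plots.genSpectrum / Plots.plotSpectrum are used throughout these spectrum
# scripts but not shown. The call sites (a list of line energies, intensities,
# and per-line widths at half height) suggest Gaussian broadening of a stick
# spectrum; a minimal sketch under that assumption (the grid range and point
# count are arbitrary choices):
import numpy as np

def genSpectrum(energies, intensities, widths, npts=2000):
    E = np.linspace(min(energies) - 0.05, max(energies) + 0.05, npts)
    y = np.zeros_like(E)
    for e0, amp, fwhm in zip(energies, intensities, widths):
        sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # convert FWHM to std dev
        y += amp * np.exp(-0.5 * ((E - e0) / sigma) ** 2)
    return [E, y]  # matches the points[0], points[1] indexing above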