def disc_norm():
    x = np.linspace(-3, 3, 100)
    y = st.norm.pdf(x, 0, 1)
    fig, ax = plt.subplots()
    fig.canvas.draw()
    ax.plot(x, y)

    fill1_x = np.linspace(-2, -1.5, 100)
    fill1_y = st.norm.pdf(fill1_x, 0, 1)
    fill2_x = np.linspace(-1.5, -1, 100)
    fill2_y = st.norm.pdf(fill2_x, 0, 1)
    ax.fill_between(fill1_x, 0, fill1_y, facecolor='blue', edgecolor='k', alpha=0.75)
    ax.fill_between(fill2_x, 0, fill2_y, facecolor='blue', edgecolor='k', alpha=0.75)

    for label in ax.get_yticklabels():
        label.set_visible(False)
    for tick in ax.get_xticklines():
        tick.set_visible(False)
    for tick in ax.get_yticklines():
        tick.set_visible(False)

    plt.rc("font", size=16)
    plt.xticks([-2, -1.5, -1])
    labels = [item.get_text() for item in ax.get_xticklabels()]
    labels[0] = r"$v_k$"
    labels[1] = r"$\varepsilon_k$"
    labels[2] = r"$v_{k+1}$"
    ax.set_xticklabels(labels)
    plt.ylim([0, .45])

    plt.savefig('discnorm.pdf')
    plt.clf()

def plots(i, tcodnt, temp, forecast, added_tcodnt, n_steps, path,
          rsdlsc, rsdlpctgc, sqrpctgc, a, b, c, d, e):
    fig = mp.figure(figsize=[15, 6])  # 5:2, 89 mm
    grid = mp.GridSpec(3, 36, wspace=1, hspace=0.4)
    ax = mp.subplot(grid[:2, :18])
    cpplot(i, tcodnt, temp, forecast, added_tcodnt, n_steps, path)
    # rscplot(i, tcodnt, rsdlsc, rsdlpctgc, sqrpctgc, path)
    ax = mp.subplot(grid[2, :18])
    im1 = colorbar(ax, tcodnt, rsdlsc, i, 0, -1, c**2)
    ax.set_yticks([])
    ax = mp.subplot(grid[2, 21:35])
    im2 = colorbar(ax, tcodnt, rsdlsc, i, a, b, int(c / 2))
    ax.set_yticks([])
    ax = mp.subplot(grid[:2, 21:35])
    partial_plot_for_ahead_interval(i, tcodnt, temp, forecast, added_tcodnt,
                                    n_steps, path, rsdlpctgc, a, b)
    ax1 = mp.subplot(grid[:, 35])
    clb1 = mp.colorbar(im2, cax=ax1, extend='both')
    ax2 = mp.subplot(grid[:, 18])
    clb2 = mp.colorbar(im1, cax=ax2, extend='both')
    clb1.set_label('residuals', fontsize=12)  # fixed size; the original referenced an undefined `size`
    # ax = mp.subplot(grid[:2, 32:35])
    # partial_plot_for_ahead_interval(i, tcodnt, temp, forecast, added_tcodnt, n_steps, path, rsdlpctgc, d, e)
    # ax = mp.subplot(grid[2, 32:35])
    # im3 = colorbar(ax, tcodnt, rsdlsc, i, d, e, int(c / 5))
    # ax.set_yticks([])
    # ax3 = mp.subplot(grid[:, 35])
    # clb3 = mp.colorbar(im3, cax=ax3, extend='both')
    # clb3.set_label('residuals', fontsize=12, color='grey')
    mp.savefig((str(path) + '\\results\\Accuracy_Graph_%d.jpg') % i,
               format='jpg', dpi=2000)

def plot_figure(data, row_labels, col_labels, abs_val, plot_bin, labels, time, logInterval):
    colors = ['#0077FF', '#FF0000', '#00FF00', 'magenta']
    cmaps = []
    for i in colors:
        cmaps.append(mpl.colors.LinearSegmentedColormap.from_list('m1', ['black', i]))

    dim, rows, cols = data.shape
    vmax = np.amax(data)
    vmin = np.amin(data)
    fig, ax = plt.subplots()
    c = np.zeros([rows, cols, 4])
    for i in range(dim):
        c = np.add(c, cmaps[i]((data[i] - vmin) / (vmax - vmin)))
    c = np.clip(c, 0, 1)
    pc = ax.imshow(c, aspect='auto', interpolation='none')
    # ax.set_title(labels[0])
    fontsize = 12  # assumed default; the original referenced an undefined `fontsize`
    fig.text(0.5, 0.04, 'Bin # along rod length', ha='center', fontsize=fontsize)
    fig.text(0.0, 0.5, 'Time (s)', va='center', rotation='vertical', fontsize=fontsize)
    # ax.add_patch(Rectangle((0.5, time/logInterval), cols-1, 10, edgecolor='w', facecolor='none'))
    # ax.add_patch(Rectangle((bin_id, 0.5/logInterval), 1, rows-0.5/logInterval, edgecolor='w', facecolor='none'))
    # plt.savefig('kymo.png', bbox_inches='tight')
    plt.savefig('3rods_1long_kymo.pdf')
    plt.show()

def make_fish(zoom=False):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))
    plt.plot(plot_limits['pitch'], plot_limits['rolldev'], '-g', lw=3)
    plt.plot(plot_limits['pitch'], -plot_limits['rolldev'], '-g', lw=3)
    plt.plot(pitch.midvals, roll.midvals, '.b', ms=1, alpha=0.7)

    p, r = make_ellipse()  # pitch, off nominal roll
    plt.plot(p, r, '-c', lw=2)

    gf = -0.08  # Fudge on pitch value for illustrative purposes
    plt.plot(greta['pitch'] + gf, -greta['roll'], '.r', ms=1, alpha=0.7)
    plt.plot(greta['pitch'][-1] + gf, -greta['roll'][-1], 'xr', ms=10, mew=2)

    if zoom:
        plt.xlim(46.3, 56.1)
        plt.ylim(4.1, 7.3)
    else:
        plt.ylim(-22, 22)
        plt.xlim(40, 180)
    plt.xlabel('Sun pitch angle (deg)')
    plt.ylabel('Sun off-nominal roll angle (deg)')
    plt.title('Mission off-nominal roll vs. pitch (5 minute samples)')
    plt.grid()
    plt.tight_layout()
    plt.savefig('fish{}.png'.format('_zoom' if zoom else ''))

def plot_scatter_matrix(df, plotdir):
    "Plot scatter matrix."
    print('plotting scatter matrix, this may take a while')
    plt.clf()
    pd_scatter_matrix(df, figsize=(16, 16))
    plt.suptitle("Scatter Matrix", fontsize=14)
    plt.savefig(plotdir + 'scatter_matrix.png')

def scree_plot(pca_obj, fname=None):
    '''
    Scree plot for variance & cumulative variance by component from PCA.

    Arguments:
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output:
        - scree plot
    '''
    components = pca_obj.n_components_
    variance = pca_obj.explained_variance_ratio_  # fixed: was `pca`, an undefined name
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components])
    plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11)
    plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best')
    plt.tight_layout()
    if fname is not None:
        plt.savefig(fname)
        plt.close()
    else:
        plt.show()
    return

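# A hedged usage sketch for scree_plot, not part of the original source: the
# random data, component count, and output filename below are illustrative
# assumptions only.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 10))        # made-up data matrix
pca = PCA(n_components=10).fit(X)     # scree_plot expects a *fitted* instance
scree_plot(pca, fname='scree.png')    # writes the plot instead of showing it
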
def draw_ranges_for_parameters(data, title='', save_path='./pictures/'):

    parameters = data.columns.values.tolist()

    # remove flight name parameter (don't mutate the list while iterating over it)
    if 'flight_name' in parameters:
        parameters.remove('flight_name')

    flight_names = np.unique(data['flight_name'])

    print(len(flight_names))

    for parameter in parameters:
        plt.figure()

        axis = plt.gca()

        # ax.set_xticks(numpy.arange(0,1,0.1))
        axis.set_yticks(flight_names)
        axis.tick_params(labelright=True)
        axis.set_ylim([94., 130.])

        plt.grid()

        plt.title(title)
        plt.xlabel(parameter)
        plt.ylabel('flight name')

        colors = iter(cm.rainbow(np.linspace(0, 1, len(flight_names))))

        for flight in flight_names:
            temp = data[data.flight_name == flight][parameter]

            plt.plot([np.min(temp), np.max(temp)], [flight, flight],
                     c=next(colors), linewidth=2.0)

        plt.savefig(save_path + title + '_' + parameter + '.jpg')
        plt.close()

def test_minimized_rasterized():
    # This ensures that the rasterized content in the colorbars is
    # only as thick as the colorbar, and doesn't extend to other parts
    # of the image. See #5814. While the original bug exists only
    # in Postscript, the best way to detect it is to generate SVG
    # and then parse the output to make sure the two colorbar images
    # are the same size.
    from xml.etree import ElementTree

    np.random.seed(0)
    data = np.random.rand(10, 10)

    fig, ax = plt.subplots(1, 2)
    p1 = ax[0].pcolormesh(data)
    p2 = ax[1].pcolormesh(data)

    plt.colorbar(p1, ax=ax[0])
    plt.colorbar(p2, ax=ax[1])

    buff = io.BytesIO()
    plt.savefig(buff, format='svg')

    buff = io.BytesIO(buff.getvalue())
    tree = ElementTree.parse(buff)
    width = None
    for image in tree.iter('image'):
        # Element attributes are read with .get(); string indexing raises TypeError
        if width is None:
            width = image.get('width')
        else:
            assert image.get('width') == width

def do_plot(mode, content, wide):
    global style
    style.apply(mode, content, wide)

    data = np.load("data/prr_AsAu_%s%s.npz" % (content, wide))

    AU, TAU = np.meshgrid(-data["Au_range_dB"], data["tau_range"])
    Zu = data["PRR_U"]
    Zs = data["PRR_S"]

    assert TAU.shape == AU.shape == Zu.shape, \
        "The inputs TAU, AU, PRR_U must have the same shape for plotting!"

    plt.clf()

    if mode in ("sync",):
        # Plot the inverse power ratio, sync signal is stronger for positive ratios
        CSf = plt.contourf(TAU, AU, Zs, levels=(0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 1.0),
                           colors=("1.0", "0.75", "0.5", "0.25", "0.15", "0.0"),
                           origin="lower")
        CS2 = plt.contour(CSf, colors=("r",)*5 + ("w",),
                          linewidths=(0.75,)*5 + (1.0,), origin="lower")
    else:
        CSf = plt.contourf(TAU, AU, Zs, levels=(0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 1.0),
                           colors=("1.0", "0.75", "0.5", "0.25", "0.15", "0.0"),
                           origin="lower")
        CS2f = plt.contour(CSf, levels=(0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
                           colors=4*("r",) + ("w",),
                           linewidths=(0.75,)*4 + (1.0,), origin="lower")
        #CS2f = plt.contour(TAU, -AU, Zu, levels=(0.9, 1.0), colors=("0.0",), linewidths=(1.0,), origin="lower")

    if content in ("unif",):
        CSu = plt.contourf(TAU, AU, Zu, levels=(0.2, 1.0), hatches=("////",),
                           colors=("0.75",), origin="lower")
        CS2 = plt.contour(CSu, levels=(0.2,), colors=("r",),
                          linewidths=(1.0,), origin="lower")

    style.annotate(mode, content, wide)

    plt.axis([data["tau_range"][0], data["tau_range"][-1],
              -data["Au_range_dB"][-1], -data["Au_range_dB"][0]])

    plt.ylabel(r"Signal power ratio ($\mathrm{SIR}$)", labelpad=2)
    plt.xlabel(r"Time offset $\tau$ ($/T$)", labelpad=2)

    plt.savefig("pdf/prrc2_%s_%s%s_z.pdf" % (mode, content, wide))

def vis_result(image, seg, gt, title1='Segmentation', title2='Ground truth', savefile=None):
    indices = np.where(seg >= 0.5)
    indices_gt = np.where(gt >= 0.5)

    im_norm = image / image.max()
    rgb_image = color.gray2rgb(im_norm)
    multiplier = [0., 1., 1.]
    multiplier_gt = [1., 1., 0.]

    im_seg = rgb_image.copy()
    im_gt = rgb_image.copy()
    im_seg[indices[0], indices[1], :] *= multiplier
    im_gt[indices_gt[0], indices_gt[1], :] *= multiplier_gt

    fig = plt.figure()
    a = fig.add_subplot(1, 2, 1)
    plt.imshow(im_seg)
    a.set_title(title1)
    a = fig.add_subplot(1, 2, 2)
    plt.imshow(im_gt)
    a.set_title(title2)

    if savefile is None:
        plt.show()
    else:
        plt.savefig(savefile)
    plt.close()

def Test(self):
    test_Dir = "Result"
    if not os.path.exists(test_Dir):
        os.makedirs(test_Dir)

    test_Label_List = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5]
    test_Label_Pattern = np.zeros((16, 10))
    test_Label_Pattern[np.arange(16), test_Label_List] = 1
    feed_Dict = {
        self.noise_Placeholder: np.random.uniform(-1., 1., size=[16, self.noise_Size]),
        self.label_for_Fake_Placeholder: test_Label_Pattern,
        self.is_Training_Placeholder: False
    }  # Batch is constant in the test.
    global_Step, mnist_List = self.tf_Session.run(self.test_Tensor_List, feed_dict=feed_Dict)

    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(4, 4)
    gs.update(wspace=0.05, hspace=0.05)

    for index, mnist in enumerate(mnist_List):
        ax = plt.subplot(gs[index])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(mnist.reshape(28, 28), cmap='Greys_r')

    plt.savefig('%s/S%d.png' % (test_Dir, global_Step), bbox_inches='tight')
    plt.close()

def run_test(fld, seeds, plot2d=True, plot3d=True, add_title="", view_kwargs=None,
             show=False, scatter_mpl=False, mesh_mvi=True):
    interpolated_fld = viscid.interp_trilin(fld, seeds)
    seed_name = seeds.__class__.__name__
    if add_title:
        seed_name += " " + add_title

    try:
        if not plot2d:
            raise ImportError
        from viscid.plot import vpyplot as vlt
        from matplotlib import pyplot as plt

        plt.clf()
        # plt.plot(seeds.get_points()[2, :], fld)
        mpl_plot_kwargs = dict()
        if interpolated_fld.is_spherical():
            mpl_plot_kwargs['hemisphere'] = 'north'
        vlt.plot(interpolated_fld, **mpl_plot_kwargs)
        plt.title(seed_name)

        plt.savefig(next_plot_fname(__file__, series='2d'))
        if show:
            plt.show()

        if scatter_mpl:
            plt.clf()
            vlt.plot2d_line(seeds.get_points(), fld, symdir='z', marker='o')
            plt.savefig(next_plot_fname(__file__, series='2d'))
            if show:
                plt.show()
    except ImportError:
        pass

    try:
        if not plot3d:
            raise ImportError
        from viscid.plot import vlab

        _ = get_mvi_fig(offscreen=not show)

        try:
            if mesh_mvi:
                mesh = vlab.mesh_from_seeds(seeds, scalars=interpolated_fld)
                mesh.actor.property.backface_culling = True
        except RuntimeError:
            pass

        pts = seeds.get_points()
        p = vlab.points3d(pts[0], pts[1], pts[2], interpolated_fld.flat_data,
                          scale_mode='none', scale_factor=0.02)
        vlab.axes(p)
        vlab.title(seed_name)
        if view_kwargs:
            vlab.view(**view_kwargs)

        vlab.savefig(next_plot_fname(__file__, series='3d'))
        if show:
            vlab.show(stop=True)
    except ImportError:
        pass

def default_run(self):
    """
    Simulate codewords with the Sum-Prod and Max-Prod algorithms across
    variance levels, combine the results in one plot, save the figure,
    and finally display it.
    :return:
    """
    if not os.path.exists("./graphs"):
        os.makedirs("./graphs")
    self.save_time = str(int(time.time()))
    self.simulate(Decoder.SUM_PROD)
    self.compute_error()
    plt.plot([math.log10(x) for x in self.variance_levels],
             [math.log10(y) for y in self.bit_error_probability],
             "ro-", label="Sum-Prod")
    self.simulate(Decoder.MAX_PROD)
    self.compute_error()
    plt.plot([math.log10(x) for x in self.variance_levels],
             [math.log10(y) for y in self.bit_error_probability],
             "g^--", label="Max-Prod")
    plt.legend(loc=2)
    plt.title("Hamming Decoder Factor Graph Simulation Results\n" +
              r"$\log_{10}(\sigma^2)$ vs. $\log_{10}(P_e)$" +
              " for Max-Prod & Sum-Prod Algorithms\n" +
              "Sample Size n = %(codewords)s Codewords \n Variance Levels = %(levels)s"
              % {"codewords": str(self.iterations), "levels": str(self.variance_levels)})
    plt.xlabel(r"$\log_{10}(\sigma^2)$")
    plt.ylabel(r"$\log_{10}(P_e)$")
    plt.savefig("graphs/%(time)s-max-prod-sum-prod-%(num_codewords)s-codewords-"
                "variance-bit_error_probability.png"
                % {"time": self.save_time, "num_codewords": str(self.iterations)},
                bbox_inches="tight")
    plt.show()

def build_hist(self, coverage, show=False, save=False, save_fn="max_hist_plot"):
    """
    Build a histogram of the match maxes to visualize match_count.
    Might be used to determine a reasonable threshold.

    @param coverage: the average coverage for a single nt
    @param show: show the visualization of the match maxes
    @param save: save the plot to disk
    @param save_fn: file name to save under, or else the default is used
    @return: the histogram array
    """
    #import matplotlib
    #matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    maxes = self.match_count.max(1)  # get maxes along 1st dim
    h = plt.hist(maxes, bins=self.match_count.shape[0])  # figure out where the majority is

    plt.ylabel("Frequency")
    plt.xlabel("Count per index")
    plt.title("Frequency count histogram")

    if show:
        plt.show()
    if save:
        plt.savefig(save_fn, dpi=160, frameon=False)
    return h[0]

def compare_chebhist(dname, mylambda, c, Nbin=25):

    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, can not display compare histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda), 1)

        # print(c)
        cheb_file_content = '\n'.join([str(st) for st in c])

        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v = y[1:] - y[:-1]

        plt.clf()
        # overlay the histogram and the Chebyshev estimate
        # (plt.hold was removed from matplotlib; overlaying is now the default)
        plt.hist(mylambda, Nbin)
        plt.plot(u, v, "r.", markersize=10)

        filename = 'data/' + dname + '.png'
        plt.savefig(filename)  # save before show so the figure is not lost
        plt.show()

        cheb_filename = 'data/' + dname + '.cheb'
        f = open(cheb_filename, 'w+')
        f.write(cheb_file_content)
        f.close()

def plotErrorBars(dict_to_plot, x_lim, y_lim, xlabel, y_label, title, out_file,
                  margin=[0.05, 0.05], loc=2):

    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(y_label)

    if y_lim is None:
        y_lim = [1 * float("Inf"), -1 * float("Inf")]

    max_val_seen_y = y_lim[1] - margin[1]
    min_val_seen_y = y_lim[0] + margin[1]
    print(min_val_seen_y, max_val_seen_y)
    max_val_seen_x = x_lim[1] - margin[0]
    min_val_seen_x = x_lim[0] + margin[0]
    handles = []

    for k in dict_to_plot:
        means, stds, x_vals = dict_to_plot[k]

        min_val_seen_y = min(min(np.array(means) - np.array(stds)), min_val_seen_y)
        max_val_seen_y = max(max(np.array(means) + np.array(stds)), max_val_seen_y)

        min_val_seen_x = min(min(x_vals), min_val_seen_x)
        max_val_seen_x = max(max(x_vals), max_val_seen_x)

        handle = plt.errorbar(x_vals, means, yerr=stds)
        handles.append(handle)
        print(max_val_seen_y)

    plt.xlim([min_val_seen_x - margin[0], max_val_seen_x + margin[0]])
    plt.ylim([min_val_seen_y - margin[1], max_val_seen_y + margin[1]])
    plt.legend(handles, dict_to_plot.keys(), loc=loc)
    plt.savefig(out_file)

def plot_precision_recall_n(y_true, y_scores, model_name):
    '''
    Takes the model, plots precision and recall curves
    '''
    precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_scores)
    precision_curve = precision_curve[:-1]
    recall_curve = recall_curve[:-1]
    pct_above_per_thresh = []
    number_scored = len(y_scores)

    for value in pr_thresholds:
        num_above_thresh = len(y_scores[y_scores >= value])
        pct_above_thresh = num_above_thresh / float(number_scored)
        pct_above_per_thresh.append(pct_above_thresh)

    pct_above_per_thresh = np.array(pct_above_per_thresh)
    plt.clf()
    fig, ax1 = plt.subplots()
    ax1.plot(pct_above_per_thresh, precision_curve, 'b')
    ax1.set_xlabel('percent of population')
    ax1.set_ylabel('precision', color='b')
    ax2 = ax1.twinx()
    ax2.plot(pct_above_per_thresh, recall_curve, 'r')
    ax2.set_ylabel('recall', color='r')

    name = model_name
    plt.title(name)
    plt.savefig("Eval/{}.png".format(name))

def display(spectrum):
    template = np.ones(len(spectrum))

    # Get the plot ready and label the axes
    pyp.plot(spectrum)
    max_range = int(math.ceil(np.amax(spectrum) / standard_deviation))
    for i in range(0, max_range):
        pyp.plot(template * (mean + i * standard_deviation))
    pyp.xlabel('Units?')
    pyp.ylabel('Amps Squared')
    pyp.title('Mean Normalized Power Spectrum')
    if 'V' in Options:
        pyp.show()
    if 'v' in Options:
        tokens = sys.argv[-1].split('.')
        filename = tokens[0] + ".png"
        if os.path.isfile(filename):
            # use a separate variable so the built-in input() is not shadowed
            answer = input("Error: Plot file already exists! Overwrite? (y/n)\n")
            while answer != 'y' and answer != 'n':
                answer = input("Please enter either 'y' or 'n'.\n")
            if answer == 'y':
                pyp.savefig(filename)
            else:
                print("Plot not written.")
        else:
            pyp.savefig(filename)

def export(data, F, k):
    '''Write data to a png image

    Arguments
    ---------
    data : numpy.ndarray
        array containing the data to be written as png image
    F : float
        feed rate of the current configuration
    k : float
        rate constant of the current configuration
    '''
    figsize = tuple(s / 72.0 for s in data.shape)
    fig = plt.figure(figsize=figsize, dpi=72.0, facecolor='white')
    fig.add_axes([0, 0, 1, 1], frameon=False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(data, cmap=plt.cm.RdBu_r, interpolation='bicubic')
    plt.gci().set_clim(0, 1)

    filename = './study/F{:03d}-k{:03d}.png'.format(int(1000 * F), int(1000 * k))
    plt.savefig(filename, dpi=72.0)
    plt.close()

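# Hedged usage sketch for export(), not from the original source: the random
# field stands in for a simulated concentration array, and the F/k values are
# illustrative; assumes the ./study directory already exists.
import numpy as np

field = np.random.default_rng(1).random((256, 256))  # stand-in for simulation output
export(field, F=0.035, k=0.065)   # writes ./study/F035-k065.png
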
def draw_img_for_viewing_ice(self):
    #print("Press 'p' to save PNG.")
    global colmax
    global colmin
    fig = P.figure(num=None, figsize=(13.5, 5), dpi=100, facecolor='w', edgecolor='k')
    cid1 = fig.canvas.mpl_connect('key_press_event', self.on_keypress_for_viewing)
    cid2 = fig.canvas.mpl_connect('button_press_event', self.on_click)
    canvas = fig.add_subplot(121)
    canvas.set_title(self.filename)
    self.axes = P.imshow(self.inarr, origin='lower', vmax=colmax, vmin=colmin)
    self.colbar = P.colorbar(self.axes, pad=0.01)
    self.orglims = self.axes.get_clim()
    canvas = fig.add_subplot(122)
    canvas.set_title("Angular Average")
    maxAngAvg = (self.inangavg).max()
    numQLabels = len(eDD.iceHInvAngQ.keys()) + 1
    labelPosition = maxAngAvg / numQLabels
    for i, j in eDD.iceHInvAngQ.items():
        P.axvline(j, 0, colmax, color='r')
        P.text(j, labelPosition, str(i), rotation="45")
        labelPosition += maxAngAvg / numQLabels
    P.plot(self.inangavgQ, self.inangavg)
    P.xlabel("Q (A-1)")
    P.ylabel("I(Q) (ADU/srad)")
    pngtag = original_dir + "peakfit-gdvn_%s.png" % (self.filename)
    P.savefig(pngtag)
    print("%s saved." % (pngtag))
    P.close()

def draw_stat(actual_price, action):
    price_list = []
    x_list = []
    # idx = np.where(actual_price == 0)[0]
    # print idx
    # print actual_price[np.where(actual_price < 2000)]
    # idx = [0] + idx.tolist()
    # print idx
    # for i in range(len(idx)-1):
    #     price_list.append(actual_price[idx[i]+1:idx[i+1]-1])
    #     x_list.append(range(idx[i]+i+1, idx[i+1]+i-1))
    # for i in range(len(idx)-1):
    #     print x_list[i]
    #     print price_list[i]
    #     plt.plot(x_list[i], price_list[i], 'r')
    x_list = range(1, 50)
    price_list = actual_price[1:50]
    plt.plot(x_list, price_list, 'k')
    for i in range(1, 50):
        style = 'go'
        if action[i] == 1:
            style = 'ro'
        plt.plot(i, actual_price[i], style)
    plt.ylim(2140, 2144.2)
    # plt.show()
    plt.savefig("action.png")

def make_bar(
    x,
    y,
    f_name,
    title=None,
    legend=None,
    x_label=None,
    y_label=None,
    x_ticks=None,
    y_ticks=None,
):
    fig = plt.figure()

    if title is not None:
        plt.title(title, fontsize=16)
    if x_label is not None:
        plt.xlabel(x_label)  # fixed: the labels were applied to the wrong axes
    if y_label is not None:
        plt.ylabel(y_label)
    if x_ticks is not None:
        plt.xticks(x, x_ticks)
    if y_ticks is not None:
        plt.yticks(y_ticks)

    plt.bar(x, y, align="center")

    if legend is not None:
        plt.legend(legend)

    plt.savefig(f_name)
    plt.close(fig)

def main():
    parser = argparse.ArgumentParser(description="""Compute subset of users who
        rated at least 10 movies and plot fraction of users satisfied as a
        function of inventory size.""")
    parser.add_argument("infilename", help="Read from this file.", type=open)
    args = parser.parse_args()
    ratings = read_inputs(args.infilename)
    ratings = ratings.drop("timestamp", axis=1)
    movie_rankings = find_movie_rankings(ratings)
    ratings = ratings.drop("rating", axis=1)
    user_rankings = find_user_rankings(ratings, movie_rankings)
    num_users = user_rankings.user_id.unique().size
    num_movies = movie_rankings.shape[0]
    user_rankings = clean_rankings(user_rankings)
    us_levels_100 = find_satisfaction(user_rankings, num_users, num_movies)
    us_levels_90 = find_satisfaction(user_rankings, num_users, num_movies,
                                     satisfaction_level=0.9)
    rc('text', usetex=True)
    plt.title('Percent of Users Satisfied vs Inventory Size in the MovieLens Dataset')
    plt.xlabel('Inventory Size')
    plt.ylabel('Percent of Users Satisfied')
    plt.plot(us_levels_100, 'b', label=r'$100\% \ satisfaction$')
    plt.plot(us_levels_90, 'r--', label=r'$90\% \ satisfaction$')
    plt.legend()
    d = datetime.datetime.now().isoformat()
    plt.savefig('user_satisfaction_%s.png' % d)

def plot_jacobian(A, name, cmap=plt.cm.coolwarm, normalize=True, precision=1e-6):
    """
    Customized visualization of jacobian matrices for observing
    sparsity patterns
    """
    # plt.subplots() already creates a figure; the original opened an extra,
    # unused one with plt.figure() first
    fig, ax = plt.subplots()

    if normalize is True:
        plt.imshow(A, interpolation='none', cmap=cmap,
                   norm=mpl.colors.Normalize(vmin=-1., vmax=1.))
    else:
        plt.imshow(A, interpolation='none', cmap=cmap)

    plt.colorbar(format=ticker.FuncFormatter(fmt))

    ax.spy(A, marker='.', markersize=0, precision=precision)

    ax.spines['right'].set_visible(True)
    ax.spines['bottom'].set_visible(True)
    ax.xaxis.set_ticks_position('top')
    ax.yaxis.set_ticks_position('left')

    xlabels = np.linspace(0, A.shape[0], 5, True, dtype=int)
    ylabels = np.linspace(0, A.shape[1], 5, True, dtype=int)

    plt.xticks(xlabels)
    plt.yticks(ylabels)

    plt.savefig(name, bbox_inches='tight', pad_inches=0.05)
    plt.close()

    return

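# Hedged usage sketch for plot_jacobian(), not part of the original source:
# the tridiagonal test matrix and output filenames are illustrative
# assumptions (and the module-level `fmt` formatter is assumed defined).
import numpy as np

n = 50
A = (np.diag(np.full(n, -2.0))
     + np.diag(np.ones(n - 1), 1)
     + np.diag(np.ones(n - 1), -1))                    # classic 1-D Laplacian stencil
plot_jacobian(A, 'jacobian_norm.png')                  # normalized color scale
plot_jacobian(A, 'jacobian_raw.png', normalize=False)  # raw values
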
def plot(self, path, num_bins=0):
    """
    draw a histogram to represent the data
    :param num_bins: number of bars; default is (number of different words in
        the file) / 2, or 50 if that would be too large (see '#default of num_bins')
    """
    # plot data
    mu = self.Average  # mean of distribution
    sigma = self.StdE  # standard deviation of distribution
    if num_bins == 0:  # default of num_bins
        num_bins = min([round(self.NumWord / 2), 50])
    # the histogram of the data
    n, bins, patches = plt.hist(list(self.WordCount.values()), num_bins,
                                density=True, facecolor='green', alpha=0.5)
    # add a 'best fit' line: normal pdf with the sample mean and std
    # (mlab.normpdf was removed from matplotlib, so compute it directly)
    y = (1.0 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((bins - mu) / sigma) ** 2)
    plt.plot(bins, y, 'r--')
    plt.xlabel('Word Count')
    plt.ylabel('Probability(how many words have this word count)')
    plt.title(r'Histogram of word count: $\mu=' + str(self.Average) +
              r'$, $\sigma=' + str(self.StdE) + '$')

    # Tweak spacing to prevent clipping of ylabel
    plt.subplots_adjust(left=0.15)
    plt.savefig(path)
    plt.close()

def make_entity_plot(filename, title, fixed_noip, fixed_ip, dynamic_noip, dynamic_ip):
    plt.figure(figsize=(12, 5))

    plt.title("Settings comparison - " + title)

    plt.xlabel('Time (ms)', fontsize=12)
    plt.xlim([0, 62000])

    x = 0
    barwidth = 0.5
    bargroupspacing = 1.5

    fixed_noip_mean, fixed_noip_conf = conf_stats(fixed_noip)
    fixed_ip_mean, fixed_ip_conf = conf_stats(fixed_ip)
    dynamic_noip_mean, dynamic_noip_conf = conf_stats(dynamic_noip)
    dynamic_ip_mean, dynamic_ip_conf = conf_stats(dynamic_ip)

    values = [fixed_noip_mean, fixed_ip_mean, dynamic_noip_mean, dynamic_ip_mean]
    errs = [fixed_noip_conf, fixed_ip_conf, dynamic_noip_conf, dynamic_ip_conf]

    y_pos = numpy.arange(len(values))
    plt.barh(y_pos, values, xerr=errs,
             align='center', color=['r', 'b', 'r', 'b'], ecolor='black', alpha=0.7)
    plt.yticks(y_pos, ["Fixed | no I.P.", "Fixed | I.P.",
                       "Dynamic | no I.P.", "Dynamic | I.P."])
    plt.savefig(output_file(filename))
    plt.clf()

def plot_dpi_dpr_distribution(args, dpis, dprs, diagnoses):
    print(log.INFO, 'Plotting estimate distributions...')
    diagnoses = np.array(diagnoses)
    diagnoses[(0.25 <= diagnoses) & (diagnoses <= 0.75)] = 0.5

    # Setup plot
    fig, ax = plt.subplots()
    pt.setup_axes(plt, ax)

    biomarkers_str = args.method if args.biomarkers is None else ', '.join(args.biomarkers)
    ax.set_title('DP estimation using {0} at {1}'.format(biomarkers_str,
                                                         ', '.join(args.visits)))
    ax.set_xlabel('DP')
    ax.set_ylabel('DPR')

    plt.scatter(dpis, dprs, c=diagnoses, edgecolor='none', s=25.0,
                vmin=0.0, vmax=1.0, cmap=pt.progression_cmap, alpha=0.5)

    # Plot legend
    # noinspection PyUnresolvedReferences
    rects = [mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_cn + (0.5,), linewidth=0),
             mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_mci + (0.5,), linewidth=0),
             mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_ad + (0.5,), linewidth=0)]
    labels = ['CN', 'MCI', 'AD']
    legend = ax.legend(rects, labels, fontsize=10, ncol=len(rects),
                       loc='upper center', framealpha=0.9)
    legend.get_frame().set_edgecolor((0.6, 0.6, 0.6))

    # Draw or save the plot
    plt.tight_layout()
    if args.plot_file is not None:
        plt.savefig(args.plot_file, transparent=True)
    else:
        plt.show()
    plt.close(fig)

def plot_wav_fft(wav_filename, desc=None):
    plt.clf()
    plt.figure(num=None, figsize=(6, 4))
    sample_rate, X = scipy.io.wavfile.read(wav_filename)
    spectrum = np.fft.fft(X)
    freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)

    plt.subplot(211)
    num_samples = 200  # integer, so it can be used as a slice index below
    plt.xlim(0, num_samples / sample_rate)
    plt.xlabel("time [s]")
    plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
    plt.grid(True)

    plt.subplot(212)
    plt.xlim(0, 5000)
    plt.xlabel("frequency [Hz]")
    plt.xticks(np.arange(5) * 1000)
    if desc:
        desc = desc.strip()
        fft_desc = desc[0].lower() + desc[1:]
    else:
        fft_desc = wav_filename
    plt.title("FFT of %s" % fft_desc)
    plt.plot(freq, abs(spectrum), linewidth=5)
    plt.grid(True)

    plt.tight_layout()

    rel_filename = os.path.split(wav_filename)[1]
    plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
                bbox_inches='tight')

def make_overview_plot(filename, title, noip_arrs, ip_arrs):
    plt.title("Inner parallelism - " + title)

    plt.ylabel('Time (ms)', fontsize=12)

    x = 0
    barwidth = 0.5
    bargroupspacing = 1.5

    for z in zip(noip_arrs, ip_arrs):
        noip, ip = z
        noip_mean, noip_conf = conf_stats(noip)
        ip_mean, ip_conf = conf_stats(ip)

        b_noip = plt.bar(x, noip_mean, barwidth, color='r', yerr=noip_conf,
                         ecolor='black', alpha=0.7)
        x += barwidth

        b_ip = plt.bar(x, ip_mean, barwidth, color='b', yerr=ip_conf,
                       ecolor='black', alpha=0.7)
        x += bargroupspacing

    plt.xticks([0.5, 2.5, 4.5], ['50k', '100k', '200k'], rotation='horizontal')

    fontP = FontProperties()
    fontP.set_size('small')

    plt.legend([b_noip, b_ip],
               ('no inner parallelism', 'inner parallelism'),
               prop=fontP, loc='upper center',
               bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=2)

    plt.ylim([0, 62000])
    plt.savefig(output_file(filename))
    plt.clf()

def make_line(
    x,
    y,
    f_name,
    title=None,
    legend=None,
    x_label=None,
    y_label=None,
    x_ticks=None,
    y_ticks=None,
):
    fig = plt.figure()

    if title is not None:
        plt.title(title, fontsize=16)
    if x_label is not None:
        plt.xlabel(x_label)  # fixed: the labels were applied to the wrong axes
    if y_label is not None:
        plt.ylabel(y_label)
    if x_ticks is not None:
        plt.xticks(x, x_ticks)
    if y_ticks is not None:
        plt.yticks(y_ticks)

    if isinstance(y[0], list):
        for data in y:
            plt.plot(x, data)
    else:
        plt.plot(x, y)

    if legend is not None:
        plt.legend(legend)

    plt.savefig(f_name)
    plt.close(fig)

    fig, ax = plt.subplots()
    sns.barplot(data=rmses_ens2)
    ax.set(ylabel='RMSE', title="mean=%.3f, CI=%.3f-%.3f"
           % (mean_ens2, CI_ens2[0], CI_ens2[1]))
    plt.xticks()
    plt.savefig("plots/multiply_%s_rmse.pdf" % neuron_type)
    print('rmses: ', rmses_ens, rmses_ens_out, rmses_ens2)
    print('means: ', mean_ens, mean_ens_out, mean_ens2)
    print('confidence intervals: ', CI_ens, CI_ens_out, CI_ens2)
    np.savez('data/multiply_%s_results.npz' % neuron_type,
             rmses_ens=rmses_ens, rmses_ens_out=rmses_ens_out, rmses_ens2=rmses_ens2)
    return rmses_ens2

# rmses_lif = run(neuron_type=LIF())
# rmses_alif = run(neuron_type=AdaptiveLIFT())
# rmses_wilson = run(neuron_type=WilsonEuler(), dt=0.00005)
# rmses_durstewitz = run(neuron_type=DurstewitzNeuron())
#     # , load_w="data/multiply_w.npz", load_df="data/multiply_DurstewitzNeuron()_df.npz")

rmses_lif = np.load("data/multiply_LIF()_results.npz")['rmses_ens2']
rmses_alif = np.load("data/multiply_AdaptiveLIFT()_results.npz")['rmses_ens2']
rmses_wilson = np.load("data/multiply_WilsonEuler()_results.npz")['rmses_ens2']
rmses_durstewitz = np.load("data/multiply_DurstewitzNeuron()_results.npz")['rmses_ens2']

rmses = np.vstack((rmses_lif, rmses_alif, rmses_wilson, rmses_durstewitz))
nt_names = ['LIF', 'ALIF', 'Wilson', 'Durstewitz']
fig, ax = plt.subplots()
sns.barplot(data=rmses.T)
ax.set(ylabel='RMSE')
plt.xticks(np.arange(len(nt_names)), tuple(nt_names), rotation=0)
plt.savefig("figures/multiply_all_rmses.pdf")

                    train_source, train_steps, epochs=bc_config.EPOCHS,
                    validation_data=test_source, validation_steps=validation_steps)

timestamp = strftime('%d-%b-%y_%H:%M:%S')

try:
    os.mkdir(bc_config.MODEL_DIR)
except FileExistsError:
    pass
model.save(bc_config.MODEL_DIR + '/' + timestamp + '.h5')

if bc_config.PLOT_LOSSES:
    import matplotlib.pyplot as plt
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train Loss', 'Val. Loss'], loc='upper left')
    try:
        os.mkdir(bc_config.PLOT_DIR)
    except FileExistsError:
        pass
    plt.savefig(bc_config.PLOT_DIR + '/' + timestamp + '.png')

print('done')

def main(fig_num, invert=False):
    if fig_num == '1':
        # Subplots: http://aplpy.readthedocs.io/en/stable/howto_subplot.html
        fig = plt.figure(figsize=(7, 5))  # height > optimal aspect ratio has no effect

        # MOST image alone to show structure (varying dark/bright regions)
        fmost = aplpy.FITSFigure(XMMPATH + '/../most/G309.2-0.6.fits',
                                 figure=fig, subplot=(1, 2, 1))
        fmost.recenter(ra2deg(13, 46, 40), -62.87, width=18./60, height=18./60)
        fmost_cmap = 'afmhot'
        if invert:
            fmost_cmap += '_r'
        fmost.show_colorscale(vmin=1e-4, vmax=2e-1, stretch='arcsinh', cmap=fmost_cmap)
        # MOST beam notes:
        # - must specify parameters or else constructor will unsuccessfully
        #   search for FITS keywords BMAJ, BMIN, BPA.
        # - hatching is too sparse at our image size, and so has no effect
        # Reference: Whiteoak & Green, 1996A&AS..118..329W
        fmost.add_beam(major=42. / np.sin(-62.9 * np.pi / 180) * u.arcsecond,
                       minor=42 * u.arcsecond, angle=0,
                       corner='bottom left', hatch=None, pad=1, color='white')
        if invert:
            fmost.beam.set_edgecolor('black')
            fmost.beam.set_facecolor('gray')
        format_ticks_and_labels(fmost, invert=invert)
        #fmost.tick_labels.hide_y()
        #fmost.axis_labels.hide_y()

        # Broadband X-ray image with sparse MOST contours to guide the eye
        # Base image has 2.5" pixels and is smoothed with 2px Gaussian (5")
        fxmm = aplpy.FITSFigure(
            XMMPATH + '/repro_merged_no_holes/corrected-800-3300_bin0_gauss2.fits',
            figure=fig, subplot=(1, 2, 2))
        fxmm.recenter(ra2deg(13, 46, 40), -62.87, width=18./60, height=18./60)
        # Weird bug:
        # vmin=9e-6, vmax=5e-5 works w/colorbar, but vmin=1e-5, vmax=5e-5 fails
        fxmm_cmap = 'cubehelix'
        if invert:
            fxmm_cmap += '_r'
        # Idea: log scale here to show faint emission; linear scale in full
        # fov image to show sources
        #fxmm.show_colorscale(vmin=0.2e-5, vmax=2e-5, stretch='linear', cmap=fxmm_cmap)
        fxmm.show_colorscale(vmin=2e-6, vmax=4e-5, stretch='log', cmap=fxmm_cmap)

        # TODO get colorbars working for both subplots
        # https://github.com/aplpy/aplpy/issues/119
        # Warning: bottom colorbar not fully implemented =(
        #fxmm.add_colorbar(location='right', log_format=True)
        #fxmm.colorbar.set_font(size='small')
        ## TODO WARNING: tick labeling not good.
        ##fxmm.colorbar.set_axis_label_text(r'Counts $s^{-1}$')  # Give in text
        ##fxmm.colorbar.set_axis_label_font(size='small')

        most = fits.open(XMMPATH + '/../most/G309.2-0.6.fits')
        #lev = np.linspace(0.02, 0.2, 10)
        #lev = np.logspace(np.log10(0.01), np.log10(0.2), 10)
        lev = np.logspace(-2, -0.5, 4)  # Sparse log contours, 0.01 to 0.1*sqrt(10)
        lev_color = 'cyan'
        if invert:
            lev_color = 'gray'
        fxmm.show_contour(data=most, levels=lev, colors=lev_color, alpha=1)
        format_ticks_and_labels(fxmm, invert=invert)
        fxmm.tick_labels.hide_y()
        fxmm.axis_labels.hide_y()

        fxmm.refresh()
        fig.tight_layout()
        fig.canvas.draw()
        if invert:
            plt.savefig('fig_snr_xmm_most_invert.pdf', dpi=300)
        else:
            plt.savefig('fig_snr_xmm_most.pdf', dpi=300)

    elif fig_num == '2':
        # Subplots: http://aplpy.readthedocs.io/en/stable/howto_subplot.html
        fig = plt.figure(figsize=(8, 10))  # height > optimal aspect ratio has no effect
        cmap = 'afmhot'
        if invert:
            cmap = 'cubehelix_r'
            #cmap += '_r'

        # Broadband X-ray image with point source extractions and interpolation
        f = aplpy.FITSFigure(
            XMMPATH + '/repro_merged_no_holes/corrected-800-3300_bin0_gauss2.fits',
            figure=fig, subplot=(1, 2, 1))
        g = aplpy.FITSFigure(
            XMMPATH + '/repro_merged/corrected-800-3300_bin0_gauss2.fits',
            figure=fig, subplot=(1, 2, 2))

        for aplfig in [f, g]:
            aplfig.recenter(ra2deg(13, 46, 40), -62 - 52./60, width=30./60, height=36./60)
            #aplfig.show_colorscale(vmin=1e-6, vmax=4e-5, stretch='log', cmap=cmap)
            aplfig.show_colorscale(vmin=0.2e-5, vmax=3e-5, stretch='linear', cmap=cmap)
            format_ticks_and_labels(aplfig, invert=invert)
            aplfig.ticks.set_color('black')  # Because of FOV image
            if not invert:
                axleft_ticks = aplfig._ax1.yaxis.get_ticklines()
                axright_ticks = aplfig._ax2.yaxis.get_ticklines()
                axleft_minorticks = aplfig._ax1.yaxis.get_minorticklines()
                axright_minorticks = aplfig._ax2.yaxis.get_minorticklines()
                axleft_ticks[4].set_color('white')
                axleft_ticks[6].set_color('white')
                axright_ticks[5].set_color('white')
                for idx in [18, 20, 22, 24, 26, 28, 30, 32, 34, 36]:
                    axleft_minorticks[idx].set_color('white')
                for idx in [17, 19, 21, 23]:
                    axright_minorticks[idx].set_color('white')

        g.tick_labels.hide_y()
        g.axis_labels.hide_y()

        f.show_regions('regs-plot/all_point_sources.reg')
        #f.show_regions('regs/ann_000_100.reg')
        #f.show_regions('regs-plot/circ_200.reg')
        #f.show_regions('regs-plot/circ_300.reg')
        f.show_regions('regs/src.reg')  # Equivalent to circ_400, but changed color/width
        #f.show_regions('regs-plot/circ_500.reg')
        f.show_regions('regs/bkg.reg')  # Yes, do show background

        #f.image.figure.tight_layout()  # Use the matplotlib figure instance
        fig.tight_layout()
        fig.canvas.draw()
        if invert:
            plt.savefig('fig_snr_fullfov_invert.pdf', dpi=300)  # Drops into CWD
        else:
            plt.savefig('fig_snr_fullfov.pdf', dpi=300)  # Drops into CWD

    elif fig_num == '3':
        fig = plt.figure(figsize=(7, 5))
        cmap_lf = 'cubehelix'
        if invert:
            cmap_lf += '_r'
        cmap_ew = 'afmhot'
        if invert:
            cmap_ew += '_r'

        mg_line = aplpy.FITSFigure(XMMPATH + '/repro_merged/mg_lineflux_bin8_gauss2.fits',
                                   figure=fig, subplot=(2, 3, 1))
        si_line = aplpy.FITSFigure(XMMPATH + '/repro_merged/si_lineflux_bin8_gauss2.fits',
                                   figure=fig, subplot=(2, 3, 2))
        s_line = aplpy.FITSFigure(XMMPATH + '/repro_merged/s_lineflux_bin8_gauss2.fits',
                                  figure=fig, subplot=(2, 3, 3))
        mg_eqw = aplpy.FITSFigure(XMMPATH + '/repro_merged/mg_eqwidth_bin16_gauss2.fits',
                                  figure=fig, subplot=(2, 3, 4))
        si_eqw = aplpy.FITSFigure(XMMPATH + '/repro_merged/si_eqwidth_bin16_gauss2.fits',
                                  figure=fig, subplot=(2, 3, 5))
        s_eqw = aplpy.FITSFigure(XMMPATH + '/repro_merged/s_eqwidth_bin16_gauss2.fits',
                                 figure=fig, subplot=(2, 3, 6))

        mg_line.show_colorscale(vmin=0, vmax=3e-6, stretch='arcsinh', cmap=cmap_lf)
        si_line.show_colorscale(vmin=0, vmax=3e-6, stretch='arcsinh', cmap=cmap_lf)
        s_line.show_colorscale(vmin=0, vmax=3e-6, stretch='arcsinh', cmap=cmap_lf)
        mg_eqw.show_colorscale(vmin=0, vmax=250, stretch='linear', cmap=cmap_ew)
        si_eqw.show_colorscale(vmin=0, vmax=1000, stretch='linear', cmap=cmap_ew)
        s_eqw.show_colorscale(vmin=0, vmax=1000, stretch='linear', cmap=cmap_ew)

        for im in [mg_line, si_line, s_line, mg_eqw, si_eqw, s_eqw]:
            im.recenter(ra2deg(13, 46, 40), -62.87, width=18./60, height=18./60)
            format_ticks_and_labels(im, invert=invert)
            im.ticks.hide()
            im.tick_labels.hide()
            im.axis_labels.hide()

        most = fits.open(XMMPATH + '/../most/G309.2-0.6.fits')
        lev = [1e-2]
        lev_color = 'cyan'
        if invert:
            lev_color = 'gray'
        for im in [mg_line, si_line, s_line, mg_eqw, si_eqw, s_eqw]:
            im.show_contour(data=most, levels=lev, colors=lev_color, alpha=0.7)

        text_color = 'white'
        if invert:
            text_color = 'black'

        if invert:
            mg_line.add_label(0.96, 0.92, '1.3--1.4 keV', relative=True,
                              horizontalalignment='right', size='small', color='black')
            si_line.add_label(0.96, 0.92, '1.8--1.9 keV', relative=True,
                              horizontalalignment='right', size='small', color='black')
            s_line.add_label(0.96, 0.92, '2.4--2.5 keV', relative=True,
                             horizontalalignment='right', size='small', color='black')
        else:
            mg_line.add_label(0.96, 0.92, '1.3--1.4 keV', relative=True,
                              horizontalalignment='right', size='small',
                              color='black', backgroundcolor='white')
            si_line.add_label(0.96, 0.92, '1.8--1.9 keV', relative=True,
                              horizontalalignment='right', size='small',
                              color='black', backgroundcolor='white')
            s_line.add_label(0.96, 0.92, '2.4--2.5 keV', relative=True,
                             horizontalalignment='right', size='small',
                             color='black', backgroundcolor='white')
        # MEH -- too awkward.
        # mg_line.add_label(0.96, 0.78, '1.3--1.4 keV', relative=True, horizontalalignment='right', size='small', color=text_color)
        # si_line.add_label(0.96, 0.78, '1.8--1.9 keV', relative=True, horizontalalignment='right', size='small', color=text_color)
        # s_line.add_label( 0.96, 0.78, '2.4--2.5 keV', relative=True, horizontalalignment='right', size='small', color=text_color)
        mg_eqw.add_label(0.96, 0.92, 'Mg EW', relative=True,
                         horizontalalignment='right', size='small', color=text_color)
        si_eqw.add_label(0.96, 0.92, 'Si EW', relative=True,
                         horizontalalignment='right', size='small', color=text_color)
        s_eqw.add_label(0.96, 0.92, 'S EW', relative=True,
                        horizontalalignment='right', size='small', color=text_color)

        mg_eqw.add_colorbar(location='bottom')
        mg_eqw.colorbar.set_font(size='small')
        #mg_eqw.colorbar.set_axis_label_text(r'Equivalent width, eV')
        #mg_eqw.colorbar.set_axis_label_font(size='small')
        mg_eqw.colorbar.set_ticks([0, 50, 100, 150, 200, 250])

        si_eqw.add_colorbar(location='bottom')
        si_eqw.colorbar.set_font(size='small')
        #si_eqw.colorbar.set_axis_label_text(r'Equivalent width, eV')
        #si_eqw.colorbar.set_axis_label_font(size='small')
        si_eqw.colorbar.set_ticks([0, 200, 400, 600, 800, 1000])

        s_eqw.add_colorbar(location='bottom')
        s_eqw.colorbar.set_font(size='small')
        #s_eqw.colorbar.set_axis_label_text(r'Equivalent width, eV')
        #s_eqw.colorbar.set_axis_label_font(size='small')
        s_eqw.colorbar.set_ticks([0, 200, 400, 600, 800, 1000])

        # Scalebar label commanding does not work :(
        #s_eqw.add_scalebar(1 * u.arcmin)  # 1 arcminute scalebar
        #s_eqw.scalebar.set_label('1 arcmin.')
        #s_eqw.scalebar.set_corner('top right')
        #s_eqw.scalebar.set_font(size='small')
        #if invert:
        #    s_eqw.scalebar.set_color('black')
        #else:
        #    s_eqw.scalebar.set_color('white')

        fig.tight_layout()
        fig.canvas.draw()
        if invert:
            plt.savefig('fig_lineflux_eqwidth_invert.pdf', dpi=300)
        else:
            plt.savefig('fig_lineflux_eqwidth.pdf', dpi=300)

    elif fig_num == '4':
        # RGB image of Mg line flux, Si/S eq width
        aplpy.make_rgb_image([XMMPATH + '/repro_merged/mg_lineflux_bin16_gauss2.fits',
                              XMMPATH + '/repro_merged/si_eqwidth_bin16_gauss2.fits',
                              XMMPATH + '/repro_merged/s_eqwidth_bin16_gauss2.fits'],
                             'fig_rgb_soft_eqwidth.png',
                             vmin_r=1e-7, vmax_r=1e-6, stretch_r='log',
                             vmin_g=0, vmax_g=800, stretch_g='linear',
                             vmin_b=0, vmax_b=800, stretch_b='linear',
                             make_nans_transparent=True)

        fig = plt.figure(figsize=(3, 5))
        f = aplpy.FITSFigure(XMMPATH + '/repro_merged/mg_lineflux_bin16_gauss2.fits',
                             figure=fig)
        f.recenter(ra2deg(13, 46, 40), -62.87, width=18./60, height=18./60)
        f.show_rgb('fig_rgb_soft_eqwidth.png')
        format_ticks_and_labels(f, invert=False)
        f.ticks.hide()
        f.tick_labels.hide()
        f.axis_labels.hide()

        most = fits.open(XMMPATH + '/../most/G309.2-0.6.fits')
        lev = [1e-2]
        lev_color = 'magenta'
        f.show_contour(data=most, levels=lev, colors=lev_color, alpha=0.7)

        f.show_regions('regs/core.reg')
        f.show_regions('regs/lobe_ne.reg')
        f.show_regions('regs/lobe_sw.reg')
        f.show_regions('regs/ridge_nw.reg')
        f.show_regions('regs/ridge_se.reg')
        f.add_label(0.52, 0.34, 'Core', relative=True, size=8, color='white')
        f.add_label(0.09, 0.74, 'Lobe NE', relative=True, size=8, color='white')
        f.add_label(0.87, 0.75, 'Ridge NW', relative=True, size=8, color='white')
        f.add_label(0.10, 0.22, 'Ridge SE', relative=True, size=8, color='white')
        f.add_label(0.55, 0.05, 'Lobe SW', relative=True, size=8, color='white')

        fig.tight_layout()
        fig.canvas.draw()
        plt.savefig('fig_rgb_soft_eqwidth.pdf', dpi=300)

    else:
        raise Exception("Invalid figure number")

import time

dataset = pd.read_csv("dataset1.txt", header=None, delim_whitespace=True)
X = np.array(dataset[dataset.columns[0:dataset.shape[1]]])
x = np.array(dataset[dataset.columns[0:dataset.shape[1] - 1]])
y = np.array(dataset[dataset.columns[dataset.shape[1] - 1]])

# sklearn.cross_validation was removed; train_test_split now lives in model_selection
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans

kmeans = KMeans()
kmeans = kmeans.fit(X)
k = len(kmeans.cluster_centers_)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_

from matplotlib import pyplot

for i in range(k):
    ds = X[np.where(labels == i)]
    pyplot.plot(ds[:, 0], ds[:, 1], 'o')
    lines = pyplot.plot(centroids[i, 0], centroids[i, 1], 'kx')
    pyplot.setp(lines, ms=15.0)
print("The number of clusters are", k)
pyplot.savefig("plot3.png")

fout.close()

# Loss variables
step_list = np.array(step_list)
train_loss_list = np.array(train_loss_list)
# for some reason, dev_loss_list.shape = [Ndev, 4]
dev_loss_list = np.array(dev_loss_list)

# Plot
plt.figure()
plt.plot(step_list, train_loss_list, label='Train')
plt.plot(step_list, dev_loss_list[:, 0], label='Dev')
plt.xlabel('Step')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.savefig(f'figs/channel/loss2_{seed_no}.png', bbox_inches='tight')

plt.figure()
plt.semilogx(y_test * Re, b_pred[:, 0, 1], 'x', label='TBNN')
plt.semilogx(y_raw * Re, bij_raw[:, 0, 1], '-', label='DNS')
plt.ylabel(r'$b_{12}$')
plt.xlabel(r'$y^+$')
plt.legend(loc='lower left')
plt.savefig(f'figs/channel/tbnn2_log_{seed_no}.png', bbox_inches='tight')

plt.figure()
plt.plot(y_test, b_pred[:, 0, 1], 'x', label='TBNN')
plt.plot(y_raw, bij_raw[:, 0, 1], '-', label='DNS')
plt.ylabel(r'$b_{12}$')
plt.xlabel(r'$y$')

def graph_sas_curve(filename, x, y, title_text, x_lab, y_lab,
                    x_min, x_max, y_min, y_max, **kwargs):
    """
    Outputs graph of x and y to a pdf file. x and y are intended to be
    functions of the magnitude of scattering vector, q, and intensity, I.
    Values computed from the fit (outputs) and the range of R? * q over the
    fit are written on the graph.

    @type  x:         numpy array
    @param x:         Function of the scattering vector magnitude, q, used for
                      the x-axis of the plot.
    @type  y:         numpy array
    @param y:         Function of the scattered intensity, I, used for the
                      y-axis of the plot.
    @type  x_lab:     string
    @param x_lab:     Label for the x-axis of the plot.
    @type  y_lab:     string
    @param y_lab:     Label for the y-axis of the plot.
    @type  x_min:     float
    @param x_min:     Minimum value to plot on x-axis.
    @type  x_max:     float
    @param x_max:     Maximum value to plot on x-axis.
    @type  y_min:     float
    @param y_min:     Minimum value to plot on y-axis.
    @type  y_max:     float
    @param y_max:     Maximum value to plot on y-axis.
    @keyword fit_coeffs: Fit coefficients for a linear fit from a numpy
                      polyfit of the input x and y values.
    @keyword outputs: Text of values that should be added to plot (e.g. Rg).
    @keyword rq_range: Text detailing the R*q values over the range of any
                      fit performed on the x, y values.
    @keyword mask:    Numpy style mask to select points used in fit so that
                      these can be highlighted in the plot.
    """

    # keyword lookups now match the names documented above
    fit_coeffs = kwargs.get('fit_coeffs', None)
    outputs = kwargs.get('outputs', None)
    rq_range = kwargs.get('rq_range', None)
    mask = kwargs.get('mask', None)

    # Plot the input x, y values and, if provided, a fit line.
    # outputs is used to display values calculated from the fit (Rg, Rxs?,
    # R?*q over fit values) on the plot
    plt.figure(figsize=(8, 6), dpi=300)
    ax = plt.subplot(111, xlabel=x_lab, ylabel=y_lab, title=title_text,
                     xlim=(x_min, x_max), ylim=(y_min, y_max))

    plt.scatter(x, y, s=5)

    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
        item.set_fontsize(15)

    ax.tick_params(axis='both', which='major', labelsize=12)

    # Plot linear fit and highlight points used in its construction
    if fit_coeffs is not None:
        # Plot the fit line along the whole x range shown in the plot
        fitLine = np.poly1d(fit_coeffs)
        x_points = np.linspace(x_min, x_max, 300)
        plt.plot(x_points, fitLine(x_points))

        # Highlight points used in the fit
        if mask is None:
            plt.scatter(x, y, s=30)
        else:
            plt.scatter(x[mask], y[mask], s=30)

        plt.annotate(outputs + '\n' + rq_range, xy=(0.45, 0.85),
                     xycoords='axes fraction')

    plt.savefig(filename)

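# Hedged usage sketch for graph_sas_curve(), not from the original source: the
# synthetic Guinier-style data (q, Rg, I0), the fit mask, and the annotation
# strings are all illustrative assumptions.
import numpy as np

q = np.linspace(0.01, 0.2, 100)
rg, i0 = 20.0, 1000.0                         # made-up radius of gyration and I(0)
intensity = i0 * np.exp(-(q * rg) ** 2 / 3.0) # ideal Guinier decay
x, y = q ** 2, np.log(intensity)              # Guinier axes: q^2 vs ln(I)
mask = x < (1.3 / rg) ** 2                    # fit only the low-q region
coeffs = np.polyfit(x[mask], y[mask], 1)
graph_sas_curve('guinier.pdf', x, y, 'Guinier plot',
                r'$q^2$', r'$\ln I$',
                x.min(), x.max(), y.min(), y.max(),
                fit_coeffs=coeffs, outputs='Rg = 20.0',
                rq_range='Rg*q = 0.13-1.30', mask=mask)
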
def run(n_neurons=60, t=10, t_test=10, n_trains=10, n_encodes=20, n_tests=10,
        f=DoubleExp(1e-3, 3e-2), f_out=DoubleExp(1e-3, 1e-1), dt=0.001,
        neuron_type=LIF(), reg=1e-2, penalty=0.5, load_w=None, load_df=None):

    d_ens = np.zeros((n_neurons, 1))
    f_ens = f
    w_ens = None
    e_ens = None
    w_ens2 = None
    e_ens2 = None
    f_smooth = DoubleExp(1e-2, 2e-1)
    print('\nNeuron Type: %s' % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens = np.load(load_w)['w_ens']
        else:
            print('Optimizing ens1 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s" % nenc)
                stim_func1, stim_func2 = make_normed_flipped(value=1.4, t=t, N=1,
                                                             f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f,
                          stim_func1=stim_func1, stim_func2=stim_func2,
                          neuron_type=neuron_type, w_ens=w_ens, e_ens=e_ens, L=True)
                w_ens = data['w_ens']
                e_ens = data['e_ens']
                np.savez('data/multiply_w.npz', w_ens=w_ens, e_ens=e_ens)

                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/multiply_%s_w_ens.pdf" % neuron_type)

                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/multiply_ens_nenc_%s_activity_%s.pdf'
                                % (nenc, n))
                    plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_ens = load['d_ens']
        d_out1 = load['d_out1']
        taus_ens = load['taus_ens']
        taus_out1 = load['taus_out1']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
        f_out1 = DoubleExp(taus_out1[0], taus_out1[1])
    else:
        print('Optimizing ens1 filters and decoders')
        stim_func1, stim_func2 = make_normed_flipped(value=1.2, t=t, N=n_trains,
                                                     f=f, seed=0)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t*n_trains, f=f, dt=dt,
                  neuron_type=neuron_type, stim_func1=stim_func1,
                  stim_func2=stim_func2, w_ens=w_ens)
        d_ens, f_ens, taus_ens = df_opt(data['x'][:, 0]*data['x'][:, 1], data['ens'],
                                        f, dt=dt, penalty=penalty, reg=reg,
                                        name='multiply_%s' % neuron_type)
        d_ens = d_ens.reshape((n_neurons, 1))
        d_out1, f_out1, taus_out1 = df_opt(data['x'], data['ens'], f_out, dt=dt,
                                           name='multiply_%s' % neuron_type)
        np.savez('data/multiply_%s_df.npz' % neuron_type,
                 d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f.impulse(len(times), dt=0.0001),
                label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$"
                % (-1./f.poles[0], -1./f.poles[1]))
        ax.plot(times, f_ens.impulse(len(times), dt=0.0001),
                label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
                % (-1./f_ens.poles[0], -1./f_ens.poles[1],
                   np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_ens.pdf" % neuron_type)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001),
                label=r"$f^{out}, \tau=%.3f, \tau_2=%.3f$"
                % (-1./f_out.poles[0], -1./f_out.poles[1]))
        ax.plot(times, f_out1.impulse(len(times), dt=0.0001),
                label=r"$f^{out1}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
                % (-1./f_out1.poles[0], -1./f_out1.poles[1],
                   np.count_nonzero(d_out1), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_out1.pdf" % neuron_type)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['x'][:, 0]*data['x'][:, 1], dt=dt).ravel()
        xhat_ens = np.dot(a_ens, d_ens).ravel()
        rmse_ens = rmse(xhat_ens, x)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' % rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_train.pdf" % neuron_type)

        a_ens = f_out1.filt(data['ens'], dt=dt)
        x_out = f_out.filt(data['x'], dt=dt)
        xhat_ens_out = np.dot(a_ens, d_out1)
        rmse_ens_out1 = rmse(xhat_ens_out[:, 0], x_out[:, 0])
        rmse_ens_out2 = rmse(xhat_ens_out[:, 1], x_out[:, 1])
        fig, ax = plt.subplots()
        ax.plot(data['times'], x_out[:, 0], linestyle="--", label='x_0')
        ax.plot(data['times'], x_out[:, 1], linestyle="--", label='x_1')
        ax.plot(data['times'], xhat_ens_out[:, 0], label='ens_0, rmse=%.3f' % rmse_ens_out1)
        ax.plot(data['times'], xhat_ens_out[:, 1], label='ens_1, rmse=%.3f' % rmse_ens_out2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_out_train.pdf" % neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens2 = np.load(load_w)['w_ens2']
        else:
            print('Optimizing ens2 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s" % nenc)
                stim_func1, stim_func2 = make_normed_flipped(value=1.4, t=t, N=1,
                                                             f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f,
                          stim_func1=stim_func1, stim_func2=stim_func2,
                          neuron_type=neuron_type, w_ens=w_ens,
                          w_ens2=w_ens2, e_ens2=e_ens2, L2=True)
                w_ens2 = data['w_ens2']
                e_ens2 = data['e_ens2']
                np.savez('data/multiply_w.npz', w_ens=w_ens, e_ens=e_ens,
                         w_ens2=w_ens2, e_ens2=e_ens2)

                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens2), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/multiply_%s_w_ens2.pdf" % neuron_type)

                a_ens = f_smooth.filt(data['ens2'], dt=dt)
                a_supv = f_smooth.filt(data['supv2'], dt=dt)
                for n in range(30):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:, n], alpha=0.5, label='supv2')
                    ax.plot(data['times'], a_ens[:, n], alpha=0.5, label='ens2')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/multiply_ens2_nenc_%s_activity_%s.pdf'
                                % (nenc, n))
                    plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_out2 = load['d_out2']
        taus_out2 = load['taus_out2']
        f_out2 = DoubleExp(taus_out2[0], taus_out2[1])
    else:
        print('Optimizing ens2 filters and decoders')
        stim_func1, stim_func2 = make_normed_flipped(value=1.2, t=t, N=n_trains,
                                                     f=f, seed=0)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t*n_trains, f=f, dt=dt,
                  neuron_type=neuron_type, stim_func1=stim_func1,
                  stim_func2=stim_func2, w_ens=w_ens, w_ens2=w_ens2)
        d_out2, f_out2, taus_out2 = df_opt(data['x2'], data['ens2'], f_out, dt=dt,
                                           name='multiply_%s' % neuron_type)
        np.savez('data/multiply_%s_df.npz' % neuron_type,
                 d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1,
                 d_out2=d_out2, taus_out2=taus_out2)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001),
                label=r"$f^{out}, \tau=%.3f, \tau_2=%.3f$"
                % (-1./f_out.poles[0], -1./f_out.poles[1]))
        ax.plot(times, f_out2.impulse(len(times), dt=0.0001),
                label=r"$f^{out2}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
                % (-1./f_out2.poles[0], -1./f_out2.poles[1],
                   np.count_nonzero(d_out2), 30))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_out2.pdf" % neuron_type)

        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' % rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens2_train.pdf" % neuron_type)

    rmses_ens = np.zeros((n_tests))
    rmses_ens_out = np.zeros((n_tests))
    rmses_ens2 = np.zeros((n_tests))
    for test in range(n_tests):
        print('test %s' % test)
        stim_func1, stim_func2 = make_normed_flipped(value=1.0, t=t_test, N=1,
                                                     f=f, seed=100+test)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_test, f=f, dt=dt,
                  neuron_type=neuron_type, stim_func1=stim_func1,
                  stim_func2=stim_func2, w_ens=w_ens, w_ens2=w_ens2)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['x'][:, 0]*data['x'][:, 1], dt=dt).ravel()
        xhat_ens = np.dot(a_ens, d_ens).ravel()
        rmse_ens = rmse(xhat_ens, x)
        a_ens_out = f_out1.filt(data['ens'], dt=dt)
        x_out = f_out.filt(data['x'], dt=dt)
        # decode from the f_out1-filtered activities (the original dotted the
        # f_ens-filtered a_ens here, leaving a_ens_out unused)
        xhat_ens_out = np.dot(a_ens_out, d_out1)
        rmse_ens_out = rmse(xhat_ens_out, x_out)
        rmse_ens_out1 = rmse(xhat_ens_out[:, 0], x_out[:, 0])
        rmse_ens_out2 = rmse(xhat_ens_out[:, 1], x_out[:, 1])
        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        rmses_ens[test] = rmse_ens
        rmses_ens_out[test] = rmse_ens_out
        rmses_ens2[test] = rmse_ens2

        fig, ax = plt.subplots()
        ax.plot(data['times'], x_out[:, 0], linestyle="--", label='x_0')
        ax.plot(data['times'], x_out[:, 1], linestyle="--", label='x_1')
        ax.plot(data['times'], xhat_ens_out[:, 0], label='ens_0, rmse=%.3f' % rmse_ens_out1)
        ax.plot(data['times'], xhat_ens_out[:, 1], label='ens_1, rmse=%.3f' % rmse_ens_out2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1 out")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_out_test_%s.pdf" % (neuron_type, test))

        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' % rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_test_%s.pdf" % (neuron_type, test))

        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' % rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens2_test_%s.pdf" % (neuron_type, test))
        plt.close('all')

    mean_ens = np.mean(rmses_ens)
    mean_ens_out = np.mean(rmses_ens_out)
    mean_ens2 = np.mean(rmses_ens2)
    CI_ens = sns.utils.ci(rmses_ens)
    CI_ens_out = sns.utils.ci(rmses_ens_out)
    CI_ens2 = sns.utils.ci(rmses_ens2)

    fig, ax = plt.subplots()
    sns.barplot(data=rmses_ens2)
    ax.set(ylabel='RMSE', title="mean=%.3f, CI=%.3f-%.3f"
           % (mean_ens2, CI_ens2[0], CI_ens2[1]))
    plt.xticks()
    plt.savefig("plots/multiply_%s_rmse.pdf" % neuron_type)

    print('rmses: ', rmses_ens, rmses_ens_out, rmses_ens2)
    print('means: ', mean_ens, mean_ens_out, mean_ens2)
    print('confidence intervals: ', CI_ens, CI_ens_out, CI_ens2)
    np.savez('data/multiply_%s_results.npz' % neuron_type,
             rmses_ens=rmses_ens, rmses_ens_out=rmses_ens_out, rmses_ens2=rmses_ens2)
    return rmses_ens2

for i in range(M):  # xrange is Python 2 only
    plt.scatter(Xdd[i], Ydd[i], c=colors[i], facecolor='0.5', lw=0,
                label='cluster' + str(i + 1))
plt.scatter(train1[:, 0], train1[:, 1], c='k', label='train class 1')
plt.scatter(train2[:, 0], train2[:, 1], c='r', label='train class 2')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.title('Dataset5, k means clustering, iteration: ' + str(iter) +
          ', clusters = ' + str(M))
plt.legend(loc='best')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.savefig("kmeansnormal1(b)" + str(iter) + "-" + str(M) + ".png",
            bbox_inches='tight')
plt.close(fig)

z = np.zeros((N, M))
for n in range(N):
    minindex = 0
    minval = LA.norm(np.subtract(X_train[n], mu[0]))
    for i in range(M):
        a = LA.norm(np.subtract(X_train[n], mu[i]))
        if minval > a:
            minval = a
            minindex = i
    z[n][minindex] = 1
newmu = np.zeros((M, size[1]))
flag = 0

n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n):
    # display original
    ax = plt.subplot(2, n, i)
    plt.imshow(x_train[i + 4*13*150].reshape(32, 32, 3))
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + n)
    plt.imshow(x_train_noisy[i + 4*13*150].reshape(32, 32, 3))
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
# save before show(): once the figure window is closed, savefig would write a blank image
plt.savefig('saves/cdA_gblur_noisy_vis.png')
plt.show()

"""Constructing the Model"""

import keras
from keras import losses
from keras.models import Model, load_model
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, concatenate

L = 5  # Total number of layers
S = 2  # Number of shared layers
F = [16, 32, 64, 128, 256]  # Number of clean filters including input

# Clean different encoding layers
inp = Input(shape=(32, 32, 3))
enc = inp
for i in range(L - S):

ax1.spines['bottom'].set_position(('data', 0.0)) ax1.spines['top'].set_color('none') # buffer layer annotation #ax1.axvspan(5.0, 30.0, ymin=0.049, alpha=1.0, color=Grey, zorder=0) #ax1.axvline(x=5.0, color=Grey) #ax1.axvline(x=30.0, color=Grey) ax1.text(6.5, 0.15, r"Buffer layer") # , color=Grey) # bbox=dict(facecolor=Grey)) ax1.annotate(s='', xy=(5.0, 0.11), xytext=(30.0, 0.11), arrowprops=dict(arrowstyle='|-|', linewidth=0.75, shrinkA=0.0, shrinkB=0.0, edgecolor=Grey)) # plot mode interactive (1) or pdf (2) if plot != 2: plt.tight_layout() plt.show() else: fig.tight_layout() #fnam = fnam.replace(".dat", "viscous.pdf") plt.savefig(fnamout, transparent=True) print('Written file:', fnamout) fig.clf() print("Done!")
from IPython.display import clear_output
import matplotlib.pyplot as plt

def plot(rewards):
    clear_output(True)  # clear the previous notebook output before redrawing
    plt.figure(figsize=(20, 5))
    plt.plot(rewards)
    plt.savefig('td3_lstm.png')
def _plot_and_save_attention(self, att_w, filename): plt = self.draw_attention_plot(att_w) plt.savefig(filename) plt.close()
d_mean_list.append( dagger_results['returns']['mean'] ) d_std_list.append( dagger_results['returns']['std'] ) c_mean_dict[envname] = np.array(c_mean_list, dtype = np.float32) c_std_dict[envname] = np.array(c_std_list, dtype = np.float32) d_mean_dict[envname] = np.array(d_mean_list, dtype = np.float32) d_std_dict[envname] = np.array(d_std_list, dtype = np.float32) expert_results_fname = os.path.join(expert_results_folder, 'train_' + envname +'.pkl') with open(expert_results_fname, 'rb') as f: expert_results = pickle.loads(f.read()) e_mean = np.array(expert_results['returns']['mean']) lines0 = plt.errorbar(x, c_mean_dict[envname], c_std_dict[envname],color='r', linewidth=2.0, label='Behavioral Cloning') lines1 = plt.errorbar(x, d_mean_dict[envname], d_std_dict[envname], color='g', linewidth=2.0, label='Dagger') lines2 = plt.plot(x, e_mean * np.ones_like(d_mean_dict[envname]) ) plt.setp(lines2, color='b', linewidth=2.0, label='Expert') plt.ylabel('Reward') plt.xlabel('# rollouts/ Dagger iterations') plt.legend() plt.savefig(os.path.join(out_folder,'%s.png'%(envname)), bbox_inches='tight') plt.clf()
label=False)
ax3.set_title('d={}'.format(d_list[2]))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
fig.legend(loc="lower right", ncol=3)
fig.text(0.5, 0.04, 'number of iterations \n ({} pass x {} samples)'.format(1, N), ha='center')
fig.suptitle('Sharp prior:' + r'$\sigma_0={}$'.format(sigma0))
plt.savefig('./outputs/KL_HighDim_sharpPrior')

if 'HD2' in Test:
    # sensitivity to dimension with flat prior
    sigma0 = 30  # wide prior
    mu0 = 0
    d_list = [30, 70, 100]
    N = 500
    s = 2
    c = 0
    seed = 10
    fig = plt.figure(num, figsize=set_size(ratio=0.5))
    num = num + 1
    ax1 = fig.add_subplot(131)
    XP_HighDim(np.array([ax1]),
def main(weather_data_dir, katkam_dir):
    weather_files = os.listdir(weather_data_dir)
    df = pd.concat((pd.read_csv(os.path.join(weather_data_dir, f), header=14, parse_dates=['Date/Time'])
                    for f in weather_files))
    df = df[df['Weather'].notnull()]
    weather_df = df[['Date/Time', 'Time', 'Weather']].copy()
    images = katkam_dir + '/*.*'
    x_images = io.imread_collection(images)
    images = pd.DataFrame({'filename': x_images.files, 'img': np.arange(0, len(x_images.files))})
    images['Date/Time'] = pd.to_datetime(images['filename'].str.extract(r'-([0-9]+)\.', expand=False),
                                         format='%Y%m%d%H%M%S')
    images = images.merge(weather_df, on='Date/Time')
    images['Weather'] = images['Weather'].apply(transform_weather)
    data = []
    target = []
    filenames = []
    # Loop so that only the images we have weather data for are loaded,
    # instead of materializing the whole x_images collection
    for i, x in images.iterrows():
        matrix = rgb2gray(x_images[x['img']])
        matrix = np.reshape(matrix, (192*256))
        data.append(matrix)
        target.append(x['Weather'].split(','))
        filenames.append(x['filename'])
    mlb = MultiLabelBinarizer()
    y_enc = mlb.fit_transform(target)
    X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(np.array(data), y_enc, np.array(filenames))
    model = make_pipeline(PCA(250), KNeighborsClassifier(n_neighbors=15))
    model.fit(X_train, y_train)
    predicted = model.predict(X_test)
    print("KNN Model Score: %f" % model.score(X_test, y_test))
    result = np.empty(predicted.shape[0], dtype=bool)  # np.bool was removed from NumPy
    for i, (x, y) in enumerate(zip(predicted, y_test)):
        result[i] = np.array_equal(x, y)
    wrong = mlb.inverse_transform(predicted[~result])
    real = mlb.inverse_transform(y_test[~result])
    results_df = pd.DataFrame({'filename': idx2[~result], 'predicted': wrong, 'actual': real})
    aggregated = results_df.groupby(['predicted', 'actual']).count().rename(columns={'filename': 'Predicted Incorrectly'})
    aggregated.plot.bar()
    plt.tight_layout()
    plt.legend()
    plt.savefig('errors.png')
    correct = mlb.inverse_transform(predicted[result])
    real = mlb.inverse_transform(y_test[result])
    results_df = pd.DataFrame({'filename': idx2[result], 'predicted': correct, 'actual': real})
    aggregated = results_df.groupby(['predicted', 'actual']).count().rename(columns={'filename': 'Predicted Correctly'})
    aggregated.plot.bar()
    plt.tight_layout()
    plt.legend()
    plt.savefig('correct.png')
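# The per-row exact-match loop above can be collapsed into one vectorized
# comparison; a sketch assuming predicted and y_test are equal-shaped
# binary indicator arrays from MultiLabelBinarizer:
result = (predicted == y_test).all(axis=1)   # True where every label matches
exact_match_accuracy = result.mean()         # fraction of fully correct samples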
ax[0].set_ylabel("Total A \n Concentration \n [mol/L]",fontsize=15) ax[1].set_ylabel("Total A \n Sorbed Concent. \n [mol/m3]",fontsize=15) ax[2].set_title("Langmuir and Freundlich sorption models",fontsize=15) ax[3].set_xlabel("Distance (m)",fontsize=15) ax[2].set_ylabel("Total B, C \n Concentration \n [mol/L]",fontsize=15) ax[3].set_ylabel("Total B, C \n Sorbed Concent. \n [mol/m3]",fontsize=15) ax[0].legend(loc='upper right',fontsize=10) ax[1].legend(loc='upper right',fontsize=10) ax[0].set_xlim(left=30,right=70) ax[1].set_xlim(left=30,right=70) ax[2].legend(loc='upper right',fontsize=10) ax[3].legend(loc='upper right',fontsize=8) ax[2].set_xlim(left=30,right=70) ax[3].set_xlim(left=30,right=70) # plot adjustments plt.tight_layout() plt.subplots_adjust(left=0.20,bottom=0.15,right=0.95,top=0.90) plt.suptitle("Amanzi 1D "+root.title()+" Benchmark at 50 years",x=0.57,fontsize=20) plt.tick_params(axis='both', which='major', labelsize=15) # pyplot.show() plt.savefig(root+"_1d.png",format="png") # plt.close() # finally: # pass
for site in datasetObjects:
    print(site, datasetObjects[site].data)
    x = datasetObjects[site].getAgeValues()
    y = datasetObjects[site].getElevationValues()
    n = len(x)
    plt.plot(x, y, mapSiteToColour(site) + 's', label=site + " n=%i" % n, markersize=4.0)
    datasetModels[site] = siteModelConnectTheDots(datasetObjects[site])
##plt.title("Plot of Elevation by Age\nRaw Data only")
plt.ylabel('Elevation (m IGLD1985)')
plt.xlabel('Age Before Present (years)')
plt.legend(loc=2, prop={'size': 17})
plt.savefig('./theDataRaw.png')
plt.close()

############################################################################
############################################################################
## create the raw plot with the model included #############################
for ds in datasetObjects:
    print(ds, datasetObjects[ds].data)
    x = datasetObjects[ds].getAgeValues()
    y = datasetObjects[ds].getElevationValues()
    plt.plot(x, y, mapSiteToColour(ds) + 's', label="%s, n=%i" % (ds, len(x)), markersize=4.0)
for d in datasets:
def main(): with timer('load data'): df = pd.read_csv(FOLD_PATH) with timer('preprocessing'): train_df, val_df = df[df.fold_id != FOLD_ID], df[df.fold_id == FOLD_ID] train_augmentation = Compose([ Flip(p=0.5), OneOf( [ #ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03), GridDistortion(p=0.5), OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5) ], p=0.5), #OneOf([ # ShiftScaleRotate(p=0.5), ## RandomRotate90(p=0.5), # Rotate(p=0.5) #], p=0.5), OneOf([ Blur(blur_limit=8, p=0.5), MotionBlur(blur_limit=8, p=0.5), MedianBlur(blur_limit=8, p=0.5), GaussianBlur(blur_limit=8, p=0.5) ], p=0.5), OneOf( [ #CLAHE(clip_limit=4, tile_grid_size=(4, 4), p=0.5), RandomGamma(gamma_limit=(100, 140), p=0.5), RandomBrightnessContrast(p=0.5), RandomBrightness(p=0.5), RandomContrast(p=0.5) ], p=0.5), OneOf([ GaussNoise(p=0.5), Cutout(num_holes=10, max_h_size=10, max_w_size=20, p=0.5) ], p=0.5) ]) val_augmentation = None train_dataset = SeverDataset(train_df, IMG_DIR, IMG_SIZE, N_CLASSES, id_colname=ID_COLUMNS, transforms=train_augmentation) val_dataset = SeverDataset(val_df, IMG_DIR, IMG_SIZE, N_CLASSES, id_colname=ID_COLUMNS, transforms=val_augmentation) train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2) val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2) del train_df, val_df, df, train_dataset, val_dataset gc.collect() with timer('create model'): model = smp.Unet('densenet201', encoder_weights='imagenet', classes=N_CLASSES) model.to(device) criterion = torch.nn.BCEWithLogitsLoss() optimizer = torch.optim.Adam(model.parameters(), lr=3e-4) scheduler_cosine = CosineAnnealingLR(optimizer, T_max=CLR_CYCLE, eta_min=3e-5) scheduler = GradualWarmupScheduler(optimizer, multiplier=1.1, total_epoch=CLR_CYCLE * 2, after_scheduler=scheduler_cosine) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) with timer('train'): train_losses = [] valid_losses = [] best_model_loss = 999 best_model_ep = 0 checkpoint = 0 for epoch in range(1, EPOCHS + 1): if epoch % (CLR_CYCLE * 2) == 0: if epoch != 0: y_val = y_val.reshape(-1, N_CLASSES, IMG_SIZE[0], IMG_SIZE[1]) best_pred = best_pred.reshape(-1, N_CLASSES, IMG_SIZE[0], IMG_SIZE[1]) for i in range(N_CLASSES): th, score, _, _ = search_threshold( y_val[:, i, :, :], best_pred[:, i, :, :]) LOGGER.info( 'Best loss: {} Best Dice: {} on epoch {} th {} class {}' .format(round(best_model_loss, 5), round(score, 5), best_model_ep, th, i)) checkpoint += 1 best_model_loss = 999 LOGGER.info("Starting {} epoch...".format(epoch)) tr_loss = train_one_epoch(model, train_loader, criterion, optimizer, device) train_losses.append(tr_loss) LOGGER.info('Mean train loss: {}'.format(round(tr_loss, 5))) valid_loss, val_pred, y_val = validate(model, val_loader, criterion, device) valid_losses.append(valid_loss) LOGGER.info('Mean valid loss: {}'.format(round(valid_loss, 5))) scheduler.step() if valid_loss < best_model_loss: torch.save( model.state_dict(), '{}_fold{}_ckpt{}.pth'.format(EXP_ID, FOLD_ID, checkpoint)) best_model_loss = valid_loss best_model_ep = epoch best_pred = val_pred del val_pred gc.collect() with timer('eval'): y_val = y_val.reshape(-1, N_CLASSES, IMG_SIZE[0], IMG_SIZE[1]) best_pred = best_pred.reshape(-1, N_CLASSES, IMG_SIZE[0], IMG_SIZE[1]) for i in range(N_CLASSES): th, score, _, _ = search_threshold(y_val[:, i, :, :], best_pred[:, i, :, :]) LOGGER.info( 'Best loss: {} Best Dice: {} on epoch {} th {} class {}'. 
format(round(best_model_loss, 5), round(score, 5), best_model_ep, th, i)) xs = list(range(1, len(train_losses) + 1)) plt.plot(xs, train_losses, label='Train loss') plt.plot(xs, valid_losses, label='Val loss') plt.legend() plt.xticks(xs) plt.xlabel('Epochs') plt.savefig("loss.png")
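# search_threshold() is imported from elsewhere in this project and not shown
# here; a minimal sketch of what such a helper plausibly does (scan
# binarization thresholds and keep the one maximizing the Dice coefficient),
# matching the (th, score, _, _) return pattern used above:
import numpy as np

def search_threshold(y_true, y_pred, thresholds=np.arange(0.1, 0.9, 0.05)):
    """Return (best_threshold, best_dice, y_true, binarized_pred); assumed helper."""
    best_th, best_dice = 0.5, 0.0
    for th in thresholds:
        pred_bin = (y_pred > th).astype(np.float32)
        inter = (pred_bin * y_true).sum()
        dice = 2.0 * inter / (pred_bin.sum() + y_true.sum() + 1e-7)
        if dice > best_dice:
            best_th, best_dice = th, dice
    pred_bin = (y_pred > best_th).astype(np.float32)
    return best_th, best_dice, y_true, pred_bin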
def get_thresholds(in_dat, interactive=False, plot_events=False, fig_path=None, prefix=None):
    """Guess distance threshold for event filtering

    Analyse the events in the first million Hi-C pairs in the library, plot
    the occurrences of each event type according to the number of restriction
    fragments, and ask the user interactively for the minimum threshold for
    uncuts and loops.

    Parameters
    ----------
    in_dat: str
        Path to the .pairs file containing Hi-C pairs.
    interactive: bool
        If True, plots are displayed and thresholds are requested interactively.
    plot_events : bool
        Whether to show the plot
    fig_path : str
        Path where the figure will be saved. If None, the figure will be
        displayed interactively.
    prefix : str
        If the library has a name, it will be shown on plots.

    Returns
    -------
    thr_uncut, thr_loop : int
        The thresholds (in number of restriction fragments) for uncuts (+-)
        and loops (-+), either entered by the user or estimated from the data.
    """
    thr_uncut = None
    thr_loop = None
    max_sites = 50
    # Map of event -> legend name of event for intrachromosomal pairs.
    legend = {
        "++": "++ (weird)",
        "--": "-- (weird)",
        "+-": "+- (uncuts)",
        "-+": "-+ (loops)",
    }
    colors = {"++": "#222222", "+-": "r", "--": "#666666", "-+": "tab:orange"}
    n_events = {event: np.zeros(max_sites) for event in legend}
    i = 0
    # Open the file for reading (just the first 1,000,000 lines)
    with open(in_dat, "r") as pairs:
        for line in pairs:
            # Skip header lines
            if line.startswith("#"):
                continue
            i += 1
            # Only use the first million pairs to estimate thresholds
            if i == 1000000:
                break
            # Process Hi-C pair into a dictionary
            p = process_read_pair(line)
            # Type of event and number of restriction sites between reads
            etype = p["type"]
            nsites = p["nsites"]
            # Count number of events for intrachromosomal pairs
            if etype != "inter" and nsites < max_sites:
                n_events[etype][nsites] += 1

    def plot_event(n_events, legend, name):
        """Plot the frequency of a given event type over distance."""
        plt.xlim([-0.5, 15])
        plt.plot(
            range(n_events[name].shape[0]),
            n_events[name],
            "o-",
            label=legend[name],
            linewidth=2.0,
            c=colors[name],
        )

    if interactive:
        # Plot:
        try:
            plt.figure(0)
            for event in legend:
                plot_event(n_events, legend, event)
            plt.grid()
            plt.xlabel("Number of restriction fragment(s)")
            plt.ylabel("Number of events")
            plt.yscale("log")
            plt.legend()
            plt.show(block=False)
        except Exception:
            logger.error(
                "Unable to show plots, skipping figure generation. Perhaps "
                "there is no X server running? (might be due to a Windows "
                "environment). Try running without the interactive option.")
        # Ask the user for appropriate thresholds
        print(
            "Please enter the number of restriction fragments separating "
            "reads in a Hi-C pair below or at which loops and "
            "uncuts events will be excluded\n",
            file=sys.stderr,
        )
        thr_uncut = int(input("Enter threshold for the uncuts events (+-):"))
        thr_loop = int(input("Enter threshold for the loops events (-+):"))
        try:
            plt.clf()
        except Exception:
            pass
    else:
        # Estimate thresholds from data
        for event in n_events:
            fixed = n_events[event]
            fixed[fixed == 0] = 1
            n_events[event] = fixed
        all_events = np.log(np.array(list(n_events.values())))
        # Compute median occurrences at each number of restriction sites
        event_med = np.median(all_events, axis=0)
        # Compute MAD, to have a robust estimator of the expected deviation
        # from median at long distances
        mad = np.median(abs(all_events - event_med))
        exp_stdev = mad / 0.67449
        # Iterate over sites, from furthest to frag+2
        for site in range(max_sites)[:1:-1]:
            # For uncuts and loops, keep the last (closest) site where the
            # deviation from other events <= expected_stdev
            if (abs(np.log(n_events["+-"][site]) - event_med[site]) <= exp_stdev):
                thr_uncut = site
            if (abs(np.log(n_events["-+"][site]) - event_med[site]) <= exp_stdev):
                thr_loop = site
        if thr_uncut is None or thr_loop is None:
            raise ValueError(
                "The threshold for loops or uncuts could not be estimated. "
                "Please try running with -i to investigate the problem.")
        logger.info("Filtering with thresholds: uncuts={0} loops={1}".format(
            thr_uncut, thr_loop))
        if plot_events:
            try:
                plt.figure(1)
                plt.xlim([-0.5, 15])
                # Draw colored lines for events to discard
                plt.plot(
                    range(0, thr_uncut + 1),
                    n_events["+-"][:thr_uncut + 1],
                    "o-",
                    c=colors["+-"],
                    label=legend["+-"],
                )
                plt.plot(
                    range(0, thr_loop + 1),
                    n_events["-+"][:thr_loop + 1],
                    "o-",
                    c=colors["-+"],
                    label=legend["-+"],
                )
                plt.plot(
                    range(0, 2),
                    n_events["--"][:2],
                    "o-",
                    c=colors["--"],
                    label=legend["--"],
                )
                plt.plot(
                    range(0, 2),
                    n_events["++"][:2],
                    "o-",
                    c=colors["++"],
                    label=legend["++"],
                )
                # Draw green lines for events to keep
                plt.plot(
                    range(thr_uncut, n_events["+-"].shape[0]),
                    n_events["+-"][thr_uncut:],
                    "o-",
                    range(thr_loop, n_events["-+"].shape[0]),
                    n_events["-+"][thr_loop:],
                    "o-",
                    range(1, n_events["--"].shape[0]),
                    n_events["--"][1:],
                    "o-",
                    range(1, n_events["++"].shape[0]),
                    n_events["++"][1:],
                    "o-",
                    label="kept",
                    linewidth=2.0,
                    c="g",
                )
                plt.grid()
                plt.xlabel("Number of restriction site(s)")
                plt.ylabel("Number of events")
                plt.yscale("log")
                # Remove duplicate "kept" entries in legend
                handles, labels = plt.gca().get_legend_handles_labels()
                by_label = OrderedDict(zip(labels, handles))
                plt.legend(by_label.values(), by_label.keys())
                # Show uncut and loop thresholds as vertical lines
                plt.axvline(x=thr_loop, color=colors["-+"])
                plt.axvline(x=thr_uncut, color=colors["+-"])
                if prefix:
                    plt.title("Library events by distance in {}".format(prefix))
                plt.tight_layout()
                if fig_path:
                    plt.savefig(fig_path)
                else:
                    plt.show(block=False)
                # plt.clf()
            except Exception:
                logger.error(
                    "Unable to show plots, skipping figure generation. Is "
                    "an X server running? (might be due to a Windows "
                    "environment). Try running without the plot option.")
    return thr_uncut, thr_loop
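# A hedged usage sketch for get_thresholds(); the file name is illustrative,
# not from the original project:
thr_uncut, thr_loop = get_thresholds(
    "library.pairs", interactive=False, plot_events=True,
    fig_path="event_thresholds.png", prefix="library",
)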
def plotGradientConfidenceIntervals(giaRegressionsByCombo, keys, giaRegressionDescriptions, outputPathDict):
    def plotInterval(ax, y, xstart, xstop, intervalLabel, colord, colords):
        """Plot interval at y from xstart to xstop in the colours of both sites."""
        ax.hlines(y, xstart, xstop, colords, lw=7)
        ax.hlines(y, xstart, xstop, colord, lw=3, label=intervalLabel)

    outputPath = convertListToRelativePath([outputPathDict[setting] for setting in getCurrentSettingOptions()])
    y = 0  # used to space out the intervals for each site vertically through the graph
    fig, ax = plt.subplots(1)
    for combo in keys:
        y += 1
        combo1 = combo.split('-')[0]
        combo2 = combo.split('-')[1].split(':')[0]
        order = combo.split('-')[1].split(':')[1]
        if order == 'forward':
            direct = combo1
            modelled = combo2
        else:
            direct = combo2
            modelled = combo1
        est = giaRegressionsByCombo[combo]['gradientEstimator']
        ciStart = giaRegressionsByCombo[combo]['gradient'][0]
        ciEnd = giaRegressionsByCombo[combo]['gradient'][1]
        if est < 0:
            est = -est
            ciStart = -ciStart
            ciEnd = -ciEnd
        # both orderings plot the same way, so a single call suffices
        plotInterval(ax, y, ciStart, ciEnd, "", mapSiteToColour(direct), mapSiteToColour(modelled))
        ax.vlines(est, y + 0.3, y - 0.3, mapSiteToColour(direct), lw=4)
    ax.set_xlabel('GIA (m/year)')
    ax.set_xlim([0, 0.009])
    plt.yticks(list(np.arange(1, len(keys) + 1, 1.0)),
               [giaRegressionDescriptions[key] for key in keys], rotation=0)
    fileNameIdentifier = "_".join([outputPathDict[setting] for setting in getCurrentSettingOptions()])
    plt.title("95%% confidence intervals on GIA\nfilters: %s" % fileNameIdentifier)
    for item in ax.get_yticklabels():
        item.set_fontsize(8)
    outputFilePath = filePathOnRelativePath(outputPath + "gias/", fileName='intervals', ext="png")
    print("Saving gia intervals plot at '%s'" % outputFilePath)
    verifyPath(outputPath + "gias/")
    plt.savefig(outputFilePath, bbox_inches='tight')
    outputFilePath = filePathOnRelativePath(outputPath + "gias/",
                                            fileName='%s_intervals' % fileNameIdentifier, ext="png")
    print("Saving gia intervals plot at '%s'" % outputFilePath)
    plt.savefig(outputFilePath, bbox_inches='tight')
    plt.close()
    gamma : float
        Width of Lorentzian component
    kt : float
        Thermal energy. If >0, will compute transitions from vibrationally
        excited states. Default 0.
    n_max : int
        Largest vibrational number in final manifold. If not supplied, a
        guess is provided, but may not be adequate.
    m_max : int
        Largest vibrational number in original manifold. If not supplied, a
        guess is provided, but may not be adequate.
    """
    return amp*vibronic_ls(-x+x0, s, sigma, gamma, e_vib, kt=kt, **kw)

x = np.linspace(1.8, 2.5, 1000)
e0 = 2.17
s = 0.5
sigma = 0.01
gamma = 0.001
e_vib = 0.07
y1 = vibronic_emission(x, 1, e0, s, sigma, gamma, e_vib, 0)
y2 = vibronic_emission(x, 1, e0, s, sigma, gamma, e_vib, 0.025)
y3 = vibronic_emission(x, 1, e0, s, sigma, gamma, e_vib, 0.2)
plt.figure()
plt.plot(x, y1, label="kT=0")
plt.plot(x, y2, label="kT=RT")
plt.plot(x, y3, label="kT=200 meV")
plt.legend()
plt.savefig("fc_emission.png", dpi=150)
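# For reference, the T=0 Franck-Condon progression that a lineshape like
# vibronic_ls() presumably builds on: for a displaced harmonic oscillator the
# intensity of the n-th vibronic replica is Poisson-distributed in the
# Huang-Rhys factor S, I_n = exp(-S) * S**n / n!. A minimal Gaussian-broadened
# emission sketch under that assumption (not the original implementation):
import numpy as np
from scipy.special import factorial

def fc_emission_sketch(x, e0, s, sigma, e_vib, n_max=10):
    """Gaussian-broadened T=0 Franck-Condon emission progression (sketch)."""
    y = np.zeros_like(x)
    for n in range(n_max):
        fc = np.exp(-s) * s**n / factorial(n)  # Franck-Condon factor
        y += fc * np.exp(-((x - (e0 - n * e_vib))**2) / (2 * sigma**2))
    return y / (sigma * np.sqrt(2 * np.pi))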
def filter_events(
    in_dat,
    out_filtered,
    thr_uncut,
    thr_loop,
    plot_events=False,
    fig_path=None,
    prefix=None,
):
    """Filter events (loops, uncuts and weirds)

    Filter out spurious intrachromosomal Hi-C pairs from the input file.
    +- pairs with reads closer than or at the uncut threshold and -+ pairs
    with reads closer than or at the loop threshold are excluded from the
    output file. -- and ++ pairs with both mates on the same fragment are
    also discarded. All others are written.

    Parameters
    ----------
    in_dat : str
        Path to the input 2D BED file containing Hi-C pairs.
    out_filtered : str
        Path to the output filtered 2D BED file.
    thr_uncut : int
        Minimum number of restriction sites between reads to keep an
        intrachromosomal +- pair.
    thr_loop : int
        Minimum number of restriction sites between reads to keep an
        intrachromosomal -+ pair.
    plot_events : bool
        If True, a plot showing the proportion of each type of event will be
        shown after filtering.
    fig_path : str
        Path where the figure will be saved. If None, figure is displayed
        interactively.
    prefix : str
        If the library has a name, it will be shown on plots.
    """
    n_uncuts = 0
    n_loops = 0
    n_weirds = 0
    lrange_intra = 0
    lrange_inter = 0
    # Open the files for reading and writing
    with open(in_dat, "r") as pairs, open(out_filtered, "w") as filtered:
        for line in pairs:  # iterate over each line
            # Copy header lines to output
            if line.startswith("#"):
                filtered.write(line)
                continue
            p = process_read_pair(line)
            line_to_write = ("\t".join(
                map(
                    str,
                    (
                        p["readID"],
                        p["chr1"],
                        p["pos1"],
                        p["chr2"],
                        p["pos2"],
                        p["strand1"],
                        p["strand2"],
                        p["frag1"],
                        p["frag2"],
                    ),
                )) + "\n")
            if p["chr1"] == p["chr2"]:
                # Do not report ++ and -- pairs on the same fragment (impossible)
                if p["frag1"] == p["frag2"] and p["strand1"] == p["strand2"]:
                    n_weirds += 1
                elif p["nsites"] <= thr_loop and p["type"] == "-+":
                    n_loops += 1
                elif p["nsites"] <= thr_uncut and p["type"] == "+-":
                    n_uncuts += 1
                else:
                    lrange_intra += 1
                    filtered.write(line_to_write)
            if p["chr1"] != p["chr2"]:
                lrange_inter += 1
                filtered.write(line_to_write)

    if lrange_inter > 0:
        ratio_inter = round(
            100 * lrange_inter / float(lrange_intra + lrange_inter), 2)
    else:
        ratio_inter = 0

    # Log quick summary of operation results
    kept = lrange_intra + lrange_inter
    discarded = n_loops + n_uncuts + n_weirds
    total = kept + discarded
    logger.info("Proportion of inter contacts: {0}% (intra: {1}, "
                "inter: {2})".format(ratio_inter, lrange_intra, lrange_inter))
    logger.info(
        "{0} pairs discarded: Loops: {1}, Uncuts: {2}, Weirds: {3}".format(
            discarded, n_loops, n_uncuts, n_weirds))
    logger.info("{0} pairs kept ({1}%)".format(
        kept, round(100 * kept / (kept + discarded), 2)))

    # Visualize summary if requested by user
    if plot_events:
        try:
            # Plot: make a square figure and axes to plot a pie chart:
            plt.figure(2, figsize=(6, 6))
            # The slices will be ordered and plotted counter-clockwise.
            fracs = [n_uncuts, n_loops, n_weirds, lrange_intra, lrange_inter]
            # Format labels to include event names and proportions
            labels = list(
                map(
                    lambda x: (x[0] + ": %.2f%%") % (100 * x[1] / total),
                    [
                        ("Uncuts", n_uncuts),
                        ("Loops", n_loops),
                        ("Weirds", n_weirds),
                        ("3D intra", lrange_intra),
                        ("3D inter", lrange_inter),
                    ],
                ))
            colors = ["salmon", "lightskyblue", "yellow", "palegreen", "plum"]
            patches, _ = plt.pie(fracs, colors=colors, startangle=90)
            plt.legend(patches, labels, loc=2)
            if prefix:
                plt.title(
                    "Distribution of library events in {}".format(prefix),
                    bbox={"facecolor": "1.0", "pad": 5},
                )
            plt.text(0.3, 1.15, "Threshold Uncuts =" + str(thr_uncut))
            plt.text(0.3, 1.05, "Threshold Loops =" + str(thr_loop))
            plt.text(-1.5, -1.2, "Total number of reads =" + str(total))
            plt.text(-1.5, -1.3,
                     "Ratio inter/(intra+inter) =" + str(ratio_inter) + "%")
            percentage = round(
                100 * float(lrange_inter + lrange_intra) /
                (n_loops + n_uncuts + n_weirds + lrange_inter + lrange_intra))
            plt.text(-1.5, -1.4, "selected reads = {0}%".format(percentage))
            if fig_path:
                plt.savefig(fig_path)
            else:
                plt.show()
            plt.clf()
        except Exception:
            logger.error(
                "Unable to show plots, skipping figure generation. Perhaps "
                "there is no X server running? (might be due to a Windows "
                "environment).")
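# A hedged usage sketch for filter_events(), reusing the thresholds estimated
# by get_thresholds() above (file names are illustrative):
filter_events(
    "library.pairs", "library_filtered.pairs", thr_uncut, thr_loop,
    plot_events=True, fig_path="library_events_pie.png", prefix="library",
)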
import numpy
import matplotlib.pyplot as plt

x = numpy.linspace(-3, 3, 100)
y1 = numpy.sin(x)
y2 = numpy.exp(x)
plt.plot(x, y1, color="green", marker="o")
plt.plot(x, y2, color="blue", marker="+", linewidth=3, linestyle="None")
plt.xlabel("arguments")
plt.ylabel("values")
plt.legend(["sin(x)", "exp(x)"])
plt.ylim([-1, 1])
plt.savefig("wykresik.png")
plt.show()
from q30 import read_wakati
from collections import Counter
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

wakati_list = read_wakati()
words = Counter([wakati['surface'] for sentence in wakati_list for wakati in sentence]).most_common()
words_cnt = [int(word[1]) for word in words]
words_ctx = [word[0] for word in words]
plt.hist(words_cnt, bins=50, range=(0, 50))
plt.savefig('fig38.jpg')

################
# Model answer #
################
from collections import Counter
import matplotlib.pyplot as plt
from q30 import get_neko_morphemes

morphemes_list = get_neko_morphemes()
words = Counter([morpheme["base"] for morphemes in morphemes_list for morpheme in morphemes]).most_common()
_, word_count = list(zip(*words))
plt.rcParams["font.family"] = "IPAexGothic"
plt.hist(word_count, bins=50, range=(1, 50))
plt.savefig("fig38.png")
def plot():
    import matplotlib.pyplot as plt
    with open('results.pkl', 'rb') as f:  # pickles must be opened in binary mode
        results, parser, initial_hypergrad = pickle.load(f)

    # ----- Nice versions of alpha and beta schedules for paper -----
    fig = plt.figure(0)
    fig.clf()
    ax = fig.add_subplot(411)
    #ax.set_title('Alpha learning curves')
    for cur_results, name in zip(results['log_alphas'][-1].T, parser.names):
        if name[0] == 'weights':
            ax.plot(np.exp(cur_results), 'o-', label=name)
    #ax.set_xlabel('Learning Iteration', fontproperties='serif')
    low, high = ax.get_ylim()
    ax.set_ylim([0, high])
    ax.set_ylabel('Step size', fontproperties='serif')
    ax.set_xticklabels([])
    ax.legend(numpoints=1, loc=1, frameon=False, bbox_to_anchor=(1.0, 0.5),
              prop={'family': 'serif', 'size': '12'})

    ax = fig.add_subplot(412)
    for cur_results, name in zip(results['invlogit_betas'][-1].T, parser.names):
        if name[0] == 'weights':
            ax.plot(logit(cur_results), 'o-', label=name)
    low, high = ax.get_ylim()
    ax.set_ylim([0, 1])
    ax.set_xlabel('Learning Iteration', fontproperties='serif')
    ax.set_ylabel('Momentum', fontproperties='serif')

    ax = fig.add_subplot(413)
    for cur_results, name in zip(results['log_alphas'][-1].T, parser.names):
        if name[0] == 'biases':
            ax.plot(np.exp(cur_results), 'o-', label=name)
    low, high = ax.get_ylim()
    ax.set_ylim([0, high])
    ax.set_ylabel('Step size', fontproperties='serif')
    ax.set_xticklabels([])
    ax.legend(numpoints=1, loc=1, frameon=False, bbox_to_anchor=(1.0, 0.5),
              prop={'family': 'serif', 'size': '12'})

    ax = fig.add_subplot(414)
    for cur_results, name in zip(results['invlogit_betas'][-1].T, parser.names):
        if name[0] == 'biases':
            ax.plot(logit(cur_results), 'o-', label=name)
    low, high = ax.get_ylim()
    ax.set_ylim([0, 1])
    ax.set_xlabel('Learning Iteration', fontproperties='serif')
    ax.set_ylabel('Momentum', fontproperties='serif')

    fig.set_size_inches((6, 8))
    #plt.show()
    plt.savefig('alpha_beta_paper.png')
    plt.savefig('alpha_beta_paper.pdf', pad_inches=0.05, bbox_inches='tight')

    fig.clf()
    fig.set_size_inches((6, 8))
    # ----- Primal learning curves -----
    ax = fig.add_subplot(311)
    ax.set_title('Primal learning curves')
    for i, y in enumerate(results['learning_curves']):
        ax.plot(y['learning_curve'], 'o-', label='Meta iter {0}'.format(i))
    ax.set_xlabel('Epoch number')
    ax.set_ylabel('Negative log prob')
    #ax.legend(loc=1, frameon=False)
    ax = fig.add_subplot(312)
    ax.set_title('Meta learning curves')
    losses = ['train_loss', 'valid_loss', 'tests_loss']
    for loss_type in losses:
        ax.plot(results[loss_type], 'o-', label=loss_type)
    ax.set_xlabel('Meta iter number')
    ax.set_ylabel('Negative log prob')
    ax.legend(loc=1, frameon=False)
    ax = fig.add_subplot(313)
    ax.set_title('Meta-gradient magnitude')
    ax.plot(results['meta_grad_magnitude'], 'o-', label='Meta-gradient magnitude')
    ax.plot(results['meta_grad_angle'], 'o-', label='Meta-gradient angle')
    ax.set_xlabel('Meta iter number')
    ax.set_ylabel('Meta-gradient Magnitude')
    ax.legend(loc=1, frameon=False)
    plt.savefig('learning_curves.png')

    # ----- Learning curve info -----
    fig.clf()
    ax = fig.add_subplot(311)
    ax.set_title('Grad norm')
    for i, y in enumerate(results['learning_curves']):
        ax.plot(y['grad_norm'], 'o-', label='Meta iter {0}'.format(i))
    ax.set_xlabel('Epoch number')
    #ax.legend(loc=1, frameon=False)
    ax = fig.add_subplot(312)
    for i, y in enumerate(results['learning_curves']):
        ax.plot(y['weight_norm'], 'o-', label='Meta iter {0}'.format(i))
    ax.set_xlabel('Epoch number')
    ax.legend(loc=1, frameon=False)
    ax.set_title('Weight norm')
    ax = fig.add_subplot(313)
    for i, y in enumerate(results['learning_curves']):
        ax.plot(y['velocity_norm'], 'o-', label='Meta iter {0}'.format(i))
    ax.set_xlabel('Epoch number')
    ax.set_title('Velocity norm')
    ax.legend(loc=1, frameon=False)
    plt.savefig('extra_learning_curves.png')

    # ----- Alpha and beta schedules -----
    fig.clf()
    ax = fig.add_subplot(211)
    ax.set_title('Alpha learning curves')
    for i, y in enumerate(results['log_alphas']):
        ax.plot(y, 'o-', label="Meta iter {0}".format(i))
    ax.set_xlabel('Primal iter number')
    #ax.set_ylabel('Log alpha')
    ax.legend(loc=1, frameon=False)
    ax = fig.add_subplot(212)
    ax.set_title('Beta learning curves')
    for y in results['invlogit_betas']:
        ax.plot(y, 'o-')
    ax.set_xlabel('Primal iter number')
    ax.set_ylabel('Inv logit beta')
    plt.savefig('alpha_beta_curves.png')

    # ----- Init scale and L2 reg -----
    fig.clf()
    ax = fig.add_subplot(211)
    ax.set_title('Init scale learning curves')
    for i, y in enumerate(zip(*results['log_param_scale'])):
        if parser.names[i][0] == 'weights':
            ax.plot(y, 'o-', label=parser.names[i])
    ax.set_xlabel('Meta iter number')
    ax.set_ylabel('Log param scale')
    ax.legend(loc=1, frameon=False)
    ax = fig.add_subplot(212)
    ax.set_title('Init scale learning curves')
    for i, y in enumerate(zip(*results['log_param_scale'])):
        if parser.names[i][0] == 'biases':
            ax.plot(y, 'o-', label=parser.names[i])
    ax.set_xlabel('Meta iter number')
    ax.set_ylabel('Log param scale')
    ax.legend(loc=1, frameon=False)
    plt.savefig('scale_and_reg.png')
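# logit() is used above but not defined in this excerpt. Since it is applied
# to inverse-logit momentum parameters and plotted on a [0, 1] "Momentum"
# axis, it appears to be the logistic sigmoid despite the name; a sketch
# under that assumption:
import numpy as np

def logit(x):
    """Logistic sigmoid mapping the real line to (0, 1) (assumed helper)."""
    return 1.0 / (1.0 + np.exp(-x))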
# Assumed imports for the aliases used below (sip and gaussf are not defined
# in this excerpt):
import scipy.interpolate as sip
from scipy.ndimage import gaussian_filter1d as gaussf

# Pressure at transit radius as function of wavelength:
sigma = 3.0  # Gauss-convolve for better-looking plots
p = sip.interp1d(rad1[::-1], press[::-1])
pt1 = p(gaussf(rp1, sigma))
p = sip.interp1d(rad2[::-1], press[::-1])
pt2 = p(gaussf(rp2, sigma))
p = sip.interp1d(rad3[::-1], press[::-1])
pt3 = p(gaussf(rp3, sigma))

# Photospheric pressure modulation spectrum:
lw = 1.5
plt.figure(-25)
plt.clf()
ax = plt.subplot(111)
plt.semilogy(1e4/pyrat.spec.wn, pt3, lw=lw, color="orange",
             label=r"$100\times\,{\rm solar}$")
plt.semilogy(1e4/pyrat.spec.wn, pt2, lw=lw, color="sienna",
             label=r"$1.0\times\,{\rm solar}$")
plt.semilogy(1e4/pyrat.spec.wn, pt1, lw=lw, color="k",
             label=r"$0.1\times\,{\rm solar}$")
plt.axvspan(0.74, 1.01, color="0.80")
plt.xlim(0.5, 1.2)
plt.ylim(3, 3e-7)
plt.legend(loc="upper right", fontsize=15)
plt.ylabel(r"$\rm Pressure\ \ (bar)$", fontsize=16)
plt.xlabel(r"$\rm Wavelength\ \ (um)$", fontsize=16)
plt.savefig("../plots/WASP49b_clear_spectra.ps")
#%% pnl[['yhat_lstm_1','y']].plot(figsize=(20,10)) #%% sample_yhat = pnl[['yhat_lstm_0','yhat_lstm_1']].values y_raw = pnl.y.values #%% plt.figure(figsize=(10,5)) sns.distplot(y_raw[sample_yhat.argmax(axis=1)==1],color="r",bins=30,kde=True) sns.distplot(y_raw[sample_yhat.argmax(axis=1)==0],color="b",bins=30,kde=True) plt.savefig("HIGH_CHG_7") plt.show() #%% from importlib import reload import model.DataReg as DR # reload(model.DataReg) dr = DR.DataReg() x0,ks = dr.plotROC(sample_yhat[:,1],pnl.y.apply(lambda x: 1 if x>=0 else 0).values) #%% x0
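#%%
# dr.plotROC() above appears to return a threshold x0 and the KS statistic;
# a hedged sketch of the underlying computation using scikit-learn (the
# function name roc_ks is illustrative, not from model.DataReg):
from sklearn.metrics import roc_curve
import numpy as np

def roc_ks(scores, labels):
    """Return the threshold maximizing TPR-FPR and the KS statistic."""
    fpr, tpr, thr = roc_curve(labels, scores)
    ks_idx = np.argmax(tpr - fpr)  # KS statistic: max separation of the two CDFs
    return thr[ks_idx], (tpr - fpr)[ks_idx]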
opt = Adam(lr=INIT_LR, decay=INIT_LR/EPOCHS) model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"]) #train model print("[INFO] training model...") H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS), validation_data=(testX,testY), steps_per_epoch=len(trainX) // BS, epochs = EPOCHS, verbose=1) #save the model to disk print("[INFO] serializing network...") model.save(args["model"]) # save the label binarizer to disk print("[INFO] serializing label binarizer...") f = open(args["labelbin"], "wb") f.write(pickle.dumps(lb)) f.close() #plot the training loss and accuracy plt.style.use("ggplot") plt.figure() N = EPOCHS plt.plot(np.arange(0,N), H.history["loss"], label="train_loss") plt.plot(np.arange(0,N), H.history["val_loss"], label="val_loss") plt.plot(np.arange(0,N), H.history["acc"], label="train_acc") plt.plot(np.arange(0,N), H.history["val_acc"], label="val_acc") plt.title("Training Loss and Accuracy") plt.xlabel("Epochs #") plt.ylabel("Loss/Accuracy") plt.legend(loc="upper left") plt.savefig(args["plot"])
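# In newer Keras/TF 2.x, fit_generator() is folded into fit() and the history
# keys are "accuracy"/"val_accuracy" rather than "acc"/"val_acc"; a
# version-tolerant lookup for the accuracy curves could be:
acc_key = "accuracy" if "accuracy" in H.history else "acc"
plt.plot(np.arange(0, N), H.history[acc_key], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_" + acc_key], label="val_acc")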
data_list_2 = list() # fig = plt.figure() fig2 = plt.figure() # ax = fig.add_subplot(111) ax = fig2.add_subplot(121) ax2 = fig2.add_subplot(122) # for path in paths_list: for pb_path, shu_path in zip(paths, paths_2): no, pb, legend = calculate_graph_data(pb_path) esfe, shu, seg = calculate_graph_data(shu_path) #legend_str = path[0].split('/')[-1] #legend = legend_str.split('_')[0] ax.plot(shu[0], shu[1]) ax2.plot(pb[0], pb[1]) #plt.xlim((-0.6,0)) #plt.ylim((-0.06, 0.02)) #plt.legend(loc = 'lower right') #plt.title('E.coli MG1655 Pseudo starting at 10^5 CFU/ml after 5 hours Incubation with 1mM Resazurin') ax.set_xlabel('Voltage vs AgAgCl (V)') ax.set_ylabel('Current (mA)') ax2.set_xlabel('Voltage vs AgAgCl (V)') plt.savefig(os.path.join(directory2, 'MB_ITO_17-5-17.png'), dpi=300) plt.show()
print "total time", time.time() - start_time print "Surface temperatures:", surface_temperatures print "Luminosities:", stars_luminosities print "Masses:", stars_masses print "Radii:", stars_radii plt.figure(1) plt.plot(surface_temperatures, stars_luminosities) plt.xscale('log') plt.yscale('log') plt.gca().invert_xaxis() plt.xlabel('Surface Temperatures (K)') plt.ylabel('Luminosity ($L/L_{sun}$)') plt.title('Main Sequence: Hertzsprung-Russell Diagram') plt.savefig("main_sequence.png") plt.figure(2) plt.plot(stars_masses, stars_luminosities, '.') #plt.xscale('log') plt.yscale('log') plt.gca().invert_xaxis() plt.xlabel('Masses ($M/M_{sun}$)') plt.ylabel('Luminosity ($L/L_{sun}$)') plt.title('Main Sequence: Luminosity versus Mass') plt.savefig("main_sequence_luminosities_masses.png") plt.figure(3) plt.plot(stars_masses, stars_radii, '.') #plt.xscale('log') #plt.yscale('log')