import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages


def plotDif(outputname, outDir, title, x, y, colour):
    """Plot modelled against measured melt.

    To use: plotDif(outputname, outDir, title, x, y, colour)"""
    # matplotlib.rcParams["axes.grid"] = True
    matplotlib.rcParams["legend.fancybox"] = True
    matplotlib.rcParams["figure.figsize"] = 11.69, 8.27  # A4
    matplotlib.rcParams["savefig.dpi"] = 300
    plotName = outputname + ".pdf"
    pp1 = PdfPages(os.path.join(outDir, plotName))
    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(111)
    xmax = max(x)
    xmin = min(x)
    ymax = max(y)
    ymin = min(y)
    labelString = title
    ax1.plot(x, y, color=colour, marker="o", linestyle="None", label=labelString)
    # Position the existing axes directly instead of creating a second axes
    # via plt.axes().
    ax1.set_position([0.04, 0.065, 0.8, 0.9])
    ax1.legend(bbox_to_anchor=(0.0, 1), loc=2, borderaxespad=0.1, ncol=3,
               title="Julian Day")
    ax1.plot([xmin, xmax], [xmin, xmax], "-k")  # 1:1 line
    # Pad the limits outwards on both sides (the original added 0.1 to the
    # minima as well, which shrank the plotting window).
    plt.axis([xmin - 0.1, xmax + 0.1, ymin - 0.1, ymax + 0.1])
    plt.xlabel("Measured Melt (m.w.e.)")
    plt.ylabel("Modelled Melt (m.w.e.)")
    plt.title("Modelled against Measured")
    # plt.show()
    pp1.savefig(bbox_inches="tight")
    pp1.close()
    plt.close()
    return 0
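# Usage sketch for the call pattern in the docstring. The data values and the
# "plots" output directory are illustrative assumptions; the directory must
# already exist.
measured = [0.10, 0.42, 0.71, 1.25]
modelled = [0.12, 0.38, 0.75, 1.10]
plotDif("melt_comparison", "plots", "Day 180", measured, modelled, "b")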
def savefig_pdf(self, fn, *args, **kwargs):
    # Get the git commit information.
    git_info = get_git_info()

    # See if we have any extra information to save. Pop it so that it is not
    # passed through to the underlying savefig call.
    extra_info = kwargs.pop("extra_info", None)
    if extra_info is not None:
        git_info = update_git_info_with_extra(git_info, extra_info)

    # If there is no information, just call the mpl savefig.
    if git_info is None:
        return mpl_savefig(self, fn, *args, **kwargs)

    # Build the PDF object that will take the metadata.
    fn = os.path.splitext(fn)[0] + ".pdf"
    kwargs["format"] = "pdf"
    fig = PdfPages(fn)

    # Save the figure.
    ret = mpl_savefig(self, fig, *args, **kwargs)

    # Add the metadata.
    metadata = fig.infodict()
    metadata["Keywords"] = json.dumps(git_info, sort_keys=True)

    # Commit the changes.
    fig.close()

    return ret
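# How savefig_pdf is meant to be wired in (a sketch, not part of the original
# file): it takes `self` and falls back to `mpl_savefig`, which implies it
# replaces Figure.savefig via a monkey-patch along these lines.
import matplotlib.figure

mpl_savefig = matplotlib.figure.Figure.savefig   # keep a handle on the original
matplotlib.figure.Figure.savefig = savefig_pdf   # route saves through the wrapper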
def profile_batch(radius, output):
    pp = PDF(output)
    for sim in glob('sim*.fits'):
        print(sim)
        header = pyfits.open(sim)[0].header
        w = header['W']
        N = header['N']
        pitch = header['PITCH']
        ang = header['VIEWANG']
        pitch = int(np.pi*2/pitch)
        ang = int(np.pi*2/ang)
        v, line, _ = salty.line_profile(sim, radius, pxbin=4., plot=False)
        ax = plt.figure().add_subplot(111)
        ax.set_xlabel('Velocity [km/s]')
        ax.set_ylabel('Normalized power')
        ax.set_title(sim)
        ax.text(300, 0.005,
                '$w={}$\n$N={}$\n$p=\\tau/{}$\n$\\theta_{{view}}=\\tau/{}$'.
                format(w, N, pitch, ang))
        ax.plot(v, line)
        pp.savefig(ax.figure)
    pp.close()
def show_or_save(plt, fig, use_x11, filename):
    if use_x11:
        plt.show()
    else:
        pp = PdfPages(filename)
        pp.savefig(fig)
        pp.close()
def make_lick_individual(targetSN, w1, w2):
    """ Make maps of the Lick indices. """
    filename = "lick_corr_sn{0}.tsv".format(targetSN)
    binimg = pf.getdata("voronoi_sn{0}_w{1}_{2}.fits".format(targetSN, w1, w2))
    intens = "collapsed_w{0}_{1}.fits".format(w1, w2)
    extent = calc_extent(intens)
    bins = np.loadtxt(filename, usecols=(0,), dtype=str).tolist()
    bins = np.array([x.split("bin")[1] for x in bins]).astype(int)
    data = np.loadtxt(filename, usecols=np.arange(25)+1).T
    labels = [r'Hd$_A$', r'Hd$_F$', r'CN$_1$', r'CN$_2$', r'Ca4227', r'G4300',
              r'Hg$_A$', r'Hg$_F$', r'Fe4383', r'Ca4455', r'Fe4531', r'C4668',
              r'H$_\beta$', r'Fe5015', r'Mg$_1$', r'Mg$_2$', r'Mg$_b$',
              r'Fe5270', r'Fe5335', r'Fe5406', r'Fe5709', r'Fe5782',
              r'Na$_D$', r'TiO$_1$', r'TiO$_2$']
    mag = "[mag]"
    ang = r"[\AA]"
    units = [ang, ang, mag, mag, ang, ang, ang, ang, ang, ang, ang, ang,
             ang, ang, mag, mag, ang, ang, ang, ang, ang, ang, ang, mag, mag]
    # One (vmin, vmax) pair per index; None means "derive from the data".
    lims = [[None, None] for _ in labels]
    pdf = PdfPages("figs/lick_sn{0}.pdf".format(targetSN))
    fig = plt.figure(1, figsize=(6.25, 5))
    plt.subplots_adjust(bottom=0.12, right=0.97, left=0.09, top=0.96)
    plt.minorticks_on()
    ax = plt.subplot(111)
    ax.minorticks_on()
    plot_indices = np.arange(12, 22)
    for i, vector in enumerate(data):
        if i not in plot_indices:
            continue
        print("Making plot for {0}...".format(labels[i]))
        kmap = np.zeros_like(binimg)
        kmap[:] = np.nan
        for bin, v in zip(bins, vector):
            idx = np.where(binimg == bin)
            kmap[idx] = v
        vmin = lims[i][0] if lims[i][0] else np.median(vector) - 2 * vector.std()
        vmax = lims[i][1] if lims[i][1] else np.median(vector) + 2 * vector.std()
        m = plt.imshow(kmap, cmap="inferno", origin="lower", vmin=vmin,
                       vmax=vmax, extent=extent, aspect="equal")
        make_contours()
        plt.minorticks_on()
        plt.xlabel("X [kpc]")
        plt.ylabel("Y [kpc]")
        plt.xlim(extent[0], extent[1])
        plt.ylim(extent[2], extent[3])
        cbar = plt.colorbar(m)
        cbar.set_label("{0} {1}".format(labels[i], units[i]))
        pdf.savefig()
        plt.clf()
    pdf.close()
    return
def plot_psi_weights(output,
                     modelfile='/d/monk/eigenbrot/WIYN/14B-0456/anal/models/allZ2_vardisp/allz2_vardisp_batch_interp.fits'):
    # Like the last page of all the fit plots, but for all pointings at once.
    # Cribbed from plot_bc_vardisp.py.
    m = pyfits.open(modelfile)[1].data[0]
    numZ = np.unique(m['Z'][:, 0]).size
    numAge = np.unique(m['AGE'][:, 0]).size
    big_W = np.zeros((numZ, numAge))

    for p in range(6):
        coeffile = 'NGC_891_P{}_bin30_allz2.coef.fits'.format(p + 1)
        print(coeffile)
        coef_arr = pyfits.open(coeffile)[1].data
        numap = coef_arr['VSYS'].size
        for i in range(numap):
            wdata = coef_arr[i]['LIGHT_FRAC'].reshape(numZ, numAge)
            big_W += wdata / np.max(wdata)

    bwax = plt.figure().add_subplot(111)
    bwax.imshow(big_W, origin='lower', cmap='Blues', interpolation='none')
    bwax.set_xlabel('SSP Age [Gyr]')
    bwax.set_xticks(range(numAge))
    bwax.set_xticklabels(m['AGE'][:numAge, 0] / 1e9)
    bwax.set_ylabel(r'$Z/Z_{\odot}$')
    bwax.set_yticks(range(numZ))
    bwax.set_yticklabels(m['Z'][::numAge, 0])

    pp = PDF(output)
    pp.savefig(bwax.figure)
    pp.close()
    plt.close(bwax.figure)

    return
def main():
    data = scipy.io.loadmat('data.mat')
    x1 = data['x1'][0]
    x2 = data['x2'][0]
    n = len(x1)
    kl = [1, 7, 14, 28]  # k = 14 may be optimal
    x = np.arange(-6, 6.05, 0.05)
    fig = plt.figure()
    plt.rcParams['font.size'] = 10
    for i in range(len(kl)):
        k = kl[i]
        p1 = np.zeros(len(x))
        p2 = np.zeros(len(x))
        for j in range(len(x)):
            # k-NN density estimate: p(x) = k / (n * 2 * r_k), where r_k is
            # the distance to the k-th nearest sample.
            r1 = sorted(abs(x1 - x[j]))
            r2 = sorted(abs(x2 - x[j]))
            p1[j] = float(k) / (n * 2 * r1[k-1])
            p2[j] = float(k) / (n * 2 * r2[k-1])
        plt.subplot(2, 2, i+1)
        plt.plot(x, p1, label=r'$p(\mathbf{x} \mid c_1)$')
        plt.plot(x, p2, label=r'$p(\mathbf{x} \mid c_2)$')
        plt.legend(framealpha=0, fontsize=7)
        plt.title(r'$k = %d$' % k)
        plt.xlabel(r'$x$')
        plt.ylabel(r'$p(\mathbf{x} \mid c_i)$')
    plt.tight_layout()
    pp = PdfPages('knn.pdf')
    pp.savefig(fig)
    pp.close()
    plt.clf()
def makeStackedBarGraph(coords_list, bar_labels_list, stack_label_list, title,
                        axis_labels, file_name):
    bar_width = 0.2
    step = 2000
    inds = np.arange(len(bar_labels_list))
    fig = mpl.figure.Figure(figsize=(15, 10))
    canvas = FigureCanvas(fig)
    fig.suptitle(title)
    fig.subplots_adjust(wspace=0.5, hspace=0.5)

    # A continuous color bar could be built here from the stack labels
    # (Normalize + LinearSegmentedColormap + ScalarMappable) and drawn with
    # fig.colorbar(sm); that code is disabled in this version.
    colorRange, colorTable = orr.generateColors(stack_label_list, [0], step=step)

    ax = fig.add_subplot(2, 2, 1)
    ax.set_title(title, fontsize=28)
    ax.set_xlabel(axis_labels[0], fontsize=24)
    ax.set_ylabel(axis_labels[1], fontsize=24)

    bars = []
    bottom = np.zeros((len(coords_list[0][1]),))
    for i, (coords, stack_label) in enumerate(zip(coords_list, stack_label_list)):
        if i == 0:
            bar = ax.bar(inds, coords[1], color=colorTable[i], align='center',
                         linewidth=0)
        else:
            bar = ax.bar(inds, coords[1], color=colorTable[i], align='center',
                         bottom=bottom, linewidth=0)
        bars.append(bar)
        bottom = bottom + np.array(coords[1])

    ax.set_xticks(inds)
    ax.set_xticklabels(bar_labels_list, rotation=45, ha='right')
    ax.tick_params(axis='both', which='both', labelsize=18)
    ax.legend(bars, stack_label_list, loc=1, bbox_to_anchor=(1.2, 1), ncol=1,
              markerscale=0.2, fontsize=6)

    pdf = PdfPages(file_name)
    pdf.savefig(fig)
    pdf.close()
    print('Graphing Resource usage: {}kb'.format(
        getattr(resource.getrusage(resource.RUSAGE_SELF), 'ru_maxrss') / 1000))
def print_pdf_graph(file_f, regulon, conn):
    pdf = PdfPages(file_f)
    edgesLimits = [50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
    # CRP = regulon_set['LexA']
    for lim in edgesLimits:
        print(lim)
        g = buildSimilarityGraph_top_10_v2(conn, lim)

        # Here the node is a motif, e.g. 87878787_1; the first 8 digits
        # represent the gi.
        node_color = [1 if node[0:8] in regulon else 0 for node in g]

        # graphviz_layout lives in nx.nx_agraph in current networkx releases.
        pos = nx.nx_agraph.graphviz_layout(g, prog="neato")
        plt.figure(figsize=(10.0, 10.0))
        plt.axis("off")
        nx.draw(g, pos, node_color=node_color, node_size=20, alpha=0.8,
                with_labels=False, cmap=plt.cm.jet, vmax=1.0, vmin=0.0)
        pdf.savefig()
        plt.close()
    pdf.close()
def plot_data_dict_1D(self, results_path, file_n, data, timepoints):
    print('plotting now...')
    pp = PdfPages(results_path + '/' + file_n)
    # nBins = 50
    cc = 0
    xmin, xmax = -1, 7
    x_grid = linspace(xmin, xmax, 1000)
    for tp in timepoints:
        dat = log10(1 + data[tp][:, 0])
        dat[isneginf(dat)] = 0
        print(dat)
        kde = st.gaussian_kde(dat, bw_method=0.2)
        pdf = kde.evaluate(x_grid)
        ax = plt.subplot(4, 5, cc + 1)
        ax.plot(x_grid, pdf, color='blue', alpha=0.5, lw=3)
        ax.set_xlim([-1, 7])
        # plt.hist(data[tp], nBins)
        # plt.xscale('log')
        cc += 1
    pp.savefig()
    plt.close()
    pp.close()
def plot_data_comb_2D(self, results_path, file_n, data, fit, timepoints):
    pp = PdfPages(results_path + '/' + file_n)
    cc = 0
    for tp in timepoints:
        xmin, xmax = -3, 3
        ymin, ymax = -3, 3
        xx, yy = mgrid[xmin:xmax:100j, ymin:ymax:100j]
        positions = vstack([xx.ravel(), yy.ravel()])
        values = vstack([log10(1 + data[tp][:, 0]), log10(1 + data[tp][:, 1])])
        kernel = st.gaussian_kde(values)
        f = reshape(kernel(positions).T, xx.shape)

        xxf, yyf = mgrid[xmin:xmax:100j, ymin:ymax:100j]
        positions_f = vstack([xxf.ravel(), yyf.ravel()])
        values_f = vstack([log10(1 + fit[tp][:, 0]), log10(1 + fit[tp][:, 1])])
        kernel_f = st.gaussian_kde(values_f)
        ff = reshape(kernel_f(positions_f).T, xxf.shape)

        ax = plt.subplot(4, 5, cc + 1)
        ax.contourf(xx, yy, f, cmap='Blues')
        ax.contourf(xxf, yyf, ff, cmap='Reds')
        ax.set_xlim([-1, 3])
        ax.set_ylim([-1, 3])
        cc += 1
    pp.savefig()
    plt.close()
    pp.close()
def plot_distributions(distributions, bucket_pct, axes, out_file):
    x_max, y_max = axes
    pp = PdfPages(out_file)
    variances = sorted(distributions)
    subsamples = sorted(distributions[variances[-1]])
    if x_max == 0.:
        x_max = distributions[0][-1]
    bucket_width = x_max * bucket_pct
    bucket_boundaries = np.arange(0, x_max + bucket_width / 2., bucket_width)
    x_axis_points = np.arange(bucket_width / 2., x_max, bucket_width)
    for i, v in enumerate(variances):
        plt.figure(i)
        plt.xlabel("regret distribution")
        if v == 0:
            plt.title("true game")
            cum_dist = np.array([bisect(distributions[0], b)
                                 for b in bucket_boundaries])
            plt.plot(x_axis_points,
                     (cum_dist[1:] - cum_dist[:-1]) / float(cum_dist[-1]),
                     label="true game")
        else:
            plt.title(r"$\sigma \approx$" + str(v))
            for s in subsamples:
                cum_dist = np.array([bisect(distributions[v][s], b)
                                     for b in bucket_boundaries])
                plt.plot(x_axis_points,
                         (cum_dist[1:] - cum_dist[:-1]) / float(cum_dist[-1]),
                         label=str(s) + " samples")
        plt.legend(loc="upper right", prop={'size': 6})
        if y_max != 0.:
            plt.axis([0, x_max, 0, y_max])
        pp.savefig()
    pp.close()
def make_comp_plot_1D(self, results_path, file_n, data, sims, timepoints, ind=0):
    pp = PdfPages(results_path + '/' + file_n)
    cc = 0
    xmin, xmax = -1, 7
    x_grid = linspace(xmin, xmax, 1000)

    def kernel_est(d, ind, x_grid):
        dl = log10(1 + d[:, ind])
        # dl[isneginf(dl)] = 0
        dl = dl[isfinite(dl)]
        kde = st.gaussian_kde(dl, bw_method=0.2)
        pdf = kde.evaluate(x_grid)
        return pdf

    for tp in timepoints:
        pdf_data = kernel_est(data[tp], ind, x_grid)
        pdf_sim = kernel_est(sims[tp], ind, x_grid)
        ax = plt.subplot(4, 5, cc + 1)
        ax.plot(x_grid, pdf_data, color='blue', alpha=0.5, lw=3)
        ax.plot(x_grid, pdf_sim, color='red', alpha=0.5, lw=3)
        cc += 1
    pp.savefig()
    plt.close()
    pp.close()
def err_histogram(output, basedir='.', bins=10, field='MLWA', err='dMLWA',
                  suffix='coef', label=r'$\delta\tau_{L,\mathrm{fit}}/\tau_L$',
                  exclude=exclude, ymax=90):
    ratio_list = []
    for p in range(6):
        coef = '{}/NGC_891_P{}_bin30_allz2.{}.fits'.format(basedir, p + 1, suffix)
        print(coef)
        c = pyfits.open(coef)[1].data
        tmp = c[err]
        if field == 'TAUV':
            tmp *= 1.086
        else:
            tmp /= c[field]
        tmp = np.delete(tmp, np.array(exclude[p]) - 1)
        ratio_list.append(tmp)

    ratio = np.hstack(ratio_list)
    ratio = ratio[ratio == ratio]  # drop NaNs (NaN != NaN)
    ratio = ratio[np.where(ratio < 0.8)[0]]

    ax = plt.figure().add_subplot(111)
    ax.set_xlabel(label)
    ax.set_ylabel(r'$N$')
    ax.hist(ratio, bins=bins, histtype='step', color='k')
    ax.set_xlim(0, 0.52)
    ax.set_xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])
    ax.set_ylim(0, ymax)
    ax.set_yticks(range(0, int(ymax / 10) * 10 + 10, int(int(ymax / 10) / 4) * 10))

    pp = PDF(output)
    pp.savefig(ax.figure)
    pp.close()
    plt.close(ax.figure)
    return
def pdfdiagnostics(self, what='specs', n_subplot=5):
    print('creating a diagnostic pdf of ' + what)
    from matplotlib.backends.backend_pdf import PdfPages
    data = getattr(self, what)  # safer than exec('data = self.%s' % what)
    data.sort_index(axis=1, inplace=True)  # arrange alphabetically
    pp = PdfPages('%s.pdf' % self.group)
    for i in range(0, len(data.columns), n_subplot + 1):
        Axes = data[data.columns[i:i + n_subplot]].plot(subplots=True)
        tick_params(labelsize=6)
        # y ticklabels
        [setp(item.yaxis.get_majorticklabels(), 'size', 7) for item in Axes.ravel()]
        # x ticklabels
        [setp(item.xaxis.get_majorticklabels(), 'size', 5) for item in Axes.ravel()]
        # y labels
        [setp(item.yaxis.get_label(), 'size', 10) for item in Axes.ravel()]
        # x labels
        [setp(item.xaxis.get_label(), 'size', 10) for item in Axes.ravel()]
        tight_layout()
        ylabel('mix ratio')
        # plt.locator_params(axis='y', nbins=2)
        print('%.1f%% done' % (100. * i / len(data.columns)))
        savefig(pp, format='pdf')
        close('all')
    pp.close()
    print('PDF out')
    close('all')
class PlotDocument(object):
    def __init__(self, pages, statistics_manager, page_size=(17, 11),
                 pdf_filename=None):
        if pdf_filename:
            self.plot_pdf = PdfPages(pdf_filename)
        else:
            self.plot_pdf = None
        self.pages = pages
        self.page_size = page_size
        self.statistics_manager = statistics_manager

    def make_pages(self):
        for page in self.pages:
            if page:
                if page.type == 'GridPlot':
                    page.add_plots(self.statistics_manager, self.page_size,
                                   self.plot_pdf)
                else:
                    page.add_text(self.page_size, self.plot_pdf)
        if self.plot_pdf:
            plt.close()
            self.plot_pdf.close()
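# Usage sketch (hypothetical page objects; anything named here that is not
# defined in the class above is an assumption): each page is expected to
# expose .type and either .add_plots(...) or .add_text(...).
#
#   doc = PlotDocument(pages=my_pages, statistics_manager=stats_mgr,
#                      pdf_filename="report.pdf")
#   doc.make_pages()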
def err_plot(output, basedir='.', field='MLWA', err='dMLWA', suffix='syserr',
             label=r'$\tau_L$', err_label=r'$\delta\tau_{L,\mathrm{sys}}$',
             exclude=exclude):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel(label)
    ax.set_ylabel(err_label)

    for p in range(6):
        coef = '{}/NGC_891_P{}_bin30_allz2.{}.fits'.format(basedir, p + 1, suffix)
        print(coef)
        c = pyfits.open(coef)[1].data
        exarr = np.array(exclude[p]) - 1
        d = np.delete(c[field], exarr)
        e = np.delete(c[err], exarr)
        ax.scatter(d, e / d, c='k', alpha=0.7, linewidth=0)

    ax.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5])

    pp = PDF(output)
    pp.savefig(fig)
    pp.close()
    plt.close(fig)
    return
def plot_tica_and_clusters(component_j, transformed_data, clusterer, lag_time,
                           component_i, label="dot", active_cluster_ids=[],
                           intermediate_cluster_ids=[], inactive_cluster_ids=[],
                           tica_dir=""):
    trajs = np.concatenate(transformed_data)
    plt.hexbin(trajs[:, component_i], trajs[:, component_j], bins='log', mincnt=1)
    plt.xlabel("tIC %d" % (component_i + 1))
    plt.ylabel("tIC %d" % (component_j + 1))
    centers = clusterer.cluster_centers_

    # Active clusters: black triangles (every cluster).
    indices = [j for j in range(0, len(active_cluster_ids), 1)]
    for i in [active_cluster_ids[j] for j in indices]:
        center = centers[i, :]
        if label == "dot":
            plt.scatter([center[component_i]], [center[component_j]],
                        marker='v', c='k', s=10)
        else:
            plt.annotate('%d' % i,
                         xy=(center[component_i], center[component_j]),
                         xytext=(center[component_i], center[component_j]),
                         size=6)

    # Intermediate clusters: magenta octagons (every 5th cluster).
    indices = [j for j in range(0, len(intermediate_cluster_ids), 5)]
    for i in [intermediate_cluster_ids[j] for j in indices]:
        center = centers[i, :]
        if label == "dot":
            plt.scatter([center[component_i]], [center[component_j]],
                        marker='8', c='m', s=10)
        else:
            plt.annotate('%d' % i,
                         xy=(center[component_i], center[component_j]),
                         xytext=(center[component_i], center[component_j]),
                         size=6)

    # Inactive clusters: white squares (every 5th cluster).
    indices = [j for j in range(0, len(inactive_cluster_ids), 5)]
    for i in [inactive_cluster_ids[j] for j in indices]:
        center = centers[i, :]
        if label == "dot":
            plt.scatter([center[component_i]], [center[component_j]],
                        marker='s', c='w', s=10)
        else:
            plt.annotate('%d' % i,
                         xy=(center[component_i], center[component_j]),
                         xytext=(center[component_i], center[component_j]),
                         size=6)

    pp = PdfPages("%s/c%d_c%d_clusters%d.pdf"
                  % (tica_dir, component_i, component_j, np.shape(centers)[0]))
    pp.savefig()
    pp.close()
    plt.clf()
def plot(results):
    # Sorting before converting keeps the bars in seed order; list() is needed
    # because np.array over a dict view is not reliable in Python 3.
    xs = np.array(sorted(results.keys()))
    ys = np.array([results[k] for k in xs])
    maxy = max(results.values())

    # plt.plot(xs, ys, 'bo', alpha=0.5)
    plt.ylim([0, maxy * 1.1])
    # plt.plot(xs, ys, 'ro--', label="Time")
    plt.bar(range(SAMPLES), ys, color="r", align="center")
    plt.xticks(range(SAMPLES), xs)

    # Styling
    plt.xlabel("seed")
    plt.ylabel("seconds")
    plt.title("Genetic Algorithm - Seeds")
    plt.grid(True, which="both", linestyle="dotted")
    plt.legend()

    from matplotlib.backends.backend_pdf import PdfPages
    pp = PdfPages("plots/{}.pdf".format(NAME))
    plt.savefig(pp, format="pdf")
    pp.close()
    plt.show()
def plot_pnas_vs_tics(pnas_dir, tic_dir, pnas_names, directory, scale=7.14,
                      refcoords_file=None):
    pnas = np.concatenate(load_file(pnas_dir))
    pnas[:, 0] *= scale
    print(np.shape(pnas))
    print(len(pnas_names))
    if "ktICA" in tic_dir:
        tics = load_dataset(tic_dir)
    else:
        tics = verboseload(tic_dir)
    print(np.shape(tics))
    tics = np.concatenate(tics)
    print(np.shape(tics))
    if len(pnas_names) != np.shape(pnas)[1]:
        print("Invalid pnas names")
        return

    for i in range(0, np.shape(pnas)[1]):
        for j in range(0, np.shape(tics)[1]):
            tic = tics[:, j]
            pnas_coord = pnas[:, i]
            plt.hexbin(tic, pnas_coord, bins='log', mincnt=1)
            coord_name = pnas_names[i]
            tic_name = "tIC.%d" % (j + 1)
            plt.xlabel(tic_name)
            plt.ylabel(coord_name)
            pp = PdfPages("%s/%s_%s_hexbin.pdf" % (directory, tic_name, coord_name))
            pp.savefig()
            pp.close()
            plt.clf()
    return
def plot_tica(transformed_data_dir, lag_time):
    transformed_data = verboseload(transformed_data_dir)
    trajs = np.concatenate(transformed_data)
    plt.hexbin(trajs[:, 0], trajs[:, 1], bins='log', mincnt=1)
    pp = PdfPages("/scratch/users/enf/b2ar_analysis/tica_phi_psi_chi2_t%d.pdf"
                  % lag_time)
    pp.savefig()
    pp.close()
def anal2pdf():
    pp = PdfPages('../../datafiles/jul14/analplots.pdf')
    fnames = ['../../datafiles/jul14/vsweep_10_1.h5',
              '../../datafiles/jul14/vsweep_10_1b.h5',
              '../../datafiles/jul14/vsweep_10_2.h5',
              '../../datafiles/jul14/vsweep_10_3.h5',
              '../../datafiles/jul14/vsweep_10_4.h5',
              '../../datafiles/jul14/vsweep_10_5.h5',
              '../../datafiles/jul14/vsweep_10_6.h5']
    for fn in fnames:
        anal_vsweep(fn)
        figure(1)
        suptitle('Voltage sweep, 5096MHz, raw phase(Y), time_samples(X)')
        f = gcf()
        f.savefig(pp, format='pdf')
        figure(3)
        suptitle('Voltage sweep, 5096MHz, radians(Y) vs mV(X)')
        f = gcf()
        f.savefig(pp, format='pdf')
    pp.close()
def write_pressure_graph(self):
    P.xlabel("Temperature")
    P.ylabel("Pressure")
    P.title("Pressure per Temperature")
    P.axis([0.0, self.temp_points[-1] + 10., 0.0, self.pressure_points[-1] + 1])
    ax = P.gca()
    ax.set_autoscale_on(False)

    # Linear fit to the measured points; prepend the intercept so the fitted
    # line starts at T = 0.
    popt, pcov = curve_fit(fit, self.temp_points, self.pressure_points)
    y_fit = [popt[0] * x + popt[1] for x in self.temp_points]
    y_fit.insert(0, popt[1])
    fit_x_points = self.temp_points[:]
    fit_x_points.insert(0, 0.)

    pp = PdfPages(str(SCREEN_SIZE) + "_" + str(N) + ".pdf")
    P.plot(self.temp_points, self.pressure_points, "o", fit_x_points, y_fit, "--")
    pp.savefig()
    pp.close()
def readCurvesFromFileCallback():
    # Ask for input file
    _filename = askForInputFile(fileFilter="*.cur")
    if _filename == "":
        return

    # Load data from file
    [_readCurves, _readVoltages] = loadDataFromFile(_filename)
    for i in range(0, len(_readVoltages)):
        _voltages = []
        _currents = []
        for j in range(0, len(_readCurves[i])):
            _voltages.extend([_readCurves[i][j][0]])
            _currents.extend([_readCurves[i][j][1]])

    # Plot read data
    onlyFilename = _filename.split("/")[-1]
    plotCurves(_readCurves, _readVoltages, onlyFilename, interpolate=True)

    # Ask whether the user wants to save the plot to a PDF file
    d = YesNoDialog(rootWindowHandler, 'Save plot to PDF file?', 'Yes', 'No')
    rootWindowHandler.wait_window(d.top)
    if not yesNoReturnedValue:
        return
    _fileTypes = '*.*'
    _filename = askForOutputFilename(_fileTypes)
    if _filename == "":
        return
    _filename = fixExtensionOfFilename(_filename, 'pdf')
    pp = PdfPages(_filename)
    plot.savefig(pp, format='pdf')
    pp.close()
    print('Saved curves to PDF file: "%s"' % _filename)
def complexAll(self, f1=0., f2=0., amax=.16, nrows=1, ncols=1, antList=allAnts):
    pyplot.ioff()
    pp = PdfPages('ComplexLeaks.pdf')
    scale = 10. / math.sqrt(ncols * nrows)
    if f1 == 0.:
        [f1, f2] = LkSet.xlimits(self)  # default is to find freq limits in the data
    print("frequency limits: %.3f - %.3f GHz" % (f1, f2))
    ymin = -1. * amax
    ymax = amax
    npanel = 0
    for ant in antList:
        npanel = npanel + 1
        if npanel > nrows * ncols:
            npanel = 1
            pyplot.clf()
        p = pyplot.subplot(nrows, ncols, npanel, aspect='equal')  # DL,DR in one panel
        p.tick_params(axis='both', which='major', labelsize=scale)
        p.axis([ymin, ymax, ymin, ymax])
        p.grid(True)
        for Leak in self.LeakList:
            if Leak.ant == ant:
                print("plotting DR and DL for antenna %d" % ant)
                Leak.plotComplex(p, f1, f2)
                # pyplot.title("C%d DR (circles, solid) and DL (diamonds, dashed)" % ant,
                #              fontdict={'fontsize': scale})
        if (npanel == nrows * ncols) or (ant == antList[-1]):
            pyplot.savefig(pp, format='pdf')
    pp.close()
def multipage(filename, figs=None, dpi=200):
    pp = PdfPages(filename)
    if figs is None:
        figs = [plt.figure(n) for n in plt.get_fignums()]
    for fig in figs:
        fig.savefig(pp, format='pdf')
    pp.close()
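# Usage sketch: build any number of figures, then write them all to one PDF.
# When figs is None, every currently open figure (via plt.get_fignums()) is saved.
import matplotlib.pyplot as plt

plt.figure(); plt.plot([0, 1], [0, 1])
plt.figure(); plt.plot([0, 1], [1, 0])
multipage("all_open_figures.pdf")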
def plot_miri_comparison():
    inst = webbpsf.MIRI()
    filtlist_W = [f for f in inst.filter_list if f[-1] == 'W']
    filtlist_C = [f for f in inst.filter_list if f[-1] != 'W']

    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages('weights_miri_comparison.pdf')

    for filts in [filtlist_W, filtlist_C]:
        try:
            os.unlink('/Users/mperrin/software/webbpsf/data/MIRI/filters')
        except OSError:
            pass
        os.symlink('/Users/mperrin/software/webbpsf/data/MIRI/real_filters',
                   '/Users/mperrin/software/webbpsf/data/MIRI/filters')
        plotweights('miri', filtlist=filts)

        os.unlink('/Users/mperrin/software/webbpsf/data/MIRI/filters')
        os.symlink('/Users/mperrin/software/webbpsf/data/MIRI/fake_filters',
                   '/Users/mperrin/software/webbpsf/data/MIRI/filters')
        plotweights('miri', filtlist=filts, overplot=True, ls='--')

        P.draw()
        pdf.savefig()
    pdf.close()
def compare_board_estimations(esti_extrinsics, board, board_dim,
                              actual_boards, save_name=None):
    """
    Plots true and estimated boards on the same figure.

    Args:
        esti_extrinsics: dictionary, keyed by image number, values are Extrinsics
        board:
        board_dim: (board_width, board_height)
        actual_boards: list of dictionaries
        save_name: filename, string
    """
    # Initialize pp so the later checks do not raise NameError when no
    # save_name is given.
    pp = None
    if save_name:
        pp = PdfPages(save_name)
    plt.clf()

    for i in range(len(actual_boards)):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        act_board = actual_boards[i]
        aX, aY, aZ = util.board_dict2array(act_board, board_dim)
        ax.plot_wireframe(aX, aY, aZ, color='b')

        if i in esti_extrinsics:
            esti_loc = esti_extrinsics[i].trans_vec
            esti_board = util.move_board(board, esti_loc)
            eX, eY, eZ = util.board_dict2array(esti_board, board_dim)
            ax.plot_wireframe(eX, eY, eZ, color='r')

        if pp:
            pp.savefig()
        else:
            plt.show()
    if pp:
        pp.close()
def heat_map_single(data, file="heat_map_plate.pdf", *args, **kwargs):
    """
    Create a heat map for a single readout.

    .. todo:: Share code between heat_map_single and heat_map_multiple
    """
    np_data = data.data

    pp = PdfPages(os.path.join(PATH, file))

    fig, ax = plt.subplots()
    im = ax.pcolormesh(np_data, vmin=np_data.min(), vmax=np_data.max())  # cmap='RdBu'
    fig.colorbar(im)

    # Put the major ticks at the middle of each cell.
    ax.set_xticks(np.arange(np_data.shape[1]) + 0.5, minor=False)
    ax.set_yticks(np.arange(np_data.shape[0]) + 0.5, minor=False)

    # Invert the y-axis so the data is displayed as it appears on the plate.
    ax.invert_yaxis()
    ax.xaxis.tick_top()

    ax.set_xticklabels(data.axes['x'], minor=False)
    ax.set_yticklabels(data.axes['y'], minor=False)

    pp.savefig(fig)
    pp.close()
    fig.clear()
    return ax
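# Usage sketch: heat_map_single only needs an object exposing `.data` (a 2-D
# array) and `.axes` (a dict of 'x' and 'y' tick labels). The stand-in below
# is hypothetical, not the project's real readout class, and the PATH global
# must point at an existing output directory.
import numpy as np
from types import SimpleNamespace

readout = SimpleNamespace(
    data=np.random.rand(8, 12),
    axes={'x': list(range(1, 13)), 'y': list('ABCDEFGH')},
)
heat_map_single(readout, file="heat_map_plate_demo.pdf")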
def plotLoad(dataFolder, srv, TS, trace_type, tsInterval):
    srv_file = srv + "_" + TS + "_" + trace_type + ".json"
    srv_load = json.load(open(dataFolder + srv_file))
    fig, ax = plt.subplots()
    print("Plotting QoE evaluation for server:", srv)

    ts = [int(x) for x in srv_load.keys()
          if int(x) > tsInterval[0] and int(x) < tsInterval[1]]
    sorted_ts = sorted(ts)
    tr_vals = [srv_load[str(cur_ts)] for cur_ts in sorted_ts]
    srvName = '$S_{' + str(int(srv.split('-')[1])) + '}$'
    plt.plot(sorted_ts, tr_vals, 'b-', label=srvName, linewidth=2.0, markersize=8)

    # Change the time stamp ticks (one label every 15 minutes).
    num_intvs = int((tsInterval[1] - tsInterval[0]) / 900) + 1
    ts_labels = [tsInterval[0] + x * 900 for x in range(num_intvs)]
    str_ts = [datetime.datetime.fromtimestamp(x * 900 + tsInterval[0]).strftime('%H:%M')
              for x in range(num_intvs)]
    plt.xticks(ts_labels, str_ts, fontsize=15)

    box = ax.get_position()
    # ax.legend(loc='lower center', bbox_to_anchor=(0.5, 0),
    #           fancybox=True, shadow=True, ncol=4, prop={'size': 20})
    # ax.set_title('Server QoE Score Observed on $S_{10}$', fontsize=20)
    # ax.set_xlabel("Time in a day", fontsize=20)
    ax.set_ylabel("Server Load", fontsize=20)
    # ax.set_ylim([0, 5])
    plt.show()

    # cache_agent is assumed to be a module-level global in the original code.
    pdf = PdfPages(dataFolder + '/imgs/' + cache_agent + '_' + 'load.pdf')
    pdf.savefig(fig)
    pdf.close()
def plot(sideband, nominal, x_range, fit, fit_err, outputname, titel='',
         signal=None, sideband_sigma=None, nominal_sigma=None):
    pp = PdfPages(outputname)
    plt.clf()
    # plt.semilogy()
    plt.yscale('log', nonposy='clip')  # the kwarg is 'nonpositive' in matplotlib >= 3.3
    ax = plt.gca()
    ax.set_ylim([.01, 10])
    ax.set_xlim([650, 1850])
    # e.g. titel = "CMS $\it{Preliminary}$ 35.9 fb$^{-1}$ (13 TeV)"
    plt.title(titel, fontsize=10)
    plt.plot(x_range, nominal, label=r"Exp $95\%$ CL nominal",
             color='red', linestyle='dotted')
    plt.plot(x_range, sideband, label=r"Exp $95\%$ CL sideband",
             color='blue', linestyle='dotted')
    if signal:
        plt.plot(x_range, signal, label="signal cross section",
                 color='black', linestyle='dotted')
    if sideband_sigma:
        plt.fill_between(x_range, sideband_sigma[0], sideband_sigma[1],
                         alpha=0.3, facecolor='yellow', edgecolor='yellow',
                         linewidth=0, label=r"$\pm$ 1 std. deviation sideband")
    if nominal_sigma:
        plt.fill_between(x_range, nominal_sigma[0], nominal_sigma[1],
                         alpha=0.6, facecolor='yellow', edgecolor='yellow',
                         linewidth=0, label=r"$\pm$ 1 std. deviation nominal")
    ax.errorbar(x_range, fit, yerr=fit_err, label="fitted cross section",
                color='green', linestyle='dotted')
    # ax.set_yscale('symlog')
    plt.xlabel('VLQ mass (GeV)')
    plt.ylabel(r'$\mathbf{\sigma \times}$ BR(VLQ$\mathbf{\rightarrow}$tW) (pb)')
    plt.legend(loc="upper center", prop={'size': 12}, frameon=False)
    plt.savefig(pp, format='pdf')
    pp.close()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

whats = ("Q2", "Tk", "ang")
inpath = "qelma"

with PdfPages("qel_delta_s_ratio.pdf") as pdf:
    for what in whats:
        plt.figure()
        plt.title("numu, nc, E = 1000 MeV")
        x1, y1 = np.loadtxt("{}/numu_s+0.2_{}.txt".format(inpath, what),
                            delimiter=' ', unpack=True)
        x2, y2 = np.loadtxt("{}/numu_s-0.2_{}.txt".format(inpath, what),
                            delimiter=' ', unpack=True)
        x1r, y1r = np.loadtxt("{}/numu_s+0.2r_{}.txt".format(inpath, what),
                              delimiter=' ', unpack=True)
        x2r, y2r = np.loadtxt("{}/numu_s-0.2r_{}.txt".format(inpath, what),
                              delimiter=' ', unpack=True)
        plt.xlabel(what)
        plt.ylabel("#events")
        plt.plot(x1, (y1r - y1) / (y1r + y1), 'r-', label=r'$\Delta s = 0.2$')
        plt.plot(x1, (y2r - y2) / (y2r + y2), 'b-', label=r'$\Delta s = -0.2$')
        plt.legend()
        pdf.savefig()
# define the radii to be used for aperture photometry
radii = np.arange(40) + 1

# make an array for the calculation of the area of each bagel (annulus)
area = [0 for x in range(len(radii))]

# calculate area of each bagel
for i in range(0, len(area)):
    if i == 0:
        area[i] = math.pi * math.pow(radii[0], 2)
    else:
        area[i] = math.pi * (math.pow(radii[i], 2) - math.pow(radii[i-1], 2))

# create a PDF file for the plots
with PdfPages('jr_compilation_J0905.pdf') as pdf:
    fig = plt.figure()
    collection = ['F475W', 'F814W', 'F160W']
    flux = np.zeros([len(collection), len(radii)])
    subflux = np.zeros([len(collection), len(radii)])

    for i in range(0, len(collection)):
        # read in the images (the data, header and fnu containers are
        # defined earlier in the original script)
        file = glob.glob(dir + 'J0905_final_' + collection[i] + '*sci.fits')
        hdu = fits.open(file[0])
        data[i], header[i] = hdu[0].data, hdu[0].header
        fnu[i] = header[i]['PHOTFNU']
import os
from piecewise import piecewise_plot
from piecewise import piecewise
from matplotlib.backends.backend_pdf import PdfPages

# chrom  start    sample         svclass    size_bin  IMD
# 7      7365075  NSLC-0057-T01  inversion  1-10Kb    0.0
# 7      7369129  NSLC-0057-T01  inversion  1-10Kb    0.0

# input_dir = "/Users/khandekara2/iCloud/Sherlock-Lung/segments/"
input_dir = "/Users/azhark/iCloud/dev/Pediatric-Tumors/results/IMD-Plots/"
# input_dir = "/Users/khandekara2/iCloud/Alexandrov_Lab/data/SV/data/Mutographs_ESCC_Train9/segments/"
os.chdir(input_dir)

project = 'KiCS'
# project = 'Mutographs-ESCC'
output_path = "/Users/azhark/iCloud/dev/Pediatric-Tumors/results/IMD-Plots/"
pp = PdfPages(output_path + project + '_IMD_plots' + '.pdf')
# pp = PdfPages(project + '_IMD_plots' + '.pdf')

sizes = {
    '>10Mb': 50,
    '1Mb-10Mb': 40,
    '1-10Kb': 10,
    '100kb-1Mb': 30,
    '10-100kb': 20
}
size_order = ['1-10Kb', '10-100kb', '100kb-1Mb', '1Mb-10Mb', '>10Mb']

count = 0
for file in os.listdir('.'):
    if file.endswith(".IMD.tsv"):
        sample = file.split(".")[0].split("_")[0]
        j += 1
        diff = abs((X.created_date_time[i] - X.created_date_time[curr_index])
                   / 60000)
        if diff <= 1000 and factor == n:
            time_difference.append(diff)
            y.append(hh)
        i += 1
    if len(y) > 0:
        # plot(time_difference, y)
        print(time_difference)
        print(y)


path = ''
pp = PdfPages('./Plot' + str(n) + '.pdf')
for file_name in file_names:
    try:
        path = file_name
        dataset = pd.read_csv(path)
    except:
        continue
    X = dataset.iloc[:, :]
    time_to_change_temperature(X)
    k += 1
pp.close()
def main():
    import matplotlib.pyplot as plt
    import numpy as np
    from matplotlib.backends.backend_pdf import PdfPages
    import math
    import sys
    import csv
    import glob

    # Since python cannot figure out my path I append my function directory
    # to the path, then import StockFunctions.py.
    sys.path.append('/home/jdw/UM2020Spring/M567/Functions/')
    import StockFunctions as sf

    # (A disabled argparse interface was sketched here, with options for the
    # stock name, the year/month/day the prices were obtained, and the input
    # and output directories.)

    # Choose a company whose stock price you want to visualize.
    stockName = 'Verizon'
    stockAbbrev = sf.GetStockAbbrev(stockName)
    directory = ('/home/jdw/UM2020Spring/M567/Data/' + stockName +
                 '/StockPrice_' + stockAbbrev + '*.txt')
    infiles = glob.glob(directory)

    for filename in infiles:
        if filename[-4:] == '.txt':
            datetime = filename[-16:-4]
            month = filename[-12:-10]
            month = month.lstrip('0')
            day = filename[-10:-8]
            day = day.lstrip('0')
            year = filename[-16:-12]

            # Create an output file name to where the plot will be saved.
            outfilepath = '/home/jdw/UM2020Spring/M567/Data/' + stockName + '/'
            outfilename = stockName + 'DailyStockPrice' + datetime + '.pdf'
            outfile = outfilepath + outfilename

            # Open and read in the saved data.
            with open(filename) as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                dateTime = []
                price = []
                for row in csv_reader:
                    dateTime.append(float(row[0]))
                    price.append(float(row[1]))

            numHalfHours = 13
            dpperhalfhour = math.ceil(len(price) / numHalfHours)
            tickval = np.zeros(8)
            for i in range(1, 8):
                tickval[i] = dpperhalfhour * (2 * i - 1)

            # Create a time vector.
            t = np.arange(1, len(price) + 1)

            # Get the date for the data.
            monthNameStr = sf.convertMonthNumToMonthName(int(month))
            dateStr = (monthNameStr + ' ' + day + ' ' + year)

            # Create a title string.
            titlestr = ('Stock Price vs Time for ' + dateStr + ' - ' + stockName)

            # Make sure we have a clean canvas.
            plt.close('all')
            plt.figure()
            plt.plot(t, price, 'b')
            plt.grid('on')
            plt.title(titlestr, fontsize=9)
            plt.ylabel('Stock Price (Dollars)')
            plt.xlabel('Time')
            plt.xticks(tickval,
                       ('7:30', '8:00AM', '9:00AM', '10:00AM', '11:00AM',
                        '12:00PM', '1:00PM', '2:00PM'),
                       fontsize=7)

            # Save the plot to a file.
            pp = PdfPages(outfile)
            pp.savefig()
            pp.close()
            plt.cla()
            plt.clf()
nsupp, ny_2p5, nx2p5 = np.shape(xlocation)
ncfile.close()
print('min, max rlon = ', np.min(rlon), np.max(rlon))

# ---- read in the fraction of samples with climatological zero precipitation

infile = data_directory + \
    'climatology_gamma_parameters_ndfd2p5_' + cleadb + '_to_' + cleade + '_' + cmonth + '.nc'
print('reading from ', infile)
nc = Dataset(infile)
fraction_zero = nc.variables['fraction_zero'][:]
nc.close()

# ---- get nearest NDFD 2.5-km gridpoint to input lon, lat.  Then same for CCPA

with PdfPages('supp_locations_' + cleadb + '_to_' + cleade + '_' + cmonth + '.pdf') as pdf:
    for city in cities:
        rlonin, rlatin = cities[city]
        print('rlonin, rlatin = ', rlonin, rlatin)
        istat, iloc, jloc = find_nearest_latlon(rlonin, rlatin, rlon, rlat)
        print(city, ' jloc, iloc = ', jloc, iloc)
        print('i, iloc, jloc, lon(2.5), lat(2.5) ')
        for i in range(50):
            ix = xlocation[i, jloc, iloc]
            jy = ylocation[i, jloc, iloc]
            print(i, ix, jy, rlon[jy, ix], rlat[jy, ix])

        colorst = ['White', '#ECFFFF', '#D9F7FF', '#C4E8FF', '#E8FBE8', '#C7F4C7',
                   '#92F592', 'Yellow', 'Gold', 'Orange', '#FFB2B2', '#EC5B71',
                   'Red', 'Magenta', 'DarkOrchid', 'White']
        colorstblack = ['White', 'Black', 'Black', 'Black', 'Black',
                        'Black', 'Black', 'Black',
plt.annotate("mean: " + str(airpact_3d[sp][t,:,:].mean()) + " "+ unit_list[i], xy=(0, 1.02), xycoords='axes fraction') plt.savefig(outpng) plt.show() # This requires ffmpeg program, which is not easy to install in aeolus/kamiak # To make a video, download all the pngs in your computer and execute the command below # "ffmpeg -framerate 3 -i WRFChem_hourly_basemap_T2_%05d.png T2_output.mp4" # # check_call(["ffmpeg", "-framerate", "3", "-i", "outputs/WRFChem_hourly_basemap_"+sp+ "_%05d.png", "outputs/"+sp + "_output.mp4"]) ''' ############################################ # averaged domain basemaps ############################################ #save maps into the pdf file (two maps in single page) with PdfPages(base_dir + 'maps/airpact_avg_basemap_' + '_' + start.strftime("%Y%m%d") + '-' + end.strftime("%Y%m%d") + '.pdf') as pdf: for i, sp in enumerate(var_list): fig = plt.figure(figsize=(14, 10)) plt.title(sp) # compute auto color-scale using maximum concentrations #down_scale = np.percentile(airpact_3d[sp], 5) #up_scale = np.percentile(airpact_3d[sp], 95) down_scale = 28 up_scale = 46 clevs = np.round( np.arange(down_scale, up_scale, (up_scale - down_scale) / 10), 3) print("debug clevs", clevs, sp)
def main():
    selected_cols_ids = [
        '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0',
        '11.0', '12.0', '13.0', '14.0', '15.0', '15.5', '16.0', '16.5', '17.0'
    ]
    remove_questions = [
        "pokken / kunt u aangeven welke van de onderstaande (kinder)ziektes u gehad hebt?",
        "op hoeveel momenten van de dag eet u iets?"
    ]
    input_df_path = sys.argv[1]  # input questionnaire data
    input_question_overview = sys.argv[2]  # input of the questionnaire items
    input_recode_info = sys.argv[3]  # input file with the recoding and model information
    input_question_pgs_combinations = sys.argv[4]  # selection file of the question x prs combinations
    input_question_pgs_combinations_pvalues = sys.argv[5]  # pvalues from the meta analysis
    input_pgs_path_ugli = sys.argv[6]  # path to the directory with the Global Screening Array input data
    input_pgs_path_cyto = sys.argv[7]  # path to the directory with the HumanCytoSNP-12 input data
    input_analysis_output_ugli_dir = sys.argv[8]  # path to the directory with the Global Screening Array output results
    input_analysis_output_cyto_dir = sys.argv[9]  # path to the directory with the HumanCytoSNP-12 output results
    translation_questions_path = sys.argv[10]  # input path with the question translations
    translation_prs_path = sys.argv[11]  # input path with the prs translations
    output_dir = sys.argv[12]  # output dir

    # create output dir
    create_dir(output_dir)

    # read the input data
    df = pd.read_pickle(input_df_path)
    df_question_ids_total = pd.read_csv(input_question_overview, sep="\t",
                                        index_col=0, dtype="str")
    df_recode_info = pd.read_pickle(input_recode_info)
    df_recode_info = df_recode_info.set_index("question_id")
    df_question_pgs = pd.read_pickle(input_question_pgs_combinations)
    df_question_pgs_pvalues = pd.read_pickle(input_question_pgs_combinations_pvalues)

    # process ugli
    df_pgs_ugli = pd.read_pickle(input_pgs_path_ugli)
    df_pgs_ugli.columns = df_pgs_ugli.columns.str.replace("/", ".") \
        .str.replace(" ", ".").str.replace("-", ".") \
        .str.replace("(", ".").str.replace(")", ".")
    df_pgs_ugli = df_pgs_ugli.loc[df_pgs_ugli.index.intersection(df.index), :]

    # process cyto
    df_pgs_cyto = pd.read_pickle(input_pgs_path_cyto)
    df_pgs_cyto.columns = df_pgs_cyto.columns.str.replace("/", ".") \
        .str.replace(" ", ".").str.replace("-", ".") \
        .str.replace("(", ".").str.replace(")", ".")
    df_pgs_cyto = df_pgs_cyto.loc[df_pgs_cyto.index.intersection(df.index), :]

    # process the translations
    df_translation_info = None
    if translation_questions_path is not None:
        df_translation_info = pd.read_csv(translation_questions_path, sep="\t",
                                          index_col="Question")
    df_translation_prs_info = None
    if translation_prs_path is not None:
        df_translation_prs_info = pd.read_csv(translation_prs_path, sep="\t",
                                              index_col="PRS's")

    # create output lists
    plot_data = []

    # process the questions per question
    for index, row in df_question_pgs.iterrows():
        if index not in remove_questions:
            single_question_ids = df_question_ids_total.loc[index, selected_cols_ids]
            # find the first question id (except for the ever-positive-tested
            # question, where we use the last question id)
            if index == "Positive tested cumsum":
                single_question_ids = single_question_ids.iloc[::-1]
            single_question_first_id = func_first_value(single_question_ids)
            # get model type to create the correct plot
            model_type = df_recode_info.loc[single_question_first_id, "model_type"]
            answer_options_str = None
            if index in df_translation_info.index:
                answer_options_str = df_translation_info.loc[index, "Answers_options_plots"]
            if isinstance(model_type, pd.core.series.Series):
                model_type = model_type.iloc[0]
            answer_options = dict()
            if answer_options_str is not None and pd.isna(answer_options_str) == False:
                if isinstance(answer_options_str, pd.core.series.Series):
                    answer_options_str = answer_options_str.iloc[0]
                answer_options = json.loads(answer_options_str)
            # get the right plot type
            graph_type = "boxplots"
            if model_type == "gaussian":
                graph_type = "scatter"
            row_filtered = row.dropna()
            # loop through the PRS to create the plot per PRS
            # (.iteritems() was removed in recent pandas; .items() is equivalent)
            for pgs_id, single_z_score in row_filtered.items():
                single_pvalue = float(df_question_pgs_pvalues.loc[index, pgs_id])
                y_ugli = df_pgs_ugli.loc[:, pgs_id]
                x_ugli = df.loc[y_ugli.index, single_question_first_id]
                x_ugli = x_ugli.dropna()
                # fix plotting issue
                if single_question_first_id == "covt16_christmas_adu_q_1_b":
                    x_ugli = x_ugli.replace({3.0: 2.0})
                y_ugli = y_ugli.loc[x_ugli.index]

                y_cyto = df_pgs_cyto.loc[:, pgs_id]
                x_cyto = df.loc[y_cyto.index, single_question_first_id]
                x_cyto = x_cyto.dropna()
                # fix plotting issue
                if single_question_first_id == "covt16_christmas_adu_q_1_b":
                    x_cyto = x_cyto.replace({3.0: 2.0})
                y_cyto = y_cyto.loc[x_cyto.index]

                # calculate z scores and p values for the individual plots
                # of ugli and cyto
                z_score_ugli, pvalue_ugli = caalculate_z_scrore_from_models(
                    input_analysis_output_ugli_dir, pgs_id, index,
                    single_question_first_id)
                z_score_cyto, pvalue_cyto = caalculate_z_scrore_from_models(
                    input_analysis_output_cyto_dir, pgs_id, index,
                    single_question_first_id)

                # find the correct translations of question and prs
                label_en = index
                if index in df_translation_info.index:
                    label_en = df_translation_info.loc[index, "label_en"]
                label_prs_en = pgs_id
                if pgs_id in df_translation_prs_info.index:
                    label_prs_en = df_translation_prs_info.loc[pgs_id, "English Label"]

                # save the plotting information
                single_plot_data = {
                    "x_ugli": x_ugli,
                    "x_cyto": x_cyto,
                    "y_ugli": y_ugli,
                    "y_cyto": y_cyto,
                    "question_title": label_en,
                    "pgs_title": label_prs_en,
                    "graph_type": graph_type,
                    "answers": answer_options,
                    "z_score": single_z_score,
                    "pvalue": single_pvalue,
                    "z_score_ugli": z_score_ugli,
                    "pvalue_ugli": pvalue_ugli,
                    "z_score_cyto": z_score_cyto,
                    "pvalue_cyto": pvalue_cyto,
                }
                plot_data.append(single_plot_data)

    ###
    ### create plots
    ###

    # The PDF document
    plot_file_name = "question_pgs_plots_{date}.pdf".format(
        date=datetime.now().strftime("%d-%m-%Y"))
    pdf_pages = PdfPages(os.path.join(output_dir, plot_file_name))

    for index, data in enumerate(plot_data):
        fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
        figure_title = ("{question}\nPGS: {pgs}\nMeta analysis Z-score: "
                        "{zscore:0.02f}, p-value: {pvalue:0.2E}").format(
            question=data["question_title"],
            pgs=data["pgs_title"],
            zscore=data["z_score"],
            pvalue=data["pvalue"])
        fig.suptitle(figure_title, y=0.96, fontsize=12)

        ##
        ## UGLI
        ##
        plt.subplot2grid((1, 2), (0, 0), fig=fig)
        ax_ugli = plt.gca()
        if data["graph_type"] == "scatter":
            sns.regplot(data["x_ugli"], data["y_ugli"], ax=ax_ugli,
                        scatter_kws={"color": "black", "s": 20, 'alpha': 0.3,
                                     "rasterized": True, "clip_on": False})
        else:
            sns.boxplot(data["x_ugli"], data["y_ugli"], ax=ax_ugli, color="w",
                        showfliers=False)
            # add n-values above the boxes
            unique_labels = data["x_ugli"].unique()
            n_value_labels = []
            hivalues = []
            lovalues = []
            for unique_label in unique_labels:
                single_data = data["y_ugli"].where(
                    data["x_ugli"] == unique_label).dropna()
                q1, med, q3 = np.percentile(single_data, [25, 50, 75])
                iqr = q3 - q1
                hival = q3 + 1.5 * iqr
                loval = q1 - 1.5 * iqr
                wiskhi = single_data[single_data <= hival]
                if len(wiskhi) == 0 or np.max(wiskhi) < q3:
                    hival = q3
                else:
                    hival = np.max(wiskhi)
                hivalues.append(hival)
                wisklo = single_data[single_data >= loval]
                if len(wisklo) == 0 or np.min(wisklo) > q1:
                    loval = q1
                else:
                    loval = np.min(wisklo)
                lovalues.append(loval)
                if data["question_title"] == "Average time spend sitting per weekend day" or \
                        data["question_title"] == "Average time spend sitting per working day":
                    unique_label = unique_label - 1
                n_value_labels.append({
                    "x_pos": unique_label - 1 if min(unique_labels) != 0 else unique_label,
                    "y_pos": hival,
                    "label": "n={}".format(single_data.shape[0])
                })
            text_distance = (max(hivalues) - min(lovalues)) * 0.005
            for label in n_value_labels:
                if unique_labels.shape[0] < 6:
                    ax_ugli.text(label["x_pos"], label["y_pos"] + text_distance,
                                 label["label"], ha='center', va='bottom',
                                 color="gray")
                else:
                    ax_ugli.text(label["x_pos"], label["y_pos"] + text_distance,
                                 label["label"], ha='center', va='bottom',
                                 color="gray", fontsize=6)

        # Add title
        title_ugli = ("Global Screening Array\nZ-score: {zscore:0.02f}, "
                      "p-value: {pvalue:0.2E}").format(
            zscore=data["z_score_ugli"], pvalue=data["pvalue_ugli"])
        ax_ugli.set_title(title_ugli, fontsize=12)

        # set the labels
        ax_ugli.set_xlabel(short_str(data["question_title"], 50), fontsize=10)
        ax_ugli.set_ylabel("PRS: {}".format(short_str(data["pgs_title"], 50)),
                           fontsize=10)

        # edit the design of the plot
        ax_ugli.grid(False)
        ax_ugli.spines['right'].set_color('none')
        ax_ugli.spines['top'].set_color('none')
        ax_ugli.spines['bottom'].set_position(('axes', -0.05))
        ax_ugli.yaxis.set_ticks_position('left')
        ax_ugli.spines['left'].set_position(('axes', -0.05))
        ax_ugli.spines['left'].set_color("dimgray")
        ax_ugli.spines['bottom'].set_color("dimgray")

        ##
        ## CYTO
        ##
        plt.subplot2grid((1, 2), (0, 1), fig=fig)
        ax_cyto = plt.gca()
        if data["graph_type"] == "scatter":
            sns.regplot(data["x_cyto"], data["y_cyto"], ax=ax_cyto,
                        scatter_kws={"color": "black", "s": 20, 'alpha': 0.3,
                                     "rasterized": True, "clip_on": False})
        else:
            sns.boxplot(data["x_cyto"], data["y_cyto"], ax=ax_cyto, color="w",
                        showfliers=False)
            # add the n-values above the boxes
            unique_labels = data["x_cyto"].unique()
            n_value_labels = []
            hivalues = []
            lovalues = []
            for unique_label in unique_labels:
                single_data = data["y_cyto"].where(
                    data["x_cyto"] == unique_label).dropna()
                q1, med, q3 = np.percentile(single_data, [25, 50, 75])
                iqr = q3 - q1
                hival = q3 + 1.5 * iqr
                loval = q1 - 1.5 * iqr
                wiskhi = single_data[single_data <= hival]
                if len(wiskhi) == 0 or np.max(wiskhi) < q3:
                    hival = q3
                else:
                    hival = np.max(wiskhi)
                hivalues.append(hival)
                wisklo = single_data[single_data >= loval]
                if len(wisklo) == 0 or np.min(wisklo) > q1:
                    loval = q1
                else:
                    loval = np.min(wisklo)
                lovalues.append(loval)
                if data["question_title"] == "Average time spend sitting per weekend day" or \
                        data["question_title"] == "Average time spend sitting per working day":
                    unique_label = unique_label - 1
                n_value_labels.append({
                    "x_pos": unique_label - 1 if min(unique_labels) != 0 else unique_label,
                    "y_pos": hival,
                    "label": "n={}".format(single_data.shape[0])
                })
            text_distance = (max(hivalues) - min(lovalues)) * 0.005
            for label in n_value_labels:
                if unique_labels.shape[0] < 6:
                    ax_cyto.text(label["x_pos"], label["y_pos"] + text_distance,
                                 label["label"], ha='center', va='bottom',
                                 color="gray")
                else:
                    ax_cyto.text(label["x_pos"], label["y_pos"] + text_distance,
                                 label["label"], ha='center', va='bottom',
                                 color="gray", fontsize=6)

        # Add the title
        title_cyto = ("HumanCytoSNP-12\nZ-score: {zscore:0.02f}, "
                      "p-value: {pvalue:0.2E}").format(
            zscore=data["z_score_cyto"], pvalue=data["pvalue_cyto"])
        ax_cyto.set_title(title_cyto, fontsize=12)

        # Set the labels
        ax_cyto.set_xlabel(short_str(data["question_title"], 50), fontsize=10)
        ax_cyto.set_ylabel("PRS: {}".format(short_str(data["pgs_title"], 50)),
                           fontsize=10)

        # edit the design of the plot
        ax_cyto.grid(False)
        ax_cyto.spines['right'].set_color('none')
        ax_cyto.spines['top'].set_color('none')
        ax_cyto.spines['bottom'].set_position(('axes', -0.05))
        ax_cyto.yaxis.set_ticks_position('left')
        ax_cyto.spines['left'].set_position(('axes', -0.05))
        ax_cyto.spines['left'].set_color("dimgray")
        ax_cyto.spines['bottom'].set_color("dimgray")

        # change tick labels
        if len(data["answers"]) > 0:
            answers_df = pd.DataFrame.from_dict(data["answers"], orient="index",
                                                columns=["answer_str"])
            answers_df.index = answers_df.index.astype("float")
            if np.min(answers_df.index) > 0:
                if data["graph_type"] != "scatter":
                    answers_df.index = answers_df.index - 1
            answers_df = answers_df.sort_index()
            answers_df["answer_str"] = answers_df["answer_str"].replace("NaN", np.nan)
            answers_df["answer_str"] = answers_df["answer_str"].apply(short_str)
            answers_df = answers_df.dropna()
            ax_ugli.set_xticks(list(answers_df.index))
            ax_ugli.set_xticklabels(list(answers_df["answer_str"]), rotation=45,
                                    ha='right')
            ax_cyto.set_xticks(list(answers_df.index))
            ax_cyto.set_xticklabels(list(answers_df["answer_str"]), rotation=45,
                                    ha='right')

        # save the plots
        plt.tight_layout()
        plt.subplots_adjust(top=0.80, bottom=0.27, left=0.13, right=0.94,
                            wspace=0.3)
        pdf_pages.savefig()
        plt.clf()

    pdf_pages.close()
    print("end script")
def renormalization(model, settings, sol, options, temp, dLfrac, anh_order):
    """
    :param model: The LD model
    :param sol: The optimal solution vector from the original fit
    :param temp: the temperature at which the renormalization is performed
    :param anh_order: the order of anharmonicity for renormalization (only 4th for now)
    :return:
    """
    print('\n')
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    print('!!!!!! STARTING ANHARMONIC RENORMALIZATION @ ', temp, 'K !!!!!!')
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    print('\n')
    if anh_order < 4:
        raise ValueError('Max anharmonic order must at least be 4!')

    path = str(temp) + 'K/'
    primitive = settings['structure']['prim']
    nconfig = int(settings['renormalization']['nconfig'])
    nprocess = int(settings['renormalization']['nprocess'])
    mix_old = float(settings['renormalization']['mix_old'])
    conv_thresh = float(settings['renormalization']['conv_thresh'])
    nac = NA_correction.from_dict(settings['phonon'])
    etafac = settings['phonon'].getfloat('etafac', 8.0)

    pdfout = 'plots-' + temp + 'K.pdf'
    pdfout = PdfPages(pdfout.strip())
    atexit.register(upon_exit, pdfout)

    try:
        os.mkdir(path)
    except OSError:
        pass
    if not os.path.isfile('SPOSCAR'):
        raise ValueError('SPOSCAR not found!')

    with open(primitive, 'r') as f:
        lines = f.readlines()
    # lines[1] = str(float(lines[1]) * (1 + dLfrac)) + ' \n'
    with open(primitive + str(temp) + 'K', 'w') as ff:
        ff.writelines(lines)
    with open('SPOSCAR', 'r') as f:
        lines = f.readlines()
    # lines[1] = str(float(lines[1]) * (1 + dLfrac)) + ' \n'
    with open(path + 'SPOSCAR', 'w') as ff:
        ff.writelines(lines)

    fcfile = 'FORCE_CONSTANTS_2ND'
    shutil.copy(fcfile, fcfile + '_ORG')
    shutil.copy(fcfile + '_ORG', path + fcfile)
    shutil.copy(path + fcfile, path + fcfile + '_OLD')

    prim = SymmetrizedStructure.init_structure(settings['structure'],
                                               primitive + str(temp) + 'K',
                                               options.symm_step,
                                               options.symm_prim,
                                               options.log_level)
    model = init_ld_model(prim, settings['model'],
                          settings['LDFF'] if 'LDFF' in settings.sections() else {},
                          options.clus_step, options.symC_step, options.ldff_step)
    print(prim.lattice.a)
    print(prim.lattice)
    print(settings['training']['traindat1'])

    # Set up the initial sensing matrix with structures used for FC fitting.
    Amat_TD, fval_TD = init_training(model, settings['training'], step=2)
    nVal, nCorr = Amat_TD.shape
    settings_TD = copy.deepcopy(settings)
    # change path to the TD path going forward
    settings_TD['training']['traindat1'] = path + 'SPOSCAR ' + path + 'disp*'

    sol = np.ones(nCorr) * sol
    sol_renorm = np.copy(sol[:])
    param = model.get_params()
    print('params : ', param)
    start2 = 0
    for order in range(2):
        start2 += param[order]
    start4 = start2
    for order in range(2, 4):
        start4 += param[order]
    print('start2 : ', start2, ', start4 : ', start4)
    sol2orig = np.copy(sol[start2:start2 + param[2]])
    sol2renorm_old = np.zeros(len(sol2orig))
    sol4 = np.copy(sol[start4:start4 + param[4]])
    if anh_order >= 6:
        start6 = start4
        for order in range(4, 6):
            start6 += param[order]
        sol6 = np.copy(sol[start6:start6 + param[6]])

    # Calculate free energy and the T-dependent QCV matrix.
    free_energy_old, Lmatcov, poscar = get_qcv(prim.atomic_masses, temp, path)  # initial free energy

    count = 0
    while True:
        count += 1
        print('##############')
        print('ITERATION ', count)
        print('##############')
        if count > 1:
            # Generate T-dependent atomic displacements using QCV.
            qcv_displace(Lmatcov, poscar, nconfig, nprocess, path)
            # Set up the T-dependent sensing matrix.
            Amat_TD, fval_TD = init_training(model, settings_TD['training'], step=2)
            nVal, nCorr = Amat_TD.shape

        # Collect displacements for each order.
        A2 = Amat_TD[:, start2:start2 + param[2]].toarray()
        A4 = Amat_TD[:, start4:start4 + param[4]].toarray()
        if anh_order >= 6:
            A6 = Amat_TD[:, start6:start6 + param[6]].toarray()

        ##### RENORMALIZE FC2 #####
        # Moore-Penrose pseudo-inverse...essentially a least-squares solver.
        A2inv = np.linalg.pinv(A2)
        sol2renorm = A2inv.dot(A4.dot(sol4))  # least-squares solution
        if anh_order >= 6:
            sol2renorm += A2inv.dot(A6.dot(sol6))
        sol_renorm[start2:start2 + param[2]] = (sol2orig
                                                + sol2renorm_old * mix_old
                                                + sol2renorm * (1 - mix_old))
        print('Renormalized sol2 : \n', sol_renorm[start2:start2 + param[2]])

        # Save renormalized FORCE_CONSTANTS_2ND.
        phonon = Phonon(prim, model, sol_renorm, pdfout, NAC=nac, etafac=etafac)
        save_pot(model, sol_renorm, settings['export_potential'], 2, phonon)
        shutil.copy(path + fcfile, path + fcfile + '_OLD')
        shutil.copy(fcfile, path + fcfile)

        free_energy, Lmatcov, poscar = get_qcv(prim.atomic_masses, temp, path)

        # Check the relative difference in sol2renorm.
        if count > 1:
            cosine_sim = (np.dot(sol2renorm, sol2renorm_old)
                          / np.linalg.norm(sol2renorm)
                          / np.linalg.norm(sol2renorm_old))
        else:
            cosine_sim = 0
        d_free_energy = (free_energy - free_energy_old) / free_energy
        rel_diff = np.sum(abs(sol2renorm) / abs(sol2orig)) / len(sol2renorm)
        print('Cosine similarity to the previous sol2renorm is ', cosine_sim)
        print('Relative difference from original sol2 is ', rel_diff)
        print('Relative change in free energy (meV/atom) is ', d_free_energy)
        sol2renorm_old = np.copy(sol2renorm[:])
        free_energy_old = free_energy

        # BREAK if the change between iterations is small.
        # if abs(d_free_energy) < conv_thresh and count > 1:
        if cosine_sim > conv_thresh and count > 1:
            print('!!!!! Convergence Reached - Renormalization Done for ',
                  str(temp), ' K !!!!!')
            break

    sol_renorm = np.asarray(sol_renorm).reshape(1, nCorr)
    np.savetxt('solution_all_' + temp + 'K', sol_renorm)

    # Perform the final phonon analysis.
    phonon_step(model, prim, sol_renorm, settings['phonon'], temp,
                options.phonon_step, pdfout)

    shutil.move(fcfile + '_ORG', fcfile)
    for i in range(nconfig):
        shutil.rmtree(path + 'disp-' + str(i + 1))
        # plt.savefig('cond_%s_%s.pdf' % (parameters[i], parameters[j]), bbox_tight=True)
        # plt.close()
        plt.subplots_adjust(wspace=0, hspace=0, bottom=0.2, top=0.8,
                            left=0.2, right=0.8)
        plt.savefig(prefix + 'marg.pdf')
        plt.savefig(prefix + 'marg.png')
        plt.close()
else:
    from matplotlib.backends.backend_pdf import PdfPages
    sys.stderr.write('1dimensional only. Set the D environment variable \n')
    sys.stderr.write('to D=2 to force 2d marginal plots.\n')
    pp = PdfPages(prefix + 'marg1d.pdf')
    for i in range(n_params):
        plt.figure(figsize=(3, 3))
        plt.xlabel(parameters[i])
        plt.locator_params(nbins=5)

        m = s['marginals'][i]
        iqr = m['q99%'] - m['q01%']
        xlim = m['q01%'] - 0.3 * iqr, m['q99%'] + 0.3 * iqr
        # xlim = m['5sigma']
        plt.xlim(xlim)

        oldax = plt.gca()
        x, w, patches = oldax.hist(values[:, i],
                                   bins=numpy.linspace(xlim[0], xlim[1], 20),
def plot_best_latent(exp_results, out_filenames):
    # Pickle files must be opened in binary mode under Python 3.
    sample_d = pickle.load(open(exp_results, 'rb'))
    chains = sample_d['chains']
    exp = sample_d['exp']
    data_filename = exp['data_filename']
    data_dict = pickle.load(open(data_filename, 'rb'))
    meta_filename = data_filename[:-4] + "meta"
    m = pickle.load(open(meta_filename, 'rb'))
    meta_infile = m['infile']
    meta = pickle.load(open(meta_infile, 'rb'))
    conn_matrix = meta['conn']

    chains = [c for c in chains if type(c['scores']) != int]
    CHAINN = len(chains)
    chains_sorted_order = np.argsort([d['scores'][-1] for d in chains])[::-1]

    from matplotlib.backends.backend_pdf import PdfPages

    # get data
    for chain_pos, (latent_fname, latent_pickle) in enumerate(out_filenames):
        best_chain_i = chains_sorted_order[chain_pos]
        best_chain = chains[best_chain_i]
        sample_latent = best_chain['state']
        jobs_assignment = np.array(sample_latent['domains']['jobs']['assignment'])
        users_assignment = np.array(sample_latent['domains']['users']['assignment'])

        ji = np.argsort(jobs_assignment).flatten()
        ja = jobs_assignment[ji]
        j_pos = np.argwhere(np.diff(ja) != 0).flatten()

        ui = np.argsort(users_assignment).flatten()
        ua = users_assignment[ui]
        u_pos = np.argwhere(np.diff(ua) != 0).flatten()

        pp = PdfPages(latent_fname)

        f = pylab.figure()
        ax = f.add_subplot(1, 1, 1)
        cm = conn_matrix['link']
        cm = cm[ui, :]
        cm = cm[:, ji]
        ax.imshow(cm > 0, interpolation='nearest', cmap=pylab.cm.Greys)
        for i in u_pos:
            ax.axhline(i)
        for i in j_pos:
            ax.axvline(i)
        f.savefig(pp, format='pdf')

        f = pylab.figure()
        plot_t1t2_params(f, conn_matrix, users_assignment, jobs_assignment,
                         sample_latent['relations']['R1']['ss'],
                         sample_latent['relations']['R1']['hps'],
                         model=data_dict['relations']['R1']['model'],
                         MAX_DIST=30, MAX_CLASSES=10)
        f.savefig(pp, format='pdf')
        pp.close()

        pickle.dump(sample_latent, open(latent_pickle, 'wb'))
import datetime
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages

# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
with PdfPages("multipage_pdf.pdf") as pdf:
    plt.figure(figsize=(3, 3))
    plt.plot(range(7), [3, 1, 4, 1, 5, 9, 2], "r-o")
    plt.title("Page One")
    pdf.savefig()  # saves the current figure into a pdf page
    plt.close()

    # if LaTeX is not installed or an error is caught, keep `usetex=False`
    plt.rc('text', usetex=False)
    plt.figure(figsize=(8, 6))
    x = np.arange(0, 5, 0.1)
    plt.plot(x, np.sin(x), 'b-')
    plt.title('Page Two')
    pdf.attach_note("plot of sin(x)")  # you can add a pdf note to
    # attach metadata to a page
    pdf.savefig()
    plt.close()

    plt.rc('text', usetex=False)
    fig = plt.figure(figsize=(4, 5))
    plt.plot(x, x ** 2, 'ko')
    plt.title('Page Three')
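# PdfPages can also carry document-level metadata through its infodict()
# API (the same mechanism used by the imgsum summary sheet further down).
# A minimal sketch inside the same `with` pattern; the title and author
# strings are placeholders:
import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

with PdfPages('metadata_sketch.pdf') as pdf:
    plt.figure()
    plt.plot([1, 2, 3])
    pdf.savefig()
    plt.close()
    d = pdf.infodict()
    d['Title'] = 'Multipage PDF Example'
    d['Author'] = 'unknown'
    d['CreationDate'] = datetime.datetime.today()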
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# set fonts and PDF/PS options before drawing
plt.rcParams['font.size'] = 16
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True

x = np.arange(0.01, 10, 0.05)
y = np.arange(0.01, 10, 0.05)
X, Y = np.meshgrid(x, y)
m = np.array([1.0, 2.0])
Z = X + Y - m[0] - m[1] - X * np.log(X / m[0]) - Y * np.log(Y / m[1])

fig = plt.figure()
ax = plt.axes()
plt.pcolormesh(X, Y, Z, cmap='magma')
cbar = plt.colorbar(orientation="vertical")
cont = plt.contour(X, Y, Z, 8, vmin=-1, vmax=1, colors=['black'])
cont.clabel(fmt='%1.1f', fontsize=16)
plt.plot(1.0, 2.0, marker='.', markersize=16)
plt.xlabel(r'$x_1$', fontsize=16)
plt.ylabel(r'$x_2$', fontsize=16)
plt.tick_params(which='major', labelsize=16)
plt.gca().set_aspect('equal')

ppdf = PdfPages('information_entropy.pdf')
ppdf.savefig(fig, bbox_inches="tight", pad_inches=0.0)
ppdf.close()
def plot_popular_industry(industry_data, number, indicator):
    occupation_rank = industry_data.newdf.sort_values(
        by=indicator, ascending=False).iloc[:number]
    occupation_rank["unapproved_case"] = occupation_rank[
        "application_pool"] - occupation_rank["approved_case"]
    with PdfPages('popular occupation groups ranked by ' + indicator +
                  '.pdf') as pdf:
        plt.figure(figsize=(30, 20))
        ax1 = occupation_rank["approved_case"].plot(
            kind="barh", alpha=0.7,
            color=["skyblue", "pink", "green"], figsize=(20, 15))
        ax1.set_title('Application pool for top ' + str(number) + ' ' +
                      indicator + ' occupation groups')
        plt.subplots_adjust(bottom=0.1, left=0.3)
        pdf.savefig()
        plt.close()

        plt.figure(figsize=(30, 20))
        ax2 = occupation_rank[["approved_case", "unapproved_case"]].plot(
            kind="bar", alpha=0.7, stacked=True,
            color=["skyblue", "pink"], figsize=(20, 15))
        ax3 = occupation_rank["approval_rate"].plot(
            kind="line", secondary_y=True, style='ko--')
        ax2.set_xticklabels(occupation_rank.index, rotation=45)
        ax3.set_xticklabels(occupation_rank.index, rotation=45)
        ax2.set_title('Approval rate for top ' + str(number) + ' ' +
                      indicator + ' occupation groups')
        plt.subplots_adjust(bottom=0.3, left=0.1)
        pdf.savefig()
        plt.close()

        plt.figure(figsize=(30, 20))
        ax4 = occupation_rank["average_wage"].plot(
            kind="barh", ylim=[40000, 120000], color="skyblue",
            figsize=(20, 15))
        ax4.set_title('Average wage for top ' + str(number) + ' ' +
                      indicator + ' occupation groups')
        plt.subplots_adjust(bottom=0.1, left=0.3)
        pdf.savefig()
        plt.close()
    print("Further analysis has been saved to your local folder as a PDF; "
          "please open the folder to check.")
    print("Please return to the previous menu to explore other functions.\n")
    return
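# The approval-rate overlay above relies on pandas' secondary_y plotting.
# A minimal self-contained sketch of the same bar-plus-line pattern; the
# dataframe and column names here are invented:
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'approved': [5, 3, 8], 'rate': [0.5, 0.3, 0.8]},
                  index=['a', 'b', 'c'])
ax = df['approved'].plot(kind='bar', alpha=0.7)
df['rate'].plot(kind='line', secondary_y=True, style='ko--')
ax.set_title('bars with a secondary-axis line')
plt.savefig('secondary_axis_sketch.pdf')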
well = (row, col) time_list = [] value_list = [] for time, value in sorted(MES[reading_label][plate_id][well].iteritems()): time_list.append(time) value_list.append(value) time_array = array(time_list) if len(time_list): time_array = (time_array - time_list[0]) / 3600 return time_array, array(value_list) MES = CollectData("../data/tecan/PL6-96.tar.gz", number_of_plates=4) _mkdir('../res/tecan') pp = PdfPages('../res/tecan/2011-02-06_PL6-96.pdf') #rcParams['text.usetex'] = True rcParams['legend.fontsize'] = 12 #rcParams['font.family'] = 'sans-serif' #rcParams['font.size'] = 8 #rcParams['lines.linewidth'] = 0.3 #rcParams['lines.markersize'] = 2 #rcParams['figure.figsize'] = [5, 10] #rcParams['figure.subplot.hspace'] = 0.3 #figure() plot_growth_rate = False fit_window_size = 1.5 # hours fit_start_threshold = 0.01
simperc = numpy.array(simperc) simmedfrac = numpy.array(simmedfrac) simmedabs = numpy.array(simmedabs) f = open('temp25.pkl', 'rb') (fit_input, _) = pickle.load(f) dataperc = numpy.percentile(numpy.sum(fit_input['rho1']**2, axis=1), (50, 50 - 34, 50 + 34)) datamedfrac, datamedabs = meanfrac(fit_input) asort = numpy.argsort(numpy.append(simperc[:, 0], dataperc[0])) resort = numpy.argsort(asort) pp = PdfPages('output25_sim' + tag + '/pvalues_delta2.pdf') plt.errorbar( resort[:-1], simperc[:, 0], yerr=[simperc[:, 0] - simperc[:, 1], simperc[:, 2] - simperc[:, 0]], fmt='.', color='blue') plt.errorbar(resort[-1], dataperc[0], yerr=[[dataperc[0] - dataperc[1]], [dataperc[2] - dataperc[0]]], fmt='.', color='red') plt.xlim([-1, len(resort)]) plt.xlabel('Sorted index') plt.ylabel(r'$\delta^2$') plt.savefig(pp, format='pdf')
output.write(
    "The first row for each item's (plant's) export value in Saudi Arabia, with its year and value"
)
output.write("\n")
# show the first row for each group
print(grouped.first(), file=output)

# Code to draw a bar plot for each of the items (plants)
items = []
itemslist = []
# save available data to arrays based on their group
for key, item in grouped:
    itemslist.append(grouped.get_group(key))
    items.append(key)

with PdfPages(
        r'/Users/abeer/downloads/Cropsandlivestockproducts_ExportValueCharts.pdf'
) as export_pdf:
    for i in range(len(itemslist)):
        Y = []
        y_values = []
        x_values = []
        output.write("The name of the item (value) exported: ")
        print(items[i], file=output)
        output.write("\n")
        output.write(
            "The table showing the year and value of this item's exports"
        )
        output.write("\n")
        print(itemslist[i], file=output)
        output.write("\n")
        Y = itemslist[i].to_numpy()
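# The loop above writes one table (and, in the truncated part, one chart) per
# item group into a single PDF. A minimal sketch of that per-group multipage
# pattern with pandas groupby; the dataframe and column names are invented:
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

df = pd.DataFrame({'item': ['Dates', 'Dates', 'Wheat'],
                   'year': [2018, 2019, 2018],
                   'value': [10, 12, 7]})
with PdfPages('per_group_sketch.pdf') as export_pdf:
    for key, group in df.groupby('item'):
        group.plot(x='year', y='value', kind='bar', title=str(key), legend=False)
        export_pdf.savefig()  # one page per group
        plt.close()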
def evaluate(corpus):
    N = 100  # must be large to ensure convergence
    w_a_tmp = 0
    w_b_tmp = 0
    # counters for the number of correct label predictions made by predict()
    correct_predictions_a = 0
    correct_predictions_b = 0
    # numpy arrays that store accuracy scores over a number of iterations
    accuracy_a = np.array([])
    accuracy_b = np.array([])
    # numpy arrays that store the number of words used for training over the iterations
    w_a = np.array([])
    w_b = np.array([])

    # create the test set by selecting 10% of all sentences at random
    np.random.shuffle(corpus)
    corpus_length = len(corpus)
    test_set_size = int(round(corpus_length / 10, 0))
    training_set_size = int(corpus_length - test_set_size)
    # the test set must be the complement of the training set, not an
    # overlapping slice
    training_set, test_set = corpus[:training_set_size], corpus[training_set_size:]

    num_predictions_so_far = 0

    # create instance A of MaxEntModel to be used with train()
    A = MaxEntModel()
    A.initialize(training_set)
    # create instance B of MaxEntModel to be used with train_batch()
    B = MaxEntModel()
    B.initialize(training_set)

    # train A and B
    for i in range(N):
        print(Colors.WARNING + "Iteration: " + Colors.ENDC, i)
        print(Colors.WARNING + "Training A..." + Colors.ENDC)
        A.train(1)
        w_a_tmp += 1
        print(Colors.WARNING + "Training B..." + Colors.ENDC)
        B.train_batch(1, 1)
        w_b_tmp += B.get_num_training_words()

        # run predict() on the test set
        for sentence in test_set:
            # iterate over the words of this sentence, not over the test set
            for j in range(len(sentence)):
                word = sentence[j][0]
                label = sentence[j][1]
                if j == 0:
                    prev_label = 'start'
                else:
                    prev_label = sentence[j - 1][1]
                print(Colors.WARNING + "Predicting label for A..." + Colors.ENDC)
                prediction_a = A.predict(word, prev_label)
                print(Colors.WARNING + "Predicting label for B..." + Colors.ENDC)
                prediction_b = B.predict(word, prev_label)
                num_predictions_so_far += 1
                if prediction_a == label:
                    correct_predictions_a += 1
                if prediction_b == label:
                    correct_predictions_b += 1

        # compute accuracy for models A and B
        it_accuracy_a = correct_predictions_a / num_predictions_so_far
        it_accuracy_b = correct_predictions_b / num_predictions_so_far
        accuracy_a = np.append(accuracy_a, it_accuracy_a)
        accuracy_b = np.append(accuracy_b, it_accuracy_b)
        w_a = np.append(w_a, w_a_tmp)
        w_b = np.append(w_b, w_b_tmp)

    # plot the data (accuracy against number of words)
    print(Colors.WARNING + "Plotting data..." + Colors.ENDC)
    chart_a = plt.plot(w_a, accuracy_a)
    chart_b = plt.plot(w_b, accuracy_b)
    plt.setp(chart_a, color='r', linewidth=2.0)
    plt.setp(chart_b, color='b', linewidth=2.0)

    # save the plot to file
    pp = PdfPages('plot.pdf')
    pp.savefig()
    pp.close()
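# A small, self-contained sketch of the shuffled 90/10 split fixed above;
# the helper name `train_test_split` is illustrative:
import numpy as np

def train_test_split(corpus, test_fraction=0.1):
    """Shuffle in place, then return (training_set, test_set) as disjoint slices."""
    corpus = list(corpus)
    np.random.shuffle(corpus)
    n_train = len(corpus) - int(round(len(corpus) * test_fraction))
    return corpus[:n_train], corpus[n_train:]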
lge1, eff1, eff_err1 = get_eff_area(sys.argv[2]) print(lge0) print(lge1) diff = list() rel_err0, rel_err1 = list(), list() for i in range(len(lge0)): d = (eff0[i] - eff1[i]) / math.sqrt(eff_err0[i] * eff_err0[i] + eff_err1[i] * eff_err1[i]) diff.append(d) rel_err0.append(eff_err0[i] / eff0[i]) rel_err1.append(eff_err1[i] / eff1[i]) figName = 'figures/EffArea_testingSplit.pdf' with PdfPages(figName) as pdf: fig = plt.figure(figsize=(8, 6), tight_layout=True) ax = plt.gca() ax.set_yscale('log') ax.set_xlabel(r'log$_{10}$($E$/TeV)') ax.set_ylabel(r'$A_\mathrm{eff}$ (cm$^2$)') ax.errorbar(lge0, eff0, yerr=eff_err0, marker='o', linestyle='none') ax.errorbar(lge1, eff1, yerr=eff_err1, marker='s', linestyle='none', markersize=15, fillstyle='none')
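# The pull computed element-wise above, (a - b) / sqrt(sa^2 + sb^2),
# vectorizes cleanly with numpy instead of an index loop. A minimal sketch;
# the array values are invented:
import numpy as np

eff0, eff1 = np.array([1.0, 2.0]), np.array([1.1, 1.9])
err0, err1 = np.array([0.1, 0.2]), np.array([0.1, 0.2])
diff = (eff0 - eff1) / np.sqrt(err0**2 + err1**2)
rel_err0, rel_err1 = err0 / eff0, err1 / eff1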
def energy_spectrum(self, Directory=False, PdfPages=False, sources=False,
                    real_time=False, color=False):
    # energy calibration E = a*x + b, where x is the channel number
    a = 0.04258
    b = 0.09599
    for i in np.arange(len(sources)):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        data = loadtxt("Source_spectra/" + sources[i] + "/" + sources[i] +
                       "_spectrum_calibrated.txt")
        x1 = np.arange(0, len(data), 1)
        x_calib = a * x1 + b
        y1 = data
        y_rate = y1 / real_time[i]
        y_norm = y1 / np.max(y1)
        plt.step(x_calib, y_norm, where='mid', label=sources[i],
                 linewidth=0.6, color='#B40431', zorder=1.3)  # default colors
        plt.bar(x_calib, y_norm, width=0.0425, linewidth=0.6,
                color='#F5A9BC', zorder=1.2, label='_nolegend_')
        ax = plt.gca()
        if sources[i] == "Am":
            plt.xlim(0, 70)
            ax.annotate(r'$Am\ {\gamma}_{2,0}$', xy=(60, 0.95), xytext=(-5, 5),
                        ha='right', textcoords='offset points', fontsize=8)
        if sources[i] == "Fe":
            plt.xlim(0, 15)
            ax.annotate(r'${K}_{\alpha}^{Fe}$', xy=(7.1, 0.95), xytext=(-5, 5),
                        ha='right', textcoords='offset points', fontsize=8)
            ax.annotate(r'${K}_{\beta}^{Fe}$', xy=(7.5, 0.15), xytext=(-5, 5),
                        ha='right', textcoords='offset points', fontsize=8)
        if sources[i] == "Cd":
            plt.xlim(0, 60)
            ax.annotate(r'${K}_{\alpha}^{Cd}$', xy=(25.0, 0.95), xytext=(-5, 5),
                        ha='right', textcoords='offset points', fontsize=8)
            ax.annotate(r'${K}_{\beta}^{Cd}$', xy=(28.0, 0.15), xytext=(-5, 5),
                        ha='right', textcoords='offset points', fontsize=8)
        if sources[i] == "Background":
            point_label = [
                r'${K}_{\alpha}^{Fe}$', r'${K}_{\alpha}^{Ni}$',
                r'${K}_{\alpha}^{Cu}$', r'${K}_{\alpha}^{Zn}$',
                r'${K}_{\alpha}^{Ga}$', r'${K}_{\alpha}^{Ag}$',
                r'${K}_{\alpha}^{ln}$', r'${K}_{\beta}^{Ag}$'
            ]
            y = [0.19, 0.12, 0.1, 0.08, 0.05, 0.95, 0.58, 0.55]
            x = [8.2, 9.1, 10.2, 11.0, 11.5, 24.5, 25.8, 27.5]
            for j, txt in enumerate(point_label):
                ax.annotate(txt, xy=(x[j], y[j]), xytext=(-5, 5), ha='right',
                            textcoords='offset points', fontsize=8)
            plt.xlim(0, 45)
        ax.set_ylim(bottom=0)
        plt.xlabel('Energy [keV]')
        plt.ylabel('Counts (normalized)')
        # ax.set_xscale('log')
        # ax.set_yscale('log')
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("Source_spectra/" + sources[i] + "/" + sources[i] +
                    "_spectrum_calibrated.png", bbox_inches='tight')
        PdfPages.savefig()
ax.grid(True) fontP = FontProperties() fontP.set_size('small') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, labels, prop=fontP, loc="upper right") print '\n' return fig if __name__ == '__main__': #################################################################### ## create pdf generator #################################################################### pdf_pages = PdfPages(sys.argv[1]) #################################################################### ## loop through tests #################################################################### for bm in BENCHMARKS: fig = do_a_benchmark(bm) pdf_pages.savefig(fig) #################################################################### ## done #################################################################### print 'make pdf...' pdf_pages.close() print 'done!'
if __name__ == '__main__': global PdfPages Directory = "Amptek_Si_PIN_Detector/" sources = ["Background", "Cd", "Am", "Fe"] color = [ '#F5A9BC', '#d62728', '#1f77b4', '#7f7f7f', '#7e0044', "magenta", 'red', '#33D1FF', "maroon", "yellow", 'lightblue', '#006381', 'grey' ] point_label = [ r'${K}_{\alpha}^{Fe}$', r'${K}_{\beta}^{Fe}$', r'${K}_{\alpha}^{Mo}$', r'${K}_{\alpha}^{Cd}$', r'${K}_{\beta}^{Cd}$', r'$Am\ {\gamma}_{2,0}$' ] real_time = [661.16, 60.715000, 2148.976000, 1.011000, 6272.115000] # Number is accumulation time PdfPages = PdfPages('Amptel_Spectrum.pdf') scan = Amptel_Spectrum() a, a_error, b, b_error = scan.channel_energy_calibration( PdfPages=PdfPages, file= 'Energy_channel_calibration/channel_energy_calibration_full_range.txt', point_label=point_label) scan.energy_spectrum(PdfPages=PdfPages, Directory=Directory, sources=sources, real_time=real_time, color=color) scan.plot_calibration_charge(PdfPages=PdfPages, Directory=Directory, point_label=point_label) scan.close()
def generateReport(cself, filename="report.pdf", showOnScreen=True): figs = list() plotter = PlotCollection.PlotCollection("Calibration report") offset = 3010 #Output calibration results in text form. sstream = StringIO() printResultTxt(cself, sstream) text = [line for line in StringIO(sstream.getvalue())] linesPerPage = 40 while True: fig = pl.figure(offset) offset += 1 left, width = .05, 1. bottom, height = -.05, 1. right = left + width top = bottom + height ax = fig.add_axes([.0, .0, 1., 1.]) # axes coordinates are 0,0 is bottom left and 1,1 is upper right p = patches.Rectangle((left, bottom), width, height, fill=False, transform=ax.transAxes, \ clip_on=False, edgecolor="none") ax.add_patch(p) pl.axis('off') printText = lambda t: ax.text(left, top, t, fontsize=8, \ horizontalalignment='left', verticalalignment='top',\ transform=ax.transAxes) if len(text) > linesPerPage: printText("".join(text[0:linesPerPage])) figs.append(fig) text = text[linesPerPage:] else: printText("".join(text[0:])) figs.append(fig) break #plot imu stuff (if we have imus) for iidx, imu in enumerate(cself.ImuList): f = pl.figure(offset + iidx) plots.plotAccelerations(cself, iidx, fno=f.number, noShow=True) plotter.add_figure("imu{0}: accelerations".format(iidx), f) figs.append(f) offset += len(cself.ImuList) f = pl.figure(offset + iidx) plots.plotAccelErrorPerAxis(cself, iidx, fno=f.number, noShow=True) plotter.add_figure("imu{0}: acceleration error".format(iidx), f) figs.append(f) offset += len(cself.ImuList) f = pl.figure(offset + iidx) plots.plotAccelBias(cself, iidx, fno=f.number, noShow=True) plotter.add_figure("imu{0}: accelerometer bias".format(iidx), f) figs.append(f) offset += len(cself.ImuList) f = pl.figure(offset + iidx) plots.plotAngularVelocities(cself, iidx, fno=f.number, noShow=True) plotter.add_figure("imu{0}: angular velocities".format(iidx), f) figs.append(f) offset += len(cself.ImuList) f = pl.figure(offset + iidx) plots.plotGyroErrorPerAxis(cself, iidx, fno=f.number, noShow=True) plotter.add_figure("imu{0}: angular velocity error".format(iidx), f) figs.append(f) offset += len(cself.ImuList) f = pl.figure(offset + iidx) plots.plotAngularVelocityBias(cself, iidx, fno=f.number, noShow=True) plotter.add_figure("imu{0}: gyroscope bias".format(iidx), f) figs.append(f) offset += len(cself.ImuList) #plot cam stuff if cself.CameraChain: for cidx, cam in enumerate(cself.CameraChain.camList): f = pl.figure(offset + cidx) title = "cam{0}: reprojection errors".format(cidx) plots.plotReprojectionScatter(cself, cidx, fno=f.number, noShow=True, title=title) plotter.add_figure(title, f) figs.append(f) offset += len(cself.CameraChain.camList) #write to pdf pdf = PdfPages(filename) for fig in figs: pdf.savefig(fig) pdf.close() if showOnScreen: plotter.show()
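# The text-report pagination at the top of generateReport can be expressed as
# a small generator that yields one figure per chunk of lines. A minimal
# sketch under that assumption; the helper name `text_pages` is illustrative:
import matplotlib.pyplot as pl

def text_pages(lines, lines_per_page=40):
    """Yield one matplotlib figure per `lines_per_page` lines of text."""
    for start in range(0, len(lines), lines_per_page):
        fig = pl.figure()
        ax = fig.add_axes([0., 0., 1., 1.])
        ax.axis('off')
        ax.text(0.05, 0.95, "".join(lines[start:start + lines_per_page]),
                fontsize=8, ha='left', va='top', transform=ax.transAxes)
        yield fig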
def imgsum(im, obs, obs_uncal, outname, outdir='.', title='imgsum', commentstr="", fontsize=FONTSIZE, cfun='afmhot', snrcut=0., gainplots=True, ampplots=True, cphaseplots=True, campplots=True, ebar=True, debias=True, cp_uv_min=False, sysnoise=0, syscnoise=0): """Produce an image summary plot for an image and uvfits file. Args: im (Image): an Image object obs (Obsdata): the self-calibrated Obsdata object obs_uncal (Obsdata): the original Obsdata object outname (str): output pdf file name outdir (str): directory for output file title (str): the pdf file title commentstr (str): a comment for the top line of the pdf fontsize (float): the font size for text in the sheet cfun (float): matplotlib color function gainplots (bool): include gain plots or not ampplots (bool): include amplitude consistency plots or not cphaseplots (bool): include closure phase consistency plots or not campplots (bool): include closure amplitude consistency plots or not ebar (bool): include error bars or not debias (bool): debias visibility amplitudes before computing chisq or not cp_uv_min (bool): minimum uv-distance cutoff for including a baseline in closure phase sysnoise (float): percent systematic noise added in quadrature syscnoise (float): closure phase systematic noise in degrees added in quadrature snrcut (dict): a dictionary of snrcut values for each quantity Returns: """ plt.rc('font', family='serif') plt.rc('text', usetex=True) plt.rc('font', size=FONTSIZE) plt.rc('axes', titlesize=FONTSIZE) plt.rc('axes', labelsize=FONTSIZE) plt.rc('xtick', labelsize=FONTSIZE) plt.rc('ytick', labelsize=FONTSIZE) plt.rc('legend', fontsize=FONTSIZE) plt.rc('figure', titlesize=FONTSIZE) if fontsize == 0: fontsize = FONTSIZE snrcut_dict = { key: 0. for key in ['vis', 'amp', 'cphase', 'logcamp', 'camp'] } if type(snrcut) is dict: for key in snrcut.keys(): snrcut_dict[key] = snrcut[key] else: for key in snrcut_dict.keys(): snrcut_dict[key] = snrcut with PdfPages(outname) as pdf: titlestr = 'Summary Sheet for %s on MJD %s' % (im.source, im.mjd) #pdf metadata d = pdf.infodict() d['Title'] = title d['Author'] = u'EHT Team 1' d['Subject'] = titlestr d['CreationDate'] = datetime.datetime.today() d['ModDate'] = datetime.datetime.today() #define the figure fig = plt.figure(1, figsize=(18, 28), dpi=200) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) #user comments if len(commentstr) > 1: titlestr = titlestr + '\n' + str(commentstr) else: titlestr = titlestr plt.suptitle(titlestr, y=.9, va='center', fontsize=int(1.2 * fontsize)) ################################################################################ print("===========================================") print("displaying the image") ax = plt.subplot(gs[0:2, 0:2]) ax.set_title('Submitted Image') ax = display_img(im, axis=ax, show=False, has_title=False, cfun=cfun, fontsize=fontsize) print("===========================================") print("displaying the blurred image") ax = plt.subplot(gs[0:2, 2:5]) ax.set_title('Image blurred to nominal resolution') #beamparams=obs_uncal.fit_gauss() #fwhm = np.min((np.abs(beamparams[0]),np.abs(beamparams[1]))) fwhm = obs.res() print("blur_FWHM: ", fwhm / RADPERUAS) beamparams = [fwhm, fwhm, 0] res = obs.res() imblur = im.blur_gauss(beamparams, frac=1.0) #imblur = im.blur_circ(res) ax = display_img(imblur, beamparams=beamparams, axis=ax, show=False, has_title=False, cfun=cfun, fontsize=fontsize) ################################################################################ print("===========================================") 
print("calculating statistics") #display the overall chi2 ax = plt.subplot(gs[2, 0:2]) ax.set_title('Image statistics') #ax.axis('off') ax.set_yticks([]) ax.set_xticks([]) flux = im.total_flux() # SNR ordering #obs.reorder_tarr_snr() #obs_uncal.reorder_tarr_snr() maxset = False # compute chi^2 chi2vis = obs.chisq(im, dtype='vis', ttype='nfft', systematic_noise=sysnoise, maxset=maxset, snrcut=snrcut_dict['vis']) chi2amp = obs.chisq(im, dtype='amp', ttype='nfft', systematic_noise=sysnoise, maxset=maxset, snrcut=snrcut_dict['amp']) chi2cphase = obs.chisq(im, dtype='cphase', ttype='nfft', systematic_noise=sysnoise, systematic_cphase_noise=syscnoise, maxset=maxset, cp_uv_min=cp_uv_min, snrcut=snrcut_dict['cphase']) chi2logcamp = obs.chisq(im, dtype='logcamp', ttype='nfft', systematic_noise=sysnoise, maxset=maxset, snrcut=snrcut_dict['logcamp']) chi2camp = obs.chisq(im, dtype='camp', ttype='nfft', systematic_noise=sysnoise, maxset=maxset, snrcut=snrcut_dict['camp']) chi2vis_uncal = obs.chisq(im, dtype='vis', ttype='nfft', systematic_noise=0, maxset=maxset, snrcut=snrcut_dict['vis']) chi2amp_uncal = obs.chisq(im, dtype='amp', ttype='nfft', systematic_noise=0, maxset=maxset, snrcut=snrcut_dict['amp']) chi2cphase_uncal = obs.chisq(im, dtype='cphase', ttype='nfft', systematic_noise=0, systematic_cphase_noise=0, maxset=maxset, cp_uv_min=cp_uv_min, snrcut=snrcut_dict['cphase']) chi2logcamp_uncal = obs.chisq(im, dtype='logcamp', ttype='nfft', systematic_noise=0, maxset=maxset, snrcut=snrcut_dict['logcamp']) chi2camp_uncal = obs.chisq(im, dtype='camp', ttype='nfft', systematic_noise=0, maxset=maxset, snrcut=snrcut_dict['camp']) print("chi^2 vis: %0.2f %0.2f" % (chi2vis, chi2vis_uncal)) print("chi^2 amp: %0.2f %0.2f" % (chi2amp, chi2amp_uncal)) print("chi^2 cphase: %0.2f %0.2f" % (chi2cphase, chi2cphase_uncal)) print("chi^2 logcamp: %0.2f %0.2f" % (chi2logcamp, chi2logcamp_uncal)) print("chi^2 camp: %0.2f %0.2f" % (chi2logcamp, chi2logcamp_uncal)) fs = int(1 * fontsize) fs2 = int(.8 * fontsize) ax.text(.05, .9, "Source:", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.05, .7, "MJD:", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.05, .5, "FREQ:", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.05, .3, "FOV:", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.05, .1, "FLUX:", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.23, .9, "%s" % im.source, fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.23, .7, "%i" % im.mjd, fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.23, .5, "%0.0f GHz" % (im.rf / 1.e9), fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.23, .3, "%0.1f $\mu$as" % (im.fovx() / RADPERUAS), fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.23, .1, "%0.2f Jy" % flux, fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.5, .9, "$\chi^2_{vis}$", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.5, .7, "$\chi^2_{amp}$", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.5, .5, "$\chi^2_{cphase}$", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.5, .3, "$\chi^2_{log camp}$", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.5, .1, "$\chi^2_{camp}$", fontsize=fs, ha='left', va='center', transform=ax.transAxes) ax.text(.72, .9, "%0.2f" % chi2vis, fontsize=fs, ha='left', va='center', transform=ax.transAxes) 
ax.text(.72, .7, "%0.2f" % chi2amp, fontsize=fs, ha='left', va='center', transform=ax.transAxes)
ax.text(.72, .5, "%0.2f" % chi2cphase, fontsize=fs, ha='left', va='center', transform=ax.transAxes)
ax.text(.72, .3, "%0.2f" % chi2logcamp, fontsize=fs, ha='left', va='center', transform=ax.transAxes)
ax.text(.72, .1, "%0.2f" % chi2camp, fontsize=fs, ha='left', va='center', transform=ax.transAxes)
ax.text(.85, .9, "(%0.2f)" % chi2vis_uncal, fontsize=fs2, ha='left', va='center', transform=ax.transAxes)
ax.text(.85, .7, "(%0.2f)" % chi2amp_uncal, fontsize=fs2, ha='left', va='center', transform=ax.transAxes)
ax.text(.85, .5, "(%0.2f)" % chi2cphase_uncal, fontsize=fs2, ha='left', va='center', transform=ax.transAxes)
ax.text(.85, .3, "(%0.2f)" % chi2logcamp_uncal, fontsize=fs2, ha='left', va='center', transform=ax.transAxes)
ax.text(.85, .1, "(%0.2f)" % chi2camp_uncal, fontsize=fs2, ha='left', va='center', transform=ax.transAxes)

################################################################################
print("===========================================")
print("calculating cphase statistics")

# display the closure phase chi^2
ax = plt.subplot(gs[3:6, 0:2])
ax.set_title('Closure phase statistics')
ax.set_yticks([])
ax.set_xticks([])

# get closure triangle combinations
# ANDREW -- hacky, fix this!
cp = obs.c_phases(mode="all", count="min", uv_min=cp_uv_min,
                  snrcut=snrcut_dict['cphase'])
n_cphase = len(cp)
alltris = [(str(cpp['t1']), str(cpp['t2']), str(cpp['t3'])) for cpp in cp]
uniqueclosure_tri = []
for tri in alltris:
    if tri not in uniqueclosure_tri:
        uniqueclosure_tri.append(tri)

# generate data
obs_model = im.observe_same(obs, add_th_noise=False, ttype='nfft')

# TODO: check SNR cut
cphases_obs = obs.c_phases(mode='all', count='max', vtype='vis',
                           uv_min=cp_uv_min, snrcut=snrcut_dict['cphase'])
if snrcut_dict['cphase'] > 0:
    cphases_obs_all = obs.c_phases(mode='all', count='max', vtype='vis',
                                   uv_min=cp_uv_min, snrcut=0.)
    cphases_model_all = obs_model.c_phases(mode='all', count='max', vtype='vis',
                                           uv_min=cp_uv_min, snrcut=0.)
    mask = [cphase in cphases_obs for cphase in cphases_obs_all]
    cphases_model = cphases_model_all[mask]
    print('cphase snr cut', snrcut_dict['cphase'], ' : kept',
          len(cphases_obs), '/', len(cphases_obs_all))
else:
    cphases_model = obs_model.c_phases(mode='all', count='max', vtype='vis',
                                       uv_min=cp_uv_min, snrcut=0.)
#generate chi^2 -- NO SYSTEMATIC NOISES ncphase = 0 cphase_chisq_data = [] for c in range(0, len(uniqueclosure_tri)): cphases_obs_tri = obs.cphase_tri(uniqueclosure_tri[c][0], uniqueclosure_tri[c][1], uniqueclosure_tri[c][2], vtype='vis', ang_unit='deg', cphases=cphases_obs) if len(cphases_obs_tri) > 0: cphases_model_tri = obs_model.cphase_tri( uniqueclosure_tri[c][0], uniqueclosure_tri[c][1], uniqueclosure_tri[c][2], vtype='vis', ang_unit='deg', cphases=cphases_model) chisq_tri = 2 * np.sum( (1.0 - np.cos(cphases_obs_tri['cphase'] * DEGREE - cphases_model_tri['cphase'] * DEGREE)) / ((cphases_obs_tri['sigmacp'] * DEGREE)**2)) npts = len(cphases_obs_tri) data = [ uniqueclosure_tri[c][0], uniqueclosure_tri[c][1], uniqueclosure_tri[c][2], npts, chisq_tri ] cphase_chisq_data.append(data) #sort by decreasing chi^2 idx = np.argsort([data[-1] for data in cphase_chisq_data]) idx = list(reversed(idx)) chisqtab = r"\begin{tabular}{ l|l|l|l } \hline Triangle & $N_{tri}$ & $\chi^2_{tri}/N_{tri}$ & $\chi^2_{tri}/N_{tot}$\\ \hline \hline" first = True for i in range(len(cphase_chisq_data)): if i > 30: break data = cphase_chisq_data[idx[i]] tristr = r"%s-%s-%s" % (data[0], data[1], data[2]) nstr = r"%i" % data[3] chisqstr = r"%0.1f" % data[4] rchisqstr = r"%0.1f" % (float(data[4]) / float(data[3])) rrchisqstr = r"%0.3f" % (float(data[4]) / float(n_cphase)) if first: chisqtab += r" " + tristr + " & " + nstr + " & " + rchisqstr + " & " + rrchisqstr first = False else: chisqtab += r" \\" + tristr + " & " + nstr + " & " + rchisqstr + " & " + rrchisqstr chisqtab += r" \end{tabular}" ax.text(0.5, .975, chisqtab, ha="center", va="top", transform=ax.transAxes, size=fontsize) ################################################################################ print("===========================================") print("calculating camp statistics") #display the log closure amplitude chi2 ax = plt.subplot(gs[2:6, 2::]) ax.set_title('Log Closure amplitude statistics') #ax.axis('off') ax.set_yticks([]) ax.set_xticks([]) # get closure amplitude combinations # TODO -- hacky, fix this! cp = obs.c_amplitudes(mode="all", count="min", ctype='logcamp', debias=debias) n_camps = len(cp) allquads = [(str(cpp['t1']), str(cpp['t2']), str(cpp['t3']), str(cpp['t4'])) for cpp in cp] uniqueclosure_quad = [] for quad in allquads: if quad not in uniqueclosure_quad: uniqueclosure_quad.append(quad) # generate data # TODO: check SNR cut camps_obs = obs.c_amplitudes(mode='all', count='max', ctype='logcamp', debias=debias, snrcut=snrcut_dict['logcamp']) if snrcut_dict['logcamp'] > 0: camps_obs_all = obs.c_amplitudes(mode='all', count='max', ctype='logcamp', debias=debias, snrcut=0.) camps_model_all = obs_model.c_amplitudes(mode='all', count='max', ctype='logcamp', debias=False, snrcut=0.) mask = [ camp['camp'] in camps_obs['camp'] for camp in camps_obs_all ] camps_model = camps_model_all[mask] print('closure amp snrcut', snrcut_dict['logcamp'], ': kept', len(camps_obs), '/', len(camps_obs_all)) else: camps_model = obs_model.c_amplitudes(mode='all', count='max', ctype='logcamp', debias=False, snrcut=0.) 
#generate chi2 -- NO SYSTEMATIC NOISES ncamp = 0 camp_chisq_data = [] for c in range(0, len(uniqueclosure_quad)): camps_obs_quad = obs.camp_quad(uniqueclosure_quad[c][0], uniqueclosure_quad[c][1], uniqueclosure_quad[c][2], uniqueclosure_quad[c][3], vtype='vis', camps=camps_obs, ctype='logcamp') if len(camps_obs_quad) > 0: camps_model_quad = obs.camp_quad(uniqueclosure_quad[c][0], uniqueclosure_quad[c][1], uniqueclosure_quad[c][2], uniqueclosure_quad[c][3], vtype='vis', camps=camps_model, ctype='logcamp') chisq_quad = np.sum( np.abs( (camps_obs_quad['camp'] - camps_model_quad['camp']) / camps_obs_quad['sigmaca'])**2) npts = len(camps_obs_quad) data = (uniqueclosure_quad[c][0], uniqueclosure_quad[c][1], uniqueclosure_quad[c][2], uniqueclosure_quad[c][3], npts, chisq_quad) camp_chisq_data.append(data) #sort by decreasing chi^2 idx = np.argsort([data[-1] for data in camp_chisq_data]) idx = list(reversed(idx)) chisqtab = r"\begin{tabular}{ l|l|l|l } \hline Quadrangle & $N_{quad}$ & $\chi^2_{quad}/N_{quad}$ & $\chi^2_{quad}/N_{tot}$ \\ \hline \hline" for i in range(len(camp_chisq_data)): if i > 45: break data = camp_chisq_data[idx[i]] tristr = r"%s-%s-%s-%s" % (data[0], data[1], data[2], data[3]) nstr = r"%i" % data[4] chisqstr = r"%0.1f" % data[5] rchisqstr = r"%0.1f" % (data[5] / float(data[4])) rrchisqstr = r"%0.3f" % (data[5] / float(n_camps)) if i == 0: chisqtab += r" " + tristr + " & " + nstr + " & " + rchisqstr + " & " + rrchisqstr else: chisqtab += r" \\" + tristr + " & " + nstr + " & " + rchisqstr + " & " + rrchisqstr chisqtab += r" \end{tabular}" ax.text(0.5, .975, chisqtab, ha="center", va="top", transform=ax.transAxes, size=fontsize) #save the first page of the plot print('saving pdf page 1') pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() ################################################################################ #plot the vis amps fig = plt.figure(2, figsize=(18, 28), dpi=200) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) print("===========================================") print("plotting vis amps") ax = plt.subplot(gs[0:2, 0:2]) obs_tmp = obs_model.copy() obs_tmp.data['sigma'] *= 0. 
ax = plotall_obs_compare([obs, obs_tmp], 'uvdist', 'amp', axis=ax,
                         legend=False, clist=['k', SCOLORS[1]], ttype='nfft',
                         show=False, debias=debias, snrcut=snrcut_dict['amp'],
                         ebar=ebar, markersize=MARKERSIZE)

# modify the labels
ax.set_title('Calibrated Visibility Amplitudes')
ax.set_xlabel('u-v distance (G$\lambda$)')
ax.set_xlim([0, 1.e10])
ax.set_xticks([0, 2.e9, 4.e9, 6.e9, 8.e9, 10.e9])
ax.set_xticklabels(["0", "2", "4", "6", "8", "10"])
ax.set_xticks([1.e9, 3.e9, 5.e9, 7.e9, 9.e9], minor=True)
ax.set_xticklabels([], minor=True)
ax.set_ylabel('Amplitude (Jy)')
ax.set_ylim([0, 1.2 * flux])
yticks_maj = np.array([0, .2, .4, .6, .8, 1]) * flux
ax.set_yticks(yticks_maj)
ax.set_yticklabels(["%0.2f" % fl for fl in yticks_maj])
yticks_min = np.array([.1, .3, .5, .7, .9]) * flux
ax.set_yticks(yticks_min, minor=True)
ax.set_yticklabels([], minor=True)

################################################################################
# plot the caltable gains
if gainplots:
    print("===========================================")
    print("plotting gains")
    ax2 = plt.subplot(gs[0:2, 2:6])
    obs_tmp = obs_uncal.copy()
    for i in range(1):
        ct = selfcal(obs_tmp, im, method='amp', ttype='nfft', caltable=True,
                     gain_tol=.2, processes=PROCESSES)
        ct = ct.pad_scans()
        obs_tmp = ct.applycal(obs_tmp, interp='nearest', extrapolate=True)  # apply caltable
        if np.any(np.isnan(obs_tmp.data['vis'])):
            print("Warning: NaN in applycal vis table!")
            break
        if i > 0:
            ct_out = ct_out.merge([ct])
        else:
            ct_out = ct
    ax2 = ct_out.plot_gains('all', rangey=[.1, 10], yscale='log', axis=ax2,
                            legend=True, show=False)

    # median gains
    ax = plt.subplot(gs[3:6, 2:5])
    ax.set_title('Station gain statistics')
    ax.set_yticks([])
    ax.set_xticks([])
    gain_data = []
    for station in ct_out.tarr['site']:
        try:
            gain = np.median(np.abs(ct_out.data[station]['lscale']))
        except:
            continue
        pdiff = np.abs(gain - 1) * 100
        data = (station, gain, pdiff)
        gain_data.append(data)

    # sort by decreasing percent difference
    idx = np.argsort([data[-1] for data in gain_data])
    idx = list(reversed(idx))
    chisqtab = r"\begin{tabular}{ l|l|l } \hline Site & Median Gain & Percent diff.
\\ \hline \hline" for i in range(len(gain_data)): if i > 45: break data = gain_data[idx[i]] sitestr = r"%s" % (data[0]) gstr = r"%0.2f" % data[1] pstr = r"%0.0f" % data[2] if i == 0: chisqtab += r" " + sitestr + " & " + gstr + " & " + pstr else: chisqtab += r" \\" + sitestr + " & " + gstr + " & " + pstr chisqtab += r" \end{tabular}" ax.text(0.5, .975, chisqtab, ha="center", va="top", transform=ax.transAxes, size=fontsize) ################################################################################3 #baseline amplitude chi2 print("===========================================") print("baseline vis amps chisq") ax = plt.subplot(gs[3:6, 0:2]) ax.set_title('Visibility amplitude statistics') ax.set_yticks([]) ax.set_xticks([]) bl_unpk = obs.unpack(['t1', 't2'], debias=debias) n_bl = len(bl_unpk) allbl = [(str(bl['t1']), str(bl['t2'])) for bl in bl_unpk] uniquebl = [] for bl in allbl: if bl not in uniquebl: uniquebl.append(bl) #generate chi2 -- NO SYSTEMATIC NOISES ncamp = 0 bl_chisq_data = [] for ii in range(0, len(uniquebl)): bl = uniquebl[ii] amps_bl = obs.unpack_bl(bl[0], bl[1], ['amp', 'sigma'], debias=debias) if len(amps_bl) > 0: amps_bl_model = obs_model.unpack_bl(bl[0], bl[1], ['amp', 'sigma'], debias=False) if snrcut_dict['amp'] > 0: amask = amps_bl['amp'] / amps_bl['sigma'] > snrcut_dict[ 'amp'] amps_bl = amps_bl[amask] amps_bl_model = amps_bl_model[amask] chisq_bl = np.sum( np.abs((amps_bl['amp'] - amps_bl_model['amp']) / amps_bl['sigma'])**2) npts = len(amps_bl_model) data = (bl[0], bl[1], npts, chisq_bl) bl_chisq_data.append(data) #sort by decreasing chi^2 idx = np.argsort([data[-1] for data in bl_chisq_data]) idx = list(reversed(idx)) chisqtab = r"\begin{tabular}{ l|l|l|l } \hline Baseline & $N_{amp}$ & $\chi^2_{amp}/N_{amp}$ & $\chi^2_{amp}/N_{total}$ \\ \hline \hline" for i in range(len(bl_chisq_data)): if i > 45: break data = bl_chisq_data[idx[i]] tristr = r"%s-%s" % (data[0], data[1]) nstr = r"%i" % data[2] chisqstr = r"%0.1f" % data[3] rchisqstr = r"%0.1f" % (data[3] / float(data[2])) rrchisqstr = r"%0.3f" % (data[3] / float(n_bl)) if i == 0: chisqtab += r" " + tristr + " & " + nstr + " & " + rchisqstr + " & " + rrchisqstr else: chisqtab += r" \\" + tristr + " & " + nstr + " & " + rchisqstr + " & " + rrchisqstr chisqtab += r" \end{tabular}" ax.text(0.5, .975, chisqtab, ha="center", va="top", transform=ax.transAxes, size=fontsize) #save the first page of the plot print('saving pdf page 2') #plt.tight_layout() #plt.subplots_adjust(wspace=1,hspace=1) #plt.savefig(outname, pad_inches=MARGINS,bbox_inches='tight') pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() ################################################################################ #plot the visibility amplitudes page = 3 if ampplots: print("===========================================") print("plotting amplitudes") fig = plt.figure(3, figsize=(18, 28), dpi=200) plt.suptitle("Amplitude Plots", y=.9, va='center', fontsize=int(1.2 * fontsize)) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) i = 0 j = 0 switch = False obs_model.data['sigma'] *= 0 amax = 1.1 * np.max(np.abs(np.abs(obs_model.data['vis']))) obs_all = [obs, obs_model] for bl in uniquebl: ax = plt.subplot(gs[2 * i:2 * (i + 1), 2 * j:2 * (j + 1)]) ax = plot_bl_obs_compare(obs_all, bl[0], bl[1], 'amp', rangey=[0, amax], markersize=MARKERSIZE, debias=debias, snrcut=snrcut_dict['amp'], axis=ax, legend=False, clist=['k', SCOLORS[1]], ttype='nfft', show=False, ebar=ebar) if ax is None: continue if switch: i += 1 j = 0 switch = False else: j = 
1 switch = True ax.set_xlabel('') if i == 3: print('saving pdf page %i' % page) page += 1 pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() fig = plt.figure(3, figsize=(18, 28), dpi=200) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) i = 0 j = 0 switch = False print('saving pdf page %i' % page) page += 1 pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() ################################################################################ #plot the closure phases if cphaseplots: print("===========================================") print("plotting closure phases") fig = plt.figure(3, figsize=(18, 28), dpi=200) plt.suptitle("Closure Phase Plots", y=.9, va='center', fontsize=int(1.2 * fontsize)) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) i = 0 j = 0 switch = False obs_all = [obs, obs_model] cphases_model['sigmacp'] *= 0 cphases_all = [cphases_obs, cphases_model] for tri in uniqueclosure_tri: ax = plt.subplot(gs[2 * i:2 * (i + 1), 2 * j:2 * (j + 1)]) ax = plot_cphase_obs_compare(obs_all, tri[0], tri[1], tri[2], rangey=[-185, 185], cphases=cphases_all, markersize=MARKERSIZE, axis=ax, legend=False, clist=['k', SCOLORS[1]], ttype='nfft', show=False, ebar=ebar) if ax is None: continue if switch: i += 1 j = 0 switch = False else: j = 1 switch = True ax.set_xlabel('') if i == 3: print('saving pdf page %i' % page) page += 1 pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() fig = plt.figure(3, figsize=(18, 28), dpi=200) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) i = 0 j = 0 switch = False print('saving pdf page %i' % page) page += 1 pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() ################################################################################ #plot the log closure amps if campplots: print("===========================================") print("plotting closure amplitudes") fig = plt.figure(3, figsize=(18, 28), dpi=200) plt.suptitle("Closure Amplitude Plots", y=.9, va='center', fontsize=int(1.2 * fontsize)) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) i = 0 j = 0 switch = False obs_all = [obs, obs_model] camps_model['sigmaca'] *= 0 camps_all = [camps_obs, camps_model] cmax = 1.1 * np.max(np.abs(camps_obs['camp'])) for quad in uniqueclosure_quad: ax = plt.subplot(gs[2 * i:2 * (i + 1), 2 * j:2 * (j + 1)]) ax = plot_camp_obs_compare(obs_all, quad[0], quad[1], quad[2], quad[3], markersize=MARKERSIZE, ctype='logcamp', rangey=[-cmax, cmax], camps=camps_all, axis=ax, legend=False, clist=['k', SCOLORS[1]], ttype='nfft', show=False, ebar=ebar) if ax is None: continue if switch: i += 1 j = 0 switch = False else: j = 1 switch = True ax.set_xlabel('') if i == 3: print('saving pdf page %i' % page) page += 1 pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close() fig = plt.figure(3, figsize=(18, 28), dpi=200) gs = gridspec.GridSpec(6, 4, wspace=WSPACE, hspace=HSPACE) i = 0 j = 0 switch = False print('saving pdf page %i' % page) page += 1 pdf.savefig(pad_inches=MARGINS, bbox_inches='tight') plt.close()
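# The i/j/switch bookkeeping repeated in the amplitude, closure-phase, and
# closure-amplitude sections above walks a 2-wide grid of panels and starts a
# fresh PDF page every three rows. A sketch of that pagination factored into a
# generator; it assumes an already-open PdfPages object, and the helper name
# `panel_axes` is illustrative:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

def panel_axes(pdf, n_panels, rows_per_page=3):
    """Yield one subplot per panel, saving and starting a new page as needed."""
    per_page = 2 * rows_per_page
    fig = plt.figure(figsize=(18, 28))
    gs = gridspec.GridSpec(2 * rows_per_page, 4)
    for k in range(n_panels):
        if k and k % per_page == 0:
            pdf.savefig(fig)
            plt.close(fig)
            fig = plt.figure(figsize=(18, 28))
            gs = gridspec.GridSpec(2 * rows_per_page, 4)
        i, j = divmod(k % per_page, 2)
        yield plt.subplot(gs[2 * i:2 * (i + 1), 2 * j:2 * (j + 1)])
    pdf.savefig(fig)
    plt.close(fig)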
def close(self):
    # closes the module-global PdfPages instance created in __main__
    # (the class name was rebound to an instance there, so no self is passed)
    PdfPages.close()
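# A cleaner alternative to the global-instance pattern above is to hold the
# PdfPages object as an attribute and delegate to it. A minimal sketch; the
# class name `ReportWriter` is invented for illustration:
from matplotlib.backends.backend_pdf import PdfPages

class ReportWriter(object):
    def __init__(self, filename):
        self.pdf = PdfPages(filename)

    def savefig(self, *args, **kwargs):
        self.pdf.savefig(*args, **kwargs)

    def close(self):
        self.pdf.close()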
import sys from numpy import * from numpy.random import * import matplotlib.pyplot as plt import scipy.stats as st from matplotlib.backends.backend_pdf import PdfPages pp = PdfPages("output-histogram.pdf") import matplotlib #font = {'family' : 'normal', # 'weight' : 'bold', # 'size' : 10} #matplotlib.rc('font', **font) matplotlib.rc('xtick', labelsize=2) matplotlib.rc('ytick', labelsize=2) sys.path.append("/home/cbarnes/dev/cuda-sim-area/cuda-sim/trunk/") import cudasim import cudasim.EulerMaruyama as EulerMaruyama import cudasim.Gillespie as Gillespie import cudasim.Lsoda as Lsoda def print_results(result, timepoints, outfile, sx=-1, model=0): out = open(outfile, 'w') print >> out, 0, 0, 0, 0, for i in range(len(timepoints)): print >> out, timepoints[i], print >> out, "" # loop over threads
model_name = [sys.argv[2]] for s in model_name: print('using model ' + version + '_' + s) network_xml = '/Users/leahanderson/Code/Lanksershim_Network/Lshim_' + version + '_' + model_name[ 0] + '.xml' output_prefix = {} for mt in model_name: output_prefix[ mt] = '/Users/leahanderson/Code/Lanksershim_Network/output/' + version + '_' + mt dataset = '/Users/leahanderson/Code/datasets_external/lankershim' time_aggregation = 5 sys.path.append(dataset) pp = PdfPages('densities_' + version + '.pdf') import network_properties as netprops intersections = netprops.intersection_ids links = netprops.link_ids initial_time = 0 final_time = (netprops.time_range[1] - netprops.time_range[0]) / 1000 time_bounds = range(initial_time, final_time, time_aggregation) network = load_network(network_xml) link_densities = {} with open(dataset + '/densities_links.csv', 'rb') as csvfile: reader = csv.reader(csvfile, delimiter=',', quotechar='|') for row in reader: link = row[0] + row[1] movement = row[2] denlist = [int(d) for d in row[3::]]
df["year"] = np.floor(1960 + df.SALE_DATE / 365.25) df = df.loc[df.SALE_AMOUNT >= 50000, :] df = df.loc[df.SALE_AMOUNT <= 1000000, :] df["log_SALE_AMOUNT"] = np.log2(df.SALE_AMOUNT) df = df.loc[df.SALE_DATE >= 365.25 * 20] df["age"] = df.year - df.YEAR_BUILT fml = "log_SALE_AMOUNT ~ bs(year, 6) * bs(age, 6) + bs(year, 6) * (bs(LAND_SQUARE_FOOTAGE, 6) + bs(LIVING_SQUARE_FEET, 6) + bs(age, 6))" model = sm.OLS.from_formula(fml, df) result = model.fit() pdf = PdfPages("salesprice_lm.pdf") plt.clf() for age in 0, 10, 20, 40: pred, cb, fvals = predict_functional(result, "year", values={"age": age}, summaries={ "LAND_SQUARE_FOOTAGE": np.median, "LIVING_SQUARE_FEET": np.median }) plt.plot(fvals, pred, '-', label=str(age)) plt.grid(True) ha, lb = plt.gca().get_legend_handles_labels() leg = plt.figlegend(ha, lb, "center right")
ax.set_xlabel('PC1', fontsize=12, fontweight='bold')
ax.set_ylabel('PC2', fontsize=12, fontweight='bold')
fontsize = 12
ax = gca()
for tick in ax.xaxis.get_major_ticks():
    tick.label1.set_fontsize(fontsize)
    tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
    tick.label1.set_fontsize(fontsize)
    tick.label1.set_fontweight('bold')
labels = df.Linage.values
for label, x, y in zip(labels, X_r[:, 0], X_r[:, 1]):
    plt.annotate(label, xy=(x, y), textcoords='data', fontsize=7)
pp = PdfPages('ancestral_pca.pdf')
plt.savefig(pp, format='pdf')
pp.close()

########################### PCA Variance ##################################
# Keep the first 92 principal components
pca = PCA(92)
# project the (mean-centred) data onto those components
X_r = pca.fit_transform(scale(X, with_std=False))
# Obtain the explained variance for each principal component
varianceExp = pca.explained_variance_ratio_
# Compute the cumulative explained variance (in percent)
totVarExp = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4) * 100)
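# The cumulative explained variance computed above is usually shown as a
# scree curve. A minimal self-contained sketch, saved through PdfPages; the
# data here is random and only for illustration:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.decomposition import PCA

X = np.random.randn(100, 10)
pca = PCA().fit(X)
cum = np.cumsum(pca.explained_variance_ratio_) * 100
with PdfPages('pca_variance_sketch.pdf') as pp:
    plt.plot(np.arange(1, len(cum) + 1), cum, 'o-')
    plt.xlabel('number of components')
    plt.ylabel('cumulative explained variance (%)')
    pp.savefig()
    plt.close()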