def make_marks_figures():
    """Generate the 'marks' figures and save each as an SVG in the CWD.

    Produces: marks_hist.svg, marks_quantile.svg, marks_kde.svg,
    marks_kde_bndry.svg and marks_kde_bws.svg from one fixed-seed sample.
    """
    # Fixed seed so the figures are reproducible across runs.
    marks_percent = marks.gen_marks(12345)
    plt.figure()
    # Histogram of marks over the full 0-100 range.
    histogram.plot_histogram(marks_percent, 0, 100, n_bins=20)
    plt.xlabel('Marks (%)')
    plt.ylabel('Count')
    plt.savefig('marks_hist.svg')
    plt.clf()
    # Quantile (percentile vs. value) plot of the same sample.
    plot_quantile(marks_percent)
    plt.xlabel('Percentile')
    plt.ylabel('Marks (%)')
    plt.savefig('marks_quantile.svg')
    plt.clf()
    # NOTE(review): kernel=/bw= are the legacy seaborn (<0.11) kdeplot
    # arguments; confirm the pinned seaborn version before upgrading.
    sns.kdeplot(marks_percent, kernel='gau', bw=0.15, gridsize=1000)
    sns.despine()
    plt.xlabel('Marks (%)')
    plt.ylabel(r'$\rho(\operatorname{Marks})$')
    plt.savefig('marks_kde.svg')
    plt.clf()
    # KDE overlaid on a histogram (distplot is deprecated in seaborn >= 0.11).
    sns.distplot(marks_percent, bins=20, kde_kws={'bw': 0.15, 'gridsize': 1000})
    sns.despine()
    plt.xlabel('Marks (%)')
    plt.ylabel(r'$\rho(\operatorname{Marks})$')
    plt.savefig('marks_kde_bndry.svg')
    # NOTE(review): no plt.clf() here, so the varying-bandwidth curves below
    # are drawn on top of the distplot axes — confirm the overlay is intended.
    bws = np.linspace(0.05, 0.3, 6)
    kde.plot_kde_varying_bw(marks_percent, bws)
    plt.xlabel('Marks (%)')
    plt.ylabel(r'$\rho(\operatorname{Marks})$')
    plt.savefig('marks_kde_bws.svg')
    plt.close()
def main():
    """Read one column from the input files, report statistics before and
    after outlier removal, and optionally plot histograms of both data sets.
    """
    column, files, bins, outlierRange, plotErrorBars = parseArguments()
    # We count columns starting with 1 on the command line; convert to the
    # 0-based index used internally.
    column -= 1

    data = readDataFromFiles(files, column)
    printFilenamesAbsolutePath(files)

    divider = "---------------------------"
    print(divider)
    print("Before removing outliers:")
    analyseData(data)

    filteredData = returnListWithoutOutliers(data, outlierRange)
    print(divider)
    print("After removing outliers:")
    analyseData(filteredData)

    if bins:
        import histogram
        # The flag arrives as a string/number; treat anything except an
        # explicit false-ish value as True.
        showErrorBars = plotErrorBars not in ['False', 'false', 0]
        histogram.plot_histogram(data, bins, showErrorBars, 1,
                                 "Before removing outliers")
        histogram.plot_histogram(
            filteredData, bins, showErrorBars, 2,
            "After removing outliers (%.2f * IQR)" % (outlierRange * 1.5))
        # To keep plots "alive": block until the user presses Enter.
        raw_input()
def jacobian_frequency(): i = 0 for file_name in os.listdir(path_out_jac)[0]: print file_name nim = NiftiImage(path_out_jac + file_name) print 'first subject', nim.header['dim'] # imshow(nim.data[90], interpolation='nearest') #, cmap=cm.grey) # slice through the vertical axis # show() image_data = nim.asarray() values_below_zero = image_data[image_data < 0] count_below_zero = len(values_below_zero) minimum_val = min(values_below_zero) mean_val = average(values_below_zero) print file_name, 'count below zero', count_below_zero, 'minimum', minimum_val, 'mean', mean_val histogram.plot_histogram(image_data.flatten()) i += 1
def process_backend_data(self):
    """ Question 4: implement this method

    Aggregates the backend database counts and re-bins them into k bins
    over [0, 600], then plots the resulting histogram.
    """
    # Total count per value, summed across all peers/rows of the database.
    data = np.sum(self.backend_database, axis=0)
    size = np.sum(data)
    # Rice-rule-style bin count: k = 2 * n^(1/3).
    # NOTE(review): relies on Python 3 true division for 1/3; under
    # Python 2 this would be 0 and k would always be 2 — confirm.
    k = int(2 * np.power(size, 1/3))
    # k+1 evenly spaced bin edges over [0, 600].
    bins = [i * 600 / k for i in range(k+1)]
    counts = []
    i = 0  # index of the current bin edge being filled
    for val, count in enumerate(data[:-1]):
        # NOTE(review): val/10 appears to rescale the raw index into the
        # bins' units; verify the factor against how backend_database is
        # indexed. Also note counts[-1] would raise IndexError if the very
        # first value fell below bins[0] — confirm that cannot happen.
        if val/10 < bins[i]:
            counts[-1] += count
        else:
            counts.append(count)
            i += 1
    plot_histogram((counts, bins))
# GAN-style training session: each epoch first plots the generator's current
# output distribution against the target Gaussian, then alternates
# discriminator updates over mini-batches.
with tf.Session() as sess:
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    sess.run(init)
    for epoch in range(epoch_num):
        print('** epoch {} begin **'.format(epoch))
        # Running objective accumulators for this epoch.
        g_obj = 0.0
        d_obj = 0.0
        # plot p_g: sample 10k latent vectors, push them through the
        # generator, and compare against 10k draws from the target normal.
        batch_z = draw_from_pz(10000, z_dim)
        tmp = model.generate(sess, batch_z)
        tmp = np.reshape(tmp, [10000])
        tmp2 = np.random.normal(target_mu, target_sigma, [10000]).astype(np.float32)
        plot_histogram(tmp, tmp2, 'result/{}.png'.format(epoch))
        for step in range(num_one_epoch):
            # draw from p_z
            batch_z = draw_from_pz(batch_size, z_dim)
            # draw from p_data
            #batch_inputs = gaussian_mixture(batch_size)
            batch_inputs = np.random.normal(target_mu, target_sigma, [batch_size, 1]).astype(np.float32)
            #batch_inputs = gaussian_mixture_B(batch_size)
            #batch_inputs = single_gaussian(batch_size)
            # train discriminator
            # NOTE(review): the loop body appears to continue beyond this
            # chunk (generator step not visible here).
            d_obj += model.training_disc(sess, batch_z, batch_inputs)
return bins_count, BINS if __name__ == "__main__": s = SimulationQ4(number_of_peers=10, max_peer_pool_size=2) s.run() s.report_result() s = SimulationQ4(number_of_peers=1000, max_peer_pool_size=10) s.run() s.report_result() s = SimulationQ4(number_of_peers=1000, max_peer_pool_size=100) s.run() s.report_result() s = SimulationQ4(number_of_peers=1000, max_peer_pool_size=1000) s.run() s.report_result() s = SimulationQ4(number_of_peers=10000, max_peer_pool_size=10) s.run() s.report_result() s = SimulationQ4(number_of_peers=10000, max_peer_pool_size=100) s.run() # Let's plot the last one to see how it looks plot_histogram(s.process_backend_data()) s.report_result()
def openTargetImage(self):
    """Ask the user to pick the target image, then plot its histogram."""
    path, _ = QFileDialog.getOpenFileName()
    img = histogram.read_image(path)
    hist = histogram.calculate_histogram(img)
    histogram.plot_histogram(hist, 'target image')
def main():
    """Draw the scaffold comparison figure: scaffolds, scale bar, alignments
    colored by identity, vertical marks, genes and per-scaffold histograms,
    all laid out on a single matplotlib axes.
    """
    args = common.get_args()
    func_print_messages( )
    seqs = scf.input_scaffoldtsv( args.input )
    size = common.Size( seqs, args.margin_bw_scaffolds, args.xlim_max, args.alignment_height )
    #histograms = histogram.set_space( args.hist, seqs, size.histogram_height )
    size.set_histogram_space( seqs, args.hist )
    size.set_scaffold_layout( seqs, args.scaffold_layout )
    size.output_parameters()

    fig = plt.figure( figsize=size.figsize_inch )
    ax = fig.add_subplot(111)
    fig.patch.set_alpha( 0.0 )
    func_set_axes( ax, size )
    scf.plot_scaffolds( ax, seqs, args.scaffold_font_size )

    ##plot scale bar
    scalebar = scf.Scalebar( size )
    scalebar.plot( ax )
    scalebar.output_parameters()

    ##plot alignment
    max_identity = args.max_identity
    # One plotting function per supported alignment format, index-matched.
    input_formats = [ args.alignment, args.blastn, args.lastz, args.mummer ]
    func_plot_alignmment = [ alignment.plot_alignment4original,
                             alignment.plot_alignment4blastn,
                             alignment.plot_alignment4lastz,
                             alignment.plot_alignment4mummer ]
    valid_files = alignment.count_alignment_files( args )
    # Idiom fix: positive guard instead of "if valid_files == 0: pass else:".
    if valid_files > 0:
        min_identity = alignment.set_min_identity( args )
        ##set colormap
        heatmap = alignment.Colormap( min_identity, max_identity, args.colormap )
        heatmap.output_parameters()
        ##set and plot colormap legend
        heatmap_legend = alignment.Colorbox( size )
        heatmap_legend.plot( ax, heatmap )
        heatmap_legend.output_parameters()
        for files, func_plot in zip( input_formats, func_plot_alignmment ):
            if files is None:
                continue
            for fn in files:
                if not os.path.isfile( fn ):
                    continue
                func_plot( seqs, ax, heatmap, size, fn )

    ##plot mark_v
    if args.mark_v is not None:
        if os.path.isfile( args.mark_v ):
            mark_v.plot_mark_v( seqs, ax, size, args.mark_v )

    ##plot gene
    if args.gff3 is not None:
        for fn in args.gff3:
            if not os.path.isfile( fn ):
                continue
            gff.plot_genes( seqs, ax, size, fn )

    ##plot histogram
    histogram.plot_background( seqs, ax, size )
    if args.hist is not None:
        for fn in args.hist:
            if not os.path.isfile( fn ):
                continue
            histogram.plot_histogram( seqs, ax, size, fn )
# Write the finished figure to <out>.pdf.
pdf_file = args.out + '.pdf'
# Robustness fix: context manager finalizes/closes the PDF even if
# savefig raises (the original open/savefig/close was not exception-safe).
with PdfPages( pdf_file ) as pp:
    pp.savefig( fig, bbox_inches='tight' )
#residues_file.write("%s %s \n"%(cluster_id, " ".join(set(residues)))) residues_file.write("%s %s \n"%(cluster_id, " ".join(residues))) contacts_per_cluster[cluster_id] = residues residues_file.close() if options.do_plots: #-------------------------------- # Plot distribution of the residues #-------------------------------- contacts_per_residue = get_num_contacts_per_residue(contacts_per_cluster) contact_residue_labels = get_labels (contacts_per_cluster) # A normal plot target = os.path.join(RESULTS_PATH, "histogram.svg") plot_histogram(contacts_per_cluster, contact_residue_labels, target, False) # A plot averaging target = os.path.join(RESULTS_PATH, "histogram_a.svg") plot_histogram(contacts_per_cluster, contact_residue_labels, target, True) # A plot filtering filtered_contact_residue_labels = filter_less_contacts_than(2000, contact_residue_labels, contacts_per_residue) target = os.path.join(RESULTS_PATH, "histogram_f.svg") plot_histogram(contacts_per_cluster, filtered_contact_residue_labels, target, False) # A plot filtering + averaging filtered_contact_residue_labels = filter_less_contacts_than(2000, contact_residue_labels, contacts_per_residue) target = os.path.join(RESULTS_PATH, "histogram_fa.svg") plot_histogram(contacts_per_cluster, filtered_contact_residue_labels, target, True)
# Training loop variant: per epoch, plots the generator distribution against
# a degenerate (constant target_mu) reference, then scatters the
# discriminator's value and gradient over the fixed probe points before the
# mini-batch updates. NOTE(review): presumably runs inside an enclosing
# tf.Session() `with` block not visible in this chunk.
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(epoch_num):
    print('** epoch {} begin **'.format(epoch))
    #target_sigma *= 0.99
    # Running objective accumulators for this epoch.
    g_obj = 0.0
    d_obj = 0.0
    # plot p_g: 10k generator samples vs. a constant-mu reference sample.
    batch_z = draw_from_pz(10000, z_dim)
    tmp = model.generate(sess, batch_z)
    tmp = np.reshape(tmp, [10000])
    #tmp2 = np.random.normal(target_mu, target_sigma, [10000]).astype(np.float32)
    tmp2 = np.asarray([target_mu] * 10000).astype(np.float32)
    plot_histogram(tmp, tmp2, '{}/{}.png'.format(args.dir, epoch))
    # Discriminator value/gradient over the fixed probe grid, flattened for
    # plotting; grad[300] is a single hand-picked probe point for logging.
    val, grad = model.get_disc_value(sess, fixed)
    val = np.reshape(val, [-1])
    grad = np.reshape(grad, [-1])
    print(np.mean(tmp), grad[300])
    plot_scatter2(fixed, grad, '{}_grad'.format(args.dir), '{}'.format(epoch), color = 'red')
    plot_scatter3(fixed, np.log(val), '{}_val'.format(args.dir), '{}'.format(epoch), color = 'blue')
    for step in range(num_one_epoch):
        # draw from p_z
        batch_z = draw_from_pz(batch_size, z_dim)
        # draw from p_data
        # NOTE(review): the active p_data source is not visible in this
        # chunk — both candidates below are commented out here.
        #batch_inputs = gaussian_mixture(batch_size)
        #batch_inputs = np.random.normal(target_mu, target_sigma, [batch_size, 1]).astype(np.float32)