Example #1
def plot_the_loss_curve(epochs, mae_training, mae_validation, filename):
  """Plot a curve of loss vs. epoch."""
  name = filename.split('.')

  plt.figure()
  plt.xlabel("Epoch")
  plt.ylabel("Root Mean Squared Error")

  plt.plot(epochs[1:], mae_training[1:], label="Training Loss")
  plt.plot(epochs[1:], mae_validation[1:], label="Validation Loss")
  plt.legend()
  
  # We're not going to plot the first epoch, since the loss on the first epoch
  # is often substantially greater than the loss for other epochs.
  merged_mae_lists = mae_training[1:] + mae_validation[1:]
  highest_loss = max(merged_mae_lists)
  lowest_loss = min(merged_mae_lists)
  delta = highest_loss - lowest_loss
  print(delta)

  top_of_y_axis = highest_loss + (delta * 0.05)
  bottom_of_y_axis = lowest_loss - (delta * 0.05)
   
  plt.ylim([bottom_of_y_axis, top_of_y_axis])
  plt.save("static/nn_"+name[0]+".png")
Example #2
def main():
	# read arguments
	parser = opts_parser()
	options, args = parser.parse_args()
	
	#plot results
	# Y = [int(x) for x in list(open('results.txt', 'r'))]
	results = list(open('results.txt', 'r'))[22:]
	wls = []
	intensity = []
	num_results=len(results)/2

	for i in xrange(num_results):
		sensor = int(results[2*i])
		intensity.append(sensor)
		r,g,b = [int(channel)/255.0 for channel in results[2*i+1].split()]
		wavelength = RGB2wav(r, g, b)
		wls.append(wavelength)
	
	#sortwls = np.argsort(wls)
	#intensity = [intensity[i] for i in sortwls][::-1]
	# X = [380.0 + x*(750.0-380.0) for x in np.sort(wls)[::-1]]
	#X = [x for x in xrange(0, len(intensity))]
	plt.scatter(wls, intensity)
	plt.ylim(0,1100)
	plt.xlim(300,650)
	plt.show()
	plt.savefig(args[1] + options.format)

	#save results to file
	np.save(outfile, results)  # np.save expects the target file first
Example #3
def save_plot_data_series(name, *handles):
	from matplotlib import pyplot as plt
	from lib_Testing import testImgDir
	plt.figure(1)
	for hndl in handles:
		plt.plot(hndl.dates, hndl.values)
	plt.savefig(testImgDir + name, bbox_inches='tight')
Example #4
def PlotData(x, g_z, data_cols, label_cols=[], seed=0, with_class=False, data_dim=2, save=False, prefix=''):
    real_samples = pd.DataFrame(x, columns=data_cols + label_cols)
    gen_samples = pd.DataFrame(g_z, columns=data_cols + label_cols)

    f, axarr = plt.subplots(1, 2, figsize=(6, 2))
    if with_class:
        axarr[0].scatter(real_samples[data_cols[0]], real_samples[data_cols[1]],
                         c=real_samples[label_cols[0]] / 2)  # , cmap='plasma'  )
        axarr[1].scatter(gen_samples[data_cols[0]], gen_samples[data_cols[1]],
                         c=gen_samples[label_cols[0]] / 2)  # , cmap='plasma'  )

        # For when there are multiple one-hot encoded label columns
        # for i in range(len(label_cols)):
        # temp = real_samples.loc[ real_samples[ label_cols[i] ] == 1 ]
        # axarr[0].scatter( temp[data_cols[0]], temp[data_cols[1]], c='C'+str(i), label=i )
        # temp = gen_samples.loc[ gen_samples[ label_cols[i] ] == 1 ]
        # axarr[1].scatter( temp[data_cols[0]], temp[data_cols[1]], c='C'+str(i), label=i )

    else:
        axarr[0].scatter(real_samples[data_cols[0]], real_samples[data_cols[1]])  # , cmap='plasma'  )
        axarr[1].scatter(gen_samples[data_cols[0]], gen_samples[data_cols[1]])  # , cmap='plasma'  )
    axarr[0].set_title('real')
    axarr[1].set_title('generated')
    axarr[0].set_ylabel(data_cols[1])  # Only add y label to left plot
    for a in axarr: a.set_xlabel(data_cols[0])  # Add x label to both plots
    axarr[1].set_xlim(axarr[0].get_xlim()), axarr[1].set_ylim(
        axarr[0].get_ylim())  # Use axes ranges from real data for generated data

    if save:
        plt.savefig(prefix + '.xgb_check.png')

    plt.show()
Example #5
def main():
    usage = "usage: %prog [options] track_file [output_file]"
    description = "Read a feature track file and plot the tracks"
    parser = OptionParser(usage=usage, description=description)

    (options, args) = parser.parse_args()

    track_filename = args[0]

    output_filename = None
    if len(args) >= 2:
        output_filename = args[1]

    tracks = load_tracks(track_filename)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim(2084)
    ax.set_ylim(2084)
    for track_id, track in tracks.iteritems():
        if len(track) < 10:
            continue
        coords = [ts.location for ts in track]
        codes = [Path.MOVETO] + [Path.LINETO] * (len(track) - 1)
        path = Path(coords, codes, closed=False)
        rand_color = (random.random(), random.random(), random.random())
        patch = patches.PathPatch(path, facecolor='none', edgecolor=rand_color)
        ax.add_patch(patch)

    plt.show()

    if output_filename:
        plt.savefig(output_filename)
Example #6
    def plot_trace(self,plotfile=None, show=False):
        """ Plot the trace of the MCMC run along with the marginal distributions of a subset of parameters

        Parameters
        ----------
        plotfile : str, optional
            Name of a file to write the plot to
        show : bool, optional, default False
            Whether to show the plot window

        """
        if not self.fitted:
            pass #raise an error here
        ax = az.plot_trace(
            self.trace,
            var_names=self.plot_trace_vars,
            #filter_vars="regex",
            compact=True,
            #lines=[
                #("mu", {}, mu),
                #("cov", {}, cov),
                #("chol_stds", {}, sigma),
                #("chol_corr", {}, rho),
            #],
        )
        if isinstance(plotfile, str):
            plt.savefig(plotfile)
        if show:
            plt.show()
Example #7
def plt_to_base64(plt):
    buffer = io.BytesIO()
    plt.savefig(buffer, format='png')  # pyplot has savefig (not save); 'verbose' is not a savefig kwarg
    buffer.seek(0)

    plt_base64 = base64.b64encode(buffer.read()).decode('ascii')
    return '<img src="data:image/png;base64,{}">'.format(plt_base64)
Example #8
    def result(self):
        data_df = self.cleanData.getSpambase_data()
        # Remove the is_spam label from the DataFrame, save it for later
        varieties = list(data_df.pop('is_spam'))

        # Extract the measurements as a NumPy array
        samples = data_df.values
        """
        Perform hierarchical clustering on samples using the
        linkage() function with the method='complete' keyword argument.
        Assign the result to mergings.
        """
        mergings = linkage(samples, method='complete')
        """
        Plot a dendrogram using the dendrogram() function on mergings,
        specifying the keyword arguments labels=varieties, leaf_rotation=90,
        and leaf_font_size=6.
        """
        dendrogram(
            mergings,
            labels=varieties,
            leaf_rotation=90,
            leaf_font_size=6,
        )
        plt.savefig(self.file_img)
Example #9
def main():
    usage = "usage: %prog [options] track_file [output_file]"
    description = "Read a feature track file and plot the tracks"
    parser = OptionParser(usage=usage, description=description)

    (options, args) = parser.parse_args()

    track_filename = args[0]

    output_filename = None
    if len(args) >= 2:
        output_filename = args[1]

    tracks = load_tracks(track_filename)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim(2084)
    ax.set_ylim(2084)
    for track_id, track in tracks.iteritems():
        if len(track) < 10:
            continue
        coords = [ts.location for ts in track]
        codes = [Path.MOVETO] + [Path.LINETO] * (len(track)-1)
        path = Path(coords, codes, closed=False)
        rand_color = (random.random(), random.random(), random.random())
        patch = patches.PathPatch(path, facecolor='none', edgecolor=rand_color)
        ax.add_patch(patch)

    plt.show()

    if output_filename:
        plt.savefig(output_filename)
Example #10
def save_graph(input_filepath, output_filepath):
    '''
    Takes a .csv file as outputted from student-transfer-reports.py and
    saves a .png file containing a graph representing the data therein.
    '''
    draw_graph(input_filepath)
    plt.savefig(output_filepath)
Example #11
def plot_confusion_matrix(cm,
                          classes,
                          normalize=False,
                          title='Confusion Matrix',
                          cmap=plt.cm.Blues):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print("Confusion matrix, without normalization")

    print(cm)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True Label')
    plt.xlabel("Predicted Label")
    filename = 'plots/plot_confusion_matrix.png'
    plt.savefig(filename)
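
A minimal usage sketch for the function above, assuming the module-level imports it relies on (numpy as np, itertools, matplotlib.pyplot as plt) and a plots/ directory for the output file; the labels and predictions are made up for illustration.

import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 2, 2]
y_pred = [0, 1, 1, 1, 2, 0]
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, classes=['cat', 'dog', 'bird'], normalize=True)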
Example #12
def averager(imgpaths,
             dest_filename=None,
             width=500,
             height=600,
             background='black',
             blur_edges=False,
             out_filename='result.png',
             plot=False):

    size = (height, width)

    images = []
    point_set = []
    for path in imgpaths:
        img, points = load_image_points(path, size)
        if img is not None:
            images.append(img)
            point_set.append(points)

    if len(images) == 0:
        raise FileNotFoundError('Could not find any valid images.' +
                                ' Supported formats are .jpg, .png, .jpeg')

    if dest_filename is not None:
        dest_img, dest_points = load_image_points(dest_filename, size)
        if dest_img is None or dest_points is None:
            raise Exception('No face or detected face points in dest img: ' +
                            dest_filename)
    else:
        dest_img = np.zeros(images[0].shape, np.uint8)
        dest_points = locator.average_points(point_set)

    num_images = len(images)
    result_images = np.zeros(images[0].shape, np.float32)
    for i in range(num_images):
        result_images += warper.warp_image(images[i], point_set[i],
                                           dest_points, size, np.float32)

    result_image = np.uint8(result_images / num_images)
    face_indexes = np.nonzero(result_image)
    dest_img[face_indexes] = result_image[face_indexes]

    mask = blender.mask_from_points(size, dest_points)
    if blur_edges:
        blur_radius = 10
        mask = cv2.blur(mask, (blur_radius, blur_radius))

    if background in ('transparent', 'average'):
        dest_img = np.dstack((dest_img, mask))

        if background == 'average':
            average_background = locator.average_points(images)
            dest_img = blender.overlay_image(dest_img, mask,
                                             average_background)

    print('Averaged {} images'.format(num_images))
    plt = plotter.Plotter(plot, num_images=1, out_filename=out_filename)
    plt.save(dest_img)
    plt.plot_one(dest_img)
    plt.show()
Example #13
def sourceplot(sourcelist, filename=None, figsize=(12, 8), showlegend=True, showspline=True, marker=None):
	"""
	I show you a plot of a list of Source objects.
	"""
	
	plt.figure(figsize=figsize)
	for s in sourcelist:
		if marker is None:
			plt.plot(s.ijds, s.imags, marker="None", color=s.plotcolour, linestyle="-", label = "%s" % (s.name))
		else:
			plt.plot(s.ijds, s.imags, marker=marker, color=s.plotcolour, linestyle="none", label = "%s" % (s.name))
	
	
		if showspline:
			spline = s.spline()
			xs = np.arange(s.ijds[0], s.ijds[-1], 0.02)
			ys = spline.eval(jds = xs)
			plt.plot(xs, ys, "-", color="red", zorder=+20, label="%s.spline()" % (s.name))
				
			
			plt.plot()
	
	# Something for astronomers only : we invert the y axis direction !
	axes = plt.gca()
	axes.set_ylim(axes.get_ylim()[::-1])
	plt.xlabel("HJD - 2400000.5 [days]", fontsize=14)
	plt.ylabel("Magnitude (relative)", fontsize=14)	
		
	if showlegend:
		plt.legend()

	if filename:
		plt.savefig(filename)
	else:
		plt.show()
Example #14
def tlen_plot(args):
	f1 = args[0]
	f2 = args[1]
	tlen1 = []
	cnt1 = []

	tlen2 = []
	cnt2 = []

	with open(f1) as f:
		for l in f:
			ll = l.strip().split()
			tlen1.append(ll[1])
			cnt1.append(ll[0])

	with open(f2) as f:
		for l in f:
			ll = l.strip().split()
			tlen2.append(ll[1])
			cnt2.append(ll[0])

	plt.plot(tlen1, cnt1, color='k')
	plt.plot(tlen2, cnt2, color='g')

	plt.savefig('tlen.png')
Example #15
def psplot(pslist, nbins = 0, filename=None, figsize=(12, 8), showlegend=True):
		"""
		Plots a list of PS objects.
		If the PS has a slope, it is plotted as well.
		
		if nbins > 0, I bin the spectra.
		
		add option for linear plot ?
		"""
		
		plt.figure(figsize=figsize)
		for ps in pslist:
		
			
			if not np.all(np.isfinite(np.log10(ps.p))):
				print "No power to plot (probably flat curve !), skipping this one."
				continue
			# We bin the points
			
			if nbins > 0:
				logf = np.log10(ps.f[1:]) # we remove the first one
				logbins = np.linspace(np.min(logf), np.max(logf), nbins+1) # So nbins +1 numbers here.
				bins = 10**logbins
				bincenters = 0.5*(bins[:-1] + bins[1:]) # nbins centers
				logbins[0] -= 1.0
				logbins[-1] += 1.0
				binindexes = np.digitize(logf, logbins) # binindexes go from 1 to nbins+1
				binvals = []
				binstds = []
				for i in range(1, nbins+1):
					vals = ps.p[1:][binindexes == i]
					binvals.append(np.mean(vals))
					binstds.append(np.std(vals)/np.sqrt(vals.size))
			
				bincenters = np.array(bincenters)
				binvals = np.array(binvals)
				binstds = np.array(binstds)
			
				plt.loglog(bincenters, binvals, marker=".", linestyle="-", color=ps.plotcolour, label = "%s" % (ps))
			
			else:
				plt.loglog(ps.f, ps.p, marker=".", linestyle="None", color=ps.plotcolour, label = "%s" % (ps))
			if ps.slope is not None:
				plt.loglog(ps.slope["f"], ps.slope["p"], marker="None", color=ps.plotcolour, label = "Slope %s = %.3f" % (ps, ps.slope["slope"]))
				plt.axvline(ps.slope["fmin"], color = ps.plotcolour, dashes = (5,5))
				plt.axvline(ps.slope["fmax"], color = ps.plotcolour, dashes = (5,5))
		
		plt.xlabel("Frequency [1/days]")
		plt.ylabel("Power")
		
		if showlegend:
			plt.legend()
		
		#plt.text(np.min(10**fitx), np.max(10**pfit), "Log slope : %.2f" % (popt[0]), color="red")
		
		
		if filename:
			plt.savefig(filename)
		else:
			plt.show()
Example #16
def bokeh_plot(u, t, legends, U, omega, t_range, filename):
	"""
	Строится график зависимости приближенного решения от t с 
	использованием библиотеки Bokeh.
	u и t - списки (несколько экспериментов могут сравниваться).
	легенды содержат строки для различных пар u,t.
    """
	if not isinstance(u, (list,tuple)):
		u = [u]  
	if not isinstance(t, (list,tuple)):
		t = [t]  
	if not isinstance(legends, (list,tuple)):
		legends = [legends] 

	import bokeh.plotting as plt
	plt.output_file(filename, mode='cdn', title='Comparison')
	# Assume that all t arrays have the same size
	t_fine = np.linspace(0, t[0][-1], 1001)  # fine mesh for the exact solution
	tools = 'pan,wheel_zoom,box_zoom,reset,'\
	        'save,box_select,lasso_select'
	u_range = [-1.2*U, 1.2*U]
	font_size = '8pt'
	p = []  # list of plot objects
	# Make the first figure
	p_ = plt.figure(
		width=300, plot_height=250, title=legends[0],
		x_axis_label='t', y_axis_label='u',
		x_range=t_range, y_range=u_range, tools=tools,
		title_text_font_size=font_size)
	p_.xaxis.axis_label_text_font_size=font_size
	p_.yaxis.axis_label_text_font_size=font_size
	p_.line(t[0], u[0], line_color='blue')
	# Add the exact solution
	u_e = u_exact(t_fine, U, omega)
	p_.line(t_fine, u_e, line_color='red', line_dash='4 4')
	p.append(p_)
	# Make the remaining figures and attach their axes to the first figure's axes
	for i in range(1, len(t)):
		p_ = plt.figure(
			width=300, plot_height=250, title=legends[i],
			x_axis_label='t', y_axis_label='u',
			x_range=p[0].x_range, y_range=p[0].y_range, tools=tools,
			title_text_font_size=font_size)
		p_.xaxis.axis_label_text_font_size = font_size
		p_.yaxis.axis_label_text_font_size = font_size
		p_.line(t[i], u[i], line_color='blue')
		p_.line(t_fine, u_e, line_color='red', line_dash='4 4')
		p.append(p_)
		
	# Arrange all plots in a grid with 3 plots per row
	grid = [[]]
	for i, p_ in enumerate(p):
		grid[-1].append(p_)
		if (i+1) % 3 == 0:
			# New row
			grid.append([])
	plot = plt.gridplot(grid, toolbar_location='left')
	plt.save(plot)
	plt.show(plot)
Example #17
def bokeh_plot(u, t, legends, I, w, t_range, filename):
    """
    Make plots for u vs t using the Bokeh library.
    u and t are lists (several experiments can be compared).
    legends contains legend strings for the various u,t pairs.
    """
    if not isinstance(u, (list,tuple)):
        u = [u]  # wrap in list
    if not isinstance(t, (list,tuple)):
        t = [t]  # wrap in list
    if not isinstance(legends, (list,tuple)):
        legends = [legends]  # wrap in list

    import bokeh.plotting as plt
    plt.output_file(filename, mode='cdn', title='Comparison')
    # Assume that all t arrays have the same range
    t_fine = np.linspace(0, t[0][-1], 1001)  # fine mesh for u_e
    tools = 'pan,wheel_zoom,box_zoom,reset,'\
            'save,box_select,lasso_select'
    u_range = [-1.2*I, 1.2*I]
    font_size = '8pt'
    p = []  # list of plot objects
    # Make the first figure
    p_ = plt.figure(
        width=300, plot_height=250, title=legends[0],
        x_axis_label='t', y_axis_label='u',
        x_range=t_range, y_range=u_range, tools=tools,
        title_text_font_size=font_size)
    p_.xaxis.axis_label_text_font_size=font_size
    p_.yaxis.axis_label_text_font_size=font_size
    p_.line(t[0], u[0], line_color='blue')
    # Add exact solution
    u_e = u_exact(t_fine, I, w)
    p_.line(t_fine, u_e, line_color='red', line_dash='4 4')
    p.append(p_)
    # Make the rest of the figures and attach their axes to
    # the first figure's axes
    for i in range(1, len(t)):
        p_ = plt.figure(
            width=300, plot_height=250, title=legends[i],
            x_axis_label='t', y_axis_label='u',
            x_range=p[0].x_range, y_range=p[0].y_range, tools=tools,
            title_text_font_size=font_size)
        p_.xaxis.axis_label_text_font_size = font_size
        p_.yaxis.axis_label_text_font_size = font_size
        p_.line(t[i], u[i], line_color='blue')
        p_.line(t_fine, u_e, line_color='red', line_dash='4 4')
        p.append(p_)

    # Arrange all plots in a grid with 3 plots per row
    grid = [[]]
    for i, p_ in enumerate(p):
        grid[-1].append(p_)
        if (i+1) % 3 == 0:
            # New row
            grid.append([])
    plot = plt.gridplot(grid, toolbar_location='left')
    plt.save(plot)
    plt.show(plot)
Example #18
def quick(file_name):
    file_np = file_name + '.npy'
    y_axis = np.load(file_np)
    num_spacing = len(y_axis)
    x_axis = np.linspace(0, 1, num_spacing)
    plt.plot(x_axis, y_axis, 'ro')
    plt.savefig(file_name + '.png')  # savefig needs a filename; reuse the input name
    plt.show()
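
A quick way to exercise the function above, assuming numpy and matplotlib.pyplot are imported as np and plt at module level; the saved array is invented for illustration.

import numpy as np

np.save('demo_curve.npy', np.linspace(0.0, 1.0, 50) ** 2)  # fabricated data
quick('demo_curve')  # reads demo_curve.npy, writes demo_curve.png, then shows the plot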
Example #19
def plot(train_losses, val_losses):
    plt.plot(train_losses, label='train')
    plt.plot(val_losses, label='val')
    plt.legend()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.grid()
    plt.show()
    plt.savefig('loss_curves.png')  # assumed filename; pyplot's savefig requires one
Example #20
def draw_graph():
    G = nx.DiGraph()
    nodes, edges = get_graphs(1567)
    G.add_nodes_from(nodes)  # add the node set
    G.add_edges_from(edges)  # add the edge set
    nx.draw(G)
    plt.show()
    time.localtime()
    plt.savefig('graph.png')  # assumed filename; pyplot's savefig requires one
Example #21
def plot_f1_per_interval(f1_scores, name, intervals, save=False):
    plt.plot(intervals, f1_scores)
    plt.xticks(intervals)
    plt.xlabel('Interval (seconds)')
    plt.ylabel('F1 Score')
    plt.title(name)
    if save:
        plt.savefig(name + '.png')
    else:
        plt.show()
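
A minimal usage sketch, assuming matplotlib.pyplot is imported as plt at module level; the F1 scores and intervals below are invented for illustration.

import matplotlib.pyplot as plt

intervals = [1, 5, 10, 30, 60]
f1_scores = [0.62, 0.71, 0.75, 0.78, 0.80]  # fabricated scores
plot_f1_per_interval(f1_scores, 'demo_classifier', intervals, save=False)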
Example #22
def test_fit_microlensing_event():
    true_params = {"u0" : 0.3, "t0" : 100., "tE" : 20., "m0" : 15.}
    
    light_curve = SimulatedLightCurve(mjd=np.linspace(0., 200., 300.), mag=15., error=[0.1])
    light_curve.addMicrolensingEvent(**true_params)
    
    params = analyze.fit_microlensing_event(light_curve)
    plt.errorbar(light_curve.mjd, light_curve.mag, light_curve.error, color="k", marker=".")
    plt.plot(light_curve.mjd, analyze.microlensing_model(params, light_curve.mjd), "r-")
    plt.savefig('microlensing_fit.png')  # assumed filename; pyplot's savefig requires one
Example #23
def plot_rf_estimators(f1_scores, name, save=False):
    estimator_counts = [10, 50, 100, 200, 300, 500, 700]
    plt.plot(estimator_counts, f1_scores)
    plt.xticks(estimator_counts)
    plt.xlabel('Estimator counts')
    plt.ylabel('F1 Score')
    # plt.title(name)
    if save:
        plt.savefig(name + '.png')
    else:
        plt.show()
Example #24
def plot_coeffMain(self, tit):
    import matplotlib.pyplot as plt
    import cairo
    from igraph.drawing.text import TextDrawer

    pos = findPos(tit)
    for i in range(self.number_table):
        plt = plot_coefficient(self, i, pos)
        # Save the plot
        plt.save("temp\\" + tit + "_graph.png")
        plt.show()
Example #25
def graph_rewards_vs_alphas(file, iter):

    df = pd.read_csv(file, sep=',', header=None)
    df = df.T
    df.columns = iter
    df = df[[0.0001, 0.0002, 0.0003, 0.0004]]
    graph_plot = df.plot(linewidth=0.75)
    graph_plot.set_xlabel('Generations')
    graph_plot.set_ylabel('Reward')
    # plt.show()
    plt.savefig('rewards_vs_alphas.png')  # assumed filename; pyplot's savefig requires one
Example #26
def plot_fitted(data, bins=100, dist_name='gamma', show=True,savefn=None):
    """
    Fit the given 1-d distribution *and* plot the fitted distribution against the
    histogram. 
    """
    plt.figure()
    plt.hist(data,bins=bins,normed=True)
    plt.plot(fit_1d(data, dist_name))
    if savefn is not None:
        plt.savefig(savefn, dpi=100)
    if show:
        plt.show()
Example #27
def plot_fitted(data, bins=100, dist_name='gamma', show=True, savefn=None):
    """
    Fit the given 1-d distribution *and* plot the fitted distribution against the
    histogram. 
    """
    plt.figure()
    plt.hist(data, bins=bins, normed=True)
    plt.plot(fit_1d(data, dist_name))
    if savefn is not None:
        plt.savefig(savefn, dpi=100)
    if show:
        plt.show()
Example #28
def sourceplot(sourcelist,
               filename=None,
               figsize=(12, 8),
               showlegend=True,
               showspline=True,
               marker=None):
    """
	I show you a plot of a list of Source objects.
	"""

    plt.figure(figsize=figsize)
    for s in sourcelist:
        if marker is None:
            plt.plot(s.ijds,
                     s.imags,
                     marker="None",
                     color=s.plotcolour,
                     linestyle="-",
                     label="%s" % (s.name))
        else:
            plt.plot(s.ijds,
                     s.imags,
                     marker=marker,
                     color=s.plotcolour,
                     linestyle="none",
                     label="%s" % (s.name))

        if showspline:
            spline = s.spline()
            xs = np.arange(s.ijds[0], s.ijds[-1], 0.02)
            ys = spline.eval(jds=xs)
            plt.plot(xs,
                     ys,
                     "-",
                     color="red",
                     zorder=+20,
                     label="%s.spline()" % (s.name))

            plt.plot()

    # Something for astronomers only : we invert the y axis direction !
    axes = plt.gca()
    axes.set_ylim(axes.get_ylim()[::-1])
    plt.xlabel("HJD - 2400000.5 [days]", fontsize=14)
    plt.ylabel("Magnitude (relative)", fontsize=14)

    if showlegend:
        plt.legend()

    if filename:
        plt.savefig(filename)
    else:
        plt.show()
Example #29
    def static_draw(self, ax, n_max=10, h=0.01, x0=0, y0=0):

        color_mat = np.zeros([self.N, self.M, 3])
        ax.set_xticks([])
        ax.set_yticks([])
        n = np.array(Mand.iterate(self.N, self.M, n_max, h, x0, y0), dtype=int)
        for i in range(self.N):
            for j in range(self.M):
                color_mat[i, j] = colors[n[i, j] % n_colors]
        ax.imshow(color_mat, cmap="gray", interpolation="nearest")
        plt.save("Mandelbrot_static.png", dpi=300)
        plt.show()
Example #30
def plot_graphs(self, folder):
    import matplotlib.pyplot as plt


    if(self.number_table > 0):
         for tab in xrange(self.number_table):
            for n in range(len(self.graphList[0].centrality)):
                if(len(self.graphList[tab].centrality[n]) > 0):
                    networkFile = self.graphList[tab].name


                    try:
                        if(os.name == "nt"):
                            pos = networkFile.index("\\")
                        else:
                            pos = networkFile.index("/")
                    except:
                        pos = -1

                    if(pos != -1):
                        networkFile = networkFile[pos+1:]

                    try:
                        pos = networkFile.index(".txt")
                    except:
                        pos = -1
                    if(pos != -1):
                        networkFile = networkFile[:pos]
                    

                    if(os.name == "nt"):
                        path = folder +"\\"+ networkFile + "\\"
                    else:
                        path = folder +"/"+ networkFile + "/"

                    if not os.path.exists(path): 
                        os.makedirs(path)

                    if(os.name == "nt"):
                        path = folder +"\\"+ networkFile + "\\" + columnNames[n+1]
                    else:
                        path = folder +"/"+ networkFile + "/" + columnNames[n+1]


                    #plt.scatter(self.graphList[tab].centrality[2],self.graphList[tab].centrality[6])

                    #plt.show()


                    plt = plot_coefficient(self, tab, n)
                    print(type(plt))
                    # Save the plot
                    plt.save(path + ".png")
Example #31
def make_curve(x, y, title, x_label, y_label, save_path=None):
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)

    plt.plot(x, y, color='green')
    # plt.plot(x_axix, train_acys, color='green', label='training accuracy')
    # plt.legend()  # show the legend

    plt.show()
    if save_path:
        plt.savefig(save_path)
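
A minimal usage sketch, assuming matplotlib.pyplot is imported as plt at module level; the accuracy values below are invented for illustration.

import matplotlib.pyplot as plt

epochs = list(range(1, 11))
accuracy = [0.61, 0.68, 0.72, 0.75, 0.78, 0.80, 0.81, 0.82, 0.83, 0.83]  # fabricated
make_curve(epochs, accuracy, 'Training accuracy', 'epoch', 'accuracy',
           save_path='train_acc.png')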
Example #32
    def plot_corner(self, point_estimate='mean',plotfile=None,show=True):
        """ Plot the 1D and 2D marginal distributions of the inferred parameters

        Parameters
        ----------
        plotfile : str, optional
            Name of a file to write the plot to
        show : bool, optional, default False
            Whether to show the plot window

        """
        #For consistency's sake I'm going to re-invent the wheel here, and manually create a grid of plots from arviz, rather than letting corner do the work. This is because I want to make sure specific entries are plotted in a specific order.
        
        plot_vars = self.plot_trace_vars#[:-1]
        chol_coords = []
        if self.ndim == 2:
            #chol_coords.append(0)
            #chol_coords.append(1)
            chol_coords=(0,1)
            coords = {"chol_corr_dim_0":[0], "chol_corr_dim_1":[1]}
            #plot_vars.append("chol_corr[0,1]")
        else:
            coords = {"chol_corr_dim_0":[], "chol_corr_dim_1":[]}
            d0 = []
            d1 = []
            #raise NotImplementedError("Corner plots for data with more than 2 dimensions are not available yet!")
            for i in range(self.ndim - 1):
                for j in range(1,self.ndim - 1):
                    d0.append(i)
                    d1.append(j)
                    #print(i,j)
                    #chol_coords.append([i,j])#"chol_corr["+str(i)+","+str(j)+"]")

            coords["chol_corr_dim_0"] = xr.DataArray(d0, dims=['pointwise_sel'])
            coords["chol_corr_dim_1"] = xr.DataArray(d1, dims=['pointwise_sel'])
        #print(plot_vars)
        #coords = {"chol_corr":chol_coords}
        #print(coords)
        #corner = gs.GridSpec(rows, cols, figure=fig
        az.plot_pair(self.trace,
                     var_names = plot_vars,
                     coords = coords,
                     kind="kde",
                     marginals=True,
                     point_estimate=point_estimate,
                     show=show,
            )

        if isinstance(plotfile, str) and not show:
            plt.savefig(plotfile)
        elif not show:
            raise TypeError("plotfile must be a string")
def plot_feature_importance(cols, lgb_model):
    attr = {
        k: v
        for k, v in zip(cols, lgb_model.feature_importance()) if v > 0
    }
    attr = sorted(attr.items(), key=lambda x: x[1], reverse=False)
    x1, y1 = zip(*attr)
    i1 = range(len(x1))
    plt.figure(num=None, figsize=(9, 7), dpi=100, facecolor='w', edgecolor='k')
    plt.barh(i1, y1)
    plt.title("LGBM importance")
    plt.yticks(i1, x1)
    plt.save("eda_image/lightgbm_importance.png")
Example #34
def save_heights(detect):
    load_name = r"/media/kathrada/My Passport/CleanData/"
    save_name = r"/media/kathrada/My Passport/Heights/"
    if not os.path.exists(save_name):
        os.makedirs(save_name)
    for fn_ in sorted(os.listdir(load_name), key=int):
        fn = os.path.join(load_name, fn_)
        print(fn)
        img = misc.imread(fn, mode='RGB')
        res = detect(img)
        add_height_labels(res)
        show_bboxes(res)
        plt.savefig(os.path.join(save_name, fn_))
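Example #35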
def show_random_data(meta_train_dataset):
    """Plot one random image + corresponding mask from training dataset"""
    # Get data by class, e.g. data_by_class[0]: all bus tuples; im, mask, label_idx = meta_train_dataset.dataset[0][0]
    classes = meta_train_dataset.dataset.labels
    data_by_class = meta_train_dataset.dataset

    # Choose a random image + mask pair out of a random class
    rnd_class_idx = random.randint(0, len(classes) - 1)
    rnd_class = classes[rnd_class_idx]
    rnd_im, rnd_mask, _ = random.choice(data_by_class[rnd_class_idx])
    visualize([rnd_im, rnd_mask], rnd_class)
    #plt.show()
    plt.savefig('random.png')
Example #36
def plot_confusion(y, y_pred, save=None, cmap='Blues'):
    """
    Plot confusion matrix
    
    Parameters
    ----------
    y
        ground truth labels
    y_pred 
        predicted labels
    save
        save the figure
    cmap
        color map
        
    Return
    ------
    F1 score
    NMI score
    ARI score
    """

    y_class, pred_class = np.unique(y), np.unique(y_pred)

    cm = confusion_matrix(y, y_pred)
    f1 = f1_score(y, y_pred, average='micro')
    nmi = normalized_mutual_info_score(y, y_pred)
    ari = adjusted_rand_score(y, y_pred)

    cm = cm.astype('float') / cm.sum(axis=0)[np.newaxis, :]

    plt.figure(figsize=(14, 14))
    sns.heatmap(cm,
                xticklabels=y_class,
                yticklabels=pred_class,
                cmap=cmap,
                square=True,
                cbar=False,
                vmin=0,
                vmax=1)

    plt.xticks(rotation=45, horizontalalignment='right')  #, fontsize=14)
    plt.yticks(fontsize=14, rotation=0)
    plt.ylabel('Leiden cluster', fontsize=18)

    if save:
        plt.savefig(save, bbox_inches='tight')
    else:
        plt.show()

    return f1, nmi, ari
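
A minimal usage sketch, assuming the module-level imports the function relies on (numpy as np, seaborn as sns, matplotlib.pyplot as plt, and the sklearn.metrics functions it calls); the labels below are invented for illustration.

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import (confusion_matrix, f1_score,
                             normalized_mutual_info_score, adjusted_rand_score)

y_true = np.array([0, 0, 1, 1, 2, 2, 2])      # fabricated ground truth
y_clusters = np.array([0, 0, 1, 2, 2, 2, 2])  # fabricated cluster assignments
f1, nmi, ari = plot_confusion(y_true, y_clusters)
print(f1, nmi, ari)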
Example #37
def label_cluster(dico_dstb, dico_dstb_seq, Mix_cluster_detail,
                  first_distribution, second_distribution):
    cluster_label_list = []
    cluster_label_count = {"Mix": 0, "split": 0, "identical": 0, "join": 0}
    #print Mix_cluster_detail
    for key in dico_dstb.keys():
        list_loc = []
        for seq in dico_dstb[key]:
            if seq in dico_dstb_seq.keys():
                list_loc.append(dico_dstb_seq[seq])
            else:
                print(seq)
        if len(list(set(list_loc))) == 1:
            cluster_label_list.append(list_loc[0])
            cluster_label_count[list_loc[0]] += len(dico_dstb[key])
        else:
            #print list_loc
            cluster_label_list.append("Mix")
            cluster_label_count["Mix"] += len(dico_dstb[key])
    #print cluster_label_count,
    label_dico = dict(collections.Counter(cluster_label_list))
    plt.bar(range(len(label_dico)), label_dico.values(),
            align='center')  # python 2.x
    plt.xticks(range(len(label_dico)), label_dico.keys())  # in python 2.x
    plt.ylabel('# cluster')
    #plt.ylim((0, 170))
    #cluster_one = first_distribution.split("_")[0]  + "_" + first_distribution.split("_")[3].split(".")[0] # simulation
    #cluster_two = second_distribution.split("_")[0] + "_" + second_distribution.split("_")[3]  # simulation
    cluster_one = "I1 IMGT"  #first_distribution.split(".")[0]
    cluster_two = "FaIR"  #second_distribution.split(".")[0]
    name = "Distribution comparison : " + str(cluster_one) + " and " + str(
        cluster_two)
    plt.title(name)
    annotation = []
    for k in label_dico.keys():
        #print k
        annotation.append("{:.2f}".format(
            float(cluster_label_count[k]) / sum(cluster_label_count.values())))
    for x, y, z in zip(range(len(label_dico)), label_dico.values(),
                       annotation):
        #print x,y
        plt.annotate(
            z,  # this is the text
            (x, y),  # this is the point to label
            textcoords="offset points",  # how to position the text
            xytext=(0, 10),  # distance from text to points (x,y)
            ha='center')  # horizontal alignment can be left, right or center

    plt.show()
    plt.savefig('cluster_labels.png')  # assumed filename; pyplot's savefig requires one
    """
Example #38
def build_diff_plot(obs, mag_name, ref_name, mag_err_name, ref_err_name, 
        plot_format='b.', show_stats=True, clipping=1, filename=None, title=None):
    """
    Plot the difference between reference magnitudes and observed magnitudes for a 
    given set of observations
    
    Parameters
    ----------
    obs: `astropy.table.Table`
        Catalog of observations to compare
    mag_name: str
        Name of the magniude column in ``obs`` to compare
    ref_name: str
        Name of the reference column in ``obs`` to compare
    mag_err_name: str
        Name of the magnitude error column
    ref_err_name: str
        Name of the reference error column
    plot_format: str
        Format for matplotlib plot points
    show_stats: bool
        Whether or not to show the mean and standard deviation of the observations
    """
    import matplotlib
    import matplotlib.pyplot as plt
    import numpy as np
    
    diff = obs[mag_name]-obs[ref_name]
    # Remove outlier sources
    plot_obs = obs[np.sqrt((diff-np.mean(diff))**2) < clipping*np.std(diff)]
    # build plot
    x = plot_obs[mag_name]
    y = plot_obs[mag_name]-plot_obs[ref_name]
    err = np.sqrt(plot_obs[mag_err_name]**2+plot_obs[ref_err_name]**2)
    plt.errorbar(x, y, yerr=err, fmt=plot_format)
    plt.xlabel(mag_name)
    plt.ylabel('Diff from Std Sources')
    if title is not None:
        plt.title(title)
    # show stats
    if show_stats:
        logger.info('mean: {0}'.format(np.mean(y)))
        logger.info('std dev: {0}'.format(np.std(y)))
    # Save plot or plot to screen
    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
    plt.close()
    return y
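
A minimal usage sketch with fabricated photometry, assuming astropy is available; show_stats is disabled here because the function's logger is defined elsewhere in the original module.

import numpy as np
from astropy.table import Table

rng = np.random.default_rng(0)
mag = rng.uniform(14.0, 18.0, 50)
obs = Table({'mag': mag,
             'ref_mag': mag + rng.normal(0.0, 0.05, 50),  # fabricated reference magnitudes
             'mag_err': np.full(50, 0.03),
             'ref_mag_err': np.full(50, 0.02)})
build_diff_plot(obs, 'mag', 'ref_mag', 'mag_err', 'ref_mag_err',
                clipping=3, show_stats=False)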
Example #39
def write_bias_graph(white, aa):
    white.hist(color='blue', alpha=.3)
    aa.Prediction.hist(color='orange', alpha=.3)
    handles = [
        Rectangle((0, 0), 1, 1, color=c, ec="k", alpha=.2)
        for c in ['blue', 'orange']
    ]
    labels = ["More White Words", "More AA Words"]
    plt.title(
        "Education Level Prediction Histogram For Tweets with Race-Word Distributions"
    )
    plt.legend(handles, labels)
    plt.xlabel("Education Level")
    plt.save("BiasDetectionHistogram.png")
Example #40
def gammaTransform():
    img = cv2.imread('d:/img0.jpg')
    # Compute a histogram for each channel separately
    hist_b = cv2.calcHist([img], [0], None, [256], [0, 256])
    hist_g = cv2.calcHist([img], [1], None, [256], [0, 256])
    hist_r = cv2.calcHist([img], [2], None, [256], [0, 256])
    # Apply gamma correction: a gamma below 1 strongly boosts dark detail and slightly boosts bright detail
    img_corrected = gamma_trans(img, 0.5)
    cv2.imwrite('gamma_corrected.jpg', img_corrected)

    # Compute per-channel histograms after gamma correction
    hist_b_corrected = cv2.calcHist([img_corrected], [0], None, [256],
                                    [0, 256])
    hist_g_corrected = cv2.calcHist([img_corrected], [1], None, [256],
                                    [0, 256])
    hist_r_corrected = cv2.calcHist([img_corrected], [2], None, [256],
                                    [0, 256])

    # Visualize the histograms
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    fig = plt.figure()

    pix_hists = [[hist_b, hist_g, hist_r],
                 [hist_b_corrected, hist_g_corrected, hist_r_corrected]]

    pix_vals = range(256)
    for sub_plt, pix_hist in zip([121, 122], pix_hists):
        ax = fig.add_subplot(sub_plt, projection='3d')
        for c, z, channel_hist in zip(['b', 'g', 'r'], [20, 10, 0], pix_hist):
            cs = [c] * 256
            ax.bar(pix_vals,
                   channel_hist,
                   zs=z,
                   zdir='y',
                   color=cs,
                   alpha=0.618,
                   edgecolor='none',
                   lw=0)

        ax.set_xlabel('Pixel Values')
        ax.set_xlim([0, 256])
        ax.set_ylabel('Channels')
        ax.set_zlabel('Counts')

    plt.show()
    plt.savefig('plotcv.jpg')
Example #41
def plot_quality_old(img, tissue, site, caption, cmap='red', title=None):
        import numpy as np
        import nibabel as nb
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from matplotlib.colors import ListedColormap
        import matplotlib.gridspec as gridspec

        # grab img data and coords
        img_data = np.rot90(nb.load(img).get_data())
        midpoint = img_data.shape[2] * 0.5
        if site == 'HB':
            midpoint = midpoint - 30
        coords = [midpoint - 20, midpoint, midpoint + 20, midpoint + 30, midpoint + 50]

        # plot
        fig = plt.figure()
        fig.set_size_inches(50, 30)
        gs = gridspec.GridSpec(1, 5)

        for i, coord in enumerate(coords):
            if i in xrange(5):
                ax = plt.subplot(gs[0, i])

            ax.imshow(img_data[:, :, int(coord)], cm.bone)
            ax.axes.get_yaxis().set_visible(False)
            ax.axes.get_xaxis().set_visible(False)

            ax.set_xlim(15, 175)
            if site == 'HB':
                ax.set_ylim(230, 40)
            else:
                ax.set_ylim(220, 50)

            plt.subplots_adjust(wspace=0.01, hspace=0.01)
            # ax.set_aspect('equal')

            # grab tissue data
            if tissue:
                tissue_data = edge_detect_ero(np.rot90(nb.load(tissue).get_data()))
                tissue_data[tissue_data == 0] = np.nan
                ax.imshow(tissue_data[:, :, int(coord)], ListedColormap(cmap))

        plt.figtext(0.13, 0.625, caption, fontsize=50, color='r')

        if title:
            plt.savefig(title, bbox_inches='tight')
Example #42
def plot_record_dict(record_dict, plot_fn=None):
    """
    Generates a plot of the given `record_dict`.

    Parameters
    ----------
    record_dict : dict
        See return value of `couscous.training.train_early_stopping`.
    plot_fn : str
        If provided, the plot is saved.
    """

    plt.figure()

    fields = sorted(record_dict.keys())

    def plot_field(field, i_loss=0):
        epochs = [i[0] for i in record_dict[field]]
        values = [i[1][i_loss] for i in record_dict[field]]
        plt.plot(epochs, values, label=field)

    # Determine the maximum number of outputs for any loss function
    n_subplots = 0
    loss_fields = []
    for field in [i for i in fields if "loss" in i]:
        loss_fields.append(field)
        n_subplots = max(n_subplots, len(record_dict[field][0][1]))

    # Subplot every loss function output separately
    for i_subplot in xrange(n_subplots):
        plt.subplot(n_subplots, 1, i_subplot + 1)
        for field in loss_fields:
            if len(record_dict[field][0][1]) > i_subplot:
                plot_field(field, i_subplot)
        plt.legend()
        plt.grid()
        plt.ylabel("Loss [" + str(i_subplot) +  "]")

    if plot_fn is not None:
        plt.savefig(plot_fn)
Example #43
def plot (x_plotName, x_axisLabels, x_legends, x_data):
	""" x_plotName: name of output file
		x_axisLabels: labels of axis
		x_legends: names of data files
		x_data: a multi-D array of data """

	assert(len(x_axisLabels) == len(x_legends))
	assert(len(x_data) == len(x_axisLabels))
	assert(x_plotName and x_plotName != "")

	indices = np.arange(len(x_axisLabels))
	width = 0.5/len(x_legends)       # the width of the bars

	fig = plt.figure()
	ax = fig.add_subplot(111)

	predefinedColors = ['r', 'g', 'b'] #FIXME
	graphs = []
	alpha = 0.5

	for i in range(len(x_legends)):
		rect = ax.bar(indices+i*width, x_data[i], width, color=predefinedColors[i], alpha=0.5)
		graphs.append(rect)
	# add labels, ticks and a legend
	ax.set_ylabel('Count')
	ax.set_xticks(indices + width*len(x_legends)/2)
	ax.set_xticklabels( x_axisLabels )
	ax.legend( graphs, x_legends )

	def autolabel(rects):
		# attach some text labels
		for rect in rects:
			height = rect.get_height()
			ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height), ha='center', va='bottom')

	for gr in graphs:
		autolabel(gr)
	plt.savefig(x_plotName)
Example #44
import sys
import numpy as np
import matplotlib.pyplot as plt
fl=sys.argv[1]
dist=int(sys.argv[2])
from bx.bbi.bigwig_file import BigWigFile

genes=read.dat("/home/ssaberi/resources/list.genes.txt",'\t')
table=read.dat("/projects/epigenomics/MarcoJuliaPon/peaks.txt",'\t')
mygenes=read.dat("/projects/epigenomics/MarcoJuliaPon/mygenes.txt",'\t')
ens=[]
for i in mygenes:
	for gn in genes:
		if i in gn[0]:
			ens.append(gn[1])
			break

genespos=read.read_gene_pos('/home/ssaberi/resources/hg19v69_genes.TSS_2000.pc.A03480.H3K27me3.GE02.coverage')
genesbed=bedtools.makebed_genpos(ens,genespos,100000)
              


f = open(fl)
bw = BigWigFile(file=f)
mat=[]
for bed_i in genesbed:
   vals = bw.get( bed_i[0], bed_i[1], bed_i[2])
   mat.append(np.array(vals))
mat=np.array(mat)
plt.matshow(mat,aspect='auto',cmap='YlOrBr')
fl=fl[-fl[::-1].index('/'):-fl[::-1].index('.')]
plt.save(fl+".pdf")
Example #45
import numpy as np
import matplotlib.pyplot as plt
from numpy import genfromtxt
from matplotlib import cm
from matplotlib.patches import Circle


#Load results from txt file
path = np.transpose(np.genfromtxt('shortest_path.txt', delimiter=','))
nodes = np.transpose(np.genfromtxt('shortest_path_nodes.txt', delimiter=','))

#Plot data
fig = plt.figure(figsize=(10,10))
ax = plt.gca()
ax.set_xlim([-1,12])
ax.set_ylim([-1,12])
#Solution from planner
plt.plot(path[0],path[1])
#Nodes of search tree
plt.scatter(nodes[0],nodes[1],s=0.5)

ax.add_patch(Circle([3.0, 2.0],2.0, facecolor="black",alpha=0.7,edgecolor="none"))
ax.add_patch(Circle([6.0, 8.0],2.0, facecolor="black",alpha=0.7,edgecolor="none"))
ax.add_patch(Circle([10.0, 10.0],0.5, facecolor="green",alpha=0.3,edgecolor="none"))
plt.show()
plt.save("shortest_path.png")

Example #46
import numpy as np
from mypca import mypcp
from numpy.linalg import norm, svd, matrix_rank
import matplotlib
matplotlib.use('AGG')

from matplotlib import pyplot as plt


M = np.loadtxt('expression_SC_superfinal.tsv', skiprows=1,
               usecols=range(1, 2578))
print('Loaded Data with shape = %s' % str(M.shape))
L, S, _, _ = mypcp(M)

error = 100*(norm(M - L, 'fro')/norm(M, 'fro'))
plt.semilogy(svd(M, compute_uv=False), label='Data')
plt.semilogy(svd(L, compute_uv=False), label='Low Rank')
plt.legend()
print('Error = %f' % error)
print('Rank Data = %d' % matrix_rank(M))
print('Rank L = %d' % matrix_rank(L))

plt.savefig('plot.png')
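Example #47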
parser.add_argument("--field", type=str, default="rho_avg")
args = parser.parse_args()

# If files have not been specified, use all files named OUT* in the current directory
if len(args.files) == 0:
    args.files = glob.glob("ZPROF*")

# Construct the Profile data object
profile_data = Profiles(sorted(args.files))

fig, axis = plt.subplots(1, 1, figsize=(10, 6))

if args.field == "rho_avg":
    # Construct the density field from the parameters of the system
    rho = (-profile_data.parameters["B_therm"] / profile_data.parameters["D_visc"] * profile_data["Temp_avg"]
          + profile_data.parameters["B_comp"] / profile_data.parameters["D_visc"] * profile_data["Chem_avg"]
          + (-profile_data.parameters["S_therm"] + profile_data.parameters["S_comp"]) * profile_data["z1"])
else:
    rho = profile_data[args.field]

# Plot faded history of the density profile
for i, frame in enumerate(profile_data):
    axis.plot(rho[i], frame["z1"], alpha = 0.1, color="black")

# Plot the most recent density profile in red
axis.plot(rho[-1], profile_data[-1]["z1"], color="red")
axis.set_ylabel("z")
axis.set_xlabel("rho")

plt.savefig(args.field + ".png")  # assumed output name; savefig requires a filename
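Example #48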
# Create linear model
X = sm.add_constant(x)
model = sm.OLS(y,X)
f = model.fit()
# results summary
f.summary()

coeff = f.params

# plot:
line=[]
line2 = []
for j in fico:
	line.append(coeff[0] + coeff[1]*j + coeff[2]*10000)
	line2.append(coeff[0] + coeff[1]*j + coeff[2]*30000)

plt.close()
plt.scatter(fico,intrate)
plt.hold(True)
plt.plot(fico, line, label = '$10,000 Requested', color = 'blue')
plt.plot(fico, line2, label = '$30,000 Requested', color = 'green')
plt.legend(loc = 'upper right')
plt.ylabel('Interest Rate in %')
plt.xlabel('FICO Score')
plt.savefig('Fico_Scatter_10000&30000.png')

# Load to new CSV file
loansData.to_csv('loansData_clean.csv', header=True, index=False)

  
  
Example #49
test_data = gl.SFrame.read_csv('test_data.csv')
model = gl.load_model('74per_model')
with open('class_map.pickle', 'rb') as fhand:
    class_map = pickle.load(fhand)

inv_map = {v: k for k, v in class_map.items()}

pred = model.predict_topk(test_data, k=1)

table = gl.SFrame({'true': test_data['category'], 'predicted': pred['class']})
table = table.to_dataframe()

table['target_label'] = table['true'].apply(lambda x: inv_map[x])
table['predicted_label'] = table['predicted'].apply(lambda x: inv_map[x])
labels = sorted(class_map.keys())

cm = confusion_matrix(table['target_label'], table['predicted_label'], labels)
cm_normalized = np.round(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], 2)

fig = plt.figure()
f, ax = plt.subplots(1, 1, figsize=(8, 8))
cax = ax.matshow(cm_normalized)

plt.title('Confusion matrix of the classifier')
f.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix.png')
Example #50
from matplotlib import pyplot

filename= 'time_collision.dat'
lines = open(filename).readlines()
x = [int(line.strip()) for line in lines]
bins = [i*1000 for i in range(10)]

pyplot.hist(x,bins=bins, facecolor='green', alpha=0.75)
pyplot.grid(True)

pyplot.savefig('tempi_collisione.png')

Example #51
def overlap_skeletons(image, big_skel, norm_skel, aplpy_plot=True,
                      save_figure=True, save_name="skeletons",
                      output_file_type="png", rasterized=True,
                      vmin=None, vmax=None):
    '''
    Make a nice aplpy plot of the different skeletons. The original image
    should be passed, and it will be expanded to match the dimensions of the
    regridded skeleton.
    '''

    # Load files in

    image, hdr = fits.getdata(image, header=True)
    image[np.isnan(image)] = 0.0

    norm_skel = fits.getdata(norm_skel)
    norm_skel = (norm_skel > 0).astype(int)

    big_skel = fits.getdata(big_skel)
    big_skel = (big_skel > 0).astype(int)

    big_skel_hdu = fits.PrimaryHDU(big_skel, header=hdr)

    # The original image and the normal skeleton should have the same
    # dimensions.
    assert image.shape == norm_skel.shape

    image = zoom(image,
                 [i/float(j) for j, i in zip(image.shape, big_skel.shape)])

    assert image.shape == big_skel.shape

    hdr['NAXIS1'] = image.shape[1]
    hdr['NAXIS2'] = image.shape[0]

    image_hdu = fits.PrimaryHDU(image, header=hdr)

    norm_skel_zoom = \
        zoom(norm_skel,
             [i/float(j) for j, i in zip(norm_skel.shape, big_skel.shape)],
             order=0)

    assert norm_skel_zoom.shape == big_skel.shape

    norm_skel_hdu = fits.PrimaryHDU(norm_skel_zoom, header=hdr)

    if aplpy_plot:

        try:
            import aplpy
        except ImportError:
            ImportError("Cannot import aplpy. Do not enable aplpy.")

        fig = aplpy.FITSFigure(image_hdu)

        fig.show_grayscale(invert=True, stretch="arcsinh", vmin=vmin,
                           vmax=vmax)

        fig.tick_labels.set_xformat('hh:mm')
        fig.tick_labels.set_yformat('dd:mm')

        fig.tick_labels.set_font(size='large', weight='medium',
                                 stretch='normal', family='sans-serif',
                                 style='normal', variant='normal')

        fig.axis_labels.set_font(size='large', weight='medium',
                                 stretch='normal', family='sans-serif',
                                 style='normal', variant='normal')

        # fig.add_grid()

        # NOTE! - rasterization will only work with my fork of aplpy!
        # git@github.com:e-koch/aplpy.git on branch 'rasterize_contours'
        fig.show_contour(norm_skel_hdu, colors="red", linewidths=1.5,
                         rasterize=True)

        fig.show_contour(big_skel_hdu, colors="blue", rasterize=True)

        fig.show_colorbar()
        fig.colorbar.set_label_properties(size='large', weight='medium',
                                          stretch='normal',
                                          family='sans-serif',
                                          style='normal', variant='normal')
        fig.colorbar.set_axis_label_text('Surface Brightness (MJy/sr)')
        fig.colorbar.set_axis_label_font(size='large', weight='medium',
                                         stretch='normal',
                                         family='sans-serif',
                                         style='normal', variant='normal')

        if save_figure:
            fig.save(save_name+"."+output_file_type)
            fig.close()

    else:
        # Using matplotlib
        from astropy.visualization import scale_image

        scaled_image = scale_image(image, scale='asinh', asinh_a=0.005)

        p.imshow(scaled_image, interpolation='nearest', origin='lower',
                 cmap='binary')

        p.contour(norm_skel_zoom, colors='r', linewidths=2)
        p.contour(big_skel, colors='b', linewidths=1)

        if save_figure:
            p.save(save_name+"."+output_file_type, rasterized=rasterized)
            p.close()
        else:
            p.show(block=True)
Example #52
plt.plot(X_coord, LL_5, label='d=5')
plt.plot(X_coord, LL_10, label='d=10')
plt.plot(X_coord, LL_20, label='d=20')
plt.plot(X_coord, LL_30, label='d=30')
plt.xlabel('Iteration')
plt.ylabel('Joint Log Likelihood')
plt.title('Joint Log Likelihood MAP convergence by d')
plt.legend(loc=5)
plt.savefig('JLL.png')
plt.show()

plt.plot(X_gibbs, LL_gibbs)
plt.xlabel('Iteration')
plt.ylabel('Joint Log Likelihood')
plt.title('Joint Log Likelihood using Gibbs')
plt.savefig('JLL_Gibbs.png')
plt.show()

U = U10
V = V10

f = open(base_dir+'/movies.txt')
i = 0
movies = dict()
for line in f:
    movie_name = line.split('\n')[0]
    movies[i] = movie_name
    i += 1

import pandas as pd
import random