Example #1
def valueChartList(inputlist,path):
    seen_values = Counter()

    for entry in inputlist:
        seen_values += Counter(entry['location-value-pair'].values())

    seen_values = seen_values.most_common()[:25]
    seen_values_pct = map(itemgetter(1), tupleCounts2Percents(seen_values))
    seen_values_pct = ['{:.1%}'.format(item)for item in seen_values_pct]

    plt.figure()
    numberchart = plt.bar(range(len(seen_values)), list(map(itemgetter(1), seen_values)), width=0.9, alpha=0.6)
    plt.xticks(range(len(seen_values)), list(map(itemgetter(0), seen_values)), ha='left')

    plt.ylabel('Occurrences')

    plot_margin = 1.15
    x0, x1, y0, y1 = plt.axis()
    plt.axis((x0,
              x1,
              y0,
              y1*plot_margin))

    plt.tick_params(axis='both', which='major', labelsize=8)
    plt.tick_params(axis='both', which='minor', labelsize=8)
    plt.tight_layout()

    autolabel(numberchart,seen_values_pct)

    plt.savefig(path)
    plt.clf()
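This example relies on two helpers, tupleCounts2Percents and autolabel, that are not shown anywhere on this page. The sketch below is an assumption about what they plausibly do, inferred only from how they are called above; it is not the original implementation.

import matplotlib.pyplot as plt

def tupleCounts2Percents(tuple_counts):
    # Assumed behaviour: turn (key, count) pairs into (key, fraction-of-total) pairs.
    total = float(sum(count for _, count in tuple_counts))
    return [(key, count / total) for key, count in tuple_counts]

def autolabel(rects, labels):
    # Assumed behaviour: write one label (here a percentage string) above each bar.
    for rect, label in zip(rects, labels):
        plt.text(rect.get_x() + rect.get_width() / 2.0, rect.get_height(),
                 label, ha='center', va='bottom', fontsize=8)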
Example #2
def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
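A minimal usage sketch for scree_plot, assuming scikit-learn is installed; the data is random and only illustrates the call.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

X = np.random.RandomState(0).normal(size=(200, 10))   # toy data with 10 features
pca = PCA(n_components=10).fit(X)                      # fitted PCA instance, as the docstring expects
scree_plot(pca)                                        # fname=None, so the plot is shown interactively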
Example #3
def predicted_probabilities(y_true, y_pred, n_groups=30):
    """Plots the distribution of predicted probabilities.

    Parameters
    ----------
    y_true : array_like
        Observed labels, either 0 or 1.
    y_pred : array_like
        Predicted probabilities, floats on [0, 1].
    n_groups : int, optional
        The number of groups to create. The default value is 30.

    Notes
    -----
    .. plot:: pyplots/predicted_probabilities.py
    """
    plt.hist(y_pred, n_groups)
    plt.xlim([0, 1])
    plt.xlabel('Predicted Probability')
    plt.ylabel('Count')

    title = 'Distribution of Predicted Probabilities (n = {})'
    plt.title(title.format(len(y_pred)))

    plt.tight_layout()
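A quick way to exercise predicted_probabilities with synthetic data (the labels and probabilities below are random placeholders):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=500)   # observed 0/1 labels (not used by the histogram itself)
y_pred = rng.beta(2, 5, size=500)      # fake predicted probabilities on [0, 1]
predicted_probabilities(y_true, y_pred, n_groups=30)
plt.show()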
Example #4
def chartProperties(counter,path):

    seen_properties = sorted(counter, key=lambda x: x[1],reverse=True)
    seen_values_pct = map(itemgetter(1), tupleCounts2Percents(seen_properties))
    seen_values_pct = ['{:.1%}'.format(item)for item in seen_values_pct]

    plt.figure()

    numberchart = plt.bar(range(len(seen_properties)), list(map(itemgetter(1), seen_properties)), width=0.9, alpha=0.6)
    plt.xticks(range(len(seen_properties)), list(map(itemgetter(0), seen_properties)), rotation=90, ha='left')

    plt.ylabel('Occurrences')

    plot_margin = 1.15
    x0, x1, y0, y1 = plt.axis()
    plt.axis((x0,
              x1,
              y0,
              y1*plot_margin))

    plt.tick_params(axis='both', which='major', labelsize=8)
    plt.tick_params(axis='both', which='minor', labelsize=8)
    plt.tight_layout()

    autolabel(numberchart,seen_values_pct)

    plt.savefig(path)
    plt.clf()
Example #5
def vis_detections (im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
        )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
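A self-contained call to vis_detections with a dummy image and a hand-made detections array (rows are [x1, y1, x2, y2, score]); the class name is arbitrary:

import numpy as np
import matplotlib.pyplot as plt

im = (np.random.rand(200, 300, 3) * 255).astype(np.uint8)   # stand-in for a BGR frame
dets = np.array([[30., 40., 120., 160., 0.9],
                 [150., 60., 260., 180., 0.3]])              # second box falls below thresh
vis_detections(im, 'person', dets, thresh=0.5)
plt.show()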
Example #6
def plot_wav_fft(wav_filename, desc=None):
    plt.clf()
    plt.figure(num=None, figsize=(6, 4))
    sample_rate, X = scipy.io.wavfile.read(wav_filename)
    spectrum = np.fft.fft(X)
    freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)

    plt.subplot(211)
    num_samples = 200
    plt.xlim(0, num_samples / sample_rate)
    plt.xlabel("time [s]")
    plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
    plt.grid(True)

    plt.subplot(212)
    plt.xlim(0, 5000)
    plt.xlabel("frequency [Hz]")
    plt.xticks(np.arange(5) * 1000)
    if desc:
        desc = desc.strip()
        fft_desc = desc[0].lower() + desc[1:]
    else:
        fft_desc = wav_filename
    plt.title("FFT of %s" % fft_desc)
    plt.plot(freq, abs(spectrum), linewidth=5)
    plt.grid(True)

    plt.tight_layout()

    rel_filename = os.path.split(wav_filename)[1]
    plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
                bbox_inches='tight')
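To try plot_wav_fft without a real recording, one can first write a short synthetic tone to disk; the file name below is made up for the demo:

import os
import numpy as np
import scipy.io.wavfile
import matplotlib.pyplot as plt

sample_rate = 8000
t = np.arange(sample_rate) / sample_rate                        # one second of audio
tone = (0.5 * np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)
scipy.io.wavfile.write('tone_440hz.wav', sample_rate, tone)
plot_wav_fft('tone_440hz.wav', desc='440 Hz test tone')         # writes tone_440hz_wav_fft.png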
Example #7
def influence_plot(X, y_true, y_pred, **kwargs):
    """Produces an influence plot.

    Parameters
    ----------
    X : array
        Design matrix.
    y_true : array_like
        Observed labels, either 0 or 1.
    y_pred : array_like
        Predicted probabilities, floats on [0, 1].

    Notes
    -----
    .. plot:: pyplots/influence_plot.py
    """
    r = pearson_residuals(y_true, y_pred)
    leverages = pregibon_leverages(X, y_pred)

    delta_X2 = case_deltas(r, leverages)
    dbetas = pregibon_dbetas(r, leverages)

    plt.scatter(y_pred, delta_X2, s=dbetas * 800, **kwargs)

    __, __, y1, y2 = plt.axis()
    plt.axis((0, 1, y1, y2))

    plt.xlabel('Predicted Probability')
    plt.ylabel(r'$\Delta \chi^2$')

    plt.tight_layout()
Example #8
def make_fish(zoom=False):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))
    plt.plot(plot_limits['pitch'], plot_limits['rolldev'], '-g', lw=3)
    plt.plot(plot_limits['pitch'], -plot_limits['rolldev'], '-g', lw=3)
    plt.plot(pitch.midvals, roll.midvals, '.b', ms=1, alpha=0.7)

    p, r = make_ellipse()  # pitch, off nominal roll
    plt.plot(p, r, '-c', lw=2)

    gf = -0.08  # Fudge on pitch value for illustrative purposes
    plt.plot(greta['pitch'] + gf, -greta['roll'], '.r', ms=1, alpha=0.7)
    plt.plot(greta['pitch'][-1] + gf, -greta['roll'][-1], 'xr', ms=10, mew=2)

    if zoom:
        plt.xlim(46.3, 56.1)
        plt.ylim(4.1, 7.3)
    else:
        plt.ylim(-22, 22)
        plt.xlim(40, 180)
    plt.xlabel('Sun pitch angle (deg)')
    plt.ylabel('Sun off-nominal roll angle (deg)')
    plt.title('Mission off-nominal roll vs. pitch (5 minute samples)')
    plt.grid()
    plt.tight_layout()
    plt.savefig('fish{}.png'.format('_zoom' if zoom else ''))
Example #9
def plot_dpi_dpr_distribution(args, dpis, dprs, diagnoses):
    print(log.INFO, 'Plotting estimate distributions...')
    diagnoses = np.array(diagnoses)
    diagnoses[(0.25 <= diagnoses) & (diagnoses <= 0.75)] = 0.5

    # Setup plot
    fig, ax = plt.subplots()
    pt.setup_axes(plt, ax)

    biomarkers_str = args.method if args.biomarkers is None else ', '.join(args.biomarkers)
    ax.set_title('DP estimation using {0} at {1}'.format(biomarkers_str, ', '.join(args.visits)))
    ax.set_xlabel('DP')
    ax.set_ylabel('DPR')

    plt.scatter(dpis, dprs, c=diagnoses, edgecolor='none', s=25.0,
                vmin=0.0, vmax=1.0, cmap=pt.progression_cmap,
                alpha=0.5)

    # Plot legend
    # noinspection PyUnresolvedReferences
    rects = [mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_cn + (0.5,), linewidth=0),
             mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_mci + (0.5,), linewidth=0),
             mpl.patches.Rectangle((0, 0), 1, 1, fc=pt.color_ad + (0.5,), linewidth=0)]
    labels = ['CN', 'MCI', 'AD']
    legend = ax.legend(rects, labels, fontsize=10, ncol=len(rects), loc='upper center', framealpha=0.9)
    legend.get_frame().set_edgecolor((0.6, 0.6, 0.6))

    # Draw or save the plot
    plt.tight_layout()
    if args.plot_file is not None:
        plt.savefig(args.plot_file, transparent=True)
    else:
        plt.show()
    plt.close(fig)
Example #10
def saveani(mesh, plc, data, label, out, cMin=None, cMax=None, logScale=False, cmap=None):
    """
    """
    dpi = 92
    scale = 1
    fig = plt.figure(facecolor="white", figsize=(scale * 800 / dpi, scale * 490 / dpi), dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)

    gci = pg.mplviewer.drawModel(ax, mesh, data=data[0], cMin=cMin, cMax=cMax, cmap=cmap, logScale=logScale)

    cbar = pg.mplviewer.createColorbar(gci, label=label, pad=0.55)
    ax.set_ylabel("Depth [m]")
    ax.set_xlabel("$x$ [m]")

    ticks = ax.yaxis.get_majorticklocs()
    tickLabels = []
    for t in ticks:
        tickLabels.append(str(int(abs(t))))

    ax.set_yticklabels(tickLabels)

    pg.show(plc, axes=ax)

    plt.tight_layout()
    plt.pause(0.001)

    def animate(i):
        print(out + ": Frame:", i, "/", len(data))
        pg.mplviewer.setMappableData(gci, pg.abs(data[i]), cMin=cMin, cMax=cMax, logScale=logScale)
        # plt.pause(0.001)

    createAnimation(fig, animate, int(len(data)), dpi, out)
Example #11
 def plotall(self):
     real = self.z_data_raw.real
     imag = self.z_data_raw.imag
     real2 = self.z_data_sim.real
     imag2 = self.z_data_sim.imag
     fig = plt.figure(figsize=(15,5))
     fig.canvas.set_window_title("Resonator fit")
     plt.subplot(131)
     plt.plot(real,imag,label='rawdata')
     plt.plot(real2,imag2,label='fit')
     plt.xlabel('Re(S21)')
     plt.ylabel('Im(S21)')
     plt.legend()
     plt.subplot(132)
     plt.plot(self.f_data*1e-9,np.absolute(self.z_data_raw),label='rawdata')
     plt.plot(self.f_data*1e-9,np.absolute(self.z_data_sim),label='fit')
     plt.xlabel('f (GHz)')
     plt.ylabel('Amplitude')
     plt.legend()
     plt.subplot(133)
     plt.plot(self.f_data*1e-9,np.unwrap(np.angle(self.z_data_raw)),label='rawdata')
     plt.plot(self.f_data*1e-9,np.unwrap(np.angle(self.z_data_sim)),label='fit')
     plt.xlabel('f (GHz)')
     plt.ylabel('Phase')
     plt.legend()
     # plt.gcf().set_size_inches(15,5)
     plt.tight_layout()
     plt.show()
Example #12
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
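A usage sketch, assuming scikit-learn is available to build the confusion matrix; the labels and class names are invented for the demo:

import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = [0, 1, 2, 2, 1, 0, 2, 1]
y_pred = [0, 2, 2, 2, 1, 0, 1, 1]
cm = confusion_matrix(y_true, y_pred)
plt.figure()
plot_confusion_matrix(cm, classes=['cat', 'dog', 'bird'], normalize=True)
plt.show()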
Example #13
 def peek(self, figsize=(15, 5)):
     """Quick-look summary plot."""
     import matplotlib.pyplot as plt
     fig, axes = plt.subplots(nrows=1, ncols=2, figsize=figsize)
     self.plot_bias(ax=axes[0])
     self.plot_matrix(ax=axes[1])
     plt.tight_layout()
Example #14
def plot_flux(f, q_left, q_right, plot_zero=True):
    qvals = np.linspace(q_right, q_left, 200)
    fvals = f(qvals)
    dfdq = np.diff(fvals) / (qvals[1]-qvals[0])  # approximate df/dq
    qmid = 0.5*(qvals[:-1] + qvals[1:])   # midpoints for plotting dfdq

    #plt.figure(figsize=(12,4))
    plt.subplot(131)
    plt.plot(qvals,fvals)
    plt.xlabel('q')
    plt.ylabel('f(q)')
    plt.title('flux function f(q)')

    plt.subplot(132)
    plt.plot(qmid, dfdq)
    plt.xlabel('q')
    plt.ylabel('df/dq')
    plt.title('characteristic speed df/dq')

    plt.subplot(133)
    plt.plot(dfdq, qmid)
    plt.xlabel('df/dq')
    plt.ylabel('q')
    plt.title('q vs. df/dq')
    if plot_zero:
        plt.plot([0,0],[qmid.min(), qmid.max()],'k--')

    plt.subplots_adjust(left=0.)
    plt.tight_layout()
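plot_flux only needs a callable flux function; a convenient test case is the Burgers flux f(q) = q**2 / 2 (chosen here for illustration, not part of the original snippet):

import numpy as np
import matplotlib.pyplot as plt

f = lambda q: 0.5 * q**2            # Burgers' flux
plt.figure(figsize=(12, 4))         # the figure call is commented out inside plot_flux
plot_flux(f, q_left=2.0, q_right=-1.0)
plt.show()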
Example #15
def plothist():

	n_groups = 3

	means_men = (42.3113658071, 39.7803247373, 67.335243553)
	std_men = (1, 2, 3)

	fig, ax = plt.subplots()

	index = np.array([0.5,1.5,2.5])
	bar_width = 0.4

	opacity = 0.4
	error_config = {'ecolor': '0'}

	rects1 = plt.bar(index, means_men, bar_width,
	                 alpha=opacity,
	                 color='b',
	                 error_kw=error_config)

	plt.xlabel('Approach')
	plt.ylabel('Accuracy')
	plt.axis((0,3.4,0,100))
	plt.title('Evaluation')
	plt.xticks(index + bar_width/2, ('Bing Liu', 'AFINN', 'SentiWordNet'))
	plt.legend()

	plt.tight_layout()
	# plt.show()
	plt.savefig('foo.png')
Example #16
def plot_all():

    font = {'family': 'serif',
            'weight': 'normal',
            'size': 12,
            }

    plt.rc('font', **font)

    data = DataReader('dyn_spin.txt')
    ts = data['time']

    fig = plt.figure(figsize=(5, 4))

    mx = data['m_x']
    my = data['m_y']
    mz = data['m_z']
    plt.plot(ts, mx, '-', label='mx', color='b')
    plt.plot(ts, my, '-', label='my', color='g')
    plt.plot(ts[::6], mz[::6],'.-', label='mz',  color='r')

    plt.legend(bbox_to_anchor=[0.8, 0.8], shadow=True, frameon=True)
    #plt.xlim([0, 1.01])

    plt.legend()

    plt.xlabel('Time')
    plt.ylabel('m')
    plt.tight_layout()
    fig.savefig('m_ts.pdf')
Example #17
def example(show=True, save=False):

    # Settings:
    t0 = 0.
    dt = .0001
    dv = .0001
    tf = .1
    verbose = True
    update_method = 'approx'
    approx_order = 1
    tol = 1e-14
    
    # Run simulation:
    simulation = get_simulation(dv=dv, verbose=verbose, update_method=update_method, approx_order=approx_order, tol=tol)
    simulation.run(dt=dt, tf=tf, t0=t0)
    
    # Visualize:
    i1 = simulation.population_list[1]
    plt.figure(figsize=(3,3))
    plt.plot(i1.t_record, i1.firing_rate_record)
    plt.xlim([0,tf])
    plt.ylim(ymin=0)
    plt.xlabel('Time (s)')
    plt.ylabel('Firing Rate (Hz)')
    plt.tight_layout()
    if save: plt.savefig('./excitatory_inhibitory.png')
    if show: plt.show()
    
    return i1.t_record, i1.firing_rate_record
Example #18
def execute_solver(IMAGE_FILE):
    sample4x4_crop = import_image(IMAGE_FILE)
    cluster_image = get_clustering_image(sample4x4_crop)
    cluster_groupings_dict = cluster_grouper(cluster_image).execute()
    final = pre_process_image(IMAGE_FILE)
    prediction_dict = clean_prediction_dict(get_predictions(final))
    write_puzzle_file(cluster_groupings_dict,prediction_dict)
    try:
        solution = solve_puzzle('cv_puzzle.txt',False)
    except:
        return 'error'

    #get image of result
    fig = plt.figure(figsize=(2, 2), dpi=100,frameon=False)
    plt.axis('off')
    plt.imshow(sample4x4_crop, cmap=mpl.cm.Greys_r)
    for k,v in solution.items():
        if v is None:
            return 'error'
        plt.annotate('{}'.format(v), xy=(k[0]*50+12,k[1]*50+40), fontsize=14)
    plt.tight_layout()
    plt.savefig('static/images/solution.jpg', bbox_inches='tight', dpi=100)

    #theres an issue with the saved layout, tight_layout
    #doesn't appear to work so I need to apply my own cropping again
    resize_final = import_image('static/images/solution.jpg',80)
    imsave('static/images/solution.jpg',resize_final)
    return 'good'
Example #19
	def set_defaults(d):

		figsize=[12,15]
		normal = 24
		small = 12
		tiny = 8
		line = 1.5
	
		params={'axes.labelsize': normal,
		    'axes.linewidth': line,
			'lines.markeredgewidth': line,
			'font.size': normal,
			'legend.fontsize': normal,
			'xtick.labelsize': normal,
			'ytick.labelsize': normal,
			'xtick.major.size': small,
			'xtick.minor.size': tiny,
			'ytick.major.size': small,
			'ytick.minor.size': tiny,
			'savefig.dpi':300,
			'text.usetex': True,
			'figure.figsize': figsize,
		    	}

		plt.rc('font',**{'family':'serif','serif':['Times']})
		plt.rc('lines', lw=line)
		plt.rc('axes', linewidth=line)
		plt.tight_layout(pad=0.1)

		pylab.rcParams.update(params)
Example #20
def LEpdf(xydata):
	""" Plot pdf 
	NEEDS WORK
	"""
	
	## Read eta (yy), xHO (x1) points from file
	yy,x1 = np.loadtxt(xydata,delimiter=" ",skiprows=1)[:,1:3].T
	del xydata

	## Construct a (normed) histogram of the data
	nbins = [100,100]
	H,xedges,yedges = np.histogram2d(x1,yy,bins=nbins,normed=True)
	xpos = xedges[1:]-xedges[0]; ypos = yedges[1:]-yedges[0]

	## Plot pdf
	H = gaussian_filter(H, 3)	## Convolve with Gaussian
	fig, (ax1, ax2, ax3) = plt.subplots(nrows=3)
	ax1.imshow(H, interpolation='nearest', origin='lower')#,extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
	ax1.set_xlabel("$x_{HO}$");ax1.set_ylabel("$\eta$")
	ax2.contour(xpos,ypos,H,10)
	ax2.set_xlabel("$x_{HO}$");ax2.set_ylabel("$\eta$")
	ax3.hist2d(x1,yy, bins=100, normed=True)
	ax3.set_xlabel("$x$");ax3.set_ylabel("$\eta$")
	plt.tight_layout()

	plt.show()
	return
Example #21
def stellar_mass_vs_halo_mass(G_MR, ThisRedshiftList, pdf):

    ylim=[4.0,12.5]
    xlim=[7.5, 15.]
  
    plt.rcParams.update({'xtick.major.width': 1.0, 'ytick.major.width': 1.0, 
                         'xtick.minor.width': 1.0, 'ytick.minor.width': 1.0})    
    fig = plt.figure(figsize=(10,10))
    subplot=plt.subplot()
    subplot.set_ylim(ylim), subplot.set_xlim(xlim) 

    ylab='$\mathrm{log_{10}}(M_*[h^{-2}M_{\odot}])$'       
    xlab='$\mathrm{log_{10}}(M_{200c}[h^{-2}M_{\odot}])$'
    subplot.set_xlabel(xlab, fontsize=16), subplot.set_ylabel(ylab, fontsize=16)
                  
    for ii in range (0,len(ThisRedshiftList)):
  
        #MODEL
        (sel)=select_current_redshift(G_MR, ThisRedshiftList, ii)
        
        G0_MR=G_MR[sel]   
        G0_MR=G0_MR[G0_MR['StellarMass']>0.]
        StellarMass=stellar_mass_with_err(G0_MR, Hubble_h, ThisRedshiftList[ii])
        HaloMass=np.log10(G0_MR['Mvir']*1.e10*Hubble_h)
        subplot.scatter(HaloMass,StellarMass,s=5, color='black')
    #endfor
    
    
    plt.tight_layout()
    plt.savefig('./fig/plots_smhm.pdf')
    pdf.savefig()
    plt.close()
Example #22
def plot_raw_data(ratings):
    """plot the statistics result on raw rating data."""
    # do statistics.
    num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()
    num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()
    sorted_num_movies_per_user = np.sort(num_items_per_user)[::-1]
    sorted_num_users_per_movie = np.sort(num_users_per_item)[::-1]

    # plot
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.plot(sorted_num_movies_per_user, color='blue')
    ax1.set_xlabel("users")
    ax1.set_ylabel("number of ratings (sorted)")
    ax1.grid()

    ax2 = fig.add_subplot(1, 2, 2)
    ax2.plot(sorted_num_users_per_movie)
    ax2.set_xlabel("items")
    ax2.set_ylabel("number of ratings (sorted)")
    ax2.set_xticks(np.arange(0, 2000, 300))
    ax2.grid()

    plt.tight_layout()
    plt.savefig("stat_ratings")
    plt.show()
    # plt.close()
    return num_items_per_user, num_users_per_item
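The function was presumably written for a sparse ratings matrix, but a dense NumPy array with zeros for missing ratings goes through the same code path, which makes a quick demo easy:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(7)
ratings = rng.randint(0, 6, size=(1000, 600))   # rows = items, columns = users, 0 = not rated
num_items_per_user, num_users_per_item = plot_raw_data(ratings)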
Example #23
def plot_errsh():

    results = Control_results

    fig, ax = plt.subplots()

    #results
    rects_train = plt.barh(ind, results['train_errs'], width,
                    color='b',
                    alpha=opacity,
                    xerr=results['train_errs_std']/np.sqrt(10),
                    label='$train$')
    rects_test = plt.barh(ind+width, results['test_errs'], width,
                    color='r',
                    alpha=opacity,
                    xerr=results['test_errs_std']/np.sqrt(10),
                    label='test')

    plt.ylabel('Performance (Error)')
    plt.title('Error (MSE)')
    plt.yticks(ind+width, Datasets)
    plt.legend()

    #plot and save
    plt.tight_layout()
    plt.savefig('errs' + '.png')
    plt.show()
Example #24
def plot_err_comp():

    results = Control_results

    fig, ax = plt.subplots()

    #results
    rects_train = plt.bar(ind, results['train_errs'], width,
                    color='b',
                    alpha=opacity,
                    yerr=results['train_errs_std'],
                    label='train')
    rects_test = plt.bar(ind+width, results['test_errs'], width,
                    color='r',
                    alpha=opacity,
                    yerr=results['test_errs_std'],
                    label='test')

    plt.xlabel('Datasets')
    plt.ylabel('Error(MSE)')
    plt.title('Performance (Error)')
    plt.xticks(ind+width, Datasets)
    plt.legend()

    #plot and save
    plt.tight_layout()
    plt.savefig('errs_comparison' + '.png')
    plt.show()
Example #25
def plot_errs():

    results = Control_results

    fig, ax = plt.subplots()

    #results
##    rects_train = plt.bar(ind,results['train_errs'], width,
##                    color = 'b',
##                    alpha = opacity,
##                    yerr =results['train_errs_std']/np.sqrt(10),
##                    label = 'train');
    rects_test = plt.boxplot(results['test_errs'],
                    labels=Datasets)

    plt.ylabel('$Error(MSE)$')
    plt.title(r'$Performance (Error) - With\ Injections$')
    plt.xticks(ind+width, Datasets)
##    plt.legend();

    #plot and save
    plt.tight_layout()
    plt.savefig('errs_with_inject' + '.png')
    plt.show()
Example #26
def bar_plot(hist_mod, tool, paths, save_to=None, figsize=(10, 10), fontsize=6):
    """
    Plots bar plot for selected tracks:

    :param figsize: Plot figure size
    :param save_to: Object for plots saving
    :param fontsize: Size of xlabels on plot
    """
    ind = np.arange(len(paths))

    result = []
    for path in paths:
        result.append((donor(path), Bed(path).count()))

    result = sorted(result, key=donor_order_id)
    result_columns = list(zip(*result))

    plt.figure(figsize=figsize)
    width = 0.35
    plt.bar(ind, result_columns[1], width, color='black')
    plt.ylabel('Peaks count', fontsize=fontsize)
    plt.xticks(ind, result_columns[0], rotation=90, fontsize=fontsize)
    plt.title(hist_mod + " " + tool, fontsize=fontsize)
    plt.tight_layout()
    save_plot(save_to)
Example #27
    def plot_main_seeds(self, qname, radio=False, checkbox=False, 
                        numerical=False, array=False):
        """ Plot the responses separately for each seed group in main_seeds. """
        
        assert sum([radio, checkbox, numerical, array]) == 1

        for seed in self.main_seeds:
            responses_seed = self.filter_rows_by_seed(seed, self.responses)
            responses_seed_question = self.filter_columns_by_name(qname, responses_seed)

            plt.subplot(int("22" + str(self.main_seeds.index(seed))))
            plt.title("Seed " + seed)

            if radio:
                self.plot_convergence_radio(qname, responses_seed_question)
            elif checkbox:
                self.plot_convergence_checkbox(responses_seed_question)
            elif numerical:
                self.plot_convergence_numerical(responses_seed_question)
            elif array:
                self.plot_array_boxes(qname, responses_seed_question)

        qtext = self.get_qtext_from_qname(qname)
        plt.suptitle(qtext)
        plt.tight_layout()
        plt.show()
Example #28
def dend(X, notitles=False, metric="euclidean"):
    """Takes BoWs array creates linkage and dendrogram.

    Args
    ----
    X: ndarray
        BoWs array
    metric: String
        Distance metric to use (default: "euclidean")

    Returns
    -------
    Z: ndarray
        Linkage array
    dend: dict
        dendrogram as a leaf and branch tree.
    """

    Z = linkage(X, metric=metric)

    plt.clf()
    den = dendrogram(Z, labels=abbrev, orientation="left")
    plt.title("Dendrogram of Antiquity Texts")
    plt.xlabel("Distance between items")
    plt.tight_layout()
    if notitles:
        name = "Dendrogram_notitles_{}.pdf".format(metric)
    else:
        name = "Dendrogram_{}.pdf".format(metric)
    plt.savefig(name)

    return Z, den
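dend reads the leaf labels from a module-level abbrev list, so a demo has to define it; the toy matrix and labels below are placeholders:

import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import linkage, dendrogram

rng = np.random.RandomState(0)
X = rng.rand(6, 20)                          # toy bag-of-words matrix: 6 texts, 20 terms
abbrev = ['doc%d' % i for i in range(6)]     # global the function uses for leaf labels
Z, den = dend(X, metric='cosine')            # writes Dendrogram_cosine.pdf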
Example #29
def length_bar_plots(tracks_paths, min_power, max_power, threads_num, save_to=None):
    """
    Plots bar plot for each track - peaks count via peaks lengths:

    :param tracks_paths: List of absolute track paths
    :param min_power: used for left border of bar plot as a power for 10
    :param max_power: used for right border of bar plot as a power for 10
    :param threads_num: Threads number for parallel execution
    :param save_to: Object for plots saving
    """
    pool = multiprocessing.Pool(processes=threads_num)
    bins = np.logspace(min_power, max_power, 80)
    ordered_paths, ordered_lengths, track_max_bar_height = zip(*pool.map(functools.partial(
        _calculate_lengths, bins=bins), tracks_paths))
    max_bar_height = max(track_max_bar_height)
    lengths = dict(zip(ordered_paths, ordered_lengths))

    plt.figure()
    for i, track_path in enumerate(tracks_paths):
        ax = plt.subplot(330 + i % 9 + 1)
        ax.hist(lengths[track_path], bins, histtype='bar')
        ax.set_xscale('log')
        ax.set_xlabel('Peaks length')
        ax.set_ylabel('Peaks count')
        ax.set_ylim([0, max_bar_height])
        ax.set_title(donor(track_path) if is_od_or_yd(track_path) else Path(track_path).name)

        if i % 9 == 8:
            plt.tight_layout()
            save_plot(save_to)
            plt.figure()
    plt.tight_layout()
    save_plot(save_to)
Example #30
def dist_small_multiples(df, figsize=(20, 20)):
    """
    Small multiples plots of the distribution of a dataframe's variables.
    """
    import math

    sns.set_style("white")

    num_plots = len(df.columns)
    n = int(math.ceil(math.sqrt(num_plots)))

    fig = plt.figure(figsize=figsize)
    axes = [plt.subplot(n, n, i) for i in range(1, num_plots + 1)]

    i = 0
    for k, v in df.items():
        ax = axes[i]
        sns.kdeplot(v, shade=True, ax=ax, legend=False)
        sns.rugplot(v, ax=ax, c=sns.color_palette("husl", 3)[0])
        [label.set_visible(False) for label in ax.get_yticklabels()]
        ax.xaxis.set_ticks([v.min(), v.max()])
        ax.set_title(k)
        i += 1
    sns.despine(left=True, trim=True, fig=fig)
    plt.tight_layout()
    return fig, axes
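A short demo on a random DataFrame (column names and distributions are arbitrary), assuming a seaborn version compatible with the keyword arguments used above:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.RandomState(0)
df = pd.DataFrame({
    'a': rng.normal(size=300),
    'b': rng.exponential(size=300),
    'c': rng.uniform(-1, 1, size=300),
    'd': rng.normal(2, 0.5, size=300),
})
fig, axes = dist_small_multiples(df, figsize=(8, 8))
plt.show()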
Example #31
def main(weather_data_dir, katkam_dir):

    weather_files = os.listdir(weather_data_dir)
    df = pd.concat((pd.read_csv(os.path.join(weather_data_dir, f), header=14, parse_dates=['Date/Time']) for f in weather_files))
    df = df[df['Weather'].notnull()]
    weather_df = df[['Date/Time', 'Time', 'Weather']].copy()
    images = katkam_dir + '/*.*'
    x_images = io.imread_collection(images)
    images = pd.DataFrame({'filename': x_images.files, 'img': np.arange(0, len(x_images.files)) })
    images['Date/Time'] = pd.to_datetime(images['filename'].str.extract(r'-([0-9]+)\.', expand=False), format='%Y%m%d%H%M%S')
    images = images.merge(weather_df, on='Date/Time')
    images['Weather'] = images['Weather'].apply(transform_weather)

    data = []
    target = []
    filenames = []

    # need to do this for loop to get the images out of x_images or else we'd need to load the whole
    # x_images array instead of just the images we have data for
    for i, x in images.iterrows():
        matrix = rgb2gray(x_images[x['img']])
        matrix = np.reshape(matrix, (192*256))
        data.append(matrix)
        target.append(x['Weather'].split(','))
        filenames.append(x['filename'])

    mlb = MultiLabelBinarizer()
    y_enc = mlb.fit_transform(target)

    X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(np.array(data), y_enc, np.array(filenames))

    model = make_pipeline(
        PCA(250),
        KNeighborsClassifier(n_neighbors=15)
    )

    model.fit(X_train, y_train)
    predicted = model.predict(X_test)
    print("KNN Model Score: %f" % model.score(X_test, y_test))
    result = np.empty(predicted.shape[0], dtype=bool)

    for i, (x,y) in enumerate(zip(predicted, y_test)):
        result[i] = np.array_equal(x,y)

    wrong = mlb.inverse_transform(predicted[~result])
    real = mlb.inverse_transform(y_test[~result])
    results_df = pd.DataFrame({'filename': idx2[~result], 'predicted': wrong, 'actual': real})
    aggregated = results_df.groupby(['predicted', 'actual']).count().rename(columns={'filename': 'Predicted Incorrectly'})
    aggregated.plot.bar()
    plt.tight_layout()
    plt.legend()
    plt.savefig('errors.png')

    correct = mlb.inverse_transform(predicted[result])
    real = mlb.inverse_transform(y_test[result])
    results_df = pd.DataFrame({'filename': idx2[result], 'predicted': correct, 'actual': real})
    aggregated = results_df.groupby(['predicted', 'actual']).count().rename(columns={'filename': 'Predicted Correctly'})
    aggregated.plot.bar()
    plt.tight_layout()
    plt.legend()
    plt.savefig('correct.png')
Example #32
# +
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 6))

ds_data_consistency.abs_err.plot(
    ax=ax1,
    cbar_kwargs={"label": '$%s$' % ds_data_consistency.abs_err.attrs['units']},
#    robust=True
)
ax1.set_title('mean absolute error')

ds_data_consistency.rel_err.plot(
    ax=ax2,
    cbar_kwargs={"label": '%s' % ds_data_consistency.rel_err.attrs['units']},
#    robust=True
)
ax2.set_title('mean relative error')

plt.suptitle('ELM - data consistency (non-robust version)')

plt.tight_layout()
plt.draw()
# -

# The robust version ignores outliers (2-quantile to 98-quantile only, if I remember correctly).

# +
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12,6))

ds_data_consistency.abs_err.plot(
    ax=ax1,
    cbar_kwargs={"label": '$%s$' % ds_data_consistency.abs_err.attrs['units']},
    robust=True
)
ax1.set_title('mean absolute error')
Example #33
def run(n_neurons=60, t=10, t_test=10, n_trains=10, n_encodes=20, n_tests=10, 
        f=DoubleExp(1e-3, 3e-2), f_out=DoubleExp(1e-3, 1e-1), 
        dt=0.001, neuron_type=LIF(), reg=1e-2, penalty=0.5, load_w=None, load_df=None):

    d_ens = np.zeros((n_neurons, 1))
    f_ens = f
    w_ens = None
    e_ens = None
    w_ens2 = None
    e_ens2 = None
    f_smooth = DoubleExp(1e-2, 2e-1)
    print('\nNeuron Type: %s'%neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens = np.load(load_w)['w_ens']
        else:
            print('Optimizing ens1 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s"%nenc)
                stim_func1, stim_func2 = make_normed_flipped(value=1.4, t=t, N=1, f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, stim_func1=stim_func1, stim_func2=stim_func2, neuron_type=neuron_type, w_ens=w_ens, e_ens=e_ens, L=True)
                w_ens = data['w_ens']            
                e_ens = data['e_ens']   
                np.savez('data/multiply_w.npz', w_ens=w_ens, e_ens=e_ens)
                
                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/multiply_%s_w_ens.pdf"%neuron_type)
                
                a_ens = f_smooth.filt(data['ens'], dt=dt)
                a_supv = f_smooth.filt(data['supv'], dt=dt)
                for n in range(n_neurons):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:,n], alpha=0.5, label='supv')
                    ax.plot(data['times'], a_ens[:,n], alpha=0.5, label='ens')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/multiply_ens_nenc_%s_activity_%s.pdf'%(nenc, n))
                    plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_ens = load['d_ens']
        d_out1 = load['d_out1']
        taus_ens = load['taus_ens']
        taus_out1 = load['taus_out1']
        f_ens = DoubleExp(taus_ens[0], taus_ens[1])
        f_out1 =  DoubleExp(taus_out1[0], taus_out1[1])
    else:
        print('Optimizing ens1 filters and decoders')
        stim_func1, stim_func2 = make_normed_flipped(value=1.2, t=t, N=n_trains, f=f, seed=0)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t*n_trains, f=f, dt=dt, neuron_type=neuron_type,
            stim_func1=stim_func1, stim_func2=stim_func2, w_ens=w_ens)
        d_ens, f_ens, taus_ens = df_opt(data['x'][:,0]*data['x'][:,1], data['ens'], f, dt=dt, penalty=penalty, reg=reg, name='multiply_%s'%neuron_type)
        d_ens = d_ens.reshape((n_neurons, 1))
        d_out1, f_out1, taus_out1 = df_opt(data['x'], data['ens'], f_out, dt=dt, name='multiply_%s'%neuron_type)
        np.savez('data/multiply_%s_df.npz'%neuron_type, d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f.impulse(len(times), dt=0.0001), label=r"$f^x, \tau_1=%.3f, \tau_2=%.3f$" %(-1./f.poles[0], -1./f.poles[1]))
        ax.plot(times, f_ens.impulse(len(times), dt=0.0001), label=r"$f^{ens}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
           %(-1./f_ens.poles[0], -1./f_ens.poles[1], np.count_nonzero(d_ens), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_ens.pdf"%neuron_type)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001), label=r"$f^{out}, \tau=%.3f, \tau_2=%.3f$" %(-1./f_out.poles[0], -1./f_out.poles[1]))
        ax.plot(times, f_out1.impulse(len(times), dt=0.0001), label=r"$f^{out1}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
           %(-1./f_out1.poles[0], -1./f_out1.poles[1], np.count_nonzero(d_out1), n_neurons))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_out1.pdf"%neuron_type)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['x'][:,0]*data['x'][:,1], dt=dt).ravel()
        xhat_ens = np.dot(a_ens, d_ens).ravel()
        rmse_ens = rmse(xhat_ens, x)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' %rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_train.pdf"%neuron_type)

        a_ens = f_out1.filt(data['ens'], dt=dt)
        x_out = f_out.filt(data['x'], dt=dt)
        xhat_ens_out = np.dot(a_ens, d_out1)
        rmse_ens_out1 = rmse(xhat_ens_out[:,0], x_out[:,0])
        rmse_ens_out2 = rmse(xhat_ens_out[:,1], x_out[:,1])
        fig, ax = plt.subplots()
        ax.plot(data['times'], x_out[:,0], linestyle="--", label='x_0')
        ax.plot(data['times'], x_out[:,1], linestyle="--", label='x_1')
        ax.plot(data['times'], xhat_ens_out[:,0], label='ens_0, rmse=%.3f' %rmse_ens_out1)
        ax.plot(data['times'], xhat_ens_out[:,1], label='ens_1, rmse=%.3f' %rmse_ens_out2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_out_train.pdf"%neuron_type)

    if isinstance(neuron_type, DurstewitzNeuron):
        if load_w:
            w_ens2 = np.load(load_w)['w_ens2']
        else:
            print('Optimizing ens2 encoders')
            for nenc in range(n_encodes):
                print("encoding trial %s"%nenc)
                stim_func1, stim_func2 = make_normed_flipped(value=1.4, t=t, N=1, f=f, seed=nenc)
                data = go(d_ens, f_ens, n_neurons=n_neurons, t=t, f=f, stim_func1=stim_func1, stim_func2=stim_func2, neuron_type=neuron_type, w_ens=w_ens, w_ens2=w_ens2, e_ens2=e_ens2, L2=True)
                w_ens2 = data['w_ens2']            
                e_ens2 = data['e_ens2']   
                np.savez('data/multiply_w.npz', w_ens=w_ens, e_ens=e_ens, w_ens2=w_ens2, e_ens2=e_ens2)

                fig, ax = plt.subplots()
                sns.distplot(np.ravel(w_ens2), ax=ax)
                ax.set(xlabel='weights', ylabel='frequency')
                plt.savefig("plots/tuning/multiply_%s_w_ens2.pdf"%neuron_type)

                a_ens = f_smooth.filt(data['ens2'], dt=dt)
                a_supv = f_smooth.filt(data['supv2'], dt=dt)
                for n in range(30):
                    fig, ax = plt.subplots(1, 1)
                    ax.plot(data['times'], a_supv[:,n], alpha=0.5, label='supv2')
                    ax.plot(data['times'], a_ens[:,n], alpha=0.5, label='ens2')
                    ax.set(ylim=((0, 40)))
                    plt.legend()
                    plt.savefig('plots/tuning/multiply_ens2_nenc_%s_activity_%s.pdf'%(nenc, n))
                    plt.close('all')

    if load_df:
        load = np.load(load_df)
        d_out2 = load['d_out2']
        taus_out2 = load['taus_out2']
        f_out2 = DoubleExp(taus_out2[0], taus_out2[1])
    else:
        print('Optimizing ens2 filters and decoders')
        stim_func1, stim_func2 = make_normed_flipped(value=1.2, t=t, N=n_trains, f=f, seed=0)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t*n_trains, f=f, dt=dt, neuron_type=neuron_type,
            stim_func1=stim_func1, stim_func2=stim_func2, w_ens=w_ens, w_ens2=w_ens2)
        d_out2, f_out2, taus_out2  = df_opt(data['x2'], data['ens2'], f_out, dt=dt, name='multiply_%s'%neuron_type)
        np.savez('data/multiply_%s_df.npz'%neuron_type, d_ens=d_ens, taus_ens=taus_ens, d_out1=d_out1, taus_out1=taus_out1, d_out2=d_out2, taus_out2=taus_out2)

        times = np.arange(0, 1, 0.0001)
        fig, ax = plt.subplots()
        ax.plot(times, f_out.impulse(len(times), dt=0.0001), label=r"$f^{out}, \tau=%.3f, \tau_2=%.3f$" %(-1./f_out.poles[0], -1./f_out.poles[1]))
        ax.plot(times, f_out2.impulse(len(times), dt=0.0001), label=r"$f^{out2}, \tau_1=%.3f, \tau_2=%.3f, d: %s/%s$"
           %(-1./f_out2.poles[0], -1./f_out2.poles[1], np.count_nonzero(d_out2), 30))
        ax.set(xlabel='time (seconds)', ylabel='impulse response', ylim=((0, 10)))
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig("plots/multiply_%s_filters_out2.pdf"%neuron_type)

        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' %rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="train ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens2_train.pdf"%neuron_type)


    rmses_ens = np.zeros((n_tests))
    rmses_ens_out = np.zeros((n_tests))
    rmses_ens2 = np.zeros((n_tests))
    for test in range(n_tests):
        print('test %s' %test)
        stim_func1, stim_func2 = make_normed_flipped(value=1.0, t=t_test, N=1, f=f, seed=100+test)
        data = go(d_ens, f_ens, n_neurons=n_neurons, t=t_test, f=f, dt=dt, neuron_type=neuron_type,
            stim_func1=stim_func1, stim_func2=stim_func2, w_ens=w_ens, w_ens2=w_ens2)

        a_ens = f_ens.filt(data['ens'], dt=dt)
        x = f.filt(data['x'][:,0]*data['x'][:,1], dt=dt).ravel()
        xhat_ens = np.dot(a_ens, d_ens).ravel()
        rmse_ens = rmse(xhat_ens, x)

        a_ens_out = f_out1.filt(data['ens'], dt=dt)
        x_out = f_out.filt(data['x'], dt=dt)
        xhat_ens_out = np.dot(a_ens_out, d_out1)
        rmse_ens_out = rmse(xhat_ens_out, x_out)
        rmse_ens_out1 = rmse(xhat_ens_out[:,0], x_out[:,0])
        rmse_ens_out2 = rmse(xhat_ens_out[:,1], x_out[:,1])

        a_ens2 = f_out2.filt(data['ens2'], dt=dt)
        x2 = f_out.filt(data['x2'], dt=dt)
        xhat_ens2 = np.dot(a_ens2, d_out2)
        rmse_ens2 = rmse(xhat_ens2, x2)
        rmses_ens[test] = rmse_ens
        rmses_ens_out[test] = rmse_ens_out
        rmses_ens2[test] = rmse_ens2        
    
        fig, ax = plt.subplots()
        ax.plot(data['times'], x_out[:,0], linestyle="--", label='x_0')
        ax.plot(data['times'], x_out[:,1], linestyle="--", label='x_1')
        ax.plot(data['times'], xhat_ens_out[:,0], label='ens_0, rmse=%.3f' %rmse_ens_out1)
        ax.plot(data['times'], xhat_ens_out[:,1], label='ens_1, rmse=%.3f' %rmse_ens_out2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1 out")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_out_test_%s.pdf"%(neuron_type, test))
        
        fig, ax = plt.subplots()
        ax.plot(data['times'], x, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens, label='ens, rmse=%.3f' %rmse_ens)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens1")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens1_test_%s.pdf"%(neuron_type, test))
        
        fig, ax = plt.subplots()
        ax.plot(data['times'], x2, linestyle="--", label='x')
        ax.plot(data['times'], xhat_ens2, label='ens2, rmse=%.3f' %rmse_ens2)
        ax.set(xlabel='time (s)', ylabel=r'$\mathbf{x}$', title="test ens2")
        plt.legend(loc='upper right')
        plt.savefig("plots/multiply_%s_ens2_test_%s.pdf"%(neuron_type, test))
        plt.close('all')

    mean_ens = np.mean(rmses_ens)
    mean_ens_out = np.mean(rmses_ens_out)
    mean_ens2 = np.mean(rmses_ens2)
    CI_ens = sns.utils.ci(rmses_ens)
    CI_ens_out = sns.utils.ci(rmses_ens_out)
    CI_ens2 = sns.utils.ci(rmses_ens2)
    
    fig, ax = plt.subplots()
    sns.barplot(data=rmses_ens2)
    ax.set(ylabel='RMSE', title="mean=%.3f, CI=%.3f-%.3f"%(mean_ens2, CI_ens2[0], CI_ens2[1]))
    plt.xticks()
    plt.savefig("plots/multiply_%s_rmse.pdf"%neuron_type)

    print('rmses: ', rmses_ens, rmses_ens_out, rmses_ens2)
    print('means: ', mean_ens, mean_ens_out, mean_ens2)
    print('confidence intervals: ', CI_ens, CI_ens_out, CI_ens2)
    np.savez('data/multiply_%s_results.npz'%neuron_type, rmses_ens=rmses_ens, rmses_ens_out=rmses_ens_out, rmses_ens2=rmses_ens2)
    return rmses_ens2
Example #34
def top_correlation_to_name(stocks, column_name, searchstring, top=5):
    """
    ####################################################################################
    This function draws a correlation chart of the top "x" rows of a data frame that are highly
    correlated to a selected row in the dataframe. You can think of the rows of the input
    dataframe as containing stock prices or fund flows or product sales and the columns should
    contain time series data of prices or flows or sales over multiple time periods.
    Now this program will allow you to select the top 5 or 10 rows that are highly correlated
    to a given row selected by the column: column_name and using a search string "searchstring".
    The program will search for the search string in that column column_name and return a list
    of 5 or 10 rows that are the most correlated to that selected row. If you give "top" as
    a float ratio then it will use the ratio as the cut off point in the correlation
    coefficient to select rows.
    ####################################################################################
    """
    #### First increment top by 1 since you are asking for top X names in addition to the one you have
    top += 1
    incl = [x for x in list(stocks) if x not in column_name]
    ### First drop all NA rows since they will mess up your correlations
    stocks.dropna(inplace=True)
    if stocks.empty:
        print('After dropping NaNs, the data frame has become empty.')
        return
    ### Now find the highest correlated rows to the selected row ###
    try:
        index_val = search_string(stocks, column_name,searchstring).index[0]
    except:
        print('Not able to find the search string in the column.')
        return
    ### Bring that selected Row to the top of the Data Frame
    df = stocks[:]
    df["new"] = range(l, len(df)+l)
    df.loc[index_val,"new"] = 0
    stocks = df.sort_values("new").drop("new",axis=1)
    stocks.reset_index(inplace=True,drop=True)
    ##### Now calculate the correlation coefficients of other rows with the Top row
    try:
        cordf = pd.DataFrame(stocks[incl].T.corr().sort_values(0, ascending=False))
    except:
        print('Cannot calculate Correlations since Dataframe contains string values or objects.')
        return
    try:
        cordf = stocks[column_name].join(cordf)
    except:
        cordf = pd.concat((stocks[column_name], cordf), axis=1)
    #### Visualizing the top 5 or 10 or whatever cut-off they have given for Corr Coeff
    if top >= 1:
        top10index = cordf.sort_values(0, ascending=False).iloc[:top, :3].index
        top10names = cordf.sort_values(0, ascending=False).iloc[:top, :3][column_name]
        top10values = cordf.sort_values(0, ascending=False)[0].values[:top]
    else:
        top10index = cordf.sort_values(0, ascending=False)[
                     cordf.sort_values(0, ascending=False)[0].values >= top].index
        top10names = cordf.sort_values(0, ascending=False)[cordf.sort_values(
                                       0, ascending=False)[0].values >= top][column_name]
        top10values = cordf.sort_values(0, ascending=False)[cordf.sort_values(
                                       0, ascending=False)[0].values >= top][0]
    print(top10names, top10values)
    #### Now plot the top rows that are highly correlated based on condition above
    stocksloc = stocks.iloc[top10index]
    #### Visualizing using Matplotlib ###
    stocksloc = stocksloc.T
    stocksloc = stocksloc.reset_index(drop=True)
    stocksloc.columns = stocksloc.iloc[0].values.tolist()
    stocksloc.drop(0).plot(subplots=True, figsize=(15, 10), legend=False,
                           title="Top %s Correlations to %s" % (top, searchstring))
    [ax.legend(loc=1) for ax in plt.gcf().axes]
    plt.tight_layout()
    plt.show()
Example #35
    def lm_plot(self, x, axs=None):
        """Plot results of actdyn_ode45 function.
            data = [t, lmt, lm, lt, vm, fm*fm0, fse*fm0, fl*fm0, fpe*fm0, alpha]
        """

        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print('matplotlib is not available.')
            return
        
        if axs is None:
            _, axs = plt.subplots(nrows=3, ncols=2, sharex=True, figsize=(10, 6))

        axs[0, 0].plot(x[:, 0], x[:, 1], 'b', label='LMT')
        lmt = x[:, 2]*np.cos(x[:, 9]) + x[:, 3]
        if np.sum(x[:, 9]) > 0:
            axs[0, 0].plot(x[:, 0], lmt, 'g--', label=r'$LM \cos \alpha + LT$')
        else:
            axs[0, 0].plot(x[:, 0], lmt, 'g--', label=r'LM+LT')
        ylim = self.margins(x[:, 1], margin=.1)
        axs[0, 0].set_ylim(ylim)
        axs[0, 0].legend(framealpha=.5, loc='best')
        
        axs[0, 1].plot(x[:, 0], x[:, 3], 'b')
        #axs[0, 1].plot(x[:, 0], lt0*np.ones(len(x)), 'r')
        ylim = self.margins(x[:, 3], margin=.1)
        axs[0, 1].set_ylim(ylim)
        
        axs[1, 0].plot(x[:, 0], x[:, 2], 'b')
        #axs[1, 0].plot(x[:, 0], lmopt*np.ones(len(x)), 'r')
        ylim = self.margins(x[:, 2], margin=.1)
        axs[1, 0].set_ylim(ylim)
        
        axs[1, 1].plot(x[:, 0], x[:, 4], 'b')
        ylim = self.margins(x[:, 4], margin=.1)
        axs[1, 1].set_ylim(ylim)
        
        axs[2, 0].plot(x[:, 0], x[:, 5], 'b', label='Muscle')
        axs[2, 0].plot(x[:, 0], x[:, 6], 'g--', label='Tendon')
        ylim = self.margins(x[:, [5, 6]], margin=.1)
        axs[2, 0].set_ylim(ylim)
        axs[2, 0].set_xlabel('Time (s)')
        axs[2, 0].legend(framealpha=.5, loc='best')
        
        axs[2, 1].plot(x[:, 0], x[:, 8], 'b', label='PE')
        ylim = self.margins(x[:, 8], margin=.1)
        axs[2, 1].set_ylim(ylim)
        axs[2, 1].set_xlabel('Time (s)')
        axs[2, 1].legend(framealpha=.5, loc='best')
        
        ylabel = ['$L_{MT}\,(m)$', '$L_{T}\,(m)$', '$L_{M}\,(m)$',
                  '$V_{CE}\,(m/s)$', '$Force\,(N)$', '$Force\,(N)$']
        for i, axi in enumerate(axs.flat):
            axi.set_ylabel(ylabel[i], fontsize=14)
            axi.yaxis.set_major_locator(plt.MaxNLocator(4))
            axi.yaxis.set_label_coords(-.2, 0.5)

        plt.suptitle('Simulation of muscle-tendon mechanics', fontsize=18,
                     y=1.03)
        plt.tight_layout()
        plt.show()
        
        return axs
Example #36
    def muscle_plot(self, a=1, axs=None):
        """Plot muscle-tendon relationships with length and velocity."""

        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print('matplotlib is not available.')
            return
        
        if axs is None:
            _, axs = plt.subplots(nrows=1, ncols=3, figsize=(9, 4))
        
        lmopt   = self.P['lmopt']
        ltslack = self.P['ltslack']
        vmmax   = self.P['vmmax']
        alpha0  = self.P['alpha0']
        fm0     = self.P['fm0']
        lm0     = self.S['lm0']
        lmt0    = self.S['lmt0']
        lt0     = self.S['lt0']
        if np.isnan(lt0):
            lt0 = lmt0 - lm0*np.cos(alpha0)
        
        lm  = np.linspace(0, 2, 101)
        lt  = np.linspace(0, 1, 101)*0.05 + 1
        vm  = np.linspace(-1, 1, 101)*vmmax*lmopt
        fl  = np.zeros(lm.size)
        fpe = np.zeros(lm.size)
        fse = np.zeros(lt.size)
        fvm = np.zeros(vm.size)
        
        fl_lm0  = self.force_l(lm0/lmopt)
        fpe_lm0 = self.force_pe(lm0/lmopt)
        fm_lm0  = fl_lm0 + fpe_lm0
        ft_lt0  = self.force_se(lt0, ltslack)*fm0        
        
        for i in range(101):
            fl[i]  = self.force_l(lm[i])
            fpe[i] = self.force_pe(lm[i])
            fse[i] = self.force_se(lt[i], ltslack=1)
            fvm[i] = self.force_vm(vm[i], a=a, fl=fl_lm0)

        lm  = lm*lmopt
        lt  = lt*ltslack
        fl  = fl
        fpe = fpe
        fse = fse*fm0
        fvm = fvm*fm0
            
        xlim = self.margins(lm, margin=.05, minmargin=False)
        axs[0].set_xlim(xlim)
        ylim = self.margins([0, 2], margin=.05)
        axs[0].set_ylim(ylim)
        axs[0].plot(lm, fl, 'b', label='Active')
        axs[0].plot(lm, fpe, 'b--', label='Passive')
        axs[0].plot(lm, fl+fpe, 'b:', label='')
        axs[0].plot([lm0, lm0], [ylim[0], fm_lm0], 'k:', lw=2, label='')
        axs[0].plot([xlim[0], lm0], [fm_lm0, fm_lm0], 'k:', lw=2, label='')
        axs[0].plot(lm0, fm_lm0, 'o', ms=6, mfc='r', mec='r', mew=2, label='fl(LM0)')
        axs[0].legend(loc='best', frameon=True, framealpha=.5)
        axs[0].set_xlabel('Length [m]')
        axs[0].set_ylabel('Scale factor')
        axs[0].xaxis.set_major_locator(plt.MaxNLocator(4))
        axs[0].yaxis.set_major_locator(plt.MaxNLocator(4))
        axs[0].set_title('Muscle F-L (a=1)')
        
        xlim = self.margins([0, np.min(vm), np.max(vm)], margin=.05, minmargin=False)
        axs[1].set_xlim(xlim)
        ylim = self.margins([0, fm0*1.2, np.max(fvm)*1.5], margin=.025)
        axs[1].set_ylim(ylim)
        axs[1].plot(vm, fvm, label='')
        axs[1].set_xlabel('$\mathbf{^{CON}}\;$ Velocity [m/s] $\;\mathbf{^{EXC}}$')
        axs[1].plot([0, 0], [ylim[0], fvm[50]], 'k:', lw=2, label='')
        axs[1].plot([xlim[0], 0], [fvm[50], fvm[50]], 'k:', lw=2, label='')
        axs[1].plot(0, fvm[50], 'o', ms=6, mfc='r', mec='r', mew=2, label='FM0(LM0)')
        axs[1].plot(xlim[0], fm0, '+', ms=10, mfc='r', mec='r', mew=2, label='')
        axs[1].text(vm[0], fm0, 'FM0')
        axs[1].legend(loc='upper right', frameon=True, framealpha=.5)
        axs[1].set_ylabel('Force [N]')
        axs[1].xaxis.set_major_locator(plt.MaxNLocator(4))
        axs[1].yaxis.set_major_locator(plt.MaxNLocator(4))
        axs[1].set_title('Muscle F-V (a=1)')

        xlim = self.margins([lt0, ltslack, np.min(lt), np.max(lt)], margin=.05,
                             minmargin=False)
        axs[2].set_xlim(xlim)
        ylim = self.margins([ft_lt0, 0, np.max(fse)], margin=.05)
        axs[2].set_ylim(ylim)
        axs[2].plot(lt, fse, label='')
        axs[2].set_xlabel('Length [m]')
        axs[2].plot([lt0, lt0], [ylim[0], ft_lt0], 'k:', lw=2, label='')
        axs[2].plot([xlim[0], lt0], [ft_lt0, ft_lt0], 'k:', lw=2, label='')
        axs[2].plot(lt0, ft_lt0, 'o', ms=6, mfc='r', mec='r', mew=2, label='FT(LT0)')
        axs[2].legend(loc='upper left', frameon=True, framealpha=.5)
        axs[2].set_ylabel('Force [N]')
        axs[2].xaxis.set_major_locator(plt.MaxNLocator(4))
        axs[2].yaxis.set_major_locator(plt.MaxNLocator(4))
        axs[2].set_title('Tendon')  
        plt.suptitle('Muscle-tendon mechanics', fontsize=18, y=1.03)
        plt.tight_layout(w_pad=.1)
        plt.show()
        
        return axs
Example #37
y = principalDf_2["cluster"]
clf_dt = DecisionTreeClassifier().fit(X, y)

# Plotting decision regions

X = X.to_numpy()  # X is an ndarray

x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                     np.arange(y_min, y_max, 0.1))

Z = clf_dt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)

plt.scatter(X[:, 0], X[:, 1], c=y, s=15, edgecolor='k')
plt.suptitle('Decision surface of a decision tree using PC1 & PC2',
             fontsize=15)
plt.show()

###-------------------------------------------------------------------------###
###                   Applying Decision Tree on the whole data              ###
###                                                                         ###
###         OriginalDataDf: Variable1-...Variable47 , cluster               ###
###-------------------------------------------------------------------------###

OriginalDataDf['cluster'] = labels.tolist()

X = OriginalDataDf[features]  # X is a dataframe
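The decision-region part of this fragment depends on variables defined elsewhere (principalDf_2, labels, OriginalDataDf). A self-contained sketch of the same contourf-over-meshgrid technique, using synthetic blobs in place of the two principal components and assuming scikit-learn is available:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier

X_demo, y_demo = make_blobs(n_samples=300, centers=3, random_state=0)   # stand-in for PC1/PC2 scores
clf = DecisionTreeClassifier().fit(X_demo, y_demo)

x_min, x_max = X_demo[:, 0].min() - 1, X_demo[:, 0].max() + 1
y_min, y_max = X_demo[:, 1].min() - 1, X_demo[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu, alpha=0.6)
plt.scatter(X_demo[:, 0], X_demo[:, 1], c=y_demo, s=15, edgecolor='k')
plt.title('Decision surface of a decision tree on synthetic 2-D data')
plt.tight_layout()
plt.show()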
Example #38
def flowvelocity(angle, shift):  # definition truncated in the source; signature inferred from the calls below
    return shift * 343.2 / (2 * 2e6 * np.cos(angle))


doppler1 = doppler(15)
doppler2 = doppler(30)
doppler3 = doppler(60)

flowvelocity15 = flowvelocity(doppler1, fifteen) * 1e3  #mm/s
flowvelocity30 = flowvelocity(doppler2, thirty) * 1e3  #mm/s
flowvelocity60 = flowvelocity(doppler3, sixty) * 1e3  #mm/s
shiftcos15 = fifteen / np.cos(doppler1)
shiftcos30 = thirty / np.cos(doppler2)
shiftcos60 = sixty / np.cos(doppler3)

np.savetxt('data_scripts/tiny_velocity.txt',
           np.column_stack([
               fifteen, flowvelocity15, thirty, flowvelocity30, sixty,
               flowvelocity60
           ]),
           fmt='%.3f')

plt.plot(flowvelocity15, shiftcos15, '.', label=r'$\theta = \ang{15;;}$')
plt.plot(flowvelocity30, shiftcos30, '.', label=r'$\theta = \ang{30;;}$')
plt.plot(flowvelocity60, shiftcos60, '.', label=r'$\theta = \ang{60;;}$')

plt.xlabel(r'$v \mathbin{/} \si{\milli\meter\second\tothe{-1}}$')
plt.ylabel(r'$\symup{\Delta} \nu \cos^{-1}(\alpha) \mathbin{/} \si{\hertz}$')
plt.legend()

plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/tiny.pdf')
Example #39
def orbits(o, **options):
    """This function creates several scatter plots of a set of orbital elements based on the
    different possible axis planar projections, calculates all possible permutations of plane
    intersections based on the number of columns

    :param numpy.ndarray o: Rows are distinct orbits and columns are orbital elements in the order a, e, i, omega, Omega
    :param options:  dictionary containing all the optional settings

    #TODO: this needs updating

    Currently the options fields are:
        :marker [char]: the marker type
        :size [int]: the size of the marker
        :title [string]: the title of the plot
        :title_font_size [int]: the title font size
        :axis_labels [list of strings]: labels for each column
        :tick_font_size [int]: the axis tick font size
        :window [tuple/list]: the size of the plot window in pixels (assuming dpi = 80)
        :save [string]: will not display figure and will instead save it to this path
        :show [bool]: if False will do draw() instead of show() allowing script to continue
        :tight_rect [list of 4 floats]: configuration for the tight_layout function


    Example::
        
        import dpt_tools as dpt
        import numpy as np
        np.random.seed(19680221)

        orbs = np.matrix([
            11  + 3  *np.random.randn(1000),
            0.5 + 0.2*np.random.randn(1000),
            60  + 10 *np.random.randn(1000),
            120 + 5  *np.random.randn(1000),
            33  + 2  *np.random.randn(1000),
        ]).T

        dpt.orbits(orbs,
            title = "My orbital element distribution",
            size = 10,
        )


    """
    if not isinstance(o, np.ndarray):
        o = np.array(o)

    data_axis = options.get('axis', 0)
    if data_axis == 1:
        dim_axis = 0
    else:
        dim_axis = 1

    scale = options.get('scale', np.ones((o.shape[dim_axis],), dtype=o.dtype))


    # turn on the TeX interpreter
    plt.rc('text', usetex=True)

    lis = list(range(o.shape[dim_axis]))
    axis_plot = list(combinations(lis, 2))

    axis_labels = options.setdefault('axis_labels', None)

    limits = options.get('limits', None)

    if isinstance(axis_labels, str):
        if axis_labels == 'earth-orbit':
            axis_labels = [r"$a$ [$R_E$]", r"$e$ [1]", r"$i$ [deg]", r"$\omega$ [deg]", r"$\Omega$ [deg]", r"$M_0$ [deg]"]
            scale = [1/constants.R_earth] + [1]*5
        elif axis_labels == 'earth-state':
            axis_labels = [r"$x$ [$R_E$]", r"$y$ [$R_E$]", r"$z$ [$R_E$]", r"$v_x$ [km/s]", r"$v_y$ [km/s]", r"$v_z$ [km/s]"]
            scale = [1/constants.R_earth]*3 + [1e-3]*3
        elif axis_labels == 'sol-orbit':
            axis_labels = [r"$a$ [AU]", r"$e$ [1]", r"$i$ [deg]", r"$\omega$ [deg]", r"$\Omega$ [deg]", r"$M_0$ [deg]"]
            scale = [1/pyorb.AU] + [1]*5  # only the semi-major axis is rescaled
        elif axis_labels == 'sol-state':
            axis_labels = [r"$x$ [AU]", r"$y$ [AU]", r"$z$ [AU]", r"$v_x$ [km/s]", r"$v_y$ [km/s]", r"$v_z$ [km/s]"]
            scale = [1/pyorb.AU]*3 + [1e-3]*3
        else:
            axis_labels = ['']*6
    elif axis_labels is None:
        axis_labels = ['']*6


    
    if o.shape[dim_axis] == 2:
        subplot_cnt = (1,2)
        subplot_perms = 2
    elif o.shape[dim_axis] == 3:
        subplot_cnt = (1,3)
        subplot_perms = 3
    elif o.shape[dim_axis] == 4:
        subplot_cnt = (2,3)
        subplot_perms = 6
    elif o.shape[dim_axis] == 5:
        subplot_cnt = (2,5)
        subplot_perms = 10
    else:
        subplot_cnt = (3,5)
        subplot_perms = 15
    subplot_cnt_ind = 1

    if 'window' in options:
        size_in = options['window']
        size_in = tuple(x/80.0 for x in size_in)
    else:
        size_in=(19, 10)

    fig = plt.figure(figsize=size_in,dpi=80)

    fig.suptitle(
        options.get('title','Orbital elements distribution'),
        fontsize=options.get('title_font_size',24),
    )
    axes = []
    for I in range( subplot_perms ):
        ax = fig.add_subplot(subplot_cnt[0],subplot_cnt[1],subplot_cnt_ind)
        axes.append(ax)

        if dim_axis == 1:
            x = o[:,axis_plot[I][0]]*scale[axis_plot[I][0]]
            y = o[:,axis_plot[I][1]]*scale[axis_plot[I][1]]
        else:
            x = o[axis_plot[I][0],:]*scale[axis_plot[I][0]]
            y = o[axis_plot[I][1],:]*scale[axis_plot[I][1]]

        sc = ax.scatter( 
            x.flatten(), 
            y.flatten(), 
            marker=options.get('marker','.'),
            c=options.setdefault('color','b'),
            s=options.get('size',2),
        )

        if isinstance(options['color'],np.ndarray):
            plt.colorbar(sc)

        # x_ticks = np.linspace(np.min(o[:,axis_plot[I][0]]),np.max(o[:,axis_plot[I][0]]), num=4)
        # plt.xticks( [round(x,1) for x in x_ticks] )
        ax.set_xlabel(
            axis_labels[axis_plot[I][0]],
            fontsize=options.setdefault('ax_font_size',22),
        )
        ax.set_ylabel(
            axis_labels[axis_plot[I][1]],
            fontsize=options['ax_font_size'],
        )
        plt.xticks(fontsize=options.setdefault('tick_font_size',17))
        plt.yticks(fontsize=options['tick_font_size'])

        if limits is not None:
            if len(limits) > axis_plot[I][0]:
                ax.set_xlim(*limits[axis_plot[I][0]])
            if len(limits) > axis_plot[I][1]:
                ax.set_ylim(*limits[axis_plot[I][1]])

        subplot_cnt_ind += 1
    
    plt.tight_layout(rect=options.setdefault('tight_rect',[0, 0.03, 1, 0.95]))


    if 'save' in options:
        fig.savefig(options['save'],bbox_inches='tight')
    else:
        if options.setdefault('show', False):
            plt.show()
        else:
            plt.draw()

    return fig, axes
Example #40
0
def plot(results,
         experiment_dir,
         agents,
         plot_file_name="",
         conf_intervals=[],
         use_cost=False,
         cumulative=False,
         episodic=True,
         open_plot=True,
         track_disc_reward=False):
    '''
    Args:
        results (list of lists): each element is the list of per-episode rewards for one algorithm.
        experiment_dir (str): path to results.
        agents (list): each element is an agent that was run in the experiment.
        plot_file_name (str)
        conf_intervals (list of floats) [optional]: confidence intervals to display with the chart.
        use_cost (bool) [optional]: If true, plots are in terms of cost. Otherwise, plots are in terms of reward.
        cumulative (bool) [optional]: If true, plots are cumulative cost/reward.
        episodic (bool): If true, labels the x-axis "Episode Number". Otherwise, "Step Number". 
        open_plot (bool)
        track_disc_reward (bool): If true, plots discounted reward.

    Summary:
        Makes (and opens) a single reward chart plotting all of the data in @results.
    '''

    # Set x-axis labels to be integers.
    from matplotlib.ticker import MaxNLocator
    ax = pyplot.figure().gca()
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))

    # Some nice markers and colors for plotting.
    markers = ['o', 's', 'D', '^', '*', 'x', 'p', '+', 'v', '|']

    x_axis_unit = "episode" if episodic else "step"

    # Map them to floats in [0:1].
    colors = [[shade / 255.0 for shade in rgb] for rgb in color_ls]

    # Puts the legend into the best location in the plot and use a tight layout.
    pyplot.rcParams['legend.loc'] = 'best'

    # Negate everything if we're plotting cost.
    if use_cost:
        results = [[-x for x in alg] for alg in results]

    agent_colors = _get_agent_colors(experiment_dir, agents)

    # Make the plot.
    print_prefix = "\nAvg. cumulative reward" if cumulative else "Avg. reward"
    # For each agent.
    for i, agent_name in enumerate(agents):

        # Add figure for this algorithm.
        agent_color_index = i if agent_name not in agent_colors else agent_colors[
            agent_name]
        agent_marker_index = agent_color_index

        # Grab new color/marker if we've gone over.
        if agent_color_index >= len(colors):
            agent_color_index = agent_color_index % len(colors)
        if agent_marker_index >= len(markers):
            agent_marker_index = agent_marker_index % len(markers)

        series_color = colors[agent_color_index]
        series_marker = markers[agent_marker_index]
        y_axis = results[i]
        x_axis = list(
            drange(X_AXIS_START_VAL,
                   X_AXIS_START_VAL + len(y_axis) * X_AXIS_INCREMENT,
                   X_AXIS_INCREMENT))

        # Plot confidence intervals (if provided) around this series.
        if conf_intervals != []:
            alg_conf_interv = conf_intervals[i]
            top = np.add(y_axis, alg_conf_interv)
            bot = np.subtract(y_axis, alg_conf_interv)
            pyplot.fill_between(x_axis,
                                top,
                                bot,
                                facecolor=series_color,
                                edgecolor=series_color,
                                alpha=0.25)
            print("\t" + str(agents[i]) + ":", round(y_axis[-1], 5),
                  "(conf_interv:", round(alg_conf_interv[-1], 2), ")")
        else:
            # No confidence interval available for this agent.
            print("\t" + str(agents[i]) + ":", round(y_axis[-1], 5))

        # markevery expects an integer, so use floor division.
        marker_every = max(len(y_axis) // 30, 1)
        pyplot.plot(x_axis,
                    y_axis,
                    color=series_color,
                    marker=series_marker,
                    markevery=marker_every,
                    label=agent_name)
        pyplot.legend()
    print()

    # Configure plot naming information.
    unit = "Cost" if use_cost else "Reward"
    plot_label = "Cumulative" if cumulative else "Average"
    if "times" in experiment_dir:
        # If it's a time plot.
        unit = "Time"

    disc_ext = "Discounted " if track_disc_reward else ""

    # Set names.
    exp_dir_split_list = experiment_dir.split("/")
    if 'results' in exp_dir_split_list:
        exp_name = exp_dir_split_list[exp_dir_split_list.index('results') + 1]
    else:
        exp_name = exp_dir_split_list[0]
    experiment_dir = experiment_dir + "/" if experiment_dir[
        -1] != "/" else experiment_dir
    plot_file_name = plot_file_name if plot_file_name != "" else experiment_dir + plot_label.lower(
    ) + "_" + unit.lower() + ".pdf"
    plot_title = CUSTOM_TITLE if CUSTOM_TITLE is not None else plot_label + " " + disc_ext + unit + ": " + exp_name
    if CUSTOM_TITLE is None:
        plot_title = _format_title(plot_title)

    # Axis labels.
    x_axis_label = X_AXIS_LABEL if X_AXIS_LABEL is not None else x_axis_unit[
        0].upper() + x_axis_unit[1:] + " Number"
    y_axis_label = Y_AXIS_LABEL if Y_AXIS_LABEL is not None else plot_label + " " + unit

    # Pyplot calls.
    pyplot.xlabel(x_axis_label)
    pyplot.ylabel(y_axis_label)
    pyplot.title(plot_title)
    pyplot.grid(True)
    pyplot.tight_layout()  # Keeps the spacing nice.

    # Save the plot.
    pyplot.savefig(plot_file_name, format="pdf")

    if open_plot:
        # Open it.
        open_prefix = "gnome-" if sys.platform == "linux" or sys.platform == "linux2" else ""
        os.system(open_prefix + "open " + plot_file_name)

    # Clear and close.
    pyplot.cla()
    pyplot.close()
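For reference, a self-contained sketch of the mean-curve-plus-confidence-band pattern this function builds (the rewards and interval widths below are synthetic; the real data comes from the experiment results passed in):

import numpy as np
import matplotlib.pyplot as plt

episodes = np.arange(1, 51)
mean_reward = np.log(episodes)           # hypothetical learning curve
conf = 0.3 * np.ones_like(mean_reward)   # hypothetical confidence interval

plt.fill_between(episodes, mean_reward - conf, mean_reward + conf,
                 facecolor='tab:blue', edgecolor='tab:blue', alpha=0.25)
plt.plot(episodes, mean_reward, color='tab:blue', marker='o', markevery=5,
         label='agent')
plt.xlabel('Episode Number')
plt.ylabel('Average Reward')
plt.legend(loc='best')
plt.tight_layout()
plt.show()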
Example #41
0
def make_all_plots(reportcsv):
    print(type(reportcsv))
    charges_df = pd.read_csv(reportcsv)

    print(charges_df.describe())
    print('')
    print(charges_df.head(5))

    mask = (charges_df['CPU charge ($)'] > 0)

    cpu_charges_df = charges_df[mask][['Last name', 'CPU charge ($)']].sort_values(by=['CPU charge ($)'], ascending=False)

    mycolors = []
    pis = []
    for i,r in cpu_charges_df.iterrows():
        pis.append(r[0])

    for pi in pis:
        for i,r in cpu_charges_df.iterrows():
            #print('i= {} ;  r[0] = {}'.format(i, r[0]))
            if r[0] == pi:
                #mycolors.append('red')
                mycolors.append([1,0.2,0.2,1])
            else:
                #mycolors.append('cyan')
                mycolors.append([0.8,0.8,0.9,1])

        ### XXX
        #mycolors = list(islice(cycle(['b', 'r', 'g', 'y', 'k']), None, len(cpu_charges_df)))

        #print(mycolors)


        ax = cpu_charges_df.plot(kind='bar', color=[mycolors], width=0.8)

        #ax.set_xticklabels(cpu_charges_df['Last name'])
        plt.tick_params(
            axis='x',          # changes apply to the x-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            labelbottom=False) # labels along the bottom edge are off
        ax.legend_.remove()

        fmt = '$%.0f'
        #fmt = '$%:,.0f'
        #fmt = '${x:,.0f}'
        tick = mtick.FormatStrFormatter(fmt)

        ax.yaxis.set_major_formatter(tick)
        plt.ylabel('CPU charge')
        plt.xlabel('PI')
        plt.tight_layout()

        fig = ax.get_figure()

        caption = 'NB If a red bar is not visible, the charge for this project is not large enough to be visible on this plot.'

        ### caption option 1
        #fig.text(.5, -.5, caption, ha='center', size='smaller')

        ##  caption option 2
        mpl.rc('text', usetex=True)
        ax.set_xlabel(r'\begin{center}PI\\*\textit{\small{' + caption + r'}}\end{center}')

        fig.savefig('plot_{}.svg'.format(pi))

        # reset
        mycolors = []
        plt.close()

    return charges_df
Example #42
0
def get_thresholds(in_dat,
                   interactive=False,
                   plot_events=False,
                   fig_path=None,
                   prefix=None):
    """Guess distance threshold for event filtering

    Analyse the events in the first million Hi-C pairs of the library, plot
    the occurrences of each event type according to the number of restriction
    fragments, and ask the user interactively for the minimum thresholds for
    uncuts and loops.

    Parameters
    ----------
    in_dat: str
        Path to the .pairs file containing Hi-C pairs.
    interactive: bool
        If True, plots are displayed and thresholds are requested interactively.
    plot_events : bool
        Whether to show the plot.
    fig_path : str
        Path where the figure will be saved. If None, the figure will be
        displayed interactively.
    prefix : str
        If the library has a name, it will be shown on plots.

    Returns
    -------
    tuple of int
        The (thr_uncut, thr_loop) thresholds for uncut and loop events, either
        entered by the user or estimated from the data.
    """
    thr_uncut = None
    thr_loop = None
    max_sites = 50
    # Map of event -> legend name of event for intrachromosomal pairs.
    legend = {
        "++": "++ (weird)",
        "--": "-- (weird)",
        "+-": "+- (uncuts)",
        "-+": "-+ (loops)",
    }
    colors = {"++": "#222222", "+-": "r", "--": "#666666", "-+": "tab:orange"}
    n_events = {event: np.zeros(max_sites) for event in legend}
    i = 0
    # open the file for reading (just the first 1 000 000 lines)
    with open(in_dat, "r") as pairs:
        for line in pairs:
            # Skip header lines
            if line.startswith("#"):
                continue
            i += 1
            # Only use the first million pairs to estimate thresholds
            if i == 1000000:
                break
            # Process Hi-C pair into a dictionary
            p = process_read_pair(line)
            # Type of event and number of restriction site between reads
            etype = p["type"]
            nsites = p["nsites"]
            # Count number of events for intrachrom pairs
            if etype != "inter" and nsites < max_sites:
                n_events[etype][nsites] += 1

    def plot_event(n_events, legend, name):
        """Plot the frequency of a given event types over distance."""
        plt.xlim([-0.5, 15])
        plt.plot(
            range(n_events[name].shape[0]),
            n_events[name],
            "o-",
            label=legend[name],
            linewidth=2.0,
            c=colors[name],
        )

    if interactive:
        # PLot:
        try:
            plt.figure(0)
            for event in legend:
                plot_event(n_events, legend, event)
            plt.grid()
            plt.xlabel("Number of restriction fragment(s)")
            plt.ylabel("Number of events")
            plt.yscale("log")
            plt.legend()
            plt.show(block=False)

        except Exception:
            logger.error(
                "Unable to show plots, skipping figure generation. Perhaps "
                "there is no Xserver running ? (might be due to windows "
                "environment). Try running without the interactive option.")

        # Asks the user for appropriate thresholds
        print(
            "Please enter the number of restriction fragments separating "
            "reads in a Hi-C pair below or at which loops and "
            "uncuts events will be excluded\n",
            file=sys.stderr,
        )
        thr_uncut = int(input("Enter threshold for the uncuts events (+-):"))
        thr_loop = int(input("Enter threshold for the loops events (-+):"))
        try:
            plt.clf()
        except Exception:
            pass
    else:
        # Estimate thresholds from data
        for event in n_events:
            fixed = n_events[event]
            fixed[fixed == 0] = 1
            n_events[event] = fixed

        all_events = np.log(np.array(list(n_events.values())))
        # Compute median occurences at each restriction sites
        event_med = np.median(all_events, axis=0)
        # Compute MAD, to have a robust estimator of the expected deviation
        # from median at long distances
        mad = np.median(abs(all_events - event_med))
        exp_stdev = mad / 0.67449
        # Iterate over sites, from furthest to frag+2
        for site in range(max_sites)[:1:-1]:
            # For uncuts and loops, keep the last (closest) site where the
            # deviation from other events <= expected_stdev
            if (abs(np.log(n_events["+-"][site]) - event_med[site]) <=
                    exp_stdev):
                thr_uncut = site
            if (abs(np.log(n_events["-+"][site]) - event_med[site]) <=
                    exp_stdev):
                thr_loop = site
        if thr_uncut is None or thr_loop is None:
            raise ValueError(
                "The threshold for loops or uncut could not be estimated. "
                "Please try running with -i to investigate the problem.")
        logger.info("Filtering with thresholds: uncuts={0} loops={1}".format(
            thr_uncut, thr_loop))
        if plot_events:
            try:
                plt.figure(1)
                plt.xlim([-0.5, 15])
                # Draw colored lines for events to discard
                plt.plot(
                    range(0, thr_uncut + 1),
                    n_events["+-"][:thr_uncut + 1],
                    "o-",
                    c=colors["+-"],
                    label=legend["+-"],
                )
                plt.plot(
                    range(0, thr_loop + 1),
                    n_events["-+"][:thr_loop + 1],
                    "o-",
                    c=colors["-+"],
                    label=legend["-+"],
                )
                plt.plot(
                    range(0, 2),
                    n_events["--"][:2],
                    "o-",
                    c=colors["--"],
                    label=legend["--"],
                )
                plt.plot(
                    range(0, 2),
                    n_events["++"][:2],
                    "o-",
                    c=colors["++"],
                    label=legend["++"],
                )
                # Draw black lines for events to keep
                plt.plot(
                    range(thr_uncut, n_events["+-"].shape[0]),
                    n_events["+-"][thr_uncut:],
                    "o-",
                    range(thr_loop, n_events["-+"].shape[0]),
                    n_events["-+"][thr_loop:],
                    "o-",
                    range(1, n_events["--"].shape[0]),
                    n_events["--"][1:],
                    "o-",
                    range(1, n_events["++"].shape[0]),
                    n_events["++"][1:],
                    "o-",
                    label="kept",
                    linewidth=2.0,
                    c="g",
                )
                plt.grid()
                plt.xlabel("Number of restriction site(s)")
                plt.ylabel("Number of events")
                plt.yscale("log")
                # Remove duplicate "kept" entries in legend
                handles, labels = plt.gca().get_legend_handles_labels()
                by_label = OrderedDict(zip(labels, handles))
                plt.legend(by_label.values(), by_label.keys())
                # Show uncut and loop threshold as vertical lines
                plt.axvline(x=thr_loop, color=colors["-+"])
                plt.axvline(x=thr_uncut, color=colors["+-"])

                if prefix:
                    plt.title(
                        "Library events by distance in {}".format(prefix))
                plt.tight_layout()
                if fig_path:
                    plt.savefig(fig_path)
                else:
                    plt.show(block=False)
                # plt.clf()

            except Exception:
                logger.error(
                    "Unable to show plots, skipping figure generation. Is "
                    "an X server running? (might be due to windows "
                    "environment). Try running without the plot option.")
    return thr_uncut, thr_loop
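For reference, a minimal self-contained sketch of the MAD-based robust spread estimate used in the non-interactive branch above (the event counts are hypothetical; 0.67449 is the usual normal-consistency constant):

import numpy as np

# Hypothetical counts per number of restriction fragments, one row per event type.
counts = np.array([
    [500, 200, 80, 40, 30, 28, 27],   # "+-" (uncuts)
    [400, 150, 60, 35, 29, 27, 26],   # "-+" (loops)
    [ 30,  28, 27, 26, 27, 28, 27],   # "++"
    [ 29,  27, 28, 27, 26, 27, 28],   # "--"
])
log_counts = np.log(counts)
event_med = np.median(log_counts, axis=0)        # median per distance bin
mad = np.median(np.abs(log_counts - event_med))  # median absolute deviation
exp_stdev = mad / 0.67449                        # robust stand-in for a standard deviation
print(exp_stdev)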
def multiplot_measure_by_d(avgs, stds, num_samples):
    matplotlib.rcParams.update({'font.size':4})
    fig = plt.figure(figsize=(12,3))
    #fig.suptitle("Columns, left to right: Star discrepancy, squared distance from the origin, and squared distance from the center.\n" + 
    #             "K between 1 and 55. Shaded is 45th to 55th percentile.\n" +
    #             "DPPs are using an RBF kernel: DPP-rbf-narrow has variance 1/10, DPP-rbf-wide has variance d/2.", 
    #             fontsize=8)

    counter = 0
    measures = get_eval_measures()#['dispersion']#['unif_point','l2_cntr', 'l2']#, 'l1', 'l1_cntr']
    ds = get_ds()
    #ds = [get_ds()[0], get_ds()[1], get_ds()[2], get_ds()[3], get_ds()[6]]

    samplers = get_samplers()#['SobolSampler','UniformSampler', 'DPPVVNarrow']
    

    min_samples = []

    for d in ds:
        for measure in measures:
            counter = counter + 1
            cur_ax = fig.add_subplot(len(measures),len(ds),counter, adjustable='box')
            #adjustable='box', aspect='equal')#adjustable='box', aspect=1)#, adjustable='box', aspect=100)
            #cur_ax.set_aspect('equal', 'box')
            cur_avgs = get_one_plot_data(avgs, measure, d)
            cur_stds = get_one_plot_data(stds, measure, d)

            # to get the minimum samples used to make one of the plotted points
            #print cur_stds
            cur_min = [None, None, None, float('inf')]
            for cur_sampler in cur_stds:
                
                cur_cur_min = min([cur_stds[cur_sampler][i][3] for i in cur_stds[cur_sampler]])
                if cur_min[3] > cur_cur_min:
                    cur_min = [d, measure, cur_sampler, cur_cur_min]
            min_samples.append(cur_min)
            
            one_plot(cur_ax, cur_avgs, cur_stds, measure, d, samplers)
            #if counter == 1:
            #    cur_ax.set_xlabel('Distance to center, with k between 3 and 100')
            #else:
            #    cur_ax.set_xlabel('Distance to origin, with k between 3 and 100')
            #cur_ax.set_ylabel(get_measure_names()[measure])
            if d == ds[0] and measure == 'discrep':
                cur_ax.set_title('star discrepancy')
            elif d == ds[0] and measure == 'l2':
                cur_ax.set_title('distance from origin')
            elif d == ds[0] and measure == 'l2_cntr':
                cur_ax.set_title('distance from center')
            if measure == 'discrep' and d == ds[-1]:
                cur_ax.set_xlabel('k, between {} and {}'.format(get_n_min(), get_n_max()))
            cur_ax.set_title('d = {}'.format(d))
            #if measure == 'discrep':
            #    cur_ax.set_ylabel('d={}'.format(d))



    plt.tight_layout()


    print("the min samples: ", min_samples)
    min_samp_num = min([x[3] for x in min_samples])
    
    #print("the number of d=1,n=40,DPPNNarrow samps:", 

    out_fname = 'plots/best_known/' + get_filename(ds, measures, samplers, min_samp_num)
    cur_out_fname = out_fname + '.pdf'
    
    if not os.path.exists(cur_out_fname):
        plt.savefig(cur_out_fname)
    else:
        counter = 0
        while os.path.exists(cur_out_fname):
            counter += 1
            cur_out_fname = out_fname + '_{}'.format(counter) + '.pdf'
        plt.savefig(cur_out_fname)
    print("saving to {}".format(cur_out_fname))
Example #44
0
def get_t0_frame(vid, hMatrix, maskData, fraction, threshold=20):
    """
    Advance frame by frame from the start of the video, monitor the pixels
    around the center of the disk for the presence of water, and return the
    index of the first frame in which it is detected.
    """
    viewResult = False
    viewProgress = False
    tShift = 2  #Number of frames back for comparison

    N = int(vid.get(7))  # number of frames in video
    ref = extract_frame(vid, 0, hMatrix, maskData)
    center = maskData['diskCenter']
    R = fraction * maskData['diskRadius']
    x1 = center[0] - R
    x2 = center[0] + R
    y1 = center[1] - R
    y2 = center[1] + R
    mask = IPF.create_circular_mask(ref, R, center)
    maskData = None

    maxDif = 0

    for i in range(tShift, N):
        ref = extract_frame(vid, i - tShift, hMatrix, maskData) * mask
        frame = extract_frame(vid, i, hMatrix, maskData) * mask
        dif = IPF.subtract_images(ref, frame)

        if viewProgress:
            plt.clf()
            plt.subplot(1, 2, 1)
            plt.imshow(ref)

            plt.axis([x1, x2, y1, y2])
            plt.gray()
            plt.title('Frame #%i' % (i - tShift))
            plt.subplot(1, 2, 2)
            plt.imshow(frame)
            plt.axis([x1, x2, y1, y2])
            plt.gray()
            plt.title('Frame #%i' % i)
            plt.tight_layout(pad=0)
            plt.pause(0.001)

        maxDif = np.max(dif)
        if maxDif > threshold:
            t0FrameNumber = i
            break

    if viewResult:
        plt.clf()
        plt.subplot(1, 3, 1)
        plt.imshow(ref)
        plt.axis([x1, x2, y1, y2])
        plt.gray()
        plt.subplot(1, 3, 2)
        plt.imshow(frame)
        plt.axis([x1, x2, y1, y2])
        plt.gray()
        plt.subplot(1, 3, 3)
        frame = extract_frame(vid, i + 1, hMatrix, maskData) * mask
        plt.imshow(frame)
        plt.axis([x1, x2, y1, y2])
        plt.gray()
        plt.tight_layout(pad=0)
        plt.pause(0.001)
        plt.pause(3)

    return t0FrameNumber
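For reference, a minimal self-contained sketch of the same frame-differencing trigger on synthetic frames (a plain absolute difference stands in for IPF.subtract_images, which is an assumption; all values are hypothetical):

import numpy as np

rng = np.random.default_rng(0)
frames = [rng.normal(100.0, 2.0, (64, 64)) for _ in range(10)]
yy, xx = np.indices((64, 64))
frames[6] = frames[6] + 50.0 * np.exp(-((xx - 32)**2 + (yy - 32)**2) / 50.0)  # a bright blob appears

t_shift, threshold = 2, 20
for i in range(t_shift, len(frames)):
    dif = np.abs(frames[i] - frames[i - t_shift])
    if dif.max() > threshold:
        print("first frame above threshold:", i)   # prints 6
        break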
Example #45
0
def eval_plot_gmix(x, x_hat, x_new, save_path):
    n_recons = 10

    gridspec_kw = dict(wspace=.25, hspace=.25)
    fig, ax = plt.subplots(nrows=1,
                           ncols=2,
                           figsize=(8, 4),
                           gridspec_kw=gridspec_kw)

    # def invert_transforms(x):
    #     return x * 0.9985 + 5.

    def invert_transforms(x):
        return x

    x = invert_transforms(x)
    x_hat = invert_transforms(x_hat)
    x_new = invert_transforms(x_new)

    # Plot with fake data over real data

    ax[0].scatter(x[:, 0], x[:, 1], color="blue", label="real data", alpha=0.5)
    ax[0].scatter(x_new[:, 0],
                  x_new[:, 1],
                  color="red",
                  label="fake data",
                  alpha=0.5)

    ax[0].legend(loc='lower left',
                 bbox_to_anchor=(0., 1.),
                 fancybox=True,
                 shadow=False,
                 ncol=2)

    # Plot with reconstructed data over real data (corresponding points)

    cm = plt.cm.get_cmap("jet")

    for i in range(n_recons):
        color = np.array(cm(float(i) / (n_recons - 1)))

        ax[1].scatter(x[i, 0],
                      x[i, 1],
                      color=color,
                      label="real data" if i == 0 else None,
                      s=100.,
                      alpha=0.3)
        ax[1].scatter(x_hat[i, 0],
                      x_hat[i, 1],
                      color=color,
                      label="fake data" if i == 0 else None,
                      s=30.)

    ax[1].legend(loc='lower left',
                 bbox_to_anchor=(0., 1.),
                 fancybox=True,
                 shadow=False,
                 ncol=2)

    # Ax limits to have a fixed frame from update to update

    mini_1, mini_2 = torch.min(x[:, 0]), torch.min(x[:, 1])
    maxi_1, maxi_2 = torch.max(x[:, 0]), torch.max(x[:, 1])
    half_width_1, half_width_2 = abs(maxi_1 - mini_1) / 2., abs(maxi_2 -
                                                                mini_2) / 2.
    ax[0].set_xlim(mini_1 - half_width_1, maxi_1 + half_width_1)
    ax[0].set_ylim(mini_2 - half_width_2, maxi_2 + half_width_2)
    ax[1].set_xlim(mini_1 - half_width_1, maxi_1 + half_width_1)
    ax[1].set_ylim(mini_2 - half_width_2, maxi_2 + half_width_2)

    # Save the figure

    plt.tight_layout()
    os.makedirs(str(save_path.parent), exist_ok=True)
    fig.savefig(str(save_path))
    plt.close(fig)
Example #46
0

def power_law_spectrum(energy, normalisation, spectral_index):
    return normalisation * energy**(-spectral_index)


bin_edges = np.logspace(2, 5, 15)
bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])


plt.errorbar(
    np.log10(bin_centers),
    power_law_spectrum(bin_centers, 1e-12, 2.5),
    xerr=[np.log10(bin_centers) - np.log10(bin_edges[:-1]), np.log10(bin_edges[1:]) - np.log10(bin_centers)],
    yerr=0.2 * power_law_spectrum(bin_centers, 1e-12, 2.5),
    linestyle='',
)

plt.xlabel(r'$\log_{10}\bigl(E \mathbin{/} \si{\giga\electronvolt}\bigr)$')
plt.ylabel(
    r'$Φ'
    r'\mathbin{/}'
    r'\si{\per\GeV\per\second\per\steradian\per\meter\squared}$'
)

plt.text(0.1, 0.1, formula, transform=plt.gca().transAxes)
plt.yscale('log')

plt.tight_layout(pad=0)
plt.savefig('build/plot.pdf')
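A small aside, not from the original script: for logarithmically spaced bins, the geometric mean of the edges is sometimes used as the bin center, since it sits at the midpoint in log space and makes the x error bars symmetric in log10(E). A sketch:

import numpy as np

bin_edges = np.logspace(2, 5, 15)
geo_centers = np.sqrt(bin_edges[:-1] * bin_edges[1:])      # midpoints in log space
print(np.log10(geo_centers) - np.log10(bin_edges[:-1]))    # constant half-width in log10(E)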
Example #47
0
def do_gifs2(envs, agent, vae, model_dict, update_current_state,
             update_rewards, total_num_steps):
    save_dir = model_dict['save_to']
    shape_dim0 = model_dict['shape_dim0']
    num_processes = model_dict['num_processes']
    obs_shape = model_dict['obs_shape']
    dtype = model_dict['dtype']
    num_steps = model_dict['num_steps']
    gamma = model_dict['gamma']

    action_names = envs.unwrapped.get_action_meanings()

    vow = ["A", "E", "I", "O", "U"]
    # ['NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE', 'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE']
    # print (action_names)
    for aa in range(len(action_names)):
        for v in vow:
            action_names[aa] = action_names[aa].replace(v, "")
    # print (action_names)
    # fads

    num_processes = 1

    gif_path = save_dir + '/gifs/'
    makedir(gif_path, print_=False)

    gif_epoch_path = save_dir + '/gifs/gif' + str(total_num_steps) + '/'
    makedir(gif_epoch_path, print_=False, rm=True)

    n_gifs = 1

    episode_rewards = torch.zeros(
        [num_processes, 1])  #keeps track of current episode cumulative reward
    final_rewards = torch.zeros([num_processes, 1])

    # get data
    for j in range(n_gifs):

        state_frames = []
        value_frames = []
        actions_frames = []
        probs = []

        # Init state
        state = envs.reset()  # (channels, height, width)

        state = np.expand_dims(state, 0)  # (1, channels, height, width)

        current_state = torch.zeros(
            num_processes,
            *obs_shape)  # (processes, channels*stack, height, width)
        current_state = update_current_state(
            current_state, state,
            shape_dim0).type(dtype)  #add the new frame, remove oldest
        # agent.insert_first_state(current_state) #storage has states: (num_steps + 1, num_processes, *obs_shape), set first step

        agent.rollouts_list.reset()
        agent.rollouts_list.states = [current_state]

        step = 0
        done_ = False
        while not done_ and step < 400:

            state1 = np.squeeze(state[0])
            state_frames.append(state1)

            value, action, action_log_probs, dist_entropy = agent.act(
                Variable(agent.rollouts_list.states[-1], volatile=True))
            value_frames.append([value.data.cpu().numpy()[0][0]])

            action_prob = agent.actor_critic.action_dist(
                Variable(agent.rollouts_list.states[-1], volatile=True))[0]
            action_prob = np.squeeze(action_prob.data.cpu().numpy())  # [A]
            actions_frames.append(action_prob)

            # value, action = agent.act(Variable(agent.rollouts_list.states[-1], volatile=True))
            cpu_actions = action.data.squeeze(1).cpu().numpy()  #[P]
            # Step, S:[P,C,H,W], R:[P], D:[P]
            state, reward, done, info = envs.step(cpu_actions)

            state = np.expand_dims(state, 0)  # (1, channels, height, width)
            reward = np.expand_dims(reward, 0)  # (1, 1)
            done = np.expand_dims(done, 0)  # (1, 1)

            # Record rewards
            reward, masks, final_rewards, episode_rewards, current_state = update_rewards(
                reward, done, final_rewards, episode_rewards, current_state)
            # Update state
            current_state = update_current_state(current_state, state,
                                                 shape_dim0)
            # Agent record step
            # agent.insert_data(step, current_state, action.data, value.data, reward, masks)
            agent.rollouts_list.insert(step, current_state, action.data,
                                       value.data,
                                       reward.numpy()[0][0], masks)

            done_ = done[0]
            # print (done)

            step += 1
            # print ('step', step)

            state_get_prob = Variable(
                torch.from_numpy(state).float().view(1, 84, 84)).cuda()
            state_get_prob = state_get_prob / 255.0
            elbo, logpx, logpz, logqz = vae.forward(state_get_prob, k=100)
            probs.append(elbo.data.cpu().numpy())

        next_value = agent.actor_critic(
            Variable(agent.rollouts_list.states[-1], volatile=True))[0].data
        agent.rollouts_list.compute_returns(next_value, gamma)
        # print (agent.rollouts_list.returns)#.cpu().numpy())

        # print ('steps', step)
        # print ('reward_length', len(agent.rollouts_list.rewards))
        # print ('return length', len(agent.rollouts_list.returns))
        # print ('state_frames', len(state_frames))

        # if sum(agent.rollouts_list.rewards) == 0.:
        #     continue

        #make figs
        # for j in range(n_gifs):

        frames_path = gif_epoch_path + 'frames' + str(j) + '/'
        makedir(frames_path, print_=False)

        # for step in range(num_steps):
        for step in range(len(state_frames) - 1):

            if step % 10 == 0:
                print(step, len(state_frames) - 1)

            # if step > 30:
            #     break

            rows = 1
            cols = 9

            fig = plt.figure(figsize=(12, 4), facecolor='white')

            #Plot probs
            ax = plt.subplot2grid((rows, cols), (0, 0),
                                  frameon=False,
                                  colspan=1)
            min_logprob = np.min(probs)
            probs = np.array(probs) - min_logprob
            max_logprob = np.max(probs)
            probs = probs / max_logprob
            ax.bar(1, probs[step])
            ax.set_ylim([0., 1.])
            ax.set_title('State Prob', family='serif')
            ax.set_yticks([])
            ax.set_xticks([])

            # plot frame
            ax = plt.subplot2grid((rows, cols), (0, 1),
                                  frameon=False,
                                  colspan=3)
            # state1 = np.squeeze(state[0])
            state1 = state_frames[step]
            ax.imshow(state1, cmap='gray')
            ax.set_xticks([])
            ax.set_yticks([])
            ax.set_title('State ' + str(step), family='serif')

            #plot actions
            ax = plt.subplot2grid((rows, cols), (0, 4),
                                  frameon=False,
                                  colspan=3)
            action_prob = actions_frames[step]
            action_size = envs.action_space.n
            # print (action_size)
            ax.bar(range(action_size), action_prob)
            ax.set_title('Action', family='serif')
            # plt.xticks(range(action_size),['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'R_FIRE', 'L_FIRE'], fontsize=6)
            plt.xticks(range(action_size), action_names, fontsize=5)
            ax.set_ylim([0., 1.])

            #plot values histogram
            ax = plt.subplot2grid((rows, cols), (0, 7),
                                  frameon=False,
                                  colspan=2)
            values = value_frames[step]  #[0]#.cpu().numpy()
            weights = np.ones_like(values) / float(len(values))
            ax.hist(values, 50, range=[-2., 2.], weights=weights)
            ax.set_ylim([0., 1.])
            ax.set_title('Value', family='serif')
            val_return = agent.rollouts_list.returns[
                step]  #.cpu().numpy()#[0][0]
            # print(val_return)
            ax.plot([val_return, val_return], [0, 1])
            ax.set_yticks([])

            #plot fig
            plt.tight_layout(pad=1.5, w_pad=.4, h_pad=1.)
            plt_path = frames_path + 'plt' + str(step) + '.png'
            plt.savefig(plt_path)
            # print ('saved',plt_path)
            plt.close(fig)

        # Make gif

        # dir_ = home+ '/Documents/tmp/a2c_reg_and_dropout_pong2/PongNoFrameskip-v4/a2c_dropout/seed0/frames_a2c_dropout_PongNoFrameskip-v4_6000000'
        # print('making gif')
        max_epoch = 0
        for file_ in os.listdir(frames_path):
            if 'plt' in file_:
                numb = file_.split('plt')[1].split('.')[0]
                numb = int(numb)
                if numb > max_epoch:
                    max_epoch = numb
        # print ('max epoch in dir', max_epoch)

        images = []
        for i in range(max_epoch + 1):
            images.append(imageio.imread(frames_path + 'plt' + str(i) +
                                         '.png'))

        gif_path_this = gif_epoch_path + str(j) + '.gif'
        imageio.mimsave(gif_path_this, images)
        print('made gif', gif_path_this)
Example #48
0
    def EnergyConservation_test(show=False):
        cases = [(2, "circular"), (2.5, "elliptical")]
        for vel_fac, shape in cases:
            TestSolarSystem = SolarSystem()
            TestSolarSystem.CreateCelestialObject(0, 0, 0, 0, 1)
            TestSolarSystem.CreateCelestialObject(1, 0, 0, vel_fac * np.pi,
                                                  3.003e-6)

            P, V = TestSolarSystem.fill_array_c(int(1e6), 15)
            t = np.linspace(0, 15, int(1e6) + 1)
            KineticEnergyEarth = 0.5 * TestSolarSystem.ObjectMasses[1] * (
                V[:, 1, 0]**2 + V[:, 1, 1]**2)  #SolarMasses*AU**2/yr**2
            KineticEnergySun = 0.5 * TestSolarSystem.ObjectMasses[0] * (
                V[:, 0, 0]**2 + V[:, 0, 1]**2)
            KineticEnergy = KineticEnergySun + KineticEnergyEarth

            CenterOfMass = P[:, 0, :] * TestSolarSystem.ObjectMasses[
                0] + P[:, 1, :] * TestSolarSystem.ObjectMasses[1]
            distance = np.sqrt((P[:, 1, 0] - P[:, 0, 0])**2 +
                               (P[:, 0, 1] - P[:, 1, 1])**2)
            PotentialEnergy = -G * TestSolarSystem.ObjectMasses[
                0] * TestSolarSystem.ObjectMasses[1] / distance

            plt.plot(t, KineticEnergy)
            plt.plot(t, PotentialEnergy)
            plt.plot(t, KineticEnergy + PotentialEnergy)
            plt.axis([0, 15, -1.5e-4, 1.5e-4])
            plt.title("Energy of %s Planet-Sun system over 15 years" % shape)
            plt.legend(["Kinetic Energy", "Potential Energy", "Total Energy"])
            plt.xlabel("time in years")
            plt.ylabel("energy in SolarMasses*AU^2/Year^2")
            plt.tight_layout()
            plt.savefig(
                os.path.join(get_fig_dir(),
                             "energy_conservation_v=%gpi.pdf" % vel_fac))
            if show:
                plt.show()
            plt.clf()

            print(
                "Center of mass at beginning of simulation, and after 15 years:"
            )
            print(CenterOfMass[0], "\n", CenterOfMass[-1])

            AngularMomentum = TestSolarSystem.ObjectMasses[1] * np.cross(
                P[:, 1], V[:, 1])
            plt.plot(t, AngularMomentum)
            plt.title("Angular Momentum of Planet over 15 years")
            plt.xlabel("time in years")
            plt.ylabel("Angular Momentum")
            print("Relative error in angular momentum over 15 years: %e" %
                  ((np.min(AngularMomentum) - np.max(AngularMomentum)) /
                   np.min(AngularMomentum)))
            plt.axis([0, 15, 0, 4e-5])
            plt.tight_layout()
            plt.savefig(
                os.path.join(get_fig_dir(),
                             "angular_momentum_v=%gpi.pdf" % vel_fac))
            if show:
                plt.show()
            plt.clf()
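As a quick consistency check (an addition, not part of the original test): in units of AU, years and solar masses, where G = 4*pi**2, the circular case started above with v = 2*pi AU/yr should have a total energy of -G*M*m/(2a) with a = 1 AU. A sketch:

import numpy as np

G = 4 * np.pi**2                       # AU^3 / (solar mass * yr^2), assumed unit system
m_earth, m_sun, a = 3.003e-6, 1.0, 1.0
v_circ = 2 * np.pi                     # AU/yr, the circular-orbit speed used above
E_kin = 0.5 * m_earth * v_circ**2
E_pot = -G * m_sun * m_earth / a
print(E_kin + E_pot, -G * m_sun * m_earth / (2 * a))   # both ≈ -5.93e-5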
Example #49
0
def PC_traj(dfMEAN, dfSEM, PCs_toplot, rep, directory, file_type, cmap,
            drugsToPlot, start_end):
    """this function groups by drug an plots the trajectories through PC space
    Input
        dfMEAN - dataframe containing the PC values for each of the drugs
        dfSEM - dataframe containing the PC SEM for each drug at each dose
        PCs_toplot 
        rep - the name of the experiments
        directory - the directory to save the files into
        file_type - type of image ('tif' or 'svg' ...)
        cmap - colormap to use
        drugstoPlot
        start_end
    Output
        Plot showing trajectory through PC space with errorbars
        
    """
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import os

    #scale the PCs
    xscale = 1 / (np.max(dfMEAN[PCs_toplot[0]]) -
                  np.min(dfMEAN[PCs_toplot[0]]))
    yscale = 1 / (np.max(dfMEAN[PCs_toplot[1]]) -
                  np.min(dfMEAN[PCs_toplot[1]]))

    #okay so now have a summary of each drug for each PC.
    #scale and plot the drugs across the PC1 and 2 space

    #make note of drugs to plot
    if drugsToPlot == []:
        uniqueDrugs1 = np.unique(dfMEAN['drug'])
    else:
        uniqueDrugs1 = drugsToPlot

    plt.figure()
    #for each drug plot the mean and SEM in both PC1 and PC2
    for drug in range(len(uniqueDrugs1)):
        MeanPlot = dfMEAN['drug'] == uniqueDrugs1[drug]
        SemPlot = dfSEM['drug'] == uniqueDrugs1[drug]
        plottingMEAN = dfMEAN[MeanPlot]
        plottingSEM = dfSEM[SemPlot]
        ax = plt.errorbar(x=plottingMEAN[PCs_toplot[0]]*xscale, y=plottingMEAN[PCs_toplot[1]]*yscale, \
                      xerr = plottingSEM[PCs_toplot[0]]*xscale, yerr=plottingSEM[PCs_toplot[1]]*yscale, \
                       linewidth =2, linestyle = '--', color = cmap[drug], marker = 'o', label = uniqueDrugs1[drug])
        if start_end == True:
            plt.text(x=plottingMEAN[PCs_toplot[0]].iloc[0] * xscale,
                     y=plottingMEAN[PCs_toplot[1]].iloc[0] * yscale,
                     s='start')
            plt.text(x=plottingMEAN[PCs_toplot[0]].iloc[-1] * xscale,
                     y=plottingMEAN[PCs_toplot[1]].iloc[-1] * yscale,
                     s='end')
        else:
            continue
    plt.axis('scaled')
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.legend(loc='upper left',
               bbox_to_anchor=(1.1, 1.05),
               ncol=1,
               frameon=True)
    plt.tight_layout(rect=[0, 0, 1, 1])
    plt.xlabel(PCs_toplot[0])
    plt.ylabel(PCs_toplot[1])
    try:
        plt.savefig(os.path.join(os.path.dirname(directory), 'Figures', rep + '_PCtraj.' + file_type),\
                bbox_inches="tight")
    except TypeError:
        plt.savefig(os.path.join(os.path.dirname(directory), 'Figures',
                                 'PC_Traj.' + file_type),
                    bbox_inches='tight')
    plt.show()
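For reference, a self-contained sketch of the scaled errorbar-trajectory idea this function implements, for a single hypothetical drug (all numbers below are made up):

import numpy as np
import matplotlib.pyplot as plt

pc1_mean = np.array([-2.0, -1.0, 0.5, 2.0])
pc2_mean = np.array([1.0, 0.2, -0.3, -1.0])
pc1_sem = np.array([0.2, 0.15, 0.2, 0.25])
pc2_sem = np.array([0.1, 0.1, 0.15, 0.2])

# Scale each axis by its range so the trajectory spans roughly one unit.
xscale = 1 / (pc1_mean.max() - pc1_mean.min())
yscale = 1 / (pc2_mean.max() - pc2_mean.min())

plt.errorbar(pc1_mean * xscale, pc2_mean * yscale,
             xerr=pc1_sem * xscale, yerr=pc2_sem * yscale,
             linestyle='--', marker='o', linewidth=2, label='drug A')
plt.text(pc1_mean[0] * xscale, pc2_mean[0] * yscale, 'start')
plt.text(pc1_mean[-1] * xscale, pc2_mean[-1] * yscale, 'end')
plt.axis('scaled')
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('PC_1')
plt.ylabel('PC_2')
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 1.05), frameon=True)
plt.tight_layout()
plt.show()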
Example #50
0
def DT(X, y, train_size, data_name):
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=train_size)

    # https://scikit-learn.org/stable/auto_examples/tree/plot_cost_complexity_pruning.html#sphx-glr-auto-examples-tree-plot-cost-complexity-pruning-py
    # Fit classification model
    dt = DecisionTreeClassifier()
    path = dt.cost_complexity_pruning_path(X_train, y_train)
    ccp_alphas, impurities = path.ccp_alphas, path.impurities

    fig, ax = plt.subplots()
    ax.plot(ccp_alphas[:-1],
            impurities[:-1],
            marker='o',
            drawstyle="steps-post")
    ax.set_xlabel("effective alpha")
    ax.set_ylabel("total impurity of leaves")
    ax.set_title("Total Impurity vs effective alpha for training set")

    clfs = []
    for ccp_alpha in ccp_alphas:
        clf = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
        clf.fit(X_train, y_train)
        clfs.append(clf)
    print("Number of nodes in the last tree is: {} with ccp_alpha: {}".format(
        clfs[-1].tree_.node_count, ccp_alphas[-1]))

    # %%
    # For the remainder of this example, we remove the last element in
    # ``clfs`` and ``ccp_alphas``, because it is the trivial tree with only one
    # node. Here we show that the number of nodes and tree depth decreases as alpha
    # increases.
    clfs = clfs[:-1]
    ccp_alphas = ccp_alphas[:-1]

    node_counts = [clf.tree_.node_count for clf in clfs]
    depth = [clf.tree_.max_depth for clf in clfs]
    fig, ax = plt.subplots(2, 1)
    ax[0].plot(ccp_alphas, node_counts, marker='o', drawstyle="steps-post")
    ax[0].set_xlabel("alpha")
    ax[0].set_ylabel("number of nodes")
    ax[0].set_title("Number of nodes vs alpha")
    ax[1].plot(ccp_alphas, depth, marker='o', drawstyle="steps-post")
    ax[1].set_xlabel("alpha")
    ax[1].set_ylabel("depth of tree")
    ax[1].set_title("Depth vs alpha")
    fig.tight_layout()

    # %%
    # Accuracy vs alpha for training and testing sets
    # ----------------------------------------------------
    # When ``ccp_alpha`` is set to zero and keeping the other default parameters
    # of :class:`DecisionTreeClassifier`, the tree overfits, leading to
    # a 100% training accuracy and 88% testing accuracy. As alpha increases, more
    # of the tree is pruned, thus creating a decision tree that generalizes better.
    # In this example, setting ``ccp_alpha=0.015`` maximizes the testing accuracy.
    train_scores = [clf.score(X_train, y_train) for clf in clfs]
    test_scores = [clf.score(X_test, y_test) for clf in clfs]

    fig, ax = plt.subplots()
    ax.set_xlabel("alpha")
    ax.set_ylabel("accuracy")
    ax.set_title("Accuracy vs alpha for training and testing sets")
    ax.plot(ccp_alphas,
            train_scores,
            marker='o',
            label="train",
            drawstyle="steps-post")
    ax.plot(ccp_alphas,
            test_scores,
            marker='o',
            label="test",
            drawstyle="steps-post")
    ax.legend()
    plt.show()
    # %%
    best_alpha = 0.040790348647614105
    # %%
    # Create CV training and test scores for various training set sizes
    train_sizes, train_scores, test_scores = learning_curve(
        DecisionTreeClassifier(ccp_alpha=best_alpha),
        X,
        y,
        # Number of folds in cross-validation
        cv=5,
        # Evaluation metric
        scoring='accuracy',
        # Use all computer cores
        n_jobs=-1,
        # 50 different sizes of the training set
        train_sizes=np.linspace(0.01, 1.0, 50))

    print(train_scores)
    # Create means and standard deviations of training set scores
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)

    # Create means and standard deviations of test set scores
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)

    # Draw lines
    plt.plot(train_sizes,
             train_mean,
             '--',
             color="#111111",
             label="Training score")
    plt.plot(train_sizes,
             test_mean,
             color="#111111",
             label="Cross-validation score")

    # Draw bands
    plt.fill_between(train_sizes,
                     train_mean - train_std,
                     train_mean + train_std,
                     color="#DDDDDD")
    plt.fill_between(train_sizes,
                     test_mean - test_std,
                     test_mean + test_std,
                     color="#DDDDDD")

    # Create plot
    plt.title("DT Learning Curve - {}".format(data_name))
    plt.xlabel("Training Set Size"), plt.ylabel("Accuracy Score"), plt.legend(
        loc="best")
    plt.tight_layout()
    plt.show()
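A hypothetical usage sketch, assuming the imports used inside DT (StandardScaler, train_test_split, DecisionTreeClassifier, learning_curve, numpy, pyplot) are available at module level as in the original script:

# Hypothetical call on a small built-in dataset, just to show the expected inputs.
from sklearn.datasets import load_iris

iris = load_iris()
DT(iris.data, iris.target, train_size=0.7, data_name="iris")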
Example #51
0
    t_atmosphere = datalib.ATData(lda*1e-9)
    fig = plt.figure()
    plt.plot(lda*1e-3, t_atmosphere*100,'k', alpha = 0.1, label='Atmospheric \n transmittance')
    plt.plot(lda*1e-3, (1-np_R*calR-np_T*calT)*100,'k', label = 'Total absorption \n (measured)')
    plt.plot(lda*1e-3, (1-Tideal-Rideal)*100, 'k:', label = 'Total absorption \n (simulated)')
    plt.plot(lda*1e-3, Aideal[:,1]*100,'b:', label = 'Roughness layer \n (6.8% $SiO_{2}$ Brugg.)')
    plt.plot(lda*1e-3, Aideal[:,2]*100,'r:', label = 'Nanoparticle layer \n (23.8% $SiO_2$ Brugg.)')
    plt.plot(lda*1e-3, Aideal[:,4]*100,'m:', label = 'Si Substrate')
    #plt.plot(lda, Aideal[:,3]*100,'y:', label = 'SiO2 native oxide absorption')
    
    plt.xlabel('Wavelength (um)')
    plt.ylabel('Absorption (%)')
    #plt.title('Absorption at normal incidence')
    #ax.legend().draggable()
    
    plt.tight_layout(rect=[-0.10,0,0.75,1])
    plt.legend(bbox_to_anchor=(1.04, 1))
    plt.show() 
else:
    AM1p5 = datalib.AM(lda*1e-9)            
    fig = plt.figure()
    plt.plot(lda, (AM1p5/(1.4*1e9))*100,'k', alpha = 0.1, label='AM1.5')
    plt.plot(lda, (1-np_R*calR-np_T)*100,'r', label = 'Total absorption \n (measured)')
    plt.plot(lda, (1-Rideal-Tideal)*100, 'r--', label = 'Total absorption \n (simulated)')
    plt.plot(lda, Aideal[:,1]*100,'b:', label = 'Roughness layer \n (6.8% $SiO_{2}$ Brugg.)')
    plt.plot(lda, Aideal[:,2]*100,'k:', label = 'Nanoparticle layer \n (23.8% $SiO_2$ Brugg.)')
    plt.plot(lda, Aideal[:,4]*100,'m:', label = 'Si Substrate')
    plt.plot(lda, (np_RD/np_R)*100,'c', label = 'Diffuse reflection \n contribution \n (measured)')
    #plt.plot(lda, Aideal[:,3]*100,'y:', label = 'SiO2 native oxide absorption')
    
    plt.xlabel('Wavelength (nm)')
Example #52
0
def do_gifs(envs, agent, model_dict, update_current_state, update_rewards,
            total_num_steps):
    save_dir = model_dict['save_to']
    shape_dim0 = model_dict['shape_dim0']
    num_processes = model_dict['num_processes']
    obs_shape = model_dict['obs_shape']
    dtype = model_dict['dtype']
    num_steps = model_dict['num_steps']
    gamma = model_dict['gamma']

    num_processes = 1

    gif_path = save_dir + '/gifs/'
    makedir(gif_path, print_=False)

    gif_epoch_path = save_dir + '/gifs/gif' + str(total_num_steps) + '/'
    makedir(gif_epoch_path, print_=False, rm=True)

    n_gifs = 1

    episode_rewards = torch.zeros(
        [num_processes, 1])  #keeps track of current episode cumulative reward
    final_rewards = torch.zeros([num_processes, 1])

    # get data
    for j in range(n_gifs):

        state_frames = []
        value_frames = []
        actions_frames = []
        actions_frames2 = []

        # Init state
        state = envs.reset()  # (channels, height, width)

        state = np.expand_dims(state, 0)  # (1, channels, height, width)

        current_state = torch.zeros(
            num_processes,
            *obs_shape)  # (processes, channels*stack, height, width)
        current_state = update_current_state(
            current_state, state,
            shape_dim0).type(dtype)  #add the new frame, remove oldest
        # agent.insert_first_state(current_state) #storage has states: (num_steps + 1, num_processes, *obs_shape), set first step

        agent.rollouts_list.reset()
        agent.rollouts_list.states = [current_state]

        step = 0
        done_ = False
        while not done_ and step < 400:

            state1 = np.squeeze(state[0])
            state_frames.append(state1)

            value, action, action_log_probs, dist_entropy = agent.act(
                Variable(agent.rollouts_list.states[-1], volatile=True))
            value_frames.append([value.data.cpu().numpy()[0][0]])

            action_prob = agent.actor_critic.action_dist(
                Variable(agent.rollouts_list.states[-1], volatile=True))[0]
            action_prob = np.squeeze(action_prob.data.cpu().numpy())  # [A]
            actions_frames.append(action_prob)

            # value, action = agent.act(Variable(agent.rollouts_list.states[-1], volatile=True))
            cpu_actions = action.data.squeeze(1).cpu().numpy()  #[P]
            actions_frames2.append(cpu_actions)
            # Step, S:[P,C,H,W], R:[P], D:[P]
            state, reward, done, info = envs.step(cpu_actions)

            state = np.expand_dims(state, 0)  # (1, channels, height, width)
            reward = np.expand_dims(reward, 0)  # (1, 1)
            done = np.expand_dims(done, 0)  # (1, 1)

            # Record rewards
            reward, masks, final_rewards, episode_rewards, current_state = update_rewards(
                reward, done, final_rewards, episode_rewards, current_state)
            # Update state
            current_state = update_current_state(current_state, state,
                                                 shape_dim0)
            # Agent record step
            # agent.insert_data(step, current_state, action.data, value.data, reward, masks)
            agent.rollouts_list.insert(step, current_state, action.data,
                                       value.data,
                                       reward.numpy()[0][0], masks,
                                       action_log_probs)

            done_ = done[0]
            # print (done)

            step += 1
            # print ('step', step)

        next_value = agent.actor_critic(
            Variable(agent.rollouts_list.states[-1], volatile=True))[0].data
        agent.rollouts_list.compute_returns(next_value, gamma)
        # print (agent.rollouts_list.returns)#.cpu().numpy())

        # print ('steps', step)
        # print ('reward_length', len(agent.rollouts_list.rewards))
        # print ('return length', len(agent.rollouts_list.returns))
        # print ('state_frames', len(state_frames))

        # if sum(agent.rollouts_list.rewards) == 0.:
        #     continue

        #make figs
        # for j in range(n_gifs):

        frames_path = gif_epoch_path + 'frames' + str(j) + '/'
        makedir(frames_path, print_=False)

        # for step in range(num_steps):
        for step in range(len(state_frames) - 1):

            if step % 10 == 0:
                print(step, len(agent.rollouts_list.returns) - 1)

            rows = 1
            cols = 2

            fig = plt.figure(figsize=(8, 4), facecolor='white')

            # plot frame
            ax = plt.subplot2grid((rows, cols), (0, 0), frameon=False)

            # state1 = np.squeeze(state[0])
            state1 = state_frames[step]
            ax.imshow(state1, cmap='gray')
            ax.set_xticks([])
            ax.set_yticks([])
            ax.set_title('State ' + str(step), family='serif')

            action_ = actions_frames2[step][0]
            if 'Montezuma' in model_dict['env']:
                # Map Montezuma action ids to a readable label and colour.
                action_labels = {2: ('Up', 'Blue'), 10: ('Up', 'Blue'),
                                 5: ('Down', 'Green'), 13: ('Down', 'Green'),
                                 0: ('NoOp', 'Black'), 1: ('NoOp', 'Black'),
                                 3: ('Right', 'Purple'), 11: ('Right', 'Purple'),
                                 4: ('Left', 'Yellow'), 12: ('Left', 'Yellow')}
                if action_ in action_labels:
                    label, color = action_labels[action_]
                    ax.text(.45, -.1, label, transform=ax.transAxes, color=color)

            # #plot values histogram
            # ax = plt.subplot2grid((rows,cols), (0,2), frameon=False)

            # values = value_frames[step]#[0]#.cpu().numpy()
            # weights = np.ones_like(values)/float(len(values))
            # ax.hist(values, 50, range=[0., 8.], weights=weights)

            # ax.set_ylim([0.,1.])
            # ax.set_title('Value',family='serif'

            # # print ()
            # val_return = agent.rollouts_list.returns[step] #.cpu().numpy()#[0][0]
            # # print(val_return)
            # ax.plot([val_return,val_return],[0,1])

            #plot actions
            ax = plt.subplot2grid((rows, cols), (0, 1), frameon=False)

            # action_prob = agent.actor_critic.action_dist(Variable(agent.rollouts_list.states[-1], volatile=True))[0]
            # action_prob = np.squeeze(action_prob.data.cpu().numpy())
            action_prob = actions_frames[step]
            action_size = envs.action_space.n
            # print (action_size)

            # print (action_)

            barlist = ax.bar(range(action_size), action_prob)
            barlist[action_].set_color('black')

            ax.set_title('Action', family='serif')
            # plt.xticks(range(action_size),['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'R_FIRE', 'L_FIRE'], fontsize=6)
            plt.xticks(range(action_size),
                       [str(x) for x in range(action_size)],
                       fontsize=6)
            ax.set_ylim([0., 1.])

            #plot fig
            plt.tight_layout(pad=3., w_pad=2.5, h_pad=1.0)
            plt_path = frames_path + 'plt' + str(step) + '.png'
            plt.savefig(plt_path)
            # print ('saved',plt_path)
            plt.close(fig)

        # Make gif

        # dir_ = home+ '/Documents/tmp/a2c_reg_and_dropout_pong2/PongNoFrameskip-v4/a2c_dropout/seed0/frames_a2c_dropout_PongNoFrameskip-v4_6000000'
        # print('making gif')
        max_epoch = 0
        for file_ in os.listdir(frames_path):
            if 'plt' in file_:
                numb = file_.split('plt')[1].split('.')[0]
                numb = int(numb)
                if numb > max_epoch:
                    max_epoch = numb
        # print ('max epoch in dir', max_epoch)

        images = []
        for i in range(max_epoch + 1):
            images.append(imageio.imread(frames_path + 'plt' + str(i) +
                                         '.png'))

        # print (gif_epoch_path)

        gif_path_this = gif_epoch_path + str(total_num_steps) + '_' + str(
            j) + '.gif'
        imageio.mimsave(gif_path_this, images)
        print('made gif', gif_path_this)
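The frame-collection step above finds the largest frame index by scanning the file names and then rebuilding each path one by one. An equivalent sketch (assuming the same frames_path directory, the 'plt<N>.png' naming and the gif_path_this value built above) sorts the files numerically and writes the GIF in one pass:

import os
import imageio

# Collect the rendered frames in numeric order.
frame_files = sorted(
    (f for f in os.listdir(frames_path) if f.startswith('plt') and f.endswith('.png')),
    key=lambda f: int(f[len('plt'):-len('.png')]))
images = [imageio.imread(os.path.join(frames_path, f)) for f in frame_files]
imageio.mimsave(gif_path_this, images)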
Пример #53
0
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)

c0 = ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0",
                 alpha=0.5)
c1 = ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1",
                 alpha=0.5)
ax1.set_title('Original set')

ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
            label="Class #0", alpha=.5)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
            label="Class #1", alpha=.5)
ax2.set_title('ADASYN')

# make nice plotting
for ax in (ax1, ax2):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    ax.spines['left'].set_position(('outward', 10))
    ax.spines['bottom'].set_position(('outward', 10))
    ax.set_xlim([-6, 8])
    ax.set_ylim([-6, 6])

plt.figlegend((c0, c1), ('Class #0', 'Class #1'), loc='lower center',
              ncol=2, labelspacing=0.)
plt.tight_layout(pad=3)
plt.show()
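The plotting code above assumes X_vis, y, X_res_vis and y_resampled already exist. A minimal setup sketch (an assumption, following the usual imbalanced-learn pattern: oversample a toy imbalanced set with ADASYN, then project to two dimensions with PCA for plotting):

from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import ADASYN

# Imbalanced two-class toy data (roughly 10% / 90% split).
X, y = make_classification(n_classes=2, weights=[0.1, 0.9], n_features=20,
                           n_informative=3, n_redundant=1, flip_y=0,
                           class_sep=2, n_clusters_per_class=1,
                           n_samples=5000, random_state=10)

# 2-D projection used only for visualisation.
pca = PCA(n_components=2)
X_vis = pca.fit_transform(X)

# Oversample the minority class with ADASYN and project the result the same way.
ada = ADASYN(random_state=42)
X_resampled, y_resampled = ada.fit_resample(X, y)
X_res_vis = pca.transform(X_resampled)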
Пример #54
0
def plot():
    for file in os.listdir('Fst_stats'):
        if fnmatch.fnmatch(file, 'pop1_pop2_flt_results_sorted.csv'):
            # Import csv file with Fst results.
            gl = pd.read_csv('Fst_stats/pop1_pop2_flt_results_sorted.csv')

            # Optimize memory usage.
            gl_int = gl.select_dtypes(include=['int'])
            converted_int = gl_int.apply(pd.to_numeric, downcast='unsigned')
            gl_float = gl.select_dtypes(include=['float'])
            converted_float = gl_float.apply(pd.to_numeric, downcast='float')
            optimized_gl = gl.copy()
            optimized_gl[converted_int.columns] = converted_int
            optimized_gl[converted_float.columns] = converted_float

            # Convert CHROM column from object to category.
            gl_obj = gl.select_dtypes(include=['object']).copy()
            chrom = gl_obj.CHROM
            chrom_cat = chrom.astype('category')
            converted_obj = pd.DataFrame()

            # If unique values are more than 50% of the data do not
            # convert to category, it will not optimize memory usage.
            for col in gl_obj.columns:
                num_unique_values = len(gl_obj[col].unique())
                num_total_values = len(gl_obj[col])
                if num_unique_values / num_total_values < 0.5:
                    converted_obj.loc[:, col] = gl_obj[col].astype('category')
                else:
                    converted_obj.loc[:, col] = gl_obj[col]

            # Apply on the csv file.
            optimized_gl[converted_obj.columns] = converted_obj
            dtypes_col = optimized_gl.dtypes.index
            dtypes_type = [i.name for i in optimized_gl.dtypes.values]
            column_types = dict(zip(dtypes_col, dtypes_type))
            read_and_optimized = pd.read_csv(
                'Fst_stats/pop1_pop2_flt_results_sorted.csv', dtype=column_types)

            # Rename the read and optimized csv file
            # from the Fst analysis to "df".
            df = read_and_optimized
            df['code'] = chrom_cat.cat.codes

            df['ind'] = range(len(df))
            df_grouped = df.groupby('code')

            # Dict for the contig names and index number.
            names = dict(enumerate(df['CHROM'].cat.categories))

            # Make plot of data.
            fig = plt.figure(figsize=(80, 20))
            ax = fig.add_subplot(111)
            colors = ['green', 'turquoise', 'blue', 'purple',
                      'red', 'orange', 'yellow']
            x_labels = []
            x_labels_pos = []
            for num, (name, group) in enumerate(df_grouped):
                group.plot(kind='scatter', x='ind', y='WEIR_AND_COCKERHAM_FST',
                           color=colors[num % len(colors)], ax=ax)
                x_labels.append(name)
                x_labels_pos.append(group['ind'].iloc[-1]
                                    - (group['ind'].iloc[-1]
                                       - group['ind'].iloc[0]) / 2)

            # Axis formatting only needs to be applied once, after the loop.
            ax.set_xticks(x_labels_pos)
            ax.set_xticklabels(x_labels, rotation='vertical', fontsize=10)
            ax.set_xlim([0, len(df)])
            ax.set_ylim([0, 1])
            ax.set_xlabel('contigs', fontsize=24)
            ax.set_ylabel('Fst value', fontsize=24)
            ax.set_title('Weir and Cockerham Fst', fontsize=40)
            plt.tick_params(axis='x', length=0.01)

            # Add legend with key values paired with the name of the contig.
            legend_list = [[key, value] for key, value in names.items()]
            plt.legend(legend_list, bbox_to_anchor=(1.01, 1),
                       ncol=5, borderaxespad=0)
            plt.tight_layout(pad=7)

            # Save plot as pdf.
            plt.savefig("Fst_stats/Fst_plot_vcftools.pdf")
Пример #55
0
# plot histograms of R2
field='r2_mul'

fig, ax = plt.subplots(figsize=(9, 9))

# Histogram bin edges: round the data range outward to the nearest 0.1,
# then use roughly 0.02-wide bins.
bin_lo = floor(min(df[field]) * 10) / 10
bin_hi = ceil(max(df[field]) * 10) / 10
bins = np.linspace(bin_lo, bin_hi, int((bin_hi - bin_lo) / 0.02))

for reg in reg_list:
    ax = plt.subplot(3, 2, reg)
    plt.hist(df[(df['source'] == phsource) & (df.reg == reg)][field], bins=bins)
    plt.title(f'reg{reg}')
#    plt.xlim([0, 1])
    plt.yscale('log')
    plt.ylabel('count')
    plt.xlabel(field)
#    plt.ylim(top=3000)
plt.tight_layout()




# plot histogram of R2
reg = 2
field = 'r2_mul'
plt.hist(np.log10(df[(df['source'] == phsource) & (df.reg == reg)][field]), bins=50)
plt.title(f'reg{reg}\n{reg_desc[reg]}')
#plt.yscale('log')
plt.xlabel(r'$\log_{10}(r^2)$')
plt.ylabel('count')
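The two histogram snippets above rely on df, reg_list, reg_desc and phsource, none of which are defined here. A hypothetical stand-in context (dummy data, meant only to make the snippets runnable; every name and value below is an assumption):

from math import floor, ceil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
reg_list = [1, 2, 3, 4, 5, 6]                    # assumed region ids (fills the 3x2 grid)
reg_desc = {r: f'description of reg{r}' for r in reg_list}
phsource = 'catalog_A'                           # assumed source label
df = pd.DataFrame({
    'source': rng.choice([phsource, 'catalog_B'], size=2000),
    'reg': rng.choice(reg_list, size=2000),
    'r2_mul': rng.beta(2.0, 5.0, size=2000),     # dummy multiple-R^2 values in (0, 1)
})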


Пример #56
0
def main(save=True):
    #data for 20180404 reduction
    #vars = ['R0', 'ALP_I', 'ALP_O', 'KSI0', 'G', 'E']
    #r0 = [63, 73, 83] #73-83
    #alp_i = [2.5, 5, 7.5] #5-7.5
    #alp_o = [-2.5, -5] #-2.5
    #ksi0 = [1.5, 2, 3] #1.5
    #g = [0.2, 0.4, 0.6] #0.6
    #e = [0, 0.06, 0.1] #doesn't matter
    #params = [r0, alp_i, alp_o, ksi0, g, e]

    #set variables to plot against each other
    vars = ['R0', 'ALP_I', 'ALP_O', 'G', 'KSI0', 'BETA']
    dir = '20180503/'

    data = np.loadtxt(dir + 'grid_search_stats_20180503.txt', dtype='str')

    for i in range(len(vars)):
        col = np.squeeze(np.where(data[0, :] == vars[i]))  #eg 9
        values = np.unique(data[1:, col]).astype('float')
        if i == 0:
            params = [values]
        else:
            params.append(values)

    chi_col_index = np.squeeze(np.where(data[0, :] == 'CHISQ/dof'))

    for i in range(len(params) - 1):
        for j in range(i + 1, len(params)):
            xparam = vars[i]  #eg 'r0'
            yparam = vars[j]  #eg 'alp_i'

            xcol = np.squeeze(np.where(data[0, :] == vars[i]))  #eg 9
            ycol = np.squeeze(np.where(data[0, :] == vars[j]))  #eg 3

            im = np.zeros((len(params[j]), len(params[i])))

            for x in range(len(params[i])):
                for y in range(len(params[j])):
                    loc = np.where(
                        (data[1:, xcol].astype('float') == params[i][x])
                        & (data[1:, ycol].astype('float') == params[j][y]))
                    loc = np.squeeze(np.array(loc))

                    im[y, x] = np.mean(data[loc + 1,
                                            chi_col_index].astype('float'))

            # Map parameter names to LaTeX labels for the axis titles.
            param_labels = {'R0': r'$r_0$', 'ALP_I': r'$\alpha_{in}$',
                            'ALP_O': r'$\alpha_{out}$', 'BETA': r'$\beta$',
                            'KSI0': r'$\xi$', 'G': r'$g$'}
            tit1 = param_labels[xparam]
            tit2 = param_labels[yparam]

            plt.imshow(im, interpolation='none')
            plt.xticks(range(len(params[i])), params[i])
            plt.yticks(range(len(params[j])), params[j])
            plt.title(tit1 + ' vs. ' + tit2 + " mean " + r'$\chi_{\nu}^{2}$',
                      fontsize=21)
            plt.tight_layout()
            plt.subplots_adjust(bottom=0.1, top=0.93)  #make space for title
            plt.xlabel(tit1, fontsize=21)
            plt.ylabel(tit2, fontsize=21)
            plt.colorbar()

            if save:
                plt.savefig(dir + 'contour ' + xparam + ' vs ' + yparam +
                            '.png')
                plt.close()
            else:
                plt.show()
Пример #57
0
def plot_tracking_history(path, sequence_idx, data=False, filename_log=False, kitti=Kitti,
                          final_frame_idx=None, disp='show', only_alive=False, show_cov=False, show_predictions=None,
                          config_name='', car_van_flag=False, fafe=False, num_conseq_frames=None):

    if fafe and num_conseq_frames is None:
        raise ValueError("Fafe needs num conseq frames")
    if not data and not filename_log:
        raise ValueError("Neither data nor a filename to a log file was specified.")

    if filename_log and not data:
        data = load_logging_data(filename_log)

    if not os.path.exists(path):
        os.mkdir(path)

    fig, ax = plt.subplots(2, 1)
    gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
    fig.set_size_inches(10, 15, forward=True)

    img = kitti.load_image(sequence_idx=sequence_idx, frame_idx=final_frame_idx)
    ax[0].imshow(img)
    ax[0].grid(False)

    ego_vehicle = patches.Rectangle((-0.5, -2), 1, 4, color="blue", alpha=0.50)
    ax[1].add_patch(ego_vehicle)

    for l in kitti.lbls[final_frame_idx]:

        if car_van_flag:
            if l.type[0] not in ['Car', 'Van']:
                continue
        else:
            if l.type[0] == 'DontCare':
                continue

        x_pos = l.location[0]
        z_pos = l.location[2]
        pos = np.array([x_pos, z_pos])
        width = l.dimensions[1]
        length = l.dimensions[2]

        if x_pos <= XLIM[0] or x_pos >= XLIM[1] or z_pos <= ZLIM[0] or z_pos >= ZLIM[1]:
            continue

        rot_y = l.rotation_y
        _xm = - width / 2
        _zm = - length / 2
        _xp = width / 2
        _zp = length / 2

        _bbox = np.matrix([[_xm, _zm], [_xm, _zp], [_xp, _zp], [_xp, _zm]])
        _phi = np.pi / 2 - rot_y
        _rotm = np.matrix([[np.cos(_phi), -np.sin(_phi)], [np.sin(_phi), np.cos(_phi)]])
        _rotated_bbox = (_rotm * _bbox.T).T + pos
        r = patches.Polygon(_rotated_bbox, color="red", alpha=0.2)
        ax[1].add_patch(r)
        ax[1].text(x_pos + 0.5 * width, z_pos + 0.5 * length, str(l.track_id), color='black')
        ax[1].plot(x_pos, z_pos, 'r.', ms=0.5)

    # Plot current measurements. With fafe, the measurements are aligned differently.
    if fafe:
        meas_frame_idx = final_frame_idx - num_conseq_frames + 1
    else:
        meas_frame_idx = final_frame_idx
    measurements = measurements_from_log(data=data, frame_idx=meas_frame_idx)
    for meas in measurements:
        ax[1].plot(meas[0], meas[1], 'rs', markerfacecolor='none')

    es, ev, et, eps, epv = create_estimated_trajectory(data=data)

    for tid, state in es.items():
        frame_indeces = et[tid]
        if only_alive:
            if final_frame_idx not in frame_indeces:
                continue
        last_idx_to_plot = frame_indeces.index(final_frame_idx)
        states_to_plot = []
        for idx, frame_idx in enumerate(frame_indeces[0:last_idx_to_plot + 1]):
            current_state = state[idx][0:2]
            for i in range(frame_idx, final_frame_idx):
                current_velocity = kitti.get_ego_bev_velocity(frame_idx=i)
                current_rotation = kitti.get_ext_bev_rotation(frame_idx=i)
                current_state = current_rotation @ (current_state - kitti.dT * current_velocity)
            states_to_plot.append(current_state)
        _ex = [x[0, 0] for x in states_to_plot]
        _ez = [x[1, 0] for x in states_to_plot]
        _c = cnames[tid % len(cnames)]
        ax[1].plot(_ex, _ez, color=_c, linewidth=2)
        ax[1].plot(_ex[last_idx_to_plot], _ez[last_idx_to_plot], color=_c, marker='o', markerfacecolor='none', ms=5)
        if not (_ex[-1] <= XLIM[0] or _ex[-1] >= XLIM[1] or _ez[-1] <= ZLIM[0] or _ez[-1] >= ZLIM[1]):
            ax[1].text(_ex[last_idx_to_plot] - 1, _ez[last_idx_to_plot] + 1, str(tid), color=_c)

        if show_cov:
            _cov = ev[tid][last_idx_to_plot][0:2, 0:2]
            _cnt = np.array([[_ex[last_idx_to_plot]], [_ez[last_idx_to_plot]]])
            _ = plot_cov_ellipse(_cov, _cnt, nstd=3, ax=ax[1], alpha=0.5, color=_c)

        if show_predictions is not None:
            _pex = [x[0, 0] for x in eps[tid][last_idx_to_plot]]
            _pez = [x[1, 0] for x in eps[tid][last_idx_to_plot]]
            ax[1].plot(_pex, _pez, linestyle='--', marker='^', color=_c, linewidth=0.5, ms=4)

    ax[1].set_xlim(XLIM[0], XLIM[1])
    ax[1].set_ylim(ZLIM[0], ZLIM[1])
    ax[1].grid(True)
    plt.tight_layout()

    if disp == 'show':
        plt.show()
    elif disp == 'save':
        fig.savefig(
            path + '/' + config_name + '_track_seq_' + str(sequence_idx).zfill(4) + '_frame_' + str(final_frame_idx).zfill(
                4) + '.png')
        plt.close(fig)
    else:
        raise ValueError("Unknown disp option: " + str(disp))
Пример #58
0
# Plot feature-importance charts, with features sorted from most to least important.
mp.figure('Feature Importance', facecolor='lightgray')
mp.rcParams['font.sans-serif'] = 'SimHei'
mp.subplot(211)
mp.title('Decision Tree FI')
mp.ylabel('Feature Importance')
mp.grid(linestyle=":")
# Sort features by importance.
# names = boston.feature_names
names = np.array(
    ['Crime rate', 'Residential land ratio', 'Commercial land ratio', 'Borders river',
     'Air quality', 'Rooms', 'House age', 'Distance to city center', 'Road density',
     'Property tax', 'Pupil-teacher ratio', 'Black population ratio',
     'Lower-status population ratio'])
sorted_indexes = dt_fi.argsort()[::-1]  # indices sorted by importance, descending
x = np.arange(names.size)
mp.bar(x, dt_fi[sorted_indexes], 0.7, color='dodgerblue', label='DTFI')
mp.xticks(x, names[sorted_indexes])  # set x-axis tick labels
mp.tight_layout()
mp.legend()

mp.subplot(212)
mp.title('AdaBoostRegressor FI')
mp.ylabel('AdaBoostRegressor FI')
mp.grid(linestyle=":")
# Sort features by importance.
# names = boston.feature_names
names = np.array(
    ['Crime rate', 'Residential land ratio', 'Commercial land ratio', 'Borders river',
     'Air quality', 'Rooms', 'House age', 'Distance to city center', 'Road density',
     'Property tax', 'Pupil-teacher ratio', 'Black population ratio',
     'Lower-status population ratio'])
sorted_indexes = ad_fi.argsort()[::-1]  # indices sorted by importance, descending
x = np.arange(names.size)
mp.bar(x, ad_fi[sorted_indexes], 0.7, color='orangered', label='ABRFI')
mp.xticks(x, names[sorted_indexes])  # set x-axis tick labels
mp.tight_layout()
mp.legend()
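The arrays dt_fi and ad_fi used above are not defined in the snippet. A minimal sketch of one way they could be produced (an assumption: the feature_importances_ of a DecisionTreeRegressor and an AdaBoostRegressor fitted on the Boston housing data, a dataset that was removed from scikit-learn 1.2 and later):

import sklearn.datasets as sd
import sklearn.tree as st
import sklearn.ensemble as se

# Boston housing data (available in scikit-learn < 1.2).
boston = sd.load_boston()
x, y = boston.data, boston.target

# Importances from a single decision tree ...
dt_model = st.DecisionTreeRegressor(max_depth=4)
dt_model.fit(x, y)
dt_fi = dt_model.feature_importances_

# ... and from an AdaBoost ensemble of such trees.
ad_model = se.AdaBoostRegressor(st.DecisionTreeRegressor(max_depth=4),
                                n_estimators=400, random_state=7)
ad_model.fit(x, y)
ad_fi = ad_model.feature_importances_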
Пример #59
0
    imgs_comb = np.concatenate((reference_batch_np, input_batch_np), axis=2)
    # cv2.imshow('Main', imgs_comb)
    #####################################################################
    # this is done for matplotlib visualisation
    h, w = 30, 60  # for raster image
    nrows, ncols = 5, 10  # array of sub-plots
    # figsize = [6, 8]  # figure size, inches
    figsize = [10, 10]  # figure size, inches
    # create figure (fig), and array of axes (ax)
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)

    # plot simple raster image on each sub-plot
    for i, axi in enumerate(ax.flat):
        img = imgs_comb[i]
        # img = input_batch[i]
        img = np.moveaxis(img, 0, -1)  # CHW -> HWC for imshow
        # plt.imshow(img)
        axi.imshow(img, alpha=1)
        axi.set_title("dis:" + str(round(euclidean_distance[i], 3)))
        axi.axis('off')
        # axi.set_title("Row:" + str(rowid) + ", Col:" + str(colid))

    plt.tight_layout()
    plt.show()
    #################################################################################
    ref = reference_batch[0][indices_mn].unsqueeze(0)
    inp = input_batch[indices_mn].unsqueeze(0)
    concatenated = torch.cat((inp, ref), 0)
    imshow(torchvision.utils.make_grid(concatenated),
           'Color_of_light: {}'.format(light_color + str(round(dismalirty, 3))))
    ############################################################################################
Пример #60
0
data = h5py.File(recodir+file, 'r+')

imgs = []
for key in data['images']:
    if key=='xml':
        continue
    imgs.append(data['images'][key]['data'][:])

data.close()

img_set = imgs[show_set][:]
if np.iscomplexobj(img_set):
    img_set = abs(img_set)

slices = img_set.shape[0]
if slices > 1:
    columns = int(np.sqrt(slices))
    rows = int(slices/columns+1)
    maxsig = max(abs(img_set.flatten()))
    fig=plt.figure(figsize=(12, 12))
    for i in range(slices):
        fig.add_subplot(rows, columns, i + 1)
        plt.imshow(img_set[i, 0, 0], cmap='gray', vmin=0, vmax=maxsig,
                   interpolation='none')
        plt.axis('off')
    plt.tight_layout(pad=0.2)


plt.figure(figsize=(10,10))
plt.imshow(img_set[show_slice,0,0], 'gray', interpolation='none')
plt.axis('off')