Example #1
    def plot_results(self):
        """
        A simple script to plot the balance of the portfolio, or
        "equity curve", as a function of time.
        It requires OUTPUT_DIR to be set in the project
        settings.
        """
        sns.set_palette("deep", desat=0.6)
        sns.set_context(rc={"figure.figsize": (8, 4)})

        equity_file = os.path.join(settings.OUTPUT_DIR, "output.csv")
        equity = pd.read_csv(equity_file, parse_dates=True, header=0, index_col=0)

        # Plot three charts: Equity curve, period returns, drawdowns
        fig = plt.figure()
        fig.patch.set_facecolor("white")  # Set the outer colour to white

        # Plot the equity curve
        ax1 = fig.add_subplot(311, ylabel="Portfolio value")
        equity["Equity"].plot(ax=ax1, color=sns.color_palette()[0])

        # Plot the returns
        ax2 = fig.add_subplot(312, ylabel="Period returns")
        equity["Returns"].plot(ax=ax2, color=sns.color_palette()[1])

        # Plot the drawdowns
        ax3 = fig.add_subplot(313, ylabel="Drawdowns")
        equity["Drawdown"].plot(ax=ax3, color=sns.color_palette()[2])

        # Plot the figure
        plt.show()
Example #2
def plot_summary_no_feval(f, X_design, model, prefix, G, Gamma_name):
    """
    Plot a summary of the current iteration.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import seaborn as sns
    X = model.X
    y = model.Y
    m_s, k_s = model.predict(X_design, full_cov=True)
    m_05, m_95 = model.predict_quantiles(X_design)
    fig, ax1 = plt.subplots()
    ax1.plot(X, y, 'x', linewidth=2, markersize=10, markeredgewidth=2,
             color='black')
    ax1.plot(X_design, m_s, '--', linewidth=2, color=sns.color_palette()[0])
    ax1.fill_between(X_design.flatten(), m_05.flatten(), m_95.flatten(),
                     color=sns.color_palette()[0], alpha=0.25)
    i = np.argmax(G)
    ax1.set_ylabel('$f(x)$', fontsize=16)
    ax1.set_xlabel('$x$', fontsize=16)
    ax2 = ax1.twinx()
    ax2.plot(X_design, G, ':', linewidth=2, color=sns.color_palette()[1])
    ax2.set_ylabel('$\\operatorname{%s}(x)$' % Gamma_name, fontsize=16, color=sns.color_palette()[1])
    ax2.set_ylim([0., 2.])
    plt.setp(ax2.get_yticklabels(), color=sns.color_palette()[1])
    png_file = prefix + '.png'
    print('+ writing:', png_file)
    fig.savefig(png_file)
    plt.close(fig)
Example #3
def draw_overall_charts(stats):
    #draws charts for data from multiple simulations
    #print stats[0]
    #chart 1 : average score per turn
    sns.set(style = "darkgrid", palette = "muted")
    fig = plt.subplots(1, 1, figsize = (4, 2.5))
    b, g, r, p = sns.color_palette("muted", 4)
    ax = sns.tsplot(stats[0], color=g)
    ax.set(ylabel = "Average score per turn")
    ax.set_xlabel("Generation")
    plt.gcf().subplots_adjust(bottom = 0.22)
    plt.savefig("images/historical_overall.png") 
    
    
    plt.clf()
    plt.cla()
    
    #chart 3: cooperation and defection
    sns.set(style="darkgrid", palette="muted")
    fig = plt.subplots(1, 1, figsize=(4, 3))
    b, g, r, p = sns.color_palette("muted", 4)
    data = np.dstack([[j for j in stats[i]] for i in [1,2]]) 
    ax = sns.tsplot(data, color = [ b, r])
    ax.set(ylabel = "percent coop/defect")
    ax.set_xlabel("Generation")
    plt.gcf().subplots_adjust(bottom = 0.22)
    plt.savefig("images/cooppct_overall.png") 
    
    return
Example #4
    def precision_recall_curve(self):
        x = self.prc['thresholds']
        y = self.prc['precision']
        z = self.prc['recall']
        # w = self.prc['combination']

        m = np.mean(y, axis=0)
        dm= np.std(y, axis=0)
        n = np.mean(z, axis=0)
        dn= np.std(z, axis=0)
        # p = np.mean(w, axis=0)
        # dp= np.std(w, axis=0)

        # plt.plot([0,1],[0,1],'--k')
        plt.fill_between(x, m+dm, m-dm, alpha=0.5, linewidth=0, color=sns.color_palette()[0]) 
        plt.fill_between(x, n+dn, n-dn, alpha=0.5, linewidth=0, color=sns.color_palette()[1]) 
        # plt.fill_between(x, p+dp, p-dp, alpha=0.5, linewidth=0, color=sns.color_palette()[2])
        plt.plot(x, m, label='precision')
        plt.plot(x, n, label='recall')
        # plt.plot(x, p, label='p r / (p + r)')
        plt.ylim([0,1])
        plt.xlim([0,1])
        plt.xlabel('Decision Threshold')
        #plt.ylabel('Precision')
        plt.title('Precision-Recall curve for ' + self.name)
        plt.legend()
Example #5
    def plot_results(self):
        """
        A simple script to plot the balance of the portfolio, or
        "equity curve", as a function of time.
        """
        sns.set_palette("deep", desat=.6)
        sns.set_context(rc={"figure.figsize": (8, 4)})

        # Plot two charts: Equity curve, period returns
        fig = plt.figure()
        fig.patch.set_facecolor('white')

        df = pd.DataFrame()
        df["equity"] = pd.Series(self.equity, index=self.timeseries)
        df["equity_returns"] = pd.Series(self.equity_returns, index=self.timeseries)
        df["drawdowns"] = pd.Series(self.drawdowns, index=self.timeseries)

        # Plot the equity curve
        ax1 = fig.add_subplot(311, ylabel='Equity Value')
        df["equity"].plot(ax=ax1, color=sns.color_palette()[0])

        # Plot the returns
        ax2 = fig.add_subplot(312, ylabel='Equity Returns')
        df['equity_returns'].plot(ax=ax2, color=sns.color_palette()[1])

        # drawdown, max_dd, dd_duration = self.create_drawdowns(df["Equity"])
        ax3 = fig.add_subplot(313, ylabel='Drawdowns')
        df['drawdowns'].plot(ax=ax3, color=sns.color_palette()[2])

        # Rotate dates
        fig.autofmt_xdate()

        # Plot the figure
        plt.show()
Example #6
def plot_misclasses_over_time(misclasses, alpha=1, lw=0.75):
    for single_misclass in misclasses['train']:
        plt.plot(single_misclass, color=seaborn.color_palette()[0], alpha=alpha, lw=lw)
    for single_misclass in misclasses['valid']:
        plt.plot(single_misclass, color=seaborn.color_palette()[1], alpha=alpha, lw=lw)
    for single_misclass in misclasses['test']:
        plt.plot(single_misclass, color=seaborn.color_palette()[2], alpha=alpha, lw=lw)
Example #7
def genfunc_converg(data,labels):
	plt.clf()
	f = plt.figure(figsize=(3.32,2.))
	markers = ['o','h','s','^']
	Text = [r'$2$',
			r'$4$',
			r'$6$',
			r'$8$',
			r'$10$',
			r'$12$',
			r'$14$',
			r'$16$',
			r'$18$']
	for n,(d,l) in enumerate(zip(data,labels)):
		dat = np.genfromtxt(d)
		plt.plot(dat.T[3]/1.e9,dat.T[1],color=sns.color_palette()[n])
		plt.plot(dat.T[3]/1.e9,dat.T[2],color=sns.color_palette()[n])
		plt.scatter(dat.T[3]/1.e9,dat.T[1],
		            color=sns.color_palette()[n],marker=markers[n],
		            label=labels[n])
		plt.scatter(dat.T[3]/1.e9,dat.T[2],
		            color=sns.color_palette()[n],marker=markers[n])
		if(n==2):
			for i, txt in enumerate(Text):
			    if(i==0):
			    	plt.annotate(txt, (dat.T[3][i]/1.e9*1.07,dat.T[2][i]*0.7),fontsize=6)
			    else:
			    	plt.annotate(txt, (dat.T[3][i]/1.e9*1.07,dat.T[2][i]),fontsize=6)
	plt.annotate(r'$N_\mathrm{samp}=2400,\,N_T=24$',xy=(0.95,0.95), xycoords='axes fraction', horizontalalignment='right', verticalalignment='top')
	plt.xlabel(r'$t/s$')
	plt.ylabel(r'$\Delta J_i/\mathrm{kpc\,km\,s}^{-1}$')
	plt.semilogy()
	plt.semilogx()
	plt.legend(handlelength=1, scatterpoints=1, numpoints=1,frameon=False,ncol=4,loc='lower center', bbox_to_anchor=(0.5, 1.),fontsize=10)
	plt.savefig('genfunc_converg.pdf',bbox_inches='tight')
Example #8
def box_and_whisker(ax, med,sem,std,yloc,boxw=0.2,legend_lab_ea_color=False):
	sem_low,sem_hi= sem
	std_low,std_hi= std
	#get colors
	sns.set_palette('colorblind')
	c_med='k' #median
	c_sem=sns.color_palette()[2] #error on median box 'r', bootstraped
	c_std= sns.color_palette()[0] #std dev of pdf 'b', percentiled
	#legend labels (if called)
	ll= ['Median','Std. Error of Median','Std. Dev. of PDF']
	#std error med box
	if legend_lab_ea_color: 
		print('-------------------------- adding legend -------------')
		ax.plot([med]*2,[yloc-boxw,yloc+boxw],c=c_med,label=ll[0])
	else: ax.plot([med]*2,[yloc-boxw,yloc+boxw],c=c_med)
	ax.plot([med-sem_low]*2,[yloc-boxw,yloc+boxw],c=c_sem)
	ax.plot([med+sem_hi]*2,[yloc-boxw,yloc+boxw],c=c_sem)
	ax.plot([med-sem_low,med+sem_hi],[yloc-boxw]*2,c=c_sem)
	if legend_lab_ea_color: ax.plot([med-sem_low,med+sem_hi],[yloc+boxw]*2,c=c_sem,label=ll[1])
	else: ax.plot([med-sem_low,med+sem_hi],[yloc+boxw]*2,c=c_sem)
	#pdf std dev
	ax.plot([med-std_low,med-sem_low],[yloc]*2,c=c_std)
	ax.plot([med+sem_hi,med+std_hi],[yloc]*2,c=c_std)
	ax.plot([med-std_low]*2,[yloc-boxw,yloc+boxw],c=c_std)
	if legend_lab_ea_color: ax.plot([med+std_hi]*2,[yloc-boxw,yloc+boxw],c=c_std,label=ll[2])
	else: ax.plot([med+std_hi]*2,[yloc-boxw,yloc+boxw],c=c_std)
Example #9
def plot_offense_vs_defense_spacing(spacing_data):
    """
    Plot of offensive vs. defensive spacing for games

    Args:
        spacing_data (pd.DataFrame): Dataframe with columns of spacing data
            ['home_offense_areas', 'home_defense_areas',
             'away_offense_areas', 'away_defense_areas']

    Returns None
        Also, saves the plot to the temp/ directory.
    """
    sns.regplot(spacing_data.away_offense_areas,
                spacing_data.home_defense_areas,
                fit_reg=True, color=sns.color_palette()[0],
                ci=None)
    sns.regplot(spacing_data.home_offense_areas,
                spacing_data.away_defense_areas,
                fit_reg=False, color=sns.color_palette()[0],
                ci=None)
    plt.xlabel('Average Offensive Spacing (sq ft)', fontsize=16)
    plt.ylabel('Average Defensive Spacing (sq ft)', fontsize=16)
    plt.title('Offensive spacing robustly induces defensive spacing',
              fontsize=16)
    plt.savefig('temp/OffenseVsDefense.png')
    plt.close()
    return None
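The docstring above lists the spacing columns the function expects. Below is a minimal, hypothetical usage sketch; the numeric values are invented, and it assumes pandas/seaborn are imported as in the snippet and that a temp/ directory exists for the saved figure.

import pandas as pd

# Hypothetical spacing data for three games; the values are illustrative only.
spacing_data = pd.DataFrame({
    'home_offense_areas': [820.0, 790.5, 845.2],
    'home_defense_areas': [610.3, 598.7, 630.1],
    'away_offense_areas': [805.1, 812.4, 799.9],
    'away_defense_areas': [602.2, 615.0, 589.4],
})
plot_offense_vs_defense_spacing(spacing_data)  # writes temp/OffenseVsDefense.png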
Example #10
def e13c(time, count, x0=None, cv=1, row_int=(-3, 3), col_int=(-35, 45),
         plot_soloid=True):
    if x0 is None:
        x0 = [10.6888, 127.9398, 960.8654, 200., 34.]
    res = optimize.minimize(lambda x: chi2(x, time, count), x0, method='BFGS',
                            jac=lambda x: jac(x, time, count))
    row = res.x[4] + np.linspace(row_int[0], row_int[1], 50)
    col = res.x[3] + np.linspace(col_int[0], col_int[1], 50)
    X, Y = np.meshgrid(row, col)
    if plot_soloid:
        a = res.x
        f = np.vectorize(lambda x, y: chi2([a[0], a[1], a[2], x, y], time, count))
        Z = f(Y, X)
        c1 = sns.color_palette()[0]
        plt.contour(X, Y, Z, [objfun(res.x) + cv], colors=[c1])
    x0 = res.x[:3]

    @np.vectorize
    def f(x, y):
        tita, ava = e13a(time, count, x, y)
        params = np.concatenate([tita, [x, y]])
        return objfun(params)
    Z = f(Y, X)
    ct = plt.contour(X, Y, -Z, [-objfun(res.x) - cv],
                     colors=[sns.color_palette()[1]])
    plt.plot([res.x[4]], [res.x[3]], 'o')
    plt.xlabel('$a_5$')
    plt.ylabel('$a_4$')
    p = ct.collections[0].get_paths()[0]
    v = p.vertices
    x = v[:, 0]
    y = v[:, 1]
    print(min(x), max(x), min(y), max(y))
    return res
Example #11
def compare_more_models_final(experiments, eval_data, labels=None, difficulties=True, runs=1):
    labels = sorted(experiments.keys()) if labels is None else labels

    df = pd.DataFrame(columns=["labels", "rmse"])
    for label in labels:
        r = Evaluator(experiments[label][0](label), experiments[label][1](label)).get_report()
        df.loc[len(df)] = (label, r["rmse"])

    plt.subplot(131)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   palette=sns.color_palette()[:4],
                   metric="rmse", force_evaluate=False, answer_filters={
            "binary": response_as_binary(),
        }, runs=runs, hue_order=False, with_all=False)

    plt.subplot(132)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   palette=sns.color_palette()[:4],
                   metric="rmse", force_evaluate=False, answer_filters={
            # "response >7s-0.5": transform_response_by_time(((7, 0.5),), binarize_before=True),
            "linear 14": transform_response_by_time_linear(14),
        }, runs=runs, hue_order=False, with_all=False)

    plt.subplot(133)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   palette=sns.color_palette()[:4],
                   metric="AUC", force_evaluate=False, runs=runs, hue_order=False)
Example #12
def plot_retest_data(retest_data, size=4.6, save_dir=None):
    colors = [sns.color_palette('Reds_d',3)[0], sns.color_palette('Blues_d',3)[0]]
    f = plt.figure(figsize=(size,size*.75))
    # plot boxes
    with sns.axes_style('white'):
        box_ax = f.add_axes([.15,.1,.8,.5]) 
        sns.boxplot(x='icc3.k', y='Measure Category', ax=box_ax, data=retest_data,
                    palette={'Survey': colors[0], 'Task': colors[1]}, saturation=1,
                    width=.5, linewidth=size/4)
    box_ax.text(0, 1, '%s Task measures' % Task_N, color=colors[1], fontsize=size*2)
    box_ax.text(0, 1.2, '%s Survey measures' % Survey_N, color=colors[0], fontsize=size*2)
    box_ax.set_ylabel('Measure category', fontsize=size*2, labelpad=size)
    box_ax.set_xlabel('Intraclass correlation coefficient', fontsize=size*2, labelpad=size)
    box_ax.tick_params(labelsize=size*1.5, pad=size, length=2)
    [i.set_linewidth(size/5) for i in box_ax.spines.values()]

    # plot distributions
    dist_ax = f.add_axes([.15,.6,.8,.4]) 
    dist_ax.set_xlim(*box_ax.get_xlim())
    dist_ax.set_xticklabels('')
    dist_ax.tick_params(length=0)
    for i, (name, g) in enumerate(retest_data.groupby('Measure Category')):
        sns.kdeplot(g['icc3.k'], color=colors[i], ax=dist_ax, linewidth=size/3, 
                    shade=True, legend=False)
    dist_ax.set_ylim((0, dist_ax.get_ylim()[1]))
    dist_ax.axis('off')
    if save_dir:
        plt.savefig(save_dir, dpi=dpi, bbox_inches='tight')
Example #13
def plot_eta_omega(labels_all, labels_predict_phys, axes=None):

    labels_eta_phys = labels_predict_phys[(labels_all == "eta")]
    labels_omega_phys = labels_predict_phys[(labels_all == "omega")]

    if axes is None:
        fig, axes = plt.subplots(1,2,figsize=(16,6), sharey=True)

    st = pd.Series(labels_eta_phys)
    nstates = st.value_counts()
    nstates.plot(kind='bar', color=sns.color_palette()[0], ax=axes[0])
    axes[0].set_ylim(0, 1.05*np.max(nstates))

    axes[0].set_title("Distribution for state Eta")
    axes[0].set_xlabel("States")
    axes[0].set_ylabel("Number of samples")

    st = pd.Series(labels_omega_phys)
    nstates = st.value_counts()
    nstates.plot(kind='bar', color=sns.color_palette()[0], ax=axes[1])
    axes[1].set_ylim(0, 1.05*np.max(nstates))
    axes[1].set_title("Distribution for state Omega")
    axes[1].set_xlabel("States")
    #ax2.set_ylabel("Number of samples")

    return axes
Example #14
def plot_bydurations(durations, savepath, savefig=True):
    """Plots duration for each trial separated by trajectories. Behavior only.

        Parameters
        ----------
        durations : dict
            With u, shortcut, novel, num_sessions as keys.
            Each value is a list of durations (float) for a each session.
        savepath : str
            Location and filename for the saved plot.
        savefig : boolean
            Default is True and will save the plot to the specified location. False
            shows with plot without saving it.

        """
    ax = sns.boxplot(data=[durations['u'], durations['shortcut'], durations['novel']])
    sns.color_palette("hls", 18)
    ax.set(xticklabels=['U', 'Shortcut', 'Novel'])
    plt.ylabel('Duration of trial (s)')
    plt.xlabel('sessions=' + str(durations['num_sessions']))
    plt.ylim(0, 140)
    sns.despine()

    if savefig:
        plt.savefig(savepath, dpi=300, bbox_inches='tight')
        plt.close()
    else:
        plt.show()
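A minimal, hypothetical call matching the docstring above; the durations and the output filename are made up for illustration, and matplotlib/seaborn are assumed to be imported as in the snippet.

# Hypothetical durations dictionary: per-trajectory lists of trial durations
# in seconds, plus the number of sessions used in the x-axis label.
durations = {
    'u': [35.2, 41.0, 28.7, 55.1],
    'shortcut': [22.4, 19.8, 30.3, 25.0],
    'novel': [60.2, 48.9, 72.5, 66.1],
    'num_sessions': 4,
}
plot_bydurations(durations, savepath='durations_boxplot.png', savefig=True)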
Example #15
def plot_data(data, has_label=True):
	import numpy as np
	import pandas as pd
	import seaborn as sns
	import matplotlib.pyplot as plt
	from sklearn.manifold import TSNE
	from sklearn.decomposition import PCA

	if not has_label:
		data = data.copy()
		data['label'] = np.zeros(len(data))

	LIMIT = 4000
	if data.shape[0] > LIMIT:
		dt = data.sample(n=LIMIT, replace=False)
		X = dt.iloc[:,:-1]
		labels = dt.iloc[:,-1]
	else:
		X = data.iloc[:,:-1]
		labels = data.iloc[:,-1]

	tsne_model = TSNE(n_components=2, random_state=0)
	np.set_printoptions(suppress=True)
	points1 = tsne_model.fit_transform(X)
	df1 = pd.DataFrame(data=np.column_stack([points1,labels]), columns=["x","y","class"])
	sns.lmplot(x="x", y="y", data=df1, hue='class', fit_reg=False, palette=sns.color_palette('colorblind'))
	plt.title('TSNE')

	pca = PCA(n_components=2)
	pca.fit(X)
	points2 = pca.transform(X)
	df2 = pd.DataFrame(data=np.column_stack([points2,labels]), columns=["x","y","class"])
	sns.lmplot(x="x", y="y", data=df2, hue='class', fit_reg=False, palette=sns.color_palette('colorblind'))
	plt.title('PCA')
Example #16
def plot_results(wgs_results, out_pdf, aln_size):
    paralogs = ['Notch2', 'Notch2NL-A', 'Notch2NL-B', 'Notch2NL-C', 'Notch2NL-D']
    fig, plots = plt.subplots(5, sharey=True, sharex=True)
    plt.yticks((0, 0.1, 0.2, 0.3, 0.4))
    plt.ylim((0, 0.4))
    xticks = range(0, int(round(aln_size / 10000.0) * 10000.0), 10000)
    plt.xticks(xticks, rotation='vertical')
    plt.xlim((0, aln_size))
    plt.xlabel("Alignment position")
    for i, (p, para) in enumerate(zip(plots, paralogs)):
        p.set_title(para)
        wgs = wgs_results[para]
        xvals, yvals = zip(*wgs)
        p.vlines(xvals, np.zeros(len(xvals)), yvals, color=sns.color_palette()[0], alpha=0.7, linewidth=0.8)
        # mark the zeros
        zero_wgs = [[x, y + 0.02] for x, y in wgs if y == 0]
        if len(zero_wgs) > 0:
            z_xvals, z_yvals = zip(*zero_wgs)
            p.vlines(z_xvals, np.zeros(len(z_xvals)), z_yvals, color=sns.color_palette()[2], alpha=0.7, linewidth=0.8)
    plt.tight_layout(pad=2.5, h_pad=0.25)
    zero_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[2])
    reg_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[0])
    fig.legend(handles=(reg_line, zero_line), labels=["WGS SUN Fraction", "WGS Missing SUN"], loc="upper right")
    fig.text(0.01, 0.5, 'SUN fraction of reads', va='center', rotation='vertical')
    plt.savefig(out_pdf, format="pdf")
    plt.close()
Example #17
def pca_and_report(data, plot_comps=[1,2,3,4], verbose=True, pca=PCA(), data_labels=None):
    """Generate figures and tables to provide insight into PCA results."""
    r = munch.Munch()

    if data_labels is None:
        data_labels = pd.Series(["unlabeled"] * data.shape[0], name="Labels")

    r.data_labels = data_labels

    r.pft = pca.fit_transform(data)

    var_ratios = pca.explained_variance_ratio_
    r.var_ratios = pd.Series(var_ratios, index=range(1,1+len(var_ratios)))

    r.pcs = repandasify(array=r.pft, y_names=data.index.values, X_names=['PC {v_}'.format(v_=v+1) for v in range(len(r.pft[0]))])

    r.var_pairs = apply_pairwise(series=r.var_ratios, func=np.sum)

    with sns.color_palette(sns.color_palette("hls", 2)):
        with sns.axes_style("white"):
            g = sns.PairGrid(pd.concat([r.data_labels,r.pcs], axis=1), hue=data_labels.name)
            g.map_diag(plt.hist)
            g.map_lower(plt.scatter)
#             g.map_lower(sns.kdeplot, cmap=sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True), shade=True)
            g.add_legend()
            r.g = g

    return r
Example #18
def graph_data(f,img_path):
    df = pd.read_csv(f, sep='\t', index_col=0, parse_dates=True)
    now = df.index[-1]
    before = now - timedelta(minutes=10)
    df = df.between_time(start_time = before, end_time=now)

    with sns.color_palette("Paired"):
        ax = plt.subplot(311)
        ax.plot(df.index, df['Diode 1 Temp (C)'])
        ax.plot(df.index, df['Diode 1 Set Temp (C)'])
        ax.plot(df.index, df['Diode 2 Temp (C)'])
        ax.plot(df.index, df['Diode 2 Set Temp (C)'])
        ax.legend(bbox_to_anchor = (1,1), loc=2)
        ax = plt.subplot(312)
        ax.plot(df.index, df['Output Power (W)'])
        ax.plot(df.index, df['Set Power (W)'])
        ax.legend(bbox_to_anchor = (1,1), loc=2)
    with sns.color_palette():
        ax = plt.subplot(313)
        ax.plot(df.index, df['Current (A)'])
        ax.legend(bbox_to_anchor=(1,1), loc=2)


    figfile = BytesIO()
    plt.savefig(figfile, format='svg', bbox_inches='tight', pad_inches = 0.25)
    figdata_svg = b'<svg' + figfile.getvalue().split(b'<svg')[1]
    figdata_svg = figdata_svg.decode()
    plt.close()
    return figdata_svg
Example #19
def lag_plot(tracks, condition='Condition', save=False, palette='deep',
    skip_color=0, null_model=True, context='notebook'):
    """Lag plot for velocities and turning angles"""
    if 'Displacement' not in tracks.columns:
        tracks = analyze(tracks)

    if condition not in tracks.columns:
        tracks[condition] = 'Default'

    sns.set(style="white", palette=sns.color_palette(
        palette, tracks[condition].unique().__len__() + skip_color), context=context)
    if 'Plane Angle' in tracks.columns:
        fig, ax = plt.subplots(1,3, figsize=(12,4.25))
    else:
        fig, ax = plt.subplots(1,2, figsize=(8,4.25))
    plt.setp(ax, yticks=[])
    plt.setp(ax, xticks=[])
    ax[0].set_title('Velocity')
    ax[0].set_xlabel('v(t)')
    ax[0].set_ylabel('v(t+1)')
    ax[0].axis('equal')
    ax[1].set_title('Turning Angle')
    ax[1].set_xlabel(r'$\theta$(t)')
    ax[1].set_ylabel(r'$\theta$(t+1)')
    ax[1].axis('equal')
    if 'Plane Angle' in tracks.columns:
        ax[2].set_title('Plane Angle')
        ax[2].set_xlabel(r'$\phi$(t)')
        ax[2].set_ylabel(r'$\phi$(t+1)')
        ax[2].axis('equal')

    if null_model:
        null_model = tracks.loc[np.random.choice(tracks.index, tracks.shape[0])]
        ax[0].scatter(null_model['Velocity'], null_model['Velocity'].shift(),
            facecolors='0.8')
        ax[1].scatter(null_model['Turning Angle'], null_model['Turning Angle'].shift(),
            facecolors='0.8')
        if 'Plane Angle' in tracks.columns:
            ax[2].scatter(null_model['Plane Angle'], null_model['Plane Angle'].shift(),
                facecolors='0.8')

    for i, (_, cond_tracks) in enumerate(tracks.groupby(condition)):
        color = sns.color_palette()[i + skip_color]
        for _, track in cond_tracks.groupby('Track_ID'):
            ax[0].scatter(track['Velocity'], track['Velocity'].shift(),
                facecolors=color)
            ax[1].scatter(track['Turning Angle'], track['Turning Angle'].shift(),
                facecolors=color)
            if 'Plane Angle' in tracks.columns:
                ax[2].scatter(track['Plane Angle'], track['Plane Angle'].shift(),
                    facecolors=color)

    sns.despine()
    plt.tight_layout()
    if save:
        conditions = [cond.replace('= ', '')
            for cond in tracks[condition].unique()]
        plt.savefig('Motility-LagPlot_' + '-'.join(conditions) + '.png', dpi=300)
    else:
        plt.show()
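A hypothetical way to drive lag_plot; the track data below is random, the 'Displacement' column is included only so the external analyze() helper is not needed, and the column names follow the ones referenced in the function body.

import numpy as np
import pandas as pd

# Two invented tracks under two conditions; values are random placeholders.
rng = np.random.RandomState(0)
tracks = pd.DataFrame({
    'Track_ID': [1] * 20 + [2] * 20,
    'Velocity': rng.rand(40) * 10,
    'Turning Angle': rng.rand(40) * np.pi,
    'Displacement': rng.rand(40) * 5,
    'Condition': ['Control'] * 20 + ['Treated'] * 20,
})
lag_plot(tracks, condition='Condition', save=False)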
Example #20
File: g9.py Project: ababino/efe
def ej14(data_x, data_y, s):
    a2, a1 = np.polyfit(data_x, data_y, 1)
    chi2_t_list = []
    chi2_f_list = []
    for _ in range(1000):
        new_y = np.random.normal(a1 + a2 * data_x, s)
        p = np.polyfit(data_x, new_y, 1)
        chi2_t_list.append(sum(((new_y - a1 - a2 * data_x) / s)**2))
        chi2_f_list.append(sum(((new_y - p[1] - p[0] * data_x) / s)**2))
    plt.figure(1)
    my_hist(chi2_t_list, 20, label='Datos')
    x = np.linspace(0, 30, 100)
    chi211 = chi2.pdf(x, 11)
    plt.plot(x, chi211, label=r'$\chi^2_{11}$', color=sns.color_palette()[2])
    plt.legend()
    plt.savefig('fig4.jpg')
    plt.figure(2)
    my_hist(chi2_f_list, 20, label='Datos')
    x = np.linspace(0, 30, 100)
    plt.plot(x, chi211, label=r'$\chi^2_{11}$', color=sns.color_palette()[2])
    chi29 = chi2.pdf(x, 9)
    plt.plot(x, chi29, label=r'$\chi^2_{9}$', color=sns.color_palette()[3])
    plt.legend()
    plt.savefig('fig5.jpg')
    plt.show()
Example #21
def plot_tracks_parameter_space(tracks, n_tracks=None, condition='Condition',
    save=False, palette='deep', skip_color=0, context='notebook'):
    """Plot tracks in velocities-turning-angles-space"""
    if 'Displacement' not in tracks.columns:
        tracks = analyze(tracks)

    if condition not in tracks.columns:
        tracks[condition] = 'Default'

    sns.set(style="ticks", palette=sns.color_palette(
        palette, tracks[condition].unique().__len__() + skip_color), context=context)
    fig, ax = plt.subplots(figsize=(5.5,5.5))
    ax.set_xlabel('Turning Angle')
    ax.set_xlim([0,np.pi])
    ax.set_xticks([0, np.pi/2, np.pi])
    ax.set_xticklabels([r'$0$', r'$\pi/2$', r'$\pi$'])
    ax.set_ylabel('Velocity')
    for i, (_, cond_tracks) in enumerate(tracks.groupby(condition)):
        color = sns.color_palette()[i + skip_color]
        if n_tracks is not None:
            cond_tracks = cond_tracks[cond_tracks['Track_ID'].isin(
                np.random.choice(cond_tracks['Track_ID'], n_tracks))]
        for _, track in cond_tracks.groupby('Track_ID'):
            ax.plot(track['Turning Angle'], track['Velocity'],
                color=color, alpha=0.5)

    sns.despine()
    plt.tight_layout()
    if save:
        conditions = [cond.replace('= ', '')
            for cond in tracks[condition].unique()]
        plt.savefig('Motility-TracksInParameterSpace_' + '-'.join(conditions)
            + '.png', dpi=300)
    else:
        plt.show()
Example #22
def scatter_plot_matrix():
    pth = "/Users/jonathan/PycharmProjects/networkclassifer/saved_clf"
    classifier = pickle.load(open("../saved_clf", "rb"))
    # cs = ['none','b','r','k','grey','grey']
    import seaborn as sns
    import pandas as pd

    sns.palplot(sns.color_palette("hls", 8))
    sns.color_palette("hls", 8)
    mc2 = [
        (0.14901960784313725, 0.13725490196078433, 0.13725490196078433),
        (0.8235294117647058, 0.34509803921568627, 0.34509803921568627),
        (0.30196078431372547, 0.4588235294117647, 0.7019607843137254),
        (0.7725490196078432, 0.7764705882352941, 0.7803921568627451),
    ]
    sns.palplot(mc2)

    X = classifier.iss_features[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]]
    fig = plt.figure(figsize=(8, 11))
    sns.set_palette(mc2)
    df = pd.DataFrame(X, columns=["Feature" + str(i + 1) for i in range(X.shape[1])])
    print(classifier.labels.shape)
    df["Network States"] = classifier.labels
    pg = sns.PairGrid(df, vars=["Feature" + str(i + 1) for i in range(X.shape[1])], hue="Network States", size=2)
    # pg = sns.pairplot(df)# hue="Network States")
    pg.map(plt.scatter)
    # pg.map_lower(plt.scatter)
    # pg.map_upper(plt.scatter)
    # pg.map_diag(plt.scatter)

    # plt.savefig(static+'scattermatrix.pdf')
    plt.show()
Example #23
def display_model(training_data, alpha, beta, gamma, delta):
    """
    Displays a plot of the data under the maximum a posterior distribution, along side the maximum likelihood for
    the data and the prior.

    :param training_data: The training data to fit the distribution to.
    :type training_data: list[float]
    :param alpha: The alpha hyperparameter of the prior
    :type alpha: float
    :param beta: The beta hyperparameter of the prior
    :type beta: float
    :param gamma: The gamma hyperparameter of the prior
    :type gamma: float
    :param delta: The delta hyperparameter of the prior
    :type delta: float
    """
    prior_mean, prior_variance = generate_parameters([], alpha, beta, gamma, delta)
    ml_mean, ml_variance = generate_maximum_likelihood_parameters(training_data)
    mean, variance = generate_parameters(training_data, alpha, beta, gamma, delta)

    prior_sigma = prior_variance ** 0.5
    ml_sigma = ml_variance ** 0.5
    sigma = variance ** 0.5

    map_color = sns.color_palette()[0]
    data_color = sns.color_palette()[1]
    prior_color = sns.color_palette()[2]
    ml_color = sns.color_palette()[3]

    display.draw_normal(prior_mean, prior_sigma, color=prior_color)
    display.draw_normal(ml_mean, ml_sigma, color=ml_color)
    display.draw_data_under_normal(training_data, mean, sigma, data_color=data_color, normal_color=map_color)
    plt.show()
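A short, hypothetical call to the function above; the training data and the prior hyperparameters are arbitrary example values, and the generate_parameters / display helpers are assumed to come from the snippet's own module.

# Invented observations and hyperparameters, purely for illustration.
training_data = [4.8, 5.1, 5.3, 4.9, 5.0, 5.2]
display_model(training_data, alpha=2.0, beta=1.0, gamma=0.0, delta=1.0)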
Example #24
def all_boxplot():
    """ """
    general_settings()
    plt.figure(figsize=(4, 6))

    # Load data
    data = pickle.load(open('../scripts/mrt_gauthier_normalized.pickle',
                            'rb'))

    # Colors
    cmap = sns.color_palette("colorblind", n_colors=3)
    cmap.append(sns.color_palette("muted", 4)[3])

    # Plot
    ax = sns.boxplot(x='accuracy', y='dataset', hue='model', data=data,
                     palette=cmap, orient='h')
    """
    sns.swarmplot(x='accuracy', y='model', hue='dataset', data=data,
                  palette=cmap, split=True)
    """
    ax.legend(loc='lower left')

    y_ticks = ['Mirror-\nreversed\nText', 'Temporal\nTuning']
    ax.set_yticklabels(y_ticks)

    plt.savefig('all_boxplot_strip.png')

    plt.show()
Example #25
def plot_bias(bias_list, fpath):
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import seaborn as sns

    sys.stderr.write("saving to %s\n" % fpath)
    p = sns.color_palette("deep", desat=.8)
    c1, c2 = p[0], p[-1]
    c1, c2 = sns.color_palette("Set1", 2)

    mpl.rc("figure", figsize=(8, 4))
    f, ax1 = plt.subplots(1)

    xs = [int(x['base']) for x in bias_list]

    ax1.plot(xs, [100 * float(x['read_1']) for x in bias_list], c=c1,
             label="read 1")
    ax1.plot(xs, [100 * float(x['read_2']) for x in bias_list], c=c2,
             label="read 2")
    ax1.axvline(x=4, c="#aaaaaa", alpha=0.8, linewidth=3, zorder=-1)
    ax1.axvline(x=max(xs) - 4, c="#aaaaaa", alpha=0.8, linewidth=3, zorder=-1)

    ax1.legend(loc='upper center')
    ax1.set_xlim(min(xs), max(xs))
    ax1.set_ylabel('mean CG % methylation')
    ax1.set_xlabel('position along read')
    ax1.set_title('Methylation Bias Plot (vertical lines at 4 bases from end)')
    ax1.grid(False)

    f.tight_layout()
    f.savefig(fpath)
Example #26
def main():
    path = "C:/Users/Andrew/Documents/GitHub/summer_game_theory_repo/"
    path2= "C:/Users/Andrew/Documents/GitHub/summer_game_theory_repo/IPD_output/images/"
    

    avg_turn_score_df=pd.io.json.read_json(path+"avgscore.json")
    
    generations,simulations=avg_turn_score_df.shape
    
    avg_turn_score_df=avg_turn_score_df.sort_index() # jesus this object is whiny
    print(avg_turn_score_df)
    sns.set(style = "darkgrid", palette = "muted",rc={"lines.linewidth":0.1})
    fig = plt.subplots(1, 1, figsize = (16, 12))
    b, g, r, p = sns.color_palette("muted", 4)
    stuff_t= np.transpose(np.array(avg_turn_score_df))
    #ax = sns.tsplot(result, color=g)
    cis = np.linspace(98, 10, 4) #acts like range does
    #ax = sns.tsplot(stats[0] ,err_style="boot_traces", n_boot=simulations)
    #balls=[avg_turn_score_df[22][i] for i in range(generations)]
    ax=sns.tsplot( stuff_t ,err_style="ci_band",ci = cis, color=p)
    ax.set_autoscale_on(False)
    ax.axis([0,generations,0,3]) #[xmin,xmax,ymin,ymax]
    '''for i in range(simulations):
        plt.plot(range(generations), avg_turn_score_df[i], color='black', alpha=0.4)'''
    plt.xlabel('Generation')
    plt.ylabel('Average score per turn')
    plt.title('Distribution of Average Scores per Turn')
    plt.savefig(path2+"overall_avg_turn_score.png") 
    
    #plt.show()
    
    
    plt.clf()
    plt.cla()
    
    avg_coop_pct_df=pd.io.json.read_json(path+"avgcoop.json")
    avg_defect_pct_df=pd.io.json.read_json(path+"avgdefect.json")
    
    avg_coop_pct_df=avg_coop_pct_df.sort_index() # tres important!
    avg_defect_pct_df=avg_defect_pct_df.sort_index() # do not forget!
    
    stuff_c=np.transpose(np.array(avg_coop_pct_df))
    stuff_d=np.transpose(np.array(avg_defect_pct_df))
    
    sns.set(style="darkgrid", palette="muted",rc={"lines.linewidth":0.5})
    fig = plt.subplots(1, 1, figsize=(16, 12))
    b, g, r, p = sns.color_palette("muted", 4)

    sns.tsplot( stuff_c ,err_style="ci_band",ci = cis, color=b)
    sns.tsplot( stuff_d ,err_style="ci_band",ci = cis, color=r)
    '''for i in range(simulations):
    
        plt.plot(range(generations), avg_coop_pct_df[i], color='blue', alpha=0.01)
        plt.plot(range(generations), avg_defect_pct_df[i], color='red', alpha=0.01)'''
    plt.xlabel('Generation')
    plt.ylabel('Percent')
    plt.title('Percentage of cooperation vs defection')
    
    plt.savefig(path2+"overall_cooppct.png") 
Example #27
def factor_scatter_matrix(df, factor, factor_labels, legend_title=None,
                          palette=None, title=None):
    '''Create a scatter matrix of the variables in df, with differently colored
    points depending on the value of df[factor].
    inputs:
        df: pandas.DataFrame containing the columns to be plotted, as well
            as factor.
        factor: string or pandas.Series. The column indicating which group
            each row belongs to.
        palette: A list of hex codes, at least as long as the number of groups.
            If omitted, a predefined palette will be used, but it only includes
            9 groups.
    '''

    if isinstance(factor, str):
        factor_name = factor  # save off the name
        factor = df[factor]  # extract column
        df = df.drop(factor_name, axis=1)  # remove from df, so it
        # doesn't get a row and col in the plot.

    classes = list(set(factor))

    if palette is None:
        palette = sns.color_palette("gist_ncar", len(set(factor)))
    elif isinstance(palette, str):
        palette = sns.color_palette(palette, len((set(factor))))
    else:
        palette = sns.color_palette(palette)

    if len(classes) > len(palette):
        raise ValueError((
            "Too many groups for the number of colors provided. "
            "We only have {} colors in the palette, but you have {} "
            "groups.").format(len(palette), len(classes)))

    color_map = dict(zip(classes, palette))

    colors = factor.apply(lambda group: color_map[group])
    axarr = scatter_matrix(df, figsize=(10, 10),
                           marker='o', c=np.array(list(colors)), diagonal=None,
                           alpha=1.0)

    if legend_title is not None:
        plt.grid(False)
        plt.legend([plt.Circle((0, 0), fc=color) for color in palette],
                factor_labels, title=legend_title, loc='best',
                ncol=3)
    if title is not None:
        plt.suptitle(title)

    # for rc in xrange(len(df.columns)):
    #     for group in classes:
    #         y = df[factor == group].icol(rc).values
    #         gkde = gaussian_kde(y)
    #         ind = np.linspace(y.min(), y.max(), 1000)
    #         axarr[rc][rc].plot(ind, gkde.evaluate(ind), c=color_map[group])

    return axarr, color_map
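A hypothetical usage sketch for the helper above; it assumes the snippet's module-level imports (numpy as np, seaborn as sns, matplotlib.pyplot as plt and pandas' scatter_matrix) are in place, and the data is made up.

import numpy as np
import pandas as pd

# Invented data: two numeric columns plus a 'group' factor column.
rng = np.random.RandomState(0)
df = pd.DataFrame({
    'height': rng.normal(170, 10, 60),
    'weight': rng.normal(70, 8, 60),
    'group': ['a', 'b', 'c'] * 20,
})
axarr, color_map = factor_scatter_matrix(df, factor='group',
                                         factor_labels=['a', 'b', 'c'],
                                         legend_title='Group')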
Example #28
def stat_freq_filter(stat_file):
    sns.color_palette('hls', 26)

    save = pickle.load(open(stat_file, 'rb'))
    sets = save['sets']

    total_weights = [np.sum(list(sets[i].values())) for i in range(26)]
    total_sizes = [len(sets[i]) for i in range(26)]

    def foo(freq):
        print('threshold:%d' % freq)
        sizes = [np.array(list(sets[i].values())) for i in range(26)]
        inds = [np.where(sizes[i] > freq)[0] for i in range(26)]
        f_sizes = [len(inds[i]) for i in range(26)]
        f_weights = [np.sum(sizes[i][inds[i]]) for i in range(26)]
        f_s_ratios = [f_sizes[i] * 1.0 / total_sizes[i] for i in range(26)]
        f_w_ratios = [f_weights[i] * 1.0 / total_weights[i] for i in range(26)]
        print('dimension %d->%d' % (np.sum(total_sizes), np.sum(f_sizes)))
        print('field\tt_size\tt_weight\tf_size\tf_weight\tfs_ratio\tfw_ratio')
        for i in range(26):
            print('%3d%10d%12d%10d%12d\t%.4f\t%.4f' % (i, total_sizes[i],
                total_weights[i], f_sizes[i], f_weights[i], f_s_ratios[i],
                f_w_ratios[i]))
        return f_s_ratios, f_w_ratios, np.sum(f_sizes)

    s_ratios = []
    w_ratios = []
    sizes = []
    thresholds = [10, 20, 50, 100, 200, 500, 1000]
    for i in thresholds:
        s, w, ss = foo(i)
        s_ratios.append(s)
        w_ratios.append(w)
        sizes.append(ss)

    plt.plot(thresholds, sizes)
    plt.xlabel('freq threshold')
    plt.ylabel('dim')
    plt.title('dim reduction')
    plt.show()

    s_ratios = np.array(s_ratios)
    w_ratios = np.array(w_ratios)

    for i in range(26):
        plt.plot(thresholds, s_ratios[:, i])
    plt.xlabel('freq threshold')
    plt.ylabel('dim reduce rate')
    plt.title('dim reduction on different fields')
    plt.show()

    for i in range(26):
        plt.plot(thresholds, w_ratios[:, i])
    plt.xlabel('freq threshold')
    plt.ylabel('weight reduce rate')
    plt.title('item weights on different fields')
    plt.show()
Example #29
def plot_per_sub_dfs(dfs, values_fn):
    per_sub_dfs = get_per_sub_dfs(dfs)
    cp = seaborn.color_palette()
    with seaborn.color_palette(np.repeat(cp,len(dfs),axis=0)):
        plot_dfs_vals(per_sub_dfs, values_fn=values_fn)
    subject_legend_mrks = [plt.Line2D((0,1),(0,0),
                                  color=seaborn.color_palette()[i_color], marker='o', linestyle='None')
                       for i_color in (0,1,2)]
    plt.legend(subject_legend_mrks, ("Subj 1","Subj 2","Subj 3"))
Example #30
def plot_mean_std_misclasses_over_time(misclasses):
    padded_misclasses, _ = get_padded_chan_vals(misclasses['train'])
    plot_mean_and_std(padded_misclasses, color=seaborn.color_palette()[0])
    padded_misclasses, _ = get_padded_chan_vals(misclasses['valid'])
    plot_mean_and_std(padded_misclasses, color=seaborn.color_palette()[1])
    padded_misclasses, n_exps_by_epoch = get_padded_chan_vals(misclasses['test'])
    plot_mean_and_std(padded_misclasses, color=seaborn.color_palette()[2])
    plt.plot(n_exps_by_epoch / float(n_exps_by_epoch[0]), color='black', lw=1)
    plt.ylim(0,1)
Example #31
def graph_messages_window(all_messages, start_date, end_date):

    delta = end_date - start_date

    xs = [start_date + timedelta(days=i) for i in range(delta.days)]

    # List of tuples of the form (total number of messages, y-values corresponding to every day, their name)
    ys_and_labels = []

    for name in all_messages:
        total_messages = 0
        y_name = [0] * len(xs)

        message_counts = all_messages[name]

        for day in message_counts:
            message_delta = day - start_date
            for windowed_day in range(
                    message_delta.days - int(window_size_days / 2),
                    message_delta.days + int(window_size_days / 2) + 1):
                if windowed_day > 0 and windowed_day < len(y_name):
                    weight = get_weight_for_time(message_delta.days,
                                                 windowed_day)
                    inc = message_counts[day] * weight
                    y_name[windowed_day] += inc

            if message_delta.days > 0 and message_delta.days < len(y_name):
                total_messages += message_counts[day]

        ys_and_labels.append((total_messages, y_name, name, total_messages))

    # Display label sorted by most messages
    ys_and_labels.sort(reverse=True)

    # Sort y-values into a set of y-values for each of the top n people and "Other"
    ys = [y_and_label[1] for y_and_label in ys_and_labels[:num_top_people]]
    other_ys = [0] * len(xs)
    total_other_messages = 0
    for y_and_label in ys_and_labels[num_top_people:]:
        y = y_and_label[1]
        for i, count in enumerate(y):
            other_ys[i] += count
        total_other_messages += y_and_label[0]

    top_ys_and_labels = ys_and_labels[:num_top_people]

    anon_mapping = {name: name for _, _, name, _ in top_ys_and_labels}
    if anonymize:
        anon_mapping = {
            name: anon_names[i]
            for i, (_, _, name, _) in enumerate(top_ys_and_labels)
        }

    labels = [
        "%s (%d)" % (anon_mapping[y_and_label[2]], int(y_and_label[3]))
        for y_and_label in top_ys_and_labels
    ]
    labels.append("Other (%d)" % int(total_other_messages))

    # Make colors prettier
    pal = sns.color_palette("hls", num_top_people)
    # This is really hardcoded for 20 basically
    colors = spread_list(pal, color_spread_skip)
    other_color = (0.9, 0.9, 0.9)  #Grey
    colors.append(other_color)

    plt.rc('xtick', labelsize=16)
    plt.rc('ytick', labelsize=16)
    plt.rc('legend', fontsize=8)
    plt.rc('axes', titlesize=24)
    plt.title("Message word count by person over time")
    plt.stackplot(xs,
                  *ys,
                  other_ys,
                  colors=colors,
                  labels=labels,
                  baseline='wiggle')
    plt.legend(loc='upper right')
    plt.show()
Example #32
def display_stacked_cat_bar(df,
                            groupby,
                            on,
                            order=None,
                            unit=None,
                            palette=None,
                            horizontal=True,
                            figsize=(11, 11)):
    """
    Displays a stacked bar plot given two categorical variables
    :param df: DataFrame to display data from
    :param groupby: Column name by which bars would be grouped
    :param on: Column name of the different bar blocks
    :param order: Order in which to draw the bars by
    :param unit: Scale to which unit
    :param palette: Color palette to use for drawing
    :param horizontal: Horizontal or vertical barplot
    :param figsize: Figure size
    :return: matplotlib.Axis object
    """

    # Create a binary dataframe
    stacked_bar_df = pd.concat([df[groupby], pd.get_dummies(df[on])], axis=1)
    bins = list(stacked_bar_df.columns[1:])
    stacked_bar_df = stacked_bar_df.groupby(groupby)[bins].sum().reset_index()

    if order:
        if not isinstance(order, list):
            raise ValueError('"order" must be a list')
        if set(order) != set(bins):
            raise ValueError(
                '"order" iterable must contain all possible values: {}'.format(
                    str(bins)))

        stacked_bar_df = stacked_bar_df[[groupby] + order]
        bins = order

    # Scale if given unit
    if unit:
        # Calculate total
        stacked_bar_df['total'] = stacked_bar_df[bins].sum(axis=1)

        # Scale
        for bin_label in bins:
            stacked_bar_df[bin_label] /= stacked_bar_df['total']
            stacked_bar_df[bin_label] *= unit

        # Drop irrelevant 'total' column
        stacked_bar_df = stacked_bar_df.iloc[:, :-1]

    # Cumsum row wise
    for idx in range(1, len(bins)):
        stacked_bar_df[bins[idx]] = stacked_bar_df[bins[idx]] + stacked_bar_df[
            bins[idx - 1]]

    # Get relevant palette
    if palette:
        palette = palette[:len(bins)]
    else:
        palette = sns.color_palette()[:len(bins)]

    # Plot
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)

    if horizontal:
        for color, bin_label in reversed(list(zip(palette, bins))):
            sns.barplot(y=groupby,
                        x=bin_label,
                        data=stacked_bar_df,
                        color=color,
                        label=bin_label,
                        ax=ax)
    else:
        for color, bin_label in reversed(list(zip(palette, bins))):
            sns.barplot(x=groupby,
                        y=bin_label,
                        data=stacked_bar_df,
                        color=color,
                        label=bin_label,
                        ax=ax)

    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')

    if unit:
        if horizontal:
            ax.set(xlim=(0, unit))
        else:
            ax.set(ylim=(0, unit))

    if horizontal:
        ax.set(xlabel='')
    else:
        ax.set(ylabel='')

    return ax
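A small, hypothetical example of calling display_stacked_cat_bar; the ticket-style data is invented purely for illustration.

import pandas as pd

# One row per ticket: bars grouped by 'team', stacked by 'status',
# scaled to percentages via unit=100.
df = pd.DataFrame({
    'team':   ['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C'],
    'status': ['open', 'closed', 'closed', 'open', 'open',
               'closed', 'open', 'closed', 'closed'],
})
ax = display_stacked_cat_bar(df, groupby='team', on='status',
                             unit=100, horizontal=True)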
Example #33
    def plot_pollster_house_effects(self, samples, hebrew=True):
        """
        Plot the house effects of each pollster per party.
        """
        import matplotlib.pyplot as plt
        import matplotlib.patches as mpatches
        import matplotlib.ticker as ticker
        from bidi import algorithm as bidialg

        house_effects = samples.transpose(2, 1, 0)
        fe = self.forecast_model

        actual_pollsters = [
            i for i in fe.dynamics.pollster_mapping.items() if i[1] is not None
        ]
        pollster_ids = [
            fe.pollster_ids[pollster]
            for _, pollster in sorted(actual_pollsters, key=lambda i: i[1])
        ]

        plots = []
        for i, party in enumerate(fe.party_ids):

            def pollster_label(pi, pollster_id):
                perc = '%.2f %%' % (100 * house_effects[i][pi].mean())
                if hebrew and len(
                        fe.config['pollsters'][pollster_id]['hname']) > 0:
                    label = perc + ' :' + bidialg.get_display(
                        fe.config['pollsters'][pollster_id]['hname'])
                else:
                    label = fe.config['pollsters'][pollster_id][
                        'name'] + ': ' + perc
                return label

            cpalette = sns.color_palette("cubehelix", len(pollster_ids))
            patches = [
                mpatches.Patch(color=cpalette[pi],
                               label=pollster_label(pi, pollster))
                for pi, pollster in enumerate(pollster_ids)
            ]

            fig, ax = plt.subplots(figsize=(10, 2))
            fig.set_facecolor('white')
            legend = fig.legend(handles=patches, loc='best', ncol=2)
            if hebrew:
                for col in legend._legend_box._children[-1]._children:
                    for c in col._children:
                        c._children.reverse()
                    col.align = "right"
            ax.set_title(
                bidialg.get_display(fe.parties[party]['hname']
                                    ) if hebrew else fe.parties[party]['name'])
            for pi, pollster_house_effects in enumerate(house_effects[i]):
                sns.kdeplot(100 * pollster_house_effects,
                            shade=True,
                            ax=ax,
                            color=cpalette[pi])
            ax.xaxis.set_major_formatter(ticker.PercentFormatter(decimals=1))
            ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=1))
            plots += [ax]
            fig.text(.5,
                     1.05,
                     bidialg.get_display('הטיית הסוקרים')
                     if hebrew else 'House Effects',
                     ha='center',
                     fontsize='xx-large')
            fig.text(.5,
                     .05,
                     'Generated using pyHoshen © 2019 - 2021',
                     ha='center')
Example #34
import numpy as np
import seaborn as sns
import mayavi.mlab as mlab
import os

colors = sns.color_palette('Paired', 9 * 2)
colors2 = sns.color_palette('Set2', 9 * 2)
names = [
    'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
    'Misc', 'DontCare'
]


def draw_box(line, in_color=colors):
    line = line.split()
    if len(line) == 15:  # label file
        lab, _, _, _, _, _, _, _, h, w, l, x, y, z, rot = line
    else:  # eval file
        lab, _, _, _, _, _, _, _, h, w, l, x, y, z, rot, conf = line
    h, w, l, x, y, z, rot = map(float, [h, w, l, x, y, z, rot])
    if lab != 'DontCare':
        x_corners = [
            l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2
        ]
        y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
        z_corners = [
            w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2
        ]
        corners_3d = np.vstack([x_corners, y_corners, z_corners])  # (3, 8)

        # transform the 3d bbox from object coordinate to camera_0 coordinate
Example #35
def make_2d_pca(data, marker=False, scaling=False):
    '''make 2d PCA plot from pandas dataframe with genes as columns
    rows as samples. Cell type as a column "cell". Returns seaborn lmplot
    Scaling options = AutoScale, MinMax, MaxAbs, Robust
    '''
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set()
    from sklearn.decomposition import PCA
    from sklearn import preprocessing
    matrix_df = data.drop('cell', axis='columns')
    matrix = matrix_df.to_numpy()
    if scaling == 'AutoScale':
        matrix = preprocessing.scale(matrix)
    if scaling == 'MinMax':
        min_max_scaler = preprocessing.MinMaxScaler()
        matrix = min_max_scaler.fit_transform(matrix)
    if scaling == 'MaxAbs':
        max_abs_scaler = preprocessing.MaxAbsScaler()
        matrix = max_abs_scaler.fit_transform(matrix)
    if scaling == 'Robust':
        robust_scaler = preprocessing.RobustScaler()
        matrix = robust_scaler.fit_transform(matrix)
    if scaling is False:
        matrix = matrix
    pca = PCA(n_components=2)
    pca.fit(matrix)
    print('explained variance is {0}'.format(
        pca.explained_variance_ratio_))
    pca = PCA(n_components=2).fit_transform(matrix)

    pca_df = pd.DataFrame(matrix,
                          columns=matrix_df.columns.values,
                          index=matrix_df.index.values)

    pca_df['PCA1'] = pca[:, 0]
    pca_df['PCA2'] = pca[:, 1]
    pca_df = pca_df.join(data['cell'])

    markers = ['1', '2', '3', '4', 'p', 's',
               'x', 'o', '.', 's', '+']
    sns.set_palette(sns.color_palette("hls",
                                      pca_df['cell'].unique().size))
    y = []
    if marker is True:
        x = sns.lmplot(x="PCA1", y="PCA2",
                       data=pca_df,
                       hue='cell',
                       fit_reg=False,
                       markers=markers[0:pca_df['cell'].unique().size])
        y.append(x)
    else:
        x = sns.lmplot(x="PCA1", y="PCA2",
                       data=pca_df,
                       hue='cell',
                       fit_reg=False)
        y.append(x)
    y.append(pca_df)
    return y
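A hypothetical call to make_2d_pca; the expression-like matrix below is random, and it assumes a pandas/seaborn version compatible with the calls used in the function above.

import numpy as np
import pandas as pd

# Invented data: genes as columns, samples as rows, plus the required
# 'cell' column naming each sample's cell type.
rng = np.random.RandomState(1)
data = pd.DataFrame(rng.rand(12, 5), columns=list('ABCDE'))
data['cell'] = ['T-cell', 'B-cell', 'NK'] * 4
lmplot_obj, pca_df = make_2d_pca(data, marker=True, scaling='AutoScale')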
Example #36
def plot_conditions(epochs,
                    conditions=OrderedDict(),
                    ci=97.5,
                    n_boot=1000,
                    title='',
                    palette=None,
                    ylim=(-6, 6),
                    diff_waveform=(1, 2)):
    """Plot ERP conditions.
    Args:
        epochs (mne.epochs): EEG epochs
    Keyword Args:
        conditions (OrderedDict): dictionary that contains the names of the
            conditions to plot as keys, and the list of corresponding marker
            numbers as value. E.g.,
                conditions = {'Non-target': [0, 1],
                               'Target': [2, 3, 4]}
        ci (float): confidence interval in range [0, 100]
        n_boot (int): number of bootstrap samples
        title (str): title of the figure
        palette (list): color palette to use for conditions
        ylim (tuple): (ymin, ymax)
        diff_waveform (tuple or None): tuple of ints indicating which
            conditions to subtract for producing the difference waveform.
            If None, do not plot a difference waveform
    Returns:
        (matplotlib.figure.Figure): figure object
        (list of matplotlib.axes._subplots.AxesSubplot): list of axes
    """
    if isinstance(conditions, dict):
        conditions = OrderedDict(conditions)

    if palette is None:
        palette = sns.color_palette("hls", len(conditions) + 1)

    X = epochs.get_data() * 1e6
    times = epochs.times
    y = pd.Series(epochs.events[:, -1])

    fig, axes = plt.subplots(2, 2, figsize=[12, 6], sharex=True, sharey=True)
    axes = [axes[1, 0], axes[0, 0], axes[0, 1], axes[1, 1]]

    for ch in range(4):
        for cond, color in zip(conditions.values(), palette):
            sns.tsplot(X[y.isin(cond), ch],
                       time=times,
                       color=color,
                       n_boot=n_boot,
                       ci=ci,
                       ax=axes[ch])

        if diff_waveform:
            diff = (np.nanmean(X[y == diff_waveform[1], ch], axis=0) -
                    np.nanmean(X[y == diff_waveform[0], ch], axis=0))
            axes[ch].plot(times, diff, color='k', lw=1)

        axes[ch].set_title(epochs.ch_names[ch])
        axes[ch].set_ylim(ylim)
        axes[ch].axvline(x=0,
                         ymin=ylim[0],
                         ymax=ylim[1],
                         color='k',
                         lw=1,
                         label='_nolegend_')

    axes[0].set_xlabel('Time (s)')
    axes[0].set_ylabel('Amplitude (uV)')
    axes[-1].set_xlabel('Time (s)')
    axes[1].set_ylabel('Amplitude (uV)')

    if diff_waveform:
        legend = (['{} - {}'.format(diff_waveform[1], diff_waveform[0])] +
                  list(conditions.keys()))
    else:
        legend = conditions.keys()
    axes[-1].legend(legend)
    sns.despine()
    plt.tight_layout()

    if title:
        fig.suptitle(title, fontsize=20)

    return fig, axes
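A hypothetical invocation of plot_conditions; `epochs` is assumed to be an existing mne.Epochs object with four EEG channels and event codes 1 and 2, and an older seaborn that still ships sns.tsplot is assumed.

from collections import OrderedDict

# Condition names mapped to event-marker lists, mirroring the docstring.
conditions = OrderedDict([('Non-target', [1]), ('Target', [2])])
fig, ax_list = plot_conditions(epochs, conditions=conditions, ci=97.5,
                               n_boot=1000, title='Target vs. non-target ERP',
                               diff_waveform=(1, 2))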
Example #37
print('blend-mask', np.nanmean(gmt['rcp85', :, 'gmt_bm', 2010:2020]) - 0.93)
print('millar', np.nanmean(gmt['rcp85', :, 'gmt_millar', 2010:2020]))
print('blend-mask', np.nanmean(gmt['rcp85', :, 'gmt_bm', 2015:2016]) - 0.93)

# FIG SI 1
plot_dict = {
    'gmt_sat': {
        'l_color': 'orange',
        'color': 'darkorange',
        'longname': '$\mathregular{GMT_{SAT}}$',
        'pos': 0.65,
        'lsty': '-'
    },
    'gmt_ar5': {
        'l_color': 'lawngreen',
        'color': sns.color_palette()[1],
        'longname': '$\mathregular{GMT_{AR5}}$',
        'pos': 0.65,
        'lsty': '--'
    },
    'gmt_millar': {
        'l_color': 'cornflowerblue',
        'color': sns.color_palette()[0],
        'longname': '$\mathregular{GMT_{M17}}$',
        'pos': 0.75,
        'lsty': '--'
    },
    'gmt_bm': {
        'l_color': 'tomato',
        'color': sns.color_palette()[2],
        'longname': '$\mathregular{GMT_{blend-mask}}$',
Example #38
def get_colors():
    p_names = ['Right', 'Left', 'Rest', 'FB', 'Closed', 'Opened', 'Baseline']
    cm = sns.color_palette('Paired', n_colors=len(p_names))
    c = dict(zip(p_names, [cm[j] for j in range(len(p_names))]))
    return c
Example #39
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.style as style
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings


def ignore_warn(*args, **kwargs):
    pass


warnings.warn = ignore_warn

from scipy import stats
from scipy.stats import norm, skew  # norm is used to generate a normal distribution

pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))

from subprocess import check_output

############################## Data loading & preprocessing ###################################

train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
print("\n训练集形状 : {} ".format(train.shape))
print("测试集形状 : {} ".format(test.shape))

#Save the 'Id' column
Example #40
d = 2

n1s = n_xor
n2s = n_nxor

ns = np.concatenate((n1s, n2s + n1s[-1]))

ls = ['-', '--']
algorithms = ['Uncertainty Forest', 'Lifelong Forest']

#%%
fontsize = 30
labelsize = 27.5

colors = sns.color_palette('Dark2', n_colors=2)

X, Y = generate_gaussian_parity(750, cov_scale=0.1, angle_params=0)
Z, W = generate_gaussian_parity(750, cov_scale=0.1, angle_params=np.pi / 4)

fig, ax = plt.subplots(2, 2, figsize=(16, 16))

ax[0][0].scatter(Z[:, 0], Z[:, 1], c=get_colors(colors, W), s=50)

ax[0][0].set_xticks([])
ax[0][0].set_yticks([])
ax[0][0].set_title('Gaussian R-XOR', fontsize=30)
ax[0][0].axis('off')

# ax.set_aspect('equal')
TASK1 = 'XOR'
Example #41
import pandas as pd
import datetime
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm

%matplotlib inline

from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())

import seaborn as sns
sns.set(style="ticks", color_codes=True, font_scale=1.5)
color = sns.color_palette()
sns.set_style('darkgrid')

from mpl_toolkits.mplot3d import Axes3D

import plotly as py
import plotly.graph_objs as go
py.offline.init_notebook_mode()

from scipy import stats
from scipy.stats import skew, norm, probplot, boxcox
from sklearn import preprocessing

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
Пример #42
0
def plot_panel_transits(datadir, ax=None, insts=None, companions=None, colors=None, title=None, ppm=False, ylim=None, yticks=None, fontscale=2):

    config.init(datadir)
    
    #::: more plotting settings
    SMALL_SIZE = 8*fontscale
    MEDIUM_SIZE = 10*fontscale
    BIGGER_SIZE = 12*fontscale
    plt.rc('font', size=MEDIUM_SIZE)          # controls default text sizes
    plt.rc('axes', titlesize=BIGGER_SIZE)     # fontsize of the axes title
    plt.rc('axes', labelsize=BIGGER_SIZE)    # fontsize of the x and y labels
    plt.rc('xtick', labelsize=MEDIUM_SIZE)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=MEDIUM_SIZE)    # fontsize of the tick labels
    plt.rc('legend', fontsize=MEDIUM_SIZE)    # legend fontsize
    plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
    
    samples = draw_initial_guess_samples()
    params_median, params_ll, params_ul = get_params_from_samples(samples)
    
    if companions is None:
        companions = config.BASEMENT.settings['companions_phot']
    if colors is None:
        colors = [sns.color_palette('deep')[i] for i in [0,1,3]]
    if insts is None:
        insts = config.BASEMENT.settings['inst_phot']

    ally = []
    
    if ax is None:
        ax_init = None
        fig, axes = plt.subplots(len(insts),len(companions),figsize=(6*len(companions),4*len(insts)), sharey=True, sharex=True)
        axes = np.atleast_2d(axes).T
    else:
        ax_init = ax
        axes = np.atleast_2d(ax).T
        
    for i,(companion, color) in enumerate(zip(companions, colors)):
        
        for j,inst in enumerate(insts):
            ax = axes[i,j]
            
            key='flux'
            if title is None:
                if i==0:
                    title=inst
                else:
                    title=''
            if j==len(insts)-1:
                xlabel=r'$\mathrm{ T - T_0 \ (h) }$'
            else:
                xlabel=''
            if i==0:
                if ppm:
                    ylabel=r'$\Delta$ Flux (ppm)'
                else:
                    ylabel='Relative Flux'
            else:
                ylabel=''
            alpha = 1.
                    
            x = config.BASEMENT.data[inst]['time']
            baseline_median = calculate_baseline(params_median, inst, key) #evaluated on x (!)
            y = config.BASEMENT.data[inst][key] - baseline_median
            
            zoomfactor = params_median[companion+'_period']*24.
            
            for other_companion in config.BASEMENT.settings['companions_phot']:
                if companion!=other_companion:
                    model = flux_fct(params_median, inst, other_companion)
                    y -= model
                    y += 1.
            
            if ppm:
                y = (y-1)*1e6
                    
            dt = 20./60./24. / params_median[companion+'_period']
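            # (dt above: a 20-minute bin width, expressed in units of the companion's orbital phase, used for the phase folding below)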
                
            phase_time, phase_y, phase_y_err, _, phi = lct.phase_fold(x, y, params_median[companion+'_period'], params_median[companion+'_epoch'], dt = dt, ferr_type='meansig', ferr_style='sem', sigmaclip=True)    
            ax.plot( phi*zoomfactor, y, 'b.', color='silver', rasterized=True )
            ax.errorbar( phase_time*zoomfactor, phase_y, yerr=phase_y_err, linestyle='none', marker='o', ms=8, color=color, capsize=0, zorder=11 )
            ax.set_xlabel(xlabel, fontsize=BIGGER_SIZE)
            ax.set_ylabel(ylabel, fontsize=BIGGER_SIZE)

            ax.text(0.97,0.87,companion,ha='right',va='bottom',transform=ax.transAxes,fontsize=BIGGER_SIZE)
            ax.text(0.03,0.87,title,ha='left',va='bottom',transform=ax.transAxes,fontsize=MEDIUM_SIZE)
        
            ally += list(phase_y)
            
            #model, phased
            xx = np.linspace( -4./zoomfactor, 4./zoomfactor, 1000)
            xx2 = params_median[companion+'_epoch'] + np.linspace( -4./zoomfactor, 4./zoomfactor, 1000)*params_median[companion+'_period']
            for ii in range(samples.shape[0]):
                s = samples[ii,:]
                p = update_params(s)
#                p = update_params(s, phased=True)
                model = flux_fct(p, inst, companion, xx=xx2) #evaluated on xx2 (!)
                if ppm:
                    model = (model-1)*1e6
                ax.plot( xx*zoomfactor, model, 'r-', alpha=alpha, zorder=12, lw=2 )
                 
    if ppm:
        ylim0 = np.nanmin(ally) - 500
        ylim1 = np.nanmax(ally) + 500
    else:
        ylim0 = np.nanmin(ally) - 500/1e6
        ylim1 = np.nanmax(ally) + 500/1e6
   
    if ylim is None:
        ylim = [ylim0, ylim1]
    
    for i in range(len(companions)):
        for j in range(len(insts)):
            ax = axes[i,j]
            ax.set( xlim=[-4,4], ylim=ylim )
            if yticks is not None:
                ax.set(yticks=yticks)
            ax.set_xticklabels(ax.get_xticks(), {'fontsize': MEDIUM_SIZE})
            ax.set_yticklabels(ax.get_yticks(), {'fontsize': MEDIUM_SIZE})
            
    
    plt.tight_layout()

    if ax_init is None:
        fig.savefig( os.path.join(config.BASEMENT.outdir,'data_panel_transits.pdf'), bbox_inches='tight' )
        return fig, axes
    else:
        return ax
Пример #43
0
def plot_mwd(lon,
             dec,
             color_val,
             origin=0,
             size=3,
             title='Mollweide projection',
             projection='mollweide',
             savdir='../results/',
             savname='mwd_0.pdf',
             overplot_galactic_plane=True,
             is_tess=False,
             is_radec=None):
    '''
    args, kwargs:

        lon, dec are arrays of the same length. They can be (RA, dec) or
            (ecliptic lon, ecliptic lat). lon takes values in [0, 360), dec in [-90, 90].

        is_radec: mandatory. True if (RA,dec), else elong/elat.

        title is the title of the figure.

        projection is the kind of projection: 'mollweide', 'aitoff', ...

    comments: see
    http://balbuceosastropy.blogspot.com/2013/09/the-mollweide-projection.html.
    '''
    if is_radec is None:
        raise AssertionError('is_radec must be set: True for (RA, dec), '
                             'False for ecliptic coordinates')

    # for matplotlib mollweide projection, x and y values (usually RA/dec, or
    # lat/lon) must be in -pi<x<pi, -pi/2<y<pi/2.
    # In astronomical coords, RA increases east (left on celestial charts).
    # Here, the default horizontal scale has positive to the right.

    def _shift_lon_get_x(lon, origin):
        x = np.array(np.remainder(lon + 360 - origin, 360))  # shift lon values
        ind = x > 180
        x[ind] -= 360  # scale conversion to [-180, 180]
        x = -x  # reverse the scale: East to the left
        return x

    x = _shift_lon_get_x(lon, origin)

    fig = plt.figure(figsize=(6, 4.5))
    ax = fig.add_subplot(111, projection=projection, facecolor='White')

    if is_tess:
        # set up colormap
        import seaborn as sns
        rgbs = sns.color_palette('Paired', n_colors=13, desat=0.9)
        cmap = mpl.colors.ListedColormap(rgbs)
        bounds = list(np.arange(0.5, 14.5, 1))
        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)

        # plot the stars
        cax = ax.scatter(np.radians(x),
                         np.radians(dec),
                         c=color_val,
                         s=size,
                         lw=0,
                         zorder=2,
                         cmap=cmap,
                         norm=norm,
                         rasterized=True)

        # set up colorbar
        cbar = fig.colorbar(cax,
                            cmap=cmap,
                            norm=norm,
                            boundaries=bounds,
                            fraction=0.025,
                            pad=0.03,
                            ticks=np.arange(13) + 1,
                            orientation='vertical')
        ylabels = np.arange(1, 14, 1)
        cbar.ax.set_yticklabels([str(y) for y in ylabels])
        cbar.set_label('number of pointings', rotation=270, labelpad=10)
        cbar.ax.tick_params(direction='in')

        # label each sector. this involves computing the positions first...
        views, sector_numbers, lambda_init, n_cameras = [], 13, 315.8 * u.deg, 4
        southern_elats = np.array([-18, -42, -66, -90]) * u.degree
        for sector_number in range(sector_numbers):
            this_elon = np.mod(
                lambda_init.to(u.deg).value + sector_number *
                (360 / sector_numbers), 360) * u.deg
            for n_camera in range(n_cameras):
                this_elat = southern_elats[n_camera]
                this_coord = SkyCoord(lon=this_elon,
                                      lat=this_elat,
                                      frame='barycentrictrueecliptic')
                views.append([
                    sector_number, n_camera, this_elon, this_elat,
                    this_coord.icrs.ra, this_coord.icrs.dec
                ])
        views_columns = [
            'sector_number', 'n_camera', 'elon', 'elat', 'ra', 'dec'
        ]
        views = pd.DataFrame(views, columns=views_columns)
        subsel = (views['n_camera'] == 0)
        for ix, view in views[subsel].iterrows():
            this_elon, this_elat = view['elon'], view['elat']
            sector_number = int(view['sector_number'])
            this_coord = SkyCoord(lon=this_elon,
                                  lat=this_elat,
                                  frame='barycentrictrueecliptic')
            if is_radec:
                this_x = _shift_lon_get_x(this_coord.icrs.ra.value, origin)
                this_dec = this_coord.icrs.dec.value
            else:
                this_x = _shift_lon_get_x(this_elon.value, origin)
                this_dec = this_elat.value
            ax.text(np.radians(this_x),
                    np.radians(this_dec),
                    'S' + str(sector_number),
                    fontsize='small',
                    zorder=4,
                    ha='center',
                    va='center')

    else:
        ax.scatter(np.radians(x),
                   np.radians(dec),
                   c=color_val,
                   s=size,
                   zorder=2)

    if overplot_galactic_plane:

        ##########
        # make many points, and also label the galactic center. ideally you
        # will never need to follow these coordinate transformations.
        glons = np.arange(0, 360)
        glats = np.zeros_like(glons)
        coords = SkyCoord(glons * u.degree, glats * u.degree, frame='galactic')
        gplane_ra, gplane_dec = coords.icrs.ra.value, coords.icrs.dec.value
        gplane_elon = coords.barycentrictrueecliptic.lon.value
        gplane_elat = coords.barycentrictrueecliptic.lat.value
        if is_radec:
            gplane_x = _shift_lon_get_x(gplane_ra, origin)
        else:
            gplane_x = _shift_lon_get_x(gplane_elon, origin)
            gplane_dec = gplane_elat
        ax.scatter(np.radians(gplane_x),
                   np.radians(gplane_dec),
                   c='lightgray',
                   s=2,
                   zorder=3)
        gcenter = SkyCoord('17h45m40.04s', '-29d00m28.1s', frame='icrs')
        gcenter_ra, gcenter_dec = gcenter.icrs.ra.value, gcenter.icrs.dec.value
        gcenter_elon = gcenter.barycentrictrueecliptic.lon.value
        gcenter_elat = gcenter.barycentrictrueecliptic.lat.value
        if is_radec:
            gcenter_x = _shift_lon_get_x(np.array(gcenter_ra), origin)
        else:
            gcenter_x = _shift_lon_get_x(np.array(gcenter_elon), origin)
            gcenter_dec = gcenter_elat
        ax.scatter(np.radians(gcenter_x),
                   np.radians(gcenter_dec),
                   c='black',
                   s=2,
                   zorder=4,
                   marker='X')
        ax.text(np.radians(gcenter_x),
                np.radians(gcenter_dec),
                'GC',
                fontsize='x-small',
                ha='left',
                va='top')
        ##########

    tick_labels = np.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])
    tick_labels = np.remainder(tick_labels + 360 + origin, 360)
    ax.set_xticklabels(tick_labels, fontsize='x-small')
    ax.set_yticklabels(np.arange(-75, 75 + 15, 15), fontsize='x-small')

    ax.set_title(title, y=1.05, fontsize='small')
    if is_radec:
        ax.set_xlabel('ra', fontsize='x-small')
        ax.set_ylabel('dec', fontsize='x-small')
    else:
        ax.set_xlabel('ecl lon', fontsize='x-small')
        ax.set_ylabel('ecl lat', fontsize='x-small')
    ax.grid(color='lightgray', linestyle='--', linewidth=0.5, zorder=-1)

    ax.text(0.99,
            0.01,
            'github.com/lgbouma/tessmaps',
            fontsize='xx-small',
            transform=ax.transAxes,
            ha='right',
            va='bottom')
    fig.tight_layout()
    #fig.savefig(savdir+savname, bbox_inches='tight')
    fig.savefig(savdir + savname.replace('pdf', 'png'),
                dpi=350,
                bbox_inches='tight')
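
# A minimal usage sketch (illustrative, not part of the original module): it assumes the module's
# own imports (numpy as np, matplotlib, astropy, pandas) are already in place and simply calls
# plot_mwd on hypothetical random sky positions in (RA, dec).
np.random.seed(0)
demo_ra = np.random.uniform(0, 360, 500)                         # RA in degrees, [0, 360)
demo_dec = np.degrees(np.arcsin(np.random.uniform(-1, 1, 500)))  # uniform over the sphere
demo_color = np.random.rand(500)
plot_mwd(demo_ra, demo_dec, demo_color, origin=0, size=3,
         title='Random sky positions (demo)', savdir='./', savname='mwd_demo.pdf',
         overplot_galactic_plane=True, is_tess=False, is_radec=True)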
Пример #44
0
    def __init__(self, config):

        self.config = config
        with open(config.map) as map_file:
            self.data_map = yaml.safe_load(map_file)

        with open(config.schedule) as states_file:
            self.schedule = yaml.safe_load(states_file)

        self.num_agents = len(self.data_map["agents"])
        self.K = self.config.nGraphFilterTaps
        self.ID_agent = self.config.id_chosenAgent
        data_contents = sio.loadmat(config.GSO)
        self.GSO = np.transpose(data_contents["gso"], (2, 3, 0, 1)).squeeze(3)
        self.commRadius = data_contents["commRadius"]
        self.maxLink = 500

        aspect = self.data_map["map"]["dimensions"][0] / self.data_map["map"][
            "dimensions"][1]

        self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))
        self.ax = self.fig.add_subplot(111, aspect='equal')
        self.fig.subplots_adjust(left=0,
                                 right=1,
                                 bottom=0,
                                 top=1,
                                 wspace=None,
                                 hspace=None)
        # self.ax.set_frame_on(False)

        self.patches = []
        self.artists = []
        self.agents = dict()
        self.commLink = dict()
        self.agent_names = dict()

        # self.list_color = self.get_cmap(self.num_agents)
        self.list_color = sns.color_palette("hls", self.num_agents)
        self.list_color_commLink = sns.color_palette("hls", 8)  # self.K)

        self.list_commLinkStyle = list(lines.lineStyles.keys())

        # create boundary patch
        xmin = -0.5
        ymin = -0.5
        xmax = self.data_map["map"]["dimensions"][0] - 0.5
        ymax = self.data_map["map"]["dimensions"][1] - 0.5

        # self.ax.relim()
        plt.xlim(xmin, xmax)
        plt.ylim(ymin, ymax)
        # self.ax.set_xticks([])
        # self.ax.set_yticks([])
        # plt.axis('off')
        # self.ax.axis('tight')
        # self.ax.axis('off')

        self.patches.append(
            Rectangle((xmin, ymin),
                      xmax - xmin,
                      ymax - ymin,
                      facecolor='none',
                      edgecolor='black'))
        for o in self.data_map["map"]["obstacles"]:
            x, y = o[0], o[1]
            self.patches.append(
                Rectangle((x - 0.5, y - 0.5),
                          1,
                          1,
                          facecolor='black',
                          edgecolor='black'))

        # initialize communication Link
        for id_link in range(self.maxLink):
            #https://matplotlib.org/api/artist_api.html#module-matplotlib.lines
            name_link = "{}".format(id_link)
            # self.commLink[name_link] = FancyArrowPatch((0,0), (0,0),linewidth=2)
            self.commLink[name_link] = plt.Line2D((0, 0), (0, 0), linewidth=2)
            self.artists.append(self.commLink[name_link])

        # print(self.schedule["schedule"])
        # create agents:
        self.T = 0
        # draw goals first
        for d, i in zip(self.data_map["agents"], range(0, self.num_agents)):
            self.patches.append(
                Rectangle((d["goal"][0] - 0.25, d["goal"][1] - 0.25),
                          0.6,
                          0.6,
                          facecolor=self.list_color[i],
                          edgecolor=self.list_color[i],
                          alpha=0.5))

        for d, i in zip(self.data_map["agents"], range(0, self.num_agents)):
            #https://matplotlib.org/api/artist_api.html#module-matplotlib.lines
            name = d["name"]
            self.agents[name] = Circle((d["start"][0], d["start"][1]),
                                       0.4,
                                       facecolor=self.list_color[i],
                                       edgecolor=self.list_color[i])
            self.agents[name].original_face_color = self.list_color[i]
            self.patches.append(self.agents[name])
            self.T = max(self.T, self.schedule["schedule"][name][-1]["t"])

            # set floating ID
            self.agent_names[name] = self.ax.text(d["start"][0], d["start"][1],
                                                  name.replace('agent', ''))

            self.agent_names[name].set_horizontalalignment('center')
            self.agent_names[name].set_verticalalignment('center')
            self.artists.append(self.agent_names[name])

        # self.ax.add_line(dotted_line)
        # self.ax.set_axis_off()
        # self.fig.axes[0].set_visible(False)
        # self.fig.axes.get_yaxis().set_visible(False)

        # self.fig.tight_layout()

        self.anim = animation.FuncAnimation(self.fig,
                                            self.animate_func,
                                            init_func=self.init_func,
                                            frames=int(self.T + 1) * 10,
                                            interval=100,
                                            blit=True)
Пример #45
0
from astropy import log
import os
import seaborn as sb
from scipy.special import erf

from cube_analysis.spectral_stacking_models import find_hwhm, fit_gaussian

from paths import (iram_co21_14B088_data_path, fourteenB_HI_data_wGBT_path)
from plotting_styles import (default_figure, onecolumn_figure,
                             onecolumn_twopanel_figure)
from galaxy_params import gal_feath as gal
from constants import (co21_mass_conversion, hi_mass_conversion, hi_freq,
                       beam_eff_30m_druard)

default_figure()
cpal = sb.color_palette()

log.info("Running 38'' analysis.")
# Get the original CO mask. We'll only use the same pixel positions with the
# low res data.
co_mask = fits.open(
    iram_co21_14B088_data_path(
        "m33.co21_iram.14B-088_HI_source_mask.fits"))[0].data

hi_cube_38 = SpectralCube.read(
    fourteenB_HI_data_wGBT_path(
        "smooth_2beam/M33_14B-088_HI.clean.image.GBT_feathered.38arcsec.fits"))
co_cube_38 = SpectralCube.read(
    iram_co21_14B088_data_path(
        "smooth_2beam/m33.co21_iram.14B-088_HI.38arcsec.fits"))
co_rms_38 = fits.open(
Пример #46
0
def convert_bulk_bkg(obj3d, org_obj3d, result_dir, job,
			init_layer, final_layer, task, kwargs, is_verbose):
	X = np.array(np.where(obj3d == 1.0)).T
	obj3d_nan = np.full(obj3d.shape, np.nan) # obj3d.shape
	

	# # for HAC only
	# kwargs = dict({"distance_threshold":None,
	# 		"n_clusters":200, # either "distance_threshold" or "n_clusters" will be set
	# 		"affinity":'euclidean', "linkage":'ward', 
	# 		})
	# hac_model = hac(**kwargs)
	# hac_model.fit(X)
	# saveat = result_dir+"/dedogram.pdf"
	# labels = hac_model.model.labels_
	# # # for HAC ploting only
	# dd_plot_kwargs = dict({"truncate_mode":'level', "p":3})
	# dendrogram = hac_model.plot_dendrogram(saveat=saveat, **dd_plot_kwargs)
	# # print (dendrogram["ivl"])

	layer_id = "/layer_{0}-{1}".format(init_layer, final_layer)

	by_particle_dir = result_dir+text_dir+"/particles/"+job+layer_id
	print ("Prepare to fit")
	clusterer = hdbscan.HDBSCAN(min_cluster_size=kwargs["min_cluster_size"], 
		min_samples=kwargs["min_samples"], 
		alpha=kwargs["alpha"], allow_single_cluster=kwargs["allow_single_cluster"],
		prediction_data=True)
	# X_fit = copy.copy(X)
	# density = []
	# points = np.array(X)
	# for p in points:
	# 	pz, py, px = p[0], p[1], p[2]
	# 	print ("pz, py, px", pz, py, px)
	# 	n_upsampling = int(org_obj3d[pz][py][px]*1000)
		# if n_upsampling > 0:
		# 	# print ("Upsampling: ", n_upsampling, "points")
		# 	for i in range(n_upsampling):
		# 		p_sample = one_sample(px, py, pz, final_layer, init_layer)
		# 		X_fit = np.append(X_fit, [p_sample], axis=0)

	# print ("density:", density)
	# X_fit = np.append(X_fit, np.array(density), axis=1)
	
	# X_fit = minmax_scale(X_fit)

	clusterer.fit(X)
	print ("Done fitting")

	# labels = clusterer.predict(X)

	labels, strengths = hdbscan.approximate_predict(clusterer, X)

	assert X.shape[0] == len(labels)

	if is_verbose:
		# # # plot dendrogram
		fig = plt.figure(figsize=(10, 10), dpi=300)
		clusterer.condensed_tree_.plot(select_clusters=True,
	        selection_palette=sns.color_palette('deep', 8))
		save_at = by_particle_dir+"dendrogram.pdf"
		makedirs(save_at)
		fig.tight_layout(rect=[0, 0.03, 1, 0.95])
		plt.savefig(save_at, transparent=False)
		print ("Save file at:", save_at)

		# # # End plot dendrogram

	set_labels = set(labels)
	n_clusters = len(set_labels)

	cmap = plt.cm.rainbow
	norm = matplotlib.colors.Normalize(vmin=0.0, vmax=n_clusters)

	for lbl in set_labels:
		pos_of_lbl = X[np.where(labels==lbl)]
		# print (pos_of_lbl)

		zs, ys, xs  = pos_of_lbl[:, 0], pos_of_lbl[:, 1], pos_of_lbl[:, 2]
		obj3d_nan[zs, ys, xs] = lbl #[lbl] * len(pos_of_lbl)

		print ("lbl:", lbl, "total:", len(set_labels), "particle size:", len(xs), len(ys), len(zs))
		parse2submatrix(org_data=org_obj3d, 
			xs=xs, ys=ys, zs=zs, savedir=by_particle_dir,
			pid=lbl, task=task,
			init_layer=init_layer, final_layer=final_layer,
			is_verbose=is_verbose)
		# break		 

	label_dir = result_dir+text_dir+"/lbl_in3D/"+job+layer_id
	label_fig_dir = result_dir+fig_dir+"/lbl_in3D/"+job+layer_id

	n_layers = obj3d.shape[0]

	vmin = np.nanmin(obj3d_nan)
	vmax = np.nanmax(obj3d_nan)

	for ith, layer in enumerate(range(init_layer, final_layer+1)):
		data = obj3d_nan[ith]
		file = label_dir+"/lbl_{}.txt".format(layer)
		makedirs(file)
		save_layer2text(data, file=file)
		print ("Save at:", file)

		if is_verbose:
			figfile = label_fig_dir+"/lbl_{}.pdf".format(layer)
			plot_density(values=data, save_at=figfile,  cmap_name="jet", 
					title=layer, vmin=vmin, vmax=vmax, is_save2input=None,
					is_lbl=True,set_labels=set_labels)
Пример #47
0
def get_finding_chart(
    source_ra,
    source_dec,
    source_name,
    image_source='ztfref',
    output_format='pdf',
    imsize=3.0,
    tick_offset=0.02,
    tick_length=0.03,
    fallback_image_source='dss',
    zscale_contrast=0.045,
    zscale_krej=2.5,
    **offset_star_kwargs,
):
    """Create a finder chart suitable for spectroscopic observations of
       the source

    Parameters
    ----------
    source_ra : float
        Right ascension (J2000) of the source
    source_dec : float
        Declination (J2000) of the source
    source_name : str
        Name of the source
    image_source : {'desi', 'dss', 'ztfref'}, optional
        Survey where the image comes from "desi", "dss", "ztfref"
        (more to be added)
    output_format : str, optional
        "pdf" or "png" -- determines the format of the returned finder
    imsize : float, optional
        Requested image size (on a side) in arcmin. Should be between 2-15.
    tick_offset : float, optional
        How far off of each source the tick mark should be drawn (in arcsec)
    tick_length : float, optional
        How long the tick mark should be (in arcsec)
    fallback_image_source : str, optional
        Which `image_source` to fall back to if the requested one fails
    zscale_contrast : float, optional
        Contrast parameter for the ZScale interval
    zscale_krej : float, optional
        Krej parameter for the ZScale interval
    **offset_star_kwargs : dict, optional
        Other parameters passed to `get_nearby_offset_stars`

    Returns
    -------
    dict
        success : bool
            Whether the request was successful or not, returning
            a sensible error in 'reason'
        name : str
            suggested filename based on `source_name` and `output_format`
        data : str
            binary encoded data for the image (to be streamed)
        reason : str
            If not successful, a reason is returned.
    """
    if (imsize < 2.0) or (imsize > 15):
        return {
            'success': False,
            'reason': 'Requested `imsize` out of range',
            'data': '',
            'name': '',
        }

    if image_source not in source_image_parameters:
        return {
            'success': False,
            'reason': f'image source {image_source} not in list',
            'data': '',
            'name': '',
        }

    matplotlib.use("Agg")
    fig = plt.figure(figsize=(11, 8.5), constrained_layout=False)
    widths = [2.6, 1]
    heights = [2.6, 1]
    spec = fig.add_gridspec(
        ncols=2,
        nrows=2,
        width_ratios=widths,
        height_ratios=heights,
        left=0.05,
        right=0.95,
    )

    # how wide on the side will the image be? 256 as default
    npixels = source_image_parameters[image_source].get("npixels", 256)
    # set the pixelscale in arcsec (typically about 1 arcsec/pixel)
    pixscale = 60 * imsize / npixels

    hdu = fits_image(source_ra,
                     source_dec,
                     imsize=imsize,
                     image_source=image_source)

    # skeleton WCS - this is the field that the user requested
    wcs = WCS(naxis=2)

    # set the headers of the WCS.
    # The center of the image is the reference point (source_ra, source_dec):
    wcs.wcs.crpix = [npixels / 2, npixels / 2]
    wcs.wcs.crval = [source_ra, source_dec]

    # create the pixel scale and orientation North up, East left
    # pixelscale is in degrees, established in the tangent plane
    # to the reference point
    wcs.wcs.cd = np.array([[-pixscale / 3600, 0], [0, pixscale / 3600]])
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]

    fallback = True
    if hdu is not None:
        im = hdu.data

        # replace the nans with medians
        im[np.isnan(im)] = np.nanmedian(im)

        # Fix the header keyword for the input system, if needed
        hdr = hdu.header
        if 'RADECSYS' in hdr:
            hdr.set('RADESYSa', hdr['RADECSYS'], before='RADECSYS')
            del hdr['RADECSYS']

        if source_image_parameters[image_source].get("reproject", False):
            # project image to the skeleton WCS solution
            log("Reprojecting image to requested position and orientation")
            im, _ = reproject_adaptive(hdu, wcs, shape_out=(npixels, npixels))
        else:
            wcs = WCS(hdu.header)

        if source_image_parameters[image_source].get("smooth", False):
            im = gaussian_filter(
                hdu.data,
                source_image_parameters[image_source]["smooth"] / pixscale)

        cent = int(npixels / 2)
        width = int(0.05 * npixels)
        test_slice = slice(cent - width, cent + width)
        all_nans = np.isnan(im[test_slice, test_slice].flatten()).all()
        all_zeros = (im[test_slice, test_slice].flatten() == 0).all()
        if not (all_zeros or all_nans):
            percents = np.nanpercentile(im.flatten(), [10, 99.0])
            vmin = percents[0]
            vmax = percents[1]
            interval = ZScaleInterval(
                nsamples=int(0.1 * (im.shape[0] * im.shape[1])),
                contrast=zscale_contrast,
                krej=zscale_krej,
            )
            norm = ImageNormalize(im, vmin=vmin, vmax=vmax, interval=interval)
            watermark = source_image_parameters[image_source]["str"]
            fallback = False

    if hdu is None or fallback:
        # if we got back a blank image, try to fallback on another survey
        # and return the results from that call
        if fallback_image_source is not None:
            if fallback_image_source != image_source:
                log(f"Falling back on image source {fallback_image_source}")
                return get_finding_chart(
                    source_ra,
                    source_dec,
                    source_name,
                    image_source=fallback_image_source,
                    output_format=output_format,
                    imsize=imsize,
                    tick_offset=tick_offset,
                    tick_length=tick_length,
                    fallback_image_source=None,
                    **offset_star_kwargs,
                )

        # we dont have an image here, so let's create a dummy one
        # so we can still plot
        im = np.zeros((npixels, npixels))
        watermark = None
        vmin = 0
        vmax = 0
        norm = ImageNormalize(im, vmin=vmin, vmax=vmax)

    # add the images in the top left corner
    ax = fig.add_subplot(spec[0, 0], projection=wcs)
    ax_text = fig.add_subplot(spec[0, 1])
    ax_text.axis('off')
    ax_starlist = fig.add_subplot(spec[1, 0:])
    ax_starlist.axis('off')

    ax.imshow(im, origin='lower', norm=norm, cmap='gray_r')
    ax.set_autoscale_on(False)
    ax.grid(color='white', ls='dotted')
    ax.set_xlabel(r'$\alpha$ (J2000)', fontsize='large')
    ax.set_ylabel(r'$\delta$ (J2000)', fontsize='large')
    obstime = offset_star_kwargs.get("obstime",
                                     datetime.datetime.utcnow().isoformat())
    ax.set_title(f'{source_name} Finder ({obstime})',
                 fontsize='large',
                 fontweight='bold')

    star_list, _, _, _, used_ztfref = get_nearby_offset_stars(
        source_ra, source_dec, source_name, **offset_star_kwargs)

    if not isinstance(star_list, list) or len(star_list) == 0:
        return {
            'success': False,
            'reason': 'failure to get star list',
            'data': '',
            'name': '',
        }

    ncolors = len(star_list)
    if star_list[0]['str'].startswith("!Data"):
        ncolors -= 1
    colors = sns.color_palette("colorblind", ncolors)

    start_text = [-0.35, 0.99]
    origin = "GaiaDR2" if not used_ztfref else "ZTFref"
    starlist_str = (
        f"# Note: {origin} used for offset star positions\n"
        "# Note: spacing in starlist may not copy/paste correctly in PDF\n" +
        "#       you can get starlist directly from" +
        f" /api/sources/{source_name}/offsets?" +
        f"facility={offset_star_kwargs.get('facility', 'Keck')}\n" +
        "\n".join([x["str"] for x in star_list]))

    # add the starlist
    ax_starlist.text(
        0,
        0.50,
        starlist_str,
        fontsize="x-small",
        family='monospace',
        transform=ax_starlist.transAxes,
    )

    # add the watermark for the survey
    props = dict(boxstyle='round', facecolor='gray', alpha=0.7)

    if watermark is not None:
        ax.text(
            0.035,
            0.035,
            watermark,
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes,
            fontsize='medium',
            fontweight='bold',
            color="yellow",
            alpha=0.5,
            bbox=props,
        )

    ax.text(
        0.95,
        0.035,
        f"{imsize}\u2032 \u00D7 {imsize}\u2032",  # size'x size'
        horizontalalignment='right',
        verticalalignment='center',
        transform=ax.transAxes,
        fontsize='medium',
        fontweight='bold',
        color="yellow",
        alpha=0.5,
        bbox=props,
    )

    # compass rose
    # rose_center_pixel = ax.transAxes.transform((0.04, 0.95))
    rose_center = pixel_to_skycoord(int(npixels * 0.1), int(npixels * 0.9),
                                    wcs)
    props = dict(boxstyle='round', facecolor='gray', alpha=0.5)

    for ang, label, off in [(0, "N", 0.01), (90, "E", 0.03)]:
        position_angle = ang * u.deg
        separation = (0.05 * imsize * 60) * u.arcsec  # 5%
        p2 = rose_center.directional_offset_by(position_angle, separation)
        ax.plot(
            [rose_center.ra.value, p2.ra.value],
            [rose_center.dec.value, p2.dec.value],
            transform=ax.get_transform('world'),
            color="gold",
            linewidth=2,
        )

        # label N and E
        position_angle = (ang + 15) * u.deg
        separation = ((0.05 + off) * imsize * 60) * u.arcsec
        p2 = rose_center.directional_offset_by(position_angle, separation)
        ax.text(
            p2.ra.value,
            p2.dec.value,
            label,
            color="gold",
            transform=ax.get_transform('world'),
            fontsize='large',
            fontweight='bold',
        )

    # account for Shane header
    if star_list[0]['str'].startswith("!Data"):
        star_list = star_list[1:]

    for i, star in enumerate(star_list):

        c1 = SkyCoord(star["ra"] * u.deg, star["dec"] * u.deg, frame='icrs')

        # mark up the right side of the page with position and offset info
        name_title = star["name"]
        if star.get("mag") is not None:
            name_title += f", mag={star.get('mag'):.2f}"
        ax_text.text(
            start_text[0],
            start_text[1] - i / ncolors,
            name_title,
            ha='left',
            va='top',
            fontsize='large',
            fontweight='bold',
            transform=ax_text.transAxes,
            color=colors[i],
        )
        source_text = f"  {star['ra']:.5f} {star['dec']:.5f}\n"
        source_text += f"  {c1.to_string('hmsdms')}\n"
        if (star.get("dras") is not None) and (star.get("ddecs") is not None):
            source_text += f'  {star.get("dras")} {star.get("ddecs")} to {source_name}'
        ax_text.text(
            start_text[0],
            start_text[1] - i / ncolors - 0.06,
            source_text,
            ha='left',
            va='top',
            fontsize='large',
            transform=ax_text.transAxes,
            color=colors[i],
        )

        # work on making marks where the stars are
        for ang in [0, 90]:
            # for the source itself (i=0), change the angle of the lines in
            # case the offset star is the same as the source itself
            position_angle = ang * u.deg if i != 0 else (ang + 225) * u.deg
            separation = (tick_offset * imsize * 60) * u.arcsec
            p1 = c1.directional_offset_by(position_angle, separation)
            separation = (tick_offset + tick_length) * imsize * 60 * u.arcsec
            p2 = c1.directional_offset_by(position_angle, separation)
            ax.plot(
                [p1.ra.value, p2.ra.value],
                [p1.dec.value, p2.dec.value],
                transform=ax.get_transform('world'),
                color=colors[i],
                linewidth=3 if imsize <= 4 else 2,
                alpha=0.8,
            )
        if star["name"].find("_o") != -1:
            # this is an offset star
            text = star["name"].split("_o")[-1]
            position_angle = 14 * u.deg
            separation = (tick_offset +
                          tick_length * 1.6) * imsize * 60 * u.arcsec
            p1 = c1.directional_offset_by(position_angle, separation)
            ax.text(
                p1.ra.value,
                p1.dec.value,
                text,
                color=colors[i],
                transform=ax.get_transform('world'),
                fontsize='large',
                fontweight='bold',
            )

    buf = io.BytesIO()
    fig.savefig(buf, format=output_format)
    plt.close(fig)
    buf.seek(0)

    return {
        "success": True,
        "name": f"finder_{source_name}.{output_format}",
        "data": buf.read(),
        "reason": "",
    }
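
# A minimal usage sketch (illustrative only): the source name and coordinates below are
# hypothetical, and the call assumes the surrounding module provides source_image_parameters,
# fits_image, get_nearby_offset_stars and log as used above.
result = get_finding_chart(210.9107, 54.3490, 'demo-source',
                           image_source='dss', output_format='png', imsize=4.0)
if result['success']:
    # result['data'] holds the binary image and result['name'] a suggested filename.
    with open(result['name'], 'wb') as f:
        f.write(result['data'])
else:
    print(result['reason'])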
Пример #48
0
def plot_scores():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages
    import seaborn as sns

    input_filename = results_filename()
    output_filename = input_filename.replace(".xlsx", "_scores-by-tv.pdf")

    # scores
    y_cols = [
        'recall_mean', 'auc', 'beta_r_bar', 'beta_fleiss_kappa',
        'beta_dice_bar'
    ]
    x_col = 'tv'

    # colors
    #sns.palplot(sns.color_palette("Paired"))
    pal = sns.color_palette("Paired")
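    # Keys are (a, l1_ratio) pairs; the light/dark Paired colours distinguish a=0.01 from a=0.1 within each l1_ratio.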
    colors = {
        (0.01, 0.1): pal[0],
        (0.1, 0.1): pal[1],
        (0.01, 0.9): pal[4],
        (0.1, 0.9): pal[5]
    }

    data = pd.read_excel(input_filename, sheet_name='cv_by_param')
    # avoid poor rounding
    data.l1_ratio = np.asarray(data.l1_ratio).round(3)
    assert len(data.l1_ratio.unique()) == 5
    data.tv = np.asarray(data.tv).round(5)
    assert len(data.tv.unique()) == 11
    data.a = np.asarray(data.a).round(5)
    assert len(data.a.unique()) == 3

    def close(vec, val, tol=1e-4):
        return np.abs(vec - val) < tol

    data = data[close(data.l1_ratio, .1) | close(data.l1_ratio, .9)]
    data = data[close(data.a, .01) | close(data.a, .1)]
    data.sort_values(by=x_col, ascending=True, inplace=True)

    pdf = PdfPages(output_filename)

    for y_col in y_cols:
        #y_col = y_cols[0]
        fig = plt.figure()
        for (l1, a), d in data.groupby(["l1_ratio", "a"]):
            print((a, l1))
            plt.plot(d.tv,
                     d[y_col],
                     color=colors[(a, l1)],
                     label="a:%.2f, l1/l2:%.1f" % (a, l1))
        plt.xlabel(x_col)
        plt.ylabel(y_col)
        plt.suptitle(y_col)
        plt.legend()
        pdf.savefig(fig)
        plt.clf()
    pdf.close()
Пример #49
0
# import pygraphviz as graphviz
# from graphviz import Graph
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import warnings
warnings.filterwarnings("ignore")
from matplotlib.ticker import MaxNLocator
from sklearn.manifold import TSNE

import seaborn as sns
import matplotlib.pyplot as plt
sns.set_context("notebook", font_scale=1.2,
                rc={"font.size": 12, "axes.titlesize": 10, "axes.labelsize": 10,
                    "xtick.labelsize": 8, "lines.linewidth": 2.5})
sns.set_style("white")
current_palette = sns.color_palette("Paired")
# Ref: http://seaborn.pydata.org/tutorial/color_palettes.html
# ['#e6f6e1', '#d7efd1', '#c6e9c2', '#abdeb6', '#8bd2bf', '#6bc3c9', '#4bb0d1', '#3192c1', '#1878b4', '#085da0']
# ['#ab162a', '#cf5246', '#eb9172', '#fac8af', '#feefe6', '#f1f1f1', '#d3d3d3', '#ababab', '#7c7c7c', '#484848']
# ['#ab162a', '#cf5246', '#eb9172', '#fac8af', '#faeae1', '#e6eff4', '#bbdaea', '#7bb6d6', '#3c8abe', '#1e61a5']
# ['#19122b', '#17344c', '#185b48', '#3c7632', '#7e7a36', '#bc7967', '#d486af', '#caa9e7', '#c2d2f3', '#d6f0ef']
# ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a']

gray = '#ababab'
blue = '#7bb6d6'
green = '#b2df8a'
light_purple = '#cab2d6'
purple = '#6a3d9a'
red = '#ab162a'
pink = '#cf5246'
orange = '#fdbf6f'
Пример #50
0
COLUMN_TO_HUMAN_READABLE = {
  "n_covid_deaths": "Décès",
  "n_covid_healed": "Sorties de réa",
  "n_covid_transfered": "Transferts (autre réa)",
  "n_covid_refused": "Refus (faute de place)",
  "n_covid_free": "Lits Covid+ libres",
  "n_ncovid_free": "Lits Covid- libres",
  "n_covid_occ": "Lits Covid+ occupés",
  "n_ncovid_occ": "Lits Covid- occupés",
  "flow": "Flux total de patients",
  "pct_deaths": "Pourcentage de décès",
  "pct_healed": "Pourcentage de sorties",
}

COL_COLOR = {
  col: seaborn.color_palette("colorblind",
                             len(BEDCOUNT_COLUMNS) + 1)[i]
  for i, col in enumerate(BEDCOUNT_COLUMNS + ["flow"])
}
COL_COLOR.update({
  "n_covid_deaths": (0, 0, 0),
  "n_covid_healed": (
    0.00784313725490196,
    0.6196078431372549,
    0.45098039215686275,
  ),
  "n_covid_occ": (0.8, 0.47058823529411764, 0.7372549019607844),
  "n_covid_transfered": (
    0.00392156862745098,
    0.45098039215686275,
    0.6980392156862745,
  ),
Пример #51
0
BIG_KEY = round(BOX_SIZE * 1.5)
SMALL_KEY = round(BIG_KEY / 2)
FONT_SIZE = round(18 * SCALE)
MAX_WIDTH = round(200 * SCALE)
MAX_WIDTH2 = round(280 * SCALE)

logger.info("Generating grid chart")
index = sorted([(x, g1.prob_dict[(x, )] / sum(g1.prob_dict.values()))
                for x in LETTERS if (x, ) in g1.prob_dict],
               key=lambda p: p[1],
               reverse=True)
array = [[(y, n / sum(g1.markov_dict[(x, )].values()))
          for y, n in g1.markov_dict[(x, )].most_common()] for x, _ in index]
data = pd.DataFrame(array, index=index)

pone = tmap(RGBA, sns.color_palette("Reds", 8))
ptwo = tmap(RGBA, sns.color_palette("Blues", 8))
color_index = lambda p: 0 if p == 0 else clip(6 + int(log(p, 10) * 2), 0, 6)
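# Bucket a probability onto palette indices 0-6 on a log10 scale (two steps per decade),
# so rarer pairs get lighter shades; p == 0 maps to the lightest colour.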


def image_fn(pair, palette, row=None, size=BOX_SIZE):
    if pair is None: return None
    bg = palette[color_index(pair[1])]
    img = Image.new("RGBA", (size, size), bg)
    img.place(Image.from_text(pair[0], arial(size // 2), "black", bg=bg),
              copy=False)
    if row is not None and pair[0] != " ":
        if not isinstance(row, str):
            twogram = g2.markov_dict[(index[row][0], pair[0])].most_common()
            row, _ = twogram[0][0], twogram[0][1] / sum(n for _, n in twogram)
        img.place(Image.from_text(row,
Пример #52
0
)

# Loop through operators
for j, op in enumerate(operators):
    # Loop through repressors
    for i, rep in enumerate(repressors):
        # Extract the multipliers for a specific strain
        df_sample = df_maxEnt[
            (df_maxEnt.operator == op) & (df_maxEnt.repressor == rep)
        ]

        # Group multipliers by inducer concentration
        df_group = df_sample.groupby("inducer_uM")

        # Define colors for plot
        colors = sns.color_palette(col_dict[op], n_colors=len(df_group) + 1)

        # Initialize matrix to save probability distributions
        Pp = np.zeros([len(df_group), len(protein_space)])

        # Loop through each of the entries
        for k, (group, data) in enumerate(df_group):
            # Select the Lagrange multipliers
            lagrange_sample = data.loc[
                :, [col for col in data.columns if "lambda" in col]
            ].values[0]

            # Compute distribution from Lagrange multipliers values
            Pp[k, :] = ccutils.maxent.maxEnt_from_lagrange(
                mRNA_space, protein_space, lagrange_sample, exponents=moments
            ).T
Пример #53
0
ds_addr = '/home/amirhossein/Desktop/implement/dataset/fall detection dataset/'
data_addr = 'data/'
options = {'downsample_rate': 4, 'background_sub': False, 'stride': 2}
#uradl = extraction('ur_fall', options , 'ur_fall_ds4__str2') # dataset folder in ds_addr
#uradl.load()
#f,l = uradl.load_layer(1)
#print(f.shape, l)
datasets = [
    'ur_fall', 'ur_adl', 'Office', 'Home_02', 'Home_01', 'Coffee_room_01'
]  # os.listdir(ds_addr)
dataset_list = []
layer = 2
X = []
Y = []
colors = []
cmap1 = sns.color_palette("bright", 2 * len(datasets))[:len(datasets)]
cmap2 = sns.color_palette("bright",
                          2 * len(datasets))[len(datasets):len(datasets) * 2]
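# One hue per dataset: samples labelled 1 use cmap1[i], all other labels use cmap2[i] (see the loop below).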
for i, ds in enumerate(datasets):
    ds = extraction(ds, options, ds + '_ds4__str2', ds_addr, data_addr)
    #    dataset_list+=[ds]
    ds.load()
    x, y = ds.load_layer(layer)
    colors += [cmap1[i] if yy == 1 else cmap2[i] for yy in y]
    print(x.shape)
    X += [x]
    Y += [y]

# =================
j = 0
#x = [v[j][0] for v in X]+[v[j][0] for v in X2]+[v[j][0] for v in X3]
Пример #54
0
def analyse_qte_qual(data, nomcaract1, typecaract1, nomcaract2, typecaract2):
    # Visualization
    fig = plt.figure(figsize=(20, 20))
    abcisses = nomcaract1
    ordonnes = str(input('Choisir variable de mesure (qte) :'))
    teinte = nomcaract2
    color_palette_names = [
        'deep', 'muted', 'bright', 'pastel', 'dark', 'colorblind'
    ]
    question = str(input('Voulez-vous afficher les couleurs ? (y/n)'))
    if question == 'y':
        print(color_palette_names)
    else:
        print('affichage des couleurs non demandé')
    choix_couleur = str(input('Choisir couleur du graphique :'))
    if not (choix_couleur):
        choix_couleur = None

    data_plot = sns.barplot(x=abcisses,
                            y=ordonnes,
                            hue=teinte,
                            data=data,
                            palette=sns.color_palette(choix_couleur, 2))
    titre = str(input('Choisissez le titre du graphique'))
    data_plot.set_title(titre)

    ylabel = str(input("Donner le nom de l'axe des ordonnés du graphique :"))
    if not (ylabel):
        data_plot.set_ylabel(nomcaract2)
    else:
        data_plot.set_ylabel(ylabel)

    xlabel = str(input("Donner le nom de l'axe des abcisses du graphique :"))
    if not (xlabel):
        data_plot.set_xlabel(nomcaract1)
    else:
        data_plot.set_xlabel(xlabel)
    plt.show(fig)
    save_image = str(input("Sauvegarder l'image ? (y/n) :"))
    if save_image == 'y':
        image = fig.get_figure()
        chemin = str(input('Indiquer le chemin du dossier'))
        image.savefig('{}/{}'.format(chemin, titre))
    else:
        print("Pas de sauvegarde")

    # Correlation analysis
    x = data[nomcaract1]
    y = data[nomcaract2]

    moyenne_y = y.mean()
    classes = []
    for classe in x.unique():
        yi_classe = y[x == classe]
        classes.append({
            'ni': len(yi_classe),
            'moyenne_classe': yi_classe.mean()
        })
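    # Correlation ratio (eta squared) = between-class sum of squares (SCE) / total sum of squares (SCT).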
    SCT = sum([(yj - moyenne_y)**2 for yj in y])
    SCE = sum(
        [c['ni'] * (c['moyenne_classe'] - moyenne_y)**2 for c in classes])
    eta_squared = SCE / SCT
    print("Le coefficient de corrélation (eta-squared) est égal à {}".format(
        eta_squared))
    if eta_squared <= 0 and eta_squared > -0.40:
        print(
            'Les variables ne sont pas négativement corrélées car {} est supérieur à -0,40'
            .format(eta_squared))
    elif eta_squared < -0.60:
        print(
            'Les variables sont négativement corrélées car {} est inférieur à -0,60'
            .format(eta_squared))
    elif eta_squared >= 0 and eta_squared < 0.40:
        print(
            'Les variables ne sont pas positivement corrélées car {} est inférieur à 0,40'
            .format(eta_squared))
    elif eta_squared > 0.60:
        print(
            'Les variables sont positivement corrélées car {} est supérieur à 0,60'
            .format(eta_squared))
    else:
        seuil_confiance = float(
            input('Choisir un seuil de confiance 0.1 ou 0.05 :'))
        p_value = round(st.pearsonr(data[nomcaract1], data[nomcaract2])[1], 2)
        if p_value < seuil_confiance:
            print(
                'On retient H1 : Les variables sont corrélées car {} (p-valeur) est inférieure à {} (seuil de confiance)'
                .format(p_value, seuil_confiance))
        else:
            print(
                'On retient H0 : Les variables ne sont pas corrélées car {} (p-valeur) est supérieure à {} (seuil de confiance)'
                .format(p_value, seuil_confiance))
Пример #55
0
class DoubleHeatmapConfig:
    figure: Dict[str, Union[str, tuple]] = default_field({
        "figsize": (8, 8),
        "dpi": 300,
        "n_grid": 10,
    })
    heatmap: Dict[str, Union[int, str, bool, float]] = default_field({
        "orientation":
        "antidiagonal",
        "xticklabels":
        True,
        "yticklabels":
        True,
        "ticks_labelsize":
        8,
        "xticks_labelrotation":
        90,
        "yticks_labelrotation":
        0,
        "linecolor":
        "white",
        "linewidths":
        0.5,
        "square":
        True,
    })
    legend: Dict[str, Union[int, float, str]] = default_field({
        'edgecolor':
        'k',
        'fancybox':
        False,
        'facecolor':
        'w',
        'fontsize':
        10,
        'framealpha':
        1,
        'frameon':
        False,
        'handle_length':
        1,
        'handle_height':
        1.125,
        'title_fontsize':
        12,
    })
    count: Dict[str, Union[int, float, str, bool]] = default_field({
        'boundaries': [1, 5, 10, 15, 20, 50, 200, 500],
        'auto_boundaries': {
            "n": 7,
            "decimals": 0,
            "middle": None,
            "regular": True
        },
        'cmap':
        sns.color_palette("Blues", n_colors=7, as_cmap=True),
        'cbar_fraction':
        0.25,
        'cbar_aspect':
        None,
        'cbar_reverse':
        True,
        'cbar_xy': (0, 0.5),
        'cbar_title':
        "Counts",
        'cbar_title_fontsize':
        12,
        'cbar_title_pad':
        6,
        'cbar_ticks_rotation':
        0,
        'cbar_ticks_length':
        5,
        'cbar_ticks_labelsize':
        8,
        'cbar_ticks_pad':
        4,
    })
    ratio: Dict[str, Union[int, float, str, bool]] = default_field({
        'boundaries': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
        'auto_boundaries': {
            "n": 7,
            "decimals": 0,
            "middle": None,
            "regular": True
        },
        'cmap':
        sns.diverging_palette(50, 200, s=90, l=50, sep=1, as_cmap=True),
        'hide_non_significant':
        True,
        'cbar_fraction':
        0.25,
        'cbar_aspect':
        None,
        'cbar_reverse':
        False,
        'cbar_xy': (0.5, 0.1),
        'cbar_title':
        "Ratios",
        'cbar_title_pad':
        6,
        'cbar_title_fontsize':
        12,
        'cbar_ticks_rotation':
        0,
        'cbar_ticks_length':
        5,
        'cbar_ticks_labelsize':
        8,
        'cbar_ticks_pad':
        4,
    })
    test: Dict[str, Union[int, float, str]] = default_field({
        'pval_level':
        0.05,
        'fwer_level':
        0.05,
        'fdr_level':
        0.1,
        'fwer_size':
        10,
        'fwer_marker':
        '*',
        'fwer_color':
        'black',
        'fdr_size':
        1,
        'fdr_marker':
        's',
        'fdr_color':
        'black',
    })
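
# Note: default_field is not defined in this snippet. A common implementation (an assumption,
# not necessarily the author's; in the original module it would sit above the class) wraps
# dataclasses.field so every DoubleHeatmapConfig instance gets its own copy of the mutable default:
import copy
from dataclasses import field


def default_field(obj):
    # Deep-copy the default for each new instance so config dicts are never shared between instances.
    return field(default_factory=lambda: copy.deepcopy(obj))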
Пример #56
0
# In[23]:

mlt.subplots(figsize=(10, 6))
sns.countplot(x='season', hue='toss_decision', data=matches)
mlt.show()

# In[24]:

sns.countplot(x='winner', hue='umpire1', data=matches)
mlt.show()

# In[25]:

mlt.subplots(figsize=(10, 6))
ax = matches['toss_winner'].value_counts().plot.bar(width=0.9,
                                                    color=sns.color_palette(
                                                        'RdYlGn', 20))
for p in ax.patches:
    ax.annotate(format(p.get_height()), (p.get_x() + 0.15, p.get_height() + 5))
mlt.show()

# In[26]:

matches_played_byteams = pd.concat([matches['team1'], matches['team2']])
matches_played_byteams = matches_played_byteams.value_counts().reset_index()
matches_played_byteams.columns = ['Team', 'Total Matches']
matches_played_byteams['wins'] = matches['winner'].value_counts().reset_index(
)['winner']
matches_played_byteams.set_index('Team', inplace=True)

trace1 = go.Bar(x=matches_played_byteams.index,
                y=matches_played_byteams['Total Matches'],
Пример #57
0
def plot_pnet_vs_dense_with_ratio(ax, c, label, plot_ratio=False):
    sns.set_color_codes('muted')
    current_palette = sns.color_palette()
    color = current_palette[3]

    sizes = []
    for i in range(0, 20, 3):
        df_split = pd.read_csv(join(PROSTATE_DATA_PATH, 'splits/training_set_{}.csv'.format(i)), index_col=0)
        sizes.append(df_split.shape[0])
    sizes = np.array(sizes)

    df_dense_sameweights = get_dense_sameweights(c)
    df_pnet = get_pnet_preformance(col=c)
    pvalues = get_stats(df_pnet, df_dense_sameweights)
    print(c, list(zip(pvalues, sizes)))
    plot_compaison(ax, label, df_pnet, df_dense_sameweights)
    # ax.legend(ax.legend.text, loc= 'upper left')

    y1 = df_pnet.mean()
    y2 = df_dense_sameweights.mean()
    height = list(map(max, zip(y1, y2)))
    print('height', height)
    updated_values = []
    for i, (p, s) in enumerate(zip(pvalues, sizes)):
        if p >= 0.05:
            displaystring = r'n.s.'
        elif p < 0.0001:
            displaystring = r'***'
        elif p < 0.001:
            displaystring = r'**'
        else:
            displaystring = r'*'
        updated_values.append('{:.0f}\n({})'.format(s, displaystring))
        ax.axvline(x=s, ymin=0, linestyle='--', alpha=0.3)
    ax.set_xscale("log")
    ax.set_xticks([], [])

    ax.xaxis.set_major_formatter(NullFormatter())
    ax.xaxis.set_minor_formatter(NullFormatter())
    ax.tick_params(axis=u'x', which=u'both', length=0)

    ax.set_xticks(sizes)
    ax.set_xticklabels(updated_values)
    ax.set_xlim((min(sizes) - 5, max(sizes) + 50))

    if plot_ratio:
        ax2 = ax.twinx()
        y1 = df_pnet.mean()
        y2 = df_dense_sameweights.mean()
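        # Relative performance gain of P-NET over the dense model, smoothed with a cubic polynomial fit across training-set sizes.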
        ratio = (y1.values - y2.values) / y2.values
        new_x = np.linspace(min(sizes), max(sizes), num=np.size(sizes))
        coefs = np.polyfit(sizes, ratio, 3)
        new_line = np.polyval(coefs, new_x)

        ax2.plot(new_x, new_line, '-.', linewidth=0.5, color=color)
        ax2.set_ylim((0.005, .23))
        ax.set_ylim((.5, 1.05))
        ax2.set_ylabel('Performance increase', fontproperties)
        vals = ax2.get_yticks()
        ax2.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
        ax.set_yticks([], minor=True)
        ax2.spines['right'].set_color(color)
        ax2.yaxis.label.set_color(color)
        ax2.tick_params(axis='y', colors=color)
        ax2.spines['top'].set_visible(False)
        ax2.spines['right'].set_visible(False)
        ax2.spines['left'].set_visible(False)
        ax2.spines['bottom'].set_visible(False)

    ax.set_xlabel('Number of samples', fontproperties)
    size_vals = ax.get_xticks()
    pvalues_dict = {}
    for p, s in zip(pvalues, sizes):
        pvalues_dict[s] = p
    return pvalues_dict
Пример #58
0
    def get_stimulus_parameters(self):

        # For stimulus with alternate pulses of light
        if self.experiment_type == '2Stimx3':
            stimulus_on_time = [46, 86, 126, 166, 206, 246]
            stimulus_off_time = [65, 105, 145, 185, 225, 265]
            stimulus_train = self.experiment_parameters['light_type'] * (len(stimulus_on_time) // 2)
            color_mat = ['#00FFFF', '#FF0000', '#0000FF', '#FF1493', '#3090C7', '#800000']  # blue-red alternates

        elif self.experiment_type == '1Stimx4New':
            stimulus_on_time = [46, 86, 126, 166]
            stimulus_off_time = [65, 105, 145, 185]
            stimulus_train = self.experiment_parameters['light_type'] * (len(stimulus_on_time))
            color_mat = sns.color_palette(str(self.experiment_parameters['light_type']).strip("'[]'") + 's',
                                          len(stimulus_on_time) + 2)[2:]

        elif self.experiment_type == 'BlueRedx2':
            stimulus_on_time = [43, 83, 123, 163]
            stimulus_off_time = [64, 104, 144, 184]
            stimulus_train = ['Blue', 'Red', 'Blue', 'Red']
            color_mat = ['#00FFFF', '#FF0000', '#0000FF', '#FF1493']  # blue-red alternates

        elif self.experiment_type == 'FarRedBluex3':
            stimulus_on_time = [32, 52, 72, 92, 112, 132]
            stimulus_off_time = [42, 62, 82, 102, 122, 142]
            stimulus_train = ['Red', 'Red', 'Red', 'Blue', 'Blue', 'Blue']
            color_mat = ['#FF0000', '#FF1493', '#800000', '#00FFFF', '#0000FF', '#3090C7']  # blue-red alternates

        elif self.experiment_type == '1color4stim':
            stimulus_on_time = [46, 98, 142, 194]
            stimulus_off_time = [69, 120, 164, 216]
            stimulus_train = self.experiment_parameters['light_type'] * (len(stimulus_on_time))
            color_mat = sns.color_palette(str(self.experiment_parameters['light_type']).strip("'[]'") + 's',
                                          len(stimulus_on_time) + 2)[2:]

        elif self.experiment_type == 'RedBlueHighSpeedLQ':
            stimulus_on_time = [422, 702, 982, 1262, 1542, 1822]
            stimulus_off_time = [574, 854, 1134, 1414, 1694, 2044]
            print('number of stimulus pulses calculated is %s' % len(stimulus_on_time))
            stimulus_train = ['Red', 'Blue', 'Red', 'Blue', 'Red', 'Blue']
            color_mat = sns.color_palette(["salmon", "aqua", "orangered", "dodgerblue", "maroon", "royalblue"])


        # For high speed stimuli with just one type of light
        elif self.experiment_type == 'HighSpeed13fps':
            time_end = self.experiment_parameters['time_end']
            frames_per_sec = self.experiment_parameters['frames_per_sec']
            num_frames_in_20s = frames_per_sec * 20
            frame_num = num_frames_in_20s - frames_per_sec * 2 + 10
            stimulus_off_time = []
            stimulus_on_time = []
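            # Alternate 20 s ON / 20 s OFF windows (in frame numbers) until the
            # end of the recording is reached.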
            while frame_num < time_end - num_frames_in_20s:
                stimulus_on_time.append(frame_num)
                stimulus_off_time.append(frame_num + num_frames_in_20s)
                frame_num += num_frames_in_20s * 2
            stimulus_on_time[6] += 5
            # stimulus_on_time[5] += 10
            stimulus_off_time[6] += 5
            # stimulus_off_time[5] += 10
            print 'number of stimulus pulses calculated is %s' % (len(stimulus_on_time))
            stimulus_train = self.experiment_parameters['light_type'] * len(stimulus_on_time)
            color_mat = sns.color_palette(str(self.experiment_parameters['light_type']).strip("'[]'") + 's',
                                          len(stimulus_on_time) + 2)[2:]


        elif self.experiment_type == 'HighSpeed30fps':
            time_end = self.experiment_parameters['time_end']
            frames_per_sec = self.experiment_parameters['frames_per_sec']
            num_frames_in_20s = frames_per_sec * 20
            frame_num = num_frames_in_20s + frames_per_sec / 7 - 25
            stimulus_off_time = []
            stimulus_on_time = []
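            # Same alternating 20 s ON / 20 s OFF schedule built in frame numbers,
            # with manual per-pulse corrections applied below.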
            while frame_num < time_end - num_frames_in_20s:
                stimulus_on_time.append(frame_num)
                stimulus_off_time.append(frame_num + num_frames_in_20s)
                frame_num += num_frames_in_20s * 2
            stimulus_on_time[5] += 40
            stimulus_on_time[4] += 20
            stimulus_off_time[5] += 40
            stimulus_off_time[4] += 20

            print 'number of stimulus pulses calculated is %s' % (len(stimulus_on_time))
            stimulus_train = self.experiment_parameters['light_type'] * len(stimulus_on_time)
            color_mat = sns.color_palette(str(self.experiment_parameters['light_type']).strip("'[]'") + 's',
                                          len(stimulus_on_time) + 2)[2:]

        self.print_and_plot_stuff(stimulus_on_time, stimulus_off_time, stimulus_train, color_mat)
        return stimulus_on_time, stimulus_off_time, stimulus_train, color_mat
Пример #59
0
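# Assumed context for this snippet: `res_df`, `latent`, `mg`, `title`,
# `base_save`, `FNAME` and `stashfig` are defined earlier in the notebook it
# was taken from; only the standard plotting imports are added here.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns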
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
sns.scatterplot(x="threshold", y="n_verts", data=res_df, legend=False, ax=ax)
ax.set_title(title)
stashfig(f"threshold-vs-n-verts" + base_save)

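# Reshape the per-K columns of res_df into long format so that each K value
# becomes a hue level in the line plot below.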
knn_df = pd.melt(
    res_df.drop(["Residual F-norm", "n_verts", "Norm. Resid. F-norm"], axis=1),
    id_vars=["threshold"],
    var_name="K",
    value_name="P(Pair w/in KNN)",
)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
sns.lineplot(
    x="threshold",
    y="P(Pair w/in KNN)",
    data=knn_df,
    hue="K",
    palette=sns.color_palette("Reds", knn_df["K"].nunique()),
)
plt.legend(bbox_to_anchor=(1.08, 1), loc=2, borderaxespad=0.0)
ax.set_title(title)
stashfig(f"threshold-vs-knn" + base_save)

# %%
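# Attach the latent embedding dimensions to the node metadata and write the
# combined table to CSV.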
latent_cols = [f"dim {i}" for i in range(latent.shape[1])]
latent_df = pd.DataFrame(data=latent, index=mg.meta.index, columns=latent_cols)
latent_df = pd.concat((mg.meta, latent_df), axis=1)
latent_df.index.name = "Skeleton ID"
out_file = f"maggot_models/notebooks/outs/{FNAME}/latent.csv"
latent_df.to_csv(out_file)
# Youtube Tutorial: https://www.youtube.com/watch?v=QYDp_-TX7C4
import pandapower as pp
import pandapower.plotting as pplt
import pandapower.topology as top
import pandapower.networks as nw
import matplotlib.pyplot as plt
import seaborn as sns

net = nw.mv_oberrhein()
pplt.simple_plot(net)

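# Build a NetworkX graph of the grid while excluding the transformer buses, so
# each feeder section shows up as its own connected component; then draw the
# buses and lines of every section in a separate color.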
mg = top.create_nxgraph(net, nogobuses=set(net.trafo.lv_bus.values) | set(net.trafo.hv_bus.values))
colors = sns.color_palette()
collections = list()
sizes = pplt.get_collection_sizes(net)
for area, color in zip(top.connected_components(mg), colors):
    collections.append(pplt.create_bus_collection(net, area, color=color, size=sizes["bus"]))
    line_ind = net.line.loc[:, "from_bus"].isin(area) | net.line.loc[:, "to_bus"].isin(area)
    lines = net.line.loc[line_ind].index
    collections.append(pplt.create_line_collection(net, lines, color=color))
collections.append(pplt.create_ext_grid_collection(net, size=sizes["ext_grid"]))
pplt.draw_collections(collections)
plt.show()