def classify_manual(masks, template):
    """ Opens a GUI that lets you manually classify masks into any of the valid types.

    :param np.array masks: 3-d array of masks (num_masks, image_height, image_width)
    :param np.array template: Image used as background to help with mask classification.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import seaborn as sns

    mask_types = []
    plt.ioff()
    for mask in masks:
        ir = mask.sum(axis=1) > 0
        ic = mask.sum(axis=0) > 0

        il, jl = [max(np.min(np.where(i)[0]) - 10, 0) for i in [ir, ic]]
        ih, jh = [min(np.max(np.where(i)[0]) + 10, len(i)) for i in [ir, ic]]
        tmp_mask = np.array(mask[il:ih, jl:jh])

        with sns.axes_style('white'):
            fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(10, 3))

        ax[0].imshow(template[il:ih, jl:jh], cmap=plt.cm.get_cmap('gray'))
        ax[1].imshow(template[il:ih, jl:jh], cmap=plt.cm.get_cmap('gray'))
        tmp_mask[tmp_mask == 0] = np.NaN
        ax[1].matshow(tmp_mask, cmap=plt.cm.get_cmap('viridis'), alpha=0.5, zorder=10)
        ax[2].matshow(tmp_mask, cmap=plt.cm.get_cmap('viridis'))
        for a in ax:
            a.set_aspect(1)
            a.axis('off')
        fig.tight_layout()
        fig.canvas.manager.window.wm_geometry("+250+250")
        fig.suptitle('S(o)ma, A(x)on, (D)endrite, (N)europil, (A)rtifact or (U)nknown?')

        def on_button(event):
            if event.key == 'o':
                mask_types.append('soma')
                plt.close(fig)
            elif event.key == 'x':
                mask_types.append('axon')
                plt.close(fig)
            elif event.key == 'd':
                mask_types.append('dendrite')
                plt.close(fig)
            elif event.key == 'n':
                mask_types.append('neuropil')
                plt.close(fig)
            elif event.key == 'a':
                mask_types.append('artifact')
                plt.close(fig)
            elif event.key == 'u':
                mask_types.append('unknown')
                plt.close(fig)

        fig.canvas.mpl_connect('key_press_event', on_button)

        plt.show()
    sns.reset_orig()

    return mask_types
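A minimal usage sketch with synthetic inputs (masks and template below are placeholders); the function needs an interactive, Tk-based matplotlib backend because of the wm_geometry call:

import numpy as np

# Three square "cells" as float 0/1 masks over a 256x256 template.
masks = np.zeros((3, 256, 256), dtype=float)
for k, (r, c) in enumerate([(60, 60), (128, 128), (200, 90)]):
    masks[k, r - 8:r + 8, c - 8:c + 8] = 1.0
template = np.random.rand(256, 256)

labels = classify_manual(masks, template)   # press o/x/d/n/a/u in each window
print(labels)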
Example #2
def resetPlot():
    """Reset figure style"""
    global fancyPlots
    if 'seaborn' in sys.modules and fancyPlots:
        sns.reset_orig()
        fancyPlots = False
    setFigOutput(figDisplay)
Example #3
    def quit(self, evt=None):
        """Override this to handle pane closing"""

        self.fig.clear()
        plt.close('all')
        self.pf.destroy()
        self.mainwin.destroy()
        import seaborn as sns
        sns.reset_orig()
        return
Example #4
def matrix_and_best_path(mymatrix, path):
    import seaborn as sns
    sns.reset_orig()
    import matplotlib.pyplot as plt
    #sns.set_context('notebook', font_scale=2.5)
    plt.imshow(mymatrix.T, origin='lower', interpolation='nearest')
    plt.title('Optimal path')
    plt.plot(path[0], path[0], 'c-')
    plt.plot(path[0], path[1], 'y')
    plt.plot(path[0], path[1], 'ro')
    plt.xlim(-0.5, mymatrix.shape[0] - 0.5)
    plt.ylim(-0.5, mymatrix.shape[1] - 0.5)
Example #5
File: notify.py Project: xibby/pipeline
def temporary_image(array, key):
    import matplotlib
    matplotlib.rcParams['backend'] = 'Agg'
    import matplotlib.pyplot as plt
    import seaborn as sns
    with sns.axes_style('white'):
        plt.matshow(array, cmap='gray')
        plt.axis('off')
    filename = '/tmp/' + key_hash(key) + '.png'

    plt.savefig(filename)
    sns.reset_orig()
    return filename
Example #6
File: plot.py Project: ygtfrdes/Program
def set_mpl_style(font_size=12):
    # Old seaborn versions (such as the one in Google Colab)
    # modify the default matplotlib style on import.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        sns.reset_orig()

    plt.rc('figure', figsize=(18, 2.5))
    plt.rc('font', size=font_size)
    plt.rc('axes', titlesize=font_size)
    plt.rc('text', usetex=False)
    plt.rc('font', family='sans-serif')
    plt.rc('legend', frameon=False)
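A minimal usage sketch; the snippet assumes warnings, seaborn (sns) and matplotlib.pyplot (plt) are already imported at module level, so they are imported explicitly here:

import warnings

import matplotlib.pyplot as plt
import seaborn as sns

set_mpl_style(font_size=14)
fig, ax = plt.subplots()            # inherits the (18, 2.5) inch default
ax.plot([0, 1, 2], [0, 1, 4])
ax.set_title("styled with set_mpl_style")
plt.show()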
Example #8
    def scatter_epsiode_returns(self,
                                title='Episode vs. Rewards',
                                fig_path=None,
                                fig_name=None,
                                save_fig=True):
        """Scatter plotting the reward returns over episodes.
        
        :param title: String title for figure.
        :param fig_path: File path to save figure to.
        :param fig_name: File name to save figure as.
        :param save_fig: Bool indicating whether to save the figure.
        """

        sns.set()
        sns.set_style("whitegrid")

        plt.figure()

        plt.scatter(range(len(self.episode_rewards)),
                    self.episode_rewards,
                    color='red',
                    lw=2)

        plt.title(title, fontsize=22)
        plt.xlabel('Episodes', fontsize=20)
        plt.ylabel('Cumulative Rewards', fontsize=20)

        plt.tick_params(axis='both', which='major', labelsize=18)
        plt.tick_params(axis='both', which='minor', labelsize=18)
        plt.xlim([0, len(self.episode_rewards)])

        plt.tight_layout()

        if save_fig:
            # Default figure path.
            if fig_path is None:
                fig_path = os.getcwd() + '/images'

            #print(fig_path)
            # Default figure name.
            if fig_name is None:
                #title = title.translate(string.punctuation)
                #fig_name = '_'.join(title.split()) + '.png'
                fig_name = title.replace(' ', '-').lower() + '.png'

            plt.savefig(os.path.join(fig_path, fig_name), bbox_inches='tight')

        sns.reset_orig()

        plt.show()
Example #9
def set_plot_style():

    sns.reset_orig()

    plt.rcParams["figure.figsize"] = (12, 8)
    plt.rcParams["font.size"] = 14
    plt.rcParams["lines.linewidth"] = 2
    plt.rcParams["xtick.labelsize"] = 13
    plt.rcParams["ytick.labelsize"] = 13
    plt.rcParams["axes.labelsize"] = 14
    plt.rcParams["axes.titlesize"] = 14
    plt.rcParams["legend.fontsize"] = 13
    plt.rcParams["axes.spines.top"] = False
    plt.rcParams["axes.spines.right"] = False
def plot_correlation_matrix(data):
    """Plots a correlation matrix of the data set"""
    sns.reset_orig()
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(111)
    cax = ax.matshow(data.corr(), cmap=plt.cm.Blues)
    fig.colorbar(cax)

    ax.set_xticklabels(data, rotation=60)
    ax.set_yticklabels(data)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()
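A usage sketch with a small synthetic DataFrame; matplotlib.pyplot (plt), matplotlib.ticker (ticker) and seaborn (sns) are assumed to be imported at module level as in the original file:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(200, 4)), columns=["a", "b", "c", "d"])
df["e"] = 0.8 * df["a"] + rng.normal(scale=0.3, size=200)   # strongly correlated with "a"
plot_correlation_matrix(df)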
Example #11
def plot_trajectories_individual_line(line, clustering, outpath, state, states, min_sol, max_sol):
	import seaborn as sns
	sns.reset_orig()
	sns.set(context='paper', style='white', palette='muted', font='sans-serif', font_scale=2.0, color_codes=False, rc={"axes.linewidth": 2.0})

	x_list = list()
	y_list = list()
	value_list = list()
	kmax = 0
	
	for c_line in clustering:
		m = eval(c_line['m'])
		x_list.append(m[0])
		y_list.append(m[1])
		kmax = kmax if m[0]+m[1] < kmax else m[0]+m[1] 
		cluster = c_line['cluster']
		cluster_id = "('{}', '{}')".format(state, cluster)
		value = line[cluster_id]
		value = value if value > 0 else 10**(-30)
		value_list.append(value)
	
	#plot
	import matplotlib.cm as cm
	from matplotlib.colors import Normalize, LogNorm
	import numpy as np
	plt.clf()
	c_list = list()
	cmap = cm.magma
	norm = Normalize(vmin=np.log(min_sol), vmax=np.log(max_sol))
	c_list = [cmap(norm(np.log(value))) for value in value_list]
	value_list_log = [np.log(v) if v >0.0 else 10**(-10) for v in value_list]

	ax = plt.gca()
	sc = ax.scatter(x_list, y_list, c=value_list_log, alpha=0.9, linewidths=0.0, cmap=cm.magma, norm=norm)
	ax.set_xlim([-0.5, kmax+1.0])
	ax.set_ylim([-0.5, kmax+1.0])
	ax.set_aspect(1.0)
	plt.xticks(np.arange(0, kmax+1, int(kmax/5)))
	plt.yticks(np.arange(0, kmax+1, int(kmax/5)))
	sns.despine()
	plt.colorbar(sc)
	
	xlabel='Neighbors in state {}'.format(states[0])
	ylabel='Neighbors in state {}'.format(states[1])
	ax.set_xlabel(xlabel,fontsize=18)
	ax.set_ylabel(ylabel,fontsize=18)
	#ax.set(xlabel='Neighbors in state {}'.format(states[0]),)
	plt.title('Distribution for State '+str(state))
	plt.tight_layout()
	plt.savefig(outpath)	
Example #12
def plot_cost(histories, A, c_1, c_2, equilibriums):
    
    sns.set_style('whitegrid', {'font.family':['serif'], 'font.serif':['Times New Roman'], 
                  'grid.color':'.9'})

    fig = plt.figure(1, figsize=(5,5))
    fig.clf()
    ax = fig.add_subplot(111)

    fs1 = 22
    fs2 = 24
    max_ = 100

    for i, history in enumerate(histories):

        if i == 0:
            linestyle = '-'
            equil_color = xkcd['magenta']
            label_e = r'$\pi^{\ast}_S$'
            label_1 = r'$\pi_1^{S}$'
            label_2 = r'$\pi_2^{S}$'
        else:
            linestyle = '--'
            equil_color = xkcd['yellow orange']
            label_e = r'$\pi^{\ast}_N$'
            label_1 = r'$\pi_1^{N}$'
            label_2 = r'$\pi_2^{N}$'

        cost_1 = f1(history[:max_, 0], history[:max_, 1], A, c_1, c_2)
        cost_2 = f2(history[:max_, 0], history[:max_, 1], A, c_1, c_2)
        equil_1 = f1(equilibriums[i][0], equilibriums[i][1], A, c_1, c_2)
        equil_2 = f2(equilibriums[i][0], equilibriums[i][1], A, c_1, c_2)
        print(cost_1[-1], cost_2[-1])

        ax.axhline(equil_1, lw=4, color=equil_color)
        ax.axhline(equil_2, lw=4, color=equil_color, label=label_e)
        ax.plot(cost_1, lw=4, color=xkcd['black'], linestyle=linestyle, label=label_1)
        ax.plot(cost_2, lw=4, color=xkcd['tomato red'], linestyle=linestyle, label=label_2)

    ax.set_xlabel('Iterations', fontsize=fs2)
    ax.set_ylabel('Profit', fontsize=fs2)
    lgd = ax.legend(bbox_to_anchor=(1,1), loc='upper left', fontsize=fs1, fancybox=True, 
                    framealpha=0, ncol=1, handlelength=1.5)

    ax.tick_params(labelsize=fs2)
    plt.savefig(os.path.join(os.getcwd(), 'Figs', 'profit.pdf'), 
                bbox_extra_artists=(lgd,), bbox_inches='tight', dpi=100)

    plt.show()
    sns.reset_orig()
Example #13
def show_audio(audio, text=None, return_array=False):
    sns.reset_orig()
    plt.figure(figsize=(14, 3))
    plt.plot(audio, linewidth=0.08, alpha=0.7)
    if text:
        plt.title(text, fontsize='10')
    plt.ylabel('amplitude')
    plt.xlabel('frames')
    if return_array:
        plt.tight_layout()
        buff = io.BytesIO()
        plt.savefig(buff, format='png')
        plt.close()
        buff.seek(0)
        return np.array(Image.open(buff))
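A usage sketch with a synthetic tone; numpy (np), matplotlib.pyplot (plt), seaborn (sns), io and PIL.Image (Image) are assumed to be imported at module level as the snippet requires:

import numpy as np

sr = 16000
t = np.arange(sr) / sr
audio = 0.5 * np.sin(2 * np.pi * 440 * t)            # one second of a 440 Hz tone

img = show_audio(audio, text="440 Hz tone", return_array=True)
print(img.shape)                                      # pixel array of the rendered figure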
Example #14
def plot3d(df1,df2,x,y,z,zOffset,limit):
    sns.reset_orig() # prevent seaborn from over-riding mplot3d defaults
    fig = plt.figure(figsize = (10, 12))
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(df1.loc[df2 == 0, x][:limit], df1.loc[df2 == 0, y][:limit], -np.log10(df1.loc[df2== 0, z][:limit] + zOffset), c = 'b', marker = '.', s = 1, label = 'genuine')
    ax.scatter(df1.loc[df2== 1, x][:limit], df1.loc[df2 == 1, y][:limit],  -np.log10(df1.loc[df2 == 1, z][:limit] + zOffset), c = 'y', marker = '.', s = 1, label = 'fraudulent')
    ax.set_xlabel(x, size = 16); 
    ax.set_ylabel(y + ' [hour]', size = 16); 
    ax.set_zlabel('- log$_{10}$ (' + z + ')', size = 16)
    ax.set_title('Error-based features separate out genuine and fraudulent transactions', size = 20)
    plt.axis('tight')
    ax.grid(1)
    noFraudMarker = mlines.Line2D([], [], linewidth = 0, color='b', marker='.',markersize = 10, label='genuine')
    fraudMarker = mlines.Line2D([], [], linewidth = 0, color='y', marker='.',markersize = 10, label='fraudulent')
    plt.legend(handles = [noFraudMarker, fraudMarker],bbox_to_anchor = (1.20, 0.38 ), frameon = False, prop={'size': 16});
Example #15
def show_spectrogram(spec, text=None, return_array=False):
    sns.reset_orig()
    plt.figure(figsize=(14, 6))
    plt.imshow(spec)
    if text:
        plt.title(text, fontsize='10')
    plt.colorbar(shrink=0.5, orientation='horizontal')
    plt.ylabel('mels')
    plt.xlabel('frames')
    if return_array:
        plt.tight_layout()
        buff = io.BytesIO()
        plt.savefig(buff, format='png')
        plt.close()
        buff.seek(0)
        return np.array(Image.open(buff))
Example #16
    def show_spectrogram(self, spec, text, path):
        # spec, text and path are taken as parameters here; the original snippet used them as undefined free names.
        if not os.path.exists(path):
            os.makedirs(path)
        sns.reset_orig()
        plt.figure(figsize=(14, 6))
        plt.imshow(spec)
        if text:
            plt.title(text, fontsize='10')
        plt.colorbar(shrink=0.5, orientation='horizontal')
        plt.ylabel('mels')
        plt.xlabel('frames')
        title = text.replace(" @@", "").replace("@@ ", "")

        fname = os.path.join(path, title + ".pdf")
        plt.savefig(fname)
        plt.close()
Example #17
def nube_palabras(textos_completos, archivo_imagen=''):
    """
            La idea de esta función es darle los datos, un rango de fechas, y que 
            nos devuelva la nube de palabras asociada a la discusión durante esas fechas.
            """

    # Collect every tweet's text into textos

    textos = []
    for t in textos_completos:
        textos.append(
            re.sub(r'https?:\/\/\S*', '', t, flags=re.MULTILINE).lower())

    es_stop = nltk.corpus.stopwords.words('spanish')

    # Filter out stopwords and strip commas, periods, apostrophes and accents

    textos = ''.join(textos).replace(',',
                                     ' ').replace('.',
                                                  ' ').replace("'",
                                                               ' ').split(' ')

    textos_filtrado = list(filter(lambda x: x not in es_stop, textos))
    textos = ' '.join(textos_filtrado)
    textos = saca_tildes(textos)

    # Build the word cloud
    wc = WordCloud(width=1600,
                   height=800,
                   background_color="white",
                   contour_width=3,
                   contour_color='steelblue',
                   max_words=100,
                   collocations=False).generate_from_text(textos)
    sbn.set_context("paper", font_scale=2)

    plt.figure(figsize=(10, 8), dpi=100)
    plt.title('Nube de Palabras', fontsize=20)
    plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")
    if archivo_imagen == "":
        plt.show()
    else:
        plt.savefig(archivo_imagen, bbox_inches='tight')
        plt.show()
    plt.clf()
    sbn.reset_orig()
Example #18
File: d_analyze.py Project: zjyzjjzmt/drep
def _make_scoring_plot(db, bars, **kwargs):
    '''
    Used by winner plot

    db is the database to plot- must contain 'genome' and all columns listed in 'bars'
    bars is all of the columns in the database to become bars
    for taxonomy, put genome2taxonomy in kwargs
    '''
    sns.reset_orig()

    # Make the normalized bar plot
    nd = normalize(db)
    d = pd.melt(nd, id_vars=['genome'], value_vars=bars)
    g = sns.barplot(data=d, y='genome', x='value', hue='variable')

    # Get a list of the un-normalized values
    x = pd.melt(db, id_vars=['genome'], value_vars=bars)
    vals = []
    for variable in x['variable'].unique():
        vals += [v for v in x['value'][x['variable'] == variable].tolist()]

    # Add un-normalized values to barplots
    i = 0
    for p in g.patches:
        g.annotate("{0:.1f}".format(vals[i]),
                   (p.get_width(), p.get_y() + (p.get_height() / 1.1)),
                   fontsize=8)
        i += 1

    # Add taxonomy if available
    axes = plt.gca()
    labels = [item.get_text() for item in axes.get_yticklabels()]
    if kwargs.get('genome2taxonomy', False) != False:
        g2t = kwargs.get('genome2taxonomy')
        for i, label in enumerate(labels):
            labels[i] = "{0}\n{1}".format(label, g2t[label.replace(' *', '')])
        axes.set_yticklabels(labels)

    # Adjust labels
    plt.xlabel('Normalized Score')
    plt.legend(loc='lower right')
    plt.tick_params(axis='both', which='major', labelsize=8)

    # Adjust figure size
    fig = plt.gcf()
    fig.set_size_inches(12, _x_fig_size(len(labels), factor=1))
    plt.subplots_adjust(left=0.5)
Example #19
def generate_plots(data):
    fig, ax = plt.subplots(figsize=(8, 6))

    sns.reset_orig()
    sns.swarmplot(data=data, x='algo', y='time', ax=ax)

    ax.set_title("Combined speed test")
    ax.set_xlabel("Algorithm")
    ax.set_ylabel("Time taken / ms")
    ax.set_yscale('log')
    ax.grid(which='major', axis='y')
    ax.grid(which='minor', axis='y', linestyle=':', linewidth=0.5)

    path = join("results", "combinedspeed.pdf")
    ensure_path(path)
    fig.savefig(path)
    return fig
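A hedged usage sketch with synthetic timings; ensure_path is a project helper not shown here, so a minimal stand-in is defined, and plt, sns and os.path.join (as join) are assumed to be imported at module level:

import os
from os.path import join

import pandas as pd

def ensure_path(path):
    # Stand-in for the project's helper: create the parent directory if needed.
    os.makedirs(os.path.dirname(path), exist_ok=True)

data = pd.DataFrame({
    "algo": ["fft"] * 5 + ["naive"] * 5,
    "time": [1.2, 1.3, 1.1, 1.4, 1.2, 38.0, 41.5, 39.2, 40.1, 37.8],   # milliseconds
})
fig = generate_plots(data)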
Example #20
    def feature_attribution_plot(self,
                                 data=None,
                                 target_metric=None,
                                 plot_features=None):
        sns.reset_orig()
        _check_df(data)

        metric_values = data[target_metric].values
        for feature in plot_features:
            f1, (ax1) = plt.subplots(1, sharex=True, sharey=True)
            feature_values = data[feature].values
            ax1.plot(feature_values, metric_values, 'rx')
            ax1.legend()
            x_label = feature
            y_label = target_metric
            _set_x_y_label(ax1, x_label, y_label)
            plt.show()
Example #21
    def plot_principales_Hashtags(self,
                                  archivo_imagen='',
                                  fecha_inicial='',
                                  fecha_final='',
                                  cantidad=20):
        """
        Gráfico de los principales Hashtags utilizados en todos los tweets.
        Se pueden usar los parámetros fecha_inicial y fecha_final para filtrar por fechas. Si no se completa se utilizarán todos los tweets realizados entre los tiempos de levantados (esto no incluye tweets más viejos que hayan sido retwiteados y citados)

        """

        if fecha_final == '':
            fecha_final = max(self.tweets['tw_created_at'])
        else:
            fecha_final = pd.to_datetime(fecha_final).tz_localize('UTC')
        if fecha_inicial == '':
            fecha_inicial = min(self.tweets['tw_created_at'])
        else:
            fecha_inicial = pd.to_datetime(fecha_inicial).tz_localize('UTC')

        d = self.tweets[
            (self.tweets.tw_created_at.between(fecha_inicial, fecha_final)) |
            ((self.tweets.relacion_nuevo_original == 'Original') &
             (self.tweets.or_created_at.between(fecha_inicial, fecha_final))
             )].copy()

        datos = pd.DataFrame(
            data={'Hashtags': ' '.join(d.tw_hashtags.dropna().values).split()}
        )['Hashtags'].value_counts().sort_values(ascending=True)[-cantidad:]
        sbn.set_context("paper", font_scale=2)
        fig, ax = plt.subplots(figsize=(11, 8))
        datos.plot(kind='barh', ax=ax)
        ax.barh(y=range(len(datos)),
                width=datos.values,
                color=plt.get_cmap('Set2').colors,
                tick_label=datos.keys())
        ax.grid(linestyle='dashed')
        ax.set_xlabel('Cantidad de Apariciones')
        ax.set_title('Hashtags Principales')
        if archivo_imagen == '':
            plt.show()
        else:
            plt.savefig(archivo_imagen, bbox_inches='tight')
            plt.show()
        sbn.reset_orig()
Example #22
    def plot_heatmap(self, dataframe_list):
        '''
        creates a seaborn heatmap with the provided dataframe
        :param dataframe_list: a list with pandas dataframe including the heatmap data
        :return: -
        '''
        # fig = plt.figure(1, figsize=(self.directories.__len__() / 20.0, self.directories.__len__() / 90.0)) # stupid
        # fig = plt.figure(1, figsize=(dataframe.columns.__len__() / 3.0, dataframe.columns.__len__() / 7.0)) # more line output

        for n in xrange(1, dataframe_list.__len__()):
            x_width = (dataframe_list[n].columns.__len__() /
                       3.0) if (dataframe_list[n].columns.__len__() /
                                3.0) > 7.0 else 7.0
            y_height = (dataframe_list[n].columns.__len__() /
                        7.0) if (dataframe_list[n].columns.__len__() /
                                 7.0) > 3.0 else 3.0
            fig = plt.figure(n, figsize=(x_width, y_height))
            # fig = plt.figure(1, figsize=(7.0, 3.0)) # one line output
            # fig = plt.figure(1, figsize=(200.0, 50.0))

            # set up seaborn so np.NaN cells render grey and no grid lines mark rows/columns
            sns.set()  # setup seaborn

            # create heatmap
            ax = sns.heatmap(dataframe_list[n],
                             linewidths=.3,
                             cbar=False,
                             cmap=mpl.colors.ListedColormap(
                                 ['red', 'yellow', 'green']),
                             square=True,
                             annot=False,
                             vmax=1.0,
                             vmin=0.0)
            plt.xticks(rotation=90)
            # bugfix for the 'bbox_inches=tight' layout, otherwise the label will be cut away
            plt.title('$\quad$', fontsize=60)
            # plt.title('$\quad$', fontsize=35)
            plt.xlabel('Testcase number', fontsize=15)
            plt.ylabel('Testcase', fontsize=15)
            self.plot_rectangle(figure=fig, axis=ax)
            plt.savefig(self.pth + 'Heatmap_Threshold_' + str(n) + '.pdf',
                        bbox_inches='tight')
            fig.clf()
            sns.reset_orig()
Example #23
 def rocCurve(self):
     """
     plot multi-class roc curve
     :return: None
     """
     # shuffle and split training and test sets
     clf = self.reducedClf if self.classifier == "rf" else self.clf
     OvrClf = OneVsRestClassifier(clf)
     classes = list(range(9, 19)) if not self.RelE else list(range(1, 9))
     self.y = label_binarize(self.y, classes=classes)
     nClasses = self.y.shape[1]
     X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=.5, random_state=0)
     if self.classifier == "rf":
         y_score = OvrClf.fit(X_train, y_train).predict_proba(X_test)
     else:
         y_score = OvrClf.fit(X_train, y_train).decision_function(X_test)
     # Compute ROC curve and ROC area for each class
     fpr, tpr, roc_auc = {}, {}, {}
     for i in range(nClasses):
         fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
         roc_auc[i] = auc(fpr[i], tpr[i])
     # Compute micro-average ROC curve and ROC area
     fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
     roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
     # Plot ROC curve
     sns.reset_orig()
     plt.clf()
     plt.figure()
     plt.plot(fpr["micro"], tpr["micro"], '--', linewidth=3, label='micro-average (area = {0:0.2f})'
              ''.format(roc_auc["micro"]))
     for i in range(nClasses):
         pos = classes[i]
         plt.plot(fpr[i], tpr[i], label='A-site @ {0} (area = {1:0.2f})'
                  ''.format(pos, roc_auc[i]))
     #
     plt.plot([0, 1], [0, 1], 'k--')
     plt.xlim([0.0, 1.0])
     plt.ylim([0.0, 1.05])
     plt.xlabel('False Positive Rate', fontsize=18)
     plt.ylabel('True Positive Rate', fontsize=18)
     plt.tick_params(axis='both', which='major', labelsize=18)
     plt.legend(loc="lower right", fontsize=12)
     plt.gcf()
     plt.savefig(self.output + "/" + "asite_roc.pdf")
Example #24
def exp_variance_plots(input_data, n_plots, n_setbars):
    import seaborn as sns
    from sklearn.decomposition import PCA
    sns.reset_orig()

    if (n_plots * n_setbars) != len(input_data.keys()):
        raise Exception(
            "Number of plots x Number of Bar subplots needs to equal number of pca dimensions"
        )

    pca = PCA(n_components=input_data.shape[1])
    pca.fit(input_data)

    # PCA components
    dimensions = ['Dimension {}'.format(i)
                  for i in range(1, len(pca.components_) + 1)]
    components = pd.DataFrame(np.round(pca.components_, 4),
                              columns=input_data.keys())
    components.index = dimensions

    # PCA explained variance ratios
    bare_ratios = pca.explained_variance_ratio_

    for i in range(n_plots):

        fig, ax = plt.subplots(figsize=(14, 8))
        sc = components.loc[components.index[n_setbars * i:n_setbars *
                                             (i + 1)]]
        vr = bare_ratios[n_setbars * i:n_setbars * (i + 1)]
        sc.plot(ax=ax, kind='bar')
        ax.set_ylabel("Feature Weights")
        ax.set_xticklabels(sc.index, rotation=0)

        for j, ev in enumerate(vr):
            ax.text(j - 0.40,
                    ax.get_ylim()[1] + 0.05,
                    "Explained Variance\n %.4f" % (ev))

        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

        plt.show()
Example #25
def setFigStyle(): #fancyPlots=False): # Merge this with setFigOutput
    """Set figure style"""
    global fancyPlots
    global colours
    if fancyPlots:
        try:
            import seaborn as sns
            #cp = sns.color_palette()
            colours = sns.color_palette()
        except ImportError:
            warnings.warn('Seaborn not found - using default plotting scheme.')
        else: # Try another package?
            pass
        finally: # Always do this last
            fancyPlots = True
    else:
        if 'seaborn' in sys.modules:
            sns.reset_orig()
        fancyPlots = False
Example #26
def fals_1Dplt(d_var_pcsc, d_obs_pcsc, pc_num):
    pc_name = []
    for i in range(pc_num):
        pc_name.append('PC' + str(i + 1))

    d_scores = pd.DataFrame(d_var_pcsc[:, :pc_num], columns=pc_name)
    dob_scores = pd.DataFrame(d_obs_pcsc[:, :pc_num], columns=pc_name)
    plt.figure(figsize=(8, 4))

    sns.set(font_scale=1.4)
    sns.violinplot(data=d_scores, inner=None, color=".8", width=1)
    sns.stripplot(data=d_scores, jitter=True, size=4, linewidth=1)
    sns.stripplot(data=dob_scores,
                  color='red',
                  size=9,
                  linewidth=1,
                  marker="D",
                  edgecolor='y')
    sns.reset_orig()
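A usage sketch with random PC scores; the input shapes are guesses from the slicing in the function, and pandas (pd), matplotlib.pyplot (plt) and seaborn (sns) are assumed to be imported at module level:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
d_var_pcsc = rng.normal(size=(200, 6))    # e.g. ensemble scores (samples x PCs)
d_obs_pcsc = rng.normal(size=(1, 6))      # e.g. observed scores
fals_1Dplt(d_var_pcsc, d_obs_pcsc, pc_num=4)
plt.show()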
Example #28
File: plotter.py Project: dianagudu/ca-as
    def boxplot_random(data, algos, outfile):
        # reset seaborn settings
        sns.reset_orig()
        Plotter.__set_rc_params()
        _, ax1 = plt.subplots(figsize=(8, 5))
        plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
        bp = plt.boxplot(data, notch=1, vert=False, whis=[5, 95],
                         bootstrap=10, showmeans=True, showfliers=True)
        plt.setp(bp['boxes'], color='black')
        plt.setp(bp['whiskers'], color='black')
        plt.setp(bp['fliers'], color='grey',
                 marker='.', mew=0.5, mec='grey', markersize=3.5)
        plt.setp(bp['means'], color='red', marker='*', mec='red', mfc='red')
        plt.setp(bp['medians'], color='blue')
        ax1.xaxis.grid(
            True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
        ax1.yaxis.grid(
            True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
        # hide grid behind plot objects
        ax1.set_axisbelow(True)
        ytickNames = plt.setp(ax1, yticklabels=algos)
        ax1.set_xlim(-100, 100)
        plt.xlabel("difference to mean welfare (%)")
        # finally, add a basic legend
        plt.figtext(
            0.795, 0.13, '-', color='blue', weight='roman', size='medium')
        plt.figtext(0.815, 0.132, ' median value',
                    color='black', weight='roman', size='small')
        plt.figtext(
            0.795, 0.098, '*', color='red', weight='roman', size='medium')
        plt.figtext(0.815, 0.11, ' average value',
                    color='black', weight='roman', size='small')
        plt.figtext(0.7965, 0.085, 'o',
                    color='grey', weight='roman', size='small')
        plt.figtext(0.815, 0.085, ' outliers',
                    color='black', weight='roman', size='small')
        plt.savefig(outfile, bbox_inches='tight', dpi=300)

        for i in range(len(algos)):
            print(ytickNames[i], bp['boxes'][i].get_xdata())
        for ws in bp['whiskers']:
            print(ws.get_xdata())
Example #29
def regplot(x, y, axes=None, rtrnum=1, **kwargs):
    '''
    rz's wrapper around seaborn.regplot that additionally returns the regression
    result (e.g. slope and p-values).
    To get at the plotted objects, use axes.get_children():

    Scatters are PathCollection objects
    Lines are Line2D objects
    Shading is a PolyCollection object
    '''
    import seaborn as sns
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy import stats

    sns.reset_orig() #

    if axes is None:
        axes = plt.gca()
    # do it
    p = sns.regplot(x=x, y=y, ax=axes, **kwargs)

    # figure out the statistical model
    if 'order' in kwargs:
        order = kwargs['order']
    else:
        order = 1
    
    # deal with Nan value
    nanind = np.isnan(x) | np.isnan(y)

    if order > 1:
        regressresult = np.polyfit(x=x[~nanind], y=y[~nanind], deg=order)
    else:
        regressresult = stats.linregress(x=x[~nanind], y=y[~nanind])

    if rtrnum == 1:
        return regressresult
    elif rtrnum == 2:
        return regressresult, p
    # get the predict data point
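A minimal usage sketch with synthetic data:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 2.0 * x + rng.normal(scale=0.5, size=100)

res = regplot(x, y)                           # order 1: a scipy.stats linregress result
print(res.slope, res.intercept, res.pvalue)

res2, ax = regplot(x, y, rtrnum=2, order=2)   # order 2: np.polyfit coefficients plus the Axes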
Example #30
def plot_clustering_1d(model, cluster_to_elems):
	return  # short-circuits immediately; the plotting code below is currently disabled
	plt.clf()
	import seaborn as sns
	clusterlist = [-1] * (model['network']['kmax']+1)

	for i, (cluster, elems) in enumerate(cluster_to_elems.items()):
		for elem in elems:
			if np.sum(elem[1:]) == 0:
				clusterlist[elem[0]] = i

	clusters = [clusterlist]
	ax = sns.heatmap(clusters, cbar=False, yticklabels=False, xticklabels=True, annot=True, fmt="d")
	ax.set_aspect(1.7)
	plt.tight_layout()
	plt.title('Degree Clustering')
	plt.tight_layout()
	plt.savefig(model['output_path'].replace('ame_', 'clustering1D_').replace('.py', '.pdf'))
	sns.reset_orig()
	matplotlib.rcParams.update(matplotlib.rcParamsDefault)
Example #31
def plot2d_gamma(regr):
    """Only works if there are two parameters being varied."""
    
    a = regr.cv_results_['mean_test_score']
    b = regr.cv_results_['params']
    c = np.vstack((b, a)).T  
    
    sns.reset_orig()  # get default matplotlib styles back
    clrs = sns.color_palette('husl', n_colors=len(c_list))  # a list of RGB tuples
    
    fig, ax = plt.subplots()
    length = len(gamma_list)
    for num, C_value in enumerate(c_list):
        lines=ax.plot(gamma_list, c[:,1][length*(num):length*(num+1)], '.-', label='C={:.4f}'.format(C_value))
        lines[0].set_color(clrs[num])
        ax.legend(title='C values')
        ax.set_ylim([0.75,None])
        ax.set_xlabel("Gamma")
        ax.set_ylabel("Average 5-fold cross-validated R^2 value")
    plt.show()
Example #32
    def plot_tipo_tweet(self, archivo_imagen=''):
        """
        Esta función toma como entrada el archivo de datos preprocesados y
        devuelve una imagen tipo diagrama de Venn con el tipo de tweets pescados
        """
        #Levantar los datos preprocesados del archivo

        originales = set(
            self.tweets.or_id.values)  # Set of original tweets
        rt = set(self.tweets[self.tweets.relacion_nuevo_original ==
                             'RT'].tw_id.values)  # Set of retweets
        citas = set(self.tweets[self.tweets.relacion_nuevo_original ==
                                'QT'].tw_id.values)  # Set of quote tweets
        total_tweets = len(
            originales.union(rt).union(citas))  # Total number of tweets

        # Build the figure
        labels = ['RT', 'Originales', 'QT']
        sizes = [
            100 * len(rt) / total_tweets, 100 * len(originales) / total_tweets,
            100 * len(citas) / total_tweets
        ]

        sbn.set_context("paper", font_scale=1.5)

        plt.figure(figsize=(11, 8), dpi=300)
        plt.title('Tipos de Tweets', fontsize=20)
        plt.pie(sizes, autopct='%1.1f%%')
        plt.legend(labels)
        plt.axis('equal')
        plt.text(
            -.1, -1.2,
            'El total de tweets registrados durante el período fue de : {}'.
            format(total_tweets))
        if archivo_imagen == '':
            plt.show()
        else:
            plt.savefig(archivo_imagen, bbox_inches='tight')
            plt.show()
        plt.clf()
        sbn.reset_orig()
Example #33
def plot_smooth_transforms(bb, aa, w_lo=1e-1, w_hi=1e4, **kwargs):
    import matplotlib.pyplot as pp
    import seaborn as sns
    # Fix until MPL or seaborn gets straightened out
    import warnings
    with warnings.catch_warnings():
        import matplotlib as mpl
        warnings.simplefilter('ignore', mpl.cbook.MatplotlibDeprecationWarning)
        sns.reset_orig()
    bb_s, aa_s = smooth_transfer_functions(bb, aa, **kwargs)

    g = bb.shape[:2]
    fr = np.logspace(np.log10(w_lo), np.log10(w_hi), 200)
    om = fr * 2 * np.pi
    with sns.plotting_context('notebook'), sns.axes_style('whitegrid'):
        f, axs = pp.subplots(*g, sharex=True, sharey=True)
        for i, j in itertools.product(range(g[0]), range(g[1])):
            _, h1 = signal.freqs(bb[i, j], aa[i, j], worN=om)
            _, h2 = signal.freqs(bb_s[i, j], aa_s[i, j], worN=om)
            axs[i, j].loglog(fr, np.c_[np.abs(h1), np.abs(h2)])
    pp.show()
Example #34
 def confusion_matrix(self):
     s = self.scale
     plt.figure(2, figsize=(5 * s + 2, 5 * s + 2))
     result = confusion_matrix(self.y_true, self.y_pred)
     df_cm = pd.DataFrame(
         result,
         ["Bad", "Good"],
         ["Bad", "Good"],
     )
     sn.set(font_scale=2)  # for label size
     sn.heatmap(df_cm,
                annot=True,
                annot_kws={"size": 30},
                fmt='g',
                cbar=False,
                cmap='Blues')  # font size
     plt.xlabel("Algorithm Classification")
     plt.ylabel("Human Classification")
     self.save_data_fun('confusion_matrix.svg')
     plt.show()
     sn.reset_orig()
Example #35
def plot_residual_violin(E_x, E_CNN, E_EXO, name_x, name_CNN, name_EXO, fOUT):
    import seaborn as sns
    import pandas as pd
    sns.set_style("whitegrid")
    dE_CNN = E_CNN - E_x
    dE_EXO = E_EXO - E_x
    bin_edges = [x for x in range(0, 4250, 250)]
    bin_width = int((bin_edges[1] - bin_edges[0]) / 2.0)
    data_dic = {'energy': [], 'residual': [], 'type': []}
    for i in range(len(E_x)):
        bin_CNN = np.digitize(E_x[i], bin_edges) - 1
        data_dic['energy'].append(bin_edges[bin_CNN] + bin_width)
        data_dic['residual'].append(dE_CNN[i])
        data_dic['type'].append(name_CNN)
        data_dic['energy'].append(bin_edges[bin_CNN] + bin_width)
        data_dic['residual'].append(dE_EXO[i])
        data_dic['type'].append(name_EXO)
    data = pd.DataFrame.from_dict(data_dic)
    fig, ax = plt.subplots()
    ax.axhline(y=0.0, lw=2, color='k')
    sns.violinplot(x='energy',
                   y='residual',
                   hue='type',
                   data=data,
                   inner="quartile",
                   palette='Set2',
                   split=True,
                   cut=0,
                   scale='area',
                   scale_hue=True,
                   bw=0.4)
    ax.set_ylim(-150, 150)
    ax.set_xlabel('%s Energy [keV]' % (name_x))
    ax.set_ylabel('Residual ( xxx - %s ) [keV]' % (name_x))
    fig.savefig(fOUT, bbox_inches='tight')
    plt.clf()
    plt.close()
    sns.reset_orig()
    return
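A usage sketch with synthetic energies; numpy (np) and matplotlib.pyplot (plt) are assumed to be imported at module level, and a seaborn version that still accepts the scale/bw violinplot arguments is assumed:

import numpy as np

rng = np.random.default_rng(2)
E_true = rng.uniform(700, 3300, size=3000)
E_cnn = E_true + rng.normal(scale=35.0, size=3000)
E_exo = E_true + rng.normal(scale=55.0, size=3000)

plot_residual_violin(E_true, E_cnn, E_exo, 'True', 'CNN', 'EXO', 'residual_violin.png')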
import numpy as np
import pandas as pd
import cPickle as pickle
import json
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pdb
import h5py
import importlib

sns.reset_orig()

plt.rcParams['figure.figsize'] = (6.0, 5.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'Blues'

# sns.set_style("ticks")
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2

def plot_multiple_likelhood_values(likelihood_arr, time_axis=0, 
                                   x=None, save_path='',
                                   title='', xlabel='', ylabel='',
                                   colors=['red', 'green', 'blue'],
                                   labels=['red', 'green', 'blue'],
                                   linestyle=['-', '-', '-', '-'],
def classify_manual_extended(masks, template1, template2, template3, template4, template5,
                             traces1, traces2, movie, threshold=80, window=3):
    """ Opens a GUI that lets you manually classify masks into any of the valid types.

    :param np.array masks: 3-d array of masks (num_masks, image_height, image_width)
    :param np.array template1: Image used as background to help with mask classification.
    :param np.array template2: Image used as background to help with mask classification.
    :param np.array template3: Image used as background to help with mask classification.
    :param np.array template4: Image used as background to help with mask classification.
    :param np.array template5: Series of 7 images used as background to help with mask classification.
    :param np.array traces1: 2-d array of mask activity plotted and used to highlight high activity frames (num_masks,num_frames)
    :param np.array traces2: 2-d array of mask activity, plotted (num_masks,num_frames)
    :param np.array movie: 3-d array of motion corrected imaging frames (image_height, image_width, num_frames)
    :param float threshold: percentile between 0 and 100 used to plot inner versus outer mask
    :param int window: odd number indicating width of window used in median filter of trace 1 searching for high activity frames
    """

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    import seaborn as sns
    from scipy import signal

    mask_types = []
    plt.ioff()

    for mask, trace1, trace2 in zip(masks, traces1, traces2):
        with sns.axes_style('white'):
            fig, axes = plt.subplots(4, 7, figsize=(30, 20))

        ir = mask.sum(axis=1) > 0
        ic = mask.sum(axis=0) > 0

        il, jl = [max(np.min(np.where(i)[0]) - 10, 0) for i in [ir, ic]]
        ih, jh = [min(np.max(np.where(i)[0]) + 10, len(i)) for i in [ir, ic]]
        plot_mask = np.array(mask[il:ih, jl:jh])

        for ax,template in zip(axes[0][:6], [plot_mask, template1, template2-template1,
                                             template1, template4, template3, template3*template1]):
            ax.matshow(template[il:ih, jl:jh], cmap=cm.get_cmap('gray'))
            ax.contour(plot_mask, np.percentile(mask[mask>0], threshold), linewidths=0.8, colors='w')
            ax.contour(plot_mask, [0.01], linewidths=0.8, colors='w')
            ax.set_aspect(1)
            ax.axis('off')

        for ax,template in zip(axes[1], template5):
            ax.matshow(template[il:ih, jl:jh], cmap=cm.get_cmap('gray'))
            ax.contour(plot_mask, np.percentile(mask[mask > 0], threshold), linewidths=0.8, colors='w')
            ax.contour(plot_mask, [0.01], linewidths=0.8, colors='w')
            ax.set_aspect(1)
            ax.axis('off')

        filt_trace = signal.medfilt(trace1, window)
        idx = detect_peaks(filt_trace, mpd=len(trace1)/window)
        centers = np.flip(sorted(np.stack([idx, filt_trace[idx]]).T, key=lambda x: x[1]))[:7]
        for ax,center in zip(axes[3], sorted(centers, key=lambda x: x[0])[:][0]):
            frame = np.max(movie[0][:, :, int(center-window/2):int(center+window/2+.5)])
            ax.matshow(frame[il:ih, jl:jh], cmap=cm.get_cmap('gray'))
            ax.contour(plot_mask, np.percentile(mask[mask > 0], threshold), linewidths=0.8, colors='w')
            ax.contour(plot_mask, [0.01], linewidths=0.8, colors='w')
            ax.set_aspect(1)
            ax.axis('off')

        trace1_ax = plt.subplot(8, 1, 5)
        trace1_ax.plot(trace1)
        trace1_ax.plot(centers, trace1[[int(center) for center in centers]], 'or')

        trace2_ax = plt.subplot(8, 1, 6)
        trace2_ax.plot(trace2)

        fig.tight_layout()
        fig.canvas.manager.window.wm_geometry("+250+250")
        fig.suptitle('S(o)ma, A(x)on, (D)endrite, (N)europil, (A)rtifact or (U)nknown?')

        def on_button(event):
            if event.key == 'o':
                mask_types.append('soma')
                plt.close(fig)
            elif event.key == 'x':
                mask_types.append('axon')
                plt.close(fig)
            elif event.key == 'd':
                mask_types.append('dendrite')
                plt.close(fig)
            elif event.key == 'n':
                mask_types.append('neuropil')
                plt.close(fig)
            elif event.key == 'a':
                mask_types.append('artifact')
                plt.close(fig)
            elif event.key == 'u':
                mask_types.append('unknown')
                plt.close(fig)

        fig.canvas.mpl_connect('key_press_event', on_button)

        plt.show()

    sns.reset_orig()

    return mask_types