Example no. 1
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits"""
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        pl.subplot(n_row, n_col, i + 1)
        pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[i], size=12)
        pl.xticks(())
        pl.yticks(())
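A minimal usage sketch (hypothetical data; it assumes pl is matplotlib's pylab module, as the helper above implies):

import numpy as np
import matplotlib.pylab as pl

h, w = 64, 64
images = np.random.rand(12, h * w)           # 12 flattened grayscale "portraits"
titles = ["face %d" % i for i in range(12)]  # one title per panel
plot_gallery(images, titles, h, w)           # default 3x4 grid
pl.show()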
Example no. 2
def corner_ele_plot(target, lines, file):

	sns.set_style("darkgrid")

	for line in lines:
		print('plotting results for', line)
		data = pd.read_pickle(file)
		plt.figure()

		data_f = {}

		for key in data:

			if '_'+line+'_' in key and not 'adaptive' in key and not line == 'HI' and \
				not line in ['H2J0', 'H2J1', 'H2J2', 'H2J3', 'H2J4', 'H2J5', 'H2J6', 'H2J7']:

				data_f[key] = data[key][0]

			elif line == 'HI' and line in key and not 'adaptive' in key:

				data_f[key] = data[key][0]

			elif line in ['H2J0', 'H2J1', 'H2J2', 'H2J3', 'H2J4', 'H2J5', 'H2J6', 'H2J7'] and \
				line in key and not 'adaptive' in key:

				data_f[key] = data[key][0]

			elif line in ['H2J0', 'H2J1', 'H2J2', 'H2J3', 'H2J4', 'H2J5', 'H2J6', 'H2J7'] and \
				key.startswith('b_H2') \
				and not 'adaptive' in key: # key.startswith(('b_H2', 'dz_H2'))

				data_f[key] = data[key][0]


		df = pd.DataFrame(data_f)
		# g = sns.pairplot(df, diag_kind="kde")  # redundant here: overwritten by the PairGrid below

		g = sns.PairGrid(df) 
		g.map_upper(sns.kdeplot, cmap="bone_r",n_levels=10,shade=True,
			shade_lowest=False)
		g.map_lower(sns.kdeplot, cmap="bone_r",n_levels=10,shade=True,
			shade_lowest=False)
		g.map_diag(plt.hist, alpha=0.8)

		plt.subplots_adjust(hspace=0.1, wspace=0.1)

		#g.axes[0,0].set_ylabel(r"$\mathrm{log} N\, ({\rm cm}^{-2})$")
		#g.axes[1,0].set_ylabel(r"$b\, {\rm (km/s)}$")
		#g.axes[2,0].set_ylabel(r"$v_0\, {\rm (km/s)}$")

		#g.axes[2,0].set_xlabel(r"$\mathrm{log} N\, ({\rm cm}^{-2})$")
		#g.axes[2,1].set_xlabel(r"$b\, {\rm (km/s)}$")
		#g.axes[2,2].set_xlabel(r"$v_0\, {\rm (km/s)}$")

		g.savefig(target[0:10]+"_"+str(line)+"_"+"corner.pdf")
Example no. 3
 def PlotAllChannels(self, event):
     fig, ax = plt.subplots(nrows=7,
                            ncols=2,
                            sharex=True,
                            sharey=True,
                            squeeze=False,
                            figsize=(12, 12))
     plt.subplots_adjust(hspace=0, wspace=0.05)
     for xi in range(7):
         for yi in range(2):
             ax[xi][yi].plot(range(10), range(10))
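With hspace=0 and shared axes, only the outer tick labels are needed; a small optional addition at the end of the method (a sketch using Matplotlib's Axes.label_outer) keeps the zero-gap grid readable:

     # hide tick labels on the inner panels; only the outer edges keep theirs
     for row in ax:
         for a in row:
             a.label_outer()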
Example no. 4
def plotting_two(im1, im2, subtitle1, subtitle2,
                 title):  # Function for plotting
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
    f.tight_layout()
    ax1.imshow(im1)
    ax1.set_title(subtitle1, fontsize=20)
    ax2.imshow(im2)
    ax2.set_title(subtitle2, fontsize=20)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.suptitle(title)
    plt.show()
def main():

    # DEFINITION OF THE PATHS TO THE FILES WITH THE CONTENT
    path_to_train_accuracy = 'accuracy_train_data.csv'
    path_to_train_loss = 'loss_train_data.csv'
    path_to_validation_accuracy = 'accuracy_validation_data.csv'
    path_to_validation_loss = 'loss_validation_data.csv'  # fixed: stray inner quotes removed

    # CREATE LIST OF NUMBER OF EPOCHS COMPUTED
    eval_indices = range(1, EPOCHS + 1)

    #LOADS THE DATA FROM THE FILES
    accuracy_train, loss_train, accuracy_validation, loss_validation = read_data(path_to_train_accuracy,path_to_train_loss,
                                                                                 path_to_validation_accuracy,path_to_validation_loss)

    # SHOW THE INFORMATION FOR QUALITY CONTROL
    print(list(eval_indices))
    print("Accuracy Train: ",accuracy_train)
    print("Loss Train: " ,loss_train)
    print("Accuracy Validation: ", accuracy_validation)
    print("Loss validation: ", loss_validation)

    # DRAW THE ACCURACY GRAPH FOR VALIDATION AND TRAIN
    plt.clf()
    plt.subplot(211)
    plt.plot(eval_indices, accuracy_train, 'k--', label='TREINO')
    plt.plot(eval_indices, accuracy_validation, 'g-x', label='VALIDAÇÃO')
    plt.legend(loc='upper right')
    plt.xlabel('Épocas')
    plt.ylabel('ACERTO')
    plt.grid(which='major', axis='both')

    # DRAW THE LOSS GRAPH FOR VALIDATION AND TRAIN
    plt.subplot(212)
    # plt.plot(eval_indices, train, 'g-x', label='Train Set Accuracy')
    plt.plot(eval_indices, loss_train, 'r-x', label='TREINO')
    # plt.plot(eval_indices, np.ones(len(eval_indices))/TOT_CLASSES, 'k--')
    plt.plot(eval_indices, loss_validation, 'k--', label='VALIDAÇÃO')
    plt.legend(loc="upper right")
    plt.xlabel('Épocas')
    plt.ylabel('ERRO')
    plt.ylim(0, 1)
    plt.grid(which='both', axis='y')

    plt.subplots_adjust(left=0.2, wspace=0.2, hspace=0.3)

    # SAVES BOTH GRAPHS IN ONE FILE NAMED "Learning.png"
    # (saved before plt.show(), so the file is written even if the window is closed first)
    plt.savefig('Learning.png')

    plt.show()
    plt.pause(0.01)
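main() relies on a read_data helper that is not shown in this example; a plausible sketch (hypothetical: it assumes each CSV file holds one numeric value per row) might be:

import csv

def read_data(acc_train_path, loss_train_path, acc_val_path, loss_val_path):
    """Hypothetical loader: returns four lists of floats, one per CSV file."""
    def load(path):
        with open(path) as f:
            return [float(row[0]) for row in csv.reader(f) if row]
    return (load(acc_train_path), load(loss_train_path),
            load(acc_val_path), load(loss_val_path))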
Example no. 6
    def boton_alcance_horizontalf(self):

        # __Variables needed to obtain R (horizontal range)__ #
        xi = int(self.entrada_posicion_x0.get())
        yi = int(self.entrada_posicion_y0.get())
        v0 = int(self.entrada_Rapidez_inicial.get())
        angulo0 = math.radians(int(self.entrada_angulo_inicial.get()))
        coseno = math.cos(angulo0)
        seno = math.sin(angulo0)
        vxo = v0 * coseno
        vyo = v0 * seno
        g = 9.8
        altura = int(self.entrada_posicion_y0.get()) + (((v0 * seno)**2) /
                                                        (2 * g))
        imp = (g * yi / math.pow(v0, 2))
        time = (v0 / g) * (seno + math.sqrt(math.pow(seno, 2) + 2 * imp))

        # __Equation split into 4 parts to obtain R__ #
        R1 = (math.pow(v0, 2) * math.sin(2 * angulo0)) / (2 * g)
        R2 = (v0 * coseno) / g
        R3 = np.sqrt((math.pow((v0 * seno), 2)) + (2 * yi * g))
        R = xi + R1 + R2 * R3

        imprimir = (
            "{0:.2f}".format(R)
        )  # imprimir stores the final result (R) formatted to two decimals
        print("R = ", R)  # debug print

        # __Plot styling__ #
        mpl.suptitle('Alcance Horizontal:', fontsize=22)
        mpl.subplots_adjust(top=0.80)
        mpl.title(R, fontsize=18, color='C3')
        mpl.xlim(0, R + 2)
        mpl.ylim(-0.03, altura + 2)
        mpl.xlabel("X(m)")
        mpl.ylabel("Y(m)")

        # __Drawing the curve__ #
        x = np.arange(0, time, 0.001)
        c_y = yi + vyo * x + (
            1 / 2) * -9.8 * x**2  # projectile-motion equation (vertical)
        c_x = xi + vxo * x + (
            1 / 2) * 0 * x**2  # projectile-motion equation (horizontal)
        mpl.plot(c_x, c_y, "-")  # full trajectory
        mpl.plot(R, 0, "ro")
        mpl.show()
Example no. 7
def sns_velo_pair_plot(target, line, file, nvoigts):

	sns.set_style("darkgrid")
	#sns.set_style("ticks")
	#sns.set_context("talk")

	for i in np.arange(1, nvoigts + 1, 1):

		data = pd.read_pickle(file)
		plt.figure()

		data_f = {}

		for key in data:
			if key.startswith(("v0", "N", "b")) and key.endswith(str(i)):
				data_f[key] = data[key][0]
			#if key in ["a", "a1", "a2"]:
			#	data_f[key] = data[key][0]
		
		df = pd.DataFrame(data_f)
		#g = sns.pairplot(df, diag_kind="kde")

		g = sns.PairGrid(df) #, diag_kws=dict(color="blue", shade=True))
		g.map_upper(sns.kdeplot, cmap="bone_r",n_levels=10,shade=True,
			shade_lowest=False)
		g.map_lower(sns.kdeplot, cmap="bone_r",n_levels=10,shade=True,
			shade_lowest=False)
		#g.map_diag(sns.kdeplot, lw=2);
		g.map_diag(plt.hist, alpha=0.8)

		plt.subplots_adjust(hspace=0.1, wspace=0.1)

		g.axes[0,0].set_ylabel(r"$\mathrm{log} N\, ({\rm cm}^{-2})$")
		g.axes[1,0].set_ylabel(r"$b\, {\rm (km/s)}$")
		g.axes[2,0].set_ylabel(r"$v_0\, {\rm (km/s)}$")

		g.axes[2,0].set_xlabel(r"$\mathrm{log} N\, ({\rm cm}^{-2})$")
		g.axes[2,1].set_xlabel(r"$b\, {\rm (km/s)}$")
		g.axes[2,2].set_xlabel(r"$v_0\, {\rm (km/s)}$")


		g.savefig(target+"_"+str(nvoigts)+"_"+str(i)+"_"+str(line)+"_"+"v.pdf")
Example no. 8
def plot_interpolation_results(list_of_dataframes, list_of_countries, column_to_plot, share_x=True):
    country_dimension = len(list_of_countries)
    dataframe_dimension = len(list_of_dataframes)
    # Set up the number of sub plots based on the dimensions of experiments and points
    figure_size_unit = 8 # This governs the size of each subplot on the figure 
    sns.set_style("ticks", {'axes.grid': True, 'grid.color': '.8', 'grid.linestyle': '-'})
    plt.rcParams.update({'axes.titlesize' : 18, 'lines.linewidth' : 3,\
         'axes.labelsize' : 16, 'xtick.labelsize' : 16, 'ytick.labelsize' : 16})
    figure, axes = plt.subplots(country_dimension, 2, sharex=share_x, figsize=(figure_size_unit * dataframe_dimension, figure_size_unit * country_dimension))
    for country_index, country in enumerate(list_of_countries):
        for dataframe_index, dataframe in enumerate(list_of_dataframes):
            # Use the "cross section" method to grab the results for a specific experiment and points configuration
            dataframe_to_plot = dataframe.loc[pd.IndexSlice[:,:,[country]], :].loc[:, [column_to_plot]].\
                reset_index(level=["Region", "Income Group", "Country", "Decade"])
            # Set up the title for the plot
            if dataframe_index == 0:
                title = column_to_plot + "\nBEFORE Interpolation For " + country
            else:
                title = column_to_plot + "\nAFTER Interpolation For " + country
            # Send the data off to get plotted
            create_sub_plot(figure, axes, country_index, dataframe_index, dataframe_to_plot, "Year", column_to_plot, title)
    plt.subplots_adjust(hspace=.4)
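plot_interpolation_results delegates the actual drawing to a create_sub_plot helper that is not shown; a hypothetical sketch compatible with the call above (it assumes more than one country, so axes is a 2-D array, and that "Year" is a column after reset_index) could be:

def create_sub_plot(figure, axes, row, col, dataframe, x_column, y_column, title):
    """Hypothetical helper: draw one line plot on the requested grid cell."""
    ax = axes[row][col]
    ax.plot(dataframe[x_column], dataframe[y_column])
    ax.set_title(title)
    ax.set_xlabel(x_column)
    ax.set_ylabel(y_column)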
Example no. 9
 def PlotAllChannels(self, event):
     fig, ax = plt.subplots(nrows=7, ncols=2, sharex=True, sharey=True, squeeze=False, figsize=(12, 12))
     plt.subplots_adjust(hspace=0, wspace=0.05)
     for xi in range(7):
         for yi in range(2):
             ax[xi][yi].plot(range(10), range(10))
Example no. 10
    Y = list(data_subset[to_plot])
    Y_err = [0] * len(Y)

#    Y_err = list(data_subset[to_plot + '_std'])
#    plt.errorbar(T[:], Y[:], yerr = Y_err, markersize = 5, marker = 'o', label = type)

    plt.plot(T[3:], Y[3:], markersize = 5, lw = 3, marker = 'o', label = type[4:-8] + ' spins')

    # if i == 0:
    #      plt.plot(T[:], Y[:], markersize = 5, lw = 3, marker = 'o', label = '1D')
    # elif i == 1 :
    #      plt.plot(T[1:], Y[1:], markersize = 5, lw = 3, marker = 'o', label = '1.5D')
    # elif i == 2 :
    #      plt.plot(T[1:], Y[1:], markersize = 5, lw = 3, marker = 'o', label = '2D')
    # else:
    #      plot(T[7:], Y[7:], markersize = 5, lw = 3, marker = 'o', label = '2.5D')

plt.xlabel('$T$', fontsize = 20)
plt.ylabel('$E$', fontsize = 20, rotation = 'horizontal', labelpad = 25)

#plt.axvline(x = 2.2, lw = 5, color = 'k', alpha = 0.2)

plt.subplots_adjust(left = 0.15, right = 0.92, top = 0.92, bottom = 0.15)
plt.tick_params(axis = 'both', which = 'major', labelsize = 20)


plt.xlim(left = 0, right = 5)
plt.ylim(bottom = -2.1, top = 0)
legend = plt.legend(fontsize = 18, loc = 2)
plt.show()
Example no. 11
def print_sat_mutagen_figure(filename,
                             rhapsody_obj,
                             res_interval=None,
                             min_interval_size=15,
                             extra_plot=None,
                             html=False,
                             PP2=True,
                             EVmutation=True,
                             EVmut_cutoff=-4.551,
                             fig_height=8,
                             fig_width=None,
                             dpi=300):

    # check inputs
    assert isinstance(filename, str), 'filename must be a string'
    assert isinstance(rhapsody_obj, Rhapsody), 'not a Rhapsody object'
    assert rhapsody_obj.predictions is not None, 'predictions not found'
    if res_interval is not None:
        assert isinstance(res_interval, tuple) and len(res_interval) == 2, \
               'res_interval must be a tuple of 2 values'
        assert res_interval[1] >= res_interval[0], 'invalid res_interval'
    if extra_plot is not None:
        assert len(extra_plot) == len(rhapsody_obj.predictions), \
               'length of additional predictions array is incorrect'
    assert isinstance(fig_height, (int, float))
    assert isinstance(dpi, int)

    matplotlib = try_import_matplotlib()
    if matplotlib is None:
        return

    # delete extension from filename
    filename = os.path.splitext(filename)[0]

    # make sure that all variants belong to the same Uniprot sequence
    s = rhapsody_obj.SAVcoords['acc']
    if len(set(s)) != 1:
        m = 'Only variants from a single Uniprot sequence can be accepted'
        raise ValueError(m)

    if rhapsody_obj.auxPreds is not None:
        aux_preds_found = True
    else:
        aux_preds_found = False

    # import pathogenicity probability from Rhapsody object
    p_full = rhapsody_obj.predictions['path. probability']
    p_mix = None
    if aux_preds_found:
        p_mix = rhapsody_obj.mixPreds['path. probability']

    # select an appropriate interval, based on available predictions
    res_min = np.min(rhapsody_obj.SAVcoords['pos'])
    res_max = np.max(rhapsody_obj.SAVcoords['pos'])
    upper_lim = res_max + min_interval_size

    # create empty (20 x num_res) mutagenesis tables
    table_full = np.zeros((20, upper_lim), dtype=float)
    table_full[:] = 'nan'
    table_mix = table_full.copy()
    if extra_plot is not None:
        table_other = table_full.copy()
    if PP2:
        table_PP2 = table_full.copy()
    if EVmutation:
        table_EVmut = table_full.copy()

    # fill tables with predicted probability
    #  1:    deleterious
    #  0:    neutral
    # 'nan': no prediction/wt
    aa_list = 'ACDEFGHIKLMNPQRSTVWY'
    aa_map = {aa: i for i, aa in enumerate(aa_list)}
    for i, SAV in enumerate(rhapsody_obj.SAVcoords):
        aa_mut = SAV['aa_mut']
        index = SAV['pos'] - 1
        table_full[aa_map[aa_mut], index] = p_full[i]
        if aux_preds_found:
            table_mix[aa_map[aa_mut], index] = p_mix[i]
        if extra_plot is not None:
            table_other[aa_map[aa_mut], index] = extra_plot[i]
        if PP2:
            s = float(rhapsody_obj.PP2output['pph2_prob'][i])
            table_PP2[aa_map[aa_mut], index] = s
        if EVmutation:
            s = rhapsody_obj.calcEVmutationFeats()['EVmut-DeltaE_epist'][i]
            table_EVmut[aa_map[aa_mut], index] = s / EVmut_cutoff * 0.5

    # compute average pathogenicity profiles
    # NB: I expect to see RuntimeWarnings in this block
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        avg_p_full = np.nanmean(table_full, axis=0)
        avg_p_mix = np.nanmean(table_mix, axis=0)
        min_p = np.nanmin(table_mix, axis=0)
        max_p = np.nanmax(table_mix, axis=0)
        if extra_plot is not None:
            avg_p_other = np.nanmean(table_other, axis=0)
        if PP2:
            avg_p_PP2 = np.nanmean(table_PP2, axis=0)
        if EVmutation:
            avg_p_EVmut = np.nanmean(table_EVmut, axis=0)

    # use upper strip for showing additional info, such as PDB lengths
    upper_strip = np.zeros((1, upper_lim))
    upper_strip[:] = 'nan'
    PDB_sizes = np.zeros(upper_lim, dtype=int)
    PDB_coords = [''] * upper_lim
    for a, b in zip(rhapsody_obj.SAVcoords, rhapsody_obj.Uniprot2PDBmap):
        index = a['pos'] - 1
        if b['PDB size'] != 0:
            PDB_length = int(b['PDB size'])
            PDBID_chain = ':'.join(b['PDB SAV coords'][0].split()[:2])
            upper_strip[0, index] = PDB_length
            PDB_sizes[index] = PDB_length
            PDB_coords[index] = PDBID_chain
    max_PDB_size = max(PDB_sizes)
    if max_PDB_size != 0:
        upper_strip[0, :] /= max_PDB_size

    # final data to show on figure
    if aux_preds_found:
        table_final = table_mix
        avg_p_final = avg_p_mix
        pclass_final = rhapsody_obj.mixPreds['path. class']
    else:
        table_final = table_full
        avg_p_final = avg_p_full
        pclass_final = rhapsody_obj.predictions['path. class']
    # avg_p_final = np.where(np.isnan(avg_p_full), avg_p_mix, avg_p_full)

    # PLOT FIGURE

    from matplotlib import pyplot as plt
    from matplotlib import gridspec as gridspec

    # portion of the sequence to display
    if res_interval is None:
        res_interval = (res_min, res_max)
    # adjust interval
    res_i, res_f = adjust_res_interval(res_interval, upper_lim,
                                       min_interval_size)
    nres_shown = res_f - res_i + 1

    # figure proportions
    if fig_width is None:
        fig_width = fig_height / 2  # inches
        fig_width *= nres_shown / 20
    fig, ax = plt.subplots(3, 2, figsize=(fig_width, fig_height))
    wspace = 0.5  # inches
    plt.subplots_adjust(wspace=wspace / fig_width, hspace=0.15)

    # figure structure
    gs = gridspec.GridSpec(3,
                           2,
                           width_ratios=[nres_shown, 1],
                           height_ratios=[1, 20, 10])
    ax0 = plt.subplot(gs[0, 0])  # secondary structure strip
    ax1 = plt.subplot(gs[1, 0])  # mutagenesis table
    axcb = plt.subplot(gs[1, 1])  # colorbar
    ax2 = plt.subplot(gs[2, 0])  # average profile

    # padding for tick labels
    pad = 0.2 / fig_width

    # top strip
    ax0.imshow(upper_strip[0:1, res_i - 1:res_f],
               aspect='auto',
               cmap='YlGn',
               vmin=0,
               vmax=1)
    ax0.set_ylim((-0.45, .45))
    ax0.set_yticks([])
    ax0.set_ylabel(f'PDB size \n[0-{max_PDB_size} res] ',
                   fontsize=14,
                   ha='right',
                   va='center',
                   rotation=0)
    ax0.set_xticks([])

    # mutagenesis table (heatmap)
    matplotlib.cm.coolwarm.set_bad(color='white')
    im = ax1.imshow(table_final[:, res_i - 1:res_f],
                    aspect='auto',
                    cmap='coolwarm',
                    vmin=0,
                    vmax=1)
    axcb.figure.colorbar(im, cax=axcb)
    ax1.set_yticks(np.arange(len(aa_list)))
    ax1.set_yticklabels(aa_list, ha='center', position=(-pad, 0), fontsize=14)
    ax1.set_xticks(np.arange(5 - res_i % 5, res_f - res_i + 1, 5))
    ax1.set_xticklabels([])
    ax1.set_ylabel('pathog. probability', labelpad=10)

    # average pathogenicity profile
    x_resids = np.arange(1, upper_lim + 1)
    # shading showing range of values
    ax2.fill_between(x_resids,
                     min_p,
                     max_p,
                     alpha=0.5,
                     edgecolor='salmon',
                     facecolor='salmon')
    # plot average profile for other predictions, if available
    if extra_plot is not None:
        ax2.plot(x_resids, avg_p_other, color='gray', lw=1)
    if PP2:
        ax2.plot(x_resids, avg_p_PP2, color='blue', lw=1)
    if EVmutation:
        ax2.plot(x_resids, avg_p_EVmut, color='green', lw=1)
    # solid line for predictions obtained with full classifier
    ax2.plot(x_resids, avg_p_full, 'ro-')
    # dotted line for predictions obtained with auxiliary classifier
    ax2.plot(x_resids, avg_p_final, 'ro-', markerfacecolor='none', ls='dotted')
    # cutoff line
    ax2.axhline(y=0.5, color='grey', lw=.8, linestyle='dashed')

    ax2.set_xlim((res_i - .5, res_f + .5))
    ax2.set_xlabel('residue number')
    ax2.set_ylim((-0.05, 1.05))
    ax2.set_ylabel('average', rotation=90, labelpad=10)
    ax2.set_yticklabels([])
    ax2r = ax2.twinx()
    ax2r.set_ylim((-0.05, 1.05))
    ax2r.set_yticks([0, .5, 1])
    ax2r.set_yticklabels(['0', '0.5', '1'])
    ax2r.tick_params(axis='both', which='major', pad=15)

    tight_padding = 0.1
    fig.savefig(filename + '.png',
                format='png',
                bbox_inches='tight',
                pad_inches=tight_padding,
                dpi=dpi)
    plt.close()
    plt.rcParams.update(plt.rcParamsDefault)
    LOGGER.info(f'Saturation mutagenesis figure saved to {filename}.png')

    # write a map in html format, to make figure clickable
    if html:
        all_axis = {'strip': ax0, 'table': ax1, 'bplot': ax2}

        # precompute some useful quantities for html code
        html_data = {}
        # dpi of printed figure
        html_data["dpi"] = dpi
        # figure size *before* tight
        html_data["fig_size"] = fig.get_size_inches()
        # tight bbox as used by fig.savefig()
        html_data["tight_bbox"] = fig.get_tightbbox(fig.canvas.get_renderer())
        # compute new origin and height, based on tight box and padding
        html_data["new_orig"] = html_data["tight_bbox"].min - tight_padding
        html_data["new_height"] = (html_data["tight_bbox"].height +
                                   2 * tight_padding)

        def get_area_coords(ax, d):
            assert ax_type in ("strip", "table", "bplot")
            # get bbox coordinates (x0, y0, x1, y1)
            bbox = ax.get_position().get_points()
            # get bbox coordinates in inches
            b_inch = bbox * d["fig_size"]
            # adjust bbox coordinates based on tight bbox
            b_adj = b_inch - d["new_orig"]
            # use html reference system (y = 1 - y)
            b_html = b_adj * np.array([1, -1]) + np.array([0, d["new_height"]])
            # convert to pixels
            b_px = (d["dpi"] * b_html).astype(int)
            # put in html format
            coords = '{},{},{},{}'.format(*b_px.flatten())
            # output
            return coords

        # html templates
        area_html = Template(
        '<area shape="rect" coords="$coords" ' + \
        'id="{{map_id}}_$areaid" {{area_attrs}}> \n'
        )

        # write html
        with open(filename + '.html', 'w') as f:
            f.write('<div>\n')
            f.write('<map name="{{map_id}}" id="{{map_id}}" {{map_attrs}}>\n')
            for ax_type, ax in all_axis.items():
                fields = {'areaid': ax_type}
                fields['coords'] = get_area_coords(ax, html_data)
                f.write(area_html.substitute(fields))
            f.write('</map>\n')
            f.write('</div>\n')

        # populate info table that will be passed as a javascript variable
        info = {}
        for k in ['strip', 'table', 'bplot']:
            n_cols = 20 if k == 'table' else 1
            info[k] = [[''] * nres_shown for i in range(n_cols)]
        for i, SAV in enumerate(rhapsody_obj.SAVcoords):
            resid = SAV['pos']
            aa_wt = SAV['aa_wt']
            aa_mut = SAV['aa_mut']
            # consider only residues shown in figure
            if not (res_i <= resid <= res_f):
                continue
            # SAV & PDB coordinates
            SAV_code = f'{aa_wt}{resid}{aa_mut}'
            PDB_code = PDB_coords[resid - 1]
            # coordinates on table
            t_i = aa_map[aa_mut]
            t_j = resid - 1
            # coordinates on *shown* table
            ts_i = t_i
            ts_j = resid - res_i
            # predictions and other info
            rh_pred = table_final[t_i, t_j]
            av_rh_pred = avg_p_final[t_j]
            pclass = pclass_final[i]
            others = {}
            if extra_plot is not None:
                others['other'] = (table_other[t_i, t_j], avg_p_other[t_j])
            if PP2:
                others['PP2'] = (table_PP2[t_i, t_j], avg_p_PP2[t_j])
            if EVmutation:
                others['EVmut'] = (table_EVmut[t_i, t_j], avg_p_EVmut[t_j])
            # compose message for table
            m = f'{SAV_code}: {rh_pred:4.2f} ({pclass})'
            for k, t in others.items():
                m += f', {k}={t[0]:<4.2f}'
            info['table'][ts_i][ts_j] = m
            info['table'][aa_map[aa_wt]][ts_j] = f'{SAV_code[:-1]}: wild-type'
            # compose message for upper strip
            m = f'{PDB_code}'
            if PDB_code != '':
                m += f' (size: {PDB_sizes[t_j]} res)'
            info['strip'][0][ts_j] = m
            # compose message for bottom plot
            m = f'{SAV_code[:-1]}: {av_rh_pred:4.2f}'
            for k, t in others.items():
                m += f', {k}={t[1]:<4.2f}'
            info['bplot'][0][ts_j] = m

        def create_info_msg(ax_type, d):
            text = '[ \n'
            for row in d:
                text += '  ['
                for m in row:
                    text += f'"{m}",'
                text += '], \n'
            text += ']'
            return text

        area_js = Template(
        '{{map_data}}["{{map_id}}_$areaid"] = { \n' + \
        '  "num_rows": $num_rows, \n' + \
        '  "num_cols": $num_cols, \n' + \
        '  "info_msg": $info_msg, \n' + \
        '}; \n'
        )

        # dump info in javascript format
        with open(filename + '.js', 'w') as f:
            f.write('var {{map_data}} = {{map_data}} || {}; \n')
            for ax_type, d in info.items():
                vars = {'areaid': ax_type}
                vars['num_rows'] = 20 if ax_type == 'table' else 1
                vars['num_cols'] = nres_shown
                vars['info_msg'] = create_info_msg(ax_type, d)
                f.write(area_js.substitute(vars))

        return info
    return
Example no. 12
ncols = 3
plt.clf()
f = plt.figure(1)
f.suptitle(" Data Histograms", fontsize=12)
vlist = list(data.columns)
nrows = len(vlist) // ncols
if len(vlist) % ncols > 0:
    nrows += 1
for i, var in enumerate(vlist):
    plt.subplot(nrows, ncols, i + 1)
    plt.hist(data[var].values, bins=15)
    plt.title(var, fontsize=10)
    plt.tick_params(labelbottom=False, labelleft=False)
plt.tight_layout()
plt.subplots_adjust(top=0.88)
plt.show()


def process(X_train, X_test, y_train, y_test):
    #X_train, X_test, y_train, y_test = train_test_split(x1, y1)
    model3 = LogisticRegression()
    model3.fit(X_train, y_train)
    y = model3.predict(X_test)
    print("MSE VALUE FOR LogisticRegression IS %f " %
          mean_squared_error(y_test, y))
    print("MAE VALUE FOR LogisticRegression IS %f " %
          mean_absolute_error(y_test, y))
    print("R-SQUARED VALUE FOR LogisticRegression IS %f " %
          r2_score(y_test, y))
    rms = np.sqrt(mean_squared_error(y_test, y))
Example no. 13
                   data=df)
ax16.set_title("Credit Amount by Job", fontsize=15)
ax16.set_xlabel("Job Reference", fontsize=12)
ax16.set_ylabel("Credit Amount", fontsize=12)

ax17 = sns.violinplot(x="Job",
                      y="Age",
                      hue="Risk",
                      split=True,
                      pallete="hls",
                      ax=ax[1],
                      data=df)
ax17.set_title("Job class vs Age", fontsize=15)
ax17.set_xlabel("Job class", fontsize=12)
ax17.set_ylabel("Age", fontsize=12)
plt.subplots_adjust(hspace=0.4, top=0.9)
plt.savefig("../output/job_vs_age_credit_amount_risk.png")
#plt.show()

#Visualizing the distribution of credit amount

#Histogram:
x1 = np.log(df.loc[df["Risk"] == 'good']['Credit amount'])
x2 = np.log(df.loc[df["Risk"] == 'bad']['Credit amount'])
#histo = [x1, x2]
#group_labels = ["Good credit", "Bad credit"]

ax18 = plt.figure(18)
#ax18 = sns.distplot(x1)
ax18 = sns.distplot(x1, label="Good credit", color="r")
ax18 = sns.distplot(x2, label="Bad credit", color="b")
Example no. 14
def print_sat_mutagen_figure(filename,
                             rhapsody_obj,
                             res_interval=None,
                             PolyPhen2=True,
                             EVmutation=True,
                             extra_plot=None,
                             fig_height=8,
                             fig_width=None,
                             dpi=300,
                             min_interval_size=15,
                             html=False,
                             main_clsf='main',
                             aux_clsf='aux.'):

    # check inputs
    assert isinstance(filename, str), 'filename must be a string'
    assert isinstance(rhapsody_obj, Rhapsody), 'not a Rhapsody object'
    assert rhapsody_obj._isColSet('main score'), 'predictions not found'
    assert rhapsody_obj._isSaturationMutagenesis(), 'unable to create figure'
    if res_interval is not None:
        assert isinstance(res_interval, tuple) and len(res_interval) == 2, \
               'res_interval must be a tuple of 2 values'
        assert res_interval[1] >= res_interval[0], 'invalid res_interval'
    if extra_plot is not None:
        assert len(extra_plot) == rhapsody_obj.numSAVs, \
               'length of additional predictions array is incorrect'
    assert isinstance(fig_height, (int, float))
    assert isinstance(dpi, int)

    matplotlib = _try_import_matplotlib()
    if matplotlib is None:
        return

    # delete extension from filename
    filename = os.path.splitext(filename)[0]

    # make sure that all variants belong to the same Uniprot sequence
    accs = [s.split()[0] for s in rhapsody_obj.data['SAV coords']]
    if len(set(accs)) != 1:
        m = 'Only variants from a single Uniprot sequence can be accepted'
        raise ValueError(m)

    # select an appropriate interval, based on available predictions
    seq_pos = [int(s.split()[1]) for s in rhapsody_obj.data['SAV coords']]
    res_min = np.min(seq_pos)
    res_max = np.max(seq_pos)
    upper_lim = res_max + min_interval_size

    # create empty (20 x num_res) mutagenesis tables
    table_best = np.zeros((20, upper_lim), dtype=float)
    table_best[:] = 'nan'
    table_main = table_best.copy()
    if extra_plot is not None:
        table_other = table_best.copy()
    if PolyPhen2:
        table_PP2 = table_best.copy()
    if EVmutation:
        table_EVmut = table_best.copy()

    # import pathogenicity probabilities from Rhapsody object
    p_best = rhapsody_obj.getPredictions(classifier='best')['path. prob.']
    p_main = rhapsody_obj.data['main path. prob.']
    if PolyPhen2:
        rhapsody_obj._calcPolyPhen2Predictions()
        p_PP2 = rhapsody_obj.data['PolyPhen-2 score']
    if EVmutation:
        rhapsody_obj._calcEVmutationPredictions()
        EVmut_score = np.array(rhapsody_obj.data['EVmutation score'])
        EVmut_cutoff = SETTINGS.get('EVmutation_metrics')['optimal cutoff']
        p_EVmut = -EVmut_score / EVmut_cutoff * 0.5

    # fill tables with predicted probability
    #  1:    deleterious
    #  0:    neutral
    # 'nan': no prediction/wt
    aa_list = 'ACDEFGHIKLMNPQRSTVWY'
    aa_map = {aa: i for i, aa in enumerate(aa_list)}
    for i, SAV in enumerate(rhapsody_obj.data['SAV coords']):
        aa_mut = SAV.split()[3]
        index = int(SAV.split()[1]) - 1
        table_best[aa_map[aa_mut], index] = p_best[i]
        table_main[aa_map[aa_mut], index] = p_main[i]
        if extra_plot is not None:
            table_other[aa_map[aa_mut], index] = extra_plot[i]
        if PolyPhen2:
            table_PP2[aa_map[aa_mut], index] = p_PP2[i]
        if EVmutation:
            table_EVmut[aa_map[aa_mut], index] = p_EVmut[i]

    # compute average pathogenicity profiles
    # NB: I expect to see RuntimeWarnings in this block
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        avg_p_best = np.nanmean(table_best, axis=0)
        avg_p_main = np.nanmean(table_main, axis=0)
        min_p = np.nanmin(table_best, axis=0)
        max_p = np.nanmax(table_best, axis=0)
        if extra_plot is not None:
            avg_p_other = np.nanmean(table_other, axis=0)
        if PolyPhen2:
            avg_p_PP2 = np.nanmean(table_PP2, axis=0)
        if EVmutation:
            avg_p_EVmut = np.nanmean(table_EVmut, axis=0)

    # use upper strip for showing additional info, such as PDB lengths
    upper_strip = np.zeros((1, upper_lim))
    upper_strip[:] = 'nan'
    PDB_sizes = np.zeros(upper_lim, dtype=int)
    PDB_coords = [''] * upper_lim
    for s in rhapsody_obj.data:
        index = int(s['SAV coords'].split()[1]) - 1
        if s['PDB size'] != 0:
            PDB_length = int(s['PDB size'])
            PDBID_chain = ':'.join(s['PDB SAV coords'][0].split()[:2])
            upper_strip[0, index] = PDB_length
            PDB_sizes[index] = PDB_length
            PDB_coords[index] = PDBID_chain
    max_PDB_size = max(PDB_sizes)
    if max_PDB_size != 0:
        upper_strip[0, :] /= max_PDB_size

    # PLOT FIGURE

    from matplotlib import pyplot as plt
    from matplotlib import gridspec as gridspec

    # portion of the sequence to display
    if res_interval is None:
        res_interval = (res_min, res_max)
    # adjust interval
    res_i, res_f = _adjust_res_interval(res_interval, upper_lim,
                                        min_interval_size)
    nres_shown = res_f - res_i + 1

    # figure proportions
    if fig_width is None:
        fig_width = fig_height / 2  # inches
        fig_width *= nres_shown / 20
    fig, ax = plt.subplots(3, 2, figsize=(fig_width, fig_height))
    wspace = 0.5  # inches
    plt.subplots_adjust(wspace=wspace / fig_width, hspace=0.15)

    # figure structure
    gs = gridspec.GridSpec(3,
                           2,
                           width_ratios=[nres_shown, 1],
                           height_ratios=[1, 20, 10])
    ax0 = plt.subplot(gs[0, 0])  # secondary structure strip
    ax1 = plt.subplot(gs[1, 0])  # mutagenesis table
    axcb = plt.subplot(gs[1, 1])  # colorbar
    ax2 = plt.subplot(gs[2, 0])  # average profile

    # padding for tick labels
    pad = 0.2 / fig_width

    # top strip
    matplotlib.cm.YlGn.set_bad(color='antiquewhite')
    ax0.imshow(upper_strip[0:1, res_i - 1:res_f],
               aspect='auto',
               cmap='YlGn',
               vmin=0,
               vmax=1)
    ax0.set_ylim((-0.45, .45))
    ax0.set_yticks([])
    ax0.set_ylabel(f'PDB size \n[0-{max_PDB_size} res] ',
                   fontsize=14,
                   ha='right',
                   va='center',
                   rotation=0)
    ax0.set_xticks(np.arange(5 - res_i % 5, res_f - res_i + 1, 5))
    ax0.set_xticklabels([])
    # add white grid
    ax0.set_xticks(np.arange(-.5, res_f - res_i + 1, 1), minor=True)
    ax0.tick_params(axis='both', which='minor', length=0)
    ax0.grid(which='minor', color='w', linestyle='-', linewidth=.5)

    # mutagenesis table (heatmap)
    matplotlib.cm.coolwarm.set_bad(color='#F9E79F')
    im = ax1.imshow(table_best[:, res_i - 1:res_f],
                    aspect='auto',
                    cmap='coolwarm',
                    vmin=0,
                    vmax=1)
    axcb.figure.colorbar(im, cax=axcb)
    ax1.set_yticks(np.arange(len(aa_list)))
    ax1.set_yticklabels(aa_list, ha='center', position=(-pad, 0), fontsize=14)
    ax1.set_xticks(np.arange(5 - res_i % 5, res_f - res_i + 1, 5))
    ax1.set_xticklabels([])
    ax1.set_ylabel('pathog. probability', labelpad=10)
    # add white grid
    ax1.set_xticks(np.arange(-.5, res_f - res_i + 1, 1), minor=True)
    ax1.set_yticks(np.arange(-.5, 20, 1), minor=True)
    ax1.tick_params(axis='both', which='minor', length=0)
    ax1.grid(which='minor', color='w', linestyle='-', linewidth=.5)

    # average pathogenicity profile
    x_resids = np.arange(1, upper_lim + 1)
    # shading showing range of values
    # NB: a bug in pyplot.fill_between() arises when selecting a region with
    # set_xlim() in a large plot (e.g. > 1000), causing the shaded area to
    # be plotted even though it's outside the selected region. As a workaround,
    # here I slice the plot to fit the selected region.
    sl = slice(max(0, res_i - 2), min(res_f + 2, upper_lim + 1))
    ax2.fill_between(x_resids[sl],
                     min_p[sl],
                     max_p[sl],
                     alpha=0.5,
                     edgecolor='salmon',
                     facecolor='salmon')
    # plot average profile for other predictions, if available
    if extra_plot is not None:
        ax2.plot(x_resids, avg_p_other, color='gray', lw=1)
    if PolyPhen2:
        ax2.plot(x_resids, avg_p_PP2, color='blue', lw=1)
    if EVmutation:
        ax2.plot(x_resids, avg_p_EVmut, color='green', lw=1)
    # solid line for predictions obtained with full classifier
    ax2.plot(x_resids, avg_p_main, 'ro-')
    # dotted line for predictions obtained with auxiliary classifier
    ax2.plot(x_resids, avg_p_best, 'ro-', markerfacecolor='none', ls='dotted')
    # cutoff line
    ax2.axhline(y=0.5, color='grey', lw=.8, linestyle='dashed')

    ax2.set_xlim((res_i - .5, res_f + .5))
    ax2.set_xlabel('residue number')
    ax2.set_ylim((-0.05, 1.05))
    ax2.set_ylabel('average', rotation=90, labelpad=10)
    ax2.set_yticklabels([])
    ax2r = ax2.twinx()
    ax2r.set_ylim((-0.05, 1.05))
    ax2r.set_yticks([0, .5, 1])
    ax2r.set_yticklabels(['0', '0.5', '1'])
    ax2r.tick_params(axis='both', which='major', pad=15)

    tight_padding = 0.1
    fig.savefig(filename + '.png',
                format='png',
                bbox_inches='tight',
                pad_inches=tight_padding,
                dpi=dpi)
    plt.close()
    plt.rcParams.update(plt.rcParamsDefault)
    LOGGER.info(f'Saturation mutagenesis figure saved to {filename}.png')

    # write a map in html format, to make figure clickable
    if html:
        all_axis = {'strip': ax0, 'table': ax1, 'bplot': ax2}

        # precompute some useful quantities for html code
        html_data = {}
        # dpi of printed figure
        html_data["dpi"] = dpi
        # figure size *before* tight
        html_data["fig_size"] = fig.get_size_inches()
        # tight bbox as used by fig.savefig()
        html_data["tight_bbox"] = fig.get_tightbbox(fig.canvas.get_renderer())
        # compute new origin and height, based on tight box and padding
        html_data["new_orig"] = html_data["tight_bbox"].min - tight_padding
        html_data["new_height"] = (html_data["tight_bbox"].height +
                                   2 * tight_padding)

        def get_area_coords(ax, d):
            assert ax_type in ("strip", "table", "bplot")
            # get bbox coordinates (x0, y0, x1, y1)
            bbox = ax.get_position().get_points()
            # get bbox coordinates in inches
            b_inch = bbox * d["fig_size"]
            # adjust bbox coordinates based on tight bbox
            b_adj = b_inch - d["new_orig"]
            # use html reference system (y = 1 - y)
            b_html = b_adj * np.array([1, -1]) + np.array([0, d["new_height"]])
            # convert to pixels
            b_px = (d["dpi"] * b_html).astype(int)
            b_px = np.sort(b_px, axis=0)
            # put in html format
            coords = '{},{},{},{}'.format(*b_px.flatten())
            # output
            return coords

        # html templates
        area_html = Template('<area shape="rect" coords="$coords" '
                             'id="{{map_id}}_$areaid" {{area_attrs}}> \n')

        # write html
        with open(filename + '.html', 'w') as f:
            f.write('<div>\n')
            f.write('<map name="{{map_id}}" id="{{map_id}}" {{map_attrs}}>\n')
            for ax_type, ax in all_axis.items():
                fields = {'areaid': ax_type}
                fields['coords'] = get_area_coords(ax, html_data)
                f.write(area_html.substitute(fields))
            f.write('</map>\n')
            f.write('</div>\n')

        # populate info table that will be passed as a javascript variable
        best_preds = rhapsody_obj.getPredictions()
        best_avg_preds = rhapsody_obj.getResAvgPredictions()
        PDB_coords = rhapsody_obj.getPDBcoords()
        abbrev = {
            '?': '?',
            'deleterious': 'del',
            'neutral': 'neu',
            'prob.delet.': 'p.del',
            'prob.neutral': 'p.neu'
        }
        info = {}
        for k in ['strip', 'table', 'bplot']:
            n_cols = 20 if k == 'table' else 1
            info[k] = [[''] * nres_shown for i in range(n_cols)]
        for i, row in enumerate(rhapsody_obj.data):
            SAV = row['SAV coords']
            acc, resid, aa_wt, aa_mut = SAV.split()
            resid = int(resid)
            # consider only residues shown in figure
            if not (res_i <= resid <= res_f):
                continue
            # SAV coordinates
            SAV_code = f'{aa_wt}{resid}{aa_mut}'
            # coordinates on table
            t_i = aa_map[aa_mut]
            t_j = resid - 1
            # coordinates on *shown* table
            ts_i = t_i
            ts_j = resid - res_i
            # compose message for table
            bp = best_preds[i]
            pprob = bp['path. prob.']
            pclass = bp['path. class']
            clsf = main_clsf if row['best classifier'] == 'main' else aux_clsf
            m = f'{SAV_code}: Rhapsody-{clsf} = {pprob:<3.2f} ({pclass})'
            if PolyPhen2:
                score = bp['PolyPhen-2 score']
                pclass = abbrev[bp['PolyPhen-2 path. class']]
                m += f', PolyPhen-2 = {score:<3.2f} ({pclass})'
            if EVmutation:
                score = bp['EVmutation score']
                pclass = abbrev[bp['EVmutation path. class']]
                m += f', EVmutation = {score:<3.2f} ({pclass})'
            if extra_plot is not None:
                score = table_other[t_i, t_j]
                m += f', other = {score:<3.2f}'
            info['table'][ts_i][ts_j] = m
            info['table'][aa_map[aa_wt]][ts_j] = f'{SAV_code[:-1]}: wild-type'
            if i % 19 == 0:
                # compose message for upper strip
                PDBID, ch, resid, aa, size = PDB_coords[i][[
                    'PDBID', 'chain', 'resid', 'resname', 'PDB size'
                ]]
                if size > 0:
                    m = f'{PDBID}:{ch}, resid {resid}, aa {aa}, size {size}'
                else:
                    m = 'no PDB found'
                info['strip'][0][ts_j] = m
                # compose message for bottom plot (residue-averages)
                bap = best_avg_preds[int(i / 19)]
                pprob = bap['path. prob.']
                pcl = bap['path. class']
                m = f'{SAV_code[:-1]}: Rhapsody-{clsf} = {pprob:<3.2f} ({pcl})'
                if PolyPhen2:
                    score = bap['PolyPhen-2 score']
                    pcl = abbrev[bap['PolyPhen-2 path. class']]
                    m += f', PolyPhen-2 = {score:<3.2f} ({pcl})'
                if EVmutation:
                    score = bap['EVmutation score']
                    pcl = abbrev[bap['EVmutation path. class']]
                    m += f', EVmutation = {score:<3.2f} ({pcl})'
                if extra_plot is not None:
                    score = avg_p_other[t_j]
                    m += f', other = {score:<3.2f}'
                info['bplot'][0][ts_j] = m

        def create_info_msg(ax_type, d):
            text = '[ \n'
            for row in d:
                text += '  ['
                for m in row:
                    text += f'"{m}",'
                text += '], \n'
            text += ']'
            return text

        area_js = Template('{{map_data}}["{{map_id}}_$areaid"] = { \n'
                           '  "img_id": "{{img_id}}", \n'
                           '  "map_id": "{{map_id}}", \n'
                           '  "coords": [$coords], \n'
                           '  "num_rows": $num_rows, \n'
                           '  "num_cols": $num_cols, \n'
                           '  "info_msg": $info_msg, \n'
                           '}; \n')

        # dump info in javascript format
        with open(filename + '.js', 'w') as f:
            f.write('var {{map_data}} = {}; \n')
            for ax_type, d in info.items():
                vars = {'areaid': ax_type}
                vars['coords'] = get_area_coords(all_axis[ax_type], html_data)
                vars['num_rows'] = 20 if ax_type == 'table' else 1
                vars['num_cols'] = nres_shown
                vars['info_msg'] = create_info_msg(ax_type, d)
                f.write(area_js.substitute(vars))

        return info
    return
Example no. 15
    }, {
        1: 4
    }, {
        1: 5
    }, {
        1: 10
    }, {
        1: 20
    }, {
        1: 50
    }]
}
plt.figure()

for i, eval_metrics in enumerate(('precision', 'recall', 'f1', 'roc_auc')):
    gride_clf_custom = GridSearchCV(clf,
                                    param_grid=gride_values,
                                    scoring=eval_metrics)
    gride_clf_custom.fit(X_twover_train, y_train)
    print('Grid best parameter (max. {0}): {1}'.format(
        eval_metrics, gride_clf_custom.best_params_))
    print('Grid best score (max. {0}): {1}'.format(
        eval_metrics, gride_clf_custom.best_score_))

    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    plot_class_region_for_classifier(gride_clf_custom, X_twover_test, y_test)
    plt.title(eval_metrics + '-oriented SVC')

    plt.show()
Example no. 16
PLOT_SI = True        # plot the curve in SI units? (assumed flag: referenced below but not defined in the original snippet)
PLOT_IMPERIAL = True  # plot the curve in imperial units?

g = 9.81  # accel due to gravity

# Impact velocities between 0.1 and 10m/s
impact_velocity = np.arange(0.1, 10, 0.1)

# Use conservation of energy, ignore aerodynamic effects
height = impact_velocity**2 / (2 * g)

# Plot in SI?
if PLOT_SI:
    # Set the plot size - 3x2 aspect ratio is best
    fig = plt.figure(figsize=(6, 4))
    ax = plt.gca()
    plt.subplots_adjust(bottom=0.17, left=0.17, top=0.96, right=0.96)

    # Change the axis units font
    plt.setp(ax.get_ymajorticklabels(), fontsize=18)
    plt.setp(ax.get_xmajorticklabels(), fontsize=18)

    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')

    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    # Turn on the plot grid and set appropriate linestyle and color
    ax.grid(True, linestyle=':', color='0.75')
    ax.set_axisbelow(True)
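The PLOT_IMPERIAL branch is not shown in the snippet; a hypothetical continuation that plots the same curve in imperial units might look like:

if PLOT_IMPERIAL:
    M_TO_FT = 3.28084  # metres to feet
    fig = plt.figure(figsize=(6, 4))
    plt.plot(impact_velocity * M_TO_FT, height * M_TO_FT)
    plt.xlabel('Impact velocity (ft/s)', fontsize=18)
    plt.ylabel('Drop height (ft)', fontsize=18)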
Example no. 17
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C), svm.LinearSVC(C=C),
          svm.SVC(kernel='rbf', gamma=0.7,
                  C=C), svm.SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, y) for clf in models)

# title for the plots
titles = ('SVC with linear kernel', 'LinearSVC (linear kernel)',
          'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel')

# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)

X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)

for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
titles = pd.read_sql('SELECT * FROM titles', dbc)

# ## Visualize the number of employees with each title.

# emp_title = titles[['title', 'emp_no']].groupby('title').count()
# import matplotlib as plt
# emp_title.plot.bar(rot=45)

get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt

titles.title.value_counts()
titles.title.value_counts().plot()
titles.title.value_counts().plot.bar()
plt.xticks(rotation=30)
plt.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.2)
plt.show()

#only current employees
from datetime import datetime
datetime.now()
current_titles = titles[titles.to_date > datetime.now().date()]
current_titles.title.value_counts().plot.bar()

# ## Visualize how frequently employees change titles.

# how many titles does each employee have?
titles.emp_no.value_counts()
titles.emp_no.value_counts().value_counts()
titles.emp_no.value_counts().value_counts().plot.bar()
Example no. 19
text(58,40e3,'Galactic Center Surveys**\n0.02-200 GHz @0.7 Hz-0.2 GHz (>300h)',color='b',fontsize=14)
text(200,3500,'**Shostak+1982, Vallee+1985,\n   Marx+1991, Steffes+1994,\n   Mauersberger+1996, Sullivan+1996,\n   Backus+2005, Harp+2010,\n   Williams+2013, Tingay+2016',color='b',fontsize=10)


########## More annotation and scaling

# Axes labels
xlabel('Channel RMS Sensitivity (mJy)')
ylabel('Distance (ly)')

# Axes scaling
ax = gca()
ax.set_xscale('log')
ax.set_yscale('log')
axis([0.1,3e6,1,3e8])
subplots_adjust(top=0.98,left=0.08,right=0.98,bottom=0.08)

# Set ticklabel size
labelsize=18
ticksize=8
linewidth=1.5
for tick in ax.xaxis.get_major_ticks():
    tick.label1.set_fontsize(labelsize)
for tick in ax.yaxis.get_major_ticks():
    tick.label1.set_fontsize(labelsize)

# Set label size
ax.xaxis.label.set_fontsize(labelsize+4)
ax.yaxis.label.set_fontsize(labelsize+4)
ax.title.set_fontsize(labelsize+4)
 def adjust(self, top=None, bottom=None, left=None, right=None):

     # pass keywords: plt.subplots_adjust's positional order is
     # (left, bottom, right, top), which does not match this signature
     plt.subplots_adjust(top=top, bottom=bottom, left=left, right=right)
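Called with keywords, any subset of margins can be tuned independently; a minimal hypothetical usage (helper being an instance of the class above):

helper.adjust(top=0.95, bottom=0.12)
helper.adjust(left=0.08, right=0.98)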