Example #1
def plot_clustermap(dat, cmap='purple', save_fig=False, save_name='Clustermap', db=None):
    """Plot clustermap.

    Parameters
    ----------
    dat : pandas.DataFrame
        Data to create clustermap from.
    cmap : {'purple', 'blue'}, optional
        Colour scheme to use for the heatmap.
    save_fig : bool, optional
        Whether to save out the figure.
    save_name : str, optional
        File name to use if the figure is saved.
    db : database object, optional
        Database object providing the output path (`figs_path`) for saved figures.
    """

    # Set up plotting and aesthetics
    sns.set()
    sns.set_context("paper", font_scale=1.5)

    # Set colourmap
    if cmap == 'purple':
        cmap = sns.cubehelix_palette(as_cmap=True)
    elif cmap == 'blue':
        cmap = sns.cubehelix_palette(as_cmap=True, rot=-.3, light=0.9, dark=0.2)

    # Create the clustermap
    cg = sns.clustermap(dat, cmap=cmap, method='complete', metric='cosine', figsize=(12, 10))

    # Fix axes
    cg.cax.set_visible(True)
    _ = plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=60, ha='right')
    _ = plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)

    # Save out - if requested
    if save_fig:

        db = check_db(db)  # check_db (defined elsewhere) is assumed to supply a default database when db is None
        s_file = os.path.join(db.figs_path, save_name + '.svg')

        cg.savefig(s_file, transparent=True)
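A minimal usage sketch for the function above (hypothetical data; save_fig is left off so the database helper is never needed):

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

demo = pd.DataFrame(np.random.rand(10, 6), columns=list('ABCDEF'))  # hypothetical data
plot_clustermap(demo, cmap='blue')
plt.show()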
Example #2
def build_color_palette(num_items, weeks_before_switch):
    num_pre_switch_colors = weeks_before_switch
    num_post_switch_colors = num_items - num_pre_switch_colors
    print('preparing colors for {} pre-oxygen-switch'.format(
        num_pre_switch_colors),
          'samples and {} post-switch samples'
          .format(num_post_switch_colors))

    # get the first colors from this palette:
    pre_switch_colors = \
        sns.cubehelix_palette(11, start=.5, rot=-.75)[0:num_pre_switch_colors]
    print(pre_switch_colors)

    # get post-switch colors here:
    # post_switch_colors = sns.diverging_palette(220, 20,
    # n=6)[::-1][0:num_post_switch_colors]
    post_switch_colors = \
        sns.color_palette("coolwarm", num_post_switch_colors)
    # sns.light_palette("navy", reverse=True)[0:num_post_switch_colors]
    rgb_colors = pre_switch_colors + post_switch_colors
    sns.palplot(rgb_colors)

    # check that we got the right amount
    print(num_items)
    assert (num_items == len(rgb_colors))
    print("")
    return rgb_colors
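For example, a hypothetical call asking for colours for ten samples, four of them taken before the oxygen switch:

import seaborn as sns

colors = build_color_palette(num_items=10, weeks_before_switch=4)  # 4 cubehelix + 6 coolwarm colours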
Example #3
def get_seaborn_colorbar(dfr, classes):
    """Return a colorbar representing classes, for a Seaborn plot.

    The aim is to get a pd.Series for the passed dataframe columns,
    in the form:
    0    colour for class in col 0
    1    colour for class in col 1
    ...  colour for class in col ...
    n    colour for class in col n
    """
    levels = sorted(list(set(classes.values())))
    paldict = {
        lvl: pal
        for (lvl, pal) in zip(
            levels,
            sns.cubehelix_palette(
                len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2
            ),
        )
    }
    lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())}
    col_cb = pd.Series(dfr.index).map(lvl_pal)
    # The col_cb Series index now has to match the dfr.index, but
    # we don't create the Series with this (and if we try, it
    # fails) - so change it with this line
    col_cb.index = dfr.index
    return col_cb
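A sketch of how the returned Series is typically consumed, passed as row_colors to seaborn's clustermap (the toy dataframe and class labels below are hypothetical):

import numpy as np
import pandas as pd
import seaborn as sns

dfr = pd.DataFrame(np.random.rand(4, 4), index=['g1', 'g2', 'g3', 'g4'])
classes = {'g1': 'A', 'g2': 'A', 'g3': 'B', 'g4': 'B'}   # class label per row
row_colors = get_seaborn_colorbar(dfr, classes)
sns.clustermap(dfr, row_colors=row_colors)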
Example #4
def plot_kmer_dists(kmers_dists, kmers_scores, kmers, out_pdf):
    ''' Plot a heatmap of k-mer distances and scores.'''

    # shape scores
    kmers_scores = kmers_scores.reshape((-1,1))

    cols = 20
    plt.figure()
    ax_dist = plt.subplot2grid((1,cols), (0,0), colspan=cols-1)
    ax_score = plt.subplot2grid((1,cols), (0,cols-1), colspan=1)

    sns.heatmap(kmers_dists, cmap=sns.cubehelix_palette(n_colors=(1+kmers_dists.max()), reverse=True, as_cmap=True), ax=ax_dist, yticklabels=kmers, xticklabels=False)
    for tick in ax_dist.get_yticklabels():
        if kmers_dists.shape[0] <= 100:
            tick.set_fontsize(4)
        elif kmers_dists.shape[0] <= 250:
            tick.set_fontsize(2.5)
        else:
            tick.set_fontsize(2)

    score_max = kmers_scores.max()
    sns.heatmap(kmers_scores, cmap = 'RdBu_r', vmin=-score_max, vmax=score_max, ax=ax_score, yticklabels=False, xticklabels=False)

    plt.savefig(out_pdf)
    plt.close()
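A usage sketch with random inputs (all hypothetical; the function expects a square distance matrix, one score per k-mer, and the k-mer labels):

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

n = 20
dists = np.random.randint(0, 8, size=(n, n))     # hypothetical k-mer distance matrix
scores = np.random.randn(n)                      # hypothetical k-mer scores
labels = ['kmer_%02d' % i for i in range(n)]
plot_kmer_dists(dists, scores, labels, 'kmer_heatmap.pdf')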
Example #5
def pland(h_in):
    cmap = sns.cubehelix_palette(8, as_cmap=True)
    plt.pcolormesh(h_in.reshape(ny, nx, order="C"), cmap=cmap)
    # plt.contourf(h_in.reshape(ny, nx, order='C'), cmap=cmap)
    plt.axis("equal")
    plt.colorbar()
    plt.show()
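pland reads the grid shape from module-level ny and nx; a quick sketch with hypothetical dimensions and a random field:

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

ny, nx = 30, 40                  # hypothetical grid dimensions
pland(np.random.rand(ny * nx))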
Example #6
def plot_domain_cdf(cdf_dict, cur_channel, quantity, min_radius=3.5, use_legend=True, plot_every=0.5):

    desired_r_bins = np.arange(min_radius, 10 + plot_every, plot_every)
    r_bins = np.array(cdf_dict['radius_scaled_used'])
    r_bins = np.around(r_bins, 3) # This way we can conveniently compare floats...

    # Figure out which r_bins are in the desired_r_bins
    r_bins_mask = np.in1d(r_bins, desired_r_bins)

    r_bins_filtered = r_bins[r_bins_mask]

    colors_to_use = sns.cubehelix_palette(n_colors = len(r_bins_filtered))
    count = 0
    # plt.hold(True)  # not needed: repeated plot calls accumulate by default; plt.hold was removed in Matplotlib 3.x
    for i, plot_this_value in zip(range(len(r_bins)), r_bins_mask): # Number of bins
        if plot_this_value:
            cur_data = cdf_dict[cur_channel, i]
            mean_data = cur_data.groupby(level=0).agg(['mean', sp.stats.sem])
            plt.plot(mean_data[quantity + '_midbin', 'mean'], mean_data['ecdf', 'mean'],
                    color=colors_to_use[count], label=r_bins[i])
            # Plot error
            #x = mean_data[quantity + '_midbin', 'mean']
            #y = mean_data['ecdf', 'mean']
            #yerr = mean_data['ecdf', 'sem']
            #plt.fill_between(x, y + yerr, y - yerr, alpha=0.3, color=colors_to_use[count])
            count += 1
    # plt.hold(False)  # plt.hold was removed in Matplotlib 3.x

    if use_legend:
        plt.legend(loc='best')
    plt.ylabel('Average Empirical CDF')
Example #7
 def __init__(self, picklePath, figsize=(8,4), figFormat="tiff", radErrLim=((0, .02))):
     self.figsize = figsize
     self.figFormat = figFormat
     self.data = pickle.load(open(picklePath, "rb"))
     self.radErrLim = radErrLim
     self.cmap = seaborn.cubehelix_palette(start=8, light=1, as_cmap=True)
     self.figDir="figs"
Example #8
def compare_spectra():
    import mywfc3.stgrism as st
    import unicorn
    
    ### Fancy colors
    import seaborn as sns
    import matplotlib.pyplot as plt
    cmap = sns.cubehelix_palette(as_cmap=True, light=0.95, start=0.5, hue=0.4, rot=-0.7, reverse=True)
    cmap.name = 'sns_rot'
    plt.register_cmap(cmap=cmap)
    sns.set_style("ticks", {"ytick.major.size":3, "xtick.major.size":3})
    plt.set_cmap('sns_rot')
    #plt.gray()
    
    fig = st.compare_methods(x0=787, y0=712, v=np.array([-1.5,4])*0.6, NX=180, NY=40, direct_off=100, final=True, mask_lim = 0.02)
    #fig.tight_layout()
    unicorn.plotting.savefig(fig, '/tmp/compare_model_star.pdf', dpi=300)

    fig = st.compare_methods(x0=485, y0=332, v=np.array([-1.5,4])*0.2, NX=180, NY=40, direct_off=100, final=True, mask_lim = 0.1)
    unicorn.plotting.savefig(fig, '/tmp/compare_model_galaxy.pdf', dpi=300)

    fig = st.compare_methods(x0=286, y0=408, v=np.array([-1.5,4])*0.08, NX=180, NY=40, direct_off=100, final=True, mask_lim = 0.1)
    unicorn.plotting.savefig(fig, '/tmp/compare_model_galaxy2.pdf', dpi=300)

    fig = st.compare_methods(x0=922, y0=564, v=np.array([-1.5,4])*0.2, NX=180, NY=40, direct_off=100, final=True, mask_lim = 0.15)
    unicorn.plotting.savefig(fig, '/tmp/compare_model_galaxy3.pdf', dpi=300)
Example #9
    def plot_mean_image(self):

        cmap = sns.cubehelix_palette(as_cmap=True, reverse=True, light=1, dark=0)
        m = Mosaic(self.inputs.mean_file, self.inputs.mean_file, self.inputs.mask_file, step=1)
        m.plot_overlay(vmin=0, cmap=cmap, fmt="%d")
        m.savefig("mean_func.png")
        m.close()
Example #10
def plottingDrawdownPeriods(cumReturns,
                            drawDownTable,
                            top,
                            ax,
                            title='Top 5 Drawdown Periods'):
    y_axis_formatter = FuncFormatter(two_dec_places)
    ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
    cumReturns.plot(ax=ax)
    lim = ax.get_ylim()

    tmp = drawDownTable.sort_values(by='draw_down')
    topDrawdown = tmp.groupby('recovery').first()
    topDrawdown = topDrawdown.sort_values(by='draw_down')[:top]
    colors = sns.cubehelix_palette(len(topDrawdown))[::-1]
    for i in range(len(colors)):
        recovery = topDrawdown.index[i]
        ax.fill_between((topDrawdown['peak'][i], recovery),
                        lim[0],
                        lim[1],
                        alpha=.4,
                        color=colors[i])

    ax.set_title(title)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Cumulative returns'], loc='best')
    ax.set_xlabel('')
    return ax
Example #11
    def show_geo_graph(graph):
        node_position = nx.get_node_attributes(graph,'pos')

        # Find node near center (0.5,0.5)
        d_min            = 1
        node_near_center = 0
        for node in node_position:
            x, y = node_position[node]
            distance = (x - 0.5)**2 + (y - 0.5)**2
            if distance < d_min:
                node_near_center = node
                d_min = distance

        # Color by path length from node near center
        color_node        = dict(nx.single_source_shortest_path_length(graph, node_near_center))
        array_color_node  = np.array(list(color_node.values()))

        sns.set_style('darkgrid')
        cmap = sns.cubehelix_palette(start = .5, rot = -.65, dark = .4, light = .6, as_cmap = True)
        plt.figure(figsize = (10, 8))
        nx.draw_networkx_edges(graph, node_position, nodelist=[node_near_center],alpha=0.4)
        nx.draw_networkx_nodes(graph, node_position, nodelist=color_node.keys(),
                               node_size = 80,
                               node_color = array_color_node,
                               cmap = cmap)

        plt.xlim(0,1)
        plt.ylim(0,1)
        plt.axis('off')
        file = str(graph_path) + "/graph.pdf"
        plt.savefig(file, transparent = True)
Example #12
def replicated_map_movie(priors,posterior, frames):
    """

    :param priors: list of xidplus.prior classes
    :param posterior: xidplus.posterior class
    :param frames: number of frames
    :return: Movie of replicated maps. Each frame is a sample from the posterior
    """
    figs,fig=plot_map(priors)
    mod_map_array=postmaps.replicated_maps(priors,posterior,frames)
    cmap=sns.cubehelix_palette(8, start=.5, rot=-.75,as_cmap=True)

    def animate(i):
        for b in range(0,len(priors)):
            figs[b]._data[priors[b].sy_pix-np.min(priors[b].sy_pix)-1,priors[b].sx_pix-np.min(priors[b].sx_pix)-1]=mod_map_array[b][:,i]
            figs[b].show_colorscale(vmin=np.min(priors[b].sim),vmax=np.max(priors[b].sim),cmap=cmap)


        return figs
    # call the animator.  blit=True means only re-draw the parts that have changed.
    anim = animation.FuncAnimation(fig, animate,
                               frames=frames, interval=1000)


    # call our new function to display the animation
    return display_animation(anim)
Example #13
def plot_map(priors):

    """Plot of the fitted maps, with fitted objects overplotted

    :param priors: list of xidplus.prior classes
    :return: the default xidplus map plot
    """
    sns.set_style("white")

    cmap=sns.cubehelix_palette(8, start=.5, rot=-.75,as_cmap=True)
    hdulists=list(map(lambda prior:postmaps.make_fits_image(prior,prior.sim), priors))
    fig = plt.figure(figsize=(10*len(priors),10))
    figs=[]
    for i in range(0,len(priors)):
        figs.append(aplpy.FITSFigure(hdulists[i][1],figure=fig,subplot=(1,len(priors),i+1)))

    for i in range(0,len(priors)):
        vmin=np.min(priors[i].sim)
        vmax=np.max(priors[i].sim)
        figs[i].show_colorscale(vmin=vmin,vmax=vmax,cmap=cmap)
        figs[i].show_markers(priors[i].sra, priors[i].sdec, edgecolor='black', facecolor='black',
                marker='o', s=20, alpha=0.5)
        figs[i].tick_labels.set_xformat('dd.dd')
        figs[i].tick_labels.set_yformat('dd.dd')
        figs[i].add_colorbar()
        figs[i].colorbar.set_location('top')
    return figs,fig
Example #14
def plot_drawdown_periods(returns, ax, n_drawdown_periods=10):
    """
    Plots cumulative returns and highlights top drawdown periods.

    :param returns: (pd.Series) daily non-cumulative returns
    :param n_drawdown_periods: (int) number of drawdown periods
    :param ax: (matplotlib.Axes) axes to plot on
    :return: (matplotlib.Axes)
    """
    y_axis_formatter = FuncFormatter(one_dec_places)
    ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
    df_cum_rets = get_cum_returns(returns, starting_value=1.0)
    df_drawdowns = gen_drawdown_table(
        returns, n_drawdown_periods=n_drawdown_periods)

    df_cum_rets.plot(ax=ax)
    lim = ax.get_ylim()
    colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
    objs = df_drawdowns[['peak date', 'recovery date']]
    for i, (peak, recovery) in objs.iterrows():
        if pd.isnull(recovery):
            recovery = returns.index[-1]
        ax.fill_between((peak, recovery), lim[0], lim[1],
                        alpha=.4,
                        color=colors[i])

    ax.set_title('Top %i Drawdown Periods' % n_drawdown_periods)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left')
    ax.set_xlabel('')
    return ax
Example #15
def cmap(data, use_sns=True, robust=True):
    '''Get a default colormap from the given data.

    If the data is boolean, use a black and white colormap.

    If the data has both positive and negative values,
    use a diverging colormap ('coolwarm').

    Otherwise, use a sequential map: either cubehelix or 'OrRd'.

    Parameters
    ----------
    data : np.ndarray
        Input data

    use_sns : bool
        If True, and `seaborn` is installed, use cubehelix maps for
        sequential data

    robust : bool
        If True, discard the top and bottom 2% of data when calculating
        range.

    Returns
    -------
    cmap : matplotlib.colors.Colormap
        - If `data` has dtype=boolean, `cmap` is 'gray_r'
        - If `data` has only positive or only negative values,
          `cmap` is 'OrRd' (`use_sns==False`) or cubehelix
        - If `data` has both positive and negatives, `cmap` is 'coolwarm'

    See Also
    --------
    matplotlib.pyplot.colormaps
    seaborn.cubehelix_palette
    '''

    data = np.atleast_1d(data)

    if data.dtype == 'bool':
        return plt.get_cmap('gray_r')

    data = data[np.isfinite(data)]

    if robust:
        min_p, max_p = 2, 98
    else:
        min_p, max_p = 0, 100

    max_val = np.percentile(data, max_p)
    min_val = np.percentile(data, min_p)

    if min_val >= 0 or max_val <= 0:
        if use_sns and _HAS_SEABORN:
            return sns.cubehelix_palette(light=1.0, as_cmap=True)
        else:
            return plt.get_cmap('OrRd')

    return plt.get_cmap('coolwarm')
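The function above relies on a module-level _HAS_SEABORN flag that is not shown in the snippet; a minimal sketch of the usual import guard (an assumption, not necessarily the original module's code):

try:
    import seaborn as sns
    _HAS_SEABORN = True
except ImportError:
    _HAS_SEABORN = False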
Example #16
def plot_heatmap(ax):
    ax.hexbin(txx[:, 0], txx[:, 1],
              cmap=sns.cubehelix_palette(as_cmap=True),
              mincnt=1,
              bins='log'
              )
    ax.set_xlabel("tIC 1", fontsize=16)
    ax.set_ylabel("tIC 2", fontsize=16)
Example #17
def plot_proportions(df, by='relative_height'):
    props = proportion(df, [by, 'width'])
    with sns.color_palette(sns.cubehelix_palette()):
        sns.FacetGrid(props.reset_index(), hue='width').map(plt.plot, by, 'p(around)', marker='o')

    plt.legend(loc='lower right')
    plt.xlabel('obstacle height {} lowest height not afforded'.format('-' if by == 'relative_height' else '/'))
    plt.ylabel('P(around)')
Example #18
def plotAnArray(array_obj): 
	import matplotlib.pyplot as plt
	import seaborn as sns
	sns.set_style('whitegrid')
	sns.set_context("paper")
	sns.set_palette(sns.cubehelix_palette(5, start=2, rot=0.45, dark=0.2, light=.8, reverse=True))
	for i,row in enumerate(array_obj[1:,1:]): 
		plt.plot(np.arange(array_obj.shape[1]-1),row,marker='o',label='q = '+str(i))
Example #19
def make_JointPlot(plot, region, data, backgrounds) :

    sample_to_plot = []
    if data.name == plot.sample : sample_to_plot.append(data)
    if not len(sample_to_plot) :
        for bk in backgrounds :
            if bk.name == plot.sample : sample_to_plot.append(bk)
    if len(sample_to_plot) == 0 or len(sample_to_plot) > 1 :
        msg('ERROR make_JointPlot received %d samples to plot for plot with name %s'%(len(sample_to_plot), plot.name))
        sys.exit()

    # turn this tree into an array :)
    sample_to_plot = sample_to_plot[0]
    selection_ = '(' + region.tcut + ') * eventweight * ' + str(sample_to_plot.scale_factor)
    tree_array = tree2rec(sample_to_plot.tree, branches=[plot.x_var, plot.y_var],
                            selection=selection_)
    tree_array.dtype.names = (plot.x_var, plot.y_var)
    x_arr = tree_array[plot.x_var]
    y_arr = tree_array[plot.y_var]

    sns.set(style="white")

    # stats?
    stat_func_ = None
    if plot.stat_func == "kendalltau" :
        from scipy.stats import kendalltau
        stat_func_ = kendalltau
    elif plot.stat_func == None :
        from scipy.stats import pearsonr
        stat_func_ = pearsonr

    j_plot_grid = None
    if plot.cmap == None or plot.cmap == "default" :
        j_plot_grid = sns.jointplot(x_arr, y_arr, kind = plot.kind, stat_func=stat_func_, color = plot.color, linewidth = plot.line_width, ylim=[plot.y_range_min,plot.y_range_max], xlim=[plot.x_range_min,plot.x_range_max])
        #j_plot_grid = sns.jointplot(x_arr, y_arr, kind = plot.kind, stat_func=stat_func_, color = plot.color, linewidth = plot.line_width, joint_kws={"n_levels":plot.n_levels, "shade":True}, ylim=[plot.y_range_min,plot.y_range_max], xlim=[plot.x_range_min,plot.x_range_max])

    elif plot.cmap == "cubehelix" :
        cmap_ = sns.cubehelix_palette(as_cmap=True, dark=0, light=1, reverse = True)
        j_plot_grid = sns.jointplot(x_arr, y_arr, kind = plot.kind, stat_func=stat_func_, linewidth = plot.line_width, joint_kws={"cmap":cmap_, "n_levels":plot.n_levels, "shade":True}, ylim=[plot.y_range_min, plot.y_range_max], xlim=[plot.x_range_min,plot.x_range_max])
    elif plot.cmap == "blues" :
        j_plot_grid = sns.jointplot(x_arr, y_arr, kind = plot.kind, stat_func=stat_func_, linewidth = 1.0, joint_kws={"cmap":"Blues", "n_levels":plot.n_levels, "shade":True, "shade_lowest":False}, ylim=[plot.y_range_min, plot.y_range_max], xlim=[plot.x_range_min,plot.x_range_max])
    else :
        msg("cmap attribute of joint plot not yet added")
        sys.exit()

    j_plot_grid.fig.suptitle(plot.title)
    j_plot_grid.fig.subplots_adjust(top=0.935)
    j_plot_grid.set_axis_labels(plot.x_label, plot.y_label)


    # save the plot to file
    outname = plot.name + ".eps"
    j_plot_grid.savefig(outname)
    out = indir + "/plots/" + outdir 
    utils.mv_file_to_dir(outname, out, True)
    fullname = out + "/" + outname
    msg("%s saved to : %s"%(outname, os.path.abspath(fullname)))
Example #20
def heatmap_plot_zscore_bigneuron(df_zscore_features, df_all, output_dir, title=None):

    print "heatmap plot:bigneuron"

    #taiwan
    metric ='nt_type'
    mtypes = np.unique(df_all[metric])
    print(mtypes)
    mtypes_pal = sns.color_palette("hls", len(mtypes))

    mtypes_lut = dict(zip(mtypes, mtypes_pal))
    mtypes_colors = df_all[metric].map(mtypes_lut)



    linkage = hierarchy.linkage(df_zscore_features, method='ward', metric='euclidean')

    data = df_zscore_features.transpose()
    row_linkage = hierarchy.linkage(data, method='ward', metric='euclidean')
    feature_order = hierarchy.leaves_list(row_linkage)

    #print data.index
    matchIndex = [data.index[x] for x in feature_order]
    #print matchIndex
    data = data.reindex(matchIndex)

    pl.figure()
    g = sns.clustermap(data, row_cluster = False, col_linkage=linkage, method='ward', metric='euclidean',
                       linewidths = 0.0,col_colors = [mtypes_colors],
                       cmap = sns.cubehelix_palette(light=1, as_cmap=True),figsize=(40,10))

    pl.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
    pl.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
    #g.ax_heatmap.set_xticklabels([])
    pl.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95)  # !!!!!

    if title:
        pl.title(title)


    location ="best"
    num_cols=1
    # Legend for row and col colors

    for label in mtypes:
         g.ax_row_dendrogram.bar(0, 0, color=mtypes_lut[label], label=label, linewidth=0.0)
         g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0)

    filename = output_dir + '/zscore_feature_heatmap.png'
    pl.savefig(filename, dpi=300)
    #pl.show()
    print("save zscore matrix heatmap figure to :" + filename)
    pl.close()
    print "done clustering and heatmap plotting"
    return linkage
Example #21
def draw_graph_heatmap(graph, value_map, output, directed=False, palette=sns.cubehelix_palette(10, start=.5, rot=-.75), position=None):

  # for normalize
  values = value_map.values()

  maxv = max(values)
  minv = min(values)

  if len(values) != len(graph):
    # some graph nodes missing from map.
    # make them 0
    minv = min(minv, 0)

  gt_graph = gt.Graph(directed=directed)

  node_map = {node: gt_graph.add_vertex() for node in graph}

  if not directed:
    seen_edges = set()

  for node, edges in graph.items():
    i = node_map[node]
    for e in edges:
      j = node_map[e]

      if directed:
        gt_graph.add_edge(i, j)
      else:
        if (j, i) not in seen_edges:
          gt_graph.add_edge(i, j)
          seen_edges.add((i, j))

  node_intensity = gt_graph.new_vertex_property("vector<float>")
  node_label = gt_graph.new_vertex_property("string")

  for id, value in value_map.items():
    node = node_map[id]
    node_intensity[node] = find_color(value, maxv, minv, palette)
    node_label[node] = id

  for id in graph:
    if id not in value_map:
      node = node_map[id]
      node_intensity[node] = find_color(0, maxv, minv, palette)
      node_label[node] = id

  if position is None:
    position = gt.sfdp_layout(gt_graph)

  gt.graph_draw(gt_graph, pos=position,
                vertex_text=node_label,
                vertex_fill_color=node_intensity,
                output=output)

  return position
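draw_graph_heatmap calls a find_color helper that is not included in the snippet; a plausible sketch that picks the palette entry nearest to the value's normalised position (an assumption, the original implementation may differ):

def find_color(value, maxv, minv, palette):
  # Normalise value into [0, 1] and index into the discrete palette.
  if maxv == minv:
    return palette[0]
  frac = (value - minv) / float(maxv - minv)
  idx = int(round(frac * (len(palette) - 1)))
  return palette[min(max(idx, 0), len(palette) - 1)]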
Example #22
def gals_in_groups():


    gals = pf.open("groups/G3CGalv07.fits")[1].data   #galaxy group catalog
    #gal = gal[gal['GroupID']!=0]              #keeping galaxies that are in groups  
    
    match = filter('GAMA-MATCHED') 

    mask = np.in1d(match['CATAID'] , gals[gals['GroupID']!=0]['CATAID'])

    zphot = np.loadtxt("zphot_matched.txt")

    match = match[mask]
    zphot = zphot[mask]

    palette = itertools.cycle(sns.color_palette())
    fig, axs = plt.subplots(1, 1, figsize=(6, 6), sharex=True)
    palette = itertools.cycle(sns.cubehelix_palette(1, light=0.6))
    axs.scatter(zphot , match['Z'] , color = next(palette) , s = 0.001)

    axs.set_xlim([0,1.])
    axs.set_ylim([0,1.])

    axs.set_xlabel(r'$zphot$')
    axs.set_ylabel(r'$zspec$')

    fig.tight_layout()
    plt.savefig("/home/vakili/public_html/files/photoz_galaxies_ingroups.png")
    plt.close()


    fig, axs = plt.subplots(1, 1, figsize=(6, 6), sharex=True)
    palette = itertools.cycle(sns.cubehelix_palette(1, light=0.6))
    axs.scatter(zphot , (zphot-match['Z'])/(1. + match['Z']), color = next(palette), s = 0.001)
    axs.set_xlabel("photo-z")
    axs.set_ylabel(r"$\delta z / (1+z)$")
    axs.set_xlim([0,1.])
    axs.set_ylim([-0.1,0.1])
    fig.tight_layout()
    plt.savefig("/home/vakili/public_html/files/dz_galaxies_ingroups.png")

    return None
Example #23
def make_plot(data,cols=[11,12,13],label='Face-on',
              output_file='J_profiles_sim_round.pdf'):
	''' Takes the results from ret_results.dat and plots a series of J-factor
		and D-factor profiles for each model '''
	f,a=plt.subplots(2,1,figsize=[3.32,4.])
	plt.subplots_adjust(hspace=0.)
	cm = sns.cubehelix_palette(8,start=.5,rot=-.75,as_cmap=True)
	cNorm = colors.Normalize(vmin=0.4,vmax=2.5)
	sM = cmx.ScalarMappable(norm=cNorm,cmap=cm)
	## 1. For each model in ret_results.dat we plot a profile of the J-factor
	##    against beam angle. ret_results.dat contains the central density,
	##	  scale-radii and tidal radii for each model
	tmax = np.logspace(-2.,np.log10(0.5),20)  ## angle grid
	for d in data[1:]:
		Jvals_r = np.zeros(len(tmax))
		Dvals_r = np.zeros(len(tmax))
		for n,i in enumerate(tmax):
			if(d[0]>1.): ## Prolate case
				ba,ca = 1./d[0],1./d[0]
				print ba,ca,i
				model = cJ.AlphaBetaGammaDensityProfile(np.array([alpha,beta,gamma]),d[cols[0]],d[cols[1]],d[cols[2]],np.array([1.,ba,ca]),True)
				Jvals_r[n] = np.log10(model.J_far_factor(Distance,i,"x")/sJ.GEV2cm5toMsol2kpc5)
				Dvals_r[n] = np.log10(model.D_far_factor(Distance,i,"x")/sJ.GEVcm2toMsolkpc2)
			else: ## Oblate case
				ba,ca = 1.,d[0]
				print ba,ca,i
				model = cJ.AlphaBetaGammaDensityProfile(np.array([alpha,beta,gamma]),d[cols[0]],d[cols[1]],d[cols[2]],np.array([1.,ba,ca]),True)
				Jvals_r[n] = np.log10(model.J_far_factor(Distance,i,"z")/sJ.GEV2cm5toMsol2kpc5)
				Dvals_r[n] = np.log10(model.D_far_factor(Distance,i,"z")/sJ.GEVcm2toMsolkpc2)
		l,=a[0].plot(tmax,Jvals_r,color=sM.to_rgba(d[0]))
		l2,=a[1].plot(tmax,Dvals_r,color=sM.to_rgba(d[0]))
		if(d[0]==1.):
			l.set_dashes((2,1))
			l2.set_dashes((2,1))
	## 2. Also plot the spherical formulae from Paper I
	l,=a[0].plot(tmax,sJ.wyns_formulaJ_NFW_data(Velocity_dispersion,rh*1000.*gf
	             ,Distance,tmax,2.*rh*gf,walker_or_wolf="walker"),
				ls='dashed',color='k')
	l.set_dashes((4,1))
	l,=a[1].plot(tmax,sJ.wyns_formulaD_NFW_data(Velocity_dispersion,rh*1000.*gf,Distance,tmax,2.*rh*gf,walker_or_wolf="walker"),ls='dashed',color='k')
	l.set_dashes((4,1))
	## 3. Add the colorbar
	divider = make_axes_locatable(a[0])
	cba = divider.append_axes("top",size="5%",pad=0.)
	cbl =matplotlib.colorbar.ColorbarBase(cba,cmap=cm,norm=cNorm,orientation='horizontal')
	cbl.set_label(r'$q$',labelpad=-30.4)
	cbl.ax.xaxis.set_ticks_position('top')
	a[0].yaxis.get_major_ticks()[0].label1.set_visible(False)
	a[0].set_xticklabels([])
	a[0].set_ylabel(r'$\log_{10}(\mathrm{J}(\theta)/\,\mathrm{GeV^2\,cm}^{-5})$')
	a[1].set_xlabel(r'$\theta/\,\mathrm{deg}$')
	a[1].set_ylabel(r'$\log_{10}(\mathrm{D}(\theta)/\,\mathrm{GeV\,cm}^{-2})$')
	a[0].text(0.9,0.1,label,horizontalalignment='right',verticalalignment='bottom',transform=a[0].transAxes,fontsize=14)
	plt.savefig(output_file,bbox_inches='tight')
Example #24
def plot_cluster_centers(ax):
    ax.hexbin(txx[:, 0], txx[:, 1],
              cmap=sns.cubehelix_palette(as_cmap=True),
              mincnt=1,
              bins='log',
              )
    ax.scatter(kmeans.cluster_centers_[:, 0],
               kmeans.cluster_centers_[:, 1],
               s=40, c=colors[0],
               )
    ax.set_xlabel("tIC 1", fontsize=16)
    ax.set_ylabel("tIC 2", fontsize=16)
Example #25
def test_cchannelchain(Simulator, plt, rng, seed, outfile):
    dims = 2
    layers = 5
    n_neurons = 100
    synapse = nengo.Lowpass(0.01)

    with nengo.Network(seed=seed) as model:
        value = nengo.dists.UniformHypersphere().sample(
            dims, 1, rng=rng)[:, 0]
        stim = nengo.Node(value)

        ens = [nengo.Ensemble(n_neurons, dimensions=dims)
               for _ in range(layers)]

        nengo.Connection(stim, ens[0])
        for i in range(layers - 1):
            nengo.Connection(ens[i], ens[i+1], synapse=synapse)

        p_input = nengo.Probe(stim)
        p_outputs = [nengo.Probe(ens[i], synapse=synapse)
                     for i in range(layers)]

    sim = Simulator(model)
    sim.run(0.5)

    if type(plt).__name__ != 'Mock':
        figsize = (onecolumn, 4.0) if horizontal else (onecolumn * 2, 4.0)
        setup(figsize=figsize)
        colors = sns.cubehelix_palette(5)
        lines = []
        for i, p_output in enumerate(p_outputs):
            l = plt.plot(sim.trange(), sim.data[p_output],
                         c=colors[i % len(colors)])
            lines.append(l[0])
        plt.legend(lines, ["Ensemble %d" % i for i in range(1, 6)],
                   loc='best')
        plt.plot(sim.trange(), sim.data[p_input], c='k', lw=1)
        plt.xlim(right=0.12)
        plt.yticks((-0.5, 0, 0.5))
        plt.xticks((0, 0.05, 0.1))
        plt.ylabel('Decoded output')
        plt.xlabel('Time (s)')
        sns.despine()
        plt.saveas = 'results-1.svg'

    outfile.write('"n_neurons": %d,\n' % sum(
        e.n_neurons for e in model.all_ensembles))
    outfile.write('"simtime": 0.5,\n')
    outfile.write('"rmse": %f,\n' % (
        rmse(sim.data[p_outputs[-1]][sim.trange() > 0.4], value)))

    if hasattr(sim, 'close'):
        sim.close()
Example #26
def example(kinectDict,startsDict,txtnames,_plot=True): 
	import seaborn as sns 
	sns.set_style('whitegrid')
	sns.set_context("paper")
	sns.set_palette(sns.cubehelix_palette(3, start=2, rot=0.45, dark=0.2, light=.8, reverse=True))
	#types,states,counts = [1,3,8,1,3,8],[2,2,2,4,4,4],[1,1,1,2,2,2]
	types,states,counts = [1,7,9,1,7,9],[2,2,2,4,4,4],[1,1,1,1,1,1]
	#types,states,counts = [1,6,9,1,6,9],[1,1,1,2,2,2],[1,1,1,1,1,1]
	#types,states,counts = [1,1,1,1,1],[1,3,5,2,1],[1,3,3,3,3]
	#types,states,counts = [1,1],[1,2],[1,4]
	pcts,tasks,evolution=testRun(kinectDict,startsDict,txtnames,types,states,counts,_plot=_plot)
	return pcts,tasks
	plt.show()
Example #27
def plot_targetvec_xy(data, graphfile):
    try:
        cmap = sb.cubehelix_palette(as_cmap=True, reverse=True)
        if (data['Target_X'].min() == data['Target_X'].max() or data['Target_Y'].min() == data['Target_Y'].max()):
            print "There is no variation in Target vector - not plotting"
            return -1
        axes = pyplot.quiver(data['Pos_X'], data['Pos_Y'], data['Target_X'], data['Target_Y'], data['Timestamp'], cmap=cmap)
        figure = axes.get_figure()
        figure.suptitle("Target Vector in 2D")
        figure.savefig(graphfile, dpi=200)
        pyplot.close(figure)
    except Exception as e:
        print "Unable to plot data [in plot_targetvec_xy]"
        print e
Example #28
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
    """
    Plots cumulative returns highlighting top drawdown periods.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        Amount of top drawdowns periods to plot (default 10).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    y_axis_formatter = FuncFormatter(utils.one_dec_places)
    ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))

    df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
    df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)

    df_cum_rets.plot(ax=ax, **kwargs)

    lim = ax.get_ylim()
    colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
    for i, (peak, recovery) in df_drawdowns[
            ['peak date', 'recovery date']].iterrows():
        if pd.isnull(recovery):
            recovery = returns.index[-1]
        ax.fill_between((peak, recovery),
                        lim[0],
                        lim[1],
                        alpha=.4,
                        color=colors[i])

    ax.set_title('Top %i Drawdown Periods' % top)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left')
    ax.set_xlabel('')
    return ax
Example #29
def v_plot(h_in, d_in, s_in):
    fig = plt.figure(1)

    h_in = h_in.reshape(ny, nx, order="C")
    cmap = sns.cubehelix_palette(8, as_cmap=True, dark=0.3)
    plt.pcolormesh(h_in.reshape(ny, nx, order="C"), cmap=cmap)
    plt.colorbar()
    # plt.contourf(h_in.reshape(ny, nx, order='C'), cmap=cmap)
    s_in = s_in / s_in.max()
    U = np.zeros(nn)
    V = np.zeros(nn)
    for ij in range(nn):
        if d_in[ij] == 0:
            U[ij] = -1
            V[ij] = -1
        if d_in[ij] == 1:
            U[ij] = 0
            V[ij] = -1
        if d_in[ij] == 2:
            U[ij] = 1
            V[ij] = -1
        if d_in[ij] == 3:
            U[ij] = -1
            V[ij] = 0
        if d_in[ij] == 4:
            U[ij] = 0
            V[ij] = 0
        if d_in[ij] == 5:
            U[ij] = 1
            V[ij] = 0
        if d_in[ij] == 6:
            U[ij] = -1
            V[ij] = 1
        if d_in[ij] == 7:
            U[ij] = 0
            V[ij] = 1
        if d_in[ij] == 8:
            U[ij] = 1
            V[ij] = 1

    qx = np.arange(nx) * dx + dx / 2.0
    qy = np.arange(ny) * dy + dy / 2.0
    qU = (dx * U).reshape(ny, nx)
    qV = (dy * V).reshape(ny, nx)
    Q = plt.quiver(qx, qy, qU, qV, scale=max([xl, yl]) * 2.0)
    plt.axis("equal")
    plt.xlim([0, nx])
    plt.ylim([0, ny])
    plt.show()
Example #30
    def BB_vs_Sidechain():
        # Make bins for BB RMSDs
        number_of_bins = 5
        bin_size = len(bb_vs_sidechain_df['WT-Mutant Backbone RMSD']) / number_of_bins + 1

        # Assign arbitrary bin identifiers for BB Group
        for index, row in bb_vs_sidechain_df.iterrows():
            bb_vs_sidechain_df.loc[index, 'BB Group'] = ((index + 1) // bin_size)
        # Find bin boundaries for BB group and add to dict
        bin_rename_dict = {}
        for name, group in bb_vs_sidechain_df.groupby('BB Group'):
            bin_rename_dict[name] = '%s -\n%s' % (group['WT-Mutant Backbone RMSD'].iloc[0], group['WT-Mutant Backbone RMSD'].iloc[len(group) - 1])
        # Rename bin identifiers to bin boundary values in BB group
        for index, row in bb_vs_sidechain_df.iterrows():
            bb_vs_sidechain_df.loc[index, 'BB Group'] = bin_rename_dict[bb_vs_sidechain_df.loc[index, 'BB Group']]

        # Assign bin identifiers for DDG Group
        for DDG_type in ['Experimental DDG', 'Predicted DDG']:
            for index, row in bb_vs_sidechain_df.iterrows():
                if row[DDG_type] > 2.5 or row[DDG_type] < -2.5:
                    bb_vs_sidechain_df.loc[index, DDG_type + ' Group'] = 'Extra Large DDG (DDG > 2.5 REU or DDG < -2.5 REU)'
                elif row[DDG_type] > 1 or row[DDG_type] < -1:
                    bb_vs_sidechain_df.loc[index, DDG_type + ' Group'] = 'Large DDG (2.5 REU > DDG > 1 REU or -2.5 < DDG < -1 REU)'
                elif row[DDG_type] > 0.5 or row[DDG_type] < -0.5:
                    bb_vs_sidechain_df.loc[index, DDG_type + ' Group'] = 'Medium DDG (1 REU > DDG > 0.5 REU or -1 < DDG < -0.5 REU)'
                else:
                    bb_vs_sidechain_df.loc[index, DDG_type + ' Group'] = 'Small DDG (0.5 REU > DDG > -0.5 REU)'

            sns.set_style('white', {'axes.grid': True, 'axes.edgecolor': '0'})
            sns.set_context('paper', font_scale=1.5, rc={'lines.linewidth': 1})

            fig, ax = plt.subplots(figsize=(20, 10))
            fig.suptitle('WT PDB - Mutant PDB Neighborhood Backbone RMSD vs. \nMutant PDB - RosettaOut Point Mutant Residues All-Atom RMSD', fontsize = 24, y=1.0)
            with sns.cubehelix_palette(number_of_bins, start=0.5, rot=-.75):
                sns.boxplot(x=bb_vs_sidechain_df['BB Group'],
                            y=bb_vs_sidechain_df['Point Mutant RMSD'],
                            ax=ax
                            )
            with sns.color_palette("husl", number_of_bins):
                sns.stripplot(x='BB Group',
                              y='Point Mutant RMSD',
                              hue= DDG_type + ' Group',
                              data=bb_vs_sidechain_df,
                              jitter=True,
                              ax=ax
                              )

            ax.set(xlabel='WT PDB - Mutant PDB Neighborhood Backbone RMSD', ylabel='Mutant PDB - RosettaOut Point Mutant Residues All-Atom RMSD')
            output_pdf.savefig(fig, pad_inches=1, bbox_inches='tight')
Example #31
from solcore import si
from structure import Interface, BulkLayer, Structure
from matrix_formalism.process_structure import process_structure
from matrix_formalism.multiply_matrices import calculate_RAT
from angles import theta_summary
from textures.standard_rt_textures import regular_pyramids, V_grooves
from solcore.material_system import create_new_material
import matplotlib.pyplot as plt
from angles import make_angle_vector
from config import results_path
from sparse import load_npz

import seaborn as sns
from cycler import cycler
#create_new_material('Si_OPTOS', 'data/Si_OPTOS_n.txt', 'data/Si_OPTOS_k.txt')
pal = sns.cubehelix_palette()

cols = cycler('color', pal)

params = {
    'legend.fontsize': 'small',
    'axes.labelsize': 'small',
    'axes.titlesize': 'small',
    'xtick.labelsize': 'small',
    'ytick.labelsize': 'small',
    'axes.prop_cycle': cols
}

plt.rcParams.update(params)
angle_degrees_in = 8
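With the cubehelix palette installed as the default colour cycle via rcParams, later line plots pick it up automatically; a quick check with hypothetical data:

import numpy as np

x = np.linspace(0, 1, 50)
for k in range(len(pal)):
    plt.plot(x, x ** (k + 1))    # each curve takes the next cubehelix colour
plt.show()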
Example #32
from pandas import read_excel
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

eafv_df = read_excel('EAFVdata_31404626.xls', sheet_name='data', header=0, 
              index_col=0, squeeze=True, parse_dates=True)


eafv_df_ses = pd.DataFrame(eafv_df)
eafv_df_ses.reset_index(inplace=True)
eafv_df_ses['year'] = [d.year for d in eafv_df_ses.Date]
eafv_df_ses['month'] = [d.strftime('%b') for d in eafv_df_ses.Date]
years = eafv_df_ses['year'].unique()

sns.cubehelix_palette(len(years))

mycolors=sns.cubehelix_palette(len(years))
for i, y in enumerate(years):
    if i > 0:   
        plt.xlabel('Date')
        plt.title('eafv Seasonal Data')
        plt.plot('month', 'Yt', data=eafv_df_ses.loc[eafv_df_ses.year==y, :], color=mycolors[i], label=y)
        plt.text(eafv_df_ses.loc[eafv_df_ses.year==y, :].shape[0]-.9, 
                 eafv_df_ses.loc[eafv_df_ses.year==y, 'Yt'][-1:].values[0], 
                 y, fontsize=12, color=mycolors[i])
plt.show()
Example #33
def mouse_per_scan_oracle(animal_id, size):

    key = dict(animal_id=animal_id, **SETTINGS)
    sz = tuple(i * size_factor[size] for i in [.7, .7])
    with sns.plotting_context('talk' if size == 'huge' else 'paper',
                              font_scale=1.3):
        with sns.axes_style(style="white", rc={"axes.facecolor":
                                               (0, 0, 0, 0)}):
            df = pd.DataFrame(
                (tune.MovieOracle.Total()
                 & key).fetch(order_by='session ASC, scan_idx ASC'))
            df['scan'] = [
                '{}-{}-{}'.format(ai, s, sa)
                for ai, s, sa in zip(df.animal_id, df.session, df.scan_idx)
            ]

            # Initialize the FacetGrid object
            N = len(dj.U('session', 'scan_idx') & (tune.MovieOracle() & key))

            pal = sns.cubehelix_palette(N, rot=-.25, light=.7)
            g = sns.FacetGrid(df, row="scan", hue="scan", palette=pal)

            # Draw the densities in a few steps
            g.map(sns.kdeplot,
                  "pearson",
                  clip_on=False,
                  shade=True,
                  alpha=1,
                  lw=1.5,
                  bw=.01)
            g.map(sns.kdeplot,
                  "pearson",
                  clip_on=False,
                  color="w",
                  lw=2,
                  bw=.01)
            g.map(plt.axhline, y=0, lw=2, clip_on=False)

            # Define and use a simple function to label the plot in axes coordinates
            def label(x, color, label):
                ax = plt.gca()
                low, high = ax.get_xlim()
                low -= .02
                ax.set_xlim((low, high))
                ax.text(0,
                        .2,
                        label,
                        fontweight="bold",
                        color=color,
                        ha="left",
                        va="center",
                        transform=ax.transAxes)

            g.map(label, "scan")

            # Set the subplots to overlap
            g.fig.subplots_adjust(bottom=.1, hspace=-0.25, top=.9)

            g.fig.set_size_inches(sz)

            # Remove axes details that don't play well with overlap
            g.set_titles("")
            g.fig.suptitle("Movie Oracle Correlations")
            g.set(yticks=[])
            g.despine(bottom=True, left=True)
            g.axes.ravel()[-1].set_xlabel('Pearson Correlation')

    return savefig(g.fig)
Example #34
def mouse_per_stack_oracle(animal_id, size):

    key = dict(animal_id=animal_id, **SETTINGS)
    sz = tuple(i * size_factor[size] for i in [.7, .7])
    with sns.plotting_context('talk' if size == 'huge' else 'paper',
                              font_scale=1.3):
        with sns.axes_style(style="white", rc={"axes.facecolor":
                                               (0, 0, 0, 0)}):
            rel = (stack.StackSet.Unit()).aggr(stack.StackSet.Match().proj(
                'munit_id', session='scan_session') * tune.MovieOracle.Total()
                                               & key,
                                               pearson='MAX(pearson)')
            df = pd.DataFrame(
                rel.fetch(order_by='stack_session ASC, stack_idx ASC'))
            df['stack'] = [
                '{}-{}-{}'.format(ai, s, sa) for ai, s, sa in zip(
                    df.animal_id, df.stack_session, df.stack_idx)
            ]

            # Initialize the FacetGrid object
            N = len(dj.U('stack_session', 'stack_idx') & rel)

            pal = sns.cubehelix_palette(N, rot=-.25, light=.7)

            if N > 1:
                g = sns.FacetGrid(df,
                                  row="stack",
                                  hue='stack',
                                  aspect=15,
                                  size=.5,
                                  palette=pal)

                # Draw the densities in a few steps
                g.map(sns.kdeplot,
                      "pearson",
                      clip_on=False,
                      shade=True,
                      alpha=1,
                      lw=1.5,
                      bw=.01)
                g.map(sns.kdeplot,
                      "pearson",
                      clip_on=False,
                      color="w",
                      lw=2,
                      bw=.01)
                g.map(plt.axhline, y=0, lw=2, clip_on=False)

                # Define and use a simple function to label the plot in axes coordinates
                def label(x, color, label):
                    ax = plt.gca()
                    low, high = ax.get_xlim()
                    low -= .02
                    ax.set_xlim((low, high))
                    ax.text(0,
                            .2,
                            label,
                            fontweight="bold",
                            color=color,
                            ha="left",
                            va="center",
                            transform=ax.transAxes)

                g.map(label, "stack")

                # Set the subplots to overlap
                g.fig.subplots_adjust(hspace=-.25, top=.9)

                g.fig.set_size_inches(sz)

                # Remove axes details that don't play well with overlap
                g.set_titles("")
                g.fig.suptitle("Movie Oracle Correlations")
                g.set(yticks=[])
                g.despine(bottom=True, left=True)
                g.axes.ravel()[-1].set_xlabel('Pearson Correlation')
                return savefig(g.fig)
            else:
                with sns.axes_style('ticks',
                                    rc={"axes.facecolor": (0, 0, 0, 0)}):
                    fig, ax = plt.subplots()

                # Draw the densities in a few steps
                sns.kdeplot(df.pearson,
                            shade=True,
                            alpha=1,
                            lw=1.5,
                            bw=.01,
                            label='n={} neurons'.format(len(df)))
                # Set the subplots to overlap
                fig.set_size_inches(sz)

                # Remove axes details that don't play well with overlap
                fig.suptitle("Movie Oracle Correlations Stack " +
                             np.unique(df['stack']).item())
                ax.set(yticks=[])
                low, high = ax.get_ylim()
                ax.set_ylim((0, high))
                sns.despine(fig=fig, trim=True, left=True)
                ax.set_xlabel('Pearson Correlation')
                ax.spines['bottom'].set_linewidth(1)
                ax.tick_params(axis='both', length=3, width=1)

                return savefig(fig)
Example #35
    stock = stock.dropna() # get rid of all the NAN rows.
    from sklearn.preprocessing import MinMaxScaler
    scaler=MinMaxScaler()
    stock["share_price_scaled"]=scaler.fit_transform(stock["share_price"].to_frame())
    all_data=all_data.append(stock) #append data to one matrix
    
#all_data.head()


brent[['date','oil_price']].set_index('date').plot(color="green", linewidth=1.0)


#==============================================================================
# Pairplot using master data table (all_data) with a filter on BP share price
#==============================================================================
palette=sns.cubehelix_palette(18, start=2, rot=0, dark=0, light=.95, reverse=False)
sns.pairplot(all_data[all_data['name']=="BP.L"].drop(["share_price_scaled"],axis=1),
             hue="year",palette=palette,size=4,markers="o",
             plot_kws=dict(s=50, edgecolor="b", linewidth=0))


#==============================================================================
# Pairplot on less data 2013 to 2017 using Royal Dutch Shell (LON) stock price
#==============================================================================

# Just for the last 5 years
all_data13=all_data[all_data["year"]>2012]
palette=sns.cubehelix_palette(5, start=2, rot=0, dark=0, light=.95, reverse=False)
sns.pairplot(all_data13[all_data13['name']=="RDSB.L"].drop(["share_price_scaled"],axis=1),
             hue="year",palette=palette,size=4,markers="o",
             plot_kws=dict(s=50, edgecolor="b", linewidth=0))
Example #36
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import get_data

answers = get_data.get_survey_Data()

# Plot 1: start/end trip
path = 'plots/changes_groups.png'
od_data = answers.groupby(['localidadOrigin', 'localidadDestination'],
                          as_index=False)
od_data = od_data['timestamp'].count()
cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)
ax = sns.scatterplot('localidadDestination',
                     'localidadOrigin',
                     data=od_data,
                     size='timestamp',
                     sizes=(1, 300),
                     cmap=cmap,
                     legend=False)
# ax.set(xlabel='Origen del viaje', ylabel='Destino', title='', fontsize=30)
ax.set_ylabel('Localidad de origen', fontsize=6)
ax.set_xlabel('Localidad de destino', fontsize=6)
ax.tick_params(labelsize=5)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
plt.tight_layout()
plt.savefig(path)
plt.show()

plt.close()
Example #37
plt.title('Distribution of Taxi Trips by Tip - Monday', fontsize=15)
plt.savefig('./tip_weekday/tip_mon.png', dpi=300, bbox_inches='tight')

#### Tuesday
sns.set(context='notebook',
        style='darkgrid',
        font='sans-serif',
        font_scale=1,
        color_codes=False,
        rc=None)
plot = sns.barplot(x="tip_range",
                   y="Total Number of Trips",
                   data=tip1,
                   palette=sns.cubehelix_palette(9,
                                                 start=10,
                                                 rot=0,
                                                 dark=0.25,
                                                 light=.95,
                                                 reverse=True))
plot.set_xlabel('Range of Tip Amount (in dollar)')
plot.set_ylabel('Total Number of Trips (in thousands)')
plot.set_yticklabels([
    '0', '1,000', '2,000', '3,000', '4,000', '5,000', '6,000', '7,000', '8,000'
])
plt.title('Distribution of Taxi Trips by Tip - Tuesday', fontsize=15)
plt.savefig('./tip_weekday/tip_tue.png', dpi=300, bbox_inches='tight')

#### Wednesday
sns.set(context='notebook',
        style='darkgrid',
        font='sans-serif',
        font_scale=1,
Example #38
def plot_error_map(backend, figsize=(12, 9), show_title=True):
    """Plots the error map of a given backend.

    Args:
        backend (IBMQBackend): Given backend.
        figsize (tuple): Figure size in inches.
        show_title (bool): Show the title or not.

    Returns:
        Figure: A matplotlib figure showing error map.

    Raises:
        VisualizationError: Input is not IBMQ backend.

    Example:
        .. jupyter-execute::
            :hide-code:
            :hide-output:

            from qiskit.test.ibmq_mock import mock_get_backend
            mock_get_backend('FakeVigo')

        .. jupyter-execute::

            from qiskit import QuantumCircuit, execute, IBMQ
            from qiskit.visualization import plot_error_map
            %matplotlib inline

            IBMQ.load_account()
            provider = IBMQ.get_provider(hub='ibm-q')
            backend = provider.get_backend('ibmq_vigo')
            plot_error_map(backend)
    """
    color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)

    props = backend.properties().to_dict()
    config = backend.configuration().to_dict()

    n_qubits = config['n_qubits']

    # U2 error rates
    single_gate_errors = [0] * n_qubits
    for gate in props['gates']:
        if gate['gate'] == 'u2':
            _qubit = gate['qubits'][0]
            single_gate_errors[_qubit] = gate['parameters'][0]['value']

    # Convert to percent
    single_gate_errors = 100 * np.asarray(single_gate_errors)
    avg_1q_err = np.mean(single_gate_errors)

    single_norm = matplotlib.colors.Normalize(vmin=min(single_gate_errors),
                                              vmax=max(single_gate_errors))
    q_colors = [color_map(single_norm(err)) for err in single_gate_errors]

    cmap = config['coupling_map']

    directed = False
    if n_qubits < 20:
        for edge in cmap:
            if not [edge[1], edge[0]] in cmap:
                directed = True
                break

    cx_errors = []
    for line in cmap:
        for item in props['gates']:
            if item['qubits'] == line:
                cx_errors.append(item['parameters'][0]['value'])
                break
        else:
            continue

    # Convert to percent
    cx_errors = 100 * np.asarray(cx_errors)
    avg_cx_err = np.mean(cx_errors)

    cx_norm = matplotlib.colors.Normalize(vmin=min(cx_errors),
                                          vmax=max(cx_errors))
    line_colors = [color_map(cx_norm(err)) for err in cx_errors]

    # Measurement errors

    read_err = []

    for qubit in range(n_qubits):
        for item in props['qubits'][qubit]:
            if item['name'] == 'readout_error':
                read_err.append(item['value'])

    read_err = 100 * np.asarray(read_err)
    avg_read_err = np.mean(read_err)
    max_read_err = np.max(read_err)

    fig = plt.figure(figsize=figsize)
    gridspec.GridSpec(nrows=2, ncols=3)

    grid_spec = gridspec.GridSpec(
        12,
        12,
        height_ratios=[1] * 11 + [0.5],
        width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])

    left_ax = plt.subplot(grid_spec[2:10, :1])
    main_ax = plt.subplot(grid_spec[:11, 1:11])
    right_ax = plt.subplot(grid_spec[2:10, 11:])
    bleft_ax = plt.subplot(grid_spec[-1, :5])
    bright_ax = plt.subplot(grid_spec[-1, 7:])

    plot_gate_map(backend,
                  qubit_color=q_colors,
                  line_color=line_colors,
                  qubit_size=28,
                  line_width=5,
                  plot_directed=directed,
                  ax=main_ax)
    main_ax.axis('off')
    main_ax.set_aspect(1)

    single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax,
                                                 cmap=color_map,
                                                 norm=single_norm,
                                                 orientation='horizontal')
    tick_locator = ticker.MaxNLocator(nbins=5)
    single_cb.locator = tick_locator
    single_cb.update_ticks()
    single_cb.update_ticks()
    bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(
        round(avg_1q_err, 3)))

    cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax,
                                             cmap=color_map,
                                             norm=cx_norm,
                                             orientation='horizontal')
    tick_locator = ticker.MaxNLocator(nbins=5)
    cx_cb.locator = tick_locator
    cx_cb.update_ticks()
    bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(
        round(avg_cx_err, 3)))

    if n_qubits < 10:
        num_left = n_qubits
        num_right = 0
    else:
        num_left = math.ceil(n_qubits / 2)
        num_right = n_qubits - num_left

    left_ax.barh(range(num_left),
                 read_err[:num_left],
                 align='center',
                 color='#DDBBBA')
    left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
    left_ax.set_yticks(range(num_left))
    left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
    left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
    left_ax.invert_yaxis()
    left_ax.set_title('Readout Error (%)', fontsize=12)

    for spine in left_ax.spines.values():
        spine.set_visible(False)

    if num_right:
        right_ax.barh(range(num_left, n_qubits),
                      read_err[num_left:],
                      align='center',
                      color='#DDBBBA')
        right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
        right_ax.set_yticks(range(num_left, n_qubits))
        right_ax.set_xticks(
            [0, round(avg_read_err, 2),
             round(max_read_err, 2)])
        right_ax.set_yticklabels([str(kk) for kk in range(num_left, n_qubits)],
                                 fontsize=12)
        right_ax.invert_yaxis()
        right_ax.invert_xaxis()
        right_ax.yaxis.set_label_position("right")
        right_ax.yaxis.tick_right()
        right_ax.set_title('Readout Error (%)', fontsize=12)
    else:
        right_ax.axis('off')

    for spine in right_ax.spines.values():
        spine.set_visible(False)

    if show_title:
        fig.suptitle('{name} Error Map'.format(name=backend.name()),
                     fontsize=24,
                     y=0.9)
    if get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']:
        plt.close(fig)
    return fig
Example #39
    def create_ridgeplot(cls, var_name, df: pd.DataFrame, outfile: str,
                         labels: str) -> None:

        logging.info(
            "[CMIP6_ridgeplot] Starting ridgeplot creation for {}".format(
                var_name))

        sns.set(style="white",
                rc={"axes.facecolor": (0, 0, 0, 0)},
                font_scale=1.8)
        pal = sns.cubehelix_palette(12, rot=0.3, light=0.7)

        # Color combinations
        # https://digitalsynopsis.com/design/minimal-web-color-palettes-combination-hex-code/
        color1 = "#FAA50A"
        color2 = "#FA5A0A"
        color3 = "#DC2B50"

        g1 = sns.FacetGrid(df,
                           row="month",
                           hue="month",
                           aspect=15,
                           height=0.75,
                           palette=pal)

        # Draw the densities in a few steps
        g1.map(
            sns.kdeplot,
            var_name,
            bw_adjust=0.5,
            clip_on=False,
            fill=True,
            alpha=0.7,
            linewidth=1.0,
            color=color1,
        )
        g1.map(sns.kdeplot,
               var_name,
               clip_on=False,
               color=color1,
               lw=1,
               bw_adjust=0.5)
        g1.map(
            sns.kdeplot,
            "Proj",
            bw_adjust=0.5,
            clip_on=False,
            fill=True,
            alpha=0.45,
            linewidth=1.0,
            color=color2,
        )
        g1.map(sns.kdeplot,
               "Proj",
               clip_on=False,
               color=color2,
               lw=1.0,
               bw_adjust=0.5)
        g1.map(
            sns.kdeplot,
            "Proj2",
            bw_adjust=0.5,
            clip_on=False,
            fill=True,
            alpha=0.45,
            linewidth=1.0,
            color=color3,
        )
        g1.map(sns.kdeplot,
               "Proj2",
               clip_on=False,
               color=color3,
               lw=1.0,
               bw_adjust=0.5)

        g1.map(plt.axhline, y=0, lw=1, clip_on=False)

        # Define and use a simple function to label the plot in axes coordinates
        g1.map(cls.label, var_name)
        # Set the subplots to overlap
        g1.fig.subplots_adjust(hspace=-0.25)

        # Remove axes details that don't play well with overlap
        g1.set_titles("")
        g1.set(yticks=[])

        g1.despine(bottom=True, left=True)
        for ax in g1.axes.ravel():
            if ax.is_first_row():
                ax.legend(
                    labels=[labels[0], labels[1], labels[2]],
                    facecolor="white",
                    framealpha=1,
                    loc="upper right",
                )

        if not os.path.exists(os.path.dirname(outfile)):
            os.makedirs(os.path.dirname(outfile))
        if os.path.exists(outfile):
            os.remove(outfile)
        print("[CMIP6_plot] Created plot {}".format(outfile))
        plt.savefig(outfile, dpi=300)

        plt.show()
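# --- Illustrative usage sketch (not part of the original example) ---
# Assumes the method above lives on a plotting class (hypothetically named
# CMIP6_ridgeplot here) together with a companion `label` method, and that the
# DataFrame holds the observed variable plus "Proj"/"Proj2" projection columns
# and a "month" column used for the facet rows. All names below are made up.
import numpy as np
import pandas as pd

demo_df = pd.DataFrame({
    "month": np.repeat(np.arange(1, 13), 100),
    "tos": np.random.normal(12.0, 2.0, 1200),
    "Proj": np.random.normal(13.0, 2.0, 1200),
    "Proj2": np.random.normal(14.0, 2.0, 1200),
})
CMIP6_ridgeplot.create_ridgeplot("tos", demo_df,
                                 outfile="plots/tos_ridgeplot.png",
                                 labels=["Historical", "Projection A", "Projection B"])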
Beispiel #40
0
           whis=5,
           color=sb.color_palette("Blues")[5],
           ax=axes[0, 2])
sb.boxplot(temp_df["Claims"],
           color=sb.color_palette("BuGn_r")[4],
           ax=axes[1, 0])
sb.boxplot(temp_df["Motor"],
           color=sb.color_palette("BuGn_r")[3],
           ax=axes[1, 1])
sb.boxplot(temp_df["Household"],
           whis=7,
           color=sb.color_palette("BuGn_r")[0],
           ax=axes[1, 2])
sb.boxplot(temp_df["Health"],
           whis=2.5,
           color=sb.cubehelix_palette(8)[2],
           ax=axes[2, 0])
sb.boxplot(temp_df["Life"],
           whis=7.5,
           color=sb.cubehelix_palette(8)[4],
           ax=axes[2, 1])
sb.boxplot(temp_df["Work_Compensations"],
           whis=7,
           color=sb.cubehelix_palette(8)[6],
           ax=axes[2, 2])

# 7.1.2 Histogram visualization
f, axes = plt.subplots(3, 3, figsize=(7, 7))
plt.subplots_adjust(wspace=0.3, hspace=0.3)
sb.distplot(temp_df["First_Policy"],
            color=sb.color_palette("Blues")[1],
Beispiel #41
0
def plot_results(results, hsids, metrics, datasets, methods, n_runs, respath):
    """

    :param results:
    :param metrics:
    :param datasets:
    :param methods:
    :param n_runs:
    :param respath:
    :return:
    """
    sns.set_style('darkgrid')
    sns.set_context("talk")

    m_sad = {dataset: [[] for _ in range(len(methods))] for dataset in datasets}
    m_idx_hat = {dataset: [[] for _ in range(len(methods))] for dataset in datasets}

    if 'loss' in metrics:

        for dataset in datasets:
            fig, axes = plt.subplots(nrows=n_runs,
                                     ncols=len(methods),
                                     figsize=(5 * len(methods),
                                              5 * n_runs))

            for ax, (run, method) in zip(axes.flatten(), product(range(n_runs), methods)):
                ax.plot(results[dataset][method][run]['loss'])
                ax.set(title="{},\n{} run: {}".format(method, dataset, run+1))

            fig.subplots_adjust(hspace=0.5)
            plt.savefig(Path(respath, dataset + '_loss.png'), dpi=200, bbox_inches='tight', format='png')
            plt.clf()

    if 'SAD' in metrics:

        for dataset in datasets:
            fig, axes = plt.subplots(nrows=n_runs,
                                     ncols=len(methods),
                                     figsize=(5 * len(methods),
                                              5 * n_runs))

            for ax, (run, method) in zip(axes.flatten(), product(range(n_runs), methods)):
                ax.plot(results[dataset][method][run]['SAD'])
                ax.set(title="{},\n{} run: {}".format(method, dataset, run+1))

            fig.subplots_adjust(hspace=0.5)
            plt.savefig(Path(respath, dataset + '_SAD.png'), dpi=200, bbox_inches='tight', format='png')
            plt.clf()

    if 'endmembers' in metrics:

        for dataset in datasets:
            n_bands, n_endmembers = results[dataset]['ref_endmembers'].shape
            fig, axes = plt.subplots(nrows=len(methods) + 1,
                                     ncols=n_endmembers,
                                     figsize=(7 * n_endmembers,
                                              6 * len(methods) + 6),
                                     sharey='row', sharex='col')

            # Plot reference endmembers
            if 'ref_endmembers' in results[dataset]:
                for n in range(n_endmembers):
                    ref_endmember = results[dataset]['ref_endmembers'][:, n]
                    if hsids[dataset].bands_to_use is not None:
                        for i in range(len(hsids[dataset].bands_to_use)):
                            if hsids[dataset].bands_to_use[i] is np.nan:
                                ref_endmember = np.insert(ref_endmember,i,np.nan)
                    if hsids[dataset].freq_list is not None:
                        freq_list = hsids[dataset].freq_list
                    else:
                        freq_list = [i for i in range(ref_endmember.shape[0])]
                    axes[0][n].plot(freq_list, ref_endmember)
                    axes[0][n].set(title="{},\n{} endmember: {}".format(dataset, 'reference', n + 1))
                    axes[0][n].fill_between(freq_list, np.nanmin(ref_endmember), np.nanmax(ref_endmember),
                                            where=np.isnan(ref_endmember), color='grey', alpha=0.5)
            # Plot every run's endmembers for each method in the appropriate column
            linecolor = sns.color_palette()[0]
            for (midx, eidx) in product(range(len(methods)), range(n_endmembers)):
                for run in range(n_runs):
                    sad_m, idx_org_m, idx_hat_m, sad_k_m, s0 = \
                        calc_SAD_2(results[dataset]['ref_endmembers'],
                                   results[dataset][methods[midx]][run]['endmembers'])
                    m_sad[dataset][midx].append(sad_m)
                    m_idx_hat[dataset][midx].append(idx_hat_m)
                    endmember = results[dataset][methods[midx]][run]['endmembers'][:, idx_hat_m[eidx]]
                    if hsids[dataset].bands_to_use is not None:
                        for i in range(len(hsids[dataset].bands_to_use)):
                            if hsids[dataset].bands_to_use[i] is np.nan:
                                endmember = np.insert(endmember,i,np.nan)
                    if hsids[dataset].freq_list is not None:
                        freq_list = hsids[dataset].freq_list
                    else:
                        freq_list = [i for i in range(len(endmember))]
                    axes[midx+1][eidx].plot(freq_list, endmember,
                                            color=linecolor)
                axes[midx+1][eidx].set(title="{} endmember: {}\n {} runs".format(methods[midx], eidx+1, n_runs))
                axes[midx+1][eidx].fill_between(freq_list, np.nanmin(endmember), np.nanmax(endmember),
                                                where=np.isnan(endmember), color='grey', alpha=0.5)

            plt.subplots_adjust(hspace=0.5)
            plt.savefig(Path(respath, dataset+'_endmembers.png'), dpi=200, format='png')
            plt.clf()

    if 'abundances' in metrics:

        cmap = sns.cubehelix_palette(start=.4, rot=-0.85, gamma=1.2, hue=2, light=0.85,
                                     dark=0.2, reverse=True, as_cmap=True)

        for dataset in datasets:

            n_bands, n_endmembers = results[dataset]['ref_endmembers'].shape
            fig = plt.figure(figsize=(7 * n_endmembers, 7 * len(methods) + 7 ))
            gs = fig.add_gridspec(len(methods) + 1, n_endmembers)

            # Plot reference abundances
            if 'ref_abundances' in results[dataset]:
                for n in range(n_endmembers):
                    ax = fig.add_subplot(gs[0, n], gid='endmember_ax_' + str(n))
                    sns.heatmap(results[dataset]['ref_abundances'][:, :, n], ax=ax, square=True, cmap=cmap)
                    ax.set(title="{},\nreference abundance map: {}".format(dataset, n + 1))

            # Plot abundances in their appropriate column

            for (midx, eidx) in product(range(len(methods)), range(n_endmembers)):
                if not m_sad[dataset][midx]:
                    for run in range(n_runs):
                        sad_m, idx_org_m, idx_hat_m, sad_k_m, s0 = \
                            calc_SAD_2(results[dataset]['ref_endmembers'],
                                       results[dataset][methods[midx]][run]['endmembers'])
                        m_sad[dataset][midx].append(sad_m)
                        m_idx_hat[dataset][midx].append(idx_hat_m)
                m_run = np.argmin(m_sad[dataset][midx])
                ax = fig.add_subplot(gs[midx + 1, eidx],
                                     gid=methods[midx] + '_ax_' + str(eidx) + str(m_run))
                sns.heatmap(results[dataset][methods[midx]][m_run]['abundances'][:, :,
                            m_idx_hat[dataset][midx][m_run][eidx]], ax=ax, square=True, cmap=cmap)
                ax.set(title="{} run: {}\nabundance map: {}".format(methods[midx], m_run, eidx+1))

            plt.subplots_adjust(hspace=0.6)
            plt.savefig(Path(respath, dataset+'_abundances.png'), dpi=200, format='png')
            plt.clf()

    print('Plots saved to', Path(respath).absolute())
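# --- Illustrative sketch (not part of the original example) ---
# plot_results expects nested containers shaped roughly like the hypothetical
# layout below; dataset/method names and array sizes are made up:
#
#     results = {
#         "demo_dataset": {
#             "ref_endmembers": np.zeros((156, 3)),        # bands x endmembers
#             "ref_abundances": np.zeros((95, 95, 3)),     # rows x cols x endmembers
#             "method_A": [                                # one dict per run
#                 {"loss": [...], "SAD": [...],
#                  "endmembers": np.zeros((156, 3)),
#                  "abundances": np.zeros((95, 95, 3))},
#             ],
#         },
#     }
#     plot_results(results, hsids, ['loss', 'SAD'], ['demo_dataset'],
#                  ['method_A'], n_runs=1, respath='plots')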
Beispiel #42
0
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import shap
shap.initjs()
from plotly import tools
import plotly.offline as py
import streamlit as st
import streamlit.components.v1 as components
import joblib, os, pickle, warnings
warnings.filterwarnings('ignore')
import seaborn as sns
color=sns.cubehelix_palette(start=.5, rot=-.5, as_cmap=True)
import lightgbm as lgb
st.set_page_config(layout="wide")
st.title("Tableaux de bord pour prédire un défaut de remboursement de crédit")
st.subheader("Ce tableau de bord permet de prédire si un client est capable ou non capable de rembourser un crédit")

#X_test_final=pd.read_pickle("https://github.com/SidiML/Projet_Scoring/blob/master/X_test_final?raw=true")
#@st.cache()
X_test_final=pd.read_csv("https://raw.githubusercontent.com/SidiML/Projet7/master/X_test_final.csv")
#X_train_final=pd.read_csv("C:/Users/admin/OneDrive/Bureau/Openclassroom/Projet7/X_train_final.csv")
#X_val_final=pd.read_csv("C:/Users/admin/OneDrive/Bureau/Openclassroom/Projet7/X_val_final.csv")

#X_train_final.set_index("SK_ID_CURR", inplace = True)
#X_val_final.set_index("SK_ID_CURR", inplace = True)
#X_test_final.set_index("SK_ID_CURR", inplace = True)
Beispiel #43
0
w = pd.DataFrame(w)
w = w.T
w.index = spleen.index

##top 300 genes
new_w = np.zeros((300*n_components,n_components))
new_w = pd.DataFrame(new_w)
genes = []
for i in range(n_components):
    a=w.sort_values(by=[w.columns[i]],ascending=False)
    genes.extend(a.index[:300])
    new_w.iloc[300*i:300*(i+1),:]=a.iloc[:300,:].values
new_w.index=genes
new_w = new_w.drop_duplicates()

cmap = sns.cubehelix_palette(light=1, as_cmap=True)
f=sns.clustermap(new_w,method='ward', metric='euclidean',cmap=cmap)
f.savefig("gene_topic",dpi=450)

##visualize genes after dimension reduction
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
embedding = pca.fit_transform(new_w)
embedding = pd.DataFrame(embedding)
embedding.columns=['PC1','PC2']
f=sns.lmplot(x='PC1', y='PC2',data=embedding,
           fit_reg=False,legend=False,scatter_kws={"s": 5})
f.savefig("pca_gene",dpi=300)

from sklearn.manifold import TSNE
embedding = TSNE(n_components=2).fit_transform(new_w)
embedding = pd.DataFrame(embedding)
embedding.columns=['tSNE1','tSNE2']
Beispiel #44
0
def plot_yield_curve(dates=None, ctry='US'):
    # prepare quandl query including auth_token, query data, write to df
    if dates is None:
        dates = ['2017-12-29']
    start_date = dates[0]
    end_date = datetime.datetime.now().strftime("%Y-%m-%d")
    query, yields = None, None
    auth_token = 'WgALsBb8T2xgwghZaXiJ'

    if ctry == 'US':
        query = 'USTREASURY/YIELD'
        yields = quandl.get(query, authtoken=auth_token, start_date=start_date, end_date=end_date)
    elif ctry == 'CH':
        query = 'SNB/RENDOBLIM'
        yields = quandl.get(query, authtoken=auth_token)

    df = pd.DataFrame(columns=yields.columns)  # df of yields placeholder
    latest = yields.iloc[-1].name.to_pydatetime().date().strftime("%Y-%m-%d")  # most recent date
    day_before = yields.iloc[-2].name.to_pydatetime().date().strftime("%Y-%m-%d")  # previous date

    # append dates list with relevant dates: EOY 2017, 2018, most recent
    dates.append(day_before)
    dates.append('2018-12-31')
    dates.append(latest)
    dates = sorted(dates)
    for date in dates:
        date_series = yields.loc[date]
        df.loc[date] = date_series

    # spreads time series
    spreads = pd.DataFrame(index=yields.index, columns=['10y-2y', '10y-3m'])
    spreads['10y-2y'] = yields['10 YR'] - yields['2 YR']
    spreads['10y-3m'] = yields['10 YR'] - yields['3 MO']

    # plotting
    with plt.style.context('seaborn-whitegrid'):
        # instantiate figure including layout
        # cmap = sns.cubehelix_palette(rot=-.4, as_cmap=True)
        fig = plt.figure(figsize=(10, 8))
        fig.suptitle("Yield Curve Data " + ctry + " for " + str(end_date), size=16)
        layout = (2, 2)
        year_on_year_ax = plt.subplot2grid(layout, (0, 0), fig=fig)
        one_day_change_ax = plt.subplot2grid(layout, (0, 1), fig=fig)
        one_day_change_bar_ax = plt.subplot2grid(layout, (1, 0), fig=fig)
        ytd_spreads_ax = plt.subplot2grid(layout, (1, 1), fig=fig)

        # yr on yr plot
        year_on_year_ax.set_xticks(np.arange(df.columns.shape[0]))
        year_on_year_ax.set_xticklabels(df.columns)
        df.iloc[[0, df.index.get_loc('2018-12-31'), -1]].transpose().plot(
            ax=year_on_year_ax, legend=True, title='Year End', style=['-^', '-*', '-<'],
            cmap=sns.cubehelix_palette(rot=-.2, as_cmap=True))

        # one day change yield curve
        one_day_change_ax.set_xticks(np.arange(df.columns.shape[0]))
        one_day_change_ax.set_xticklabels(df.columns)
        df.iloc[-2:].transpose().plot(ax=one_day_change_ax, legend=True, title='Latest Data', style=['-^', '-*'],
                                      cmap=sns.cubehelix_palette(rot=-.2, as_cmap=True))

        # one day change bar plot
        changes = pd.DataFrame(data=(df.iloc[-1] - df.iloc[-2]).values, index=df.columns, columns=['1 day change'])
        changes['YTD change'] = df.iloc[-1] - df.loc['2018-12-31']
        one_day_change_bar_ax.set_xticks(np.arange(df.columns.shape[0]))
        one_day_change_bar_ax.set_xticklabels(df.columns)
        changes.plot.bar(ax=one_day_change_bar_ax, title='Yield Change',
                         cmap=sns.cubehelix_palette(rot=-.2, as_cmap=True))
        one_day_change_bar_ax.axhline(alpha=0.3, color='black')

        # spreads plot
        spreads.plot(ax=ytd_spreads_ax, title='Spreads Time Series', cmap=sns.cubehelix_palette(rot=-.2, as_cmap=True))
        ytd_spreads_ax.axhline(y=0, color='black', alpha=0.3)

        plt.tight_layout()
        sns.despine()
    return None
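# --- Illustrative call (not part of the original example) ---
# With the defaults below, US Treasury yields are pulled from Quandl starting at
# the first date in `dates`; passing ctry='CH' switches to the SNB series instead.
plot_yield_curve(dates=['2017-12-29'], ctry='US')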
Beispiel #45
0
def make_plot(data,
              cols=[11, 12, 13],
              label='Face-on',
              output_file='J_profiles_sim_round.pdf'):
    ''' Takes the results from ret_results.dat and plots a series of J-factor
		and D-factor profiles for each model '''
    f, a = plt.subplots(2, 1, figsize=[3.32, 4.])
    plt.subplots_adjust(hspace=0.)
    cm = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)
    cNorm = colors.Normalize(vmin=0.4, vmax=2.5)
    sM = cmx.ScalarMappable(norm=cNorm, cmap=cm)
    ## 1. For each model in ret_results.dat we plot a profile of the J-factor
    ##    against beam angle. ret_results.dat contains the central density,
    ##	  scale-radii and tidal radii for each model
    tmax = np.logspace(-2., np.log10(0.5), 20)  ## angle grid
    for d in data[1:]:
        Jvals_r = np.zeros(len(tmax))
        Dvals_r = np.zeros(len(tmax))
        for n, i in enumerate(tmax):
            if (d[0] > 1.):  ## Prolate case
                ba, ca = 1. / d[0], 1. / d[0]
                print(ba, ca, i)
                model = cJ.AlphaBetaGammaDensityProfile(
                    np.array([alpha, beta, gamma]), d[cols[0]], d[cols[1]],
                    d[cols[2]], np.array([1., ba, ca]), True)
                Jvals_r[n] = np.log10(
                    model.J_far_factor(Distance, i, "x") /
                    sJ.GEV2cm5toMsol2kpc5)
                Dvals_r[n] = np.log10(
                    model.D_far_factor(Distance, i, "x") / sJ.GEVcm2toMsolkpc2)
            else:  ## Oblate case
                ba, ca = 1., d[0]
                print(ba, ca, i)
                model = cJ.AlphaBetaGammaDensityProfile(
                    np.array([alpha, beta, gamma]), d[cols[0]], d[cols[1]],
                    d[cols[2]], np.array([1., ba, ca]), True)
                Jvals_r[n] = np.log10(
                    model.J_far_factor(Distance, i, "z") /
                    sJ.GEV2cm5toMsol2kpc5)
                Dvals_r[n] = np.log10(
                    model.D_far_factor(Distance, i, "z") / sJ.GEVcm2toMsolkpc2)
        l, = a[0].plot(tmax, Jvals_r, color=sM.to_rgba(d[0]))
        l2, = a[1].plot(tmax, Dvals_r, color=sM.to_rgba(d[0]))
        if (d[0] == 1.):
            l.set_dashes((2, 1))
            l2.set_dashes((2, 1))
    ## 2. Also plot the spherical formulae from Paper I
    l, = a[0].plot(tmax,
                   sJ.wyns_formulaJ_NFW_data(Velocity_dispersion,
                                             rh * 1000. * gf,
                                             Distance,
                                             tmax,
                                             2. * rh * gf,
                                             walker_or_wolf="walker"),
                   ls='dashed',
                   color='k')
    l.set_dashes((4, 1))
    l, = a[1].plot(tmax,
                   sJ.wyns_formulaD_NFW_data(Velocity_dispersion,
                                             rh * 1000. * gf,
                                             Distance,
                                             tmax,
                                             2. * rh * gf,
                                             walker_or_wolf="walker"),
                   ls='dashed',
                   color='k')
    l.set_dashes((4, 1))
    ## 3. Add the colorbar
    divider = make_axes_locatable(a[0])
    cba = divider.append_axes("top", size="5%", pad=0.)
    cbl = matplotlib.colorbar.ColorbarBase(cba,
                                           cmap=cm,
                                           norm=cNorm,
                                           orientation='horizontal')
    cbl.set_label(r'$q$', labelpad=-30.4)
    cbl.ax.xaxis.set_ticks_position('top')
    a[0].yaxis.get_major_ticks()[0].label1.set_visible(False)
    a[0].set_xticklabels([])
    a[0].set_ylabel(
        r'$\log_{10}(\mathrm{J}(\theta)/\,\mathrm{GeV^2\,cm}^{-5})$')
    a[1].set_xlabel(r'$\theta/\,\mathrm{deg}$')
    a[1].set_ylabel(r'$\log_{10}(\mathrm{D}(\theta)/\,\mathrm{GeV\,cm}^{-2})$')
    a[0].text(0.9,
              0.1,
              label,
              horizontalalignment='right',
              verticalalignment='bottom',
              transform=a[0].transAxes,
              fontsize=14)
    plt.savefig(output_file, bbox_inches='tight')
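# --- Illustrative call (not part of the original example) ---
# The docstring says `data` comes from ret_results.dat; the exact file layout is
# not shown here, so the load below is only an assumption about its format.
# The function also relies on module-level globals (alpha, beta, gamma, Distance,
# rh, gf, Velocity_dispersion) defined elsewhere in the original script.
data = np.loadtxt('ret_results.dat')
make_plot(data, cols=[11, 12, 13], label='Face-on',
          output_file='J_profiles_sim_round.pdf')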
Beispiel #46
0
fig.savefig(str(output / "group_tube.png"), dpi=300)

group_channels = {
    group: {
        tube: [(m, g) for t, m, g, _ in gdata if t == tube]
        for tube in ("1", "2", "3")
    }
    for group, gdata in group_data
}
markers = {
    tube: [m for m, _ in group_channels["normal"][tube]]
    for tube in ("1", "2", "3")
}
sns.set_style("white")
fig, axes = plt.subplots(9, 3, figsize=(9, 10))
colors = sns.cubehelix_palette(len(mappings.GROUPS), rot=4, dark=0.30)
colors = [*colors[1:], colors[0]]
for i, group in enumerate(mappings.GROUPS):
    for j, tube in enumerate(markers):
        ax = axes[i, j]
        data = group_channels[group][tube]
        pos = np.arange(len(data))
        ax.bar(
            pos,
            [m for _, m in data],
            width=0.8,
            color=[colors[i]],
        )
        ax.set_ylim(0, 12)
        ax.set_xticks(np.arange(len(data)))
        ax.set_xticklabels([m for m, _ in data], rotation="vertical")
Beispiel #47
0
def uv_ridgePlot(data, engine, xlabel, ylabel, afreq):
    data = data.copy()
    data.rename(columns={'plotX1': ylabel}, inplace=True)
    if data['anfreq_label'].nunique() > 15:
        engine = 'Interactive'

    if engine == 'Static':

        sns.set_theme(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
        # Initialize the FacetGrid object
        pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
        g = sns.FacetGrid(data,
                          row="anfreq_label",
                          hue="anfreq_label",
                          aspect=15,
                          height=.5,
                          palette=pal)

        # Draw the densities in a few steps
        g.map(sns.kdeplot,
              ylabel,
              bw_adjust=.5,
              clip_on=False,
              fill=True,
              alpha=1,
              linewidth=1.5)
        g.map(sns.kdeplot,
              ylabel,
              clip_on=False,
              color="w",
              lw=2,
              bw_adjust=.5)
        g.map(plt.axhline, y=0, lw=2, clip_on=False)

        # Define and use a simple function to label the plot in axes coordinates
        def label(x, color, label):
            ax = plt.gca()
            ax.text(0,
                    .2,
                    label,
                    fontweight="bold",
                    color=color,
                    ha="left",
                    va="center",
                    transform=ax.transAxes)

        g.map(label, ylabel)

        # Set the subplots to overlap
        g.fig.subplots_adjust(hspace=-.25)

        # Remove axes details that don't play well with overlap
        g.set_titles("")
        g.set(yticks=[])
        g.despine(bottom=True, left=True)
        plt.close()
        return pn.pane.Matplotlib(g.fig, tight=True)

    elif engine == 'Interactive':

        step = 30
        overlap = 2
        data = data.dropna()
        min_cval = data[ylabel].min()
        max_cval = data[ylabel].max()
        ridgeline = alt.Chart(data, height=step)
        ridgeline = ridgeline.mark_area(interpolate="monotone",
                                        fillOpacity=0.8,
                                        stroke="lightgray",
                                        strokeWidth=0.5)
        ridgeline = ridgeline.encode(
            alt.X("{0}:Q".format(ylabel),
                  bin=True,
                  title=ylabel,
                  axis=alt.Axis(format='~s')))
        ridgeline = ridgeline.encode(
            alt.Y("count({0}):Q".format(ylabel),
                  scale=alt.Scale(range=[step, -step * overlap]),
                  impute=alt.ImputeParams(value=0),
                  axis=None))
        ridgeline = ridgeline.encode(
            alt.Fill("mean({0}):Q".format(ylabel),
                     legend=None,
                     scale=alt.Scale(domain=[max_cval, min_cval],
                                     scheme="redyellowblue")))
        if afreq not in ['Month Start', 'Month End']:
            ridgeline = ridgeline.encode(
                alt.Row("{0}:N".format('anfreq_label'),
                        header=alt.Header(labelAngle=0, labelAlign="left")))
        else:
            ridgeline = ridgeline.encode(
                alt.Row("{0}:N".format('anfreq_label'),
                        title=afreq,
                        sort=[
                            'January', 'February', 'March', 'April', 'May',
                            'June', 'July', 'August', 'September', 'October',
                            'November', 'December'
                        ],
                        header=alt.Header(labelAngle=0, labelAlign="left")))
        ridgeline = ridgeline.properties(bounds="flush", width=525)
        ridgeline = ridgeline.configure_facet(spacing=0)

        return ridgeline
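# --- Illustrative usage sketch (not part of the original example) ---
# uv_ridgePlot expects a value column named 'plotX1' and a period column named
# 'anfreq_label'; the frame below is made up, and the original module is assumed
# to import panel as pn, altair as alt, and seaborn as sns.
import numpy as np
import pandas as pd

demo = pd.DataFrame({
    "plotX1": np.random.normal(0.0, 1.0, 1200),
    "anfreq_label": np.tile(["January", "February", "March",
                             "April", "May", "June"], 200),
})
ridge_panel = uv_ridgePlot(demo, engine="Static", xlabel="Value",
                           ylabel="Monthly value", afreq="Month Start")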
Beispiel #48
0
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

total = pd.read_excel(
    r'C:\Users\toyaji\Documents\수업\국제개발\데이터\meat_consumption(통계수업).xlsx',
    'Total')
sns.set(font_scale=1.7)

total = total[total['Year'] < 2017]
year = reversed(total.Year.unique())
palette = sns.cubehelix_palette(27)

f, ax = plt.subplots(figsize=(20, 10))
plt.ylim(0, 0.10)
plt.xlim(-15, 60)
for y, p in zip(year, palette):
    tf = total[total['Year'] == y]['sheep']
    g = sns.distplot(tf, label=str(y), ax=ax, hist=False, color=p)
    ax.axvline(tf.mean(), color=p, ymax=0.1)

plt.legend(ncol=2)

plt.title("Sheep Distribution (1990-2016)")
plt.savefig("Sheep Distribution.jpg")
plt.show()
Beispiel #49
0
def generate_report():

    conn = sqlite3.connect('IOCs.db')
    logging.info('Opened database successfully')

    query = conn.execute('''SELECT a.ioc, a.provider, a.first_seen
		FROM iocs a
		JOIN (SELECT ioc, COUNT(*)
		FROM iocs 
		GROUP BY ioc
		HAVING count(*) > 1 and provider='FarsightSecurity') b
		ON a.ioc = b.ioc WHERE a.ioc IN (SELECT threat_indicator FROM nod_matches)
		ORDER BY a.ioc,a.first_seen''')

    cols = [column[0] for column in query.description]
    df = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)

    logging.info(df.ioc.unique())

    # set farsight first seen as baseline - farsight_first_seen
    df2 = df.loc[df['provider'] == 'FarsightSecurity', ('ioc', 'first_seen')]
    df2.rename(columns={'first_seen': 'farsight_first_seen'}, inplace=True)
    df3 = pd.merge(df, df2, on='ioc', how='outer')

    df3.drop(df3[df3.provider == 'FarsightSecurity'].index,
             inplace=True)  #to remove from graph
    #df3.drop(df3[df3.provider == 'CrowdStrike'].index, inplace=True) #to remove from graph
    #df3.drop(df3[df3.provider == 'Cyber threat coalition'].index, inplace=True) #to remove from graph
    #df3.drop(df3[df3.provider == 'SURBL'].index, inplace=True) #to remove from graph
    #df3.drop(df3[df3.provider == 'EmergingThreats'].index, inplace=True) #to remove from graph
    #df3.drop(df3[df3.provider == 'Palo Alto'].index, inplace=True) #to remove from graph
    #df3.drop(df3[df3.provider == 'IID'].index, inplace=True) #to remove from graph
    #df3.drop(df3[df3.provider == 'Fortinet'].index, inplace=True) #to remove from graph

    df3['time_delta_human'] = pd.to_datetime(
        df3['first_seen']) - pd.to_datetime(df3['farsight_first_seen'])
    df3['time_delta'] = (pd.to_datetime(df3['first_seen']) - pd.to_datetime(
        df3['farsight_first_seen'])).dt.total_seconds() / 3600
    average_time_gain = round(df3['time_delta'].mean(), 1)

    # Create the data
    x = df3.time_delta
    g = df3.provider
    df = pd.DataFrame(dict(x=x, g=g))

    # Initialize the FacetGrid object
    pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
    g = sns.FacetGrid(df, row="g", hue="g", aspect=15, height=1, palette=pal)

    g.map(sns.kdeplot,
          "x",
          bw_adjust=.15,
          clip_on=False,
          fill=True,
          common_norm=True,
          alpha=1,
          linewidth=1.5)

    # Define and use a simple function to label the plot in axes coordinates
    def label(x, color, label):
        label = label + '\n{}h\n{} IOCs'.format(
            round(df3[df3.provider == label].time_delta.mean(), 1),
            len(df3[df3.provider == label].index))
        ax = plt.gca()
        ax.text(0,
                .2,
                label,
                fontweight="bold",
                color=color,
                ha="left",
                va="center",
                transform=ax.transAxes)

    g.map(label, "x")
    plt.xlabel("Delay to Farsight NOD in hours")

    # Set the subplots to overlap
    g.fig.subplots_adjust(hspace=0.5)

    # Remove axes details that don't play well with overlap
    g.set_titles('')
    g.set(yticks=[])
    g.despine(bottom=True, left=True)

    plt.title('Average time gain: {} hours'.format(average_time_gain))
    plt.savefig('backtest/Farsight_timeline.png')
    plt.close(g.fig)
    logging.info('Generated backtest/Farsight_timeline.png successfully')
Beispiel #50
0
OPTIMIZERS = ('hc', 'sa', 'ga', 'me')
OPTIMIZER_NAMES = {
    'hc': 'Hill Climbing',
    'sa': 'Simulated Annealing',
    'ga': 'Genetic Algorithm',
    'me': 'MAP-elites',
}

NETWORK_NAMES = {
    'resnet50': 'ResNet-50',
    'alexnet': 'AlexNet',
    'inception': 'Inception V3'
}

cmap = sns.cubehelix_palette(5, start=.5, rot=-.75, reverse=True)


def test_optimizer(c, r, log_dir):
    c['log_dir'] = log_dir + f'/{r:03}'
    _, t = optimize_with_config(config=c, verbose=False, set_log_dir=True)
    return t


def run_optimizer_test(n_threads=-1):
    if n_threads == -1:
        n_threads = multiprocessing.cpu_count()

    for optimizer in tqdm(OPTIMIZERS):
        # log(f'Testing optimizer {optimizer}')
        run_name = f'e3_{optimizer}-{NETWORK}{"-pipeline" if PIPELINE_BATCHES > 1 else ""}' \
Beispiel #51
0
    # Use the provided units to label the colorbar
    a.cax.set_ylabel(units)

    # Add figure-level title and tweak margins.
    fig = plt.gcf()
    fig.suptitle(title, weight='bold', size=20)
    fig.subplots_adjust(right=0.8, bottom=0.2)
    return a


v = dataframe_for_value(domain, algorithm, value)

if (v['fill_piv'] < 0).values.any() & (v['fill_piv'] > 0).values.any():
    center = 0
    cmap = 'RdBu_r'
else:
    center = None
    cmap = sns.cubehelix_palette(as_cmap=True)

fig = plot_heatmap(fill_piv=v['fill_piv'],
                   vmin=v['vmin'],
                   vmax=v['vmax'],
                   title=v['title'],
                   units=v['units'],
                   metric='euclidean',
                   method='average',
                   idx=None,
                   clustermap_kwargs=dict(center=center, cmap=cmap))

fig.savefig(output)
Beispiel #52
0
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import sys
import seaborn as sns
# sns.set()
color_set = [
    sns.color_palette("GnBu_d"),
    sns.color_palette("Blues"),
    sns.cubehelix_palette(8),
    sns.light_palette("green"),
]

if __name__ == '__main__':
    L = 31
    g = float(sys.argv[1])  # 1.4
    h = float(sys.argv[2])  # 0.9045
    chi = 32
    order = '1st'

    exact_sz = np.load(
        '../2_time_evolution/data_tebd/1d_TFI_g%.4f_h%.4f/L31/mps_chi%d_1st_sz_array.npy'
        % (g, h, chi))
    exact_ent = np.load(
        '../2_time_evolution/data_tebd/1d_TFI_g%.4f_h%.4f/L31/mps_chi%d_1st_ent_array.npy'
        % (g, h, chi))
    exact_E = np.load(
        '../2_time_evolution/data_tebd/1d_TFI_g%.4f_h%.4f/L31/mps_chi%d_1st_energy.npy'
        % (g, h, chi))
    exact_t = np.load(
        '../2_time_evolution/data_tebd/1d_TFI_g%.4f_h%.4f/L31/mps_chi%d_1st_dt.npy'
        % (g, h, chi))
Beispiel #53
0
#%%

#Fig 3 B, bottom


def kBUcoop_Fit(B, a, b):
    return a / (b + B) - 0.5


lambda_plt = []
beta_plt = []
dn = 1

col = sns.cubehelix_palette(len(N_List),
                            start=.5,
                            rot=-.75,
                            dark=0.2,
                            light=0.7)

fig = plt.figure(figsize=(4, 3), dpi=150)
for Boff, kBUc, N, i in zip(Boff_N, kBUcoop_N, N_List, range(len(N_List))):

    d = {'value1': Boff[0::dn], 'value2': np.array(kBUc[0::dn]) / kBU}
    df = pd.DataFrame(data=d)

    popt_kBU, pcov_kBU = curve_fit(kBUcoop_Fit,
                                   Boff[0::dn],
                                   np.array(kBUc[0::dn]) / kBU,
                                   p0=(1, 0.9),
                                   bounds=([0, 0], [np.inf, np.inf]),
                                   maxfev=10000)
Beispiel #54
0
map_shoredirection[unmask_deposits_shallow] = map_shoredirection[unmask_deposits_shallow] - 180.
map_shoredistance_sqrt  = np.sqrt(map_shoredistance)


if data_subset == 'land': # choose only locations over land or shallow-marine areas with deposits
    mask_exclude = np.invert(unmask_LandOrDeposits) # larger dataset
elif data_subset == 'deposit': # choose only locations where some type of deposit has been found
    mask_exclude = np.invert(unmask_deposits) # smaller dataset
else:
    raise ValueError("I don't recognise that data subset!")
unmask_include = np.invert(mask_exclude)


# REPLOT (Distance, Deposits)
fig = plt.figure(figsize=(16,8))
cmap_shoredist = sns.cubehelix_palette(8, start=2., rot=-.3, dark=0.3, light=0.7, reverse=True, as_cmap=True)
vmin=0.
vmax=1850.
ax_shoredist = fig.add_subplot(111)
ax_shoredist = sns.heatmap(map_shoredistance, cmap=cmap_shoredist, vmin=vmin, vmax=vmax, square=True, cbar=True, cbar_kws={"shrink": .75}, xticklabels=False, yticklabels=False, mask=mask_exclude)
ax_shoredist.set_xlabel('Paleolongitude')
ax_shoredist.set_ylabel('Paleolatitude')
ax_shoredist.set_title('Distance to shoreline during Miocene')
fig.savefig(images_folder+"map_shoredist_miocene.pdf", bbox_inches='tight', pad_inches=0.3)
plt.close(fig)


# ## PRECIPITATION
print("Precipitation...")
data_rain = xr.open_dataset(data_rainfall_filename)
lon_data  = data_rain.variables["lon"].values
"""
Scatterplot with continuous hues and sizes
==========================================

_thumb: .45, .45

"""

import seaborn as sns

sns.set()

# Load the example planets dataset
planets = sns.load_dataset("planets")

cmap = sns.cubehelix_palette(rot=-.2, as_cmap=True)
ax = sns.scatterplot(x="distance",
                     y="orbital_period",
                     hue="year",
                     size="mass",
                     palette=cmap,
                     sizes=(10, 200),
                     data=planets)
Beispiel #56
0
def heatmap_seaborn(df,
                    outfilename=None,
                    title=None,
                    cmap=None,
                    vmin=None,
                    vmax=None,
                    labels=None,
                    classes=None):
    """Returns seaborn heatmap with cluster dendrograms.

    - df - pandas DataFrame with relevant data
    - outfilename - path to output file (indicates output format)
    - title - title text, used to label the heatmap's colourbar
    - cmap - colourmap option
    - vmin - float, minimum value on the heatmap scale
    - vmax - float, maximum value on the heatmap scale
    - labels - dictionary of alternative labels, keyed by default sequence
               labels
    - classes - dictionary of sequence classes, keyed by default sequence
                labels
    """
    # Obtain colour map
    cmap = plt.get_cmap(cmap)

    # Decide on figure layout size
    figsize = max(8, df.shape[0] * 1.1)

    # Add class colour bar. The aim is to get a pd.Series for the columns
    # of the form:
    # 0    colour for class in col 0
    # 1    colour for class in col 1
    # ...  colour for class in col ...
    # n    colour for class in col n
    # This is in col_cb when we're finished
    if classes is not None:
        levels = sorted(list(set(classes.values())))
        pal = sns.cubehelix_palette(len(levels),
                                    light=.9,
                                    dark=.1,
                                    reverse=True,
                                    start=1,
                                    rot=-2)
        paldict = {lvl: col for (lvl, col) in zip(levels, pal)}
        lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())}
        col_cb = pd.Series(df.index).map(lvl_pal)
        # Match the colour Series index to the data index so clustermap can align them
        col_cb.index = df.index
    else:
        col_cb = None

    # Labels are defined before we build the clustering
    # If a label mapping is missing, use the key text as fall back
    if labels is not None:
        newlabels = [labels.get(i, i) for i in df.index]
    else:
        newlabels = [i for i in df.index]

    # Plot heatmap
    fig = sns.clustermap(df,
                         cmap=cmap,
                         vmin=vmin,
                         vmax=vmax,
                         col_colors=col_cb,
                         row_colors=col_cb,
                         figsize=(figsize, figsize),
                         linewidths=0.5,
                         xticklabels=newlabels,
                         yticklabels=newlabels,
                         annot=True)

    fig.cax.yaxis.set_label_position('left')
    fig.cax.set_ylabel(title)

    # Rotate ticklabels
    fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(),
                                   rotation=90)
    fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(),
                                   rotation=0)

    # Save to file
    if outfilename:
        fig.savefig(outfilename)
    return fig
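# --- Illustrative usage sketch (not part of the original example) ---
# A tiny made-up similarity matrix with two classes; the file name, title, and
# class labels are all hypothetical.
import pandas as pd

demo_df = pd.DataFrame(
    [[1.00, 0.92, 0.70],
     [0.92, 1.00, 0.71],
     [0.70, 0.71, 1.00]],
    index=["g1", "g2", "g3"], columns=["g1", "g2", "g3"])
heatmap_seaborn(demo_df,
                outfilename="demo_heatmap.pdf",
                title="Pairwise identity",
                cmap="Spectral",
                vmin=0.0, vmax=1.0,
                classes={"g1": "A", "g2": "A", "g3": "B"})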
Beispiel #57
0
                       linewidth=0,
                       antialiased=False)
ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z)
plt.show()

f, ax = plt.subplots(figsize=(6, 6))
cmap = sns.cubehelix_palette(as_cmap=True, dark=0, light=1, reverse=True)
sns.kdeplot(x1, y1, cmap=cmap, n_levels=60, shade=True)

plt.show()

fig = plt.figure()
ax = fig.gca(projection='3d')

# Plot a sin curve using the x and y axes.
ax.plot(x1, y1, zs=0, zdir='z', label='curve in (x,y)')

# Plot scatterplot data (20 2D points per colour) on the x and z axes.
colors = ('r', 'g', 'b', 'k')
x = np.random.sample(20 * len(colors))
y = np.random.sample(20 * len(colors))
c_list = []
Beispiel #58
0
from vtl.Readfile import Readfile
from astropy.io import ascii
from astropy.table import Table
import cPickle
import os
from glob import glob
from time import time
import seaborn as sea
import pysynphot as S

sea.set(style='white')
sea.set(style='ticks')
sea.set_style({"xtick.direction": "in", "ytick.direction": "in"})
colmap = sea.cubehelix_palette(12,
                               start=2,
                               rot=.2,
                               dark=0,
                               light=1.1,
                               as_cmap=True)


def Scale_model(D, sig, M):
    C = np.sum(((D * M) / sig**2)) / np.sum((M**2 / sig**2))
    return C


def Oldest_galaxy(z):
    return cosmo.age(z).value


class Gen_spec(object):
    def __init__(self,
Beispiel #59
0
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})

# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m

# Initialize the FacetGrid object
pal = sns.cubehelix_palette(10, rot=-.25, light=.7)
g = sns.FacetGrid(df, row="g", hue="g", aspect=15, size=.5, palette=pal)

# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)


# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
    ax = plt.gca()
    ax.text(0,
            .2,
            label,
            fontweight="bold",
Beispiel #60
0
    def seq_palette(self, n_colors):
        return sns.cubehelix_palette(n_colors,
                                     start=.5,
                                     rot=-.75,
                                     reverse=True)