Example #1
def SetAxes(legend=False):
    f_b = 0.164
    f_star = 0.01
    err_b = 0.006
    err_star = 0.004
    f_gas = f_b - f_star
    err_gas = np.sqrt(err_b**2 + err_star**2)

    plt.axhline(y=f_gas, ls='--', c='k', label='', zorder=-1)
    x = np.linspace(.0,2.,1000)
    plt.fill_between(x, y1=f_gas - err_gas, y2=f_gas + err_gas, color='k', alpha=0.3, zorder=-1)
    plt.text(.6, f_gas+0.006, r'f$_{gas}$', verticalalignment='bottom', size='large')
    plt.xlabel(r'r/r$_{vir}$', size='x-large')
    plt.ylabel(r'f$_{gas}$ ($<$ r)', size='x-large')

    plt.xscale('log')
    plt.xticks([1./1.9, 1.33/1.9, 1, 1.5, 2.],[r'r$_{500}$', r'r$_{200}$', 1, 1.5, 2], size='large')
    #plt.yticks([.1, .2], ['0.10', '0.20'])
    plt.tick_params(length=10, which='major')
    plt.tick_params(length=5, which='minor')
    plt.xlim([0.4,1.5])
    plt.minorticks_on()

    if legend:
        plt.legend(loc=0, prop={'size':'small'}, markerscale=0.7, numpoints=1, ncol=2)
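A minimal usage sketch (assuming numpy/matplotlib are imported as np/plt, as the snippet itself requires; the profile below is a made-up placeholder, not real cluster data):

import numpy as np
import matplotlib.pyplot as plt

r = np.linspace(0.4, 1.5, 50)            # r / r_vir
f_gas_profile = 0.13 * r**0.25           # placeholder gas-fraction profile
plt.plot(r, f_gas_profile, label='simulated cluster')
SetAxes(legend=True)
plt.show()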
Example #2
def plot_twoscales(name, dict_array, xlabel='', ylabel='', title='', linetypes=['b','r','g','k'], labels=[], xlog=None, ylim=None):
  plt.clf()
  # Create the figure and first axes before setting labels/scales,
  # so they apply to this figure rather than a stale one.
  ax1 = plt.figure().add_subplot(111)
  if len(xlabel) > 0:
    plt.xlabel(xlabel)
  if len(ylabel) > 0:
    plt.ylabel(ylabel)
  if len(title) > 0:
    plt.title(title)
  if xlog:
    plt.xscale('log', base=xlog)
  if ylim:
    plt.ylim(ylim)

  # zip() returns an iterator in Python 3, so materialize it before indexing
  dicty1 = list(zip(*sorted(dict_array[0].items())))
  dicty2 = list(zip(*sorted(dict_array[1].items())))
  ax1.plot(dicty1[0], dicty1[1], linetypes[0], label=labels[0])
  for tl in ax1.get_yticklabels():
    tl.set_color(linetypes[0])
  ax1.set_ylabel(labels[0], color=linetypes[0])
  ax2 = ax1.twinx()
  ax2.plot(dicty2[0], dicty2[1], linetypes[1], label=labels[1])
  for tl in ax2.get_yticklabels():
    tl.set_color(linetypes[1])
  ax2.set_ylabel(labels[1], color=linetypes[1])
  plt.savefig('%s.eps' % name)
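A hypothetical call, with two series keyed by a shared x value; plot_twoscales writes '<name>.eps':

series_a = {1: 10.0, 2: 20.0, 4: 35.0, 8: 50.0}   # e.g. throughput
series_b = {1: 0.91, 2: 0.88, 4: 0.85, 8: 0.80}   # e.g. error rate
plot_twoscales('throughput_vs_error', [series_a, series_b],
               xlabel='batch size', labels=['throughput', 'error rate'],
               xlog=2)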
Example #3
def _scatter(actual, prediction, args):
    plt.figure()
    plt.plot(actual, prediction, 'b'+args['plot_scatter_marker'])
    xmin=min(actual)
    xmax=max(actual)
    ymin=min(prediction)
    ymax=max(prediction)
    diagxmin=min(math.fabs(x) for x in actual)
    diagymin=min(math.fabs(y) for y in prediction)
    diagpmin=min(diagxmin,diagymin)
    pmin=min(xmin,ymin)
    pmax=max(xmax,ymax)
    plt.plot([diagpmin,pmax],[diagpmin,pmax],'k-')
    if args['plot_identifier'] != 'NoName':
        plt.title(args['plot_identifier'])
    plt.xlabel('Observed')
    plt.ylabel('Modeled')
    if args['plot_performance_log'] == True:
        plt.yscale('log')
        plt.xscale('log')
    if args['plot_scatter_free'] != True:
        plt.gca().set_aspect('equal')
    if args['plot_dump'] == True:
        pfname=os.path.join(args['plot_dir'],args['plot_identifier']+'_eiger_scatter.pdf')
        plt.savefig(pfname,format="pdf")
    else:
        plt.show()
Example #4
def create_scatter(datax, datay, x_label, y_label, filename, title=None,
                   log=False, set_same=False, colors=None,
                   num_colors=None, color_map=None, xlims=None, ylims=None):
    """Given a set of data, create a scatter plot of the data, optionally
    in log format

    """
    plt.figure()
    plt.grid()
    if colors is not None:
        plt.scatter(datax, datay, marker='x', c=colors, s=num_colors,
                    cmap=color_map)
    else:
        plt.scatter(datax, datay, marker='x')
    plt.xlabel(x_label)
    plt.ylabel(y_label)

    if log:
        plt.xscale('log')

    if xlims is not None:
        plt.xlim(xlims)
    if ylims is not None:
        plt.ylim(ylims)

    if set_same:
        ylims = plt.ylim()
        plt.xlim(ylims)
    if title is not None:
        plt.title(title)
    plt.tight_layout()
    plt.savefig(filename, format='pdf')
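A quick sketch of how create_scatter might be called (synthetic data, hypothetical labels):

import numpy as np

datax = np.random.lognormal(size=200)
datay = datax * np.random.uniform(0.5, 2.0, size=200)
create_scatter(datax, datay, 'input size', 'runtime (s)', 'runtime_scatter.pdf',
               title='Runtime vs. input size', log=True)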
Example #5
def plot_figure_commute(file_name):
    f = open(file_name)
    lines = f.readlines()
    lines[0] = lines[0][:-1]
    f.close()

    probabilities = lines[0].split(" ")
    xx = range(len(probabilities))
    probabilities = [float(x) for x in probabilities]

    plt.figure(figsize=(12, 10))
    plt.plot(xx, probabilities, label="D(degree > x)")
    plt.ylabel('Percentage of vertices whose degree > x')
    plt.xlabel('Degree')
    plt.title("Cumulative function for degrees")
    plt.legend(loc='upper right')

    name = "results_graphs/"
    if file_name == "temp_degrees/cumulative_temp.txt":
        name += "ful_cumulative"
    else:
        name += "good_cumulative"

    plt.savefig(name + '.png')

    plt.figure(figsize=(12, 10))
    plt.plot(xx, probabilities, label="log(D(degree > x))")
    plt.legend(loc='upper right')
    plt.title("Cumulative function for degrees in log/log coords")
    plt.ylabel('Log of percentage of vertices whose degree > x')
    plt.xlabel('Log of degrees')
    plt.xscale('log')
    plt.yscale('log')

    plt.savefig(name + '_log' + '.png')
Example #6
def Config(**options):
    """Configures the plot.

    Pulls options out of the option dictionary and passes them to
    title, xlabel, ylabel, xscale, yscale, xticks, yticks, axis, legend,
    and loc.
    """
    title = options.get('title', '')
    pyplot.title(title)

    xlabel = options.get('xlabel', '')
    pyplot.xlabel(xlabel)

    ylabel = options.get('ylabel', '')
    pyplot.ylabel(ylabel)

    if 'xscale' in options:
        pyplot.xscale(options['xscale'])

    if 'xticks' in options:
        pyplot.xticks(options['xticks'])

    if 'yscale' in options:
        pyplot.yscale(options['yscale'])

    if 'yticks' in options:
        pyplot.yticks(options['yticks'])

    if 'axis' in options:
        pyplot.axis(options['axis'])

    loc = options.get('loc', 0)
    legend = options.get('legend', True)
    if legend:
        pyplot.legend(loc=loc)
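Config is a thin wrapper over pyplot state; a minimal usage sketch:

from matplotlib import pyplot

pyplot.plot([1, 10, 100], [3, 30, 300], label='trend')
Config(title='Demo', xlabel='x', ylabel='y', xscale='log', loc='upper left')
pyplot.show()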
Example #7
def main():

    sample='q'
    sm_bin='10.0_10.5'
    catalogue = 'sm_9.5_s0.2_sfr_c-0.75_250'

    #load in fiducial mock
    filepath = './'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'_cov.npy'
    cov = np.matrix(np.load(filepath+filename))
    diag = np.diagonal(cov)
    filepath = cu.get_output_path() + 'analysis/central_quenching/observables/'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'.dat'
    data = ascii.read(filepath+filename)
    rbins = np.array(data['r'])
    mu = np.array(data['wp'])
    
    #load in comparison mock
    
    
    
    
    plt.figure()
    plt.errorbar(rbins, mu, yerr=np.sqrt(np.diagonal(cov)), color='black')
    plt.plot(rbins, wp,  color='red')
    plt.xscale('log')
    plt.yscale('log')
    plt.show()
    
    inv_cov = cov.I
    Y = np.matrix((wp-mu))
    
    X = Y*inv_cov*Y.T
    
    print(X)
Example #8
    def t90_dist(self):
        """ Plots T90 distribution, gives the mean and median T90 values of the
        sample and calculates the number of short, long bursts in the sample """
        t90s = []
        for i in range(0,len(self.t90s),1):
            try:
                t90s.append(float(self.t90s[i]))

            except ValueError:
                continue

        t90s = np.array(t90s)
        mean_t90 = np.mean(t90s)
        median_t90 = np.median(t90s)
        print('Mean T90 time =',mean_t90,'s')
        print('Median T90 time=',median_t90,'s')
        # Split at the conventional 2 s short/long GRB boundary.
        short_t90s = t90s[t90s < 2]
        long_t90s = t90s[t90s >= 2]
        print('Number of Short/Long GRBs =',len(short_t90s),'/',len(long_t90s))

        plt.figure()
        plt.xlabel('T$_{90}$ (s)')
        plt.ylabel('Number of GRBs')
        plt.xscale('log')
        minimum, maximum = min(short_t90s), max(long_t90s)
        plt.axvline(mean_t90,color='red',linestyle='-')
        plt.axvline(median_t90,color='blue',linestyle='-')
        plt.hist(t90s,bins= 10**np.linspace(np.log10(minimum),np.log10(maximum),20),color='grey',alpha=0.5)
        plt.show()
Example #9
    def fluence_dist(self):
        """ Plots the fluence distribution and gives the mean and median fluence
        values of the sample """
        fluences = []
        for i in range(0,len(self.fluences),1):
            try:
                fluences.append(float(self.fluences[i]))

            except ValueError:
                continue

        fluences = np.array(fluences)
        mean_fluence = np.mean(fluences)
        median_fluence = np.median(fluences)
        print('Mean Fluence =',mean_fluence,'(15-150 keV) [10^-7 erg cm^-2]')
        print('Median Fluence =',median_fluence,'(15-150 keV) [10^-7 erg cm^-2]')

        plt.figure()
        plt.xlabel('Fluence (15-150 keV) [$10^{-7}$ erg cm$^{-2}$]')
        plt.ylabel('Number of GRBs')
        plt.xscale('log')
        minimum, maximum = min(fluences), max(fluences)
        plt.axvline(mean_fluence,color='red',linestyle='-')
        plt.axvline(median_fluence,color='blue',linestyle='-')
        plt.hist(fluences,bins= 10**np.linspace(np.log10(minimum),np.log10(maximum),20),color='grey',alpha=0.5)
        plt.show()
Example #10
def dim_sensitivity_plot(x, Y, fname, show_legend=True):

    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')

    plt.figure(figsize=(3, 3))
    plt.xlabel('$d$', size=FONTSIZE)
    plt.ylabel('ROC AUC', size=FONTSIZE)

    plt.set_cmap('Set2')

    lines = []
    for i, label in enumerate(KEYS):
        line_data = Y.get(label)

        if line_data is None:
            continue
        
        line, = plt.plot(x, line_data, label=label, marker=MARKERS[i],
                         markersize=0.5 * FONTSIZE, color=COLORS[i])
        lines.append(line)



    if show_legend:
        plt.legend(handles=lines, loc='lower right')
    plt.xscale('log', base=2)
    plt.xticks(x, [str(y) for y in x], size=FONTSIZE)
    plt.yticks(size=FONTSIZE)
    plt.tight_layout()

    plt.savefig(fname)
Example #11
def plot_a_func_time(aes, times, which_are_final_bodies=None,year_unit='kyr',title=None):
    """This function takes a list of each individual body's semimajor axes,
    which itself can be stored as a list, as well as a list of lists of the 
    times corresponding to semimajor axes passed with the first argument
    and then plots semi-major axis as a function of time for the objects 
    passed to me.
    which_are_final_bodies: pass the index of the final bodies if you want 
    those lines plotted thicker
    """  
    year_unit_dict  = {"Myr":1.e6,"kyr":1.e3}

    fig = pp.figure()

    for i in range(len(aes)):
        pp.plot(np.divide(times[i],year_unit_dict[year_unit]),aes[i],color='blue',linewidth=0.5)

    if which_are_final_bodies is not None:
        for i in range(len(which_are_final_bodies)):
            #print "   final body plotting as red: " + str(i)
            pp.plot(np.divide(times[which_are_final_bodies[i]],year_unit_dict[year_unit]),aes[which_are_final_bodies[i]],color='red')#,color='blue',linewidth=1.5)

    pp.xscale('log')
    
    if title != None:
        pp.title(title)

    pp.xlabel("Time ("+year_unit+")")
    pp.ylabel("Semimajor axis (AU)")


    return fig
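A usage sketch with fabricated orbital histories (two bodies, the second marked as a final body; assumes numpy and the pp pyplot alias the snippet relies on):

import numpy as np

times = [np.linspace(1e2, 1e5, 200)] * 2          # years
aes = [1.0 + 0.1 * np.sin(times[0] / 1e4),        # placeholder semimajor axes (AU)
       2.0 - 1e-6 * times[1]]
fig = plot_a_func_time(aes, times, which_are_final_bodies=[1], year_unit='kyr')
fig.savefig('a_vs_time.png')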
Example #12
def make_plot(filename, title, arguments, methods, scale):
    if not support_plots:
        return

    is_linear = (scale == 'linear')

    plot_size = LINEAR_PLOT_SIZE if is_linear else OTHER_PLOT_SIZE
    plt.figure(figsize=plot_size)

    for name, func, measures in methods:
        plt.plot(arguments, measures, 'o-', label=name, markersize=3)

    if is_linear:
        axis = plt.axis()
        plt.axis((0, axis[1], 0, axis[3]))

    plt.xscale(scale)
    plt.yscale(scale)

    plt.xticks(fontsize=NORMAL_FONT_SIZE)
    plt.yticks(fontsize=NORMAL_FONT_SIZE)

    plt.grid(True)

    plt.title(title, fontsize=LABEL_FONT_SIZE)
    plt.xlabel('Argument', fontsize=NORMAL_FONT_SIZE)
    plt.ylabel('Time (seconds)', fontsize=NORMAL_FONT_SIZE)
    plt.legend(loc='upper left', fontsize=NORMAL_FONT_SIZE)

    plt.tight_layout(pad=0.2)

    path = os.path.join(PLOTS_DIR, filename)
    plt.savefig(path)
    print('[*] Saved plot "%s"' % path)
Example #13
def initPlot(name):
    plt.xlabel("fppi")
    plt.ylabel("miss rate")
    plt.title(name)
    plt.grid(True)
    plt.xscale('log')
    plt.yscale('log')
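Since initPlot only sets axis state, call it before plotting the curves; a sketch:

import matplotlib.pyplot as plt

initPlot('detector comparison')
plt.plot([0.01, 0.1, 1.0], [0.9, 0.5, 0.2], 'o-', label='baseline')
plt.legend()
plt.show()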
Example #14
def avgDegree(G):
  print("Nodes: ", G.number_of_nodes())
  print("Edges: ", G.number_of_edges())

  # avg degree
  degrees = defaultdict(int)
  total = 0
  for node in G.nodes():
    neighbors = list(G.neighbors(node))  # neighbors() returns an iterator in networkx 2.x
    degrees[len(neighbors)] += 1
    total += len(neighbors)

  max_degree = max(degrees.keys())
  degrees_arr = (max_degree+1) * [0]
  for index, count in degrees.items():
    degrees_arr[index] = count

  plt.plot(range(max_degree+1), degrees_arr, '.')
  plt.xscale('log', base=2)
  plt.xlabel('degree')
  plt.yscale('log', base=2)  # the original passed basex to yscale, which is invalid
  plt.ylabel('# of people')
  plt.savefig('degree_distribution.png')
  plt.close()
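A usage sketch on a synthetic scale-free graph (assuming networkx, plus the defaultdict and plt imports the snippet itself relies on):

import networkx as nx

G = nx.barabasi_albert_graph(1000, 3)   # hypothetical test graph
avgDegree(G)                            # writes degree_distribution.png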
Example #15
def dose_plot(df,err,cols,scale='linear'):
    n_rows = int(np.ceil(len(cols)/3.0))
    plt.figure(figsize=(20,4 * n_rows))
    subs = gridspec.GridSpec(n_rows, 3) 
    plt.subplots_adjust(hspace=0.54,wspace=0.27)

    for col,sub in zip(cols,subs):
        plt.subplot(sub)
        for base in df['Base'].unique():
            for drug in get_drugs_with_multiple_doses(filter_rows(df,'Base',base)):
                data = thread_first(df,
                                    (filter_rows,'Drug',drug),
                                    (filter_rows,'Base',base),
                                    (DF.sort_values, 'Dose'))
                error = thread_first(err,
                                     (filter_rows,'Drug',drug),
                                     (filter_rows,'Base',base),
                                     (DF.sort_values, 'Dose'))
                if scale == 'linear':
                    plt.errorbar(data['Dose'],data[col],yerr=error[col])
                    title = "{} vs. Dose".format(col)
                else: 
                    plt.errorbar(data['Dose'],data[col],yerr=error[col])
                    plt.xscale('log')
                    title = "{} vs. Dose (Log Scale)".format(col)
                    plt.xticks(data['Dose'].values,data['Dose'].values)
                    plt.xlim(0.06,15)
                label('Dose ({})'.format(data.Unit.values[0]), col,title,fontsize = 15)

                plt.legend(df['Base'].unique(), loc = 0)
Example #16
def set_axis_properties(p,metric,varying_parameter,group):

    #Set major x-axis label
    plt.xlabel(xlabel_names[varying_parameter])

    #Set x-axis scale
    xscale_args = xscale_arguments[(metric,varying_parameter)]
    plt.xscale(xscale_args[0],**xscale_args[1])

    #Set x-axis tick labels
    #Get tick values
    ticks = list(sp.unique(group[varying_parameter]))

    #If an item is not in the tick dictionary for the bar plot, add it
    if pltkind[(metric,varying_parameter)] == 'bar':
        for item in ticks:
            if item not in varying_xlabels[varying_parameter].keys():
                varying_xlabels[varying_parameter][item] = '$' + str(item) +'$'

    xlabels = [ varying_xlabels[varying_parameter][item] for item in ticks]

    if pltkind[(metric,varying_parameter)] == 'bar':
        p.set_xticks(sp.arange(len(ticks))+0.5)
        plt.setp(p.set_xticklabels(xlabels), rotation=0)
    else:
        plt.xticks(ticks,xlabels)

    plt.ylabel(ylabel_names[metric])
    plt.grid(True)
Example #17
def plot_citation_graph(citation_graph, filename, plot_title):
    # find the indegree_distribution
    indeg_dist = in_degree_distribution(citation_graph)    
    # sort freq by keys
    number_citations = sorted(indeg_dist.keys())
    indeg_freq = [indeg_dist[n] for n in number_citations]

    # normalize
    total = sum(indeg_freq)
    indeg_freq_norm = [freq / float(total) for freq in indeg_freq]
    
    # calculate log/log, except for the first one (0)
    #log_number_citations = [math.log10(x) for x in number_citations[1:]]
    #log_indeg_freq_norm = [math.log10(x) for x in indeg_freq_norm[1:]]
    
    plot(number_citations[1:], indeg_freq_norm[1:], 'o')
    
    xscale("log")
    yscale("log")
    
    xlabel("log10 #citations")
    ylabel("log10 Norm.Freq.")
    title(plot_title)
    grid(True)
    savefig(filename)
    show()
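The snippet assumes an in_degree_distribution helper. A minimal sketch, assuming the graph is a dict mapping each node to an iterable of its successors (the real helper in the source may differ):

def in_degree_distribution(digraph):
    # Tally in-degrees, then count how many nodes share each in-degree.
    in_deg = dict((node, 0) for node in digraph)
    for node in digraph:
        for head in digraph[node]:
            in_deg[head] = in_deg.get(head, 0) + 1
    dist = {}
    for deg in in_deg.values():
        dist[deg] = dist.get(deg, 0) + 1
    return dist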
Example #18
def PlotEDepSummary(gFiles,nFiles,figureName='EDepSummary.png',tParse=GetThickness,
  histKey='eDepHist'):
  """ PlotEDepSummary
  Plotss the energy deposition summary
  """
  # Extrating the average values
  gT = list()
  gDep = list()
  gDepError = list()
  nT = list()
  nDep = list()
  nDepError = list()
  for fname in gFiles:
    f = TFile(fname,'r')
    hist = f.Get(histKey)
    gT.append(GetThickness(fname))
    gDep.append(hist.GetMean())
    gDepError.append(hist.GetMeanError())
  for fname in nFiles:
    f = TFile(fname,'r')
    hist = f.Get(histKey)
    nT.append(GetThickness(fname))
    nDep.append(hist.GetMean())
    nDepError.append(hist.GetMeanError())
  # Plotting
  plt.errorbar(gT,gDep,yerr=gDepError,fmt='r+')
  # overplotting is the default in modern matplotlib; plt.hold was removed
  plt.errorbar(nT,nDep,yerr=nDepError,fmt='go')
  plt.xlabel("Thickness (mm)")
  plt.ylabel("Average Energy Deposition (MeV)")
  plt.legend(["Co-60","Cf-252"])
  plt.xscale("log")
  plt.yscale("log")
  plt.grid(True)
  plt.savefig(figureName)
Example #19
def setDiffsPlot(CF,d0,ySym = True):
    CFtext = [str(j)+',' for j in CF]
    bText = [CFtext[0],r'...,$a_i$+c,...',CFtext[-1]]
    CFtext = ''.join(CFtext)
    bText= ''.join(bText)
    CFtext = '[' + CFtext[:-1] + ']'
    bText = '[' + bText[:-1] + ']'
    print(CFtext)

    plt.ylabel(r'$d^{crit}_b - d^{crit}_a$',fontsize=20)
    plt.xlabel(r'Element changed',fontsize=20)
    xmin, xmax, ymin, ymax = plt.axis()
    if ySym:
        plt.yscale('symlog', linthresh=1e-15)
        yLoc = [y*ymax for y in [.1, .01, .001]]
    else:
        yLoc = [y*(ymax-ymin)+ymin for y in [0.95, 0.85, 0.75]]
    plt.plot([0,xmax],[0,0],'k--',label='_')
    plt.text((xmax-xmin)*0.15,yLoc[0],r'$a = [a_i] =$'+CFtext,fontsize=15)
    plt.text((xmax-xmin)*0.15,yLoc[1],r'$b_i = $'+bText,fontsize=15)
    plt.text((xmax-xmin)*0.15,yLoc[2],r'$d_a^{crit} = $'+str(float(d0)),fontsize=15)
    plt.legend(loc='best')
    plt.xscale('symlog', linthresh=1e-14)
    # plt.yscale('log')
    plt.show()
Example #20
    def __save(self, n, plot, sfile):
        p.figure(figsize=sfile)

        p.xlabel(plot.xlabel)
        p.ylabel(plot.ylabel)
        p.xscale(plot.xscale)
        p.yscale(plot.yscale)
        p.grid()
        for curvetype, args, kwargs in plot.curves:
            if curvetype == "plot":
                p.plot(*args, **kwargs)
            elif curvetype == "imshow":
                p.imshow(*args, **kwargs)
            elif curvetype == "hist":
                p.hist(*args, **kwargs)
            elif curvetype == "bar":
                p.bar(*args, **kwargs)

        p.gca().set_aspect(plot.aspect)
        if plot.legend:
            p.legend(shadow=0, loc=plot.loc)

        if not os.path.isdir(plot.dir):
            os.mkdir(plot.dir)
        if plot.pgf:
            p.savefig(plot.dir + plot.name + ".pgf")
            print(plot.name + ".pgf")
        if plot.pdf:
            p.savefig(plot.dir + plot.name + ".pdf", bbox_inches="tight")
            print(plot.name + ".pdf")

        p.close()
Example #21
def main():
    counter = n36.main()
    plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(sorted(list(counter.values()), reverse=True), range(1, len(list(counter)) + 1))
    plt.savefig('fig39.png')
Example #22
def nlp39(words):
    # Zipf's law: https://ja.wikipedia.org/wiki/%E3%82%B8%E3%83%83%E3%83%97%E3%81%AE%E6%B3%95%E5%89%87

    # vocabulary frequencies
    freq = {}
    for word in words:
        if word['pos'] == '動詞':  # part-of-speech tag: verb
            if not word['base'] in freq:
                freq[word['base']] = 1
            else:
                freq[word['base']] += 1

    count = 1
    x = []
    y = []
    for word in sorted(freq, key=freq.get, reverse=True):
        x.append(count)
        y.append(freq[word])
        count += 1

    plt.xscale('log')
    plt.yscale('log')

    plt.plot(x, y, 'o')
    plt.show()
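nlp39 expects morphemes as dicts with 'pos' and 'base' keys (the MeCab-style format used throughout this exercise series); a tiny hand-made input:

words = [{'pos': '動詞', 'base': 'する'},
         {'pos': '動詞', 'base': 'する'},
         {'pos': '名詞', 'base': '猫'}]
nlp39(words)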
Example #23
def make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):
    cmap_positive = plt.get_cmap('Reds')
    cmap_negative = plt.get_cmap('Blues')

    xx = l2_penalty_list
    plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')

    table_positive_words = table.filter_by(column_name='word', values=positive_words)
    table_negative_words = table.filter_by(column_name='word', values=negative_words)
    del table_positive_words['word']
    del table_negative_words['word']

    for i in range(len(positive_words)):
        color = cmap_positive(0.8*((i+1)/(len(positive_words)*1.2)+0.15))
        plt.plot(xx, table_positive_words[i:i+1].to_numpy().flatten(),
                 '-', label=positive_words[i], linewidth=4.0, color=color)

    for i in range(len(negative_words)):
        color = cmap_negative(0.8*((i+1)/(len(negative_words)*1.2)+0.15))
        plt.plot(xx, table_negative_words[i:i+1].to_numpy().flatten(),
                 '-', label=negative_words[i], linewidth=4.0, color=color)

    plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)
    plt.axis([1, 1e5, -1, 2])
    plt.title('Coefficient path')
    plt.xlabel(r'L2 penalty ($\lambda$)')
    plt.ylabel('Coefficient value')
    plt.xscale('log')
    plt.rcParams.update({'font.size': 18})
    plt.tight_layout()
Example #24
def Validation():
  numSamples = 1000000
  
  theta = np.random.rand(numSamples)*np.pi
  ECo60 = np.array([1.117,1.332])
  Ef0,Ee0 = Compton(ECo60[0],theta)
  Ef1,Ee1 = Compton(ECo60[1],theta)
  dSdE0 = diffXSElectrons(ECo60[0],theta)
  dSdE1 = diffXSElectrons(ECo60[1],theta)

  # Sampling Values
  values = list()
  piMax = np.max([dSdE0,dSdE1])
  while (len(values) < numSamples):
    values.append(SampleRejection(piMax,ComptonScattering))
  # Binning the data
  bins = np.logspace(-3,0.2,100)
  counts = np.histogram(values,bins)
  counts = counts[0]/float(len(values))
  binCenters = 0.5*(bins[1:]+bins[:-1])
  
  # Plotting
  plt.figure()
  plt.plot(binCenters, counts, drawstyle='steps-mid')  # ls='steps' is no longer a valid linestyle
  #plt.bar(binCenters,counts,align='center')
  plt.grid(True)
  plt.xlim((1E-3,1.4))
  plt.xlabel('Electron Energy (MeV)')
  plt.ylabel('Frequency per Photon')
  plt.yscale('log')
  plt.xscale('log')
  plt.savefig('ValComptonScatteringXS.png')
Example #25
def plotCurves(losses,rateOfExceedance,return_periods,lossLevels):

    plt.figure(1, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
    plt.scatter(losses,rateOfExceedance,s=20)
    if len(return_periods) > 0:
        annual_rate_exc = 1.0/np.array(return_periods)
        for rate in annual_rate_exc:
            if rate > min(rateOfExceedance):
                plt.plot([min(losses),max(losses)],[rate,rate],color='red') 
                plt.annotate('%.6f' % rate,xy=(max(losses),rate),fontsize = 12)

    plt.yscale('log')
    plt.xscale('log')
    plt.ylim([min(rateOfExceedance),1])
    plt.xlim([min(losses),max(losses)])
    plt.xlabel('Losses', fontsize = 16)
    plt.ylabel('Annual rate of exceedance', fontsize = 16)

    setReturnPeriods = 1/rateOfExceedance
    plt.figure(2, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
    plt.scatter(setReturnPeriods,losses,s=20)
    if len(return_periods) > 0:
        for period in return_periods:
            if period < max(setReturnPeriods):
                plt.plot([period,period],[min(losses),max(losses)],color='red') 
                plt.annotate(str(period),xy=(period,max(losses)),fontsize = 12)

    plt.xscale('log')
    plt.xlim([min(setReturnPeriods),max(setReturnPeriods)])
    plt.ylim([min(losses),max(losses)])
    plt.xlabel('Return period (years)', fontsize = 16)
    plt.ylabel('Losses', fontsize = 16)
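A sketch with synthetic loss data (assuming numpy as np); rateOfExceedance must stay positive for the log axes:

import numpy as np

losses = np.sort(np.random.lognormal(mean=10, sigma=1, size=500))
rateOfExceedance = 1.0 - np.arange(1, 501) / 501.0
plotCurves(losses, rateOfExceedance, [100, 475, 975], [])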
Example #26
    def plot(self, debug = False):
        """plot figures for population, nuisance parameters"""
        # first figure out what scheme is used
        self.list_scheme()

        # next get the MBAR sampling done
        self.MBAR_analysis()

        # load in precomputed P and dP from MBAR analysis
        pops0, pops1   = self.P_dP[:,0], self.P_dP[:,self.K-1]
        dpops0, dpops1 = self.P_dP[:,self.K], self.P_dP[:,2*self.K-1]
        t0 = self.traj[0]
        t1 = self.traj[self.K-1]

        # Figure Plot SETTINGS
        label_fontsize = 12
        legend_fontsize = 10
        fontfamily={'family':'sans-serif','sans-serif':['Arial']}
        plt.rc('font', **fontfamily)

        # determine number of rows and columns (integer division for subplot counts)
        if (len(self.scheme)+1)%2 != 0:
            c,r = 2, (len(self.scheme)+2)//2
        else:
            c,r = 2, (len(self.scheme)+1)//2
        plt.figure( figsize=(4*c,5*r) )
        # Make a subplot in the upper left
        plt.subplot(r,c,1)
        plt.errorbar( pops0, pops1, xerr=dpops0, yerr=dpops1, fmt='k.')
        # overplotting is the default; plt.hold was removed from matplotlib
        plt.plot([1e-6, 1], [1e-6, 1], color='k', linestyle='-', linewidth=2)
        plt.xlim(1e-6, 1.)
        plt.ylim(1e-6, 1.)
        plt.xlabel('$p_i$ (exp)', fontsize=label_fontsize)
        plt.ylabel('$p_i$ (sim+exp)', fontsize=label_fontsize)
        plt.xscale('log')
        plt.yscale('log')
        # label key states
        for i in range(len(pops1)):
            if (i==0) or (pops1[i] > 0.05):
                plt.text( pops0[i], pops1[i], str(i), color='g' )
        for k in range(len(self.scheme)):
            plt.subplot(r,c,k+2)
            plt.step(t0['allowed_'+self.scheme[k]], t0['sampled_'+self.scheme[k]], 'b-')
            plt.xlim(0,5)
            plt.step(t1['allowed_'+self.scheme[k]], t1['sampled_'+self.scheme[k]], 'r-')
            plt.legend(['exp', 'sim+exp'], fontsize=legend_fontsize)
            if self.scheme[k].find('cs') == -1:
                plt.xlabel("$\%s$"%self.scheme[k], fontsize=label_fontsize)
                plt.ylabel("$P(\%s)$"%self.scheme[k], fontsize=label_fontsize)
                plt.yticks([])
            else:
                plt.xlabel("$\sigma_{%s}$"%self.scheme[k][6:],fontsize=label_fontsize)
                plt.ylabel("$P(\sigma_{%s})$"%self.scheme[k][6:],fontsize=label_fontsize)
                plt.yticks([])

        plt.tight_layout()
        plt.savefig(self.picfile)
Example #27
def plot_netload_cache_size_sensitivity(show=True, save=True):
    """
    Plot sensitivity of network load vs cache size
    """
    # Parameters
    T = TOPOLOGIES
    C = C_RANGE
    A = ALPHA_C_PLOT
    LT = LINK_TYPES
    # Execution
    for t in T:
        for a in [str(al) for al in A]:
            for lt in LT:
                plt.figure()
                plt.title('Network load: LINK=%s T=%s A=%s' % (lt, t, a))
                plt.ylabel('Average link load (Mbps)')
                plt.xlabel('Cache to population ratio')
                plt.xscale('log')
                S = SummaryAnalyzer(path.join(SUMMARY_LOG_DIR, 'SUMMARY_NETWORK_LOAD.txt'))
                for strategy in NETLOAD_STRATEGIES:
                    cond = (S.param['T'] == t) &  (S.param['A'] == a) &  (S.param['S'] == strategy) & (S.data['LinkType'] == lt)
                    plt.plot(S.param['C'][cond], S.data['NetworkLoad'][cond], style_dict[strategy])
                plt.xlim(min(C), max(C))
                plt.legend(tuple(netload_legend_list), prop={'size': LEGEND_SIZE}, loc='lower left')
                if show: plt.show()
                if save: plt.savefig(path.join(GRAPHS_DIR ,'NETLOAD_C_SENS_LT=%s@T=%s@A=%s.pdf' % (lt, t, a)), bbox_inches='tight')
Example #28
def plot(objects, xscales={}, yscales={}, title=""):
    '''
    Plots the current state of objects in subplots.
    Define xscales and yscales as dicts keyed by subplot index.
    '''
    from matplotlib.pyplot import plot, show, close, subplot,\
        xscale, yscale, gcf
    if not isinstance(objects, list):
        objects = [objects]

    l = len(objects)
    first = round(l / 2.0) + 1
    second = round(l / 2.0)
    for i in range(0, l):
        subplot(first, second, i + 1)
        if i in xscales:
            xscale(xscales[i])
        if i in yscales:
            yscale(yscales[i])
        fig = gcf()
        fig.suptitle(title, fontsize="x-large")

        values = objects[i].get_y_values()
        x, y = values.shape

        for j in range(x):
            plot(objects[i].get_t_values(), values[j, :])

    show()
    close()
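The objects passed in only need get_t_values() and get_y_values(); a mock satisfying that interface (assuming get_y_values returns an array of shape (n_curves, n_points)):

import numpy as np

class Trajectory:
    def __init__(self, t, ys):
        self._t, self._ys = t, ys
    def get_t_values(self):
        return self._t
    def get_y_values(self):
        return self._ys

t = np.linspace(0.1, 10, 100)
objs = [Trajectory(t, np.vstack([np.sin(t), np.cos(t)])),
        Trajectory(t, np.vstack([np.exp(-t)]))]
plot(objs, yscales={1: 'log'}, title='demo')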
Example #29
def pareto_graph(database, objectives=None):
	'''Constructs a visualization of the movement of the best front over generations

	The argument 'objectives' must be a list indicating the indices of the fitness values to be used.
	The first two will be consumed. If the list has less than two elements, or if is not given, the
	graph will be produced using the first two fitness values.
	'''
	if objectives is None or len(objectives) < 2:
		objectives = [0, 1]

	generations = []
	if database.properties['highest_population'] < FRONT_COUNT:
		generations = list(range(1, database.properties['highest_population'] + 1))
	else:
		step = database.properties['highest_population'] / FRONT_COUNT
		generations = [round(i * step) for i in range(1, FRONT_COUNT + 1)]

	for i, gen in enumerate(generations, start=1):
		database.select(gen)
		individual_data = [val for key, val in database.load_report().items()
			if key.startswith('I') and val['rank'] == 1]
		x_values = [val['fitness'][objectives[0]] for val in individual_data]
		y_values = [val['fitness'][objectives[1]] for val in individual_data]
		plt.plot(x_values, y_values,
			color=str((FRONT_COUNT - i) / FRONT_COUNT),
			linestyle='None',
			marker='o',
			markeredgecolor='white')

	plt.title('Movement of best front')
	plt.xscale('log')
	plt.yscale('log')
	plt.xlabel(database.properties['objective_names'][objectives[0]])
	plt.ylabel(database.properties['objective_names'][objectives[1]])
	plt.show()
Example #30
    def check_hod(self, z, prop):
        data = np.genfromtxt(LOCATION + "/data/" + prop + "z" + str(z))
        if prop == "ncen":
            if PLOT:
                plt.clf()
                plt.plot(self.hod.hmf.M,
                         self.hod.n_cen,
                         label="mine")
                plt.plot(data[:, 0] * self.hod.cosmo.h, data[:, 1], label="charles")
                plt.legend()
                plt.xscale('log')
                plt.yscale('log')
                plt.savefig(join(pref, "ncen" + prop + "z" + str(z) + ".pdf"))
            assert max_diff_rel(self.hod.n_cen, data[:, 1], 0.01)
        elif prop == "nsat":
            if PLOT:
                plt.clf()
                plt.plot(self.hod.hmf.M,
                         self.hod.n_sat,
                         label="mine")
                plt.plot(data[:, 0] * self.hod.cosmo.h, data[:, 1], label="charles")
                plt.legend()
                plt.xscale('log')
                plt.yscale('log')
                plt.savefig(join(pref, "nsat" + prop + "z" + str(z) + ".pdf"))
            assert max_diff_rel(self.hod.n_sat, data[:, 1], 0.01)
Example #31
def figure11():
    original_maze = Maze()
    params_prioritized = DynaParams()
    params_prioritized.theta = 0.0001
    params_prioritized.planning_steps = 5
    params_prioritized.alpha = 0.5
    params_prioritized.gamma = 0.95

    params_dyna = DynaParams()
    params_dyna.planning_steps = 5
    params_dyna.alpha = 0.5
    params_dyna.gamma = 0.95
    # track the # of backups
    backups = defaultdict(list)

    runs = 10

    params = [params_dyna,params_dyna]

    # set up models for planning
    models = [HeuristicModel,TrivialModel]
    method_names = ['Focused Dyna','Random Dyna']

    # due to limitation of my machine, I can only perform experiments for 5 mazes
    # assuming the 1st maze has w * h states, then k-th maze has w * h * k * k states
    num_of_mazes = 1

    # build all the mazes
    mazes = [original_maze.extend_maze(i*2) for i in range(1, num_of_mazes + 1)]
    methods = [dyna_h,dyna_q]

    for run in tqdm(range(0, runs)):
        for i in range(0, len(method_names)):
            for mazeIndex, maze in zip(range(0, len(mazes)), mazes):
                #print('run %d, %s, maze size %d' % (run, method_names[i], maze.WORLD_HEIGHT * maze.WORLD_WIDTH))

                # initialize the state action values
                q_value = np.zeros(maze.q_size)

                # track steps / backups for each episode
                stepss = []

                # generate the model
                model = models[i]()

                # play for an episode
                while True:
                    stepss.append(methods[i](q_value, model, maze, params[i]))

                    # print best actions w.r.t. current state-action values
                    # printActions(currentStateActionValues, maze)
                    # check whether the (relaxed) optimal path is found
                    if check_path(q_value, maze):
                        break

                # update the total steps / backups for this maze
                backups[method_names[i]].extend(stepss)

    # Dyna-Q performs several backups per step
    for i in method_names:
        plt.plot(sorted(backups[i], reverse=True), label=i)
    plt.xlabel('Backups')
    plt.ylabel('Steps to Goal')
    plt.ylim((0, 10000))
    plt.xscale('log')
    plt.legend()

    plt.savefig('figure11.png')
    plt.close()
Example #32
df.rating.hist(bins=30, alpha=0.4)
plt.axvline(meanrat, 0, 0.75, color='r', label='Mean')
plt.xlabel("average rating of book")
plt.ylabel("Counts")
plt.title("Ratings Histogram")
plt.legend()
#sns.despine()

# In[32]:

df.review_count.hist(bins=np.arange(0, 40000, 400))

# In[33]:

df.review_count.hist(bins=100)
plt.xscale("log")

# In[34]:

plt.scatter(df.year, df.rating, lw=0, alpha=.08)
plt.xlim([1900, 2010])
plt.xlabel("Year")
plt.ylabel("Rating")

# In[35]:

alist = [1, 2, 3, 4, 5]

# In[36]:

asquaredlist = [i * i for i in alist]
Example #33
         horizontalalignment='center',
         verticalalignment='center',
         color=xy_color_dict[xaxis],
         backgroundcolor='white')
plt.text(5e5,
         5e5 * 0.500,
         '2x',
         fontsize=14,
         horizontalalignment='center',
         verticalalignment='center',
         color=xy_color_dict[xaxis],
         backgroundcolor='white')

# main plot
plt.scatter(df[xvar], df[yvar], s=80, edgecolor='black', facecolor='none')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.axis(axis_limits)

# highlight genes
pathway_list = pathway_csv.split(',')
pathway_color = color_csv.split(',')
for i in range(len(pathway_list)):
    # get VF list from pathway
    gene_list = gene_dict[pathway_list[i]]
    # gene colors
    gene_color = sns.xkcd_rgb[pathway_color[i]]

    plt.scatter(df[xvar][gene_list],
Example #34
ax.set_xlabel('xyz')
# set x axis range
plt.xlim(0,1)                                                        
ax.set_xlim([0., 1])                                                 
# x,y axis start from same position
plt.margins(0)                                                       
# move the x-axis to where y=0
ax.spines['bottom'].set_position('zero')   # or: plt.margins(0)
# To shift the tick labels relative to the ticks use pad
ax.tick_params(which='both', direction='out', pad=5)  
# set log scale
ax.set_yscale('log')
# turn off minor ticks
plt.minorticks_off()
plt.xscale('log', subs=[2, 3, 4, 5, 6, 7, 8, 9])  # subsx was renamed to subs
ax.minorticks_off()

############################
#----------marker----------#
############################

# ================    ===============================
# character           description
# ================    ===============================
   # -                solid line style
   # --               dashed line style
   # -.               dash-dot line style
   # :                dotted line style
   # .                point marker
   # ,                pixel marker
Example #35
def timeseries_smooth(adata,
                      genes='none',
                      gene_symbols='none',
                      key='louvain',
                      groups='all',
                      style='-b',
                      n_restarts_optimizer=10,
                      likelihood_landscape=False,
                      normalize_y=False,
                      noise_level=0.5,
                      noise_level_bounds=(1e-2, 1e+1),
                      length_scale=1,
                      length_scale_bounds=(1e-2, 1e+1),
                      save='none',
                      title='long'):
    """
    Plot a timeseries of some genes in pseudotime
    
    Keyword arguments:
    adata -- anndata object
    genes -- list of genes. If 'none', the first 5 genes are plotted
    gene_symbols -- variable annotation. If 'none', the index is used
    key -- observation annotation. 
    groups -- basically branches, chosen from the annotations in key
    style -- line plotting style
    """

    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.colors as colors
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, WhiteKernel

    # select one branch
    if groups != 'all':
        adata_selected = adata[np.in1d(adata.obs[key], groups)]
    else:
        adata_selected = adata

    # select genes
    if genes == 'none':

        # no genes specified, we just use the first 5
        genes = adata_selected.var_names.values[0:5]
        m = {'Mapped': genes, 'Original': genes}

    elif gene_symbols != 'none':

        # a gene annotation is used, we map the gene names
        mapping_table = pd.DataFrame(adata_selected.var[gene_symbols])
        name_mapping = mapping_table.set_index(gene_symbols)
        name_mapping['Ensembl'] = mapping_table.index
        genes_mapped = name_mapping.loc[genes, :]

        # save in dict
        m = {'Mapped': genes_mapped['Ensembl'], 'Original': genes}
    else:
        m = {'Mapped': genes, 'Original': genes}

    # construct a look up table
    gene_table = pd.DataFrame(data=m)

    # extract the pseudotime
    time = adata_selected.obs['dpt_pseudotime']

    # construct a data frame which has time as index
    exp_data = pd.DataFrame(data=adata_selected[:, gene_table['Mapped']].X,
                            index=time,
                            columns=[gene_table['Original'].values])

    # sort according to pseudotime
    exp_data.sort_index(inplace=True)

    # remove the last entry
    (m, n) = exp_data.shape
    exp_data = exp_data.iloc[:m - 1, :]

    # loop counter
    i = 0

    # loop over all genes we wish to plot
    for index, row in gene_table.iterrows():

        # select data
        data_selected = exp_data.loc[:, row['Original']].reset_index()

        # create the labels
        X = np.atleast_2d(data_selected['dpt_pseudotime'].values)

        # create the targets
        y = data_selected[row['Original']].values.ravel()

        # Mesh the input space for evaluations of the prediction and
        # its MSE
        x = np.atleast_2d(np.linspace(0, 1, 1000)).T

        # Initiate a Gaussian process model. We use a sum of two kernels here; this allows
        # us to estimate the noise level via optimisation of the marginal likelihood as well
        kernel = 1.0 * RBF(length_scale=length_scale, length_scale_bounds=length_scale_bounds) \
        + WhiteKernel(noise_level=noise_level, noise_level_bounds=noise_level_bounds)
        gp = GaussianProcessRegressor(
            kernel=kernel,
            alpha=0.0,
            n_restarts_optimizer=n_restarts_optimizer,
            normalize_y=normalize_y).fit(X, y)

        # obtain a prediction from this model. Also return the covariance matrix, so we can calculate
        # confidence intervals
        y_mean, y_cov = gp.predict(x, return_cov=True)

        # plot current genes
        plt.figure(num=i, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
        plt.plot(x, y_mean, 'k', lw=3, zorder=9, label='Prediction')
        plt.fill_between(x.ravel(),
                         y_mean - np.sqrt(np.diag(y_cov)),
                         y_mean + np.sqrt(np.diag(y_cov)),
                         alpha=0.5,
                         color='k')
        plt.scatter(X,
                    y,
                    c='r',
                    s=50,
                    zorder=10,
                    edgecolors=(0, 0, 0),
                    label='Observation')
        if title == 'long':
            plt.title(
                "Gene: %s\nInitial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
                % (row['Original'], kernel, gp.kernel_,
                   gp.log_marginal_likelihood(gp.kernel_.theta)))
        else:
            plt.title("Gene: %s" % (row['Original']))
        plt.xlabel('$t_{pseudo}$')
        plt.ylabel('Expression')
        plt.legend(loc='upper left')
        if save != 'none':
            plt.savefig(save + row['Original'] + '_dynamics.pdf')

        if likelihood_landscape == True:

            # Plot LML landscape
            i += 1
            plt.figure(num=i,
                       figsize=(8, 6),
                       dpi=80,
                       facecolor='w',
                       edgecolor='k')
            theta0 = np.logspace(-2, 3, 49)  # length scale
            theta1 = np.logspace(-1.5, 0, 50)  # Noise level
            Theta0, Theta1 = np.meshgrid(theta0, theta1)
            LML = [[
                gp.log_marginal_likelihood(
                    np.log([0.36, Theta0[i, j], Theta1[i, j]]))
                for i in range(Theta0.shape[0])
            ] for j in range(Theta0.shape[1])]
            LML = np.array(LML).T

            vmin, vmax = (-LML).min(), (-LML).max()
            #vmax = 50
            level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), 50),
                              decimals=1)
            plt.contour(Theta0,
                        Theta1,
                        -LML,
                        levels=level,
                        norm=colors.LogNorm(vmin=vmin, vmax=vmax))
            plt.colorbar()
            plt.xscale("log")
            plt.yscale("log")
            plt.xlabel("Length-scale")
            plt.ylabel("Noise-level")
            plt.title("Log-marginal-likelihood")
            #plt.tight_layout()
            plt.show()

        # increase loop counter
        i += 1
Example #36
def ahmet_ali_nuhoglu_21602149_hw2(question):
    if question == '1':

        print("Question 1")
        print("Part A")

        with h5py.File('hw3_data2.mat', 'r') as file:
            Xn, Yn = list(file['Xn']), list(file['Yn'])

        Xn = np.array(Xn).T
        Yn = np.array(Yn).flatten()

        def ridge_regression(X, y, lmbd):
            return np.linalg.inv(
                X.T.dot(X) + lmbd * np.identity(np.shape(X)[1])).dot(
                    X.T).dot(y)

        def r_squared(Y, pred):
            return (np.corrcoef(Y, pred)[0, 1])**2

        def cross_validation(X, y, K, lmbd):

            part_len = int(np.size(y) / K)

            valid_means_d = dict()
            test_means_d = dict()

            for i in range(K):
                valid_data_start = i * part_len
                test_data_start = (i + 1) * part_len
                train_data_start = (i + 2) * part_len

                train_data_ind, test_data_ind, valid_data_ind = [], [], []

                for j in range(valid_data_start, test_data_start):
                    valid_data_ind.append(j % np.size(y))

                for j in range(test_data_start, train_data_start):
                    test_data_ind.append(j % np.size(y))

                for j in range(train_data_start,
                               valid_data_start + np.size(y)):
                    train_data_ind.append(j % np.size(y))

                x_valid, x_test, x_train = X[valid_data_ind], X[
                    test_data_ind], X[train_data_ind]
                y_valid, y_test, y_train = y[valid_data_ind], y[
                    test_data_ind], y[train_data_ind]

                for l in lmbd:
                    weight = ridge_regression(x_train, y_train, l)

                    valid_means_d.setdefault(l, []).append(
                        r_squared(y_valid, x_valid.dot(weight)))
                    test_means_d.setdefault(l, []).append(
                        r_squared(y_test, x_test.dot(weight)))

            valid_means_d = dict(
                (lmbd, np.mean(val)) for lmbd, val in valid_means_d.items())
            test_means_d = dict(
                (lmbd, np.mean(val)) for lmbd, val in test_means_d.items())

            return valid_means_d, test_means_d

        lambda_values = np.logspace(0, 12, num=500, base=10)
        dict_valid, dict_test = cross_validation(Xn, Yn, 10, lambda_values)

        lambda_opt = max(dict_valid, key=lambda k: dict_valid[k])

        x_val, y_val = zip(*sorted(dict_valid.items()))
        x_tst, y_tst = zip(*sorted(dict_test.items()))

        plt.figure()
        plt.plot(x_tst, y_tst)
        plt.plot(x_val, y_val)
        plt.legend([
            'Test Data',
            'Validation Data',
        ])
        plt.ylabel(r'$R^2$')
        plt.xlabel(r'$\lambda$')
        plt.title(r'$R^2$' ' vs ' r'$\lambda$')
        plt.xscale('log')
        plt.grid()
        plt.show(block=False)

        print("Optimal Lambda Value: ", lambda_opt)

        print("Part B")

        np.random.seed(3)

        def bootstrap(iter_num, x, y, lmbd):
            weight_new = []
            for i in range(iter_num):
                new_ind = np.random.choice(np.arange(np.size(y)), np.size(y))
                x_new, y_new = Xn[new_ind], Yn[new_ind]
                weight_r = ridge_regression(x_new, y_new, lmbd)
                weight_new.append(weight_r)
            return weight_new

        def find_significant_w(arr_mean, arr_std):
            p_values = 2 * (1 - norm.cdf(np.abs(arr_mean / arr_std)))
            significant_weights = np.where(p_values < 0.05)
            return significant_weights

        weight_new = []
        weight_new = bootstrap(500, Xn, Yn, 0)

        weight_new_mean = np.mean(weight_new, axis=0)
        weight_new_std = np.std(weight_new, axis=0)
        plt.figure(figsize=(20, 10))
        plt.grid()
        plt.errorbar(np.arange(1, 101),
                     weight_new_mean,
                     yerr=2 * weight_new_std,
                     ecolor='r',
                     fmt='o-k',
                     capsize=5)
        plt.ylabel(r'Resampled Weight Values')
        plt.xlabel(r'Weight Indices')
        plt.title(r'Ridge Regression with ' r'$\lambda = 0$' '\nand 95% CI')
        plt.show(block=False)
        print(
            "Indices of the Resampled Weights which are significantly different than zero:"
        )
        print(find_significant_w(weight_new_mean, weight_new_std)[0])

        print("Part C")

        weight_new_ridge = []
        weight_new_ridge = bootstrap(500, Xn, Yn, lambda_opt)
        weight_newR_mean = np.mean(weight_new_ridge, axis=0)
        weight_newR_std = np.std(weight_new_ridge, axis=0)
        plt.figure(figsize=(20, 10))
        plt.grid()
        plt.errorbar(np.arange(1, 101),
                     weight_newR_mean,
                     yerr=2 * weight_newR_std,
                     ecolor='r',
                     fmt='o-k',
                     capsize=5)
        plt.ylabel(r'Resampled Weight Values')
        plt.xlabel(r'Weight Indices')
        plt.title(r'Ridge Regression with '
                  r'$\lambda = \lambda_{opt}$'
                  '\nand 95% CI')
        plt.show(block=False)
        print(
            "Indices of the Resampled Weights which are significantly different than zero:"
        )
        print(find_significant_w(weight_newR_mean, weight_newR_std)[0])

    elif question == '2':

        print("Question 2")

        print("Part A")

        with h5py.File('hw3_data3.mat', 'r') as file:
            pop1, pop2 = np.array(list(file['pop1'])).flatten(), np.array(
                list(file['pop2'])).flatten()

        def bootstrap(iter_num, x, seed=6):
            np.random.seed(seed)
            x_new = []
            for i in range(iter_num):
                new_ind = np.random.choice(np.arange(np.size(x)), np.size(x))
                x_sample = x[new_ind]
                x_new.append(x_sample)
            return np.array(x_new)

        def mean_difference(x, y, iterations):
            xy_concat = np.concatenate((x, y))
            xy_boot = bootstrap(iterations, xy_concat)
            x_boot = np.zeros((iterations, np.size(x)))
            y_boot = np.zeros((iterations, np.size(y)))
            for i in range(np.size(xy_concat)):
                if i < np.size(x):
                    x_boot[:, i] = xy_boot[:, i]
                else:
                    y_boot[:, i - np.size(x)] = xy_boot[:, i]
            x_means = np.mean(x_boot, axis=1)
            y_means = np.mean(y_boot, axis=1)
            mean_diff = x_means - y_means

            return mean_diff

        mean_diff = mean_difference(pop1, pop2, 10000)

        def find_z_and_p(x, mu):
            mu_0 = np.mean(x)
            sigma = np.std(x)
            z = np.abs((mu - mu_0) / sigma)
            p = (1 - norm.cdf(z))
            return z, p

        plt.figure()
        plt.title('Population Mean Difference')
        plt.xlabel('Difference of Means')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(mean_diff, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z, p = find_z_and_p(mean_diff, np.mean(pop1) - np.mean(pop2))
        print("z-score: ", z)
        print("two sided p-value: ", 2 * p)

        print("Part B")

        with h5py.File('hw3_data3.mat', 'r') as file:
            vox1, vox2 = np.array(list(file['vox1'])).flatten(), np.array(
                list(file['vox2'])).flatten()

        vox1_boot = bootstrap(10000, vox1)
        vox2_boot = bootstrap(10000, vox2)

        corr_boot = np.zeros(10000)
        for i in range(10000):
            corr_boot[i] = np.corrcoef(vox1_boot[i], vox2_boot[i])[0, 1]

        corr_mean = np.mean(corr_boot)
        sorted_corr = np.sort(corr_boot)
        dif = np.size(sorted_corr) / 40
        corr_lower = sorted_corr[int(dif)]
        corr_upper = sorted_corr[int(np.size(sorted_corr) - dif)]
        print("Mean: ", corr_mean)
        print("%95 CI: (", corr_lower, ", ", corr_upper, ")")

        zero_corr = np.where(corr_boot < 10**(-2))
        print("Number of elements with zero correlation: ", np.size(zero_corr))

        print("Part C")

        vox1_indep = bootstrap(10000, vox1, 13)
        vox2_indep = bootstrap(10000, vox2, 5)

        corr_boot_indep = np.zeros(10000)
        for i in range(10000):
            corr_boot_indep[i] = np.corrcoef(vox1_indep[i], vox2_indep[i])[0,
                                                                           1]

        plt.figure()
        plt.title('Correlation between vox1 and vox2')
        plt.xlabel('Correlation (x)')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(corr_boot_indep, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z, p = find_z_and_p(corr_boot_indep, np.corrcoef(vox1, vox2)[0, 1])
        print("z-score: ", z)
        print("one sided p value: ", p)

        print("Part D")

        with h5py.File('hw3_data3.mat', 'r') as file:
            building, face = np.array(list(
                file['building'])).flatten(), np.array(list(
                    file['face'])).flatten()

        mean_diff_d = np.zeros(10000)
        diff_options = np.zeros(4)
        choices = np.zeros(20)

        for i in range(10000):
            for j in range(20):
                ind = np.random.choice(20)
                diff_options[0:2] = 0  # indices 0 and 1 are the 'no difference' options
                diff_options[2] = building[ind] - face[ind]
                diff_options[3] = -1 * diff_options[2]
                choices[j] = diff_options[np.random.choice(4)]
            mean_diff_d[i] = np.mean(choices)

        plt.figure()
        plt.title(
            'Difference of Means\nBuilding - Face\n(Subject Population = Same)'
        )
        plt.xlabel('Difference of Means (x)')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(mean_diff_d, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z, p = find_z_and_p(mean_diff_d, np.mean(building) - np.mean(face))
        print("z-score: ", z)
        print("Two sided p value: ", 2 * p)

        print("Part E")

        mean_diff_e = mean_difference(building, face, 10000)

        plt.figure()
        plt.title(
            'Difference of Means\nBuilding - Face\n(Subject Population = Different)'
        )
        plt.xlabel('Difference of Means (x)')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(mean_diff_e, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z_e, p_e = find_z_and_p(mean_diff_e, np.mean(building) - np.mean(face))
        print("z-score: ", z_e)
        print("Two sided p value: ", 2 * p_e)
Example #37
    def PlotInject(self,filename=None):
     '''
     Function that uses the global parameters str_min and str_step as well as the global results sigma_ar to
     generate a plot.
     
     Argument :
         filename : Name of the file in which the plot will be saved. If None, the plot will be just shown
                    but not saved. Defaults to None.
     '''
     
     # Get the x-values (signal strength)
     if(self.str_scale=='lin'):
         sig_str = np.arange(self.str_min,self.str_min+self.str_step*len(self.sigma_ar),step=self.str_step)
     else:
         sig_str = np.array([i%10*10**(self.str_min+i//10) for i in range(len(self.sigma_ar)+len(self.sigma_ar)//10+1) if i%10!=0])
     
     # If filename is not None and log scale must check
     if(filename!=None and self.str_scale=='log'):
         if(type(filename)==type('str')):
             print('WARNING : log plot for signal injection will not be saved !')
             nolog = True
         else:
             nolog = False
     
     # Do the plot
     F = plt.figure(figsize=(12,8))
     plt.title('Significance vs signal strength')
     plt.errorbar(sig_str,self.sigma_ar[:,0],
                  xerr=0,yerr=[self.sigma_ar[:,1],self.sigma_ar[:,2]],
                  linewidth=2,marker='o')
     plt.xlabel('Signal strength',size='large')
     plt.ylabel('Significance',size='large')
     
     if(filename is None):
         plt.show()
     else:
         if(self.str_scale=='log' and nolog is False):
             plt.savefig(filename[0],bbox_inches='tight')
         else:
             plt.savefig(filename,bbox_inches='tight')
         plt.close(F)
     
     # If log scale, do also a log plot
     if(self.str_scale=='log'):
         F = plt.figure(figsize=(12,8))
         plt.title('Significance vs signal strength (log scale)')
         plt.errorbar(sig_str,self.sigma_ar[:,0],
                      xerr=0,yerr=[self.sigma_ar[:,1],self.sigma_ar[:,2]],
                      linewidth=2,marker='o')
         plt.xlabel('Signal strength',size='large')
         plt.ylabel('Significance',size='large')
         plt.xscale('log')
         
         if(filename is None):
             plt.show()
         else:
             if(nolog is False):
                 plt.savefig(filename[1],bbox_inches='tight')
             plt.close(F)
     
     return
Example #38
# See documentation for description of each column

M = pd.read_csv("M.dat",
                sep=' ',
                names=[
                    "NDim", "Iter", "N_init", "NPF", "GD", "SS", "HV",
                    "HausDist", "Cover", "GDPS", "SSPS", "HDPS", "Prob", "q",
                    "Front"
                ])

ZG = {}
Keys = []

Metric = "GD"

fig = pl.figure(Metric)

ZG = M.\
    groupby("NPF").get_group(100).\
    groupby("Iter").agg(np.mean).reset_index()

pl.plot(ZG["Iter"], ZG[Metric], 'o')
print(ZG[Metric])

pl.legend()
pl.grid()
pl.xscale("log")
pl.yscale("log")
fig.show()
Example #39
def pol_cl_parse(pol_cl_out_file,
                 pol_cov_out_file,
                 wl_array=None,
                 cn=0,
                 rebin=None,
                 nbin=25,
                 lmin=1,
                 lmax=1500,
                 bin_type='lin',
                 custom_bins=None,
                 show=False):
    """
    Parser of the txt output file of PolSpice, which contains the angular power spectrum (APS).

    Parameters
    ----------
    pol_cl_out_file : str
        ascii output file created by PolSpice
    pol_cov_out_file : str
        .fits file containing the covariance matrix created by PolSpice
    raw_corr : (float, numpy array)
        must be a python list with 2 entries: the first one must be the
        white poissonian noise, the second must be the array (or the spline)
        of the Wbeam function as a function of l integrated in an energy bin.
    rebin : list (or array)
        if not None, it has to be the list defining the edges of the new binning.

    Returns
    -------
    array, array, array
        in order: array of the ells, array of the Cls, array of the Cls errors
        (estimated from the covariance matrix). If rebin is not None the arrays are
        the rebinned array.
    """
    if pol_cov_out_file:
        hdu = pf.open(pol_cov_out_file)
        _cov = hdu[0].data[0]
        _invcov = np.linalg.inv(_cov)
    _l, _cl = [], []
    with open(pol_cl_out_file, 'r') as f:
        for line in f:
            try:
                l, cl = [float(item) for item in line.split()]
                _l.append(l)
                _cl.append(cl)
            except ValueError:
                # skip header and comment lines that do not parse as two floats
                pass
    _l = np.array(_l)
    _cl = np.array(_cl)
    if len(_cl) < lmax:
        logger.info('ATT: Setting new l_max=%i ...' % len(_cl))
        lmax = len(_cl)

    if wl_array is not None:
        wl = wl_array[:lmax]
        _l = _l[:lmax]
        _cl = (_cl[:lmax] - cn) / (wl**2)

        if pol_cov_out_file:
            # beam-correct the covariance matrix (first rows, then columns)
            _cov_ = np.array([_cov[i][:len(wl)] for i in range(0, len(wl))])
            _cov_ = _cov_ / (wl**2)
            for l in range(len(wl)):
                _cov_[l] = _cov_[l] / (wl**2)
        else:
            # analytic estimate of the error when no covariance is available
            _clerr = np.sqrt(2 / (2 * _l + 1)) * (_cl + cn / wl**2)
    else:
        _clerr = np.sqrt(2 / (2 * _l + 1)) * _cl

    if rebin:
        logger.info('Rebinning in multipole range')
        _lr, _clr, _clerrr = [], [], []
        if custom_bins is not None:
            logger.info('Custom multipole binning.')
            rebinning = new_binning(lmin,
                                    lmax,
                                    nbin,
                                    bin_type=bin_type,
                                    custom_bins=custom_bins)
        else:
            rebinning = new_binning(lmin, lmax, nbin, bin_type=bin_type)
        for bmin, bmax in zip(rebinning[:-1], rebinning[1:]):
            logger.info('considering %i < li < %i' % (bmin, bmax))
            _index = np.where(np.logical_and(_l >= bmin, _l < bmax))[0]
            _index = _index.astype(int)
            if bin_type == 'log':
                _lmean = np.sqrt(bmin * bmax)
            if bin_type == 'lin':
                _lmean = (bmin + bmax) / 2
            _lr.append(_lmean)
            _clmean = np.mean(_cl[_index])
            _clr.append(_clmean)
            logger.info('cl_mean %.3e' % _clmean)

            if pol_cov_out_file:
                _clerr = np.sqrt(np.mean(_cov[bmin:bmax, bmin:bmax]))
                logger.info('cl_mean err %.3e' % _clerr)
                _clerrr.append(_clerr)
            else:
                _cler = np.mean(_clerr[_index])
                logger.info('cl_mean err %.3e' % _cler)
                _clerrr.append(_cler)

        _l = np.array(_lr)
        _cl = np.array(_clr)
        _clerr = np.array(_clerrr)
    else:
        if pol_cov_out_file:
            _clerr = np.array([np.sqrt(_cov[i][i]) for i in _l.astype(int)])
        else:
            pass

    if show:
        plt.figure()
        plt.errorbar(_l, _cl, yerr=_clerr, fmt='.')
        plt.plot([_l[0], _l[-1]], [0, 0], c='silver', linewidth=1)
        plt.title('CAPS', size=20)
        plt.xlabel(r'$\ell$', size=18)
        plt.ylabel(r'C$_{\ell}$', size=18)
        plt.xscale('log')
        plt.show()

    return np.array(_l), np.array(_cl), np.array(_clerr)
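A hypothetical call, to make the interface concrete (the file names, the window-function array wl and the noise level cn are placeholders, not from the source):

ells, cls, clerrs = pol_cl_parse('spice_cl.txt',    # placeholder PolSpice spectrum
                                 'spice_cov.fits',  # placeholder covariance file
                                 wl_array=wl, cn=noise_level,
                                 rebin=True, nbin=25, lmin=10, lmax=1000,
                                 bin_type='log', show=True)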
Beispiel #40
0
def plot_data(input_txt_file, plot_position, plot_color):

    plt.subplot(plot_position)

    # the criterion name is encoded in the file name (strip a fixed
    # 9-character prefix and the '.txt' extension)
    coll_criterion_name = os.path.basename(input_txt_file)[9:-4]
    plt.annotate(coll_criterion_name + " criterion",
                 xy=(0.03, 0.075),
                 xycoords='axes fraction',
                 weight='heavy',
                 color=plot_color,
                 fontsize=40)

    # Get data from file
    data = np.genfromtxt(input_txt_file,
                         names=('Ncoll', 'Nevents', 't_run', 'V', 'sigma', 'N',
                                'Ntest', 'T', 'dt'))

    with open(input_txt_file, 'r') as f:
        smash_version = f.readlines()[-1].strip('# \n')

    # Sort by x axis variable
    data = data[data[xvar].argsort()]

    x = data[xvar]
    y = data['Ncoll']

    # Make a text label about the properties of the used box
    s = ['Elastic Box$:$']
    if xvar != 'V': s.append(r"$V$ = %.1f fm$^3$" % data['V'][0])
    if xvar != 'sigma': s.append(r"$\sigma$ = %.1f fm$^2$" % data['sigma'][0])
    if xvar != 'N': s.append(r"$N$ = %i" % data['N'][0])
    if xvar != 'Ntest': s.append(r"$N_{test}$ = %i" % data['Ntest'][0])
    if xvar != 'T': s.append(r"$T$ = %.3f GeV" % data['T'][0])
    if xvar != 'dt': s.append(r"$dt$ = %s fm/c" % data['dt'][0])
    s.append(r"$t_{tot}$ = %.1f fm/c" % data['t_run'][0])
    s.append(r"$N_{ev}$ = %i" % data['Nevents'][0])
    box_label = '\n'.join(s)

    if plot_position == 211:  # only print title and input box once
        plt.annotate(box_label,
                     xy=(1.02, 0.97),
                     ha="left",
                     va="top",
                     xycoords='axes fraction',
                     fontsize=30)
        plt.title(r'only $\pi^0$, only elastic collisions', fontsize=30, y=1.02)
    if plot_position == 212:
        plt.annotate(y_label_defintion,
                     xy=(0.62, 0.125),
                     xycoords='axes fraction',
                     fontsize=40)

    # Number of collisions is expected to be equal to this norm (for <v> = c)
    norm = data['Nevents'] * data['t_run'] * (data['sigma'] * 0.5 * data['N'] *
                                              data['N'] * data['Ntest'] /
                                              data['V'])

    # Average relative velocity factor, arXiv:1311.4494; the correction to
    # <v> = c matters only at m/T > 0.7 (the factor tends to 1 for m/T -> 0)
    a = 0.135 / data['T']  # m_pi0/T
    v_rel = 4. / a * sp.kn(3, 2.0 * a) / np.power(sp.kn(2, a), 2)
    norm *= v_rel

    y = y / norm
    y_error = np.sqrt(data['Ncoll']) / norm
    plt.errorbar(x,
                 y,
                 yerr=y_error,
                 fmt='o',
                 capsize=10,
                 label=smash_version,
                 markersize=15,
                 zorder=2,
                 markeredgecolor=plot_color,
                 color=plot_color)

    if args.comp_prev_version:
        import comp_to_prev_version as cpv
        # plot reference data from previous SMASH version
        cpv.plot_previous_results('elastic_box', args.setup,
                                  '-' + coll_criterion_name + '.txt')

    plt.xlim(0.0, 1.05 * x.max())
    plt.ylim(bottom=0.4, top=max(1.5, 1.05 * y.max()))
    if (xvar == 'dt'):
        plt.xscale('log')
        plt.xlim(1.e-4, 2.0 * x.max())
        plt.gca().tick_params(pad=10)

    # store plotted data
    save_table(
        OrderedDict([('x', x), ('y', y), ("y_error", y_error)]),
        '{}.txt'.format(args.setup + "-" + coll_criterion_name),
        smash_version,
    )

    plt.legend(loc='upper right')
    plt.axhline(1, linewidth=3, linestyle='--', color='black', zorder=0)
    plt.ylabel(y_label, fontsize=50)
    if plot_position == 212: plt.xlabel(x_labels[xvar])
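The thermal average above can be sanity-checked against its analytic limits: the factor tends to 1 for m/T -> 0 and to the non-relativistic Maxwell value 4/sqrt(pi*a) for a = m/T >> 1. A small standalone check (the two a values are chosen only for illustration):

import numpy as np
import scipy.special as sp

def v_rel(a):
    # <v_rel> = (4/a) * K3(2a) / K2(a)^2, with a = m/T (arXiv:1311.4494)
    return 4. / a * sp.kn(3, 2.0 * a) / np.power(sp.kn(2, a), 2)

print(v_rel(0.01))                            # ~1.0  (ultrarelativistic limit)
print(v_rel(50.), 4. / np.sqrt(np.pi * 50.))  # ~0.31 vs ~0.32 (non-relativistic limit)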
Beispiel #41
0
    tau[i_file, :] = data[:, 1]
    tau_std[i_file, :] = data[:, 2]
    i_file = i_file + 1

print('plotting')
for j_file in [0, 1, 2]:
    paras = re.split(r'IMFmin|IMFmax|eta|slope|\.dat', files[j_file])
    plt.plot(z[j_file, :],
             tau[j_file, :],
             label=(r'$M_{max}=$' + paras[2] + r'$[M_{\odot}]$'))
plt.plot(z[0, :], tau_arr, label='Planck 2014', color='black', linestyle='-')
plt.plot(z[0, :],
         tau_arr - sig_tau,
         label=r'$1\sigma$ error',
         color='black',
         linestyle='-.')
plt.plot(z[0, :], tau_arr + sig_tau, color='black', linestyle='-.')

plt.ylabel(r'Final $\tau$', size=20)
plt.xlabel(r'$z$')
plt.xscale('linear')
plt.xlim(0, 35)
# plt.yscale('log')
plt.legend(bbox_to_anchor=(1.0, 0.5))
plt.title(r'Final $\tau$')
plt.grid(True, which='both', color='0.65', linestyle='-')
plt.savefig('../plots/reproduced/tau_mmax.jpg', bbox_inches='tight')
plt.show()
plt.clf()
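A shaded band is a common alternative to drawing the ±1σ limits as two dash-dot curves; a minimal sketch reusing the fragment's z, tau_arr and sig_tau arrays:

plt.fill_between(z[0, :], tau_arr - sig_tau, tau_arr + sig_tau,
                 color='black', alpha=0.2, label=r'$1\sigma$ band')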