def plotDataAndFit_S():
    """
	
		Plot \\xi( |s| )
	
	"""

    binSize = 4.0

    sss = numpy.arange(0.0, 50.0) * 4.0 + 2.0
    yyy_dat = numpy.zeros(shape=(50, 2))
    yyy_fit = numpy.zeros(shape=(50, 2))

    cut = numpy.logical_and((xxx__ != 0.0), (xxx__ < 200.0))
    xxx = xxx__[cut]
    xi_dat = xi_dat__[cut]
    xi_fit = xi_fit__[cut]
    xi_err = 1.0 / (xi_err__[cut] ** 2.0)  # inverse-variance weights, not errors

    for i in range(0, xxx.size):
        sIdx = int(xxx[i] / binSize)
        yyy_dat[sIdx][0] += xi_dat[i] * xi_err[i]
        yyy_dat[sIdx][1] += xi_err[i]
        yyy_fit[sIdx][0] += xi_fit[i] * xi_err[i]
        yyy_fit[sIdx][1] += xi_err[i]

    yyy_dat[:, 0] /= yyy_dat[:, 1]
    yyy_dat[:, 1] = numpy.sqrt(1.0 / yyy_dat[:, 1])
    yyy_fit[:, 0] /= yyy_fit[:, 1]
    yyy_fit[:, 1] = numpy.sqrt(1.0 / yyy_fit[:, 1])

    ### Plot the results

    for i in numpy.arange(0, 3):

        a = ""
        if i == 1:
            a += "|s|."
        elif i == 2:
            a += "|s|^{2}."

        coef = numpy.power(sss, 1.0 * i)

        plt.errorbar(
            sss,
            coef * yyy_dat[:, 0],
            yerr=coef * yyy_dat[:, 1],
            linestyle="",
            marker="o",
            color="blue",
            label=r"$<Simu>$",
        )
        plt.errorbar(sss, coef * yyy_fit[:, 0], color="red", label=r"$<Fit>$")
        plt.xlabel(r"$|s| \, [h^{-1} Mpc]$")
        plt.ylabel(r"$" + a + "\\xi(|s|)$")
        myTools.deal_with_plot(False, False, True)

        plt.show()

    return
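# A minimal, self-contained sketch of the inverse-variance binning used above,
# with hypothetical inputs (x, y, yerr): each bin accumulates sum(w*y) and
# sum(w) with w = 1/yerr**2, so the bin mean is sum(w*y)/sum(w) and its
# uncertainty is sqrt(1/sum(w)). Bins with no entries come out as NaN.
import numpy

def weighted_bin(x, y, yerr, binSize=4.0, nBins=50):
    acc = numpy.zeros(shape=(nBins, 2))
    w = 1.0 / yerr ** 2
    for xi, yi, wi in zip(x, y, w):
        idx = int(xi / binSize)
        if idx < nBins:
            acc[idx, 0] += yi * wi  # weighted sum
            acc[idx, 1] += wi       # total weight
    return acc[:, 0] / acc[:, 1], numpy.sqrt(1.0 / acc[:, 1])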
def PlotPatchBins(Sc, PatchData, NumBins, color, MinimumBinSize=7, ErrorBars=True):
    """
    Plot E*R* data binned from the hilltop patch data.
    """
    E_s = E_Star(Sc, PatchData[6], PatchData[2])
    R_s = R_Star(Sc, PatchData[10], PatchData[2])

    bin_x, bin_std_x, bin_y, bin_std_y, std_err_x, std_err_y, count = Bin.bin_data_log10(E_s, R_s, NumBins)

    # filter bins based on the number of data points used in their calculation
    bin_x = np.ma.masked_where(count < MinimumBinSize, bin_x)
    bin_y = np.ma.masked_where(count < MinimumBinSize, bin_y)
    # these lines can produce a spurious warning; no clean fix known yet

    if ErrorBars:
        # only plot errorbars for y as std dev of x is just the bin width == meaningless
        plt.scatter(
            bin_x,
            bin_y,
            c=count,
            s=50,
            edgecolor="",
            cmap=plt.get_cmap("autumn_r"),
            label="Binned Patch Data",
            zorder=100,
        )
        plt.errorbar(bin_x, bin_y, yerr=std_err_y, fmt='none', ecolor="k", elinewidth=2, capsize=3, zorder=0)
        cbar = plt.colorbar()
        cbar.set_label("Number of values per bin")

    else:
        plt.errorbar(bin_x, bin_y, fmt="o", color=color, label="No. of Bins = " + str(NumBins))
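# A tiny sketch of the count-based masking above with toy arrays (names are
# illustrative): np.ma.masked_where hides under-filled bins while preserving
# array alignment, so masked points simply drop out of later plot calls.
import numpy as np

count = np.array([3, 12, 9, 1])
bin_x = np.array([0.5, 1.5, 2.5, 3.5])
bin_x_masked = np.ma.masked_where(count < 7, bin_x)  # masks elements 0 and 3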
Example #3
def plotSpeedups(query, data, data_dir):
  fig = plt.figure()
  ax = plt.subplot(111)
  plt.title(query)
  plt.xlabel("Target Partition Sizes")
  plt.ylabel("Execution Time (s)")
  (x, y, err, minX, maxX, minY, maxY) = getStats(data)
  plt.errorbar(x, y, yerr=err, marker='.', color='black',ecolor="gray")
  plt.axis([0, 1.02*maxX, 0, 1.02*(maxY+max(err))])
  leg = ax.legend(["Caching. 6 workers."], fancybox=True)
  leg.get_frame().set_alpha(0.5)
  # plt.grid()

  # Add annotations for the endpoint values.
  # ax.annotate("("+str(x[0])+","+str(y[0])+")", xy=(x[0],y[0]), xytext=(0,0),
  #     textcoords='offset points', color='black')
  # ax.annotate(str(y[1]), xy=(x[1],y[1]), xytext=(0,0),
  #     textcoords='offset points', color='black')
  ax.annotate(str(y[-1]), xy=(x[-1],y[-1]), xytext=(10,0),
      textcoords='offset points', color='black')

  plt.savefig(data_dir + "/partitions/pdf/" + query + ".pdf")
  plt.savefig(data_dir + "/partitions/png/" + query + ".png")
  plt.clf()

  # Print stats.
  def two(s): return "{:.2f}".format(s)
  print(" & ".join([query, two(min(y)), two(y[-1])]) + r" \\")
Example #4
def tuning(x, y, err=None, smooth=None, ylabel=None, pal=None):
    """
    Plot a tuning curve
    """
    if smooth is not None:
        xs, ys = smoothfit(x, y, smooth)
        plt.plot(xs, ys, linewidth=4, color="black", zorder=1)
    else:
        ys = asarray([0])
    if pal is None:
        pal = sns.color_palette("husl", n_colors=len(x) + 6)
        pal = pal[2 : 2 + len(x)][::-1]
    plt.scatter(x, y, s=300, linewidth=0, color=pal, zorder=2)
    if err is not None:
        plt.errorbar(x, y, yerr=err, linestyle="None", ecolor="black", zorder=1)
    plt.xlabel("Wall distance (mm)")
    plt.ylabel(ylabel)
    plt.xlim([-2.5, 32.5])
    if err is not None:
        errTmp = err.copy()  # copy so the caller's array is not mutated
        errTmp[isnan(err)] = 0
        rng = max([nanmax(ys), nanmax(y + errTmp)])
    else:
        rng = max([nanmax(ys), nanmax(y)])
    plt.ylim([0 - rng * 0.1, rng + rng * 0.1])
    plt.yticks(linspace(0, rng, 3))
    plt.xticks(range(0, 40, 10))
    sns.despine()
    return rng
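# A hypothetical usage sketch for tuning() with synthetic data; assumes
# seaborn is importable as sns in this module (the function uses
# sns.color_palette and sns.despine) and that the star-imported numpy names
# (asarray, isnan, nanmax, linspace) are available as the body implies.
# smooth=None avoids the smoothfit dependency.
import numpy as np

x = np.arange(0.0, 35.0, 5.0)        # wall distances in mm
y = np.exp(-x / 20.0)                # fake tuning curve
err = 0.05 * np.ones(len(x))         # constant uncertainty
rng = tuning(x, y, err=err, ylabel="Response (a.u.)")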
Example #5
def display(params_estimated):

    # Construct matrix of experimental data and variance columns of interest
    exp_obs_norm = exp_data[data_names].view(float).reshape(len(exp_data), -1).T
    var_norm = exp_data[var_names].view(float).reshape(len(exp_data), -1).T
    std_norm = var_norm ** 0.5

    # Simulate model with new parameters and construct a matrix of the
    # trajectories of the observables of interest, normalized to 0-1.
    solver.run(params_estimated)
    obs_names_disp = obs_names + ['aSmac']
    sim_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
    totals = obs_totals + [momp_obs_total]
    sim_obs_norm = (sim_obs / totals).T

    # Plot experimental data and simulation on the same axes
    colors = ('r', 'b')
    for exp, exp_err, sim, c in zip(exp_obs_norm, std_norm, sim_obs_norm, colors):
        plt.plot(exp_data['Time'], exp, color=c, marker='.', linestyle=':')
        plt.errorbar(exp_data['Time'], exp, yerr=exp_err, ecolor=c,
                     elinewidth=0.5, capsize=0, fmt='none')
        plt.plot(solver.tspan, sim, color=c)
    plt.plot(solver.tspan, sim_obs_norm[2], color='g')
    plt.vlines(momp_data[0], -0.05, 1.05, color='g', linestyle=':')
    plt.show()
def main():

    sample='q'
    sm_bin='10.0_10.5'
    catalogue = 'sm_9.5_s0.2_sfr_c-0.75_250'

    #load in fiducial mock
    filepath = './'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'_cov.npy'
    cov = np.matrix(np.load(filepath+filename))
    diag = np.diagonal(cov)
    filepath = cu.get_output_path() + 'analysis/central_quenching/observables/'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'.dat'
    data = ascii.read(filepath+filename)
    rbins = np.array(data['r'])
    mu = np.array(data['wp'])
    
    #load in comparison mock (omitted here; this step should define `wp`)

    plt.figure()
    plt.errorbar(rbins, mu, yerr=np.sqrt(np.diagonal(cov)), color='black')
    plt.plot(rbins, wp,  color='red')
    plt.xscale('log')
    plt.yscale('log')
    plt.show()
    
    inv_cov = cov.I
    Y = np.matrix((wp-mu))
    
    X = Y*inv_cov*Y.T
    
    print(X)
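# A minimal sketch of the chi-square statistic computed above,
# X = Y C^{-1} Y^T; np.linalg.solve avoids forming an explicit inverse,
# which is numerically safer for ill-conditioned covariance matrices.
import numpy as np

def chi_square(model, data, cov):
    d = np.asarray(model) - np.asarray(data)
    return float(d @ np.linalg.solve(np.asarray(cov), d))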
Example #7
def build_plot(profilerResults):
    # Calculate each value.
    x = []
    mean = []
    std = []
    for t in range(profilerResults.getLookBack()*-1, profilerResults.getLookForward()+1):
        x.append(t)
        values = np.asarray(profilerResults.getValues(t))
        mean.append(values.mean())
        std.append(values.std())

    # Cleanup
    plt.clf()
    # Plot a line with the mean cumulative returns.
    plt.plot(x, mean, color='#0000FF')

    # Error bars starting on the first lookforward period.
    lookBack = profilerResults.getLookBack()
    firstLookForward = lookBack+1
    plt.errorbar(
        x=x[firstLookForward:], y=mean[firstLookForward:], yerr=std[firstLookForward:],
        capsize=3,
        ecolor='#AAAAFF', alpha=0.5
    )

    # Horizontal line at the level of the first cumulative return.
    # (axhline's xmin/xmax are axes fractions in [0, 1], not data coordinates,
    # so span the full width and let plt.xlim below set the data range.)
    plt.axhline(y=mean[lookBack], color='#000000')

    plt.xlim(profilerResults.getLookBack()*-1-0.5, profilerResults.getLookForward()+0.5)
    plt.xlabel('Time')
    plt.ylabel('Cumulative returns')
def make_intergenerational_figure(data, lowerbound, upperbound, rows, title):
    plt.figure(figsize=(10,10))
    plt.suptitle(title,fontsize=20)
    for index in range(4):
        plt.subplot(2,2,index+1)    
        #simulation distribution
        plt.hist(accepted[:,rows[index]], density=True, bins = range(0,100,5), color = col)
        #simulation values
        value = np.mean(accepted[:,rows[index]])
        std = 2*np.std(accepted[:,rows[index]])
        plt.errorbar((value,), (red_marker_location-0.02), xerr=((std,),(std,)),
                     color=col, fmt='o', linewidth=2, capsize=5, mec = col)
        #survey values
        value = data[index]
        lb = lowerbound[index]
        ub = upperbound[index]
        plt.errorbar((value,), (red_marker_location,), xerr=((value-lb,),(ub-value,)),
                     color='r', fmt='o', linewidth=2, capsize=5, mec = 'r')
        #labeling    
        plt.ylim(0,ylimit)
        plt.xlim(0,100)
    #make subplots pretty
    plt.subplot(2,2,1)
    plt.title("Males")
    plt.ylabel("'05\nFrequency")
    plt.subplot(2,2,2)
    plt.title("Females")
    plt.subplot(2,2,3)
    plt.ylabel("'08\nFrequency")
    plt.xlabel("Percent Responding Affirmatively")
    plt.subplot(2,2,4)
    plt.xlabel("Percent Responding Affirmatively")
Example #9
def plot_categorical_scatter_with_mean(vals, categoryLabels, jitter=True, colours=None, xlabel=None, ylabel=None, title=None):
    import matplotlib.colors
    import scipy.stats
    import pdb
    numCategories = len(vals)
    # (plt.hold was removed from Matplotlib; repeated plot calls overlay by default)
    if colours is None:
        colours = plt.cm.gist_rainbow(np.linspace(0,1,numCategories))
    for category in range(numCategories):
        edgeColour = matplotlib.colors.colorConverter.to_rgba(colours[category], alpha=0.5)
        xval = (category+1)*np.ones(len(vals[category]))
        if jitter:
            jitterAmt = np.random.random(len(xval))
            xval = xval + (0.3 * jitterAmt) - 0.15
        #pdb.set_trace()
        plt.plot(xval, vals[category], 'o', mec=edgeColour, mew = 4, mfc='none', ms=16)
        mean = np.mean(vals[category])
        sem = scipy.stats.sem(vals[category])
        print(mean, sem)
        plt.plot(category+1, mean, 'o', color='k', mec=colours[category], ms=20)
        plt.errorbar(category+1, mean, yerr = sem, color=colours[category])
    plt.xlim(0,numCategories+1)
    plt.ylim(0,1)
    ax = plt.gca()
    ax.set_xticks(range(1,numCategories+1))
    ax.set_xticklabels(categoryLabels, fontsize=16)
    if xlabel is not None:
        plt.xlabel(xlabel, fontsize=20)
    if ylabel is not None:
        plt.ylabel(ylabel, fontsize=20)
    if title is not None:
        plt.title(title)
    plt.show()
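# A hypothetical usage sketch with random data (labels are illustrative
# only); the function expects a list of per-category value arrays, and the
# hard-coded ylim means values should fall in [0, 1].
import numpy as np

vals = [np.random.uniform(0.2, 0.8, size=15) for _ in range(3)]
plot_categorical_scatter_with_mean(vals, ['A', 'B', 'C'],
                                   xlabel='Group', ylabel='Score')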
Example #10
def draw_methods(argmts,da_method):
    methods,ys,yerrs,x,lookfor_pair = argmts
    fig, ax = plt.subplots(figsize=(12,8))
    index = np.arange(len(x))
    markers = ['.','x']*(len(methods)//2)
    i = 0
    # print index,[len(y) for y in ys]
    for y in ys: #yerr=yerrs[i]
        plt.errorbar(index,y,marker= markers[i],alpha=opacity,label=convert(methods[i]),mew=3,linewidth=3.0,markersize=10)
        i += 1
    plt.xticks(index,x)

    plt.title(lookfor_pair+': '+da_method,size=22)
    plt.xlabel('$\\lambda$',size=22)
    plt.ylabel('Accuracy',size=22)
    # bottom box
    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0 + box.height * 0.1,box.width, box.height * 0.9])
    # ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
    #       fancybox=True, shadow=True, ncol=5)
    # plt.show()
    plt.autoscale()
    plt.ylim([57,90])
    plt.savefig('%s:%s-acc.png'%(lookfor_pair,da_method))
    pass
Example #11
	def fitplot(self,pars):
		
		AnalyticTTVs = []
		for parset in pars:
			m,m1,ex,ey,ex1,ey1 = parset
			AnalyticTTVs.append( self.get_ttvs(ex,ey,ex1,ey1) )
#		#
		pl0tr = self.transits
		pl1tr = self.transits1
		N = self.trN
		N1 = self.trN1
		errs,errs1 = self.input_data[:,2],self.input_data1[:,2]
#		#
		symbols = ['x','o','d']
		## Figure 1 ##
		plt.figure()
		plt.subplot(211)
		plt.errorbar(pl0tr, pl0tr - self.p*N - self.T0,yerr=errs,fmt='ks')
		for i,ttvs in enumerate(AnalyticTTVs):
			plt.plot(pl0tr , ttvs[0] * m1 ,'k%s'% symbols[i%len(symbols)] ) 
		plt.subplot(212)
		plt.errorbar(pl1tr , pl1tr - self.p1*N1 - self.T10 ,yerr=errs1,fmt='rs')
		for i,ttvs in enumerate(AnalyticTTVs):
			plt.plot(pl1tr , ttvs[1] * m  ,'r%s'% symbols[i%len(symbols)] ) 
		plt.show()
Example #12
def dose_plot(df,err,cols,scale='linear'):
    n_rows = int(np.ceil(len(cols)/3.0))
    plt.figure(figsize=(20,4 * n_rows))
    subs = gridspec.GridSpec(n_rows, 3) 
    plt.subplots_adjust(hspace=0.54,wspace=0.27)

    for col,sub in zip(cols,subs):
        plt.subplot(sub)
        for base in df['Base'].unique():
            for drug in get_drugs_with_multiple_doses(filter_rows(df,'Base',base)):
                data = thread_first(df,
                                    (filter_rows,'Drug',drug),
                                    (filter_rows,'Base',base),
                                    (DF.sort, 'Dose'))
                error = thread_first(err,
                                     (filter_rows,'Drug',drug),
                                     (filter_rows,'Base',base),
                                     (DF.sort, 'Dose'))
                if scale == 'linear':
                    plt.errorbar(data['Dose'],data[col],yerr=error[col])
                    title = "{} vs. Dose".format(col)
                else: 
                    plt.errorbar(data['Dose'],data[col],yerr=error[col])
                    plt.xscale('log')
                    title = "{} vs. Dose (Log Scale)".format(col)
                    plt.xticks(data['Dose'].values,data['Dose'].values)
                    plt.xlim(0.06,15)
                label('Dose ({})'.format(data.Unit.values[0]), col,title,fontsize = 15)

                plt.legend(df['Base'].unique(), loc = 0)
Example #13
def plotres(psr,deleted=False,group=None,**kwargs):
    """Plot residuals, compute unweighted rms residual."""

    res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
    
    if (not deleted) and N.any(psr.deleted != 0):
        res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
        print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))

    meanres = math.sqrt(N.mean(res**2)) / 1e-6
    
    if group is None:
        i = N.argsort(t)
        P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
    else:
        if (not deleted) and N.any(psr.deleted):
            flagmask = psr.flagvals(group)[~psr.deleted]
        else:
            flagmask = psr.flagvals(group)

        unique = list(set(flagmask))
            
        for flagval in unique:
            f = (flagmask == flagval)
            flagres, flagt, flagerrs = res[f], t[f], errs[f]
            i = N.argsort(flagt)
            P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
        
        P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))

    P.xlabel('MJD'); P.ylabel('res [us]')
    P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
Example #14
def main():
    if len(sys.argv) != 2:
        print("Usage: ./generate_plots.py estimator_name")
        exit(1)

    estimator_name = sys.argv[1]
    f = open("data/" + estimator_name + ".txt", 'r')

    x = []
    y = []
    e = []

    for line in f:
        values = line.split()
        x.append(float(values[0]))
        y.append(float(values[1]))
        e.append(1.96 * float(values[2]))

    f.close()

    plt.figure()
    plt.title(estimator_name)
    plt.xlabel('N')
    plt.ylabel('N_hat')
    plt.axis([1, 5000, 1, 7000])
    plt.plot(x, x, 'r-', label="Actual")
    plt.legend()
    plt.errorbar(x, y, yerr=e, fmt='ko')
    #plt.show()
    plt.savefig("figures/" + estimator_name + "_plot.png", format="png")
def draw_zprofile(
        analyzed_root_files,
        energy,
        histlabel=None,
        histcolor = '#000000'):
    
    nmodules = 14
    nsectors = 16
    cut_event_numb = 0. # counter for number of events after cuts are applied

    # add to tree all edm_analyzed reco files for electrongun
    rh_tree = ROOT.TChain("demo/rh_tree")
    rh_tree.Add(analyzed_root_files)

    # initialize arrays for module energies
    total_module_energy = nmodules * [0.] # array with energy of all sectors for each module, air
    total_module_energy_per_event = nmodules * [0.]

    # initialize lists for error calculation
    # sigma = sqrt(<x^2> - <x>^2), mean_error = sigma/sqrt(2)
    module_energies_squared = nmodules * [0.] # sum of squares of module energies 
    bin_error =  nmodules * [0.]


    # loop over all events 

    for i in range(rh_tree.GetEntries()):
        rh_tree.GetEntry(i)
        px, py, pz = rh_tree.gen_part_momentum_x, rh_tree.gen_part_momentum_y, rh_tree.gen_part_momentum_z
        momentum_vector = ROOT.TVector3(px, py, pz)
        eta = momentum_vector.Eta()
        if (eta > -6.4) & (eta < -5.5):
            cut_event_numb += 1
            for module_numb in range(nmodules):
                total_module_energy_per_event[module_numb] = 0
                for sector_numb in range(nsectors):
                    total_module_energy[module_numb] += rh_tree.energy_castor[sector_numb*nmodules + module_numb]
                    total_module_energy_per_event[module_numb] += rh_tree.energy_castor[sector_numb*nmodules + module_numb]
                module_energies_squared[module_numb] += total_module_energy_per_event[module_numb]**2
            
    print "cut event number: ", cut_event_numb


    for module_numb in range(nmodules):
        bin_error[module_numb] = 1./energy*1./np.sqrt(cut_event_numb) * np.sqrt(module_energies_squared[module_numb]/cut_event_numb
                                - (total_module_energy[module_numb]/cut_event_numb)**2)
        print "module; ", module_numb+1, ", bin_error: " , bin_error[module_numb]

    mean_module_energy = np.array(total_module_energy)/cut_event_numb


    # draw histogram 
    # plt.bar(np.arange(1,15,1), mean_module_energy/energy,
    #         yerr=np.array(bin_error)/energy, # value of error bars
    #         width=1, fill=False, edgecolor=histedgecolor, alpha = transparency, label=str(energy)+" GeV",
    #         error_kw=dict(ecolor=histedgecolor)) # set collor of error bar
    plt.hist(np.arange(1.,15.,1), bins=np.arange(1,16), weights=mean_module_energy/energy,
             histtype=u'step', align = u'mid', color=histcolor , label=histlabel)
    plt.errorbar(np.arange(1.5,15.5,1), mean_module_energy/energy, yerr=np.array(bin_error),
                 fmt='none', ecolor=histcolor)
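# A compact sketch of the per-bin error formula used above,
# sigma = sqrt(<x^2> - <x>^2) with mean_error = sigma / sqrt(N),
# written directly with numpy over a per-event sample:
import numpy as np

def standard_error_of_mean(samples):
    samples = np.asarray(samples, dtype=float)
    sigma = np.sqrt(np.mean(samples ** 2) - np.mean(samples) ** 2)
    return sigma / np.sqrt(len(samples))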
def PlotEDepSummary(gFiles,nFiles,figureName='EDepSummary.png',tParse=GetThickness,
  histKey='eDepHist'):
  """ PlotEDepSummary
  Plotss the energy deposition summary
  """
  # Extrating the average values
  gT = list()
  gDep = list()
  gDepError = list()
  nT = list()
  nDep = list()
  nDepError = list()
  for fname in gFiles:
    f = TFile(fname,'r')
    hist = f.Get(histKey)
    gT.append(GetThickness(fname))
    gDep.append(hist.GetMean())
    gDepError.append(hist.GetMeanError())
  for fname in nFiles:
    f = TFile(fname,'r')
    hist = f.Get(histKey)
    nT.append(GetThickness(fname))
    nDep.append(hist.GetMean())
    nDepError.append(hist.GetMeanError())
  # Plotting
  plt.errorbar(gT,gDep,yerr=gDepError,fmt='r+')
  # (plt.hold was removed from Matplotlib; repeated plot calls overlay by default)
  plt.errorbar(nT,nDep,yerr=nDepError,fmt='go')
  plt.xlabel("Thickness (mm)")
  plt.ylabel("Average Energy Deposition (MeV)")
  plt.legend(["Co-60","Cf-252"])
  plt.xscale("log")
  plt.yscale("log")
  plt.grid(True)
  plt.savefig(figureName)
Example #17
	def plot_minos_fit(self,p,decay="X",title="Fit Results",erange=9,step=4.07,lum=4200):
		fig = plt.figure(figsize=(8,6))
		plt.errorbar(self.x,self.y,self.yerr,fmt='o')
		M = p[0][0]
		G = p[1][0]
		B = p[2][0]
		dMl = p[0][1]
		dMu = p[0][2]
		dGl = p[1][1]
		dGu = p[1][2]
		dBl = p[2][1]
		dBu = p[2][2]
		x_fit = np.linspace(min(self.x),max(self.x),num=100)
		plt.plot(x_fit,self.convBWG(x_fit,M,G,B))
		plt.xlabel("$\sqrt{\hat{s}} (MeV)$",fontsize=16)
		plt.ticklabel_format(useOffset=False)
		plt.ylabel("Counts",fontsize=16)
		plt.title(title,fontsize=16)
		lbl1 = "Input:\n$\mathcal{L}=%d pb^{-1}$\n$\Delta=%.3f\ MeV$\n$\delta\sqrt{\hat{s}} = %.3f MeV$" % (lum,step,self.beam)
		lbl1 = lbl1 + "\n$M_h = 125.0 GeV$\n$\Gamma_h = 4.07 MeV$\n$Br(h^0\\rightarrow$%s$) = %.3f$" % (decay, self.higgs[2])
		lbl1 = lbl1 + "\n$\sigma_{bkg} = %.2f pb^{-1}$" % (self.bkg)
		lbl2 = "\nFit results:\n"
		lbl2 = lbl2 + "$\Delta M_h = %.3f_{-%.3f}^{+%.3f}\ MeV$\n" % (M-self.higgs[0], -1*dMl, dMu)
		lbl2 = lbl2 + "$\Gamma_h = %.3f_{-%.3f}^{+%.3f} \ MeV$\n" % (G, -1*dGl, dGu)
		lbl2 = lbl2 + "$Br(h^0\\rightarrow$%s$) = %.3f_{-%.3f}^{+%.3f}$\n" % (decay, B, -1*dBl, dBu)
		plt.annotate(lbl1, [0.1,0.6], xycoords='axes fraction',fontsize=15)
		plt.annotate(lbl2, [0.7,0.6], xycoords='axes fraction',fontsize=15)
		return plt
Example #18
def plotDataAndFit_1D():
    """
	"""

    if type__ == "1D":
        xxx = numpy.arange(0.0, nbBin__) * 4.0 + 2.0
        xxx[(xxx__ != 0.0)] = xxx__[(xxx__ != 0.0)]
        b = "|s|"
    elif type__ == "2D":
        xxx = numpy.arange(0.0, nbBin__)
        b = "s"

    for i in numpy.arange(0, 3):

        if i == 0:
            a = ""
            c = ""
        elif i == 1:
            a = "|s|."
            c = " \, [h^{-1}.Mpc]"
        else:
            a = "|s|^{2}."
            c = " \, [(h^{-1}.Mpc)^{2}]"

        coef = numpy.power(xxx__, 1.0 * i)

        plt.errorbar(xxx, coef * xi_dat__, yerr=coef * xi_err__, linestyle="", marker="o", color="blue")
        plt.errorbar(xxx, coef * xi_fit__, color="red")
        plt.xlabel(r"$" + b + " \, [h^{-1}.Mpc] $")
        plt.ylabel(r"$" + a + "\\xi(" + b + ") " + c + "$")
        myTools.deal_with_plot(False, False, True)
        plt.show()

    return
Example #19
def plot_magnitude_time_scatter(catalogue, plot_error=False, filename=None,
        filetype='png', dpi=300, fmt_string='o'):
    """
    Creates a simple scatter plot of magnitude with time
    :param catalogue:
        Earthquake catalogue as instance of :class:
        hmtk.seismicity.catalogue.Catalogue
    :param bool plot_error:
        Choose to plot error bars (True) or not (False)
    :param str fmt_string:
        Symbology of plot
    """
    plt.figure(figsize=DEFAULT_SIZE)
    dtime = catalogue.get_decimal_time()
    if len(catalogue.data['sigmaMagnitude']) == 0:
        print('Magnitude Error is missing - neglecting error bars!')
        plot_error = False

    if plot_error:
        plt.errorbar(dtime,
                     catalogue.data['magnitude'],
                     xerr=None,
                     yerr=catalogue.data['sigmaMagnitude'],
                     fmt=fmt_string)
    else:
        plt.plot(dtime, catalogue.data['magnitude'], fmt_string)
    plt.xlabel('Year', fontsize='large')
    plt.ylabel('Magnitude', fontsize='large')
    plt.title('Magnitude-Time Plot', fontsize='large')
    plt.show()
    _save_image(filename, filetype, dpi)
    return
Example #20
    def plot(self, debug=False):
        """Plot figures for population and nuisance parameters."""
        # first figure out what scheme is used
        self.list_scheme()

        # next get MBAR sampling done
        self.MBAR_analysis()

        # load in precomputed P and dP from MBAR analysis
        pops0, pops1 = self.P_dP[:, 0], self.P_dP[:, self.K-1]
        dpops0, dpops1 = self.P_dP[:, self.K], self.P_dP[:, 2*self.K-1]
        t0 = self.traj[0]
        t1 = self.traj[self.K-1]

        # Figure Plot SETTINGS
        label_fontsize = 12
        legend_fontsize = 10
        fontfamily = {'family': 'sans-serif', 'sans-serif': ['Arial']}
        plt.rc('font', **fontfamily)

        # determine number of rows and columns (integer division)
        if (len(self.scheme)+1) % 2 != 0:
            c, r = 2, (len(self.scheme)+2)//2
        else:
            c, r = 2, (len(self.scheme)+1)//2
        plt.figure(figsize=(4*c, 5*r))
        # Make a subplot in the upper left
        plt.subplot(r, c, 1)
        plt.errorbar(pops0, pops1, xerr=dpops0, yerr=dpops1, fmt='k.')
        plt.plot([1e-6, 1], [1e-6, 1], color='k', linestyle='-', linewidth=2)
        plt.xlim(1e-6, 1.)
        plt.ylim(1e-6, 1.)
        plt.xlabel('$p_i$ (exp)', fontsize=label_fontsize)
        plt.ylabel('$p_i$ (sim+exp)', fontsize=label_fontsize)
        plt.xscale('log')
        plt.yscale('log')
        # label key states
        for i in range(len(pops1)):
            if (i == 0) or (pops1[i] > 0.05):
                plt.text(pops0[i], pops1[i], str(i), color='g')
        for k in range(len(self.scheme)):
            plt.subplot(r, c, k+2)
            plt.step(t0['allowed_'+self.scheme[k]], t0['sampled_'+self.scheme[k]], 'b-')
            plt.xlim(0, 5)
            plt.step(t1['allowed_'+self.scheme[k]], t1['sampled_'+self.scheme[k]], 'r-')
            plt.legend(['exp', 'sim+exp'], fontsize=legend_fontsize)
            if self.scheme[k].find('cs') == -1:
                plt.xlabel(r"$\%s$" % self.scheme[k], fontsize=label_fontsize)
                plt.ylabel(r"$P(\%s)$" % self.scheme[k], fontsize=label_fontsize)
                plt.yticks([])
            else:
                plt.xlabel(r"$\sigma_{%s}$" % self.scheme[k][6:], fontsize=label_fontsize)
                plt.ylabel(r"$P(\sigma_{%s})$" % self.scheme[k][6:], fontsize=label_fontsize)
                plt.yticks([])

        plt.tight_layout()
        plt.savefig(self.picfile)
def main_plot_MC_sigma():
	# 0.13 s per iteration (here 4000)
	POINTS = params['POINTS']
	NETS = params['NETS']
	#sigmas = linspace(0.001, 0.2, POINTS)
	sigmas = linspace(0.075, 0.1, POINTS)
	#params['RUNS'] = 1

	y = zeros([NETS, POINTS])

	for i, sigma in enumerate(sigmas):
		for net in range(NETS):
			W = dist_W(sigma)
			WI = dist_WI()

			MC, _ = memory_capacity(W, WI, params)
			y[net, i] = sum(MC)
			print("\rsigma: %.3f (%d of %d), net: (%d of %d)" % (sigma, i, POINTS, net, NETS), end="")
	y, error = (average(y, axis=0), std(y, axis=0) / sqrt(NETS))

	x = sigmas
	plt.errorbar(x, y, yerr=(error * 3))
	plt.plot(sigmas, y)

	plt.grid(True)
	plt.ylabel('Memory capacity')
	plt.xlabel('$\sigma_{W^R}$')
	#plt.ylim([0,1])
	plt.title('Memory capacity (confidence = $3\sigma$) (runs = %d) (nets = %d) ' % (params['RUNS'], params['NETS']))
	plt.show()
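# A hedged toy version of the averaging above: mean over networks plus a
# standard error of the mean, drawn as a 3-sigma error bar (shapes and
# values here are illustrative only).
import numpy as np
import matplotlib.pyplot as plt

y = np.random.normal(10.0, 1.0, size=(20, 50))   # NETS x POINTS toy data
mean = y.mean(axis=0)
sem = y.std(axis=0) / np.sqrt(y.shape[0])
plt.errorbar(np.linspace(0.075, 0.1, 50), mean, yerr=3 * sem)
plt.show()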
Example #22
File: GUI.py Project: em0980/EPR
def multi_trial_stats():
    """
    Note: a lot of this should probably be in a helper function.
    May want to add a recursive copy of trial folders into the summary folder.
    Sensitivity is not yet taken into account; fix that once this all works.
    """
    # Ask user for directory with all data directories to be used inside
    selected_dir = tkFileDialog.askdirectory()
    sub_dirs = [os.path.join(selected_dir, name) for name in os.listdir(selected_dir) if os.path.isdir(os.path.join(selected_dir, name))]

    # Create a TrialSummary Object for each trial
    trial_summaries = []
    for trial_dir in sub_dirs:
        trial_summaries.append(TrialSummary(trial_dir))

    # Create Plot of Averages and save
    plt.figure(2)  # off screen figure
    plt.clf()
    plt.grid(True)
    for summary in trial_summaries:
        x, y = summary.get_average_response()
        plt.plot(x, y, label=summary.name)
    plt.legend()
    plt.savefig(os.path.join(selected_dir, 'average_plot.png'))

    # Check if each trial can be given a number based on sample name
    summary_plot_numbered = []
    flag = True
    for trial in trial_summaries:
        try:
            num = float(trial.name)
            summary_plot_numbered.append((num, trial))
        except ValueError:
            flag = False
            break

    if flag:
        # Sort list by associated trial number (least to greatest)
        summary_plot_numbered = sorted(summary_plot_numbered, key=lambda i: i[0])

    # Create the Summary Plots
    measurement_title = {0: "Amplitude (V)", 2: "Max amplitude current (A)", 4: "Min amplitude current (A)", 6: "Peak distance (V)", 8: "Peak separation (A)"}
    for measurement in range(0, 9, 2):
        plt.figure(2)
        plt.clf()
        values = [data.get_stats_list()[measurement] for number, data in summary_plot_numbered]
        errors = [data.get_stats_list()[measurement + 1] for number, data in summary_plot_numbered]
        plt.errorbar(range(len(values)), values, yerr=errors)
        for (label, x, y) in zip([x[0] for x in summary_plot_numbered], range(len(values)), values):
            plt.annotate(label, xy=(x, y), xytext=(5, 5), textcoords="offset points")
        plt.title(measurement_title[measurement])
        plt.xlabel("Sample")
        plt.ylabel("Value and error (one standard deviation)")
        plt.xlim([-1, len(values)])
        plt.savefig(os.path.join(selected_dir, measurement_title[measurement]))

    # Create Summary.txt
    for number, trial in summary_plot_numbered:
        trial.save_data(selected_dir)
    return
Example #23
def mw_cdf(x_hist_vals, hist_vals, a_coeff, figs, plot=False):
    max_a = np.sum(hist_vals)
    area_a = np.ones(len(hist_vals))
    for el in range(len(hist_vals)):
        area_a[el] = np.sum(hist_vals[0:el+1])

    
    c_d_f = area_a/max_a
    interp = interp1d(c_d_f, x_hist_vals)
    a_best = interp(0.5)
    a_limits = interp(np.array([0.5 - 0.683/2.0, 0.5 + 0.683/2.0]))

    decim = [math.trunc(np.abs(np.log10(a_best - a_limits[0])))+2, math.trunc(np.abs(np.log10(a_limits[1] - a_best)))+2]
             
    uncertainties = np.array([round(a_best - a_limits[0], decim[0]), round(a_limits[1] - a_best, decim[1])])

    if plot:
        plt.figure(figs)
        figs += 1
        plt.clf()
        plt.scatter(x_hist_vals, c_d_f, marker='+')
        plt.plot((a_best, a_best), (( c_d_f.max(), 0)), 'g')
        plt.errorbar(a_best, 0.5, xerr=[[uncertainties[0]], [uncertainties[1]]], fmt='^', color='red')
        plt.ylabel('CDF ')
        plt.xlabel(r'a$_'+str(a_coeff)+'$ values')
        
        plt.title(r'Result: a$_'+str(a_coeff)+' = '+str(round(a_best, np.max(decim)))+'_{-'+str(uncertainties[0])+'}^{+'+str(uncertainties[1])+'}$')
        plt.show() #in most cases unnecessary
        
    return figs
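# A self-contained sketch of the CDF inversion used above: build the
# empirical CDF from histogram values, then interpolate it backwards
# (abscissa as a function of cumulative probability) to read off the median
# and a 68.3% interval. Assumes the CDF brackets the requested quantiles.
import numpy as np
from scipy.interpolate import interp1d

def cdf_interval(x_vals, hist_vals, level=0.683):
    cdf = np.cumsum(hist_vals) / np.sum(hist_vals)
    inv = interp1d(cdf, x_vals)
    lo, best, hi = inv([0.5 - level / 2.0, 0.5, 0.5 + level / 2.0])
    return float(best), float(best - lo), float(hi - best)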
Example #24
def histograma(h,l):

    Hist = genhistograma(h)
    
    if h <= 3:
        x = np.arange(1.5,13,1)
        H = np.histogram(Hist, bins = [1,2,3,4,5,6,7,8,9,10,11,12,13])
        y = H[0]
        plt.hist(Hist, bins = [1,2,3,4,5,6,7,8,9,10,11,12,13])
        plt.grid(True)
        plt.xlim((1,13))
        plt.ylabel("frecuencia")
        plt.title("Histograma %d"%(h+1))
        plt.errorbar(x,y, yerr = np.sqrt(y), fmt = '.')
        plt.savefig("hist%d"%(h))
        plt.close()
        
    elif h > 3:
        x = np.arange(0.5,11,1)
        H = np.histogram(Hist, bins = [0,1,2,3,4,5,6,7,8,9,10,11])
        y = H[0]
        plt.hist(Hist, bins = [0,1,2,3,4,5,6,7,8,9,10,11])
        plt.grid(True)
        plt.xlim((0,11))
        plt.ylabel("frecuencia")
        plt.title("Histograma %d"%(h+1))
        plt.errorbar(x,y, yerr = np.sqrt(y), fmt = '.')
        plt.savefig("hist%d"%(h))
        plt.close()
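# A hedged sketch of the sqrt(N) (Poisson) error bars drawn above: for
# counted events the statistical uncertainty per bin is sqrt(count).
import numpy as np
import matplotlib.pyplot as plt

counts, edges = np.histogram(np.random.poisson(5, 500), bins=range(0, 12))
centers = 0.5 * (edges[:-1] + edges[1:])
plt.errorbar(centers, counts, yerr=np.sqrt(counts), fmt='.')
plt.show()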
Example #25
def plot_cluster_size(file, outfile='cluster_size.pdf', title=None, grid=False, epochs=[],
                      labels=[], errorbars=True):
    reader = csv.reader(file)
    rownum = 0
    data_read = False

    for row in reader:
        # Skip commented lines
        if re.match(r'^\s*#', row[0]) is not None:
            continue

        rownum += 1

        if rownum == 1:
            colnames = row[1:]

            for i in range(len(colnames)):
                colnames[i] = re.sub('_', ' ', colnames[i])

            continue
        else:
            row = list(map(float, row))

            if (len(epochs) == 0 or row[0] in epochs):
                if data_read:
                    data = numpy.vstack((data,row))
                else:
                    data = numpy.array(row)
                    data_read = True

    if data_read:
        fig = plt.figure()

        if len(labels) > 0 and len(labels) == len(colnames):
            colnames = labels

        plot_cols = list(range(2, data.shape[1], 3))

        # Plot the number of clusters
        for t in plot_cols:
            if errorbars:
                e = data[:, t+1]
            else:
                e = None

            plt.errorbar(data[:,0], data[:,t], yerr=e, xerr=None, label=colnames[t-1].capitalize())

        if grid:
            plt.grid()

        plt.xlabel("Time (epoch)")
        plt.ylabel("Cluster Size (cells)")

        if title:
            plt.title(title)

        plt.legend(loc=0)
        plt.savefig(outfile)
    else:
        print("Could not generate plot: No data match given parameters")
def fit_and_plot_gains(gains, label, label_ypos, color, show_data=True, effective=True, boost=None, final=False):
    """Fit and plot a bunch of gains."""
    import matplotlib.pyplot as plt
    
    
    if final:
        expected_gain = np.asarray(gains['effective gain'] / 2.7)
    elif effective:
        expected_gain = np.asarray(gains['effective gain'])
    else:
        expected_gain = np.asarray(gains['gain'])
    model_gain = np.asarray(gains['fit gain'])
    model_noise = np.asarray(gains['fit sigma'])
    
    y = model_gain * np.sqrt(1.0/model_noise)
    A = np.vstack([expected_gain, np.zeros(len(expected_gain))]).T
    A *= np.sqrt(1.0/model_noise)[:,None]
    print(A, y)
    m, c = np.linalg.lstsq(A, y, rcond=None)[0]
    
    if show_data:
        plt.errorbar(expected_gain, model_gain, yerr=model_noise, fmt='.', ls='none', label=label, color=color)
    
    x = np.linspace(0.0, 2.0, 50)
    plt.plot(x, x * m + c, '-', label="{} Fit: $m={:.2f}$ $c={:.2f}$".format(label, m, c), color=color, alpha=0.3)
    
    if boost is not None:
        eboost = float(boost) / float(m)
        plt.text(0.98, 0.98, "fit boost: {:.1f}".format(eboost), transform=plt.gca().transAxes, ha='right', va='top')
    return m, c
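# A minimal sketch of the noise-weighted least squares used above: scaling
# the design matrix rows and the target by sqrt(1/sigma) turns plain lstsq
# into a weighted fit. Unlike the snippet above (whose second column is
# zeros, pinning c to 0 in the minimum-norm solution), this uses a ones
# column for a genuine intercept. Names are illustrative.
import numpy as np

def weighted_line_fit(x, y, sigma):
    w = np.sqrt(1.0 / np.asarray(sigma))
    A = np.vstack([x, np.ones(len(x))]).T * w[:, None]
    m, c = np.linalg.lstsq(A, np.asarray(y) * w, rcond=None)[0]
    return m, c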
Example #27
def plot_k2_curve(k2_arr, conc_list):
    k2_means = np.mean(k2_arr, axis=0)
    k2_sds = np.std(k2_arr, axis=0)
    # Plot k2 on a linear scale
    plt.figure()
    plt.errorbar(conc_list, k2_means, yerr=k2_sds / np.sqrt(3))
    plt.title('$k_2$')
Example #28
def plot_ts(table):
    """
    Plot dh and dAGC time series and the correlation dAGC x dh.
    """
    sys.path.append('/Users/fpaolo/code/misc')
    from util import poly_fit
    # load data from Table
    time2 = table.cols.time2[:] 
    month = table.cols.month[:] 
    dh_mean = table.cols.dh_mean[:] 
    dh_error = table.cols.dh_error[:] 
    dg_mean = table.cols.dg_mean[:] 
    dg_error = table.cols.dg_error[:] 
    dates = [dt.datetime(y, m, 15) for y, m in zip(time2, month)]
    # plot TS
    fig = plt.figure()
    plt.subplot(211)
    plt.errorbar(dates, dh_mean, yerr=dh_error, linewidth=2)
    plt.ylabel('dh (m)')
    plt.subplot(212)
    plt.errorbar(dates, dg_mean, yerr=dg_error, linewidth=2)
    plt.ylabel('dAGC (dB)')
    fig.autofmt_xdate()
    # plot correlation
    dg_fit, dh_fit, _ = poly_fit(dg_mean, dh_mean)
    plt.figure()
    plt.plot(dg_mean, dh_mean, 'o')
    plt.plot(dg_fit, dh_fit, linewidth=2.5)
    plt.xlabel('dAGC (dB)')
    plt.ylabel('dh (m)')
    corr = np.corrcoef(dg_mean, dh_mean)[0,1]
    print('correlation = %.2f' % corr)
Example #29
def test_complete():
    fig = plt.figure('Figure with a label?', figsize=(10, 6))

    plt.suptitle('Can you fit any more in a figure?')

    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)

    plt.subplot(3, 3, 1)
    plt.plot(list(range(10)))

    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()

    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)

    plt.subplot(3, 3, 4)
    plt.imshow(data)

    plt.subplot(3, 3, 5)
    plt.pcolor(data)

    plt.subplot(3, 3, 6)
    plt.streamplot(x, y, u, v)

    plt.subplot(3, 3, 7)
    plt.quiver(x, y, u, v)

    plt.subplot(3, 3, 8)
    plt.scatter(x, x**2, label='$x^2$')
    plt.legend(loc='upper left')

    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)

    ###### plotting is done, now test its pickle-ability #########

    # Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
#    recursive_pickle(fig)

    result_fh = BytesIO()
    pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)

    plt.close('all')

    # make doubly sure that there are no figures left
    assert_equal(plt._pylab_helpers.Gcf.figs, {})

    # wind back the fh and load in the figure
    result_fh.seek(0)
    fig = pickle.load(result_fh)

    # make sure there is now a figure manager
    assert_not_equal(plt._pylab_helpers.Gcf.figs, {})

    assert_equal(fig.get_label(), 'Figure with a label?')
Example #30
 def wrfile(self):
     ((sff,eff,dis,his),wh) = self._sf()
     
     pp = PdfPages('SF of %s_%s.pdf' %(wh,self.nm))
     plt.figure()
     
     plt.clf()
     
     plt.errorbar(dis,sff,yerr=eff,markersize=4,fmt='o',ecolor='b',c='b',elinewidth=1.5,capsize=4)
     plt.xlabel('distance (pixel)')
     plt.ylabel('angle difference')
     plt.xlim(0,max(dis)+1)
     plt.ylim(0,90.)
     plt.title('%s: Structure Function for %s (Order %s)' %(self.nm,wh,int(self.od)))
     pp.savefig()
     
     bn = 5.
     for i in range(len(dis)):
         if dis[i] > 0:
             hist,bins = np.histogram(np.array(his[i]),range=(0.,90.),
                                      bins=90/bn,density=True) # 5 deg per bin, prob. density func.
             center = (bins[:-1] + bins[1:])/2
         
             plt.clf()
             plt.bar(center, hist, align='center',width=bn)
             plt.xlim(0,90)
             plt.xlabel('Angle difference (deg)')
             plt.ylabel('Normalized number of pairs in each bin (%s deg)' %bn)
             plt.title("Histogram at scale %s (pixel)" %round(dis[i],2))
             pp.savefig()
         
     pp.close()
Example #31
def main():

    #Read in the file
    fname = 'C:/Users/sammy/Downloads/Edited_cooldown_all_8_16_20.txt'
    f2 = 'C:/Users/sammy/Downloads/Edited_cooldown_all_8_16_20L.txt'

    Fdat = np.loadtxt(fname, delimiter="\t")
    F2 = np.loadtxt(f2, delimiter="\t")
    startcut = 50
    cutEnd = True
    if (cutEnd):
        #Two endcuts have been used. 5600 gives you more or less the whole file
        #Before the temperature gets out of hand. 4520 is all the data before
        #the .2K rise
        endcut = -70
        freqs = Fdat[startcut:endcut, 2]
        runs = Fdat[startcut:endcut, 3]
        temps = Fdat[startcut:endcut, 4]
        currs = Fdat[startcut:endcut, 5]
        dc = Fdat[startcut:endcut, 0]
        amp = F2[startcut:endcut, 6]
        Q = F2[startcut:endcut, 3]
    else:
        freqs = Fdat[startcut:, 2]
        runs = Fdat[startcut:, 3]
        temps = Fdat[startcut:, 4]
        currs = Fdat[startcut:, 5]
        dc = Fdat[startcut:, 0]
        amp = F2[startcut:, 6]
        Q = F2[startcut:, 3]
    #Convert current to magnetic field and make Qs positive
    Bs = currs * .85 / 5
    Q = np.abs(Q)

    #We need to calculate the temperature by getting rid of the magnetoresistance.
    #To do that, we need to get rid of any runs that were at a different temperature
    DelOutliersTemp = False
    #We may or may not want to use those different-temperature data points in the
    #full analysis. I still cut them out, it doesn't make a real difference now though
    DelOutliersFit = False
    #If you're cutting out data sections, enter this loop
    if (DelOutliersTemp or DelOutliersFit):
        #Run 17 is the weird run f
        Outliers = [17]
        #We're going to make arrays with the data (f, Q, T, etc.) from each part
        #of the dataset. If we want to keep that part of the dataset, we append
        #the new data to the running list of all data

        #Initialize indexes of where data sections start and how many fields we
        #have looked at
        startIndex = 0
        index = 0
        #Initialize data arrays
        fs, rs, ts, bs, ds, ams, qs = np.zeros(0), np.zeros(0), np.zeros(
            0), np.zeros(0), np.zeros(0), np.zeros(0), np.zeros(0)
        for j in range(len(runs) - 1):
            #If the current changed, you have found the end of a section of data
            if (currs[j] != currs[j + 1] or j == len(runs) - 2):
                #If you don't want this section of data, don't add it to the list
                #Just change the indexes to start looking at the next section
                if (index in Outliers):
                    startIndex = j + 1
                    index += 1
                #If you want to analyze this section of data, add it to the list
                #of all data
                else:
                    rs = np.append(rs, runs[startIndex:j])
                    fs = np.append(fs, freqs[startIndex:j])
                    ts = np.append(ts, temps[startIndex:j])
                    bs = np.append(bs, Bs[startIndex:j])
                    ds = np.append(ds, dc[startIndex:j])
                    ams = np.append(ams, amp[startIndex:j])
                    qs = np.append(qs, Q[startIndex:j])
                    startIndex = j + 1
                    index += 1
        #If you're cutting outlier sections from the magnetoresistance calculation,
        #make arrays of all the good data for temperature analysis
        if DelOutliersTemp:
            runsTemp = rs
            tempsTemp = ts
            BsTemp = bs
        #If you're cutting outlier sections from all analysis, just redefine the
        #base data arrays to only have the good data
        if DelOutliersFit:
            runs = rs
            freqs = fs
            temps = ts
            Bs = bs
            dc = ds
            amp = ams
            Q = qs

    #If you're not cutting outlier sections from magnetoresistance analysis,
    #define the data for temperature analysis as all of it
    if (not DelOutliersTemp):
        runsTemp = runs
        tempsTemp = temps
        BsTemp = Bs

    #Define a polynomial fit in B and the run index for the temperature value
    #that we read out
    def TFit(data, Bcs, Rcs):
        runs = data[:, 0]
        Bs = data[:, 1]

        fit = np.zeros(len(runs))
        i = 2
        #The m's are the fit coefficients, doing only an even fit in B
        for m in zip(Bcs):
            fit += m * np.power(Bs, i)
            i += 2
        #n's are fit coefficients, allow for an offset term in the run index fit
        j = 0
        for n in zip(Rcs):
            fit += n * np.power(runs, j)
            j += 1  # increment was missing; with ROrd == 1 the result is unchanged
        return fit

    #When you call the curve_fit code, it can ONLY take things of the form
    #function(x, args). We need to make a wrapping function to put TFit in
    #that form.
    def wrapT(x, *args):
        #Note that NTs is a global variable telling which indexes separate
        #the B and run fit coefficients
        polyBs = list(args[:NTs[0]])
        polyRs = list(args[NTs[0]:])
        return TFit(x, polyBs, polyRs)

    #The order of the even in B polynomial fit and run index fits
    BOrd = 5
    ROrd = 1
    #We use NTs in wrapT, so we make it a global variable. Gives the index
    #of fit variables of different types
    global NTs
    NTs = ([BOrd, BOrd + ROrd])
    pT = np.zeros(BOrd + ROrd)
    #Have a run index after which temperature rises
    tendCut = 3570
    tstartCut = 170
    #Find that index in the data arrays
    place = np.where(runsTemp == tendCut)[0]
    #Keep only data before the cutoff index
    pT[BOrd] = np.mean(tempsTemp[tstartCut:place[0]])
    #Stack the runs and B data so that it can be passed as one variable to the fit function
    RandB = np.vstack(
        (runsTemp[tstartCut:place[0]], BsTemp[tstartCut:place[0]])).T
    #Call the fit function, this syntax is terrible but works. lambda essentially
    #defines a new function that we can pass as our fit function
    popt, pcov = curve_fit(lambda RandB, *pT: wrapT(RandB, *pT),
                           RandB,
                           tempsTemp[tstartCut:place[0]],
                           p0=pT)
    #if you want to see fit results for temperature, uncomment here
    #    print(popt)
    #    print(popt/np.sqrt(np.diag(pcov)))
    plt.plot(runsTemp[tstartCut:place[0]], tempsTemp[tstartCut:place[0]])
    plt.plot(runsTemp[tstartCut:place[0]], wrapT(RandB, *popt))
    plt.xlabel('Run Index')
    plt.ylabel('Temperature Reading')
    plt.show()

    #In order to get the true temperature, we need to subtract off the magnetic
    #field part. So define a parameter array where the run-dependent fit parts are 0
    #and the magnetic field dependent parts are the results of the fit
    pSub = np.zeros(len(pT))
    pSub[:BOrd] = popt[:BOrd]

    #If you want to see the true temperature, uncomment this
    trueTsSub = tempsTemp[tstartCut:place[0]] - wrapT(RandB, *pSub)
    plt.plot(runsTemp[tstartCut:place[0]], trueTsSub)
    plt.xlabel('Run Index')
    plt.ylabel('Actual Temperature')
    plt.show()

    #I made a fit for f0 as a function of temperature, Q, and magnetic field.
    #In retrospect, somewhat unnecessary, but it works and helps with later smoothing
    #to have big jumps taken out

    #define the true temperature calling the fit function used earlier
    #but with the run terms set to 0
    TandBFull = np.vstack((runs, Bs)).T
    trueTs = temps - wrapT(TandBFull, *pSub)

    #Define the data for f0 fitting
    #tdiff = np.gradient(trueTs, 1)

    def tdmaker(Bs, trueTs):
        td = np.zeros(0)
        startIndex = 0
        for i in range(0, len(Bs) - 1):
            if (Bs[i] != Bs[i + 1]):
                td = np.append(td, np.gradient(trueTs[startIndex:i + 1], 1))
                startIndex = i + 1
        td = np.append(td, np.gradient(trueTs[startIndex:], 1))
        return td

    tdiff = tdmaker(Bs, trueTs)
    plt.plot(tdiff)
    plt.show()

    RunData = np.vstack((runs, trueTs, Q, Bs, tdiff)).T
    #Define order of polynomial fits. NBSigm must be 2, others can be changed to anything
    #Also one sigmoid in B^2
    #    N_fbase = 3
    #    N_temp = 3
    #    N_Q = 0
    #    NBSigm = 2
    #    N_BPoly = 7
    N_fbase = 3
    N_temp = 3
    N_Q = 0
    NBSigm = 2
    N_BPoly = 7
    N_tderiv = 1
    #Make a global variable for the number of each fit variable type
    global Ns
    Ns = [N_fbase, N_temp, N_Q, NBSigm, N_BPoly, N_tderiv]
    #Initialize fit parameters
    p0 = np.zeros(sum(Ns))
    p0[0] = np.mean(freqs)
    #Convert the Ns array to the indexes separating variable types
    for i in range(1, len(Ns)):
        Ns[i] = Ns[i] + Ns[i - 1]

    #Define a sigmoid for ease later
    def sigmoid(v):
        return 1 / (1 + np.exp(-v))

    #Call a wrapper for the f0 fitting. Same idea as the run and B-field fit
    def wrapper(x, *args):
        fCoeff = list(args[:Ns[0]])
        tCoeff = list(args[Ns[0]:Ns[1]])
        QCoeff = list(args[Ns[1]:Ns[2]])
        BSigmCoeff = list(args[Ns[2]:Ns[3]])
        BCoeff = list(args[Ns[3]:Ns[4]])
        TDCoeff = list(args[Ns[4]:Ns[5]])
        return fit_func(x, fCoeff, tCoeff, QCoeff, BSigmCoeff, BCoeff, TDCoeff)

    #The fit function. Polynomials and one sigmoid
    def fit_func(data, fCoeff, tCoeff, QCoeff, BSigmCoeff, BCoeff, TDCoeff):
        runs = data[:, 0]
        temps = data[:, 1]
        Qs = data[:, 2]
        Bs = data[:, 3]
        TDs = data[:, 4]
        fit = np.zeros(len(runs))
        i = 0
        for a in zip(fCoeff):
            fit += a * np.power(runs, i)
            i += 1
        j = 1
        for a in zip(tCoeff):
            fit += a * np.power(temps, j)
            j += 1
        j = 1
        for a in zip(QCoeff):
            fit += a * np.power(Qs, j)
            j += 1
        j = 1
        for a in zip(BCoeff):
            fit += a * np.power(Bs, j)
            j += 1
        j = 1
        for a in zip(TDCoeff):
            fit += a * np.power(TDs, j)
            j += 1
        fit += BSigmCoeff[0] * (sigmoid((Bs / BSigmCoeff[1])**2) - .5)
        return fit

    #Initialize p0 to some reasonable values in base f0 and B^2 terms
    p0[0] = np.min(freqs)
    p0[Ns[2]] = .5
    p0[Ns[2] + 1] = 1
    #Call the f0 fit function and get parameters
    popt2, pcov2 = curve_fit(lambda RunData, *p0: wrapper(RunData, *p0),
                             RunData,
                             freqs,
                             p0=p0)
    print(
        'f0 fit parameters in order: run index polynomial, temperature polynomial, Q polynomial, B^2 sigmoid, and B-polynomial'
    )
    print(popt2)
    #    print('fit coeff in std. dev.')
    #    print(pcov2)

    #Uncomment this if you want to see your f0 and fit of f0
    plt.plot(runs, freqs)
    plt.plot(runs, wrapper(RunData, *popt2))
    plt.ylabel("Frequency and Fit")
    plt.show()

    #Note we did not have any even/odd or voltage dependent parts in the f0 fit.
    #So the leftover parts after you subtract the fit should have that info, but
    #without the messy offsets
    leftover = freqs - wrapper(RunData, *popt2)

    #Make a smoothed version of the residual/v-dependent part
    def secAvg(left, Bs):
        sa = np.zeros(0)
        startIndex = 0
        for i in range(0, len(Bs) - 1):
            if (Bs[i] != Bs[i + 1]):
                sa = np.append(
                    sa,
                    np.mean(left[startIndex:i + 1]) *
                    np.ones(len(left[startIndex:i + 1])))
                startIndex = i + 1
        sa = np.append(
            sa,
            np.mean(left[startIndex:]) * np.ones(len(left[startIndex:])))
        return sa

    secA = secAvg(leftover, Bs)
    size = 21
    sm = signal.savgol_filter(leftover - secA, size, 1)

    #Uncomment this if you want to see plots of how well our fits for f0 are doing
    #without accounting for V. I used this to justify cutting out run 17.
    plt.plot(runs, leftover)
    size0 = 1
    plt.plot([min(runs), max(runs)], [.002, .002], 'y')
    plt.plot([min(runs), max(runs)], [-.002, -.002], 'y')
    plt.plot([min(runs), max(runs)], [.004, .004], 'r')
    plt.plot([min(runs), max(runs)], [-.004, -.004], 'r')
    plt.ylabel("Frequency Fit Residual")
    plt.show()
    plt.plot(runs, leftover - secA - sm)
    plt.plot([min(runs), max(runs)], [.002, .002], 'y')
    plt.plot([min(runs), max(runs)], [-.002, -.002], 'y')
    plt.plot([min(runs), max(runs)], [.004, .004], 'r')
    plt.plot([min(runs), max(runs)], [-.004, -.004], 'r')
    plt.show()

    #Decide if you want to subtract off offsets in the leftover data as you know any
    #linear in V terms will avearge out to 0
    ZeroOut = True
    if (ZeroOut):
        leftover = leftover - secA - sm

    # a = 2040
    # b = 2375

    # tdiff = np.gradient(trueTs, 1)
    # plt.plot(runs[a:b], leftover[a:b])
    # plt.plot(runs[a:b], -.4*tdiff[a:b])
    # plt.ylabel("Frequency and Deriv Fit")
    # plt.show()

    #Convert the even/odd data to -1 and 1
    EO = 2 * (runs % 2 - .5)

    #Get a sense of the std. dev. on the leftover data
    size2 = 81
    distStart = np.sqrt(leftover**2)
    smDist = np.sqrt(signal.savgol_filter(leftover**2, size2, 1))

    #If CutOutliers is true, cut out all points from the dataset which are
    #more than 'cut' standard deviations from 0.
    CutOutliers = True
    if (CutOutliers):
        cut = 2
        pts = len(leftover)
        mask = np.ones(pts, dtype=bool)
        for i in range(0, pts):
            if (distStart[i] > cut * smDist[i]):
                mask[i] = False
        leftover = leftover[mask]
        trueTs = trueTs[mask]
        Q = Q[mask]
        Bs = Bs[mask]
        EO = EO[mask]
        runs = runs[mask]
        plt.plot(runs, leftover)
        plt.plot([min(runs), max(runs)], [.002, .002], 'y')
        plt.plot([min(runs), max(runs)], [-.002, -.002], 'y')
        plt.plot([min(runs), max(runs)], [.004, .004], 'r')
        plt.plot([min(runs), max(runs)], [-.004, -.004], 'r')
        plt.show()
        distStart = np.sqrt(leftover**2)
        smDist = np.sqrt(signal.savgol_filter(leftover**2, size2, 1))
        #Uncomment if you want to see how many points we are cutting out
        plt.plot(distStart)
        plt.plot(cut * smDist)
        plt.show()

    #VERY IMPORTANT STEP. I multiply the remainder data by the +-1 even/odd
    #index, or the sign of voltage applied. This makes it so that signal looks
    #like an offset instead of noise. Very helpful in determining if fits are
    #working
    leftover = leftover * EO

    #Do the same procedure we did on the f0 fit, now just fitting the voltage-dependent data
    #Don't change the offset N_V from 1, others can be changed at will
    global NEO
    N_V = 1
    N_VT = 3
    N_VQ = 2
    N_VB = 2
    NEO = [N_V, N_VT, N_VQ, N_VB]
    p0 = np.zeros(sum(NEO))
    for i in range(1, len(NEO)):
        NEO[i] = NEO[i] + NEO[i - 1]
    EOData = np.vstack((EO, trueTs, Q, Bs)).T

    #Same procedure as the f0 data, split up the fit coefficients and pass them
    #to a fitting function
    def wrapEO(x, *args):
        V_Coeff = list(args[:NEO[0]])
        VT_Coeff = list(args[NEO[0]:NEO[1]])
        VQ_Coeff = list(args[NEO[1]:NEO[2]])
        VB_Coeff = list(args[NEO[2]:NEO[3]])
        return fit_EO(x, V_Coeff, VT_Coeff, VQ_Coeff, VB_Coeff)

    #the actual fit function
    def fit_EO(data, V_Coeff, VT_Coeff, VQ_Coeff, VB_Coeff):
        eo = data[:, 0]
        ts = data[:, 1] - np.mean(data[:, 1])
        Qs = data[:, 2]
        Qs = (Qs - np.mean(Qs)) / np.std(Qs)
        Bs = data[:, 3]
        fit = np.zeros(len(eo))
        for a in zip(V_Coeff):
            fit += a
        j = 1
        for a in zip(VT_Coeff):
            fit += a * np.power(ts, j)
            j += 1
        j = 1
        for a in zip(VQ_Coeff):
            fit += a * np.power(Qs, j)
            j += 1
        j = 1
        for a in zip(VB_Coeff):
            fit += a * np.power(Bs, j)
            j += 1
        return fit

    #Call the fit function
    poptV, pcovV = curve_fit(lambda EOData, *p0: wrapEO(EOData, *p0),
                             EOData,
                             leftover,
                             p0=p0)
    print(
        'Fit coefficients in the order: 0 field offset, temperature polynomial, Q polynomial, B polynomial'
    )
    print(poptV)
    print('Fit coeff. std. dev')
    print(np.sqrt(np.diag(pcovV)))
    print('Fit coeff. in std. dev. from 0')
    print(poptV / np.sqrt(np.diag(pcovV)))

    #Make a plot a fit
    pred = wrapEO(EOData, *poptV)
    plt.plot(leftover)
    plt.plot(pred)
    plt.xlabel('Run Index')
    plt.ylabel('Voltage dependent signal')
    plt.show()
    #Plot residuals to the fit, show that they average to 0
    plt.plot(leftover - pred)
    plt.plot(signal.savgol_filter(leftover - pred, size, 1))
    plt.xlabel('Run Index')
    plt.ylabel('Fit Residual')
    plt.show()
    #Plot the std. dev. of the resisduals. Note that it seems about right,
    #a 2mHz or so uncertainty for most of the data.
    plt.plot(np.sqrt(signal.savgol_filter((leftover - pred)**2, size, 1)))
    plt.xlabel('Run Index')
    plt.ylabel('Fit std. dev.')
    plt.show()

    param_noB = np.zeros(len(poptV))
    param_noB[1:NEO[-2]] = poptV[1:NEO[-2]]
    param_B = np.zeros(len(poptV))
    param_B[0] = poptV[0]
    param_B[NEO[-2]:] = poptV[NEO[-2]:]
    print('B parameters')
    print(param_B)
    OnlyBData = leftover - wrapEO(EOData, *param_noB)
    Bpred = wrapEO(EOData, *param_B)

    index = 0
    startIndex = 0
    runInds = np.zeros(0)
    runBs = np.zeros(0)
    runShifts = np.zeros(0)
    runDevs = np.zeros(0)
    smData = np.zeros(0)
    sz = 21
    for j in range(0, len(Bs) - 1):
        if (Bs[j] != Bs[j + 1] or j == len(Bs) - 2):
            subData = OnlyBData[startIndex:j]
            smData = np.append(smData, signal.savgol_filter(subData, sz, 1))
            runBs = np.append(runBs, Bs[j])
            runShifts = np.append(runShifts, np.mean(subData))
            runDevs = np.append(runDevs,
                                np.std(subData) / np.sqrt(len(subData)))
            runInds = np.append(runInds, index)
            startIndex = j + 1
            index += 1

    plt.plot(OnlyBData)
    plt.plot(smData)
    plt.plot(Bpred)
    plt.show()

    runShifts *= 1000
    runDevs *= 1000
    plt.errorbar(runBs,
                 runShifts,
                 yerr=runDevs,
                 marker='o',
                 linewidth=0,
                 elinewidth=1,
                 capsize=2)
    xs = np.linspace(min(runBs), max(runBs))
    ys = np.ones(len(xs)) * poptV[0]
    for i in range(NEO[-2], len(poptV)):
        ys += poptV[i] * np.power(xs, i - NEO[-2] + 1)
    ys *= 1000
    plt.plot(xs, ys)
    plt.ylabel('Shift (mHz)')
    plt.xlabel('Magnetic Field (T)')
    plt.show()

    preds = np.ones(len(runBs)) * poptV[0]
    for i in range(NEO[-2], len(poptV)):
        preds += poptV[i] * np.power(runBs, i - NEO[-2] + 1)
    preds *= 1000
    plt.errorbar(runBs,
                 runShifts - preds,
                 yerr=runDevs,
                 marker='o',
                 linewidth=0,
                 elinewidth=1,
                 capsize=2)
    plt.plot(runBs, np.zeros(len(runBs)))
    plt.ylabel('Fit Residual (mHz)')
    plt.xlabel('Magnetic Field (T)')
    plt.show()

    plt.errorbar(runInds,
                 runShifts - preds,
                 yerr=runDevs,
                 marker='o',
                 linewidth=0,
                 elinewidth=1,
                 capsize=2)
    plt.plot(runInds, np.zeros(len(runInds)))
    plt.ylabel('Fit Residual (mHz)')
    plt.xlabel('Run Index')
    plt.show()

    eYs = np.ones(len(xs)) * poptV[0]
    ErunShifts = runShifts.copy()  # copy so the in-place subtraction below does not modify runShifts
    for i in range(NEO[-2], len(poptV)):
        if ((i - NEO[-2] + 1) % 2 == 1):
            ErunShifts -= 1000 * poptV[i] * np.power(runBs, i - NEO[-2] + 1)
        else:
            eYs += poptV[i] * np.power(xs, i - NEO[-2] + 1)
    eYs = 1000 * eYs
    plt.errorbar(runBs,
                 ErunShifts,
                 yerr=runDevs,
                 marker='o',
                 linewidth=0,
                 elinewidth=1,
                 capsize=2)
    plt.plot(xs, eYs)
    plt.ylabel('Even Component of Fit (mHz)')
    plt.xlabel('Magnetic Field (T)')
    plt.show()

    allBs = np.zeros(0)
    avgVal = np.zeros(0)
    weight = np.zeros(0)
    for i in range(0, len(runBs)):
        b = np.where(allBs == runBs[i])[0]
        if (len(b) == 0):
            allBs = np.append(allBs, runBs[i])
            avgVal = np.append(avgVal, runShifts[i] / (runDevs[i]**2))
            weight = np.append(weight, 1 / (runDevs[i]**2))
        else:
            avgVal[b] += runShifts[i] / (runDevs[i]**2)
            weight[b] += 1 / (runDevs[i]**2)
    avgVal = avgVal / weight
    sigm = 1 / np.sqrt(weight)
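
    # Vectorized cross-check of the weighted average above (a sketch:
    # np.unique returns the field values sorted, so uniqB is allBs in
    # sorted order rather than order of first appearance).
    uniqB, inv = np.unique(runBs, return_inverse=True)
    w = 1.0 / runDevs**2
    avg_check = np.bincount(inv, weights=w * runShifts) / np.bincount(inv, weights=w)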

    plt.errorbar(allBs,
                 avgVal,
                 yerr=sigm,
                 marker='o',
                 linewidth=0,
                 elinewidth=1,
                 capsize=2)
    plt.ylabel('Data (mHz)')
    plt.xlabel('Magnetic Field (T)')
    plt.show()

    eBs = np.zeros(0)
    eaV = np.zeros(0)
    eVar = np.zeros(0)

    for i in range(0, len(allBs)):
        b = np.where(eBs == np.abs(allBs[i]))[0]
        if (len(b) == 0):
            eBs = np.append(eBs, np.abs(allBs[i]))
            if (allBs[i] == 0):
                eaV = np.append(eaV, avgVal[i])
                eVar = np.append(eVar, sigm[i]**2)
            else:
                eaV = np.append(eaV, avgVal[i] / 2)
                eVar = np.append(eVar, sigm[i]**2 / 4)
        else:
            eaV[b] += avgVal[i] / 2
            eVar[b] += sigm[i]**2 / 4
    eSig = np.sqrt(eVar)

    def ffE(x, b):
        return b * x**2
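
    # Even-in-B model: folding +B and -B together cancels the odd terms, so
    # only a quadratic term is fitted here.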

    pE, pcovE = curve_fit(ffE, eBs, eaV, p0=[0], sigma=eSig)
    EFs = np.linspace(0, max(eBs), 100)
    FE = ffE(EFs, pE[0])

    print('Even Fit Parameters')
    print(pE)
    print('Even Fit Sig')
    print(np.sqrt(np.diag(pcovE)))
    print('Even Fit Parameters in Std Dev')
    print(pE / np.sqrt(np.diag(pcovE)))
    plt.errorbar(eBs,
                 eaV,
                 yerr=eSig,
                 marker='o',
                 linewidth=0,
                 elinewidth=1,
                 capsize=2)
    plt.plot(EFs, FE)
    plt.ylabel('Even Data (mHz)')
    plt.xlabel('Magnetic Field (T)')
    plt.show()
print('Optimal Values')
print('a: ' + str(a))
print('b: ' + str(b))

# compute r^2
r2 = 1.0 - (sum((y - f(x, a, b))**2) / ((n - 1.0) * np.var(y, ddof=1)))
print('R^2: ' + str(r2))

# calculate parameter confidence interval
a,b = unc.correlated_values(popt, pcov)
print('Uncertainty')
print('a: ' + str(a))
print('b: ' + str(b))

# plot data
plt.errorbar(list(xlist), list(ylist), xerr=errors, fmt='o', markersize=10, capsize=10, alpha=.5)
plt.xlim(-.05,1.05)
plt.ylim(-.05,1.05)
# calculate regression confidence interval
px = np.linspace(-.2, 1.2, 100)
py = a*px+b
nom = unp.nominal_values(py) 
std = unp.std_devs(py)

def predband(x, xd, yd, p, func, conf=0.95):  # function that computes prediction bands
    # x = requested points
    # xd = x data
    # yd = y data
    # p = parameters
    # func = function name
    alpha = 1.0 - conf    # significance
Example #33
"""
__Data__

This example fits a single 1D Gaussian, so we load and plot data containing one Gaussian.
"""
dataset_path = path.join("dataset", "example_1d", "gaussian_x1")
data = af.util.numpy_array_from_json(
    file_path=path.join(dataset_path, "data.json"))
noise_map = af.util.numpy_array_from_json(
    file_path=path.join(dataset_path, "noise_map.json"))

plt.errorbar(
    x=range(data.shape[0]),
    y=data,
    yerr=noise_map,
    color="k",
    ecolor="k",
    elinewidth=1,
    capsize=2,
)
plt.show()
plt.close()
"""
__Model + Analysis__

We create the model and analysis, which in this example is a single `Gaussian` and therefore has dimensionality N=3.
"""
model = af.Model(m.Gaussian)

model.centre = af.UniformPrior(lower_limit=0.0, upper_limit=100.0)
model.normalization = af.UniformPrior(lower_limit=1e-2, upper_limit=1e2)
Example #34
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    n = 201
    x = np.linspace(0, 2, n)
    y = (np.cos(x * 6. + 2)**2) + 0.1 * np.random.normal(size=n) + 100.

    y_err = 0.1 * np.abs(np.random.normal(size=n))  # error bars must be non-negative

    #p, p_error, x_fit, y_fit = cos.fit(x, y, evaluate_function=True,
    #        initialise={'period':5})
    p, p_error, x_fit, y_fit = cos_2.fit(x, y, evaluate_function=True)

    print(p)
    print(p_error)

    plt.errorbar(x, y, yerr=y_err, fmt='.')
    plt.plot(x_fit, y_fit)

    # Weighted fit
    p, p_error, x_fit, y_fit = cos_2.fit(x,
                                         y,
                                         y_err=y_err,
                                         evaluate_function=True)

    print(p)
    print(p_error)
    plt.plot(x_fit, y_fit)

    plt.show()
Example #35
    if Is_train == False:
        plt.xlabel('Latent Variable value', fontsize=12)
        plt.ylabel('Magnetization of a single sample generated by the network',
                   fontsize=10)
        plt.legend()
        plt.show()
    if Is_train == True:
        print("Magnetization Accuracy")
        print(Mdist)
        print(mean_magnetization)
        print(mean_magnetization_data)
        print(var_magnetization)
        print(var_magnetization_data)
        plt.errorbar(T_vals,
                     mean_magnetization,
                     var_magnetization,
                     color='b',
                     label='Samples')
        plt.errorbar(T_vals,
                     mean_magnetization_data,
                     var_magnetization_data,
                     color='g',
                     label='Data')
        plt.xlabel("Temperature")
        plt.ylabel('Magnetization')
        plt.title('Vanilla VAE')
        plt.legend()
        plt.savefig('../../Desktop/Vanilla_VAE-Magnetization.png',
                    bbox_inches='tight')
        plt.show()
Example #36
    X, Y = [], []
    for _ in range(lth):
        X.append(Value(np.random.rand(), np.random.rand()/10))
        Y.append(Value(np.random.rand(), np.random.rand()/20))

    return np.array(X), np.array(Y)


rand_data_1 = generate_random_data_1(10)
rand_data_2 = generate_random_data_2(10)
rand_data_3 = rand_data_2[0]**2 * np.log(rand_data_2[1]) + Value(2.02, 0.25)

plt.figure()
plt.errorbar(
    val(rand_data_1)[:, 0], val(rand_data_1)[:, 1],
    xerr=unc(rand_data_1)[:, 0], yerr=unc(rand_data_1)[:, 1],
    ls='None', c='b', marker='None', label='1'
)

plt.errorbar(
    val(rand_data_2[0]), val(rand_data_2[1]),
    xerr=unc(rand_data_2[0]), yerr=unc(rand_data_2[1]),
    ls='None', c='r', marker='None', label='2'
)

plt.errorbar(
    val(rand_data_2[0]), val(rand_data_3),
    xerr=unc(rand_data_2[0]), yerr=unc(rand_data_3),
    ls='None', c='g', marker='None', label='3'
)
def ChapmanKolmogorovTest(
        RMSDdir='/Users/tud51931/projects/MSM/msm/ff03-hybridkcenter/RMSDCluster4.2',
        stateslist=[],
        tau=50,
        kmax=8,
        cutoff=4.2,
        bootstrap=False,
        bootstrapnumber=100):

    Prob_tau, Prob_ktau = [1], [1]
    Prob_tau_std, Prob_ktau_std = [0], [0]
    for k in range(1, kmax + 1):
        if bootstrap:
            ktau = k * tau
            filepath_tau = os.path.join(RMSDdir, 'lagtime%d' % tau,
                                        'bootstrap')
            filepath_ktau = os.path.join(RMSDdir, 'lagtime%d' % ktau,
                                         'bootstrap')
            Prob_tau_bootstrap = []
            Prob_ktau_bootstrap = []

            for i in range(1, bootstrapnumber + 1):
                print "k = %d of 8 , bootstrap directory = %d of 100" % (k, i)
                Tc_tau = mmread(
                    os.path.join(filepath_tau, '%d' % i, 'tProb.mtx'))
                Populations_tau = loadtxt(
                    os.path.join(filepath_tau, '%d' % i, 'Populations.dat'))
                Mapping_tau = loadtxt(
                    os.path.join(filepath_tau, '%d' % i, 'Mapping.dat'))

                Tc_ktau = mmread(
                    os.path.join(filepath_ktau, '%d' % i, 'tProb.mtx'))
                Populations_ktau = loadtxt(
                    os.path.join(filepath_ktau, '%d' % i, 'Populations.dat'))
                Mapping_ktau = loadtxt(
                    os.path.join(filepath_ktau, '%d' % i, 'Mapping.dat'))

                Probability_tau, Probability_ktau = CalculateStatesProbability(
                    Tc_tau,
                    Tc_ktau,
                    Populations_tau,
                    Populations_ktau,
                    Mapping_tau,
                    Mapping_ktau,
                    k,
                    cutoff,
                    tau,
                    states=stateslist)
                Prob_tau_bootstrap.append(Probability_tau)
                Prob_ktau_bootstrap.append(Probability_ktau)
            Prob_tau.append(np.mean(Prob_tau_bootstrap))
            Prob_tau_std.append(np.std(Prob_tau_bootstrap))
            Prob_ktau.append(np.mean(Prob_ktau_bootstrap))
            Prob_ktau_std.append(np.std(Prob_ktau_bootstrap))

        else:
            ktau = k * tau
            filepath_tau = os.path.join(RMSDdir, 'lagtime%d' % tau)
            filepath_ktau = os.path.join(RMSDdir, 'lagtime%d' % ktau)
            Prob_tau_bootstrap = []
            Prob_ktau_bootstrap = []

            Tc_tau = mmread(os.path.join(filepath_tau, 'tProb.mtx'))
            Populations_tau = loadtxt(
                os.path.join(filepath_tau, 'Populations.dat'))
            Mapping_tau = loadtxt(os.path.join(filepath_tau, 'Mapping.dat'))

            Tc_ktau = mmread(os.path.join(filepath_ktau, 'tProb.mtx'))
            Populations_ktau = loadtxt(
                os.path.join(filepath_ktau, 'Populations.dat'))
            Mapping_ktau = loadtxt(os.path.join(filepath_ktau, 'Mapping.dat'))

            Probability_tau, Probability_ktau = CalculateStatesProbability(
                Tc_tau,
                Tc_ktau,
                Populations_tau,
                Populations_ktau,
                Mapping_tau,
                Mapping_ktau,
                k,
                cutoff,
                tau,
                states=stateslist)

            Prob_tau.append(Probability_tau)
            Prob_ktau.append(Probability_ktau)

        print "list-fixed", stateslist

    if bootstrap:
        path = "/Users/tud51931/projects/MSM/msm/ff03-hybridkcenter/result/ChapmanKolmogorovTest"
        plt.figure()
        plt.errorbar(range(len(Prob_tau)), Prob_tau, Prob_tau_std)
        plt.errorbar(range(len(Prob_ktau)), Prob_ktau, Prob_ktau_std)
        plt.legend(['Prob_tau', 'Prob_ktau'])
        plt.title("Cutoff = %0.1f tau = %d" % (cutoff, tau))
        plt.ylim(0, 1)
        plt.savefig("CKtestbootstrap_tau%d_Cutoff%0.1f.png" % (tau, cutoff))
        plt.show()
    else:
        path = "/Users/tud51931/projects/MSM/msm/ff03-hybridkcenter/result/ChapmanKolmogorovTest"
        plt.figure()
        plt.plot(range(len(Prob_tau)), Prob_tau)
        plt.plot(range(len(Prob_ktau)), Prob_ktau)
        plt.legend(['Prob_tau', 'Prob_ktau'])
        plt.title("Cutoff = %0.1f tau = %d" % (cutoff, tau))
        plt.xlabel('K')
        plt.ylabel('State Population')
        plt.ylim(0, 1)

        figname = "CKtest_tau%d_RMSDCutoff%0.1f.png" % (tau, cutoff)
        print("Save to %s" % figname)
        plt.savefig(figname)
Example #38
				if j == 0:
					accuracy1.append(float(row[0]))
				elif j == 1:
					accuracy2.append(float(row[0]))
				elif j == 2:
					accuracy3.append(float(row[0]))
				elif j == 3:
					accuracy4.append(float(row[0]))
				else:
					accuracy5.append(float(row[0]))

# add rounds vs cohort size to plot
accuracy = [sum(accuracy1)/len(accuracy1), sum(accuracy2)/len(accuracy2), sum(accuracy3)/len(accuracy3), sum(accuracy4)/len(accuracy4), sum(accuracy5)/len(accuracy5)]
std = [stat.pstdev(accuracy1), stat.pstdev(accuracy2), stat.pstdev(accuracy3), stat.pstdev(accuracy4), stat.pstdev(accuracy5)]
# plt.plot(cohort_size, accuracy)
plt.errorbar(cohort_size, accuracy, yerr=std, capsize=4)


# finish plot
plt.grid(b=True, which='both', axis='y')
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xlabel('Cohort Size for Each Global Round')
plt.ylabel('Maximum Sparse Categorical Accuracy Reached')
plt.suptitle("MNIST, 100 Shuffle Seeds for 80% IID Data")
plt.title("Maximum Accuracy Reached Averaged Over Cohort Size, with Fairness of Trials")
plt.savefig("results/" + batch_name + "/avg_accuracy_vs_cohort.png")


# averages as deviation from cohort size 5
plt.clf()
accuracy1 = []
Example #39
def show_photons(photons,
                 xmlname,
                 e_min,
                 e_max,
                 area,
                 duration,
                 dir,
                 radius,
                 ebins=30):
    """
    Show photons using matplotlib (if available).
    """
    # Only proceed if matplotlib is available
    try:
        # Import matplotlib
        import matplotlib.pyplot as plt

        # Create figure
        plt.figure(1)
        plt.title("MC simulated photon spectrum (" + str(e_min) + '-' +
                  str(e_max) + " TeV)")

        # Setup energy range covered by data
        ebds = GEbounds(ebins, GEnergy(e_min, "TeV"), GEnergy(e_max, "TeV"))

        # Create energy axis
        energy = []
        for i in range(ebds.size()):
            energy.append(ebds.elogmean(i).TeV())

        # Fill histogram
        counts = [0.0 for i in range(ebds.size())]
        for photon in photons:
            index = ebds.index(photon.energy())
            counts[index] = counts[index] + 1.0

        # Create error bars
        error = [sqrt(c) for c in counts]
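        # Poisson counting errors: sigma_i = sqrt(N_i) for each bin count N_i.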

        # Get model values
        models = GModels(xmlname)
        mod = models[0]
        model = []
        t = GTime()
        mod.spatial().set_mc_cone(dir, radius)
        for i in range(ebds.size()):
            eng = ebds.elogmean(i)
            ewidth = ebds.ewidth(i)
            f      = mod.spatial().spectrum().eval(eng, t) * \
                     area * duration * ewidth.MeV()
            model.append(f)

        # Plot data
        plt.loglog(energy, counts, 'ro')
        plt.errorbar(energy, counts, error, fmt='none', ecolor='r')

        # Plot model
        plt.plot(energy, model, 'b-')

        # Set axes
        plt.xlabel("Energy (TeV)")
        plt.ylabel("Number of incident photons")

        # Create figure
        plt.figure(2)
        plt.title("MC simulated photon map")

        # Create RA and DEC arrays
        ra = []
        dec = []
        for photon in photons:
            ra.append(photon.dir().ra_deg())
            dec.append(photon.dir().dec_deg())

        # Make scatter plot
        plt.scatter(ra, dec, marker=".")

        # Set axes
        plt.xlabel("Right Ascension (deg)")
        plt.ylabel("Declination (deg)")

        # Notify
        print("PLEASE CLOSE WINDOW TO CONTINUE ...")

        # Show plot
        plt.show()

    except ImportError:
        print("Matplotlib is not (correctly) installed on your system.")

    # Return
    return
Example #40
def IVtrace(ch,
            Vgap,
            Vrange=[0, 12],
            average=5,
            pngpath='/home/amigos/data/SIS/IV/'):
    '''
    DESCRIPTION
    ================
    Sweep the SIS mixer bias voltage, record the averaged I-V curve, and
    plot/save the result.

    ARGUMENT
    ================
        1. ch: SIS mixer channel to sweep (0 or 1).
            Type: int
        2. Vgap: gap voltage [mV]; fine voltage steps are used around it.
            Type: float
        3. Vrange: [start, stop] of the voltage sweep [mV].
            Type: list
        4. average: number of monitor readings averaged per bias point.
            Type: int
        5. pngpath: directory in which the PNG figure is saved.
            Type: str

    RETURN
    ================
    Nothing.
    '''
    Vres = 0.5
    Vrres = 0.5
    if ch == 0:
        DAch = 0
        VADch = 0
        IADch = 1
    else:
        DAch = 1
        VADch = 2
        IADch = 3
    t = datetime.datetime.now()
    ut = t.strftime('%Y%m%d_%H%M%S')
    filename = 'SISIV' + ut + '.png'
    box = Cryo.mixer()
    V_mon = np.array([])
    Verr_mon = np.array([])
    I_mon = np.array([])
    Ierr_mon = np.array([])
    V_list_low = np.arange(Vrange[0], Vgap - 0.5, Vres)
    V_list_mid = np.arange(Vgap - 0.5, Vgap + 0.5, 0.05)
    V_list_high = np.arange(Vgap + 0.5, Vrange[1], Vres)
    V_list = np.hstack((V_list_low, V_list_mid, V_list_high))
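    # The sweep grid is coarse (Vres steps) far from the gap and fine
    # (0.05 mV steps) within +/-0.5 mV of Vgap, where the I-V curve changes
    # most rapidly.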
    print(V_list)
    print('======== START ========')
    for v in V_list:
        print('V = ' + str(round(v, 2)) + ' mV')
        box.set_sisv(Vmix=v, ch=DAch)
        dV = np.array([])
        dI = np.array([])
        time.sleep(0.1)
        for j in range(average):
            ret = box.monitor_sis()
            dV = np.append(dV, ret[VADch] * 1e+1)
            dI = np.append(dI, ret[IADch] * 1e+3)
        dV_mean = np.mean(dV, axis=0)
        dV_std = np.std(dV, axis=0)
        V_mon = np.append(V_mon, dV_mean)
        Verr_mon = np.append(Verr_mon, dV_std)
        dI_mean = np.mean(dI, axis=0)
        dI_std = np.std(dI, axis=0)
        I_mon = np.append(I_mon, dI_mean)
        Ierr_mon = np.append(Ierr_mon, dI_std)
    V_rlist = np.arange(Vrange[0], Vrange[1], Vrres)
    V_rlist = np.sort(V_rlist)[::-1]
    for v in V_rlist:
        print('V = ' + str(round(v, 2)) + ' mV')
        box.set_sisv(Vmix=v, ch=DAch)
        time.sleep(0.1)
    print('======== END ========')
    print(dV)
    plt.errorbar(V_mon,
                 I_mon,
                 xerr=Verr_mon,
                 yerr=Ierr_mon,
                 fmt='.',
                 ecolor='red',
                 color='red',
                 label='ch=' + str(DAch))
    plt.plot(V_mon,
             I_mon,
             linestyle='-',
             color='green',
             linewidth=1.0,
             label='interpolation')
    plt.title('SIS Mixer I-V ' + t.strftime('%Y/%m/%d/ %H:%M:%S'))
    plt.xlim(0, V_mon.max())
    plt.ylim(I_mon.min(), I_mon.max())
    plt.xlabel('Mixer Voltage [mV]')
    plt.ylabel('Mixer Current [uA]')
    plt.grid(True)
    plt.legend(loc='upper left')
    plt.savefig(pngpath + filename)
    plt.show()
Example #41
        lw = LedoitWolf(store_precision=False, assume_centered=True)
        lw.fit(X)
        lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
        lw_shrinkage[i, j] = lw.shrinkage_

        oa = OAS(store_precision=False, assume_centered=True)
        oa.fit(X)
        oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
        oa_shrinkage[i, j] = oa.shrinkage_

# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range,
             lw_mse.mean(1),
             yerr=lw_mse.std(1),
             label='Ledoit-Wolf',
             color='navy',
             lw=2)
plt.errorbar(n_samples_range,
             oa_mse.mean(1),
             yerr=oa_mse.std(1),
             label='OAS',
             color='darkorange',
             lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)

# plot shrinkage coefficient
plt.subplot(2, 1, 2)
Example #42
        np.sum(spectra, axis=0) - Nbg[I] * np.sum(BGspectra, axis=0)) * ss
    SAreas[i] = (np.sum(Areas, axis=1) * sa).T

plt.figure()
plt.bar(0.5 * (binsSpec[1:] + binsSpec[:-1]),
        spectrum,
        width=binsSpec[1:] - binsSpec[:-1],
        label='data')
plt.bar(0.5 * (binsSpec[1:] + binsSpec[:-1]),
        BGspectrum * Nbg[I],
        width=binsSpec[1:] - binsSpec[:-1],
        label='Background',
        alpha=0.5)
plt.errorbar(0.5 * (binsSpec[1:] + binsSpec[:-1]),
             np.mean(Sspectrum, axis=0),
             np.std(Sspectrum, axis=0),
             fmt='k.',
             linewidth=3)
plt.xlabel('PEs', fontsize=25)

fig, ax = plt.subplots(2, 3)
for i in range(6):
    np.ravel(ax)[i].bar(0.5 * (bins[1:] + bins[:-1]),
                        spectra[:, i],
                        width=(bins[1:] - bins[:-1]),
                        label='Data PMT {}'.format(i))
    np.ravel(ax)[i].bar(0.5 * (bins[1:] + bins[:-1]),
                        Nbg[I] * BGspectra[:, i],
                        width=(bins[1:] - bins[:-1]),
                        label='BG'.format(i),
                        alpha=0.5)
Example #43
def run_code(var_class='RRLYR', sample=50, n_gen=20, n_process=3, plot=False):
    table_file = '%s/tables/Blind15A_training_set_goodP.csv' % (mainpath)
    table_15 = pd.read_csv(table_file)
    table_15.set_index('internalID', inplace=True)
    print('Training set opened...')

    class_example = table_15.query('Var_Type == "%s"' % (var_class))
    if sample != 'False':
        class_example = class_example.sample(sample)

    time_all = []
    for k in range(50):
        print('\r', k, end='')
        if k == 2:
            continue
        path = '%s/info/Blind15A_%02i/Blind15A_%02i_epochs_g.txt' % \
               (mainpath, int(k+1), int(k+1))
        aux = np.loadtxt(path)
        time_all.append(aux)
    print('Empirical times loaded')

    all_lcs, all_per = [], []
    count = 0
    for item in class_example.iterrows():
        if item[0] == 'Blind15A_22_S27_0428_0159' or \
           item[0] == 'Blind15A_33_S19_1145_0097' or \
           item[0] == 'Blind15A_05_S23_0455_3568' or \
           item[0] == 'Blind15A_19_N4_0938_3573':
            continue

        T = float(item[1]['PeriodLS'])
        T_W = float(item[1]['PeriodWMCC'])
        T_G = float(item[1]['PeriodGLS'])
        print(item[0])
        field, CCD, X, Y = re.findall(
            r'(\w+\d+\w?\_\d\d?)\_(\w\d+?)\_(\d+)\_(\d+)', item[0])[0]
        time, mag, err = give_me_lc(field, CCD, X, Y, extract=False)
        filtered_data = sigma_clip(mag,
                                   sigma=3,
                                   iters=1,
                                   cenfunc=np.mean,
                                   copy=False)
        time = time[~filtered_data.mask]
        mag = mag[~filtered_data.mask]
        err = err[~filtered_data.mask]
        print('Period: %f days' % T)
        n = np.random.choice(range(49))
        hits_time = time_all[n][:, 1]
        # predicted magnitude for GP
        if var_class == 'NV':
            mag_obs = GP_fit(time, mag, err, plot=plot, x_pred=hits_time)
        else:
            mag_obs = GP_fit_periodic(time,
                                      mag,
                                      err,
                                      T,
                                      T_1=T,
                                      plot=plot,
                                      x_pred=hits_time)

        # scale to HiTS depth and add empirical uncertainties
        hits_mag, hits_noise_mag, hits_err = HiTS_depth_noise(mag_obs)
        if hits_mag.mean() < 20:
            size = int(len(hits_time) * np.random.uniform(.95, 1., size=1))
        elif hits_mag.mean() < 22 and hits_mag.mean() > 20:
            size = int(len(hits_time) * np.random.uniform(.85, 1., size=1))
        else:
            size = int(len(hits_time) * np.random.uniform(.75, 1., size=1))
        print(hits_mag.mean(), size, end=' ')
        idx = np.sort(
            np.random.choice(np.arange(len(hits_time)),
                             size=size,
                             replace=False))
        hits_time = hits_time[idx]
        hits_mag = hits_mag[idx]
        hits_noise_mag = hits_noise_mag[idx]
        hits_err = hits_err[idx]

        if plot:
            if var_class == 'NV':
                plt.figure(figsize=(9, 4))
                plt.errorbar(hits_time,
                             hits_mag,
                             yerr=hits_err,
                             fmt='k.',
                             ms=7,
                             lw=1,
                             alpha=1)
                plt.errorbar(hits_time,
                             hits_noise_mag,
                             yerr=hits_err,
                             fmt='r.',
                             ms=7,
                             lw=1,
                             alpha=1)
                plt.gca().invert_yaxis()
                plt.show()

            else:
                phase = np.mod(hits_time, T) / T
                sort_idx = np.argsort(phase)

                hits_PHASE = phase[sort_idx]
                hits_MAG = hits_mag[sort_idx]
                hits_noise_MAG = hits_noise_mag[sort_idx]
                hits_ERR = hits_err[sort_idx]

                hits_MAG = np.concatenate([hits_MAG, hits_MAG])
                hits_noise_MAG = np.concatenate(
                    [hits_noise_MAG, hits_noise_MAG])
                hits_PHASE = np.concatenate([hits_PHASE, hits_PHASE + 1])
                hits_ERR = np.concatenate([hits_ERR, hits_ERR])

                plt.errorbar(hits_PHASE,
                             hits_MAG,
                             yerr=hits_ERR,
                             fmt='k.',
                             ms=7,
                             lw=1,
                             alpha=1)
                plt.errorbar(hits_PHASE,
                             hits_noise_MAG,
                             yerr=hits_ERR,
                             fmt='r.',
                             ms=7,
                             lw=1,
                             alpha=1)
                plt.gca().invert_yaxis()
                plt.show()

        for k in range(n_gen):
            print('\r', k, end='')
            if var_class in ['ROTVAR', 'EB']:
                up_lim = 10 * T
                if up_lim > 15:
                    up_lim = 15.
                T_1 = np.random.uniform(0.01, up_lim)
            elif var_class == 'RRLYR':
                T_1 = np.random.uniform(.05, 1, 1)
            elif var_class == 'DSCT':
                T_1 = np.random.uniform(0.02, 0.33)
            elif var_class == 'NV':
                T_1 = 'same'

            # HiTS sampling function
            # hits_time = time_obs[HiTS_sample_func(time_obs)]
            n = np.random.choice(range(49))
            hits_time = time_all[n][:, 1]
            # predicted magnitude for
            if var_class == 'NV':
                mag_obs = GP_fit(time, mag, err, plot=False, x_pred=hits_time)
            else:
                mag_obs = GP_fit_periodic(time,
                                          mag,
                                          err,
                                          T,
                                          T_1=T_1,
                                          plot=False,
                                          x_pred=hits_time)
            # scale to HiTS depth and add empirical uncertainties
            hits_mag, hits_noise_mag, hits_err = HiTS_depth_noise(mag_obs)
            if hits_mag.mean() < 20:
                size = int(len(hits_time) * np.random.uniform(.95, 1., size=1))
            elif hits_mag.mean() < 22 and hits_mag.mean() > 20:
                size = int(len(hits_time) * np.random.uniform(.85, 1., size=1))
            else:
                size = int(len(hits_time) * np.random.uniform(.75, 1., size=1))
            print('\r', k, T_1, hits_mag.mean(), size, len(hits_time), end=' ')
            idx = np.sort(
                np.random.choice(np.arange(len(hits_time)),
                                 size=size,
                                 replace=False))
            hits_time = hits_time[idx]
            hits_mag = hits_mag[idx]
            hits_noise_mag = hits_noise_mag[idx]
            hits_err = hits_err[idx]

            df = pd.DataFrame(np.array([hits_time, hits_noise_mag,
                                        hits_err]).T,
                              columns=['MJD', 'MAG_KRON', 'MAGERR_KRON'])
            if False:
                path = '%s/synt_lcs/%s_%i_%.6f.csv' % (mainpath, var_class, k,
                                                       T_1)
                df.to_csv(path)
            all_lcs.append(df)
            all_per.append(T_1)

    print('LC generated...')

    print('Number of LCs: %i' % len(all_lcs))

    print('Calculating features values...')
    # p = Pool(processes=n_process)
    # frames = p.map(get_features, zip(all_lcs, range(len(all_lcs)),
    #                                  [var_class]*len(all_lcs)))
    frames = []
    for data in zip(all_lcs, range(len(all_lcs)), [var_class] * len(all_lcs)):
        frames.append(get_features(data))

    print(frames)
    fats_feat = pd.concat(frames, axis=0)
    fats_feat = fats_feat[np.sort(fats_feat.columns.values)]
    fats_feat['Var_Type'] = var_class
    print(fats_feat.head(10))
    print(fats_feat.shape)

    fats_feat.to_csv('%s/tables/Syntectic_%s_features_noise.csv' %
                     (mainpath, var_class))
    print('Done!')
Example #44
NN_stds = []
for i in range(len(cs_range)):
    NN_means.append(np.mean(NN_css[:, i]))
    NN_stds.append(np.std(NN_css[:, i] / cs_range[i], ddof=1))

NN_stds = np.array(NN_stds)
NN_std_errs = NN_stds / np.array(np.sqrt(training_reruns))
NN_MC_std = np.array(NN_MC_stds[0])

NN_err = np.sqrt(NN_stds**2 + NN_MC_std**2)
NN_MC_std_err = np.sqrt(np.array(NN_std_errs)**2 + NN_MC_std**2)
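
# Assuming the network-to-network spread (NN_stds) and the Monte Carlo
# integration error (NN_MC_std) are independent, the totals above add in
# quadrature: sigma_tot = sqrt(sigma_NN^2 + sigma_MC^2).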

print('############### Plotting mean with errors ###############')

fig = plt.figure(1)
plt.errorbar(cs_range, np.array(NN_means) / cs_range, yerr=NN_stds)
plt.errorbar(cs_range,
             np.array(NJ_cs) / cs_range,
             label='NJet',
             yerr=NJ_MC_std,
             alpha=0.5)
plt.legend()
if order == 'NLO':
    plt.title('Average {} '.format(order) +
              r'k-factor for $e^+e^-\rightarrow\,q\bar{q}$' +
              '{} w/ FKS'.format(n_gluon * 'g'))
else:
    plt.title('Average {} '.format(order) +
              r'cross section for $e^+e^-\rightarrow\,q\bar{q}$' +
              '{} w/ FKS'.format(n_gluon * 'g'))
plt.savefig(
Example #45
        print(n, scipy.mean(tdata), scipy.median(tdata))

matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42

f1 = plt.figure()

plt.xlim(0, 600)
plt.ylim(0, 3)

colors = {}
colors['mem'] = 'red'
colors['nomem'] = 'green'

for n in names:
    for t in sorted(allValues[n][0]):
        values = []
        for e in allValues[n]:
            values.append(int(allValues[n][e][t]['fatals']))
        conf = scipy.stats.norm.interval(0.95,
                                         loc=scipy.mean(values),
                                         scale=scipy.std(values) /
                                         math.sqrt(len(values)))
        #print n,t,conf, values
        plt.errorbar(t,
                     scipy.mean(values),
                     yerr=conf[1] - scipy.mean(values),
                     color=colors[n])

f1.savefig("memfatals.png")
Example #46
        #Where you save standard devations to
        np.savetxt(r'C:\Users\jacob\Desktop\Syborgs\SYBORGS Coding\Deviation' +
                   " " + legend + ".csv",
                   group_Y,
                   delimiter=",")

        plt.scatter(X_ax[1:], group_X[0, 1:], label=legend)
        plt.xscale('log')

        #How you change the range of y values the graph goes over
        #plt.ylim(19000,34000)

        plt.errorbar(X_ax[1:],
                     group_X[0, 1:],
                     group_Y[0, 1:],
                     linestyle='None',
                     capsize=5.0)
        plt.grid(True)
        plt.legend()

        #The labels of your graph
        plt.xlabel("Log of IPTG Concentration uM")
        plt.ylabel("Per Cell Fluoresence")
        plt.title("Log Per Cell Fluoresence" + " " + "vs" + " " +
                  "IPTG Concentration uM")

        #Where you save it
        save_err = r'C:\Users\jacob\Desktop\Syborgs\Jithran Data\Graphs\10.9.20\BARS'

        #Optional extra file location, comment out if not in use
Example #47
 def PlotInject(self,filename=None):
     '''
     Function that uses the global parameters str_min and str_step as well as the global results sigma_ar to
     generate a plot.
     
     Argument :
          filename : Name of the file in which the plot will be saved. If None, the plot will be just shown
                    but not saved. Default to None.
     '''
     
     # Get the x-values (signal strength)
     if(self.str_scale=='lin'):
         sig_str = np.arange(self.str_min,self.str_min+self.str_step*len(self.sigma_ar),step=self.str_step)
     else:
         sig_str = np.array([i%10*10**(self.str_min+i//10) for i in range(len(self.sigma_ar)+len(self.sigma_ar)//10+1) if i%10!=0])
     
     # If filename is not None and log scale must check
      if(filename is not None and self.str_scale=='log'):
          if(isinstance(filename, str)):
             print('WARNING : log plot for signal injection will not be saved !')
             nolog = True
         else:
             nolog = False
     
     # Do the plot
     F = plt.figure(figsize=(12,8))
     plt.title('Significance vs signal strength')
     plt.errorbar(sig_str,self.sigma_ar[:,0],
                  xerr=0,yerr=[self.sigma_ar[:,1],self.sigma_ar[:,2]],
                  linewidth=2,marker='o')
     plt.xlabel('Signal strength',size='large')
     plt.ylabel('Significance',size='large')
     
     if(filename is None):
         plt.show()
     else:
         if(self.str_scale=='log' and nolog is False):
             plt.savefig(filename[0],bbox_inches='tight')
         else:
             plt.savefig(filename,bbox_inches='tight')
         plt.close(F)
     
     # If log scale, do also a log plot
     if(self.str_scale=='log'):
         F = plt.figure(figsize=(12,8))
         plt.title('Significance vs signal strength (log scale)')
         plt.errorbar(sig_str,self.sigma_ar[:,0],
                      xerr=0,yerr=[self.sigma_ar[:,1],self.sigma_ar[:,2]],
                      linewidth=2,marker='o')
         plt.xlabel('Signal strength',size='large')
         plt.ylabel('Significance',size='large')
         plt.xscale('log')
         
         if(filename is None):
             plt.show()
         else:
             if(nolog is False):
                 plt.savefig(filename[1],bbox_inches='tight')
             plt.close(F)
     
     return
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
    for accuracy in k_to_accuracies[k]:
        print('k = %d, accuracy = %f' % (k, accuracy))

# plot the raw observations
for k in k_choices:
    accuracies = k_to_accuracies[k]
    plt.scatter([k] * len(accuracies), accuracies)

# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array(
    [np.mean(v) for k, v in sorted(k_to_accuracies.items())])
accuracies_std = np.array(
    [np.std(v) for k, v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()

#%%
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 5

classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
Example #49
def ahmet_ali_nuhoglu_21602149_hw2(question):
    if question == '1':

        print("Question 1")
        print("Part A")

        with h5py.File('hw3_data2.mat', 'r') as file:
            Xn, Yn = list(file['Xn']), list(file['Yn'])

        Xn = np.array(Xn).T
        Yn = np.array(Yn).flatten()

        def ridge_regression(X, y, lmbd):
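            # Closed-form ridge estimate: w = (X^T X + lmbd * I)^{-1} X^T y.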
            return np.linalg.inv(
                X.T.dot(X) + lmbd * np.identity(np.shape(X)[1])).dot(
                    X.T).dot(y)

        def r_squared(Y, pred):
            return (np.corrcoef(Y, pred)[0, 1])**2

        def cross_validation(X, y, K, lmbd):

            part_len = int(np.size(y) / K)

            valid_means_d = dict()
            test_means_d = dict()

            for i in range(K):
                valid_data_start = i * part_len
                test_data_start = (i + 1) * part_len
                train_data_start = (i + 2) * part_len

                train_data_ind, test_data_ind, valid_data_ind = [], [], []

                for j in range(valid_data_start, test_data_start):
                    valid_data_ind.append(j % np.size(y))

                for j in range(test_data_start, train_data_start):
                    test_data_ind.append(j % np.size(y))

                for j in range(train_data_start,
                               valid_data_start + np.size(y)):
                    train_data_ind.append(j % np.size(y))

                x_valid, x_test, x_train = X[valid_data_ind], X[
                    test_data_ind], X[train_data_ind]
                y_valid, y_test, y_train = y[valid_data_ind], y[
                    test_data_ind], y[train_data_ind]

                for l in lmbd:
                    weight = ridge_regression(x_train, y_train, l)

                    valid_means_d.setdefault(l, []).append(
                        r_squared(y_valid, x_valid.dot(weight)))
                    test_means_d.setdefault(l, []).append(
                        r_squared(y_test, x_test.dot(weight)))

            valid_means_d = dict(
                (lmbd, np.mean(val)) for lmbd, val in valid_means_d.items())
            test_means_d = dict(
                (lmbd, np.mean(val)) for lmbd, val in test_means_d.items())

            return valid_means_d, test_means_d

        lambda_values = np.logspace(0, 12, num=500, base=10)
        dict_valid, dict_test = cross_validation(Xn, Yn, 10, lambda_values)

        lambda_opt = max(dict_valid, key=lambda k: dict_valid[k])

        x_val, y_val = zip(*sorted(dict_valid.items()))
        x_tst, y_tst = zip(*sorted(dict_test.items()))

        plt.figure()
        plt.plot(x_tst, y_tst)
        plt.plot(x_val, y_val)
        plt.legend([
            'Test Data',
            'Validation Data',
        ])
        plt.ylabel(r'$R^2$')
        plt.xlabel(r'$\lambda$')
        plt.title(r'$R^2$ vs $\lambda$')
        plt.xscale('log')
        plt.grid()
        plt.show(block=False)

        print("Optimal Lambda Value: ", lambda_opt)

        print("Part B")

        np.random.seed(3)

        def bootstrap(iter_num, x, y, lmbd):
            weight_new = []
            for i in range(iter_num):
                new_ind = np.random.choice(np.arange(np.size(y)), np.size(y))
                x_new, y_new = Xn[new_ind], Yn[new_ind]
                weight_r = ridge_regression(x_new, y_new, lmbd)
                weight_new.append(weight_r)
            return weight_new
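
        # Each bootstrap replicate refits the weights on rows resampled with
        # replacement; the spread of the replicates estimates the sampling
        # distribution of each weight.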

        def find_significant_w(arr_mean, arr_std):
            p_values = 2 * (1 - norm.cdf(np.abs(arr_mean / arr_std)))
            significant_weights = np.where(p_values < 0.05)
            return significant_weights

        weight_new = bootstrap(500, Xn, Yn, 0)

        weight_new_mean = np.mean(weight_new, axis=0)
        weight_new_std = np.std(weight_new, axis=0)
        plt.figure(figsize=(20, 10))
        plt.grid()
        plt.errorbar(np.arange(1, 101),
                     weight_new_mean,
                     yerr=2 * weight_new_std,
                     ecolor='r',
                     fmt='o-k',
                     capsize=5)
        plt.ylabel(r'Resampled Weight Values')
        plt.xlabel(r'Weight Indices')
        plt.title(r'Ridge Regression with ' r'$\lambda = 0$' '\nand 95% CI')
        plt.show(block=False)
        print(
            "Indices of the Resampled Weights which are significantly different than zero:"
        )
        print(find_significant_w(weight_new_mean, weight_new_std)[0])

        print("Part C")

        weight_new_ridge = bootstrap(500, Xn, Yn, lambda_opt)
        weight_newR_mean = np.mean(weight_new_ridge, axis=0)
        weight_newR_std = np.std(weight_new_ridge, axis=0)
        plt.figure(figsize=(20, 10))
        plt.grid()
        plt.errorbar(np.arange(1, 101),
                     weight_newR_mean,
                     yerr=2 * weight_newR_std,
                     ecolor='r',
                     fmt='o-k',
                     capsize=5)
        plt.ylabel(r'Resampled Weight Values')
        plt.xlabel(r'Weight Indices')
        plt.title(r'Ridge Regression with '
                  r'$\lambda = \lambda_{opt}$'
                  '\nand 95% CI')
        plt.show(block=False)
        print(
            "Indices of the Resampled Weights which are significantly different than zero:"
        )
        print(find_significant_w(weight_newR_mean, weight_newR_std)[0])

    elif question == '2':

        print("Question 2")

        print("Part A")

        with h5py.File('hw3_data3.mat', 'r') as file:
            pop1, pop2 = np.array(list(file['pop1'])).flatten(), np.array(
                list(file['pop2'])).flatten()

        def bootstrap(iter_num, x, seed=6):
            np.random.seed(seed)
            x_new = []
            for i in range(iter_num):
                new_ind = np.random.choice(np.arange(np.size(x)), np.size(x))
                x_sample = x[new_ind]
                x_new.append(x_sample)
            return np.array(x_new)

        def mean_difference(x, y, iterations):
            xy_concat = np.concatenate((x, y))
            xy_boot = bootstrap(iterations, xy_concat)
            x_boot = np.zeros((iterations, np.size(x)))
            y_boot = np.zeros((iterations, np.size(y)))
            for i in range(np.size(xy_concat)):
                if i < np.size(x):
                    x_boot[:, i] = xy_boot[:, i]
                else:
                    y_boot[:, i - np.size(x)] = xy_boot[:, i]
            x_means = np.mean(x_boot, axis=1)
            y_means = np.mean(y_boot, axis=1)
            mean_diff = x_means - y_means

            return mean_diff
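
        # mean_difference builds the null distribution under H0 (both samples
        # drawn from one pooled population): resample the concatenation with
        # replacement, split back to the original sizes, and take the
        # difference of the sample means.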

        mean_diff = mean_difference(pop1, pop2, 10000)

        def find_z_and_p(x, mu):
            mu_0 = np.mean(x)
            sigma = np.std(x)
            z = np.abs((mu - mu_0) / sigma)
            p = (1 - norm.cdf(z))
            return z, p

        plt.figure()
        plt.title('Population Mean Difference')
        plt.xlabel('Difference of Means')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(mean_diff, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z, p = find_z_and_p(mean_diff, np.mean(pop1) - np.mean(pop2))
        print("z-score: ", z)
        print("two sided p-value: ", 2 * p)

        print("Part B")

        with h5py.File('hw3_data3.mat', 'r') as file:
            vox1, vox2 = np.array(list(file['vox1'])).flatten(), np.array(
                list(file['vox2'])).flatten()

        vox1_boot = bootstrap(10000, vox1)
        vox2_boot = bootstrap(10000, vox2)

        corr_boot = np.zeros(10000)
        for i in range(10000):
            corr_boot[i] = np.corrcoef(vox1_boot[i], vox2_boot[i])[0, 1]

        corr_mean = np.mean(corr_boot)
        sorted_corr = np.sort(corr_boot)
        dif = np.size(sorted_corr) / 40
        corr_lower = sorted_corr[int(dif)]
        corr_upper = sorted_corr[int(np.size(sorted_corr) - dif)]
        print("Mean: ", corr_mean)
        print("%95 CI: (", corr_lower, ", ", corr_upper, ")")

        zero_corr = np.where(corr_boot < 10**(-2))
        print("Number of elements with zero correlation: ", np.size(zero_corr))

        print("Part C")

        vox1_indep = bootstrap(10000, vox1, 13)
        vox2_indep = bootstrap(10000, vox2, 5)

        corr_boot_indep = np.zeros(10000)
        for i in range(10000):
            corr_boot_indep[i] = np.corrcoef(vox1_indep[i], vox2_indep[i])[0,
                                                                           1]

        plt.figure()
        plt.title('Correlation between vox1 and vox2')
        plt.xlabel('Correlation (x)')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(corr_boot_indep, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z, p = find_z_and_p(corr_boot_indep, np.corrcoef(vox1, vox2)[0, 1])
        print("z-score: ", z)
        print("one sided p value: ", p)

        print("Part D")

        with h5py.File('hw3_data3.mat', 'r') as file:
            building, face = np.array(list(
                file['building'])).flatten(), np.array(list(
                    file['face'])).flatten()

        mean_diff_d = np.zeros(10000)
        diff_options = np.zeros(4)
        choices = np.zeros(20)

        for i in range(10000):
            for j in range(20):
                ind = np.random.choice(20)
                diff_options[0:2] = 0  # zero the two "no difference" options; the rest are +/- the paired difference
                diff_options[2] = building[ind] - face[ind]
                diff_options[3] = -1 * diff_options[2]
                choices[j] = diff_options[np.random.choice(4)]
            mean_diff_d[i] = np.mean(choices)

        plt.figure()
        plt.title(
            'Difference of Means\nBuilding - Face\n(Subject Population = Same)'
        )
        plt.xlabel('Difference of Means (x)')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(mean_diff_d, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z, p = find_z_and_p(mean_diff_d, np.mean(building) - np.mean(face))
        print("z-score: ", z)
        print("Two sided p value: ", 2 * p)

        print("Part E")

        mean_diff_e = mean_difference(building, face, 10000)

        plt.figure()
        plt.title(
            'Difference of Means\nBuilding - Face\n(Subject Population = Different)'
        )
        plt.xlabel('Difference of Means (x)')
        plt.ylabel('P(x)')
        plt.yticks([])
        plt.hist(mean_diff_e, bins=60, density=True, edgecolor='black')
        plt.show(block=False)

        z_e, p_e = find_z_and_p(mean_diff_e, np.mean(building) - np.mean(face))
        print("z-score: ", z_e)
        print("Two sided p value: ", 2 * p_e)
Example #50
def data_collection(loopnumber, start_time, ser, refresh_time, looptime,
                    exptime, collection_countdown):
    timeold = 0
    currsum = []
    global xvals
    global yvals
    global stderrlist
    try:
        for line in ser:
            time_now = time.time()
            data = ser.read(10)
            if data.startswith(b'B'):
                # note: Dstat returns data in binary as hexadecimal,
                # form \xhh (where hh = 2-value hex)
                # parse data read from dstat
                x = data.replace(b'B\n', b'')
                new = struct.unpack('<HHl', x)
                sec, millisec, curr = new
                sec = float(sec)
                millisec = float(millisec)
                curr = float(curr)
                exptime = (sec + (millisec / 1000.))
                current = (curr) * (adc_gain / 2) * (1.5 / gain / 8388607)
                currsum.append(current)
            if time_now >= collection_countdown and data.startswith(b'B'):
                # Track time and update data matrix when experimental time exceeds specified timeloop
                exptime_int = int(exptime)

                if exptime_int != 0:

                    print(
                        "\n%d seconds reached in data collection loop,"
                        " recording averages from data point %d" %
                        (exptime, loopnumber))
                    if exptime < (avg_time / 2):
                        time_average_exp = time_now
                    else:
                        time_average_exp = (time_now - (avg_time / 2))

                    mean_current = np.mean(currsum)
                    sd_current = np.std(currsum)
                    tpointsa = len(currsum)
                    print("time:", time_average_exp)
                    print("current:", mean_current)
                    print("standard_deviation:", sd_current)
                    print("number of points:", tpointsa)
                    z = [(time_average_exp), (mean_current), (sd_current),
                         (tpointsa)]

                    # write data to file
                    print("writing data to file\n")
                    with open('test.dat', 'a') as f:
                        f.write('\n')
                        f.write(str(z))

                    # update plot

                    if loopnumber == 1:

                        plot_time = (time_average_exp - start_time)
                        xvals = [(plot_time)]
                        yvals = [(mean_current)]
                        stderrlist = [(sd_current)]

                    else:
                        plot_time = (time_average_exp - start_time)
                        xvals.append(plot_time)
                        yvals.append(mean_current)
                        stderrlist.append(sd_current)

                        xmin = min(xvals)
                        xmax = max(xvals)
                        ymin = min(yvals)
                        ymax = max(yvals)
                        axes = plt.gca()
                        axes.set_xlim([xmin, xmax])
                        axes.set_ylim([ymin, ymax])
                        plt.errorbar(xvals,
                                     yvals,
                                     yerr=stderrlist,
                                     color='g',
                                     fmt='--o')
                        plt.pause(0.1)

                    currsum = []
                    collection_countdown = time_now + avg_time
                    loopnumber += 1
            if time_now >= looptime:

                # potentiostat refresh when "refresh frequncy" exceeded by timeloop

                print("\npotentiostat refresh, breaking data collection loop")
                time.sleep(1)
                if loopnumber == "":
                    loopnumber = 1
                return (loopnumber, ser)
            difftimes = time_now - timeold
            if difftimes >= countdownint:
                timeleft = collection_countdown - time_now

                if timeleft > 0:
                    print("recording time-averaged data in %d seconds" %
                          timeleft)
                    timeold = time_now

    except Exception:
        print(
            "\nproblem with data collection loop. Will attempt to reset potentiostat\n"
        )
        if ser.is_open is True:
            ser.close()

        if loopnumber is "":
            loopnumber = 1
        if ser == "":
            ser = serial.Serial('/dev/ttyACM0', timeout=3)

        print("/nloopnumber, ser:/n", loopnumber, ser)
        time.sleep(5)
        return (loopnumber, ser)
Example #51
mean = np.mean(xPlotTime, axis=0)
# print(var)
# print(std)
# print(mean)

CI = 0.999
a = stats.norm.interval(alpha=CI, loc=0,
                        scale=1)  # CI: confidence interval
err = a[1] * std
print(err)
### For CI = 0.95: z * std: 9.25111678933, mean: 2132.29302199 [sec]
print("99.9% confidence interval (for the time at probability 0.99): " + str(err[len(err) - 1]))
plt.errorbar(mean,
             yPlotProb,
             xerr=err,
             ecolor='blue',
             capsize=5,
             elinewidth=1,
             markeredgewidth=1)
plt.plot(mean,
         yPlotProb,
         marker="o",
         markersize=4,
         markeredgecolor="blue",
         markerfacecolor="white",
         color='blue',
         label="Lower Bound\n(Monte Carlo)")

# plt.hist(y, bins=100, normed=True, cumulative=True, histtype='step', label='Lower Bound \n (Montecarlo)') #

Example #52
 def PlotBump(self,data,bkg,is_hist=False,filename=None):
     '''
      Plot the data and background histograms with the bump found by BumpHunter highlighted.
     
     Arguments :
         data : Numpy array containing the data.
         
         bkg : Numpy array containing the background.
         
         is_hist : Boolean specifying if data and bkg are in histogram form or not. Default to False.
         
         filename : Name of the file in which the plot will be saved. If None, the plot will be just shown
                    but not saved. Default to None.
     '''
     
     # Get the data in histogram form
     if(is_hist is False):
         H = np.histogram(data,bins=self.bins,range=self.rang)
     else:
         H = [data,self.bins]
     
     # Get bump min and max
     Bmin = H[1][self.min_loc_ar[0]]
     Bmax = H[1][self.min_loc_ar[0]+self.min_width_ar[0]]
     
     # Get the background in histogram form
     if(is_hist is False):
         Hbkg = np.histogram(bkg,bins=self.bins,range=self.rang,weights=self.weights)[0]
     else:
         if(self.weights is None):
             Hbkg = bkg
         else:
             Hbkg = bkg * self.weights
     
     # Calculate significance for each bin
     sig = np.ones(Hbkg.size)
     sig[(H[0]>Hbkg) & (Hbkg>0)] = G(H[0][(H[0]>Hbkg) & (Hbkg>0)],Hbkg[(H[0]>Hbkg) & (Hbkg>0)])
     sig[H[0]<Hbkg] = 1-G(H[0][H[0]<Hbkg]+1,Hbkg[H[0]<Hbkg])
     sig = norm.ppf(1-sig)
     sig[sig<0.0] = 0.0 # If negative, set it to 0
     np.nan_to_num(sig, posinf=0, neginf=0, nan=0, copy=False) # Avoid errors
     sig[H[0]<Hbkg] = -sig[H[0]<Hbkg]  # Now we can make it signed
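
      # In words: the Poisson tail probability of a fluctuation at least as
      # extreme as the data, given the background expectation, is converted
      # to a signed Gaussian significance via norm.ppf(1 - p).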
     
     # Plot the test histograms with the bump found by BumpHunter plus a little significance plot
     F = plt.figure(figsize=(12,10))
     gs = grd.GridSpec(2, 1, height_ratios=[4, 1])
     
     pl1 = plt.subplot(gs[0])
     plt.title('Distributions with bump')
     
     if(is_hist is False):
         plt.hist(bkg,bins=self.bins,histtype='step',range=self.rang,weights=self.weights,label='background',linewidth=2,color='red')
         plt.errorbar(0.5*(H[1][1:]+H[1][:-1]),H[0],
                      xerr=(H[1][1]-H[1][0])/2,yerr=np.sqrt(H[0]),
                      ls='',color='blue',label='data')
     else:
         plt.hist(self.bins[:-1],bins=self.bins,histtype='step',range=self.rang,weights=Hbkg,label='background',linewidth=2,color='red')
         plt.errorbar(0.5*(H[1][1:]+H[1][:-1]),H[0],
                      xerr=(H[1][1]-H[1][0])/2,yerr=np.sqrt(H[0]),
                      ls='',color='blue',label='data')
     
     plt.plot(np.full(2,Bmin),np.array([0,H[0][self.min_loc_ar[0]]]),'r--',label=('BUMP'))
     plt.plot(np.full(2,Bmax),np.array([0,H[0][self.min_loc_ar[0]+self.min_width_ar[0]-1]]),'r--')
     plt.legend(fontsize='large')
     plt.yscale('log')
      if self.rang is not None:
         plt.xlim(self.rang)
     plt.tight_layout()
     
     plt.subplot(gs[1],sharex=pl1)
     plt.hist(H[1][:-1],bins=H[1],range=self.rang,weights=sig)
     plt.plot(np.full(2,Bmin),np.array([sig.min(),sig.max()]),'r--',linewidth=2)
     plt.plot(np.full(2,Bmax),np.array([sig.min(),sig.max()]),'r--',linewidth=2)
     plt.yticks(np.arange(np.round(sig.min()),np.round(sig.max())+1,step=1))
     plt.ylabel('significance',size='large')
     
     # Check if the plot should be saved or just displayed
     if(filename is None):
         plt.show()
     else:
         plt.savefig(filename,bbox_inches='tight')
         plt.close(F)
     
     return
Example #53
def normlc(event,
           fit,
           fignum,
           savefile=None,
           istitle=True,
           j=0,
           interclip=None):
    plt.rcParams.update({'legend.fontsize': 13})
    plt.figure(fignum, figsize=(8, 6))
    plt.clf()
    # Normalized subplot
    a = plt.axes([0.15, 0.35, 0.8, 0.55])
    a.yaxis.set_major_formatter(
        plt.matplotlib.ticker.FormatStrFormatter('%0.4f'))
    if istitle:
        plt.suptitle(event.eventname + ' Normalized Binned Data With Best Fit',
                     size=16)
        plt.title(fit.model, size=10)
    if fit.preclip > 0:
        plt.errorbar(fit.precbinphase,
                     fit.precbinflux,
                     fit.precbinstd,
                     fmt='o',
                     color='0.7',
                     ms=4,
                     lw=1)
    if fit.postclip < fit.nobjuc:
        plt.errorbar(fit.postbinphase,
                     fit.postbinflux,
                     fit.postbinstd,
                     fmt='o',
                     color='0.7',
                     ms=4,
                     lw=1)

    plt.errorbar(fit.abscissa,
                 fit.normbinflux,
                 fit.normbinstd,
                 fmt='ko',
                 ms=4,
                 lw=1,
                 label='Binned Data')
    plt.plot(fit.timeunit,
             fit.normbestfit,
             pltfmt[j % len(pltfmt)],
             label='Best Fit',
             lw=2)

    ymin, ymax = plt.ylim()
    if interclip is not None:
        for i in range(len(interclip)):
            plt.plot(fit.timeunituc[interclip[i][0]:interclip[i][1]],
                     fit.normbestfituc[interclip[i][0]:interclip[i][1]],
                     'w-',
                     lw=2)
    plt.setp(a.get_xticklabels(), visible=False)
    plt.yticks(size=13)
    plt.ylabel('Normalized Flux', size=14)
    plt.ylim(ymin, ymax)
    plt.legend(loc='best')
    xmin, xmax = plt.xlim()

    # Residuals subplot
    plt.axes([0.15, 0.1, 0.8, 0.2])
    flatline = np.zeros(len(fit.abscissa))
    plt.plot(fit.abscissa, fit.normbinresiduals, 'ko', ms=4)
    plt.plot(fit.abscissa, flatline, 'k:', lw=1.5)
    plt.xlim(xmin, xmax)
    plt.xticks(size=13)
    plt.yticks(size=13)
    plt.xlabel(fit.xlabel, size=14)
    plt.ylabel('Residuals', size=14)
    if savefile is not None:
        plt.savefig(savefile)
    return
Example #54
def makep6plots(lines, plotdir):
    names = lines.keys()

    sigiter = (4, 4)

    sdnr = np.zeros(len(names))
    bic = np.zeros(len(names))
    depth = np.zeros(len(names))
    derr = np.zeros(len(names))
    bsig = np.zeros(len(names))
    method = []

    methoddtype = [('cent', 'S10'), ('prefix', 'S10'), ('scale', float),
                   ('offset', float), ('model', 'S10')]

    for i, name in enumerate(lines):
        sdnr[i] = lines[name][0][4]
        bic[i] = lines[name][0][5]
        depth[i] = lines[name][0][6]
        derr[i] = lines[name][0][7]
        bsig[i] = lines[name][0][8]
        method.append((lines[name][0][0], lines[name][0][1], lines[name][0][2],
                       lines[name][0][3], lines[name][0][9]))

    method = np.array(method, dtype=methoddtype)

    methodsortind = np.argsort(
        method, order=['cent', 'prefix', 'scale', 'offset', 'model'])

    sdnrsort = sdnr[methodsortind]
    bicsort = bic[methodsortind]
    depthsort = depth[methodsortind]
    derrsort = derr[methodsortind]
    bsigsort = bsig[methodsortind]

    bestind = np.where(bsigsort == np.min(bsigsort))
    sdnrind = np.where(sdnrsort == np.min(sdnrsort))

    namesort = np.array(list(names))[methodsortind]

    print("Lowest binned-sigma chisq: {}".format(namesort[bestind]))
    print("Lowest SDNR:               {}".format(namesort[sdnrind]))

    textx = 0.85
    texty = 0.95

    fig = plt.figure(figsize=(20, 8))
    msk = sr.sigrej(depthsort, sigiter)
    nrej = depthsort.size - np.sum(msk)
    plt.errorbar(np.arange(len(names))[msk],
                 depthsort[msk],
                 derrsort[msk],
                 fmt='ko')
    plt.xticks(np.arange(len(names)),
               namesort,
               rotation='vertical',
               fontsize=6)
    plt.errorbar(np.arange(len(names))[bestind],
                 depthsort[bestind],
                 derrsort[bestind],
                 fmt='ro')
    plt.ylabel("Depth (%)")
    plt.text(textx,
             texty,
             "Note: {} outliers hidden".format(nrej),
             transform=plt.gca().transAxes)
    plt.savefig(plotdir + "/depth.pdf", bbox_inches='tight')
    plt.savefig(plotdir + "/depth.png", bbox_inches="tight")
    plt.savefig(plotdir + "/depth.ps", transparent=True)
    plt.close()

    fig = plt.figure(figsize=(20, 8))
    msk = sr.sigrej(sdnrsort, sigiter)
    nrej = sdnrsort.size - np.sum(msk)
    plt.scatter(np.arange(len(names))[msk], sdnrsort[msk], c='black')
    plt.xticks(np.arange(len(names)),
               namesort,
               rotation='vertical',
               fontsize=6)
    plt.scatter(np.arange(len(names))[bestind], sdnrsort[bestind], c='red')
    plt.ylabel("SDNR")
    plt.text(textx,
             texty,
             "Note: {} outliers hidden".format(nrej),
             transform=plt.gca().transAxes)
    plt.savefig(plotdir + "/sdnr.pdf", bbox_inches='tight')
    plt.savefig(plotdir + "/sdnr.png", bbox_inches='tight')
    plt.savefig(plotdir + "/sdnr.ps", transparent=True)
    plt.close()

    fig = plt.figure(figsize=(20, 8))
    msk = sr.sigrej(bicsort, sigiter)
    nrej = bicsort.size - np.sum(msk)
    plt.scatter(np.arange(len(names))[msk], bicsort[msk], c='black')
    plt.xticks(np.arange(len(names)),
               namesort,
               rotation='vertical',
               fontsize=6)
    plt.scatter(np.arange(len(names))[bestind], bicsort[bestind], c='red')
    plt.ylabel("BIC")
    plt.text(textx,
             texty,
             "Note: {} outliers hidden".format(nrej),
             transform=plt.gca().transAxes)
    plt.savefig(plotdir + "/BIC.pdf", bbox_inches='tight')
    plt.savefig(plotdir + "/BIC.png", bbox_inches='tight')
    plt.savefig(plotdir + "/BIC.ps", transparent=True)
    plt.close()

    fig = plt.figure(figsize=(20, 8))
    msk = sr.sigrej(bsigsort, sigiter)
    nrej = bsigsort.size - np.sum(msk)
    plt.scatter(np.arange(len(names))[msk], bsigsort[msk], c='black')
    plt.xticks(np.arange(len(names)),
               namesort,
               rotation='vertical',
               fontsize=6)
    plt.scatter(np.arange(len(names))[bestind], bsigsort[bestind], c='red')
    plt.ylabel(r"$\chi^2_{bin}$")
    plt.text(textx,
             texty,
             "Note: {} outliers hidden".format(nrej),
             transform=plt.gca().transAxes)
    plt.savefig(plotdir + "/bsig.pdf", bbox_inches='tight')
    plt.savefig(plotdir + "/bsig.png", bbox_inches='tight')
    plt.savefig(plotdir + "/bsig.ps", transparent=True)
    plt.close()
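The multi-key ordering above relies on np.argsort over a NumPy structured array; a minimal sketch with made-up rows (values are illustrative, field names follow the dtype above):

import numpy as np

dtype = [('cent', 'S10'), ('prefix', 'S10'), ('scale', float)]
rows = np.array([(b'fgc', b'aper', 2.0),
                 (b'col', b'aper', 1.5),
                 (b'col', b'aper', 0.5)], dtype=dtype)
idx = np.argsort(rows, order=['cent', 'prefix', 'scale'])  # leftmost key is primary
print(rows[idx])  # rows sorted by cent, then prefix, then scale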
Example #55
def test_marker_paths_pdf():
    N = 7

    plt.errorbar(np.arange(N), np.ones(N) + 4, np.ones(N))
    plt.xlim(-1, N)
    plt.ylim(-1, 7)
Example #56
def chainsnlc(normfit,
              normbinflux,
              normbinsd,
              fit,
              fignum,
              savefile=None,
              istitle=True,
              j=0,
              interclip=None):
    """
  Plot chains end normalized lightcurves:
  """
    plt.rcParams.update({'legend.fontsize': 13})
    plt.figure(fignum, figsize=(8, 10))
    plt.clf()

    # Normalized subplot
    numplots = len(normfit)
    ncolumns = 3
    nrows = int(np.ceil(1.0 * numplots / ncolumns))
    for c in np.arange(numplots):
        a = plt.subplot(nrows, ncolumns, c + 1)
        a.yaxis.set_major_formatter(
            plt.matplotlib.ticker.FormatStrFormatter('%0.4f'))
        if istitle:
            plt.title('Chain %2d' % (c + 1), size=10)

        plt.errorbar(fit.abscissauc,
                     normbinflux[c],
                     normbinsd[c],
                     fmt='ko',
                     ms=4,
                     lw=1)
        plt.plot(fit.timeunit, normfit[c], pltfmt[j % len(pltfmt)], lw=2)
        if interclip is not None:
            for i in np.arange(len(interclip)):
                plt.plot(fit.tuall[interclip[i][0]:interclip[i][1]],
                         np.ones(interclip[i][1] - interclip[i][0]),
                         '-w',
                         lw=3)

                plt.plot(fit.timeunituc[interclip[i][0]:interclip[i][1]],
                         fit.normbestfituc[interclip[i][0]:interclip[i][1]],
                         'w-',
                         lw=2)

        # plt.setp(a.get_xticklabels(), visible = False)
        plt.yticks(size=13)
        if (c + 1) % ncolumns == 1:
            plt.ylabel('Normalized Flux', size=10)
        else:
            a.set_yticklabels([''])
        if numplots - (c + 1) < ncolumns:
            plt.xlabel(fit.xlabel, size=10)
        else:
            a.set_xticklabels([''])

    plt.subplots_adjust(hspace=0.2, wspace=0.1, right=0.99)
    plt.suptitle('Last Burn-in Iteration Models', size=16)
    if savefile is not None:
        plt.savefig(savefile)

    return
Example #57
plt.ylabel('observed species-prey')
plt.title('KF2')
plt.xlim((0, n))

plt.figure(2)
#plt.plot(xronos,real_obs2,'ro')
plt.plot(t_all, y_all[:, 1] - np.sqrt(y_all[:, 4]), 'g')
plt.plot(t_all, y_all[:, 1] + np.sqrt(y_all[:, 4]), 'g')
plt.plot(t_all, y_all[:, 1], 'magenta')
plt.xlim((0, n))
plt.xlabel('time')
plt.ylabel('unobserved species-predator')
plt.title('KF2')

plt.figure(5)
plt.errorbar(xronos, mean1, yerr=2 * np.sqrt(variance1), fmt='none', color='b')
plt.plot(xronos, real_obs1, 'ro')

plt.title('KF2 aggregate - observed')
plt.xlim((0, n + step))

plt.figure(6)
plt.errorbar(xronos, mean2, yerr=2 * np.sqrt(variance2), fmt='none', color='b')
plt.plot(xronos, real_obs2, 'ro')

plt.title('KF2 aggregate - unobserved')
plt.xlim((0, n + step))
plt.show()

plt.figure(101)
plt.plot(time1, species[:, 0], 'k')
Example #58
def pol_cl_parse(pol_cl_out_file,
                 pol_cov_out_file,
                 wl_array=None,
                 cn=0,
                 rebin=None,
                 nbin=25,
                 lmin=1,
                 lmax=1500,
                 bin_type='lin',
                 custom_bins=None,
                 show=False):
    """
    Parser of the txt output file of PolSpice, which contains the angular power spectrum (APS).

    Parameters
	----------
    pol_cl_out_file : str
        ascii output file created by PolSpice
    pol_cov_out_file : str
        .fits file containing the covariance matrix created by PolSpice
    raw_corr : (float, numpy array)
        must be a python list with 2 entries: the first one must be the
        white poissonian noise, the second must be the array (or the spline)
        of the  Wbeam function as a funcion of l integrated in a energy bin.
    rebin : list (or array)
        if not None, it has to be the list defining the edges of the new binning.

    Returns
    -------
    array, array, array
        in order: array of the ells, array of the Cls, array of the Cls errors
        (estimated from the covariance matrix). If rebin is not None the arrays are
        the rebinned array.
    """
    if pol_cov_out_file:
        hdu = pf.open(pol_cov_out_file)
        _cov = hdu[0].data[0]
        _invcov = np.linalg.inv(_cov)
    _l, _cl = [], []
    with open(pol_cl_out_file, 'r') as f:
        for line in f:
            try:
                l, cl = [float(item) for item in line.split()]
                _l.append(l)
                _cl.append(cl)
            except ValueError:
                # skip header or comment lines that do not parse as two floats
                pass
    _l = np.array(_l)
    _cl = np.array(_cl)
    if len(_cl) < lmax:
        logger.info('ATT: Setting new l_max=%i ...' % len(_cl))
        lmax = len(_cl)

    if wl_array is not None:
        wl = wl_array[:lmax]
        _l = _l[:lmax]
        _cl = (_cl[:lmax] - cn) / (wl**2)

        if pol_cov_out_file:
            _cov_ = np.array([_cov[i][:len(wl)] for i in range(0, len(wl))])
            _cov_ = _cov_ / (wl**2)
            for l in range(len(wl)):
                _cov_[l] = _cov_[l] / (wl**2)
        else:
            _clerr = np.sqrt(2 / (2 * _l + 1)) * (_cl + cn / wl**2)
            print(len(_clerr))
    else:
        _clerr = np.sqrt(2 / (2 * _l + 1)) * _cl

    if rebin:
        logger.info('Rebinning in multipole range')
        _lr, _clr, _clerrr = [], [], []
        if custom_bins is not None:
            logger.info('Custom multipole binning.')
            rebinning = new_binning(lmin,
                                    lmax,
                                    nbin,
                                    bin_type=bin_type,
                                    custom_bins=custom_bins)
        else:
            rebinning = new_binning(lmin, lmax, nbin, bin_type=bin_type)
        for bmin, bmax in zip(rebinning[:-1], rebinning[1:]):
            logger.info('considering %i <= l < %i' % (bmin, bmax))
            _index = np.where(np.logical_and(_l >= bmin, _l < bmax))[0]
            _index = _index.astype(int)
            if bin_type == 'log':
                _lmean = np.sqrt(bmin * bmax)
            if bin_type == 'lin':
                _lmean = (bmin + bmax) / 2
            _lr.append(_lmean)
            _clmean = np.mean(_cl[_index])
            _clr.append(_clmean)
            logger.info('cl_mean %.3e' % _clmean)

            if pol_cov_out_file:
                _clerr = np.sqrt(np.mean(_cov[bmin:bmax, bmin:bmax]))
                logger.info('cl_mean err %.3e' % _clerr)
                _clerrr.append(_clerr)
            else:
                _cler = np.mean(_clerr[_index])
                logger.info('cl_mean err %.3e' % _cler)
                _clerrr.append(_cler)

        _l = np.array(_lr)
        _cl = np.array(_clr)
        _clerr = np.array(_clerrr)
    else:
        if pol_cov_out_file:
            _clerr = np.array([np.sqrt(_cov[i][i]) for i in _l.astype(int)])
        else:
            pass

    if show:
        plt.figure()
        plt.errorbar(_l, _cl, yerr=_clerr, fmt='.')
        plt.plot([_l[0], _l[-1]], [0, 0], c='silver', linewidth=1)
        plt.title('CAPS', size=20)
        plt.xlabel(r'$\ell$', size=18)
        plt.ylabel(r'C$_{\ell}$', size=18)
        plt.xscale('log')
        plt.show()

    return np.array(_l), np.array(_cl), np.array(_clerr)
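A hedged usage sketch of pol_cl_parse; the file names, noise constant, and window-function array below are placeholders, not values from the source:

import os
import numpy as np

if os.path.exists('spice_cl.txt'):  # hypothetical PolSpice outputs
    wl = np.ones(1500)              # placeholder beam window function W_l
    ell, cl, clerr = pol_cl_parse('spice_cl.txt', 'spice_cov.fits',
                                  wl_array=wl, cn=0.0,
                                  rebin=True, nbin=20, bin_type='log',
                                  show=True)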
#def g(t,B,C):
# return 1/np.sqrt((1-B*t**2)**2+t**2*C)
def g(nu, a_0, a_1):
    return (1 / np.sqrt((1 - a_0 * nu**2)**2 + (nu**2) * a_1))


nu = np.linspace(500, 900000, 9000)
parameters1, pcov = curve_fit(g, f, uc, maxfev=10000)
plt.plot(nu, g(nu, *parameters1), 'r-')
print(parameters1)
fehler = np.sqrt(np.diag(pcov))  # standard errors of the fit parameters
print(fehler)
errX = 0.1
errY = 0.001
plt.errorbar(f, uc, xerr=errX, yerr=errY, fmt='none')
plt.plot(f, uc, 'b.', label="capacitor voltage")

plt.xlabel(r'$Frequency / Hz$')
plt.ylabel(r'$Uc/U$')
plt.xscale('log')
plt.tight_layout()
plt.legend(loc='best')
plt.savefig('Kondensatorspannungen.pdf')

L = ufloat(10.11e-3, 0.03e-3)
C = ufloat(2.098e-9, 0.006e-9)
R = ufloat(509.5, 0.5)
Re = R + 50

rdiffomega = Re / L
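A hedged cross-check, assuming the standard series-RLC capacitor-voltage response Uc/U = 1/sqrt((1 - LC*w^2)^2 + (Re*C*w)^2): the fitted a_0 and a_1 should then be close to L*C and (Re*C)^2, up to factors of (2*pi)^2 if the fit variable f is a frequency in Hz rather than an angular frequency. This reuses L, C, Re from above:

from uncertainties import umath  # error-propagating sqrt

a0_expected = L * C              # compare with parameters1[0]
a1_expected = (Re * C)**2        # compare with parameters1[1]
omega0 = 1 / umath.sqrt(L * C)   # resonance (angular) frequency
print(a0_expected, a1_expected, omega0)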
Example #60
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--environment", type=str, default='Pendulum-v0')
    parser.add_argument("--update_hyperstate", type=int, default=0)
    parser.add_argument("--train_hp_iterations", type=int, default=2000)
    parser.add_argument("--basis_dim", type=int, default=256)
    parser.add_argument("--basis_dim_reward", type=int, default=256)
    args = parser.parse_args()

    print(args)

    no_data_points = 1000
    no_samples = 50
    env = gym.make(args.environment)
    #env.render(mode='human')

    states = []
    actions = []
    rewards = []
    next_states = []
    data_points = 0
    while no_data_points > data_points:
        state = env.reset()

        while True:
            action = np.random.uniform(low=env.action_space.low, high=env.action_space.high)
            next_state, reward, done, _ = env.step(action)
            states.append(state)
            actions.append(action)
            rewards.append(reward)
            next_states.append(next_state)
            data_points += 1

            state = next_state.copy()

            if done:
                break

    X_train = np.concatenate([np.stack(states, axis=0), np.stack(actions, axis=0)], axis=-1)
    y_train = np.stack(next_states, axis=0)
    r_train = np.array(rewards)[..., None]

    rws = [RegressionWrapper2(env.observation_space.shape[0]+env.action_space.shape[0], basis_dim=args.basis_dim, train_hp_iterations=args.train_hp_iterations) for _ in range(env.observation_space.shape[0])]

    rwr = RegressionWrapper2(env.observation_space.shape[0]+env.action_space.shape[0], basis_dim=args.basis_dim_reward, train_hp_iterations=args.train_hp_iterations)

    for i in range(len(rws)):
        rws[i]._train_hyperparameters(X_train, y_train[..., i:i+1])
    rwr._train_hyperparameters(X_train, r_train)

    while True:
        state = env.reset()
        states = []
        actions = []
        rewards = []
        next_states = []
        while True:
            action = np.random.uniform(low=env.action_space.low, high=env.action_space.high)
            next_state, reward, done, _ = env.step(action)
            states.append(state)
            actions.append(action)
            rewards.append(reward)
            next_states.append(next_state)

            state = next_state.copy()

            if done:
                break

        X_test = np.concatenate([np.stack(states, axis=0), np.stack(actions, axis=0)], axis=-1)
        y_test = np.stack(next_states, axis=0)
        r_test = np.array(rewards)[..., None]

        for i in range(len(rws)):
            rws[i]._reset_statistics(X_train, y_train[..., i:i+1], bool(args.update_hyperstate))
        rwr._reset_statistics(X_train, r_train, bool(args.update_hyperstate))

        mu0, sigma0 = [np.concatenate(ele, axis=-1) for ele in zip(*[rws[i]._predict(X_test, False) for i in range(env.observation_space.shape[0])])]

        for i in range(env.observation_space.shape[0]):
            plt.figure()
            plt.plot(np.arange(len(y_test[:, i])), y_test[:, i], 'b-')
            plt.errorbar(np.arange(len(mu0[:, i])), mu0[:, i], yerr=np.sqrt(sigma0[:, i]), color='m', ecolor='g')
            plt.grid()

        mu1, sigma1 = rwr._predict(X_test, False)
        plt.figure()
        plt.plot(np.arange(len(r_test)), r_test, 'b-')
        plt.errorbar(np.arange(len(mu1)), mu1, yerr=np.sqrt(sigma1), color='m', ecolor='g')
        plt.grid()
        plt.title('rewards')

        # Updating of hyperstate by rank-one Cholesky updates
        # (a generic sketch of this update follows the example)
        sample_states = []
        sample_rewards = []
        sample_state = np.tile(states[0][None, ...], [no_samples, 1])
        for i in range(len(actions)):
            sample_state_action = np.concatenate([sample_state, np.tile(actions[i][None, ...], [no_samples, 1])], axis=-1)
            mu, sigma = [np.concatenate(ele, axis=-1) for ele in zip(*[rws[k]._predict(sample_state_action, bool(args.update_hyperstate)) for k in range(env.observation_space.shape[0])])]
            sample_state = mu + np.sqrt(sigma) * np.random.standard_normal(size=sigma.shape)
            sample_states.append(sample_state)

            for j in range(env.observation_space.shape[0]):
                rws[j]._update_hyperstate(sample_state_action, sample_state[..., j:j+1], bool(args.update_hyperstate))

            mu, sigma = rwr._predict(sample_state_action, bool(args.update_hyperstate))
            sample_reward = mu + np.sqrt(sigma) * np.random.standard_normal(size=sigma.shape)
            sample_rewards.append(sample_reward)

            rwr._update_hyperstate(sample_state_action, sample_reward, bool(args.update_hyperstate))
        sample_states = np.stack(sample_states, axis=1)
        sample_rewards = np.stack(sample_rewards, axis=1)

        for i in range(env.observation_space.shape[0]):
            plt.figure()
            for sample in sample_states[..., i]:
                plt.plot(np.arange(len(sample)), sample, 'r-')
            plt.plot(np.arange(len(y_test[:, i])), y_test[:, i], 'b-')
            plt.grid()

        plt.figure()
        for sample in sample_rewards.squeeze():
            plt.plot(np.arange(len(sample)), sample, 'r-')
        plt.plot(np.arange(len(rewards)), rewards, 'b-')
        plt.grid()
        plt.title('rewards')

        for i in range(len(rws)):
            rws[i]._reset_statistics(X_train, y_train[..., i:i+1], bool(args.update_hyperstate))
        rwr._reset_statistics(X_train, r_train, bool(args.update_hyperstate))

        # Updating of hyperstate by full Cholesky decomposition
        sample_states = []
        sample_rewards = []
        sample_state = np.tile(states[0][None, ...], [no_samples, 1])
        for i in range(len(actions)):
            sample_state_action = np.concatenate([sample_state, np.tile(actions[i][None, ...], [no_samples, 1])], axis=-1)
            mu, sigma = [np.concatenate(ele, axis=-1) for ele in zip(*[rws[k]._predict(sample_state_action, bool(args.update_hyperstate)) for k in range(env.observation_space.shape[0])])]
            sample_state = mu + np.sqrt(sigma) * np.random.standard_normal(size=sigma.shape)
            sample_state = np.clip(sample_state, env.observation_space.low, env.observation_space.high)
            sample_states.append(sample_state)

            for j in range(env.observation_space.shape[0]):
                rws[j]._update_hyperstate2(sample_state_action, sample_state[..., j:j+1], bool(args.update_hyperstate))

            mu, sigma = rwr._predict(sample_state_action, bool(args.update_hyperstate))
            sample_reward = mu + np.sqrt(sigma) * np.random.standard_normal(size=sigma.shape)
            sample_rewards.append(sample_reward)

            rwr._update_hyperstate2(sample_state_action, sample_reward, bool(args.update_hyperstate))
        sample_states = np.stack(sample_states, axis=1)
        sample_rewards = np.stack(sample_rewards, axis=1)

        for i in range(env.observation_space.shape[0]):
            plt.figure()
            for sample in sample_states[..., i]:
                plt.plot(np.arange(len(sample)), sample, 'y-')
            plt.plot(np.arange(len(y_test[:, i])), y_test[:, i], 'b-')
            plt.grid()

        plt.figure()
        for sample in sample_rewards.squeeze():
            plt.plot(np.arange(len(sample)), sample, 'y-')
        plt.plot(np.arange(len(rewards)), rewards, 'b-')
        plt.grid()
        plt.title('rewards')


        plt.show()
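The comments above mention rank-one Cholesky updates of the hyperstate; the RegressionWrapper2 internals are not shown here, so the following is a generic, self-contained sketch of the standard rank-one update (obtaining the factor of L @ L.T + x x^T from that of L @ L.T), the primitive such an update would build on:

import numpy as np

def chol_rank1_update(L, x):
    """Return the lower-triangular Cholesky factor of L @ L.T + np.outer(x, x)."""
    L, x = L.copy(), x.copy()
    n = x.size
    for k in range(n):
        r = np.hypot(L[k, k], x[k])          # updated diagonal entry
        c, s = r / L[k, k], x[k] / L[k, k]   # Givens-like rotation coefficients
        L[k, k] = r
        L[k + 1:, k] = (L[k + 1:, k] + s * x[k + 1:]) / c
        x[k + 1:] = c * x[k + 1:] - s * L[k + 1:, k]
    return L

# Quick consistency check against a fresh decomposition
A = np.array([[4.0, 1.0], [1.0, 3.0]])
x = np.array([0.5, -0.2])
L0 = np.linalg.cholesky(A)
assert np.allclose(chol_rank1_update(L0, x), np.linalg.cholesky(A + np.outer(x, x)))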