Example #1
    def plot(self):
        f = pylab.figure(figsize=(8,4))
        co = [] #colors container
        for label, (pVal, logratio) in self.data.get(["pValue", "log2Ratio"]).iterrows():
            if pVal < self.pCut:
                if logratio > 0:
                    co.append(Colors().redColor)
                elif logratio < 0:
                    co.append(Colors().greenColor)
                else:
                    raise ValueError("significant gene with log2 ratio of exactly 0")
            else:
                co.append(Colors().blueColor)

        #print "Probability this is from a normal distribution: %.3e" %stats.normaltest(self.log2Ratio)[1]
        #ax = f.add_subplot(121)
        #pylab.axvline(self.meanLog2Ratio, color=Colors().redColor)
        #pylab.axvspan(self.meanLog2Ratio-(2*self.stdLog2Ratio), 
        #              self.meanLog2Ratio+(2*self.stdLog2Ratio), color=Colors().blueColor, alpha=0.2)
        #his = pylab.hist(self.log2Ratio, bins=50, color=Colors().blueColor)
        #pylab.xlabel("log2 Ratio %s/%s" %(self.sampleNames[1], self.sampleNames[0]))
        #pylab.ylabel("Frequency")    
        
        ax = f.add_subplot(111, aspect='equal')
        pylab.scatter(self.genes1, self.genes2, c=co, alpha=0.5)        
        pylab.ylabel("%s RPKM" %self.sampleNames[1])
        pylab.xlabel("%s RPKM" %self.sampleNames[0])
        pylab.yscale('log')
        pylab.xscale('log')
        pylab.tight_layout()
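Example #1 assumes a `Colors` helper class and a pandas DataFrame in `self.data`, neither of which is shown. A minimal sketch of such a helper (only the attribute names come from the snippet; the hex values are made up):

class Colors(object):
    # hypothetical palette; the original class is not part of the snippet
    redColor = '#cc0000'
    greenColor = '#2ca02c'
    blueColor = '#1f77b4'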
Example #2
def TestOverAlpha():
    nDim = 5
    numOfParticles = 10
    maxIteration = 2000
    minX = array([-100.0]*nDim)
    maxX = array([100.0]*nDim)
    maxV = 1.0*(maxX - minX)
    minV = -1.0*maxV
    numOfTrial = 10
    intDim = 4
    alpha = 0.3
    while alpha<1.0:
        gBest = array([0.0]*maxIteration)
        for i in xrange(numOfTrial):
            p1 = AUPSO.PSOProblem(nDim, numOfParticles, maxIteration, minX, maxX, minV, maxV, AUPSO.Sphere,intDim,alpha)
            p1.run()
            gBest = gBest + p1.gBestArray[:maxIteration]
        gBest = gBest / numOfTrial
        pylab.plot(range(maxIteration), gBest,label='alpha='+str(alpha))
        print 'alpha = ', alpha
        alpha += 0.3
    print 'now drawing'
    pylab.title('$G_{best}$ over '+str(numOfTrial)+' trials'+' intDim='+str(intDim))
    pylab.xlabel('The $N^{th}$ Iteration')
    pylab.ylabel('Average gBest over '+str(numOfTrial)+' runs')
    pylab.grid(True)
    pylab.yscale('log')
    ylim = [-6, 1]
    ystep = 1.0
#    pylab.ylim(ylim[0], ylim[1])
#    yticks = linspace(ylim[0], ylim[1], int((ylim[1]-ylim[0])/ystep+1))
#    pylab.yticks(tuple(yticks), tuple(map(str,yticks)))
    pylab.legend(loc='lower left')
    pylab.show()
Example #3
def plot_values(X, Y, xlabel, ylabel, suffix, ptype='plot'):
    output_filename = constants.ATTRACTIVENESS_FOLDER_NAME + constants.DATASET + '_' + suffix

    X1 = [X[i] for i in range(len(X)) if X[i]>0 and Y[i]>0]
    Y1 = [Y[i] for i in range(len(X)) if X[i]>0 and Y[i]>0]
    X = X1
    Y = Y1
    
    pylab.close("all")
    
    pylab.figure(figsize=(8, 7))

    #pylab.rcParams.update({'font.size': 20})

    pylab.scatter(X, Y)
    
    #pylab.axis(vis.get_bounds(X, Y, False, False))

    #pylab.xscale('log')
    pylab.yscale('log')

    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)   
    #pylab.xlim(0.1,1)
    #pylab.ylim(ymin=0.01)
    #pylab.tight_layout()

    pylab.savefig(output_filename + '.pdf')
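A possible invocation with synthetic data; the `constants` module belongs to the surrounding project, so a hypothetical stand-in is defined here:

import numpy as np

class constants(object):  # stand-in for the project's constants module
    ATTRACTIVENESS_FOLDER_NAME = './'
    DATASET = 'demo'

X = np.random.lognormal(size=200).tolist()
Y = np.random.lognormal(size=200).tolist()
plot_values(X, Y, 'in-degree', 'attractiveness', 'indeg_vs_attr')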
Example #4
def plot_fft_brams(new_pasp):

    run = True

    pylab.ion()
    pylab.cla()
    pylab.yscale("log")

    # read in initial data from the fft brams
    fftscope_power = new_pasp.get_fft_brams_power()
    # set up bars for each pasp channel
    fftscope_power_line = pylab.bar(range(0, new_pasp.numchannels), fftscope_power)

    pylab.ylim(1, 1000000)

    # plot forever
    # for i in range(1,10):
    while run:
        try:
            fftscope_power = new_pasp.get_fft_brams_power()

            # update the rectangles
            for j in range(0, new_pasp.numchannels):
                fftscope_power_line[j].set_height(fftscope_power[j])
            pylab.draw()
        except KeyboardInterrupt:
            run = False

    # after receiving an interrupt wait before closing the plot
    raw_input("Press enter to quit: ")

    pylab.cla()
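The `new_pasp` argument is assumed to expose `numchannels` and `get_fft_brams_power()`; a hypothetical stand-in lets the plotting loop be exercised without hardware:

import numpy as np

class FakePasp(object):
    # stand-in for the real PASP board interface
    numchannels = 16
    def get_fft_brams_power(self):
        # random spectrum within the plotted y-range
        return np.random.uniform(1, 1e6, self.numchannels)

plot_fft_brams(FakePasp())  # loops until Ctrl-C, as in the original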
Example #5
def main(k,m,x,v,t0,tf,dt):
    xs=numpy.arange(t0,tf+dt/2,dt)
    w2=k/m
    ys1=[]
    es1=[]
    x1=x
    v1=v
    for i in xs:
        # explicit Euler: advance x with the velocity from the start of the step
        vtemp=v1
        v1=v1-w2*x1*dt
        x1=x1+vtemp*dt
        e1=(k*x1**2+m*v1**2)/2
        ys1+=[x1]
        es1+=[e1]
    #pylab.plot(xs,ys1,'-',label='x(t) - Euler')
    e0=(k*x**2+m*v**2)/2
    incl=numpy.log(e1-e0)/(tf-t0)  # energy growth rate (diagnostic; not plotted)
    pylab.plot(xs,es1,'-',label='E(t); dt=%f'%(dt))
    pylab.xlabel('t')
    pylab.ylabel('E')
    pylab.yscale('log')
    pylab.legend(loc=8)
    for x,y1,e1 in zip(xs,ys1,es1):
        print x, y1, e1
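A sample call for a unit oscillator (arbitrary parameters, assuming `numpy` and `pylab` are imported at module level as the snippet implies); shrinking dt should visibly reduce the energy drift:

main(k=1.0, m=1.0, x=1.0, v=0.0, t0=0.0, tf=50.0, dt=0.01)
main(k=1.0, m=1.0, x=1.0, v=0.0, t0=0.0, tf=50.0, dt=0.001)
pylab.show()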
Example #6
def ShowRawToF():
    raw_codes = GetTimecodes_AllFilesInDir(timepix_path_1, xmin, xmax, ymin, ymax, 0, checkerboard_phase = None)
    offset = 0
    tmin = 0
    tmax = 11810

    fig = pl.figure(figsize=(14,10))
    n_codes, bins, patches = pl.hist([11810-i for i in raw_codes], bins = 11810, range = [0,11810])
    pl.yscale('log', nonposy='clip')


    for i,code in enumerate(raw_codes):
        raw_codes[i] = (11810. - code) - offset
        raw_codes[i] *= 20

    fig = pl.figure(figsize=(14,10))
    n_codes, bins, patches = pl.hist(raw_codes, bins = 1000, range = [0,40000])
    
#     n_codes, bins, patches = pl.hist(raw_codes, bins = tmax-tmin, range = [tmin,tmax])

#     pl.clf()
#     n_codes = n_codes[:-1] 
#     pl.plot(bins, n_codes, 'k-.o', label="Timecodes")
    
    pl.tight_layout()
    
    pl.show()
Example #7
def plot_charts2(data1, data2=None, xlabel=None, ylabel=None, size=(10, 4), log_scale=True):
    plt.figure(figsize=size)
    plt.grid()
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.plot(data1, color='blue', lw=2)
    plt.plot(data1,
             linestyle='None',
             markerfacecolor='white',
             markeredgecolor='blue',
             marker='o',
             markeredgewidth=2,
             markersize=8)
    if data2 is not None:
        plt.plot(data2, color='red', lw=2)
        plt.plot(data2,
                 linestyle='None',
                 markerfacecolor='white',
                 markeredgecolor='red',
                 marker='o',
                 markeredgewidth=2,
                 markersize=8)
    if log_scale:
        plt.yscale('log')
    plt.xlim(-0.2, len(data1) + 0.2)
    plt.ylim(0.8)
    plt.show()
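For example, with two synthetic loss curves (any equal-length sequences work):

train_loss = [2.5, 1.8, 1.4, 1.15, 1.02, 0.95]
val_loss = [2.7, 2.0, 1.6, 1.4, 1.32, 1.30]
plot_charts2(train_loss, val_loss, xlabel='epoch', ylabel='loss')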
Example #8
def esat_comparison_plot(t=_np.linspace(173.15, 373.15, 20), std="Hyland_Wexler", percent=True, log=False):
    import pylab
    import brewer2mpl

    methods = list(esat(0, method="return"))
    methods.remove(std)
    print len(methods)
    pylab.rcParams.update({"axes.color_cycle": brewer2mpl.get_map("Paired", "qualitative", 12).mpl_colors})
    y = esat(t, method=std, info=False)
    i = 0
    style = "-"
    for im in methods:
        if i > 11:
            style = "--"
        print im
        if percent:
            pylab.plot(t, 100.0 * (esat(t, method=im, info=False) - y) / y, lw=2, ls=style)
        else:
            pylab.plot(t, esat(t, method=im, info=False) - y, lw=2, ls=style)
        i += 1
    pylab.legend(methods, loc="upper right", fontsize=8)
    pylab.xlabel("Temperature [K]")
    if percent:
        # pylab.semilogy()
        pylab.ylabel("Water Vapor Pressure Difference [%]")
    else:
        pylab.ylabel("Water Vapor Pressure [Pa]")
    pylab.title("Comparison of Water Vapor Calculations Ref:" + std)
    pylab.xlim(_np.round(t[0]), _np.round(t[-1]))
    pylab.grid()
    pylab.axvline(x=273.15, color="k")
    if log:
        pylab.yscale("log")
Example #9
def _show_rates(rate, wo, wt, attenuator, tau_NP, tau_P):
    import pylab

    #pylab.figure()
    pylab.errorbar(rate, wt[0], yerr=wt[1], fmt='g.', label='attenuated')
    pylab.errorbar(rate, wo[0], yerr=wo[1], fmt='b.', label='unattenuated')

    pylab.xscale('log')
    pylab.yscale('log')
    pylab.xlabel('incident rate (counts/second)')
    pylab.ylabel('observed rate (counts/second)')
    pylab.legend(loc='best')
    pylab.grid(True)
    pylab.plot(rate, rate/attenuator, 'g-', label='target')
    pylab.plot(rate, rate, 'b-', label='target')

    Ipeak, Rpeak = peak_rate(tau_NP=tau_NP, tau_P=tau_P)
    if rate[0] <= Ipeak <= rate[-1]:
        pylab.axvline(x=Ipeak, ls='--', c='b')
        pylab.text(x=Ipeak, y=0.05, s=' %g'%Ipeak,
                   ha='left', va='bottom',
                   transform=pylab.gca().get_xaxis_transform())
    if False:
        pylab.axhline(y=Rpeak, ls='--', c='b')
        pylab.text(y=Rpeak, x=0.05, s=' %g\n'%Rpeak,
                   ha='left', va='bottom',
                   transform=pylab.gca().get_yaxis_transform())
Example #10
def plot_percentage(women_percentage, men_percentage, colors, xaxis_values, filename):
    """
    plots the percentage of people who show up in N language editions
    :param men:
    :param women:
    :param colors:
    :return:
    """
    fig = plt.figure(figsize=(6,6))
    if len(colors) == 0:
        plt.gca().set_color_cycle(['pink', 'blue', 'yellow', 'red', 'black'])
    else:
        plt.gca().set_color_cycle(colors)
    #print xaxis_values
    #print women_percentage
    #print len(xaxis_values)
    #print len(women_percentage)
    plt.plot(xaxis_values, women_percentage[:len(xaxis_values)], linewidth=1)
    plt.plot(xaxis_values, men_percentage[:len(xaxis_values)], linewidth=1)
    plt.xlabel("Log Num Editions")
    plt.ylabel("Log Proportion")
    plt.xscale("log")
    plt.yscale("log")
    plt.savefig(filename)
    plt.close()
Example #11
    def plot(self):
        f = pylab.figure(figsize=(8,4))
        co = [] #colors container
        for zScore, r in itertools.izip(self.zScores, self.log2Ratio):
            if zScore < self.pCut:
                if r > 0:
                    co.append(Colors().greenColor)
                elif r < 0:
                    co.append(Colors().redColor)
                else:
                    raise ValueError("significant gene with log2 ratio of exactly 0")
            else:
                co.append(Colors().blueColor)

        #print "Probability this is from a normal distribution: %.3e" %stats.normaltest(self.log2Ratio)[1]
        ax = f.add_subplot(121)
        pylab.axvline(self.meanLog2Ratio, color=Colors().redColor)
        pylab.axvspan(self.meanLog2Ratio-(2*self.stdLog2Ratio), 
                      self.meanLog2Ratio+(2*self.stdLog2Ratio), color=Colors().blueColor, alpha=0.2)
        his = pylab.hist(self.log2Ratio, bins=50, color=Colors().blueColor)
        pylab.xlabel("log2 Ratio %s/%s" %(self.sampleNames[1], self.sampleNames[0]))
        pylab.ylabel("Frequency")
        
        ax = f.add_subplot(122, aspect='equal')
        pylab.scatter(self.genes1, self.genes2, c=co, alpha=0.5)        
        pylab.ylabel("%s RPKM" %self.sampleNames[1])
        pylab.xlabel("%s RPKM" %self.sampleNames[0])
        pylab.yscale('log')
        pylab.xscale('log')
        pylab.tight_layout()
Example #12
    def _set_axis_parameter(self):
        # set axis to equal length
        params = self.__params
        ax_0 = self._get_axis()
        # set axis aspect
        pylab.xlim(params['xlim'])
        pylab.ylim(params['ylim'])
        x0, x1 = ax_0.get_xlim()
        y0, y1 = ax_0.get_ylim()
        if params['xlog'] and params['ylog']:
            delta_x = float(np.log(x1) - np.log(x0))
            delta_y = float(np.log(y1) - np.log(y0))
        else:
            delta_x = float(x1 - x0)
            delta_y = float(y1 - y0)
        ax_0.set_aspect(delta_x / delta_y)
        # set tick size
        ax_0.tick_params(axis='both', labelsize=params['ticksize'])
        # set logarithmic scale
        if params['xlog']:
            pylab.xscale('log')
            if params['grid']:
                ax_0.xaxis.grid(True, which='both')
        if params['ylog']:
            pylab.yscale('log')
            if params['grid']:
                ax_0.yaxis.grid(True, which='both')
        # grid below bars and boxes
        ax_0.set_axisbelow(params['axisbelow'])
Example #13
def Validation():
  numSamples = 1000000
  
  theta = np.random.rand(numSamples)*np.pi
  ECo60 = np.array([1.117,1.332])
  Ef0,Ee0 = Compton(ECo60[0],theta)
  Ef1,Ee1 = Compton(ECo60[1],theta)
  dSdE0 = diffXSElectrons(ECo60[0],theta)
  dSdE1 = diffXSElectrons(ECo60[1],theta)

  # Sampling Values
  values = list()
  piMax = np.max([dSdE0,dSdE1])
  while (len(values) < numSamples):
    values.append(SampleRejection(piMax,ComptonScattering))
  # Binning the data
  bins = np.logspace(-3,0.2,100)
  counts = np.histogram(values,bins)
  counts = counts[0]/float(len(values))
  binCenters = 0.5*(bins[1:]+bins[:-1])
  
  # Plotting
  pylab.figure()
  pylab.plot(binCenters,counts,ls='steps')
  #pylab.bar(binCenters,counts,align='center')
  pylab.grid(True)
  pylab.xlim((1E-3,1.4))
  pylab.xlabel('Electron Energy (MeV)')
  pylab.ylabel('Frequency per Photon')
  pylab.yscale('log')
  pylab.xscale('log')
  pylab.savefig('ValComptonScatteringXS.png')
Example #14
def demo():
    import pylab

    # The module normalize is not part of the osrefl code base.
    from reflectometry.reduction import normalize

    from .examples import ng7 as dataset
    spec = dataset.spec()[0]
    water = WaterIntensity(D2O=20,probe=spec.probe)
    spec.apply(normalize())
    theory = water.model(spec.Qz,spec.detector.wavelength)

    pylab.subplot(211)
    pylab.title('Data normalized to water scattering (%g%% D2O)'%water.D2O)
    pylab.xlabel('Qz (inv Ang)')
    pylab.ylabel('Reflectivity')
    pylab.semilogy(spec.Qz,theory,'-',label='expected')
    scale = theory[0]/spec.R[0]
    pylab.errorbar(spec.Qz,scale*spec.R,scale*spec.dR,fmt='.',label='measured')

    spec.apply(water)
    pylab.subplot(212)
    #pylab.title('Intensity correction factor')
    pylab.xlabel('Slit 1 opening (mm)')
    pylab.ylabel('Incident intensity')
    pylab.yscale('log')
    pylab.errorbar(spec.slit1.x,spec.R,spec.dR,fmt='.',label='correction')

    pylab.show()
Example #15
    def main(self):
        global weights, densities, weighted_densities
        plt.figure()

        cluster = clusters.SingleStation()
        self.station = cluster.stations[0]

        R = np.linspace(0, 100, 100)
        densities = []
        weights = []
        for E in np.linspace(1e13, 1e17, 10000):
            relative_flux = E ** -2.7
            Ne = 10 ** (np.log10(E) - 15 + 4.8)
            self.ldf = KascadeLdf(Ne)
            min_dens = self.calculate_minimum_density_for_station_at_R(R)

            weights.append(relative_flux)
            densities.append(min_dens)
        weights = np.array(weights)
        densities = np.array(densities).T

        weighted_densities = (np.sum(weights * densities, axis=1) /
                              np.sum(weights))
        plt.plot(R, weighted_densities)
        plt.yscale('log')
        plt.ylabel("Min. density [m^{-2}]")
        plt.xlabel("Core distance [m]")
        plt.axvline(5.77)
        plt.show()
Example #16
def plot_xy(cursor, query, prefix=None, color='b', marker='.', xlog=False, ylog=False, xlabel='', ylabel='', title=''):
    """
        Executes the 'query' which should return two numerical columns.
    """
    cursor.execute(query)
    x_list = []
    y_list = []
    for row in cursor:
        (x, y) = row
        if x is not None and y is not None:
            x_list.append(x)
            y_list.append(y)
    
    X = pylab.array(x_list)
    Y = pylab.array(y_list)
    pylab.figure()
    pylab.hold(True)
    pylab.plot(X, Y, color=color, marker=marker, linestyle='None')
    if (xlog):
        pylab.xscale('log')
    if (ylog):
        pylab.yscale('log')
    
    pylab.title(title + " (R^2 = %.2f)" % pylab.corrcoef(X,Y)[0,1]**2)
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    if prefix is not None:
        pylab.savefig('../res/%s.pdf' % prefix, format='pdf')
    pylab.hold(False)
Example #17
def run_analysis(filename,mode,method):
    click.echo('Reading file : %s'%filename)
    data = IOfile.parsing_input_file(filename)
    click.echo('Creating class...')
    theclass = TFC(data)
    click.echo('Calculating transfer function using %s method'%method)
    if method=='tf_kramer286_sh':
        theclass.tf_kramer286_sh()
    elif method=='tf_knopoff_sh':
        theclass.tf_knopoff_sh()
    elif method=='tf_knopoff_sh_adv':
        theclass.tf_knopoff_sh_adv()
        
    plt.plot(theclass.freq,np.abs(theclass.tf[0]),label=method)
    plt.xlabel('frequency (Hz)')
    plt.ylabel('Amplification')
    plt.yscale('log')
    plt.xscale('log')
    plt.grid(True,which='both')
    plt.legend(loc='best',fancybox=True,framealpha=0.5)
    #plt.axis('tight')
    plt.autoscale(True,axis='x',tight=True)
    plt.tight_layout()
    plt.savefig('test.png', format='png')
    click.echo(click.style('Calculation has been finished!',fg='green'))
Example #18
def show_plot(xlabel, ylabel, xlog=False, ylog=False):
  plt.xscale('log') if xlog else None
  plt.yscale('log') if ylog else None
  plt.xlabel(xlabel)
  plt.ylabel(ylabel)
  plt.subplot(111).legend()
  plt.show()
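Typical use draws the data first and then calls the helper to finish and display the figure, e.g. with synthetic data:

import matplotlib.pyplot as plt

xs = range(1, 100)
plt.plot(xs, [x**2 for x in xs], label='quadratic')
show_plot('x', 'y', xlog=True, ylog=True)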
Example #19
def get_ac_caracteristique(freq_start, freq_stop, BW, power, resistance, sample = 'sample', membrane = 'membrane'):
    index = []
    data = []
    span = int(1601*BW)
    print 'up'
    
    for freq in range(int(freq_start+span/2),int(freq_stop-span/2),span) :
        print freq
        NA.frequency_center = freq
        NA.span = span+40*BW
        NA.if_bandwidth = BW
        NA.sweep_types.power = power
        NA.averaging = True
        NA.averaging_factor = 10
        time.sleep(float(NA.sweep_time*NA.averaging_factor))
        measurements = NA.get_curve()
        index = concatenate((index,measurements.data.index[20:-20]))
        data = concatenate((data,measurements.data[20:-20]))
    final_file = array([index, data]).T.astype(complex)
    print final_file
    np.savetxt('Ac_caracteristic_'+sample+'_'+membrane+'_start'+str(freq_start)+'_stop'+str(freq_stop)+'_BW'+str(BW)+str(resistance)+'.txt',final_file)
    for line in fileinput.FileInput('Ac_caracteristic_'+sample+'_'+membrane+'_start'+str(freq_start)+'_stop'+str(freq_stop)+'_BW'+str(BW)+str(resistance)+'.txt',inplace=1):
        line=line.replace("+-","-")
        print line,
    pylab.close()
    pylab.plot(np.abs(index),np.abs(data)**2, color='red',lw=1)
    pylab.yscale('log')
    pylab.title('Ac_caracteristic_'+sample+'_'+membrane+'_start'+str(freq_start)+'_stop'+str(freq_stop)+'_BW'+str(BW)+str(resistance))
    # save before show() so the figure is written even after the window is closed
    pylab.savefig('Ac_caracteristic_'+sample+'_'+membrane+'_start'+str(freq_start)+'_stop'+str(freq_stop)+'_BW'+str(BW)+str(resistance)+'.png')
    pylab.show()
Example #20
def TestOverStep():
    nDim = 10
    numOfParticles = 20
    maxIteration = 2000
    minX = array([-100.0]*nDim)
    maxX = array([100.0]*nDim)
    maxV = 0.2*(maxX - minX)
    minV = -1.0*maxV
    numOfTrial = 20
    intDim = 5
    stopstep = 200
    for i in xrange(4):
        step = i*3
        gBest = array([0.0]*maxIteration)
        for trial in xrange(numOfTrial):
            p1 = AOPSO.PSOProblem(nDim, numOfParticles, maxIteration, minX, maxX, minV, maxV, AOPSO.Rastrigrin,intDim,step, stopstep)
            p1.run()
            gBest = gBest + p1.gBestArray[:maxIteration]
        gBest = gBest / numOfTrial
        pylab.plot(range(maxIteration), gBest,label='step='+str(step))
    pylab.title('Rastrigin function, intDim=5 ($G_{best}$ over 20 trials)')
    pylab.xlabel('The $N^{th}$ Iteration')
    pylab.ylabel('Average gBest over '+str(numOfTrial)+' runs')
    pylab.grid(True)
    pylab.yscale('log')
    ylim = [-6, 1]
    ystep = 1.0
#    pylab.ylim(ylim[0], ylim[1])
#    yticks = linspace(ylim[0], ylim[1], int((ylim[1]-ylim[0])/ystep+1))
#    pylab.yticks(tuple(yticks), tuple(map(str,yticks)))
    pylab.legend(loc='lower left')
    pylab.show()
Example #21
    def test_fit_bb(self):
        def func(nu, T):
            return np.pi * rf.planck(nu, T, inp="Hz", out="freq")

        self.sp.cut_flux(max(self.sp.Flux) * 1e-5)

        freq = self.sp.Freq
        flux = self.sp.Flux
        Tinit = 1.e4

        popt, pcov = curve_fit(func, freq, flux, p0=Tinit)
        Tbest = popt
        # bestT, pcov = curve_fit(rf.fit_planck(nu, inp='Hz'), nu, flux, p0=Tinit, sigma=sigma)
        sigmaT = np.sqrt(np.diag(pcov))

        print 'True model values'
        print '  Tbb = %.2f K' % self.sp.T

        print 'Parameters of best-fitting model:'
        print '  T = %.2f +/- %.2f K' % (Tbest, sigmaT)

        # Tcol = self.sp.temp_color
        ybest = np.pi * rf.planck(freq, Tbest, inp="Hz", out="freq")
        # plot the solution
        plt.plot(freq, flux, 'b*', label='Spectral T: %f' % self.sp.T)
        plt.plot(freq, ybest, 'r-', label='Best Tcol: %f' % Tbest)
        plt.xscale('log')
        plt.yscale('log')
        plt.legend(loc=3)
        plt.show()

        self.assertAlmostEqual(Tbest, self.sp.T,
                               msg="For planck  Tcolor [%f] should be equal sp.T [%f]." % (Tbest, self.sp.T),
                               delta=Tbest*0.01)
Example #22
def con(text, stopwords):
    alfa = []
    content = [w for w in text if w.lower() not in stopwords and w.isalpha()]
    fdist = FreqDist(content)
    keys = fdist.keys()
    maxiFreq = max(fdist.values())  # highest observed frequency
    for i in range(1, maxiFreq + 1):
        k = len([w for w in keys if fdist[w] == i])
        alfa.append(k)

    ys = range(1, maxiFreq + 1)
    xs = alfa
    pylab.xlabel("Anzahl der Woerter")
    pylab.ylabel("Haeufigkeit")
    pylab.plot(xs, ys)
    pylab.show()
 
    pylab.xlabel("Anzahl der Woerter log scale")
    pylab.ylabel("Haeufigkeit log scale")
    pylab.plot(xs, ys)
    pylab.xscale('log')
    pylab.yscale('log')
    pylab.show()
    return alfa
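`FreqDist` comes from NLTK and `pylab` is assumed to be imported in the original module; a small usage sketch:

from nltk import FreqDist  # assumed import in the original module
import pylab

words = "the cat sat on the mat and the dog sat down".split()
alfa = con(words, {"the", "and", "on"})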
Example #23
def save_plot(l, xlabel=None, title=None, filename=None, xticks_labels=None, legend_loc='upper left', bottom_adjust=None, yscale='linear', ymin=0.0, ymax_factor=1.1):
    params = {
        'backend': 'ps',
        #'text.usetex': True,
        'text.latex.unicode': True,
    }
    pylab.rcParams.update(params)
    pylab.figure(1, figsize=(6,4))
    pylab.grid(True)
    pylab.xlabel(xlabel)
    pylab.ylabel(u"čas [s]")
    pylab.title(title)
    pylab.xlim(0, max(l[0][0][0]) * 1.1)
    if xticks_labels is not None:
        pylab.xticks(l[0][0][0], xticks_labels, rotation=25, size='small', horizontalalignment='right')
    ymax = 0.0
    for i in l:
        pylab.plot(*i[0], **i[1])
        ymax = max(ymax,max(i[0][1]))
    pylab.ylim(ymin, ymax * ymax_factor)
    if bottom_adjust is not None:
        pylab.subplots_adjust(bottom=bottom_adjust)
    pylab.yscale(yscale)
    pylab.legend(loc=legend_loc)
    pylab.savefig(filename, format='eps')
    pylab.clf()
Example #24
def plot_stats(sbstats):
    subbands = map(subband_from_stats, sbstats)
    flagged = [max(sb['real']['flagged%'],sb['imag']['flagged%']) for sb in sbstats]
    astd    = [sb['real']['all-std'] for sb in sbstats]
    gstd    = [sb['real']['good-std'] for sb in sbstats]

    clf()
    
    subplot(311)
    title('Flagged')
    plot(subbands, flagged)
    xlabel('Subband number')
    
    subplot(312)
    title(r'$\sigma$ all')
    plot(subbands, astd)
    yscale('log')
    xlabel('Subband number')
    
    subplot(313)
    title(r'$\sigma$ good')
    plot(subbands, gstd)
    yscale('log')
    xlabel('Subband number')
    pass
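The expected `sbstats` layout can be read off from the indexing above; a synthetic record (field values are made up, and `subband_from_stats` is assumed to map a record to its subband number):

sbstats = [
    {'real': {'flagged%': 1.5, 'all-std': 2.3e-2, 'good-std': 1.1e-2},
     'imag': {'flagged%': 1.2, 'all-std': 2.0e-2, 'good-std': 1.0e-2}},
    # ... one dict per subband
]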
Example #25
    def makeComp(save=0):

        data = Repeatability.getRepeatsData()

        dc = data['dc_min']
        dv = data['dv']

        P.figure()
        P.plot( dc, dv, 'k.', alpha=0.4)
        P.ylim(0.1, 1e6)
        P.xlim(1e-6, 1)
        P.xscale('log')
        P.yscale('log')
        ylim = P.ylim()
        P.plot( [1e-2, 1e-2], ylim, 'b--', lw=2)
        P.plot( [5e-3, 5e-3], ylim, 'r--', lw=2)
        P.plot( [1e-6, 1], [1000, 1000], 'm--', lw=2)
        P.xlabel(r'$\Delta \chi^2/dof$')
        P.ylabel(r'$\Delta v$ (km/s)')
        P.tight_layout()
        if save:
            P.savefig(Repeatability.directory+'/plots/Repeat_all_rchi2_vel.pdf',\
                         bbox_inches='tight')

        ngals = len(dv)*1.
        print 'Total galaxies in plot', ngals
        print '   dchi2 < 0.01', N.sum( dc< 0.01),  N.sum( dc< 0.01)/ngals
        print '   dchi2 < 0.005', N.sum( dc< 0.005),  N.sum( dc< 0.005)/ngals
        print '   dv > 1000 km/s', N.sum( dv > 1000.), N.sum( dv > 1000.)/ngals

        return data
Example #26
    def mesh2d_mcolor_mask(self, data, axis, output=None, mask=None, datscale='log',
                           axiscale=['log', 'log'], pcolors='Greys', maskcolors=None):
        """ >>> generate 2D mesh plot <<< """

        pl.clf()
        fig = pl.figure()
        ax = fig.add_subplot(111)

        pldat = data

        # get the color norm
        if datscale == 'log':
            cnorm = colors.LogNorm()
        elif datscale == 'linear':
            cnorm = colors.NoNorm()
        else:
            raise ValueError("datscale must be 'log' or 'linear'")

        color1 = colors.colorConverter.to_rgba('white')
        color2 = colors.colorConverter.to_rgba('blue')
        color3 = colors.colorConverter.to_rgba('yellow')
        my_cmap0 = colors.LinearSegmentedColormap.from_list(
            'mycmap0', [color1, color1, color2, color2, color2, color3, color3], 512)
        my_cmap0._init()

        if pcolors is not None:
            cm = ax.pcolormesh(axis[0, :], axis[1, :], pldat,
                               cmap=pl.cm.get_cmap(pcolors), norm=cnorm)
            #cm = ax.pcolormesh(axis[0, :], axis[1, :], pldat, cmap=my_cmap0, norm=cnorm)
        else:
            cm = ax.pcolormesh(axis[0, :], axis[1, :], pldat, norm=cnorm)

        if mask is not None:
            # get the color map of mask
            """
            color1 = colors.colorConverter.to_rgba('white')
            color2 = colors.colorConverter.to_rgba('red')
            my_cmap = colors.LinearSegmentedColormap.from_list('mycmap', [color1, color2], 512)
            my_cmap._init()
            alphas = np.linspace(0.2, 0.7, my_cmap.N + 3)
            my_cmap._lut[:, -1] = alphas
            """

            maskdata = np.ma.masked_where((mask <= 1e-2) & (mask >= -1e-2), mask)
            mymap = ax.contourf(axis[0, :], axis[1, :], maskdata, cmap=maskcolors)

            cbar = fig.colorbar(mymap, ticks=[4, 6, 8])  # , orientation='horizontal')
            cbar.ax.set_yticklabels(['void', 'filament', 'halo'])

        pl.xscale(axiscale[0])
        pl.yscale(axiscale[1])

        return
Example #27
def plot_tmp_imp( name_plot ):

	# distance between axes and ticks
	pl.rcParams['xtick.major.pad']='8'
	pl.rcParams['ytick.major.pad']='8'

	# set latex font
	pl.rc('text', usetex=True)
	pl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 20})

	# plotting
	x_plf = pow(10,pl.linspace(-6,0,1000))

	pl.clf()
	p_pl, = pl.plot(x_plf,ff_pl(x_plf,par_pl[0],par_pl[1]), ls='--', color='Red')
	p_lg, = pl.plot(x_plf,ff_lg(x_plf,par_lg[0],par_lg[1]), ls='-', color='RoyalBlue')
	p_points, = pl.plot(df_imp_1d.pi,df_imp_1d.imp,'.', color='Black',ms=10)
	pl.xscale('log')
	pl.yscale('log')
	pl.xlabel('$\phi$')
	pl.ylabel('$\mathcal{I}_{tmp}(\Omega=\{ \phi \})$')
	pl.grid()
	pl.axis([0.00001,1,0.0001,0.1])

	leg_1 = '$\hat{Y} = $' + str("%.4f" % round(par_pl[0],4)) + '$\pm$' + str("%.4f" % round(vv_pl[0][0],4)) + ' $\hat{\delta} = $' + str("%.4f" % round(par_pl[1],4)) + '$\pm$' + str("%.4f" % round(vv_pl[1][1],4)) + ' $E_{RMS} = $' + str("%.4f" % round(pl.sqrt(chi_pl/len(df_imp_1d.imp)),4))
	leg_2 = '$\hat{a} = $' + str("%.3f" % round(par_lg[0],3)) + '$\pm$' + str("%.3f" % round(vv_lg[0][0],3)) + ' $\hat{b} = $' + str("%.0f" % round(par_lg[1],3)) + '$\pm$' + str("%.0f" % round(vv_lg[1][1],3)) + ' $E_{RMS} = $' + str("%.4f" % round(pl.sqrt(chi_lg/len(df_imp_1d.imp)),4))
	l1 = pl.legend([p_pl,p_lg], ['$f(\phi) = Y\phi^{\delta}$', '$g(\phi)= a \log_{10}(1+b\phi)$'], loc=2, prop={'size':15})
	l2 = pl.legend([p_pl,p_lg], [leg_1 ,leg_2 ], loc=4, prop={'size':15})
	pl.gca().add_artist(l1)
	pl.subplots_adjust(bottom=0.15)
	pl.subplots_adjust(left=0.17)
	pl.savefig("../plot/" + name_plot + ".pdf")
Example #28
def plot_values(X, Y, xlabel, ylabel, suffix):
    output_filename = constants.CHARTS_FOLDER_NAME + constants.DATASET + '_' + suffix

    
    pylab.figure(figsize=(8, 7))

    pylab.rcParams.update({'font.size': 20})

    pylab.scatter(X, Y)
    
    '''
    #smoothing
    s = np.square(np.max(Y))
    tck = interpolate.splrep(X, Y, s=s)
    Y_smooth = interpolate.splev(X, tck)
    pylab.plot(X, Y_smooth)
    '''

    #pylab.axis(vis.get_bounds(X, Y, False, False))

    pylab.xscale('log')
    pylab.yscale('log')

    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)   

    #pylab.tight_layout()

    pylab.savefig(output_filename + '.pdf')
Example #29
def quartile_plot(
        fits,
        group_index_start, group_index_end,
        model_param_index,
        ylim=None,
        log=True,
        xlabel=None,
        ylabel=None,
        labels=None):
    model_param_values = [
        fit_params(fits, group_index, model_param_index)
        for group_index in xrange(
            group_index_start, group_index_end)
    ]
    fig = plt.figure(figsize=(len(model_param_values), 7))
    if log is True:
        plt.yscale('log')
    if ylim is not None:
        plt.ylim(ylim)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    plt.boxplot(
        model_param_values,
        labels=labels,
        showmeans=True)
    plt.grid()
    plt.show()
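`fit_params` is defined elsewhere; to exercise the plot standalone, a hypothetical stand-in returning random samples works:

import numpy as np

def fit_params(fits, group_index, model_param_index):
    # stand-in: the real helper extracts one parameter's fitted values per group
    return np.random.lognormal(size=50)

quartile_plot(None, 0, 4, 0, xlabel='group', ylabel='value',
              labels=['g0', 'g1', 'g2', 'g3'])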
Example #30
def plot_fft_brams():
    import pylab
    run = True
    
    # turn on live updating
    pylab.ion()
    # plot the power in log scale
    pylab.yscale('log')
    
    # get an initial power spectrum and plot the power lines as rectangles
    fftscope_power = get_fft_brams_power()
    fftscope_power_line=pylab.bar(range(0,numchannels),fftscope_power)
    
    pylab.ylim(1,1000000)
    
    # plot until an interrupt is received
    # for i in range(1,10):
    while(run):
        try:
            # read in a new spectrum
            fftscope_power = get_fft_brams_power()
            
            # update the rectangles based on the new power spectrum
            for j in range(0,numchannels):
                fftscope_power_line[j].set_height(fftscope_power[j])
            #update the plot
            pylab.draw()
        except KeyboardInterrupt:
            run = False
    
    # after stopping the liveupdate leave the plot up until the user is done with it
    raw_input('Press enter to quit: ')
            
    pylab.cla() 
Example #31
    plt.xlim([1.e-1, 1e8])
    ax3.set_xscale('log')
    plt.xlabel("$z$")
    plt.ylabel("$\phi$")
    ax3.grid(True)

plt.savefig(name + ".pdf")
plt.show()

if False:
    ax4 = fig.add_subplot(2, 3, 4)  # was (1, 3, 4): slot 4 does not exist in a 1x3 grid; 2x3 matches ax5 below
    for i in range(4):
        ax4.plot(lna, pts4[i])
    plt.xlabel("$\ln a$")
    plt.ylabel("$\dot \phi^2$")
    pylab.yscale('log')
    ax4.grid(True)

    ax5 = fig.add_subplot(2, 3, 5)
    ax5.plot(lna, T.Pot(xx, 0))
    plt.xlabel("$\ln a$")
    plt.ylabel("$V(\phi)$")
    pylab.yscale('log')
    ax5.grid(True)

    #ax6=fig.add_subplot(2,3,6)
    #ax6.plot(xx, (T.Pot(xx,1)**2/T.Pot(xx,0)/4. - T.Pot(xx,0))*exp(-140)*4.E4*(3/2.36))
    #plt.xlim([-35,-30])
    #plt.xlabel("$\ln a$")
    #plt.ylabel("$V,{\phi}(\phi)$")
    #pylab.yscale('log')
Example #32
#plt.show()

# Log-normal box
smre_k, smre_pk, smre_stddev \
    = box.binned_power_spectrum(delta_k=fft.fftn(delta_smoothed))

# Plot some stuff
fig = plt.figure()
plt.plot(th_k, th_pk, 'b-', label="Theoretical P(k)")
#plt.errorbar(re_k, re_pk, yerr=re_stddev, fmt=".", color='r')
plt.errorbar(re_k, re_pk, yerr=re_stddev, marker='.', color='r', 
             ls='none', label="P(k) from density field")
plt.errorbar(smre_k, smre_pk, yerr=smre_stddev, marker='x', color='g', 
             ls='none', label="P(k) from smoothed field")
plt.xscale('log')
plt.yscale('log')
plt.legend(loc='lower left', frameon=False)
plt.ylim((1e0, 1e5))
plt.show()


sys.exit(0)


# Log-normal box
delta_ln = box.lognormal(box.delta_x)
lnre_k, lnre_pk, lnre_stddev \
    = box.binned_power_spectrum(delta_k=fft.fftn(delta_ln))

plt.matshow(delta_ln[0], vmin=-1., vmax=2., cmap='cividis')
plt.title("Log-normal density")
Example #33
thBragg = float(Sub.calc_Bragg_angle(Energy).subs(Sub.structure.subs).evalf())
angle = pl.linspace(data[:, 0][0] * np.pi / 180, data[:, 0][-1] * np.pi / 180,
                    len(data[:, 0]))

XR = crystal.calc_reflectivity(angle, Energy)
crystal.print_values(angle, Energy)

#pl.plot(angle*180/np.pi,abs(layer1.XR)**2)

#pl.plot(angle*180/np.pi,abs(Sub.XR)**2)

pl.plot(data[:, 0], abs(XR)**2)

pl.plot(data[:, 0] + thetaoffset, data[:, 1] * scale, '.')

pl.yscale('log')

pl.show()


def residual(params, angle, data):
    strainmax = params['strainmax'].value
    straindepth = params['straindepth'].value
    scale = params['scale'].value
    thetaoffset = params['thetaoffset'].value

    c = layer1.strain.keys()[0]
    tvector, strain = my_strain_profile(strainmax, straindepth)
    layer1.strain[c] = strain
    layer1.thickness = tvector
    XR = crystal.calc_reflectivity(angle + thetaoffset * np.pi / 180, Energy)
Example #34
def main():

    matlab_path = '/Applications/MATLAB_R2020b.app/bin/matlab'

    ## THE FOLLOWING FLAGS TO TRUE/FALSE CONTROL WHICH TESTS ARE DONE
    do_ibp_tests = True
    do_maaipm_tests = True
    do_colgen_tests = True
    do_comparison_plots = True

    # Control which data set to use: 'sparsesquare', 'sparsedisc', 'noisygrid', 'translatedgrid'
    test_type = 'sparsesquare'
    k_vals = [10]
    n_vals = [10]

    # Control parameters for IBP vs. MAAIPM method.
    ibp_eps_vals = [0.1, 0.01] # Support size gridding for IBP.
    ibp_filter_sizes = [1] # the regularization parameter is eta = (1/eps)^2 / (2 * filter_size^2)
    maaipm_eps_inv_vals = [10] # eps_inv x eps_inv grid for MAAIPM.

    # Plot parameters
    y_limits = (1e-10 - 1e-11, 10.0)
    x_limits = (0.009,100)


    for k in k_vals:

        for n in n_vals:

            np.random.seed(19238412)
            random.seed(12834981)

            data_identifier = test_type + '_n' + str(n) + 'k' + str(k)

            ##########################################################################
            ## A) Generate the sparse test data
            if test_type == 'sparsesquare':
                sparse_data = get_sparsesquare_test(k=k,n=n)
            elif test_type == 'sparsedisc':
                sparse_data = get_sparsedisc_test(k=k,n=n)
            elif test_type == 'noisygrid':
                sparse_data = get_noisygrid_test(k=k,n=n)
            elif test_type == 'translatedgrid':
                sparse_data = get_translatedgrid_test(k=k,n=n)
            else:
                assert(0)
#            show_sparse_data(sparse_data)

            # And save it for use by IBP and MAAIPM
            data_filename = 'comparison_test_files/experiment_data/' + data_identifier

            # Save it as gridded images (for IBP):
            eps_vals = ibp_eps_vals
            for i, eps_val in enumerate(eps_vals):
                dense_data = get_dense_data_representation(sparse_data, eps=eps_val, bounding_box=((-1,-1),(1,1)))
                eps_desc = str(eps_val).split('.')[1]
                dense_data_filename = data_filename + 'eps' + eps_desc
                save_dense_data_representation(dense_data, dense_data_filename)

            # Save it as a list of points (for MAAIPM):
            save_sparse_data_representation(sparse_data, data_filename)

            ##########################################################################
            # B) Run our column generation method, collecting an error plot.
            if do_colgen_tests:
                filename_colgen = 'comparison_test_files/experiment_results/colgen_results/' + data_identifier + '.txt';
                if os.path.exists(filename_colgen):
                    print('ALREADY EXISTS: ' + filename_colgen)
                else:
                    bary_prob = BarycenterProblem(sparse_data, method='powerdiagram', log_file=filename_colgen)
                    bary_prob.solve()

            ########################################################################
            ## C) Run IBP method, gridding the space.

            ## 1) Run the IBP code (MATLAB) computing regularized barycenters (different regularizations and epsilons).
            if do_ibp_tests:
                eps_vals = ibp_eps_vals
                filter_sizes = ibp_filter_sizes # the regularization parameter is eta = (1/eps)^2 / (2 * filter_size^2)
                for i, eps_val in enumerate(eps_vals):
                    eps_desc = str(eps_val).split('.')[1]
                    for filter_size in filter_sizes:
                        ibp_res_prefix = 'comparison_test_files/experiment_results/ibp_results/' + data_identifier + 'eps' + eps_desc + 'f' + str(filter_size);
                        ibp_res_file = ibp_res_prefix + '.txt'
                        if os.path.exists(ibp_res_file):
                            print('ALREADY EXISTS: ' + ibp_res_file)
                            continue
                        time.sleep(0.05)
                        matlab_command = matlab_path + ' -nodisplay -r "cd(\'comparison_test_files/IBP_code\'); test_barycenters_ibp(' + str(n) + ', ' + str(k) + ',\'' + eps_desc + '\',\'' + test_type + '\',' + str(filter_size) + ',{imgortime:d});exit"';

                        ## i) Assign scores to the IBP barycenters w.r.t. actual point cloud.
                        ## Use the Lemon solver to obtain the exact optimal transport distance between pairs of points.
                        costs_filename = ibp_res_prefix + '_costs.txt'
                        costs_lemon = []
                        if not os.path.exists(costs_filename):
                            print(matlab_command)
                            os.system(matlab_command.format(imgortime=1)) # Write the barycenter at each iteration to the disk.
                            t = 1
                            while True:
                                curr_filename = ibp_res_prefix + '/b_' + str(t) + '.png'
                                if not os.path.exists(curr_filename):
                                    break
                                if t % 40 == 1 or (t < 80 and t % 10 == 1):
                                    f1 = pylab.imread(curr_filename)
                                    print(f1)
                                    f1 = f1 / np.sum(f1)
                                    sparsedatum_f1 = get_sparse_data_representation([f1], bounding_box=((-1,-1),(1,1)))[0]

                                    print(t)
                                    curr_lemon = 0
                                    for i in range(k):
                                        print(t,i)
                                        curr_lemon += exact_cost_lemon(sparse_data[i], sparsedatum_f1, lemon_solver_location='../lemon_solver/LemonNetworkSimplex') / k
                                    costs_lemon.append(curr_lemon)
                                else:
                                    costs_lemon.append(-1)
                                t += 1

                            costs_file = open(costs_filename, 'w')
                            for i in range(len(costs_lemon)):
                                costs_file.write('{v:.9f}\n'.format(v=costs_lemon[i]))
                            costs_file.close()
                        else:
                            print('USING PREVIOUSLY-COMPUTED COSTS FILE: ' + costs_filename)
                            costs_file = open(costs_filename, 'r')
                            costs_lemon = costs_file.readlines()
                            costs_lemon = [float(i) for i in costs_lemon]
                            costs_file.close()

                        # ii) Separately re-run the IBP code to get the timing information.
                        timings_filename = ibp_res_prefix + "_timing.txt";
                        if not os.path.exists(timings_filename):
                            os.system(matlab_command.format(imgortime=2)) # Write the cumulative time to each iteration to the disk.
                        else:
                            print('USING PREVIOUSLY-COMPUTED TIMINGS FILE: ' + timings_filename)
                        timings_file = open(timings_filename, 'r')
                        times = timings_file.readlines()
                        times = [float(i) for i in times]
                        timings_file.close()

                        res_file = open(ibp_res_file, 'w')
                        assert(len(times) == len(costs_lemon))
                        for i in range(len(times)):
                            res_file.write('{v:.9f} {t:.9f}\n'.format(v=costs_lemon[i], t=times[i]));
                        res_file.close()

            ##########################################################################
            ## D) Run MAAIPM method with fixed-support assumption.

            if do_maaipm_tests:
                for eps_inv in maaipm_eps_inv_vals: # eps_inv x eps_inv-size grid
                    maaipm_res_file = 'comparison_test_files/experiment_results/ipm_results/' + data_identifier + 'epsinv' + str(eps_inv) + '.txt'
                    if os.path.exists(maaipm_res_file):
                        print('ALREADY EXISTS: ' + maaipm_res_file)
                        continue
                    time.sleep(0.05)
                    matlab_command = matlab_path + ' -nodisplay -r "cd(\'comparison_test_files/MAAIPM_code\'); test_barycenters_maaipm_grid_support(' + str(n) + ', ' + str(k) + ',' + str(eps_inv) + ',\'' + test_type + '\');exit"';

                    os.system(matlab_command)
                    os.system('mv comparison_test_files/experiment_results/ipm_results/latest_maaipm_results.txt ' + maaipm_res_file)



    ##############################################################################
    ## E) Create the comparison plots between the algorithms
    if do_comparison_plots:
        print('Comparison plots here')
        markers = ['x', 'o', 'v', '^', '.']*100
        best_val = 10
        last_time = 0
        title_text = ''
        legend_names = None


        # Which data to plot.
        # Example comparison plot: n = 5, k = 5, sparsesquare.
        series = ['colgen_results/sparsesquare_n5k5.txt', 'ipm_results/sparsesquare_n5k5epsinv10.txt', 'ibp_results/sparsesquare_n5k5eps1f1.txt']
        legend_names = ['Proposed algorithm', 'MAAIPM 10x10 grid', 'IBP 10 x 10 grid, filter-size 1']

        assert(len(k_vals) == 1)
        assert(len(n_vals) == 1)
        k = k_vals[0]
        n = n_vals[0]
        test_prefix = test_type + '_n' + str(n) + 'k' + str(k);
        title_text = test_type + ', n = ' + str(n) + ', k = ' + str(k)

        series = []
        legend_names = []
        if do_colgen_tests:
            series.append('colgen_results/' + test_prefix + '.txt')
            legend_names.append('Proposed algorithm')

        if do_ibp_tests:
            for eps_val in ibp_eps_vals:
                eps_desc = str(eps_val).split('.')[1]
                for filter_size in ibp_filter_sizes:
                    series.append('ibp_results/' + test_prefix + 'eps' + eps_desc + 'f' + str(filter_size) + '.txt')
                    legend_names.append('IBP eps=' + str(eps_val) + ' filter-size=' + str(filter_size))

        if do_maaipm_tests:
            for eps_inv in maaipm_eps_inv_vals:
                series.append('ipm_results/' + test_prefix + 'epsinv' + str(eps_inv) + '.txt')
                legend_names.append('IPM epsinv=' + str(eps_inv))

#        series = ['colgen_results/sparsesquare_n25k10.txt', 'ibp_results/sparsesquare_n25k10eps1f1.txt', 'ibp_results/sparsesquare_n25k10eps1f2.txt', 'ibp_results/sparsesquare_n25k10eps05f1.txt', 'ibp_results/sparsesquare_n25k10eps05f2.txt']
#        series = ['colgen_results/sparsesquare_n25k10.txt', 'ipm_results/sparsesquare_n25k10epsinv10.txt', 'ipm_results/sparsesquare_n25k10epsinv30.txt', 'ipm_results/sparsesquare_n25k10epsinv70.txt']

##        # IPM plot: n = 20, k = 20, noisygrid.
##        series = ['colgen_results/noisygrid_n20k10.txt', 'ipm_results/noisygrid_n20k10epsinv10.txt', 'ipm_results/noisygrid_n20k10epsinv30.txt', 'ipm_results/noisygrid_n20k10epsinv50.txt']

##        # IBP plot: n = 20, k = 20, sparsesquare.
##        series = ['colgen_results/sparsesquare_n20k10.txt', 'ibp_results/sparsesquare_n20k10eps04f1.06.txt', 'ibp_results/sparsesquare_n20k10eps01f7.txt', 'ibp_results/sparsesquare_n20k10eps004f12.txt']
##        legend_names = ['Proposed algorithm', 'IBP 25x25 grid, $\eta=100$', 'IBP 100x100 grid, $\eta=100$', 'IBP 250x250 grid, $\eta=200$']
##        title_text = 'Comparison with IBP'

#        # IPM plot: n = 20, k = 20, sparsesquare.
#        series = ['colgen_results/sparsesquare_n20k10.txt', 'ipm_results/sparsesquare_n20k10epsinv10.txt', 'ipm_results/sparsesquare_n20k10epsinv40.txt', 'ipm_results/sparsesquare_n20k10epsinv70.txt']
#        legend_names = ['Proposed algorithm', 'MAAIPM 10x10 grid', 'MAAIPM 40x40 grid', 'MAAIPM 70x70 grid']
#        title_text = 'Comparison with MAAIPM'

        # # Comparison plot: n = 5, k = 5, sparsesquare.
        # series = ['colgen_results/sparsesquare_n5k5.txt', 'ipm_results/sparsesquare_n5k5epsinv10.txt', 'ibp_results/sparsesquare_n5k5eps1f1.txt']
        # legend_names = ['Proposed algorithm', 'MAAIPM 10x10 grid', 'IBP 10 x 10 grid, filter-size 1']
        # title_text = 'Example comparison with MAAIPM and IBP'


        if legend_names is None:
            legend_names = series

        val_data = []
        time_data = []

        # Sanitize data (remove places where cost was not computed.)
        for i in range(len(series)):
            temp_vals, temp_times = read_val_time_file('comparison_test_files/experiment_results/' + series[i])
            vals = []
            times = []
            for j in range(len(temp_vals)):
                if temp_vals[j] >= 0:
                    vals.append(temp_vals[j])
                    times.append(temp_times[j])
            val_data.append(vals)
            time_data.append(times)

        for i, serie in enumerate(series):
            vals = val_data[i]
            times = time_data[i]
            best_val = min(best_val, np.min(vals)) # Provided that column generation is one of the methods run, best_val will be the true optimum for the problem.
            last_time = max(last_time, np.max(times))

        pylab.figure(figsize=(12,8))
        for i, serie in enumerate(series):
            vals = val_data[i]
            times = time_data[i]
            vals.append(vals[-1])
            times.append(last_time)
            pylab.plot(times, np.array(vals) - best_val + 1e-18, marker=markers[i], label=legend_names[i], linewidth=3.0)
        pylab.ylim(y_limits)
        pylab.xlim(x_limits)
        pylab.yscale('log')
        pylab.xscale('log')

        font = {'family' : 'sans-serif',
                'size'   : 30}
        pylab.rc('font', **font)
        pylab.rcParams['text.usetex'] = True

        ax = pylab.gca()
        ax.set_ylabel('Suboptimality gap')
        ax.set_xlabel('Time (seconds)')
        ax.set_title(title_text)
        for item in ([ax.xaxis.label, ax.yaxis.label] +
                     ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(30)
            item.set_usetex(True)
        ax.title.set_fontsize(40)
        ax.title.set_usetex(True)

        pylab.legend()
        pylab.show()
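`read_val_time_file` is not shown; given that the result files are written as `'{v:.9f} {t:.9f}'` lines above, a plausible reader (an assumption, not the original helper) is:

def read_val_time_file(path):
    # parse "value time" pairs, one per line
    vals, times = [], []
    with open(path) as fh:
        for line in fh:
            v, t = line.split()
            vals.append(float(v))
            times.append(float(t))
    return vals, times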
Example #35
    plt.plot([n['count_packets_good'] for n in nodestats],
             '.-',
             label='packets: good')
    plt.plot([n['count_packets_bad'] for n in nodestats],
             '.-',
             label='packets: bad')
    plt.plot([n['count_packets_dropped'] for n in nodestats],
             '.-',
             label='packets: dropped')
    plt.plot([n['count_beam_id_mismatch'] for n in nodestats],
             '.-',
             label='beam id mismatch')
    plt.xlabel('L1 node number')
    plt.xticks(np.arange(n_l1_nodes))
    plt.legend(fontsize=8)
    plt.yscale('symlog')
    plt.savefig('counts.png')

    print('Sending write_chunks requests...')
    for socket in sockets:
        beams = [0, 1, 2]
        minchunk = 0
        maxchunk = 5
        filename_pat = 'chunk-beam(BEAM)-fpga(FPGA0)+(FPGAN)-py.msgpack'
        msg = (msgpack.packb(['write_chunks', token]) +
               msgpack.packb([beams, minchunk, maxchunk, filename_pat]))
        socket.send(msg)
    print('Waiting for write_chunks replies...')
    for socket in sockets:
        hdr, msg = socket.recv_multipart()
        print('Received reply: %i bytes' % len(msg))
Example #36
def eval_gwas(pv, i_causal, out_fn=None, plot=False):
    """
    
    """

    pv_thresholds = [1e-3, 5e-4, 1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7, 1e-7, 5e-8, 1e-8]

    # compute lambda on all p-values?
    #lambda_gc = estimate_lambda(p_values)
    lambda_gc = estimate_lambda(pv)
        
    n_causal = i_causal.sum()

    #compute power and Type-1 error
    power = np.zeros_like(pv_thresholds)
    t1err = np.zeros_like(pv_thresholds)
    power_corr = np.zeros_like(pv_thresholds)
    t1err_corr = np.zeros_like(pv_thresholds)
        
    pvcorr = stats.chi2.sf(stats.chi2.isf(pv,1)/lambda_gc,1)

    for i_t, t in enumerate(pv_thresholds):
        #compute uncorrected power and T1
        i_lower = pv<t
        power[i_t] =  i_causal[i_lower].sum()/(1.0*(n_causal))
        t1err[i_t] = (~i_causal[i_lower]).sum()/(1.0*(len(i_causal)-n_causal))

        #compute GC corrected Power and T1
        i_lower_corr = pvcorr<t
        power_corr[i_t] =  i_causal[i_lower_corr].sum()/(1.0*(n_causal))
        t1err_corr[i_t] = (~i_causal[i_lower_corr]).sum()/(1.0*(len(i_causal)-n_causal))


    if plot == True:
        import pylab
        pylab.figure()
        pylab.title("lambda_gc=%f" % lambda_gc)
        pylab.plot(pv_thresholds, power, "-o")
        pylab.yscale("log")
        pylab.xscale("log")
        pylab.xlabel("pv threshold")
        pylab.ylabel("power")
        pylab.grid(True)
        pylab.plot(pv_thresholds, power_corr, "-o")
            
        if out_fn is not None:
            pow_fn = out_fn.replace(".pickle", "_pow.pdf")
            pylab.savefig(pow_fn)
        else:
            pylab.show()

        pylab.figure()
        pylab.title("lambda_gc=%f" % lambda_gc)
        pylab.plot(pv_thresholds, t1err, "-o", label="t1err")
        pylab.plot(pv_thresholds, t1err_corr, "-o", label="t1err_gc")
        pylab.yscale("log")
        pylab.xscale("log")
        pylab.xlabel("pv threshold")
        pylab.ylabel("t1err")
        pylab.grid(True)
          
        pylab.plot(pv_thresholds, pv_thresholds, "-", label="thres")
        pylab.legend(loc="upper left")
        if out_fn is not None:
            t1err_fn = out_fn.replace(".pickle", "_t1err.pdf")
            pylab.savefig(t1err_fn)
        else:
            pylab.show()

        # plot auROC
        if out_fn is None:
            roc_fn = None
        else:
            roc_fn = out_fn.replace(".pickle", "_roc.pdf")
        plot_roc(i_causal, -pv, label='lambda_gc=%0.4f' % (lambda_gc), out_fn=roc_fn)

        # plot auPRC
        if out_fn is None:
            prc_fn = None
        else:
            prc_fn = out_fn.replace(".pickle", "_prc.pdf")
        plot_prc(i_causal, -pv, label='lambda_gc=%0.4f' % (lambda_gc), out_fn=prc_fn)


    # wrap up metrics
    res = {}
    res["lambda"] = lambda_gc
    res["pv_thresholds"] = pv_thresholds
    res["power"] = power
    res["power_corr"] = power_corr
    res["t1err"] = t1err
    res["t1err_corr"] = t1err_corr

    return res
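`estimate_lambda` is imported from elsewhere; the usual genomic-control estimator divides the median observed chi-square by the theoretical chi-square(1) median, roughly as follows (a standard formulation, assumed rather than taken from the original):

import numpy as np
from scipy import stats

def estimate_lambda(pv):
    # genomic control: median observed chi2 over the chi2(1) median (~0.455)
    chi2_obs = stats.chi2.isf(pv, 1)
    return np.median(chi2_obs) / stats.chi2.ppf(0.5, 1)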
Example #37
import numpy as np
import pylab as pl


def f(x):
    # integrand passed to integral() below; f_1 is its antiderivative
    return x**4 - 2*x + 1


def f_1(x):
    return x**5 / 5 - x**2 + x


def integral(f, N, a, b):
    h = (b - a) / float(N)  # avoid integer division under Python 2
    result = 0.5 * f(a) + 0.5 * f(b)
    for k in range(1, N):
        result += f(a + k * h)

    result = h * result

    return result


area_exact = f_1(1) - f_1(0)
N = []
deltas = []
for n in range(1, 1000):
    N.append(n)
    area_trapezoidal = integral(f, n, 0, 1)
    delta = np.abs(area_trapezoidal - area_exact) / area_exact
    deltas.append(delta)

pl.plot(N, deltas)
pl.title("Error development of trapezoidal integration method")
pl.xlabel("Number of trapezoidal slices")
pl.ylabel("Error")
pl.yscale("log")
pl.show()
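The trapezoidal rule is second-order accurate, so doubling N should cut the error by roughly a factor of four; a quick check using the integrand `f` defined above:

e1 = abs(integral(f, 100, 0, 1) - area_exact)
e2 = abs(integral(f, 200, 0, 1) - area_exact)
print(e1 / e2)  # should be close to 4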
Example #38
def PlotStatsForParam(config, param_name):
    """
    @brief Save plots for the results of reconvolution_validation, when param_name is varied.
    @param config         galsim yaml config, which was used to produce the results, read by yaml
    @param param_name     varied parameter name, listed under config['vary_params'], for which
                          to create the plots
    """

    # get the shortcut to the dict corresponding to current varied parameter
    param = config['vary_params'][param_name]

    # prepare the output dict and initialise lists
    bias_list = {
        'm1': [],
        'm2': [],
        'c1': [],
        'c2': [],
        'm1_std': [],
        'm2_std': [],
        'c1_std': [],
        'c2_std': []
    }
    bias_moments_list = copy.deepcopy(bias_list)
    bias_hsmcorr_list = copy.deepcopy(bias_list)

    # loop over values changed for the varied parameter
    for iv, value in enumerate(param['values']):

        # get the filename for the results file
        filename_results_direct = 'results.%s.%s.%03d.direct.cat' % (
            config['filename_config'], param_name, iv)
        filename_results_reconv = 'results.%s.%s.%03d.reconv.cat' % (
            config['filename_config'], param_name, iv)

        # get the path for the results files
        filepath_results_reconv = os.path.join(config['results_dir'],
                                               filename_results_reconv)
        filepath_results_direct = os.path.join(config['results_dir'],
                                               filename_results_direct)

        logging.info('parameter %s, index %03d, value %2.4e' %
                     (param_name, iv, float(value)))

        # if there is no .reconv or .direct file, look for the default to compare it against
        if not os.path.isfile(filepath_results_direct):
            logging.info('file %s not found, looking for defaults' %
                         filepath_results_direct)
            filename_results_direct = 'results.%s.default.direct.cat' % (
                config['filepath_config'])
            filepath_results_direct = os.path.join(config['results_dir'],
                                                   filename_results_direct)
            if not os.path.isfile(filepath_results_direct):
                raise NameError('file %s not found' % filepath_results_direct)

        if not os.path.isfile(filepath_results_reconv):
            logging.info('file %s not found, looking for defaults' %
                         filepath_results_reconv)
            filename_results_reconv = 'results.%s.default.reconv.cat' % (
                config['filepath_config'])
            filepath_results_reconv = os.path.join(config['results_dir'],
                                                   filename_results_reconv)
            if not os.path.isfile(filepath_results_reconv):
                raise NameError('file %s not found' % filepath_results_reconv)

        # measure m and c biases
        bias_moments, bias_hsmcorr = GetBias(config, filepath_results_direct,
                                             filepath_results_reconv)

        logging.info('bias_moments has %d points ' % len(bias_moments))

        # append results lists  - slightly clunky way
        bias_moments_list[iv] = bias_moments
        bias_hsmcorr_list[iv] = bias_hsmcorr

    # yaml is bad at converting lists of floats in scientific notation to floats
    values_float = map(float, param['values'])

    # get the tick labels
    values_float_ticklabels = map(str, values_float)

    # if very large value is used, put it closer to other points
    for ivf, vf in enumerate(values_float):
        if vf > 1e10:
            values_float_ticklabels[ivf] = str(vf)
            values_float_sorted = sorted(values_float)
            values_float[ivf] = values_float_sorted[-2] * 10

    # set some plot parameters
    fig_xsize, fig_ysize, legend_ncol, legend_loc = 12, 10, 2, 3

    # plot figures for moments
    pylab.figure(1, figsize=(fig_xsize, fig_ysize))
    pylab.title('Weighted moments - uncorrected')
    pylab.xscale('log')

    # add the scattered m values to plot
    for iv, value in enumerate(param['values']):

        m1 = [b['m1'] for b in bias_moments_list[iv]]
        # m2 = [b['m2'] for b in bias_moments_list[iv]]

        print any(numpy.isnan(m1))

        pylab.plot(numpy.ones([len(m1)]) * values_float[iv], m1, 'x')
        pylab.errorbar(values_float[iv],
                       numpy.mean(m1),
                       yerr=numpy.std(m1, ddof=1),
                       fmt='o',
                       capsize=30)
        # pylab.plot(numpy.ones([len(m2)])*values_float[iv],m2,'o')

    print values_float
    print values_float_ticklabels
    pylab.xticks(values_float, values_float_ticklabels)
    pylab.yscale('symlog', linthreshy=1e-2)
    pylab.ylabel('m1')
    pylab.xlabel(param_name)
    pylab.xlim([min(values_float) * 0.5, max(values_float) * 1.5])
    pylab.legend(ncol=legend_ncol, loc=legend_loc, mode="expand")
    filename_fig = 'fig.moments.%s.%s.png' % (config['filename_config'],
                                              param_name)
    pylab.savefig(filename_fig)
    pylab.close()
    logging.info('saved figure %s' % filename_fig)

    # plot for HSM
    pylab.figure(2, figsize=(fig_xsize, fig_ysize))
    pylab.title('Weighted moments - corrected')
    pylab.xscale('log')
    pylab.yscale('symlog', linthreshy=1e-3)

    for iv, value in enumerate(param['values']):

        m1 = [b['m1'] for b in bias_hsmcorr_list[iv]]
        # m2 = [b['m2'] for b in bias_moments_list[iv]]

        pylab.plot(numpy.ones([len(m1)]) * values_float[iv], m1, 'x')
        pylab.errorbar(values_float[iv],
                       numpy.mean(m1),
                       yerr=numpy.std(m1, ddof=1),
                       fmt='o',
                       capsize=30)
        # pylab.plot(numpy.ones([len(m2)])*values_float[iv],m2,'o')

    pylab.ylabel('m1')
    pylab.xlabel(param_name)
    pylab.xlim([min(values_float) * 0.5, max(values_float) * 1.5])
    pylab.legend(ncol=legend_ncol, loc=legend_loc, mode="expand")
    filename_fig = 'fig.hsmcorr.%s.%s.png' % (config['filename_config'],
                                              param_name)
    pylab.savefig(filename_fig)
    pylab.close()
    logging.info('saved figure %s' % filename_fig)

    # now plot the log std of m1 as a function of parameter

    for iv, value in enumerate(param['values']):
        m1 = [b['m1'] for b in bias_moments_list[iv]]
        pylab.plot(values_float[iv], numpy.std(m1, ddof=1), 'x')

    pylab.yscale('log')
    pylab.ylabel(r'std$(m_1)$')
    pylab.xlabel(param_name)
    pylab.xlim([min(values_float) * 0.5, max(values_float) * 1.5])
    # pylab.legend(ncol=legend_ncol,loc=legend_loc,mode="expand")
    filename_fig = 'fig.moments.stdm1.%s.%s.png' % (config['filename_config'],
                                                    param_name)
    pylab.savefig(filename_fig)
    pylab.close()
    logging.info('saved figure %s' % filename_fig)
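
A note on the axis scaling used above: 'symlog' is linear inside +/-linthresh and logarithmic outside, which lets bias values scattered on both sides of zero share one axis (newer matplotlib spells the keyword linthresh rather than the older linthreshy seen here). A minimal self-contained sketch with made-up data:

import numpy as np
import matplotlib.pyplot as plt

x = np.logspace(-1, 3, 20)                  # parameter values spanning decades
m = 1e-3 * np.random.randn(20) * x**0.25    # mock biases straddling zero

plt.plot(x, m, 'x')
plt.xscale('log')
plt.yscale('symlog', linthresh=1e-2)        # linear within |y| < 1e-2, log outside
plt.xlabel('parameter value')
plt.ylabel('m1')
plt.show()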
Beispiel #39
0
def stage_fitplots(T=None,
                   coimgs=None,
                   cons=None,
                   cat=None,
                   targetrd=None,
                   pixscale=None,
                   targetwcs=None,
                   W=None,
                   H=None,
                   bands=None,
                   ps=None,
                   brickid=None,
                   plots=False,
                   plots2=False,
                   tims=None,
                   tractor=None,
                   pipe=None,
                   outdir=None,
                   **kwargs):

    for tim in tims:
        print 'Tim', tim, 'PSF', tim.getPsf()

    writeModels = False

    if pipe:
        t0 = Time()
        # Produce per-band coadds, for plots
        coimgs, cons = compute_coadds(tims, bands, targetwcs)
        print 'Coadds:', Time() - t0

    plt.figure(figsize=(10, 10.5))
    #plt.subplots_adjust(left=0.002, right=0.998, bottom=0.002, top=0.998)
    plt.subplots_adjust(left=0.002, right=0.998, bottom=0.002, top=0.95)

    plt.clf()
    dimshow(get_rgb(coimgs, bands))
    plt.title('Image')
    ps.savefig()

    ax = plt.axis()
    cat = tractor.getCatalog()
    for i, src in enumerate(cat):
        rd = src.getPosition()
        ok, x, y = targetwcs.radec2pixelxy(rd.ra, rd.dec)
        cc = (0, 1, 0)
        if isinstance(src, PointSource):
            plt.plot(x - 1, y - 1, '+', color=cc, ms=10, mew=1.5)
        else:
            plt.plot(x - 1, y - 1, 'o', mec=cc, mfc='none', ms=10, mew=1.5)
        # plt.text(x, y, '%i' % i, color=cc, ha='center', va='bottom')
    plt.axis(ax)
    ps.savefig()

    mnmx = -5, 300
    arcsinha = dict(mnmx=mnmx, arcsinh=1)

    # After plot
    rgbmod = []
    rgbmod2 = []
    rgbresids = []
    rgbchisqs = []

    chibins = np.linspace(-10., 10., 200)
    chihist = [np.zeros(len(chibins) - 1, int) for band in bands]

    wcsW = targetwcs.get_width()
    wcsH = targetwcs.get_height()
    print 'Target WCS shape', wcsW, wcsH

    t0 = Time()
    mods = _map(_get_mod, [(tim, cat) for tim in tims])
    print 'Getting model images:', Time() - t0

    orig_wcsxy0 = [tim.wcs.getX0Y0() for tim in tims]
    for iband, band in enumerate(bands):
        coimg = coimgs[iband]
        comod = np.zeros((wcsH, wcsW), np.float32)
        comod2 = np.zeros((wcsH, wcsW), np.float32)
        cochi2 = np.zeros((wcsH, wcsW), np.float32)
        for itim, (tim, mod) in enumerate(zip(tims, mods)):
            if tim.band != band:
                continue

            #mod = tractor.getModelImage(tim)

            if plots2:
                plt.clf()
                dimshow(tim.getImage(), **tim.ima)
                plt.title(tim.name)
                ps.savefig()
                plt.clf()
                dimshow(mod, **tim.ima)
                plt.title(tim.name)
                ps.savefig()
                plt.clf()
                dimshow((tim.getImage() - mod) * tim.getInvError(), **imchi)
                plt.title(tim.name)
                ps.savefig()

            R = tim_get_resamp(tim, targetwcs)
            if R is None:
                continue
            (Yo, Xo, Yi, Xi) = R
            comod[Yo, Xo] += mod[Yi, Xi]
            ie = tim.getInvError()
            noise = np.random.normal(size=ie.shape) / ie
            noise[ie == 0] = 0.
            comod2[Yo, Xo] += mod[Yi, Xi] + noise[Yi, Xi]
            chi = ((tim.getImage()[Yi, Xi] - mod[Yi, Xi]) *
                   tim.getInvError()[Yi, Xi])
            cochi2[Yo, Xo] += chi**2
            chi = chi[chi != 0.]
            hh, xe = np.histogram(np.clip(chi, -10, 10).ravel(), bins=chibins)
            chihist[iband] += hh

            if not writeModels:
                continue

            im = tim.imobj
            fn = 'image-b%06i-%s-%s.fits' % (brickid, band, im.name)

            wcsfn = create_temp()
            wcs = tim.getWcs().wcs
            x0, y0 = orig_wcsxy0[itim]
            h, w = tim.shape
            subwcs = wcs.get_subimage(int(x0), int(y0), w, h)
            subwcs.write_to(wcsfn)

            primhdr = fitsio.FITSHDR()
            primhdr.add_record(
                dict(name='X0', value=x0, comment='Pixel origin of subimage'))
            primhdr.add_record(
                dict(name='Y0', value=y0, comment='Pixel origin of subimage'))
            xfn = im.wcsfn.replace(decals_dir + '/', '')
            primhdr.add_record(dict(name='WCS_FILE', value=xfn))
            xfn = im.psffn.replace(decals_dir + '/', '')
            primhdr.add_record(dict(name='PSF_FILE', value=xfn))
            primhdr.add_record(dict(name='INHERIT', value=True))

            imhdr = fitsio.read_header(wcsfn)
            imhdr.add_record(
                dict(name='EXTTYPE',
                     value='IMAGE',
                     comment='This HDU contains image data'))
            ivhdr = fitsio.read_header(wcsfn)
            ivhdr.add_record(
                dict(name='EXTTYPE',
                     value='INVVAR',
                     comment='This HDU contains an inverse-variance map'))
            fits = fitsio.FITS(fn, 'rw', clobber=True)
            tim.toFits(fits,
                       primheader=primhdr,
                       imageheader=imhdr,
                       invvarheader=ivhdr)

            imhdr.add_record(
                dict(name='EXTTYPE',
                     value='MODEL',
                     comment='This HDU contains a Tractor model image'))
            fits.write(mod, header=imhdr)
            print 'Wrote image and model to', fn

        comod /= np.maximum(cons[iband], 1)
        comod2 /= np.maximum(cons[iband], 1)

        rgbmod.append(comod)
        rgbmod2.append(comod2)
        resid = coimg - comod
        resid[cons[iband] == 0] = np.nan
        rgbresids.append(resid)
        rgbchisqs.append(cochi2)

        # Plug the WCS header cards into these images
        wcsfn = create_temp()
        targetwcs.write_to(wcsfn)
        hdr = fitsio.read_header(wcsfn)
        os.remove(wcsfn)

        if outdir is None:
            outdir = '.'
        wa = dict(clobber=True, header=hdr)
        for name, img in [('image', coimg), ('model', comod), ('resid', resid),
                          ('chi2', cochi2)]:
            fn = os.path.join(outdir,
                              '%s-coadd-%06i-%s.fits' % (name, brickid, band))
            fitsio.write(fn, img, **wa)
            print 'Wrote', fn

    del cons

    plt.clf()
    dimshow(get_rgb(rgbmod, bands))
    plt.title('Model')
    ps.savefig()

    plt.clf()
    dimshow(get_rgb(rgbmod2, bands))
    plt.title('Model + Noise')
    ps.savefig()

    plt.clf()
    dimshow(get_rgb(rgbresids, bands))
    plt.title('Residuals')
    ps.savefig()

    plt.clf()
    dimshow(get_rgb(rgbresids, bands, mnmx=(-30, 30)))
    plt.title('Residuals (2)')
    ps.savefig()

    plt.clf()
    dimshow(get_rgb(coimgs, bands, **arcsinha))
    plt.title('Image (stretched)')
    ps.savefig()

    plt.clf()
    dimshow(get_rgb(rgbmod2, bands, **arcsinha))
    plt.title('Model + Noise (stretched)')
    ps.savefig()

    del coimgs
    del rgbresids
    del rgbmod
    del rgbmod2

    plt.clf()
    g, r, z = rgbchisqs
    im = np.log10(np.dstack((z, r, g)))
    mn, mx = 0, im.max()
    dimshow(np.clip((im - mn) / (mx - mn), 0., 1.))
    plt.title('Chi-squared')
    ps.savefig()

    plt.clf()
    xx = np.repeat(chibins, 2)[1:-1]
    for y, cc in zip(chihist, 'grm'):
        plt.plot(xx, np.repeat(np.maximum(0.1, y), 2), '-', color=cc)
    plt.xlabel('Chi')
    plt.yticks([])
    plt.axvline(0., color='k', alpha=0.25)
    ps.savefig()

    plt.yscale('log')
    mx = np.max([max(y) for y in chihist])
    plt.ylim(1, mx * 1.05)
    ps.savefig()

    return dict(tims=tims)
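
The per-band chi histograms above are accumulated with np.histogram and later drawn as a step outline by doubling the bin edges with np.repeat. The trick in isolation, on synthetic data:

import numpy as np
import matplotlib.pyplot as plt

chibins = np.linspace(-10., 10., 200)
chihist = np.zeros(len(chibins) - 1, int)
for _ in range(3):                          # accumulate over several mock images
    chi = np.random.randn(10000)
    hh, _ = np.histogram(np.clip(chi, -10, 10), bins=chibins)
    chihist += hh

# np.repeat turns the N+1 edges into 2N x-values that pair up with the
# doubled counts, giving a step outline without plt.hist
xx = np.repeat(chibins, 2)[1:-1]
plt.plot(xx, np.repeat(np.maximum(0.1, chihist), 2), '-')
plt.yscale('log')
plt.xlabel('Chi')
plt.show()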
Beispiel #40
0
scatter(planilha['Jade']['x'],
        planilha['Jade']['y'],
        color='gold',
        label='JADE')
scatter(planilha['Pluto']['x'],
        planilha['Pluto']['y'],
        color='red',
        label='PLUTO')
scatter(planilha['Tasso']['x'],
        planilha['Tasso']['y'],
        color='lime',
        label='TASSO')
scatter(planilha['MarkJ']['x'],
        planilha['MarkJ']['y'],
        color='violet',
        label='MARK-J')

yscale('log')
grid(True, linestyle='--')
legend()
xlabel('$\sqrt{s}$ (GeV)', fontsize=20)
ylabel('$\sigma$ (pb)', fontsize=20)
show()

# Parte 2 --------------------------------------------------------------------------------

barber = read_csv('barber.csv')

y = (10**(-3)) * (4 * pi * hc2 * alpha2 / 3) * (x**(-2))

plot(x, y, color='navy', label='Previsão do Modelo')

errorbar(barber['x'],
Beispiel #41
0
# import libraries
import numpy, pylab
from pylab import *

# plot DOF convergence graph
pylab.yscale("log")
pylab.title("Error convergence")
pylab.xlabel("Degrees of freedom")
pylab.ylabel("Error [%]")
axis('equal')
data = numpy.loadtxt("conv_dof_exact.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, '-s', label="error (exact)")
data = numpy.loadtxt("conv_dof_est.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, '-s', label="error (est)")
legend()

# initialize new window
pylab.figure()

# plot CPU convergence graph
pylab.yscale("log")
pylab.title("Error convergence")
pylab.xlabel("CPU time (s)")
pylab.ylabel("Error [%]")
axis('equal')
data = numpy.loadtxt("conv_cpu_exact.dat")
x = data[:, 0]
Beispiel #42
0
# Plot best-fit result (radio)
P.subplot(121)
P.plot(L_radio, radio_lumfn(L_radio, pcur), 'r-', lw=1.8)
P.plot(L_radio, radio_lumfn(L_radio, p0), 'b-', lw=1.8)

# Symmetrise radio errors
logerr_radio = 0.5 * (np.abs(errp_radio) + np.abs(errm_radio))
err_radio = Phi_radio * (10.**logerr_radio - 1.)
P.errorbar(L_radio,
           Phi_radio,
           yerr=err_radio,
           color='k',
           marker='.',
           ls='none')
#P.errorbar(L_data, phi_data, yerr=[errp, np.abs(errm)], color='k', marker='.')
P.yscale('log')
P.xscale('log')

# Plot best-fit result (optical)
P.subplot(122)
P.plot(mag_gama, optical_lumfn(mag_gama, BAND, pcur), 'r-', lw=1.8)
P.plot(mag_gama, optical_lumfn(mag_gama, BAND, p0), 'b-', lw=1.8)
P.errorbar(mag_gama, Phi_gama, yerr=err_gama, color='k', marker='.', ls='none')
P.gca().invert_xaxis()
#P.errorbar(L_data, phi_data, yerr=[errp, np.abs(errm)], color='k', marker='.')
P.yscale('log')
#P.xscale('log')

P.tight_layout()
P.show()
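
The radio panel above symmetrises errors given in log10(Phi) and converts them to linear errors via err = Phi*(10**logerr - 1), i.e. the distance from Phi up to Phi*10**logerr. A quick numerical check of that identity (values invented):

import numpy as np

Phi = np.array([1e-3, 5e-4])
logerr = np.array([0.1, 0.2])          # symmetric error in log10(Phi)

err = Phi * (10.**logerr - 1.)         # = Phi*10**logerr - Phi
print(err)                             # ~ [2.6e-04, 2.9e-04]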
Beispiel #43
0
def _data_plot(models, X, Y, **kwargs):
    with_legend = False
    use = [0, 0, 0]

    if isinstance(X, basestring): X = [X, None]
    if isinstance(Y, basestring): Y = [Y, None]

    x_prop, x_units = X
    y_prop, y_units = Y

    ret_list = []

    every = kwargs.pop('every', 1)
    upto = kwargs.pop('upto', len(models))
    mark_images = kwargs.pop('mark_images', True)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'm')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'linear')
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)

    kwargs.setdefault('color', 'k')
    kwargs.setdefault('marker', '.')
    kwargs.setdefault('ls', '-')

    normal_kw = {'zorder': 0, 'drawstyle': 'steps', 'alpha': 1.0}
    hilite_kw = {
        'zorder': 1000,
        'drawstyle': 'steps',
        'alpha': 1.0,
        'lw': 4,
        'ls': '--'
    }
    accepted_kw = {'zorder': 500, 'drawstyle': 'steps', 'alpha': 0.5}

    normal = []
    hilite = []
    accepted = []
    #imgs = set()
    imgs = defaultdict(set)
    xmin, xmax = np.inf, -np.inf
    ymin, ymax = np.inf, -np.inf

    objplot = defaultdict(dict)
    for mi in xrange(0, upto, every):
        m = models[mi]

        si = m.get('accepted', 2)
        tag = ''
        if si == False: tag = 'rejected'
        if si == True: tag = 'accepted'

        for [obj, data] in m['obj,data']:

            try:
                xs = data[x_prop][x_units]
                ys = data[y_prop][y_units]

                xlabel = _axis_label(xs, x_units) if not xlabel else xlabel
                ylabel = _axis_label(ys, y_units) if not ylabel else ylabel

                objplot[obj].setdefault(tag, {'ys': [], 'xs': None})
                objplot[obj][tag]['ys'].append(ys)
                objplot[obj][tag]['xs'] = xs

                #objplot[obj].setdefault('%s:xs'%tag, xs)
                #objplot[obj].setdefault('%s:ymax'%tag, ys)
                #objplot[obj].setdefault('%s:ymin'%tag, ys)
                #objplot[obj].setdefault('%s:ysum'%tag, np.zeros_like(ys))
                #objplot[obj].setdefault('%s:count'%tag, 0)

                #objplot[obj]['%s:ymax'%tag]  = np.amax((objplot[obj]['%s:ymax'%tag], ys), axis=0)
                #objplot[obj]['%s:ymin'%tag]  = np.amin((objplot[obj]['%s:ymin'%tag], ys), axis=0)
                #objplot[obj]['%s:ysum'%tag] += ys
                #objplot[obj]['%s:count'%tag] += 1

                if mark_images:
                    for i, src in enumerate(obj.sources):
                        for img in src.images:
                            imgs[i].add(
                                convert('arcsec to %s' % x_units,
                                        np.abs(img.pos), obj.dL, data['nu']))

            except KeyError as bad_key:
                Log("Missing information for object %s with key %s. Skipping plot."
                    % (obj.name, bad_key))
                continue

            use[si] = 1

            s = _styles[si]

            #xmin, xmax = min(xmin, amin(data[X])), max(xmax, amax(data[X]))
            #ymin, ymax = min(ymin, amin(data[Y])), max(ymax, amax(data[Y]))

    for i, tag in enumerate(['rejected', 'accepted', '']):
        for k, v in objplot.iteritems():
            if tag not in v: break

            ys = np.array(v[tag]['ys'])
            xs = np.repeat(np.atleast_2d(v[tag]['xs']), len(ys), axis=0)

            ret_list.append([xs, ys])
            if tag == 'rejected':
                pl.plot(xs, ys, c=_styles[0]['c'], zorder=_styles[0]['z'])
            else:
                pl.plot(xs.T, ys.T, **kwargs)

#   return

    pl.yscale(yscale)
    pl.xscale(xscale)

    si = style_iterator()
    for k, v in imgs.iteritems():
        lw, ls, c = si.next()
        for img_pos in v:
            pl.axvline(img_pos, c=c, ls=ls, lw=lw, zorder=-2, alpha=0.5)


#   if use[0] or use[1]:
#       lines  = [s['line']  for s,u in zip(_styles, use) if u]
#       labels = [s['label'] for s,u in zip(_styles, use) if u]
#       pl.legend(lines, labels)

    if use[0]:
        lines = [_styles[0]['line']]
        labels = [_styles[0]['label']]
        pl.legend(lines, labels)

    #axis('scaled')
    if xlabel: pl.xlabel(xlabel)
    if ylabel: pl.ylabel(ylabel)
    pl.xlim(xmin=pl.xlim()[0] - 0.01 * (pl.xlim()[1] - pl.xlim()[0]))
    #pl.ylim(0, ymax)

    return ret_list
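
_data_plot separates its own options from matplotlib's by draining kwargs with pop before forwarding the rest to the plot call. The pattern reduced to its core (names here are illustrative):

import matplotlib.pyplot as plt

def styled_plot(xs, ys, **kwargs):
    # consume the wrapper's own options first...
    yscale = kwargs.pop('yscale', 'log')
    xlabel = kwargs.pop('xlabel', None)
    kwargs.setdefault('color', 'k')        # a default the caller may override
    # ...then forward whatever remains untouched to matplotlib
    plt.plot(xs, ys, **kwargs)
    plt.yscale(yscale)
    if xlabel:
        plt.xlabel(xlabel)

styled_plot([1, 2, 3], [1, 10, 100], marker='o', yscale='linear')
plt.show()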
########################################################################################################################

Beispiel #44
0
cancer = load_breast_cancer()

X_train, X_test, y_train, y_test = train_test_split(cancer.data,
                                                    cancer.target,
                                                    random_state=0)

svc = SVC()
svc.fit(X_train, y_train)

print("Accuracy on training set: {:.2f}".format(svc.score(X_train, y_train)))
print("Accuracy on test set: {:.2f}".format(svc.score(X_test, y_test)))

plt.boxplot(X_train, manage_xticks=False)
plt.yscale("symlog")
plt.xlabel("Feature index")
plt.ylabel("Feature magnitude")

# Compute the minimum value per feature on the training set
min_on_training = X_train.min(axis=0)
# Compute the range of each feature (max - min) on the training set
range_on_training = (X_train - min_on_training).max(axis=0)

# subtract the min, divide by range
# afterward, min=0 and max=1 for each feature
X_train_scaled = (X_train - min_on_training) / range_on_training
print("Minimum for each feature\n{}".format(X_train_scaled.min(axis=0)))
print("Maximum for each feature\n {}".format(X_train_scaled.max(axis=0)))

# use THE SAME transformation on the test set,
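
The example is cut off here; presumably it continues by applying the training-set minimum and range to the test data before refitting, along these lines (a sketch, not the original continuation):

X_test_scaled = (X_test - min_on_training) / range_on_training

svc = SVC()
svc.fit(X_train_scaled, y_train)
print("Accuracy on test set: {:.2f}".format(svc.score(X_test_scaled, y_test)))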
Beispiel #45
0
def tikhonov_result_out(W, lc, M, folder_name):

    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    mprior = np.zeros(M)  #prior
    lamb = 0.1  # regularization parameter
    start = time.time()
    U, S, VT = np.linalg.svd(W)
    elapsed_time = time.time() - start
    logger.info("elapsed_time (SVD):%f [sec]", elapsed_time)
    nlcurve = 30
    lmin = 0.1
    lmax = 100.0
    lamseq = np.logspace(np.log10(lmin), np.log10(lmax), num=nlcurve)

    modelnormseq = []
    residualseq = []
    curveseq = []

    for lamb in lamseq:
        mest, dpre, residual, modelnorm, curv_lcurve = tikhonov_regularization(
            W, lc, mprior, U, VT, S, lamb)
        modelnormseq.append(modelnorm)
        residualseq.append(residual)
        curveseq.append(curv_lcurve)
        logger.debug("lambda:%f, curv_lcurve: %f", lamb, curv_lcurve)
    residualseq = np.array(residualseq)
    modelnormseq = np.array(modelnormseq)
    imax = np.argmax(curveseq)
    lamb = lamseq[imax]
    logger.info("Best lambda:%f", lamb)

    #define small and large lambda cases
    ismall = imax - 7

    if ismall < 0:
        ismall = 0

    ilarge = imax + 9

    if ilarge > len(residualseq) - 1:
        ilarge = len(residualseq) - 1

    #plot an L-curve
    fig = plt.figure(figsize=(10, 3))
    ax = fig.add_subplot(121)
    pylab.xscale('log')
    pylab.yscale('log')
    pylab.ylabel("Norm of Model", fontsize=12)
    pylab.xlabel("Norm of Prediction Error", fontsize=12)
    ax.plot(residualseq, modelnormseq, marker=".", c="gray")
    ax.plot([residualseq[imax]], [modelnormseq[imax]], marker="o", c="green")
    ax.plot([residualseq[ismall]], [modelnormseq[ismall]], marker="s", c="red")
    ax.plot([residualseq[ilarge]], [modelnormseq[ilarge]],
            marker="^",
            c="blue")
    plt.tick_params(labelsize=12)
    ax2 = fig.add_subplot(122)
    pylab.xscale('log')
    pylab.ylabel("Curvature", fontsize=12)
    pylab.xlabel("Norm of Prediction Error", fontsize=12)
    ax2.plot(residualseq, curveseq, marker=".", c="gray")
    ax2.plot([residualseq[imax]], [curveseq[imax]], marker="o", c="green")
    plt.tick_params(labelsize=12)
    plt.savefig(folder_name + "others/lcurve.pdf",
                bbox_inches="tight",
                pad_inches=0.0)
    plt.close()

    #getting maps!
    mest_lbest, dpre, residual, modelnorm, curv_lcurve = tikhonov_regularization(
        W, lc, mprior, U, VT, S, lamb)
    mest_ls, dpre, residual, modelnorm, curv_lcurve = tikhonov_regularization(
        W, lc, mprior, U, VT, S, lamseq[ismall])
    mest_ll, dpre, residual, modelnorm, curv_lcurve = tikhonov_regularization(
        W, lc, mprior, U, VT, S, lamseq[ilarge])

    dpi = 200
    #best map
    output = io.StringIO()
    sys.stdout = output

    hp.mollview(mest_lbest,
                title="",
                flip="geo",
                cmap=plt.cm.bone,
                min=0,
                max=1.0)
    hp.graticule(color="white")
    plt.savefig(folder_name + "others/best_kw.pdf",
                dpi=dpi,
                bbox_inches="tight")
    plt.close()
    logger.debug("Fit finished")

    np.save(folder_name + "model/best_tik", mest_lbest)

    # too small lambda
    hp.mollview(mest_ls, title="", flip="geo", cmap=plt.cm.bone)
    hp.graticule(color="white")
    plt.savefig(folder_name + "others/small_lam_kw.pdf",
                dpi=dpi,
                bbox_inches="tight")
    plt.close()
    np.save(folder_name + "model/too_small_tik", mest_ls)

    # too large lambda
    hp.mollview(mest_ll, title="", flip="geo", cmap=plt.cm.bone)
    hp.graticule(color="white")
    plt.savefig(folder_name + "others/large_lam_kw.pdf",
                dpi=dpi,
                bbox_inches="tight")
    plt.close()
    np.save(folder_name + "model/too_large_tik", mest_ll)

    sys.stdout = sys.__stdout__
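
tikhonov_regularization itself is defined elsewhere; given the thin SVD W = U S V^T (np.linalg.svd(W, full_matrices=False)), the damped least-squares estimate it presumably computes can be written in a few lines. A sketch under that assumption (the real helper also returns residuals, model norm, and L-curve curvature):

import numpy as np

def tikhonov_solve(U, S, VT, d, lam, mprior=None):
    # minimize |W m - d|^2 + lam^2 |m - mprior|^2 via the SVD of W
    if mprior is None:
        mprior = np.zeros(VT.shape[1])
    r = d - U @ (S * (VT @ mprior))     # data residual of the prior model
    f = S / (S**2 + lam**2)             # damped inverse singular values
    return mprior + VT.T @ (f * (U.T @ r))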
Beispiel #46
0
def _data_error_plot(models, X, Y, **kwargs):
    with_legend = False
    use = [0, 0, 0]

    if isinstance(X, basestring): X = [X, None]
    if isinstance(Y, basestring): Y = [Y, None]

    x_prop, x_units = X
    y_prop, y_units = Y

    ret_list = []

    every = kwargs.pop('every', 1)
    upto = kwargs.pop('upto', len(models))
    mark_images = kwargs.pop('mark_images', True)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'm')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'linear')
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)
    sigma = kwargs.pop('sigma', '1sigma')

    kwargs.setdefault('color', 'k')
    kwargs.setdefault('marker', '.')
    kwargs.setdefault('ls', '-')

    normal_kw = {'zorder': 0, 'drawstyle': 'steps', 'alpha': 1.0}
    hilite_kw = {
        'zorder': 1000,
        'drawstyle': 'steps',
        'alpha': 1.0,
        'lw': 4,
        'ls': '--'
    }
    accepted_kw = {'zorder': 500, 'drawstyle': 'steps', 'alpha': 0.5}

    normal = []
    hilite = []
    accepted = []
    #imgs = set()
    imgs = defaultdict(set)
    xmin, xmax = np.inf, -np.inf
    ymin, ymax = np.inf, -np.inf

    objplot = defaultdict(dict)
    for mi in xrange(0, upto, every):
        m = models[mi]

        si = m.get('accepted', 2)
        #print si
        tag = ''
        if si == False: tag = 'rejected'
        if si == True: tag = 'accepted'

        for [obj, data] in m['obj,data']:

            try:
                xs = data[x_prop][x_units]
                ys = data[y_prop][y_units]

                xlabel = _axis_label(xs, x_units) if not xlabel else xlabel
                ylabel = _axis_label(ys, y_units) if not ylabel else ylabel

                objplot[obj].setdefault(tag, {'ys': [], 'xs': None})
                objplot[obj][tag]['ys'].append(ys)
                objplot[obj][tag]['xs'] = xs

                #objplot[obj].setdefault('%s:xs'%tag, xs)
                #objplot[obj].setdefault('%s:ymax'%tag, ys)
                #objplot[obj].setdefault('%s:ymin'%tag, ys)
                #objplot[obj].setdefault('%s:ysum'%tag, np.zeros_like(ys))
                #objplot[obj].setdefault('%s:count'%tag, 0)

                #objplot[obj]['%s:ymax'%tag]  = np.amax((objplot[obj]['%s:ymax'%tag], ys), axis=0)
                #objplot[obj]['%s:ymin'%tag]  = np.amin((objplot[obj]['%s:ymin'%tag], ys), axis=0)
                #objplot[obj]['%s:ysum'%tag] += ys
                #objplot[obj]['%s:count'%tag] += 1

                if mark_images:
                    for i, src in enumerate(obj.sources):
                        for img in src.images:
                            imgs[i].add(
                                convert('arcsec to %s' % x_units,
                                        np.abs(img.pos), obj.dL, data['nu']))

            except KeyError as bad_key:
                Log("Missing information for object %s with key %s. Skipping plot."
                    % (obj.name, bad_key))
                continue

            use[si] = 1

            s = _styles[si]

            #xmin, xmax = min(xmin, amin(data[X])), max(xmax, amax(data[X]))
            #ymin, ymax = min(ymin, amin(data[Y])), max(ymax, amax(data[Y]))

    for i, tag in enumerate(['rejected', 'accepted', '']):
        for k, v in objplot.iteritems():
            if tag not in v: break
            #if not v.has_key('%s:count'%tag): break

            avg, errp, errm = dist_range(v[tag]['ys'], sigma=sigma)
            errp = errp - avg
            errm = avg - errm
            #s = np.sort(v[tag]['ys'], axis=0)
            #avg = s[len(s)//2] if len(s)%2==1 else (s[len(s)//2] + s[len(s)//2+1])/2
            #print s
            #avg = np.median(v[tag]['ys'], axis=0)
            #print avg
            #print np.median(v[tag]['ys'], axis=1)
            #errp = s[len(s) * .841] - avg
            #errm = avg - s[len(s) * .159]

            #errp = np.amax(v[tag]['ys'], axis=0) - avg
            #errm = avg - np.amin(v[tag]['ys'], axis=0)
            #errp = errm = np.std(v[tag]['ys'], axis=0, dtype=np.float64)
            xs = v[tag]['xs']

            #           print [x[1] for x in v[tag]['ys']]
            #           pl.hist([x[1] for x in v[tag]['ys']])
            #           break

            #avg = v['%s:ysum'%tag] / v['%s:count'%tag]
            #errp = v['%s:ymax'%tag]-avg
            #errm = avg-v['%s:ymin'%tag]
            #errm = errp = np.std(

            #print len(v['xs'])
            #print len(avg)
            #assert 0
            #print len(xs)
            #print len(avg)

            ret_list.append([xs, avg, errm, errp])
            yerr = (errm, errp) if not np.all(errm == errp) else None
            if tag == 'rejected':
                pl.errorbar(xs,
                            avg,
                            yerr=yerr,
                            c=_styles[0]['c'],
                            zorder=_styles[0]['z'])
            else:
                pl.errorbar(xs, avg, yerr=yerr, **kwargs)

#   return

    pl.xscale(xscale)
    pl.yscale(yscale)

    si = style_iterator()
    for k, v in imgs.iteritems():
        lw, ls, c = si.next()
        for img_pos in v:
            pl.axvline(img_pos, c=c, ls=ls, lw=lw, zorder=-2, alpha=0.5)


#   if use[0] or use[1]:
#       lines  = [s['line']  for s,u in zip(_styles, use) if u]
#       labels = [s['label'] for s,u in zip(_styles, use) if u]
#       pl.legend(lines, labels)

    if use[0]:
        lines = [_styles[0]['line']]
        labels = [_styles[0]['label']]
        pl.legend(lines, labels)

    #axis('scaled')
    if xlabel: pl.xlabel(xlabel)
    if ylabel: pl.ylabel(ylabel)
    pl.xlim(xmin=pl.xlim()[0] - 0.01 * (pl.xlim()[1] - pl.xlim()[0]))
    #pl.ylim(0, ymax)

    return ret_list
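
dist_range is assumed here to return a central value plus upper and lower bounds over the model ensemble; a plausible stand-in built on percentiles would be:

import numpy as np

def dist_range(ys, sigma='1sigma'):
    # median and the (15.9, 84.1) or (2.3, 97.7) percentiles across models,
    # i.e. a 1- or 2-sigma-equivalent band (a sketch; the real helper may differ)
    ys = np.asarray(ys)
    lo, hi = {'1sigma': (15.9, 84.1), '2sigma': (2.3, 97.7)}[sigma]
    avg = np.median(ys, axis=0)
    return avg, np.percentile(ys, hi, axis=0), np.percentile(ys, lo, axis=0)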
Beispiel #47
0
def apphot_ps1stars(ccd,
                    ps,
                    apertures,
                    survey,
                    sky_inner_r=40,
                    sky_outer_r=50):
    im = survey.get_image_object(ccd)

    tim = im.get_tractor_image(gaussPsf=True, splinesky=True)
    img = tim.getImage()

    wcs = tim.subwcs

    magrange = (15, 21)
    ps1 = ps1cat(ccdwcs=wcs)
    ps1 = ps1.get_stars(magrange=magrange)
    print 'Got', len(ps1), 'PS1 stars'
    band = ccd.filter
    piband = ps1cat.ps1band[band]
    print 'band:', band

    ps1.cut(ps1.nmag_ok[:, piband] > 0)
    print 'Keeping', len(ps1), 'stars with nmag_ok'

    ok, x, y = wcs.radec2pixelxy(ps1.ra, ps1.dec)
    apxy = np.vstack((x - 1., y - 1.)).T

    ap = []
    aperr = []
    nmasked = []
    with np.errstate(divide='ignore'):
        ie = tim.getInvError()
        imsigma = 1. / ie
        imsigma[ie == 0] = 0
    mask = (imsigma == 0)
    for rad in apertures:
        aper = photutils.CircularAperture(apxy, rad)
        p = photutils.aperture_photometry(img, aper, error=imsigma, mask=mask)
        aperr.append(p.field('aperture_sum_err'))
        ap.append(p.field('aperture_sum'))
        p = photutils.aperture_photometry((ie == 0), aper)
        nmasked.append(p.field('aperture_sum'))
    ap = np.vstack(ap).T
    aperr = np.vstack(aperr).T
    nmasked = np.vstack(nmasked).T

    print 'Aperture fluxes:', ap[:5]
    print 'Aperture flux errors:', aperr[:5]
    print 'Nmasked:', nmasked[:5]

    H, W = img.shape
    sky = []
    skysigma = []
    skymed = []
    skynmasked = []
    for xi, yi in zip(x, y):
        ix = int(np.round(xi))
        iy = int(np.round(yi))
        skyR = sky_outer_r
        xlo = max(0, ix - skyR)
        xhi = min(W, ix + skyR + 1)
        ylo = max(0, iy - skyR)
        yhi = min(H, iy + skyR + 1)
        xx, yy = np.meshgrid(np.arange(xlo, xhi), np.arange(ylo, yhi))
        r2 = (xx - xi)**2 + (yy - yi)**2
        inannulus = ((r2 >= sky_inner_r**2) * (r2 < sky_outer_r**2))
        unmasked = (ie[ylo:yhi, xlo:xhi] > 0)

        #sky.append(np.median(img[ylo:yhi, xlo:xhi][inannulus * unmasked]))

        skypix = img[ylo:yhi, xlo:xhi][inannulus * unmasked]
        # this is the default value...
        nsigma = 4.
        goodpix, lo, hi = sigmaclip(skypix, low=nsigma, high=nsigma)
        # sigmaclip returns unclipped pixels, lo,hi, where lo,hi are
        # mean(goodpix) +- nsigma * sigma
        meansky = np.mean(goodpix)
        sky.append(meansky)
        skysigma.append((meansky - lo) / nsigma)
        skymed.append(np.median(skypix))
        skynmasked.append(np.sum(inannulus * np.logical_not(unmasked)))
    sky = np.array(sky)
    skysigma = np.array(skysigma)
    skymed = np.array(skymed)
    skynmasked = np.array(skynmasked)

    print 'sky', sky[:5]
    print 'median sky', skymed[:5]
    print 'sky sigma', skysigma[:5]

    psmag = ps1.median[:, piband]

    ap2 = ap - sky[:, np.newaxis] * (np.pi * apertures**2)[np.newaxis, :]

    if ps is not None:
        plt.clf()
        nstars, naps = ap.shape
        for iap in range(naps):
            plt.plot(psmag, ap[:, iap], 'b.')
        #for iap in range(naps):
        #    plt.plot(psmag, ap2[:,iap], 'r.')
        plt.yscale('symlog')
        plt.xlabel('PS1 %s mag' % band)
        plt.ylabel('DECam Aperture Flux')

        #plt.plot(psmag, nmasked[:,-1], 'ro')
        plt.plot(np.vstack((psmag, psmag)),
                 np.vstack((np.zeros_like(psmag), nmasked[:, -1])),
                 'r-',
                 alpha=0.5)
        plt.ylim(0, 1e3)
        ps.savefig()

        plt.clf()
        plt.plot(ap.T / np.max(ap, axis=1), '.')
        plt.ylim(0, 1)
        ps.savefig()

        plt.clf()
        dimshow(tim.getImage(), **tim.ima)
        ax = plt.axis()
        plt.plot(x, y, 'o', mec='r', mfc='none', ms=10)
        plt.axis(ax)
        ps.savefig()

    color = ps1_to_decam(ps1.median, band)
    print 'Color terms:', color

    T = fits_table()
    T.apflux = ap.astype(np.float32)
    T.apfluxerr = aperr.astype(np.float32)
    T.apnmasked = nmasked.astype(np.int16)

    # Zero out the errors when pixels are masked
    T.apfluxerr[T.apnmasked > 0] = 0.

    #T.apflux2 = ap2.astype(np.float32)
    T.sky = sky.astype(np.float32)
    T.skysigma = skysigma.astype(np.float32)
    T.expnum = np.array([ccd.expnum] * len(T))
    T.ccdname = np.array([ccd.ccdname] * len(T)).astype('S3')
    T.band = np.array([band] * len(T))
    T.ps1_objid = ps1.obj_id
    T.ps1_mag = psmag + color
    T.ra = ps1.ra
    T.dec = ps1.dec
    T.tai = np.array([tim.time.toMjd()] * len(T)).astype(np.float32)
    T.airmass = np.array([tim.primhdr['AIRMASS']] * len(T)).astype(np.float32)
    T.x = (x + tim.x0).astype(np.float32)
    T.y = (y + tim.y0).astype(np.float32)

    if False:
        plt.clf()
        plt.plot(skymed, sky, 'b.')
        plt.xlabel('sky median')
        plt.ylabel('sigma-clipped sky')
        ax = plt.axis()
        lo, hi = min(ax), max(ax)
        plt.plot([lo, hi], [lo, hi], 'k-', alpha=0.25)
        plt.axis(ax)
        ps.savefig()

    return T, tim.primhdr
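
The sky estimate above leans on scipy.stats.sigmaclip, which iteratively trims values outside mean +/- nsigma*std and returns the surviving pixels together with the final clip bounds; the per-star sigma is then recovered as (mean - lo)/nsigma. In isolation:

import numpy as np
from scipy.stats import sigmaclip

skypix = np.random.normal(100., 5., 2000)
skypix[:20] += 500.                     # a handful of contaminated pixels

goodpix, lo, hi = sigmaclip(skypix, low=4., high=4.)
meansky = np.mean(goodpix)
skysigma = (meansky - lo) / 4.          # lo = mean - 4*sigma of the kept pixels
print(meansky, skysigma)                # ~100, ~5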
Beispiel #48
0
plt.figure(figsize=(10, 10))

lw = 0.5
plt.clf()
plt.plot(f2, rD, linewidth=lw)
plt.title("Signal")
plt.xlabel("Frequency [MHz]")
plt.ylabel("Temperature [K]")
plt.savefig("signal.png")

plt.clf()
plt.plot(f2, np.abs(np.fft.fftshift(np.fft.fft(rD))), linewidth=lw)
plt.title("Signal FFT")
plt.xlabel("Frequency [MHz]")
plt.ylabel("Temperature [K]")
plt.yscale("log")
plt.savefig("signal_fft.png")

plt.clf()
plt.plot(f2, filter(rD), linewidth=lw)
plt.title("Filtered Signal")
plt.xlabel("Frequency [MHz]")
plt.ylabel("Temperature [K]")
plt.savefig("filtered_low.png")

plt.clf()
plt.plot(f2, np.abs(np.fft.fftshift(np.fft.fft(filter(rD)))), linewidth=lw)
plt.title("Filtered Signal FFT")
plt.xlabel("Frequency [MHz]")
plt.ylabel("Temperature [K]")
plt.yscale("log")
    def GasMassFunction(self, G):

        print('Plotting the cold gas mass function')

        plt.figure()  # New figure
        ax = plt.subplot(111)  # 1 plot on the figure

        binwidth = 0.1  # mass function histogram bin width

        # calculate all
        w = np.where(G.ColdGas > 0.0)[0]
        mass = np.log10(G.ColdGas[w] * 1.0e10 / self.Hubble_h)
        sSFR = (G.SfrDisk[w] + G.SfrBulge[w]) / (G.StellarMass[w] * 1.0e10 /
                                                 self.Hubble_h)
        mi = np.floor(min(mass)) - 2
        ma = np.floor(max(mass)) + 2
        NB = int((ma - mi) / binwidth)

        (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB)

        # Set the x-axis values to be the centre of the bins
        xaxeshisto = binedges[:-1] + 0.5 * binwidth

        # additionally calculate red
        w = np.where(sSFR < 10.0**sSFRcut)[0]
        massRED = mass[w]
        (countsRED, binedges) = np.histogram(massRED, range=(mi, ma), bins=NB)

        # additionally calculate blue
        w = np.where(sSFR > 10.0**sSFRcut)[0]
        massBLU = mass[w]
        (countsBLU, binedges) = np.histogram(massBLU, range=(mi, ma), bins=NB)

        # Baldry+ 2008 modified data used for the MCMC fitting
        Zwaan = np.array([[6.933, -0.333], [7.057, -0.490], [7.209, -0.698],
                          [7.365, -0.667], [7.528, -0.823], [7.647, -0.958],
                          [7.809, -0.917], [7.971, -0.948], [8.112, -0.927],
                          [8.263, -0.917], [8.404, -1.062], [8.566, -1.177],
                          [8.707, -1.177], [8.853, -1.312], [9.010, -1.344],
                          [9.161, -1.448], [9.302, -1.604], [9.448, -1.792],
                          [9.599, -2.021], [9.740, -2.406], [9.897, -2.615],
                          [10.053, -3.031], [10.178, -3.677], [10.335, -4.448],
                          [10.492, -5.083]],
                         dtype=np.float32)

        ObrRaw = np.array([[7.300, -1.104], [7.576, -1.302], [7.847, -1.250],
                           [8.133, -1.240], [8.409, -1.344], [8.691, -1.479],
                           [8.956, -1.792], [9.231, -2.271], [9.507, -3.198],
                           [9.788, -5.062]],
                          dtype=np.float32)

        ObrCold = np.array([[8.009, -1.042], [8.215, -1.156], [8.409, -0.990],
                            [8.604, -1.156], [8.799, -1.208], [9.020, -1.333],
                            [9.194, -1.385], [9.404, -1.552], [9.599, -1.677],
                            [9.788, -1.812], [9.999, -2.312], [10.172, -2.656],
                            [10.362, -3.500], [10.551, -3.635],
                            [10.740, -5.010]],
                           dtype=np.float32)

        ObrCold_xval = np.log10(10**(ObrCold[:, 0]) / self.Hubble_h /
                                self.Hubble_h)
        ObrCold_yval = (10**(ObrCold[:, 1]) * self.Hubble_h * self.Hubble_h *
                        self.Hubble_h)
        Zwaan_xval = np.log10(10**(Zwaan[:, 0]) / self.Hubble_h /
                              self.Hubble_h)
        Zwaan_yval = (10**(Zwaan[:, 1]) * self.Hubble_h * self.Hubble_h *
                      self.Hubble_h)
        ObrRaw_xval = np.log10(10**(ObrRaw[:, 0]) / self.Hubble_h /
                               self.Hubble_h)
        ObrRaw_yval = (10**(ObrRaw[:, 1]) * self.Hubble_h * self.Hubble_h *
                       self.Hubble_h)

        plt.plot(ObrCold_xval,
                 ObrCold_yval,
                 color='black',
                 lw=7,
                 alpha=0.25,
                 label='Obr. \& Raw. 2009 (Cold Gas)')
        plt.plot(Zwaan_xval,
                 Zwaan_yval,
                 color='cyan',
                 lw=7,
                 alpha=0.25,
                 label='Zwaan et al. 2005 (HI)')
        plt.plot(ObrRaw_xval,
                 ObrRaw_yval,
                 color='magenta',
                 lw=7,
                 alpha=0.25,
                 label='Obr. \& Raw. 2009 (H2)')

        # Overplot the model histograms
        plt.plot(xaxeshisto,
                 counts / self.volume * self.Hubble_h * self.Hubble_h *
                 self.Hubble_h / binwidth,
                 'k-',
                 label='Model - Cold Gas')

        plt.yscale('log', nonposy='clip')
        plt.axis([8.0, 11.5, 1.0e-6, 1.0e-1])

        # Set the x-axis minor ticks
        ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1))

        plt.ylabel(
            r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$')  # Set the y...
        plt.xlabel(r'$\log_{10} M_{\mathrm{X}}\ (M_{\odot})$'
                   )  # and the x-axis labels

        leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = OutputDir + '3.GasMassFunction' + OutputFormat
        plt.savefig(outputFile)  # Save the figure
        print('Saved file to', outputFile)
        plt.close()

        # Add this plot to our output list
        OutputList.append(outputFile)
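
The conversion from raw bin counts to the plotted phi divides by the simulation volume and the bin width, with the h**3 factor taking the volume from (Mpc/h)^3 to Mpc^3. The normalisation on its own (a sketch with symbolic inputs):

import numpy as np

def mass_function(counts, volume, binwidth, hubble_h):
    # counts: galaxies per log-mass bin; volume in (Mpc/h)^3; binwidth in dex
    # returns phi in Mpc^-3 dex^-1
    return counts / volume * hubble_h**3 / binwidth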
Beispiel #50
0
def plot_AM_correlation(mcmodel,
                        startvariances=None,
                        variables=None,
                        trim=0,
                        thin=1,
                        plotcov=True,
                        plotcorrelation=True,
                        plotvalues=True,
                        plotdeviance=False):
    """Plot correlation or covariance from AdaptativeMetropolis traces.

    mcmodel -- a pymc MCMC object with a db containing an
    AdaptativeMetropolis trace.

    """
    oldnumpyerrsettings = numpy.seterr(invalid='ignore')
    cname = None
    for key in mcmodel.db.trace_names[-1]:
        if key.startswith('AdaptiveMetropolis'):
            cname = key
    #print cname
    if cname is None:
        print "Could not find an AdaptiveMetropolis trace."
        return
    Ctrace = mcmodel.db.trace(cname)[trim::thin]
    indices = numpy.arange(trim, len(mcmodel.db.trace(cname)[:]), thin)

    ### Figure out order of stochastics. ###
    positions = []
    stochlist = list(mcmodel.stochastics.copy())
    icount = 0
    olength = len(stochlist)
    cname = cname.replace('AdaptiveMetropolis', '')
    while len(stochlist) > 0:
        icount += 1
        stoch = stochlist.pop()
        print stoch
        if cname.count(stoch.__name__) == 0:
            print "Couldn't find %s in %s" % (stoch.__name__, cname)
            continue
        positions.append([stoch, cname.find('_' + stoch.__name__)])

    # Sort list by position in cname string.
    positions.sort(key=lambda l: l[1])
    stochlist = [l[0] for l in positions]
    names = [s.__name__ for s in stochlist]
    title = " ".join(names)
    print title
    covlist = []
    fig1 = pylab.figure()
    fig1.subplots_adjust(right=0.7)
    fig1.set_label('AMcorrelations')
    ax1 = pylab.gca()
    divider = make_axes_locatable(ax1)
    if plotvalues:
        ax2 = divider.append_axes("bottom", 1.5, pad=0.0, sharex=ax1)
        fig1.sca(ax1)
    if plotdeviance:
        ax3 = divider.append_axes("top", 1.5, pad=0.0, sharex=ax1)
        fig1.sca(ax1)

    pylab.title(cname)
    plottedinds = set([])
    inds = set(range(Ctrace.shape[1]))
    colors = ['r', 'g', 'b', 'c', 'm', 'k', 'y']
    for (i, stoch) in enumerate(stochlist):
        if variables is not None:
            if stoch not in variables:
                continue
        plottedinds.add(i)
        if plotvalues:
            ax2.plot(indices, mcmodel.db.trace(stoch.__name__)[trim::thin])
        if plotdeviance:
            ax3.plot(indices, mcmodel.db.trace('deviance')[trim::thin])
        if not plotcorrelation:
            lines = pylab.plot(indices,
                               Ctrace[:, i, i]**0.5,
                               alpha=0.5,
                               lw=3.0,
                               label=names[i] + " stdev",
                               color=colors[i])
            if startvariances is not None:
                pylab.axhline(y=startvariances[stoch]**0.5,
                              ls='-',
                              c=lines[0]._color,
                              lw=1.5)
        else:
            lines = pylab.plot(indices,
                               (Ctrace[:, i, i] / Ctrace[-1, i, i])**0.5,
                               alpha=0.5,
                               lw=3.0,
                               label=names[i] + " stdev/stdev_final",
                               color=colors[i])
            if startvariances is not None:
                pylab.axhline(y=(startvariances[stoch] /
                                 Ctrace[-1, i, i])**0.5,
                              ls='-',
                              c=colors[i],
                              lw=1.5)

        if not plotcov:
            continue
        for j in inds.difference(plottedinds):
            if plotcorrelation:
                cov = Ctrace[:, i,
                             j] / (Ctrace[:, i, i]**0.5 * Ctrace[:, j, j]**0.5)
                mag = abs(cov)
            else:
                cov = Ctrace[:, i, j]
                mag = abs(cov)**0.5
            sign = (cov > 0) * 1 + (cov <= 0) * -1
            covlist.append([names[i] + ' * ' + names[j], mag[-1], sign[-1]])
            pylab.plot(indices,
                       sign * mag,
                       alpha=0.9,
                       lw=3.0,
                       c=colors[i],
                       ls='--')
            pylab.plot(indices,
                       -sign * mag,
                       alpha=0.9,
                       lw=3.0,
                       c=colors[i],
                       ls=':')
            pylab.plot(indices,
                       mag,
                       alpha=0.9,
                       lw=1.0,
                       color=colors[j],
                       ls='-',
                       label=names[i] + ' * ' + names[j])

    covlist.sort(key=lambda l: l[1], reverse=True)
    for l in covlist:
        print "%50s: %.3g" % (l[0], l[1] * l[2])
    # draw the legend once, after listing the covariances
    #pylab.legend(loc='upper left', bbox_to_anchor=(1.00,1.0,0.25,-1.0))
    pylab.legend(loc=(1.0, 0.0))
    if plotcorrelation:
        pylab.ylim(ymin=0)
    else:
        pylab.yscale('log')
    pylab.draw()
    numpy.seterr(**oldnumpyerrsettings)
Beispiel #51
0
    def StellarMassFunction(self, G):

        print('Plotting the stellar mass function')

        plt.figure()  # New figure
        ax = plt.subplot(111)  # 1 plot on the figure

        binwidth = 0.1  # mass function histogram bin width

        # calculate all
        w = np.where(G.StellarMass > 0.0)[0]
        mass = np.log10(G.StellarMass[w] * 1.0e10 / self.Hubble_h)
        sSFR = (G.SfrDisk[w] + G.SfrBulge[w]) / (G.StellarMass[w] * 1.0e10 /
                                                 self.Hubble_h)

        mi = np.floor(min(mass)) - 2
        ma = np.floor(max(mass)) + 2
        NB = int((ma - mi) / binwidth)

        (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB)

        # Set the x-axis values to be the centre of the bins
        xaxeshisto = binedges[:-1] + 0.5 * binwidth

        # additionally calculate red
        w = np.where(sSFR < 10.0**sSFRcut)[0]
        massRED = mass[w]
        (countsRED, binedges) = np.histogram(massRED, range=(mi, ma), bins=NB)

        # additionally calculate blue
        w = np.where(sSFR > 10.0**sSFRcut)[0]
        massBLU = mass[w]
        (countsBLU, binedges) = np.histogram(massBLU, range=(mi, ma), bins=NB)

        # Baldry+ 2008 modified data used for the MCMC fitting
        Baldry = np.array([
            [7.05, 1.3531e-01, 6.0741e-02],
            [7.15, 1.3474e-01, 6.0109e-02],
            [7.25, 2.0971e-01, 7.7965e-02],
            [7.35, 1.7161e-01, 3.1841e-02],
            [7.45, 2.1648e-01, 5.7832e-02],
            [7.55, 2.1645e-01, 3.9988e-02],
            [7.65, 2.0837e-01, 4.8713e-02],
            [7.75, 2.0402e-01, 7.0061e-02],
            [7.85, 1.5536e-01, 3.9182e-02],
            [7.95, 1.5232e-01, 2.6824e-02],
            [8.05, 1.5067e-01, 4.8824e-02],
            [8.15, 1.3032e-01, 2.1892e-02],
            [8.25, 1.2545e-01, 3.5526e-02],
            [8.35, 9.8472e-02, 2.7181e-02],
            [8.45, 8.7194e-02, 2.8345e-02],
            [8.55, 7.0758e-02, 2.0808e-02],
            [8.65, 5.8190e-02, 1.3359e-02],
            [8.75, 5.6057e-02, 1.3512e-02],
            [8.85, 5.1380e-02, 1.2815e-02],
            [8.95, 4.4206e-02, 9.6866e-03],
            [9.05, 4.1149e-02, 1.0169e-02],
            [9.15, 3.4959e-02, 6.7898e-03],
            [9.25, 3.3111e-02, 8.3704e-03],
            [9.35, 3.0138e-02, 4.7741e-03],
            [9.45, 2.6692e-02, 5.5029e-03],
            [9.55, 2.4656e-02, 4.4359e-03],
            [9.65, 2.2885e-02, 3.7915e-03],
            [9.75, 2.1849e-02, 3.9812e-03],
            [9.85, 2.0383e-02, 3.2930e-03],
            [9.95, 1.9929e-02, 2.9370e-03],
            [10.05, 1.8865e-02, 2.4624e-03],
            [10.15, 1.8136e-02, 2.5208e-03],
            [10.25, 1.7657e-02, 2.4217e-03],
            [10.35, 1.6616e-02, 2.2784e-03],
            [10.45, 1.6114e-02, 2.1783e-03],
            [10.55, 1.4366e-02, 1.8819e-03],
            [10.65, 1.2588e-02, 1.8249e-03],
            [10.75, 1.1372e-02, 1.4436e-03],
            [10.85, 9.1213e-03, 1.5816e-03],
            [10.95, 6.1125e-03, 9.6735e-04],
            [11.05, 4.3923e-03, 9.6254e-04],
            [11.15, 2.5463e-03, 5.0038e-04],
            [11.25, 1.4298e-03, 4.2816e-04],
            [11.35, 6.4867e-04, 1.6439e-04],
            [11.45, 2.8294e-04, 9.9799e-05],
            [11.55, 1.0617e-04, 4.9085e-05],
            [11.65, 3.2702e-05, 2.4546e-05],
            [11.75, 1.2571e-05, 1.2571e-05],
            [11.85, 8.4589e-06, 8.4589e-06],
            [11.95, 7.4764e-06, 7.4764e-06],
        ],
                          dtype=np.float32)

        # Finally plot the data
        # plt.errorbar(
        #     Baldry[:, 0],
        #     Baldry[:, 1],
        #     yerr=Baldry[:, 2],
        #     color='g',
        #     linestyle=':',
        #     lw = 1.5,
        #     label='Baldry et al. 2008',
        #     )

        Baldry_xval = np.log10(10**Baldry[:, 0] / self.Hubble_h /
                               self.Hubble_h)
        if (whichimf == 1):
            Baldry_xval = Baldry_xval - 0.26  # convert back to Chabrier IMF
        Baldry_yvalU = (Baldry[:, 1] + Baldry[:, 2]
                        ) * self.Hubble_h * self.Hubble_h * self.Hubble_h
        Baldry_yvalL = (Baldry[:, 1] - Baldry[:, 2]
                        ) * self.Hubble_h * self.Hubble_h * self.Hubble_h

        plt.fill_between(Baldry_xval,
                         Baldry_yvalU,
                         Baldry_yvalL,
                         facecolor='purple',
                         alpha=0.25,
                         label='Baldry et al. 2008 (z=0.1)')

        # This next line is just to get the shaded region to appear correctly in the legend
        plt.plot(xaxeshisto,
                 counts / self.volume * self.Hubble_h * self.Hubble_h *
                 self.Hubble_h / binwidth,
                 label='Baldry et al. 2008',
                 color='purple',
                 alpha=0.3)

        # # Cole et al. 2001 SMF (h=1.0 converted to h=0.73)
        # M = np.arange(7.0, 13.0, 0.01)
        # Mstar = np.log10(7.07*1.0e10 /self.Hubble_h/self.Hubble_h)
        # alpha = -1.18
        # phistar = 0.009 *self.Hubble_h*self.Hubble_h*self.Hubble_h
        # xval = 10.0 ** (M-Mstar)
        # yval = np.log(10.) * phistar * xval ** (alpha+1) * np.exp(-xval)
        # plt.plot(M, yval, 'g--', lw=1.5, label='Cole et al. 2001')  # Plot the SMF

        # Overplot the model histograms
        plt.plot(xaxeshisto,
                 counts / self.volume * self.Hubble_h * self.Hubble_h *
                 self.Hubble_h / binwidth,
                 'k-',
                 label='Model - All')
        plt.plot(xaxeshisto,
                 countsRED / self.volume * self.Hubble_h * self.Hubble_h *
                 self.Hubble_h / binwidth,
                 'r:',
                 lw=2,
                 label='Model - Red')
        plt.plot(xaxeshisto,
                 countsBLU / self.volume * self.Hubble_h * self.Hubble_h *
                 self.Hubble_h / binwidth,
                 'b:',
                 lw=2,
                 label='Model - Blue')

        plt.yscale('log', nonposy='clip')
        plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1])

        # Set the x-axis minor ticks
        ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1))

        plt.ylabel(
            r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$')  # Set the y...
        plt.xlabel(r'$\log_{10} M_{\mathrm{stars}}\ (M_{\odot})$'
                   )  # and the x-axis labels

        plt.text(12.2, 0.03, whichsimulation, size='large')

        leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = OutputDir + '1.StellarMassFunction' + OutputFormat
        plt.savefig(outputFile)  # Save the figure
        print('Saved file to', outputFile)
        plt.close()

        # Add this plot to our output list
        OutputList.append(outputFile)
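
The commented-out Cole et al. block above evaluates a Schechter function in log-mass: phi(logM) = ln(10) * phistar * x**(alpha+1) * exp(-x) with x = M/Mstar. As a standalone sketch:

import numpy as np

def schechter_per_dex(logM, logMstar, alpha, phistar):
    # Schechter mass function per dex, following the commented block above
    x = 10.0**(logM - logMstar)
    return np.log(10.) * phistar * x**(alpha + 1) * np.exp(-x)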
Beispiel #52
0
def weak_bounds():

    pylab.ion()
    pylab.figure(1, figsize=figsize)
    pylab.clf()

    qvec = [0.001, 0.01, 0.1]

    m = np.logspace(0, 5)
    pylab.loglog(m, m, 'k:', linewidth=lw)

    cvec = ['b', 'c', 'g']
    """
	for (q, c) in zip(qvec, cvec):
		K = np.log(q)/np.log(1-q)
		M = np.linspace(1, K)
		MM = np.logspace(np.log10(M[-1]), 5)
		SS = np.zeros(len(MM)) + K
		SS[MM < K] = MM[MM < K]
		SS[MM > 2**K] += np.log2(MM[MM > 2**K] - K)
		pylab.loglog(MM, SS, c + '-', linewidth=1)	
	"""

    for (q, c) in zip(qvec, cvec):
        K = np.log(q) / np.log(1 - q)
        print q, K
        M = np.linspace(1, K)
        S = (1 - q - (1 - q)**M) / q
        MM = np.array(M.tolist() + np.logspace(np.log10(M[-1]), 5).tolist())
        SS = np.array(S.tolist() + [S[-1]] * 50)
        SL = SS * 0 + K
        SL[MM < K] = MM[MM < K]
        SL[MM > 2**K] += np.log2(MM[MM > 2**K] - K)
        pylab.fill_between(MM, SL, SS, color=c, alpha=0.2)

    A = 1
    for (q, c) in zip(qvec, cvec):
        p = 1 - q

        R = np.log(q) / np.log(1 - q)

        B = p**np.arange(1, R)
        D = np.ones(len(B))

        B = np.concatenate([B, q * p**np.arange(0, R)])
        D = np.concatenate([D, np.arange(R)])

        for pow in range(2, 88):
            B = np.concatenate([B, q**pow * p**np.arange(0, R)])
            D = np.concatenate([D, sm.comb(np.arange(R), pow)])
            assert len(B) == len(D), 'len(B) != len(D)'
            if len(B) > 10**8:
                print pow, 'breaking'
                break

        B = np.concatenate(([1], B))
        D = np.concatenate(([1], D))
        i = B.argsort()[::-1]
        B = (D[i] * B[i]).cumsum()
        D = D[i].cumsum()
        j = np.nonzero((D >= A) & (D <= 10**5))[0]
        #pylab.loglog(np.arange(A, 100001), A*C[np.arange(A-1, 100000)/A])
        pylab.loglog(D[j], A * B[j // A], c, linewidth=lw)
        pylab.draw()

    pylab.loglog(m, np.log2(m + 1), 'purple', linewidth=lw)

    pylab.yscale('log')
    pylab.xscale('log')

    #pylab.loglog(MM, SS, c + '-', linewidth=1)

    pylab.xlabel('Number of cores $(J)$', fontsize=fs)
    pylab.ylabel('Expected speedup $(E[S_J])$', fontsize=fs)
    pylab.title('Expected speedup with simple bounds', fontsize=fs)
    pylab.legend(['$E[S_J] = J$'] + [('$q = %1.4f' % q).strip('0') + '$'
                                     for q in qvec] +
                 ['$E[S_J] = \log_2 (J+1)$'],
                 loc='upper left',
                 fontsize=fs)
    pylab.xticks(fontsize=fs)
    pylab.yticks(fontsize=fs)
    pylab.axis((1, 10**4, 1, 10**4))
    pylab.savefig('../figs/expected-speedup.pdf')
Beispiel #53
0
def main():

    if (len(sys.argv) != 5):
        print()
        print("##################################")
        print("Ariel J. Amsellem")
        print("*****@*****.**")
        print("KICP UChicago")
        print("##################################\n")

        print(
            "run_Multinest.py - Run Multinest on Sigmag Measurements to determine measure of best fit with errors."
        )
        print(
            "Usage: python run_Multinest.py [output directory/file name] [data filename] [color] [plot label/title]"
        )
        print(
            "Example: python run_Multinest.py Fiducial_RM splashback_cov_Fiducial_RM.npz    r    Fiducial_Redmapper"
        )
        sys.exit(0)

    out_directory = str(sys.argv[1])
    dat_filename = str(sys.argv[2])
    color = str(sys.argv[3])
    label = str(sys.argv[4])
    label = label.replace("_", " ")

    # Scipy Minimization
    # Load Data
    data = np.load(
        '/Users/arielamsellem/Desktop/Research/splashback_codes_master/npzs/' +
        dat_filename)
    sigmag = data['sg_mean']
    sigmag_sig = data['sg_sig']
    sigmag_cov = data['cov']
    rperp = data['r_data']

    # Priors
    log_alpha = -0.32085983 - 0.1
    log_beta = 0.16309539
    log_gamma = 0.64815634
    log_r_s = 0.85387196 - 0.1
    log_r_t = 0.08325509
    log_rho_0 = -0.8865869 - 0.5
    log_rho_s = -0.19838697 - 0.3
    se = 1.3290722
    ln_mis = -1.146114384
    f_mis = 0.15857366
    # Chihway: alpha, beta, gamma, r_s, r_t, rho_0, rho_s, se, ln_mis, f_mis
    params = np.array([
        log_alpha, log_beta, log_gamma, log_r_s, log_r_t, log_rho_0, log_rho_s,
        se, ln_mis, f_mis
    ])

    # Minimized Splashback Model of Data
    print('Running Scipy Minimize...')
    print('')
    nll = lambda *args: -1 * lnlikelihood(*args)
    p0 = params.copy()
    bounds = ((None, None), (None, None), (None, None), (np.log10(0.1 / h0),
                                                         np.log10(5.0 / h0)),
              (np.log10(0.1 / h0), np.log10(5.0 / h0)), (None, None),
              (None, None), (-10., 10.), (np.log(0.01), np.log(0.99)), (0.01,
                                                                        0.99))
    data_vec = sigmag.copy()
    invcov = np.linalg.inv(sigmag_cov.copy())
    args = (rperp, z, data_vec, invcov, h0, 1)
    result = op.minimize(nll,
                         p0,
                         args=args,
                         options={'maxiter': 200},
                         bounds=bounds)
    best_params = result.x
    best_lnlike = -result.fun

    # Scipy Stats
    model = Sigmag(rperp, z, best_params, h0, 1)
    diff = data_vec - model
    chisq_min = np.dot(diff, np.dot(invcov, diff))
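    # This is the generalized chi-square, chi^2 = (d - m)^T C^{-1} (d - m),
    # with d the data vector, m the model and C the covariance; for a
    # well-estimated covariance it should come out near len(d) minus the
    # number of fitted parameters at the best fit.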

    # Defining the Multinest Function
    def run_multinest(rperp, sigmag, invcov, splashback, outfile):
        def Prior(cube, ndim, nparams):
            # Sigma Values are from Chang 2018 Table 2. Each sigma is half a prior range
            cube[0] = gaussian(np.log10(0.19), 0.2, cube[0])  # log(alpha)
            cube[1] = gaussian(np.log10(6.), 0.2, cube[1])  # log(beta)
            cube[2] = gaussian(np.log10(4.), 0.2, cube[2])  # log(gamma)
            cube[3] = uniform(0.1, 5., cube[3])  # r_s
            cube[4] = uniform(0.1, 5., cube[4])  # r_t
            cube[5] = uniform(0., 10., cube[5])  # rho_0
            cube[6] = uniform(0., 10., cube[6])  # rho_s
            cube[7] = uniform(1., 10., cube[7])  # s_e
            cube[8] = gaussian(-1.13, 0.22, cube[8])  # ln(c_mis)
            cube[9] = gaussian(0.22, 0.11, cube[9])  # f_mis

        def Loglike(cube, ndim, nparams):
            # Read in parameters
            log_alpha = cube[0]
            log_beta = cube[1]
            log_gamma = cube[2]
            r_s = cube[3]
            r_t = cube[4]
            rho_0 = cube[5]
            rho_s = cube[6]
            se = cube[7]
            ln_mis = cube[8]
            f_mis = cube[9]
            params = [
                log_alpha, log_beta, log_gamma, r_s, r_t, rho_0, rho_s, se,
                ln_mis, f_mis
            ]

            # Calculate likelihood
            sig_m = Sigmag(rperp, z, params, h0, splashback)
            vec = sig_m - sigmag
            likelihood = -0.5 * np.matmul(np.matmul(vec, invcov), vec.T)

            # Calculate prior
            #prior = -0.5*(-1.13-ln_mis)**2/0.22**2 - 0.5*(log_alpha - np.log10(0.19))**2/0.4**2 - 0.5*(log_beta - np.log10(6.0))**2/0.4**2 - 0.5*(log_gamma - np.log10(4.0))**2/0.4**2  -0.5*(f_mis-0.22)**2/0.11**2
            prior = 0.

            # Total probability
            tot = likelihood + prior

            return tot

        # Run Multinest
        mult.run(Loglike,
                 Prior,
                 10,
                 outputfiles_basename=outfile,
                 verbose=False)
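
    # Note: pymultinest's Prior callback transforms samples from the unit
    # hypercube in place; `gaussian` and `uniform` used above are assumed to
    # be helper transforms defined elsewhere in this module, presumably of
    # the form
    #   uniform(lo, hi, u)   -> lo + u * (hi - lo)
    #   gaussian(mu, sig, u) -> mu + sig * sqrt(2) * erfinv(2*u - 1)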

    # Saving Results
    os.mkdir('/Users/arielamsellem/Desktop/Research/Multinest/' +
             out_directory)
    out_filename = out_directory
    out_directory = '/Users/arielamsellem/Desktop/Research/Multinest/' + out_directory + '/'
    out_filename = out_directory + out_filename

    # Run Multinest
    run_multinest(rperp, sigmag, invcov, 1, out_filename)

    # Save Output to File "log.txt"
    stdoutOrigin = sys.stdout
    sys.stdout = open(out_directory + "log.txt", "w")
    # Read in Multinest Results
    # Unequal Weights
    #multinest_out = np.genfromtxt(out_filename + '.txt')
    #samples_txt = multinest_out[:,2:]
    #likelihood_txt = -1.*multinest_out[:,1]/2
    # Equal Weights
    multinest_out = np.genfromtxt(out_filename + 'post_equal_weights.dat')
    samples_txt = multinest_out[:, :-1]
    likelihood_txt = multinest_out[:, -1]

    # Multinest Best Parameters
    analyzer = mult.analyse.Analyzer(10,
                                     outputfiles_basename=(out_filename),
                                     verbose=False)
    bestfit_params_multinest = analyzer.get_best_fit()
    best_params_mult = bestfit_params_multinest['parameters']
    best_loglike_mult = bestfit_params_multinest['log_likelihood']

    # Multinest Stats
    model_mult = Sigmag(rperp, z, best_params_mult, h0, 1)
    diff_mult = data_vec - model_mult
    chisq_mult = np.dot(diff_mult, np.dot(invcov, diff_mult))

    print("Best Parameters From Minimization: " + str(best_params))
    print("Loglike From Minimization: " + str(best_lnlike))
    print("Best Parameters From Multinest: " + str(best_params_mult))
    print("Loglike From Multinest: " + str(best_loglike_mult))
    print("Chi-Squared Scipy Minimize: " + str(chisq_min))
    print("Chi-Squared Multinest: " + str(chisq_mult))
    sys.stdout.close()
    sys.stdout = stdoutOrigin

    # Get Rho Values and Error Range
    low, high = profile_range(samples_txt, rperp, z, 16, 84)
    r_rho, r_rhoderiv, rho, drho, rho_i, rho_o = find_rho_drho(best_params)
    r_rho_mult, r_rhoderiv_mult, rho_mult, drho_mult, rho_i_mult, rho_o_mult = find_rho_drho(
        best_params_mult)

    # Plot Results
    print('')
    print('Plotting Results...')
    samples = MCSamples(samples=samples_txt,
                        loglikes=likelihood_txt,
                        names=[
                            'alpha', 'beta', 'gamma', 'rs', 'rt', 'rho0',
                            'rhos', 'se', 'lnmis', 'fmis'
                        ],
                        labels=[
                            '\\alpha', '\\beta', '\\gamma', 'r_s', 'r_t',
                            '\\rho_0', '\\rho_s', 's_e', 'ln(c_{mis})',
                            'f_{mis}'
                        ])

    # Triangle Plot
    sns.set_style("white")
    g = plots.getSubplotPlotter(width_inch=12)
    g.triangle_plot(samples,
                    filled=True,
                    colors=[color],
                    lw=[3],
                    line_args=[{
                        'lw': 2,
                        'color': 'k'
                    }])
    plt.savefig(out_directory + 'Triangle_Multinest.png', dpi=600)

    # Plot Error Region Around \\rho Derivative
    sns.set_style("whitegrid")
    fig = plt.figure(figsize=(20, 10))
    plt.suptitle(label, fontsize=23, fontweight=900)
    plt.subplot(121)
    plt.semilogx(r_rho,
                 rho,
                 color=color,
                 label='Splashback Fit (Scipy)',
                 linewidth=1)
    plt.semilogx(r_rho,
                 rho_i,
                 color=color,
                 label='Scipy Inner Profile',
                 linewidth=1,
                 linestyle='--')
    plt.semilogx(r_rho,
                 rho_o,
                 color=color,
                 label='Scipy Outer Profile',
                 linewidth=1,
                 linestyle='-.')
    plt.semilogx(r_rho_mult,
                 rho_mult,
                 color="fuchsia",
                 label='Splashback Fit (Multinest)',
                 linewidth=1)
    plt.semilogx(r_rho_mult,
                 rho_i_mult,
                 color="fuchsia",
                 label='Multinest Inner Profile',
                 linewidth=1,
                 linestyle='--')
    plt.semilogx(r_rho_mult,
                 rho_o_mult,
                 color="fuchsia",
                 label='Multinest Outer Profile',
                 linewidth=1,
                 linestyle='-.')
    plt.xlabel('$R  [Mpc]$', fontsize=15)
    plt.ylabel('$\\rho(R)$', fontsize=15)
    plt.xscale('log')
    plt.yscale('log')
    plt.ylim(bottom=10**-4)
    plt.legend(fontsize=18, loc='lower left')
    plt.subplot(122)
    plt.semilogx(r_rhoderiv,
                 drho,
                 color=color,
                 label='Splashback Fit (Scipy)',
                 linewidth=1)
    plt.semilogx(r_rhoderiv_mult,
                 drho_mult,
                 color=color,
                 label='Splashback Fit (Multinest)',
                 linestyle='--',
                 linewidth=1)
    plt.fill_between(r_rhoderiv, low, high, color=color, alpha=0.25)
    plt.xlim(0.1, 10.)
    plt.xlabel('$R  [Mpc]$', fontsize=15)
    plt.ylabel('$\\frac{d\\log(\\rho(R))}{d\\log(R)}$', fontsize=23)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig(out_directory + 'rho_Multinest.png', dpi=600)

    # Plot Sigmag Bestfit from Multinest
    plt.figure(figsize=(7, 5))
    plt.errorbar(rperp,
                 sigmag,
                 yerr=sigmag_sig,
                 capsize=4,
                 label=label,
                 color=color,
                 ls='none')
    plt.semilogx(rperp,
                 Sigmag(rperp, z, best_params, h0, 1),
                 label='Splashback Fit (Scipy)',
                 color=color)
    plt.semilogx(rperp,
                 Sigmag(rperp, z, best_params_mult, h0, 1),
                 label='Splashback Fit (Multinest)',
                 linestyle='--',
                 color=color)
    plt.xlabel('$R [Mpc]$', fontsize=15)
    plt.ylabel('$\\Sigma_{g} [(1/Mpc)^2]$', fontsize=15)
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(fontsize=14, loc='lower left')
    plt.savefig(out_directory + 'Sigmag.png', dpi=600)
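
The data-plus-model overlay on log-log axes used above is a compact, reusable pattern. A minimal standalone sketch follows; the synthetic data and every name in it are illustrative assumptions, not part of the original script.

import numpy as np
import matplotlib.pyplot as plt

r = np.logspace(-1, 1, 15)                           # radii (illustrative)
model = r**-1.5                                      # hypothetical power-law model
data = model * (1 + 0.1 * np.random.randn(r.size))   # noisy mock measurements
err = 0.1 * model

plt.errorbar(r, data, yerr=err, capsize=4, ls='none', label='data')
plt.plot(r, model, label='model')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$R$ [Mpc]')
plt.ylabel('$\\Sigma_g$')
plt.legend(loc='lower left')
plt.show()
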
    def BaryonicMassFunction(self, G):

        print('Plotting the baryonic mass function')

        plt.figure()  # New figure
        ax = plt.subplot(111)  # 1 plot on the figure

        binwidth = 0.1  # mass function histogram bin width

        # calculate BMF
        w = np.where(G.StellarMass + G.ColdGas > 0.0)[0]
        mass = np.log10(
            (G.StellarMass[w] + G.ColdGas[w]) * 1.0e10 / self.Hubble_h)

        mi = np.floor(min(mass)) - 2
        ma = np.floor(max(mass)) + 2
        NB = int((ma - mi) / binwidth)  # np.histogram requires an integer bin count

        (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB)

        # Set the x-axis values to be the centre of the bins
        xaxeshisto = binedges[:-1] + 0.5 * binwidth

        # Bell et al. 2003 BMF (h=1.0 converted to h=0.73)
        M = np.arange(7.0, 13.0, 0.01)
        Mstar = np.log10(5.3 * 1.0e10 / self.Hubble_h / self.Hubble_h)
        alpha = -1.21
        phistar = 0.0108 * self.Hubble_h * self.Hubble_h * self.Hubble_h
        xval = 10.0**(M - Mstar)
        yval = np.log(10.) * phistar * xval**(alpha + 1) * np.exp(-xval)

        if (whichimf == 0):
            # converted diet Salpeter IMF to Salpeter IMF
            plt.plot(np.log10(10.0**M / 0.7),
                     yval,
                     'b-',
                     lw=2.0,
                     label='Bell et al. 2003')  # Plot the SMF
        elif (whichimf == 1):
            # converted diet Salpeter IMF to Salpeter IMF, then to Chabrier IMF
            plt.plot(np.log10(10.0**M / 0.7 / 1.8),
                     yval,
                     'g--',
                     lw=1.5,
                     label='Bell et al. 2003')  # Plot the SMF

        # Overplot the model histograms
        plt.plot(xaxeshisto,
                 counts / self.volume * self.Hubble_h * self.Hubble_h *
                 self.Hubble_h / binwidth,
                 'k-',
                 label='Model')

        plt.yscale('log', nonpositive='clip')  # 'nonposy' was renamed 'nonpositive' in Matplotlib 3.3
        plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1])

        # Set the x-axis minor ticks
        ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1))

        plt.ylabel(r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$')  # Set the y-axis
        plt.xlabel(r'$\log_{10}\ M_{\mathrm{bar}}\ (M_{\odot})$')  # and the x-axis labels

        leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = OutputDir + '2.BaryonicMassFunction' + OutputFormat
        plt.savefig(outputFile)  # Save the figure
        print('Saved file to', outputFile)
        plt.close()

        # Add this plot to our output list
        OutputList.append(outputFile)
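
The Bell et al. comparison curve above is a Schechter function evaluated in log-mass. A minimal standalone evaluation is sketched below; the function name is hypothetical and the h scaling is omitted for brevity, with parameter values copied from the snippet.

import numpy as np

def schechter_logM(M, Mstar, alpha, phistar):
    # phi(M) per dex for log10 masses M (Schechter form)
    x = 10.0**(M - Mstar)
    return np.log(10.) * phistar * x**(alpha + 1) * np.exp(-x)

M = np.arange(7.0, 13.0, 0.01)
phi = schechter_logM(M, Mstar=np.log10(5.3e10), alpha=-1.21, phistar=0.0108)
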
Example #55
0
    def run(self):
        logging.info("(ptc.doPTC) executing")
        res = []
        ##
        ## for each quadrant
        ##
        for q in self.settings['quadrants']:
            ##
            ## read this quadrant's attributes from settings file
            ##
            qid = q['id']
            pos = q['pos']
            x_lo = int(q['x_lo'])
            x_hi = int(q['x_hi'])
            y_lo = int(q['y_lo'])
            y_hi = int(q['y_hi'])
            overscan_x_lo = int(q['overscan_x_lo'])
            overscan_x_hi = int(q['overscan_x_hi'])
            overscan_y_lo = int(q['overscan_y_lo'])
            overscan_y_hi = int(q['overscan_y_hi'])
            is_defective = bool(q['is_defective'])

            if is_defective:
                logging.info("(ptc.run) omitting defective quadrant " +
                             str(qid) + " with position \"" + str(pos) + "\"")
                res.append(None)
                continue

            logging.info("(ptc.run) processing quadrant " + str(qid + 1) +
                         " with position \"" + str(pos) + "\"")
            logging.debug("(ptc.run) x range of quadrant is defined by " +
                          str(x_lo) + " < x < " + str(x_hi))
            logging.debug("(ptc.run) y range of quadrant is defined by " +
                          str(y_lo) + " < y < " + str(y_hi))
            logging.debug(
                "(ptc.run) overscan x range of quadrant is defined by " +
                str(overscan_x_lo) + " < x < " + str(overscan_x_hi))
            logging.debug(
                "(ptc.run) overscan y range of quadrant is defined by " +
                str(overscan_y_lo) + " < y < " + str(overscan_y_hi))

            ##
            ## read this quadrant's data and remove bias (and dummy if requested)
            ##
            files_data = {}
            files_hdr = {}
            for f in self.files:
                logging.info("(ptc.run) caching file " + f)
                ff = pyfits.open(f)
                this_data = ff[self.settings['data_hdu']].data[
                    y_lo:y_hi, x_lo:x_hi] - np.mean(
                        ff[self.settings['data_hdu']].data[
                            overscan_y_lo:overscan_y_hi,
                            overscan_x_lo:overscan_x_hi])
                this_dummy = ff[self.settings['dummy_hdu']].data[
                    y_lo:y_hi, x_lo:x_hi] - np.mean(
                        ff[self.settings['dummy_hdu']].data[
                            overscan_y_lo:overscan_y_hi,
                            overscan_x_lo:overscan_x_hi])
                this_hdr = ff[self.settings['data_hdu']].header
                exptime = this_hdr['EXPTIME']
                if exptime not in files_data:
                    files_data[exptime] = []
                    files_hdr[exptime] = []
                if self.settings['do_dummy_subtraction']:
                    files_data[exptime].append(this_data - this_dummy)
                else:
                    files_data[exptime].append(this_data)
                files_hdr[exptime].append(this_hdr)
                ff.close()

            ##
            ## order quadrant data by exptime
            ##
            files_data_od = collections.OrderedDict(sorted(files_data.items()))

            ##
            ## for each exposure time, take the mean of the signal and calculate the noise of the difference frame
            ##
            res_thisq = []  # keeps track of (mean, noise, exposure time) tuples
            # difference stack; when plotted, useful for diagnosing which regions of the quadrant are suitable
            diff_stk = []
            for exposure_time, data in files_data_od.items():  # Python 3: dict.iteritems() no longer exists
                if len(data) != 2:  # check we have two frames for this exposure time
                    err.setError(1)
                    err.handleError()
                    continue

                diff = (data[1] - data[0])  # make diff frame
                diff_stk.append(diff)  # append to stack

                thisq_mean = np.mean(data)  # mean of frames
                thisq_std_diff = np.std(diff)  # error on diff frame
                this_shot_and_read_noise = thisq_std_diff / 2**0.5  # NOTE: THIS ISN'T TRUE FOR DUMMY SUBTRACTION

                if thisq_mean < 0 or thisq_std_diff < 0:  # we have a duff pair here (both are already scalars)
                    err.setError(2)
                    err.handleError()
                    continue

                logging.debug("(ptc.run) exposure time of " +
                              str(exposure_time) +
                              " has mean signal level of " +
                              str(round(np.mean(thisq_mean), 2)) + "ADU +/- " +
                              str(round(np.mean(thisq_std_diff), 2)) + "ADU")

                res_thisq.append(
                    (thisq_mean, this_shot_and_read_noise, exposure_time))

            if self.diagnosticMode:
                plt.imshow(np.mean(diff_stk, axis=0),
                           vmax=np.percentile(np.mean(diff_stk, axis=0), 95),
                           vmin=np.percentile(np.mean(diff_stk, axis=0), 5))
                plt.colorbar()
                plt.show()

            if len(res_thisq) < MINIMUM_FRAMES_REQUIRED:
                err.setError(-5)
                err.handleError()
                res.append(None)
            else:
                res.append(res_thisq)

        rn = []
        gain = []
        qx = []
        qy = []
        for idx_q, q in enumerate(res):
            pos = self.settings['quadrants'][idx_q]['pos']
            if q is None:
                rn.append(None)
                gain.append(None)
                qx.append(None)
                qy.append(None)
                continue

            thisq_mean_all = []
            thisq_std_all = []
            thisq_exptimes = []
            for p in q:  # p == pair.
                thisq_mean_all.append(p[0])
                thisq_std_all.append(p[1])
                thisq_exptimes.append(p[2])
            thisq_rates = [
                c / e for c, e in zip(thisq_mean_all, thisq_exptimes)
            ]

            x = np.asarray(thisq_mean_all)
            y = np.asarray(thisq_std_all)

            # hazard a guess at read regime by:
            ## i) finding gradients for each index in data array using a linear fit
            ## ii) find index with gradient of ~0.2 (upper edge of the read regime for loglog) by assessing truth array for adjacent indices of <0.2 and >0.2
            gradients_log = []
            for idx_x in range(1, len(x)):
                gradients_log.append(
                    np.polyfit(np.log10(x[idx_x - 1:idx_x + 1]),
                               np.log10(y[idx_x - 1:idx_x + 1]), 1)[0])
            truth = []
            for idx_g in range(1, len(gradients_log)):
                lt = [True for gi in gradients_log[:idx_g] if gi < 0.2]
                gt = [True for gi in gradients_log[idx_g:] if gi > 0.2]
                n_true = np.sum(lt) + np.sum(gt)
                truth.append(n_true)
            # +2 for offsets incurred from taking gradient and cycling through truth array
            idx_x_nearest = np.argmax(truth) + 2
            read_guess = list(range(0, idx_x_nearest))  # a list, since points may be appended later
            if idx_x_nearest > len(x) - 1 or idx_x_nearest < 0:
                read_guess = []

            # hazard a guess at shot regime by:
            ## i) finding gradients for each index in data array using a linear fit
            ## ii) find index with gradient of ~0.5 (shot regime for loglog) by assessing truth array for adjacent indices of <0.5 and >0.5
            gradients_log = []
            for idx_x in range(1, len(x)):
                gradients_log.append(
                    np.polyfit(np.log10(x[idx_x - 1:idx_x + 1]),
                               np.log10(y[idx_x - 1:idx_x + 1]), 1)[0])
            truth = []
            for idx_g in range(1, len(gradients_log)):
                lt = [True for gi in gradients_log[:idx_g] if gi < 0.5]
                gt = [True for gi in gradients_log[idx_g:] if gi > 0.5]
                n_true = np.sum(lt) + np.sum(gt)
                truth.append(n_true)
            # +2 for offsets incurred from taking gradient and cycling through truth array
            idx_x_nearest = np.argmax(truth) + 2
            shot_guess = [idx_x_nearest - 1, idx_x_nearest, idx_x_nearest + 1]
            if idx_x_nearest + 1 > len(x) - 1 or idx_x_nearest - 1 < 0:
                shot_guess = []
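            # The heuristic above counts, for each candidate split index, how
            # many pairwise log-log gradients sit below the threshold before it
            # and above it after it; the argmax of that count is the most
            # consistent changeover point (slope ~0 in the read-noise regime,
            # ~0.5 in the shot-noise regime on log-log axes).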

            # interactive selection of PTC
            # - follow on screen prompts
            logging.info("(ptc.run) interactive selection of PTC regions")
            print()
            print("\t\tCOMMAND SET")
            print()
            print("\tq: define point for read noise")
            print("\tw: define point for shot noise")
            print("\te: define full well")
            print("\ta: smooth data with cubic spline")
            print("\tx: clear point definition")
            print("\tm: clear all point definitions")
            print("\tr: remove point from dataset (will reset point definitions)")
            print()

            class define_PTC_regions(object):
                def __init__(self,
                             ax,
                             x,
                             y,
                             read_guess=read_guess,
                             shot_guess=shot_guess,
                             fwd_guess=None):
                    self.ax = ax
                    self.x = x  # data array
                    self.y = y  # data array
                    self.c_idx = None  # current cursor idx

                    self.read = read_guess  # (idx_1, idx_2 ... idx_n)
                    self.shot = shot_guess  # (idx_1, idx_2 ... idx_n)
                    self.fwd = fwd_guess  # idx

                    self.rn = None
                    self.gain = None

                def calculate_nearby_gradient(self, idx):
                    if idx != 0 and idx != len(self.x):
                        c = np.polyfit(self.x[idx - 1:idx + 2],
                                       self.y[idx - 1:idx + 2], 1)
                        return c[0]
                    else:
                        return None

                def calculate_read_noise(self):
                    # calculate read noise (ADU)
                    ## i) fit a second order polynomial to read data array
                    ## ii) find gradient = 0
                    ## iii) find y-intercept
                    if len(self.read) < 2:
                        self.rn = None
                        print "e: need more than two points for read regime"
                        return
                    f_co = np.polyfit(self.x[self.read], self.y[self.read], 2)
                    f_xmin = f_co[1] / -(2 * f_co[0])  # find minimum
                    rn_yi_log = np.polyval(f_co,
                                           f_xmin)  # y-intercept (rn in ADU)
                    print "i: read noise calculated as " + str(
                        round(10**rn_yi_log, 2)) + "ADU"
                    self.rn = rn_yi_log

                def calculate_gain(self):
                    # calculate gain (e-/ADU)
                    ### i) fit second order polynomial to shot data array, and find x coordinate at which gradient is exactly 0.5
                    ### ii) calculate corresponding x-intercept
                    if len(self.shot) < 2:
                        self.gain = None
                        print "e: need more than two points for shot regime"
                        return
                    f_co = np.polyfit(self.x[self.shot], self.y[self.shot], 2)
                    x_g_of_0p5 = (0.5 - f_co[1]) / (2 * f_co[0])
                    y_g_of_0p5 = np.polyval(f_co, x_g_of_0p5)
                    yi_g_of_0p5 = y_g_of_0p5 - (0.5 * x_g_of_0p5)
                    xi_g_of_0p5 = -yi_g_of_0p5 / 0.5
                    print "i: gain calculated as " + str(
                        round(10**xi_g_of_0p5, 2)) + "e-/ADU"
                    self.gain = (xi_g_of_0p5, x_g_of_0p5, y_g_of_0p5)

                def draw(self):
                    self.ax.cla()

                    plt.title("PTC")
                    plt.xlabel("Log10 (Signal, ADU)")
                    plt.ylabel("Log10 (Noise, ADU)")

                    # text location in axes coords
                    self.txt = self.ax.text(0.1, 0.9, '', transform=self.ax.transAxes)

                    plt.plot(self.x, self.y, 'kx-')
                    plt.xlim([0, np.max(self.x)])
                    plt.ylim([0, np.max(self.y)])
                    if self.c_idx is not None:
                        # update line positions
                        lx = self.ax.axhline(color='k')  # horiz line (cursor)
                        ly = self.ax.axvline(color='k')  # vert line (cursor)
                        lx.set_ydata(self.y[self.c_idx])
                        ly.set_xdata(self.x[self.c_idx])

                        # show gradient at point
                        m = self.calculate_nearby_gradient(self.c_idx)
                        if m is not None:
                            self.txt.set_text('nearby_m=%1.2f' % (m))

                    if self.read is not None and self.rn is not None:
                        # update line positions
                        lx = self.ax.axhline(color='k', linestyle='--')  # horiz line (read noise)
                        lx.set_ydata(self.rn)

                    if self.shot is not None and self.gain is not None:
                        # update line positions
                        plt.plot([self.gain[0], self.gain[1]],
                                 [0, self.gain[2]], 'k--')

                    # update regime points
                    self.ax.plot(self.x[self.read], self.y[self.read], 'ro')
                    self.ax.plot(self.x[self.shot], self.y[self.shot], 'bo')

                    if self.fwd is not None:
                        lyf = self.ax.axvline(color='k', linestyle='--')  # the vert line (fwd)
                        lyf.set_xdata(self.x[self.fwd])

                    # draw
                    plt.draw()

                def find_closest_point(self, xc, yc, x, y):
                    '''
                      xc/yc are the cursor input coords
                      x/y are the data arrays
                    '''
                    delta_x = xc - x
                    delta_y = yc - y
                    r = (delta_x**2 + delta_y**2)**0.5  # distance from cursor to every point
                    return int(np.argmin(r)), np.min(r)

                def key_press(self, event):
                    if not event.inaxes:
                        return

                    x, y = event.xdata, event.ydata
                    if event.key == 'q':
                        idx, val = self.find_closest_point(
                            x, y, self.x, self.y)
                        if idx not in self.read:
                            self.read.append(idx)
                            self.calculate_read_noise()
                            print "i: added read regime point"
                    if event.key == 'w':
                        idx, val = self.find_closest_point(
                            x, y, self.x, self.y)
                        if idx not in self.shot:
                            self.shot.append(idx)
                            self.calculate_gain()
                            print "i: added shot regime point"
                    if event.key == 'e':
                        idx, val = self.find_closest_point(
                            x, y, self.x, self.y)
                        self.fwd = idx
                        print "i: added fwd line"
                    if event.key == 'r':
                        idx, val = self.find_closest_point(
                            x, y, self.x, self.y)
                        self.read = []
                        self.shot = []
                        self.fwd = None
                        self.rn = None
                        self.gain = None
                        self.x = np.delete(self.x, idx)
                        self.y = np.delete(self.y, idx)
                        self.c_idx = None
                        print "i: reset point definitions and removed point from dataset"
                    elif event.key == 'x':
                        idx, val = self.find_closest_point(
                            x, y, self.x, self.y)
                        if idx in self.read:
                            idx_to_pop = self.read.index(idx)
                            self.read.pop(idx_to_pop)
                            self.calculate_read_noise()
                            print "i: cleared read regime point"
                        if idx in self.shot:
                            idx_to_pop = self.shot.index(idx)
                            self.shot.pop(idx_to_pop)
                            self.calculate_gain()
                            print "i: cleared shot regime point"
                        if idx == self.fwd:
                            self.fwd = None
                            print "i: cleared fwd line"
                    elif event.key == 'm':
                        self.read = []
                        self.shot = []
                        self.fwd = None
                        self.rn = None
                        self.gain = None
                        print "i: cleared all point definitions"
                    elif event.key == 'a':
                        self.smooth_data()
                        self.calculate_gain()
                        self.calculate_read_noise()
                        print "i: smoothed data"
                    self.draw()

                def mouse_move(self, event):
                    if not event.inaxes:
                        return

                    x, y = event.xdata, event.ydata
                    idx, val = self.find_closest_point(x, y, self.x, self.y)
                    self.c_idx = idx

                    self.draw()

                def smooth_data(self):
                    to_idx = len(self.x) - 1

                    if self.fwd is not None:  # use FWD if it's been applied
                        to_idx = self.fwd

                    # catch for reverse turnover (occurs in some data after full well)
                    to_rev_idx = [x2 - x1 < 0
                                  for x1, x2 in zip(self.x[:-1], self.x[1:])]
                    if True in to_rev_idx:
                        rev_idx = to_rev_idx.index(True)  # first turnover index
                        if rev_idx < to_idx:
                            to_idx = rev_idx

                    s = interpolate.UnivariateSpline(
                        self.x[:to_idx + 1], self.y[:to_idx + 1], k=3,
                        s=10)  # apply smoothing cubic bspline
                    self.x = self.x[:to_idx + 1]
                    self.y = s(self.x)

            fig = plt.figure()
            ax = plt.gca()
            reg = define_PTC_regions(ax, np.log10(x), np.log10(y))
            reg.calculate_read_noise()
            reg.calculate_gain()
            reg.draw()
            plt.connect('motion_notify_event', reg.mouse_move)
            plt.connect('key_press_event', reg.key_press)
            plt.show()

            rn.append(reg.rn)
            gain.append(reg.gain)
            qx.append(reg.x)
            qy.append(reg.y)

        if self.makePlots:
            for idx_q in range(len(qx)):
                this_rn = rn[idx_q]
                this_gain = gain[idx_q]
                this_q_x = qx[idx_q]
                this_q_y = qy[idx_q]

                pos = self.settings['quadrants'][idx_q]['pos']
                is_defective = bool(
                    self.settings['quadrants'][idx_q]['is_defective'])
                plt.subplot(2, 2, idx_q + 1)
                plt.yscale('log')
                plt.xscale('log')
                plt.xlabel("Signal (ADU)")
                plt.ylabel("Noise (ADU)")
                if is_defective:
                    plt.plot([],
                             label='data for quadrant: ' +
                             str(self.settings['quadrants'][idx_q]['pos']),
                             color='white')
                    plt.legend(loc='upper left')
                    continue
                plt.plot(10**this_q_x,
                         10**this_q_y,
                         'k.',
                         label='data for quadrant: ' +
                         str(self.settings['quadrants'][idx_q]['pos']))
                if this_rn is not None:
                    plt.plot([10**0, np.max(10**this_q_x)],
                             [10**this_rn, 10**this_rn],
                             'k--',
                             label="read noise: " +
                             str(round(10**this_rn, 2)) + " ADU")
                if this_gain is not None:
                    plt.plot([10**this_gain[0], 10**this_gain[1]],
                             [10**0, 10**this_gain[2]],
                             'k--',
                             label="gain: " + str(round(10**this_gain[0], 2)) +
                             " e-/ADU")
                plt.legend(loc='upper left')
            plt.tight_layout()
            plt.show()
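
The gain extraction above relies on the shot-noise regime being a slope-0.5 line on log-log axes: log N = 0.5 log S - 0.5 log g, so the line's x-intercept recovers the gain. A minimal sketch of that relation on ideal synthetic data (all names and the assumed gain are illustrative):

import numpy as np

gain_true = 2.5                              # e-/ADU (assumed)
signal_adu = np.logspace(1, 4, 20)           # mean signal (ADU)
noise_adu = np.sqrt(signal_adu / gain_true)  # pure shot noise (ADU)

# Fit the slope-0.5 line in log-log space; its x-intercept is log10(gain).
slope, intercept = np.polyfit(np.log10(signal_adu), np.log10(noise_adu), 1)
gain_est = 10**(-intercept / slope)
print(round(gain_est, 2))                    # ~2.5
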
    def VelocityDistribution(self, G):

        print('Plotting the velocity distribution of all galaxies')

        seed(2222)

        mi = -40.0
        ma = 40.0
        binwidth = 0.5
        NB = int((ma - mi) / binwidth)  # np.histogram requires an integer bin count

        # set up figure
        plt.figure()
        ax = plt.subplot(111)

        pos_x = G.Pos[:, 0] / self.Hubble_h
        pos_y = G.Pos[:, 1] / self.Hubble_h
        pos_z = G.Pos[:, 2] / self.Hubble_h

        vel_x = G.Vel[:, 0]
        vel_y = G.Vel[:, 1]
        vel_z = G.Vel[:, 2]

        dist_los = np.sqrt(pos_x * pos_x + pos_y * pos_y + pos_z * pos_z)
        vel_los = (pos_x / dist_los) * vel_x + (pos_y / dist_los) * vel_y + (
            pos_z / dist_los) * vel_z
        dist_red = dist_los + vel_los / (self.Hubble_h * 100.0)

        tot_gals = len(pos_x)

        (counts, binedges) = np.histogram(vel_los / (self.Hubble_h * 100.0),
                                          range=(mi, ma),
                                          bins=NB)
        xaxeshisto = binedges[:-1] + 0.5 * binwidth
        plt.plot(xaxeshisto,
                 counts / binwidth / tot_gals,
                 'k-',
                 label='los-velocity')

        (counts, binedges) = np.histogram(vel_x / (self.Hubble_h * 100.0),
                                          range=(mi, ma),
                                          bins=NB)
        xaxeshisto = binedges[:-1] + 0.5 * binwidth
        plt.plot(xaxeshisto,
                 counts / binwidth / tot_gals,
                 'r-',
                 label='x-velocity')

        (counts, binedges) = np.histogram(vel_y / (self.Hubble_h * 100.0),
                                          range=(mi, ma),
                                          bins=NB)
        xaxeshisto = binedges[:-1] + 0.5 * binwidth
        plt.plot(xaxeshisto,
                 counts / binwidth / tot_gals,
                 'g-',
                 label='y-velocity')

        (counts, binedges) = np.histogram(vel_z / (self.Hubble_h * 100.0),
                                          range=(mi, ma),
                                          bins=NB)
        xaxeshisto = binedges[:-1] + 0.5 * binwidth
        plt.plot(xaxeshisto,
                 counts / binwidth / tot_gals,
                 'b-',
                 label='z-velocity')

        plt.yscale('log', nonpositive='clip')  # 'nonposy' was renamed 'nonpositive' in Matplotlib 3.3
        plt.axis([mi, ma, 1e-5, 0.5])
        # plt.axis([mi, ma, 0, 0.13])

        plt.ylabel(r'$\mathrm{Box\ Normalised\ Count}$')  # Set the y-axis
        plt.xlabel(r'$\mathrm{Velocity / H}_{0}$')  # and the x-axis labels

        leg = plt.legend(loc='upper left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = OutputDir + '11.VelocityDistribution' + OutputFormat
        plt.savefig(outputFile)  # Save the figure
        print('Saved file to', outputFile)
        plt.close()

        # Add this plot to our output list
        OutputList.append(outputFile)
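
The line-of-sight projection used above is the dot product of each velocity with its unit position vector. A compact vectorized sketch (arrays and scales are purely illustrative):

import numpy as np

pos = np.random.randn(1000, 3) * 100.0   # positions, e.g. Mpc
vel = np.random.randn(1000, 3) * 200.0   # velocities, e.g. km/s

dist = np.linalg.norm(pos, axis=1)                # |r|
v_los = np.einsum('ij,ij->i', pos, vel) / dist    # (r . v) / |r|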