Example #1
 def plot(self,typ='s3',titre='titre',log=False,stem=True,subp=True):
     """
     """
     fa = np.linspace(self.Br.fmin,self.Br.fmax,self.Br.Nf)
     st = titre+'  shape : '+typ
     plt.suptitle(st,fontsize=14)
     if subp:
         plt.subplot(221)
         titre = r'$\sum_f \sum_m |Br_{l}^{(m)}(f)|$'
         self.Br.plot(typ=typ,title=titre, yl=True,color='r',stem=stem,log=log)
     else:
         self.Br.plot(typ=typ,color='r',stem=stem,log=log)
     if subp:
         plt.subplot(222)
         titre = r'$\sum_f \sum_m |Bi_{l}^{(m)}(f)|$'
         self.Bi.plot(typ=typ,title=titre,color='m',stem=stem,log=log)
     else:
         self.Bi.plot(typ=typ,color='m',stem=stem,log=log)
     if subp:
         plt.subplot(223)
         titre = r'$\sum_f \sum_m |Cr_{l}^{(m)}(f)|$'
         self.Cr.plot(typ=typ,title=titre, xl=True, yl=True,color='b',stem=stem,log=log)
     else:
         self.Cr.plot(typ=typ,color='b',stem=stem,log=log)
     if subp:
         plt.subplot(224)
         titre = r'$\sum_f \sum_m |Ci_{l}^{(m)}(f)|$'
         self.Ci.plot(typ=typ, title = titre, xl=True,color='c',stem=stem,log=log)
     else:
         self.Ci.plot(typ=typ,xl=True,yl=True,color='c',stem=stem,log=log)
     if not subp:
         plt.legend((r'$\sum_f \sum_m |Br_{l}^{(m)}(f)|$',
                     r'$\sum_f \sum_m |Bi_{l}^{(m)}(f)|$',
                     r'$\sum_f \sum_m |Cr_{l}^{(m)}(f)|$',
                     r'$\sum_f \sum_m |Ci_{l}^{(m)}(f)|$'))
Example #2
def graphical_analysis(strace, comment=None, cutoff=50, threshold=-45, bins=50):
    """
    Graphical report of the trace.
    strace - voltage trace of class SimpleTrace
    cutoff - first part of the trace cut away from the analysis (in ms)
    threshold - cutting voltage
    """
    import matplotlib.pyplot as pyl
    voltage = strace._data
    time = np.arange(len(voltage))*strace._dt
    pyl.figure()
    rate, mean, var, skew, kurt = full_analysis(strace, cutoff, threshold)
    if comment:
        pyl.suptitle(comment)
    sp1 = pyl.subplot(2,1,1)
    sp1.plot(time,voltage)
    sp1.set_title("Spike rate = {0}".format(rate))
    sp1.set_xlabel("time [ms]")
    sp1.set_ylabel("V [mV]")
    sp2 = pyl.subplot(2,1,2)
    cut_trace = voltage[int(cutoff/strace._dt):]
    data = cut_trace[cut_trace<threshold]
    sp2.hist(data, bins=bins, histtype="stepfilled", density=True)
    xlim = sp2.get_xlim()
    pyl.text(xlim[0]+0.7*(xlim[1]-xlim[0]), 0.6,
             "mean = {0}\nvar={1}\nskew={2}\nkurt={3}".format(mean, var, skew, kurt))
    sp2.plot([mean, mean],[0,1],"r")
    sp2.plot([mean-np.sqrt(var)/2., mean+np.sqrt(var)/2.],[0.1,0.1], "r")
    sp2.set_xlabel("V [mV]")
    sp2.set_ylabel("normalized distribution")
Example #3
def chooseDegree(npts, mindegree=0, maxdegree=20, filename=None):
    """Gets noisy data, uses cross validation to estimate error, and fits new data with best model."""
    x, y = bv.noisyData(npts)
    degrees = numpy.arange(mindegree,maxdegree+1)
    errs = numpy.zeros_like(degrees, dtype=float)
    for i,d in enumerate(degrees):
        errs[i] = estimateError(x, y, d)

    plt.subplot(1,2,1)
    plt.plot(degrees,errs,'bo-')
    plt.xlabel("Degree")
    plt.ylabel("CV Error")

    besti = numpy.argmin(errs)
    bestdegree = degrees[besti]

    plt.subplot(1,2,2)
    x2, y2 = bv.noisyData(npts)
    plt.plot(x2,y2,'ro')
    xs = numpy.linspace(min(x),max(x),150)
    fitf = numpy.poly1d(numpy.polyfit(x2,y2,bestdegree))
    plt.plot(xs,fitf(xs),'g-',lw=2)
    plt.xlim((bv.MIN,bv.MAX))
    plt.ylim((-2.,2.))
    plt.suptitle('Selected Degree '+str(bestdegree))
    bv.outputPlot(filename)
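# chooseDegree() relies on an estimateError() helper defined elsewhere. A
# plausible minimal sketch (the k-fold split and MSE metric are assumptions,
# not the original implementation), assuming x and y are 1-D numpy arrays:
import numpy

def estimateError(x, y, d, nfolds=5):
    """Mean cross-validated MSE of a degree-d polynomial fit."""
    idx = numpy.arange(len(x))
    numpy.random.shuffle(idx)
    err = 0.0
    for fold in numpy.array_split(idx, nfolds):
        train = numpy.setdiff1d(idx, fold)          # indices not in this fold
        coeffs = numpy.polyfit(x[train], y[train], d)
        resid = numpy.polyval(coeffs, x[fold]) - y[fold]
        err += numpy.mean(resid ** 2)
    return err / nfolds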
Example #4
    def plotFittingResults(self):
        """
        Plot results of Rmax optimization procedure and best fit of the experimental data
        """
        _listFitQ = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitQ()]
        _listFitValues = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitValues()]
        _listExpQ = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataQ()]
        _listExpValues = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataValues()]

        #_listExpStdDev = None
        #if self.getDataInput().getExperimentalDataStdDev():
        #    _listExpStdDev = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataStdDev()]
        #if _listExpStdDev:
        #    pylab.errorbar(_listExpQ, _listExpValues, yerr=_listExpStdDev, linestyle='None', marker='o', markersize=1,  label="Experimental Data")
        #    pylab.gca().set_yscale("log", nonposy='clip')
        #else:         
        #    pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5,  label="Experimental Data")

        pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label="Experimental Data")
        pylab.semilogy(_listFitQ, _listFitValues, label="Fitting curve")
        pylab.xlabel('q')
        pylab.ylabel('I(q)')
        pylab.suptitle("RMax : %3.2f. Fit quality : %1.3f" % (self.getDataInput().getRMax().getValue(), self.getDataOutput().getFitQuality().getValue()))
        pylab.legend()
        pylab.savefig(os.path.join(self.getWorkingDirectory(), "gnomFittingResults.png"))
        pylab.clf()
Example #5
    def visualization2(self, sp_to_vis=None):
        if sp_to_vis:
            species_ready = list(set(sp_to_vis).intersection(self.all_sp_signatures.keys()))
        else:
            raise Exception('list of driver species must be defined')

        if not species_ready:
            raise Exception('None of the input species is a driver')

        for sp in species_ready:
            # Setting up figure
            plt.figure()
            plt.subplot(313)

            mon_val = OrderedDict()
            signature = self.all_sp_signatures[sp]
            for idx, mon in enumerate(list(set(signature))):
                if mon[0] == 'C':
                    mon_val[self.all_comb[sp][mon] + (-1,)] = idx
                else:
                    mon_val[self.all_comb[sp][mon]] = idx

            mon_rep = [0] * len(signature)
            for i, m in enumerate(signature):
                if m[0] == 'C':
                    mon_rep[i] = mon_val[self.all_comb[sp][m] + (-1,)]
                else:
                    mon_rep[i] = mon_val[self.all_comb[sp][m]]
            # mon_rep = [mon_val[self.all_comb[sp][m]] for m in signature]

            y_pos = numpy.arange(len(mon_val.keys()))
            plt.scatter(self.tspan[1:], mon_rep)
            plt.yticks(y_pos, mon_val.keys())
            plt.ylabel('Monomials', fontsize=16)
            plt.xlabel('Time(s)', fontsize=16)
            plt.xlim(0, self.tspan[-1])
            plt.ylim(0, max(y_pos))

            plt.subplot(312)

            for name in self.model.odes[sp].as_coefficients_dict():
                mon = name
                mon = mon.subs(self.param_values)
                var_to_study = [atom for atom in mon.atoms(sympy.Symbol)]
                arg_f1 = [numpy.maximum(self.mach_eps, self.y[str(va)][1:]) for va in var_to_study]
                f1 = sympy.lambdify(var_to_study, mon)
                mon_values = f1(*arg_f1)
                mon_name = str(name).partition('__')[2]
                plt.plot(self.tspan[1:], mon_values, label=mon_name)
            plt.ylabel('Rate(m/sec)', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.1, 0.85), loc='upper right', ncol=1)

            plt.subplot(311)
            plt.plot(self.tspan[1:], self.y['__s%d' % sp][1:], label=parse_name(self.model.species[sp]))
            plt.ylabel('Molecules', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.15, 0.85), loc='upper right', ncol=1)
            plt.suptitle('Tropicalization' + ' ' + str(self.model.species[sp]))

            # plt.show()
            plt.savefig('s%d' % sp + '.png', bbox_inches='tight', dpi=400)
Example #6
 def scatter(title, file_name, x_array, y_array, size_array, x_label, \
             y_label, x_range, y_range, print_pdf):
     '''
     Plots the given x and y value arrays with the specified title and
     saves the figure under the specified file name. The sizes of the
     points are proportional to the values in size_array. If print_pdf
     is truthy, the image is also written to a PDF file; otherwise it
     is only written to a PNG file.
     '''
     rc('text', usetex=True)
     rc('font', family='serif')
     plt.clf()  # clear the plotting window, a must
     plt.scatter(x_array, y_array, s=size_array, c='b', marker='o', alpha=0.4)
     if x_label is not None:
         plt.xlabel(x_label)
     if y_label is not None:
         plt.ylabel(y_label)
     plt.axis([0, x_range, 0, y_range])
     plt.grid(True)
     plt.suptitle(title)
 
     Plotter.print_to_png(plt, file_name)
     
     if print_pdf:
         Plotter.print_to_pdf(plt, file_name)
Example #7
def plot_fcs(normed_df, unnormed_df, pair, basename):
    """
    Plot fold changes for normed and unnormed dataframes.

    Parameters:
    -----------
    normed_df: Normalized dataframe of values
    unnormed_df: Unnormalized dataframe of values
    pair: Tuple containing the two columns to use
    to compute fold change. Fold change is first
    sample divided by second.
    """
    if (pair[0] not in normed_df.columns) or \
       (pair[1] not in normed_df.columns):
        raise Exception("One of the pair is not in the normed df.")
    if (pair[0] not in unnormed_df.columns) or \
       (pair[1] not in unnormed_df.columns):
        raise Exception("One of the pair is not in the unnormed df.")
    normed_fc = \
        np.log2(normed_df[pair[0]]) - np.log2(normed_df[pair[1]])
    unnormed_fc = \
        np.log2(unnormed_df[pair[0]]) - np.log2(unnormed_df[pair[1]])
    fc_df = pandas.DataFrame({"normed_fc": normed_fc,
                              "unnormed_fc": unnormed_fc})
    # Remove nans/infs etc.
    pandas.set_option('use_inf_as_na', True)
    fc_df = fc_df.dropna(how="any", subset=["normed_fc",
                                            "unnormed_fc"])
    plt.figure()
    fc_df.hist(color="k", bins=40)
    plt.suptitle("%s vs. %s" %(pair[0], pair[1]))
    plt.xlabel("Fold change (log2)")
    save_fig(basename)
    pandas.set_option('use_inf_as_na', False)
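# Usage sketch with toy data (the column names are illustrative, and save_fig()
# is assumed to be a project helper wrapping plt.savefig):
import numpy as np
import pandas

rng = np.random.RandomState(0)
toy_unnormed = pandas.DataFrame({"sample_a": rng.lognormal(size=500),
                                 "sample_b": rng.lognormal(size=500)})
toy_normed = toy_unnormed / toy_unnormed.sum()
plot_fcs(toy_normed, toy_unnormed, ("sample_a", "sample_b"), "fc_hist")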
Example #8
    def update(frame):
        global _counter
        centroid = np.random.uniform(-0.5, 0.5, size=2)
        width = np.random.uniform(0, 0.01)
        length = np.random.uniform(0, 0.03) + width
        angle = np.random.uniform(0, 360)
        intens = np.random.exponential(2) * 50
        model = mock.generate_2d_shower_model(centroid=centroid,
                                              width=width,
                                              length=length,
                                              psi=angle * u.deg)
        image, sig, bg = mock.make_mock_shower_image(geom, model.pdf,
                                                     intensity=intens,
                                                     nsb_level_pe=5000)

        # alternate between cleaned and raw images
        if _counter > 20:
            plt.suptitle("Image Cleaning ON")
            cleanmask = reco.cleaning.tailcuts_clean(geom, image, pedvars=80)
            for ii in range(3):
                reco.cleaning.dilate(geom, cleanmask)
            image[cleanmask == 0] = 0  # zero noise pixels
        if _counter >= 40:
            plt.suptitle("Image Cleaning OFF")
            _counter = 0

        disp.image = image
        disp.set_limits_percent(100)
        _counter += 1
Example #9
def q6(abreviation):
	'''Q6: plots polling data for a given state.'''
	#get STATE; connect to db
	state = abreviation.upper()
	connection = sqlite3.connect(path+"/poll.db")
	cursor = connection.cursor()
	
	# query the db for names and rankings of the Rep, Dem and Ind candidates
	sql_cmd = "SELECT candidate_names.democrat, candidate_names.republican, candidate_names.independent, rankings.day, rankings.dem, rankings.rep, rankings.indep FROM rankings LEFT JOIN statetable ON rankings.state = statetable.fullname LEFT JOIN candidate_names ON statetable.abrev = candidate_names.state WHERE statetable.abrev = ? ORDER BY rankings.day ASC"
	cursor.execute(sql_cmd, (state,))
	dbinfo = cursor.fetchall()

	#'new' is the name of the record array corresponding to the output from the sql query
	new = N.array(dbinfo, dtype= [('demname', '|S25'),('repname', '|S25'),('indname', '|S25'),('day', N.int16), ("dem", N.int16), ("rep", N.int16), ("ind", N.int16)])

	demname = new['demname'][0]  # name of the Democrat
	repname = new['repname'][0]  # name of the Republican
	indname = new['indname'][0]  # name of the independent

	# 2 (+1) lines: Dem and Rep always, plus Ind if the independent has more than 1 point at the first datapoint (i.e. an independent candidate exists)
	plt.plot(new['day'],new['dem'], color='blue', label='%s' % demname)
	plt.plot(new['day'],new['rep'], color='red', label='%s' % repname)
	if new['ind'][0] > 1:
		plt.plot(new['day'],new['ind'], color='green', label='%s' % indname)
		
	#plot info
	plt.suptitle('Election Polls for the State of %s'%state)
	plt.xlabel('Day of the year')
	plt.ylabel('Points')
	plt.legend()
	plt.show()
Example #10
def plot_ss_scatter(steadies):
    """ Plot scatter plots of steady states
    """

    def do_scatter(i, j, ax):
        """ Draw single scatter plot
        """
        xs, ys = utils.extract(i, j, steadies)
        ax.scatter(xs, ys)

        ax.set_xlabel(r"$S_%d$" % i)
        ax.set_ylabel(r"$S_%d$" % j)

        cc = utils.get_correlation(xs, ys)
        ax.set_title(r"Corr: $%.2f$" % cc)

    dim = steadies.shape[1]
    fig, axarr = plt.subplots(1, int((dim ** 2 - dim) / 2), figsize=(20, 5))

    axc = 0
    for i in range(dim):
        for j in range(dim):
            if i == j:
                break
            do_scatter(i, j, axarr[axc])
            axc += 1

    plt.suptitle("Correlation overview")

    plt.tight_layout()
    save_figure("images/correlation_scatter.pdf", bbox_inches="tight")
    plt.close()
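# `utils` is a project-local module; a minimal sketch of the two helpers used
# above, inferred from the call sites (an assumption, not the original code):
import numpy as np

def extract(i, j, steadies):
    """Columns i and j of an (n_samples, dim) steady-state array."""
    return steadies[:, i], steadies[:, j]

def get_correlation(xs, ys):
    """Pearson correlation coefficient between two 1-D samples."""
    return np.corrcoef(xs, ys)[0, 1]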
Example #11
def plotErrorAndOrder(schemesName, spaceErrorList,temporalErrorList,
                      spaceOrderList, temporalOrderList, Ntds):
    legendList = []
    lstyle = ['b', 'r', 'g', 'm']
    fig , axarr = plt.subplots(2, 2, squeeze=False)
    for k, scheme_name in enumerate(schemesName):
        axarr[0][0].plot(np.log10(np.asarray(spaceErrorList[k])),lstyle[k])
        axarr[0][1].plot(np.log10(np.asarray(temporalErrorList[k])),lstyle[k])
        axarr[1][0].plot(spaceOrderList[k],lstyle[k])
        axarr[1][1].plot(temporalOrderList[k],lstyle[k])
        legendList.append(scheme_name)
    plt.suptitle('test_MES_convergence(): Results from convergence test using Method of Exact Solution')

    axarr[1][0].axhline(1.0, xmin=0, xmax=Ntds-2, linestyle=':', color='k')
    axarr[1][0].axhline(2.0, xmin=0, xmax=Ntds-2, linestyle=':', color='k')
    axarr[1][1].axhline(1.0, xmin=0, xmax=Ntds-2, linestyle=':', color='k')
    axarr[1][1].axhline(2.0, xmin=0, xmax=Ntds-2, linestyle=':', color='k')
    axarr[1][0].set_ylim(0, 5)
    axarr[1][1].set_ylim(0, 5)
    axarr[0][0].set_ylabel('rms Error')
    axarr[0][0].set_title('space Error')
    axarr[0][1].set_ylabel('rms Error')
    axarr[0][1].set_title('temporal Error')
    axarr[1][0].set_ylabel('order')
    axarr[1][0].set_title('space order')
    axarr[1][1].set_ylabel('order')
    axarr[1][1].set_title('temporal order')
    axarr[0][1].legend(legendList, frameon=False)
Example #12
def report(path='history.cpkl', tn=0, use_sns=True):
    import seaborn as sns  # imported unconditionally: the xkcd palette below needs it
    if use_sns:
        sns.set_style('whitegrid')
        sns.set_style('whitegrid', {'fontsize': 50})
        sns.set_context('poster')
    with open(path, 'rb') as f:
        logged_data = pickle.load(f)

    history = util.NestedDict()
    for name, val in logged_data.iteritems():
        history.set_nested(name, val)

    num_subplots = len(history)
    cols = 2  # 2 panels for Objective and Accuracy
    rows = 1

    fig = plt.figure(figsize=(12, 8))
    fig.subplots_adjust(wspace=0.3, hspace=0.2)  # room for labels [Objective, Accuracy]
    colors = [sns.xkcd_rgb['blue'], sns.xkcd_rgb['red']]

    # Here we assume that history is only two levels deep
    for k, (subplot_name, trend_lines) in enumerate(history.iteritems()):
        plt.subplot(rows, cols, k + 1)
        plt.ylabel(subplot_name.capitalize())
        plt.xlabel('Epoch')
        for i, (name, (timestamps, values)) in enumerate(trend_lines.iteritems()):
            plt.plot(timestamps, values, label=name, color=colors[i])
        plt.suptitle('Task number %d' % tn)
        plt.legend(loc='best')

    plt.show()
Example #13
    def test2_ESMF_clt(self):
        """
        Out and back, same grid using mvGenericRegrid -> ESMF, linear
        """
        per = 1
        coordSys = 'spherical degrees'
        grid = [self.cltGrid[0], self.cltGrid[1]]
        ro = regrid2.GenericRegrid(grid, grid, 
                                   dtype=self.clt.dtype,
                                   regridMethod='linear', 
                                   regridTool = 'esMf', periodicity = per,
                                   coordSys = coordSys)
        ro.computeWeights()
        ro.apply(numpy.array(self.clt), self.cltInterp, rootPe = 0)
        self.cltInterp = self.comm.bcast(self.cltInterp, root = 0)
        ro.apply(self.cltInterp, self.cltInterpInterp, rootPe = 0)
        nCell = numpy.array(self.clt.shape).prod()

        if self.rank == 0:
            avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
            avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
            self.assertLess(avgDiffInterp, self.tol)
            if self.size > 1:
                self.assertLess(avgDiffInterpInterp, 600)
            else:
                self.assertLess(avgDiffInterpInterp, self.tol)

            if False:
                pl.figure(2)
                pl.subplot(3,2,1)
                pl.pcolor(grid[1], grid[0], self.clt)
                pl.title('clt')
                pl.colorbar()
                pl.subplot(3,2,2)
                pl.pcolor(grid[1], grid[0], self.cltInterp,
                        vmin = 0, vmax = 100)
                pl.title('Interp')
                pl.colorbar()
                pl.subplot(3,2,3)
                pl.pcolor(grid[1], grid[0], self.cltInterpInterp,
                        vmin = 0, vmax = 100)
                pl.title('InterpInterp')
                pl.colorbar()
                pl.subplot(3,2,4)
                pl.pcolor(grid[1],grid[0], self.clt-self.cltInterp)
                pl.colorbar()
                pl.title('clt-cltInterp')
                pl.subplot(3,2,5)
                pl.pcolor(grid[1],grid[0], self.clt-self.cltInterpInterp)
                pl.colorbar()
                pl.title('clt-cltInterpInterp')
                pl.subplot(3,2,6)
                pl.pcolor(grid[1],grid[0], grid[1])
                pl.colorbar()
                pl.title('Longitude')
                string0 = "ESMF coordSys = %s, " % coordSys
                string1 = "periodicity = %d, " % (per)
                string2 = "MPI COMM size = %d" % (self.size)
                pl.suptitle(string0 + string1 + string2)
Example #14
    def Xtest4_ESMF_Conservative_2D_clt(self):
        """
        Out, ESMF, Conservative metric
        """
        per = 1
        coordSys = 'spherical degrees'
        grid = [self.cltGrid[0], self.cltGrid[1]]
        mask = numpy.array(self.clt > 90, dtype = numpy.int32)
        newclt = numpy.ones(self.clt.shape) * self.clt
        newclt[numpy.where(self.clt>75)] = self.clt.missing_value
        ro = regrid2.GenericRegrid(grid, grid, self.clt.dtype, 
                                   regridMethod = 'conserv',
                                   regridTool = 'esMf', 
                                   periodicity = per,
                                   coordSys = coordSys)
        ro.computeWeights()
        ro.apply(numpy.array(newclt), self.cltInterp, 
                 srcMissingValue = self.clt.missing_value, rootPe = 0)
      
        nCell = numpy.array(self.clt.shape).prod()

        if self.rank == 0:
            avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
            avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
            #self.assertLess(avgDiffInterp, 50)

            if True:
                pl.figure(4)
                pl.subplot(3,2,1)
                pl.pcolor(grid[1], grid[0], self.clt)
                pl.title('clt')
                pl.colorbar()
                pl.subplot(3,2,2)
                pl.pcolor(grid[1], grid[0], (newclt == self.clt.missing_value)+mask,
                        vmin = 0, vmax = 2)
                pl.title('newclt == self.clt.missing_value')
                pl.colorbar()
                pl.subplot(3,2,3)
                pl.pcolor(grid[1], grid[0], self.cltInterp)
                mn, mx = self.cltInterp.min(), self.cltInterp.max()
                pl.title('newMask %5.2f %5.2f' % (mn,mx))
                pl.colorbar()
                pl.subplot(3,2,4)
                pl.pcolor(grid[1],grid[0], mask+self.cltInterp)
                pl.colorbar()
                pl.title('mask')
                pl.subplot(3,2,5)
                pl.pcolor(grid[1],grid[0], mask)
                pl.colorbar()
                pl.title('(newclt==self.clt.missing_value) - self.cltInterp')
                pl.subplot(3,2,6)
                pl.pcolor(grid[1],grid[0], newclt == self.clt.missing_value)
                pl.colorbar()
                pl.title('newclt-cltInterp')
                string0 = "ESMF coordSys = %s, " % coordSys
                string1 = "periodicity = %d, " % (per)
                string2 = "MPI COMM size = %d" % (self.size)
                pl.suptitle(string0 + string1 + string2)
Example #15
def plotAstrometry(dist, mag, snr, brightSnr=100,
                   outputPrefix=""):
    """Plot angular distance between matched sources from different exposures.

    Creates a file containing the plot with a filename beginning with `outputPrefix`.

    Parameters
    ----------
    dist : list or numpy.array
        Separation from reference [mas]
    mag : list or numpy.array
        Mean magnitude of PSF flux
    snr : list or numpy.array
        Median SNR of PSF flux
    brightSnr : float, optional
        Minimum SNR for a star to be considered "bright".
    outputPrefix : str, optional
        Prefix to use for filename of plot file.  Will also be used in plot titles.
        E.g., outputPrefix='Cfht_output_r_' will result in a file named
           'Cfht_output_r_check_astrometry.png'
    """
    bright, = np.where(np.asarray(snr) > brightSnr)

    numMatched = len(dist)
    dist_median = np.median(dist)
    bright_dist_median = np.median(np.asarray(dist)[bright])

    fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(18, 12))

    ax[0].hist(dist, bins=100, color=color['all'],
               histtype='stepfilled', orientation='horizontal')
    ax[0].hist(np.asarray(dist)[bright], bins=100, color=color['bright'],
               histtype='stepfilled', orientation='horizontal',
               label='SNR > %.0f' % brightSnr)

    ax[0].set_ylim([0., 500.])
    ax[0].set_ylabel("Distance [mas]")
    ax[0].set_title("Median : %.1f, %.1f mas" %
                    (bright_dist_median, dist_median),
                    x=0.55, y=0.88)
    plotOutlinedLinesHorizontal(ax[0], dist_median, bright_dist_median)

    ax[1].scatter(snr, dist, s=10, color=color['all'], label='All')
    ax[1].scatter(np.asarray(snr)[bright], np.asarray(dist)[bright], s=10,
                  color=color['bright'],
                  label='SNR > %.0f' % brightSnr)
    ax[1].set_xlabel("SNR")
    ax[1].set_xscale("log")
    ax[1].set_ylim([0., 500.])
    ax[1].set_title("# of matches : %d, %d" % (len(bright), numMatched))
    ax[1].legend(loc='upper left')
    ax[1].axvline(brightSnr, color='red', linewidth=4, linestyle='dashed')
    plotOutlinedLinesHorizontal(ax[1], dist_median, bright_dist_median)

    plt.suptitle("Astrometry Check : %s" % outputPrefix.rstrip('_'), fontsize=30)
    plotPath = outputPrefix+"check_astrometry.png"
    plt.savefig(plotPath, format="png")
    plt.close(fig)
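Example #16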
def plot_generated_toy_batch(X_real, generator_model, discriminator_model, noise_dim, gen_iter, noise_scale=0.5):

    # Generate images
    X_gen = sample_noise(noise_scale, 10000, noise_dim)
    X_gen = generator_model.predict(X_gen)

    # Get some toy data to plot KDE of real data
    data = load_toy(pts_per_mixture=200)
    x = data[:, 0]
    y = data[:, 1]
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5

    # Perform the kernel density estimate
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)

    # Plot the contour
    fig = plt.figure(figsize=(10,10))
    plt.suptitle("Generator iteration %s" % gen_iter, fontweight="bold", fontsize=22)
    ax = fig.gca()
    ax.contourf(xx, yy, f, cmap='Blues', vmin=np.percentile(f,80), vmax=np.max(f), levels=np.linspace(0.25, 0.85, 30))

    # Also plot the contour of the discriminator
    delta = 0.025
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Create mesh
    XX, YY = np.meshgrid(np.arange(xmin, xmax, delta), np.arange(ymin, ymax, delta))
    arr_pos = np.vstack((np.ravel(XX), np.ravel(YY))).T
    # Get Z = predictions
    ZZ = discriminator_model.predict(arr_pos)
    ZZ = ZZ.reshape(XX.shape)
    # Plot contour
    ax.contour(XX, YY, ZZ, cmap="Blues", levels=np.linspace(0.25, 0.85, 10))
    dy, dx = np.gradient(ZZ)
    # Add streamlines
    # plt.streamplot(XX, YY, dx, dy, linewidth=0.5, cmap="magma", density=1, arrowsize=1)
    # Scatter generated data
    plt.scatter(X_gen[:1000, 0], X_gen[:1000, 1], s=20, color="coral", marker="o")

    l_gen = plt.Line2D((0,1),(0,0), color='coral', marker='o', linestyle='', markersize=20)
    l_D = plt.Line2D((0,1),(0,0), color='steelblue', linewidth=3)
    l_real = plt.Rectangle((0, 0), 1, 1, fc="steelblue")

    # Create legend from custom artist/label lists
    # bbox_to_anchor = (0.4, 1)
    ax.legend([l_real, l_D, l_gen], ['Real data KDE', 'Discriminator contour',
                                     'Generated data'], fontsize=18, loc="upper left")
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax + 0.8)
    plt.savefig("../../figures/toy_dataset_iter%s.jpg" % gen_iter)
    plt.clf()
    plt.close()
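# sample_noise() and load_toy() come from the project's mock-data utilities.
# A plausible stand-in for the former (the Gaussian draw and the 1-tuple
# noise_dim are assumptions inferred from the call above):
import numpy as np

def sample_noise(noise_scale, batch_size, noise_dim):
    return np.random.normal(scale=noise_scale, size=(batch_size, noise_dim[0]))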
Example #17
def plot_contours(obj, top_bottom=True):
  '''A function that plots the BRF as an azimuthal projection
  with contours over the TOC and soil.
  Input: rt_layers object, top_bottom - True if only TOC plot, False
  if both TOC and soil.
  Output: contour plot of brf.
  '''
  sun = ((np.pi - obj.sun0[0]) * np.cos(obj.sun0[1] + np.pi), \
      (np.pi - obj.sun0[0]) * np.sin(obj.sun0[1] + np.pi))
  theta = obj.views[:,0]
  x = np.cos(obj.views[:,1]) * theta
  y = np.sin(obj.views[:,1]) * theta
  z = obj.I_top_bottom # * -obj.mu_s
  if top_bottom:
    if np.max(z) > 1.:
      maxz = np.max(z)
    else:
      maxz = 1.
  else:
    maxz = np.max(z[:obj.n // 2])
  minz = 0.  # np.min(z)
  space = np.linspace(minz, maxz, 11)
  x = x[:obj.n // 2]
  y = y[:obj.n // 2]
  zt = z[:obj.n // 2]
  zb = z[obj.n // 2:]
  fig = plt.figure()
  if top_bottom == True:
    plt.subplot(121)
  plt.plot(sun[0], sun[1], 'ro')
  triang = tri.Triangulation(x, y)
  plt.gca().set_aspect('equal')
  plt.tricontourf(triang, zt, space, vmax=maxz, vmin=minz)
  plt.title('TOC BRF')
  plt.ylabel('Y')
  plt.xlabel('X')
  if top_bottom == True:
    plt.subplot(122)
    plt.plot(sun[0], sun[1], 'ro')
    plt.gca().set_aspect('equal')
    plt.tricontourf(triang, zb, space, vmax=maxz, vmin=minz)
    plt.title('Soil Absorption')
    plt.ylabel('Y')
    plt.xlabel('X')
  s = obj.__repr__()
  if top_bottom == True:
    cbaxes = fig.add_axes([0.11,0.1,0.85,0.05])
    plt.suptitle(s,x=0.5,y=0.93)
    plt.colorbar(orientation='horizontal', ticks=space,\
      cax = cbaxes, format='%.3f')
  else:
    plt.suptitle(s,x=0.5,y=0.13)
    plt.colorbar(orientation='horizontal', ticks=space,\
        format='%.3f')
    #plt.tight_layout()
  plt.show()
Example #18
    def test1_LibCF_clt(self):
        """
        Out and back, same grid using mvGenericRegrid -> LibCF, linear
        """
        ro = regrid2.GenericRegrid(self.cltGrid, self.cltGrid, self.clt.dtype,
                                   regridMethod='linear', regridTool='libcf')
        ro.computeWeights()

        ro.apply(self.clt, self.cltInterp)
        ro.apply(self.cltInterp, self.cltInterpInterp)
        nCell = numpy.array(self.clt.shape).prod()
        avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
        avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
        self.assertLess(avgDiffInterp, self.tol)
        self.assertLess(avgDiffInterpInterp, self.tol)

        if self.rank == 0:
            avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
            avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
            self.assertLess(avgDiffInterp, self.tol)
            self.assertLess(avgDiffInterpInterp, self.tol)
    
            if False:
                pl.figure(1)
                pl.subplot(3,2,1)
                pl.pcolor(self.cltGrid[1], self.cltGrid[0], self.clt)
                pl.title('clt')
                pl.colorbar()
                pl.subplot(3,2,2)
                pl.pcolor(self.cltGrid[1], self.cltGrid[0], self.cltInterp,
                        vmin = 0, vmax = 100)
                pl.title('Interp')
                pl.colorbar()
                pl.subplot(3,2,3)
                pl.pcolor(self.cltGrid[1], self.cltGrid[0], self.cltInterpInterp,
                        vmin = 0, vmax = 100)
                pl.title('InterpInterp')
                pl.colorbar()
                pl.subplot(3,2,4)
                pl.pcolor(self.cltGrid[1],self.cltGrid[0], self.clt-self.cltInterp)
                pl.colorbar()
                pl.title('clt-cltInterp')
                pl.subplot(3,2,5)
                pl.pcolor(self.cltGrid[1],self.cltGrid[0], self.clt-self.cltInterpInterp)
                pl.colorbar()
                pl.title('clt-cltInterpInterp')
                pl.subplot(3,2,6)
                pl.pcolor(self.cltGrid[1],self.cltGrid[0], self.cltGrid[1])
                pl.colorbar()
                pl.title('Longitude')
                per = 0
                string0 = "LibCF coordSys = Bilinear, "
                string1 = "periodicity = %d, " % (per)
                string2 = "MPI COMM size = %d" % (self.size)
                pl.suptitle(string0 + string1 + string2)
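Example #19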
def display(data):
    pylab.figure()
    for d in data:
        pylab.scatter([d[0]], [d[1]], c="r", marker='o')

    pylab.suptitle("Generated teddy toy\ndim=(" + str(dim_x) + ", " + str(dim_y) + ") dist=" + str(clust_dist) + " size=" + str(clust_size) + " stddev=" + str(clust_stddev))

    pylab.savefig(filename + ".pdf")
    pylab.savefig(filename + ".eps")
    pylab.savefig(filename + ".svg")
Example #20
def get_offset_center(f, plot=False, interactive=False):
    '''
    Given a fits image, returns the offset in RA, DEC that needs to be applied for the telescope to go
    from the current pointing position to the coordinates of the object specified in the fits file.
    '''
    
    if not os.path.isfile(f):
        print("File %s does not exist! Returning Zero offsets..." % f)
        return -1, 0, 0
    else:
        image = pf.open(f)
        wcs = pywcs.WCS(image[0].header)
        rra, rdec = cc.hour2deg(image[0].header['OBJRA'],image[0].header['OBJDEC'] )
        x, y = np.round(wcs.wcs_sky2pix(rra, rdec, 0), 0)
        pra, pdec = wcs.wcs_pix2sky(np.array([[1293., 1280.]], np.float64), 0)[0]
        dra, ddec = cc.get_offset(pra, pdec, rra, rdec)
            
        xl, yu = np.round(wcs.wcs_sky2pix(rra+90./3600, rdec-90./3600, 0), 0)
        xu, yl = np.round(wcs.wcs_sky2pix(rra-90./3600, rdec+90./3600, 0), 0)

        imageloc = image[0].data.T[xl:xu,yl:yu]

        if imageloc.shape[0]==0 or imageloc.shape[1]==0:
            logger.error("Astrometry has FAILED on this! The object is outside the frame! Resending to the numb astrometric solution")
            print("Pixels are %s %s %s %s" % (xl, xu, yl, yu))
            try:
                code, dra, ddec = get_offset_center_failed_astro(f, plot=plot, interactive=interactive)
                return 2, dra, ddec
            except Exception:
                return -1, 0, 0
        if(plot):
            plt.figure(figsize=(8,8))
            
            zmin, zmax = zscale.zscale(imageloc)

            #print zmin, zmax, imageloc, (xl,xu,yl,yu)
    
            obj = fitsutils.get_par(f, "OBJECT")
            plt.suptitle(obj, fontsize=20)
            plt.imshow(imageloc.T,  extent=(xl[0],xu[0],yl[0],yu[0]), aspect="equal", interpolation="none", origin="lower", vmin=zmin, vmax=zmax)
            plt.plot(1293., 1280., "ws", ms=7, label="Current pointing")
            plt.plot(x, y, "b*", ms=10, label="Target pointing")
            plt.gca().invert_xaxis()
            plt.legend()
            if (interactive):
                plt.show()
            else:
                plt.savefig(os.path.join(os.path.dirname(f).replace("raw", "phot"), os.path.basename(f).replace(".fits", "_a.png")))
            plt.clf()


        return 0, dra, ddec
Example #21
def plotPA1(pa1, outputPrefix=""):
    """Plot the results of calculating the LSST SRC requirement PA1.

    Creates a file containing the plot with a filename beginning with `outputPrefix`.

    Parameters
    ----------
    pa1 : pipeBase.Struct
        Must contain:
        rms, iqr, magMean, magDiffs
        rmsUnits, iqrUnits, magDiffsUnits
    outputPrefix : str, optional
        Prefix to use for filename of plot file.  Will also be used in plot titles.
        E.g., outputPrefix='Cfht_output_r_' will result in a file named
           'Cfht_output_r_AM1_D_5_arcmin_17.0-21.5.png'
        for an AMx.name=='AM1' and AMx.magRange==[17, 21.5]
    """
    diffRange = (-100, +100)

    fig = plt.figure(figsize=(18, 12))
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.scatter(pa1.magMean, pa1.magDiffs, s=10, color=color['bright'], linewidth=0)
    ax1.axhline(+pa1.rms, color=color['rms'], linewidth=3)
    ax1.axhline(-pa1.rms, color=color['rms'], linewidth=3)
    ax1.axhline(+pa1.iqr, color=color['iqr'], linewidth=3)
    ax1.axhline(-pa1.iqr, color=color['iqr'], linewidth=3)

    ax2 = fig.add_subplot(1, 2, 2, sharey=ax1)
    ax2.hist(pa1.magDiffs, bins=25, range=diffRange,
             orientation='horizontal', histtype='stepfilled',
             density=True, color=color['bright'])
    ax2.set_xlabel("relative # / bin")

    yv = np.linspace(diffRange[0], diffRange[1], 100)
    ax2.plot(scipy.stats.norm.pdf(yv, scale=pa1.rms), yv,
             marker='', linestyle='-', linewidth=3, color=color['rms'],
             label="PA1(RMS) = %4.2f %s" % (pa1.rms, pa1.rmsUnits))
    ax2.plot(scipy.stats.norm.pdf(yv, scale=pa1.iqr), yv,
             marker='', linestyle='-', linewidth=3, color=color['iqr'],
             label="PA1(IQR) = %4.2f %s" % (pa1.iqr, pa1.iqrUnits))
    ax2.set_ylim(*diffRange)
    ax2.legend()
#    ax1.set_ylabel(u"12-pixel aperture magnitude diff (mmag)")
#    ax1.set_xlabel(u"12-pixel aperture magnitude")
    ax1.set_xlabel("psf magnitude")
    ax1.set_ylabel("psf magnitude diff (%s)" % pa1.magDiffsUnits)
    for label in ax2.get_yticklabels():
        label.set_visible(False)

    plt.suptitle("PA1: %s" % outputPrefix.rstrip('_'))
    plotPath = "%s%s" % (outputPrefix, "PA1.png")
    plt.savefig(plotPath, format="png")
    plt.close(fig)
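Example #22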
def Ploting_Facts(P):
    F_2, F_3 = Run_Facts(P)
    plt.figure()
    plt.plot(F_3, F_2, 'r.')
    plt.plot(25, 75, 'ko', label="Q")
    plt.xlabel('Fact #3')
    plt.ylabel('Fact #2')
    plt.suptitle('Classical Behavior of Incommunicado Students', fontsize=14)
    plt.legend(loc=2)
    plt.grid(True)
    plt.savefig('Estudiantes_F3vF2_P=' + str(P) + '.png')
    plt.show()
Example #23
def guess(velocity, thetaDeg, timeStep=0.01):
    """
    Initial calcultaion / integration of the function using guessed
    initial conditions. The function will also be plotted to help
    visualise the scenario.
    velocity in meters per second, theta in degrees, timeStep in
    seconds. The smaller the timeStep the more accurate the function.
    """
    # Calculate / fill all the variables
    theta = radians(thetaDeg)       # Change the input angle into radians
    trajGuessX = []                 # Create an empty list
    trajGuessX.append(trajXY[0])    # Initialise the list
    trajGuessY = []                 # Create an empty list
    trajGuessY.append(trajXY[1])    # Initialise the list
    accelX = ((rho*drag*pow((velocity*cos(theta)),2)*pi*pow(radius,2))
              /(2*mass))            # Calculate the x-acceleration
    accelY = grav                   # Assign gravity to y-acceleration
    velocityX = velocity*cos(theta) # Calculate the x-velocity component
    velocityY = velocity*sin(theta) # Calculate the y-velocity component

    # Execute the mathematics and build a list of the coordinates.
    # While the bread is in the air perform calculations.
    # As it steps through it updates the velocities and accelerations
    # so that the bread acceleration and velocity slows.
    while trajGuessY[-1] > 0:
        # New velocity equals old velocity minus the updated acceleration
        velocityX = velocityX - accelX*timeStep
        # Change the acceleraion to use the last calculated velocity
        accelX = ((rho*drag*pow(velocityX,2)*pi*pow(radius,2))
                   /(2*mass))
        # New velocity equals old velocity minus the updated acceleration
        velocityY = velocityY - accelY*timeStep
        # Positions equal last position (in the list) + distance moved
        x = trajGuessX[-1] + velocityX*timeStep
        y = trajGuessY[-1] + velocityY*timeStep
        trajGuessX.append(x)    # Append the x-coord to the list
        trajGuessY.append(y)    # Append the y-coord to the list

    # Plot the graphs of the trajectory and the physical environment
    env = plt.plot(physEnvX, physEnvY, 'b', label='Environment')
    traj = plt.plot(trajGuessX, trajGuessY, 'r--', label='Trajectory')
    plt.grid(visible=True, which='major', color='k', linestyle='-')
    plt.suptitle('Bread Slingshot') # Set the graph title
    plt.legend(loc='upper right')   # Set the legend location
    plt.ylabel('Height (m)')        # Set the y-axis label
    plt.xlabel('Distance (m)')      # Set the x-axis label
    plt.ylim([-5,50])               # Set the y-axis limits
    plt.xlim([-5,120])              # Set the x-axis limits
    plt.show()                      # Make sure the graph appears

    print('The bread lands at: {0:.3f}, {1:.3f}'.format(
        trajGuessX[-1], trajGuessY[-1]))
    return
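# guess() reads several module-level constants; one illustrative setup (all
# values below are assumptions chosen only to make the sketch runnable):
from math import radians, cos, sin, pi
import matplotlib.pyplot as plt

rho = 1.225      # air density [kg/m^3]
drag = 0.47      # drag coefficient of a sphere
radius = 0.05    # projectile radius [m]
mass = 0.5       # projectile mass [kg]
grav = 9.81      # gravitational acceleration [m/s^2]
trajXY = (0.0, 10.0)                           # launch point [m]
physEnvX, physEnvY = [0.0, 120.0], [0.0, 0.0]  # flat ground

guess(30.0, 45.0)  # 30 m/s at 45 degrees
Example #24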
def plot_corr_mat_dcm(G, sum_rho_avg, D_average, no_runs):
	""" Plot the sum of the correlation matrices, 
		averaged over a number of runs
	"""
	plt.figure()
	gs = mpl.gridspec.GridSpec(1, 3)
	plot_graph(G, plt.subplot(gs[:, 0]))
	plot_sum_corr_mat(sum_rho_avg, plt.subplot(gs[:, 1]))
	plot_time_to_sync(D_average, plt.subplot(gs[:, 2]))
	plt.suptitle('Average over %s runs' % (no_runs))
	plt.savefig('images/corrMat_dcm.png')
	plt.savefig('images/corrMat_dcm.pdf')
Example #25
	def plot_colat_slice(self, component, colat, valmin, valmax, iteration=0, verbose=True):

		#- Some initialisations. ------------------------------------------------------------------
		
		colat = np.pi * colat / 180.0

		n_procs = self.setup["procs"]["px"] * self.setup["procs"]["py"] * self.setup["procs"]["pz"]

		vmax = float("-inf")
		vmin = float("inf")

		fig, ax = plt.subplots()

		#- Loop over processor boxes and check if colat falls within the volume. ------------------
		for p in range(n_procs):

			if (colat >= self.theta[p,:].min()) & (colat <= self.theta[p,:].max()):

				#- Read this field and make lats & lons. ------------------------------------------
				field = self.read_single_box(component,p,iteration)

				r, lon = np.meshgrid(self.z[p,:], self.phi[p,:])

				x = r * np.cos(lon)
				y = r * np.sin(lon)

				#- Find the colat index and plot for this one box. --------------------------------
				idx = np.argmin(np.abs(self.theta[p,:] - colat))

				colat_effective = self.theta[p,idx]*180.0/np.pi

				#- Find min and max values. -------------------------------------------------------

				vmax = max(vmax, field[idx,:,:].max())
				vmin = min(vmin, field[idx,:,:].min())

				#- Make a nice colourmap and plot. ------------------------------------------------
				my_colormap=cm.make_colormap({0.0:[0.1,0.0,0.0], 0.2:[0.8,0.0,0.0], 0.3:[1.0,0.7,0.0],0.48:[0.92,0.92,0.92], 0.5:[0.92,0.92,0.92], 0.52:[0.92,0.92,0.92], 0.7:[0.0,0.6,0.7], 0.8:[0.0,0.0,0.8], 1.0:[0.0,0.0,0.1]})

				cax = ax.pcolor(x, y, field[idx,:,:], cmap=my_colormap, vmin=valmin,vmax=valmax)


		#- Add colobar and title. ------------------------------------------------------------------
		cbar = fig.colorbar(cax)
		if component in UNIT_DICT:
			cbar.set_label(UNIT_DICT[component], fontsize="x-large", rotation=0)
	
		plt.suptitle("Vertical slice of %s at %i degree colatitude" % (component, colat_effective), size="large")

		plt.axis('equal')
		plt.show()
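Example #26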
def plot_tang(X, Y, Z, title, npts=None):

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')

    # Plot the surface.
    surf = ax.plot_surface(X, Y, Z, cmap="viridis",
                           linewidth=0, antialiased=False)

    # Customize the z axis.
    ax.set_zlim(-100, 250)
    ax.zaxis.set_tick_params(pad=8)
    ax.zaxis.set_major_locator(LinearLocator(5))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)

    if "teacher" in title:
        plt.suptitle("Teacher model")

    if "student" in title and "sobolev" not in title:
        assert (npts is not None)
        plt.suptitle("Student model %s training pts" % npts)

    if "sobolev" in title:
        assert (npts is not None)
        plt.suptitle("Student model %s training pts + Sobolev" % npts)

    else:
        plt.suptitle("Styblinski Tang function")

    plt.savefig(title)
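Example #27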
 def __plotRMaxSearchResults(self):
     """
     Plot results of Rmax optimization procedure and best fit of the experimental data
     """
     cm = matplotlib.cm.get_cmap('cool')
     #pylab.rcParams['figure.figsize'] = 6, 5
     for itr in range(self.__iNbGnomSeries):
         rMaxList = []
         fitQualityList = []
         for idx in range(self.__rMaxDivide + 1):
             rMaxList.append(self.__xsGnomPlugin[(itr,idx)].getDataInput().getRMax().getValue())
             fitQualityList.append(self.__xsGnomPlugin[(itr,idx)].getDataOutput().getFitQuality().getValue())
         
         #pylab.plot(fitQualityList, marker='o', color=cm(1.0*itr/(self.__iNbGnomSeries-1)), markersize=5,  label="Iteration # %d" % itr)
         pylab.figure(self.__iNbGnomSeries+1, figsize=(6,5))
         pylab.plot(rMaxList, fitQualityList, linestyle='None', marker='o', color=cm(1.0*(itr+1)/self.__iNbGnomSeries), markersize=5,  label="Iteration # %d" % itr)
         pylab.figure(itr+1, figsize=(6,5))
         pylab.plot(rMaxList, fitQualityList, linestyle='-', marker='o', markersize=5,  label="Iteration # %d" % itr)
         pylab.xlabel(u"Rmax / \u00c5")
         pylab.ylabel('Fit quality')
         pylab.legend(loc=4)
         pylab.savefig(os.path.join(self.getWorkingDirectory(),"rMaxSearchResults-%d.png" % (itr+1)))
         
     pylab.figure(self.__iNbGnomSeries+1, figsize=(6,5))
     pylab.xlabel(u"Rmax / \u00c5")
     pylab.ylabel('Fit quality')
     pylab.suptitle("Optimized value of RMax : %3.2f   Maximal fit quality : %1.3f" % (self.__edPluginExecGnom.getDataInput().getRMax().getValue(),self.__edPluginExecGnom.getDataOutput().getFitQuality().getValue()))
     pylab.legend(loc=4)
     pylab.savefig(os.path.join(self.getWorkingDirectory(),"rMaxSearchResults.png"))
     pylab.clf()
     
     
     _listFitQ = [tmp.getValue() for tmp in self.__edPluginExecGnom.getDataOutput().getScatteringFitQ()]
     _listFitValues = [tmp.getValue() for tmp in self.__edPluginExecGnom.getDataOutput().getScatteringFitValues()]
     _listExpQ = [tmp.getValue() for tmp in self.__edPluginExecGnom.getDataInput().getExperimentalDataQ()]
     _listExpValues = [tmp.getValue() for tmp in self.__edPluginExecGnom.getDataInput().getExperimentalDataValues()]
      
     pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5,  label="Experimental Data")
     pylab.semilogy(_listFitQ, _listFitValues, label="Fitting curve")
     pylab.xlabel(u"q / \u00c5$^{-1}$")
     pylab.ylabel('I(q)')
     pylab.suptitle("RMax : %3.2f   Fit quality : %1.3f" % (self.__edPluginExecGnom.getDataInput().getRMax().getValue(),self.__edPluginExecGnom.getDataOutput().getFitQuality().getValue()))
     pylab.legend()
     pylab.savefig(os.path.join(self.getWorkingDirectory(),"gnomFittingResults.png"))
     pylab.clf()
     
     if self.__bPlotFit:
         for gnomJob in self.__xsGnomPlugin.itervalues():
             gnomJob.plotFittingResults()  
         self.__edPluginExecGnom.plotFittingResults()
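Example #28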
def display(data, labels):
    i = 0
    pylab.figure()
    for d in data:
        if labels[i] == "inside":
            pylab.scatter([d[0]], [d[1]], c="r", marker='^')
        else:
            pylab.scatter([d[0]], [d[1]], c="b", marker='o')
        i = i + 1
    pylab.suptitle("Generated donut (samples=" + str(samples) + " noise=" + str(noise) + ")")
    pylab.axis([-max_xy - 1, max_xy + 1, -max_xy - 1, max_xy + 1])
    pylab.savefig("images/" + filename + ".pdf")
    pylab.savefig("images/" + filename + ".eps")
    pylab.savefig("images/" + filename + ".svg")
Example #29
def byDayOfWeek(trainDF):
    plt.figure(figsize=(20, 20))
    groups = trainDF.groupby('DayOfWeek')
    ii = 1
    key_daysofweek = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
    for name, group in sorted(groups, key=lambda x: key_daysofweek.index(x[0])):
        plt.subplot(2, 4, ii)
        histo, xedges, yedges = np.histogram2d(np.array(group.X), np.array(group.Y), bins=(100, 100))
        myextent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        plt.imshow(histo.T, origin='lower', extent=myextent, interpolation='nearest', aspect='auto', norm=LogNorm())
        plt.title(name, fontsize=30)
        ii+=1
    plt.suptitle('All Crime Categories on Each Day of Week', fontsize=40)
    plt.show()
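# Usage sketch with synthetic data shaped like the crime dataframe the column
# names imply (X/Y map coordinates plus a DayOfWeek string per record):
import numpy as np
import pandas

days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
        'Thursday', 'Friday', 'Saturday']
toyDF = pandas.DataFrame({
    'X': np.random.uniform(-122.51, -122.36, 7000),
    'Y': np.random.uniform(37.70, 37.83, 7000),
    'DayOfWeek': np.repeat(days, 1000),
})
byDayOfWeek(toyDF)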
Example #30
def plot(res):
	for name in res.series:
		fig = plt.figure()
		ax = fig.add_subplot(111)
		plt.plot( res.series[name]['ys'] )
		plt.plot( res.series[name]['yhat'] )
		plt.suptitle(name)
		y = 0.998
		for m in ['mape','nll', 'mae', 'rmse', 'r2'] :
			metric = "%s: %.3f" % (str(m).upper(), res.metrics[name][m])
			plt.text(0.998,y, metric, horizontalalignment='right',  verticalalignment='top', transform=ax.transAxes)
			y -= 0.03
		plt.tight_layout()

	plt.show()
Example #31
def Rireki(gt,
           pred,
           path='none',
           title="none",
           label="none",
           isShare=False,
           isSeparate=True,
           isResearch=False,
           iseach=False):
    """
    Args
        gt: gt eq. (best year). [1400,3] 
    """

    if isResearch:
        if iseach:
            dists = myData.eachMAEyear(gt, pred)
        else:
            # degree of similarity
            dists = myData.MAEyear(gt, pred)

        title = dists

    sns.set_style("dark")
    # share gt & pred
    if isShare:
        fig, figInds = plt.subplots(nrows=3, sharex=True)
        for figInd in np.arange(len(figInds)):
            figInds[figInd].plot(np.arange(pred.shape[0]),
                                 pred[:, figInd],
                                 color="skyblue")
            figInds[figInd].plot(np.arange(gt.shape[0]),
                                 gt[:, figInd],
                                 color="coral")

    #pdb.set_trace()
    if isSeparate:
        colors = ["coral", "skyblue", "coral", "skyblue", "coral", "skyblue"]

        # scaling vars.
        predV, gtV = np.zeros([1400, 3]), np.zeros([1400, 3])

        # del first year
        pred_nk = [s for s in pred[ntI].tolist() if s != 0]
        pred_tnk = [s for s in pred[tntI].tolist() if s != 0]
        pred_tk = [s for s in pred[ttI].tolist() if s != 0]

        gt_tk = [s for s in gt[ttI].tolist() if s != 0]

        predV[pred_nk, ntI] = 5
        predV[pred_tnk, tntI] = 5
        predV[pred_tk, ttI] = 5

        gtV[gt[ntI].tolist(), ntI] = 5
        gtV[gt[tntI].tolist(), tntI] = 5
        gtV[gt_tk, ttI] = 5
        #pdb.set_trace()

        # [1400,3]
        plot_data = [
            gtV[:, ntI], predV[:, ntI], gtV[:, tntI], predV[:, tntI],
            gtV[:, ttI], predV[:, ttI]
        ]
        # non-scaled variant [1400,3]
        #plot_data = [gt[:,ntI],pred[:,ntI],gt[:,tntI],pred[:,tntI],gt[:,ttI],pred[:,ttI]]

        fig, axes = plt.subplots(nrows=6, sharex="col")
        for row, (color, data) in enumerate(zip(colors, plot_data)):
            axes[row].plot(np.arange(1400), data, color=color)

    plt.suptitle(f"{title}", fontsize=8)

    myData.isDirectory(path)
    plt.savefig(os.path.join(path, f"{label}.png"))
    plt.close()

    if isResearch:
        return int(np.sum(dists))


# -----------------------------------------------------------------------------
Example #32
    7: {
        'matrix': k_neighbors_conf_matrix,
        'title': 'K Nearest Neighbors',
    },
    8: {
        'matrix': logistic_reg_conf_matrix,
        'title': 'Logistic Regression',
        #                   }
        #                9: {
        #                    'matrix': dumb_conf_matrix,
        #                    'title': 'Dumb',
    },
}

fig, ax = plt.subplots(figsize=(16, 12))
plt.suptitle('Confusion Matrix of Various Classifiers')
for ii, values in conf_matrix.items():
    matrix = values['matrix']
    title = values['title']
    plt.subplot(3, 3, ii)  # starts from 1
    plt.title(title)
    sns.heatmap(matrix, annot=True, fmt='')
plt.savefig('Confusion_Matrix_VC_5c.png')
plt.show()

#==============================================================================
"""------------------------ c4 == c5  ----------------------------"""
#==============================================================================

g1 = df[df.classe == "c1"]
g2 = df[df.classe == "c2"]
Example #33
        with PdfPages(save_name) as pdf_pages:
            for feat, row in fstatistics.iterrows():
                f, ax = plt.subplots(figsize=(15, 6))
                sns.boxplot(x='strain',
                            y=feat,
                            data=feat_means_df,
                            order=strain_order
                            )
                sns.swarmplot(x='strain', y=feat, data=feat_means_df, color=".3", linewidth=0, order=strain_order)
                ax.xaxis.grid(True)
                ax.set(ylabel="")
                sns.despine(trim=True, left=True)
                
                args = (feat, int(row['df1']), int(row['df2']), row['fstat'], row['pvalue'])
                strT = '{} | f({},{})={:.3} | (p-value {:.3})'.format(*args)
                plt.suptitle(strT)
                plt.xlabel('')
                
                pdf_pages.savefig()
                plt.close()
        #%%
        dd = {x:ii for ii,x in enumerate(fstatistics.index)}
        multi_comparisons['m'] = multi_comparisons['feat'].map(dd)
        multi_comparisons = multi_comparisons.sort_values(by='m')
        del multi_comparisons['m']
        
        save_name = os.path.join(save_dir, 'pair_comparisons.csv')
        multi_comparisons.to_csv(save_name, index=False)
#%%      

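Example #34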
predicted_id = np.argmax(predicted_batch, axis=-1)
predicted_label_batch = class_names[predicted_id]

"""**Plotting predictions**"""

label_id = np.argmax(label_batch, axis=-1)

plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
  plt.subplot(6,5,n+1)
  plt.imshow(test_image_batch[n])
  color = "green" if predicted_id[n] == label_id[n] else "red"
  plt.title(predicted_label_batch[n].title(), color=color)
  plt.axis('off')
_ = plt.suptitle("Model predictions (green: correct, red: incorrect)")

"""## Random Image Predictions"""

test_human = tf.keras.utils.get_file('image.jpg','https://www.cheatsheet.com/wp-content/uploads/2020/02/Zendaya-2-1024x682.jpg')
test_human = Image.open(test_human).resize(IMAGE_SHAPE)
test_human

# convert img to np array
test_human = np.array(test_human)/255.0
test_human.shape
# call model to make prediction
result = model.predict(test_human[np.newaxis, ...])
predicted_class = np.argmax(result[0], axis=-1)
# predicted_class
predicted_class_name = class_names[predicted_class]
Example #35
def main(r = 1.0, a = 0.1, z = 1.5, e = 0.75):
    """
    Calculates the population density at each time step using the discrete-time
    version of the Lotka-Volterra model
    Plots the results in two graphs saved to ../results/. 
    First, a change in resource and consumer density over time, and second, the 
    change in population density of consumer with respect to the change in 
    population density of resource
    
    Parameters:
        r (float): intrinsic (per-capita) growth rate of the resource 
                   population (time ^ -1)
        a (float): per-capita "search rate" for the resource
                   (area x time ^ -1) multiplied by its attack success
                   probability, which determines the encounter and 
                   consumption rate of the consumer on the resource
        z (float): mortality rate (time ^ -1)
        e (float): consumer's efficiency (a fraction) in converting 
                   resource to consumer biomass
    """

    # define time vector, integrate from time point 0 to 15, using 1000
    # sub-divisions of time
    # note that units of time are arbitrary here
    t = np.linspace(0, 15, 1000)

    # set initial conditions for the two populations (10 resources and 5
    # consumers per unit area)
    R0 = 10
    C0 = 5

    # set K, which is the carrying capacity
    K = 33

    # preallocate the output array
    popu = np.zeros([len(t),2])
    
    # discrete time version of LV model
    for i in range(len(t)):
        # Looping through both columns at the same time
        epsilon = np.random.normal(0, 0.05)
        Rn = R0 * (1 + (r + epsilon) * (1 - R0 / K) - a * C0)
        Cn = C0 * (1 - z + e * a * R0)
        R0 = Rn
        C0 = Cn
        popu[i, :] = [Rn, Cn]
    
    # visualize with matplotlib
    f1 = p.figure()
    p.plot(t, popu[:,0], 'g-', label = "Resource density") # plot
    p.plot(t, popu[:,1], 'b-', label = "Consumer density")
    p.grid()
    p.legend(loc = "best")
    p.xlabel("Time")
    p.ylabel("Population density")
    p.suptitle("Consumer-Resource population dynamics")
    p.title("r = %.2f, a = %.2f, z = %.2f, e = %.2f" %(r, a, z, e),
        fontsize = 8)
    # p.show()
    f1.savefig("../results/LV_model4.pdf") # save figure

    # plot of Consumer density against Resource density
    f2 = p.figure()
    p.plot(popu[:,0], popu[:,1], 'r-')
    p.grid()
    p.xlabel("Resource density")
    p.ylabel("Consumer density")
    p.suptitle("Consumer-Resource population dynamics")
    p.title("r = %.2f, a = %.2f, z = %.2f, e = %.2f" %(r, a, z, e),
        fontsize = 8)
    # p.show()
    f2.savefig("../results/LV_model4-1.pdf")
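# Example invocation (the defaults mirror the signature above; the output
# paths assume a ../results/ directory exists):
if __name__ == "__main__":
    main(r=1.0, a=0.1, z=1.5, e=0.75)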
Example #36
	err_sup = abs(Epercentile1 - Emedian)
	error = round((err_inf + err_sup) / 2.0,5)


	EEmedian.append(Emedian)
	ee_error.append(error)

	#=========== write ellipticity curve to file =================
	outfile.write(str(T)+"\t"+str(Emedian)+"\t"+str(error)+"\n")

	plt.scatter(nn, EE, color="C0",s=5)
	plt.errorbar(nn, EE, yerr=ssd, fmt=" ", color="C0",alpha=0.2)
	plt.axhline(Emedian, color="red", label="median")
	plt.axhline(Epercentile1, color="blue", label="percentile 15.9")
	plt.axhline(Epercentile2, color="blue", label= "percentile 84.1")
	plt.suptitle("Station: "+station+ " Period: " + str(T)+ "s")
	plt.legend()
	plt.ylabel("Log(E)")
	plt.xlabel("# Event")

	plt.ylim(-1,1)

	plt.savefig("../Results/all_measurements_T"+str(T)+"s.png")
	plt.close()



outfile.close()


Ejemplo n.º 37
0
for j, pec in enumerate(BX.gs.pecfs):
    ax1.plot(BX.gs.R, BX.gs.VT[j, j] * evcm, color='k', label=pec)

# adiabatic potential energy curves
# X.us.diabatic2adiabatic()
for j, pec in enumerate(BX.us.pecfs):
    if BX.us.pecfs[j][-7] == 'S':  # Sigma states
        ax1.plot(BX.us.R,
                 BX.us.VT[j, j] * evcm,
                 'C0',
                 label=r'$^{3}\Sigma_{u}^{-}$')
        # ax1.plot(X.us.R, X.us.AT[j, j]*evcm, 'g', lw=2, label='adiabatic')
    else:
        ax1.plot(BX.us.R, BX.us.VT[j, j] * evcm, 'C1--', label=r'$^{3}\Pi$')

ax1.annotate(r'$X{}^{3}\Sigma_{g}^{-}$', (0.6, 55000), color='k')
ax1.annotate(r'$B{}^{3}\Sigma_{u}^{-}$', (1.7, 55000), color='C0')
ax1.annotate(r'$E{}^{3}\Sigma_{u}^{-}$', (0.9, 72000), color='C0')
ax1.annotate(r'${}^{3}\Pi$', (1.34, 65000), color='C1')

ax1.set_title("diabatic PECs", fontsize=12)
ax1.axis(xmin=0.5, xmax=2, ymin=40000 + BX.gs.cm, ymax=100000 + BX.gs.cm)
ax1.set_xlabel(r"R ($\AA$)")
# ax1.set_ylabel("V (eV)")
ax1.axes.get_yaxis().set_visible(False)

plt.suptitle('example_O2xs.py', fontsize=12)

plt.savefig('output/example_O2xs.png', dpi=75)
plt.show()
Ejemplo n.º 38
0
def plot_target_analysis(predicted_logits,
                         figures_path,
                         target,
                         dataset,
                         predicted_logits2=None,
                         model_name=None,
                         ref_name=None,
                         main_only=False):
    def _get_data(file_name, path):
        fname = os.path.join(path, '%s.csv' % file_name)
        if not os.path.isfile(fname):
            return
        arr = np_read_csv(fname)
        return arr

    generic_data_path = os.path.join(PATHS.periscope, 'data', dataset)
    target_cm = Protein(target[0:4], target[4]).cm
    target_cm *= _get_mask(target_cm.shape[0])
    aligned_reference = _get_data(target, path=os.path.join(generic_data_path, 'reference'))
    aligned_reference = target_cm if aligned_reference is None else aligned_reference
    np.fill_diagonal(aligned_reference, 0)

    modeller = _get_data(target, path=os.path.join(generic_data_path, 'modeller'))
    modeller = np.zeros_like(target_cm) if modeller is None else modeller

    fig_size = (12, 8) if main_only else (10, 10)
    plt.figure(figsize=fig_size, clear=True)

    if main_only:
        ax1 = plt.subplot(121)

    else:
        ax1 = plt.subplot(232)
        ax3 = plt.subplot(234)
        ax4 = plt.subplot(236)

    target_cm_unmasked = np.array(target_cm > 0, dtype=np.int32)

    def _get_accuracy(prediction_mat, target_cm):
        prediction_mat_triu = np.clip(prediction_mat[np.triu_indices(
            prediction_mat.shape[0])],
                                      a_min=0,
                                      a_max=1)
        target_cm_triu = target_cm[np.triu_indices(target_cm.shape[0])]
        # true_predictions = np.logical_and(target_cm_triu > 0,
        #                                   prediction_mat_triu > 0)
        #
        # acc = np.round(true_predictions.sum() / prediction_mat_triu.sum(), 2)

        # ppv-style accuracy: mean native-contact value over predicted contacts
        acc = np.round(np.mean(target_cm_triu[prediction_mat_triu > 0]), 2)
        return str(acc)

    def _get_fpr_tpr_(prediction_mat, target_cm):
        prediction_mat_triu = prediction_mat[np.triu_indices(
            prediction_mat.shape[0])]
        target_cm_triu = target_cm[np.triu_indices(target_cm.shape[0])]

        true_positive = np.logical_and(target_cm_triu > 0,
                                       prediction_mat_triu > 0)
        positive = target_cm_triu > 0
        tpr = true_positive.sum() / positive.sum()

        false_positive = np.logical_and(target_cm_triu == 0,
                                        prediction_mat_triu > 0)
        negative = target_cm_triu == 0
        fpr = false_positive.sum() / negative.sum()

        return fpr, tpr

    l = aligned_reference.shape[0]

    def _get_cm(logits, l):
        quant = _get_quant(l, 2)

        logits *= _get_mask(l)
        thres = np.quantile(logits[np.triu_indices(l)], quant)
        cm = np.where(logits >= thres, 1, 0)
        return cm, thres

    def _color_predicted_cm(pred):

        pred_c = pred.copy()

        upper_triu_mask = np.zeros_like(pred_c, dtype=bool)  # np.bool is removed in modern NumPy
        upper_triu_mask[np.triu_indices(l)] = True

        native_contacts = target_cm > 0

        contact_predictions = np.logical_and(pred_c, upper_triu_mask)

        correct_predictions = np.logical_and(target_cm > 0,
                                             contact_predictions)
        incorrect_predictions = np.logical_and(target_cm == 0,
                                               contact_predictions)

        not_in_ref = np.logical_and(aligned_reference == 0,
                                    correct_predictions)

        not_in_ref_or_modeller = np.logical_and(not_in_ref, modeller == 0)
        pred_c[native_contacts] = 1
        pred_c[correct_predictions] = 2
        pred_c[incorrect_predictions] = 3
        pred_c[not_in_ref] = 4
        pred_c[not_in_ref_or_modeller] = 5

        pred_c[_get_mask(l) == 0] = -1

        return pred_c

    predicted_cm, contact_threshold = _get_cm(predicted_logits, l)
    predicted_cm[np.tril_indices(l)] = aligned_reference[np.tril_indices(l)]

    predicted_cm = _color_predicted_cm(predicted_cm)
    prediction_acc = _get_accuracy(predicted_cm, target_cm)

    if predicted_logits2 is not None:
        predicted_cm2, contact_threshold2 = _get_cm(predicted_logits2, l)
        predicted_cm2 = _color_predicted_cm(predicted_cm2)

        prediction_acc2 = _get_accuracy(predicted_cm2, target_cm)
        predicted_cm[np.tril_indices(l)] = predicted_cm2.T[np.tril_indices(l)]

    # aligned_acc = _get_accuracy(aligned_reference, target_cm)

    def _get_plot_colors(arr):
        colors = {
            -1: 'lightgrey',
            0: "white",
            1: "grey",
            2: "darkblue",
            3: "lime",
            4: "red",
            5: "orange"
        }  # use hex colors here, if desired.
        colors_list = [colors[v] for v in np.unique(arr)]
        cmap = ListedColormap(colors_list)
        return cmap

    beyond_ref_contacts = np.array(predicted_cm[np.triu_indices(l)] > 3).sum() / np.array(
        predicted_cm[np.triu_indices(l)] > 1).sum()
    beyond_ref_contacts *= 100
    beyond_ref_contacts = np.round(beyond_ref_contacts, 2)

    beyond_ref_contacts_2 = np.array(predicted_cm[np.tril_indices(l)] > 3).sum() / np.array(
        predicted_cm[np.tril_indices(l)] > 1).sum()
    beyond_ref_contacts_2 *= 100
    beyond_ref_contacts_2 = np.round(beyond_ref_contacts_2, 2)

    ax1.matshow(predicted_cm, cmap=_get_plot_colors(predicted_cm), origin='lower')
    raw_msg = _get_seq_ref_msg(target)
    seq_dist = _get_seq_ref_dist(target)

    if raw_msg is not None:

        target_msg, ref_msg = raw_msg.split('\n')[0], raw_msg.split('\n')[1]
        n = 150
        target_msg_lines = [target_msg[i:i + n] for i in range(0, len(target_msg), n)]
        target_msg_lines[-1] += ' ' * (n - len(target_msg_lines[-1]))
        ref_msg_lines = [ref_msg[i:i + n] for i in range(0, len(ref_msg), n)]
        ref_msg_lines[-1] += ' ' * (n - len(ref_msg_lines[-1]))

        msg = [target_msg_lines[i] + '\n' + ref_msg_lines[i] for i in range(len(target_msg_lines))]

        split_sign = '  ' * (n // 2)

        msg = f"\n{split_sign}\n".join(msg)

    else:
        msg = ''

    title = f'Prediction for {target}\n\nSequence identity: {np.round(1 - seq_dist, 2)}\n\n{msg}'
    # title = f'Prediction for {target}'
    plt.suptitle(title, fontfamily='monospace', fontsize=6)

    prediction_name = 'prediction' if model_name is None else model_name
    reference_name = 'structure' if ref_name is None else ref_name

    ax1.text(0.05,
             0.90,
             reference_name,
             fontsize=8,
             color='black',
             transform=ax1.transAxes)
    ax1.text(0.70,
             0.05,
             prediction_name,
             fontsize=8,
             color='black',
             transform=ax1.transAxes)

    data = [[prediction_acc, np.round(contact_threshold, 2), beyond_ref_contacts]]
    table = pd.DataFrame(data, columns=['ppv', 'threshold', '%  new'], index=[model_name]).transpose()

    if ref_name is not None:
        data = [[prediction_acc, np.round(contact_threshold, 2), beyond_ref_contacts],
                [prediction_acc2, np.round(contact_threshold2, 2), beyond_ref_contacts_2]]
        table = pd.DataFrame(data, columns=['ppv', 'threshold', '%  new'], index=[model_name, ref_name]).transpose()
    cell_text = []
    for row in range(len(table)):
        cell_text.append(table.iloc[row])

    values = [1, 2, 3, 4, 5]
    colors = {1: 'grey', 2: 'darkblue', 3: 'lime', 4: 'red', 5: 'orange'}
    values_map = {
        1: 'native contact',
        2: 'contact identified',
        3: 'contact misidentified',
        4: 'not in reference',
        5: 'not in reference or modeller'
    }
    # create a patch (proxy artist) for every color
    patches = [
        mpatches.Patch(color=colors[i], label="{l}".format(l=values_map[i]))
        for i in values
    ]
    # put those patched as legend-handles into the legend
    ax1.legend(handles=patches,
               bbox_to_anchor=(1.05, 1),
               loc='upper left',
               borderaxespad=0.)
    ax1.set_xticks([], [])
    ax1.set_yticks([], [])
    the_table = ax1.table(cellText=cell_text, colLabels=table.columns, rowLabels=table.index)
    the_table.auto_set_font_size(False)
    the_table.set_fontsize(10)
    the_table.scale(1, 2)

    if main_only:
        plt.savefig(os.path.join(figures_path, '%s_analysis.png' % target))
        # plt.show(False)
        plt.close('all')
        return
    col_ref = _color_predicted_cm(aligned_reference)
    # ax2.matshow(col_ref, cmap=_get_plot_colors(col_ref), origin='lower')
    #
    # ax2.title.set_text('Aligned Reference ppv: %s' % aligned_acc)
    # ax2.text(0.80,
    #          0.05,
    #          'reference',
    #          fontsize=6,
    #          color='black',
    #          transform=ax2.transAxes)
    # ax2.set_xticks([], [])
    # ax2.set_yticks([], [])

    modeller = _color_predicted_cm(modeller)
    modeller[np.tril_indices(l)] = col_ref.T[np.tril_indices(l)]
    modeller_acc = _get_accuracy(modeller, target_cm)

    ax3.matshow(modeller, cmap=_get_plot_colors(modeller), origin='lower')

    ax3.title.set_text('Modeller ppv: %s' % modeller_acc)

    ax3.text(0.80,
             0.05,
             'modeller',
             fontsize=6,
             color='black',
             transform=ax3.transAxes)
    ax3.text(0.05,
             0.9,
             'refs contacts',
             fontsize=6,
             color='black',
             transform=ax3.transAxes)
    ax3.set_xticks([], [])
    ax3.set_yticks([], [])

    fpr, tpr = _get_roc_data(predicted_logits, target_cm_unmasked)

    fpr_modeller, tpr_modeller = _get_fpr_tpr_(modeller - 1, target_cm)
    fpr_reference, tpr_reference = _get_fpr_tpr_(aligned_reference, target_cm)
    fpr_method, tpr_method = _get_fpr_tpr_(predicted_cm - 1, target_cm)

    ax4.plot(fpr, tpr, color='red')

    ax4.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    ax4.plot(fpr_modeller,
             tpr_modeller,
             marker='o',
             markersize=3,
             color="green")
    ax4.annotate('modeller', (fpr_modeller, tpr_modeller), color='green')
    ax4.plot(fpr_reference,
             tpr_reference,
             marker='o',
             markersize=3,
             color="blue")
    ax4.annotate('reference', (fpr_reference, tpr_reference), color='blue')
    ax4.plot(fpr_method,
             tpr_method,
             marker='o',
             markersize=3,
             color="darkred")
    ax4.annotate(model_name, (fpr_method, tpr_method), color='darkred')
    if ref_name is not None:
        fpr_2, tpr_2 = _get_roc_data(predicted_logits2, target_cm_unmasked)
        fpr_method_2, tpr_method_2 = _get_fpr_tpr_(predicted_cm2 - 1, target_cm)
        ax4.plot(fpr_2, tpr_2, color='orange')
        ax4.plot(fpr_method_2,
                 tpr_method_2,
                 marker='o',
                 markersize=3,
                 color="darkorange")
        ax4.annotate(ref_name, (fpr_method_2, tpr_method_2), color='darkorange')

    ax4.title.set_text('ROC Curve')
    ax4.set_xticks([], [])
    ax4.set_yticks([], [])

    ax4.set_xlabel('False Positive Rate')
    ax4.set_ylabel('True Positive Rate')

    plt.savefig(os.path.join(figures_path, '%s_analysis.png' % target))
    # plt.show(False)
    plt.close('all')
Ejemplo n.º 39
0
def final_check(event_code, database, id_string, use_all=False):
    folder = f"{database}/{event_code}/fortran_format_{id_string}"

    point_source_folder = f"{folder}/point_source/"
    observed_folder = f"{folder}/observed_data/"
    out_file = f"{folder}/station2use.txt"
    os.mkdir(f"{folder}/final_check")

    out = open(out_file, "w")
    out.write("Station\tComp\tType\tUse it?\n")
    filelist = glob.glob(f"{point_source_folder}*")

    for n, ps_file in enumerate(filelist, 1):
        print("----------------")
        sta = ps_file.split("/")[-1].split("_")[0]
        comp = ps_file.split("/")[-1].split("_")[1]
        wavetype = ps_file.split("/")[-1].split("_")[2]
        if comp == "T" and wavetype == "P":
            continue  # skip transverse-component P-wave files
        if wavetype == "W":
            pass  # W-phase files are skipped (the else below is not entered)
        else:
            print(ps_file)
            obs_file = glob.glob(f"{observed_folder}{sta}_{comp}_{wavetype}_ff")[0]
            print(obs_file)
            print(sta, comp, wavetype, n, "/", len(filelist))

            obs_ff, obs_rreal, obs_iimag, obs_complex = read_fft_file(obs_file)
            ps_ff, ps_rreal, ps_iimag, ps_complex = read_fft_file(ps_file)

            trim = len(ps_ff) // 2

            fig = plt.figure(1, figsize=(11.69, 8.27))
            plt.subplot(311)
            plt.plot(ps_ff[0:trim], ps_rreal[0:trim], color="black")
            plt.plot(obs_ff[0:trim], obs_rreal[0:trim], color="red")
            plt.xscale("log")

            plt.subplot(312)
            plt.plot(ps_ff[0:trim], ps_iimag[0:trim], color="black")
            plt.plot(obs_ff[0:trim], obs_iimag[0:trim], color="red")
            plt.xscale("log")

            npoints = len(obs_ff)
            ps_inv = np.fft.ifft(ps_complex, n=npoints)
            obs_inv = np.fft.ifft(obs_complex, n=npoints)

            corr = round(np.corrcoef(ps_inv.real, obs_inv.real)[0, 1], 2)

            rratio = []
            for i in range(0, len(obs_inv.real)):
                if abs(obs_inv.real[i]) >= max(abs(obs_inv.real)) / 50.0:
                    rratio.append((ps_inv.real[i]) / (obs_inv.real[i]))

            amp_ratio = round(np.mean(rratio), 2)

            plt.subplot(313)
            plt.plot(
                np.arange(0, len(obs_inv), 1),
                obs_inv.real,
                label="Observed",
                color="black",
                zorder=0,
                linewidth=3,
            )
            plt.plot(
                np.arange(0, len(ps_inv), 1),
                ps_inv.real,
                label="Point source",
                color="red",
                zorder=1,
            )

            if corr <= 0.67 or corr >= 1.29:
                corr_txt_colour = "red"
            else:
                corr_txt_colour = "black"
            plt.annotate(
                f"correlation coeff.: {corr}",
                xy=(20, 150),
                xycoords="axes pixels",
                fontsize=14,
                color=corr_txt_colour,
            )

            if amp_ratio <= 0.67 or amp_ratio >= 1.29:
                amp_txt_colour = "red"
            else:
                amp_txt_colour = "black"
            plt.annotate(
                f"amplitude coeff.: {amp_ratio}",
                xy=(20, 128),
                xycoords="axes pixels",
                fontsize=14,
                color=amp_txt_colour,
            )

            npts = len(ps_inv.real)
            h = npts // 2
            plt.fill([-100, h, h, -100], [-1000, -1000, 1000, 1000], "red", alpha=0.15)
            plt.fill([npts, h, h, npts], [-1000, -1000, 1000, 1000], "lime", alpha=0.15)

            plt.ylim(
                1.1 * min(min(obs_inv.real), min(ps_inv.real)),
                1.1 * max(max(obs_inv.real), max(ps_inv.real)),
            )
            plt.xlim(0, npts)
            plt.suptitle(f"Station: {sta} Comp: {comp} Wavetype: {wavetype}")
            plt.savefig(f"{folder}/final_check/{sta}_{comp}_{wavetype}.png")

            if not use_all:
                pointpick = PointPicker(fig)
                plt.show()
                x = pointpick.xx[0]

                if x >= h:
                    out.write(f"{sta}\t{comp}\t{wavetype}\tY\n")
                else:
                    out.write(f"{sta}\t{comp}\t{wavetype}\tN\n")
            else:
                out.write(f"{sta}\t{comp}\t{wavetype}\tY\n")

            plt.close()

    out.close()
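# Hypothetical usage sketch (the paths and identifiers are illustrative, not
# from the original source): check every station/component pair for one event
# and write the station2use.txt selection file without interactive picking.
#
# final_check("20190101_0130", "/data/events", "run01", use_all=True)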
Ejemplo n.º 40
0
    def plot_results(self):
        fig, axs = plt.subplots(3, 3, figsize=(50, 30))
        axs = axs.ravel()
        params_to_plot = [
            self.ax_raw, self.ay_raw, self.az_raw, self.gx_raw, self.gy_raw,
            self.gz_raw, self.temperature_original
        ]
        labels = ['ax', 'ay', 'az', 'gx', 'gy', 'gz', 'temperature']
        for i in np.arange(0, 9):
            if i <= 6:
                axs[i].plot(params_to_plot[i])
                axs[i].set_ylabel(labels[i], fontsize=15)
                axs[i].set_ylim(0.7 * min(params_to_plot[i]),
                                1.3 * max(params_to_plot[i]))
            elif i == 7:
                axs[i].plot(self.temperature_interpld_acc, self.ax_smoothed,
                            '-', self.temperature_interpld_acc,
                            self.ay_smoothed, '-',
                            self.temperature_interpld_acc, self.az_smoothed,
                            '-', self.ax_fitted_x, self.ax_fitted_y, '-',
                            self.ay_fitted_x, self.ay_fitted_y, '-',
                            self.az_fitted_x, self.az_fitted_y, '-')
                axs[i].set_xlabel("Temperature", fontsize=15)
                axs[i].set_ylabel("Accelerometer_Temp_Fit", fontsize=15)
                axs[i].legend(
                    ('ax_smoothed', 'ay_smoothed', 'az_smoothed',
                     'ax_linear_fit', 'ay_linear_fit', 'az_linear_fit'))
            else:
                axs[i].plot(self.temperature_interpld_gyro, self.gx_smoothed,
                            '-', self.temperature_interpld_gyro,
                            self.gy_smoothed, '-',
                            self.temperature_interpld_gyro, self.gz_smoothed,
                            '-', self.gx_fitted_x, self.gx_fitted_y, '+',
                            self.gy_fitted_x, self.gy_fitted_y, '+',
                            self.gz_fitted_x, self.gz_fitted_y, '+')
                axs[i].set_xlabel("Temperature", fontsize=15)
                axs[i].set_ylabel("Gyroscope_Temp_Fit", fontsize=15)
                axs[i].legend(
                    ('gx_smoothed', 'gy_smoothed', 'gz_smoothed',
                     'gx_linear_fit', 'gy_linear_fit', 'gz_linear_fit'))

        plt.text(0.5,
                 -0.25,
                 'Gyro_XYZ_linear_fit: {0:.2E}x + {1:.2E}, {2:.2E}x + {3:.2E}, {4:.2E}x + {5:.2E}'.format(
                     self.coeffs_gx_fit[0], self.coeffs_gx_fit[1],
                     self.coeffs_gy_fit[0], self.coeffs_gy_fit[1],
                     self.coeffs_gz_fit[0], self.coeffs_gz_fit[1]),
                 size=15,
                 ha='center',
                 va='center',
                 transform=axs[8].transAxes)

        plt.text(0.5,
                 -0.35,
                 'Residual_Norm_Gyro_XYZ= {0:.4f}, {1:.4f}, {2:.4f}'.format(
                     self.residual_gx, self.residual_gy, self.residual_gz),
                 size=15,
                 ha='center',
                 va='center',
                 transform=axs[8].transAxes)

        plt.suptitle(self.input_log_file_name)
        plt.show()
Ejemplo n.º 41
0
def get_offset_center(f, plot=False, interactive=False):
    '''
    Given a fits image, returns the offset in RA, DEC that needs to be applied
    for the telescope to go from the current pointing position to the
    coordinates of the object specified in the fits file.

    Returns a status code (0: ok, 2: fallback astrometry used, -1: failure)
    followed by the offsets dra, ddec.
    '''

    if (not os.path.isfile(f)):
        print("File %s does not exist! Returning Zero offsets..." % f)
        return -1, 0, 0
    else:
        image = pf.open(f)
        wcs = pywcs.WCS(image[0].header)
        rra, rdec = cc.hour2deg(image[0].header['OBJRA'],
                                image[0].header['OBJDEC'])
        x, y = np.round(wcs.wcs_sky2pix(rra, rdec, 0), 0)
        pra, pdec = wcs.wcs_pix2sky(np.array([[1293., 1280.]], np.float_),
                                    0)[0]
        dra, ddec = cc.get_offset(pra, pdec, rra, rdec)

        xl, yu = np.round(
            wcs.wcs_sky2pix(rra + 90. / 3600, rdec - 90. / 3600, 0), 0)
        xu, yl = np.round(
            wcs.wcs_sky2pix(rra - 90. / 3600, rdec + 90. / 3600, 0), 0)

        imageloc = image[0].data.T[xl:xu, yl:yu]

        if imageloc.shape[0] == 0 or imageloc.shape[1] == 0:
            logger.warning(
                "Astrometry has FAILED on this! The object is outside the frame! Resending to the numb astrometric solution"
            )
            logger.error(
                "Astrometry has FAILED on this! The object is outside the frame! Resending to the numb astrometric solution"
            )
            print "Pixels are", xl, xu, yl, yu
            try:
                code, dra, ddec = get_offset_center_failed_astro(
                    f, plot=plot, interactive=interactive)
                return 2, dra, ddec
            except Exception:
                return -1, 0, 0
        if (plot):
            plt.figure(figsize=(8, 8))

            zmin, zmax = zscale.zscale(imageloc)

            #print zmin, zmax, imageloc, (xl,xu,yl,yu)

            obj = fitsutils.get_par(f, "OBJECT")
            plt.suptitle(obj, fontsize=20)
            plt.imshow(imageloc.T,
                       extent=(xl[0], xu[0], yl[0], yu[0]),
                       aspect="equal",
                       interpolation="none",
                       origin="lower",
                       vmin=zmin,
                       vmax=zmax)
            plt.plot(1293., 1280., "ws", ms=7, label="Current pointing")
            plt.plot(x, y, "b*", ms=10, label="Target pointing")
            plt.gca().invert_xaxis()
            plt.legend()
            if (interactive):
                plt.show()
            else:
                plt.savefig(
                    os.path.join(
                        os.path.dirname(f).replace("raw", "phot"),
                        os.path.basename(f).replace(".fits", "_a.png")))
            plt.clf()

        return 0, dra, ddec
Ejemplo n.º 42
0
def get_offset_center_failed_astro(f, plot=False, interactive=True):
    '''
    For fields where astrometry is challenging, there is a simple solution:
    find the brightest peak within the pointing error of the telescope.
    As these fields will usually be centered on standard stars with very
    short exposure times, the fit intends to recover the centroid of the
    brightest source in the field.
    '''

    image = pf.open(f)
    data = image[0].data
    wcs = pywcs.WCS(image[0].header)
    ra, dec = cc.hour2deg(image[0].header['OBJRA'], image[0].header['OBJDEC'])

    pra, pdec = wcs.wcs_sky2pix(ra, dec, 0)
    #Get a local image
    #xl, yl = np.array(wcs.wcs_sky2pix(ra+(60./3600)*np.cos(np.deg2rad(dec)), dec-60./3600, 0), dtype=np.int)
    #xu, yu = np.array(wcs.wcs_sky2pix(ra-(60./3600)*np.cos(np.deg2rad(dec)), dec+60./3600, 0), dtype=np.int)
    imageloc = image[0].data.T[1293 - 150:1293 + 150, 1280 - 150:1280 + 150]

    nx = 300
    ny = 300

    def_x = np.argmax(np.sum(imageloc, axis=0))
    def_y = np.argmax(np.sum(imageloc, axis=1))

    newx = pra - nx / 2. + def_x
    newy = pdec - ny / 2. + def_y

    pra, pdec = wcs.wcs_pix2sky(np.array([[newx[0], newy[0]]], np.float_),
                                0)[0]
    dra, ddec = cc.get_offset(ra, dec, pra, pdec)

    print "Offset", dra, ddec, "Position RA,DEC", pra, pdec

    x, y, fwhmx, fwhmy, bkg, amp = fit_utils.fit_gauss(imageloc)

    if (plot):
        plt.figure(figsize=(8, 8))
        obj = fitsutils.get_par(f, "OBJECT")

        plt.suptitle(obj, fontsize=20)
        zmin, zmax = zscale.zscale(imageloc)
        plt.imshow(imageloc,
                   aspect="auto",
                   interpolation="none",
                   origin="lower",
                   vmin=zmin,
                   vmax=zmax,
                   extent=(0, +300, 0, +300))
        plt.plot(x, y, "go", ms=20, label="Centroid using gaussian fit.")
        plt.plot(def_x, def_y, "b*", ms=20, label="Centroid using max/min.")
        plt.plot(150, 150, "wo", ms=20, label="Initial pointing")
        plt.legend()
        '''zmin, zmax = zscale.zscale(data)
        plt.imshow(data, aspect="auto", interpolation="none", origin="lower", vmin=zmin, vmax=zmax)
        plt.plot(newx, newy, "go")    
        plt.show()'''

        if (interactive):
            plt.show()
        else:
            plt.savefig(
                os.path.join(
                    os.path.dirname(f).replace("raw", "phot"),
                    os.path.basename(f).replace(".fits", "_std.png")))

        plt.clf()

    return 1, ddec, dra
Ejemplo n.º 43
0
    break
# ### Run the classifier on a batch of images
result_batch = classifier.predict(image_batch)
result_batch.shape
# -
predicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]
predicted_class_names
# -
plt.figure(figsize=(10, 4))
plt.subplots_adjust(hspace=0.5)
for n in range(8):
    plt.subplot(2, 4, n + 1)
    plt.imshow(image_batch[n])
    plt.title(predicted_class_names[n])
    plt.axis("off")
_ = plt.suptitle("ImageNet predictions")

# ### Download the headless model
url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2"
feature_extractor_layer = hub.KerasLayer(url, input_shape=(224, 224, 3))
# -
feature_batch = feature_extractor_layer(image_batch)
print(feature_batch.shape)
# -
feature_extractor_layer.trainable = False
# ### Attach a classification head
model = tf.keras.Sequential([
    feature_extractor_layer,
    layers.Dense(image_data.num_classes, activation="softmax"),
])
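# A sketch of how training might continue from here (assumed, not part of the
# original snippet): compile the new head and fit on the image generator.
#
# model.compile(optimizer='adam',
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])
# model.fit(image_data, epochs=2)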
Ejemplo n.º 44
0
##Plotting freq occupancy

if opts.save_freq or opts.show:
    if opts.verb: print('Plotting frequency-occupancy plot')
    if not opts.chanaxis:
        pylab.step(fqs, pcnt_f, where='mid')
        pylab.fill_between(fqs, 0, pcnt_f, color='blue', alpha=0.3)
        pylab.xlim(fqs[0], fqs[-1])
        pylab.xlabel('Frequency [MHz]')
    else:
        pylab.step(chans, pcnt_f, where='mid')
        pylab.fill_between(chans, 0, pcnt_f, color='blue', alpha=0.3)
        pylab.xlim(chans[0], chans[-1])
        pylab.xlabel('Channel number')
    pylab.ylabel('Occupancy [%]')
    if len(args) == 1: pylab.suptitle(file2jd(args[0]), size=15)
    else:
        pylab.suptitle('%s - %s' %
                       (file2jd(args[0]), file2jd(args[len(args) - 1])),
                       size=15)

    #pylab.savefig('%s_%s_%s_F.png'%(jd,opts.ant,opts.pol)) #hard to plug into RTP
    if opts.fimg is not None: pylab.savefig(opts.fimg)
    else:
        pylab.savefig('%s_f.png' %
                      args[0])  #zen.2451234.12345.xx.HH.uvcR_f.png
    #^ it's being run on each uvcR file in the RTP system
    if opts.show:
        pylab.show()
    else:
        pylab.close()
Ejemplo n.º 45
0
    plt.figure(3)
    plt.plot(rhop, rhot[0])
    plt.xlabel('rho_pol')
    plt.ylabel('rho_tor')
    plt.title('kkrhopto')

    #------

    br, bz, bt = eqm.rz2brzt(r_in=R, z_in=Z[0], t_in=3)

    plt.figure(4)
    plt.subplot(1, 3, 1)
    plt.ylabel('Br')
    plt.plot(R, br[0, :, 0])
    plt.xlabel('R')
    plt.suptitle('kkrzBrzt')

    plt.subplot(1, 3, 2)
    plt.plot(R, bz[0, :, 0])
    plt.ylabel('Bz')
    plt.xlabel('R')

    plt.subplot(1, 3, 3)
    plt.plot(R, bt[0, :, 0])
    plt.ylabel('Bt')
    plt.xlabel('R')

    #------

    rho = [0.1, 1]
    r2, z2 = eqm.rho2rz(rho, t_in=3, coord_in='rho_pol')
Ejemplo n.º 46
0
import numpy as np
import matplotlib.pylab as pylab
from PIL import Image, ImageFilter

# script from page 160

im = Image.open('../images/mandrill.jpg')
# choose random pixel locations covering ~5% of the image
prop_noise = 0.05
n = int(im.width * im.height * prop_noise)
x = np.random.randint(0, im.width, n)
y = np.random.randint(0, im.height, n)
for (xi, yi) in zip(x, y):
    # generate salt-and-pepper noise: black or white with equal probability
    im.putpixel((xi, yi), ((0, 0, 0) if np.random.rand() < 0.5 else (255, 255, 255)))

pylab.figure(figsize=(10, 25))
pylab.subplots_adjust(hspace=0.5)
pylab.subplot(1, 3, 1)
pylab.title('Original image')
pylab.imshow(im)

for n in [3, 5]:
    # n x n box-blur kernel: equal weights, normalised to sum to 1
    box_blur_kernel = np.reshape(np.ones(n*n), (n, n)) / (n*n)
    im1 = im.filter(ImageFilter.Kernel((n, n), box_blur_kernel.flatten()))
    pylab.subplot(1, 3, (2 if n == 3 else 3))
    pylab.title('Blurred image with kernel size = ' + str(n) + 'x' + str(n))
    pylab.imshow(im1)

pylab.suptitle('PIL Mean Filter (Box Blur) with different Kernel size', size=30)
pylab.show()

print('END')
Ejemplo n.º 47
0
def main():

    if(a00acc() != 1):
        print("Cannot find a valid NAG license")
        sys.exit(1)

    try:
        if(len(sys.argv)>1):
            QuoteData = sys.argv[1]
        else:
            QuoteData = 'QuoteData.dat'

        qd = open(QuoteData, 'r')
        qd_head = []
        qd_head.append(qd.readline())
        qd_head.append(qd.readline())
        qd.close()
    except:
        sys.stderr.write("Usage: implied_volatility.py QuoteData.dat\n")
        sys.stderr.write("Couldn't read QuoteData")
        sys.exit(1)

    print("Implied Volatility for %s %s" % (qd_head[0].strip(), qd_head[1]))

    # Parse the header information in QuoteData
    first = qd_head[0].split(',')
    second = qd_head[1].split()
    qd_date = qd_head[1].split(',')[0]
    
    company = first[0]
    underlyingprice = float(first[1])
    month, day = second[:2]
    today = cumulative_month[month] + int(day) - 30
    current_year = int(second[2])

    def getExpiration(x):
        monthday = x.split()
        adate = monthday[0] + ' ' + monthday[1]
        if adate not in dates:
            dates.append(adate)
        return (int(monthday[0]) - (current_year % 2000)) * 365 + cumulative_month[monthday[1]]

    def getStrike(x):
        monthday = x.split()
        return float(monthday[2])

    data = pandas.io.parsers.read_csv(QuoteData, sep=',', header=2, na_values=' ')

    # Need to fill the NA values in dataframe
    data = data.fillna(0.0)

    # Let's look at data where there was a recent sale 
#    data = data[data.Calls > 0]
    data = data[(data['Last Sale'] > 0) | (data['Last Sale.1'] > 0)]

    # Get the Options Expiration Date
    exp = data.Calls.apply(getExpiration)
    exp.name = 'Expiration'

    # Get the Strike Prices
    strike = data.Calls.apply(getStrike)
    strike.name = 'Strike'

    data = data.join(exp).join(strike)

    print('Calculating Implied Vol of Calls...')
    impvolcall = pandas.Series(numpy.zeros(len(data.index)),
                               index=data.index, name='impvolCall')
    for i in data.index:
        impvolcall[i] = (calcvol(data.Expiration[i],
                                 data.Strike[i],
                                 today,
                                 underlyingprice,
                                 (data.Bid[i] + data.Ask[i]) / 2, Nag_Call))

    print('Calculated Implied Vol for %d Calls' % len(data.index))
    data = data.join(impvolcall)

    print('Calculating Implied Vol of Puts...')
    impvolput = pandas.Series(numpy.zeros(len(data.index)),
                              index=data.index, name='impvolPut')

    for i in data.index:
        impvolput[i] = (calcvol(data.Expiration[i],
                                data.Strike[i],
                                today,
                                underlyingprice,
                                (data['Bid.1'][i] + data['Ask.1'][i]) / 2.0, Nag_Put))

    print('Calculated Implied Vol for %i Puts' % len(data.index))

    data = data.join(impvolput)
    fig = plt.figure(1)
    fig.subplots_adjust(hspace=.4, wspace=.3)

    # Plot the Volatility Curves
    # Encode graph layout: 3 rows, 3 columns, 1 is first graph.
    num = 331
    max_xticks = 4
    
    for date in dates:
        # add each subplot to the figure
        plot_year, plot_month = date.split()
        plot_date = (int(plot_year) - (current_year % 2000)) * 365 + cumulative_month[plot_month]
        plot_call = data[(data.impvolCall > .01) &
                       (data.impvolCall < 1) &
                       (data.Expiration == plot_date) &
                       (data['Last Sale'] > 0)]
        plot_put = data[(data.impvolPut > .01) &
                        (data.impvolPut < 1) &
                        (data.Expiration == plot_date) &
                        (data['Last Sale.1'] > 0)]

        myfig = fig.add_subplot(num)
        xloc = plt.MaxNLocator(max_xticks)
        myfig.xaxis.set_major_locator(xloc)
        myfig.set_title('Expiry: %s 20%s' % (plot_month, plot_year))
        myfig.plot(plot_call.Strike, plot_call.impvolCall, 'pr', label='call')
        myfig.plot(plot_put.Strike, plot_put.impvolPut, 'p', label='put')
        myfig.legend(loc=1, numpoints=1, prop={'size': 10})
        myfig.set_ylim([0,1])
        myfig.set_xlabel('Strike Price')
        myfig.set_ylabel('Implied Volatility')
        num += 1

    plt.suptitle('Implied Volatility for %s Current Price: %s Date: %s' %
                 (company, underlyingprice, qd_date))
    

    print("\nPlotting Volatility Curves/Surface")
    """
    The code below will plot the Volatility Surface
    It uses e02ca to fit with a polynomial and e02cb to evalute at 
    intermediate points
    """ 

    m = numpy.empty(len(dates), dtype=nag_int_type())
    y = numpy.empty(len(dates), dtype=numpy.double)
    xmin = numpy.empty(len(dates), dtype=numpy.double)    
    xmax = numpy.empty(len(dates), dtype=numpy.double)
 
    data = data.sort_values('Strike')  # Need to sort for NAG Algorithm (DataFrame.sort was removed in modern pandas)

    k = 3   # this is the degree of polynomial for x-axis (Strike Price)
    l = 3   # this is the degree of polynomial for y-axis (Expiration Date)

    i = 0

    for date in dates:
        plot_year, plot_month = date.split()
        plot_date = (int(plot_year) - (current_year % 2000)) * 365 + cumulative_month[plot_month]
        
        call_data = data[(data.Expiration == plot_date) &
                         (data.impvolPut > .01) &
                         (data.impvolPut < 1) &
                         (data['Last Sale.1'] > 0)]
         
        exp_sizes = call_data.Expiration.size
        if(exp_sizes > 0):       
            m[i] = exp_sizes
            n = len(dates)

            if(i == 0):
                x = numpy.array(call_data.Strike)
                call = numpy.array(call_data.impvolPut)
                xmin[0] = x.min()
                xmax[0] = x.max()
            else:
                x2 = numpy.array(call_data.Strike)
                x = numpy.append(x,x2)
                call2 = numpy.array(call_data.impvolPut)
                call = numpy.append(call,call2)
                xmin[i] = x2.min()
                xmax[i] = x2.max()
            y[i] = plot_date-today
            i+=1
    nux = numpy.zeros(1,dtype=numpy.double)
    nuy = numpy.zeros(1,dtype=numpy.double)
    inux = 1
    inuy = 1

    if(len(dates) != i):
        print("Error with data: the CBOE may not be open for trading or one expiration date has null data")
        return 0
    weight = numpy.ones(call.size, dtype=numpy.double)

    output_coef = numpy.empty((k + 1) * (l + 1),dtype=numpy.double)
     
    fail = noisy_fail()    
    
    #Call the NAG Chebyshev fitting function
    e02cac(m,n,k,l,x,y,call,weight,output_coef,xmin,xmax,nux,inux,nuy,inuy,fail)        
   
    """
    Now that we have fit the function,
    we use e02cb to evaluate at different strikes/expirations 
    """

    nStrikes = 100 # number of Strikes to evaluate    
    spacing = 20 # number of Expirations to evaluate
    
    for i in range(spacing):
        mfirst = 1	
        mlast = nStrikes
        xmin = data.Strike.min()
        xmax = data.Strike.max()
         
        x = numpy.linspace(xmin, xmax, nStrikes)

        ymin = data.Expiration.min() - today
        ymax = data.Expiration.max() - today
      
        y = (ymin) + i * numpy.floor((ymax - ymin) / spacing) 

        fx=numpy.empty(nStrikes)
        fail=quiet_fail()

        e02cbc(mfirst,mlast,k,l,x,xmin,xmax,y,ymin,ymax,fx,output_coef,fail)
        
        if(fail.code != 0):
            print(fail.message)
        
        if 'xaxis' in locals():
            xaxis = numpy.append(xaxis, x)
            temp = numpy.empty(len(x))
            temp.fill(y)
            yaxis = numpy.append(yaxis, temp)    
            for j in range(len(x)):
                zaxis.append(fx[j])
        else:
            xaxis = x
            yaxis = numpy.empty(len(x), dtype=numpy.double)
            yaxis.fill(y)
            zaxis = []
            for j in range(len(x)):
                zaxis.append(fx[j])
   
    fig = plt.figure(2)
    ax = fig.add_subplot(111, projection='3d')

    # A try-except block for Matplotlib
    try:
        ax.plot_trisurf(xaxis, yaxis, zaxis, cmap=cm.jet)
    except AttributeError:
        print ("Your version of Matplotlib does not support plot_trisurf")
        print ("...plotting wireframe instead")
        ax.plot(xaxis, yaxis, zaxis)
    
    ax.set_xlabel('Strike Price')
    ax.set_ylabel('Days to Expiration')
    ax.set_zlabel('Implied Volatility for Put Options')
    plt.suptitle('Implied Volatility Surface for %s Current Price: %s Date: %s' %
                 (company, underlyingprice, qd_date))
    
    plt.show()    
Ejemplo n.º 48
0
                plt.figure(figsize=(12, 6))
                for ii, tg2 in enumerate(time_groups):
                    df_t = df_g.get_group(tg2)

                    plt.subplot(1, 2, ii + 1)
                    with tables.File(fname, 'r') as fid:
                        food_cnt_coord = fid.get_node('/food_cnt_coord')[:]
                        plt.plot(food_cnt_coord[:, 1],
                                 food_cnt_coord[:, 0],
                                 'r',
                                 lw=3.0)

                        w_g = df_t.groupby('worm_index')
                        cols = sns.color_palette("hls", len(w_g.groups))
                        for c, (w_ind, w_dat) in zip(cols, w_g):
                            skels = fid.get_node('/coordinates/skeletons')[
                                w_dat.index[::50]]
                            skels = skels.T

                            plt.plot(skels[1], skels[0], color=c)

                    plt.axis('equal')
                    plt.xlim((0, 18000))
                    plt.ylim((0, 20000))
                    #
                    plt.title('Time : {} - {}'.format(int(tg2), int(tg2 + 15)))
                plt.suptitle('Cohort {}'.format(cohort_n))
                pdf_pages.savefig()
                plt.close()
Ejemplo n.º 49
0
result_batch = classifier.predict(image_batch)

# predict some classes
pred_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]
# see the predicted names
print('Class Names:\n', pred_class_names)

# pair predictions with the images
plt.figure(figsize=(10, 9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
    plt.subplot(6, 5, n + 1)
    plt.imshow(image_batch[n])
    plt.title(pred_class_names[n])
    plt.axis('off')  # call the function; assigning to plt.axis would shadow it
_ = plt.suptitle("ImageNet Predictions")
plt.savefig("predictions.png", bbox_inches='tight')

# get the headless model so we can retrain
feature_extractor_url = \
    'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2'

feature_extractor_layer = hub.KerasLayer(feature_extractor_url,
                                         input_shape=(224, 224, 3))
feature_batch = feature_extractor_layer(image_batch)
# see the shape
print("Feature Shape:", feature_batch.shape)

# ensure that only the classifier layer can be trained
feature_extractor_layer.trainable = False
Ejemplo n.º 50
0
plt.figure()
plt.ylabel("Accuracy")
plt.xlabel("Training Steps")
plt.ylim([0, 1])
plt.plot(batch_stats.batch_acc)

#plt.show()
plt.savefig("outputs/fig2.png")

label_names = sorted(image_data.class_indices.items(),
                     key=lambda pair: pair[1])
label_names = np.array([key.title() for key, value in label_names])
print("label_names:{0}".format(label_names))

result_batch = model.predict(image_batch)

labels_batch = label_names[np.argmax(result_batch, axis=-1)]
print("labels_batch:{0}".format(labels_batch))

plt.figure(figsize=(10, 9))
for n in range(30):
    plt.subplot(6, 5, n + 1)
    plt.imshow(image_batch[n])
    plt.title(labels_batch[n])
    plt.axis('off')
_ = plt.suptitle("Model predictions")
plt.savefig("outputs/fig3.png")

export_path = tf.contrib.saved_model.save_keras_model(model, output_filepath)
print("export_path:{0}".format(export_path))
Ejemplo n.º 51
0
    for file in dpfiles:
        (f, x) = readfile(file)
        x_comp.append(x)
        f_comp.append(f)

    # initialize Interpolator
    wfunc = lambda q, x: x
    # not useable
    I = Interpolator(q_inp, x_inp, f_inp, wfunc=wfunc, debug=True, k=2)

    # get interpolated spectra
    (x_out, f_out) = I.get_spectrum(q_comp)

    # plot
    ax = plt.figure().add_subplot(111)
    plt.suptitle("Interpolation for momentum dependent EELS, bulk Silicon")
    plt.title("[Weissker, et.al.: PRB(79) 094102 (2009), fig. 10]")

    for iq in range(len(q_inp)):
        ax.plot(x_inp[iq],
                f_inp[iq],
                "g-",
                label="input, q=%5.3f" % (q_inp[iq]))
    for iq in range(len(q_comp)):
        ax.plot(x_comp[iq],
                f_comp[iq],
                "r-",
                label="comp,  q=%5.3f" % (q_comp[iq]))
        ax.plot(x_out, f_out[iq], "b.", label="interp,q=%5.3f" % (q_comp[iq]))

    ax.legend()
Ejemplo n.º 52
0
    print(labels_batch.shape)
    break

result_batch = classifier.predict(train_ds)

predicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]
print(predicted_class_names)

plt.figure(figsize=(10, 9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
    plt.subplot(6, 5, n + 1)
    plt.imshow(image_batch[n])
    plt.title(predicted_class_names[n])
    plt.axis('off')
_ = plt.suptitle("ImageNet predictions")
plt.show()

# the headless model
feature_extractor_model = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"

feature_extractor_layer = hub.KerasLayer(feature_extractor_model,
                                         input_shape=(224, 224, 3),
                                         trainable=False)

feature_batch = feature_extractor_layer(image_batch)
print(feature_batch.shape)  # (32, 1280)

# Attach a classification head
num_classes = len(class_names)
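# A sketch of the classification head this comment refers to (assumed to
# mirror the earlier transfer-learning examples in this document):
#
# model = tf.keras.Sequential([
#     feature_extractor_layer,
#     tf.keras.layers.Dense(num_classes)
# ])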
Ejemplo n.º 53
0
def LV5(argv):
    ####Packages#####
    import scipy as sc
    import scipy.stats as stats
    import scipy.integrate as integrate
    import matplotlib.pylab as p
    import sys

    #####Functions######
    def dCR_dt(pops, t=0):
        """
        function find the change in consumers and resources at time, t, in discrete time.
        """
        R = pops[0]
        C = pops[1]
        Rt1 = sc.zeros(len(t))
        Ct1 = sc.zeros(len(t))

        for i in range(len(t)):
            if i == 0:  # for first iteration
                Rt1[i] = R * (1 + r * (1 - (R / K) - a * C))
                Ct1[i] = C * (1 - z + (e * a * R))
            else:  ## every subsequent iteration
                Rt1[i] = Rt1[i - 1] * (1 + (r + E) *
                                       (1 - (Rt1[i - 1] / K) - a * Ct1[i - 1]))
                Ct1[i] = Ct1[i - 1] * (1 - z + E + (e * a * Rt1[i - 1]))

                ### if values reach or exceed 0, stop
                if Rt1[i] <= 0:
                    break

                if Ct1[i] <= 0:
                    break

        # Rt1 = Rt1[~sc.isnan(Rt1)] # the ~ cause return True only on valid numbers (https://stackoverflow.com/questions/11620914/removing-nan-values-from-an-array)
        # Ct1 = Ct1[~sc.isnan(Ct1)]
        # return sc.array([Rt1.tolist(), Ct1.tolist()])
        return sc.array([Rt1, Ct1])

    ######Main#######

    if len(sys.argv) == 1:
        r = 1.
        a = 0.1
        z = 1.5
        e = 0.75
        print("Using default arguments for r, a, z, e")

    elif len(sys.argv) < 5:
        print("Not enough arguments given. Please give r, a, z and e")
        return 1

    else:
        args = sys.argv
        r = float(args[1])
        a = float(args[2])
        z = float(args[3])
        e = float(args[4])
        print("Using args from user in order: r, a, z, e")

    K = 30  ## K arbitrarily defined
    t = sc.linspace(0, 15, 1000)
    E = stats.norm.rvs(0.5, 0.1, size=1)  #random gaussian fluctuation

    R0 = 10
    C0 = 5
    RC0 = sc.array([R0, C0])

    pops = dCR_dt(RC0, t)

    print(len(pops[1]))

    ######plotting######
    p.ioff()
    f1 = p.figure()

    p.plot(t, pops[0], "g-", label="Resource density")  #plot
    p.plot(t, pops[1], "b-", label="Consumer density")
    # p.xlim(0, 1)
    p.grid()
    p.legend(loc="best")
    p.xlabel('Time')
    p.ylabel("Population density")
    p.suptitle("Consumer-Resource population dynamics")
    p.title("r={}, a={}, z={}, e={}, K={}".format(r, a, z, e, K))
    # p.show()
    f1.savefig('../Results/LV_model_LV5.pdf')  #Save figure
    ##### Practical #####
    f2 = p.figure()
    p.plot(pops[0], pops[1], "r-")
    p.grid()
    p.xlabel("Resource density")
    p.ylabel("Consumer density")
    p.suptitle("Consumer-Resource population dynamics")
    p.title("r={}, a={}, z={}, e={}, K={}".format(r, a, z, e, K))
    # p.show()
    f2.savefig("../Results/LV_model2_LV5.pdf")
    print("r={}, a={}, z={}, e={}, K={}".format(r, a, z, e, K))
    return 0
Ejemplo n.º 54
0
def select_windows(
    data_trace,
    synthetic_trace,
    stf_trace,
    event_latitude,
    event_longitude,
    event_depth_in_km,
    station_latitude,
    station_longitude,
    minimum_period,
    maximum_period,
    min_cc=0.10,
    max_noise=0.10,
    max_noise_window=0.4,
    min_velocity=2.4,
    threshold_shift=0.30,
    threshold_correlation=0.75,
    min_length_period=1.5,
    min_peaks_troughs=2,
    max_energy_ratio=10.0,
    min_envelope_similarity=0.2,
    verbose=False,
    plot=False,
):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    Returns a list of windows, which might be empty for various reasons.

    This function is really long and does a lot of things. For a more detailed
    description, please see the LASIF paper.

    :param data_trace: The data trace.
    :type data_trace: :class:`~obspy.core.trace.Trace`
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: :class:`~obspy.core.trace.Trace`
    :param stf_trace: The stf trace.
    :type stf_trace: :class:`~obspy.core.trace.Trace`
    :param event_latitude: The event latitude.
    :type event_latitude: float
    :param event_longitude: The event longitude.
    :type event_longitude: float
    :param event_depth_in_km: The event depth in km.
    :type event_depth_in_km: float
    :param station_latitude: The station latitude.
    :type station_latitude: float
    :param station_longitude: The station longitude.
    :type station_longitude: float
    :param minimum_period: The minimum period of the data in seconds.
    :type minimum_period: float
    :param maximum_period: The maximum period of the data in seconds.
    :type maximum_period: float
    :param min_cc: Minimum normalised correlation coefficient of the
        complete traces.
    :type min_cc: float
    :param max_noise: Maximum relative noise level for the whole trace.
        Measured from maximum amplitudes before and after the first arrival.
    :type max_noise: float
    :param max_noise_window: Maximum relative noise level for individual
        windows.
    :type max_noise_window: float
    :param min_velocity: All arrivals later than those corresponding to the
        threshold velocity [km/s] will be excluded.
    :type min_velocity: float
    :param threshold_shift: Maximum allowable time shift within a window,
        as a fraction of the minimum period.
    :type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coefficient
        within a window.
    :type threshold_correlation: float
    :param min_length_period: Minimum length of the time windows relative to
        the minimum period.
    :type min_length_period: float
    :param min_peaks_troughs: Minimum number of extrema in an individual
        time window (excluding the edges).
    :type min_peaks_troughs: float
    :param max_energy_ratio: Maximum energy ratio between data and
        synthetics within a time window. Don't make this too small!
    :type max_energy_ratio: float
    :param min_envelope_similarity: The minimum similarity of the envelopes of
        both data and synthetics. This essentially assures that the
        amplitudes of data and synthetics can not diverge too much within a
        window. It is a bit like the inverse of the ratio of both envelopes
        so a value of 0.2 makes sure neither amplitude can be more than 5
        times larger than the other.
    :type min_envelope_similarity: float
    :param verbose: No output by default.
    :type verbose: bool
    :param plot: Create a plot of the algorithm while it does its work.
    :type plot: bool
    """
    # Shortcuts to frequently accessed variables.
    data_starttime = data_trace.stats.starttime
    data_delta = data_trace.stats.delta
    dt = data_trace.stats.delta
    npts = data_trace.stats.npts
    synth = synthetic_trace.data
    data = data_trace.data
    times = data_trace.times()

    # Fill cache if necessary.
    if not TAUPY_MODEL_CACHE:
        from obspy.taup import TauPyModel  # NOQA

        TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
    model = TAUPY_MODEL_CACHE["model"]

    # -------------------------------------------------------------------------
    # Geographical calculations and the time of the first arrival.
    # -------------------------------------------------------------------------
    dist_in_deg = geodetics.locations2degrees(
        station_latitude, station_longitude, event_latitude, event_longitude
    )
    dist_in_km = (
        geodetics.calc_vincenty_inverse(
            station_latitude,
            station_longitude,
            event_latitude,
            event_longitude,
        )[0]
        / 1000.0
    )

    # Get only a couple of P phases which should be the first arrival
    # for every epicentral distance. It's quite a bit faster than calculating
    # the arrival times for every phase.
    # Assumes the first sample is the centroid time of the event.
    ttp = model.get_travel_times(
        source_depth_in_km=event_depth_in_km,
        distance_in_degree=dist_in_deg,
        phase_list=["ttp"],
    )
    # Sort just as a safety measure.
    ttp = sorted(ttp, key=lambda x: x.time)
    first_tt_arrival = ttp[0].time

    # -------------------------------------------------------------------------
    # Window settings
    # -------------------------------------------------------------------------
    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an odd number, just to have a trivial midpoint
    # definition and one sample does not matter much in any case.
    window_length = int(round(float(2 * minimum_period) / dt))
    if not window_length % 2:
        window_length += 1

    # Use a Hanning window. No particular reason for it, but it's a well-behaved
    # window and has nice spectral properties.
    taper = np.hanning(window_length)

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate
    # noise level
    # =========================================================================

    # Overall Correlation coefficient.
    norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
    cc = np.sum(data * synth) / norm
    if verbose:
        _log_window_selection(
            data_trace.id, "Correlation Coefficient: %.4f" % cc
        )

    # Estimate noise level from waveforms prior to the first arrival.
    idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
    idx_end = max(10, idx_end)
    idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
    idx_start = max(10, idx_start)

    if idx_start >= idx_end:
        idx_start = max(0, idx_end - 10)

    abs_data = np.abs(data)
    noise_absolute = abs_data[idx_start:idx_end].max()
    noise_relative = noise_absolute / abs_data.max()

    if verbose:
        _log_window_selection(
            data_trace.id, "Absolute Noise Level: %e" % noise_absolute
        )
        _log_window_selection(
            data_trace.id, "Relative Noise Level: %e" % noise_relative
        )

    # Basic global rejection criteria.
    accept_traces = True
    if (cc < min_cc) and (noise_relative > max_noise / 3.0):
        msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    if noise_relative > max_noise:
        msg = "Noise level %.3f is above threshold of %.3f" % (
            noise_relative,
            max_noise,
        )
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    minimum_distance = 2 * minimum_period * min_velocity
    if dist_in_km < minimum_distance:
        msg = "Source - Receiver distance %.3f is below threshold of %.3f" % (
            dist_in_km,
            minimum_distance,
        )
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    # Calculate the envelope of both data and synthetics. This is to make sure
    # that the amplitude of both is not too different over time and is
    # used as another selector. Only calculated if the trace is generally
    # accepted as it is fairly slow.
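    # Note: accept_traces is either True or a rejection-message string, so the
    # identity comparison ("is True") below is deliberate.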
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or split the plotting function in various subfunctions,
    # neither of which are acceptable in my opinion. The impact on
    # performance is minimal if plotting is turned off: all imports are lazy
    # and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(
                left=0.05,
                bottom=0.05,
                right=0.98,
                top=0.95,
                wspace=None,
                hspace=0.0,
            )
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axes if the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(
                times,
                data_env,
                color="black",
                alpha=0.5,
                lw=0.4,
                label="data envelope",
            )
            plt.plot(
                synthetic_trace.times(),
                synth_env,
                color="#e41a1c",
                alpha=0.4,
                lw=0.5,
                label="synthetics envelope",
            )

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(
            synthetic_trace.times(),
            synth,
            color="#e41a1c",
            label="synthetics",
            lw=1.5,
        )

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
        plt.text(
            first_tt_arrival + offset,
            ylim[1] - (ylim[1] - ylim[0]) * 0.02,
            "first arrival",
            verticalalignment="top",
            horizontalalignment="left",
            color="#ee6e00",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )

        plt.vlines(
            first_tt_arrival - minimum_period / 2.0,
            ylim[0],
            ylim[1],
            colors="#ff7f00",
            lw=2,
        )
        plt.text(
            first_tt_arrival - minimum_period / 2.0 - offset,
            ylim[0] + (ylim[1] - ylim[0]) * 0.02,
            "first arrival - min period / 2",
            verticalalignment="bottom",
            horizontalalignment="right",
            color="#ee6e00",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(
                tt + o_s,
                ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                str(velocity) + " km/s",
                verticalalignment="bottom",
                horizontalalignment=hal,
                color="0.15",
            )
        plt.vlines(
            dist_in_km / min_velocity + minimum_period / 2.0,
            ylim[0],
            ylim[1],
            colors="gray",
            lw=2,
        )
        plt.text(
            dist_in_km / min_velocity + minimum_period / 2.0 - offset,
            ylim[1] - (ylim[1] - ylim[0]) * 0.02,
            "min surface velocity + min period / 2",
            verticalalignment="top",
            horizontalalignment="right",
            color="0.15",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )

        plt.hlines(
            noise_absolute, xlim[0], xlim[1], linestyle="--", color="gray"
        )
        plt.hlines(
            -noise_absolute, xlim[0], xlim[1], linestyle="--", color="gray"
        )
        plt.text(
            offset,
            noise_absolute + (ylim[1] - ylim[0]) * 0.01,
            "noise level",
            verticalalignment="bottom",
            horizontalalignment="left",
            color="0.15",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )
        plt.legend(
            loc="lower right", fancybox=True, framealpha=0.5, fontsize="small"
        )
        plt.gca().xaxis.set_ticklabels([])

        # Plot the basic global information.
        ax = plt.gca()
        txt = (
            "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f"
            % (cc, noise_absolute, noise_relative,)
        )
        ax.text(
            0.01,
            0.95,
            txt,
            transform=ax.transAxes,
            fontdict=dict(fontsize="small", ha="left", va="top"),
            bbox=dict(boxstyle="round", fc="w", alpha=0.8),
        )
        plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")

        # Show plot and return if not accepted.
        if accept_traces is not True:
            txt = "Rejected: %s" % (accept_traces)
            ax.text(
                0.99,
                0.95,
                txt,
                transform=ax.transAxes,
                fontdict=dict(fontsize="small", ha="right", va="top"),
                bbox=dict(boxstyle="round", fc="red", alpha=1.0),
            )
            plt.show()
    if accept_traces is not True:
        return []

    # Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = False
    if plot:
        old_time_windows = time_windows.copy()

    # Elimination Stage 1: Eliminate everything half a period before the
    # first theoretical arrival and everything after the slowest expected
    # surface waves, accounting for the duration of the source time
    # function as well.
    min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
    stf_env = obspy.signal.filter.envelope(stf_trace)

    max_env_amplitude_idx = np.argmax(stf_env.data)
    threshold = 0.5 * np.max(stf_env.data)
    max_idx = int(
        math.ceil((dist_in_km / min_velocity + minimum_period / 2.0) / dt)
    )
    max_idx += int(
        np.argmax(stf_env[max_env_amplitude_idx:] < threshold)
        + max_env_amplitude_idx
    )

    time_windows.mask[: min_idx + 1] = True
    time_windows.mask[max_idx:] = True

    if plot:
        plt.subplot2grid(grid, (8, 0), rowspan=1)
        _plot_mask(
            time_windows, old_time_windows, name="TRAVELTIME ELIMINATION"
        )
        old_time_windows = time_windows.copy()

    # -------------------------------------------------------------------------
    # Compute sliding time shifts and correlation coefficients for time
    # frames that passed the traveltime elimination stage.
    # -------------------------------------------------------------------------
    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.ma.zeros(npts, dtype="float32")
    sliding_time_shift.mask = True
    max_cc_coeff = np.ma.zeros(npts, dtype="float32")
    max_cc_coeff.mask = True

    # Compute the number of indices by which to shift the sliding windows;
    # for long seismograms this otherwise gets unnecessarily expensive.
    window_shift = int(0.05 * window_length)
    if not window_shift % 2:
        window_shift += 1
    window_shift = max(window_shift, 1)
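    # Forcing an odd shift keeps (window_shift - 1) / 2 integral, so each
    # sliding-window result can be written symmetrically around its midpoint
    # index below. E.g. window_length = 200 gives a shift of 10 -> 11 samples.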

    for start_idx, end_idx, midpoint_idx in _window_generator(
        npts, window_length, window_shift
    ):
        if not min_idx < midpoint_idx < max_idx:
            continue

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data[start_idx:end_idx].copy() * taper
        synthetic_window = synth[start_idx:end_idx].copy() * taper

        # Elimination Stage 2: Skip windows that have essentially no energy
        # to avoid instabilities. No windows can be picked in these.
        sw_start_idx = int(midpoint_idx - ((window_shift - 1) / 2))
        sw_end_idx = int(midpoint_idx + ((window_shift - 1) / 2) + 1)

        if synthetic_window.ptp() < synth.ptp() * 0.001:
            time_windows.mask[sw_start_idx:sw_end_idx] = True
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later than the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[sw_start_idx:sw_end_idx] = (
            time_shift * dt
        ) / minimum_period
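        # Note: the zero-lag sample of the "full" cross-correlation sits at
        # index window_length - 1, so an argmax exactly there yields a
        # time_shift of 0, i.e. perfectly aligned windows.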

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt(
            (synthetic_window ** 2).sum() * (data_window ** 2).sum()
        )
        max_cc_coeff[sw_start_idx:sw_end_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(
            time_windows, old_time_windows, name="NO ENERGY IN CC WINDOW"
        )
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(
            -threshold_shift, xlim[0], xlim[1], color="gray", linestyle="--"
        )
        plt.hlines(
            threshold_shift, xlim[0], xlim[1], color="gray", linestyle="--"
        )
        plt.text(
            5,
            -threshold_shift - (2) * 0.03,
            "threshold",
            verticalalignment="top",
            horizontalalignment="left",
            color="0.15",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )
        plt.plot(
            times,
            sliding_time_shift,
            color="#377eb8",
            label="Time shift in fraction of minimum period",
            lw=1.5,
        )
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(
            loc="lower right", fancybox=True, framealpha=0.5, fontsize="small"
        )

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(
            threshold_correlation,
            xlim[0],
            xlim[1],
            color="0.15",
            linestyle="--",
        )
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(
            5,
            threshold_correlation + (1.4) * 0.01,
            "threshold",
            verticalalignment="bottom",
            horizontalalignment="left",
            color="0.15",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )
        plt.plot(
            times,
            max_cc_coeff,
            color="#4daf4a",
            label="Maximum CC coefficient",
            lw=1.5,
        )
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(
            loc="lower right", fancybox=True, framealpha=0.5, fontsize="small"
        )

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="CORRELATION COEFF THRESHOLD ELIMINATION",
        )

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than threshold_shift times the dominant period as
    # negative.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="TIME SHIFT THRESHOLD ELIMINATION",
        )

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width of
    # the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
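    # E.g. (hypothetical values): minimum_period = 40 s and dt = 0.5 s give a
    # buffer of ceil(40 / 0.5 * 0.1) = 8 samples on each side of a jump.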
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer : index + sample_buffer] = True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(
            time_windows, old_time_windows, name="TIME SHIFT JUMPS ELIMINATION"
        )

    # Clip both to avoid large numbers by division.
    stacked = np.vstack(
        [
            np.ma.clip(
                synth_env,
                synth_env.max() * min_envelope_similarity * 0.5,
                synth_env.max(),
            ),
            np.ma.clip(
                data_env,
                data_env.max() * min_envelope_similarity * 0.5,
                data_env.max(),
            ),
        ]
    )
    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)
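    # The ratio is 1 where both envelopes agree and drops towards 0 as they
    # diverge, e.g. envelope amplitudes of 1.0 and 0.5 give a ratio of 0.5.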

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION",
        )

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(
            min_envelope_similarity,
            xlim[0],
            xlim[1],
            color="gray",
            linestyle="--",
        )
        plt.text(
            5,
            min_envelope_similarity + (2) * 0.03,
            "threshold",
            verticalalignment="bottom",
            horizontalalignment="left",
            color="0.15",
            path_effects=[
                PathEffects.withStroke(linewidth=3, foreground="white")
            ],
        )
        plt.plot(
            times,
            ratio,
            color="#9B59B6",
            label="Envelope amplitude similarity",
            lw=1.5,
        )
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(
            loc="lower right", fancybox=True, framealpha=0.5, fontsize="small"
        )

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = min(
        minimum_period / dt * min_length_period, maximum_period / dt
    )
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start : i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="MINIMUM WINDOW LENGTH ELIMINATION 1",
        )

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start : i.stop]
        data_window = data[i.start : i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        # Sometimes no local extrema are found and the code below fails.
        # In that case don't crash; just keep the windows that did not fail.
        try:
            closest_peaks = find_closest(data_p, synth_p)
            diffs = np.diff(closest_peaks)

            for idx in np.where(diffs == 1)[0]:
                if idx > 0:
                    start = synth_p[idx - 1]
                else:
                    start = 0
                if idx < (len(synth_p) - 1):
                    end = synth_p[idx + 1]
                else:
                    end = -1
                window_mask[start:end] = False

            closest_troughs = find_closest(data_t, synth_t)
            diffs = np.diff(closest_troughs)

            for idx in np.where(diffs == 1)[0]:
                if idx > 0:
                    start = synth_t[idx - 1]
                else:
                    start = 0
                if idx < (len(synth_t) - 1):
                    end = synth_t[idx + 1]
                else:
                    end = -1
                window_mask[start:end] = False
        except Exception as e:
            print(e)
            continue

        window_mask = np.ma.masked_array(window_mask, mask=window_mask)

        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="PEAK AND TROUGH MARCHING ELIMINATION",
        )

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start : i.stop]
        data_window = data[i.start : i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if (
            np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)])
            < min_peaks_troughs
        ):
            time_windows.mask[i.start : i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="PEAK/TROUGH COUNT ELIMINATION",
        )

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = min(
        minimum_period / dt * min_length_period, maximum_period / dt
    )
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start : i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(
            time_windows,
            old_time_windows,
            name="MINIMUM WINDOW LENGTH ELIMINATION 2",
        )

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start : j.stop] ** 2).sum()
        synth_energy = (synth[j.start : j.stop] ** 2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.",
                )
            continue

        # Check that amplitudes in the data are above the noise
        # Check that amplitudes in the data are above the noise
        if noise_absolute / data[j.start : j.stop].ptp() > max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above "
                    "the noise level.",
                )
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False

    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(
            time_windows, old_time_windows, name="LITTLE ENERGY ELIMINATION"
        )

    if verbose:
        _log_window_selection(
            data_trace.id, "Done, Selected %i window(s)" % len(final_windows)
        )

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        weight = 1.0
        windows.append((start, stop, weight))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA

        ax = data_plot
        trans = mtransforms.blended_transform_factory(
            ax.transData, ax.transAxes
        )
        for start, stop in final_windows:
            ax.fill_between(
                [start * data_delta, stop * data_delta],
                0,
                1,
                facecolor="#CDDC39",
                alpha=0.5,
                transform=trans,
            )

        plt.show()

    return windows
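
The window selector above repeatedly masks samples in a NumPy masked array and
then reads the surviving windows back as contiguous slices. A minimal,
self-contained sketch of that idiom (toy numbers, unrelated to the function
above):

import numpy as np

# Start fully selected, then mask samples that fail some elimination stage.
time_windows = np.ma.ones(10)
time_windows.mask = False
time_windows.mask[:3] = True   # e.g. pre-arrival elimination
time_windows.mask[7:] = True   # e.g. post-surface-wave elimination

# Recover the surviving windows as slice objects.
for sl in np.ma.flatnotmasked_contiguous(time_windows):
    print(sl.start, sl.stop)   # -> 3 7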
Example No. 55
0
def plotPhotometryErrorModel(dataset,
                             photomModel,
                             filterName='',
                             outputPrefix=''):
    """Plot photometric RMS for matched sources.

    Parameters
    ----------
    dataset : `lsst.verify.Blob`
        A `Blob` with the multi-visit photometry model.
    photomModel : `lsst.verify.Blob`
        A `Blob` holding the analytic photometry model parameters.
    filterName : str, optional
        Name of the observed filter to use on axis labels.
    outputPrefix : str, optional
        Prefix to use for filename of plot file.  Will also be used in plot
        titles. E.g., ``outputPrefix='Cfht_output_r_'`` will result in a file
        named ``'Cfht_output_r_check_photometry.png'``.
    """
    bright, = np.where(
        dataset['snr'].quantity > photomModel['brightSnr'].quantity)

    numMatched = len(dataset['mag'].quantity)
    magrms = dataset['magrms'].quantity
    mmagRms = magrms.to(u.mmag)
    mmagRmsHighSnr = mmagRms[bright]
    magerr = dataset['magerr'].quantity
    mmagErr = magerr.to(u.mmag)
    mmagErrHighSnr = mmagErr[bright]

    mmagrms_median = np.median(mmagRms)
    bright_mmagrms_median = np.median(mmagRmsHighSnr)

    fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(18, 16))

    ax[0][0].hist(mmagRms,
                  bins=100,
                  range=(0, 500),
                  color=color['all'],
                  histtype='stepfilled',
                  orientation='horizontal')
    ax[0][0].hist(mmagRmsHighSnr,
                  bins=100,
                  range=(0, 500),
                  color=color['bright'],
                  histtype='stepfilled',
                  orientation='horizontal')
    plotOutlinedAxline(ax[0][0].axhline,
                       mmagrms_median.value,
                       color=color['all'],
                       label="Median RMS: {v:.1f}".format(v=mmagrms_median))
    plotOutlinedAxline(ax[0][0].axhline,
                       bright_mmagrms_median.value,
                       color=color['bright'],
                       label="SNR > {snr:.0f}\nMedian RMS: {v:.1f}".format(
                           snr=photomModel['brightSnr'].quantity.value,
                           v=bright_mmagrms_median))

    ax[0][0].set_ylim([0, 500])
    ax[0][0].set_ylabel("{magrms.label} [{mmagrms.unit:latex}]".format(
        magrms=dataset['magrms'], mmagrms=mmagRms))
    ax[0][0].legend(loc='upper right')
    mag = dataset['mag'].quantity
    ax[0][1].scatter(mag, mmagRms, s=10, color=color['all'], label='All')
    ax[0][1].scatter(mag[bright],
                     mmagRmsHighSnr,
                     s=10,
                     color=color['bright'],
                     label='{label} > {value:.0f}'.format(
                         label=photomModel['brightSnr'].label,
                         value=photomModel['brightSnr'].quantity.value))
    ax[0][1].set_xlabel("{label} [{unit:latex}]".format(label=filterName,
                                                        unit=mag.unit))
    ax[0][1].set_ylabel("{label} [{unit:latex}]".format(
        label=dataset['magrms'].label, unit=mmagRmsHighSnr.unit))
    ax[0][1].set_xlim([17, 24])
    ax[0][1].set_ylim([0, 500])
    ax[0][1].legend(loc='upper left')
    plotOutlinedAxline(ax[0][1].axhline,
                       mmagrms_median.value,
                       color=color['all'])
    plotOutlinedAxline(ax[0][1].axhline,
                       bright_mmagrms_median.value,
                       color=color['bright'])
    matchCountTemplate = '\n'.join(
        ['Matches:', '{nBright:d} high SNR,', '{nAll:d} total'])
    ax[0][1].text(0.1,
                  0.6,
                  matchCountTemplate.format(nBright=len(bright),
                                            nAll=numMatched),
                  transform=ax[0][1].transAxes,
                  ha='left',
                  va='top')

    ax[1][0].scatter(mmagRms, mmagErr, s=10, color=color['all'], label=None)
    ax[1][0].scatter(mmagRmsHighSnr,
                     mmagErrHighSnr,
                     s=10,
                     color=color['bright'],
                     label=None)
    ax[1][0].set_xscale('log')
    ax[1][0].set_yscale('log')
    ax[1][0].plot([0, 1000], [0, 1000],
                  linestyle='--',
                  color='black',
                  linewidth=2)
    ax[1][0].set_xlabel("{label} [{unit:latex}]".format(
        label=dataset['magrms'].label, unit=mmagRms.unit))
    ax[1][0].set_ylabel("Median Reported Magnitude Err [{unit:latex}]".format(
        unit=mmagErr.unit))

    brightSnrMag = 2.5 * np.log10(
        1 + (1 / photomModel['brightSnr'].quantity.value)) * u.mag
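    # For reference (hypothetical value): brightSnr = 100 gives
    # 2.5 * log10(1 + 1/100) ~= 0.0108 mag ~= 10.8 mmag.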
    label = r'$SNR > {snr:.0f} \equiv \sigma <  {snrMag:0.1f}$'.format(
        snr=photomModel['brightSnr'].quantity.value,
        snrMag=brightSnrMag.to(u.mmag))
    ax[1][0].axhline(brightSnrMag.to(u.mmag).value,
                     color='red',
                     linewidth=4,
                     linestyle='dashed',
                     label=label)
    ax[1][0].set_xlim([1, 500])
    ax[1][0].set_ylim([1, 500])
    ax[1][0].legend(loc='upper center')

    ax[1][1].scatter(mag, mmagErr, color=color['all'], label=None)
    ax[1][1].set_yscale('log')
    ax[1][1].scatter(np.asarray(mag)[bright],
                     mmagErrHighSnr,
                     s=10,
                     color=color['bright'],
                     label=None)
    ax[1][1].set_xlabel("{name} [{unit:latex}]".format(name=filterName,
                                                       unit=mag.unit))
    ax[1][1].set_ylabel("Median Reported Magnitude Err [{unit:latex}]".format(
        unit=mmagErr.unit))
    ax[1][1].set_xlim([17, 24])
    ax[1][1].set_ylim([1, 500])
    ax[1][1].axhline(brightSnrMag.to(u.mmag).value,
                     color='red',
                     linewidth=4,
                     linestyle='dashed',
                     label=None)

    w, = np.where(mmagErr < 200. * u.mmag)
    plotPhotErrModelFit(mag[w].to(u.mag).value,
                        magerr[w].to(u.mmag).value,
                        photomModel,
                        ax=ax[1][1])
    ax[1][1].legend(loc='upper left')

    plt.suptitle("Photometry Check : %s" % outputPrefix, fontsize=30)
    ext = 'png'
    pathFormat = "{name}.{ext}"
    plotPath = makeFilename(outputPrefix,
                            pathFormat,
                            name="check_photometry",
                            ext=ext)
    plt.savefig(plotPath, format=ext)
    plt.close(fig)
    print("Wrote plot:", plotPath)
Example No. 56
0
def plotAstrometryErrorModel(dataset, astromModel, outputPrefix=''):
    """Plot angular distance between matched sources from different exposures.

    Creates a file containing the plot with a filename beginning with
    `outputPrefix`.

    Parameters
    ----------
    dataset : `lsst.verify.Blob`
        A `Blob` with the multi-visit astrometry data.
    astromModel : `lsst.verify.Blob`
        A `Blob` containing the analytic astrometry model parameters.
    outputPrefix : str, optional
        Prefix to use for filename of plot file.  Will also be used in plot
        titles. E.g., ``outputPrefix='Cfht_output_r_'`` will result in a file
        named ``'Cfht_output_r_check_astrometry.png'``.
    """
    bright, = np.where(
        dataset['snr'].quantity > astromModel['brightSnr'].quantity)

    dist = dataset['dist'].quantity
    numMatched = len(dist)
    dist_median = np.median(dist)
    bright_dist_median = np.median(dist[bright])

    fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(18, 12))

    ax[0].hist(dist,
               bins=100,
               color=color['all'],
               histtype='stepfilled',
               orientation='horizontal')
    ax[0].hist(dist[bright],
               bins=100,
               color=color['bright'],
               histtype='stepfilled',
               orientation='horizontal')

    ax[0].set_ylim([0., 500.])
    ax[0].set_ylabel("Distance [{unit:latex}]".format(unit=dist.unit))
    plotOutlinedAxline(
        ax[0].axhline,
        dist_median.value,
        color=color['all'],
        label="Median RMS: {v.value:.1f} {v.unit:latex}".format(v=dist_median))
    plotOutlinedAxline(
        ax[0].axhline,
        bright_dist_median.value,
        color=color['bright'],
        label="SNR > {snr:.0f}\nMedian RMS: {v.value:.1f} {v.unit:latex}".
        format(snr=astromModel['brightSnr'].quantity.value,
               v=bright_dist_median))
    ax[0].legend(loc='upper right')

    snr = dataset['snr'].quantity
    ax[1].scatter(snr, dist, s=10, color=color['all'], label='All')
    ax[1].scatter(snr[bright],
                  dist[bright],
                  s=10,
                  color=color['bright'],
                  label='SNR > {0:.0f}'.format(
                      astromModel['brightSnr'].quantity.value))
    ax[1].set_xlabel("SNR")
    ax[1].set_xscale("log")
    ax[1].set_ylim([0., 500.])
    matchCountTemplate = '\n'.join(
        ['Matches:', '{nBright:d} high SNR,', '{nAll:d} total'])
    ax[1].text(0.6,
               0.6,
               matchCountTemplate.format(nBright=len(bright), nAll=numMatched),
               transform=ax[1].transAxes,
               ha='left',
               va='baseline')

    w, = np.where(dist < 200 * u.marcsec)
    plotAstromErrModelFit(snr[w], dist[w], astromModel, ax=ax[1])

    ax[1].legend(loc='upper right')
    ax[1].axvline(astromModel['brightSnr'].quantity,
                  color='red',
                  linewidth=4,
                  linestyle='dashed')
    plotOutlinedAxline(ax[0].axhline, dist_median.value, color=color['all'])
    plotOutlinedAxline(ax[0].axhline,
                       bright_dist_median.value,
                       color=color['bright'])

    # Title for the whole figure.
    plt.suptitle("Astrometry Check : %s" % outputPrefix, fontsize=30)
    ext = 'png'
    pathFormat = "{name}.{ext}"
    plotPath = makeFilename(outputPrefix,
                            pathFormat,
                            name="check_astrometry",
                            ext=ext)
    plt.savefig(plotPath, format=ext)
    plt.close(fig)
    print("Wrote plot:", plotPath)
        plt.xlabel("Time (seconds)")
        plt.ylabel("Concentration (uM)")


startVals = r2.getGlobalParameterValues()
names = list(
    enumerate(
        [x for x in r2.getGlobalParameterIds() if ("K" in x or "k" in x)]))
n = len(names) + 1
dim = math.ceil(math.sqrt(n))

for i, next_param in enumerate(names):
    plt.subplot(dim, dim, i + 1)
    plot_param_uncertainty(r2, startVals[next_param[0]], next_param[1], 100)

plt.suptitle('Inactive integrin concentration')
plt.tight_layout()
plt.subplots_adjust(top=0.88)
plt.show()
#plt.savefig('uncertainty_inactiveIntegrin.png')

#%%
r2 = te.loada(Ant_str)


def plot_param_uncertainty(model, startVal, name, num_sims):
    stdDev = 0.6
    # Sweep the parameter from 60% below to 60% above its initial estimate.
    vals = np.linspace((1 - stdDev) * startVal, (1 + stdDev) * startVal,
                       num_sims)
    for val in vals:
        r2.resetToOrigin()
Example No. 58
0
def main(target, cap, ifirst, ilast, zmin, zmax, P0, options):

    print(f'Target: {target}')
    print(f'cap:    {cap}')
    print(f'zmin:   {zmin}')
    print(f'zmax:   {zmax}')
    print(f'P0:     {P0}')
    print(f'Options for systematics: {options}') 
    
    #-- Optional systematics
    add_contaminants = False
    add_photosyst = 'syst' in options
    add_fibercompleteness = 'comp' in options
    add_fibercollisions = 'cp' in options
    correct_fibercollisions = 'cp0' not in options
    add_zfailures = 'noz' in options
    add_radial_integral_constraint = 'ric' in options
    
    suffix = '_comp'*add_fibercompleteness
    suffix += '_cp'*add_fibercollisions  + '0'*(correct_fibercollisions == False)
    suffix += '_noz'*add_zfailures
    suffix += '_syst'*add_photosyst
    suffix += '_ric'*add_radial_integral_constraint
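    # The bool multiplications build the suffix from the enabled options,
    # since '_cp' * True == '_cp' and '_cp' * False == '';
    # e.g. options containing 'cp' and 'syst' yield suffix == '_cp_syst'.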
    
    output_dir = f'/mnt/lustre/eboss/EZ_MOCKs/EZmock_v7.1{suffix}/eBOSS_{target}'
    os.makedirs(output_dir, exist_ok=True)
    print(f'Mocks will be written at: {output_dir}')
    
    #-- Read data
    data, rand_data, mask_dict, nbar_data = read_data(target=target, cap=cap)
    
    #-- Get stars, bad targets, unplugged fibers and the following columns
    fields_redshift_failures = ['XFOCAL', 'YFOCAL', 'PLATE', 'MJD', 'FIBERID', 
                                'SPEC1_G', 'SPEC1_R', 'SPEC1_I', 
                                'SPEC2_G', 'SPEC2_R', 'SPEC2_I', 
                                'WEIGHT_NOZ']
    fields_bad_targets = ['RA', 'DEC', 'Z', 'IMATCH', 'NTILES', 'SECTOR']  \
                         + fields_redshift_failures
    bad_targets = get_contaminants(data, fields_bad_targets, zmin, zmax)
    
    #-- Fit for photo-systematics on data and obtain a density model for sub-sampling mocks
    density_model_data = None
    if add_photosyst:
        _, density_model_data = get_systematic_weights(data, rand_data, plotit=False,
                                                      target=target, zmin=zmin, zmax=zmax)
    


    #-- Read randoms just once
    input_dir  = f'/mnt/lustre/eboss/EZ_MOCKs/EZmock_{target}_v7.0'
    rand_name  = input_dir + f'/random/random_20x_eBOSS_{target}_{cap}_v7.fits'
    print('')
    print('Reading random from ', rand_name)
    rand0 = Table.read(rand_name)
    if 'SECTOR' not in rand0.colnames:
        assign_sectors(rand0, mask_dict)
        rand0.write(rand_name, overwrite=True)

    area_eff_data = nbar_data['area_eff']
    area_data = area_eff_data * len(rand0) / np.sum(rand0['COMP_BOSS_IN']) 

    for imock in range(ifirst, ilast):

        #-- Read mock
        mock_name_in  = input_dir + f'/eBOSS_{target}/EZmock_eBOSS_{target}_{cap}_v7_{imock:04d}.dat'
        print('')
        print('Reading mock from', mock_name_in)
        try:
            mock0 = Table.read(mock_name_in+'.fits')
        except Exception:
            mock0 = Table.read(mock_name_in, format='ascii', 
                               names=('RA', 'DEC', 'Z', 'WEIGHT_FKP'))
            w = np.isnan(mock0['RA']) | np.isnan(mock0['DEC']) | np.isnan(mock0['Z'])
            mock0 = mock0[~w]
            mock0.write(mock_name_in+'.fits', overwrite=True)
        
        assign_sectors(mock0, mask_dict)

        #-- Getting an estimate of how many galaxies are lost through photo systematics
        #-- but not yet perform sub-sampling
        if add_photosyst:
            w_photo = downsample_photo(density_model_data, mock0, seed=imock)
            photo_ratio = np.sum(w_photo)/len(mock0)
        else:
            photo_ratio = 1
        print(f' Fraction kept after photo syst downsampling: {photo_ratio:.4f}')

        #-- Removing this since now we correctly re-compute the effective area 
        #if add_fibercompleteness:
        #    w_comp = downsample_completeness(mock0, seed=imock)
        #    comp_ratio = np.sum(w_comp)/len(mock0)
        #else:
        comp_ratio = 1.
        #print(f' Fraction kept after fiber comp downsampling: {comp_ratio:.4f}')

        #-- Compute original density
        #nbar_mock0 = compute_nbar(mock0, nbar_data['z_edges'], nbar_data['shell_vol'], P0=P0)
        nbar_mock0 = compute_nbar(mock0, nbar_data['z_edges'], area_data, P0=P0)
        w = nbar_mock0['nbar']>0
        nbar_mock0_const = np.median(nbar_mock0['nbar'][w])
        print(f' Original number density (assumed constant): {nbar_mock0_const*1e4:.3f} x 1e-4')

        #-- Divide the original density of the mock by photo_ratio
        nbar_mock0_const *= photo_ratio * comp_ratio
        nbar_rand0_const = nbar_mock0_const 

        #-- Sub-sample to match n(z) of the data
        w_sub = downsample_nbar(nbar_data, nbar_mock0_const, mock0, seed=imock)
        mock = mock0[w_sub]
        w_sub = downsample_nbar(nbar_data, nbar_rand0_const, rand0, seed=imock)
        rand = rand0[w_sub]

        #-- Sub-sample of mock with photometric systematic map
        if add_photosyst:
            w_photo = downsample_photo(density_model_data, mock, seed=imock)
            photo_ratio = np.sum(w_photo)/len(mock)
            mock = mock[w_photo]
            print(f' Fraction kept after photo syst downsampling: {photo_ratio:.4f}')
        
        #-- Add missing fields to mock with zeros 
        for field in fields_bad_targets:
            if field not in mock.colnames:
                mock[field] = np.zeros(len(mock), dtype=data[field].dtype) 
        
        #-- Assign legacy targets for QSO only
        if target=='QSO':
            assign_imatch2(data, mock, zmin=zmin, zmax=zmax, seed=imock)

        #-- Add data contaminants to mock
        mock_full = vstack([mock, bad_targets]) if add_contaminants else mock

        assign_sectors(mock_full, mask_dict)

        #-- Sub-sample following completeness
        if add_fibercompleteness:
            w_comp = downsample_completeness(mock_full, seed=imock)
            comp_ratio = np.sum(w_comp)/len(mock_full)
            mock_full['IMATCH'][~w_comp] = -1
            print(f' Fraction kept after fiber comp downsampling: {comp_ratio:.4f}')

        #-- Add fiber collisions
        if add_fibercollisions:
            print('')
            print('Adding fiber collisions...')
            add_fiber_collisions(mock_full)

            #-- Correcting for fiber collisions
            if correct_fibercollisions:
                print('')
                print('Correcting fiber collisions...')
                correct_fiber_collisions(mock_full)
            else:
                mock_full['WEIGHT_CP'] = 1.0
        else:
            w = (mock_full['IMATCH'] == 0)
            mock_full['IMATCH'][w] = 1
            mock_full['WEIGHT_CP'] = 1.0
        
        #-- Add redshift failures
        if add_zfailures:
            print('')
            print('Adding redshift failures...')
            add_redshift_failures(mock_full, data, fields_redshift_failures, seed=imock)
     
            #-- Correct redshift failures
            print('')
            print('Correcting for redshift failures...')
            redshift_failures.get_weights_noz(mock_full)
            mock_full['WEIGHT_NOZ'][mock_full['IMATCH']==2] = 1  
            #redshift_failures.plot_failures(mock_full)
        else:
            mock_full['WEIGHT_NOZ'] = 1

        mock_full['COMP_BOSS'] = fiber_completeness(mock_full) 
        mock_full['sector_SSR'] = sector_ssr(mock_full) 
        
        print('Galaxies information:')
        w_both_mock = print_stats(mock_full) 
        
        #-- Make random catalog for this mock 
        rand_full = make_randoms(mock_full, rand, seed=imock, 
                                 add_radial_integral_constraint=add_radial_integral_constraint)
        
        print('Randoms information:')
        w_both_rand = print_stats(rand_full)
        frac_area = np.sum(w_both_rand)/len(rand_full)
    
        mock_full = mock_full[w_both_mock]
        rand_full = rand_full[w_both_rand]

        #-- Compute effective area
        area_eff_mock = area_data * frac_area * np.sum(rand_full['COMP_BOSS'])/ len(rand_full)
        print(f'Area effective mock: {area_eff_mock:.2f}')
        print(f'Area effective data: {area_eff_data:.2f}')

        #-- Compute nbar

        #nbar_mock = compute_nbar(mock_full, nbar_data['z_edges'], nbar_data['shell_vol'], P0=P0)
        nbar_mock = compute_nbar(mock_full, nbar_data['z_edges'], area_eff_mock, P0=P0)
        
        #-- Compute FKP weights
        compute_weight_fkp(mock_full, nbar_mock, P0=P0)
        compute_weight_fkp(rand_full, nbar_mock, P0=P0)
     
        #-- Correct photometric systematics
        if add_photosyst:
            print('')
            print('Getting photo-systematic weights...')
            _ = get_systematic_weights(mock_full, rand_full,
                                       target=target, zmin=zmin, zmax=zmax,
                                       random_fraction=1, seed=imock, nbins=20, 
                                       plotit=False)
        else: 
            mock_full['WEIGHT_SYSTOT'] = 1.0
            rand_full['WEIGHT_SYSTOT'] = 1.0

        print('')
        print('Mock IMATCH statistics')
        get_imatch_stats(mock_full, zmin, zmax)
        print('')
        print('Data IMATCH statistics')
        get_imatch_stats(data, zmin, zmax)


        #-- Make clustering catalog
        mock_clust = make_clustering_catalog(mock_full, zmin, zmax)
        rand_clust = make_clustering_catalog_random(rand_full, mock_clust, seed=imock)

        #-- Export  
        mock_name_out = output_dir+ f'/EZmock_eBOSS_{target}_{cap}_v7_{imock:04d}.dat.fits'
        mask_name_out = output_dir+ f'/EZmock_eBOSS_{target}_{cap}_v7_{imock:04d}.mask.fits'
        nbar_name_out = output_dir+ f'/EZmock_eBOSS_{target}_{cap}_v7_{imock:04d}.nbar.txt'
        rand_name_out = output_dir+ f'/EZmock_eBOSS_{target}_{cap}_v7_{imock:04d}.ran.fits'

        print('')
        print('Exporting mock to', mock_name_out)
        mock_clust.write(mock_name_out, overwrite=True) 
        print('Exporting random to', rand_name_out)
        rand_clust.write(rand_name_out, overwrite=True) 
        print('Exporting nbar to', nbar_name_out)
        export_nbar(nbar_mock, nbar_name_out)

        #print('Exporting mask to ', mask_name_out)
        #mask.write(mask_name_out, overwrite=True)
       
        if imock < -1:
            plot_completeness(mock_full, data, field='COMP_BOSS', sub=1)
            plt.suptitle(f'COMP_BOSS {target} {cap} mock #{imock:04d}')
            plt.savefig(mock_name_out.replace('dat.fits', 'comp_boss.png'))
            plot_completeness(mock_full, data, field='sector_SSR', sub=1)
            plt.suptitle(f'sector_SSR {target} {cap} mock #{imock:04d}')
            plt.savefig(mock_name_out.replace('dat.fits', 'sector_ssr.png'))
            mock_full.write(mock_name_out.replace('.dat.fits', '_full.dat.fits'), overwrite=True)
            rand_full.write(rand_name_out.replace('.ran.fits', '_full.ran.fits'), overwrite=True)
Example No. 59
0
def get_offsets_A_B(f, plot=False, interactive=False):
    '''
    Returns the offsets for A and B, so that when offsetting, the images do not overlap.
    Example fits:/scr2/nblago/Projects/SEDM/data/finders/f_b_a_rPTF15fks_r.fits
    '''
    from scipy import stats

    image = pf.open(f)
    data = image[0].data.T
    wcs = pywcs.WCS(image[0].header)
    ra, dec = cc.hour2deg(image[0].header['OBJRA'], image[0].header['OBJDEC'])
    obj = fitsutils.get_par(f, "OBJECT")
    pra, pdec = wcs.wcs_sky2pix(ra, dec, 0)

    #Get a local image
    # Cast to integer pixel indices so the slices below are valid.
    xl, yl = np.round(wcs.wcs_sky2pix(ra + 30. / 3600, dec - 30. / 3600, 0), 0).astype(int)
    xu, yu = np.round(wcs.wcs_sky2pix(ra - 30. / 3600, dec + 30. / 3600, 0), 0).astype(int)
    imageloc = image[0].data[xl:xu, yu:yl]

    bkg = np.median(imageloc)
    perc15 = np.percentile(imageloc, 15)
    perc85 = np.percentile(imageloc, 85)
    mask = (image[0].data > perc15) * (image[0].data < perc85)
    bkg_std = np.std(image[0].data[mask])  # mad(image[0].data)

    linestyles = ['solid', 'dashed', 'dashdot', 'dotted', 'solid']

    offsets = np.array([[+3, -4], [+3, +4], [+2, +4], [+2, +2]])

    Noff = len(offsets)
    pvalues = np.zeros((Noff, 2))

    if (plot):
        fig, axarr = plt.subplots(2,
                                  Noff,
                                  sharey='row',
                                  figsize=(6 * len(offsets), 12))
        axarr = np.array(axarr, ndmin=1)

    for i, off in enumerate(offsets):
        ra_off = ra - off[0] / 3600.
        dec_off = dec - off[1] / 3600.

        ra_off_op = ra + off[0] / 3600.
        dec_off_op = dec + off[1] / 3600.

        prao, pdeco = wcs.wcs_sky2pix(ra + off[0] / 3600.,
                                      dec + off[1] / 3600., 0)
        praosym, pdecosym = wcs.wcs_sky2pix(ra - 2 * off[0] / 3600.,
                                            dec - 2 * off[1] / 3600., 0)

        #Extract a sample window to check whether the background matches well.
        #Number of samples on each side.
        ns = 7
        sample = data[int(prao) - ns:int(prao) + ns, int(pdeco) - ns:int(pdeco) + ns]
        sm = np.median(sample)
        sstd = np.std(sample[(sample < 6000) * (sample > 0)])
        bkg_prob = np.minimum(1 - stats.norm.cdf(sm, bkg, bkg_std),
                              stats.norm.cdf(sm, bkg, bkg_std))
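        # Two-sided tail probability under the background model: a sample
        # median right at bkg gives 0.5, while one 2*bkg_std above gives ~0.023.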

        #Extract a sample window to check whether the background matches well.
        sample = data[int(praosym) - 7:int(praosym) + 7, int(pdecosym) - 7:int(pdecosym) + 7]
        smo = np.median(sample)
        sstdo = np.std(sample)
        bkg_probo = np.minimum(1 - stats.norm.cdf(smo, bkg, bkg_std),
                               stats.norm.cdf(smo, bkg, bkg_std))

        pvalues[i] = np.array([bkg_prob, bkg_probo])

        if (plot):

            #Retrieve the image of the object
            xl, yl = wcs.wcs_sky2pix(ra + 20. / 3600, dec - 20. / 3600, 0)
            xu, yu = wcs.wcs_sky2pix(ra - 20. / 3600, dec + 20. / 3600, 0)
            ifuwin = data[int(xl):int(xu), int(yu):int(yl)]

            #Retrieve the offset A image of the object
            x0, y0 = wcs.wcs_sky2pix(ra_off + 20. / 3600, dec_off - 20. / 3600,
                                     0)
            x1, y1 = wcs.wcs_sky2pix(ra_off - 20. / 3600, dec_off + 20. / 3600,
                                     0)
            ifuwin1 = data[int(x0):int(x1), int(y1):int(y0)]

            nx, ny = ifuwin1.shape

            #print nx,ny, ifuwin1.shape

            #Retrieve the offset A image of the object
            x0, y0 = wcs.wcs_sky2pix(ra_off_op + 20. / 3600,
                                     dec_off_op - 20. / 3600, 0)
            x1, y1 = wcs.wcs_sky2pix(ra_off_op - 20. / 3600,
                                     dec_off_op + 20. / 3600, 0)
            ifuwin2 = data[int(x0):int(x0) + nx, int(y1):int(y1) + ny]

            #Plot the A and B
            zmin, zmax = zscale.zscale(ifuwin)
            zmin1, zmax1 = zscale.zscale(ifuwin1 - ifuwin2)

            axarr[0, i].imshow(ifuwin.T,
                               aspect="auto",
                               vmin=zmin,
                               vmax=zmax,
                               extent=(20, -20, -20, 20),
                               alpha=0.5)
            axarr[1, i].imshow(ifuwin1.T - ifuwin2.T,
                               aspect="auto",
                               vmin=zmin1,
                               vmax=zmax1,
                               extent=(20, -20, -20, 20),
                               alpha=0.5)  #, cmap=matplotlib.cm.RdBu_r)
            #axarr[1, i].imshow(-1 * ifuwin2.T, aspect="auto", vmin=zmin2, vmax=zmax2, extent=(20,-20,-20,20), alpha=0.5)#, cmap=matplotlib.cm.RdBu_r)

            axarr[0, i].scatter(0, 0, marker="x", s=20)
            axarr[1, i].scatter(0, 0, marker="x", s=20)

            axarr[0, i].set_xlabel("OBJRA")
            axarr[0, i].set_ylabel("OBJDEC")
            axarr[1, i].set_xlabel("OBJRA")
            axarr[1, i].set_ylabel("OBJDEC")
            axarr[0, i].text(
                19,
                17,
                "Red stats: $\mu=$%.2f, $\sigma=$%.2f, p-value=%.5f" %
                (sm, sstd, bkg_prob),
                bbox=dict(facecolor='white', alpha=0.5))
            axarr[0, i].text(
                19,
                14,
                "Blue stats: $\mu=$%.2f, $\sigma=$%.2f, p-value=%.5f" %
                (smo, sstdo, bkg_probo),
                bbox=dict(facecolor='white', alpha=0.5))
            axarr[0, i].text(19,
                             11,
                             "Background stats: $\mu=$%.2f, $\sigma=$%.2f" %
                             (bkg, bkg_std),
                             bbox=dict(facecolor='white', alpha=0.5))

            #r = plt.Circle((prao-pra, pdeco-pdec), 5, facecolor="none", edgecolor="red", lw=2)
            #b = plt.Circle((praosym-pra, pdecosym-pdec), 5, facecolor="none", edgecolor="blue", lw=2)
            r = plt.Circle((2 * off[0], +2 * off[1]),
                           2,
                           facecolor="none",
                           edgecolor="red",
                           lw=3,
                           ls=linestyles[i])
            b = plt.Circle((-2 * off[0], -2 * off[1]),
                           2,
                           facecolor="none",
                           edgecolor="blue",
                           lw=3,
                           ls=linestyles[i])

            #Plot the true location of the IFU
            patches = []
            Path = mpath.Path
            path_data = [(Path.MOVETO, [20, -12]), (Path.LINETO, [20, 20]),
                         (Path.LINETO, [-13, 13]), (Path.LINETO, [-20, -18]),
                         (Path.CLOSEPOLY, [20, 10])]
            codes, verts = zip(*path_data)
            path = mpath.Path(verts, codes)
            patch = mpatches.PathPatch(path)
            patches.append(patch)
            collection = PatchCollection(patches, cmap=plt.cm.YlGn, alpha=0.2)
            colors = np.linspace(0, 1, len(patches))
            collection.set_array(np.array(colors))

            axarr[0, i].add_artist(r)
            axarr[0, i].add_artist(b)
            axarr[1, i].add_collection(collection)

            plt.suptitle(obj, fontsize=20)

    prod = np.prod(pvalues, axis=1)

    if (plot and interactive):
        axarr[0, np.argmax(prod)].text(0, 0, "WINNER")
        plt.show()
    elif (plot):
        axarr[0, np.argmax(prod)].text(0, 0, "WINNER")
        plt.savefig(
            os.path.join(
                os.path.dirname(f).replace("raw", "phot"),
                os.path.basename(f).replace(".fits", "_ab.png")))
        plt.clf()

    return 0, offsets[np.argmax(prod)], -2 * offsets[np.argmax(prod)]
Example No. 60
0
def decision_surface_modified(X_data,
                              Y_data,
                              model,
                              pairs_variables,
                              plot_step=0.1,
                              size=20):
    '''
    Create a decision surface for each given pair of feature columns
    (one subplot per pair).
    Modified from: http://scikit-learn.org/stable/auto_examples/tree/plot_iris.html
    '''
    plot_colors = "bry"

    for pairidx, pair in enumerate(pairs_variables):

        # We only take the two corresponding features
        X_for_surface = X_data.loc[:, X_data.columns[pair]]
        X_for_surface = X_for_surface.reset_index(drop=True)
        y_for_surface = np.asarray(Y_data)
        n_classes = len(np.unique(y_for_surface))

        model.fit(X_for_surface, y_for_surface)
        auc = np.mean(
            cross_val_score(model,
                            X_for_surface,
                            y_for_surface,
                            scoring="roc_auc"))

        # Plot the decision boundary
        plt.subplot(2, 2, pairidx + 1)

        x_min = X_for_surface.loc[:, X_data.columns[pair[0]]].min() - 1
        x_max = X_for_surface.loc[:, X_data.columns[pair[0]]].max() + 1
        y_min = X_for_surface.loc[:, X_data.columns[pair[1]]].min() - 1
        y_max = X_for_surface.loc[:, X_data.columns[pair[1]]].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)

        plt.xlabel(X_data.columns[pair[0]], fontsize=8)
        plt.ylabel(X_data.columns[pair[1]],
                   verticalalignment='top',
                   fontsize=8)
        plt.axis("tight")
        plt.title("auc = " + str(round(auc, 3)), fontsize=8)

        # Plot the training points
        for i, color in zip(range(n_classes), plot_colors):
            idx = np.where(y_for_surface == i)
            plt.scatter(X_for_surface.loc[idx][X_data.columns[pair[0]]],
                        X_for_surface.loc[idx][X_data.columns[pair[1]]],
                        c=color,
                        label=str(i),
                        cmap=plt.cm.Paired,
                        s=size)

        plt.axis("tight")

    plt.suptitle(
        "Decision surface of a logistic regression using paired features")
    plt.legend()
    plt.show()
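
A minimal usage sketch for the function above, with assumed synthetic data; it
also assumes the surrounding module imports numpy as np, matplotlib.pyplot as
plt and sklearn.model_selection.cross_val_score, which the function body
relies on:

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

# Four feature pairs fill the 2x2 subplot grid used by the function.
Xa, y = make_classification(n_samples=300, n_features=4, n_informative=3,
                            n_redundant=0, random_state=0)
X = pd.DataFrame(Xa, columns=['f0', 'f1', 'f2', 'f3'])
pairs = [[0, 1], [2, 3], [0, 2], [1, 3]]
decision_surface_modified(X, y, LogisticRegression(max_iter=1000), pairs)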