def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):
        r"""
        Parameters
        ----------
        npts : int (default = 25)
            The number of pressure points to apply.  The list of pressures
            is logarithmically spaced between the lowest and highest throat
            entry pressures in the network.

        inv_points : array_like, optional
            A list of specific pressure point(s) to apply.

        """
        if 'inlets' in kwargs:
            logger.info('Inlets received, passing to set_inlets')
            self.set_inlets(pores=kwargs['inlets'])
        if 'outlets' in kwargs:
            logger.info('Outlets received, passing to set_outlets')
            self.set_outlets(pores=kwargs['outlets'])
        self._AL = access_limited
        if inv_points is None:
            logger.info('Generating list of invasion pressures')
            min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
            max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
            inv_points = sp.logspace(sp.log10(min_p),
                                     sp.log10(max_p),
                                     npts)

        self._npts = sp.size(inv_points)
        # Execute calculation
        self._do_outer_iteration_stage(inv_points)
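
A minimal standalone sketch of the pressure-schedule step above, using plain numpy in place of the old `sp.*` scipy aliases; the entry-pressure values here are made up for illustration:

import numpy as np

# hypothetical throat entry pressures (Pa), standing in for self['throat.entry_pressure']
entry_pressure = np.array([1200.0, 3400.0, 560.0, 7800.0])

min_p = entry_pressure.min() * 0.98  # nudge below the smallest entry pressure
max_p = entry_pressure.max() * 1.02  # bump above the largest entry pressure
inv_points = np.logspace(np.log10(min_p), np.log10(max_p), 25)
print(inv_points[0], inv_points[-1])  # spans just beyond [560, 7800]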
Example #2
def plotHeatmap(fwrap, aclass, algoparams, trials, maxsteps):
    """ Visualizing performance across trials and across time 
    (iterations in powers of 2) """
    psteps = int(log2(maxsteps)) + 1
    storesteps = [0] + [2 ** x  for x in range(psteps)]
    ls = lossTraces(fwrap, aclass, dim=trials, maxsteps=maxsteps,
                    storesteps=storesteps, algoparams=algoparams,
                    minLoss=1e-10)
            
    initv = mean(ls[0])
    maxgain = exp(fwrap.stochfun.maxLogGain(maxsteps) + 1)
    maxneggain = sqrt(maxgain)
    
    M = zeros((psteps, trials))
    for sid in range(psteps):
        # skip the initial values
        winfactors = clip(initv / ls[sid+1], 1. / maxneggain, maxgain)
        winfactors[isnan(winfactors)] = 1. / maxneggain
        M[sid, :] = log10(sorted(winfactors))
        
    pylab.imshow(M.T, interpolation='nearest', cmap=cm.RdBu, #@UndefinedVariable
                 aspect=psteps / float(trials),
                 vmin= -log10(maxgain), vmax=log10(maxgain),
                 )   
    pylab.xticks([])
    pylab.yticks([])
    return ls
Example #3
 def getAxis(self,X,Y):
     """
     return the proper axis limits for the plots
     """
     out = []
     mM = [(min(X),max(X)),(min(Y),max(Y))]
     for i,j in mM:
         #YJC: checking if values are negative, if yes, return 0 and break
         if j <0 or i <0:
             return 0
         log_i = scipy.log10(i)
         d, I = scipy.modf(log_i)
         if log_i < 0:
             add = 0.5 *(scipy.absolute(d)<0.5)
         else:
             add = 0.5 *(scipy.absolute(d)>0.5)
         m = scipy.floor(log_i) + add
         out.append(10**m)
         log_j = scipy.log10(j)
         d, I = scipy.modf(log_j)
         if log_j < 0:
             add = - 0.5 *(scipy.absolute(d)>0.5)
         else:
             add = - 0.5 *(scipy.absolute(d)<0.5)
         m = scipy.ceil(log_j) + add
         out.append(10**m)
     return tuple(out)
Example #4
def powerlaw_fit(xdata, ydata, yerr):
    # Power-law fitting is best done by first converting
    # to a linear equation and then fitting to a straight line.
    #  y = a * x^b
    #  log(y) = log(a) + b*log(x)
    import numpy as np
    from scipy import log10
    from scipy import optimize

    powerlaw = lambda x, amp, index: amp*np.power(x,index)

    logx = log10(xdata)
    logy = log10(ydata)
    logyerr = yerr / ydata

    # define our (line) fitting function
    fitfunc = lambda p, x: p[0] + p[1] * x
    errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err

    pinit = [1.0, -1.0]
    out = optimize.leastsq(errfunc, pinit, args=(logx, logy, logyerr), full_output=1)
    pfinal = out[0]
    covar = out[1]

    #y = amp * x^exponent
    exponent = pfinal[1] #index in original
    amp = 10.0**pfinal[0]
    
    exponentErr = np.sqrt( covar[1][1] )  # covar[1][1] is the variance of p[1], the exponent
    ampErr = np.sqrt( covar[0][0] ) * amp  # covar[0][0] is the variance of p[0] = log10(amp)

    chisq = np.sum((((ydata - powerlaw(xdata, amp, exponent))/yerr)**2),axis=0)

    return exponent, amp, chisq
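
A quick synthetic-data check of `powerlaw_fit`; the amplitude, exponent, and noise level below are arbitrary test values:

import numpy as np

np.random.seed(0)
x = np.linspace(1.0, 10.0, 50)
y_true = 2.5 * x ** -1.3
yerr = 0.02 * y_true
y = y_true + yerr * np.random.randn(50)

exponent, amp, chisq = powerlaw_fit(x, y, yerr)
print(exponent, amp)  # should recover roughly -1.3 and 2.5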
Example #5
 def fit(self, kk=None):
     """
      Fit Fourier spectrum with the function set at class instantiation
     ==> NB: fitting is done in logarithmic coordinates
     and fills plotting arrays with data
     --------
     Options:
     --------
     kk
        (k1,k2) <None> spectral interval for function fitting
        by default interval [ kk[1], kk[imax__kk] ] will be fitted
        ==> i.e. k=0 is excluded
     """
     # fitting interval
     if kk:
         ik_min=(self.fft_data.kk[1:self.fft_data.imax__kk]<=kk[0]).nonzero()[0][-1]
         ik_max=(self.fft_data.kk[1:self.fft_data.imax__kk]<=kk[1]).nonzero()[0][-1]
     else:
          ik_min = 1
         ik_max=self.fft_data.imax__kk
     # do fitting
     self.__popt,self.__pcov = scipy.optimize.curve_fit(self.__func_fit,
                                                        scipy.log(self.fft_data.kk[ik_min:ik_max]),
                                                        scipy.log(self.fft_data.Ik[ik_min:ik_max]) )
     # boundaries of fitted interval
     self.kmin = self.fft_data.kk[ik_min]
     self.kmax = self.fft_data.kk[ik_max]
     # fill plot arrays <===============
     self.kk_plot=scipy.logspace( scipy.log10(self.kmin),
                                  scipy.log10(self.kmax),
                                  self.nk_plot )
     self.Ik_plot=self.fitting_function(self.kk_plot)
Example #6
 def test_permutation(self):
     #test permutation function
     for dn in self.datasets:
         D = data.load(os.path.join(self.dir_name,dn))
         perm = SP.random.permutation(D['X'].shape[0])
          #1. set permutation
         lmm = dlimix.CLMM()
         lmm.setK(D['K'])
         lmm.setSNPs(D['X'])
         lmm.setCovs(D['Cov'])
         lmm.setPheno(D['Y'])
         if 1:
             #pdb.set_trace()
             perm = SP.array(perm,dtype='int32')#Windows needs int32 as long -> fix interface to accept int64 types
         lmm.setPermutation(perm)
         lmm.process()
         pv_perm1 = lmm.getPv().ravel()
         #2. do by hand
         lmm = dlimix.CLMM()
         lmm.setK(D['K'])
         lmm.setSNPs(D['X'][perm])
         lmm.setCovs(D['Cov'])
         lmm.setPheno(D['Y'])
         lmm.process()
         pv_perm2 = lmm.getPv().ravel()
         D2 = (SP.log10(pv_perm1)-SP.log10(pv_perm2))**2
         RV = SP.sqrt(D2.mean())
         self.assertTrue(RV<1E-6)
Example #7
def entropyloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1-epsilon, pred)
    el = sum(act*sp.log10(pred) + sp.subtract(1,act)*sp.log10(sp.subtract(1,pred)))
    el = el * -1.0/len(act)
    return el
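
The function above takes base-10 logs; the conventional cross-entropy uses the natural log. A numpy-only sketch of that variant (modern scipy no longer re-exports these ufuncs):

import numpy as np

def entropyloss_ln(act, pred, epsilon=1e-15):
    # clip predictions away from 0 and 1, then apply the natural-log
    # cross-entropy; the log10 version above differs only by a factor of ln(10)
    act = np.asarray(act, dtype=float)
    pred = np.clip(np.asarray(pred, dtype=float), epsilon, 1 - epsilon)
    return -np.mean(act * np.log(pred) + (1 - act) * np.log(1 - pred))

print(entropyloss_ln([1, 0, 1], [0.9, 0.1, 0.8]))  # ~0.145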
Example #8
def addqqplotinfo(qnull,M,xl='-log10(P) observed',yl='-log10(P) expected',xlim=None,ylim=None,alphalevel=0.05,legendlist=None,fixaxes=False):    
    distr='log10'
    pl.plot([0,qnull.max()], [0,qnull.max()],'k')
    pl.ylabel(xl)
    pl.xlabel(yl)
    if xlim is not None:
        pl.xlim(xlim)
    if ylim is not None:
        pl.ylim(ylim)        
    if alphalevel is not None:
        if distr == 'log10':
            betaUp, betaDown, theoreticalPvals = _qqplot_bar(M=M,alphalevel=alphalevel,distr=distr)
            lower = -sp.log10(theoreticalPvals-betaDown)
            upper = -sp.log10(theoreticalPvals+betaUp)
            pl.fill_between(-sp.log10(theoreticalPvals),lower,upper,color="grey",alpha=0.5)
            #pl.plot(-sp.log10(theoreticalPvals),lower,'g-.')
            #pl.plot(-sp.log10(theoreticalPvals),upper,'g-.')
    if legendlist is not None:
        leg = pl.legend(legendlist, loc=4, numpoints=1)
        # set the markersize for the legend
        for lo in leg.legendHandles:
            lo.set_markersize(10)

    if fixaxes:
        fix_axes()        
Example #9
def testPlot():
    """ Get/generate the data to play with """
    TIME_INC = 1e-6
    NUM_POINTS = 40000
    t = timeScale(TIME_INC,NUM_POINTS)
    noisy_sig = genData(TIME_INC,True, t)
    clean_sig = genData(TIME_INC,False,t)
    """ Get FFT of signal and the sampling frequency from the time intervals used to generate the signals"""
    freq, s_fft  = getFFT(noisy_sig, TIME_INC)
    freq2,s_fft2 = getFFT(clean_sig, TIME_INC)


    """ Show in 2 subplots the signals and their spectrums"""
    plb.subplot(211,axisbg='#FFFFCC')
    p.plot(t,clean_sig,'b')
    p.hold(True)
    p.grid(True)
    p.plot(t,noisy_sig,'r')
    plb.subplot(212,axisbg='#FFFFCC')
    #p.hold(False)
    p.plot(freq2, 20*sp.log10(s_fft2),'x-b')
    p.hold(True)
    p.plot(freq,  20*sp.log10(s_fft), '+-r')
    p.xticks([-10e4,-5e4,-4e4,-3e4,-2e4,-1e4,0,1e4,2e4,3e4,4e4,5e4,10e4])
    p.xlim([-1e5,1e5])
    p.grid(True)
    #p.show()
    q = ScrollingToolQT(p.gcf())
    return q   # WARNING: it's important to return this object otherwise
Example #10
def create_grid(r_in, r_out, nshell, space = 'powerlaw1', end = True):
    # function to create grid
    if space == 'log10':
        from scipy import log10, logspace
        # get the exponent of the start- and
        # stop-radius in input units
        start = [log10(r_in), 0][r_in == 0]
        stop = log10(r_out)
        radii = logspace(start, stop, num=nshell, endpoint=end)
    elif space == "powerlaw1":
        from scipy import arange
        radii = r_in * (r_out/r_in)**(arange(nshell)/(nshell - 1.0))
    elif space == 'linear':
        from scipy import linspace
        # linearly spaced grid
        radii = linspace(r_in, r_out, num=nshell, endpoint=end)
    elif space == 'powerlaw2':
        from scipy import linspace
        # first check if coefficients for the power-law were given
        #~ if 'exp' in kwargs:
            #~ p_exp = kwargs['exp']
        #~ else: # if not, set it to 2, i.e. r^2
            #~ p_exp = 2
        radii = r_in + (r_out - r_in)*(linspace(r_in, r_out, num=nshell, endpoint=end)/(r_out))**2
        #pr_int('Not implemented yet.')
        #raise ParError(spaced)
    else:
        raise Exception('Unknown spacing type: %s' % space)
    return radii
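
Comparing the three main spacings on the same shell range; note that for these inputs 'powerlaw1' reduces to the same geometric sequence as 'log10' (assuming an older scipy that still re-exports log10/logspace/linspace, as the function body does):

r_log = create_grid(1.0, 100.0, 5, space='log10')
r_pl1 = create_grid(1.0, 100.0, 5, space='powerlaw1')
r_lin = create_grid(1.0, 100.0, 5, space='linear')
print(r_log)  # [1., 3.16, 10., 31.6, 100.]
print(r_pl1)  # identical geometric spacing for these inputs
print(r_lin)  # [1., 25.75, 50.5, 75.25, 100.]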
Example #11
def testAll(tests, allalgos, tolerant=True):
    countgood = 0
    for i, algo in enumerate(sorted(allalgos)):
        print("%d, %s:" % (i + 1, algo.__name__))
        print(' ' * int(log10(i + 1) + 2),)
        good = True
        messages = []
        for t in tests:
            try:
                res = t(algo)
            except Exception as e:
                if not tolerant:
                    raise e
                res = e

            if res is True:
                print('.', end='')
            else:
                good = False
                messages.append(res)
                print('F', end='')
        if good:
            countgood += 1
            print('--- OK.')
        else:
            print('--- NOT OK.')
            for m in messages:
                if m is not None:
                    print(' ' * int(log10(i + 1) + 2), '->', m)
Example #12
 def residual_lmfit(self, pars, x, y):
     a = P4Rm()
     self.strain_DW(pars)
     res = f_Refl_fit(a.AllDataDict["geometry"], self.Data4f_Refl)
     y_cal = convolve(abs(res) ** 2, a.ParamDict["resol"], mode="same")
     y_cal = y_cal / y_cal.max() + a.AllDataDict["background"]
     return log10(y) - log10(y_cal)
Example #13
def r_ion_neutral(s,t,Ni,Nn,Ti,Tn):
    """ This will calculate resonant ion - neutral reactions collision frequencies. See
    table 4.5 in Schunk and Nagy.
    Inputs
    s - Ion name string
    t - neutral name string
    Ni - Ion density cm^-3
    Nn - Neutral density cm^-3
    Ti - Ion temperature K
    Tn - Neutral temperature K
    Outputs
    nu_ineu - collision frequency s^-1
    """
    Tr = (Ti+Tn)*0.5
    sp1 = (s,t)
    # from Schunk and Nagy table 4.5
    nudict={('H+','H'):[2.65e-10,0.083],('He+','He'):[8.73e-11,0.093], ('N+','N'):[3.84e-11,0.063],
            ('O+','O'):[3.67e-11,0.064], ('N2+','N'):[5.14e-11,0.069], ('O2+','O2'):[2.59e-11,0.073],
            ('H+','O'):[6.61e-11,0.047],('O+','H'):[4.63e-12,0.],('CO+','CO'):[3.42e-11,0.085],
            ('CO2+','CO'):[2.85e-11,0.083]}
    A = nudict[sp1][0]
    B = nudict[sp1][1]
    if sp1==('O+','H'):
        nu_ineu = A*Nn*sp.power(Ti/16.+Tn,.5)
    elif sp1==('H+','O'):
        nu_ineu = A*Nn*sp.power(Ti,.5)*(1-B*sp.log10(Ti))**2
    else:
        nu_ineu = A*Nn*sp.power(Tr,.5)*(1-B*sp.log10(Tr))**2
    return nu_ineu
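
An example call for the resonant O+/O pair; the densities and temperatures are made-up but plausible ionospheric values (`sp` being the scipy module imported by this file):

# O+ colliding with atomic O at Ti = Tn = 1000 K and Nn = 1e8 cm^-3
nu = r_ion_neutral('O+', 'O', Ni=1e5, Nn=1e8, Ti=1000.0, Tn=1000.0)
print('collision frequency [s^-1]:', nu)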
Example #14
def plot_median_errors(RefinementLevels):
        for i in RefinementLevels[0].cases:
            x = []
            y = []
            print "Analyzing median error on: ", i
            for r in RefinementLevels:                
                x.append(r.LUT.D_dim*r.LUT.P_dim)
                r.get_REL_ERR_SU2(i)
                y.append(r.SU2[i].median_ERR*100)
            
            x = sp.array(x)
            y = sp.array(y)            
            y = y[sp.argsort(x)]
            x = x[sp.argsort(x)]
                                    
            LHM = sp.ones((len(x),2))
            RHS = sp.ones((len(x),1))            
            LHM[:,1] = sp.log10(x)
            RHS[:,0] = sp.log10(y)

            sols = sp.linalg.lstsq(LHM,RHS)
            b = -sols[0][1]
            plt.loglog(x,y, label='%s, %s'%(i,r'$O(\frac{1}{N})^{%s}$'%str(sp.around(b,2))), basex=10, basey=10, \
                       subsy=sp.linspace(10**(-5), 10**(-2),20),\
                       subsx=sp.linspace(10**(2), 10**(5),50))
            
            #for r in RefinementLevels:                
               # x.append(r.LUT.D_dim*r.LUT.P_dim)
              #  r.get_REL_ERR_SciPy(i)
             #   y.append(r.SciPy[i].median_ERR*100)
            #plt.plot(x,y, label='SciPy: %s'%i)
        plt.grid(which='both')
        plt.xlabel('Grid Nodes (N)')
        plt.ylabel('Median relative error [%]')
        return
Example #15
def getLogBins(first_point, last_point, log_step):
    """
    get the bin in log scale and the center bin value
    
    Parameters:
    ----------------
    first_point, last_point : number
    First and last point of the x-axis
    
    log_step : number
    Required log-distance between x-points
    
    Returns:
    -----------
    xbins : array of the x values at the center (in log-scale) of the bin
    bins : array of the x values of the bins 
    """
    log_first_point = scipy.log10(first_point)
    log_last_point = scipy.log10(last_point)
    # Calculate the bins as required by the histogram function, i.e. the bins edges including the rightmost one
    N_log_steps = int(scipy.floor((log_last_point - log_first_point) / log_step)) + 1  # cast to int for np.linspace's num argument
    llp = N_log_steps * log_step + log_first_point
    bins_in_log_scale = np.linspace(log_first_point, llp, N_log_steps + 1)
    bins = 10 ** bins_in_log_scale
    center_of_bins_log_scale = bins_in_log_scale[:-1] + log_step / 2.0
    xbins = 10 ** center_of_bins_log_scale
    return xbins, bins
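
A usage sketch feeding the returned edges to numpy's histogram (assuming the module's scipy/np imports); the sample data are synthetic:

import numpy as np

data = np.random.lognormal(mean=0.0, sigma=1.0, size=10000)
xbins, bins = getLogBins(data.min(), data.max(), log_step=0.1)
counts, _ = np.histogram(data, bins=bins)
print(len(bins), len(xbins), len(counts))  # edges are one longer than centers/counts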
Example #16
def testAll(tests, allalgos, tolerant=True):
    countgood = 0
    for i, algo in enumerate(sorted(allalgos)):
        print(("%d, %s:" % (i + 1, algo.__name__)))
        print((" " * int(log10(i + 1) + 2),))
        good = True
        messages = []
        for t in tests:
            try:
                res = t(algo)
            except Exception as e:
                if not tolerant:
                    raise e
                res = e

            if res is True:
                print((".",))
            else:
                good = False
                messages.append(res)
                print(("F",))
        if good:
            countgood += 1
            print("--- OK.")
        else:
            print("--- NOT OK.")
            for m in messages:
                if m is not None:
                    print((" " * int(log10(i + 1) + 2), "->", m))
    print()
    print(("Summary:", countgood, "/", len(allalgos), "of test were passed."))
Example #17
def testAll(tests, allalgos, tolerant=True):
    countgood = 0
    for i, algo in enumerate(sorted(allalgos)):
        print "%d, %s:" % (i + 1, algo.__name__)
        print " " * int(log10(i + 1) + 2),
        good = True
        messages = []
        for t in tests:
            try:
                res = t(algo)
            except Exception, e:
                if not tolerant:
                    raise e
                res = e

            if res is True:
                print ".",
            else:
                good = False
                messages.append(res)
                print "F",
        if good:
            countgood += 1
            print "--- OK."
        else:
            print "--- NOT OK."
            for m in messages:
                if m is not None:
                    print " " * int(log10(i + 1) + 2), "->", m
Example #18
File: gt_analysis.py  Project: openube/NNGT
def degree_distrib(net, deg_type="total", node_list=None, use_weights=True,
                   log=False, num_bins=30):
    '''
    Computing the degree distribution of a network.
    
    Parameters
    ----------
    net : :class:`~nngt.Graph` or subclass
        the network to analyze.
    deg_type : string, optional (default: "total")
        type of degree to consider ("in", "out", or "total").
    node_list : list or numpy.array of ints, optional (default: None)
        Restrict the distribution to a set of nodes (default: all nodes).
    use_weights : bool, optional (default: True)
        use weighted degrees (do not take the sign into account: all weights
        are positive).
    log : bool, optional (default: False)
        use log-spaced bins.
    
    Returns
    -------
    counts : :class:`numpy.array`
        number of nodes in each bin
    deg : :class:`numpy.array`
        bins
    '''
    ia_node_deg = net.get_degrees(node_list, deg_type, use_weights)
    ra_bins = sp.linspace(ia_node_deg.min(), ia_node_deg.max(), num_bins)
    if log:
        ra_bins = sp.logspace(sp.log10(sp.maximum(ia_node_deg.min(),1)),
                               sp.log10(ia_node_deg.max()), num_bins)
    counts,deg = sp.histogram(ia_node_deg, ra_bins)
    ia_indices = sp.argwhere(counts)
    return counts[ia_indices], deg[ia_indices]
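
The same log-binning idea in a numpy-only sketch, with a synthetic heavy-tailed degree sequence standing in for `net.get_degrees`:

import numpy as np

degrees = np.random.zipf(2.0, size=1000)  # hypothetical degree sequence
bins = np.logspace(np.log10(max(degrees.min(), 1)),
                   np.log10(degrees.max()), 30)
counts, edges = np.histogram(degrees, bins)
nonzero = np.argwhere(counts)
print(counts[nonzero].ravel()[:5])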
Example #19
    def PlotManhattan(self,
                      xname=str,
                      yname=str,
                      Log=Logger):

        LogString = '**** Generating Manhattan plot ...'
        print LogString
        Log.Write(LogString+'\n')

        XName = ''
        YName = ''
        for Key in self.DataContainers.iterkeys():
            if(re.search(xname,Key)):
                XName = Key
            if(re.search(yname,Key)):
                YName = Key

        X = []
        for Entry in self.DataContainers[XName].GetDataArray():
            X.append(int(Entry))
        X = scipy.array(X)
        Y = []
        for Entry in self.DataContainers[YName].GetDataArray():
            if(Entry=='-1'):
                Y.append(float(1.0))
            else:
                Y.append(float(Entry))
        Y = -scipy.log10(scipy.array(Y))

        PylabParameters,\
        Rectangle         = self.PylabUpdateParams()
        pylab.rcParams.update(PylabParameters)
        PylabFigure = pylab.figure()
        PylabFigure.clf()
        PylabAxis = PylabFigure.add_axes(Rectangle)
        PylabAxis.scatter(x=X,
                          y=Y)
        XSign = scipy.array(PylabAxis.get_xlim())
        YSign = -scipy.log10(scipy.array([5.0e-8,5.0e-8]))
        PylabAxis.plot(XSign,
                       YSign,
                       linestyle='--',
                       color='grey',
                       linewidth=1.0)
        XSugg = scipy.array(PylabAxis.get_xlim())
        YSugg = -scipy.log10(scipy.array([1.0e-5,1.0e-5]))
        PylabAxis.plot(XSugg,
                       YSugg,
                       linestyle=':',
                       color='grey',
                       linewidth=1.0)
        PylabAxis.set_ylim([0.0,PylabAxis.get_ylim()[1]])
        PylabFigure.savefig('Manhattan.png')
        PylabAxis.clear()
        pylab.close(PylabFigure)
        del PylabFigure
        del PylabAxis

        return
Example #20
def effective_order(x, y):
    '''Find the slope of a log-log plot to estimate the effective order of accuracy'''

    logx = log10(x)
    logy = log10(y)
    out = curve_fit(linear_fit, logx, logy)

    return out[0][1]
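
`linear_fit` is defined elsewhere in the source file; a self-contained sketch assuming it is the straight-line model passed to `scipy.optimize.curve_fit`:

import numpy as np
from scipy.optimize import curve_fit

def linear_fit(x, a, b):
    # assumed form: log10(y) = a + b*log10(x)
    return a + b * x

h = np.array([0.1, 0.05, 0.025, 0.0125])
err = 3.0 * h ** 2  # a second-order error curve
out = curve_fit(linear_fit, np.log10(h), np.log10(err))
print(out[0][1])  # slope ~2.0, the effective order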
Example #21
def makeGrid(minval,maxval,gridpoints=0,log=0,floatornot=0):
    
    """
    Make a grid between the min and max values.
    
    @param minval: lower boundary of grid range
    @type minval: float
    @param maxval: upper boundary of grid range
    @type maxval: float
    
    @keyword gridpoints: number of grid points, including boundaries. If 0, it
                         is replaced by 2; if 1, minval is the only grid point
                                
                         (default: 0)
    @type gridpoints: int
    @keyword log: if grid should be calculated in logspace

                  (default: 0)
    @type log: bool
    @keyword floatornot: 0 if final gridpoints should be rounded to nearest 
                         integer
                                
                         (default: 0)
    @type floatornot: bool
    
    @return: the grid points including the boundaries
    @rtype: array

    """
    
    if gridpoints == 0:
        gridpoints = 2
    else:
        gridpoints = int(gridpoints)
    final_grid = []
    
    if log:
        minval = float(log10(minval))
        maxval = float(log10(maxval))
        grid = linspace(minval,maxval,gridpoints)
        if floatornot:
            for i in xrange(len(grid)):
                final_grid.append(int(round(10**(grid[i]))))
        else:
            for i in xrange(len(grid)):
                final_grid.append(10**(grid[i]))
        
    else:
        minval = float(minval)
        maxval = float(maxval)
        grid = linspace(minval,maxval,gridpoints)
        if floatornot:
            for i in xrange(len(grid)):
                final_grid.append(int(round(grid[i])))
        else:
            for i in xrange(len(grid)):
                final_grid.append(grid[i])
    return final_grid
Example #22
def svpice( t) :
  '''
  Returns saturation vapor pressure over ice, in hPa, given temperature in K.
  The Goff-Gratch equation (Smithsonian Met. Tables,  5th ed., pp. 350, 1984)
  '''
  a = 273.16 / t
  exponent = -9.09718 * (a - 1.) - 3.56654 * log10(a) + 0.876793 * (1. - 1./a) + log10(6.1071)

  return 10.0**exponent
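
A quick sanity check, assuming the module imports `log10` (e.g. `from numpy import log10`); Goff-Gratch should return about 6.11 hPa at the triple point:

print(svpice(273.16))  # ~6.1071 hPa over ice at the triple point
print(svpice(253.15))  # ~1.03 hPa at -20 C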
Example #23
File: Intersect.py  Project: jsnel/pysces
 def D2_setup_scan(self,min1,max1,step1,min2,max2,step2):
     self.P1min = min1
     self.P1max = max1
     self.P1steps = step1
     self.P2min = min2
     self.P2max = max2
     self.P2steps = step2
     self.P1range = scipy.logspace(scipy.log10(min1),scipy.log10(max1),step1)
     self.P2range = scipy.logspace(scipy.log10(min2),scipy.log10(max2),step2)
Example #24
def powerlaw(y):
  s_y = np.sort(y)[::-1]
  X = sp.log10(np.arange(1,len(s_y)+1))
  Y = sp.log10(s_y)
  pinit = [10.0, 0.0]
  out = sp.optimize.leastsq(errfunc, pinit, args=(X, Y), full_output = 1)
  index = out[0][1]
  amp = 10**out[0][0]
  return index
Example #25
def plot_overlap_ps(result_file, ss_file='/Users/bjarnivilhjalmsson/data/GIANT/GIANT_HEIGHT_Wood_et_al_2014_publicrelease_HapMapCeuFreq.txt',
                   fig_filename='/Users/bjarnivilhjalmsson/data/tmp/manhattan_combPC_HGT.png', method='combPC',
                   ylabel='Comb. PC (HIP,WC,HGT,BMI) $-log_{10}(P$-value$)$', xlabel='Height $-log_{10}(P$-value$)$', p_thres=0.00001):
    # Parse results and SS file
    res_table = pandas.read_table(result_file)
    ss_table = pandas.read_table(ss_file)
    # Parse 
    res_sids = sp.array(res_table['SNPid'])
    if method == 'MVT':
        comb_ps = sp.array(res_table['pval'])
    elif method == 'combPC':
        comb_ps = sp.array(res_table['combPC'])
    if 'MarkerName' in ss_table.keys():
        ss_sids = sp.array(ss_table['MarkerName'])
    elif 'SNP' in ss_table.keys():
        ss_sids = sp.array(ss_table['SNP'])
    else:
        raise Exception("Don't know where to look for rs IDs")
    marg_ps = sp.array(ss_table['p'])
    
    # Filtering boring p-values
    res_p_filter = comb_ps < p_thres
    res_sids = res_sids[res_p_filter]
    comb_ps = comb_ps[res_p_filter]
#     ss_p_filter = marg_ps<p_thres
#     ss_sids = ss_sids[ss_p_filter]
#     marg_ps = marg_ps[ss_p_filter]
    
    common_sids = sp.intersect1d(res_sids, ss_sids)
    print 'Found %d SNPs in common' % (len(common_sids))
    ss_filter = sp.in1d(ss_sids, common_sids)
    res_filter = sp.in1d(res_sids, common_sids)
    
    ss_sids = ss_sids[ss_filter]
    res_sids = res_sids[res_filter]
    marg_ps = marg_ps[ss_filter]
    comb_ps = comb_ps[res_filter]
    
    print 'Now sorting'
    ss_index = sp.argsort(ss_sids)
    res_index = sp.argsort(res_sids)
    
    marg_ps = -sp.log10(marg_ps[ss_index])
    comb_ps = -sp.log10(comb_ps[res_index])
    
    with plt.style.context('fivethirtyeight'):
        plt.plot(marg_ps, comb_ps, 'b.', alpha=0.2)
        (x_min, x_max) = plt.xlim()
        (y_min, y_max) = plt.ylim()
        
        plt.plot([x_min, x_max], [y_min, y_max], 'k--', alpha=0.2)
        plt.ylabel(ylabel)
        plt.xlabel(xlabel)
        plt.tight_layout()
        plt.savefig(fig_filename)
    plt.clf()
Example #26
    def test_lmm2(self):
        """another test, establishing an lmm-equivalent by a design matrix choice"""
        for dn in self.datasets:
            D = data.load(os.path.join(self.dir_name,dn))
            #construct Kronecker LMM model which has the special case of standard LMM
            #covar1: genotype matrix
            N = D['K'].shape[0]
            P = 3
            K1r = D['K']
            #K1c = SP.zeros([2,2])
            #K1c[0,0] = 1
            K1c = SP.eye(P)
            K2r = SP.eye(N)
            K2c = SP.eye(P)

            #A   = SP.zeros([1,2])
            #A[0,0] =1
            A = SP.eye(P)
            Acov = SP.eye(P)
            Xcov = D['Cov'][:,SP.newaxis]
            X      = D['X']
            Y      = D['Y'][:,SP.newaxis]
            Y      = SP.tile(Y,(1,P))

            lmm = dlimix.CKroneckerLMM()
            lmm.setK1r(K1r)
            lmm.setK1c(K1c)
            lmm.setK2r(K2r)
            lmm.setK2c(K2c)

            lmm.setSNPs(X)
            #add covariates
            lmm.addCovariates(Xcov,Acov)
            #add SNP design
            lmm.setSNPcoldesign(A)
            lmm.setPheno(Y)
            lmm.setNumIntervalsAlt(0)
            lmm.setNumIntervals0(100)

            lmm.process()

            #get p-values with P-dof:
            pv_Pdof = lmm.getPv().ravel()
            #transform in P-values with a single DOF:
            import scipy.stats as st
            lrt = st.chi2.isf(pv_Pdof,P)/P
            pv = st.chi2.sf(lrt,1)
            #compare with single DOF P-values:
            D2= ((SP.log10(pv)-SP.log10(D['pv']))**2)
            RV = SP.sqrt(D2.mean())
            #print "\n"
            #print pv[0:10]
            #print D['pv'][0:10]
            #print RV
            #pdb.set_trace()
            self.assertTrue(RV<1E-6)
Example #27
 def residual_square(self, p, E_min, nb_minima):
     a = P4Rm()
     P4Rm.ParamDict["_fp_min"] = p
     self.strain_DW()
     res = f_Refl_fit(a.AllDataDict["geometry"], self.Data4f_Refl)
     y_cal = convolve(abs(res) ** 2, a.ParamDict["resol"], mode="same")
     y_cal = y_cal / y_cal.max() + a.AllDataDict["background"]
     y_obs = a.ParamDict["Iobs"]
     self.on_pass_data_to_thread(y_cal, p, E_min, nb_minima)
     return ((log10(y_obs) - log10(y_cal)) ** 2).sum() / len(y_cal)
Example #28
def to_log(x1, x2, x1err, x2err):
    """
    Take linear measurements and uncertainties and transform to log values.

    """
    logx1 = scipy.log10(scipy.array(x1))
    logx2 = scipy.log10(scipy.array(x2))
    x1err = scipy.log10(scipy.array(x1)+scipy.array(x1err)) - logx1
    x2err = scipy.log10(scipy.array(x2)+scipy.array(x2err)) - logx2
    return logx1, logx2, x1err, x2err
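
A usage sketch (again assuming the older scipy namespace used throughout these examples); note the returned log-errors are the upper offsets only:

x1, x1err = [10.0, 100.0], [1.0, 5.0]
x2, x2err = [2.0, 4.0], [0.2, 0.4]
logx1, logx2, logx1err, logx2err = to_log(x1, x2, x1err, x2err)
print(logx1)     # [1. 2.]
print(logx1err)  # log10(x1 + x1err) - log10(x1)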
Example #29
File: bestFit.py  Project: rlamy/pyFitting
def residual(params, theory, data, linlog, sigma=None, logResidual=False):
    """Calculate residual for fitting"""
    residuals = np.array([])
    if sigma is None:  sigma = 1.
    P = theory.Y(data.X, params)
    if not logResidual:
        res = (P - data.Y)/sigma
    else:
        res = (sp.log10(P) - sp.log10(data.Y))/sigma
    residuals = np.concatenate((residuals, res))
    return residuals
Example #30
File: OPTutils.py  Project: markm541374/GPc
 def plot(self,truex,truey):
     f,a = plt.subplots(3)
     #print self.X.shape
     a[0].plot(self.X[:,0].flatten(),'b')
     a[0].set_ylabel("augx")
     a[0].twinx().plot(self.C,'r')
     a[1].plot(sp.log10(self.Rreg.flatten()-truey))
     a[1].set_ylabel("regret")
     a[2].plot([sum(self.C[:j]) for j in xrange(len(self.C))],sp.log10(self.Rreg.flatten()-truey))
     a[2].set_ylabel("regret/c")
     return
Example #31
    KS = SP.zeros((Y.shape[0], Y.shape[0]))
    for iph in range(Y.shape[0]):
        for jph in range(Y.shape[0]):
            if SP.bitwise_and(phase_vec[iph] == phase_vec[jph],
                              phase_vec[iph] == 2):
                KS[iph, jph] = 1

    KG2M = SP.zeros((Y.shape[0], Y.shape[0]))
    for iph in range(Y.shape[0]):
        for jph in range(Y.shape[0]):
            if SP.bitwise_and(phase_vec[iph] == phase_vec[jph],
                              phase_vec[iph] == 3):
                KG2M[iph, jph] = 1

    #intra-phase variations in cell size
    sfCellSize = SP.log10(f['ratioEndo'][:])
    sfCellSize -= sfCellSize.mean()
    sfCellSize = sfCellSize.reshape(1, sfCellSize.shape[0])
    Ksize = SP.dot(sfCellSize.transpose(), sfCellSize)
    Ksize /= Ksize.diagonal().mean()

    # filter cell cycle genes
    idx_cell_cycle = SP.union1d(cellcyclegenes_filter,
                                cellcyclegenes_filterCB600)
    Ymean2 = Y.mean(0)**2 > 0
    idx_cell_cycle_noise_filtered = SP.intersect1d(
        idx_cell_cycle, SP.array(SP.where(Ymean2.ravel() > 0)))
    Ycc = Y[:, idx_cell_cycle_noise_filtered]

    #Fit GPLVM to data
    k = 1  # number of latent factors
Example #32
def plotFigure4(subfolder, FigFolder, rank, subFigName, Indx, oversample,
                FigNumber, FigRows, FigCols):
    font = {'family': 'sans-serif', 'weight': 'normal', 'size': 14}

    matplotlib.rc('font', **font)

    # set tick width
    matplotlib.rcParams['xtick.major.size'] = 7
    matplotlib.rcParams['xtick.major.width'] = 1.0
    matplotlib.rcParams['xtick.minor.size'] = 3
    matplotlib.rcParams['xtick.minor.width'] = 1.0
    matplotlib.rcParams['ytick.major.size'] = 7
    matplotlib.rcParams['ytick.major.width'] = 1.0
    matplotlib.rcParams['ytick.minor.size'] = 3
    matplotlib.rcParams['ytick.minor.width'] = 1.0
    matplotlib.rcParams['axes.linewidth'] = 1.0  #set the value globally

    # Make LATEX font the same as text font:
    matplotlib.rcParams['mathtext.fontset'] = 'custom'
    matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
    matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
    matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'

    # Get Spectral errors:
    MeanErrOnSD = sp.load(subfolder + '/' + 'MeanSpecErrOnJ_' +
                          str(int(oversample)) + 'over.npy')
    MeanErrOnSDT = sp.load(subfolder + '/' + 'MeanSpecErrOnJT_' +
                           str(int(oversample)) + 'over.npy')
    MeanErrNystrom = sp.load(subfolder + '/' + 'MeanSpecErrNystrom_' +
                             str(int(oversample)) + 'over.npy')
    MeanErrPinched = sp.load(subfolder + '/' + 'MeanSpecErrPinched_' +
                             str(int(oversample)) + 'over.npy')
    # Get sketch sizes:
    ViewVec = sp.load(subfolder + '/' + 'ViewVec.npy')

    # Get min and max values:
    Errs = [MeanErrOnSD, MeanErrOnSDT, MeanErrNystrom, MeanErrPinched]
    ymin = 1.e99
    ymax = -1.e99
    for Err in Errs:
        emax = max(Err)
        if emax > ymax:
            ymax = emax
        emin = min(Err)
        if emin < ymin:
            ymin = emin

    fig = plt.figure(1, figsize=(12, 12))
    ax = plt.subplot(FigRows, FigCols, FigNumber)
    plt.plot(ViewVec,
             MeanErrNystrom,
             'k-.',
             markersize=11,
             markerfacecolor='k',
             markeredgecolor='k',
             markeredgewidth=2,
             linewidth=2.0)
    plt.plot(ViewVec,
             MeanErrPinched,
             'k--',
             markersize=11,
             markerfacecolor='g',
             markeredgecolor='g',
             markeredgewidth=2,
             linewidth=2.0)
    plt.plot(ViewVec,
             MeanErrOnSD,
             'rv',
             markersize=11,
             markerfacecolor='r',
             markeredgecolor='r',
             linewidth=1.5)
    plt.plot(ViewVec,
             MeanErrOnSDT,
             'bo',
             markersize=11,
             markerfacecolor='b',
             markeredgecolor='b',
             linewidth=1.5)
    plt.yscale('log')
    # For y-ticks:
    ytksMin = floor(sp.log10(MeanErrNystrom[4] * 0.5))
    ytksMax = ceil(sp.log10(MeanErrPinched[0] * 2.))
    plt.xlabel('Views, $v$', labelpad=10)
    plt.ylabel('Relative Spectral Error', labelpad=10)
    # Set y axis limits:
    if sp.absolute(ytksMin - ytksMax) < 12.5:
        plt.yticks(sp.logspace(-16, 4, 11))
    else:
        plt.yticks(sp.logspace(-16, 4, 6))
    plt.ylim(10.**(ytksMin), 10.**(ytksMax))
    plt.xlim(1.5, 6.5)
    ax.xaxis.set_major_formatter(ScalarFormatter())
    ax.minorticks_off()
    ax.set_title('$S_\mathrm{D}$' + ' ($p=$' + str(rank) + ', $l = $' +
                 str(oversample) + ')',
                 y=1.02)
    ax.text(-0.3,
            1.1,
            '(' + string.ascii_lowercase[Indx] + ')',
            transform=ax.transAxes,
            weight='bold')
    if Indx == 7:
        ax.legend(['Prolonged', 'Pinched', 'Alg. 2 on $J$', 'Alg. 2 on $J^*$'],
                  numpoints=1,
                  loc=9,
                  bbox_to_anchor=(0.4, -0.24),
                  ncol=5,
                  frameon=False)
    fig.subplots_adjust(left=0.01,
                        bottom=0.01,
                        right=0.99,
                        top=0.99,
                        wspace=0.4,
                        hspace=0.4)
    fig.savefig(FigFolder + '/Figure4.pdf', format='pdf', bbox_inches='tight')
Example #33
def get_freqs_db(signal, sample_rate):
    freqs, fft_abs = get_freqs_fft(signal, sample_rate)
    fft_db = 20 * scipy.log10(fft_abs)
    return freqs, fft_db
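
`get_freqs_fft` is defined elsewhere in the source file; a hypothetical stand-in (not the original) that makes the snippet runnable, assuming an older scipy where `scipy.log10` still exists:

import numpy as np

def get_freqs_fft(signal, sample_rate):
    # stand-in helper: FFT magnitude and the matching frequency axis
    fft_abs = np.abs(np.fft.rfft(signal))
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / sample_rate)
    return freqs, fft_abs

t = np.arange(0, 1, 1.0 / 1000.0)
sig = np.sin(2 * np.pi * 50 * t) + 0.001 * np.random.randn(len(t))
freqs, fft_db = get_freqs_db(sig, 1000.0)
print(freqs[np.argmax(fft_db)])  # peak near 50 Hz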
Example #34
def main():
    tstart = time.time()

    tb = pfb_top_block()
    tb.run()

    tend = time.time()
    print "Run time: %f" % (tend - tstart)

    if 1:
        fig_in = pylab.figure(1, figsize=(16, 9), facecolor="w")
        fig1 = pylab.figure(2, figsize=(16, 9), facecolor="w")
        fig2 = pylab.figure(3, figsize=(16, 9), facecolor="w")

        Ns = 1000
        Ne = 10000

        fftlen = 8192
        winfunc = scipy.blackman
        fs = tb._fs

        # Plot the input signal on its own figure
        d = tb.snk_i.data()[Ns:Ne]
        spin_f = fig_in.add_subplot(2, 1, 1)

        X, freq = mlab.psd(d,
                           NFFT=fftlen,
                           noverlap=fftlen / 4,
                           Fs=fs,
                           window=lambda d: d * winfunc(fftlen),
                           scale_by_freq=True)
        X_in = 10.0 * scipy.log10(abs(X))
        f_in = scipy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
        pin_f = spin_f.plot(f_in, X_in, "b")
        spin_f.set_xlim([min(f_in), max(f_in) + 1])
        spin_f.set_ylim([-200.0, 50.0])

        spin_f.set_title("Input Signal", weight="bold")
        spin_f.set_xlabel("Frequency (Hz)")
        spin_f.set_ylabel("Power (dBW)")

        Ts = 1.0 / fs
        Tmax = len(d) * Ts

        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        spin_t = fig_in.add_subplot(2, 1, 2)
        pin_t = spin_t.plot(t_in, x_in.real, "b")
        pin_t = spin_t.plot(t_in, x_in.imag, "r")

        spin_t.set_xlabel("Time (s)")
        spin_t.set_ylabel("Amplitude")

        Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
        Nrows = int(scipy.floor(tb._M / Ncols))
        if (tb._M % Ncols != 0):
            Nrows += 1

        # Plot each of the channels outputs. Frequencies on Figure 2 and
        # time signals on Figure 3
        fs_o = tb._fs / tb._M
        Ts_o = 1.0 / fs_o
        Tmax_o = len(d) * Ts_o
        for i in xrange(len(tb.snks)):
            # remove issues with the transients at the beginning
            # also remove some corruption at the end of the stream
            #    this is a bug, probably due to the corner cases
            d = tb.snks[i].data()[Ns:Ne]

            sp1_f = fig1.add_subplot(Nrows, Ncols, 1 + i)
            X, freq = mlab.psd(d,
                               NFFT=fftlen,
                               noverlap=fftlen / 4,
                               Fs=fs_o,
                               window=lambda d: d * winfunc(fftlen),
                               scale_by_freq=True)
            X_o = 10.0 * scipy.log10(abs(X))
            f_o = scipy.arange(-fs_o / 2.0, fs_o / 2.0, fs_o / float(X_o.size))
            p2_f = sp1_f.plot(f_o, X_o, "b")
            sp1_f.set_xlim([min(f_o), max(f_o) + 1])
            sp1_f.set_ylim([-200.0, 50.0])

            sp1_f.set_title(("Channel %d" % i), weight="bold")
            sp1_f.set_xlabel("Frequency (Hz)")
            sp1_f.set_ylabel("Power (dBW)")

            x_o = scipy.array(d)
            t_o = scipy.arange(0, Tmax_o, Ts_o)
            sp2_o = fig2.add_subplot(Nrows, Ncols, 1 + i)
            p2_o = sp2_o.plot(t_o, x_o.real, "b")
            p2_o = sp2_o.plot(t_o, x_o.imag, "r")
            sp2_o.set_xlim([min(t_o), max(t_o) + 1])
            sp2_o.set_ylim([-2, 2])

            sp2_o.set_title(("Channel %d" % i), weight="bold")
            sp2_o.set_xlabel("Time (s)")
            sp2_o.set_ylabel("Amplitude")

        pylab.show()
Example #35
def snr_est_simple(signal):
    s = scipy.mean(abs(signal)**2)
    n = 2 * scipy.var(abs(signal))
    snr_rat = s / n
    return 10.0 * scipy.log10(snr_rat), snr_rat
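
A quick check on a synthetic signal (the function above relies on the old scipy namespace for mean/var/log10): a unit-amplitude complex tone in complex noise of power 0.01 should estimate roughly 20 dB:

import numpy as np

np.random.seed(1)
n = 10000
tone = np.exp(2j * np.pi * 0.05 * np.arange(n))  # unit-amplitude complex tone
noise = 0.1 * (np.random.randn(n) + 1j * np.random.randn(n)) / np.sqrt(2)
snr_db, snr_lin = snr_est_simple(tone + noise)
print(snr_db)  # roughly 20 dB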
Example #36
print('sc.sqrt(a) = ', sc.sqrt(a))
print('sc.sqrt(z) = ', sc.sqrt(z))

c = np.sqrt(a)
w = np.sqrt(z)
print(c**2)
print(w**2)

c = sc.sqrt(a)
w = sc.sqrt(z)
print(c**2)
print(w**2)

print(np.exp(a))
print(np.sin(a))
print(np.log(a))
print(np.log10(a))
print(np.exp(a))
print(np.sin(z))
print(np.log(z))
print(np.log10(z))

print(sc.exp(a))
print(sc.sin(a))
print(sc.log(a))
print(sc.log10(a))
print(sc.exp(z))
print(sc.sin(z))
print(sc.log(z))
print(sc.log10(z))
Example #37
File: coverage.py  Project: xtmgah/spladder
def heatmap_from_bam(chrm,
                     start,
                     stop,
                     files,
                     subsample=0,
                     verbose=False,
                     bins=None,
                     log=False,
                     ax=None,
                     ymax=0,
                     outfile=None,
                     frm='pdf',
                     xlim=None,
                     title=None,
                     xoff=None,
                     yoff=None,
                     intron_cov=False,
                     cmap=None,
                     col_idx=None):
    """This function takes a list of bam files and a set of coordinates (chrm, start, stop), to 
       plot a coverage heatmap over all files in that region."""

    ### subsampling
    if subsample > 0 and len(files) > subsample:
        npr.seed(23)
        files = sp.array(files)
        files = npr.choice(files, subsample)

    ### augment chromosome name
    #chr_name = 'chr%s' % chrm
    chr_name = chrm

    (counts, intron_counts, intron_list) = _get_counts(chr_name,
                                                       start,
                                                       stop,
                                                       files,
                                                       intron_cov,
                                                       verbose=verbose,
                                                       collapsed=False)

    if ax is None:
        fig = plt.figure(figsize=(10, 4))
        ax = fig.add_subplot(111)

    if intron_cov:
        data = intron_counts
    else:
        data = counts

    if col_idx is not None:
        data = data[:, col_idx]

    if log:
        data = sp.log10(data + 1)

    if cmap is not None:
        ax.matshow(data, cmap=cmap, aspect='auto')
    else:
        ax.matshow(data, aspect='auto')

    if outfile is not None:
        plt.savefig(outfile, dpi=300, format=frm)
Example #38
def porosimetry(im, sizes=25, inlets=None, access_limited=True, mode='hybrid'):
    r"""
    Performs a porosimetry simulation on the image

    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing True values in the
        pore space.

    sizes : array_like or scalar
        The sizes to invade.  If a list of values is provided, they are used
        directly.  If a scalar is provided, then that number of points spanning
        the min and max of the distance transform are used.

    inlets : ND-array, boolean
        A boolean mask with True values indicating where the invasion
        enters the image.  By default all faces are considered inlets,
        akin to a mercury porosimetry experiment.  Users can also apply
        solid boundaries to their image externally before passing it in,
        allowing for complex inlets like circular openings, etc.  This argument
        is only used if ``access_limited`` is ``True``.

    access_limited : Boolean
        This flag indicates if the intrusion should only occur from the
        surfaces (``access_limited`` is True, which is the default), or
        if the invading phase should be allowed to appear in the core of
        the image.  The former simulates experimental tools like mercury
        intrusion porosimetry, while the latter is useful for comparison
        to gauge the extent of shielding effects in the sample.

    mode : string
        Controls which method is used to compute the result.  Options are:

        *'hybrid'* - (default) Performs a distance transform of the void space,
        thresholds to find voxels larger than ``sizes[i]``, trims the resulting
        mask if ``access_limitations`` is ``True``, then dilates it using the
        efficient fft-method to obtain the non-wetting fluid configuration.

        *'dt'* - Same as 'hybrid', except uses a second distance transform,
        relative to the thresholded mask, to find the invading fluid
        configuration.  The choice of 'dt' or 'hybrid' depends on speed, which
        is system and installation specific.

        *'mio'* - Uses a single morphological image opening step to obtain the
        invading fluid configuration directly, *then* trims if
        ``access_limitations`` is ``True``.  This method is not ideal and is
        included mostly for comparison purposes.  The morphological operations
        are done using fft-based method implementations.

    Returns
    -------
    An ND-image with voxel values indicating the sphere radius at which it
    becomes accessible from the inlets.  This image can be used to find
    invading fluid configurations as a function of applied capillary pressure
    by applying a boolean comparison: ``inv_phase = im > r`` where ``r`` is
    the radius (in voxels) of the invading sphere.  Of course, ``r`` can be
    converted to capillary pressure using your favorite model.

    See Also
    --------
    fftmorphology

    """
    def trim_blobs(im, inlets):
        temp = sp.zeros_like(im)
        temp[inlets] = True
        labels, N = spim.label(im + temp)
        im = im ^ (clear_border(labels=labels) > 0)
        return im

    dt = spim.distance_transform_edt(im > 0)

    if inlets is None:
        inlets = get_border(im.shape, mode='faces')
    inlets = sp.where(inlets)

    if isinstance(sizes, int):
        sizes = sp.logspace(start=sp.log10(sp.amax(dt)), stop=0, num=sizes)
    else:
        sizes = sp.sort(a=sizes)[-1::-1]

    if im.ndim == 2:
        strel = ps_disk
    else:
        strel = ps_ball

    imresults = sp.zeros(sp.shape(im))
    if mode == 'mio':
        pw = int(sp.floor(dt.max()))
        impad = sp.pad(im, mode='symmetric', pad_width=pw)
        imresults = sp.zeros(sp.shape(impad))
        for r in tqdm(sizes):
            imtemp = fftmorphology(impad, strel(r), mode='opening')
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imresults[(imresults == 0) * imtemp] = r
        if im.ndim == 2:
            imresults = imresults[pw:-pw, pw:-pw]
        else:
            imresults = imresults[pw:-pw, pw:-pw, pw:-pw]
    elif mode == 'dt':
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = spim.distance_transform_edt(~imtemp) < r
                imresults[(imresults == 0) * imtemp] = r
    elif mode == 'hybrid':
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.0001
                imresults[(imresults == 0) * imtemp] = r
    else:
        raise Exception('Unrecognized mode ' + mode)
    return imresults
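
A toy illustration of the 'dt' branch above on a random 2-D image, using only scipy.ndimage; the library helpers (`get_border`, `ps_disk`, `fftmorphology`) and the access-limited trimming are deliberately left out of this sketch:

import numpy as np
from scipy import ndimage as spim

np.random.seed(0)
im = spim.gaussian_filter(np.random.rand(200, 200), sigma=4) > 0.5  # True = pore space

dt = spim.distance_transform_edt(im)
sizes = np.logspace(np.log10(dt.max()), 0, 10)

imresults = np.zeros_like(dt)
for r in sizes:
    imtemp = dt >= r  # seeds where a sphere of radius r fits
    if imtemp.any():
        imtemp = spim.distance_transform_edt(~imtemp) < r  # dilate seeds by r ('dt' mode)
        imresults[(imresults == 0) * imtemp] = r

print(np.unique(imresults))  # radii at which each voxel is first invaded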
Example #39
    def __init__(self,
                 ll,
                 fl,
                 iv,
                 thid,
                 ra,
                 dec,
                 zqso,
                 plate,
                 mjd,
                 fid,
                 order,
                 diff=None,
                 reso=None,
                 mmef=None):
        qso.__init__(self, thid, ra, dec, zqso, plate, mjd, fid)

        if self.ebv_map is not None:
            corr = unred(10**ll, self.ebv_map[thid])
            fl /= corr
            iv *= corr**2

        ## cut to specified range
        bins = sp.floor((ll - forest.lmin) / forest.dll + 0.5).astype(int)
        ll = forest.lmin + bins * forest.dll
        w = (ll >= forest.lmin)
        w = w & (ll < forest.lmax)
        w = w & (ll - sp.log10(1. + self.zqso) > forest.lmin_rest)
        w = w & (ll - sp.log10(1. + self.zqso) < forest.lmax_rest)
        w = w & (iv > 0.)
        if w.sum() == 0:
            return
        bins = bins[w]
        ll = ll[w]
        fl = fl[w]
        iv = iv[w]
        ## mmef is the mean expected flux fraction using the mock continuum
        if mmef is not None:
            mmef = mmef[w]
        if diff is not None:
            diff = diff[w]
        if reso is not None:
            reso = reso[w]

        ## rebin
        cll = forest.lmin + sp.arange(bins.max() + 1) * forest.dll
        cfl = sp.zeros(bins.max() + 1)
        civ = sp.zeros(bins.max() + 1)
        if mmef is not None:
            cmmef = sp.zeros(bins.max() + 1)
        ccfl = sp.bincount(bins, weights=iv * fl)
        cciv = sp.bincount(bins, weights=iv)
        if mmef is not None:
            ccmmef = sp.bincount(bins, weights=iv * mmef)
        if diff is not None:
            cdiff = sp.bincount(bins, weights=iv * diff)
        if reso is not None:
            creso = sp.bincount(bins, weights=iv * reso)

        cfl[:len(ccfl)] += ccfl
        civ[:len(cciv)] += cciv
        if mmef is not None:
            cmmef[:len(ccmmef)] += ccmmef
        w = (civ > 0.)
        if w.sum() == 0:
            return
        ll = cll[w]
        fl = cfl[w] / civ[w]
        iv = civ[w]
        if mmef is not None:
            mmef = cmmef[w] / civ[w]
        if diff is not None:
            diff = cdiff[w] / civ[w]
        if reso is not None:
            reso = creso[w] / civ[w]

        ## Flux calibration correction
        if self.correc_flux is not None:
            correction = self.correc_flux(ll)
            fl /= correction
            iv *= correction**2
        if self.correc_ivar is not None:
            correction = self.correc_ivar(ll)
            iv /= correction

        self.T_dla = None
        self.ll = ll
        self.fl = fl
        self.iv = iv
        self.mmef = mmef
        self.order = order
        #if diff is not None :
        self.diff = diff
        self.reso = reso
        #        else :
        #           self.diff = sp.zeros(len(ll))
        #           self.reso = sp.ones(len(ll))

        # compute means
        if reso is not None: self.mean_reso = sum(reso) / float(len(reso))

        err = 1.0 / sp.sqrt(iv)
        SNR = fl / err
        self.mean_SNR = sum(SNR) / float(len(SNR))
        lam_lya = constants.absorber_IGM["LYA"]
        self.mean_z = (sp.power(10., ll[len(ll) - 1]) +
                       sp.power(10., ll[0])) / 2. / lam_lya - 1.0
Example #40
    def _scale_psf(self, input_irf_file, config):
        """
        This internal method scales the IRF PSF extension.

        Parameters
        ----------
        input_irf_file: pyfits.HDUList
            Open pyfits IRF file, which contains the PSF that should be scaled.
        config: dict
            A dictionary with the scaling settings. Must have following keys defined:
            "energy_scaling": dict
                Contains setting for the energy scaling (see the structure below).
            "angular_scaling": dict
                Contains setting for the off-center angle scaling (see the structure below).

            In both cases, internally the above dictionaries should contain:
            "err_func_type": str
                The name of the scaling function to use. Accepted values are: "constant",
                "gradient" and "step".

            If err_func_type == "constant":
                scale: float
                    The scale factor. Passing 1.0 results in no scaling.

            If err_func_type == "gradient":
                scale: float
                    The scale factor. Passing 0.0 results in no scaling.
                range_min: float
                    The x value (energy or off-center angle), that corresponds to -1 scale.
                range_max: float
                    The x value (energy or off-center angle), that corresponds to +1 scale.

            If err_func_type == "step":
                scale: float
                    The scale factor. Passing 0.0 results in no scaling.
                transition_pos: list
                    The list of x values (energy or off-center angle), at which
                    step-like transitions occur. If scaling the energy dependence,
                    values must be in TeVs, if angular - in degrees.
                transition_widths: list
                    The list of step-like transition widths, that correspond to transition_pos.
                    For energy scaling the widths must be in log10 scale.

        Returns
        -------
        None

        """

        # Find all "sigma" values - tells how many PSF components we have in the IRF file
        column_names = [
            col.name.lower()
            for col in input_irf_file['POINT SPREAD FUNCTION'].columns
        ]
        sigma_columns = list(
            filter(lambda s: "sigma" in s.lower(), column_names))

        # --------------------------
        # Reading the PSF parameters
        self._psf = dict()
        self._psf['Elow'] = input_irf_file['POINT SPREAD FUNCTION'].data[
            'Energ_lo'][0].copy()
        self._psf['Ehigh'] = input_irf_file['POINT SPREAD FUNCTION'].data[
            'Energ_hi'][0].copy()
        self._psf['ThetaLow'] = input_irf_file['POINT SPREAD FUNCTION'].data[
            'Theta_lo'][0].copy()
        self._psf['ThetaHi'] = input_irf_file['POINT SPREAD FUNCTION'].data[
            'Theta_hi'][0].copy()

        for i in range(0, len(sigma_columns)):
            sigma_name = 'sigma_{:d}'.format(i + 1)
            self._psf[sigma_name] = input_irf_file[
                'POINT SPREAD FUNCTION'].data[sigma_name][0].transpose().copy(
                )

        self._psf['E'] = scipy.sqrt(self._psf['Elow'] * self._psf['Ehigh'])
        self._psf['Theta'] = (self._psf['ThetaLow'] +
                              self._psf['ThetaHi']) / 2.0
        # --------------------------

        # Creating the energy-theta mesh grid
        energy, theta = scipy.meshgrid(self._psf['E'],
                                       self._psf['Theta'],
                                       indexing='ij')

        # ---------------------------------
        # Scaling the PSF energy dependence

        # Constant error function
        if config['energy_scaling']['err_func_type'] == "constant":
            scale_params = config['energy_scaling']["constant"]
            # Constant scaling. Loop over all "sigma" values and scale them by the same factor.
            for sigma_column in sigma_columns:
                self._psf[
                    sigma_column +
                    '_new'] = scale_params['scale'] * self._psf[sigma_column]

        # Gradients error function
        elif config['energy_scaling']['err_func_type'] == "gradient":
            scale_params = config['energy_scaling']["gradient"]
            for sigma_column in sigma_columns:
                self._psf[sigma_column + '_new'] = self._psf[sigma_column] * (
                    1 + scale_params['scale'] *
                    gradient(scipy.log10(energy),
                             scipy.log10(scale_params['range_min']),
                             scipy.log10(scale_params['range_max'])))

        # Step error function
        elif config['energy_scaling']['err_func_type'] == "step":
            scale_params = config['energy_scaling']["step"]
            break_points = list(
                zip(scipy.log10(scale_params['transition_pos']),
                    scale_params['transition_widths']))

            for sigma_column in sigma_columns:
                self._psf[sigma_column + '_new'] = self._psf[sigma_column] * (
                    1 + scale_params['scale'] *
                    step(scipy.log10(energy), break_points))

        else:
            raise ValueError("Unknown PSF scaling function {:s}".format(
                config['energy_scaling']['err_func_type']))
        # ---------------------------------

        # ---------------------------------
        # Scaling the PSF angular dependence

        # Constant error function
        if config['angular_scaling']['err_func_type'] == "constant":
            scale_params = config['angular_scaling']["constant"]
            # Constant scaling. Loop over all "sigma" values and scale them by the same factor.
            for sigma_column in sigma_columns:
                # input_irf_file['POINT SPREAD FUNCTION'].data[sigma_column] *= scale_params['scale']
                self._psf[sigma_column +
                          '_new'] = scale_params['scale'] * self._psf[
                              sigma_column + '_new']

        # Gradients error function
        elif config['angular_scaling']['err_func_type'] == "gradient":
            scale_params = config['angular_scaling']["gradient"]
            for sigma_column in sigma_columns:
                self._psf[sigma_column +
                          '_new'] = self._psf[sigma_column + '_new'] * (
                              1 + scale_params['scale'] *
                              gradient(theta, scale_params['range_min'],
                                       scale_params['range_max']))

        # Step error function
        elif config['angular_scaling']['err_func_type'] == "step":
            scale_params = config['angular_scaling']["step"]
            break_points = list(
                zip(scale_params['transition_pos'],
                    scale_params['transition_widths']))

            for sigma_column in sigma_columns:
                self._psf[
                    sigma_column +
                    '_new'] = self._psf[sigma_column + '_new'] * (
                        1 + scale_params['scale'] * step(theta, break_points))

        else:
            raise ValueError("Unknown PSF scaling function {:s}".format(
                config['angular_scaling']['err_func_type']))
        # ---------------------------------

        # Recording the scaled PSF
        for i in range(0, len(sigma_columns)):
            sigma_name = 'sigma_{:d}'.format(i + 1)

            input_irf_file['POINT SPREAD FUNCTION'].data[sigma_name][
                0] = self._psf[sigma_name + '_new'].transpose()
Example #41
def pwrlaw_plot(xdata, ydata, yerr):
    import matplotlib.pyplot as plt
    from scipy import log10, optimize, sqrt

    powerlaw = lambda x, amp, index: amp * (x ** index)

    # Fit in log-log space: log(y) = log(amp) + index * log(x)
    logx = log10(xdata)
    logy = log10(ydata)
    logyerr = yerr / ydata

    # define our (line) fitting function
    fitfunc = lambda p, x: p[0] + p[1] * x
    errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err

    pinit = [1.0, -1.0]
    out = optimize.leastsq(errfunc, pinit,
                           args=(logx, logy, logyerr), full_output=1)

    pfinal = out[0]
    covar = out[1]
    print(pfinal)
    print(covar)

    index = pfinal[1]
    amp = 10.0 ** pfinal[0]

    # p[1] is the index and p[0] the log-amplitude, so their variances
    # are covar[1][1] and covar[0][0] respectively
    indexErr = sqrt(covar[1][1])
    ampErr = sqrt(covar[0][0]) * amp

    print(index)

    # ########
    # plotting
    # ########

    fig, axs = plt.subplots(2, 1)

    axs[0].plot(xdata, powerlaw(xdata, amp, index))  # Fit
    axs[0].errorbar(xdata, ydata, yerr=yerr, fmt='k.')  # Data
    (yh1, yh2) = (axs[0].get_ylim()[1] * .9, axs[0].get_ylim()[1] * .8)
    xh = axs[0].get_xlim()[0] * 1.1
    print(axs[0].get_ylim())
    print((yh1, yh2))

    axs[0].text(xh, yh1, 'Ampli = %5.2f +/- %5.2f' % (amp, ampErr))
    axs[0].text(xh, yh2, 'Index = %5.2f +/- %5.2f' % (index, indexErr))
    axs[0].set_title('Best Fit Power Law')
    axs[0].set_xlabel('X')
    axs[0].set_ylabel('Y')

    axs[1].loglog(xdata, powerlaw(xdata, amp, index))
    axs[1].errorbar(xdata, ydata, yerr=yerr, fmt='k.')  # Data
    axs[1].set_xlabel('X (log scale)')
    axs[1].set_ylabel('Y (log scale)')

    import datetime
    figfname = datetime.datetime.now().strftime("%d%b%y") + "_pl"
    plt.savefig(figfname, bbox_inches='tight')
    return figfname
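For illustration, a call on synthetic data (the numbers below are made up for the demo):

import numpy as np

# y = 2.5 * x**-1.7 with ~5% multiplicative noise
xdata = np.linspace(1.1, 10.1, 100)
ydata = 2.5 * xdata ** -1.7 * (1.0 + 0.05 * np.random.randn(100))
yerr = 0.05 * ydata

pwrlaw_plot(xdata, ydata, yerr)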
Example #42
    def _scale_edisp(self, input_irf_file, config):
        """
        This internal method scales the IRF energy dispersion through the Migration Matrix.
        Two scalings can be applied: (1) vs energy and (2) vs off-axis angle. In both cases
        the scaling function is taken as (1 + scale * tanh((x-x0)/dx)). In case (1) the scaling
        is performed in log-energy.

        Parameters
        ----------
        input_irf_file: pyfits.HDUList
            Open pyfits IRF file, which contains the Migration Matrix that should be scaled.

        config: dict
            A dictionary with the scaling settings. Must have the following keys defined:
            "energy_scaling": dict
                Contains settings for the energy scaling (see the structure below).
            "angular_scaling": dict
                Contains settings for the off-center angle scaling (see the structure below).

            In both cases, internally the above dictionaries should contain:
            "err_func_type": str
                The name of the scaling function to use. Accepted values are: "constant",
                "gradient" and "step".

            If err_func_type == "constant":
                scale: float
                    The scale factor. Passing 1.0 results in no scaling.

            If err_func_type == "gradient":
                scale: float
                    The scale factor. Passing 0.0 results in no scaling.
                range_min: float
                    The x value (energy or off-center angle) that corresponds to a -1 scale.
                range_max: float
                    The x value (energy or off-center angle) that corresponds to a +1 scale.

            If err_func_type == "step":
                scale: float
                    The scale factor. Passing 0.0 results in no scaling.
                transition_pos: list
                    The list of x values (energy or off-center angle) at which
                    step-like transitions occur. If scaling the energy dependence,
                    the values must be in TeV; if the angular one, in degrees.
                transition_widths: list
                    The list of step-like transition widths that correspond to transition_pos.
                    For energy scaling the widths must be in log10 scale.

        Returns
        -------
        None

        """

        # Reading the MATRIX parameters
        self._edisp = dict()
        self._edisp['Elow'] = input_irf_file['ENERGY DISPERSION'].data[
            'ETRUE_LO'][0].copy()
        self._edisp['Ehigh'] = input_irf_file['ENERGY DISPERSION'].data[
            'ETRUE_HI'][0].copy()
        self._edisp['ThetaLow'] = input_irf_file['ENERGY DISPERSION'].data[
            'THETA_LO'][0].copy()
        self._edisp['ThetaHi'] = input_irf_file['ENERGY DISPERSION'].data[
            'THETA_HI'][0].copy()
        self._edisp['Mlow'] = input_irf_file['ENERGY DISPERSION'].data[
            'MIGRA_LO'][0].copy()
        self._edisp['Mhigh'] = input_irf_file['ENERGY DISPERSION'].data[
            'MIGRA_HI'][0].copy()
        self._edisp['Matrix_'] = input_irf_file['ENERGY DISPERSION'].data[
            'MATRIX'][0].transpose().copy()
        self._edisp['E'] = scipy.sqrt(self._edisp['Elow'] *
                                      self._edisp['Ehigh'])
        self._edisp['M'] = (self._edisp['Mlow'] + self._edisp['Mhigh']) / 2.0
        self._edisp['T'] = (self._edisp['ThetaLow'] +
                            self._edisp['ThetaHi']) / 2.0
        # Creating the energy-migration-theta mesh grid
        energy, migration, theta = scipy.meshgrid(self._edisp['E'],
                                                  self._edisp['M'],
                                                  self._edisp['T'],
                                                  indexing='ij')

        # -------------------------------------------
        # Scaling the Matrix energy dependence

        # Constant error function
        if config['energy_scaling']['err_func_type'] == "constant":
            self._edisp['Matrix_new'] = self._edisp['Matrix_'] * config[
                'energy_scaling']['constant']['scale']

        # Gradients error function
        elif config['energy_scaling']['err_func_type'] == "gradient":
            scaling_params = config['energy_scaling']['gradient']
            self._edisp['Matrix_new'] = self._edisp['Matrix_'] * (
                1. + scaling_params['scale'] *
                gradient(scipy.log10(energy),
                         scipy.log10(scaling_params['range_min']),
                         scipy.log10(scaling_params['range_max'])))
        # Step error function
        elif config['energy_scaling']['err_func_type'] == "step":
            scaling_params = config['energy_scaling']['step']
            break_points = list(
                zip(scipy.log10(scaling_params['transition_pos']),
                    scaling_params['transition_widths']))
            self._edisp['Matrix_new'] = self._edisp['Matrix_'] * (
                1 + scaling_params['scale'] *
                step(scipy.log10(energy), break_points))
        else:
            raise ValueError(
                "Edisp energy scaling: unknown scaling function type '{:s}'".
                format(config['energy_scaling']['err_func_type']))
        # -------------------------------------------
        # Scaling the Matrix off-axis angle dependence

        # Constant error function
        if config['angular_scaling']['err_func_type'] == "constant":
            self._edisp['Matrix_new'] = self._edisp['Matrix_new'] * config[
                'angular_scaling']['constant']['scale']

        # Gradients error function
        elif config['angular_scaling']['err_func_type'] == "gradient":
            scaling_params = config['angular_scaling']['gradient']
            self._edisp['Matrix_new'] = self._edisp['Matrix_new'] * (
                1. + scaling_params['scale'] *
                gradient(scipy.log10(theta),
                         scipy.log10(scaling_params['range_min']),
                         scipy.log10(scaling_params['range_max'])))
        # Step error function
        elif config['angular_scaling']['err_func_type'] == "step":
            scaling_params = config['angular_scaling']['step']
            break_points = list(
                zip(scipy.log10(scaling_params['transition_pos']),
                    scaling_params['transition_widths']))
            self._edisp['Matrix_new'] = self._edisp['Matrix_new'] * (
                1 + scaling_params['scale'] *
                step(scipy.log10(theta), break_points))
        else:
            raise ValueError(
                "Edisp angular scaling: unknown scaling function type '{:s}'".
                format(config['angular_scaling']['err_func_type']))
        # ------------------------------------------
        # Recording the scaled Matrix
        input_irf_file['ENERGY DISPERSION'].data['MATRIX'][0] = self._edisp[
            'Matrix_new'].transpose()
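For reference, a config dictionary with the structure the docstring describes might look like this (the numbers are placeholders, not recommended values):

config = {
    'energy_scaling': {
        'err_func_type': 'gradient',
        'gradient': {'scale': 0.1, 'range_min': 0.03, 'range_max': 100.0},  # TeV
    },
    'angular_scaling': {
        'err_func_type': 'step',
        'step': {
            'scale': 0.05,
            'transition_pos': [2.0],     # degrees
            'transition_widths': [0.5],
        },
    },
}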
Example #43
    def _scale_bkg(self, input_irf_file, config):
        """
        This internal method scales the IRF Background Rate, N.
        Two scalings can be applied: (1) vs energy and (2) vs off-axis angle. In both cases
        the scaling function is taken as (1 + scale * tanh((x-x0)/dx)). In case (1) the scaling
        is performed in log-energy.

        Parameters
        ----------
        input_irf_file: pyfits.HDUList
            Open pyfits IRF file, which contains the background rate that should be scaled.

        config: dict
            A dictionary with the scaling settings. Must have the following keys defined:
            "energy_scaling": dict
                Contains settings for the energy scaling (see the structure below).
            "angular_scaling": dict
                Contains settings for the off-center angle scaling (see the structure below).

            In both cases, internally the above dictionaries should contain:
            "err_func_type": str
                The name of the scaling function to use. Accepted values are: "constant",
                "gradient" and "step".

            If err_func_type == "constant":
                scale: float
                    The scale factor. Passing 1.0 results in no scaling.

            If err_func_type == "gradient":
                scale: float
                    The scale factor. Passing 0.0 results in no scaling.
                range_min: float
                    The x value (energy or off-center angle) that corresponds to a -1 scale.
                range_max: float
                    The x value (energy or off-center angle) that corresponds to a +1 scale.

            If err_func_type == "step":
                scale: float
                    The scale factor. Passing 0.0 results in no scaling.
                transition_pos: list
                    The list of x values (energy or off-center angle) at which
                    step-like transitions occur. If scaling the energy dependence,
                    the values must be in TeV; if the angular one, in degrees.
                transition_widths: list
                    The list of step-like transition widths that correspond to transition_pos.
                    For energy scaling the widths must be in log10 scale.

        Returns
        -------
        None

        """

        # Reading the Background parameters.
        self._bkg = dict()
        self._bkg['dxlow'] = input_irf_file['BACKGROUND'].data['detx_lo'][
            0].copy()
        self._bkg['dxhigh'] = input_irf_file['BACKGROUND'].data['detx_hi'][
            0].copy()
        self._bkg['dylow'] = input_irf_file['BACKGROUND'].data['dety_lo'][
            0].copy()
        self._bkg['dyhigh'] = input_irf_file['BACKGROUND'].data['dety_hi'][
            0].copy()
        self._bkg['Elow'] = input_irf_file['BACKGROUND'].data['energ_lo'][
            0].copy()
        self._bkg['Ehigh'] = input_irf_file['BACKGROUND'].data['energ_hi'][
            0].copy()
        self._bkg['bckgnd'] = input_irf_file['BACKGROUND'].data['bgd'][
            0].transpose().copy()

        self._bkg['detx'] = (self._bkg['dxlow'] + self._bkg['dxhigh']) / 2.0
        self._bkg['dety'] = (self._bkg['dylow'] + self._bkg['dyhigh']) / 2.0
        self._bkg['enrg'] = scipy.sqrt(self._bkg['Elow'] * self._bkg['Ehigh'])

        # Creating the [detx - dety - energy] mesh grid.
        detx, dety, energy = scipy.meshgrid(self._bkg['detx'],
                                            self._bkg['dety'],
                                            self._bkg['enrg'],
                                            indexing='ij')

        # ----------------------------------
        # Scaling the Background Energy dependence.

        # Constant error function
        if config['energy_scaling']['err_func_type'] == "constant":
            self._bkg['bckgnd_new'] = self._bkg['bckgnd'] * config[
                'energy_scaling']['constant']['scale']

        # Gradients error function
        elif config['energy_scaling']['err_func_type'] == "gradient":
            scaling_params = config['energy_scaling']['gradient']
            self._bkg['bckgnd_new'] = self._bkg['bckgnd'] * (
                1 + scaling_params['scale'] *
                gradient(scipy.log10(energy),
                         scipy.log10(scaling_params['range_min']),
                         scipy.log10(scaling_params['range_max'])))

        # Step error function
        elif config['energy_scaling']['err_func_type'] == "step":
            scaling_params = config['energy_scaling']['step']
            break_points = list(
                zip(scipy.log10(scaling_params['transition_pos']),
                    scaling_params['transition_widths']))
            self._bkg['bckgnd_new'] = self._bkg['bckgnd'] * (
                1 + scaling_params['scale'] *
                step(scipy.log10(energy), break_points))
        else:
            raise ValueError(
                "Background energy scaling: unknown scaling function type '{:s}'"
                .format(config['energy_scaling']['err_func_type']))

        # -------------------------------------------
        # Scaling the Background Angular dependence

        # Angular (FOV coordinate X-axis binning)
        # Constant error function
        if config['angular_scaling']['err_func_type'] == "constant":
            self._bkg['bckgnd_new'] = self._bkg['bckgnd_new'] * config[
                'angular_scaling']['constant']['scale']

        # Gradients error function
        elif config['angular_scaling']['err_func_type'] == "gradient":
            scaling_params = config['angular_scaling']['gradient']
            self._bkg['bckgnd_new'] = self._bkg['bckgnd_new'] * (
                1. + scaling_params['scale'] *
                gradient(detx, scaling_params['range_min'],
                         scaling_params['range_max']))
        # Step error function
        elif config['angular_scaling']['err_func_type'] == "step":
            scaling_params = config['angular_scaling']['step']
            break_points = list(
                zip(scaling_params['transition_pos'],
                    scaling_params['transition_widths']))
            self._bkg['bckgnd_new'] = self._bkg['bckgnd_new'] * (
                1 + scaling_params['scale'] * step(detx, break_points))
        else:
            raise ValueError(
                "Background angular scaling: unknown scaling function type '{:s}'"
                .format(config['angular_scaling']['err_func_type']))

        # Angular (FOV coordinate Y-axis binning)
        # Constant error function
        if config['angular_scaling']['err_func_type'] == "constant":
            self._bkg['bckgnd_new'] = self._bkg['bckgnd_new'] * config[
                'angular_scaling']['constant']['scale']

        # Gradients error function
        elif config['angular_scaling']['err_func_type'] == "gradient":
            scaling_params = config['angular_scaling']['gradient']
            self._bkg['bckgnd_new'] = self._bkg['bckgnd_new'] * (
                1. + scaling_params['scale'] *
                gradient(dety, scaling_params['range_min'],
                         scaling_params['range_max']))
        # Step error function
        elif config['angular_scaling']['err_func_type'] == "step":
            scaling_params = config['angular_scaling']['step']
            break_points = list(
                zip(scaling_params['transition_pos'],
                    scaling_params['transition_widths']))
            self._bkg['bckgnd_new'] = self._bkg['bckgnd_new'] * (
                1 + scaling_params['scale'] * step(dety, break_points))
        else:
            raise ValueError(
                "Background angular scaling: unknown scaling function type '{:s}'"
                .format(config['angular_scaling']['err_func_type']))

        # ----------------------------------
        # Recording the scaled Background
        input_irf_file['BACKGROUND'].data['bgd'][0] = self._bkg[
            'bckgnd_new'].transpose()
Example #44
import numpy as np
import scipy as sy
import scipy.fftpack as syfp
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 1000)  ## time at the same spacing as your data
freq = 10
u = np.sin(2 * np.pi * t * freq)  ## note freq = 10 Hz

# Do FFT analysis of array
FFT = sy.fft.fft(u)

# Getting the related frequencies
freqs = syfp.fftfreq(len(u), t[1] -
                     t[0])  ## pass dt, so the x-axis is in meaningful units

# Create subplot windows and show plot
plt.subplot(211)
plt.plot(t, u)
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.subplot(212)
plt.plot(freqs, sy.log10(abs(FFT)),
         '.')  ## it's important to have the abs here
plt.xlim(-2 * freq, 2 * freq)  ## zoom in to see what's going on at the peak
plt.show()

# Log-scale view of the full spectrum, with the expected peak marked
plt.semilogy(freqs, abs(FFT))
plt.axvline(freq, color="red", linestyle="--")
plt.show()
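As a quick sanity check (not part of the original snippet), the strongest bin should sit at ±10 Hz:

# Find the frequency of the strongest spectral component
peak_freq = abs(freqs[np.argmax(abs(FFT))])
print('Peak at %.2f Hz' % peak_freq)  # expect ~10 Hz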
Example #45
File: porc.py  Project: balbertalli/porc
def roomcomp(impresp, filter, target, ntaps, mixed_phase, opformat, trim,
             nsthresh, noplot):
    """Primary function.

    Determine a room compensation impulse response from a measured room impulse response.
    """
    print("Loading impulse response")

    # Read impulse response
    Fs, data = wavfile.read(impresp)
    data = norm(np.hstack(data))

    if trim:
        print("Removing leading silence")
        for spos, sval in enumerate(data):
            if abs(sval) > nsthresh:
                lzs = max(spos - 1, 0)
                print('Impulse starts at position ', spos, '/', len(data))
                print('Trimming ',
                      float(lzs) / float(Fs), ' seconds of silence')
                data = data[lzs:len(
                    data)]  # remove everything before sample at spos
                break

    print("\nSample rate = ", Fs)

    print("\nGenerating correction filter")

    # Number of taps

    if not ntaps:
        ntaps = len(data)

    # Logarithmic pole positioning

    fplog = np.hstack(
        (sp.logspace(sp.log10(20.), sp.log10(200.), 14),
         sp.logspace(sp.log10(250.), sp.log10(20000.), 13)))
    plog = freqpoles(fplog, Fs)

    # Preparing data

    # making the measured response minimum-phase
    cp, minresp = rceps(data)

    # Impulse response
    imp = np.zeros(len(data), dtype=np.float64)
    imp[0] = 1.0

    # Target
    outf = []

    if target == 'flat':
        # Make the target output a bandpass filter
        Bf, Af = sig.butter(4, 30 / (Fs / 2), 'high')
        outf = sig.lfilter(Bf, Af, imp)

    else:
        # load target file
        t = np.loadtxt(target)
        frq = t[:, 0]
        pwr = t[:, 1]

        # calculate the FIR filter via windowing method
        fir = sig.firwin2(5001,
                          frq,
                          np.power(10, pwr / 20.0),
                          fs=(frq[-1] * 2))
        # Minimum phase, zero padding
        cp, outf = rceps(np.append(fir, np.zeros(len(minresp) - len(fir))))

    # Filter design

    # Parallel filter design
    (Bm, Am, FIR) = parfiltid(minresp, outf, plog)

    # equalized loudspeaker response - filtering the
    # measured transfer function by the parallel filter
    equalizedresp = parfilt(Bm, Am, FIR, data)

    # Equalizer impulse response - filtering a unit pulse
    equalizer = norm(parfilt(Bm, Am, FIR, imp))

    # Windowing with a half hanning window in time domain
    han = np.hanning(ntaps * 2)[-ntaps:]
    equalizer = han * equalizer[:ntaps]
    """
    Mixed-phase compensation
    Based on the paper "Mixed Time-Frequency approach for Multipoint
    Room Response Equalization," by A. Carini et al.
    To use this feature, your Room Impulse Response should have all
    the leading zeros removed.
    """
    if mixed_phase is True:

        # prototype function
        hp = norm(np.real(equalizedresp))

        # time integration of the human ear is ~24ms
        # See "Measuring the mixing time in auditoria," by Defrance & Polack
        hop_size = 0.024
        samples = hop_size * Fs

        bins = int(np.ceil(len(hp) / samples))

        tmix = 0

        # Kurtosis method
        for b in range(bins):
            start = int(b * samples)
            end = int((b + 1) * samples)
            k = kurtosis(hp[start:end])
            if k <= 0:
                tmix = b * hop_size
                break

        # truncate the prototype function
        taps = int(tmix * Fs)

        print("\nmixing time(secs) = ", tmix, "; taps = ", taps)

        if taps > 0:
            # Time reverse the array
            h = hp[:taps][::-1]
            # create all pass filter
            phase = np.unwrap(np.angle(h))
            h = np.exp(1j * phase)
            # convert from db to linear
            mixed = np.power(10, np.real(h) / 20.0)
            # create filter's impulse response
            mixed = np.real(ifft(mixed))

            # convolve and window to desired length
            equalizer = conv(equalizer, mixed)
            equalizer = han * equalizer[:ntaps]

        else:
            print("zero taps; skipping mixed-phase computation")

    if opformat == 'wav':
        wav_format = 'WAV'
        subtype = 'PCM_16'
    elif opformat == 'wav24':
        wav_format = 'WAV'
        subtype = 'PCM_24'
    elif opformat == 'wav32':
        wav_format = 'WAV'
        subtype = 'PCM_32'
    elif opformat == 'bin':
        wav_format = 'RAW'
        subtype = 'FLOAT'
    else:
        print('Output format not recognized, no file generated.')
        return

    # Write data
    wavwrite(filter, Fs, norm(np.real(equalizer)), wav_format, subtype)
    print('\nOutput format is ' + opformat)
    print('Output filter length =', len(equalizer), 'taps')
    print('Output filter written to ' + filter)

    print(
        '\nUse sox to convert output .wav to raw 32 bit IEEE floating point if necessary,'
    )
    print('or to merge left and right channels into a stereo .wav')
    print('\nExample (convert): sox leq48.wav -t f32 leq48.bin')
    print('        (merge): sox -M leq48.wav req48.wav output.wav\n')

    # Plots

    if not noplot:
        data *= 500
        # original loudspeaker-room response
        plot(data, fs=Fs, avg='abs')
        # 1/3 Octave smoothed
        plot(data, fs=Fs, color='r', plots=True)

        # equalizer transfer function
        plot(0.75 * equalizer, fs=Fs, color='g')
        # indicating pole frequencies
        plt.vlines(fplog, -2, 2, color='k', linestyles='solid')

        # equalized loudspeaker-room response
        plot(equalizedresp * 0.01, fs=Fs, avg='abs')
        # 1/3 Octave smoothed
        plot(equalizedresp * 0.01, fs=Fs, color='r', plots=True)

        # Add labels
        # May need to reposition these based on input data
        plt.text(325, 30, 'Unequalized loudspeaker-room response')
        plt.text(100, -15, 'Equalizer transfer function')
        plt.text(100, -21, '(Black lines: pole locations)')
        plt.text(130, -70, 'Equalized loudspeaker-room response')

        a = plt.gca()
        a.set_xlim([20, 20000])
        a.set_ylim([-80, 80])
        plt.ylabel('Amplitude (dB)', color='b')
        plt.xlabel('Frequency (Hz)')
        plt.grid()
        plt.show()
Example #46
 def _Sublimation_Pressure(cls, T):
     """Special sublimation pressure correlation"""
     # Use decimal logarithm
     P = 10**(-43.39/T+2.5*log10(T)+2.047)
     return unidades.Pressure(P, "mmHg")
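The `unidades.Pressure` wrapper is project-specific; the correlation itself can be evaluated standalone, converting with 1 mmHg = 133.322 Pa (a minimal sketch):

from scipy import log10

def sublimation_pressure_Pa(T):
    # Same decimal-log correlation as above, converted from mmHg to Pa
    P_mmHg = 10 ** (-43.39 / T + 2.5 * log10(T) + 2.047)
    return P_mmHg * 133.322

print(sublimation_pressure_Pa(50.0))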
Example #47
def main():
    tb = pfb_top_block()

    tstart = time.time()
    tb.run()
    tend = time.time()
    print "Run time: %f" % (tend - tstart)

    if 1:
        fig1 = pylab.figure(1, figsize=(16,9))
        fig2 = pylab.figure(2, figsize=(16,9))

        Ns = 10000
        Ne = 10000

        fftlen = 8192
        winfunc = scipy.blackman
        fs = tb._fs

        # Plot the input to the decimator

        d = tb.snk_i.data()[Ns:Ns+Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)

        X, freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen // 4, Fs=fs,
                           window=lambda d: d * winfunc(fftlen),
                           scale_by_freq=True)
        X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in)+1])
        sp1_f.set_ylim([-200.0, 50.0])

        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")

        Ts = 1.0/fs
        Tmax = len(d)*Ts

        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b")
        p1_t = sp1_t.plot(t_in, x_in.imag, "r")
        sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])

        sp1_t.set_xlabel("Time (s)")
        sp1_t.set_ylabel("Amplitude")


        # Plot the output of the decimator
        fs_o = tb._fs / tb._decim

        sp2_f = fig2.add_subplot(2, 1, 1)
        d = tb.snk.data()[Ns:Ns+Ne]
        X, freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen // 4, Fs=fs_o,
                           window=lambda d: d * winfunc(fftlen),
                           scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
        p2_f = sp2_f.plot(f_o, X_o, "b")
        sp2_f.set_xlim([min(f_o), max(f_o)+1])
        sp2_f.set_ylim([-200.0, 50.0])

        sp2_f.set_title("PFB Decimated Signal", weight="bold")
        sp2_f.set_xlabel("Frequency (Hz)")
        sp2_f.set_ylabel("Power (dBW)")


        Ts_o = 1.0/fs_o
        Tmax_o = len(d)*Ts_o

        x_o = scipy.array(d)
        t_o = scipy.arange(0, Tmax_o, Ts_o)
        sp2_t = fig2.add_subplot(2, 1, 2)
        p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
        p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
        sp2_t.set_ylim([-2.5, 2.5])

        sp2_t.set_xlabel("Time (s)")
        sp2_t.set_ylabel("Amplitude")

        pylab.show()
Example #48
File: coverage.py  Project: xtmgah/spladder
def cov_from_segments(gene,
                      seg_counts,
                      edge_counts,
                      edge_idx,
                      ax,
                      sample_idx=None,
                      log=False,
                      cmap_seg=None,
                      cmap_edg=None,
                      xlim=None,
                      grid=False,
                      order='C'):
    """This function takes a gene and its corresponding segment and edge counts to
    produce a coverage overview plot."""

    if sample_idx is None:
        sample_idx = sp.arange(seg_counts.shape[1])

    norm = plt.Normalize(0, sample_idx.shape[0])

    if cmap_seg is None:
        cmap_seg = plt.get_cmap('jet')
    if cmap_edg is None:
        cmap_edg = plt.get_cmap('jet')

    ### iterate over samples
    for ii, i in enumerate(sample_idx):
        ### collect count information and add segment patches
        for j in range(gene.segmentgraph.segments.shape[1]):
            s = gene.segmentgraph.segments[:, j]
            if log:
                counts = sp.log10(seg_counts[j, i] + 1)
            else:
                counts = seg_counts[j, i]
            #ax.add_patch(patches.Rectangle((s[0], 0), s[1] - s[0], counts, fill=cmap_seg(norm(ii)),
            #             edgecolor='none', alpha=0.5))
            ax.plot(s, [counts, counts],
                    '-',
                    color=cmap_seg(norm(ii)),
                    linewidth=2)

        for j in range(edge_idx.shape[0]):
            [s, t] = sp.unravel_index(edge_idx[j],
                                      gene.segmentgraph.seg_edges.shape,
                                      order=order)
            if log:
                counts = sp.log10(edge_counts[j, i] + 1)
            else:
                counts = edge_counts[j, i]
            add_intron_patch2(ax,
                              gene.segmentgraph.segments[1, s],
                              gene.segmentgraph.segments[0, t],
                              counts,
                              color=cmap_edg(norm(ii)))

    if xlim is not None:
        ax.set_xlim(xlim)

    ### draw grid
    if grid:
        ax.grid(b=True,
                which='major',
                linestyle='--',
                linewidth=0.2,
                color='#222222')
        ax.xaxis.grid(False)

    ax.set_ylim([0, ax.get_ylim()[1]])
Example #49
import time
from numpy import arange, exp, pi, dot, log10
import matplotlib.pyplot as plt

# dF (frequency resolution), N (number of samples) and s (the signal)
# are defined earlier in the original example

# build the frequency vector
f = dF * arange(N)

# define the matrix M
n = arange(N)
k = n.reshape((N, 1))
M = exp(-2j * pi * k * n / N)

# compute the discrete Fourier transform as a plain matrix
# product X = M.s
tdebut = time.time()
X = dot(M, s)
print('Computation time (s): ', time.time() - tdebut)

# compute the normalized spectrum SP
FacteurNorm = 2.0 / N
SP = FacteurNorm * abs(X)

# convert to dB for display
SP1 = 20 * log10(SP / SP.max())

# display the results
fig = plt.figure(figsize=(14, 8))
# plot the spectrum
plt.xlabel('frequency', fontsize=16)
plt.ylabel('amplitude', fontsize=16)
plt.plot(f, SP1)

plt.show()
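A cross-check of the matrix-product DFT against the library FFT (assuming `s` from the original example is available):

import numpy as np

# The O(N^2) matrix product and the FFT must agree up to rounding
X_fft = np.fft.fft(s)
print('Max deviation:', abs(X - X_fft).max())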
Example #50
        'Path to a previously produced picca_delta.py file to correct for multiplicative errors in the flux calibration'
    )

    parser.add_argument(
        '--ivar-calib',
        type=str,
        default=None,
        required=False,
        help=
        'Path to previously produced picca_delta.py file to correct for multiplicative errors in the pipeline inverse variance calibration'
    )

    args = parser.parse_args()

    ### forest args
    forest.lmin = sp.log10(args.lambda_min)
    forest.lmax = sp.log10(args.lambda_max)
    forest.lmin_rest = sp.log10(args.lambda_rest_min)
    forest.lmax_rest = sp.log10(args.lambda_rest_max)
    forest.rebin = args.rebin
    forest.dll = args.rebin * 1e-4
    forest.dla_mask = args.dla_mask

    ### Get Healpy pixel of the given QSO
    objs = {}
    ra, dec, zqso, thid, plate, mjd, fid = io.read_drq(args.drq,
                                                       0.,
                                                       1000.,
                                                       keep_bal=True)
    cut = (plate == args.plate) & (mjd == args.mjd) & (fid == args.fiberid)
    if cut.sum() == 0:
Example #51
File: coverage.py  Project: xtmgah/spladder
def cov_from_bam(chrm,
                 start,
                 stop,
                 files,
                 subsample=0,
                 verbose=False,
                 bins=None,
                 log=False,
                 ax=None,
                 ymax=0,
                 outfile=None,
                 frm='pdf',
                 xlim=None,
                 title=None,
                 xoff=None,
                 yoff=None,
                 intron_cov=False,
                 intron_cnt=False,
                 marker_pos=None,
                 col_idx=None,
                 color_cov='blue',
                 color_intron_cov='red',
                 color_intron_edge='green',
                 grid=False,
                 strand=None,
                 highlight=None,
                 highlight_color='magenta',
                 highlight_label=None,
                 min_intron_cnt=0,
                 return_legend_handle=False,
                 label=None):
    """This function takes a list of bam files and a set of coordinates (chrm, start, stop), to 
       plot a coverage overview of that files in that region."""

    ### subsampling
    if subsample > 0 and len(files) > subsample:
        npr.seed(23)
        files = sp.array(files)
        files = npr.choice(files, subsample)

    ### augment chromosome name
    #chr_name = 'chr%s' % chrm
    chr_name = chrm

    (counts, intron_counts, intron_list) = _get_counts(chr_name,
                                                       start,
                                                       stop,
                                                       files,
                                                       intron_cov,
                                                       intron_cnt,
                                                       verbose,
                                                       collapsed=True)

    ### get mean counts over all bam files
    counts /= len(files)
    if intron_cov:
        intron_counts /= len(files)
    if intron_cnt:
        for intron in intron_list:
            intron_list[intron] = math.ceil(intron_list[intron] /
                                            float(len(files)))
        if min_intron_cnt > 0:
            intron_list = dict([(x, intron_list[x]) for x in intron_list
                                if intron_list[x] >= min_intron_cnt])
    if col_idx is not None:
        counts = counts[col_idx]
        if intron_cov:
            intron_counts = intron_counts[col_idx]
        if intron_cnt:
            print >> sys.stderr, 'ERROR: column subsetting is currently not implemented for intron edges'
            sys.exit(1)

    ### bin counts according to options
    if bins is None:
        bins = counts.shape[0]
        bin_counts = counts
        bin_intron_counts = intron_counts
        if col_idx is not None:
            counts_x = sp.arange(col_idx.shape[0])
        else:
            counts_x = range(start, stop + 1)
    else:
        if verbose:
            print >> sys.stdout, '... binning counts ...'
        bin_counts = sp.zeros((bins, ))
        bin_intron_counts = sp.zeros((bins, ))
        binsize = int(sp.ceil(float(counts.shape[0]) / bins))
        for ii, i in enumerate(range(0, counts.shape[0], binsize)):
            bin_counts[ii] = sp.sum(
                counts[i:min(i + binsize, counts.shape[0] - 1)]) / binsize
            if intron_cov:
                bin_intron_counts[ii] = sp.sum(
                    intron_counts[i:min(i + binsize, intron_counts.shape[0] -
                                        1)]) / binsize
        if col_idx is not None:
            counts_x = sp.linspace(0, col_idx.shape[0], num=bins)
        else:
            counts_x = sp.linspace(start, stop, num=bins)

    ### use log if chosen
    if log:
        bin_counts = sp.log10(bin_counts + 1)
        bin_intron_counts = sp.log10(bin_intron_counts + 1)
        if intron_cnt:
            for intron in intron_list:
                if intron_list[intron] > 0:
                    intron_list[intron] = sp.log10(intron_list[intron] + 1)

    if ax is None:
        fig = plt.figure(figsize=(10, 4))
        ax = fig.add_subplot(111)
    if intron_cov:
        ax.fill_between(counts_x,
                        bin_intron_counts,
                        facecolor=color_intron_cov,
                        edgecolor='none',
                        alpha=0.5)

    ax.fill_between(counts_x,
                    bin_counts,
                    facecolor=color_cov,
                    edgecolor='none',
                    alpha=0.5)
    #ax.set_xticklabels([str(int(x)) for x in sp.linspace(start, stop, num = len(ax.get_xticklabels()))])
    ax.set_xlabel('Position on contig %s' % chrm)

    ### draw strand
    if strand == '+':
        ax.arrow(0.05,
                 0.9,
                 0.2,
                 0,
                 head_width=0.05,
                 head_length=0.02,
                 fc='#cccccc',
                 ec='#cccccc',
                 transform=ax.transAxes)
    elif strand == '-':
        ax.arrow(0.25,
                 0.9,
                 -0.2,
                 0,
                 head_width=0.05,
                 head_length=0.02,
                 fc='#cccccc',
                 ec='#cccccc',
                 transform=ax.transAxes)

    ### draw grid
    if grid:
        ax.grid(b=True,
                which='major',
                linestyle='--',
                linewidth=0.2,
                color='#222222')
        ax.xaxis.grid(False)

    if marker_pos is not None:
        ax.plot(0, marker_pos, 'or')

    if log:
        ax.set_ylabel('Read Coverage (log10)')
    else:
        ax.set_ylabel('Read Coverage')

    if ymax > 0:
        ax.set_ylim([0, ymax])

    if highlight is not None:
        highlight_x(ax,
                    highlight,
                    highlight_color=highlight_color,
                    label=highlight_label)

    if xlim is not None:
        ax.set_xlim(xlim)

    ylim = ax.get_ylim()
    ax.set_ylim([0, ylim[1]])

    if title is not None:
        ax.set_title(title)

    if xoff:
        ax.axes.get_xaxis().set_visible(False)

    if yoff:
        ax.axes.get_yaxis().set_visible(False)

    if intron_cnt:
        for intron in intron_list:
            add_intron_patch2(ax,
                              start + intron[0],
                              start + intron[1] + intron[0],
                              intron_list[intron],
                              color=color_intron_edge)

    if outfile is not None:
        plt.savefig(outfile, dpi=1200, format=frm)

    if return_legend_handle:
        if label is not None:
            return patches.Patch(color=color_cov, alpha=0.5, label=label)
        else:
            return patches.Patch(color=color_cov,
                                 alpha=0.5,
                                 label='Expression')
Example #52
def plot_manhattan(ax,
                   df,
                   pv_thr=None,
                   colors=None,
                   offset=None,
                   callback=None):
    """
    Utility function to make manhattan plot

    Parameters
    ----------
    ax : pyplot plot
        subplot
    df : pandas.DataFrame
        pandas DataFrame with chrom, pos and pv
    colors : list
        colors to use in the manhattan plot
    offset : float
        offset between chromosomes, expressed as a fraction of the
        length of the longest chromosome (default is 0.2)
    callback : function
        callback function that takes as input df

    Examples
    --------

    .. doctest::

        >>> from matplotlib import pyplot as plt
        >>> from limix_lmm.plot import plot_manhattan
        >>> import scipy as sp
        >>> import pandas as pd
        >>> n_chroms = 5
        >>> n_snps_per_chrom = 10000
        >>> chrom = sp.kron(sp.arange(1, n_chroms + 1), sp.ones(n_snps_per_chrom))
        >>> pos = sp.kron(sp.ones(n_chroms), sp.arange(n_snps_per_chrom))
        >>> pv = sp.rand(n_chroms * n_snps_per_chrom)
        >>> df = pd.DataFrame({'chrom': chrom, 'pos': pos, 'pv': pv})
        >>>
        >>> ax = plt.subplot(111)
        >>> plot_manhattan(ax, df)
    """
    if colors is None:
        colors = ["k", "Gray"]
    if offset is None:
        offset = 0.2
    dx = offset * df["pos"].values.max()
    _x = 0
    xticks = []
    for chrom_i in sp.unique(df["chrom"].values):
        _df = df[df["chrom"] == chrom_i]
        if chrom_i % 2 == 0:
            color = colors[0]
        else:
            color = colors[1]
        ax.plot(_df["pos"] + _x, -sp.log10(_df["pv"]), ".", color=color)
        if callback is not None:
            callback(_df)
        xticks.append(_x + 0.5 * _df["pos"].values.max())
        _x += _df["pos"].values.max() + dx
    ax.set_xticks(xticks)
    ax.set_xticklabels(sp.unique(df["chrom"].values))
Example #53
def snr_est_m2m4(signal):
    M2 = scipy.mean(abs(signal)**2)
    M4 = scipy.mean(abs(signal)**4)
    snr_rat = scipy.sqrt(2 * M2 * M2 - M4) / (M2 -
                                              scipy.sqrt(2 * M2 * M2 - M4))
    return 10.0 * scipy.log10(snr_rat), snr_rat
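A minimal self-check of the M2M4 estimator on synthetic BPSK plus complex noise, following the legacy `scipy.random` usage of the surrounding examples (values are illustrative):

import scipy

N = 100000
snr_true = 10.0 ** (10.0 / 10.0)                 # 10 dB
scale = scipy.sqrt(2 * snr_true)
bits = 2.0 * scipy.random.randint(0, 2, N) - 1.0
noise = scipy.random.randn(N) + 1j * scipy.random.randn(N)

snr_db, _ = snr_est_m2m4(bits + noise / scale)
print(snr_db)  # should come out close to 10 dB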
Example #54
def main():
    gr_estimators = {
        "simple": digital.SNR_EST_SIMPLE,
        "skew": digital.SNR_EST_SKEW,
        "m2m4": digital.SNR_EST_M2M4,
        "svr": digital.SNR_EST_SVR
    }
    py_estimators = {
        "simple": snr_est_simple,
        "skew": snr_est_skew,
        "m2m4": snr_est_m2m4,
        "svr": snr_est_svr
    }

    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option(
        "-N",
        "--nsamples",
        type="int",
        default=10000,
        help="Set the number of samples to process [default=%default]")
    parser.add_option("",
                      "--snr-min",
                      type="float",
                      default=-5,
                      help="Minimum SNR [default=%default]")
    parser.add_option("",
                      "--snr-max",
                      type="float",
                      default=20,
                      help="Maximum SNR [default=%default]")
    parser.add_option("",
                      "--snr-step",
                      type="float",
                      default=0.5,
                      help="SNR step amount [default=%default]")
    parser.add_option("-t",
                      "--type",
                      type="choice",
                      choices=gr_estimators.keys(),
                      default="simple",
                      help="Estimator type {0} [default=%default]".format(
                          gr_estimators.keys()))
    (options, args) = parser.parse_args()

    N = options.nsamples
    xx = scipy.random.randn(N)
    xy = scipy.random.randn(N)
    bits = 2 * scipy.complex64(scipy.random.randint(0, 2, N)) - 1
    #bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
    #    1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)

    snr_known = list()
    snr_python = list()
    snr_gr = list()

    # when to issue an SNR tag; can be ignored in this example.
    ntag = 10000

    n_cpx = xx + 1j * xy

    py_est = py_estimators[options.type]
    gr_est = gr_estimators[options.type]

    SNR_min = options.snr_min
    SNR_max = options.snr_max
    SNR_step = options.snr_step
    SNR_dB = scipy.arange(SNR_min, SNR_max + SNR_step, SNR_step)
    for snr in SNR_dB:
        SNR = 10.0**(snr / 10.0)
        scale = scipy.sqrt(2 * SNR)
        yy = bits + n_cpx / scale
        print "SNR: ", snr

        Sknown = scipy.mean(yy**2)
        Nknown = scipy.var(n_cpx / scale)
        snr0 = Sknown / Nknown
        snr0dB = 10.0 * scipy.log10(snr0)
        snr_known.append(float(snr0dB))

        snrdB, snr = py_est(yy)
        snr_python.append(snrdB)

        gr_src = blocks.vector_source_c(bits.tolist(), False)
        gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
        gr_chn = channels.channel_model(1.0 / scale)
        gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
        tb = gr.top_block()
        tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
        tb.run()

        snr_gr.append(gr_snr.snr())

    f1 = pylab.figure(1)
    s1 = f1.add_subplot(1, 1, 1)
    s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
    s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
    s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
    s1.grid(True)
    s1.set_title('SNR Estimators')
    s1.set_xlabel('SNR (dB)')
    s1.set_ylabel('Estimated SNR')
    s1.legend()

    f2 = pylab.figure(2)
    s2 = f2.add_subplot(1, 1, 1)
    s2.plot(yy.real, yy.imag, 'o')

    pylab.show()
Example #55
    def _update_canvas(self):
        """
        Update the figure when the user changes and input value.
        :return:
        """
        # Get the parameters from the form
        prf_stagger = float(self.prf_stagger.text())

        # Get the PRF type
        prf_type = self.prf_type.currentText()

        # Set up the normalized frequency space
        frequency = linspace(0, 4, 1000)

        # Clear the axes for the updated plot
        self.axes1.clear()

        # Calculate response based on PRF type
        if prf_type == 'Single':
            response = countermeasures.delay_line(frequency) / 4.0

            # Display the results
            self.axes1.plot(frequency, 10 * log10(response + finfo(float).eps),
                            '')

        elif prf_type == 'Stagger':
            response_prf1 = countermeasures.delay_line(frequency) / 4.0
            response_prf2 = countermeasures.delay_line(
                prf_stagger * frequency) / 4.0
            response = 0.5 * (response_prf1 + response_prf2)

            # Display the results
            self.axes1.plot(frequency,
                            10 * log10(response_prf1 + finfo(float).eps),
                            '',
                            label='PRF 1')
            self.axes1.plot(frequency,
                            10 * log10(response_prf2 + finfo(float).eps),
                            '--',
                            label='PRF 2')
            self.axes1.plot(frequency,
                            10 * log10(response + finfo(float).eps),
                            ':',
                            label='PRF Staggered')

            # Place the legend
            self.axes1.legend(loc='lower left', prop={'size': 10})

        # Set the plot title and labels
        self.axes1.set_title('Delay Line Response', size=14)
        self.axes1.set_xlabel('Normalized Frequency (f / PRF)', size=12)
        self.axes1.set_ylabel('Amplitude (dB)', size=12)

        # Set the y-axis lim
        self.axes1.set_ylim([-30, 1])

        # Turn on the grid
        self.axes1.grid(linestyle=':', linewidth=0.5)

        # Set the tick label size
        self.axes1.tick_params(labelsize=12)

        # Update the canvas
        self.my_canvas.draw()
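`countermeasures.delay_line` is not shown in this excerpt. The division by 4.0 above suggests the classic single delay-line canceller power response, 4·sin²(π·f/PRF); a hypothetical stand-in under that assumption:

import numpy as np

def delay_line(frequency):
    # Assumed form: single delay-line canceller power response, with the
    # frequency axis already normalized to the PRF (peak value 4)
    return 4.0 * np.sin(np.pi * frequency) ** 2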
Example #56
 def LogVTKIntersectiondata(self):
     print("Logging VTK intersection")
     # log for vtk intersect
     for r in range(self.vtkintersection.shape[0]):
         self.vtkintersection[r, :] = scipy.log10(self.intersection[r, :])
Example #57
 def LogVTKdata(self):
     print("Logging VTK coords")
     # log for vtk
     for r in range(self.vtkcoords.shape[0]):
         self.vtkcoords[r, :self.coords.shape[1]] = scipy.log10(
             self.vtkcoords[r, :self.coords.shape[1]])
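Both helpers apply an element-wise log10 one row at a time; since `scipy.log10` is vectorized, each loop collapses to a single call (a sketch, assuming no per-row special handling is needed):

def LogVTKdata(self):
    print("Logging VTK coords")
    ncols = self.coords.shape[1]
    # One vectorized call instead of a Python-level loop over rows
    self.vtkcoords[:, :ncols] = scipy.log10(self.vtkcoords[:, :ncols])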
Example #58
    def cont_fit(self):
        lmax = forest.lmax_rest + sp.log10(1 + self.zqso)
        lmin = forest.lmin_rest + sp.log10(1 + self.zqso)
        try:
            mc = forest.mean_cont(self.ll - sp.log10(1 + self.zqso))
        except ValueError:
            raise Exception

        if self.T_dla is not None:
            mc *= self.T_dla

        var_lss = forest.var_lss(self.ll)
        eta = forest.eta(self.ll)
        fudge = forest.fudge(self.ll)

        def model(p0, p1):
            line = p1 * (self.ll - lmin) / (lmax - lmin) + p0
            return line * mc

        def chi2(p0, p1):
            m = model(p0, p1)
            var_pipe = 1. / self.iv / m**2
            ## prep_del.variance is the variance of delta;
            ## here we want the weights we = ivar(flux)

            var_tot = variance(var_pipe, eta, var_lss, fudge)
            we = 1 / m**2 / var_tot

            # force we=1 when use-constant-weight
            # TODO: make this condition clearer, maybe pass an option
            # use_constant_weights?
            if (eta == 0).all():
                we = sp.ones(len(we))
            v = (self.fl - m)**2 * we
            return v.sum() - sp.log(we).sum()

        p0 = (self.fl * self.iv).sum() / self.iv.sum()
        p1 = 0

        mig = iminuit.Minuit(chi2,
                             p0=p0,
                             p1=p1,
                             error_p0=p0 / 2.,
                             error_p1=p0 / 2.,
                             errordef=1.,
                             print_level=0,
                             fix_p1=(self.order == 0))
        fmin, _ = mig.migrad()

        self.co = model(mig.values["p0"], mig.values["p1"])
        self.p0 = mig.values["p0"]
        self.p1 = mig.values["p1"]

        self.bad_cont = None
        if not fmin.is_valid:
            self.bad_cont = "minuit didn't converge"
        if sp.any(self.co <= 0):
            self.bad_cont = "negative continuum"

        ## if the continuum is negative, then set it to a very small number
        ## so that this forest is ignored
        if self.bad_cont is not None:
            self.co = self.co * 0 + 1e-10
            self.p0 = 0.
            self.p1 = 0.
Example #59
    N2 = inversion(input_inten_wls, pump)
    N1 = 1 - N2

    # P_p and P_s

    totalSignal = []

    # For example and illustration, solve power equations for 1 m
    for k in range(0, len(wl_grid)):
        p_init = input_inten_wls[k]
        solution = odeint(power_signal, p_init, z, args=(N1, N2, k))
        totalSignal.append(solution[:, 0])

    pump_solution = odeint(power_pump, pump, z, args=(N1, N2))
    pump_absorption = -10 * sp.log10(
        max(pump_solution[1000]) / max(pump_solution[0])
    )  # check that the pump absorption matches the data sheet

    print(pump_absorption)

    signalPlotting = sp.asarray(totalSignal)

    plt.figure()
    plt.plot(z, solution[:, 0], '--', label='signal')
    plt.legend(loc='best')
    plt.xlabel('z')
    plt.ylabel('power (W)')
    plt.grid()

    plt.figure()
    plt.plot(z, pump_solution[:, 0], '--', label='pump')
Example #60
def map_phenotype(p_i, phed, snps_data_file, mapping_method, trans_method, p_dict):
	phenotype_name = phed.getPhenotypeName(p_i)
	phen_is_binary = phed.isBinary(p_i)
	file_prefix = _get_file_prefix_(p_dict['run_id'], p_i, phed.getPhenotypeName(p_i),
				mapping_method, trans_method, p_dict['remove_outliers'])
	result_name = "%s_%s_%s" % (phenotype_name, mapping_method, trans_method)

	res = None
	sd = dataParsers.parse_snp_data(snps_data_file , format=p_dict['data_format'], filter=p_dict['debug_filter'])
	num_outliers = gwa.prepare_data(sd, phed, p_i, trans_method, p_dict['remove_outliers'])
	if p_dict['remove_outliers']:
		assert num_outliers != 0, "No outliers were removed, so it makes no sense to go on and perform GWA."

	phen_vals = phed.getPhenVals(p_i)
	snps = sd.getSnps()
	if mapping_method in ['emmax']:
		#Load genotype file (in binary format)
		sys.stdout.write("Retrieving the Kinship matrix K.\n")
		sys.stdout.flush()
		k_file = env['data_dir'] + "kinship_matrix_cm" + str(p_dict['call_method_id']) + ".pickled"
		kinship_file = p_dict['kinship_file']
		if not kinship_file and os.path.isfile(k_file): #Check if corresponding call_method_file is available
			kinship_file = k_file
		if kinship_file:   #Kinship file was somehow supplied..
			print('Loading supplied kinship')
			k = lm.load_kinship_from_file(kinship_file, sd.accessions)
		else:
			print "No kinship file was found.  Generating kinship file:", k_file
			sd = dataParsers.parse_snp_data(snps_data_file , format=p_dict['data_format'])
			snps = sd.getSnps()
			k_accessions = sd.accessions[:]
			if p_dict['debug_filter']:
				import random
				snps = random.sample(snps, int(p_dict['debug_filter'] * len(snps)))
			k = lm.calc_kinship(snps)
			f = open(k_file, 'w')
			cPickle.dump([k, sd.accessions], f)
			f.close()
			num_outliers = gwa.prepare_data(sd, phed, p_i, trans_method, p_dict['remove_outliers'])
			k = lm.filter_k_for_accessions(k, k_accessions, sd.accessions)
		sys.stdout.flush()
		sys.stdout.write("Done!\n")

	if p_dict['remove_outliers']:
		assert num_outliers != 0, "No outliers were removed, so it makes no sense to go on and perform GWA."


	#Check whether result already exists.
	if p_dict['use_existing_results']:
		print "\nChecking for existing results."
		result_file = file_prefix + ".pvals"
		if os.path.isfile(result_file):
			res = gwaResults.Result(result_file=result_file, name=result_name, snps=snps)
			pvals = True
		else:
			result_file = file_prefix + ".scores"
			if os.path.isfile(result_file):
				res = gwaResults.Result(result_file=result_file, name=result_name, snps=snps)
				pvals = False
		if res:
			print "Found existing results.. (%s)" % (result_file)
		sys.stdout.flush()


	if not res: #If results weren't found in a file... then do GWA.

		sys.stdout.write("Finished loading and handling data!\n")

		print "FIRST STEP: Applying %s to data. " % (mapping_method)
		sys.stdout.flush()
		kwargs = {}
		additional_columns = []
		if mapping_method in ['emmax']:
			res = lm.emmax(snps, phen_vals, k)
		elif mapping_method in ['lm']:
			res = lm.linear_model(snps, phen_vals)
		else:
			print "Mapping method", mapping_method, 'was not found.'
			sys.exit(2)

		if mapping_method in ['lm', 'emmax']:
			kwargs['genotype_var_perc'] = res['var_perc']
			betas = list(map(list, zip(*res['betas'])))
			kwargs['beta0'] = betas[0]
			kwargs['beta1'] = betas[1]
			additional_columns.append('genotype_var_perc')
			additional_columns.append('beta0')
			additional_columns.append('beta1')
			pvals = res['ps']
			sys.stdout.write("Done!\n")
			sys.stdout.flush()



		kwargs['correlations'] = calc_correlations(snps, phen_vals)
		additional_columns.append('correlations')

		res = gwaResults.Result(scores=pvals, snps_data=sd, name=result_name, **kwargs)

		if mapping_method in ["emmax", 'lm']:
		 	result_file = file_prefix + ".pvals"
		else:
		 	result_file = file_prefix + ".scores"
		res.write_to_file(result_file, additional_columns)

		print "Generating a GW plot."
		sys.stdout.flush()
		png_file = file_prefix + "_gwa_plot.png"
		#png_file_max30 = file_prefix+"_gwa_plot_max30.png"
		if mapping_method in ['lm', "emmax"]:
			res.neg_log_trans()
			if mapping_method in ["kw", "ft"]:# or p_dict['data_format'] != 'binary':
				#res.plot_manhattan(png_file=png_file_max30,percentile=90,type="pvals",ylab="$-$log$_{10}(p)$", 
				#	       plot_bonferroni=True,max_score=30)
				res.plot_manhattan(png_file=png_file, percentile=90, type="pvals", ylab="$-$log$_{10}(p)$",
					       plot_bonferroni=True)
			else:
				if res.filter_attr("mafs", p_dict['mac_threshold']) > 0:
					#res.plot_manhattan(png_file=png_file_max30,percentile=90,type="pvals",ylab="$-$log$_{10}(p)$", 
					#	       plot_bonferroni=True,max_score=30)				
					res.plot_manhattan(png_file=png_file, percentile=90, type="pvals", ylab="$-$log$_{10}(p)$",
						       plot_bonferroni=True)
		else:
			pass

		print "plotting histogram"
		hist_file_prefix = _get_file_prefix_(p_dict['run_id'], p_i, phenotype_name, trans_method, p_dict['remove_outliers'])
		hist_png_file = hist_file_prefix + "_hist.png"
		phed.plot_histogram(p_i, pngFile=hist_png_file)
	else:
		res.neg_log_trans()
		assert res.filter_attr("mafs", p_dict['mac_threshold']), 'All SNPs have MAC smaller than threshold'


	print "SECOND STEP:"
	res.filter_top_snps(p_dict['second_step_number'])
	snps = res.snps
	positions = res.positions
	chromosomes = res.chromosomes
	#Checking res_file exists
	file_prefix = _get_file_prefix_(p_dict['run_id'], p_i, phed.getPhenotypeName(p_i),
				mapping_method, trans_method, p_dict['remove_outliers'], p_dict['second_step_number'])
	res_file = file_prefix + '_res.cpickled'
	if p_dict['use_existing_results'] and os.path.isfile(res_file):
			print('Found existing results for the second step... loading.')
			f = open(res_file, 'rb')
			second_res = cPickle.load(f)
			f.close()
	else:
		if mapping_method == 'lm':
			second_res = lm.linear_model_two_snps(snps, phen_vals)
		if mapping_method == 'emmax':
			second_res = lm.emmax_two_snps(snps, phen_vals, k)

		#Pickling results..
		print('Saving results as pickled file:', res_file)
		f = open(res_file, 'wb')
		cPickle.dump(second_res, f, protocol=2)
		f.close()



	#Plotting second step plots:
	score_array = -sp.log10(second_res['ps'])
	p3_score_array = -sp.log10(second_res['p3_ps'])
	p4_score_array = -sp.log10(second_res['p4_ps'])
	import plotResults as pr
	pr.plot_snp_pair_result(chromosomes, positions, score_array, file_prefix + '_scatter')
	pr.plot_snp_pair_result(chromosomes, positions, p3_score_array, file_prefix + '_p3_scatter')
	pr.plot_snp_pair_result(chromosomes, positions, p4_score_array, file_prefix + '_p4_scatter')



	if p_dict['region_plots']:
		import regionPlotter as rp
		regions_results = res.get_top_region_results(p_dict['region_plots'])
		plotter = rp.RegionPlotter()
		print "Starting region plots..."
		for reg_res in regions_results:
			chromosome = reg_res.chromosomes[0]
			caption = phenotype_name + "_c" + str(chromosome) + "_" + mapping_method
			png_file = file_prefix + "_reg_plot_c" + str(chromosome) + "_s" + str(reg_res.positions[0]) \
				+ "_e" + str(reg_res.positions[-1]) + ".png"
			tair_file = file_prefix + "_reg_plot_c" + str(chromosome) + "_s" + str(reg_res.positions[0]) \
				+ "_e" + str(reg_res.positions[-1]) + "_tair_info.txt"
			plotter.plot_small_result([reg_res], png_file=png_file, highlight_gene_ids=tair_ids,
						  caption=caption, tair_file=tair_file)

			#Plot Box-plot
			png_file = file_prefix + "_reg_plot_c" + str(chromosome) + "_s" + str(reg_res.positions[0]) \
				+ "_e" + str(reg_res.positions[-1]) + "_box_plot.png"
			(marker, score, chromosome, pos) = reg_res.get_max_snp()
			marker_accessions = sd.accessions
			phed.plot_marker_box_plot(p_i, marker=marker, marker_accessions=marker_accessions, \
						png_file=png_file, title="c" + str(chromosome) + "_p" + str(pos), \
						marker_score=score, marker_missing_val=sd.missing_val)