Code example #1
		def phi_f(mu, gam):
			"""
			Interpolate but extrapolate using the nearest value
			on the grid.
			"""
			mu = npmax(self.mumin, mu)
			gam = npmax(self.gammin, gam)
			mu = npmin(self.mumax, mu)
			gam = npmin(self.gammax, gam)
			return interp_phi(mu, gam)
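Note: the listings on this page never show their imports. In these projects npmin and npmax are presumably numpy aliases (something along the lines of `from numpy import min as npmin, max as npmax`, or the element-wise maximum/minimum variants); that is an assumption, not something the snippets confirm. The nearest-value clamping in code example #1 can be written as a small standalone sketch with numpy.clip and hypothetical grid bounds:

import numpy as np

# Hypothetical grid bounds; in the original they come from the object's grid setup.
mumin, mumax = 0.0, 10.0
gammin, gammax = 0.1, 5.0

def clamp_to_grid(mu, gam):
    # np.clip reproduces the "extrapolate with the nearest grid value" behaviour
    # that example #1 obtains from the paired max/min calls.
    return np.clip(mu, mumin, mumax), np.clip(gam, gammin, gammax)

print(clamp_to_grid(12.3, 0.05))   # clamps to the corner (10.0, 0.1)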
Code example #2
File: dz_LineMesurer.py  Project: Delosari/X_Bin
    def Estimate_p0(self, x, y, p_0, Deblend_Check):
        
        #INCLUDING P_0 HERE IS DIRTY BUT WE NEED IT FOR THE BATCH PROCESS LINE MEASURING
        Emission_Line = True
        
        #Check if emission or absorption
        if self.LocalMedian > npmax(y):
            Emission_Line = False
        
#         print ' the line list is', self.List_BlendedLines[1][self.Current_BlendedGroup]
        #Case of a blended line
        if (Deblend_Check != None) and (p_0 == None):
            if Emission_Line:
                
                mu_0_List, A_0_List, Deblend_Check = self.FindMaxima(x, y, MinLevel = self.LocalMedian  + 4 * self.SigmaContinuum, ListLines=self.List_BlendedLines[1][self.Current_BlendedGroup], Deblend_Check=Deblend_Check)
                sigma_i                  = zeros(len(self.List_BlendedLines[0][self.Current_BlendedGroup]))
                             
                #Calculating the sigmas:
                Interest_Points = [self.Selections[2], self.Selections[3]]
#                 for i in range(len(A_0_List) - 1):
# 
#                     Peak_Left, Peak_Right               = where(y == A_0_List[i])[0],  where(y == A_0_List[i + 1])[0]
#                     x_BetweenMaxima, y_BetweenMaxima    = x[Peak_Left : Peak_Right], y[Peak_Left: Peak_Right]
#                     x_m = x_BetweenMaxima[argmin(y_BetweenMaxima)]            
#                     Interest_Points.append(x_m)
# 
#                 Interest_Points.sort()
#                 for i in range(len(sigma_i)):
#                     sigma_i[i] = (Interest_Points[i+1] - Interest_Points[i]) /2 / 2.335            #If the lines are very closed no point in dividing by our /2 factor.

                for i in range(len(sigma_i)):
                    sigma_i[i] = 1            #If the lines are very close there is no point in dividing by our /2 factor.
            p_0 = [A_0_List, mu_0_List, sigma_i]
            
        if (Deblend_Check == None) and (p_0 == None):
            
            self.Kmpfit_Dictionary = [{},{},{'limits':(0,3)}]
            
            if Emission_Line:
                A_0                 = npmax(y)
                mu_0                = x[where(y == A_0)]
                sigma_0             = ((absolute(mu_0 - x[0]) + absolute(x[-1] - mu_0)) / 2.0) / 2.335
            else:
                                    #SHOULD WE CHANGE THIS ZEROLEV FOR THE ONE TRULY AT A_0?
                A_0                 = 2 * self.LocalMedian - npmin(y)
                mu_0                = x[where(y == npmin(y))]
                sigma_0             = ((absolute(mu_0 - x[0]) + absolute(x[-1] - mu_0)) / 2.0) / 2.335
                
            p_0 = [A_0, mu_0, sigma_0]
        
        return Emission_Line, p_0, Deblend_Check
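Code example #2 builds the initial Gaussian guess p_0 = [amplitude, centre, sigma] directly from the spectrum. A minimal sketch of the single-line case with plain numpy (the class attributes such as LocalMedian are left out, and the width is converted with the standard FWHM-to-sigma factor 2.355, whereas the snippet divides by 2.335):

import numpy as np

def estimate_p0(x, y):
    # Amplitude: highest flux value; centre: the wavelength where it occurs.
    A_0 = np.max(y)
    mu_0 = x[np.argmax(y)]
    # Sigma: half the mean distance from the peak to the window edges,
    # treated as a FWHM-like width and converted to a Gaussian sigma.
    sigma_0 = ((abs(mu_0 - x[0]) + abs(x[-1] - mu_0)) / 2.0) / 2.355
    return [A_0, mu_0, sigma_0]

x = np.linspace(6550.0, 6575.0, 200)
y = 5.0 * np.exp(-0.5 * ((x - 6563.0) / 1.5) ** 2)
print(estimate_p0(x, y))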
Code example #3
File: mklaren.py  Project: rahlk/Bellwether
    def gradient(self, X, bisec, A, residual, c_vec, a_vec, ina):
        """
        The function is used in two contexts:
            a) Selection of the candidate pivot based on the vector of correlations
                with the residual (c_vec) and bisector (a_vec) from ina. This is
                happening within a G for one kernel.
            b) Determination of gradient, given that candidate pivot has already
                been selected (ina is set of length 1). This is happening within
                the global X for all kernels.

        Assume the inputs are already of the correct dimensions.

        :param X:
            Matrix of covariates.
        :param bisec:
            Bisector vector.
        :param c_vec:
            Estimated correlations with the residual.
        :param a_vec:
            Estimated correlations with the bisector.
        :param ina:
            Inactive set to index c_vec, a_vec.
        :param single:
            Select minimal absolute value, do not discard negatives. Used when
            repairing the regression line when new ("random") column is added.
        :return:
            gamma: gradient
            pivot: pivot index
        """
        assert len(c_vec) == len(ina)
        assert (a_vec is None) or (len(a_vec) == len(c_vec))

        if bisec is None:
            return max(c_vec), ina[argmax(c_vec)]

        xsig    = sign(X.T.dot(residual)).ravel()
        X       = X * xsig
        C       = max(absolute(X.T.dot(residual)))

        # Get minimum over positive components
        # Since X is standardized, this always exists.
        scores = zeros((2, len(ina)))
        scores[0, :] = div(C - c_vec, A - a_vec).ravel()
        scores[1, :] = div(C + c_vec, A + a_vec).ravel()
        scores[where(scores == 0)] = float("inf")
        scores = npmin(scores, axis=0)
        gamma  = npmin(scores)
        pivot  = ina[argmin(scores)]
        assert not isinf(gamma)
        return gamma, pivot
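The step-size rule in code example #3 is the usual LARS-style one: take the smallest positive value of (C - c)/(A - a) and (C + c)/(A + a) over the inactive set. A self-contained sketch of just that rule; the original's div helper presumably guards against division by zero, which is glossed over here:

import numpy as np

def lars_step(C, A, c_vec, a_vec):
    # Stack both candidate ratios, discard non-positive entries, take the minimum.
    scores = np.vstack([(C - c_vec) / (A - a_vec),
                        (C + c_vec) / (A + a_vec)])
    scores[scores <= 0] = np.inf
    per_pivot = scores.min(axis=0)
    return per_pivot.min(), int(np.argmin(per_pivot))

C, A = 1.0, 0.8
c_vec = np.array([0.3, 0.9, -0.2])
a_vec = np.array([0.1, 0.5, 0.4])
print(lars_step(C, A, c_vec, a_vec))   # smallest positive step and its pivot index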
Code example #4
File: bayesian_data.py  Project: Delosari/Dazer
    def plot_chiSq_Behaviour(self, Traces,  labels):
    
        n_traces = len(Traces)
        
        self.FigConf(Figtype = 'Grid', n_colors = n_traces, n_columns=4, n_rows=2, FigHeight=9, FigWidth=16)
        
        chisq_adapted   = reshape(self.pymc_database.trace('ChiSq')[:], len(self.pymc_database.trace('ChiSq')[:])) * -2
        y_lim           = 30
        min_chi_index   = argmin(chisq_adapted)
        
        for i in range(len(Traces)):
            Trace   = Traces[i]
            label   = labels[i]       

            if Trace != 'ChiSq':
                self.Axis1[i].scatter(x = self.pymc_database.trace(Trace)[:], y = chisq_adapted, color=self.ColorVector[2][i])
                x_min           = npmin(self.pymc_database.trace(Trace)[:]) 
                x_max           = npmax(self.pymc_database.trace(Trace)[:])

                self.Axis1[i].axvline(x = self.statistics_dict[Trace]['mean'], label = 'Inference value: ' + round_sig(self.statistics_dict[Trace]['mean'], 4,scien_notation=False), color='grey', linestyle = 'solid')
                self.Axis1[i].scatter(self.pymc_database.trace(Trace)[:][min_chi_index], chisq_adapted[min_chi_index], color='Black', label = r'$\chi^{2}_{min}$ value: ' + round_sig(self.pymc_database.trace(Trace)[:][min_chi_index],4,scien_notation=False))
                
                self.Axis1[i].set_ylabel(r'$\chi^{2}$',fontsize=20)
                self.Axis1[i].set_ylim(0, y_lim)
                self.Axis1[i].set_xlim(x_min, x_max)
                self.Axis1[i].set_title(label,fontsize=20)
                legend_i = self.Axis1[i].legend(loc='best', fontsize='x-large')
                legend_i.get_frame().set_facecolor('white')
Code example #5
File: line.py  Project: 3ptscience/steno3dpy
 def _validate_seg(self):
     if npmin(self.segments) < 0:
         raise ValueError('Segments may only have positive integers')
     if npmax(self.segments) >= len(self.vertices):
         raise ValueError('Segments expects more vertices than provided')
     self._validate_file_size('segments', self.segments)
     self._validate_file_size('vertices', self.vertices)
     return True
Code example #6
File: surface.py  Project: dopplershift/steno3dpy
 def validate(self):
     """Check if mesh content is built correctly"""
     if npmin(self.triangles) < 0:
         raise ValueError('Triangles may only have positive integers')
     if npmax(self.triangles) >= len(self.vertices):
         raise ValueError('Triangles expects more vertices than provided')
     self._validate_file_size('vertices')
     self._validate_file_size('triangles')
     return True
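Code examples #5 and #6 are the same bounds check: every index stored in the connectivity array (segments or triangles) must refer to an existing vertex. A standalone sketch of that check outside the steno3d classes:

import numpy as np

def validate_connectivity(indices, vertices):
    # Indices must be non-negative and must not point past the last vertex.
    if np.min(indices) < 0:
        raise ValueError('Connectivity may only contain non-negative integers')
    if np.max(indices) >= len(vertices):
        raise ValueError('Connectivity references more vertices than provided')
    return True

vertices = np.zeros((4, 3))
print(validate_connectivity(np.array([[0, 1], [2, 3]]), vertices))   # True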
Code example #7
def general_min(*args):
    if rank == 0:
        if len(args)==1:
            if isinstance(args[0],Raster) and args[0].data is not None:
                return npmin(args[0].data)
        else:
            return min(*args)
    else:
        return 0
Code example #8
File: DZ_DataExplorer.py  Project: Delosari/Dazer
 def load_SpectraData(self):
     
     self.Wave, self.Int, self.ExtraData = self.get_spectra_data(self.Current_Folder + self.Current_Spec)
     
     Wmin = npmin(self.Wave)
     Wmax = npmax(self.Wave)
     
     #We erase the stored emission lines list
     del self.EM_in_plot[:]          
                                      
     for i in range(len(self.Wavelengths_Total)):
         if Wmin <= self.Wavelengths_Total[i] <= Wmax:
                 self.EM_in_plot.append(i)
Code example #9
File: DZ_ScreenManager.py  Project: Delosari/X_Bin
    def Load_LineMesurerData(self):
        
        #SHOULD THIS FOLDER BE LOCATED ON THE LINEMESURER... NOT SURE BECAUSE IT SHOULD BE INTEGRATED WITH THE INTERFACE...
        self.Current_LinesLog           = self.Current_Folder + self.Current_Code + '_'+ self.DataType + self.LinesLogExtension_Name

        self.Wave, self.Int, self.ExtraData = self.File2Data(self.Current_Folder, self.Current_Spec)
        Wmin = npmin(self.Wave)
        Wmax = npmax(self.Wave)
        
        #We erase the stored emission lines list
        del self.EM_in_plot[:]          
                                         
        for i in range(len(self.Wavelengths_Total)):
            if Wmin <= self.Wavelengths_Total[i] <= Wmax:
                    self.EM_in_plot.append(i)
                               
        if len(self.Labels_Total) < 3:
            print "WARNING: Very few emission lines found within spectrum"
        
        #Generate emission lines log
        self.CleanTableMaker(self.Current_LinesLog, self.RemakeFiles, self.ColumnHeaderVector, self.ColumnWidth)   
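The loop shared by code examples #8 and #9 collects the indices of catalogue wavelengths that fall inside the observed range. The same result follows from a boolean mask; a sketch with a stand-in catalogue and spectrum axis:

import numpy as np

wavelengths_total = np.array([3727.0, 4861.0, 5007.0, 6563.0, 9069.0])  # stand-in catalogue
wave = np.linspace(4000.0, 7000.0, 1000)                                # stand-in spectrum axis
wmin, wmax = np.min(wave), np.max(wave)

# Indices of catalogue lines inside [wmin, wmax]; equivalent to the explicit loop.
em_in_plot = np.where((wavelengths_total >= wmin) & (wavelengths_total <= wmax))[0]
print(em_in_plot)                                                       # [1 2 3]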
Code example #10
File: box.py  Project: kayghar/SpanishAcquisitionIQC
def get_mask(x,y, tx, ty):
	dx = (tx[-1] - tx[0])/(tx.size -1)
	dy = (ty[-1] - ty[0])/(ty.size -1)

	d2 = dx**2 + dy**2

	xgrid = meshgrid(x,tx)
	ygrid = meshgrid(y,ty)
	
	xdist = (xgrid[0] - xgrid[1])**2
	ydist = (ygrid[0] - ygrid[1])**2

	mask = ones((tx.shape[0], ty.shape[0]))
	
	for i in range (mask.shape[0]):
		for j in range (mask.shape[1]):
			mask[i,j] = npmin(xdist[i] + ydist[j])

	mask = (mask < d2)*1
	mask = where(mask, mask, nan)

	return mask.T
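The nested loop in code example #10 computes, for every grid cell (tx[i], ty[j]), the squared distance to the nearest data point. Broadcasting gives the same array in one step; a sketch of that equivalent computation (the threshold and NaN masking from the snippet are omitted):

import numpy as np

def min_sq_dist(x, y, tx, ty):
    # d2[i, j, k] = (tx[i] - x[k])**2 + (ty[j] - y[k])**2, minimised over the points k.
    d2 = ((tx[:, None, None] - x[None, None, :]) ** 2
          + (ty[None, :, None] - y[None, None, :]) ** 2)
    return d2.min(axis=-1)

x = np.array([0.0, 1.0])
y = np.array([0.0, 1.0])
tx = np.linspace(0.0, 1.0, 3)
ty = np.linspace(0.0, 1.0, 3)
print(min_sq_dist(x, y, tx, ty))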
Code example #11
def find_min_max(min_max, iterable=list()):
    """
    Returns the value and index of maximum in the list
    Parameters
    ----------
        min_max: 'string'
            whether to find the 'min' or the 'max'
        iterable: list
            list to obtain maximum value
    Returns
    -------
        Value and index of maximum in the list
    """

    if min_max == 'max':
        value = npmax(iterable)
        index = argmax(iterable)
    elif min_max == 'min':
        value = npmin(iterable)
        index = argmin(iterable)
    else:
        value, index = None, None

    return value, index
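A quick usage sketch for code example #11, assuming the usual numpy aliases (the same names the snippet relies on):

# assumed aliases, matching the names used in the snippet above
from numpy import max as npmax, min as npmin, argmax, argmin

data = [3.2, -1.5, 7.8, 0.0]
print(find_min_max('max', data))    # value 7.8 at index 2
print(find_min_max('min', data))    # value -1.5 at index 1
print(find_min_max('mean', data))   # (None, None) for anything else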
Code example #12
File: specangel.py  Project: jmcourt/PANTHEON
tdg, tfg = meshgrid(td, tf)                                               # Making a grid from the time and frequency domains

specopt=''                                                                # Force spectrogram manipulation mode to trigger
speclog=False                                                             # Indicate that the spectrogram is not initially logarithmic
stitle=deftitle                                                           # Give an initial title
rtlabl=defzlabl                                                           # Give an initial key label

tmdbin=four_res                                                           # Initial time binning
frqbin=(tf[-1]-tf[0])/(len(tf)-1)                                         # Initial freq binning
tdgd=tdg                                                                  # Saving default grid [Time Domain Grid- Default]
tfgd=tfg
tdlm=td                                                                   # Saving 1D arrays to re-form grids [time-domain linear, modifiable]
tflm=tf
ogood=good                                                                # Save copy of the 'good' list

fudge=npmin(abs(fourgr[nonzero(fourgr)]))                                 # Obtain smallest nonzero value in array to add on when using logarithm to prevent log(0)

print 'Done!'
print ''


#-----Setting up Spectrogram Environment-------------------------------------------------------------------------------

es=True                                                                   # Start with errors on by default
saveplots=False
show_block=False

def spectrogram(td,tfc,fourgr,zlabel=defzlabl,title=deftitle):            # Defining the creation of the spectrogram plot 's' as a function for clarity later
   pl.close('Spectrogram')                                                # Close any previous spectrograms that may be open
   fg=pl.figure('Spectrogram')
   ax=fg.add_subplot(1,1,1)
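The fudge value in code example #12 is the smallest nonzero spectrogram entry, added on before taking logarithms so that empty bins never produce log(0), as the comment in the snippet states. A sketch of that pattern on a stand-in array:

import numpy as np

fourgr = np.array([[0.0, 2.0], [5.0, 0.0]])          # stand-in power array
fudge = np.min(np.abs(fourgr[np.nonzero(fourgr)]))   # smallest nonzero value -> 2.0
log_fourgr = np.log10(fourgr + fudge)                # finite everywhere, no log(0)
print(fudge, log_fourgr.min())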
Code example #13
File: numHess.py  Project: s0ap/arpmRes
def derivest(fun, x0, deriv=1, vectorized=True, methodorder=4, maxstep=100, rombergterms=2, style='central',
             stepratio=2.0000001):
    # DERIVEST: estimate the n'th derivative of fun at x0, provide an error estimate
    # usage: der,errest = DERIVEST(fun,x0)  # first derivative
    # usage: der,errest = DERIVEST(fun,x0,prop1,val1,prop2,val2,...)
    #
    # Derivest will perform numerical differentiation of an
    # analytical function provided in fun. It will not
    # differentiate a function provided as data. Use gradient
    # for that purpose, or differentiate a spline model.
    #
    # The methods used by DERIVEST are finite difference
    # approximations of various orders, coupled with a generalized
    # (multiple term) Romberg extrapolation. This also yields
    # the error estimate provided. DERIVEST uses a semi-adaptive
    # scheme to provide the best estimate that it can by its
    # automatic choice of a differencing interval.
    #
    #
    #
    # Arguments (input)
    #  fun - function to differentiate. May be an inline function,
    #        anonymous, or an m-file. fun will be sampled at a set
    #        of distinct points for each element of x0. If there are
    #        additional parameters to be passed into fun, then use of
    #        an anonymous function is recommended.
    #
    #        fun should be vectorized to allow evaluation at multiple
    #        locations at once. This will provide the best possible
    #        speed. IF fun is not so vectorized, then you MUST set
    #        vectorized property to no, so that derivest will
    #        then call your function sequentially instead.
    #
    #        Fun is assumed to return a result of the same
    #        shape as its input x0.
    #
    #  x0  - scalar, vector, or array of points at which to
    #        differentiate fun.
    #
    # Additional inputs must be in the form of property/value pairs.
    #  Properties are character strings. They may be shortened
    #  to the extent that they are unambiguous. Properties are
    #  not case sensitive. Valid property names are:
    #
    #  DerivativeOrder, MethodOrder, Style, RombergTerms
    #  FixedStep, MaxStep
    #
    #  All properties have default values, chosen as intelligently
    #  as I could manage. Values that are character strings may
    #  also be unambiguously shortened. The legal values for each
    #  property are:
    #
    #  DerivativeOrder - specifies the derivative order estimated.
    #        Must be a positive integer from the set [1,2,3,4].
    #
    #        DEFAULT: 1 (first derivative of fun)
    #
    #  MethodOrder - specifies the order of the basic method
    #        used for the estimation.
    #
    #        For central methods, must be a positive integer
    #        from the set [2,4].
    #
    #        For forward or backward difference methods,
    #        must be a positive integer from the set [1,2,3,4].
    #
    #        DEFAULT: 4 (a second order method)
    #
    #        Note: higher order methods will generally be more
    #        accurate, but may also suffer more from numerical
    #        problems.
    #
    #        Note: First order methods would usually not be
    #        recommended.
    #
    #  Style - specifies the style of the basic method
    #        used for the estimation. central, forward,
    #        or backwards difference methods are used.
    #
    #        Must be one of Central, forward, backward.
    #
    #        DEFAULT: Central
    #
    #        Note: Central difference methods are usually the
    #        most accurate, but sometimes one must not allow
    #        evaluation in one direction or the other.
    #
    #  RombergTerms - Allows the user to specify the generalized
    #        Romberg extrapolation method used, or turn it off
    #        completely.
    #
    #        Must be a positive integer from the set [0,1,2,3].
    #
    #        DEFAULT: 2 (Two Romberg terms)
    #
    #        Note: 0 disables the Romberg step completely.
    #
    #  FixedStep - Allows the specification of a fixed step
    #        size, preventing the adaptive logic from working.
    #        This will be considerably faster, but not necessarily
    #        as accurate as allowing the adaptive logic to run.
    #
    #        DEFAULT: []
    #
    #        Note: If specified, FixedStep will define the
    #        maximum excursion from x0 that will be used.
    #
    #  Vectorized - Derivest will normally assume that your
    #        function can be safely evaluated at multiple locations
    #        in a single call. This would minimize the overhead of
    #        a loop and additional function call overhead. Some
    #        functions are not easily vectorizable, but you may
    #        (if your matlab release is new enough) be able to use
    #        arrayfun to accomplish the vectorization.
    #
    #        When all else fails, set the vectorized property
    #        to no. This will cause derivest to loop over the
    #        successive function calls.
    #
    #        DEFAULT: yes
    #
    #
    #  MaxStep - Specifies the maximum excursion from x0 that
    #        will be allowed, as a multiple of x0.
    #
    #        DEFAULT: 100
    #
    #  StepRatio - Derivest uses a proportionally cascaded
    #        series of function evaluations, moving away from your
    #        point of evaluation. The StepRatio is the ratio used
    #        between sequential steps.
    #
    #        DEFAULT: 2.0000001
    #
    #        Note: use of a non-integer stepratio is intentional,
    #        to avoid integer multiples of the period of a periodic
    #        function under some circumstances.
    #
    #
    # Arguments: (output)
    #  der - derivative estimate for each element of x0
    #        der will have the same shape as x0.
    #
    #  errest - 95% uncertainty estimate of the derivative, such that
    #
    #        abs(der[j] - f'(x0[j])) < errest[j]
    #
    #  finaldelta - The final overall stepsize chosen by DERIVEST
    #

    par = namedtuple('par',
                     'DerivativeOrder MethodOrder Style RombergTerms FixedStep MaxStep StepRatio NominalStep Vectorized')

    par.DerivativeOrder = deriv
    par.MethodOrder = methodorder
    par.Style = style
    par.RombergTerms = rombergterms
    par.FixedStep = None
    par.MaxStep = maxstep
    # setting a default stepratio as a non-integer prevents
    # integer multiples of the initial point from being used.
    # In turn that avoids some problems for periodic functions.
    par.StepRatio = stepratio
    par.NominalStep = None
    par.Vectorized = vectorized

    par = check_params(par)

    # Was fun a string, or an inline/anonymous function?
    if fun is None:
        raise ValueError('fun was not supplied.')
    elif isinstance(fun, str):
        # a character function name
        fun = eval(fun)

    # no default for x0
    if x0 is None:
        raise ValueError('x0 was not supplied')

    par.NominalStep = max(x0, np.float128(0.02))

    # was a single point supplied?
    nx0 = x0.shape
    n = prod(nx0)

    # Set the steps to use.
    if par.FixedStep is None:
        # Basic sequence of steps, relative to a stepsize of 1.
        delta = par.MaxStep * par.StepRatio ** arange(0, -25 + -1, -1).T
        ndel = len(delta)
    else:
        # Fixed, user supplied absolute sequence of steps.
        ndel = 3 + ceil(par.DerivativeOrder / 2) + par.MethodOrder + par.RombergTerms
        if par.Style[0] == 'c':
            ndel = ndel - 2

        delta = par.FixedStep * par.StepRatio ** (-arange(ndel)).T

    # generate finite differencing rule in advance.
    # The rule is for a nominal unit step size, and will
    # be scaled later to reflect the local step size.
    fdarule = 1
    if par.Style == 'central':
        # for central rules, we will reduce the load by an
        # even or odd transformation as appropriate.
        if par.MethodOrder == 2:
            if par.DerivativeOrder == 1:
                # the odd transformation did all the work
                fdarule = 1
            elif par.DerivativeOrder == 2:
                # the even transformation did all the work
                fdarule = 2
            elif par.DerivativeOrder == 3:
                # the odd transformation did most of the work, but
                # we need to kill off the linear term
                fdarule = array([0, 1]).dot(pinv(fdamat(par.StepRatio, 1, 2)))
            elif par.DerivativeOrder == 4:
                # the even transformation did most of the work, but
                # we need to kill off the quadratic term
                fdarule = array([0, 1]).dot(pinv(fdamat(par.StepRatio, 2, 2)))

        else:
            # a 4th order method. We've already ruled out the 1st
            # order methods since these are central rules.
            if par.DerivativeOrder == 1:
                # the odd transformation did most of the work, but
                # we need to kill off the cubic term
                fdarule = array([[1, 0]]).dot(pinv(fdamat(par.StepRatio, 1, 2)))
            elif par.DerivativeOrder == 2:
                # the even transformation did most of the work, but
                # we need to kill off the quartic term
                fdarule = array([[1, 0]]).dot(pinv(fdamat(par.StepRatio, 2, 2)))
            elif par.DerivativeOrder == 3:
                # the odd transformation did much of the work, but
                # we need to kill off the linear & quintic terms
                fdarule = array([[0, 1, 0]]).dot(pinv(fdamat(par.StepRatio, 1, 3)))
            elif par.DerivativeOrder == 4:
                # the even transformation did much of the work, but
                # we need to kill off the quadratic and 6th order terms
                fdarule = array([[0, 1, 0]]).dot(pinv(fdamat(par.StepRatio, 2, 3)))
    elif par.Style in ['forward', 'backward']:
        # These two cases are identical, except at the very end,
        # where a sign will be introduced.

        # No odd/even trans, but we already dropped
        # off the constant term
        if par.MethodOrder == 1:
            if par.DerivativeOrder == 1:
                # an easy one
                fdarule = 1
            else:
                # [1:]4
                v = zeros((1, par.DerivativeOrder))
                v[par.DerivativeOrder] = 1
                fdarule = v / fdamat(par.StepRatio, 0, par.DerivativeOrder)

        else:
            # par.MethodOrder methods drop off the lower order terms,
            # plus terms directly above DerivativeOrder
            v = zeros((1, par.DerivativeOrder + par.MethodOrder - 1))
            v[par.DerivativeOrder] = 1
            fdarule = v / fdamat(par.StepRatio, 0, par.DerivativeOrder + par.MethodOrder - 1)

        # correct sign for the backward rule
        if par.Style[0] == 'b':
            fdarule = -fdarule

    # switch on par.style (generating fdarule)
    nfda = max(fdarule.shape)

    # will we need fun((x0))?
    if (remainder(par.DerivativeOrder, 2) == 0) or par.Style != 'central':
        if par.Vectorized:
            f_x0 = fun(x0)
        else:
            # not vectorized, so loop
            f_x0 = zeros((x0.shape))
            for j in range(x0.size):
                f_x0[j] = fun(x0[j])
    else:
        f_x0 = None

    # Loop over the elements of x0, reducing it to
    # a scalar problem. Sorry, vectorization is not
    # complete here, but this IS only a single loop.
    der = zeros((nx0))
    errest = der.copy()
    finaldelta = der.copy()
    for i in range(n):
        x0i = x0[i]
        h = par.NominalStep

        # a central, forward or backwards differencing rule?
        # f_del is the set of all the function evaluations we
        # will generate. For a central rule, it will have the
        # even or odd transformation built in.
        if par.Style[0] == 'c':
            # A central rule, so we will need to evaluate
            # symmetrically around x0i.
            if par.Vectorized:
                f_plusdel = fun(x0i + h * delta)
                f_minusdel = fun(x0i - h * delta)
            else:
                # not vectorized, so loop
                f_minusdel = zeros((delta.shape), dtype=np.float128)
                f_plusdel = zeros((delta.shape), dtype=np.float128)
                for j in range(delta.size):
                    f_plusdel[j] = fun(x0i + h * delta[j])
                    f_minusdel[j] = fun(x0i - h * delta[j])

            if par.DerivativeOrder in [1, 3]:
                # odd transformation
                f_del = (f_plusdel - f_minusdel) / 2
            else:
                f_del = (f_plusdel + f_minusdel) / 2 - f_x0[i]

        elif par.Style[0] == 'f':
            # forward rule
            # drop off the constant only
            if par.Vectorized:
                f_del = fun(x0i + h * delta) - f_x0[i]
            else:
                # not vectorized, so loop
                f_del = zeros((delta.shape))
                for j in range(delta.size):
                    f_del[j] = fun(x0i + h * delta[j]) - f_x0[i]
        else:
            # backward rule
            # drop off the constant only
            if par.Vectorized:
                f_del = fun(x0i - h * delta) - f_x0[i]
            else:
                # not vectorized, so loop
                f_del = zeros((delta.shape))
                for j in range(delta.size):
                    f_del[j] = fun(x0i - h * delta[j]) - f_x0[i]

        # check the size of f_del to ensure it was properly vectorized.
        f_del = f_del.flatten()
        if len(f_del) != ndel:
            raise ValueError('fun did not return the correct size result(fun must be vectorized).')

    # Apply the finite difference rule at each delta, scaling
    # as appropriate for delta and the requested DerivativeOrder.
    # First, decide how many of these estimates we will end up with.
    ne = ndel + 1 - nfda - par.RombergTerms

    # Form the initial derivative estimates from the chosen
    # finite difference method.
    der_init = vec2mat(f_del, ne, nfda) @ fdarule.T

    # scale to reflect the local delta
    der_init = der_init.flatten('F') / (h * delta[:ne]) ** par.DerivativeOrder

    # Each approximation that results is an approximation
    # of order par.DerivativeOrder to the desired derivative.
    # Additional (higher order, even or odd) terms in the
    # Taylor series also remain. Use a generalized (multi-term)
    # Romberg extrapolation to improve these estimates.
    if par.Style == 'central':
        rombexpon = 2 * arange(1, par.RombergTerms + 1) + par.MethodOrder - 2
    else:
        rombexpon = arange(1, par.RombergTerms + 1) + par.MethodOrder - 1

    der_romb, errors = rombextrap(par.StepRatio, der_init, rombexpon)

    # Choose which result to return

    # first, trim off the ends
    if par.FixedStep is None:
        # trim off the estimates at each end of the scale
        nest = len(der_romb)
        if par.DerivativeOrder in [1, 2]:
            trim = r_[1, 2, nest - 1, nest]
        elif par.DerivativeOrder == 3:
            trim = r_[arange(1, 5), nest + arange(-3, 1)]
        elif par.DerivativeOrder == 4:
            trim = r_[arange(1, 7), nest + arange(-5, 1)]

        der_romb, tags = sort(der_romb), np.argsort(der_romb)

        np.delete(der_romb, trim)
        np.delete(tags, trim)
        errors = errors[tags]
        trimdelta = delta[tags]

        errest[i], ind = npmin(errors), np.argmin(errors)

        finaldelta[i] = h * trimdelta[ind]
        der[i] = der_romb[ind]
    else:
        [errest[i], ind] = npmin(errors), np.argmin(errors)
        finaldelta[i] = h * delta[ind]
        der[i] = der_romb[ind]

    return der, errest, finaldelta
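The long header comment in code example #13 describes the method: central (or one-sided) finite differences evaluated over a geometric sequence of step sizes, followed by Romberg extrapolation and an error estimate. A heavily simplified sketch of only the first-derivative central-difference part, using the DEFAULT MaxStep and StepRatio quoted above (no Romberg step, no error estimate):

import numpy as np

def central_diff_estimates(fun, x0, max_step=100.0, step_ratio=2.0000001, n_steps=26):
    # Geometric sequence of step sizes, largest first, as in the snippet's delta.
    deltas = max_step * step_ratio ** -np.arange(n_steps)
    return (fun(x0 + deltas) - fun(x0 - deltas)) / (2.0 * deltas)

estimates = central_diff_estimates(np.sin, 0.5)
print(estimates[-1], np.cos(0.5))   # the smallest-step estimate approaches cos(0.5)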
Code example #14
    y_Hor_brownian[0,:,i] = norm.pdf(x_Hor[0,:,i], exp_brown[i], sigma_brown[0,i])  # Brownian approximation
    y_Hor_asympt[0,:,i] = norm.pdf(x_Hor[0,:,i], exp_asympt, sigma_asympt)  # Normal asymptotic approximation

    # figure

    lgrey = [0.8, 0.8, 0.8]  # light grey
    dgrey = [0.4, 0.4, 0.4]  # dark grey
    lblue = [0.27, 0.4, 0.9]  # light blue
    j_sel = 15  # selected MC simulations

    figure()

    # simulated path, mean and standard deviation
    plot(horiz_u[:i], X[0, :j_sel, :i].T, color=lgrey)
    xticks(range(15))
    xlim([npmin(horiz_u) - 0.01, 17])
    ylim([-0.03, 0.06])
    l1 = plot(horiz_u[:i], x[0,-1] + mu_u[0, :i], color='g',label='Expectation')
    l2 = plot(horiz_u[:i], x[0,-1] + mu_u[0, :i] + sigma_u[:i], color='r', label=' + / - st.deviation')
    plot(horiz_u[:i], x[0,-1] + mu_u[0, :i] - sigma_u[:i], color='r')

    # analytical pdf
    option = namedtuple('option', 'n_bins')
    option.n_bins = round(10*log(j_))
    y_hist, x_hist = HistogramFP(X[[0],:,i], pp_.T, option)
    scale = 200*sigma_u[i] / npmax(y_hist)
    y_hist = y_hist*scale
    shift_y_hist = horiz_u[i] + y_hist

    emp_pdf = plt.barh(x_hist[:-1], shift_y_hist[0]-horiz_u[i], height=x_hist[1]-x_hist[0],
                       left=horiz_u[i], facecolor=lgrey, edgecolor= lgrey, lw=2,label='Horizon pdf')
Code example #15
File: lcfit.py  Project: joshuawallace/astrobase
def legendre_fit_magseries(times,
                           mags,
                           errs,
                           period,
                           legendredeg=10,
                           sigclip=30.0,
                           plotfit=False,
                           magsarefluxes=False,
                           verbose=True):
    '''
    Fit an arbitrary-order Legendre series, via least squares, to the
    magnitude/flux time series. This is a series of the form:

        p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)

    where L_i's are Legendre polynomials (also called "Legendre functions of
    the first kind") and c_i's are the coefficients being fit.

    Args:

    legendredeg (int): n in the above equation. (I.e., if you give n=5, you
    will get 6 coefficients). This number should be much less than the number
    of data points you are fitting.

    sigclip (float): number of standard deviations away from the mean of the
    magnitude time-series from which to "clip" data points.

    magsarefluxes (bool): sets the ylabel and ylimits of plots for either
    magnitudes (False) or flux units (i.e. normalized to 1, in which case
    magsarefluxes should be set to True).

    Returns:

    returndict:
    {
        'fittype':'legendre',
        'fitinfo':{
            'legendredeg':legendredeg,
            'fitmags':fitmags,
            'fitepoch':magseriesepoch
        },
        'fitchisq':fitchisq,
        'fitredchisq':fitredchisq,
        'fitplotfile':None,
        'magseries':{
            'times':ptimes,
            'phase':phase,
            'mags':pmags,
            'errs':perrs,
            'magsarefluxes':magsarefluxes},
    }

    where `fitmags` is the values of the fit function interpolated onto
    magseries' `phase`.

    This function is mainly just a wrapper to
    numpy.polynomial.legendre.Legendre.fit.

    '''
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    phase, pmags, perrs, ptimes, mintime = (_get_phased_quantities(
        stimes, smags, serrs, period))

    if verbose:
        LOGINFO('fitting Legendre series with '
                'maximum Legendre polynomial order %s to '
                'mag series with %s observations, '
                'using period %.6f, folded at %.6f' %
                (legendredeg, len(pmags), period, mintime))

    # Least squares fit of Legendre polynomial series to the data. The window
    # and domain (see "Using the Convenience Classes" in the numpy
    # documentation) are handled automatically, scaling the times to a minimal
    # domain in [-1,1], in which Legendre polynomials are a complete basis.

    p = Legendre.fit(phase, pmags, legendredeg)
    coeffs = p.coef
    fitmags = p(phase)

    # Now compute the chisq and red-chisq.

    fitchisq = npsum(((fitmags - pmags) * (fitmags - pmags)) / (perrs * perrs))

    nparams = legendredeg + 1
    fitredchisq = fitchisq / (len(pmags) - nparams - 1)

    if verbose:
        LOGINFO('Legendre fit done. chisq = %.5f, reduced chisq = %.5f' %
                (fitchisq, fitredchisq))

    # figure out the time of light curve minimum (i.e. the fit epoch)
    # this is when the fit mag is maximum (i.e. the faintest)
    # or if magsarefluxes = True, then this is when fit flux is minimum
    if not magsarefluxes:
        fitmagminind = npwhere(fitmags == npmax(fitmags))
    else:
        fitmagminind = npwhere(fitmags == npmin(fitmags))
    magseriesepoch = ptimes[fitmagminind]

    # assemble the returndict
    returndict = {
        'fittype': 'legendre',
        'fitinfo': {
            'legendredeg': legendredeg,
            'fitmags': fitmags,
            'fitepoch': magseriesepoch,
            'finalparams': coeffs,
        },
        'fitchisq': fitchisq,
        'fitredchisq': fitredchisq,
        'fitplotfile': None,
        'magseries': {
            'times': ptimes,
            'phase': phase,
            'mags': pmags,
            'errs': perrs,
            'magsarefluxes': magsarefluxes
        }
    }

    # make the fit plot if required
    if plotfit and isinstance(plotfit, str):

        _make_fit_plot(phase,
                       pmags,
                       perrs,
                       fitmags,
                       period,
                       mintime,
                       magseriesepoch,
                       plotfit,
                       magsarefluxes=magsarefluxes)

        returndict['fitplotfile'] = plotfit

    return returndict
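As the docstring in code example #15 notes, the fitting itself is delegated to numpy.polynomial.legendre.Legendre.fit, which handles the rescaling of the phases onto [-1, 1]. A minimal sketch of that call on synthetic phased data:

import numpy as np
from numpy.polynomial.legendre import Legendre

phase = np.linspace(0.0, 1.0, 500)
pmags = 12.0 + 0.3 * np.cos(2.0 * np.pi * phase)   # synthetic phased magnitudes

legendredeg = 10
p = Legendre.fit(phase, pmags, legendredeg)   # least-squares Legendre series fit
fitmags = p(phase)                            # fit evaluated back on the phases
print(p.coef.shape, float(np.max(np.abs(fitmags - pmags))))   # 11 coefficients, small residual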
Code example #16
File: S_ProjectionVGSub.py  Project: s0ap/arpmRes
f, ax = subplots(3, 1)

# figure settings
dgrey = [0.5, 0.5, 0.5]
color = {}
color[0] = 'b'
color[1] = [.9, .35, 0]
color[2] = 'm'
color[3] = 'g'
color[4] = 'c'
color[5] = 'y'
t = r_[arange(-s_, 1), t_j[1:]]

plt.sca(ax[0])
m = min([
    npmin(X) * 0.91,
    npmin(pnl[t_ - s_:]) * 0.91, pnl[-1] - 3 * sigma_tau / 2
])
M = max([
    npmax(X) * 1.1,
    npmax(pnl[t_ - s_:]) * 1.1, expectation + 1.2 * sigma_tau
])
plt.axis([-s_, tau, m, M])
xlabel('time (days)')
ylabel('Risk driver')
xticks(arange(-s_, tau + 1))
plt.grid(False)
title('Variance Gamma process (subordinated Brownian motion)')
for j in range(j_):
    plot(t_j, X[j, :], color=color[j], lw=2)
Code example #17
File: tad_cmo.py  Project: 3DGenomes/TADbit
def optimal_cmo(hic1, hic2, num_v=None, max_num_v=None, verbose=False,
                method='frobenius', long_nw=True, long_dist=True):
    """
    Calculates the optimal contact map overlap between 2 matrices

    TODO: make the selection of number of eigen vectors automatic or relying on
          the summed contribution (e.g. select the EVs that sum 80% of the info)

    .. note::

      penalty is defined as the minimum value of the pre-scoring matrix.
    
    :param hic1: first matrix to align
    :param hic2: second matrix to align
    :param None num_v: number of eigen vectors to consider, max is:
        max(min(len(hic1), len(hic2)))
    :param None max_num_v: maximum number of eigen vectors to consider.
    :param score method: distance function to use as alignment score. if 'score'
       distance will be the result of the last value of the Needleman-Wunsch
       algorithm. If 'frobenius' a modification of the Frobenius distance will
       be used

    :returns: two lists, one per aligned matrix, plus a dict summarizing the
        goodness of the alignment with the distance between matrices, their 
        Spearman correlation Rho value and pvalue.
    """

    l_p1 = len(hic1)
    l_p2 = len(hic2)
    num_v = num_v or min(l_p1, l_p2)
    if max_num_v:
        num_v = min(max_num_v, num_v)
    if num_v > l_p1 or num_v > l_p2:
        raise Exception('\nnum_v should be at most %s\n' % (min(l_p1, l_p2)))
    val1, vec1 = eigh(hic1)
    if npsum(vec1).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" +
                        '%s\n\n%s' % (hic1, vec1))
    val2, vec2 = eigh(hic2)
    if npsum(vec2).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" +
                        '%s\n\n%s' % (hic2, vec2))
    #
    val1 = array([sqrt(abs(v)) for v in val1])
    val2 = array([sqrt(abs(v)) for v in val2])
    idx = val1.argsort()[::-1]
    val1 = val1[idx]
    vec1 = vec1[idx]
    idx = val2.argsort()[::-1]
    val2 = val2[idx]
    vec2 = vec2[idx]
    #
    vec1 = array([val1[i] * vec1[:, i] for i in xrange(num_v)]).transpose()
    vec2 = array([val2[i] * vec2[:, i] for i in xrange(num_v)]).transpose()
    nearest = float('inf')
    nw = core_nw_long if long_nw else core_nw
    dister = _get_dist_long if long_dist else _get_dist
    best_alis = []
    for num in xrange(1, num_v + 1):
        for factors in product([1, -1], repeat=num):
            vec1p = factors * vec1[:, :num]
            vec2p = vec2[:, :num]
            p_scores = _prescoring(vec1p, vec2p, l_p1, l_p2)
            penalty = min([npmin(p_scores)] + [-npmax(p_scores)])
            align1, align2, dist = nw(p_scores, penalty, l_p1, l_p2)
            try:
                if method == 'frobenius':
                    dist = dister(align1, align2, hic1, hic2)
                else:
                    dist *= -1
                if dist < nearest:
                    if not penalty:
                        for scr in p_scores:
                            print ' '.join(['%7s' % (round(y, 2)) for y in scr])
                    nearest = dist
                    best_alis = [align1, align2]
                    best_pen = penalty
            except IndexError as e:
                print e
    try:
        align1, align2 = best_alis
    except ValueError:
        pass
    if verbose:
        print '\n Alignment (score = %s):' % (nearest)
        print 'TADS 1: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align1])
        print 'TADS 2: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align2])
    rho, pval = _get_score(align1, align2, hic1, hic2)
    # print best_pen
    if not best_pen:
        print 'WARNING: penalty NULL!!!\n\n'
    return align1, align2, {'dist': nearest, 'rho': rho, 'pval': pval}
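Code example #17 represents each Hi-C matrix by its leading eigenvectors weighted by the square roots of the absolute eigenvalues, sorted from largest to smallest, and then aligns the two representations. A sketch of just that decomposition step, assuming the eigenvectors are taken as the columns returned by eigh:

import numpy as np

def scaled_eigenvectors(hic, num_v):
    val, vec = np.linalg.eigh(hic)        # symmetric input -> real spectrum
    val = np.sqrt(np.abs(val))
    order = val.argsort()[::-1]           # largest weight first
    val, vec = val[order], vec[:, order]
    # Column i is the i-th eigenvector scaled by sqrt(|lambda_i|).
    return vec[:, :num_v] * val[:num_v]

hic = np.array([[2.0, 1.0, 0.0],
                [1.0, 2.0, 1.0],
                [0.0, 1.0, 2.0]])
print(scaled_eigenvectors(hic, 2).shape)   # (3, 2)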
Code example #18
 def psi_f(x, y):
     x = npmax(npmin(x, self.thet_max), self.thet_min)
     return psi_interp(x, y)
Code example #19
options.i_c = range(5)
options.l_c = [0, 2, 5]
c2_hom = HomCl(c2_bar, options)[0]
# -

# ## Create figure

# +
c_gray = [0.8, 0.8, 0.8]

gray_mod = c_gray

tick = l_s[:-1] + diff(l_s) / 2
rho2_f = c2_sec - eye(i_)
c_max = npmax(rho2_f)
c_min = npmin(rho2_f)

f, ax = plt.subplots(1, 2)
plt.sca(ax[0])
ytlab = arange(5)
cax = imshow(c2_bar, aspect='equal')
cbar = f.colorbar(cax,
                  ticks=linspace(c_min, c_max, 11),
                  format='%.2f',
                  shrink=0.53)
plt.grid(False)
# colormap gray
xticks(arange(5), arange(1, 6))
yticks(arange(5), arange(1, 6))
title('Starting Correlation')
plt.sca(ax[1])
Code example #20
Theta = zeros((k_, j_))

j = 1
while j < j_:
    Theta_tilde = 2 * rand(k_,
                           1) - 1  # generate the uninformative correlations
    k = 0
    for i in range(i_):  # build the candidate matrix
        for m in range(i + 1, i_):
            C2[i, m, j] = Theta_tilde[k]
            C2[m, i, j] = C2[i, m, j]
            k = k + 1

    lam[:, j], _ = eig(C2[:, :, j])  # compute eigenvalues to check positivity

    if npmin(lam[:, j]) > 0:  # check positivity
        Theta[:, [j]] = Theta_tilde  # store the correlations
        j = j + 1
# -

# ## Create figures

# +
# titles
names = {}
k = 0
for i in range(1, i_ + 1):
    for m in range(i + 1, i_ + 1):
        names[k] = r'$\Theta_{%d,%d}$' % (i, m)
        k = k + 1
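Code example #20 is rejection sampling: draw the off-diagonal correlations uniformly in [-1, 1], build the symmetric candidate matrix (unit diagonal), and keep it only if every eigenvalue is positive, i.e. if it is a valid correlation matrix. A compact sketch of one accept/reject draw:

import numpy as np

def draw_correlation_matrix(i_, rng):
    while True:
        C2 = np.eye(i_)
        iu = np.triu_indices(i_, k=1)
        theta = rng.uniform(-1.0, 1.0, size=len(iu[0]))   # uninformative correlations
        C2[iu] = theta
        C2[(iu[1], iu[0])] = theta                        # symmetrise
        if np.linalg.eigvalsh(C2).min() > 0:              # positive definite -> accept
            return C2, theta

C2, theta = draw_correlation_matrix(3, np.random.default_rng(0))
print(np.allclose(C2, C2.T), np.linalg.eigvalsh(C2).min() > 0)   # True True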
Code example #21
def plot_phased_mag_series(times,
                           mags,
                           period,
                           magsarefluxes=False,
                           errs=None,
                           normto='globalmedian',
                           normmingap=4.0,
                           epoch='min',
                           outfile=None,
                           sigclip=30.0,
                           phasewrap=True,
                           phasesort=True,
                           phasebin=None,
                           plotphaselim=[-0.8, 0.8],
                           fitknotfrac=0.01,
                           yrange=None,
                           plotdpi=100):
    '''This plots a phased magnitude time series using the period provided.

    If epoch is None, uses the min(times) as the epoch.

    If epoch is a string 'min', then fits a cubic spline to the phased light
    curve using min(times), finds the magnitude minimum from the fitted light
    curve, then uses the corresponding time value as the epoch.

    If epoch is a float, then uses that directly to phase the light curve and as
    the epoch of the phased mag series plot.

    If outfile is none, then plots to matplotlib interactive window. If outfile
    is a string denoting a filename, uses that to write a png/eps/pdf figure.

    plotdpi sets the DPI for PNG plots.

    '''

    # sigclip the magnitude timeseries
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    # check if we need to normalize
    if normto is not False:
        stimes, smags = normalize_magseries(stimes,
                                            smags,
                                            normto=normto,
                                            magsarefluxes=magsarefluxes,
                                            mingap=normmingap)

    # figure out the epoch, if it's None, use the min of the time
    if epoch is None:
        epoch = stimes.min()

    # if the epoch is 'min', then fit a spline to the light curve phased
    # using the min of the time, find the fit mag minimum and use the time for
    # that as the epoch
    elif isinstance(epoch, str) and epoch == 'min':

        try:
            spfit = spline_fit_magseries(stimes,
                                         smags,
                                         serrs,
                                         period,
                                         knotfraction=fitknotfrac)
            epoch = spfit['fitinfo']['fitepoch']
            if len(epoch) != 1:
                epoch = epoch[0]
        except Exception as e:
            LOGEXCEPTION('spline fit failed, using min(times) as epoch')
            epoch = npmin(stimes)

    # now phase (and optionally, phase bin the light curve)
    if errs is not None:

        # phase the magseries
        phasedlc = phase_magseries_with_errs(stimes,
                                             smags,
                                             serrs,
                                             period,
                                             epoch,
                                             wrap=phasewrap,
                                             sort=phasesort)
        plotphase = phasedlc['phase']
        plotmags = phasedlc['mags']
        ploterrs = phasedlc['errs']

        # if we're supposed to bin the phases, do so
        if phasebin:

            binphasedlc = phase_bin_magseries_with_errs(plotphase,
                                                        plotmags,
                                                        ploterrs,
                                                        binsize=phasebin)
            binplotphase = binphasedlc['binnedphases']
            binplotmags = binphasedlc['binnedmags']
            binploterrs = binphasedlc['binnederrs']

    else:

        # phase the magseries
        phasedlc = phase_magseries(stimes,
                                   smags,
                                   period,
                                   epoch,
                                   wrap=phasewrap,
                                   sort=phasesort)
        plotphase = phasedlc['phase']
        plotmags = phasedlc['mags']
        ploterrs = None

        # if we're supposed to bin the phases, do so
        if phasebin:

            binphasedlc = phase_bin_magseries(plotphase,
                                              plotmags,
                                              binsize=phasebin)
            binplotphase = binphasedlc['binnedphases']
            binplotmags = binphasedlc['binnedmags']
            binploterrs = None

    # finally, make the plots

    # initialize the plot
    fig = plt.figure()
    fig.set_size_inches(7.5, 4.8)

    if phasebin:
        plt.errorbar(plotphase,
                     plotmags,
                     fmt='o',
                     color='#B2BEB5',
                     yerr=ploterrs,
                     markersize=3.0,
                     markeredgewidth=0.0,
                     ecolor='#B2BEB5',
                     capsize=0)
        plt.errorbar(binplotphase,
                     binplotmags,
                     fmt='bo',
                     yerr=binploterrs,
                     markersize=5.0,
                     markeredgewidth=0.0,
                     ecolor='#B2BEB5',
                     capsize=0)

    else:
        plt.errorbar(plotphase,
                     plotmags,
                     fmt='ko',
                     yerr=ploterrs,
                     markersize=3.0,
                     markeredgewidth=0.0,
                     ecolor='#B2BEB5',
                     capsize=0)

    # make a grid
    plt.grid(color='#a9a9a9',
             alpha=0.9,
             zorder=0,
             linewidth=1.0,
             linestyle=':')

    # make lines for phase 0.0, 0.5, and -0.5
    plt.axvline(0.0, alpha=0.9, linestyle='dashed', color='g')
    plt.axvline(-0.5, alpha=0.9, linestyle='dashed', color='g')
    plt.axvline(0.5, alpha=0.9, linestyle='dashed', color='g')

    # fix the ticks to use no offsets
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)

    # get the yrange
    if yrange and isinstance(yrange, list) and len(yrange) == 2:
        ymin, ymax = yrange
    else:
        ymin, ymax = plt.ylim()

    # set the y axis labels and range
    if not magsarefluxes:
        plt.ylim(ymax, ymin)
        yaxlabel = 'magnitude'
    else:
        plt.ylim(ymin, ymax)
        yaxlabel = 'flux'

    # set the x axis limit
    if not plotphaselim:
        plot_xlim = plt.xlim()
        plt.xlim((npmin(plotphase) - 0.1, npmax(plotphase) + 0.1))
    else:
        plt.xlim((plotphaselim[0], plotphaselim[1]))

    # set up the axis labels and plot title
    plt.xlabel('phase')
    plt.ylabel(yaxlabel)
    plt.title('period: %.6f d - epoch: %.6f' % (period, epoch))

    LOGINFO('using period: %.6f d and epoch: %.6f' % (period, epoch))

    # make the figure
    if outfile and isinstance(outfile, str):

        if outfile.endswith('.png'):
            plt.savefig(outfile, bbox_inches='tight', dpi=plotdpi)
        else:
            plt.savefig(outfile, bbox_inches='tight')
        plt.close()
        return period, epoch, os.path.abspath(outfile)

    elif dispok:

        plt.show()
        plt.close()
        return period, epoch

    else:

        LOGWARNING('no output file specified and no $DISPLAY set, '
                   'saving to magseries-phased-plot.png in current directory')
        outfile = 'magseries-phased-plot.png'
        plt.savefig(outfile, bbox_inches='tight', dpi=plotdpi)
        plt.close()
        return period, epoch, os.path.abspath(outfile)
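Both code example #21 and code example #22 rest on phase-folding: each observation time is mapped to its phase within the period, measured from the chosen epoch, and optionally wrapped to (-0.5, 0.5]. The phase_magseries helpers themselves are not shown on this page; a minimal sketch of the folding under that common convention (an approximation, not the astrobase implementation):

import numpy as np

def fold_times(times, period, epoch, wrap=True, sort=True):
    phase = ((times - epoch) / period) % 1.0                   # phase in [0, 1)
    if wrap:
        phase = np.where(phase > 0.5, phase - 1.0, phase)      # wrap to (-0.5, 0.5]
    return np.sort(phase) if sort else phase

times = np.array([0.0, 0.3, 1.1, 2.75])
print(fold_times(times, period=1.0, epoch=0.0))                # -0.25, 0.0, 0.1, 0.3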
Code example #22
File: plotbase.py  Project: waqasbhatti/hats19to21
def plot_phased_mag_series(times,
                           mags,
                           period,
                           errs=None,
                           epoch='min',
                           outfile=None,
                           sigclip=30.0,
                           phasewrap=True,
                           phasesort=True,
                           phasebin=None,
                           plotphaselim=[-0.8,0.8],
                           yrange=None):
    '''This plots a phased magnitude time series using the period provided.

    If epoch is None, uses the min(times) as the epoch.

    If epoch is a string 'min', then fits a cubic spline to the phased light
    curve using min(times), finds the magnitude minimum from the fitted light
    curve, then uses the corresponding time value as the epoch.

    If epoch is a float, then uses that directly to phase the light curve and as
    the epoch of the phased mag series plot.

    If outfile is none, then plots to matplotlib interactive window. If outfile
    is a string denoting a filename, uses that to write a png/eps/pdf figure.

    '''

    if errs is not None:

        # remove nans
        find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
        ftimes, fmags, ferrs = times[find], mags[find], errs[find]

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = ferrs[sigind]

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = ferrs

    else:

        # remove nans
        find = npisfinite(times) & npisfinite(mags)
        ftimes, fmags, ferrs = times[find], mags[find], None

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = None

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = None


    # figure out the epoch, if it's None, use the min of the time
    if epoch is None:

        epoch = npmin(stimes)

    # if the epoch is 'min', then fit a spline to the light curve phased
    # using the min of the time, find the fit mag minimum and use the time for
    # that as the epoch
    elif isinstance(epoch,str) and epoch == 'min':

        spfit = spline_fit_magseries(stimes, smags, serrs, period)
        epoch = spfit['fitepoch']


    # now phase (and optionally, phase bin the light curve)
    if errs is not None:

        # phase the magseries
        phasedlc = phase_magseries_with_errs(stimes,
                                             smags,
                                             serrs,
                                             period,
                                             epoch,
                                             wrap=phasewrap,
                                             sort=phasesort)
        plotphase = phasedlc['phase']
        plotmags = phasedlc['mags']
        ploterrs = phasedlc['errs']

        # if we're supposed to bin the phases, do so
        if phasebin:

            binphasedlc = phase_bin_magseries_with_errs(plotphase,
                                                        plotmags,
                                                        ploterrs,
                                                        binsize=phasebin)
            plotphase = binphasedlc['binnedphases']
            plotmags = binphasedlc['binnedmags']
            ploterrs = binphasedlc['binnederrs']

    else:

        # phase the magseries
        phasedlc = phase_magseries(stimes,
                                   smags,
                                   period,
                                   epoch,
                                   wrap=phasewrap,
                                   sort=phasesort)
        plotphase = phasedlc['phase']
        plotmags = phasedlc['mags']
        ploterrs = None

        # if we're supposed to bin the phases, do so
        if phasebin:

            binphasedlc = phase_bin_magseries(plotphase,
                                              plotmags,
                                              binsize=phasebin)
            plotphase = binphasedlc['binnedphases']
            plotmags = binphasedlc['binnedmags']
            ploterrs = None


    # finally, make the plots

    # initialize the plot
    fig = plt.figure()
    fig.set_size_inches(7.5,4.8)

    plt.errorbar(plotphase, plotmags, fmt='bo', yerr=ploterrs,
                 markersize=2.0, markeredgewidth=0.0, ecolor='#B2BEB5',
                 capsize=0)

    # make a grid
    plt.grid(color='#a9a9a9',
             alpha=0.9,
             zorder=0,
             linewidth=1.0,
             linestyle=':')

    # make lines for phase 0.0, 0.5, and -0.5
    plt.axvline(0.0,alpha=0.9,linestyle='dashed',color='g')
    plt.axvline(-0.5,alpha=0.9,linestyle='dashed',color='g')
    plt.axvline(0.5,alpha=0.9,linestyle='dashed',color='g')

    # fix the ticks to use no offsets
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)

    # get the yrange
    if yrange and isinstance(yrange,list) and len(yrange) == 2:
        ymin, ymax = yrange
    else:
        ymin, ymax = plt.ylim()
    plt.ylim(ymax,ymin)

    # set the x axis limit
    if not plotphaselim:
        plot_xlim = plt.xlim()
        plt.xlim((npmin(plotphase)-0.1,
                  npmax(plotphase)+0.1))
    else:
        plt.xlim((plotphaselim[0],plotphaselim[1]))

    # set up the labels
    plt.xlabel('phase')
    plt.ylabel('magnitude')
    plt.title('using period: %.6f d and epoch: %.6f' % (period, epoch))

    # make the figure
    if outfile and isinstance(outfile, str):

        plt.savefig(outfile,bbox_inches='tight')
        plt.close()
        return os.path.abspath(outfile)

    else:

        plt.show()
        plt.close()
        return
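The inlined sigma-clipping in code example #22 uses the robust scatter estimate 1.483 x MAD (the median absolute deviation rescaled to match a Gaussian standard deviation) and keeps points within sigclip of the median. As a standalone sketch:

import numpy as np

def sigclip_mask(mags, sigclip=30.0):
    mags = np.asarray(mags, dtype=float)
    finite = np.isfinite(mags)
    median_mag = np.median(mags[finite])
    stddev_mag = 1.483 * np.median(np.abs(mags[finite] - median_mag))   # MAD-based sigma
    return finite & (np.abs(mags - median_mag) < sigclip * stddev_mag)

mags = np.array([10.0, 10.1, 9.9, 25.0, np.nan])
print(sigclip_mask(mags, sigclip=5.0))   # the 25.0 outlier and the NaN are rejected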
Code example #23
File: yarn_cs.py  Project: axelvonderheide/scratch
def read_yarn_structure():

    slice_point_list = []
    slice_radius_list = []
    slice_len_list = []

    #cut_off_start = zeros( ( n_slices, ), dtype='int' )
    #cut_off_start[ 1: ] += 0

    for slice_idx, data_file in zip( slice_range, slice_files ):

        #data_file = join( data_dir, 'V1Schnitt%d.txt' % slice_idx )

        print 'reading data_file'

        points = loadtxt( data_file ,
                          skiprows=1,
                          usecols=( 1, 2, 3 ) )

        y = points[ :, 0]
        z = points[ :, 1]
        x = ones_like( y ) * slice_idx * slice_distance
        r = points[ :, 2]

        slice_point_list.append( c_[ x, y, z ] )
        slice_radius_list.append( r )
        slice_len_list.append( points.shape[0] )

    lens_arr = array( slice_len_list )
    print 'slice lens', lens_arr
    offset_arr = cumsum( lens_arr )
    slice_offset_arr = zeros_like( offset_arr )
    slice_offset_arr[1:] = offset_arr[:-1]
    print 'slice offsets', slice_offset_arr

    data_file = join( data_dir, 'connectivity.txt' )
    filam_connect_arr = loadtxt( data_file )
    filam_connect_arr = filam_connect_arr[ npmin( filam_connect_arr, axis=1 ) != -1. ]
    filam_connect_arr = filam_connect_arr[ npmin( filam_connect_arr, axis=1 ) != 0.0 ]
    print filam_connect_arr.shape

    print filam_connect_arr.shape
    #print slice_offset_arr.shape

    fil_map = array( filam_connect_arr + slice_offset_arr, dtype='int' )
    
    points = vstack( slice_point_list )
    radius = hstack( slice_radius_list )

    print points.shape
    print max( fil_map.flatten() )

    p = points[ fil_map.flatten() ]
    r = radius[ fil_map.flatten() ]

    mlab.plot3d( p[:, 0], p[:, 1], p[:, 2], r,
                 tube_radius=20, tube_sides=20, colormap='Spectral' )#
    
    offset = array( [0, 3, 6] )
    cells = array( [10, 4000, 20, 5005, 20, 4080, 4000, 20, 404 ] )

#    line_type = tvtk.Line().cell_type # VTKLine == 10
#    cell_types = array( [line_type] )
#    # Create the array of cells unambiguously.
#    cell_array = tvtk.CellArray()
#    cell_array.set_cells( 3, cells )

    # Now create the UG.
    ug = tvtk.UnstructuredGrid( points=points )
    # Now just set the cell types and reuse the ug locations and cells.
#    ug.set_cells( cell_types, offset, cell_array )
    ug.point_data.scalars = radius
    ug.point_data.scalars.name = 'radius'
    ug.save()
    return ug
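
The offset bookkeeping above (a cumsum of per-slice point counts, shifted by one) is what turns slice-local indices from the connectivity file into indices of the stacked global point array. A small self-contained illustration with made-up slice sizes:

from numpy import array, cumsum, zeros_like

slice_lens = array([4, 3, 5])             # points per slice (made-up)
offsets = cumsum(slice_lens)              # [4, 7, 12]
slice_offsets = zeros_like(offsets)
slice_offsets[1:] = offsets[:-1]          # [0, 4, 7]

# one connectivity row: per slice, the local index of a filament's point
local_idx = array([2, 0, 3])
global_idx = local_idx + slice_offsets    # indices into the vstacked points
print(global_idx)                         # [ 2  4 10]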
コード例 #24
0
ファイル: yarn_cs.py プロジェクト: axelvonderheide/scratch
    """Given a dirname, returns a list of all slice files."""
    result = []
    group = []
    paths = os.listdir( datadir )  # list of paths in that dir
    for fname in paths:
        match = re.search( r'\w+Schnitt(\d+).txt', fname )
        if match:
            result.append( os.path.abspath( os.path.join( datadir, fname ) ) )
            group.append( int( match.group( 1 ) ) )
    return zip( *sorted( zip( group, result ) ) ) 
    #return sort( result ), group
    
num_slice, slice_files = get_slice_files( data_dir )

n_slices = len( slice_files )
start_slice = npmin( num_slice )
slice_range = range( start_slice, start_slice + n_slices )
slice_distance = 500 # micrometers


コード例 #25
0
ファイル: S_FlexProbBootstrap.py プロジェクト: s0ap/arpmRes
             1.05 * npmax(prob_bs[q, :]),
             TEXT,
             horizontalalignment='left')

    # scatter colormap and colors
    CM, C = ColorCodedFP(prob_bs[[q], :], 10**-20, npmax(prob_bs[:5, :]),
                         arange(0, 0.95, 0.05), 0, 1, [1, 0])

    # Time series of S&P500 log-rets
    ax = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
    scatter(date_dt, epsi, 15, c=C, marker='.', cmap=CM)
    xlim([min(date_dt), max(date_dt)])
    xticks(date_dt[date_tick])
    plt.gca().xaxis.set_major_formatter(myFmt)
    ax.set_facecolor('white')
    ylim([1.1 * npmin(epsi), 1.1 * npmax(epsi)])
    ylabel('returns')
    title('S&P')

    # HFP histogram
    plt.subplot2grid((3, 3), (1, 2), rowspan=2)
    plt.gca().set_facecolor('white')
    plt.barh(x[q][:-1],
             p[q][0],
             height=x[q][1] - x[q][0],
             facecolor=[0.7, 0.7, 0.7],
             edgecolor=[0.5, 0.5, 0.5])
    xlim([0, 1.05 * npmax(p[q])])
    xticks([])
    yticks([]), ylim([1.1 * npmin(epsi), 1.1 * npmax(epsi)])
    xlabel('probability')
コード例 #26
0
ファイル: hic_data.py プロジェクト: lelou6666/TADbit
    def find_compartments(self, crms=None, savefig=None, savedata=None,
                          show=False, **kwargs):
        """
        Search for A/B compartments in each chromosome of the Hi-C matrix.
        The Hi-C matrix is normalized by the number of interactions expected at
        a given distance, and by visibility (one iteration of ICE). A
        correlation matrix is then calculated from this normalized matrix, and
        its first eigenvector is used to identify compartments: changes in sign
        mark boundaries between compartments.
        The result is stored as a dictionary of compartment boundaries, keyed by
        chromosome name (a standalone sketch of the sign-change step follows
        this example).
        
        :param 99 perc_zero: to filter bad columns
        :param 0.05 signal_to_noise: to calculate expected interaction counts,
           if not enough reads are observed at a given distance the observations
           of the distance+1 are summed. a signal to noise ratio of < 0.05
           corresponds to > 400 reads.
        :param None crms: only runs these given list of chromosomes
        :param None savefig: path to a directory to store matrices with
           compartment predictions, one image per chromosome, stored under
           'chromosome-name.png'.
        :param False show: show the plot
        :param None savedata: path to a new file to store compartment
           predictions, one file only.
        :param -1 vmin: for the color scale of the plotted map
        :param 1 vmax: for the color scale of the plotted map

        TODO: this is really slow...

        Notes: building the distance matrix from the number of interactions,
               instead of the mean correlation, generally gives worse results.
        
        """
        if not self.bads:
            if kwargs.get('verbose', True):
                print 'Filtering bad columns (perc_zero=%d)' % kwargs.get('perc_zero', 99)
            self.filter_columns(perc_zero=kwargs.get('perc_zero', 99),
                                by_mean=False, silent=True)
        if not self.expected:
            if kwargs.get('verbose', True):
                print 'Normalizing by expected values'
            self.expected = expected(self, bads=self.bads, **kwargs)
        if not self.bias:
            if kwargs.get('verbose', True):
                print 'Normalizing by ICE (1 round)'
            self.normalize_hic(iterations=0)
        if savefig:
            mkdir(savefig)

        cmprts = {}
        for sec in self.section_pos:
            if crms and sec not in crms:
                continue
            if kwargs.get('verbose', False):
                print 'Processing chromosome', sec
                warn('Processing chromosome %s' % (sec))
            matrix = [[(float(self[i,j]) / self.expected[abs(j-i)]
                       / self.bias[i] / self.bias[j])
                      for i in xrange(*self.section_pos[sec])
                       if not i in self.bads]
                     for j in xrange(*self.section_pos[sec])
                      if not j in self.bads]
            if not matrix: # MT chromosome will fall there
                warn('Chromosome %s is probably MT :)' % (sec))
                cmprts[sec] = []
                continue
            for i in xrange(len(matrix)):
                for j in xrange(i+1, len(matrix)):
                    matrix[i][j] = matrix[j][i]
            matrix = [list(m) for m in corrcoef(matrix)]
            try:
                # This eigsh call is very fast: only ask for one eigenvector
                _, evect = eigsh(array(matrix), k=1)
            except LinAlgError:
                warn('Chromosome %s too small to compute PC1' % (sec))
                cmprts[sec] = [] # Y chromosome, or so...
                continue
            first = list(evect[:, -1])
            beg, end = self.section_pos[sec]
            bads = [k - beg for k in self.bads if beg <= k <= end]
            _ = [first.insert(b, 0) for b in bads]
            _ = [matrix.insert(b, [float('nan')] * len(matrix[0]))
                 for b in bads]
            _ = [matrix[i].insert(b, float('nan'))
                 for b in bads for i in xrange(len(first))]
            breaks = [0] + [i for i, (a, b) in
                            enumerate(zip(first[1:], first[:-1]))
                            if a * b < 0] + [len(first)]
            breaks = [{'start': b, 'end': breaks[i+1]}
                      for i, b in enumerate(breaks[: -1])]
            cmprts[sec] = breaks
            
            # calculate compartment internal density
            for k, cmprt in enumerate(cmprts[sec]):
                beg = self.section_pos[sec][0]
                beg1, end1 = cmprt['start'] + beg, cmprt['end'] + beg
                sec_matrix = [(self[i,j] / self.expected[abs(j-i)]
                               / self.bias[i] / self.bias[j])
                              for i in xrange(beg1, end1) if not i in self.bads
                              for j in xrange(i, end1) if not j in self.bads]
                try:
                    cmprt['dens'] = sum(sec_matrix) / len(sec_matrix)
                except ZeroDivisionError:
                    cmprt['dens'] = 0.
            try:
                meanh = sum([cmprt['dens'] for cmprt in cmprts[sec]]) / len(cmprts[sec])
            except ZeroDivisionError:
                meanh = 1.
            for cmprt in cmprts[sec]:
                try:
                    cmprt['dens'] /= meanh
                except ZeroDivisionError:
                    cmprt['dens'] = 1.
            gammas = {}
            for gamma in range(101):
                gammas[gamma] = _find_ab_compartments(float(gamma)/100, matrix,
                                                      breaks, cmprts[sec],
                                                      save=False)
                # print gamma, gammas[gamma]
            gamma = min(gammas.keys(), key=lambda k: gammas[k][0])
            _ = _find_ab_compartments(float(gamma)/100, matrix, breaks,
                                      cmprts[sec], save=True)
            if savefig or show:
                vmin = kwargs.get('vmin', -1)
                vmax = kwargs.get('vmax',  1)
                if vmin == 'auto' == vmax:
                    vmax = max([abs(npmin(matrix)), abs(npmax(matrix))])
                    vmin = -vmax
                plot_compartments(sec, first, cmprts, matrix, show,
                                  savefig + '/chr' + sec + '.pdf',
                                  vmin=vmin, vmax=vmax)
                plot_compartments_summary(sec, cmprts, show,
                                          savefig + '/chr' + sec + '_summ.pdf')
            
        self.compartments = cmprts
        if savedata:
            self.write_compartments(savedata)
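
As the docstring above notes, compartment boundaries are placed where the first eigenvector changes sign. A minimal standalone sketch of just that sign-change step (not TADbit's code), run on a made-up eigenvector:

import numpy as np

first = np.array([0.3, 0.2, 0.1, -0.2, -0.4, 0.1, 0.5, -0.3])  # toy PC1

# indices where consecutive entries have opposite signs, plus the two ends
breaks = [0] + [i for i in range(1, len(first))
                if first[i] * first[i - 1] < 0] + [len(first)]
compartments = [{'start': b, 'end': e}
                for b, e in zip(breaks[:-1], breaks[1:])]
print(compartments)
# [{'start': 0, 'end': 3}, {'start': 3, 'end': 5}, {'start': 5, 'end': 7}, ...]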
コード例 #27
0
ファイル: S_FitShiftedLognormal.py プロジェクト: s0ap/arpmRes
date_tick = arange(200 - 1, t_, 820)
# -

# ## Generate the figure

# +
f = figure()
# HFP histogram with MMFP pdf superimposed
h1 = plt.subplot(3, 1, 1)
b = bar(x[:-1],
        p[0],
        width=x[1] - x[0],
        facecolor=[.8, .8, .8],
        edgecolor=[.6, .6, .6])
bb = plot(xx, sln, lw=2)
xlim([npmin(xx), npmax(xx)])
ylim([0, max(npmax(p), npmax(sln))])
yticks([])
P1 = r'Fitted shift.logn.($\mu$=%3.1f, $\sigma^2$=%3.1f, c=%3.2f)' % (
    real(mu), real(sig2), real(c))
l = legend([P1, 'HFP distr.'])
# Scatter plot of the pnl with color-coded observations (according to the FP)
[CM, C] = ColorCodedFP(flex_probs, npmin(flex_probs), npmax(flex_probs),
                       arange(0, 0.71, 0.01), 0, 18, [18, 0])
h3 = plt.subplot(3, 1, 2)

scatter(date_dt, pnl, 5, c=C, marker='.', cmap=CM)
xlim([min(date_dt), max(date_dt)])
xticks(date_dt[date_tick])
h3.xaxis.set_major_formatter(myFmt)
ylim([min(pnl), max(pnl)])
コード例 #28
0
ファイル: eval_pnd.py プロジェクト: OscarDeGar/py_grama
def floor_sig(sig, sig_min=1e-8):
    r"""Floor an array of variances
    """
    sig_floor = npmin([norm(sig), sig_min])
    return list(map(lambda s: npmax([s, sig_floor]), sig))
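
A hypothetical usage of floor_sig on a small variance array, assuming norm is numpy.linalg.norm and npmin/npmax are numpy.min/numpy.max (the imports this example appears to rely on):

import numpy as np

sig = np.array([1e-12, 0.5, 2.0])
# the floor is the smaller of ||sig|| and sig_min, here sig_min = 1e-8
sig_floor = np.min([np.linalg.norm(sig), 1e-8])
floored = [np.max([s, sig_floor]) for s in sig]
print(floored)   # the 1e-12 entry is raised to 1e-8; the others are unchanged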
コード例 #29
0
ファイル: timeseries.py プロジェクト: stefco/gwpy
    def gen_plot(self, args):
        """Generate the plot from time series and arguments
        """
        self.max_size = 16384. * 6400.  # that works on my mac
        self.yscale_factor = 1.0

        from gwpy.plotter.tex import label_to_latex
        from numpy import min as npmin
        from numpy import max as npmax

        if self.timeseries[0].size <= self.max_size:
            self.plot = self.timeseries[0].plot()
        else:
            self.plot = self.timeseries[0].plot(linestyle='None', marker='.')
        self.ymin = self.timeseries[0].min().value
        self.ymax = self.timeseries[0].max().value
        self.xmin = self.timeseries[0].times.value.min()
        self.xmax = self.timeseries[0].times.value.max()

        if len(self.timeseries) > 1:
            for idx in range(1, len(self.timeseries)):
                chname = self.timeseries[idx].channel.name
                lbl = label_to_latex(chname)
                if self.timeseries[idx].size <= self.max_size:
                    self.plot.add_timeseries(self.timeseries[idx], label=lbl)
                else:
                    self.plot.add_timeseries(self.timeseries[idx], label=lbl,
                                             linestyle='None', marker='.')
                self.ymin = min(self.ymin, self.timeseries[idx].min().value)
                self.ymax = max(self.ymax, self.timeseries[idx].max().value)
                self.xmin = min(self.xmin,
                                self.timeseries[idx].times.value.min())
                self.xmax = max(self.xmax,
                                self.timeseries[idx].times.value.max())
        # if they chose to set the range of the x-axis find the range of y
        strt = self.xmin
        stop = self.xmax
        # a bit weird but global ymax will be >= any value in
        # the range same for ymin
        new_ymin = self.ymax
        new_ymax = self.ymin

        if args.xmin:
            strt = float(args.xmin)
        if args.xmax:
            stop = float(args.xmax)
        if strt != self.xmin or stop != self.xmax:
            for idx in range(0, len(self.timeseries)):
                x0 = self.timeseries[idx].x0.value
                dt = self.timeseries[idx].dt.value
                if strt < 1e8:
                    strt += x0
                if stop < 1e8:
                    stop += x0
                b = int(max(0, (strt - x0) / dt))

                e = int(min(self.xmax, (stop - x0) / dt))

                if e >= self.timeseries[idx].size:
                    e = self.timeseries[idx].size - 1
                new_ymin = min(new_ymin,
                               npmin(self.timeseries[idx].value[b:e]))
                new_ymax = max(new_ymax,
                               npmax(self.timeseries[idx].value[b:e]))
            self.ymin = new_ymin
            self.ymax = new_ymax
        if self.yscale_factor > 1:
            self.log(2, ('Scaling y-limits, original: %f, %f)' %
                         (self.ymin, self.ymax)))
            yrange = self.ymax - self.ymin
            mid = (self.ymax + self.ymin) / 2.
            self.ymax = mid + yrange / (2 * self.yscale_factor)
            self.ymin = mid - yrange / (2 * self.yscale_factor)
            self.log(2, ('Scaling y-limits, new: %f, %f)' %
                         (self.ymin, self.ymax)))
コード例 #30
0
ファイル: plotbase.py プロジェクト: waqasbhatti/hats19to21
def plot_mag_series(times,
                    mags,
                    errs=None,
                    outfile=None,
                    sigclip=30.0,
                    timebin=None,
                    yrange=None):
    '''This plots a magnitude time series.

    If outfile is None, then this plots to the matplotlib interactive window. If
    outfile is a string denoting a filename, uses that to write a png/eps/pdf
    figure.

    timebin is either a float indicating binsize in seconds, or None indicating
    no time-binning is required.

    '''

    if errs is not None:

        # remove nans
        find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
        ftimes, fmags, ferrs = times[find], mags[find], errs[find]

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = ferrs[sigind]

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = ferrs

    else:

        # remove nans
        find = npisfinite(times) & npisfinite(mags)
        ftimes, fmags, ferrs = times[find], mags[find], None

        # get the median and stdev = 1.483 x MAD
        median_mag = npmedian(fmags)
        stddev_mag = (npmedian(npabs(fmags - median_mag))) * 1.483

        # sigclip next
        if sigclip:

            sigind = (npabs(fmags - median_mag)) < (sigclip * stddev_mag)

            stimes = ftimes[sigind]
            smags = fmags[sigind]
            serrs = None

            LOGINFO('sigclip = %s: before = %s observations, '
                    'after = %s observations' %
                    (sigclip, len(times), len(stimes)))

        else:

            stimes = ftimes
            smags = fmags
            serrs = None

    # now we proceed to binning
    if timebin and errs is not None:

        binned = time_bin_magseries_with_errs(stimes, smags, serrs,
                                              binsize=timebin)
        btimes, bmags, berrs = (binned['binnedtimes'],
                                binned['binnedmags'],
                                binned['binnederrs'])

    elif timebin and errs is None:

        binned = time_bin_magseries(stimes, smags,
                                    binsize=timebin)
        btimes, bmags, berrs = binned['binnedtimes'], binned['binnedmags'], None

    else:

        btimes, bmags, berrs = stimes, smags, serrs


    # finally, proceed with plotting
    fig = plt.figure()
    fig.set_size_inches(7.5,4.8)

    plt.errorbar(btimes, bmags, fmt='go', yerr=berrs,
                 markersize=2.0, markeredgewidth=0.0, ecolor='grey',
                 capsize=0)

    # make a grid
    plt.grid(color='#a9a9a9',
             alpha=0.9,
             zorder=0,
             linewidth=1.0,
             linestyle=':')

    # fix the ticks to use no offsets
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)

    # get the yrange
    if yrange and isinstance(yrange,list) and len(yrange) == 2:
        ymin, ymax = yrange
    else:
        ymin, ymax = plt.ylim()
    plt.ylim(ymax,ymin)

    plt.xlim(npmin(btimes) - 0.001*npmin(btimes),
             npmax(btimes) + 0.001*npmin(btimes))

    plt.xlabel('time [JD]')
    plt.ylabel('magnitude')

    if outfile and isinstance(outfile, str):

        plt.savefig(outfile,bbox_inches='tight')
        plt.close()
        return os.path.abspath(outfile)

    else:

        plt.show()
        plt.close()
        return
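
The sigma-clipping block above keeps points within sigclip robust standard deviations of the median, where the robust standard deviation is 1.483 x MAD. A minimal standalone version of that step on synthetic data:

import numpy as np

rng = np.random.default_rng(42)
mags = np.concatenate([rng.normal(12.0, 0.01, 1000),   # well-behaved points
                       np.array([12.5, 11.4])])        # two gross outliers

median_mag = np.median(mags)
stddev_mag = 1.483 * np.median(np.abs(mags - median_mag))  # 1.483 x MAD

sigclip = 3.0
keep = np.abs(mags - median_mag) < sigclip * stddev_mag
print(mags.size, '->', keep.sum())   # the two gross outliers (and little else) are rejected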
コード例 #31
0
def plot_mag_series(times,
                    mags,
                    magsarefluxes=False,
                    errs=None,
                    outfile=None,
                    sigclip=30.0,
                    normto='globalmedian',
                    normmingap=4.0,
                    timebin=None,
                    yrange=None,
                    segmentmingap=100.0,
                    plotdpi=100):
    '''This plots a magnitude time series.

    If magsarefluxes = False, then this function reverses the y-axis as is
    customary for magnitudes. If magsarefluxes = True, then this isn't done.

    If outfile is None, then this plots to the matplotlib interactive window. If
    outfile is a string denoting a filename, uses that to write a png/eps/pdf
    figure.

    timebin is either a float indicating binsize in seconds, or None indicating
    no time-binning is required.

    sigclip is either a single float or a list of two floats. In the first case,
    the sigclip is applied symmetrically. In the second case, the first sigclip
    in the list is applied to +ve magnitude deviations (fainter) and the second
    sigclip in the list is applied to -ve magnitude deviations (brighter).

    normto is either 'globalmedian', 'zero' or a float to normalize the mags
    to. If it's False, no normalization will be done on the magnitude time
    series. normmingap controls the minimum gap required to find possible
    groupings in the light curve that may belong to a different instrument (so
    may be displaced vertically)

    segmentmingap controls the minimum length of time (in days) required to
    consider a timegroup in the light curve as a separate segment. This is
    useful when the light curve consists of measurements taken over several
    seasons, so there's lots of dead space in the plot that can be cut out to
    zoom in on the interesting stuff. If segmentmingap is not None, the
    magseries plot will be cut in this way.

    plotdpi sets the DPI for PNG plots (default = 100).

    '''

    # sigclip the magnitude timeseries
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    # now we proceed to binning
    if timebin and errs is not None:

        binned = time_bin_magseries_with_errs(stimes,
                                              smags,
                                              serrs,
                                              binsize=timebin)
        btimes, bmags, berrs = (binned['binnedtimes'], binned['binnedmags'],
                                binned['binnederrs'])

    elif timebin and errs is None:

        binned = time_bin_magseries(stimes, smags, binsize=timebin)
        btimes, bmags, berrs = binned['binnedtimes'], binned[
            'binnedmags'], None

    else:

        btimes, bmags, berrs = stimes, smags, serrs

    # check if we need to normalize
    if normto is not False:
        btimes, bmags = normalize_magseries(btimes,
                                            bmags,
                                            normto=normto,
                                            magsarefluxes=magsarefluxes,
                                            mingap=normmingap)

    btimeorigin = btimes.min()
    btimes = btimes - btimeorigin

    ##################################
    ## FINALLY PLOT THE LIGHT CURVE ##
    ##################################

    # if we're going to plot with segment gaps highlighted, then find the gaps
    if segmentmingap is not None:
        ntimegroups, timegroups = find_lc_timegroups(btimes,
                                                     mingap=segmentmingap)

    # get the yrange for all the plots if it's given
    if yrange and isinstance(yrange, list) and len(yrange) == 2:
        ymin, ymax = yrange

    # if it's not given, figure it out
    else:

        # the plot y limits are just 0.05 mags on each side if mags are used
        if not magsarefluxes:
            ymin, ymax = (bmags.min() - 0.05, bmags.max() + 0.05)
        # if we're dealing with fluxes, limits are 2% of the flux range per side
        else:
            ycov = bmags.max() - bmags.min()
            ymin = bmags.min() - 0.02 * ycov
            ymax = bmags.max() + 0.02 * ycov

    # if we're supposed to make the plot segment-aware (i.e. gaps longer than
    # segmentmingap will be cut out)
    if segmentmingap and ntimegroups > 1:

        LOGINFO('%s time groups found' % ntimegroups)

        # our figure is now a multiple axis plot
        # the aspect ratio is a bit wider
        fig, axes = plt.subplots(1, ntimegroups, sharey=True)
        fig.set_size_inches(10, 4.8)
        axes = np.ravel(axes)

        # now go through each axis and make the plots for each timegroup
        for timegroup, ax, axind in zip(timegroups, axes, range(len(axes))):

            tgtimes = btimes[timegroup]
            tgmags = bmags[timegroup]

            if berrs is not None:
                tgerrs = berrs[timegroup]
            else:
                tgerrs = None

            LOGINFO('axes: %s, timegroup %s: JD %.3f to %.3f' %
                    (axind, axind + 1, btimeorigin + tgtimes.min(),
                     btimeorigin + tgtimes.max()))

            ax.errorbar(tgtimes,
                        tgmags,
                        fmt='go',
                        yerr=tgerrs,
                        markersize=2.0,
                        markeredgewidth=0.0,
                        ecolor='grey',
                        capsize=0)

            # don't use offsets on any xaxis
            ax.get_xaxis().get_major_formatter().set_useOffset(False)

            # fix the ticks to use no yoffsets and remove right spines for first
            # axes instance
            if axind == 0:
                ax.get_yaxis().get_major_formatter().set_useOffset(False)
                ax.spines['right'].set_visible(False)
                ax.yaxis.tick_left()
            # remove the right and left spines for the other axes instances
            elif 0 < axind < (len(axes) - 1):
                ax.spines['right'].set_visible(False)
                ax.spines['left'].set_visible(False)
                ax.tick_params(right='off',
                               labelright='off',
                               left='off',
                               labelleft='off')
            # make the left spines invisible for the last axes instance
            elif axind == (len(axes) - 1):
                ax.spines['left'].set_visible(False)
                ax.spines['right'].set_visible(True)
                ax.yaxis.tick_right()

            # set the yaxis limits
            if not magsarefluxes:
                ax.set_ylim(ymax, ymin)
            else:
                ax.set_ylim(ymin, ymax)

            # now figure out the xaxis ticklabels and ranges
            tgrange = tgtimes.max() - tgtimes.min()

            if tgrange < 10.0:
                ticklocations = [tgrange / 2.0]
                ax.set_xlim(npmin(tgtimes) - 0.5, npmax(tgtimes) + 0.5)
            elif 10.0 < tgrange < 30.0:
                ticklocations = np.linspace(tgtimes.min() + 5.0,
                                            tgtimes.max() - 5.0,
                                            num=2)
                ax.set_xlim(npmin(tgtimes) - 2.0, npmax(tgtimes) + 2.0)

            elif 30.0 < tgrange < 100.0:
                ticklocations = np.linspace(tgtimes.min() + 10.0,
                                            tgtimes.max() - 10.0,
                                            num=3)
                ax.set_xlim(npmin(tgtimes) - 2.5, npmax(tgtimes) + 2.5)
            else:
                ticklocations = np.linspace(tgtimes.min() + 20.0,
                                            tgtimes.max() - 20.0,
                                            num=3)
                ax.set_xlim(npmin(tgtimes) - 3.0, npmax(tgtimes) + 3.0)

            ax.xaxis.set_ticks([int(x) for x in ticklocations])

        # done with plotting all the sub axes

        # make the distance between sub plots smaller
        plt.subplots_adjust(wspace=0.07)

        # make the overall x and y labels
        fig.text(0.5,
                 0.00,
                 'JD - %.3f (not showing gaps)' % btimeorigin,
                 ha='center')
        if not magsarefluxes:
            fig.text(0.02, 0.5, 'magnitude', va='center', rotation='vertical')
        else:
            fig.text(0.02, 0.5, 'flux', va='center', rotation='vertical')

    # make normal figure otherwise
    else:

        fig = plt.figure()
        fig.set_size_inches(7.5, 4.8)

        plt.errorbar(btimes,
                     bmags,
                     fmt='go',
                     yerr=berrs,
                     markersize=2.0,
                     markeredgewidth=0.0,
                     ecolor='grey',
                     capsize=0)

        # make a grid
        plt.grid(color='#a9a9a9',
                 alpha=0.9,
                 zorder=0,
                 linewidth=1.0,
                 linestyle=':')

        # fix the ticks to use no offsets
        plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
        plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)

        plt.xlabel('JD - %.3f' % btimeorigin)

        # set the yaxis limits and labels
        if not magsarefluxes:
            plt.ylim(ymax, ymin)
            plt.ylabel('magnitude')
        else:
            plt.ylim(ymin, ymax)
            plt.ylabel('flux')

    # write the plot out to a file if requested
    if outfile and isinstance(outfile, str):

        if outfile.endswith('.png'):
            plt.savefig(outfile, bbox_inches='tight', dpi=plotdpi)
        else:
            plt.savefig(outfile, bbox_inches='tight')
        plt.close()
        return os.path.abspath(outfile)

    elif dispok:

        plt.show()
        plt.close()
        return

    else:

        LOGWARNING('no output file specified and no $DISPLAY set, '
                   'saving to magseries-plot.png in current directory')
        outfile = 'magseries-plot.png'
        plt.savefig(outfile, bbox_inches='tight', dpi=plotdpi)
        plt.close()
        return os.path.abspath(outfile)
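
segmentmingap works by splitting the light curve wherever consecutive time stamps are separated by more than the given gap. A short sketch of that grouping logic (a stand-in for find_lc_timegroups, not its actual implementation):

import numpy as np

def find_time_groups(times, mingap=100.0):
    """Return slices covering runs of times whose internal gaps are < mingap."""
    gaps = np.diff(times)
    breaks = np.where(gaps > mingap)[0] + 1        # first index of each new group
    edges = np.concatenate([[0], breaks, [len(times)]])
    return [slice(a, b) for a, b in zip(edges[:-1], edges[1:])]

# hypothetical three observing seasons separated by long gaps
times = np.concatenate([np.arange(0, 90, 1.0),
                        np.arange(400, 480, 1.0),
                        np.arange(800, 850, 1.0)])
groups = find_time_groups(times, mingap=100.0)
print(len(groups), [(times[g][0], times[g][-1]) for g in groups])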
コード例 #32
0
# -

# ## Generate a figure showing the plot of the cumulative volume and the cumulative sign

# +
# color settings
orange = [.9, .3, .0]
blue = [0, 0, .8]

t_ms_dt = array([date_mtop(i) for i in t_ms])
xtick = linspace(1999, len(t_ms_dt) - 1, 8, dtype=int)
myFmt = mdates.DateFormatter('%H:%M:%S')

# axes settings
ymax_2 = npmax(q_line) + 5
ymin_2 = npmin(q_line[0, q_line[0] > 0])
ytick_2 = linspace(ymin_2, ymax_2, 5)
ymax_3 = npmax(sgn_line) + 1
ymin_3 = npmin(sgn_line) - 1
ytick_3 = linspace(ymin_3, ymax_3, 5)

f, ax = subplots(1, 1)
plt.sca(ax)
ax.xaxis.set_major_formatter(myFmt)
ylabel('Cumulative volume', color=orange)
ylim([ymin_2, ymax_2])
idx = q[0] > 0
plt.scatter(t_ms_dt[idx], q[0, idx], color=orange, marker='.', s=2)
plot(t_ms_dt, q_line[0], color=orange, lw=1)
ax2 = ax.twinx()
ylim([ymin_3, ymax_3])
コード例 #33
0
ファイル: tad_cmo.py プロジェクト: gui11aume/tadbit-1
def optimal_cmo(hic1,
                hic2,
                num_v=None,
                max_num_v=None,
                verbose=False,
                method='frobenius',
                long_nw=True,
                long_dist=True):
    """

    Note: penalty is defined as the minimum value of the pre-scoring matrix.
    
    :param hic1: first matrix to align
    :param hic2: second matrix to align
    :param None num_v: number of eigen vectors to consider, max is:
        max(min(len(hic1), len(hic2)))
    :param None max_num_v: maximum number of eigen vectors to consider.
    :param 'frobenius' method: distance function to use as the alignment score.
       If 'score', the distance is the final value of the Needleman-Wunsch
       algorithm. If 'frobenius', a modification of the Frobenius distance is
       used.

    :returns: two lists, one per aligned matrix, plus a dict summarizing the
        goodness of the alignment with the distance between matrices, their 
        Spearman correlation Rho value and pvalue.
    """

    l_p1 = len(hic1)
    l_p2 = len(hic2)
    num_v = num_v or min(l_p1, l_p2)
    if max_num_v:
        num_v = min(max_num_v, num_v)
    if num_v > l_p1 or num_v > l_p2:
        raise Exception('\nnum_v should be at most %s\n' % (min(l_p1, l_p2)))
    val1, vec1 = eigh(hic1)
    if npsum(vec1).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" + '%s\n\n%s' %
                        (hic1, vec1))
    val2, vec2 = eigh(hic2)
    if npsum(vec2).imag:
        raise Exception("ERROR: Hi-C data is not symmetric.\n" + '%s\n\n%s' %
                        (hic2, vec2))
    #
    val1 = array([sqrt(abs(v)) for v in val1])
    val2 = array([sqrt(abs(v)) for v in val2])
    idx = val1.argsort()[::-1]
    val1 = val1[idx]
    vec1 = vec1[idx]
    idx = val2.argsort()[::-1]
    val2 = val2[idx]
    vec2 = vec2[idx]
    #
    vec1 = array([val1[i] * vec1[:, i] for i in xrange(num_v)]).transpose()
    vec2 = array([val2[i] * vec2[:, i] for i in xrange(num_v)]).transpose()
    nearest = float('inf')
    nw = core_nw_long if long_nw else core_nw
    dister = get_dist_long if long_dist else get_dist
    best_alis = []
    for num in xrange(1, num_v + 1):
        for factors in product([1, -1], repeat=num):
            vec1p = factors * vec1[:, :num]
            vec2p = vec2[:, :num]
            p_scores = prescoring(vec1p, vec2p, l_p1, l_p2)
            penalty = min([npmin(p_scores)] + [-npmax(p_scores)])
            align1, align2, dist = nw(p_scores, penalty, l_p1, l_p2)
            try:
                if method == 'frobenius':
                    dist = dister(align1, align2, hic1, hic2)
                else:
                    dist = -dist
                if dist < nearest:
                    if not penalty:
                        for scr in p_scores:
                            print ' '.join(
                                ['%7s' % (round(y, 2)) for y in scr])
                    nearest = dist
                    best_alis = [align1, align2]
                    best_pen = penalty
            except IndexError as e:
                print e
                pass
    try:
        align1, align2 = best_alis
    except ValueError:
        pass
    if verbose:
        print '\n Alignment (score = %s):' % (nearest)
        print 'TADS 1: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align1])
        print 'TADS 2: '+'|'.join(['%4s' % (str(int(x)) \
                                            if x!='-' else '-'*3) for x in align2])
    rho, pval = get_score(align1, align2, hic1, hic2)
    # print best_pen
    if not best_pen:
        print 'WARNING: penalty NULL!!!\n\n'
    return align1, align2, {'dist': nearest, 'rho': rho, 'pval': pval}
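
The eigen-decomposition step above weights each eigenvector by the square root of the absolute eigenvalue and keeps the strongest components first. A minimal standalone sketch of just that preprocessing (not the alignment itself), on a random symmetric matrix:

import numpy as np
from numpy.linalg import eigh

rng = np.random.default_rng(0)
m = rng.random((6, 6))
hic = m + m.T                        # toy symmetric contact matrix

vals, vecs = eigh(hic)               # eigenvalues in ascending order
weights = np.sqrt(np.abs(vals))
order = np.argsort(weights)[::-1]    # strongest components first

num_v = 3
# eigenvector columns scaled by sqrt(|eigenvalue|), keeping num_v components
scaled = vecs[:, order[:num_v]] * weights[order[:num_v]]
print(scaled.shape)                  # (6, 3)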
コード例 #34
0
ファイル: ffgn.py プロジェクト: s0ap/arpmRes
def ffgn(H, n, N):
    # Written jointly by Yingchun Zhou (Jasmine), [email protected]
    # and Stilian Stoev, [email protected], September, 2005.
    # abridged by A. Meucci: all credit to Yingchun and Stilian
    #
    # Generates exact paths of Fractional Gaussian Noise by using
    # circulant embedding (for 1/2<H<1) and Lowen's method (for 0<H<1/2).
    #
    # Input:
    #   H     <- Hurst exponent
    #   n     <- number of independent paths
    #   N     <- the length of the time series

    if 0.5 < H < 1:
        # Use the "circulant ebedding" technique.  This method works only in the case when 1/2 < H < 1.

        # First step: specify the covariance
        c_1 = abs(pow(arange(-1, N), (2 * H)))
        c_1[0] = 1
        c = 1 / 2 * (arange(1, N + 2)**(2 * H) - 2 *
                     (arange(N + 1)**(2 * H)) + c_1)
        v = r_[c[:N], c[N:0:-1]]

        # Second step: calculate Fourier transform of c
        g = real(fft(v))

        if npmin(g) < 0:
            raise ValueError('Some of the g[k] are negative!')
        g = abs(g).reshape(1, -1)
        z = zeros((n, N + 1), dtype=np.complex128)
        y = zeros((n, N + 1), dtype=np.complex128)
        # Third step: generate {z[0],...,z( 2*N)}
        z[:, [0]] = sqrt(2) * randn(n, 1)
        y[:, 0] = z[:, 0]
        z[:, [N]] = sqrt(2) * randn(n, 1)
        y[:, N] = z[:, N].copy()
        a = randn(n, N - 1)
        b = randn(n, N - 1)
        z1 = a + b * 1j
        z[:, 1:N] = z1
        y1 = z1
        y[:, 1:N] = y1
        y = r_['-1', y, conj(y[:, N - 1:0:-1])]
        y = y * (ones((n, 1)) @ sqrt(g))

        # Fourth step: calculate the stationary process f
        f = real(fft(y)) / sqrt(4 * N)
        f = f[:, :N]
    elif H == 0.5:
        f = randn(n, N)
    elif 0 < H < 0.5:
        # Use Lowen's method for this case.  Adapted from the code "fftfgn.m"

        G1 = randn(n, N - 1)
        G2 = randn(n, N - 1)
        G = (G1 + 1j * G2) / sqrt(2)
        GN = randn(n, 1)
        H2 = 2 * H
        R = 1 - (arange(1, N) / N)**H2
        R = r_[1, R, 0, R[::-1]]
        S = ones((n, 1)) @ (abs(fft(R, 2 * N))**.5).reshape(1, -1)
        X = r_['-1', zeros((n, 1)), G, GN, conj(G[:, ::-1])] * S
        x = ifft(X, 2 * N)
        y = sqrt(N) * real(x[:, :N] - x[:, [0]] @ ones((1, N)))
        f = N**H * r_['-1', y[:, [0]], diff(y, axis=1)]
    else:
        raise ValueError(
            'The value of the Hurst parameter H must be in (0,1) and was %.f' %
            H)

    return f
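
A hypothetical call to the ffgn defined above, generating a few paths of fractional Gaussian noise and checking their shape (randn, fft and friends are assumed to come from numpy, as in this script's imports):

H, n, N = 0.7, 5, 1024      # Hurst exponent, number of paths, path length
paths = ffgn(H, n, N)
print(paths.shape)          # expected: (5, 1024)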
コード例 #35
0
    def gen_plot(self, args):
        """Generate the plot from time series and arguments"""
        self.max_size = 16384. * 6400.  # that works on my mac
        self.yscale_factor = 1.0

        from gwpy.plotter.tex import label_to_latex
        from numpy import min as npmin
        from numpy import max as npmax

        if self.timeseries[0].size <= self.max_size:
            self.plot = self.timeseries[0].plot()
        else:
            self.plot = self.timeseries[0].plot(linestyle='None', marker='.')
        self.ymin = self.timeseries[0].min().value
        self.ymax = self.timeseries[0].max().value
        self.xmin = self.timeseries[0].times.value.min()
        self.xmax = self.timeseries[0].times.value.max()

        if len(self.timeseries) > 1:
            for idx in range(1, len(self.timeseries)):
                chname = self.timeseries[idx].channel.name
                lbl = label_to_latex(chname)
                if self.timeseries[idx].size <= self.max_size:
                    self.plot.add_timeseries(self.timeseries[idx], label=lbl)
                else:
                    self.plot.add_timeseries(self.timeseries[idx],
                                             label=lbl,
                                             linestyle='None',
                                             marker='.')
                self.ymin = min(self.ymin, self.timeseries[idx].min().value)
                self.ymax = max(self.ymax, self.timeseries[idx].max().value)
                self.xmin = min(self.xmin,
                                self.timeseries[idx].times.value.min())
                self.xmax = max(self.xmax,
                                self.timeseries[idx].times.value.max())
        # if they chose to set the range of the x-axis find the range of y
        strt = self.xmin
        stop = self.xmax
        # a bit weird but global ymax will be >= any value in
        # the range same for ymin
        new_ymin = self.ymax
        new_ymax = self.ymin

        if args.xmin:
            strt = float(args.xmin)
        if args.xmax:
            stop = float(args.xmax)
        if strt != self.xmin or stop != self.xmax:
            for idx in range(0, len(self.timeseries)):
                x0 = self.timeseries[idx].x0.value
                dt = self.timeseries[idx].dt.value
                if strt < 1e8:
                    strt += x0
                if stop < 1e8:
                    stop += x0
                b = int(max(0, (strt - x0) / dt))

                e = int(min(self.xmax, (stop - x0) / dt))

                if e >= self.timeseries[idx].size:
                    e = self.timeseries[idx].size - 1
                new_ymin = min(new_ymin,
                               npmin(self.timeseries[idx].value[b:e]))
                new_ymax = max(new_ymax,
                               npmax(self.timeseries[idx].value[b:e]))
            self.ymin = new_ymin
            self.ymax = new_ymax
        if self.yscale_factor > 1:
            self.log(2, ('Scaling y-limits, original: %f, %f)' %
                         (self.ymin, self.ymax)))
            yrange = self.ymax - self.ymin
            mid = (self.ymax + self.ymin) / 2.
            self.ymax = mid + yrange / (2 * self.yscale_factor)
            self.ymin = mid - yrange / (2 * self.yscale_factor)
            self.log(2, ('Scaling y-limits, new: %f, %f)' %
                         (self.ymin, self.ymax)))
        return
コード例 #36
0
ファイル: S_LogIsoCont.py プロジェクト: s0ap/arpmRes
Y = exp(X)

# ## Select an equispaced grid and compute the lognormal pdf

# +
x1 = arange(0.01, 7, 0.1)
x2 = arange(0.01, 7, 0.1)
X1, X2 = np.meshgrid(x1, x2)
lX1 = log(X1)
lX2 = log(X2)
z = r_[lX2.flatten()[np.newaxis, ...], lX1.flatten()[np.newaxis, ...]]
s = len(x1) * len(x2)
f = zeros(s)
for i in range(s):
    f[i] = exp(-1 / 2 *
               ((z[:, [i]] - mu).T) @ solve(sigma2, eye(sigma2.shape[0]))
               @ (z[:, [i]] - mu)) / (2 * pi * sqrt(det(sigma2)) *
                                      (X1.flatten()[i] * X2.flatten()[i]))

f = np.reshape(f, (len(x2), len(x1)), order='F')
# -

# ## Display the iso-contours and the scatter plot of the corresponding sample

plt.contour(X1, X2, f, arange(0.01, 0.03, 0.005), colors='b', linewidths=1.5)
scatter(Y[:, 0], Y[:, 1], 1, [.3, .3, .3], '.')
plt.axis([npmin(x1), npmax(x1), npmin(x2), npmax(x2)])
xlabel(r'$x_1$')
ylabel(r'$x_2$')
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
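
The loop above evaluates the bivariate lognormal pdf point by point. Since Y = exp(X) with X normal, the same pdf is the normal pdf of (log x1, log x2) divided by the Jacobian x1*x2, which a scipy-based sketch (with made-up mu and sigma2) can evaluate on the whole grid at once:

import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([0.5, 0.3])                 # made-up parameters
sigma2 = np.array([[0.40, 0.15],
                   [0.15, 0.30]])

x1 = np.arange(0.01, 7, 0.1)
x2 = np.arange(0.01, 7, 0.1)
X1, X2 = np.meshgrid(x1, x2)

# lognormal pdf = normal pdf of the log-coordinates / (x1 * x2)
logpts = np.dstack([np.log(X1), np.log(X2)])
f = multivariate_normal(mu, sigma2).pdf(logpts) / (X1 * X2)
print(f.shape)                            # same shape as the grid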
コード例 #37
0
ファイル: varfeatures.py プロジェクト: JinbiaoJi/astrobase
def nonperiodic_lightcurve_features(times, mags, errs, magsarefluxes=False):
    '''This calculates the following nonperiodic features of the light curve,
    listed in Richards, et al. 2011:

    - amplitude
    - beyond1std
    - flux_percentile_ratio_mid20
    - flux_percentile_ratio_mid35
    - flux_percentile_ratio_mid50
    - flux_percentile_ratio_mid65
    - flux_percentile_ratio_mid80
    - linear_trend
    - max_slope
    - median_absolute_deviation
    - median_buffer_range_percentage
    - pair_slope_trend
    - percent_amplitude
    - percent_difference_flux_percentile
    - skew
    - stdev
    - timelength
    - mintime
    - maxtime

    Parameters
    ----------

    times,mags,errs : np.array
        The input mag/flux time-series to process.

    magsarefluxes : bool
        If True, will treat values in `mags` as fluxes instead of magnitudes.

    Returns
    -------

    dict
        A dict containing all of the features listed above.

    '''

    # remove nans first
    finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
    ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]

    # remove zero errors
    nzind = npnonzero(ferrs)
    ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]

    ndet = len(fmags)

    if ndet > 9:

        # calculate the moments
        moments = lightcurve_moments(ftimes, fmags, ferrs)

        # calculate the flux measures
        fluxmeasures = lightcurve_flux_measures(ftimes,
                                                fmags,
                                                ferrs,
                                                magsarefluxes=magsarefluxes)

        # calculate the point-to-point measures
        ptpmeasures = lightcurve_ptp_measures(ftimes, fmags, ferrs)

        # get the length in time
        mintime, maxtime = npmin(ftimes), npmax(ftimes)
        timelength = maxtime - mintime

        # get the amplitude
        series_amplitude = 0.5 * (npmax(fmags) - npmin(fmags))

        # calculate the linear fit to the entire mag series
        fitcoeffs = nppolyfit(ftimes, fmags, 1, w=1.0 / (ferrs * ferrs))
        series_linear_slope = fitcoeffs[1]

        # roll fmags by 1
        rolled_fmags = nproll(fmags, 1)

        # calculate the magnitude ratio (from the WISE paper)
        series_magratio = ((npmax(fmags) - moments['median']) /
                           (npmax(fmags) - npmin(fmags)))

        # this is the dictionary returned containing all the measures
        measures = {
            'ndet': fmags.size,
            'mintime': mintime,
            'maxtime': maxtime,
            'timelength': timelength,
            'amplitude': series_amplitude,
            'ndetobslength_ratio': ndet / timelength,
            'linear_fit_slope': series_linear_slope,
            'magnitude_ratio': series_magratio,
        }
        if moments:
            measures.update(moments)
        if ptpmeasures:
            measures.update(ptpmeasures)
        if fluxmeasures:
            measures.update(fluxmeasures)

        return measures

    else:

        LOGERROR('not enough detections in this magseries '
                 'to calculate non-periodic features')
        return None
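
Two of the simplest features above depend only on the extremes of the series: the amplitude is half the max-to-min range, and the magnitude ratio compares the median's distance from the maximum to the full range. A quick sketch on synthetic mags:

import numpy as np

rng = np.random.default_rng(1)
fmags = (12.0 + 0.3 * np.sin(np.linspace(0, 4 * np.pi, 300))
         + rng.normal(0, 0.01, 300))

series_amplitude = 0.5 * (np.max(fmags) - np.min(fmags))
series_magratio = ((np.max(fmags) - np.median(fmags))
                   / (np.max(fmags) - np.min(fmags)))
print(series_amplitude, series_magratio)   # roughly 0.3 and roughly 0.5 here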
コード例 #38
0
ファイル: lcfit.py プロジェクト: joshuawallace/astrobase
def fourier_fit_magseries(times,
                          mags,
                          errs,
                          period,
                          fourierorder=None,
                          fourierparams=None,
                          sigclip=3.0,
                          magsarefluxes=False,
                          plotfit=False,
                          ignoreinitfail=True,
                          verbose=True):
    '''This fits a Fourier series to a magnitude time series.

    An 8th-order Fourier series works well for light curves with many thousands
    of observations (HAT light curves have ~10k observations). Lower the order
    accordingly if you have fewer observations in your light curves to avoid
    over-fitting.

    Set the Fourier order by using either the fourierorder kwarg OR the
    fourierparams kwarg. If fourierorder is None, then fourierparams is a
    list of the form for fourier order = N:

    [fourier_amp1, fourier_amp2, fourier_amp3,...,fourier_ampN,
     fourier_phase1, fourier_phase2, fourier_phase3,...,fourier_phaseN]

    If both/neither are specified, the default Fourier order of 3 will be used.

    Returns the Fourier fit parameters, the minimum chisq and reduced
    chisq. Makes a plot for the fit to the mag series if plotfit is a string
    containing a filename to write the plot to.

    This folds the time series using the given period and at the first
    observation. Can optionally sigma-clip observations.

    if ignoreinitfail is True, ignores the initial failure to find a set of
    optimized Fourier parameters and proceeds to do a least-squares fit anyway.

    magsarefluxes is a boolean value for setting the ylabel and ylimits of
    plots for either magnitudes (False) or flux units (i.e. normalized to 1, in
    which case magsarefluxes should be set to True).

    '''

    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    phase, pmags, perrs, ptimes, mintime = (_get_phased_quantities(
        stimes, smags, serrs, period))

    # get the fourier order either from the scalar order kwarg...
    if fourierorder and fourierorder > 0 and not fourierparams:

        fourieramps = [0.6] + [0.2] * (fourierorder - 1)
        fourierphas = [0.1] + [0.1] * (fourierorder - 1)
        fourierparams = fourieramps + fourierphas

    # or from the fully specified coeffs vector
    elif not fourierorder and fourierparams:

        fourierorder = int(len(fourierparams) / 2)

    else:
        LOGWARNING('specified both/neither Fourier order AND Fourier coeffs, '
                   'using default Fourier order of 3')
        fourierorder = 3
        fourieramps = [0.6] + [0.2] * (fourierorder - 1)
        fourierphas = [0.1] + [0.1] * (fourierorder - 1)
        fourierparams = fourieramps + fourierphas

    if verbose:
        LOGINFO('fitting Fourier series of order %s to '
                'mag series with %s observations, '
                'using period %.6f, folded at %.6f' %
                (fourierorder, len(phase), period, mintime))

    # initial minimize call to find global minimum in chi-sq
    initialfit = spminimize(_fourier_chisq,
                            fourierparams,
                            method='BFGS',
                            args=(phase, pmags, perrs))

    # make sure this initial fit succeeds before proceeding
    if initialfit.success or ignoreinitfail:

        if verbose:
            LOGINFO('initial fit done, refining...')

        leastsqparams = initialfit.x

        try:
            leastsqfit = spleastsq(_fourier_residual,
                                   leastsqparams,
                                   args=(phase, pmags))
        except Exception as e:
            leastsqfit = None

        # if the fit succeeded, then we can return the final parameters
        if leastsqfit and leastsqfit[-1] in (1, 2, 3, 4):

            finalparams = leastsqfit[0]

            # calculate the chisq and reduced chisq
            fitmags = _fourier_func(finalparams, phase, pmags)

            fitchisq = npsum(
                ((fitmags - pmags) * (fitmags - pmags)) / (perrs * perrs))

            fitredchisq = fitchisq / (len(pmags) - len(finalparams) - 1)

            if verbose:
                LOGINFO('final fit done. chisq = %.5f, reduced chisq = %.5f' %
                        (fitchisq, fitredchisq))

            # figure out the time of light curve minimum (i.e. the fit epoch)
            # this is when the fit mag is maximum (i.e. the faintest)
            # or if magsarefluxes = True, then this is when fit flux is minimum
            if not magsarefluxes:
                fitmagminind = npwhere(fitmags == npmax(fitmags))
            else:
                fitmagminind = npwhere(fitmags == npmin(fitmags))
            magseriesepoch = ptimes[fitmagminind]

            # assemble the returndict
            returndict = {
                'fittype': 'fourier',
                'fitinfo': {
                    'fourierorder': fourierorder,
                    'finalparams': finalparams,
                    'initialfit': initialfit,
                    'leastsqfit': leastsqfit,
                    'fitmags': fitmags,
                    'fitepoch': magseriesepoch
                },
                'fitchisq': fitchisq,
                'fitredchisq': fitredchisq,
                'fitplotfile': None,
                'magseries': {
                    'times': ptimes,
                    'phase': phase,
                    'mags': pmags,
                    'errs': perrs,
                    'magsarefluxes': magsarefluxes
                },
            }

            # make the fit plot if required
            if plotfit and isinstance(plotfit, str):

                _make_fit_plot(phase,
                               pmags,
                               perrs,
                               fitmags,
                               period,
                               mintime,
                               magseriesepoch,
                               plotfit,
                               magsarefluxes=magsarefluxes)

                returndict['fitplotfile'] = plotfit

            return returndict

        # if the leastsq fit did not succeed, return Nothing
        else:
            LOGERROR(
                'fourier-fit: least-squared fit to the light curve failed')
            return {
                'fittype': 'fourier',
                'fitinfo': {
                    'fourierorder': fourierorder,
                    'finalparams': None,
                    'initialfit': initialfit,
                    'leastsqfit': None,
                    'fitmags': None,
                    'fitepoch': None
                },
                'fitchisq': npnan,
                'fitredchisq': npnan,
                'fitplotfile': None,
                'magseries': {
                    'times': ptimes,
                    'phase': phase,
                    'mags': pmags,
                    'errs': perrs,
                    'magsarefluxes': magsarefluxes
                }
            }

    # if the fit didn't succeed, we can't proceed
    else:

        LOGERROR('initial Fourier fit did not succeed, '
                 'reason: %s, returning scipy OptimizeResult' %
                 initialfit.message)

        return {
            'fittype': 'fourier',
            'fitinfo': {
                'fourierorder': fourierorder,
                'finalparams': None,
                'initialfit': initialfit,
                'leastsqfit': None,
                'fitmags': None,
                'fitepoch': None
            },
            'fitchisq': npnan,
            'fitredchisq': npnan,
            'fitplotfile': None,
            'magseries': {
                'times': ptimes,
                'phase': phase,
                'mags': pmags,
                'errs': perrs,
                'magsarefluxes': magsarefluxes
            }
        }
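
The parameter vector described in the docstring is an amplitude block followed by a phase block. A generic order-N Fourier series of that form (a sketch, not astrobase's _fourier_func) evaluated on a phase grid looks like this:

import numpy as np

def fourier_series(params, phase):
    """Evaluate sum_k A_k * sin(2*pi*(k+1)*phase + phi_k), where params holds
    the N amplitudes followed by the N phases, as in the docstring above."""
    order = len(params) // 2
    amps, phases = params[:order], params[order:]
    model = np.zeros_like(phase, dtype=float)
    for k in range(order):
        model += amps[k] * np.sin(2 * np.pi * (k + 1) * phase + phases[k])
    return model

phase = np.linspace(0.0, 1.0, 200)
params = [0.6, 0.2, 0.2, 0.1, 0.1, 0.1]     # the default order-3 start values
print(fourier_series(params, phase).shape)  # (200,)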
コード例 #39
0
# +
date_tick = arange(0, t_, 80)  # tick for the time axes
xticklabels = date[date_tick[::2]]

# colors
c0 = [0.9, 0.5, 0]
c1 = [.4, .4, 1]
c2 = [0.3, 0.3, 0.3]
myFmt = mdates.DateFormatter('%d-%b-%y')

f = figure(figsize=(12, 6))

# axes for prices
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax1.set_facecolor('white')
plt.axis([min(date), max(date), npmin(price), npmax(price) + 5])
ylabel('prices', color=c1)
ax1.plot(date[1:], price[1:], color=c1)  # prices
ax1.set_xticks(xticklabels)
ax1.xaxis.set_major_formatter(myFmt)
ax1.tick_params(axis='y', colors=c1)

# axes for log-returns
ax2 = ax1.twinx()
ax2.grid(False)
ax2.scatter(date[1:], ret, s=2.5, c=c2, marker='.')  # log-returns
ax2.set_ylabel('log-returns', color=c2)
ax2.tick_params(axis='y', colors=c2)

# axes for hidden volatility
ax3 = plt.subplot2grid((2, 2), (1, 0))
コード例 #40
0
ファイル: lcfit.py プロジェクト: joshuawallace/astrobase
def spline_fit_magseries(times,
                         mags,
                         errs,
                         period,
                         knotfraction=0.01,
                         maxknots=30,
                         sigclip=30.0,
                         plotfit=False,
                         ignoreinitfail=False,
                         magsarefluxes=False,
                         verbose=True):
    '''This fits a univariate cubic spline to the phased light curve.

    This fit may be better than the Fourier fit for sharply variable objects,
    like EBs, so can be used to distinguish them from other types of variables.

    The knot fraction is the number of internal knots to use for the spline. A
    value of 0.01 (or 1%) of the total number of non-nan observations appears to
    work quite well, without over-fitting. maxknots controls the maximum number
    of knots that will be allowed.

    magsarefluxes is a boolean value for setting the ylabel and ylimits of
    plots for either magnitudes (False) or flux units (i.e. normalized to 1, in
    which case magsarefluxes should be set to True).

    Returns the chisq of the fit, as well as the reduced chisq. FIXME: check
    this equation below to see if it's right.

    reduced_chisq = fit_chisq/(len(pmags) - len(knots) - 1)

    '''

    # this is required to fit the spline correctly
    if errs is None:
        errs = npfull_like(mags, 0.005)

    # sigclip the magnitude time series
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)
    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    # phase the mag series
    phase, pmags, perrs, ptimes, mintime = (_get_phased_quantities(
        stimes, smags, serrs, period))

    # now figure out the number of knots, capped at maxknots (default is 30)
    nobs = len(phase)
    nknots = int(npfloor(knotfraction * nobs))
    nknots = maxknots if nknots > maxknots else nknots
    splineknots = nplinspace(phase[0] + 0.01, phase[-1] - 0.01, num=nknots)

    # generate and fit the spline
    spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=1.0 / perrs)

    # calculate the spline fit to the actual phases, the chisq and red-chisq
    fitmags = spl(phase)

    fitchisq = npsum(((fitmags - pmags) * (fitmags - pmags)) / (perrs * perrs))

    fitredchisq = fitchisq / (len(pmags) - nknots - 1)

    if verbose:
        LOGINFO('spline fit done. nknots = %s,  '
                'chisq = %.5f, reduced chisq = %.5f' %
                (nknots, fitchisq, fitredchisq))

    # figure out the time of light curve minimum (i.e. the fit epoch)
    # this is when the fit mag is maximum (i.e. the faintest)
    # or if magsarefluxes = True, then this is when fit flux is minimum
    if not magsarefluxes:
        fitmagminind = npwhere(fitmags == npmax(fitmags))
    else:
        fitmagminind = npwhere(fitmags == npmin(fitmags))
    magseriesepoch = ptimes[fitmagminind]

    # assemble the returndict
    returndict = {
        'fittype': 'spline',
        'fitinfo': {
            'nknots': nknots,
            'fitmags': fitmags,
            'fitepoch': magseriesepoch
        },
        'fitchisq': fitchisq,
        'fitredchisq': fitredchisq,
        'fitplotfile': None,
        'magseries': {
            'times': ptimes,
            'phase': phase,
            'mags': pmags,
            'errs': perrs,
            'magsarefluxes': magsarefluxes
        },
    }

    # make the fit plot if required
    if plotfit and isinstance(plotfit, str):

        _make_fit_plot(phase,
                       pmags,
                       perrs,
                       fitmags,
                       period,
                       mintime,
                       magseriesepoch,
                       plotfit,
                       magsarefluxes=magsarefluxes)

        returndict['fitplotfile'] = plotfit

    return returndict
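
As a rough illustration of how spline_fit_magseries might be called, here is a minimal sketch that fits a synthetic, eclipse-like light curve. The import path astrobase.lcfit is an assumption inferred from the file name above, and the period and light-curve shape are invented purely for illustration.

# Hypothetical usage sketch: the astrobase.lcfit import path is assumed
# from the file name above; the data below are synthetic.
import numpy as np
from astrobase.lcfit import spline_fit_magseries

period = 1.2345                                    # assumed period in days
times = np.sort(np.random.uniform(0.0, 30.0, 2000))
phase = (times / period) % 1.0
# a narrow eclipse-like dip (fainter means larger magnitude)
mags = 12.0 + 0.4 * np.exp(-0.5 * ((phase - 0.5) / 0.02) ** 2)
mags += np.random.normal(0.0, 0.01, times.size)
errs = np.full(times.size, 0.01)

fit = spline_fit_magseries(times, mags, errs, period,
                           knotfraction=0.01, maxknots=30,
                           sigclip=30.0, verbose=False)
print(fit['fitinfo']['nknots'], fit['fitredchisq'])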
Code Example #41
date_tick = arange(99, len(date), 380)
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%y')

figure(figsize=(16, 10))
# VIX
ax = plt.subplot2grid((2, 5), (0, 0), colspan=2)
ph0 = ax.plot(date_dt, p1[0], lw=0.5, color='gray')
xticks([])
yticks([])
ax2 = ax.twinx()
ax2.plot(date_dt, z1[0], color=[0, 0, 0.6], lw=0.5)
ph1 = ax2.plot(date_dt, z1[0, -1] * ones(t_), color='r', linestyle='--')
xlim([min(date_dt), max(date_dt)])
ax.set_ylim([0, 1.5 * npmax(p1)])
ax2.set_ylim([npmin(z1), 1.3 * npmax(z1)])
ax2.set_yticks(arange(20, 100, 20))
ax2.set_ylabel('VIX', color=[0, 0, 0.6])
ax2.grid(False)
LEG = 'target %2.2f' % z1[0, -1]
LEG1 = 'Entr. Pool. Flex. Probs'
legend(handles=[ph1[0], ph0[0]], labels=[LEG, LEG1], loc='upper right')
title('Conditioning variable: VIX')
ENS_text = 'Effective Num.Scenarios =  % 3.0f' % ens1
plt.text(min(date_dt), npmax(z1) * 1.2, ENS_text, horizontalalignment='left')
# 5 YEARS ZERO SWAP RATE
ax = plt.subplot2grid((2, 5), (1, 0), colspan=2)
ph0 = ax.plot(date_dt, p2[0], lw=0.5, color='gray')
yticks([])
xticks([])
ax2 = ax.twinx()
Code Example #42
File: lcfit.py  Project: joshuawallace/astrobase
def savgol_fit_magseries(times,
                         mags,
                         errs,
                         period,
                         windowlength=None,
                         polydeg=2,
                         sigclip=30.0,
                         plotfit=False,
                         magsarefluxes=False,
                         verbose=True):
    '''
    Fit a Savitzky-Golay filter to the magnitude/flux time series.
    SG fits successive sub-sets (windows) of adjacent data points with a
    low-order polynomial via least squares. At each point, it returns the
    value of the fitted polynomial evaluated at that point's time.
    This is significantly cheaper than *actually* performing a least-squares
    fit for each window, thanks to linear algebra tricks that become possible
    when the window size and polynomial order are fixed beforehand.
    Numerical Recipes Ch 14.8 gives an overview; Eq. 14.8.6 is what SciPy has
    implemented.

    The idea behind Savitzky-Golay is to preserve higher moments (>= 2) of the
    input data series better than a simple moving-window average would.

    Note that the filter assumes evenly spaced data, which magnitude time
    series are not. By *pretending* the data points are evenly spaced, we
    introduce an additional noise source in the function values. This noise
    source is relatively small provided that the change in the magnitude
    values across the full width of the N=windowlength point window is less
    than sqrt(N/2) times the measurement noise on a single point.

    Args:

    windowlength (int): length of the filter window (the number of
    coefficients). Must be either positive and odd, or None. (The window spans
    (windowlength - 1)/2 points on either side of the point currently being
    fit.) Bigger windows at fixed polynomial order risk lowering the amplitude
    of sharp features. If None, this routine (arbitrarily) sets the
    windowlength for phased LCs to either the number of finite data points
    divided by 300, or polydeg+3, whichever is bigger.

    polydeg (int): the order of the polynomial used to fit the samples. Must
    be less than windowlength. "Higher-order filters do better at preserving
    feature heights and widths, but do less smoothing on broader features."
    (NumRec).

    magsarefluxes (bool): sets the ylabel and ylimits of plots for either
    magnitudes (False) or flux units (i.e. normalized to 1, in which case
    magsarefluxes should be set to True).

    '''
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    phase, pmags, perrs, ptimes, mintime = (_get_phased_quantities(
        stimes, smags, serrs, period))

    # if no window length is given, use the default rule described in the
    # docstring above and force the window length to be odd
    if not isinstance(windowlength, int):
        windowlength = max(polydeg + 3, int(len(phase) / 300))
        if windowlength % 2 == 0:
            windowlength += 1

    if verbose:
        LOGINFO('applying Savitzky-Golay filter with '
                'window length %s and polynomial degree %s to '
                'mag series with %s observations, '
                'using period %.6f, folded at %.6f' %
                (windowlength, polydeg, len(pmags), period, mintime))

    # generate the function values obtained by applying the SG filter. The
    # "wrap" option is best for phase-folded LCs.
    sgf = savgol_filter(pmags, windowlength, polydeg, mode='wrap')

    # here the "fit" to the phases is the function produced by the
    # Savitzky-Golay filter. then compute the chisq and red-chisq.
    fitmags = sgf

    fitchisq = npsum(((fitmags - pmags) * (fitmags - pmags)) / (perrs * perrs))

    # TODO: quantify the degrees of freedom of the SG filter. The estimate
    # below is a rough guess, so the reduced chisq is deliberately overridden
    # with a -99. sentinel until the dof are properly worked out.
    nparams = int(len(pmags) / windowlength) * polydeg
    fitredchisq = fitchisq / (len(pmags) - nparams - 1)
    fitredchisq = -99.

    if verbose:
        LOGINFO('SG filter applied. chisq = %.5f, reduced chisq = %.5f' %
                (fitchisq, fitredchisq))

    # figure out the time of light curve minimum (i.e. the fit epoch)
    # this is when the fit mag is maximum (i.e. the faintest)
    # or if magsarefluxes = True, then this is when fit flux is minimum
    if not magsarefluxes:
        fitmagminind = npwhere(fitmags == npmax(fitmags))
    else:
        fitmagminind = npwhere(fitmags == npmin(fitmags))
    magseriesepoch = ptimes[fitmagminind]

    # assemble the returndict
    returndict = {
        'fittype': 'savgol',
        'fitinfo': {
            'windowlength': windowlength,
            'polydeg': polydeg,
            'fitmags': fitmags,
            'fitepoch': magseriesepoch
        },
        'fitchisq': fitchisq,
        'fitredchisq': fitredchisq,
        'fitplotfile': None,
        'magseries': {
            'times': ptimes,
            'phase': phase,
            'mags': pmags,
            'errs': perrs,
            'magsarefluxes': magsarefluxes
        }
    }

    # make the fit plot if required
    if plotfit and isinstance(plotfit, str):

        _make_fit_plot(phase,
                       pmags,
                       perrs,
                       fitmags,
                       period,
                       mintime,
                       magseriesepoch,
                       plotfit,
                       magsarefluxes=magsarefluxes)

        returndict['fitplotfile'] = plotfit

    return returndict
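
A similar hedged sketch for savgol_fit_magseries, using a smooth synthetic sinusoidal variable; again, the astrobase.lcfit import path is assumed from the file name above and all numbers are illustrative only.

# Hypothetical usage sketch: the astrobase.lcfit import path is assumed
# from the file name above; the data below are synthetic.
import numpy as np
from astrobase.lcfit import savgol_fit_magseries

period = 0.75                                      # assumed period in days
times = np.sort(np.random.uniform(0.0, 20.0, 1500))
phase = (times / period) % 1.0
mags = 11.5 + 0.3 * np.sin(2.0 * np.pi * phase)    # smooth sinusoidal variable
mags += np.random.normal(0.0, 0.02, times.size)
errs = np.full(times.size, 0.02)

# windowlength=None lets the routine pick an odd window from the data length
fit = savgol_fit_magseries(times, mags, errs, period,
                           windowlength=None, polydeg=2,
                           sigclip=30.0, verbose=False)
print(fit['fitinfo']['windowlength'], fit['fitchisq'])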