Example #1
def contour_2dfit(x, y, data, fit=None, par=None, nfig=None, interp=True):
    """ Create a figure with the fit shown as contours

    :param x: input xaxis coordinates - array
    :param y: input yaxis coordinates - array (should have the same dim as x)
    :param data: input data points (should have the same dim as x)
    :param fit: fitted points (should have the same dim as x)
    :param par: parameters of the fit - Default is None. Only used when interp=True
    :param interp: whether to interpolate onto a regular grid (Default is True)
    :param nfig: figure number to use (Default is None, i.e. the current figure)

    :returns: Nothing

    """
    from pygme.binning.voronoibinning import derive_unbinned_field, guess_regular_grid
    from matplotlib.mlab import griddata
    from pygme.mgefunctions import convert_xy_to_polar
    from pygme.fitting.fitn2dgauss_mpfit import n_centred_twodgaussian_I

    if nfig is None:
        fig = plt.gcf()
    else:
        fig = plt.figure(nfig)
    fig.clf()

    ## If interpolation is requested
    if interp:
        if fit is None:
            print "ERROR: you did not provide 'fit' data"
            return
        xu, yu = guess_regular_grid(x, y)
        du = griddata(x, y, data, xu, yu, interp="nn")
        ## if par is provided, then we compute the real MGE function
        if par is None:
            fu = griddata(x, y, fit, xu, yu, interp="nn")
        else:
            r, t = convert_xy_to_polar(xu, yu)
            fu = n_centred_twodgaussian_I(pars=par)(r, t)
            fu = fu.reshape(xu.shape)
    ## Otherwise we just use the nearest neighbour
    else:
        xu, yu, du = derive_unbinned_field(x, y, data)
        xu, yu, fu = derive_unbinned_field(x, y, fit)

    CS = plt.contour(xu, yu, np.log10(du), colors="k", label="Data")
    CSfit = plt.contour(xu, yu, np.log10(fu), levels=CS.levels, colors="r", label="MGE Fit")
    plt.axes().set_aspect("equal")
    plt.legend()
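
A minimal usage sketch for contour_2dfit, on a synthetic single-Gaussian image built purely for illustration (the grid, sigma and axis ratio below are made up; x, y, data and fit would normally come from an MGE fit):

import numpy as np

## Regular grid of coordinates and a mock elliptical Gaussian "data" set
xg, yg = np.meshgrid(np.linspace(-10., 10., 50), np.linspace(-10., 10., 50))
x, y = xg.ravel(), yg.ravel()
data = np.exp(-(x**2 + (y / 0.7)**2) / (2.0 * 3.0**2))
fit = data.copy()   ## pretend the fit reproduces the data exactly

## Interpolated contours of the data (black) and of the fit (red) on figure 1
contour_2dfit(x, y, data, fit=fit, nfig=1, interp=True)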
Example #2
def multi_2dgauss_lmfit(xax, yax, data, ngauss=1, err=None, params=None, paramsPSF=None,
        fixed=None, limitedmin=None, limitedmax=None, minpars=None, 
        maxpars=None, force_Sigma_bounds=True, factor_Chi2=1.01, iprint=50, lmfit_method='leastsq',
        verbose=True, veryverbose=False, linear_method="nnls", default_q=0.3, default_PA=0.0, 
        samePA=True, sameQ=False, minSigma=None, maxSigma=None, lmfit_iprint=lmfit_iprint(), **fcnargs):
    """
    An improvement on gaussfit.  Lets you fit multiple 2D gaussians.

    Inputs:
       xax - x axis
       yax - y axis
       data - count axis
       ngauss - How many gaussians to fit?  Default 1 
       err - error corresponding to data

     The following parameters must all have length 3*ngauss.
        If ngauss > 1 and a length of 3 is given, they are replicated ngauss times;
        otherwise they are reset to the defaults:

       params - Fit parameters: [width, axis ratio, pa] * ngauss
              If len(params) % 3 == 0, ngauss will be set to len(params) / 3
       fixed - Is parameter fixed?
       limitedmin/minpars - set lower limits on each parameter (default: width>0)
       limitedmax/maxpars - set upper limits on each parameter

       force_Sigma_bounds: force the Sigmas to be within the radii range with some margin
                           default to True
       factor_Chi2 : if one Gaussian contributes less than (factor_Chi2 - 1) to the Chi2, we remove it
                     If set to 1, it means only zero Gaussians will be removed
                     With the default of 1.01, any Gaussian contributing less than 1% to the Chi2
                     will be removed
       minSigma, maxSigma: default to None but can be set for bounds for Sigma
       samePA : by default set to True. In that case, only one PA value is used as a free parameter
                (all Gaussians will share the same PA)
       sameQ : by default set to False. If set to True, only one axis ratio value is used as a free
                parameter (all Gaussians will share the same axis ratio)


       lmfit_method : method to pass on to lmfit ('leastsq', 'lbfgsb', 'anneal')
                      Default is leastsq (most efficient for the problem)

       linear_method: Method used to solve the linear part of the problem
                     Two methods are implemented: 
                         "nnls" -> NNLS (default, included in scipy)
                         "bvls" -> LLSP/BVLS in openopt (only if available)
                         The variable Exist_OpenOpt is (internally) set to True if available

       **fcnargs - dictionary which will be passed to LMFIT; you can for example use:
                   xtol, gtol, ftol, etc.

       iprint - if > 0, print every iprint iterations of lmfit. default is 50
       verbose - self-explanatory
       veryverbose - self-explanatory

    Returns:
       Fit parameters (Imax, Sigma, Q, PA per Gaussian, sorted by Sigma)
       The lmfit result (which contains the chi2 and the fit errors)
       Model evaluated on the input grid
    """
    import copy

    ## Default values
    lmfit_methods = ['leastsq', 'lbfgsb', 'anneal']

    ## Method check
    if lmfit_method not in lmfit_methods :
        print "ERROR: method must be one of the three following methods : ", lmfit_methods
        return 0, 0, 0

    ## Setting up epsfcn if not forced by the user
    ## Removing epsfcn to get the default machine precision
    ## if "epsfcn" not in fcnargs.keys() : fcnargs["epsfcn"] = 0.01
    if "xtol" not in fcnargs.keys() : fcnargs["xtol"] = 1.e-7
    if "gtol" not in fcnargs.keys() : fcnargs["gtol"] = 1.e-7
    if "ftol" not in fcnargs.keys() : fcnargs["ftol"] = 1.e-7

    ## Checking the method used for the linear part
    if linear_method == "bvls" and not Exist_OpenOpt :
        print "WARNING: you selected BVLS, but OpenOpt is not installed"
        print "WARNING: we will therefore use NNLS instead"
        linear_method = "nnls"

    ## Checking if the linear_method is implemented
    if linear_method.lower() not in dic_linear_methods.keys():
        print "ERROR: you should use one of the following linear_method: ", dic_linear_methods.keys()
        return 0, 0, 0

    f_Get_Iamp = dic_linear_methods[linear_method.lower()]
    ## If no coordinates is given, create them
    if xax is None:
        xax = np.arange(len(data))
    if yax is None:
        yax = np.arange(len(data))

    if not isinstance(xax,np.ndarray): 
        xax = np.asarray(xax)
    if not isinstance(yax,np.ndarray): 
        yax = np.asarray(yax)
    if not isinstance(data,np.ndarray): 
        data = np.asarray(data)
    xax = xax.ravel()
    yax = yax.ravel()
    datashape = data.shape
    data = data.ravel()

    ## Polar coordinates
    r, theta = convert_xy_to_polar(xax, yax)

    selxy = (xax != 0) & (yax != 0)
    rin = sqrt(xax[selxy]**2+yax[selxy]**2)

    if minSigma is None : minSigma = np.min(rin)
    if maxSigma is None : maxSigma = np.max(rin) / sqrt(SLOPE_outer)
    lminSigma = np.log10(minSigma)
    lmaxSigma = np.log10(maxSigma)
    DlSigma = 0.5 * (lmaxSigma - lminSigma) / ngauss

    if isinstance(params,np.ndarray): params=params.tolist()
    if params is not None : 
        if len(params) != ngauss and (len(params) / 3) > ngauss: 
            ngauss = len(params) / 3 
            if verbose :
                print "WARNING: Your input parameters do not fit the Number of input Gaussians"
                print "WARNING: the new number of input Gaussians is: ", ngauss

    ## Extracting the parameters for the PSF and normalising the Imax for integral = 1
    if paramsPSF is None : 
        paramsPSF = _default_parPSF
    paramsPSF = norm_PSFParam(paramsPSF)

    ## if no input parameters are given, we set up the guess as a log spaced sigma between min and max
    default_params = np.concatenate((np.log10(np.logspace(lminSigma + DlSigma, lmaxSigma - DlSigma, ngauss)), \
            np.array([default_q]*ngauss), np.array([default_PA]*ngauss))).reshape(3,ngauss).transpose().ravel()
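    ## Note: log10(logspace(a, b, n)) is simply n values linearly spaced between a and b,
    ## so default_params is ordered per Gaussian as
    ## [logSigma_1, q_1, PA_1, logSigma_2, q_2, PA_2, ...]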

    newdefault_minpars = copy.copy(default_minpars)
    newdefault_maxpars = copy.copy(default_maxpars)
    if force_Sigma_bounds :
        newdefault_minpars[0] = lminSigma
        newdefault_maxpars[0] = lmaxSigma
    else :
        newdefault_minpars[0] = lminSigma - np.log10(2.)
        newdefault_maxpars[0] = lmaxSigma + np.log10(2.)

    ## Set up the default parameters if needed
    params = set_parameters_and_default(params, default_params, ngauss)
    fixed = set_parameters_and_default(fixed, default_fixed, ngauss)
    limitedmin = set_parameters_and_default(limitedmin, default_limitedmin, ngauss)
    limitedmax = set_parameters_and_default(limitedmax, default_limitedmax, ngauss)
    minpars = set_parameters_and_default(minpars, newdefault_minpars, ngauss)
    maxpars = set_parameters_and_default(maxpars, newdefault_maxpars, ngauss)

    class input_residuals() :
        def __init__(self, iprint, verbose) :
            self.iprint = iprint
            self.verbose = verbose
            self.aprint = 0

    ## -----------------------------------------------------------------------------------------
    ## lmfit function which returns the residual from the best fit N 2D Gaussians
    ## Parameters are just sigma,q,pa - the amplitudes are optimised at each step
    ## -----------------------------------------------------------------------------------------
    def opt_lmfit(pars, parPSF, myinput=None, r=None, theta=None, err=None, data=None, f_Get_Iamp=None):
        """ Provide the residuals for the lmfit minimiser
            for a Multi 1D gaussian
        """

        # We retrieve the parameters
        pars_array = extract_mult2dG_params(pars)

        ## Derive the Normalised Gaussians for this set of parameters
        nGnorm, Iamp = f_Get_Iamp(pars_array, parPSF, r, theta, data)
        if err is None :
            res = fitn2dgauss_residuals1(nGnorm, Iamp)
        else :
            res = fitn2dgauss_residuals_err1(err, nGnorm, Iamp)
#        newp = (np.vstack((Iamp, pars_array.transpose()))).transpose()
#        if err is None :
#            res = fitn2dgauss_residuals(newp, parPSF, r, theta, data)
#        else :
#            res = fitn2dgauss_residuals_err(newp, parPSF, r, theta, data, err)
        lmfit_iprint(res, myinput, pars)
        return res
    ## -----------------------------------------------------------------------------------------

    ## Information about the parameters
    nameParam = ['logSigma', 'Q', 'PA']
    Lparams = Parameters()
    if verbose :
        print "--------------------------------------"
        print "GUESS:      Sig         Q         PA"
        print "--------------------------------------"
        for i in xrange(ngauss) :
            print "GAUSS %02d: %8.3e  %8.3f  %8.3f"%(i+1, 10**(params[3*i]), params[3*i+1], params[3*i+2])
    print "--------------------------------------"

    for i in xrange(ngauss) :
        Lparams.add(nameParam[0]+"%02d"%(i+1), value=params[3*i], min=minpars[3*i], max=maxpars[3*i], vary= not fixed[3*i])
        Lparams.add(nameParam[1]+"%02d"%(i+1), value=params[3*i+1], min=minpars[3*i+1], max=maxpars[3*i+1], vary= not fixed[3*i+1])
        Lparams.add(nameParam[2]+"%02d"%(i+1), value=params[3*i+2], min=minpars[3*i+2], max=maxpars[3*i+2], vary= not fixed[3*i+2])
    ## Adding indices to follow up the Gaussians we may remove
    Lparams.ind = range(ngauss)

    ## Setting the samePA option if True
    ## For this we set up the first PA to the default and 
    ## then use "expr" to say that all other PA are equal to the first one
    if samePA:
        Lparams = Set_SamePA_params(Lparams, ngauss, params[2], minpars[2], maxpars[2], not fixed[2])

    if veryverbose :
        for i in xrange(ngauss) :
            print "GAUSS %02d: %8.3e  %8.3f  %8.3f"%(i+1, 10**(params[3*i]), params[3*i+1], params[3*i+2])
        if samePA:
            print "WARNING: All PAs will be forced to one single value"
    print "--------------------------------------"

    ## Setting up the printing option
    myinput = input_residuals(iprint, verbose)

    ####################################
    ## Doing the minimisation with lmfit
    ####################################
    if verbose: 
        print "------ Starting the minimisation -------"
    result = minimize(opt_lmfit, Lparams, method=lmfit_method, args=(paramsPSF, myinput, r, theta, err, data, f_Get_Iamp), **fcnargs)
    ## Remove the Null Gaussians
    result.params.ind = range(ngauss)
    ngauss, Ind_ZGauss = Remove_Zero_2DGaussians(ngauss, nameParam, result, paramsPSF, r, theta, data, err, factor_Chi2,
            f_Get_Iamp, niter=1, verbose=veryverbose, samePA=samePA) 

    ## Recall the Minimizer function for a second iteration to get the new chi2 etc
    newresult = minimize(opt_lmfit, result.params, method=lmfit_method, args=(paramsPSF, myinput, r, theta, err, data, f_Get_Iamp), **fcnargs)
    ## Remove the Null Gaussians
    newresult.params.ind = result.params.ind 
    ngauss, Ind_ZGauss = Remove_Zero_2DGaussians(ngauss, nameParam, newresult, paramsPSF, r, theta, data, err, factor_Chi2,
            f_Get_Iamp, niter=2, verbose=veryverbose, samePA=samePA) 

    ## We add the Amplitudes to the array and renormalise them
    bestfit_params = extract_mult2dG_params(newresult.params)
    nGnorm, Iamp = f_Get_Iamp(bestfit_params, paramsPSF, r, theta, data)
    Ibestfit_params = (np.vstack((Iamp, bestfit_params.transpose()))).transpose()
    ## Changing the parameters back to Sigma (from log10(Sigma))
    Ibestfit_params[:,1] = 10**(Ibestfit_params[:,1])
    ## Converting the integrated amplitudes into peak values: Imax = flux / (2 pi Sigma^2 Q)
    Ibestfit_params[:,0] /= (2.0 * Ibestfit_params[:,1]**2 * Ibestfit_params[:,2] * pi)
    ## And we sort them with Sigma
    Ibestfit_params = Ibestfit_params[Ibestfit_params[:,1].argsort()]

    if verbose :
        print "=================================================="
        print "FIT:      Imax       Sig         Q           PA"
        print "=================================================="
        for i in xrange(ngauss) :
            print "GAUSS %02d: %8.3e  %8.3f  %8.3f  %8.3f"%(i+1, Ibestfit_params[i,0], Ibestfit_params[i,1], Ibestfit_params[i,2], Ibestfit_params[i,3])

        print "Chi2: ",newresult.chisqr," Reduced Chi2: ",newresult.redchi

    return Ibestfit_params, newresult, n_centred_twodgaussian_Imax(pars=Ibestfit_params, parPSF=paramsPSF)(r, theta).reshape(datashape)
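
A hedged usage sketch for multi_2dgauss_lmfit on a synthetic image (the grid and the "true" Gaussian parameters below are made up; lmfit and the module-level defaults such as the PSF are assumed to be available):

import numpy as np

## Synthetic 2D Gaussian "image" on a regular grid
xg, yg = np.meshgrid(np.linspace(-20., 20., 41), np.linspace(-20., 20., 41))
true_sigma, true_q = 4.0, 0.7
img = 100.0 * np.exp(-(xg**2 + (yg / true_q)**2) / (2.0 * true_sigma**2))

## Fit two Gaussians sharing a single PA, with the leastsq backend
bestpars, result, model = multi_2dgauss_lmfit(xg, yg, img, ngauss=2,
        lmfit_method='leastsq', samePA=True, verbose=False)
## bestpars columns are [Imax, Sigma, Q, PA], sorted by Sigma; model has the same shape as img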
Example #3
def multi_2dgauss_mpfit(xax, yax, data, ngauss=1, err=None, params=None, paramsPSF=None,
        fixed=None, limitedmin=None, limitedmax=None, minpars=None, 
        maxpars=None, force_Sigma_bounds=True, factor_Chi2=1.01, verbose=True, veryverbose=False, 
        linear_method="nnls", default_q=0.3, default_PA=0.0, samePA=True, minSigma=None, maxSigma=None, 
        mpfitprint=_mpfitprint(), **fcnargs):
    """
    An improvement on gaussfit.  Lets you fit multiple 2D gaussians.

    Inputs:
       xax - x axis
       yax - y axis
       data - count axis
       ngauss - How many gaussians to fit?  Default 1 
       err - error corresponding to data

     These parameters need to have length = 3*ngauss.  If ngauss > 1 and length = 3, they will
     be replicated ngauss times, otherwise they will be reset to defaults:
       params - Fit parameters: [width, axis ratio, pa] * ngauss
              If len(params) % 3 == 0, ngauss will be set to len(params) / 3
       fixed - Is parameter fixed?
       limitedmin/minpars - set lower limits on each parameter (default: width>0)
       limitedmax/maxpars - set upper limits on each parameter

       force_Sigma_bounds: force the Sigmas to be within the radii range with some margin
                           default to True
       factor_Chi2 : if one Gaussian contributes less than (factor_Chi2 - 1) to the Chi2, we remove it
                     If set to 1, it means only zero Gaussians will be removed
                     With the default of 1.01, any Gaussian contributing less than 1% to the Chi2
                     will be removed

       minSigma, maxSigma: default to None but can be set for bounds for Sigma

       linear_method: Method used to solve the linear part of the problem
                     Two methods are implemented: 
                         "nnls" -> NNLS (default, included in scipy)
                         "bvls" -> LLSP/BVLS in openopt (only if available)
                         The variable Exist_OpenOpt is (internally) set to True if available

       **fcnargs - Will be passed to MPFIT, you can for example use: 
                   xtol, gtol, ftol, quiet

       verbose - self-explanatory
       veryverbose - self-explanatory

    Returns:
       Fit parameters (Imax, Sigma, Q, PA per Gaussian, sorted by Sigma)
       The mpfit result (which contains the chi2 and the fit errors)
       Model evaluated on the input grid
    """
    import copy

    ## Set up some default parameters for mpfit
    if "xtol" not in fcnargs.keys() : fcnargs["xtol"] = 1.e-7
    if "gtol" not in fcnargs.keys() : fcnargs["gtol"] = 1.e-7
    if "ftol" not in fcnargs.keys() : fcnargs["ftol"] = 1.e-7
    if "quiet" not in fcnargs.keys() : fcnargs["quiet"] = True

    ## Checking the method used for the linear part
    if linear_method == "bvls" and not Exist_OpenOpt :
        print "WARNING: you selected BVLS, but OpenOpt is not installed"
        print "WARNING: we will therefore use NNLS instead"
        linear_method = "nnls"

    ## Checking if the linear_method is implemented
    if linear_method.lower() not in dic_linear_methods.keys():
        print "ERROR: you should use one of the following linear_method: ", dic_linear_methods.keys()
        return 0, 0, 0

    f_Get_Iamp = dic_linear_methods[linear_method.lower()]
    ## If no coordinates is given, create them
    if xax is None:
        xax = np.arange(len(data))
    if yax is None:
        yax = np.arange(len(data))

    if not isinstance(xax,np.ndarray): 
        xax = np.asarray(xax)
    if not isinstance(yax,np.ndarray): 
        yax = np.asarray(yax)
    if not isinstance(data,np.ndarray): 
        data = np.asarray(data)
    xax = xax.ravel()
    yax = yax.ravel()
    datashape = data.shape
    data = data.ravel()

    ## Polar coordinates
    r, theta = convert_xy_to_polar(xax, yax)

    selxy = (xax != 0) & (yax != 0)
    rin = sqrt(xax[selxy]**2+yax[selxy]**2)
    if minSigma is None : minSigma = np.min(rin)
    if maxSigma is None : maxSigma = np.max(rin) / sqrt(SLOPE_outer)
    lminSigma = np.log10(minSigma)
    lmaxSigma = np.log10(maxSigma)
    DlSigma = 0.5 * (lmaxSigma - lminSigma) / ngauss

    if isinstance(params,np.ndarray): params=params.tolist()
    if params is not None : 
        if len(params) != ngauss and (len(params) / 3) > ngauss: 
            ngauss = len(params) / 3 
            if verbose :
                print "WARNING: Your input parameters do not fit the Number of input Gaussians"
                print "WARNING: the new number of input Gaussians is: ", ngauss

    ## Extracting the parameters for the PSF and normalising the Imax for integral = 1
    if paramsPSF is None : 
        paramsPSF = _default_parPSF
    paramsPSF = norm_PSFParam(paramsPSF)

    ## if no input parameters are given, we set up the guess as a log spaced sigma between min and max
    default_params = np.concatenate((np.log10(np.logspace(lminSigma + DlSigma, lmaxSigma - DlSigma, ngauss)), \
            np.array([default_q]*ngauss), np.array([default_PA]*ngauss))).reshape(3,ngauss).transpose().ravel()

    newdefault_minpars = copy.copy(default_minpars)
    newdefault_maxpars = copy.copy(default_maxpars)
    if force_Sigma_bounds :
        newdefault_minpars[0] = lminSigma
        newdefault_maxpars[0] = lmaxSigma
    else :
        newdefault_minpars[0] = lminSigma - np.log10(2.)
        newdefault_maxpars[0] = lmaxSigma + np.log10(2.)

    ## Set up the default parameters if needed
    params = set_parameters_and_default(params, default_params, ngauss)
    fixed = set_parameters_and_default(fixed, default_fixed, ngauss)
    limitedmin = set_parameters_and_default(limitedmin, default_limitedmin, ngauss)
    limitedmax = set_parameters_and_default(limitedmax, default_limitedmax, ngauss)
    minpars = set_parameters_and_default(minpars, newdefault_minpars, ngauss)
    maxpars = set_parameters_and_default(maxpars, newdefault_maxpars, ngauss)

    ## -------------------------------------------------------------------------------------
    ## mpfit function which returns the residual from the best fit N 2D Gaussians
    ## Parameters are just sigma,q,pa - the amplitudes are optimised at each step
    ## Two versions are available depending on whether BVLS or NNLS is used (and available)
    ## -------------------------------------------------------------------------------------
    def mpfitfun(p, parPSF, fjac=None, r=None, theta=None, err=None, data=None, f_Get_Iamp=None, samePA=False):
        if samePA : p = regrow_PA(p)
        nGnorm, Iamp = f_Get_Iamp(p, parPSF, r, theta, data)
        if err is None : 
            return [0,fitn2dgauss_residuals1(nGnorm, Iamp)]
        else :
            return [0,fitn2dgauss_residuals_err1(err, nGnorm, Iamp)]
#        newp = (np.vstack((Iamp, p.reshape(ngauss,3).transpose()))).transpose()
#        if err is None : 
#            return [0,fitn2dgauss_residuals(newp, parPSF, r, theta, data)]
#        else :
#            return [0,fitn2dgauss_residuals_err(newp, parPSF, r, theta, data, err)]
    ## -------------------------------------------------------------------------------------

    ## Information about the parameters
    if verbose :
        print "--------------------------------------"
        print "GUESS:      Sig         Q         PA"
        print "--------------------------------------"
        for i in xrange(ngauss) :
            print "GAUSS %02d: %8.3e  %8.3f  %8.3f"%(i+1, 10**(params[3*i]), params[3*i+1], params[3*i+2])
    print "--------------------------------------"

    ## Information about the parameters
    parnames = {0:"LOGSIGMA",1:"AXIS RATIO",2:"POSITION ANGLE"}
    parinfo = [ {'n':ii, 'value':params[ii], 'limits':[minpars[ii],maxpars[ii]], 
        'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii], 
        'parname':parnames[ii%3]+str(ii/3+1)} for ii in xrange(len(params)) ]

    ## If samePA we remove all PA parameters except the last one
    ## We could use the 'tied' approach but we prefer setting up just one parameter
    if samePA : parinfo = shrink_PA(parinfo)

    ## Fit with mpfit of q, sigma, pa on xax, yax, and data (+err)
    fa = {'parPSF':paramsPSF, 'r': r, 'theta': theta, 'data': data, 'err':err, 'f_Get_Iamp':f_Get_Iamp, 'samePA':samePA}

    result = mpfit(mpfitfun, functkw=fa, iterfunct=mpfitprint, nprint=10, parinfo=parinfo, **fcnargs) 

    ## Getting these best fit values into the dictionnary
    if samePA : result.params = regrow_PA(result.params)
    bestparinfo = [ {'n':ii, 'value':result.params[ii], 'limits':[minpars[ii],maxpars[ii]], 
        'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii], 
        'parname':parnames[ii%3]+str(ii/3+1)} for ii in xrange(len(result.params)) ]

    ## Recompute the best amplitudes to output the right parameters
    ## And renormalising them
    nGnorm, Iamp = f_Get_Iamp(result.params, paramsPSF, r, theta, data)
#    Ibestpar_array = (np.vstack((Iamp, result.params.reshape(ngauss,3).transpose()))).transpose()
    bestpar_array = result.params.reshape(ngauss,3)

    ## Getting rid of the non-relevant Gaussians
    ## If parameters factor_Chi2 is set we use it as a threshold to remove gaussians
    ## Otherwise we just remove the zeros
    if err is None : nerr = np.ones_like(data)
    else : nerr =  err
    ## First get the Chi2 from this round
#    bestChi2 = fitn2dgauss_chi2_err(Ibestpar_array, paramsPSF, r, theta, data, nerr)
    bestChi2 = np.sum(fitn2dgauss_residuals1(nGnorm, Iamp)**2)
    result.ind = range(ngauss)

    k = 0
    Removed_Gaussians = []
    for i in xrange(ngauss) :
        ## Derive the Chi2 WITHOUT the ith Gaussian
        new_nGnorm, new_Iamp = f_Get_Iamp(np.delete(bestpar_array, i, 0), paramsPSF, r, theta, data)
        newChi2 = np.sum(fitn2dgauss_residuals1(new_nGnorm, new_Iamp)**2)
#        newChi2 = fitn2dgauss_chi2_err(np.delete(Ibestpar_array, i, 0), paramsPSF, r, theta, data, nerr)
        ## If this Chi2 is smaller than factor_Chi2 times the best value, then remove
        ## It just means that Gaussian is not an important contributor
        if newChi2 <= factor_Chi2 * bestChi2 :
            val = bestparinfo.pop(3*k)
            val = bestparinfo.pop(3*k)
            val = bestparinfo.pop(3*k)
            result.ind.pop(k)
            Removed_Gaussians.append(i+1)
        else : k += 1

    if veryverbose :
        if len(Removed_Gaussians) != 0 :
            print "WARNING Removed Gaussians ", Removed_Gaussians
            print "WARNING: (not contributing enough to the fit)"
    ngauss = len(result.ind)

    ## New minimisation after removing all the non relevant Gaussians
    if samePA : bestparinfo = shrink_PA(bestparinfo)
    newresult = mpfit(mpfitfun, functkw=fa, iterfunct=mpfitprint, nprint=10, parinfo=bestparinfo, **fcnargs) 

    newresult.ind = range(ngauss)
    if samePA : newresult.params = regrow_PA(newresult.params)
    bestfit_params = newresult.params.reshape(ngauss, 3)

    ## We add the Amplitudes to the array and renormalise them
    nGnorm, Iamp = f_Get_Iamp(bestfit_params, paramsPSF, r, theta, data)
    Ibestfit_params = (np.vstack((Iamp, bestfit_params.transpose()))).transpose()
    ## Going back to sigma from logsigma
    Ibestfit_params[:,1] = 10**(Ibestfit_params[:,1])
    Ibestfit_params[:,0] /= (2.0 * Ibestfit_params[:,1]**2 * Ibestfit_params[:,2] * pi)
    ## And we sort them with Sigma
    Ibestfit_params = Ibestfit_params[Ibestfit_params[:,1].argsort()]

    if newresult.status == 0:
        raise Exception(newresult.errmsg)

    if verbose :
        print "=================================================="
        print "FIT:      Imax       Sig         Q           PA"
        print "=================================================="
        for i in xrange(ngauss) :
            print "GAUSS %02d: %8.3e  %8.3f  %8.3f  %8.3f"%(i+1, Ibestfit_params[i,0], Ibestfit_params[i,1], Ibestfit_params[i,2], Ibestfit_params[i,3])

        print "Chi2: ",newresult.fnorm," Reduced Chi2: ",newresult.fnorm/len(data)

    return Ibestfit_params, newresult, n_centred_twodgaussian_Imax(pars=Ibestfit_params, parPSF=paramsPSF)(r, theta).reshape(datashape)
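
The mpfit variant takes essentially the same inputs; a hedged sketch, again on made-up data, passing a tighter ftol to mpfit through **fcnargs:

import numpy as np

xg, yg = np.meshgrid(np.linspace(-20., 20., 41), np.linspace(-20., 20., 41))
img = 50.0 * np.exp(-(xg**2 + (yg / 0.5)**2) / (2.0 * 2.5**2))

## xtol/gtol/ftol/quiet are forwarded to mpfit via **fcnargs
bestpars, mpresult, model = multi_2dgauss_mpfit(xg, yg, img, ngauss=3,
        samePA=True, verbose=False, ftol=1.e-9, quiet=True)
residual = img - model   ## quick check of the fit quality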
Example #4
def plot_2dfit_residuals(x, y, data, fit, PAmin=0.0, PAmax=360.0, nSectors=8, WedgeFactor=1.0, nfig=None, legend=False):
    """ Create a figure with the residuals and fit

    :param x: input xaxis coordinates - array
    :param y: input yaxis coordinates - array (should have the same dim as x)
    :param data: input data points (should have the same dim as x)
    :param fit: fitted points (should have the same dim as x)
    :param PAmin, PAmax: position angle range in degrees covered by the sectors (Defaults are 0 and 360)
    :param nSectors: number of angular sectors to plot (Default is 8)
    :param WedgeFactor: factor controlling the angular width of each wedge (Default is 1.0)
    :param nfig: figure number to use (Default is None, i.e. the current figure)
    :param legend: whether to add a legend to the plots (Default is False)

    :returns: Nothing

    """

    if nfig is None:
        fig = plt.gcf()
    else:
        fig = plt.figure(nfig)
    fig.clf()

    ## Making the arrays as 1D
    x_rav = x.ravel()
    y_rav = y.ravel()
    d_rav = data.ravel()
    f_rav = fit.ravel()

    lx = len(x_rav)
    ly = len(y_rav)
    ld = len(d_rav)
    lf = len(f_rav)

    ## checking that the dimensions are correct
    if (lx != ld) or (lx != lf) or (lx != ly):
        print "ERROR: dimensions for x, y, data, and fit are not the same"
        print " (respectively: %d %d %d and %d)" % (lx, ly, ld, lf)
        return

    ## Polar coordinates
    r, theta = convert_xy_to_polar(x_rav, y_rav)
    theta = np.degrees(theta)

    ## Selecting the points with respect to their sectors
    ## And sorting them out
    Sample_Theta = np.linspace(PAmin, PAmax, nSectors + 1)
    Step_Theta = Sample_Theta[1] - Sample_Theta[0]
    Min_Theta = Sample_Theta[:-1] - Step_Theta / WedgeFactor
    Max_Theta = Sample_Theta[:-1] + Step_Theta / WedgeFactor

    Sel_Theta = []
    for i in xrange(nSectors):
        newsel = np.argwhere((theta >= Min_Theta[i]) & (theta < Max_Theta[i]))
        Sel_Theta.append(newsel)

    xmin = np.min(np.abs(r[r > 0.0]))
    xmax = np.max(np.abs(r))
    plt.ioff()
    ## Plotting the results using matplotlib
    ax01 = fig.add_subplot(nSectors, 2, 1)
    ax02 = fig.add_subplot(nSectors, 2, 2)
    _plot_1dfit(
        ax01,
        ax02,
        r[Sel_Theta[0]],
        d_rav[Sel_Theta[0]],
        f_rav[Sel_Theta[0]],
        xmin=xmin,
        xmax=xmax,
        labelx=False,
        legend=legend,
    )
    for i in xrange(1, nSectors - 1):
        ax1 = fig.add_subplot(nSectors, 2, 2 * i + 1, sharex=ax01)
        ax2 = fig.add_subplot(nSectors, 2, 2 * i + 2, sharex=ax02)
        _plot_1dfit(
            ax1,
            ax2,
            r[Sel_Theta[i]],
            d_rav[Sel_Theta[i]],
            f_rav[Sel_Theta[i]],
            xmin=xmin,
            xmax=xmax,
            labelx=False,
            legend=legend,
        )
    ax1 = fig.add_subplot(nSectors, 2, 2 * nSectors - 1, sharex=ax01)
    ax2 = fig.add_subplot(nSectors, 2, 2 * nSectors, sharex=ax02)
    _plot_1dfit(
        ax1,
        ax2,
        r[Sel_Theta[nSectors - 1]],
        d_rav[Sel_Theta[nSectors - 1]],
        f_rav[Sel_Theta[nSectors - 1]],
        xmin=xmin,
        xmax=xmax,
        labelx=True,
        legend=legend,
    )
    plt.ion()
    plt.subplots_adjust(hspace=0, bottom=0.08, right=0.98, left=0.1, top=0.98, wspace=0.3)
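
A hedged sketch of how the data/model pair returned by one of the fitters above could be inspected sector by sector (the array names follow the previous sketch and are assumptions):

## img and model as obtained from multi_2dgauss_mpfit above (same shapes)
plot_2dfit_residuals(xg, yg, img, model, nSectors=8, nfig=2, legend=True)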
Example #5
def bin_to_sectors(x, y, data, **kwargs) :
    """
    This routine will bin your data (x, y, data) into angular sectors, defined by
    their number (covering 0 to 90 degrees by default) and their angular width.

    Ellipticity : ellipticity (scalar) which will be used for defining the rings
    Center : 2 numbers giving the center in X and Y (default is [0.,0.])
    PA : position angle measured from top (counter-clockwise, default is -90.0, meaning the X axis
         is already along the abscissa)
    
    NSectors : number of sectors. Default is 19 (to cover 0-90 degrees with 5 degrees sectors)
    FactorRadius : factor for sampling the radius. Default is 1.1, which provides about 24 points per decade.
    WidthAngle : total Width in degrees of each sector (default is 5)
    SymQuad: by default set to True, in which case the sectors are derived only from 0 to 90 degrees.
             If set to False, the binning is computed in sectors between 0 and 360 degrees.

    MinLevel : minimum level for the data to be taken into account (default is -999.0)
    MinBinLevel : minimum Binned level for the data to be taken into account (default is 0.)
    Gain : in case we need the Poissonian Error to be computed

    verbose : default to 0

    Return: 
            xout, yout, dataout, sigmaout
            Where xout, yout are the cartesian coordinates of the bins
            dataout is the binned value of that bin
            sigmaout is the estimated standard deviation
    """
    Ellipticity = kwargs.get('Ellipticity', 0.0)
    Center = kwargs.get('Center', [0.0,0.0])
    MinLevel = kwargs.get('MinLevel', -999.0)
    MinBinLevel = kwargs.get('MinBinLevel', 0.0)
    PA = kwargs.get('PA', -90.0)

    NSectors = kwargs.get('NSectors', 19)
    WidthAngle = kwargs.get('WidthAngle', 5.0)
    SymQuad = kwargs.get('SymQuad', True)

    Gain = kwargs.get('Gain', 1.0)
    verbose = kwargs.get('verbose', 0)

    ## FactorRadius (default 1.1) sets the radial sampling, i.e. about 24 radii per decade
    FactorRadius = kwargs.get('FactorRadius', 1.1)
    Nradii_per_decade = 1. / np.log10(FactorRadius)

    ## First check that the sizes of the input coordinates + data are the same
    if (np.size(x) != np.size(y)) | (np.size(x) != np.size(data)) :
        print "ERROR : please check the sizes of your input arrays -> they should be the same!"
        return [0.], [0.], [0.], [0.]

    ## Then selecting the good points
    seldata = data > MinLevel
    x = x[seldata].ravel()
    y = y[seldata].ravel()
    data = data[seldata].ravel()
    ## Checking if all is ok
    if np.size(data) == 0 :
        print "ERROR : after selecting points above MinLevel (%f), the array is empty!"%(MinLevel)
        return [0.], [0.], [0.], [0.]

    ## Then check that the ellipticity is 0 <= eps < 1
    if (Ellipticity < 0) | (Ellipticity >= 1) :
        print "ERROR : please check your input Ellipticity (%f) as it should be in [0,1[ !"%(Ellipticity)
        return [0.], [0.], [0.], [0.]

    ## We first recenter the data
    PARadian = np.radians(PA)
    rcx, rcy = rotxyC(x, y, cx=Center[0], cy=Center[1], angle=PARadian + np.pi/2.)
    ## We then convert to polar coordinates with x axis along as abscissa
    ## If the symmetry is forced we use the absolute values
    if SymQuad :
        radii, theta = convert_xy_to_polar(np.abs(rcx), np.abs(rcy))
    else :
        radii, theta = convert_xy_to_polar(rcx, rcy)
    ## We do the same but using the ellipticity now
    qaxis = 1. - Ellipticity
    radii_ell, theta_ell = convert_xy_to_polar(rcx, rcy / qaxis)

##     ## And getting a log spaced sample
##     sample_rell = np.logspace(np.log10(minr_ell), np.log10(maxr_ell), Nradii_per_decade * np.log10(maxr_ell/minr_ell))
##     ## Adding the central points with a negative radii
##     sample_rell = np.concatenate(([-1.0], sample_rell))

    ## Now we sample the radii - First getting the minimum and maximum radii
    minr_ell, maxr_ell = np.min(radii_ell[np.nonzero(radii_ell)]), np.max(radii_ell)
    ## We go from minimum to max * 1.1
    sampleR = np.logspace(np.log10(minr_ell), np.log10(maxr_ell), 
                 Nradii_per_decade * np.log10(maxr_ell/minr_ell) + 1)
    ## And add the centre
    sampleR = np.concatenate(([-1.0], sampleR))
    Nradii = len(sampleR)

    ## Now we sample the Angles - All following angles in Radian
    if SymQuad :
        center_Angles = np.linspace(0., np.pi/2., NSectors)
    else :
        center_Angles = np.linspace(-np.pi / 2., 3. * np.pi / 2., NSectors)
    stepAngle = (center_Angles[1] - center_Angles[0]) / 2.
    low_Angles = center_Angles - stepAngle
    sampleT = np.concatenate((low_Angles, [center_Angles[-1] + stepAngle]))
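    ## sampleT therefore holds the NSectors+1 sector edges (centres minus/plus half a step),
    ## matching the bins expected by np.histogram2d and np.digitize below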

    ## Creating the output Angle array by duplicating center_Angles Nradii-1 times
    thetaout = np.repeat(center_Angles[np.newaxis,:], Nradii-1, 0)
    ## Now we select for each sector the right points and compute the new binned data and errors
    radout = np.zeros_like(thetaout)
    dataout = np.zeros_like(radout) - 999.0
    sigmaout = np.zeros_like(radout)

##     ################ OLD - SLOW #################################################################################
##     def ChoiceMean(x) :
##         lx = len(x)
##         if lx > 10 : return robust_mean(x)
##         elif lx > 0 : return x.mean()
##         else : return -999.0
## 
##     def ChoiceStd(x) :
##         lx = len(x)
##         if lx > 10 : return robust_mean(x)
##         elif lx > 0 : return np.sqrt(Gain * x.sum()) / lx
##         else : return -999.0
## 
##     def ChoiceRadii(x) :
##         lx = len(x)
##         return np.average(x[:lx/2], weights=x[lx/2:])
## 
##     if OPTION_Scipy :
##         if verbose : print "Using Scipy Version (as scipy 0.11.0 or later is available)"
##         dradii_ell = np.concatenate((radii_ell, radii_ell))
##         dtheta = np.concatenate((theta, theta))
##         ddata = np.concatenate((radii_ell, data))
##         dataout = binned_statistic_2d(radii_ell, theta, data, bins=[sampleR, sampleT], statistic=ChoiceMean)[0]
##         sigmaout = binned_statistic_2d(radii_ell, theta, data, bins=[sampleR, sampleT], statistic=ChoiceStd)[0]
##         radout = binned_statistic_2d(dradii_ell, dtheta, ddata, bins=[sampleR, sampleT], statistic=ChoiceRadii)[0]
##     ################ OLD - SLOW #################################################################################

    ## Counting the number of points per bin
    histBins = np.histogram2d(radii_ell, theta, bins=[sampleR, sampleT])[0]
    ## We use the elliptical radius, but the circular angle
    ## We now get which bins each point gets into
    digitR = np.digitize(radii_ell, sampleR)
    digitT = np.digitize(theta, sampleT)
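    ## np.digitize returns 1-based bin indices (0 meaning below the first edge),
    ## hence the comparisons with j+1 and i+1 in the loop below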

    ## And we loop over the Sectors
    for j in xrange(NSectors) :
        if verbose :
             print "Section %02d starting"%(j+1)
        ## We select the bins within that sector
        selJ = (digitT == j+1)
        dataJ = data[selJ]
        radiiJ = radii[selJ]
        digitRJ = digitR[selJ]
        ## We select the bins which have between 1 and 10 points, and those with more than 10, in that sector
        selH0 = np.where((histBins[:,j] > 0) & (histBins[:,j] <= 10))[0]
        selH10 = np.where(histBins[:,j] > 10)[0]
        ## Then we make the calculation for the two species
        ## The ones which have more than 10 points and the ones which have at
        ## least one point.
        for i in selH0 :
            spoints = (digitRJ == i+1) 
            dataS = dataJ[spoints]
            radout[i, j] = np.average(radiiJ[spoints], weights=dataS)
            dataout[i,j] = dataS.mean()
            sigmaout[i,j] = np.sqrt(Gain * dataS.sum()) / histBins[i,j]
        for i in selH10 :
            spoints = (digitRJ == i+1)
            dataS = dataJ[spoints]
            radout[i, j] = np.average(radiiJ[spoints], weights=dataS)
            dataout[i,j] = robust_mean(dataS)
            sigmaout[i,j] = robust_sigma(dataS)

    ## Final selection without the negative points
    selfinal = (dataout > -999) & (dataout > MinBinLevel)
    ## Finally converting it back to x, y
    xout, yout = convert_polar_to_xy(radout[selfinal], thetaout[selfinal] + np.pi/2. + PARadian)
    return xout, yout, dataout[selfinal], sigmaout[selfinal]
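
A hedged usage sketch for bin_to_sectors on the same synthetic image, using keyword names from the docstring above (the Ellipticity and PA values are arbitrary):

xs, ys, ds, sigs = bin_to_sectors(xg, yg, img, NSectors=19,
        Ellipticity=0.3, PA=-90.0, MinLevel=0.0, Gain=1.0, verbose=0)
## xs, ys are the bin coordinates, ds the binned values, sigs the estimated scatter;
## the binned profile could then be fed back to e.g. multi_2dgauss_mpfit(xs, ys, ds, err=sigs, ngauss=4)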