def return_numerical_ML_Bias(parameter_value, parameter_label, imageParams, order = 1, maxEval = 1000):
    """
    Returns the theoretically motivated ML estimator bias due to a finite data sample. This first instance only calculates the linear bias. It is most useful for a `brute force` approach to the ML bias correction, as (in the Gaussian case) the result depends on the image: K, J and I must therefore be calculated over many samples, so this is likely to be computationally expensive compared to the analytic method, where the statistics of the image are known.

    Differs from the analytic case in the sense that *this method does not assume that the statistics of the image are known*, so one must consider many simulated images to obtain the bias components.

    Requires:
    parameter_value: value of beta at which to calculate the bias (either the intrinsic parameter value, or the ML measurement itself)
    parameter_label: labels the parameter being varied. Must be of the form of the default model parameter dictionary.
    imageParams: model parameters set to the default values.
    order: not implemented yet. Defines to what order the bias is returned. Default is first order.
    maxEval: number of simulated images used to estimate the bias components.

    Side Effects: None

    Returns: bias to the stated order.
    """
    import numpy as np
    from copy import deepcopy

    import model_Production as modPro
    from image_measurement_ML import get_logLikelihood
    from derivatives import finite_difference_derivative

    ##Redefine input for ease of notation
    pVal = parameter_value; pLab = parameter_label

    ##Get derivative of log-likelihood wrt the parameter
    #If returnType = 'pix', the derivative is kept in pixel form
    ##Note: even though, for simple uncorrelated noise, the ML point does not depend on the noise value, the derivative does. Therefore the noise value passed in via imageParams, as measured on the image, must be accurate.

    nPix = np.prod(imageParams['stamp_size'])

    K = np.zeros(maxEval); J = np.zeros(maxEval); I = np.zeros(maxEval);
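    ## Per-image estimates of the bias components, each normalised per pixel and averaged below over
    ## the maxEval simulated images:
    ##   K[ev] : sum over pixels of the third derivative of lnL
    ##   J[ev] : sum over pixels of (first derivative * second derivative) of lnL
    ##   I[ev] : minus the sum over pixels of the second derivative of lnL (an estimate of the Fisher information per pixel)
    ## The first-order bias is then assembled below as (K + 2J)/(2*nPix*I^2).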

    for ev in range(maxEval):
        ## Get a new simulated image (`debug` here is assumed to be a module-level verbosity flag)
        image, imageParams = modPro.get_Pixelised_Model(imageParams, noiseType = 'G', Verbose = debug)

        ## Store imageParams in temporary storage to ensure that the dictionary is not overwritten
        iParams = deepcopy(imageParams)
        ## Take derivative of likelihood (function of image and model at entered parameter values) around the ML point (entered parameter values)
        dpixlnL = finite_difference_derivative(get_logLikelihood, pVal, args = [pLab, image, iParams, 'pix'], n = [1,2,3], dx = [0.0001, 0.0001], maxEval = 1000, eps = 1.e-3)

        K[ev] = dpixlnL[2].sum()/nPix
        
        J[ev] = (dpixlnL[0]*dpixlnL[1]).sum()/nPix
        
        I[ev] = -(dpixlnL[1].sum())/nPix

    K = K.mean()
    J = J.mean()
    I = I.mean()

    print 'Bias components found:'
    print 'K:', K
    print 'J:', J
    print 'I:', I

    bias = (K+(2.*J))/(2.*nPix*I*I)
    
    return bias
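
## A minimal, hypothetical usage sketch for return_numerical_ML_Bias, kept commented out because the
## content of the model-parameter dictionary is defined by model_Production rather than here. The
## parameter label 'size' and the dictionary keys shown are illustrative assumptions only; the real
## dictionary must contain every default model parameter expected by modPro.get_Pixelised_Model,
## including 'stamp_size' and an accurate 'noise' entry.
#
#   imageParams = {'stamp_size': (20, 20), 'noise': 0.01, 'size': 2.0}   # illustrative keys only
#   linear_bias = return_numerical_ML_Bias(2.0, 'size', imageParams, maxEval = 500)
#   print 'First-order ML bias estimate:', linear_bias
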
def differentiate_Pixelised_Model_Numerical(modelParams, pVal, pLab, n = [1], order = 3, interval = 0.1, eps = 1.e-3, maxEval = 100):
    """
    28/5/15
    Numerically differentiates a pixelised model with respect to a given parameter. The model must be produced by a routine which returns a gridded (and/or pixelised) image, and must be accessible using a function of the form f(x, *args), where x sets the value of the parameter being differentiated with respect to, and args allows this value to be correctly labelled in the input model parameter dictionary. These functions are hard-coded in this original version, but may be generalised to a user-defined function in future versions.

    This is useful for the numerical evaluation of the ML bias.

    Requires:
    -- modelParams: Dictionary of model parameters
    -- pVal: Value of the free parameters defining the point at which the derivative is taken
    -- pLab: String labels of the model parameters corresponding to pVal
    -- n: Order(s) to which the derivative is returned
    -- order: number of function evaluations used to evaluate the derivative (named to mimic the SciPy definition)
    -- interval: step size used in the finite difference method. As defined in finite_difference_derivative()
    -- eps: Tolerance for convergence. As defined in finite_difference_derivative()
    -- maxEval: Maximum number of derivative evaluations (and step-size intervals) considered in testing for convergence.

    Returns: the derivative(s) of the pixelised model, as returned by finite_difference_derivative().
    """
    import model_Production as modPro
    from derivatives import finite_difference_derivative

    result = finite_difference_derivative(modPro.get_Pixelised_Model_wrapFunction, pVal, args = [modelParams, pLab, 1], n = n, order = order, dx = interval, eps = eps, convergenceType = 'sum', maxEval = maxEval)

    return result
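
## Hypothetical usage sketch for differentiate_Pixelised_Model_Numerical; the parameter label 'e1'
## and the dictionary content are illustrative assumptions (see the caveats on imageParams above).
#
#   dModel = differentiate_Pixelised_Model_Numerical(modelParams, [0.0], ['e1'], n = [1, 2], interval = 0.01)
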
def analytic_GaussianLikelihood_Bias(parameter_value,
                                     parameter_label,
                                     imageParams,
                                     order=1,
                                     diffType='analytic'):
    """
    Returns the theoretically motivated ML estimator bias due to a finite data sample (noise bias) to first order (by default). This first instance only calculates the linear bias. It is only applicable to the case where the estimate is taken as the ML point of a Gaussian likelihood function, or by minimising chi^2, and where the noise variance is uniform across the stamp.

    This formalism removes the dependence of the derivative of lnL on the image by using the simplifying statistics of the image and its noise properties: in the Gaussian case, and in the formalism of `return_numerical_ML_Bias`, K = -3J, <image - model> = 0 and <(image - model)^2> = noise_var.

    NOTE:
    -- As the bias depends on the noise properties of the image (here assumed to be uncorrelated Gaussian noise), the `noise` parameter of the imageParams dictionary *must* be correct.
    -- Where diffType == `num` or `numeric`, finite differences are used to calculate the derivative, and a rough convergence test is applied as stated in the documentation for `finite_difference_derivative`.

    To Do:
    Edit to include the fully analytic derivative of the image

    Requires:
    -- parameter_value: value of beta at which to calculate the bias (either the intrinsic parameter value, or the ML measurement itself): *must be singular in this case*
    -- parameter_label: labels the parameter being varied. Must be of the form of the default model parameter dictionary.
    -- imageParams: parameters which define the image. Parameters which are not being varied must be set to default values. `noise` must be accurate.
    -- order: ONLY FIRST ORDER IS SUPPORTED. Defines to what order the bias is returned. Default is first order.
    -- diffType: Accepted values are `analytic` or `ana`, and `numerical` or `num` (case-insensitive). If the former, analytic (exact) derivatives of the model are used, as defined in modPro.differentiate_Pixelised_Model_Analytic in model_Production.py. If the latter, finite differences are used.

    Side Effects: None

    Returns: bias to the stated order for all parameters entered, as a 1D array.
    """
    import numpy as np
    from copy import deepcopy

    import model_Production as modPro
    import surface_Brightness_Profiles as SBPro
    from derivatives import finite_difference_derivative

    pVal = parameter_value
    pLab = parameter_label

    iParams = deepcopy(imageParams)

    ##-- Get the derivatives of the pixelised, noise-free model
    if diffType.lower() == 'numeric' or diffType.lower() == 'num':
        ##Get fully numeric derivative. This takes the derivative of the image as a whole: therefore note that it is likely to be harder to ensure that the derivative has converged.
        diffIm = finite_difference_derivative(
            modPro.get_Pixelised_Model_wrapFunction,
            pVal,
            args=[
                iParams, pLab, 1, {
                    'sbProfileFunc': SBPro.gaussian_SBProfile_Weave,
                    'noiseType': None,
                    'outputImage': False
                }
            ],
            n=[1, 2],
            dx=[0.001, 0.001],
            order=5,
            eps=1.e-3,
            convergenceType='sum',
            maxEval=100)
    elif diffType.lower() == 'analytic' or diffType.lower() == 'ana':
        diffIm = [
            modPro.differentiate_Pixelised_Model_Analytic(iParams,
                                                          pVal,
                                                          pLab,
                                                          1,
                                                          permute=True),
            modPro.differentiate_Pixelised_Model_Analytic(iParams,
                                                          pVal,
                                                          pLab,
                                                          2,
                                                          permute=True)
        ]
    else:
        raise RuntimeError(
            'analytic_GaussianLikelihood_Bias - Invalid differential type (diffType) entered:'
            + diffType)

    nPar = len(pVal)

    if (nPar == 1):
        #### ----------------------- Single Parameter Fit Case ---------------------------------------###
        ## This is verified to work with the old definition of the derivative function call. New definition may need extra [0]s added to end of all diffIms
        ##Original: bias = ( (diffIm[0]*diffIm[1]).sum() )/np.power( np.power(diffIm[0],2.).sum(), 2.);
        #Set up bias to return consistently for any number of input parameters
        bias = np.zeros((1, 1))

        ## get prefactor : -(sigma^2)/2
        preFactor = -1.0 * (imageParams['noise'] * imageParams['noise']) / 2.
        ## get bias as prefactor*(sum I'*I'')/(sum I'^2)^2
        bias[0, 0] = ((diffIm[0][0, :, :] * diffIm[1][0, :, :]).sum()) / np.power(np.power(diffIm[0][0, :, :], 2.).sum(), 2.)

        bias[0, 0] *= preFactor
        #### -----------------------------------------------------------------------------------------###

    else:

        ### ---------------------- Multi-Parameter Fit ---------------------------------------------- ###
        ### --- Verified to give identical results to single parameter case above for single parameter fit --- ###
        #Verified to work in single parameter case (17th Jul 2015)
        nPix = np.prod(diffIm[0][0].shape)

        I, K, J = bias_components(diffIm, imageParams['noise'])

        Iin = np.linalg.inv(I)

        KJ = 0.5 * K + J
        IKJ = [(Iin * KJ[i]).sum() for i in range(KJ.shape[0])]  ## Constitutes a single loop: IKJ should have dimension [nPar]
        bias = np.asarray([(Iin[s, :] * IKJ).sum() for s in range(nPar)])  ## Single loop; cast to an array so the per-pixel normalisation below is well-defined

        bias /= nPix

    return bias
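
## ------------------------------------------------------------------------------------------------ ##
## Self-contained sketch (not part of the module API): evaluates the single-parameter noise-bias
## formula used above, bias = -(sigma_n^2/2) * sum(I'*I'') / (sum(I'^2))^2, for a toy 1D Gaussian
## profile whose width is the fitted parameter. The profile and its derivatives below are
## illustrative assumptions and do not call into model_Production; the finite-difference lines
## mimic the diffType = 'num' branch only in spirit.
if __name__ == '__main__':
    import numpy as np

    x = np.linspace(-10., 10., 41)    # pixel centres of a toy 1D stamp
    A, s, noise = 1.0, 2.0, 0.05      # amplitude, true width, pixel noise RMS

    def toy_profile(width):
        return A * np.exp(-x * x / (2. * width * width))

    ## Analytic first and second derivatives of the toy profile with respect to the width
    dI = toy_profile(s) * (x * x / s**3)
    d2I = toy_profile(s) * (x**4 / s**6 - 3. * x * x / s**4)

    bias_analytic = -0.5 * noise * noise * (dI * d2I).sum() / ((dI * dI).sum())**2

    ## The same bias using central finite differences for the model derivatives
    h = 1.e-4
    dI_num = (toy_profile(s + h) - toy_profile(s - h)) / (2. * h)
    d2I_num = (toy_profile(s + h) - 2. * toy_profile(s) + toy_profile(s - h)) / (h * h)

    bias_numeric = -0.5 * noise * noise * (dI_num * d2I_num).sum() / ((dI_num * dI_num).sum())**2

    print('Toy single-parameter noise bias (analytic derivatives): %e' % bias_analytic)
    print('Toy single-parameter noise bias (finite differences)  : %e' % bias_numeric)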