def differentiate_logLikelihood_Gaussian_Analytic(parameters, pLabels, image, setParams, modelLookup = None, returnType = None, order = 1, signModifier = -1.):
    import generalManipulation
    import model_Production as modPro
    from surface_Brightness_Profiles import gaussian_SBProfile_Weave
    '''
    Returns the analytic derivative of the Gaussian log-likelihood (ignoring the parameter-independent prefactor, whose derivative is zero) for the parameters labelled by pLabels.
    Uses the analytic derivative of the pixelised model, as given by the differentiate_Pixelised_Model_Analytic routine of the model_Production module.

    *** Note: `noise` as defined in setParams must be the noise_std, and must accurately describe the noise properties of the image. ***

    Requires:
    parameters: flattened array of parameter values to vary (allows an external program to set the variation in these params)
    pLabels: tuple of the same length as `parameters`, used to identify the parameters being varied. These labels must match the modelParameter dictionary keys used in setting up the model
    image: 2D <ndarray> of the pixelised image
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: an instance of the model lookup table, as set in the model_Production module
    returnType: IGNORED, but included so that this method mimics the call signature of the log-likelihood evaluation routine when used as part of a pre-fab minimisation routine.
    order: sets the order to which derivatives are taken. If order == 1, the return is an ndarray of length len(parameters) containing the first derivative with respect to each parameter. If order == 2, the return is a two-dimensional ndarray, where element i,j gives the second derivative wrt parameter i and parameter j. Order >= 3 or <= 0 is not supported.
    signModifier: default -1. The result is multiplied by abs(signModifier)/signModifier to change the sign of the output. This is required because the lnL routine actually returns -lnL = chi^2 where a minimisation routine is used. Thus, where the minimisation uses first derivatives, signModifier should be positive, whilst for other applications (such as the Fisher error) one requires the derivative of lnL, and so signModifier must be negative. The absolute value of signModifier is unimportant.

    Returns:
    [dlnL/dbeta], repeated for all beta in order <1D ndarray>: derivative of -1*log-likelihood evaluated at the entered model parameters, if order == 1
    [[dlnL/dbeta_i dbeta_j]], repeated for all beta in order <2D ndarray>: second derivative of -1*log-likelihood evaluated at the entered model parameters, if order == 2

    Possible Extensions:
    -- In calculating second-order derivatives, a nested loop is used. This is likely to be slow, and as this is used in producing Fisher errors (and thus done at every run-time), it could be a bottleneck on the measurement of the ML point where errors are used.

    Tests:
    -- Fisher error agrees well with simulated output for the error.
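
    Example (an illustrative sketch; `image`, `setParams`, `mlPoint`, and the parameter labels are hypothetical names, assumed as described above):
        # Gradient wrt size and e1, e.g. as fprime in a minimisation routine:
        #   grad = differentiate_logLikelihood_Gaussian_Analytic([1.2, 0.1], ('size', 'e1'), image, setParams, order = 1, signModifier = 1.)
        # Second-derivative (Fisher) matrix at the ML point:
        #   fisher = differentiate_logLikelihood_Gaussian_Analytic(mlPoint, ('size', 'e1'), image, setParams, order = 2, signModifier = -1.)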
    '''
    
    ##To be useful as part of a minimisation routine, the arguments passed to this function must be the same as those passed to the ln-likelihood evaluation. This suggests possibly two routines: one which, like the model differentiation itself, just returns the various derivatives, and a wrapper routine which produces only the relevant derivatives required for minimisation
    ## Third order is ignored for now, as this would require an edit to the method of calculating model derivatives, and it is unlikely that a third-order derivative would ever really be necessary (except in the case where an analytic derivative of the model is wanted for the calculation of the bias, where simulations over many images are used: usually, the known statistics of the Gaussian likelihood can be used to remove this necessity anyway).


    ### Only first derivatives are needed for minimisation, so for now this is coded primarily to deal with first derivatives.
    ### Therefore, n = 1, permute = False by default
    ### Note that this code is unlikely to speed up any computation provided that the derivative is calculated using SymPy. Therefore this must be addressed.

    ### Set up model parameters as input
    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    if(setParams['stamp_size'] != image.shape):
        raise RuntimeError('differentiate_logLikelihood_Gaussian_Analytic - stamp size passed does not match image:', str(setParams['stamp_size']), ':', str( image.shape))

    ##Check whether the parameters input are iterable, and assign to a tuple if not: this allows both `parameters' and `pLabels' to be passed as e.g. a float and a string and the method still to be used as is
    parameters = generalManipulation.makeIterableList(parameters)
    pLabels = generalManipulation.makeIterableList(pLabels)
    if(len(parameters) != len(pLabels)):
        raise ValueError('differentiate_logLikelihood_Gaussian_Analytic - parameters and labels entered do not have the same length (iterable test)')

    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)

    ''' Get Model'''
    if(modelLookup is not None and modelLookup['useLookup']):
        model = np.array(modPro.return_Model_Lookup(modelLookup, parameters)[0]) #First element of this routine is the model image itself
    else:
        model = modPro.user_get_Pixelised_Model(modelParams, sbProfileFunc = gaussian_SBProfile_Weave)[0]


    ''' Get model derivatives '''    
    modDer = modPro.differentiate_Pixelised_Model_Analytic(modelParams, parameters, pLabels, n = 1, permute = False)
    #modDer stores only the n'th derivative of all parameters entered, stored as an nP*nPix*nPix array.

    if(order == 2):
        ##Calculate 2nd derivative also
        modDer2 = modPro.differentiate_Pixelised_Model_Analytic(modelParams, parameters, pLabels, n = 2, permute = True)
            #modDer2 stores the 2nd derivative of all parameters entered, stored as an nP*nP*nPix*nPix array.

    ##Construct the result to be returned. For order == 1 this is a 1D array of length nP, where each element corresponds to the gradient in that parameter direction
    nP = len(parameters)
    delI = image - model

    if(order == 1):
        res = np.zeros(nP)
        
        ##Create tdelI, which stores delI in the same shape as modDer by adding a leading dimension
        tdelI = np.zeros(modDer.shape)
        tdelI[:] = delI.copy()
        ##Alternatively: tdelI = np.repeat(delI.reshape((1,)+delI.shape), modDer.shape[0], axis = 0)

        ##Set the derivative as sum_pix(delI*derI)/sig^2 for all parameters entered
        ## ReturnTypes other than sum could be implemented by removing the sum parts of this relation; however, the implementation of fprime in the minimisation routines requires the return to be a 1D array containing the gradient in each direction.
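        ## Derivation (a brief sketch): for lnL = -sum_pix (I - M)^2 / (2*sig^2), one has
        ##   d(lnL)/dbeta = (1/sig^2) * sum_pix (I - M) * dM/dbeta,
        ## so the sum below is sum_pix(delI*derI); the overall sign convention is set by signModifier.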
        res = (tdelI*modDer).sum(axis = -1).sum(axis = -1)
        res /= (signModifier/abs(signModifier))*modelParams['noise']*modelParams['noise']
    elif(order == 2):
        res = np.zeros((nP,nP))
        ##This could and should be sped up using two single loops rather than a nested loop, or by defining delI and dIm*dIm in the same dimension as modDer2
        ## An alternative speed-up would be to implement this with Weave
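        ## Derivation (a brief sketch): for lnL = -sum_pix (I - M)^2 / (2*sig^2),
        ##   d^2(lnL)/dbeta_i dbeta_j = (1/sig^2) * sum_pix [ (I - M) * d^2M/dbeta_i dbeta_j - dM/dbeta_i * dM/dbeta_j ],
        ## which is the summand of the loop below; the overall sign convention is set by signModifier.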
        for i in range(nP):
            for j in range(nP):
                res[i,j] = (delI*modDer2[i,j] - modDer[i]*modDer[j]).sum(axis = -1).sum(axis = -1)

        res /= (signModifier/abs(signModifier))*modelParams['noise']*modelParams['noise']


    return res
def differentiate_logLikelihood_Gaussian_Analytic(parameters,
                                                  pLabels,
                                                  image,
                                                  setParams,
                                                  modelLookup=None,
                                                  returnType=None,
                                                  order=1,
                                                  signModifier=-1.):
    import generalManipulation
    import model_Production as modPro
    from surface_Brightness_Profiles import gaussian_SBProfile_CXX
    '''
    Returns the analytic derivative of the Gaussian log-likelihood (ignoring the parameter-independent prefactor, whose derivative is zero) for the parameters labelled by pLabels.
    Uses the analytic derivative of the pixelised model, as given by the differentiate_Pixelised_Model_Analytic routine of the model_Production module.

    *** Note: `noise` as defined in setParams must be the noise_std, and must accurately describe the noise properties of the image. ***

    Requires:
    parameters: flattened array of parameter values to vary (allows an external program to set the variation in these params)
    pLabels: tuple of the same length as `parameters`, used to identify the parameters being varied. These labels must match the modelParameter dictionary keys used in setting up the model
    image: <ndarray> of the pixelised image, flattened, or a 2D stack of flattened realisations
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: an instance of the model lookup table, as set in the model_Production module
    returnType: IGNORED, but included so that this method mimics the call signature of the log-likelihood evaluation routine when used as part of a pre-fab minimisation routine.
    order: sets the order to which derivatives are taken. If order == 1, the return is an ndarray of length len(parameters) containing the first derivative with respect to each parameter. If order == 2, the return is a two-dimensional ndarray, where element i,j gives the second derivative wrt parameter i and parameter j. Order >= 3 or <= 0 is not supported.
    signModifier: default -1. The result is multiplied by abs(signModifier)/signModifier to change the sign of the output. This is required because the lnL routine actually returns -lnL = chi^2 where a minimisation routine is used. Thus, where the minimisation uses first derivatives, signModifier should be positive, whilst for other applications (such as the Fisher error) one requires the derivative of lnL, and so signModifier must be negative. The absolute value of signModifier is unimportant.

    Returns:
    [dlnL/dbeta], repeated for all beta in order <1D ndarray>: derivative of -1*log-likelihood evaluated at the entered model parameters, if order == 1
    [[dlnL/dbeta_i dbeta_j]], repeated for all beta in order <2D ndarray>: second derivative of -1*log-likelihood evaluated at the entered model parameters, if order == 2

    Possible Extensions:
    -- In calculating second-order derivatives, a nested loop is used. This is likely to be slow, and as this is used in producing Fisher errors (and thus done at every run-time), it could be a bottleneck on the measurement of the ML point where errors are used.

    Tests:
    -- Fisher error agrees well with simulated output for the error.
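
    Example (an illustrative sketch; hypothetical names, with `images` holding nReal realisations):
        # grad = differentiate_logLikelihood_Gaussian_Analytic([1.2, 0.1], ('size', 'e1'), images.reshape(nReal, -1), setParams, order = 1, signModifier = 1.)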
    '''


    ##To be useful as part of a minimisation routine, the arguments passed to this function must be the same as those passed to the ln-likelihood evaluation. This suggests possibly two routines: one which, like the model differentiation itself, just returns the various derivatives, and a wrapper routine which produces only the relevant derivatives required for minimisation
    ## Third order is ignored for now, as this would require an edit to the method of calculating model derivatives, and it is unlikely that a third-order derivative would ever really be necessary (except in the case where an analytic derivative of the model is wanted for the calculation of the bias, where simulations over many images are used: usually, the known statistics of the Gaussian likelihood can be used to remove this necessity anyway).

    ### Only first derivatives are needed for minimisation, so for now this is coded primarily to deal with first derivatives.
    ### Therefore, n = 1, permute = False by default

    ### Set up model parameters as input
    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    ##Check whether the parameters input are iterable, and assign to a tuple if not: this allows both `parameters' and `pLabels' to be passed as e.g. a float and a string and the method still to be used as is
    parameters = generalManipulation.makeIterableList(parameters)
    pLabels = generalManipulation.makeIterableList(pLabels)
    if (len(parameters) != len(pLabels)):
        raise ValueError(
            'differentiate_logLikelihood_Gaussian_Analytic - parameters and labels entered do not have the same length (iterable test)'
        )

    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)
    ''' Get Model'''
    if (modelLookup is not None and modelLookup['useLookup']):
        model = np.array(
            modPro.return_Model_Lookup(modelLookup, parameters)
            [0])  #First element of this routine is the model image itself
    else:
        model = modPro.user_get_Pixelised_Model(
            modelParams, sbProfileFunc=gaussian_SBProfile_CXX)[0]
    ''' Get model derivatives '''
    modDer = modPro.differentiate_Pixelised_Model_Analytic(modelParams,
                                                           parameters,
                                                           pLabels,
                                                           n=1,
                                                           permute=False)
    #modDer stores only the n'th derivative of all parameters entered, stored as an nP*nPix*nPix array.

    modDer2 = None
    if (order == 2):
        ##Calculate 2nd derivative also
        modDer2 = modPro.differentiate_Pixelised_Model_Analytic(modelParams,
                                                                parameters,
                                                                pLabels,
                                                                n=2,
                                                                permute=True)
        #modDer2 stores the 2nd derivative of all parameters entered, stored as an nP*nP*nPix*nPix array.

    #Flatten and reshape the model and model-derivative images to reflect the form of the input image (which can be multi-realisation)
    model = model.flatten()
    modDer = modDer.reshape((modDer.shape[0], -1))
    if (modDer2 is not None):
        modDer2 = modDer2.reshape((modDer2.shape[0], modDer2.shape[1], -1))
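
    ## Shape convention (a brief sketch): after flattening, model is (nPix,), modDer is (nP, nPix),
    ## and modDer2 (if set) is (nP, nP, nPix); a multi-realisation image is expected as (nReal, nPix).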

    if (len(image.shape) == 2):
        ## Repeat each nReal times
        nRepeat = image.shape[0]

        model = np.tile(model, (nRepeat, 1))

        modDer = np.array(
            [np.tile(modDer[i], (nRepeat, 1)) for i in range(modDer.shape[0])])

        #There's most likely a better (i.e. quicker) way to do this
        if (modDer2 is not None):
            modDer2 = np.array([[
                np.tile(modDer2[i, j], (nRepeat, 1))
                for j in range(modDer2.shape[1])
            ] for i in range(modDer2.shape[0])])

    # print "Shape check:"
    # print "Image:", image.shape
    # print "Model:", model.shape
    # print "Derivative:", modDer.shape
    # if(modDer2 is not None):
    #     print "2nd Derivative: ", modDer2.shape
    # raw_input("Check")

    ##Construct the result to be returned. For order == 1 this is a 1D array of length nP, where each element corresponds to the gradient in that parameter direction
    nP = len(parameters)
    delI = image - model

    if (order == 1):
        res = np.zeros(nP)

        ##Create tdelI, which stores delI in the same shape as modDer by adding a leading dimension
        tdelI = np.zeros(modDer.shape)
        tdelI[:] = delI.copy()
        ##Alternatively: tdelI = np.repeat(delI.reshape((1,)+delI.shape), modDer.shape[0], axis = 0)

        ##Set the derivative as sum_pix(delI*derI)/sig^2 for all parameters entered
        ## ReturnTypes other than sum could be implemented by removing the sum parts of this relation; however, the implementation of fprime in the minimisation routines requires the return to be a 1D array containing the gradient in each direction.
        res = (tdelI * modDer).sum(axis=-1).sum(axis=-1)

    elif (order == 2):
        res = np.zeros((nP, nP))
        ##This could and should be sped up using two single loops rather than a nested loop, or by defining delI and dIm*dIm in the same dimension as modDer2
        ## An alternative speed-up would be to implement this with CXX
        for i in range(nP):
            for j in range(nP):
                res[i, j] = (delI * modDer2[i, j] -
                             modDer[i] * modDer[j]).sum(axis=-1).sum(axis=-1)

    res /= (signModifier /
            abs(signModifier)) * modelParams['noise'] * modelParams['noise']

    return res
def get_logLikelihood(parameters, pLabels, image, setParams, modelLookup = None, returnType = 'sum'):
    import math, sys
    import model_Production as modPro
    import surface_Brightness_Profiles as SBPro
    import generalManipulation
    """
    Returns the (-1.)*log-likelihood as a Gaussian, with lnL propto (I-Im)^2/sigma_n^2, where Im is the model image defined by dictionary ``modelParams'', I is the image being analysed, and sigma_n is the pixel noise.
    Minimisation routines should be directed to this function.

    Requires:
    parameters: flattened array of parameter values for the free parameters (allows an external program to set the variation in these params)
    pLabels: string tuple of the same length as `parameters`, used to identify the parameters being varied. These labels must match the modelParameter dictionary keys used in setting up the model.
    image: 2D <ndarray> of the pixelised image.
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: an instance of the model lookup table, as set in the model_Production module. If None, the pixelised model image is re-evaluated for each change in parameters.
    returnType (default sum):
    ---`sum`: total log-likelihood, summing over all pixels
    ---`pix`: log-likelihood evaluated per pixel. Returns an ndarray of the same shape as the input image
    ---`all`: list [lnL, pixlnL] containing both of the above

    Returns:
    lnL <scalar>: -1*log-likelihood evaluated at the entered model parameters
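
    Example (an illustrative sketch; the fmin usage assumes scipy is available, and the parameter labels are hypothetical):
        # from scipy.optimize import fmin
        # mlPoint = fmin(get_logLikelihood, x0 = [1.2, 0.1], args = (('size', 'e1'), image, setParams, modelLookup, 'sum'))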

    """

    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    if(setParams['stamp_size'] != image.shape):
        raise RuntimeError('get_logLikelihood - stamp size passed does not match image:', str(setParams['stamp_size']), ':', str( image.shape))

    parameters = generalManipulation.makeIterableList(parameters)
    pLabels = generalManipulation.makeIterableList(pLabels)
    if(len(parameters) != len(pLabels)):
        raise ValueError('get_logLikelihood - parameters and labels entered do not have the same length (iterable test): parameters:', str(parameters), ' labels:', str(pLabels))


    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)


    #Test reasonable model values - Effectively applying a hard prior
    if(math.sqrt(modelParams['SB']['e1']**2. + modelParams['SB']['e2']**2.) >= 0.99):
        ##Set log-probability to be as small as possible
        return sys.float_info.max/10 #factor of 10 to avoid any chance of memory issues here
        #raise ValueError('get_logLikelihood - Invalid ellipticity values set')
    if(modelParams['SB']['size'] <= 0.):
        return sys.float_info.max/10

    ''' Get Model'''
    if(modelLookup is not None and modelLookup['useLookup']):
        model = np.array(modPro.return_Model_Lookup(modelLookup, parameters)[0]) #First element of this routine is the model image itself
    else:
        model, disc = modPro.user_get_Pixelised_Model(modelParams, sbProfileFunc = SBPro.gaussian_SBProfile_Weave)


    if(model.shape != image.shape):
        raise ValueError('get_logLikelihood - model returned is not of the same shape as the input image.')
    
    #Construct the log-likelihood assuming Gaussian noise. As this will be minimised, the preceding factor of -1 is removed
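    ## i.e. -lnL = sum_pix (I - M)^2 / (2*sigma_n^2), up to a parameter-independent normalisation, with sigma_n = modelParams['noise']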
    if(vverbose):
        print 'Noise in ln-Like evaluation:', modelParams['noise']
    pixlnL =  (np.power(image-model,2.))
    lnL = pixlnL.sum()
    pixlnL *= 0.5/(modelParams['noise']**2.)
    lnL *= 0.5/(modelParams['noise']**2.)

    if(vverbose):
        print 'lnL:', lnL, [ str(pLabels[i])+':'+str(parameters[i]) for i in range(len(pLabels))]

    ##The model is noise-free, so the noise must be separately measured and passed in
    ## The answer is independent of the noise provided it is invariant across the image
    #lnL2 = 0.5*( (np.power(image-model,2.)).sum()/(modelParams['noise']**2.))
    if(returnType.lower() == 'sum'):
        return lnL
    elif(returnType.lower() == 'pix'):
        return pixlnL
    elif(returnType.lower() == 'all'):
        return [lnL, pixlnL]
    else:
        raise ValueError('get_logLikelihood - returnType not recognised:', str(returnType))
def get_logLikelihood(parameters,
                      pLabels,
                      image,
                      setParams,
                      modelLookup=None,
                      returnType='sum',
                      signModifier=1,
                      callCount=0):
    import math, sys
    import model_Production as modPro
    import surface_Brightness_Profiles as SBPro
    import generalManipulation
    """
    Returns the (-1.)*log-likelihood as a Gaussian, with lnL propto (I-Im)^2/sigma_n^2, where Im is the model image defined by dictionary ``modelParams'', I is the image being analysed, and sigma_n is the pixel noise. The image may be a single flattened image or a 2D stack of flattened realisations.
    Minimisation routines should be directed to this function.

    Requires:
    parameters: flattened array of parameter values for the free parameters (allows an external program to set the variation in these params)
    pLabels: string tuple of the same length as `parameters`, used to identify the parameters being varied. These labels must match the modelParameter dictionary keys used in setting up the model.
    image: <ndarray> of the pixelised image, flattened, or a 2D stack of flattened realisations.
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: an instance of the model lookup table, as set in the model_Production module. If None, the pixelised model image is re-evaluated for each change in parameters.
    returnType (default sum):
    ---`sum`: total log-likelihood, summing over all pixels (and realisations)
    ---`pix`: log-likelihood evaluated per pixel. Returns an ndarray
    ---`all`: list [lnL, pixlnL] containing both of the above
    signModifier (default 1): the result is multiplied by signModifier/abs(signModifier), setting the sign of the output.
    callCount: call counter, incremented on entry; included to mimic the call signature expected by some external minimisation routines.

    Returns:
    lnL <scalar>: -1*log-likelihood evaluated at the entered model parameters
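
    Example (an illustrative sketch; hypothetical names, with `images` a stack of nReal flattened realisations):
        # lnL = get_logLikelihood([1.2, 0.1], ('size', 'e1'), images.reshape(nReal, -1), setParams, returnType = 'sum')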

    """

    callCount += 1

    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    if ((setParams['stamp_size'] - np.array(image.shape)).sum() > 0):
        raise RuntimeError(
            'get_logLikelihood - stamp size passed does not match image:',
            str(setParams['stamp_size']), ':', str(image.shape))

    parameters = generalManipulation.makeIterableList(parameters)
    pLabels = generalManipulation.makeIterableList(pLabels)
    if (len(parameters) != len(pLabels)):
        raise ValueError(
            'get_logLikelihood - parameters and labels entered do not have the same length (iterable test): parameters:',
            str(parameters), ' labels:', str(pLabels))

    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)

    #Test reasonable model values - Effectively applying a hard prior
    if (math.sqrt(modelParams['SB']['e1']**2. + modelParams['SB']['e2']**2.) >=
            0.99):
        ##Set log-probability to be as small as possible
        return sys.float_info.max / 10  #factor of 10 to avoid any chance of memory issues here
        #raise ValueError('get_logLikelihood - Invalid ellipticity values set')
    if (modelParams['SB']['size'] <= 0.):
        return sys.float_info.max / 10
    ''' Get Model'''
    if (modelLookup is not None and modelLookup['useLookup']):
        model = np.array(
            modPro.return_Model_Lookup(modelLookup, parameters)
            [0])  #First element of this routine is the model image itself
    else:
        model, disc = modPro.user_get_Pixelised_Model(
            modelParams, sbProfileFunc=SBPro.gaussian_SBProfile_CXX)
    """ DEPRECATED for multiple models
    if(model.shape != image.shape):
        print "\n\n Model shape: ", model.shape, " :: Image Shape:", image.shape 
        raise ValueError('get_logLikelihood - model returned is not of the same shape as the input image.')
    """

    #Flatten model
    model = model.flatten()

    #Construct the log-likelihood assuming Gaussian noise. As this will be minimised, the preceding factor of -1 is removed
    if (vverbose):
        print 'Noise in ln-Like evaluation:', modelParams['noise']

    keepPix = returnType.lower() == 'pix' or returnType.lower() == 'all'

    pixlnL = np.array([])
    lnL = 0
    absSign = signModifier / abs(signModifier)
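
    ##If the image is a 2D stack of flattened realisations, accumulate the likelihood over realisations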
    if (len(image.shape) == len(model.shape) + 1):
        #print "Considering sum over images", pLabels, parameters
        for i in range(image.shape[0]):
            tpixlnL = absSign * np.power(image[i] - model, 2.)
            lnL += tpixlnL.sum()
            if (keepPix):
                pixlnL = np.append(pixlnL, tpixlnL)

    else:
        tpixlnL = absSign * np.power(image - model, 2.)
        lnL += tpixlnL.sum()
        if (keepPix):
            pixlnL = np.append(pixlnL, tpixlnL)

    pixlnL *= 0.5 / (modelParams['noise']**2.)
    lnL *= 0.5 / (modelParams['noise']**2.)

    if (vverbose):
        print 'lnL:', lnL, [
            str(pLabels[i]) + ':' + str(parameters[i])
            for i in range(len(pLabels))
        ]

    ##The model is noise-free, so the noise must be separately measured and passed in
    ## The answer is independent of the noise provided it is invariant across the image
    #lnL2 = 0.5*( (np.power(image-model,2.)).sum()/(modelParams['noise']**2.))
    if (returnType.lower() == 'sum'):
        return lnL
    elif (returnType.lower() == 'pix'):
        return pixlnL
    elif (returnType.lower() == 'all'):
        return [lnL, pixlnL]
    else:
        raise ValueError('get_logLikelihood - returnType not recognised:', str(returnType))