def set_modelParameter(Dict, param_labels, param_values):
    from copy import deepcopy
    from generalManipulation import isIterableList, makeIterableList
    """
    Sets model Parameters (as defined above) by input lists. As opposed to update_Dictionary, this routine allows for the keys to be put in without defining which sub-dictionary they belong to, as seperate_Keys_byModel seperates out into sub-dictionaries specified in the default declaration

    Requires:
    --Dict: model Parameter dictionary to be modified
    -- param_labels: list of parameter labels to be modified in Dict
    -- param_values: list of values corresponding to parameters in param_labels.
    """

    if(not (isIterableList(param_labels) == isIterableList(param_values))):
        raise ValueError('set_modelParameter - Both param_labels and param_values must be lists or scalars')

    iparam_labels = makeIterableList(param_labels); iparam_values = makeIterableList(param_values)

    if(len(iparam_labels) != len(iparam_values)):
        raise RuntimeError('set_modelParameter - labels and values are not of equal length')

    ## Separate out into respective parts (SB/PSF)
    SBLab, PSFLab, SBVals, PSFVals = seperate_Keys_byModel(iparam_labels, iparam_values, refParam = default_ModelParameter_Dictionary())

    for i, s in enumerate(SBLab):
        Dict['SB'][s] = SBVals[i]
    for i, s in enumerate(PSFLab):
        Dict['PSF'][s] = PSFVals[i]
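## Hedged usage sketch (not called at import): assumes the default dictionary
## exposes 'SB' and 'PSF' sub-dictionaries, and that 'e1' and 'size' are valid
## SB keys, as used elsewhere in this module.
def _example_set_modelParameter():
    Dict = default_ModelParameter_Dictionary()
    ##Keys are routed to the correct sub-dictionary by seperate_Keys_byModel
    set_modelParameter(Dict, ['e1', 'size'], [0.1, 1.2])
    print 'e1:', Dict['SB']['e1'], ' size:', Dict['SB']['size']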
def unpack_Dictionary(dic, requested_keys = None):
    from generalManipulation import makeIterableList
    """
    Helper routine which returns a list of dictionary values corresponding to the list of requested keys input. If no keys are input, the full list of values corresponding to the full dictionary key list (in stored order) is returned. Used to extract model parameters. Automatically searches all sub-dictionary levels defined (hardwired to SB and PSF only).
    """

    ##This could be generalised if a list of subdicts (currently SB and PSF could be passed), or inferred

    if(requested_keys is None):
        #This could be improved by querying intelligently, e.g. getting list of keys, then checking each as dictionary, then replacing (remove + concat) if dictionary
        requested_keys = dic['SB'].keys()
        requested_keys += dic['PSF'].keys()
    elif(not hasattr(requested_keys, "__iter")):
        requested_keys = makeIterableList(requested_keys)

    ## Set SB Keys
    rescount = 0; res = ['F']*len(requested_keys)
    for k in requested_keys:
        if(k in dic['SB']): #SB Parameter
            res[rescount] = dic['SB'][k]; rescount += 1
        elif(k in dic['PSF']): #PSF Parameter
            res[rescount] = dic['PSF'][k]; rescount += 1
        #elif( (np.array(dic.keys()) == k).sum() > 0): #Other Parameter
        #    res[rescount] = dic[k]; rescount += 1
    return res[:rescount]
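## Hedged usage sketch (not called at import): assumes 'e1' and 'e2' are keys
## of the 'SB' sub-dictionary of the default model dictionary.
def _example_unpack_Dictionary():
    Dict = default_ModelParameter_Dictionary()
    ##Values are returned in the order the keys are requested
    e1, e2 = unpack_Dictionary(Dict, requested_keys = ['e1', 'e2'])
    print 'Unpacked e1, e2:', e1, e2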
def differentiate_Pixelised_Model_Analytic(modelParams, pVal, pLab, n, permute = False):
    import surface_Brightness_Profiles as SBPro
    from generalManipulation import makeIterableList
    """
    Wrapper function to produce an analytic derivatve of the pixelised image, by using the fact that the model production routines can be called defining the surface brightness profile routine, and the arguments that are passed into it.

    Surface Brightness Profiles: An alterantive SB profile implementation can be provided by using sbProfileFunc = SBPro.gaussian_SBProfile_Sympy, however the Weave implementation uses the output of the Sympy routine in C++ through weave (SciPy), is noticably faster, and has been tested to be exact to the default float precision in python. WEAVE replaced by SWIG compiled CXX version

    Requires:
    -- modelParams: Disctionary containing default (fixed) values for all parameters which are not being measured
    -- pVal: List ofparamter values, around which the derivative is taken
    -- pLab: List of strings labelling the measured parameters for which the derivative is taken. Must be the same length as pVal
    -- n: Order to which the derivative is taken. SCALAR IN THIS VERSION
    -- permute: If false, single derivative is output for each order entered. If true, the result is returned in an nParamter^order list covering all permutations of the derivatives, where symmetry is enforced. In this case, the diagonal elements cover the nth derivatve with respect to that parameter. Result is on order of that the parameters are entered. E.g. for parameters a and b entered in that order:
    --- Res = [ ddI/dada, ddI/dadb
                ddI/dbda, ddI/dbdb ]

    Returns:
    Array containing derivatives over entered parameters, and all permutations (if permute == True)
    """

    if( n<1 or n>2 ):
        raise ValueError('differentiate_Pixelised_Model_Analytic - Error - Only derivatives up to second order are supported for now')

    nP = len(pVal)
    nPix = modelParams['stamp_size']
    if permute:
        ##Consider all permutations of entered parameters. Use numpy array
        if n == 1:
            Res = np.zeros((nP, nPix[0], nPix[1]))
            for i in range(nP):
                der = [pLab[i]]
                Res[i,:,:] = get_Pixelised_Model_wrapFunction(pVal, modelParams, pLab,  noiseType = None, outputImage = False, sbProfileFunc = SBPro.gaussian_SBProfile_CXX, der = der)
        
        elif n == 2:
            Res = np.zeros((nP, nP, nPix[0], nPix[1]))
            
            for i in range(nP):
                for j in range(i, nP):
                    der = [pLab[i], pLab[j]]
                    Res[i,j,:,:] = get_Pixelised_Model_wrapFunction(pVal, modelParams, pLab,  noiseType = None, outputImage = False, sbProfileFunc = SBPro.gaussian_SBProfile_CXX, der = der)
                    Res[j,i,:,:] = Res[i,j] #Enforce symmetry

    else:
        ## Consider the derivative to given order for each parameter entered
        Res = np.zeros((nP, nPix[0], nPix[1]))
        for par in range(nP):
            ppLab = makeIterableList(pLab[par])
            if(len(ppLab) == n):
                der = ppLab
            #elif(len(pLab) == 1): ##Disabled for now, as it may allow for bugs to be introduced. For now, only accept cases where pLab labels the derivatives exactly, or raise exception
            #    der = [pLab]*n
            else:
                raise ValueError('differentiate_Pixelised_Model_Analytic - pLab entered is not acceptable for the order of differentiation entered:'+str(n)+':'+str(pLab))
            Res[par] = get_Pixelised_Model_wrapFunction(pVal, modelParams, pLab,  noiseType = None, outputImage = False, sbProfileFunc = SBPro.gaussian_SBProfile_CXX, der = der)

    return Res
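## Hedged sketch (not called at import): cross-check the analytic first
## derivative against a central finite difference of the pixelised model.
## Assumes user_get_Pixelised_Model returns (image, discard) as used elsewhere
## in this module; the 'e1' label and step size h are illustrative choices.
def _example_check_first_derivative(modelParams, h = 1.e-6):
    from copy import deepcopy
    import surface_Brightness_Profiles as SBPro
    pLab = ['e1']; pVal = [modelParams['SB']['e1']]
    analytic = differentiate_Pixelised_Model_Analytic(modelParams, pVal, pLab, n = 1)[0]

    ##Central finite difference in e1, all other parameters held fixed
    upP = deepcopy(modelParams); upP['SB']['e1'] += h
    loP = deepcopy(modelParams); loP['SB']['e1'] -= h
    numeric = (user_get_Pixelised_Model(upP, sbProfileFunc = SBPro.gaussian_SBProfile_CXX)[0]
               - user_get_Pixelised_Model(loP, sbProfileFunc = SBPro.gaussian_SBProfile_CXX)[0])/(2.*h)
    print 'Max |analytic - numeric|:', np.abs(analytic - numeric).max()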
def find_ML_Estimator(image, fitParams, outputHandle = None, setParams = None, modelLookup = None, searchMethod = 'simplex', preSearchMethod = None, Prior = None, bruteRange = None, biasCorrect = 0, calcNoise = None, bcoutputHandle = None, error = 'Fisher', **iParams):
    import scipy.optimize as opt
    import model_Production as modPro
    from surface_Brightness_Profiles import gaussian_SBProfile_Weave
    import measure_Bias as mBias
    from generalManipulation import makeIterableList
    """
    MAIN ROUTINE FOR THIS MODULE. Takes in an image (at minimum) and a set of values which defines the model parameters (fit and those which are free to vary), and returns the parameter values at which the log-Likelihood is minimised (or Likelihood is maximised). Can correct for first order noise bias (if biasCorrect != 0), and an estimate of the error (if error is equal to a set of pre-defined values [see below]).
    
    Requires:
    -- image: 2d array of pixelised image
    -- fitParams: tuple of strings which define the model parameters which are free to vary (those which will be fit). These must satisfy the definition of model parameters as set out in the default model dictionary. If None, then e1, e2 and T are fit (this could be made stricter by removing the default None initialisation, thereby requiring that a set of parameters to be fit is passed in).
    -- outputHandle: handle of the output file. **Result is always appended**. If not passed in, then result is not output. Output is in ASCII form.
    -- setParams: Default model dictionary containing fixed parameters which describe the model being fit. One part of a two part approach to setting the full model parameter dictionary, along with iParams. If None, then the default model dictionary is taken.
    -- modelLookup: Dictionary containing lookup table for pixelised model images, as defined in model_Production module. If None, no lookup is used, and the model is re-evaluated for each change in model parameters.
    -- searchMethod: String detailing which form of minimisation to use. Accepted values are:
    ___ simplex, brent, powell, cg, bfgs, l_bfgs_b, ncg (as defined in SciPy documentation)
    -- preSearchMethod: String detailing initial search over parameter space to find the global minimum, used as an initial guess for refinement with searchMethod. If None, initial guess is set to default passed in by the combination of setParams and iParams. If not None, then code will run an initial, coarse search over the parameter space to attempt to find the global minimum. By default this is switched off. Where preSearchMethod == grid or brute, a grid based search is used. Where this is used, a range must either be entered by the user through bruteRange, or it is taken from the entered prior information. NOTE: This still uses a typically coarse grid, therefore if the range is too wide then it is possible that the code may still find a local minimum if this exists within one grid point interval of the global minimum.
    -- Prior: NOT USED YET. Skeleton to allow for a parameter prior structure to be passed in
    -- bruteRange: [nPar, 2] sized tuple setting the range in which the initial preSearchMethod is evaluated, if this is done using a grid or brute method (both equivalent), where nPar is the number of free model parameters being fit. THIS DOES NOT CONSTITUTE A PRIOR, as the refinement may still find an ML value outside this range, however where the global maximum occurs outside this range the returned ML value may be expected to be biased.
    -- biasCorrect: integer, states what level of noise bias to correct the estimate to. Only 1st order correction (biasCorrect == 1) is supported. If biasCorrect == 0, the uncorrected estimate (and error if applicable) are output. If biasCorrect > 0, the uncorrected, corrected and error (if applicable) are output. When used, it is important that *the entered model parameter dictionary contains an accurate measure of the pixel noise of appropriate signal--to--noise, as the analytic bias scales according to both*. Noise can be estimated using estimate_Noise() before entry.
    -- bcoutputHandle: As outputHandle, except for the bias corrected estimator.
    -- error: String detailing error estimate to output. Supported values are:
    ___ fisher: Marginalised fisher error for each parameter around the ML point. See docstring for fisher_Error_ML().
    ___ brute: UNSUPPORTED, however an error defined on the parameter likelihood itself can be derived if the preSearchMethod and bruteRange are defined such that the Likelihood has *compact support*. If not, then this would be inaccurate (underestimated). Therefore coding for this is deferred until the application of a prior is developed, as use of a prior ensures compact support by default.
    -- iParams: set of optional arguments which, together with setParams, defines the initial model dictionary. Allows parameter values to be input individually on call, and is particularly useful for setting initial guesses where preSearchMethod == None.
    
    
    Model Parameter entry: Model Parameters can be entered using two methods
    ___ setParams: Full Dictionary of initial guess/fixed value for set of parameters. If None, this is set to the default set. May not be complete: if not, then model parameters are set to default as given in default_ModelParameter_Dictionary()
    ___ iParams: generic input which allows model parameters to be set individually. Keys not set are set to default as given by default_ModelParameter_Dictionary(). Where an iParams key is included in the default dictionary, or setParams, it will be updated to this value (**therefore iParams values have preference**). If a key not present in the default is entered, it is ignored
    ___ The initial choice of model parameters (including initial guesses for the minimisation routine where preSearchMethod == False) is thus set as setParams+{iParams}



    Returns:
    Returned: tuple of length equal to fitParams. Gives ML estimator for each fit parameter, with bias corrected version (if biasCorrect != 0) and error (if applicable) always in that order.
    """

    ''' Set up defaults '''

    err = None
    
    ##Initialise output tuple
    Returned = []

    ## Exceptions based on input objects
    if(image is None or sum(image.shape) == 0):
        raise RuntimeError('find_ML_Estimator - image supplied is None or uninitialised')
        
    if(len(fitParams) > 2 and modelLookup is not None and modelLookup['useLookup']):
        raise RuntimeError('find_ML_Estimator - Model Lookup is not supported for more than double parameter fits')

    ##Set up initial params, which sets the initial guess or fixed value for the parameters which define the model
    ##This line sets up the keywords that are accepted by the routine
    ## pixel_scale and size should be in arcsec/pixel and arcsec respectively. If pixel_scale = 1., then size can be interpreted as size in pixels
    ## centroid should be set to the center of the image, here assumed to be the middle pixel

    if(setParams is None):
        initialParams = modPro.default_ModelParameter_Dictionary()
    else:
        initialParams = modPro.default_ModelParameter_Dictionary()
        modPro.update_Dictionary(initialParams, setParams)
        ## Deprecated initialParams.update(setParams)

    modPro.set_modelParameter(initialParams, iParams.keys(), iParams.values())

    ## Define modelParams
    modelParams = deepcopy(initialParams)

    ## Estimate Noise of Image
    if(calcNoise is not None):
        modelParams['noise'] = calcNoise(image, modelParams['centroid'])

    ####### Search lnL for minimum
    #Construct initial guess for free parameters by removing them from dictionary
    x0 = modPro.unpack_Dictionary(modelParams, requested_keys = fitParams)

    if(preSearchMethod is not None):
        ## Conduct a presearch of the parameter space to set initial guess (usually grid-based or brute-force)
        if(vverbose or debug):
            print '\n Conducting a pre-search of parameter space to identify the global minimum'
        if(preSearchMethod.lower() == 'grid' or preSearchMethod.lower() == 'brute'):
            ##Brute force method over a range either set as the prior, or the input range.
            if(bruteRange is not None):
                if(vverbose or debug):
                    print '\n Using user-defined parameter range:', bruteRange

                #x0, fval, bruteGrid, bruteVal
                bruteOut = opt.brute(get_logLikelihood, ranges = bruteRange, args = (fitParams, image, modelParams, modelLookup, 'sum'), finish = None, full_output = True)
                x0, fval, bruteGrid, bruteVal = bruteOut
                ## x0 has len(nParam); fval is scalar; bruteGrid has len(nParam), nGrid*nParam; bruteVal has nGrid*nParam

                ###Evaluate error based on brute by integration - this would only work if bruteRange covers the full range where the PDF is non-zero

                if(error.lower() == 'brute'):
                    raise RuntimeError('find_ML_Estimator - brute labelled as means of evaluating error. This is possible, but not coded due to the limitation in the use of bruteRange to cover the whole region where the likelihood is non-zero. When a prior is included, this could be taken to be exact, provided one knows the range where the prior has compact support, and the bruteRange reflects this.')
                ## use scipy.integrate.trapz(bruteVal, x = bruteGrid[i], axis = i) with i looping over all parameters (ensure axis is set properly...)

                

                if(vverbose or debug):
                    print '\n preSearch has found a minimum (on a coarse grid) of:', x0
                
            elif(Prior is not None):
                if(vverbose or debug):
                    print '\n Using prior range'
                raise RuntimeError('find_ML_Estimator - Prior entry has not yet been coded up')

            else:
                raise RuntimeError('find_ML_Estimator - Brute preSearch is active, but prior or range is not set')

    if(debug or vverbose):
        ##Output Model Dictionary and initial guess information
        print 'Model Dictionary:', modelParams
        print '\n Initial Guess:', x0

    ##Find minimum chi^2 using scipy optimize routines
    ##version 11+ maxima = opt.minimize(get_logLikelihood, x0, args = (fitParams, image, modelParams))
    if(searchMethod.lower() == 'simplex'):
        maxima = opt.fmin(get_logLikelihood, x0 = x0, xtol = 0.00001, args = (fitParams, image, modelParams, modelLookup, 'sum'), disp = (verbose or debug))
    elif(searchMethod.lower() == 'brent'):
        ## Brent minimisation is one-dimensional only; scipy.optimize.brent takes no x0/xtol/disp arguments (and opt.fmin_brent does not exist)
        maxima = opt.brent(get_logLikelihood, args = (fitParams, image, modelParams, modelLookup, 'sum'))
    elif(searchMethod.lower() == 'powell'):
        maxima = opt.fmin_powell(get_logLikelihood, x0 = x0, xtol = 0.00001, args = (fitParams, image, modelParams, modelLookup, 'sum'), disp = (verbose or debug))
    elif(searchMethod.lower() == 'cg'):
        ##Not tested (10Aug)
        maxima = opt.fmin_cg(get_logLikelihood, x0 = x0, fprime = differentiate_logLikelihood_Gaussian_Analytic, args = (fitParams, image, modelParams, modelLookup, 'sum'), disp = (verbose or debug), gtol = 0.000001) ##fmin_cg takes gtol, not ftol
    elif(searchMethod.lower() == 'bfgs'):
        ##Not tested (10Aug)
        maxima = opt.fmin_bfgs(get_logLikelihood, x0 = x0, fprime = differentiate_logLikelihood_Gaussian_Analytic, args = (fitParams, image, modelParams, modelLookup, 'sum'), disp = (verbose or debug))
    elif(searchMethod.lower() == 'l_bfgs_b'):
        ##Not tested (10Aug)
        maxima = opt.fmin_l_bfgs_b(get_logLikelihood, x0 = x0, fprime = differentiate_logLikelihood_Gaussian_Analytic, args = (fitParams, image, modelParams, modelLookup, 'sum'), disp = (verbose or debug))
    elif(searchMethod.lower() == 'ncg'):
        ##Not tested (10Aug)
        maxima = opt.fmin_ncg(get_logLikelihood, x0 = x0, fprime = differentiate_logLikelihood_Gaussian_Analytic, args = (fitParams, image, modelParams, modelLookup, 'sum'), disp = (verbose or debug))
    else:
        raise ValueError('find_ML_Estimator - searchMethod entered is not supported:'+str(searchMethod))

    ##Make numpy array (in the case where 1D is used and scalar is returned):
    if(len(fitParams)==1):
        maxima = np.array(makeIterableList(maxima))

    if(vverbose):
        print 'maxima is:', maxima

    if(debug):
        ##Plot and output residual
        print 'Plotting residual..'
        
        fittedParams = deepcopy(modelParams)
        modPro.set_modelParameter(fittedParams, fitParams, maxima)
        ''' Deprecated
        for i in range(len(fitParams)):
            fittedParams[fitParams[i]] =  maxima[i]
        '''
 
        model, disc =  modPro.user_get_Pixelised_Model(fittedParams, sbProfileFunc = gaussian_SBProfile_Weave)
        residual = image-model

        import pylab as pl
        ##Plot image and model
        f = pl.figure()
        ax = f.add_subplot(211)
        ax.set_title('Model')
        im = ax.imshow(model, interpolation = 'nearest')
        pl.colorbar(im)
        ax = f.add_subplot(212)
        ax.set_title('Image')
        im = ax.imshow(image, interpolation = 'nearest')
        pl.colorbar(im)

        pl.show()

        ##Plot Residual
        f = pl.figure()
        ax = f.add_subplot(111)
        im = ax.imshow(residual, interpolation = 'nearest')
        ax.set_title('Image-Model')
        pl.colorbar(im)
        pl.show()

    if(np.isnan(maxima).sum() > 0):
        raise ValueError('find_ML_Estimator - FATAL - NaNs found in maxima:', maxima)

    if(verbose):
        print 'Maxima found to be:', maxima

    ##Output Result
    if(outputHandle is not None):
        np.savetxt(outputHandle, np.array(maxima).reshape(1,maxima.shape[0]))

    ## Bias Correct
    if(biasCorrect == 0):
        Returned.append(maxima)
    elif(biasCorrect == 1):
        ana = mBias.analytic_GaussianLikelihood_Bias(maxima, fitParams, modelParams, order = biasCorrect, diffType = 'analytic')
        bc_maxima = maxima-ana

        ##Output Result
        if(bcoutputHandle is not None):
            np.savetxt(bcoutputHandle, np.array(bc_maxima).reshape(1,bc_maxima.shape[0]))

        if(verbose):
            print 'BC Maxima found to be:', bc_maxima

        ##Return minimised parameters
        Returned.extend([maxima, bc_maxima])
    else:
        raise ValueError('find_ML_Estimator - biasCorrect(ion) value entered is not applicable:'+ str(biasCorrect))


    ## Get Error on measurement. Brute error would have been constructed on the original brute force grid evaluation above.
    if(error.lower() == 'fisher'):
        err = fisher_Error_ML(maxima, fitParams, image, setParams, modelLookup) #Use finalised modelParams here?
        Returned.append(err)

    return Returned
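## Hedged usage sketch (not called at import): fit ellipticity only, with a
## brute-force pre-search over an illustrative range. Assumes a noisy postage
## stamp `image`, and that 'e1' and 'e2' are valid fit parameter labels.
def _example_find_ML_Estimator(image):
    fitParams = ('e1', 'e2')
    Returned = find_ML_Estimator(image, fitParams,
                                 searchMethod = 'simplex',
                                 preSearchMethod = 'brute',
                                 bruteRange = [(-0.9, 0.9), (-0.9, 0.9)],
                                 error = 'Fisher')
    ##With biasCorrect == 0 (default) and error == 'Fisher', Returned is [maxima, err]
    maxima, err = Returned[0], Returned[-1]
    print 'ML (e1, e2):', maxima, ' +/- ', err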
def differentiate_logLikelihood_Gaussian_Analytic(parameters, pLabels, image, setParams, modelLookup = None, returnType = None, order = 1, signModifier = -1.):
    import generalManipulation
    import model_Production as modPro
    from surface_Brightness_Profiles import gaussian_SBProfile_Weave
    '''
    Returns the analytic derivative of the Gaussian log-Likelihood (ignoring parameter-independent prefactor whose derivative is zero) for parameters labelled by pLabels.
    Uses analytic derivative of the pixelised model as given in differentiate_Pixelised_Model_Analytic routine of model_Production routine.

    *** Note: `noise` as defined in setParams must be the noise_std, and must accurately describe the noise properties of the image. ***

    Requires:
    parameters: flattened array of parameter values to vary (allows for external program to set variation in these params)
    pLabels: tuple of length `parameters`, which is used to identify the parameters being varied. These labels should satisfy the modelParameter dictionary keys used in setting up the model
    image: 2d <ndarray> of pixelised image
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: An instance of the model lookup table, as set in model_Production module
    returnType: IGNORED, but included so that this method mimics the call fingerprint of the log-Likelihood evaluation routine if used as part of a pre-fab minimisation routine.
    order: sets the order to which derivatives are taken. If order == 1, the return is a tuple (ndarray) of length len(parameters), which contains the first derivatives of all parameters. If order == 2, the return is a two-dimensional ndarray, where each element i,j gives the second derivative wrt parameter i and parameter j. Order >= 3 or <= 0 are not supported.
    signModifier: default -1. Result is multiplied by abs(signModifier)/signModifier, to change the sign of the output. This is required as the lnL routine actually returns -lnL = chi^2 where a minimisation routine is used. Thus, where the minimisation uses first derivatives, the signModifier should be positive, whilst for other applications (such as the fisher error) one requires the derivative of lnL, and so the sign modifier must be negative. The absolute value of signModifier is unimportant.

    Returns:
    [dlnL/dbeta], repeated for all beta in order <1D ndarray>: derivative of -1*log_likelihood evaluated at entered model parameters if order == 1
    [[dlnL/dbeta_i dbeta_j]], repeated for all beta in order <2D ndarray>: second derivative of -1*log_likelihood evaluated at entered model parameters if order == 2

    Possible Extensions:
    -- In calculating second order derivatives, a nested loop is used. This is likely to be slow, and as this is used in producing fisher errors (and thus done every run-time), this could be a bottleneck on the measurement of the ML point where errors are used

    Tests:
    -- Fisher error agrees well with simulated output for error.
    '''
    
    ##To be useful as part of a minimisation routine, the arguments passed to this function must be the same as those passed to the ln-Likelihood evaluation also. This suggests possibly two routines: one, like the model differentiation itself, should just return the various derivatives, and a wrapper routine which produces only the relevant derivatives required for minimisation
    ## Third order is ignored for now, as this would require an edit to the method of calculating model derivatives, and it is unlikely that a third order derivative would ever really be necessary (except in the case where an analytic derivative of the model is wanted for the calculation of the bias, where simulations over many images are used: usually, the known statistics of the Gaussian likelihood can be used to remove this necessity anyway).


    ### Only first derivatives are needed for minimisation, so for now this is coded to deal with first derivatives only.
    ### Therefore, n = 1, permute = False by default
    ### Note, that this code is unlikely to speed up any computation provided that the derivative is calculated using SymPY. Therefore this must be addressed.

    ### Set up model parameters as input
    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    if(setParams['stamp_size'] != image.shape):
        raise RuntimeError('differentiate_logLikelihood_Gaussian_Analytic - stamp size passed does not match image:', str(setParams['stamp_size']), ':', str( image.shape))

    ##Check whether parameters input are iterable and assign to a tuple if not: this allows both `parameters' and `pLabels' to be passed as e.g. a float and string and the method to still be used as is
    parameters = generalManipulation.makeIterableList(parameters); pLabels = generalManipulation.makeIterableList(pLabels)
    if(len(parameters) != len(pLabels)):
        raise ValueError('differentiate_logLikelihood_Gaussian_Analytic - parameters and labels entered do not have the same length (iterable test)')

    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)

    ''' Get Model'''
    if(modelLookup is not None and modelLookup['useLookup']):
        model = np.array(modPro.return_Model_Lookup(modelLookup, parameters)[0]) #First element of this routine is the model image itself
    else:
        model = modPro.user_get_Pixelised_Model(modelParams, sbProfileFunc = gaussian_SBProfile_Weave)[0]


    ''' Get model derivatives '''    
    modDer = modPro.differentiate_Pixelised_Model_Analytic(modelParams, parameters, pLabels, n = 1, permute = False)
    #modDer stores only the n'th derivative of all parameters entered, stored as an nP*nPix*nPix array.

    if(order == 2):
        ##Calculate 2nd derivative also
        modDer2 = modPro.differentiate_Pixelised_Model_Analytic(modelParams, parameters, pLabels, n = 2, permute = True)
            #modDer2 stores the 2nd derivative of all parameters entered, stored as an nP*nP*nPix*nPix array.

    ##Construct the result to be returned. For order == 1 this is a 1D array of length nP, where each element corresponds to the gradient in that parameter direction
    nP = len(parameters)
    delI = image - model

    if(order == 1):
        res = np.zeros(nP)
        
        ##Create tdI, which stores dI in the same shape as modDer by adding a first dimension
        tdelI = np.zeros(modDer.shape); tdelI[:] = delI.copy()
        ##Alternatively: tdelI = np.repeat(delI.reshape((1,)+delI.shape), modDer.shape[0], axis = 0)

        ##Set derivative as sum_pix(delI*derI)/sig^2 for all parameters entered
        ## ReturnTypes other than sum could be implemented by removing the sum parts of this relation, however the implementation of fprime in the minimisation routines requires the return to be a 1D array containing the gradient in each direction.
        res = (tdelI*modDer).sum(axis = -1).sum(axis = -1)
        res /= (signModifier/abs(signModifier))*modelParams['noise']*modelParams['noise']
    elif(order == 2):
        res = np.zeros((nP,nP))
        ##This could and should be sped-up using two single loops rather than a nested loop, or by defining delI and dIm*dIm in the same dimension as modDer2
        ## Alternate speed-up is to implement with Weave
        for i in range(nP):
            for j in range(nP):
                res[i,j] = (delI*modDer2[i,j] - modDer[i]*modDer[j]).sum(axis = -1).sum(axis = -1)

        res /= (signModifier/abs(signModifier))*modelParams['noise']*modelParams['noise']


    return res
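## Hedged sketch (not called at import): compare the analytic gradient with a
## numerical gradient of get_logLikelihood built via scipy.optimize.approx_fprime.
## The two should agree up to the overall sign set by signModifier (see the
## docstring above); the step 1.e-8 and the sign choice here are illustrative.
def _example_check_lnL_gradient(parameters, pLabels, image, setParams):
    import scipy.optimize as opt
    analytic = differentiate_logLikelihood_Gaussian_Analytic(parameters, pLabels, image, setParams,
                                                             signModifier = -1.)
    ##approx_fprime forwards the trailing arguments to get_logLikelihood
    numeric = opt.approx_fprime(np.array(parameters), get_logLikelihood, 1.e-8,
                                pLabels, image, setParams, None, 'sum')
    print 'Analytic (up to sign):', analytic, '  Numeric:', numeric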
def get_logLikelihood(parameters, pLabels, image, setParams, modelLookup = None, returnType = 'sum'):
    import math, sys
    import model_Production as modPro
    import surface_Brightness_Profiles as SBPro
    import generalManipulation
    """
    Returns the (-1.)*log-Likelihood as a Gaussian of lnL propto (I-Im)^2/sigma_n^2, where Im is the image defined by dictionary ``modelParams'', I is the image being analysed, and sigma_n the pixel noise.
    Minimisation routine should be directed to this function.

    Requires:
    parameters: flattened array of parameter values for free parameters (allows for external program to set variation in these params)
    pLabels: string tuple of length `parameters`, which is used to identify the parameters being varied. These labels should satisfy the modelParameter dictionary keys used in setting up the model.
    image: 2d <ndarray> of pixelised image.
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: An instance of the model lookup table, as set in model_Production module. If None, then the pixelised model image is re-evaluated for each change in parameters.
    returnType (default sum):
    ---`sum`: Total log-likelihood, summing over all pixels
    ---`pix`: log-likelihood evaluated per pixel. Returns ndarray of the same shape as the input image

    Returns:
    lnL <scalar>: -1*log_likelihood evaluated at entered model parameters

    """

    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    if(setParams['stamp_size'] != image.shape):
        raise RuntimeError('get_logLikelihood - stamp size passed does not match image:', str(setParams['stamp_size']), ':', str( image.shape))

    parameters = generalManipulation.makeIterableList(parameters); pLabels = generalManipulation.makeIterableList(pLabels)
    if(len(parameters) != len(pLabels)):
        raise ValueError('get_logLikelihood - parameters and labels entered do not have the same length (iterable test): parameters:', str(parameters), ' labels:', str(pLabels))


    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)

    ''' Deprecated for above
    for l in range(len(pLabels)):
        if(pLabels[l] not in modelParams):
            raw_input('Error setting model parameters in get_logLikelihood: Parameter not recognised. <Enter> to continue')
        else:
            modelParams[pLabels[l]] = parameters[l]
    '''

    #Test reasonable model values - Effectively applying a hard prior
    if(math.sqrt(modelParams['SB']['e1']**2. + modelParams['SB']['e2']**2.) >= 0.99):
        ##Set log-probability to be as small as possible
        return sys.float_info.max/10 #factor of 10 to avoid any chance of memory issues here
        #raise ValueError('get_logLikelihood - Invalid Ellipticity values set')
    if(modelParams['SB']['size'] <= 0.):
        return sys.float_info.max/10

    ''' Get Model'''
    if(modelLookup is not None and modelLookup['useLookup']):
        model = np.array(modPro.return_Model_Lookup(modelLookup, parameters)[0]) #First element of this routine is the model image itself
    else:
        model, disc = modPro.user_get_Pixelised_Model(modelParams, sbProfileFunc = SBPro.gaussian_SBProfile_Weave)

    ''' Model, lookup comparison '''
    '''
    modelEx, disc = modPro.user_get_Pixelised_Model(modelParams, sbProfileFunc = modPro.gaussian_SBProfile)
    print 'Model, lookup Comparison:', (model-modelEx).sum(), parameters
    import pylab as pl
    f = pl.figure()
    ax = f.add_subplot(211)
    im = ax.imshow(modelEx-model); ax.set_title('model - lookup'); pl.colorbar(im)
    ax = f.add_subplot(212)
    im = ax.imshow(modelEx/model); ax.set_title('model/lookup'); pl.colorbar(im)
    pl.show()
    '''

    if(model.shape != image.shape):
        raise ValueError('get_logLikelihood - model returned is not of the same shape as the input image.')
    
    #Construct log-Likelihood assuming Gaussian noise. As this will be minimised, remove the -1 preceding
    if(vverbose):
        print 'Noise in ln-Like evaluation:', modelParams['noise']
    pixlnL =  (np.power(image-model,2.))
    lnL = pixlnL.sum()
    pixlnL *= 0.5/(modelParams['noise']**2.); lnL *= 0.5/(modelParams['noise']**2.)

    if(vverbose):
        print 'lnL:', lnL, [ str(pLabels[i])+':'+str(parameters[i]) for i in range(len(pLabels))]

    ##Model is noise free, so the noise must be separately measured and passed in
    ## Answer is independent of noise provided invariant across image
    #lnL2 = 0.5*( (np.power(image-model,2.)).sum()/(modelParams['noise']**2.))
    if(returnType.lower() == 'sum'):
        return lnL
    elif(returnType.lower() == 'pix'):
        return pixlnL
    elif(returnType.lower() == 'all'):
        return [lnL, pixlnL]
    else:
        raise ValueError('get_logLikelihood - returnType entered is not supported:'+str(returnType))
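## Hedged sketch (not called at import): one-dimensional scan of -lnL over e1,
## holding all other parameters at the values in setParams. The grid is an
## illustrative choice only.
def _example_lnL_scan(image, setParams):
    e1Grid = np.linspace(-0.5, 0.5, 101)
    lnLGrid = np.array([get_logLikelihood([e1], ['e1'], image, setParams) for e1 in e1Grid])
    print 'Minimum -lnL on grid at e1 =', e1Grid[lnLGrid.argmin()]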
def differentiate_logLikelihood_Gaussian_Analytic(parameters,
                                                  pLabels,
                                                  image,
                                                  setParams,
                                                  modelLookup=None,
                                                  returnType=None,
                                                  order=1,
                                                  signModifier=-1.):
    import generalManipulation
    import model_Production as modPro
    from surface_Brightness_Profiles import gaussian_SBProfile_CXX
    '''
    Returns the analytic derivative of the Gaussian log-Likelihood (ignoring parameter-independent prefactor whose derivative is zero) for parameters labelled by pLabels.
    Uses analytic derivative of the pixelised model as given in differentiate_Pixelised_Model_Analytic routine of model_Production routine.

    *** Note: `noise` as defined in setParams must be the noise_std, and must accurately describe the noise properties of the image. ***

    Requires:
    parameters: flattened array of parameter values to vary (allows for external program to set variation in these params)
    pLabels: tuple of length `parameters`, which is used to identify the parameters being varied. These labels should satisfy the modelParameter dictionary keys used in setting up the model
    image: 2d <ndarray> of pixelised image
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: An instance of the model lookup table, as set in model_Production module
    returnType: IGNORED, but included so that this method mimics the call fingerprint of the log-Likelihood evaluation routine if used as part of a pre-fab minimisation routine.
    order: sets the order to which derivatives are taken. If order == 1, the return is a tuple (ndarray) of length len(parameters), which contains the first derivatives of all parameters. If order == 2, the return is a two-dimensional ndarray, where each element i,j gives the second derivative wrt parameter i and parameter j. Order >= 3 or <= 0 are not supported.
    signModifier: default -1. Result is multiplied by abs(signModifier)/signModifier, to change the sign of the output. This is required as the lnL routine actually returns -lnL = chi^2 where a minimisation routine is used. Thus, where the minimisation uses first derivatives, the signModifier should be positive, whilst for other applications (such as the fisher error) one requires the derivative of lnL, and so the sign modifier must be negative. The absolute value of signModifier is unimportant.

    Returns:
    [dlnL/dbeta], repeated for all beta in order <1D ndarray>: derivative of -1*log_likelihood evaluated at entered model parameters if order == 1
    [[dlnL/dbeta_i dbeta_j]], repeated for all beta in order <2D ndarray>: second derivative of -1*log_likelihood evaluated at entered model parameters if order == 2

    Possible Extensions:
    -- In calculating second order derivatives, a nested loop is used. This is likely to be slow, and as this is used in producing fisher errors (and thus done every run-time), this could be a bottleneck on the measurement of the ML point where errors are used

    Tests:
    -- Fisher error agrees well with simulated output for error.
    '''

    #raise ValueError("differentiate_logLikelihood_Gaussian_Analytic: This has been disabled as Weave is not behaving. Further modifications require that model, and derivatives are flattened to mimic that requirement that image is also flattened, and an extension to multiple images (this should occur naturally if model and derivatives are repeated to mimic multiple images")

    #if(len(image.shape) > 1):
    #    raise ValueError("differentiate_logLikelihood_Gaussian_Analytic: This routine has not been extended to multiple realisations yet")

    ##To be useful as part of a minimisation routine, the arguments passed to this function must be the same as those passed to the ln-Likelihood evaluation also. This suggests possibly two routines: one, like the model differentiation itself, should just return the various derivatives, and a wrapper routine which produces only the relevant derivatives required for minimisation
    ## Third order is ignored for now, as this would require an edit to the method of calculating model derivatives, and it is unlikely that a third order derivative would ever really be necessary (except in the case where an analytic derivative of the model is wanted for the calculation of the bias, where simulations over many images are used: usually, the known statistics of the Gaussian likelihood can be used to remove this necessity anyway).

    ### Only first derivatives are needed for minimisation, so for now this is coded to deal with first derivatives only.
    ### Therefore, n = 1, permute = False by default

    ### Set up model parameters as input
    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    ##Check whether parameters input are iterable and assign to a tuple if not: this allows both `parameters' and `pLabels' to be passed as e.g. a float and string and the method to still be used as is
    parameters = generalManipulation.makeIterableList(parameters)
    pLabels = generalManipulation.makeIterableList(pLabels)
    if (len(parameters) != len(pLabels)):
        raise ValueError(
            'differentiate_logLikelihood_Gaussian_Analytic - parameters and labels entered do not have the same length (iterable test)'
        )

    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)
    ''' Get Model'''
    if (modelLookup is not None and modelLookup['useLookup']):
        model = np.array(
            modPro.return_Model_Lookup(modelLookup, parameters)
            [0])  #First element of this routine is the model image itself
    else:
        model = modPro.user_get_Pixelised_Model(
            modelParams, sbProfileFunc=gaussian_SBProfile_CXX)[0]
    ''' Get model derivatives '''
    modDer = modPro.differentiate_Pixelised_Model_Analytic(modelParams,
                                                           parameters,
                                                           pLabels,
                                                           n=1,
                                                           permute=False)
    #modDer stores only the n'th derivative of all parameters entered, stored as an nP*nPix*nPix array.
    ''' Testing flattening
    print "modDer shape:", modDer.shape()

    #Flatten modDer to mimic flattened image
    modDer = [modDer[i].flatten() for i in range(nP)]
    print "modDer shape:", modDer.shape()
    raw_input()
    '''

    modDer2 = None
    if (order == 2):
        ##Calculate 2nd derivative also
        modDer2 = modPro.differentiate_Pixelised_Model_Analytic(modelParams,
                                                                parameters,
                                                                pLabels,
                                                                n=2,
                                                                permute=True)
        #modDer2 stores the 2nd derivative of all parameters entered, stored as an nP*nP*nPix*nPix array.

    #Flatten and reshape model and derivative model images to reflect the form of the input image (which can by multi-realisations)
    model = model.flatten()
    modDer = modDer.reshape((modDer.shape[0], -1))
    if (modDer2 is not None):
        modDer2 = modDer2.reshape((modDer2.shape[0], modDer2.shape[1], -1))

    if (len(image.shape) == 2):
        ## Repeat each nReal times
        nRepeat = image.shape[0]

        model = np.tile(model, (nRepeat, 1))

        modDer = np.array(
            [np.tile(modDer[i], (nRepeat, 1)) for i in range(modDer.shape[0])])

        if (modDer2 is not None):
            #There's most likely a better way to do this (i.e. quicker); guard needed as modDer2 is None when order == 1
            modDer2 = np.array([[
                np.tile(modDer2[i, j], (nRepeat, 1))
                for j in range(modDer2.shape[1])
            ] for i in range(modDer2.shape[0])])

    # print "Shape check:"
    # print "Image:", image.shape
    # print "Model:", model.shape
    # print "Derivative:", modDer.shape
    # if(modDer2 is not None):
    #     print "2nd Derivative: ", modDer2.shape
    # raw_input("Check")

    ##Construct the result to be returned. For order == 1 this is a 1D array of length nP, where each element corresponds to the gradient in that parameter direction
    nP = len(parameters)
    delI = image - model

    if (order == 1):
        res = np.zeros(nP)

        ##Create tdI, which stores dI in the same shape as modDer by adding a first dimension
        tdelI = np.zeros(modDer.shape)
        tdelI[:] = delI.copy()
        ##Alternatively: tdelI = np.repeat(delI.reshape((1,)+delI.shape), modDer.shape[0], axis = 0)

        ##Set derivative as sum_pix(delI*derI)/sig^2 for all parameters entered
        ## ReturnTypes other than sum could be implemented by removing the sum parts of this relation, however the implementation of fprime in the minimisation routines requires the return to be a 1D array containing the gradient in each direction.
        res = (tdelI * modDer).sum(axis=-1).sum(axis=-1)

    elif (order == 2):
        res = np.zeros((nP, nP))
        ##This could and should be sped-up using two single loops rather than a nested loop, or by defining delI and dIm*dIm in the same dimension as modDer2
        ## Alternate speed-up is to implement with CXX
        for i in range(nP):
            for j in range(nP):
                res[i, j] = (delI * modDer2[i, j] -
                             modDer[i] * modDer[j]).sum(axis=-1).sum(axis=-1)

    res /= (signModifier /
            abs(signModifier)) * modelParams['noise'] * modelParams['noise']

    return res
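## Hedged sketch (not called at import): marginalised Fisher errors from the
## order == 2 output above. With signModifier = -1. the return approximates
## -d^2 lnL/dbeta_i dbeta_j at the ML point, i.e. the Fisher matrix F, and the
## marginalised error on parameter i is sqrt((F^-1)_ii). This mirrors what
## fisher_Error_ML() is documented to provide, not a replacement for it.
def _example_fisher_errors(parameters, pLabels, image, setParams):
    F = differentiate_logLikelihood_Gaussian_Analytic(parameters, pLabels, image, setParams,
                                                      order = 2, signModifier = -1.)
    errs = np.sqrt(np.diag(np.linalg.inv(F)))
    print 'Marginalised Fisher errors:', errs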
def get_logLikelihood(parameters,
                      pLabels,
                      image,
                      setParams,
                      modelLookup=None,
                      returnType='sum',
                      signModifier=1,
                      callCount=0):
    import math, sys
    import model_Production as modPro
    import surface_Brightness_Profiles as SBPro
    import generalManipulation
    """
    Returns the (-1.)*log-Likelihood as a Gaussian of lnL propto (I-Im)^2/sigma_n^2, where Im is the image defined by dictionary ``modelParams'', I is the image being analysed, and sigma_n the pixel noise.
    Minimisation routine should be directed to this function.

    Requires:
    parameters: flattened array of parameter values for free parameters (allows for external program to set variation in these params)
    pLabels: string tuple of length `parameters`, which is used to identify the parameters being varied. These labels should satisfy the modelParameter dictionary keys used in setting up the model.
    image: 2d <ndarray> of pixelised image.
    setParams: dictionary of fixed model parameters which sets the model SB profile being fit.
    modelLookup: An instance of the model lookup table, as set in model_Production module. If None, then the pixelised model image is re-evaluated for each change in parameters.
    returnType (default sum):
    ---`sum`: Total log-likelihood, summing over all pixels
    ---`pix`: log-likelihood evaluated per pixel. Returns ndarray of the same shape as the input image

    Returns:
    lnL <scalar>: -1*log_likelihood evaluated at entered model parameters

    """

    callCount += 1

    ##Set up dictionary based on model parameters. Deep copy so changes do not overwrite the original
    modelParams = deepcopy(setParams)

    if ((setParams['stamp_size'] - np.array(image.shape)).sum() > 0):
        raise RuntimeError(
            'get_logLikelihood - stamp size passed does not match image:',
            str(setParams['stamp_size']), ':', str(image.shape))

    parameters = generalManipulation.makeIterableList(parameters)
    pLabels = generalManipulation.makeIterableList(pLabels)
    if (len(parameters) != len(pLabels)):
        raise ValueError(
            'get_logLikelihood - parameters and labels entered do not have the same length (iterable test): parameters:',
            str(parameters), ' labels:', str(pLabels))

    ##Vary parameters which are being varied as input
    modPro.set_modelParameter(modelParams, pLabels, parameters)
    ''' Deprecated for above
    for l in range(len(pLabels)):
        if(pLabels[l] not in modelParams):
            raw_input('Error setting model parameters in get_logLikelihood: Parameter not recognised. <Enter> to continue')
        else:
            modelParams[pLabels[l]] = parameters[l]
    '''

    #Test reasonable model values - Effectively applying a hard prior
    if (math.sqrt(modelParams['SB']['e1']**2. + modelParams['SB']['e2']**2.) >=
            0.99):
        ##Set log-probability to be as small as possible
        return sys.float_info.max / 10  #factor of 10 to avoid any chance of memory issues here
        #raise ValueError('get_logLikelihood - Invalid Ellipticity values set')
    if (modelParams['SB']['size'] <= 0.):
        return sys.float_info.max / 10
    ''' Get Model'''
    if (modelLookup is not None and modelLookup['useLookup']):
        model = np.array(
            modPro.return_Model_Lookup(modelLookup, parameters)
            [0])  #First element of this routine is the model image itself
    else:
        model, disc = modPro.user_get_Pixelised_Model(
            modelParams, sbProfileFunc=SBPro.gaussian_SBProfile_CXX)
    ''' Model, lookup comparison '''
    '''
    modelEx, disc = modPro.user_get_Pixelised_Model(modelParams, sbProfileFunc = modPro.gaussian_SBProfile)
    print 'Model, lookup Comparison:', (model-modelEx).sum(), parameters
    import pylab as pl
    f = pl.figure()
    ax = f.add_subplot(211)
    im = ax.imshow(modelEx-model); ax.set_title('model - lookup'); pl.colorbar(im)
    ax = f.add_subplot(212)
    im = ax.imshow(modelEx/model); ax.set_title('model/lookup'); pl.colorbar(im)
    pl.show()
    '''
    """ DEPRECATED for multiple models
    if(model.shape != image.shape):
        print "\n\n Model shape: ", model.shape, " :: Image Shape:", image.shape 
        raise ValueError('get_logLikelihood - model returned is not of the same shape as the input image.')
    """

    #Flatten model
    model = model.flatten()

    #Construct log-Likelihood assuming Gaussian noise. As this will be minimised, remove the -1 preceding
    if (vverbose):
        print 'Noise in ln-Like evaluation:', modelParams['noise']

    keepPix = returnType.lower() == 'pix' or returnType.lower() == 'all'

    pixlnL = np.array([])
    lnL = 0
    absSign = signModifier / abs(signModifier)
    if (len(image.shape) == len(model.shape) + 1):
        #print "Considering sum over images", pLabels, parameters
        for i in range(image.shape[0]):
            tpixlnL = absSign * np.power(image[i] - model, 2.)
            lnL += tpixlnL.sum()
            if (keepPix):
                pixlnL = np.append(pixlnL, tpixlnL)

    else:
        tpixlnL = absSign * np.power(image - model, 2.)
        lnL += tpixlnL.sum()
        if (keepPix):
            pixlnL = np.append(pixlnL, tpixlnL)

    pixlnL *= 0.5 / (modelParams['noise']**2.)
    lnL *= 0.5 / (modelParams['noise']**2.)

    if (vverbose):
        print 'lnL:', lnL, [
            str(pLabels[i]) + ':' + str(parameters[i])
            for i in range(len(pLabels))
        ]

    ##Model is noise free, so the noise must be separately measured and passed in
    ## Answer is independent of noise provided invariant across image
    #lnL2 = 0.5*( (np.power(image-model,2.)).sum()/(modelParams['noise']**2.))
    if (returnType.lower() == 'sum'):
        return lnL
    elif (returnType.lower() == 'pix'):
        return pixlnL
    elif (returnType.lower() == 'all'):
        return [lnL, pixlnL]
    else:
        raise ValueError(
            'get_logLikelihood - returnType entered is not supported:' +
            str(returnType))
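## Hedged sketch (not called at import): this version of get_logLikelihood
## accepts a stack of realisations as a 2D array of flattened images with shape
## (nReal, nPix). Illustrative preparation from a list of 2D postage stamps.
def _example_flatten_realisations(stamps):
    ##stamps: sequence of nReal 2D images of identical shape
    image = np.array([s.flatten() for s in stamps])
    print 'Flattened stack shape (nReal, nPix):', image.shape
    return image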
def find_ML_Estimator(image,
                      fitParams,
                      outputHandle=None,
                      setParams=None,
                      modelLookup=None,
                      searchMethod='simplex',
                      preSearchMethod=None,
                      Prior=None,
                      bruteRange=None,
                      biasCorrect=0,
                      calcNoise=None,
                      bcoutputHandle=None,
                      error='Fisher',
                      **iParams):
    import scipy.optimize as opt
    import model_Production as modPro
    from surface_Brightness_Profiles import gaussian_SBProfile_CXX
    import measure_Bias as mBias
    from generalManipulation import makeIterableList
    """
    MAIN ROUTINE FOR THIS MODULE. Takes in an image (at minimum) and a set of values which defines the model parameters (fit and those which are free to vary), and returns the parameter values at which the log-Likelihood is minimised (or Likelihood is maximised). Can correct for first order noise bias (if biasCorrect != 0), and an estimate of the error (if error is equal to a set of pre-defined values [see below]).
    
    Requires:
    -- image: 2d array of pixelised image
    -- fitParams: tuple of strings which define the model parameters which are free to vary (those which will be fit). These must satisfy the definition of model parameters as set out in the default model dictionary. If None, then e1, e2 and T are fit (this could be made stricter by removing the default None initialisation, thereby requiring that a set of parameters to be fit is passed in).
    -- outputHandle: handle of the output file. **Result is always appended**. If not passed in, then result is not output. Output is in ASCII form.
    -- setParams: Default model dictionary containing fixed parameters which describe the model being fit. One part of a two part approach to setting the full model parameter dictionary, along with iParams. If None, then the default model dictionary is taken.
    -- modelLookup: Dictionary containing lookup table for pixelised model images, as defined in model_Production module. If None, no lookup is used, and the model is re-evaluated for each change in model parameters.
    -- searchMethod: String detailing which form of minimisation to use. Accepted values are:
    ___ simplex, brent, powell, cg, bfgs, l_bfgs_b, ncg (as defined in SciPy documentation)
    -- preSearchMethod: String detailing initial search over parameter space to find the global minimum, used as an initial guess for refinement with searchMethod. If None, initial guess is set to default passed in by the combination of setParams and iParams. If not None, then code will run an initial, coarse search over the parameter space to attempt to find the global minimum. By default this is switched off. Where preSearchMethod == grid or brute, a grid based search is used. Where this is used, a range must either be entered by the user through bruteRange, or it is taken from the entered prior information. NOTE: This still uses a typically coarse grid, therefore if the range is too wide then it is possible that the code may still find a local minimum if this exists within one grid point interval of the global minimum.
    -- Prior: NOT USED YET. Skeleton to allow for a parameter prior structure to be passed in
    -- bruteRange: [nPar, 2] sized tuple setting the range in which the initial preSearchMethod is evaluated, if this is done using a grid or brute method (both equivalent), where nPar is the number of free model parameters being fit. THIS DOES NOT CONSTITUTE A PRIOR, as the refinement may still find an ML value outside this range, however where the global maximum occurs outside this range the returned ML value may be expected to be biased.
    -- biasCorrect: integer, states what level of noise bias to correct the estimate to. Only 1st order correction (biasCorrect == 1) is supported. If biasCorrect == 0, the uncorrected estimate (and error if applicable) are output. If biasCorrect > 0, the uncorrected, corrected and error (if applicable) are output. When used, it is important that *the entered model parameter dictionary contains an accurate measure of the pixel noise of appropriate signal--to--noise, as the analytic bias scales according to both*. Noise can be estimated using estimate_Noise() before entry.
    -- bcoutputHandle: As outputHandle, except for the bias corrected estimator.
    -- error: String detailing error estimate to output. Supported values are:
    ___ fisher: Marginalised fisher error for each parameter around the ML point. See docstring for fisher_Error_ML().
    ___ brute: UNSUPPORTED, however an error defined on the parameter likelihood itself can be derived if the preSearchMethod and bruteRange are defined such that the Likelihood has *compact support*. If not, then this would be inaccurate (underestimated). Therefore coding for this is deferred until the application of a prior is developed, as use of a prior ensures compact support by default.
    -- iParams: set of optional arguments which, together with setParams, defines the initial model dictionary. Allows parameter values to be input individually on call, and is particularly useful for setting initial guesses where preSearchMethod == None.
    
    
    Model Parameter entry: Model Parameters can be entered using two methods
    ___ setParams: Full Dictionary of initial guess/fixed value for set of parameters. If None, this is set to the default set. May not be complete: if not, then model parameters are set to default as given in default_ModelParameter_Dictionary()
    ___ iParams: generic input which allows model parameters to be set individually. Keys not set are set to default as given by default_ModelParameter_Dictionary(). Where an iParams key is included in the default dictionary, or setParams, it will be updated to this value (**therefore iParams values have preference**). If a key not present in the default is entered, it is ignored
    ___ The initial choice of model parameters (including initial guesses for the minimisation routine where preSearchMethod == False) is thus set as setParams+{iParams}



    Returns:
    Returned: tuple of length equal to fitParams. Gives ML estimator for each fit parameter, with bias corrected version (if biasCorrect != 0) and error (if applicable) always in that order.
    """
    ''' Set up defaults '''

    ##Initialise result variables
    Returned = []
    err = None

    ## Exceptions based on input objects
    if (image is None or sum(image.shape) == 0):
        raise RuntimeError(
            'find_ML_Estimator - image supplied is None or uninitialised')

    if (len(fitParams) > 2 and modelLookup is not None
            and modelLookup['useLookup']):
        raise RuntimeError(
            'find_ML_Estimator - Model Lookup is not supported for more than double parameter fits'
        )

    ##Set up initial params, which sets the initial guess or fixed value for the parameters which define the model
    ##This line sets up the keywords that are accepted by the routine
    ## pixel_scale and size should be in arcsec/pixel and arcsec respectively. If pixel_scale = 1., then size can be interpreted as size in pixels
    ## centroid should be set to the centre of the image, here assumed to be the middle pixel

    if (setParams is None):
        print "Setting parameters to default"
        initialParams = modPro.default_ModelParameter_Dictionary()
    else:
        print "Updating initial parameters with set Params"
        initialParams = modPro.default_ModelParameter_Dictionary()
        modPro.update_Dictionary(initialParams, setParams)
        ## Deprecated initialParams.update(setParams)

    modPro.set_modelParameter(initialParams, iParams.keys(), iParams.values())

    ## Define modelParams
    modelParams = deepcopy(initialParams)

    ## Estimate Noise of Image
    if (calcNoise is not None):
        #Assumes each image is flattened and therefore needs to be reshaped.
        if (len(image.shape) == 2):
            if (image.shape[0] < 2):
                #Use only the first image
                tImage = image[0].reshape(modelParams['stamp_size'])
                maskCentroid = modelParams['centroid']
            else:
                #Estimate the noise from a stack-subtracted image: average an even number of realisations (assuming the pixel error is roughly symmetric) and subtract this stack, which should remove the feature and background; the effect on the noise itself is uncertain. Can only be used on multiple realisations of the same field. (An alternating-sign stack remains as a commented-out option below.)
                if (image.shape[0] % 2 == 0):
                    finalIndex = image.shape[0]
                else:
                    finalIndex = image.shape[0] - 1
                    print "Final Index check (should be even): ", finalIndex
                aStackImage = np.zeros(image[0].shape)
                for i in range(finalIndex):
                    aStackImage += image[i]  #*np.power(-1, i)

                print "\nEstimating noise from stack-subtracted image"
                aStackImage /= float(finalIndex)
                tImage = (image[0] - aStackImage).reshape(
                    modelParams['stamp_size'])

                #Turn off centroid masking (as feature should be removed), subtract stacked from each realisation, and flatten for noise estimation
                maskCentroid = None
                aStackImage = np.tile(aStackImage, (image.shape[0], 1))
                tImage = (image - aStackImage).flatten()

                print "--Done"

                #-- Note: maskCentroid is disabled above so that the flattened array (a larger data vector) can be used, reducing the noise on the error estimation

                ##Plot
                # import pylab as pl
                # f = pl.figure()
                # ax = f.add_subplot(111)
                # im = ax.imshow(tImage)
                # pl.colorbar(im)
                # pl.show()

        elif (len(image.shape) == 1):
            tImage = image.reshape(modelParams['stamp_size'])
            maskCentroid = modelParams['centroid']
        else:
            raise ValueError(
                "find_ML_Estimator: calcNoise: image not of expected shape")
        modelParams['noise'] = calcNoise(tImage, maskCentroid)
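
    ## Illustrative sketch (an assumption, not part of this module) of a
    ## calcNoise callable compatible with the call above; in practice
    ## estimate_Noise() is the intended estimator (see docstring):
    ##   def simple_calcNoise(img, maskCentroid):
    ##       #Ignore the centroid mask and return the raw pixel standard deviation
    ##       return img.std()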

    ####### Search lnL for minimum
    #Construct the initial guess for the free parameters by extracting them from the dictionary
    x0 = modPro.unpack_Dictionary(modelParams, requested_keys=fitParams)

    ###### Sanity check image dimensions compared to model parameters
    imDim = len(image.shape)
    if (imDim > 2):
        raise ValueError(
            "find_ML_Estimator: Image must not have more than two dimensions. Single postage stamp image must be flattened"
        )
    elif (imDim == 1
          and image.shape[0] != np.array(modelParams['stamp_size']).prod()):
        raise ValueError(
            "find_ML_Estimator: Flattened image (1D) length does not correspond to model parameter dimensions"
        )
    elif (imDim == 2
          and image.shape[1] != np.array(modelParams['stamp_size']).prod()):
        print 'Image shape: ', image.shape, ' Model shape:', modelParams[
            'stamp_size']
        raise ValueError(
            "find_ML_Estimator: image sahpe of second dimension is not consistent with expected model parameter dimension. 2D image array must contain multiple images across first dimension, and (flattened) pixels as a data vector in the second dimension: Have you remembered to flatten the image?"
        )

    if (preSearchMethod is not None):
        ## Conduct a presearch of the parameter space to set initial guess (usually grid-based or brute-force)
        if (vverbose or debug):
            print '\n Conducting a pre-search of parameter space to identify the global minimum'
        if (preSearchMethod.lower() == 'grid'
                or preSearchMethod.lower() == 'brute'):
            ##Brute force method over a range either set as the prior, or the input range.
            if (bruteRange is not None):
                if (vverbose or debug):
                    print '\n Using user-defined parameter range:', bruteRange

                print "Using bruteRange: ", bruteRange
                #x0, fval, bruteGrid, bruteVal
                bruteOut = opt.brute(get_logLikelihood,
                                     ranges=bruteRange,
                                     args=(fitParams, image, modelParams,
                                           modelLookup, 'sum'),
                                     finish=None,
                                     full_output=True)
                x0, fval, bruteGrid, bruteVal = bruteOut
                ## x0 has len(nParam); fval is scalar; bruteGrid has len(nParam), nGrid*nParam; bruteVal has nGrid*nParam

                ###Evaluate error based on brute by integration - this would only work if bruteRange covers the full range where the PDF is non-zero

                if (error is not None and error.lower() == 'brute'):
                    raise RuntimeError(
                        'find_ML_Estimator - brute labelled as the means of evaluating the error. This is possible, but not coded, owing to the limitation that bruteRange must cover the whole region where the likelihood is non-zero. When a prior is included, this could be taken to be exact, provided one knows the range where the prior has compact support, and the bruteRange reflects this.'
                    )
                ## use scipy.integrate.trapz(bruteVal, x = bruteGrid[i], axis = i) with i looping over all parameters (ensure the axis is set properly)
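                ## Illustrative sketch (untested assumption) for a single fitted
                ## parameter, treating bruteVal as -lnL evaluated on the grid:
                ##   from scipy.integrate import trapz
                ##   pdf  = np.exp(-(bruteVal - bruteVal.min()))
                ##   norm = trapz(pdf, x = bruteGrid)
                ##   mean = trapz(bruteGrid*pdf, x = bruteGrid)/norm
                ##   var  = trapz(((bruteGrid - mean)**2)*pdf, x = bruteGrid)/norm
                ##   err  = np.sqrt(var)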

                ##Testing of error determination
                # tErr = fisher_Error_ML(x0, fitParams, image, modelParams, modelLookup)
                # from scipy.stats import norm
                # rv = norm(loc = x0, scale = tErr)
                # ##Plot this
                # import pylab as pl
                # f = pl.figure()
                # ax = f.add_subplot(111)
                # import math
                # ax.plot(bruteGrid, np.exp(-1.*(bruteVal-np.amin(bruteVal))), bruteGrid, (np.sqrt(2*math.pi)*tErr)*rv.pdf(bruteGrid))
                # pl.show()
                # raw_input("Check")

                if (vverbose or debug):
                    print '\n preSearch has found a minimum (on a coarse grid) of:', x0

            elif (Prior is not None):
                if (vverbose or debug):
                    print '\n Using prior range'
                raise RuntimeError(
                    'find_ML_Estimator - Prior entry has not yet been coded up'
                )

            else:
                raise RuntimeError(
                    'find_ML_Estimator - Brute preSearch is active, but prior or range is not set'
                )

    if (debug or vverbose):
        ##Output Model Dictionary and initial guess information
        print 'Model Dictionary:', modelParams
        print '\n Initial Guess:', x0

    ##Find minimum chi^2 using scipy optimize routines
    ##version 11+ maxima = opt.minimize(get_logLikelihood, x0, args = (fitParams, image, modelParams))
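    ## Illustrative sketch (an assumption; requires scipy >= 0.11): the branches
    ## below could be unified through the interface referenced above, e.g.
    ##   res = opt.minimize(get_logLikelihood, x0,
    ##                      args = (fitParams, image, modelParams, modelLookup, 'sum'),
    ##                      method = 'Nelder-Mead')
    ##   maxima = res.x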
    if (searchMethod.lower() == 'simplex'):
        maxima = opt.fmin(get_logLikelihood,
                          x0=x0,
                          xtol=0.00001,
                          args=(fitParams, image, modelParams, modelLookup,
                                'sum'),
                          disp=(verbose or debug))
    elif (searchMethod.lower() == "emcee"):
        import emcee

        if (verbose):
            print "\n-Running emcee....."

        #Define MCMC parameters. These should be passed in
        nWalkers = 6
        nRun = 1000
        nBurn = 100

        if (not isinstance(x0, np.ndarray)):
            x0 = np.array(x0)
        nDim = x0.shape[0]

        print "x0: ", x0

        #Produce an initial position for each walker. For now, draw uniformly from -1.5*x0 to 1.5*x0 per parameter (note: this assumes x0 > 0). Better to pass this in, or inform it from the prior range
        p0 = np.zeros((nWalkers, nDim))
        for i in range(x0.shape[0]):
            p0[:, i] = np.random.uniform(-1.5 * x0[i], 1.5 * x0[i], nWalkers)

        print "P0:", p0

        sampler = emcee.EnsembleSampler(nWalkers,
                                        nDim,
                                        get_logLikelihood,
                                        args=(fitParams, image, modelParams,
                                              modelLookup, 'sum', -1))

        #Burn-in
        if (verbose):
            print "-Running burn-in....."
        pos, prob, state = sampler.run_mcmc(p0, nBurn)
        sampler.reset()
        if (verbose):
            print "--Finished burn-in."
            print " Position is ", pos
            print "with prob: ", prob

        #Run
        if (verbose):
            print "-Sampling....."
        sampler.run_mcmc(pos, nRun)
        if (verbose):
            print "--Finished", nRun, " samples."

        #Get output
        chain = sampler.flatchain
        pChain = sampler.flatlnprobability

        maxIndex = np.argmax(pChain, axis=0)
        maxima = chain[maxIndex, :]
        err = np.std(chain, axis=0)
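        ##np.std over the flattened chain gives a symmetric 1-sigma estimate; a
        ## percentile-based credible interval (a possible refinement, not used
        ## here) would be:
        ##   lo, hi = np.percentile(chain, [16, 84], axis = 0)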

        if (debug):
            import pylab as pl
            f = pl.figure()
            for i in range(1, nDim + 1):
                ax = f.add_subplot(nDim, 1, i)
                ax.hist(chain[:, i - 1], bins=100)
                ax.set_title("Par: " + fitParams[i - 1])

            pl.show()

    elif (searchMethod.lower() == 'brent'):
        ##Brent is a 1D method (scipy.optimize provides brent(); fmin_brent does not exist), so this branch applies to single-parameter fits only
        ##Assumes get_logLikelihood accepts a scalar parameter value
        if (len(fitParams) > 1):
            raise ValueError(
                'find_ML_Estimator - brent searchMethod supports single-parameter fits only'
            )
        maxima = opt.brent(get_logLikelihood,
                           args=(fitParams, image, modelParams, modelLookup,
                                 'sum'),
                           brack=(x0[0] - 0.1, x0[0] + 0.1),
                           tol=0.00001)
    elif (searchMethod.lower() == 'powell'):
        maxima = opt.fmin_powell(get_logLikelihood,
                                 x0=x0,
                                 xtol=0.00001,
                                 args=(fitParams, image, modelParams,
                                       modelLookup, 'sum'),
                                 disp=(verbose or debug))
    elif (searchMethod.lower() == 'cg'):
        ##Not tested (10Aug)
        maxima = opt.fmin_cg(
            get_logLikelihood,
            x0=x0,
            fprime=differentiate_logLikelihood_Gaussian_Analytic,
            args=(fitParams, image, modelParams, modelLookup, 'sum'),
            disp=(verbose or debug),
            gtol=0.000001)  ##fmin_cg accepts gtol (gradient tolerance), not ftol
    elif (searchMethod.lower() == 'bfgs'):
        ##Not tested (10Aug)
        maxima = opt.fmin_bfgs(
            get_logLikelihood,
            x0=x0,
            fprime=differentiate_logLikelihood_Gaussian_Analytic,
            args=(fitParams, image, modelParams, modelLookup, 'sum'),
            disp=(verbose or debug))
    elif (searchMethod.lower() == 'l_bfgs_b'):
        ##Not tested (10Aug)
        ##fmin_l_bfgs_b returns a tuple (x, f, info): unpack so maxima holds the minimising parameters
        maxima, lbfgsFval, lbfgsInfo = opt.fmin_l_bfgs_b(
            get_logLikelihood,
            x0=x0,
            fprime=differentiate_logLikelihood_Gaussian_Analytic,
            args=(fitParams, image, modelParams, modelLookup, 'sum'),
            disp=(verbose or debug))
    elif (searchMethod.lower() == 'ncg'):
        ##Not tested (10Aug)
        maxima = opt.fmin_ncg(
            get_logLikelihood,
            x0=x0,
            fprime=differentiate_logLikelihood_Gaussian_Analytic,
            args=(fitParams, image, modelParams, modelLookup, 'sum'),
            disp=(verbose or debug))
    else:
        raise ValueError(
            'find_ML_Estimator - searchMethod entered is not supported:' +
            str(searchMethod))

    ##Make numpy array (in the case where 1D is used and scalar is returned):
    if (len(fitParams) == 1):
        maxima = np.array(makeIterableList(maxima))

    if (vverbose):
        print 'maxima is:', maxima

    if (debug):
        ##Plot and output residual
        print 'Plotting residual..'

        fittedParams = deepcopy(modelParams)
        modPro.set_modelParameter(fittedParams, fitParams, maxima)
        ''' Deprecated
        for i in range(len(fitParams)):
            fittedParams[fitParams[i]] =  maxima[i]
        '''

        model, disc = modPro.user_get_Pixelised_Model(
            fittedParams, sbProfileFunc=gaussian_SBProfile_CXX)
        ##Residual = image - model; reshape the flattened input to the stamp shape, without modifying the input image in place
        if (len(image.shape) == 1):
            residual = image.reshape(modelParams['stamp_size']) - model
        elif (len(image.shape) == 2):
            #Multiple flattened realisations: plot the residual of the first
            residual = image[0].reshape(modelParams['stamp_size']) - model
        else:
            raise ValueError(
                "Error calculating residual: Image has an unknown rank")

        import pylab as pl
        ##Plot image and model
        f = pl.figure()
        ax = f.add_subplot(211)
        ax.set_title('Model')
        im = ax.imshow(model, interpolation='nearest')
        pl.colorbar(im)
        ax = f.add_subplot(212)
        ax.set_title('Image')
        if (len(image.shape) == 2):
            im = ax.imshow(image[0].reshape(modelParams['stamp_size']),
                           interpolation='nearest')
        else:
            im = ax.imshow(image.reshape(modelParams['stamp_size']),
                           interpolation='nearest')
        pl.colorbar(im)

        pl.show()

        ##Plot Residual
        f = pl.figure()
        ax = f.add_subplot(111)
        im = ax.imshow(residual, interpolation='nearest')
        ax.set_title('Image-Model')
        pl.colorbar(im)
        pl.show()

    if (np.isnan(maxima).sum() > 0):
        raise ValueError('find_ML_Estimator - FATAL - NaNs found in maxima: ' +
                         str(maxima))

    if (verbose):
        print 'Maxima found to be:', maxima

    ##Output Result
    if (outputHandle is not None):
        np.savetxt(outputHandle, np.array(maxima).reshape(1, maxima.shape[0]))
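        ##outputHandle is assumed to be an open, writeable file object (e.g.
        ## opened via open(filename, 'a')), so that repeated calls append one
        ## row of estimates per call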

    ## Bias Correct
    if (biasCorrect == 0):
        Returned.append(maxima)
    elif (biasCorrect == 1):
        ana = mBias.analytic_GaussianLikelihood_Bias(maxima,
                                                     fitParams,
                                                     modelParams,
                                                     order=biasCorrect,
                                                     diffType='analytic')
        bc_maxima = maxima - ana

        ##Output Result
        if (bcoutputHandle is not None):
            np.savetxt(bcoutputHandle,
                       np.array(bc_maxima).reshape(1, bc_maxima.shape[0]))

        if (verbose):
            print 'BC Maxima found to be:', bc_maxima

        ##Return minimised parameters
        Returned.extend([maxima, bc_maxima])
    else:
        raise ValueError(
            'find_ML_Estimator - biasCorrect(ion) value entered is not applicable: '
            + str(biasCorrect))

    ## Get error on the measurement. A brute error would have been constructed on the original brute-force grid evaluation above; an emcee error is taken from the sample scatter.
    if (error is not None):
        if (err is None):
            if (error.lower() == 'fisher'):
                err = fisher_Error_ML(
                    maxima, fitParams, image, modelParams,
                    modelLookup)  #Use finalised modelParams here?
            else:
                raise ValueError(
                    "find_ML_Estimator - error requested, but no precomputed error exists and the error label entered is not acceptable"
                )
        Returned.append(err)

    return Returned