Example #1
File: ptc.py Project: nhaddad/pydtk
def ron_adu(b1, b2, *coor, **kargs):
    """
    Take two bias images, subtract one from the other, and compute the standard
    deviation of the difference in many subwindows. Report the median of the
    per-window RMS.

    Syntax:
    ron_adu(b1, b2, 200,400, 300, 600)
    ron_adu(b1, b2, 200,400, 300, 600, PRINT=True)  Print individual values
    ron_adu(b1, b2, 200,400, 300, 600, HIST=True)   Plot histogram

    TODO: return mean instead of median
    DONE: make histogram plot
    DONE: provide RETURN option

    """

    nwx = kargs.get('NWX', 10)  # set number of windows in x direction
    nwy = kargs.get('NWY', 10)  # set number of windows in y direction

    x1, x2, y1, y2 = b1.get_windowcoor(*coor)

    # compute difference of images
    biasdiff = b1 - b2
    # prepare array to receive std values
    std_biasdiff = np.zeros((nwx, nwy))

    # TODO modify to use subwindows to generate them
    # compute stddev and mean for every subwindow
    windows = subwindowcoor(x1, x2, y1, y2, **kargs)
    for i, j, xi, xf, yi, yf in windows:
        std_biasdiff[i, j] = biasdiff[xi:xf, yi:yf].std() / np.sqrt(2)
        if kargs.get('PRINT', False):
            print(f'[{xi:5}:{xf:5},{yi:5}:{yf:5}] => {std_biasdiff[i, j]:.3}')

    diff_median = np.median(std_biasdiff, axis=None)

    if kargs.get('RETURN', False):
        return diff_median  # /np.sqrt(2.0)
    else:
        print(f'RON @ [{x1}:{x2},{y1}:{y2}] = {diff_median:2.2} ADUs')
    if kargs.get('HIST', False):
        std_biasdiff.shape = (nwx * nwy, )
        diff_std = np.std(std_biasdiff, axis=None)
        print(f'StdDev = {diff_std:.2}')
        plt.clf()
        plt.hist(std_biasdiff,
                 range=(diff_median - 3 * diff_std,
                        diff_median + 3 * diff_std),
                 bins=30)
        plt.show()
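All of these examples rely on a subwindowcoor helper that is not part of this listing. A minimal sketch of what such a generator might look like, assuming it honors the NWX/NWY keyword arguments and yields the grid indices together with the pixel bounds of each subwindow (the actual pydtk implementation may differ):

def subwindowcoor_sketch(x1, x2, y1, y2, **kargs):
    """Yield (i, j, xi, xf, yi, yf) for an NWX x NWY grid of subwindows."""
    nwx = kargs.get('NWX', 10)  # number of windows in x
    nwy = kargs.get('NWY', 10)  # number of windows in y
    wx = (x2 - x1) // nwx       # width of each subwindow in x
    wy = (y2 - y1) // nwy       # width of each subwindow in y
    for i in range(nwx):
        for j in range(nwy):
            xi = x1 + i * wx
            yi = y1 + j * wy
            yield i, j, xi, xi + wx, yi, yi + wy

With a generator of this shape, the loop in ron_adu fills exactly one std value per grid cell.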
Example #2
File: ptc.py Project: nhaddad/pydtk
def ptc_ffpairs_mw(imagelist, *coor, **kargs):
    """
    Produce a PTC plot from pairs of flat fields, analysed over multiple subwindows.
    Each pair of flat fields must be taken at the same light level, and the first
    two images in the list must be bias frames.
    To eliminate the FPN, the 'shotnoise' image is computed as the difference
    of two debiased flat-field images.

    Optional kargs:
    FACTOR (default=2.0)
    MAXSIGNAL (default=65535) => compute the PTC only for signal values below MAXSIGNAL
    VERBOSE (default=False) => print a table with signal and variance
    CLIP (default=True) => use sigma-clipped statistics on the images before computing the CF

    """

    order = kargs.get('ORDER', 2)  # order of polynomial regression
    if order > 2:
        order = 2

    MAXSIGNAL = kargs.get('MAXSIGNAL', 65535.0)
    sigma = kargs.get('SIGMA', 3)
    ext = kargs.get('ext', 0)

    x1, x2, y1, y2 = coor[:4]

    oddimageindex = list(range(3, len(imagelist), 2))
    evenimageindex = list(range(2, len(imagelist), 2))

    # Read in bias1 and bias2
    b1 = Image(imagelist[0], ext).crop(x1, x2, y1, y2).get_data()
    b2 = Image(imagelist[1], ext).crop(x1, x2, y1, y2).get_data()

    biasRON = sigma_clip(b1 - b2).std() / np.sqrt(2.0)

    # Separate images in even and odd (crop the images..)
    ff1list = [
        Image(imagelist[i], ext).crop(x1, x2, y1, y2).get_data()
        for i in oddimageindex
    ]
    ff2list = [
        Image(imagelist[i], ext).crop(x1, x2, y1, y2).get_data()
        for i in evenimageindex
    ]

    # NHA new implementation with multi windows START
    # accumulate the mean signal and shot-noise variance over every subwindow
    # of every flat-field pair

    meanff = []
    signalvar = []

    for ff1, ff2 in zip(ff1list, ff2list):
        # windows is a generator of subwindows
        windows = subwindowcoor(0, ff1.shape[0], 0, ff1.shape[1], **kargs)
        for i, j, xi, xf, yi, yf in windows:
            win = slice(xi, xf), slice(yi, yf)
            # compute mean value on each window for normalized ff
            meanff.append(
                sigma_clip((ff1[win] + ff2[win]) / 2.0, sigma=sigma).mean() -
                sigma_clip((b1[win] + b2[win]) / 2.0, sigma=sigma).mean())
            # compute standard deviation on each window for normalized ff
            if kargs.get('NORMFF2', True):
                ff_diff = ff1[win] - ff2[win] * (
                    sigma_clip(ff1[win], sigma=sigma).mean() /
                    sigma_clip(ff2[win], sigma=sigma).mean())
            else:
                ff_diff = ff1[win] - ff2[win]
            var_ff_diff = sigma_clip(ff_diff, sigma=sigma).std()**2
            # Measure RON from difference of two bias
            var_ron = (sigma_clip(b1[win] - b2[win], sigma=sigma).std()**2) / 2.0
            signalvar.append(0.5 * (var_ff_diff - 2 * var_ron))

    # compute polynomial coefficients
    meanff = np.array(meanff)
    signalvar = np.array(signalvar)
    coefts = np.polyfit(meanff, signalvar, order)
    polyts = np.poly1d(coefts)
    variance_fitted = np.polyval(polyts, meanff)

    fig = plt.figure()  # create a figure object
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure

    ax.set_ylabel('Variance')
    ax.set_xlabel('Signal')
    ax.grid(True)
    ax.set_title('Photon Transfer Curve')

    # plot variance vs signal
    ax.plot(meanff, signalvar, 'b.')
    ax.plot(meanff, variance_fitted, 'r-')
    plt.show()

    if order == 1:
        cf = 1 / coefts[0]
        print(
            f'Extension: {ext}   CF = {cf:2.3f} -e/ADU   RON = {cf * biasRON:2.3f}'
        )
    elif order == 2:
        cf = 1 / coefts[1]
        print(
            f'Extension: {ext}   CF = {cf:2.3f} -e/ADU   RON = {cf * biasRON:2.3f} -e'
        )

    if kargs.get('RETURN', True):
        return meanff, signalvar
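A hypothetical call of ptc_ffpairs_mw, assuming FITS files on disk (the file names below are made up): two bias frames first, then flat-field pairs taken at increasing light levels, analysed over a 5x5 grid of subwindows with a linear fit.

imagelist = ['bias_01.fits', 'bias_02.fits',
             'ff_level1_a.fits', 'ff_level1_b.fits',
             'ff_level2_a.fits', 'ff_level2_b.fits']

# window [200:1800, 200:1800], 5x5 subwindows, linear variance-vs-signal fit
signal, variance = ptc_ffpairs_mw(imagelist, 200, 1800, 200, 1800,
                                  NWX=5, NWY=5, ORDER=1, RETURN=True)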
Example #3
File: ptc.py Project: nhaddad/pydtk
def gain2(imagelist, *coor, **kargs):
    """
    Variation of CCD gain computation
    Compute the gain of the system using 2 bias and 2 FF images. The procedure divides the window
    into NWX*NWY subwindows, computes the gain for each of them, then computes the mean
    value and displays a histogram. If the window coordinates are not given, it uses the full
    CCD.

    Syntax:
    gain(imagelist[,xi,xf,yi,yf][,NWX=10][,NWY=10][,VERBOSE=True/False][,SAVE=True][,TITLE='Graph Title'][,RETURN=True/False][, MEDIAN=True/False])

    Note: the image list must contain 2 bias and 2 ff in that order!
    imagelist can be a list of names, a list of Path or list of images
    b1,b2= bias images
    f1,f2= ff images
    *coor=[xi,xf,yi,yf] = coordinates of the window to analyze (should be a flat region)
    kargs
    -------
    VERBOSE=True => print intermediate values
    TITLE='Graph Title' => put a title in the graph
    SAVE=True/False => if True, saves the graph in png format
    RETURN=True/False => if True, return only ConFac without plots
    MEDIAN=True/False => default True, computes median instead of mean
    RATIO=True/False => default True. Just changes the way the FPN is eliminated. Both
    methods give almost the same results
    NWX= number of windows in X direction (default 10)
    NWY= number of windows in Y direction (default 10)
    EXT=image extension to load, default 0


    """
    ext = kargs.get('ext', 0)

    if len(imagelist) != 4:
        print('imagelist len different from 4')
        return None

    #  if imagelist contains image names or Paths, load them
    if all([isinstance(i, (str, Path)) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        print(f'Extension={ext}')
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    # elif all are images, just assign them
    elif all([isinstance(i, Image) for i in imagelist]):
        b1 = imagelist[0]
        b2 = imagelist[1]
        ff1 = imagelist[2]
        ff2 = imagelist[3]
    else:
        print("Not all objects in image list are Images or filenames")
        return None

    # Check if the bias frames have EXPTIME == 0.0 and the FFs have EXPTIME > 0.0
    testlist = [
        b1.header['EXPTIME'] == 0.0,
        b2.header['EXPTIME'] == 0.0,
        ff1.header['EXPTIME'] > 0.0,
        ff2.header['EXPTIME'] > 0.0,
    ]
    print(testlist)

    if not all(testlist):
        print('Exposure times for at least one file are not correct')
        return None

    nwx = kargs.get('NWX', 10)  # set number of windows in x direction
    nwy = kargs.get('NWY', 10)  # set number of windows in y direction

    x1, x2, y1, y2 = b1.get_windowcoor(*coor)
    # print(x1,x2,y1,y2)

    # now work with cropped images, where the signal is more or less flat....
    b1 = b1.crop(x1, x2, y1, y2)
    b2 = b2.crop(x1, x2, y1, y2)
    ff1 = ff1.crop(x1, x2, y1, y2)
    ff2 = ff2.crop(x1, x2, y1, y2)

    dbiasff1 = ff1 - b1  # debiased FF1
    dbiasff2 = ff2 - b2  # debiased FF2
    meanff2 = dbiasff2.mean()  # mean signal on FF2 debiased
    meanff1 = dbiasff1.mean()  # mean signal on FF1 debiased
    #ratio = meanff1/meanff2
    # print(ratio)

    if kargs.get('VERBOSE', False):
        print(f'format images X={b1.shape[0]} pix Y={b1.shape[1]} pix')
        print(
            f'Nx:{nwx} Ny:{nwy} X1:{x1} X2:{x2} Y1:{y1} Y2:{y2} WX:{(x2-x1)//nwx} WY:{(y2-y1)//nwy}'
        )
        print('')
        print(f'meanff2 ={meanff2}')

    #dbiasff2 = dbiasff2*ratio
    #dbias_ff_diff = dbiasff1 - dbiasff2
    #dbias_ff_sig = (dbiasff1 + dbiasff2)/2.0

    # from Fabrice Chisthen
    dbias_ff_sig = (dbiasff1 / dbiasff2) * meanff2

    # compute difference of 2 bias to get the RON
    dbias = b1 - b2

    # generate auxiliary arrays of nwx * nwy elements and initialize to zero
    meansig = np.zeros((nwx, nwy))
    stdff = np.zeros((nwx, nwy))
    stdbias = np.zeros((nwx, nwy))
    cf = np.zeros((nwx, nwy))
    stdsig = np.zeros((nwx, nwy))

    # windows is a generator of subwindows
    windows = subwindowcoor(0, b1.shape[0], 0, b1.shape[1], **kargs)
    for i, j, xi, xf, yi, yf in windows:
        # compute mean value on each window for normalized ff
        meansig[i, j] = np.mean(dbias_ff_sig[xi:xf, yi:yf])
        # compute standard deviation on each window for normalized ff
        stdsig[i, j] = np.std(dbias_ff_sig[xi:xf, yi:yf]) / np.sqrt(2.0)
        cf[i, j] = meansig[i, j] / (stdsig[i, j]**2)  # compute CF for each window
        # compute standard deviation for each window of bias difference
        stdbias[i, j] = np.std(dbias[xi:xf, yi:yf]) / np.sqrt(2.0)

        if kargs.get('VERBOSE', False):
            print(
                f"X({xi+x1},{xf+x1}) Y({yi+y1},{yf+y2}) Mean:{meansig[i, j]:.2f} stdff:{stdsig[i, j]:.2f}  CF:{cf[i, j]:.2f}"
            )

    if kargs.get('MEDIAN', True):
        ConFac = np.median(cf, axis=None)
        RON = np.median(stdbias, axis=None)

    else:
        ConFac = np.mean(cf, axis=None)
        RON = np.mean(stdbias, axis=None)  # RON in ADUs

    # RON =  RMS / sqrt(2)    #RON in ADUs
    RONe = RON * ConFac  # RON in electrons

    # Error in CF estimation is the std/sqrt(number of windows)
    CFstd = np.std(cf, axis=None) / np.sqrt(nwx * nwy)

    # Check if run as ROUTINE, in that case return only the Conversion Factor and don't continue with plotting
    if kargs.get('RETURN', False):
        return x1, x2, y1, y2, ConFac, RONe, meanff2
    else:
        plt.figure()

    print("*******************************************")
    print(f"*CF  ={ConFac:.2f} +/-{CFstd:.2f} e/ADU")
    print(f"*RON ={RONe:.3f} -e")
    print(f"*RON ={RON:.3f} ADUs")
    print("*******************************************")

    # change shape of cf array to later compute the standard deviation and also make the histogram

    cf.shape = (nwx * nwy, )
    cfstd = np.std(cf, axis=None)
    plt.clf()
    plt.hist(cf, range=(ConFac - 3 * cfstd, ConFac + 3 * cfstd), bins=20)
    plt.figtext(0.15,
                0.8, ("CF mean=%5.3f +/-%5.3f e/ADU") % (ConFac, CFstd),
                fontsize=11,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.figtext(0.15,
                0.75, ("RON =%6.3f -e") % (RONe),
                fontsize=11,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.figtext(0.15,
                0.70, ("Computed @ %6.3f ADUs") % (np.mean(meansig)),
                fontsize=11,
                bbox=dict(facecolor='yellow', alpha=0.5))

    Title = kargs.get('TITLE', '')
    plt.title(Title)
    filetitle = Title.replace(' ', '_')

    if kargs.get('SAVE', False):
        plt.savefig('ConFac_' + filetitle + '.png')

    plt.show()
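The per-window arithmetic in gain2 is the standard shot-noise relation: once the FPN is removed (here by dividing the two debiased flats and rescaling by meanff2), the flat-field variance in ADU^2 equals the signal in ADU divided by the conversion factor. A small numeric illustration with made-up values:

# made-up values for one subwindow of the FPN-corrected flat field
mean_signal_adu = 20000.0   # mean of dbias_ff_sig in the window
std_signal_adu = 100.0      # std of dbias_ff_sig / sqrt(2), as in the loop

cf = mean_signal_adu / std_signal_adu**2   # conversion factor in e-/ADU
print(cf)                                  # 2.0

ron_adu = 2.5                              # RON in ADUs from the bias difference
print(ron_adu * cf)                        # 5.0 electrons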
Example #4
File: ptc.py Project: nhaddad/pydtk
def gain(imagelist, *coor, **kargs):
    """
    Compute the gain of the system using 2 bias and 2 FF images. The procedure divides the window
    into NWX*NWY subwindows, computes the gain for each of them, then computes the mean
    value and displays a histogram. If the window coordinates are not given, it uses the full
    CCD.

    Syntax:
    gain(imagelist[,xi,xf,yi,yf][,NWX=10][,NWY=10][,VERBOSE=True/False][,SAVE=True][,TITLE='Graph Title'][,RETURN=True/False][, MEDIAN=True/False])

    Note: the image list must contain 2 bias and 2 ff in that order!
    imagelist can be a list of names, a list of Paths or list of images
    b1,b2= bias images
    f1,f2= ff images
    *coor=[xi,xf,yi,yf] = coordinates of the window to analyze (should be a flat region)
    kargs
    -------
    VERBOSE=True => print intermediate values
    TITLE='Graph Title' => put a title in the graph
    SAVE=True/False => if True, saves the graph in png format
    RETURN=True/False => if True, return only ConFac without plots
    MEDIAN=True/False => default False, computes the median instead of the sigma-clipped mean
    NORMFF2=True/False => default False, normalize FF2 to set its mean level equal to FF1
    RATIO=True/False => default True. Just changes the way the FPN is eliminated. Both
    methods give almost the same results
    NWX= number of windows in X direction (default 10)
    NWY= number of windows in Y direction (default 10)
    EXT=image extension to load, default 0
    SIGMA = std deviation used by sigma_clip, default=3


    """
    ext = kargs.get('ext', 0)
    sigma = kargs.get('SIGMA', 3)

    if len(imagelist) != 4:
        print('imagelist len different from 4')
        return None

    #  if imagelist contains only image names, load them
    if all([isinstance(i, str) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        print(f'Extension={ext}')
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    # elif all are Paths, load them
    elif all([isinstance(i, Path) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    # elif all are images, just assign them
    elif all([isinstance(i, Image) for i in imagelist]):
        b1 = imagelist[0]
        b2 = imagelist[1]
        ff1 = imagelist[2]
        ff2 = imagelist[3]
    else:
        print("Not all objects in image list are Images or filenames")
        return None

    x1, x2, y1, y2 = coor[:4]

    b1 = b1.get_data()
    b2 = b2.get_data()
    ff1 = ff1.get_data()
    ff2 = ff2.get_data()

    nwx = kargs.get('NWX', 10)  # set number of windows in x direction
    nwy = kargs.get('NWY', 10)  # set number of windows in y direction

    if kargs.get('VERBOSE', False):
        print(f'format images X={b1.shape[0]} pix Y={b1.shape[1]} pix')
        print(
            f'Nx:{nwx} Ny:{nwy} X1:{x1} X2:{x2} Y1:{y1} Y2:{y2} WX:{(x2-x1)//nwx} WY:{(y2-y1)//nwy}'
        )
        print('')

    # generate auxiliary arrays of nwx * nwy elements and initialize to zero
    meansig = np.zeros((nwx, nwy))

    stdbias = np.zeros((nwx, nwy))
    cf = np.zeros((nwx, nwy))

    stdsig = np.zeros((nwx, nwy))

    # windows is a generator of subwindows
    windows = subwindowcoor(x1, x2, y1, y2, **kargs)
    for i, j, xi, xf, yi, yf in windows:
        win = slice(xi, xf), slice(yi, yf)
        # compute mean value on each window for normalized ff
        meansig[i, j] = sigma_clip(
            (ff1[win] + ff2[win]) / 2.0).mean() - sigma_clip(
                (b1[win] + b2[win]) / 2.0).mean()

        # compute standard deviation on each window for normalized ff
        if kargs.get('NORMFF2', False):
            ff_diff = ff1[win] - ff2[win] * (sigma_clip(ff1[win]).mean() /
                                             sigma_clip(ff2[win]).mean())
        else:
            ff_diff = ff1[win] - ff2[win]
        var_ff_diff = sigma_clip(ff_diff).std()**2
        # Measure RON from difference of two bias
        var_ron = (sigma_clip(b1[win] - b2[win]).std()**2) / 2.0
        stdsig[i, j] = (var_ff_diff - 2 * var_ron)
        cf[i, j] = 2 * meansig[i, j] / stdsig[i, j]  # compute CF for each window
        # compute standard deviation for each window of bias difference
        stdbias[i, j] = np.sqrt(var_ron)

        if kargs.get('VERBOSE', False):
            print(
                f"X({xi+x1},{xf+x1}) Y({yi+y1},{yf+y2}) Mean:{meansig[i, j]:.2f} stdff:{stdsig[i, j]:.2f}  CF:{cf[i, j]:.2f}"
            )

    if kargs.get('MEDIAN', False):
        ConFac = np.median(cf, axis=None)
        RON = np.median(stdbias, axis=None)

    else:
        ConFac = sigma_clip(cf, sigma=sigma).mean()
        RON = sigma_clip(stdbias, sigma=sigma).mean()

    # RON =  RMS / sqrt(2)    #RON in ADUs
    RONe = RON * ConFac  # RON in electrons

    # Error in CF estimation is the std/sqrt(number of windows)
    #CFstd = np.std(cf, axis=None)/np.sqrt(nwx*nwy)
    CFstd = sigma_clip(cf).std()  #np.std(cf, axis=None)/np.sqrt(nwx*nwy)

    # Check if run as ROUTINE, in that case return only the Conversion Factor and don't continue with plotting
    if kargs.get('RETURN', False):
        return (ConFac, RONe,
                np.mean(meansig, axis=None), np.median(meansig, axis=None),
                np.mean(stdsig, axis=None)**2, np.median(stdsig, axis=None)**2,
                x1, x2, y1, y2)
    else:
        plt.figure()

    print("*******************************************")
    print(f"*CF  ={ConFac:.2f} +/-{CFstd:.2f} e/ADU")
    print(f"*RON ={RONe:.3f} -e")
    print(f"*RON ={RON:.3f} ADUs")
    print("*******************************************")

    # change shape of cf array to later compute the standard deviation and also make the histogram

    cf.shape = (nwx * nwy, )
    cfstd = np.std(cf, axis=None)
    plt.clf()
    plt.hist(cf, range=(ConFac - 3 * cfstd, ConFac + 3 * cfstd), bins=20)
    plt.figtext(0.15,
                0.8, ("CF mean=%5.3f +/-%5.3f e/ADU") % (ConFac, CFstd),
                fontsize=11,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.figtext(0.15,
                0.75, ("RON =%6.3f -e") % (RONe),
                fontsize=11,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.figtext(0.15,
                0.70, ("Computed @ %6.3f ADUs") % (np.mean(meansig)),
                fontsize=11,
                bbox=dict(facecolor='yellow', alpha=0.5))

    Title = kargs.get('TITLE', '')
    plt.title(Title)
    filetitle = Title.replace(' ', '_')

    if kargs.get('SAVE', False):
        plt.savefig('ConFac_' + filetitle + '.png')

    plt.show()
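Unlike gain2, gain removes the FPN by differencing the two flats: the variance of ff1 - ff2 contains twice the shot noise plus twice the read noise, so per subwindow the conversion factor is CF = 2 * mean / (var_diff - 2 * var_ron), which is exactly what the loop above computes. A made-up numeric check of that relation:

# made-up values for one subwindow
mean_signal_adu = 20000.0   # debiased mean of (ff1 + ff2) / 2
var_ff_diff = 20400.0       # variance of ff1 - ff2: twice shot noise + twice read noise
var_ron = 200.0             # single-frame read-noise variance, i.e. var(b1 - b2) / 2

cf = 2 * mean_signal_adu / (var_ff_diff - 2 * var_ron)
print(cf)                   # 2.0 e-/ADU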
Example #5
File: ptc.py Project: nhaddad/pydtk
def ptc_pixels(biaslist, fflist, ext=0, *coor, **kargs):
    """
    Perform the PTC computation to get the gain and RON from a list of bias image
    names and a list of flat-field image names.
    The ff images should be the same scene with all possible light levels.
    An example would be a grism ff on FORS or MUSE
    To eliminate the FPN, the analysis is done pixel by pixel.

    optional kargs:
    LOW: low level in ADUs to compute ptc
    HIGH: high level in ADU to compute ptc
    STEP: step in ADUs to compute variance (small steps will slow down the computation)
    ORDER (default = 1) polynomial order to fit the ptc
    RETURN (default=False) returns CF and RON

    Ex:
    ptc_pixels(biaslist, fflist, 0, 2000, 300, 600, LOW=100, HIGH=50000, STEP=10)

    signal, var, var_fitted, cf = ptc_pixels(
        b1, ff1, ff2, 100,200,10, 2000, LOW=100, HIGH=50000, STEP=100, OLAYERS=0.4, RETURN=True)
    Compute the PTC in the window [100:200,10:2000] from 100ADUs up to 50000 ADUs each 100 ADUs,

    """

    low = kargs.get('LOW', 0)  # minimum signal level to explore
    high = kargs.get('HIGH', 60000)  # maximum signal level to explore
    step = kargs.get('STEP', 100)  # step size, minimum is 1
    nwx = kargs.get('NWX', 10)  # number of windows in X used to compute the RON
    nwy = kargs.get('NWY', 10)  # number of windows in Y used to compute the RON

    # print("Low = {}".format(low))
    # print("High = {}".format(high))
    # print("Step = {}".format(step))

    order = kargs.get('ORDER', 1)  # order of polynomial regression
    if order > 2:
        order = 2

    # print("Order = {}".format(order))

    # read biaslist
    biasimages = [Image(i, ext) for i in biaslist]

    # read fflist
    ffimages = [Image(i, ext) for i in fflist]

    x1, x2, y1, y2 = biasimages[0].get_windowcoor(*coor)

    # print("{},{},{},{}".format(x1,x2,y1,y2))

    # crop bias images
    biascroped = [i.crop(*coor) for i in biasimages]

    # compute bias mean
    biasmean = meanstack(biascroped)
    # print(biasmean.shape)

    # compute
    stdsig = np.zeros((nwx, nwy))
    windows = subwindowcoor((x2 - x1) // 2 - 5 * nwx, (x2 - x1) // 2 + 5 * nwx,
                            (y2 - y1) // 2 - 5 * nwy, (y2 - y1) // 2 + 5 * nwy,
                            **kargs)
    for i, j, xi, xf, yi, yf in windows:
        stdsig[i, j] = biasmean[xi:xf, yi:yf].std() * np.sqrt(len(biascroped))

    # crop ff images
    ffcroped = [i.crop(*coor) for i in ffimages]
    # print(ffcroped[0].shape)

    # debiased ff
    ffcropdb = [(i - biasmean) for i in ffcroped]

    ffcropdb_data = [i.get_data() for i in ffcropdb]

    # compute signal
    ffsignal = meanstack(ffcropdb)

    # use only data from images
    ffsignal_data = ffsignal.get_data()

    # compute variance of all ffcropdb images along axis 0
    ffcropdb_stacked = np.stack(ffcropdb_data)
    ffvar = ffcropdb_stacked.var(ddof=1, axis=0)

    # flatten resulting arrays
    ffsignal_flatten = ffsignal_data.flatten()
    ffvar_flatten = ffvar.flatten()
    # print("ffsignal_flatten, ffvar_flatten : {}, {}".format(len(ffsignal_flatten), len(ffvar_flatten)))

    # convert ffsignal_flatten in integer
    ffsignal_flatten = ffsignal_flatten.astype(
        int)  # [int(i) for i in ffsignal_flatten]
    ffvar_flatten = ffvar_flatten.astype(int)

    # sort ffsignal_flatten and  ffvar_flatten
    # indx = np.argsort(ffsignal_flatten)
    # ffsignal_flatten = ffsignal_flatten[indx]
    # ffvar_flatten =ffvar_flatten[indx]

    # get unique values in ff
    ffsignal_unique = np.unique(ffsignal_flatten)
    # print("ffsignal_unique : {}".format(len(ffsignal_unique)))

    # filter out  values lower than LOW and higher than HIGH
    ffsig_unique = [i for i in ffsignal_unique if i >= low and i <= high]
    # print("ffsig_unique: {}".format(len(ffsig_unique)))

    # generate sampling indices into the unique signal values
    # (values below LOW and above HIGH were already filtered out above)
    sampling = list(range(0, len(ffsig_unique), step))
    # print("sampling ={}".format(len(sampling)))

    # create subset of unique values using sampling
    ffsampled = [ffsig_unique[i] for i in sampling]
    # print("ffsampled : {}".format(len(ffsampled)))

    # by default use mean computation for variance
    if kargs.get('MEDIAN', False):
        # Compute variance mean for values in ffsignalflatten that are in ffsampled
        variance = [
            np.median(ffvar_flatten[ffsignal_flatten == i]) for i in ffsampled
        ]
    else:
        variance = [
            np.mean(ffvar_flatten[ffsignal_flatten == i]) for i in ffsampled
        ]

    # filter out the pixels with variance == 0 or greater than 200000
    variance = np.array(variance)
    ffsampled = np.array(ffsampled)
    vfiltered_index = np.where((variance > 0) & (variance < 200000))
    ffsampled = ffsampled[vfiltered_index]
    variance = variance[vfiltered_index]

    # print(len(ffsignal_unique), len(variance))

    plt.scatter(ffsampled[::], variance[::])
    plt.grid()
    plt.show()

    # compute polynomial without filtering outliers
    coefts_nf = np.polyfit(ffsampled, variance, order)
    polyts_nf = np.poly1d(coefts_nf)
    # var_fitted = polyts_nf(ffsampled)

    if order == 2:
        gain = 1 / coefts_nf[1]
    else:
        gain = 1 / coefts_nf[0]

    ron = gain * np.median(stdsig)
    print(f"GAIN = {gain} -e/ADU  RON = {ron} -e")

    # RETURN option promised in the docstring: give back the conversion factor
    # and the RON in electrons
    if kargs.get('RETURN', False):
        return gain, ron
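The idea behind ptc_pixels is that, because the scene is fixed, every pixel supplies its own signal/variance pair across the stack of flats, so the FPN never enters the variance. A self-contained numpy illustration of that per-pixel binning on synthetic data (the cube, gain, and dimensions below are made up):

import numpy as np

rng = np.random.default_rng(0)
true_gain = 2.0                                  # e-/ADU, made up
signal_map = rng.uniform(100, 50000, (50, 50))   # per-pixel illumination in ADUs

# simulate 20 exposures of the same scene with Poisson shot noise
cube = rng.poisson(signal_map * true_gain, (20, 50, 50)) / true_gain

per_pixel_signal = cube.mean(axis=0).astype(int).flatten()
per_pixel_var = cube.var(ddof=1, axis=0).flatten()

# average the variance of all pixels that share the same integer signal level
levels = np.unique(per_pixel_signal)[::100]      # sample every 100th level
variance = [per_pixel_var[per_pixel_signal == s].mean() for s in levels]

# the slope of variance vs signal is 1/gain
slope = np.polyfit(levels, variance, 1)[0]
print(f'estimated gain = {1 / slope:.2f} e-/ADU')  # roughly the true_gain of 2.0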