def FoVanalysis(run=True, outfile='PSFdata.pk'):
    #start the script
    log = lg.setUpLogger('PSFproperties.log')

    #derive results for each file
    if run:
        log.info('Deriving PSF properties...')

        #find files
        fls = glob.glob('/Volumes/disk_xray10/smn2/euclid/PSFs/detector_jitter-1_TOL05_MC_T0133_Nim=*.fits')

        txt = 'Processing %i files...' % (len(fls))
        print txt
        log.info(txt)

        filedata = {}
        for file in fls:
            data = readData(file)
            info = parseName(file)
            values = measureChars(data, info, log)
            filedata[file] = dict(info=info, values=values)
            txt = 'File %s processed...' % file
            print txt
            log.info(txt)

        #save data
        fileIO.cPickleDumpDictionary(filedata, outfile)
    else:
        filedata = cPickle.load(open(outfile))

    #generate plots
    generatePlots(filedata)

    log.info('Run finished...\n\n\n')
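
#a minimal usage sketch (hypothetical call sequence; assumes the module-level imports used above):
#FoVanalysis(run=True)                          #derive the PSF properties, pickle them and plot
#FoVanalysis(run=False, outfile='PSFdata.pk')   #re-plot from a previously saved pickle
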
def shapeMeasurement(log):
    """
    Shape measurement bias as a result of ghosts.
    """
    print '\n\n\nShape Measurement'
    print '-'*100
    print 'Ghost contribution in electrons, sigma=0.2, ghost not centred'
    res = ghostContributionElectrons(log, sigma=0.2)
    fileIO.cPickleDumpDictionary(res, 'ghostContributionToStarElectrons.pk')
    res = cPickle.load(open('ghostContributionToStarElectrons.pk'))
    plotGhostContributionElectrons(log, res, r'Shape Bias: 24.5 mag$_{AB}$ Point Source', 'shapeBiasElectrons.pdf',
                                    r'Size Bias: 24.5 mag$_{AB}$ Point Source', 'sizeBiasElectrons.pdf')

    print '-'*100
    print '\n\n\nGhost contribution in electrons, ghost centered on the object'
    res = ghostContributionElectrons(log, centered=True)
    fileIO.cPickleDumpDictionary(res, 'ghostContributionToStarCenteredElectrons.pk')
    res = cPickle.load(open('ghostContributionToStarCenteredElectrons.pk'))
    plotGhostContributionElectrons(log, res, r'Shape Bias: 24.5 mag$_{AB}$ Point Source', 'shapeBiasCentredElectrons.pdf',
                                    r'Size Bias: 24.5 mag$_{AB}$ Point Source', 'sizeBiasCentredElectrons.pdf')

    res = ghostContribution(log)
    fileIO.cPickleDumpDictionary(res, 'ghostContributionToStar.pk')
    res = cPickle.load(open('ghostContributionToStar.pk'))
    plotGhostContribution(res, r'Shape Bias: 24.5 mag$_{AB}$ Point Source', 'shapeBias.pdf',
                          r'Size Bias: 24.5 mag$_{AB}$ Point Source', 'sizeBias.pdf')

    res = ghostContribution(log, centered=True)
    fileIO.cPickleDumpDictionary(res, 'ghostContributionToStarCentered.pk')
    res = cPickle.load(open('ghostContributionToStarCentered.pk'))
    plotGhostContribution(res, r'Shape Bias: 24.5 mag$_{AB}$ Point Source', 'shapeBiasCentred.pdf',
                          r'Size Bias: 24.5 mag$_{AB}$ Point Source', 'sizeBiasCentred.pdf')
Example 3
    for i in np.unique(res):  #(reconstructed loop header - missing from this fragment)
        msk = res == i
        ax.plot(xpos[msk] - np.round(xpos[msk], decimals=0),
                R2[msk],
                marker=marker.next(),
                linestyle='',
                label='Sampling=%i' % i)

    ax.set_xlim(-0.6, 0.6)
    ax.set_xlabel('X position')
    ax.set_ylabel(r'$R^{2}$')

    plt.legend(shadow=True,
               fancybox=True,
               numpoints=1,
               scatterpoints=1,
               markerscale=1.8,
               loc='best')
    plt.savefig('R2.pdf')
    plt.close()


if __name__ == "__main__":
    log = lg.setUpLogger('resolutionTesting.log')

    #calculate, save and load results
    res = calculateShapes(log, glob.glob('Q0*stars*x.fits'), 'test.dat')
    fileIO.cPickleDumpDictionary(res, 'results.pk')
    res = cPickle.load(open('results.pk'))

    #plot results
    plotResults(res)
Example 7
def forwardModel(file, out='Data', gain=3.1, size=10, burn=20, spotx=2888, spoty=3514, run=50,
                 simulation=False, truths=None):
    """
    Forward models the spot data found from the input file. Can be used with simulated and real data.

    Notes:
    - The emcee is run three times as it is important to have a good starting point for the final run.
    - It is very important to have the amplitude well estimated, otherwise it is difficult to get good parameter estimates.
    """
    print '\n\n\n'
    print '_'*120
    print 'Processing:', file
    #get data and convert to electrons
    o = pf.getdata(file)*gain

    if simulation:
        data = o
    else:
        #roughly the correct location - to avoid identifying e.g. cosmic rays
        data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()

    #maximum position within the cutout
    y, x = m.maximum_position(data)

    #spot and the peak pixel within the spot, this is also the CCD kernel position
    spot = data[y-size:y+size+1, x-size:x+size+1].copy()
    CCDy, CCDx = m.maximum_position(spot)
    print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy

    #bias estimate
    if simulation:
        bias = 9000.
        rn = 4.5
    else:
        bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o
        rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])

    print 'Readnoise (e):', rn
    if rn < 2. or rn > 6.:
        print 'NOTE: suspicious readout noise estimate...'
    print 'ADC offset (e):', bias

    #remove bias
    spot -= bias

    #save to file
    fileIO.writeFITS(spot, out+'small.fits', int=False)

    #make a copy to generate the error array
    data = spot.copy().flatten()
    data[data + rn**2 < 0.] = 0.  #set highly negative values to zero
    #assume errors scale as sqrt of the values + readnoise
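    #(i.e. Poisson variance of the counts, already in electrons, plus the read-noise variance in quadrature)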
    #sigma = np.sqrt(data/gain + rn**2)
    var = data.copy() + rn**2

    #maximum value
    max = np.max(spot)
    print 'Maximum Value:', max

    #MCMC based fitting
    print 'Bayesian Fitting...'
    ndim = 7
    nwalkers = 1000

    #Choose an initial set of positions for the walkers - a fairly large area so as not to bias the results
    #amplitude, center_x, center_y, radius, focus, width_x, width_y = theta
    p0 = np.zeros((nwalkers, ndim))
    p0[:, 0] = np.random.uniform(max, 2.*max, size=nwalkers)     # amplitude
    p0[:, 1] = np.random.uniform(7., 14., size=nwalkers)         # x
    p0[:, 2] = np.random.uniform(7., 14., size=nwalkers)         # y
    p0[:, 3] = np.random.uniform(.1, 1., size=nwalkers)          # radius
    p0[:, 4] = np.random.uniform(.1, 1., size=nwalkers)          # focus
    p0[:, 5] = np.random.uniform(.1, 0.5, size=nwalkers)         # width_x
    p0[:, 6] = np.random.uniform(.1, 0.5, size=nwalkers)         # width_y

    # Initialize the sampler with the chosen specs.
    #Create the coordinates x and y
    x = np.arange(0, spot.shape[1])
    y = np.arange(0, spot.shape[0])
    #Put the coordinates in a mesh
    xx, yy = np.meshgrid(x, y)

    #Flatten the arrays
    xx = xx.flatten()
    yy = yy.flatten()

    #initiate sampler
    pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var], pool=pool)

    # Run a burn-in and set new starting position
    print "Burning-in..."
    pos, prob, state = sampler.run_mcmc(p0, burn)
    best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
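    #re-seed every walker in a tight Gaussian ball around the best burn-in sample (std = 1% of each parameter)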
    pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)
    # Reset the chain to remove the burn-in samples.
    sampler.reset()

    # Starting from the final position in the burn-in chain
    print "Running MCMC..."
    pos, prob, state = sampler.run_mcmc(pos, burn)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)

    # Print out the mean acceptance fraction
    print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

    #Get the index with the highest probability
    maxprob_index = np.argmax(prob)

    #Get the best parameters and their respective errors and print best fits
    params_fit = pos[maxprob_index]
    errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
    amplitudeE, center_xE, center_yE, radiusE, focusE, width_xE, width_yE = errors_fit
    _printResults(params_fit, errors_fit)

    #Best fit model
    amplitude, center_x, center_y, radius, focus, width_x, width_y = params_fit
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
    foc = signal.convolve2d(adata, focusdata, mode='same')
    CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.)
    CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape(spot.shape)
    model = signal.convolve2d(foc, CCDdata, mode='same')
    #save model
    fileIO.writeFITS(model, out+'model.fits', int=False)

    #residuals
    fileIO.writeFITS(model - spot, out+'residual.fits', int=False)
    fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False)

    # a simple goodness of fit: a reduced chi-square (variance-weighted squared residuals per degree of freedom)
    gof = (1./(np.size(data) - ndim)) * np.sum((model.flatten() - data)**2 / var)
    print 'GoF:', gof, ' Maximum difference:', np.max(np.abs(model - spot))

    #print the FWHM and save the results
    _printFWHM(width_x, width_y, errors_fit[5], errors_fit[6])
    res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, out=out,
               peakvalue=max, CCDmodel=CCD, CCDmodeldata=CCDdata, GoF=gof)
    fileIO.cPickleDumpDictionary(res, out+'.pkl')

    #plot
    samples = sampler.chain.reshape((-1, ndim))
    extents = None
    if simulation:
        extents = [(0.91*truth, 1.09*truth) for truth in truths]
        extents[1] = (truths[1]*0.995, truths[1]*1.005)
        extents[2] = (truths[2]*0.995, truths[2]*1.005)
        extents[3] = (0.395, 0.425)
        extents[4] = (0.503, 0.517)
    fig = triangle.corner(samples,
                          labels=['amplitude', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'],
                          truths=truths)#, extents=extents)
    fig.savefig(out+'Triangle.png')

    pool.close()
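
#a minimal usage sketch (hypothetical file name; truths is only needed when fitting simulated data):
#forwardModel('spot_800nm_0.fits', out='spot800_', gain=3.1, burn=100, run=200)
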
    plt.savefig('deltasize%s.pdf' % output)
    plt.close()


if __name__ == "__main__":
    log = lg.setUpLogger('centroidTesting.log')

    #res = testCentroidingImpact(log)
    #fileIO.cPickleDumpDictionary(res, 'centroidTesting.pk')
    #res = cPickle.load(open('centroidTesting.pk'))
    #plotCentroids(res)

    #PSF
    print 'Real PSF'
    xres, yres = testCentroidingImpactSingleDirection(log)
    fileIO.cPickleDumpDictionary(xres, 'centroidTestingX.pk')
    fileIO.cPickleDumpDictionary(yres, 'centroidTestingY.pk')
    xres = cPickle.load(open('centroidTestingX.pk'))
    yres = cPickle.load(open('centroidTestingY.pk'))
    plotCentroidsSingle(xres)
    plotCentroidsSingle(yres, output='Y')

    # #PSF interpolated
    # print 'Real PSF, interpolated'
    # xres, yres = testCentroidingImpactSingleDirection(log, interpolation=True)
    # fileIO.cPickleDumpDictionary(xres, 'centroidTestingXinterpolated.pk')
    # fileIO.cPickleDumpDictionary(yres, 'centroidTestingYinterpolated.pk')
    # xres = cPickle.load(open('centroidTestingXinterpolated.pk'))
    # yres = cPickle.load(open('centroidTestingYinterpolated.pk'))
    # plotCentroidsSingle(xres, output='Xinterpolated')
    # plotCentroidsSingle(yres, output='Yinterpolated')
def ghostContributionElectrons(log,
                               filename='data/psf1x.fits',
                               magnitude=24.5,
                               zp=25.5,
                               exptime=565.,
                               exposures=3,
                               iterations=100,
                               sigma=0.75,
                               centered=False,
                               offset=9,
                               verbose=False):
    #set sampling etc. for shape measurement
    settings = dict(iterations=iterations, sigma=sigma, debug=True)

    #read in PSF
    data = pf.getdata(filename)

    #place it on a larger canvas with zero padding around
    canvas = np.pad(data, 100, mode='constant',
                    constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))

    #normalize canvas, scale it to magnitude and save it
    canvas /= np.max(canvas)
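    #expected total counts: 10**(-0.4*(m - ZP)) is the source flux in electrons per second relative to
    #the zeropoint, multiplied by the exposure time and the number of exposures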
    intscale = 10.0**(-0.4 * (magnitude - zp)) * exptime * exposures
    canvas *= intscale
    fileIO.writeFITS(canvas, 'originalPSF.fits', int=False)

    #reference values
    sh = shape.shapeMeasurement(canvas, log, **settings)
    reference = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(reference, 'ghostStarContribution.pk')

    if verbose:
        print 'Reference:'
        pprint.pprint(reference)

    #load ghost
    ghostModel = pf.getdata('data/ghost800nm.fits')[355:423, 70:131]
    ghostModel /= np.max(ghostModel)  #peak is 1 now
    ys, xs = ghostModel.shape
    yd = int(np.round(ys / 2., 0))
    xd = int(np.round(xs / 2., 0))
    fileIO.writeFITS(ghostModel, 'ghostImage.fits', int=False)

    #ghost levels
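    #(peak ghost levels from 1e-4 to 1e2 electrons, sampled in 21 logarithmic steps)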
    scales = np.logspace(-4, 2, 21)

    result = {}
    for scale in scales:
        scaled = ghostModel.copy() * scale
        #fileIO.writeFITS(scaled, 'ghostImage.fits', int=False)

        tmp = canvas.copy()
        if centered:
            tmp[ycen - yd:ycen + yd, xcen - xd:xcen + xd + 1] += scaled
        else:
            tmp[ycen - yd + offset:ycen + yd + offset,
                xcen - xd + offset:xcen + xd + 1 + offset] += scaled
            #tmp[ycen: ycen + 2*yd, xcen:xcen + 2*xd + 1] += scaled
        #fileIO.writeFITS(tmp, 'originalPlusGhost.fits', int=False)

        #measure e and R2 from the postage stamp image
        sh = shape.shapeMeasurement(tmp, log, **settings)
        results = sh.measureRefinedEllipticity()

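        #bias relative to the ghost-free reference: ellipticity components and the fractional change in R2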
        de1 = results['e1'] - reference['e1']
        de2 = results['e2'] - reference['e2']
        de = np.sqrt(de1**2 + de2**2)
        dR2 = (results['R2'] - reference['R2']) / reference['R2']

        if verbose:
            print '\n\nscale=', scale
            print 'Delta: with ghost - reference'
            print 'e1', de1
            print 'e2', de2
            print 'e', de
            print 'R2', dR2

        result[scale] = [
            de1, de2, de, dR2, results['e1'], results['e2'],
            results['ellipticity'], results['R2']
        ]

    return result
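
#a minimal usage sketch (hypothetical logger name; the remaining arguments follow the defaults above):
#log = lg.setUpLogger('ghosts.log')
#res = ghostContributionElectrons(log, sigma=0.2, verbose=True)
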
def ghostContributionToStar(log, filename='data/psf12x.fits', psfscale=2e5, distance=750,
                            inner=8, outer=60, oversample=12, iterations=20, sigma=0.75,
                            scale=5e-5, fixedPosition=True):
    #set sampling etc. for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma, debug=True)

    #read in PSF
    data = pf.getdata(filename)

    #place it on a larger canvas with zero padding around
    canvas = np.pad(data, int(distance*oversample + outer + 1),
                    mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))

    #normalize canvas and save it
    canvas /= np.max(canvas)
    canvas *= float(psfscale)
    fileIO.writeFITS(canvas, 'originalPSF.fits', int=False)

    #reference values
    sh = shape.shapeMeasurement(canvas, log, **settings)
    reference = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(reference, 'ghostStarContribution.pk')
    print 'Reference:'
    pprint.pprint(reference)

    #make out of focus image, a simple doughnut
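    #(xd and yd are the half-widths of the doughnut image in oversampled pixels)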
    img, xd, yd = drawDoughnut(inner, outer, oversample=oversample)

    #positions (shift with respect to the centring of the star)
    xc = 0
    yc = distance * oversample

    #indices range
    xm = xcen + xc
    ym = ycen + yc

    #ghost level
    #scale the doughnut pixel values, note that all pixels have the same value...
    img /= np.max(img)
    scaled = img.copy() * scale * psfscale
    fileIO.writeFITS(scaled, 'ghostImage.fits', int=False)

    tmp = canvas.copy()

    if oversample % 2 == 0:
        tmp[ym - yd:ym + yd, xm - xd:xm + xd] += scaled
    else:
        tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1] += scaled

    fileIO.writeFITS(tmp, 'originalPlusGhost.fits', int=False)

    #use fixed positions
    if fixedPosition:
        settings['fixedPosition'] = True
        settings['fixedX'] = reference['centreX']
        settings['fixedY'] = reference['centreY']

    #measure e and R2 from the postage stamp image
    sh = shape.shapeMeasurement(tmp, log, **settings)
    results = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(results, 'ghostStarContribution.pk')

    #save values
    print '\nWith Doughnut:'
    pprint.pprint(results)

    print '\nDelta: with ghost - reference'
    print 'e1', results['e1'] - reference['e1']
    print 'e2', results['e2'] - reference['e2']
    print 'e', results['ellipticity'] - reference['ellipticity']
    print 'R2', results['R2'] - reference['R2']
    print 'Xcen', results['centreX'] - reference['centreX']
    print 'Ycen', results['centreY'] - reference['centreY']

    return results
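
#a minimal usage sketch (hypothetical logger name; other arguments follow the defaults above):
#log = lg.setUpLogger('ghosts.log')
#results = ghostContributionToStar(log, scale=5e-5, distance=750)
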
    #impact on object detection
    log.info('Calculating the effect on object detection...')
    #objectDetection(log)
    #objectDetection(log, ghostlevels=(5.e-5, 5.e-7))
    objectDetection(log, covering=11500, ghostlevels=(5.e-5, 4.e-6, 5.e-6, 1.e-6, 6.e-7))

    #impact on shape measurement
    #log.info('Calculating the effect on shape measurements...')
    #shapeMeasurement(log)

    if debug:
         #out of focus ghosts
         res = analyseOutofFocusImpact(log, filename='data/psf1x.fits', maxdistance=100, samples=7,
                                       inner=8, outer=60, oversample=1.0, psfs=1000, iterations=5, sigma=0.75)
         fileIO.cPickleDumpDictionary(res, 'OutofFocusResultsDebug.pk')
         res = cPickle.load(open('OutofFocusResultsDebug.pk'))
         plotResults(res, 'OutofFocusGhostsDebug', 'VIS Ghosts: PSF Knowledge')


    if focus:
         #if the ghosts were in focus
         res = analyseInFocusImpact(log, filename='data/psf2x.fits', psfscale=100000, maxdistance=100,
                                    oversample=2.0, psfs=200, iterations=4, sigma=0.75)
         fileIO.cPickleDumpDictionary(res, 'InfocusResultsDebug.pk')
         plotResults(res, 'InfocusGhosts', 'VIS Ghosts: In Focus Analysis')

    if run:
         #real run
         res = analyseOutofFocusImpact(log)
         fileIO.cPickleDumpDictionary(res, 'OutofFocusResults.pk')
Example 13
        if debug:  #(reconstructed head - this snippet begins mid-call; cf. the else branch below)
            resPiston = findTolerableErrorPiston(log,
                                                 file='data/psf1x.fits',
                                                 oversample=1.0,
                                                 iterations=4,
                                                 psfs=500,
                                                 samples=8)
            resSlope = findTolerableErrorSlope(log,
                                               file='data/psf1x.fits',
                                               oversample=1.0,
                                               iterations=4,
                                               psfs=500,
                                               samples=8)
        else:
            resPiston = findTolerableErrorPiston(log)
            resSlope = findTolerableErrorSlope(log)

        fileIO.cPickleDumpDictionary(resPiston, 'piston.pk')
        plotTolerableErrorE(resPiston,
                            r'VIS Bias Calibration: Piston',
                            output='BiasCalibrationTolerableErrorEPiston.pdf')
        plotTolerableErrorR2(
            resPiston,
            r'VIS Bias Calibration: Piston',
            output='BiasCalibrationTolerableErrorR2Piston.pdf')

        fileIO.cPickleDumpDictionary(resSlope, 'slope.pk')
        plotTolerableErrorE(resSlope,
                            r'VIS Bias Calibration: Tilt',
                            output='BiasCalibrationTolerableErrorESlope.pdf')
        plotTolerableErrorR2(resSlope,
                             r'VIS Bias Calibration: Tilt',
                             output='BiasCalibrationTolerableErrorR2Slope.pdf')
    run = True  #(reconstructed - 'run' is referenced below but its assignment is missing from this fragment)
    multi = False
    plot = True
    debug = False
    file = "CosmicrayResults.pk"

    # start a logger
    log = lg.setUpLogger("CosmicrayRejection.log")
    log.info("Testing Cosmic Ray Rejection...")

    if debug:
        test(log)

    if run:
        if multi:
            resM = testCosmicrayRejectionMultiPSF(log, stars=2000, psfs=500)
            fileIO.cPickleDumpDictionary(resM, file.replace(".pk", "Multi2000.pk"))

        res = testCosmicrayRejection(log)
        fileIO.cPickleDumpDictionary(res, file)

    if plot:
        if not run:
            if multi:
                resM = cPickle.load(open(file.replace(".pk", "Multi2000.pk")))
            res = cPickle.load(open(file))

        plotResults(res, outdir="results")
        if multi:
            plotResults(resM, outdir="resultsMulti")

    log.info("Run finished...\n\n\n")
    runs = {'run1': dict(multiplier=1.5),
            'run2': dict(multiplier=0.5),
            'run3': dict(multiplier=2.0),
            'run4': dict(multiplier=3.0),
            'run5': dict(multiplier=4.0)}

    for key, value in runs.iteritems():
        if not os.path.exists(key):
            os.makedirs(key)

        #start a logger
        log = lg.setUpLogger(key+'/nonlinearityModelTransfer.log')
        log.info('Testing non-linearity model transfer...')
        log.info('Multiplier = %f' % value['multiplier'])

        if run:
            if debug:
                res = testNonlinearityModelTransfer(log, psfs=2000, file='data/psf1x.fits', oversample=1.0)
            else:
                res = testNonlinearityModelTransfer(log)

            fileIO.cPickleDumpDictionary(res, key+'/nonlinModelResults.pk')

        if plot:
            if not run:
                res = cPickle.load(open(key+'/nonlinModelResults.pk'))

            plotResults(res, outdir=key)

        log.info('Run finished...\n\n\n')
Example 17
def analyseSpotsFitting(files,
                        gaussian=False,
                        pixelvalues=False,
                        bessel=True,
                        maxfev=10000):
    """
    Analyse spot measurements using different fitting methods.

    :param files: names of the FITS files to analyse (should match the IDs)
    :param gaussian: whether or not to do a simple Gaussian fitting analysis
    :param pixelvalues: whether or not to plot pixel values on a grid
    :param bessel: whether or not to do a Bessel + Gaussian convolution analysis
    :param maxfev: maximum number of iterations in the least squares fitting

    :return: None
    """
    log = lg.setUpLogger('spots.log')
    log.info('Starting...')
    over = 24
    settings = dict(iterations=8)
    ids = fileIDs()

    d = {}
    for filename in files:
        tmp = readData(filename, crop=False)
        f = filename.replace('small.fits', '')
        d[f] = tmp

    if pixelvalues:
        #plot different pixel values
        plotPixelValues(d, ids)

    if gaussian:
        #fit simple Gaussians
        Gaussians = {}
        for f, im in d.iteritems():
            #horizontal direction
            sumH = np.sum(im, axis=0)
            Hfit = gaussianFit(sumH,
                               initials=[
                                   np.max(sumH) - np.median(sumH), 8., 0.4,
                                   np.median(sumH)
                               ])
            plotLineFits(sumH, Hfit, f)

            #vertical direction
            sumV = np.sum(im, axis=1)
            Vfit = gaussianFit(sumV,
                               initials=[
                                   np.max(sumV) - np.median(sumV), 8., 0.4,
                                   np.median(sumV)
                               ])
            plotLineFits(sumV, Vfit, f, horizontal=False)

            #2D gaussian
            tmp = im.copy() - np.median(im)
            twoD = fit.Gaussian2D(tmp, initials=[np.max(tmp), 7, 7, 0.4, 0.4])

            print f, Hfit['sigma'], twoD[4], Vfit['sigma'], twoD[3], int(np.max(im))
            Gaussians[f] = [Hfit['sigma'], twoD[4], Vfit['sigma'], twoD[3]]

        fileIO.cPickleDumpDictionary(Gaussians, 'SpotmeasurementsGaussian.pk')

        plotGaussianResults(Gaussians, ids, output='line')
        plotGaussianResults(Gaussians, ids, output='twoD', vals=[1, 3])

    if bessel:
        Gaussians = {}
        #Bessel + Gaussian
        hf = 8 * over
        for f, im in d.iteritems():
            #if '21_59_31s' not in f:
            #    continue

            #over sample the data, needed for convolution
            oversampled = ndimage.zoom(im.copy(), over, order=0)
            fileIO.writeFITS(oversampled, f + 'block.fits', int=False)

            #find the centre in oversampled frame, needed for bessel and gives a starting point for fitting
            tmp = oversampled.copy() - np.median(oversampled)
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()
            midx = results['centreX'] - 1.
            midy = results['centreY'] - 1.

            #generate 2D bessel and re-centre using the above centroid, normalize to the maximum image value and
            #save to a FITS file.
            bes = generateBessel(radius=0.45, oversample=over, size=16 * over)
            shiftx = -midx + hf
            shifty = -midy + hf
            bes = ndimage.interpolation.shift(bes, [-shifty, -shiftx], order=0)
            bes /= np.max(bes)
            fileIO.writeFITS(bes, f + 'bessel.fits', int=False)

            #check the residual with only the bessel and save to a FITS file
            t = ndimage.zoom(bes.copy(), 1. / over, order=0)
            t /= np.max(t)
            fileIO.writeFITS(im.copy() - np.median(oversampled) -
                             t * np.max(tmp),
                             f + 'residual.fits',
                             int=False)
            fileIO.writeFITS(oversampled - bes.copy() * np.max(tmp),
                             f + 'residualOversampled.fits',
                             int=False)

            #best guesses for fitting parameters
            params = [1., results['centreX'], results['centreY'], 0.5, 0.5]

            biassubtracted = im.copy() - np.median(oversampled)
            #error function is a convolution between a bessel function and 2D gaussian - data
            #note that the error function must be on low-res grid because it is the pixel values we try to match
            errfunc = lambda p: np.ravel(
                ndimage.zoom(signal.fftconvolve(
                    fitf(*p)(*np.indices(tmp.shape)), bes.copy(), mode='same'),
                             1. / over,
                             order=0) * np.max(tmp) - biassubtracted.copy())

            #fit
            res = sp.optimize.leastsq(errfunc,
                                      params,
                                      full_output=True,
                                      maxfev=maxfev)

            #save the fitted residuals
            t = signal.fftconvolve(fitf(*res[0])(*np.indices(tmp.shape)),
                                   bes.copy(),
                                   mode='same')
            fileIO.writeFITS(res[2]['fvec'].reshape(im.shape),
                             f + 'residualFit.fits',
                             int=False)
            fileIO.writeFITS(fitf(*res[0])(*np.indices(tmp.shape)),
                             f + 'gaussian.fits',
                             int=False)
            fileIO.writeFITS(t, f + 'BesselGausOversampled.fits', int=False)
            fileIO.writeFITS(ndimage.zoom(t, 1. / over, order=0),
                             f + 'BesselGaus.fits',
                             int=False)

            #print out the results and save to a dictionary
            print results['centreX'], results['centreY'], res[2]['nfev'], res[0]

            #sigmas are symmetric as the width of the fitting function is later squared...
            sigma1 = np.abs(res[0][3])
            sigma2 = np.abs(res[0][4])
            Gaussians[f] = [sigma1, sigma2]

        fileIO.cPickleDumpDictionary(Gaussians,
                                     'SpotmeasurementsBesselGaussian.pk')

        #plot the findings
        plotGaussianResults(Gaussians, ids, output='Bessel', vals=[0, 1])
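
#a minimal usage sketch (hypothetical; assumes FITS files named '*small.fits' as stripped in the loop above):
#import glob
#analyseSpotsFitting(glob.glob('*small.fits'), gaussian=True, pixelvalues=True, bessel=True)
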
def testCTIcorrectionNonoise(log, files, output, sigma=0.75, iterations=4):
    """
    Calculates PSF properties such as ellipticity and size from data w/ and w/o CTI.

    :param log: python logger instance
    :type log: instance
    :param files: a list of files to be processed
    :type files: list
    :param output: name of the output pickle file (a CSV with the same name stem is also written)
    :type output: str
    :param sigma: size of the Gaussian weighting function
    :type sigma: float
    :param iterations: the number of iterations for the moment based shape estimator
    :type iterations: int

    :return: ellipticity and size
    :rtype: dict
    """
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    xclean = []
    yclean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    xCTI = []
    yCTI = []
    eCTIfixed = []
    e1CTIfixed = []
    e2CTIfixed = []
    R2CTIfixed = []
    xCTIfixed = []
    yCTIfixed = []

    fh = open(output.replace('pk', 'csv'), 'w')
    fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\n')
    for f in files:
        print 'Processing: ', f

        #reset settings
        settings = dict(sigma=sigma, iterations=iterations)

        #load no cti data
        nocti = pf.getdata(f.replace('CUT', 'CUTnoctinonoise'))

        #load CTI data
        CTI = pf.getdata(f)

        sh = shape.shapeMeasurement(nocti, log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        xclean.append(results['centreX'])
        yclean.append(results['centreY'])

        #CTI, fitted centroid
        sh = shape.shapeMeasurement(CTI.copy(), log, **settings)
        results2 = sh.measureRefinedEllipticity()

        eCTI.append(results2['ellipticity'])
        e1CTI.append(results2['e1'])
        e2CTI.append(results2['e2'])
        R2CTI.append(results2['R2'])
        xCTI.append(results2['centreX'])
        yCTI.append(results2['centreY'])

        #fixed centroid
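        #(with the centroid held fixed at the CTI-free position, a single refinement iteration suffices)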
        settings['fixedPosition'] = True
        settings['fixedX'] = results['centreX']
        settings['fixedY'] = results['centreY']
        settings['iterations'] = 1
        sh = shape.shapeMeasurement(CTI.copy(), log, **settings)
        results3 = sh.measureRefinedEllipticity()

        eCTIfixed.append(results3['ellipticity'])
        e1CTIfixed.append(results3['e1'])
        e2CTIfixed.append(results3['e2'])
        R2CTIfixed.append(results3['R2'])
        xCTIfixed.append(results3['centreX'])
        yCTIfixed.append(results3['centreY'])

        text = '%s,%e,%e,%e,%e,%e,%e\n' % (f, results['ellipticity'] - results2['ellipticity'],
              results['e1'] - results2['e1'], results['e2'] - results2['e2'], results['R2'] - results2['R2'],
              results['centreX'] - results2['centreX'], results['centreY'] - results2['centreY'])
        fh.write(text)
        print text

    fh.close()

    results = {'eclean' : np.asarray(eclean),
               'e1clean' : np.asarray(e1clean),
               'e2clean' : np.asarray(e2clean),
               'R2clean' : np.asarray(R2clean),
               'xclean' : np.asarray(xclean),
               'yclean' : np.asarray(yclean),
               'eCTI' : np.asarray(eCTI),
               'e1CTI' : np.asarray(e1CTI),
               'e2CTI' : np.asarray(e2CTI),
               'R2CTI' : np.asarray(R2CTI),
               'xCTI' : np.asarray(xCTI),
               'yCTI' : np.asarray(yCTI),
               'eCTIfixed': np.asarray(eCTIfixed),
               'e1CTIfixed': np.asarray(e1CTIfixed),
               'e2CTIfixed': np.asarray(e2CTIfixed),
               'R2CTIfixed': np.asarray(R2CTIfixed),
               'xCTIfixed': np.asarray(xCTIfixed),
               'yCTIfixed': np.asarray(yCTIfixed)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, output)

    return results
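
#a minimal usage sketch (hypothetical logger and file pattern; the CTI-free frames are located via the
#'CUT' -> 'CUTnoctinonoise' name replacement inside the function):
#import glob
#log = lg.setUpLogger('CTItesting.log')
#res = testCTIcorrectionNonoise(log, glob.glob('CUT*.fits'), 'CTIresults.pk')
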
def testCTIcorrection(log, files, sigma=0.75, iterations=4, xcen=1900, ycen=1900, side=20):
    """
    Calculates PSF properties such as ellipticity and size from data without CTI and from
    CTI data.

    :param log: python logger instance
    :type log: instance
    :param files: a list of files to be processed
    :type files: list
    :param sigma: size of the Gaussian weighting function
    :type sigma: float
    :param iterations: the number of iterations for the moment based shape estimator
    :type iterations: int
    :param xcen: x-coordinate of the object centre
    :type xcen: int
    :param ycen: y-coordinate of the object centre
    :type ycen: int
    :param side: size of the cutout around the centre (+/- side)
    :type side: int

    :return: ellipticity and size
    :rtype: dict
    """
    settings = dict(sigma=sigma, iterations=iterations)

    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    for file in files:
        #load no cti data
        nocti = pf.getdata(file.replace('CTI', 'nocti'))[ycen-side:ycen+side, xcen-side:xcen+side]
        #subtract background
        nocti -= 27.765714285714285
        nocti[nocti < 0.] = 0.  #remove negative numbers

        #load CTI data
        CTI = pf.getdata(file)[ycen-side:ycen+side, xcen-side:xcen+side]
        CTI[CTI < 0.] = 0. #remove negative numbers

        sh = shape.shapeMeasurement(nocti, log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])

        sh = shape.shapeMeasurement(CTI, log, **settings)
        results = sh.measureRefinedEllipticity()

        eCTI.append(results['ellipticity'])
        e1CTI.append(results['e1'])
        e2CTI.append(results['e2'])
        R2CTI.append(results['R2'])

    results = {'eclean' : np.asarray(eclean),
               'e1clean' : np.asarray(e1clean),
               'e2clean' : np.asarray(e2clean),
               'R2clean' : np.asarray(R2clean),
               'eCTI' : np.asarray(eCTI),
               'e1CTI' : np.asarray(e1CTI),
               'e2CTI' : np.asarray(e2CTI),
               'R2CTI' : np.asarray(R2CTI)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, 'results.pk')

    return results
Example 22
def forwardModelJointFit(files, out, wavelength, gain=3.1, size=10, burn=50, run=100,
                         spotx=2888, spoty=3514, simulated=False, truths=None):
    """
    Forward models the spot data from the input files. Models all data simultaneously, allowing the Airy
    disc centroid to shift from file to file, while assuming that the spot intensity, focus, and the CCD PSF kernel
    are the same for each file. Can be used with simulated and real data.
    """
    print '\n\n\n'
    print '_'*120

    images = len(files)
    orig = []
    image = []
    noise = []
    peakvalues = []
    for file in files:
        print file
        #get data and convert to electrons
        o = pf.getdata(file)*gain

        if simulated:
            data = o
        else:
            #roughly the correct location - to avoid identifying e.g. cosmic rays
            data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()

        #maximum position within the cutout
        y, x = m.maximum_position(data)

        #spot and the peak pixel within the spot, this is also the CCD kernel position
        spot = data[y-size:y+size+1, x-size:x+size+1].copy()
        orig.append(spot.copy())

        #bias estimate
        if simulated:
            bias = 9000.
            rn = 4.5
        else:
            bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20])
            rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])

        print 'Readnoise (e):', rn
        if rn < 2. or rn > 6.:
            print 'NOTE: suspicious readout noise estimate...'
        print 'ADC offset (e):', bias

        #remove bias
        spot -= bias

        #set highly negative values to zero
        spot[spot + rn**2 < 0.] = 0.

        max = np.max(spot)
        print 'Maximum Value:', max
        peakvalues.append(max)

        #noise model
        variance = spot.copy() + rn**2

        #save to a list
        image.append(spot)
        noise.append(variance)

    #sanity check: try to verify that all the files in the fit come from the same dataset
    if np.std(peakvalues) > 5*np.sqrt(np.median(peakvalues)):
        #check for more than 5sigma outliers, however, this is very sensitive to the centroiding of the spot...
        print 'POTENTIAL OUTLIER, please check the input files...'
        print np.std(peakvalues), 5*np.sqrt(np.median(peakvalues))

    #MCMC based fitting
    ndim = 2*images + 5  #an x, y position pair per image plus a shared amplitude, radius, focus, width_x and width_y
    nwalkers = 1000
    print 'Bayesian Fitting, model has %i dimensions' % ndim

    # Choose an initial set of positions for the walkers using the Gaussian fit
    p0 = np.zeros((nwalkers, ndim))
    for x in xrange(images):
        p0[:, 2*x] = np.random.uniform(7., 14., size=nwalkers)      # x
        p0[:, 2*x+1] = np.random.uniform(7., 14., size=nwalkers)    # y
    p0[:, -5] = np.random.uniform(max, 2.*max, size=nwalkers)       # amplitude
    p0[:, -4] = np.random.uniform(.1, 1., size=nwalkers)            # radius
    p0[:, -3] = np.random.uniform(.1, 1., size=nwalkers)            # focus
    p0[:, -2] = np.random.uniform(.1, 0.5, size=nwalkers)           # width_x
    p0[:, -1] = np.random.uniform(.1, 0.5, size=nwalkers)           # width_y

    # Initialize the sampler with the chosen specs.
    #Create the coordinates x and y
    x = np.arange(0, spot.shape[1])
    y = np.arange(0, spot.shape[0])
    #Put the coordinates in a mesh
    xx, yy = np.meshgrid(x, y)

    #Flatten the arrays
    xx = xx.flatten()
    yy = yy.flatten()

    #initiate sampler
    pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorJoint, args=[xx, yy, image, noise], pool=pool)

    # Run a burn-in and set new starting position
    print "Burning-in..."
    pos, prob, state = sampler.run_mcmc(p0, burn)
    best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
    pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)
    # Reset the chain to remove the burn-in samples.
    sampler.reset()

    # Starting from the final position in the burn-in chain
    print "Running MCMC..."
    pos, prob, state = sampler.run_mcmc(pos, burn)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)

    # Print out the mean acceptance fraction
    print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

    #Get the index with the highest probability
    maxprob_index = np.argmax(prob)

    #Get the best parameters and their respective errors and print best fits
    params_fit = pos[maxprob_index]
    errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
    print params_fit

    #unpack the fixed parameters
    amplitude, radius, focus, width_x, width_y = params_fit[-5:]
    amplitudeE, radiusE, focusE, width_xE, width_yE = errors_fit[-5:]

    #print results
    _printFWHM(width_x, width_y, width_xE, width_yE)

    #save the best models per file
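    #(redefine size as the full stamp width: the cutouts are 2*size + 1 pixels on a side)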
    size = size*2 + 1
    gofs = []
    for index, file in enumerate(files):
        #path, file = os.path.split(file)
        prefix = 'test/' + out + str(index)
        #X and Y are always in pairs
        center_x = params_fit[2*index]
        center_y = params_fit[2*index+1]

        #1)Generate a model Airy disc
        airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
        adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape((size, size))

        #2)Apply Focus
        f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
        focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))
        model = signal.convolve2d(adata, focusdata, mode='same')

        #3)Apply CCD diffusion, approximated with a Gaussian
        CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.)
        CCDdata = CCD.eval(xx, yy, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size))
        model = signal.convolve2d(model, CCDdata, mode='same')
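        #the final model is thus an Airy disc convolved first with the focus
        #kernel and then with the CCD diffusion kernel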

        #save the data, model and residuals
        fileIO.writeFITS(orig[index], prefix+'data.fits', int=False)
        fileIO.writeFITS(image[index], prefix+'datafit.fits', int=False)
        fileIO.writeFITS(model, prefix+'model.fits', int=False)
        fileIO.writeFITS(model - image[index], prefix+'residual.fits', int=False)
        fileIO.writeFITS(((model - image[index])**2 / noise[index]), prefix+'residualSQ.fits', int=False)

        #a simple goodness of fit
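        #(essentially a reduced chi-square: noise-weighted squared residuals
        # normalised by the number of degrees of freedom of the joint fit)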
        gof = (1./(np.size(image[index])*images - ndim)) * np.sum((model - image[index])**2 / noise[index])
        print 'GoF:', gof, ' Max difference', np.max(np.abs(model - image[index]))
        gofs.append(gof)

    #save results
    res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, files=files, out=out,
               wavelength=wavelength, peakvalues=np.asarray(peakvalues), CCDmodel=CCD, CCDmodeldata=CCDdata,
               GoFs=gofs)
    fileIO.cPickleDumpDictionary(res, 'test/' + out + '.pkl')

    #plot
    samples = sampler.chain.reshape((-1, ndim))
    #extents = None
    #if simulated:
    #    extents = [(0.9*truth, 1.1*truth) for truth in truths]
    #    print extents
    fig = triangle.corner(samples, labels=['x', 'y']*images + ['amplitude', 'radius', 'focus', 'width_x', 'width_y'],
                          truths=truths)#, extents=extents)
    fig.savefig('test/' + out + 'Triangle.png')

    pool.close()
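
#For reference, log_posteriorJoint is used above but not shown in this
#snippet. The following is a minimal sketch of what such a joint
#log-posterior could look like; it is NOT the implementation used here. It
#assumes flat priors within the initialisation bounds and, to keep the
#sketch short, substitutes a simple elliptical Gaussian for the full
#Airy x focus x diffusion model, so radius and focus enter the prior only.
import numpy as np

def log_posteriorJoint_sketch(theta, xx, yy, images, noises):
    #the shared PSF parameters are the last five entries of the vector
    amplitude, radius, focus, width_x, width_y = theta[-5:]
    #flat priors: reject walkers outside the initialisation bounds
    if not (amplitude > 0. and 0. < radius < 2. and 0. < focus < 2. and
            0. < width_x < 1. and 0. < width_y < 1.):
        return -np.inf
    lnL = 0.
    for i in xrange(len(images)):
        #each image carries its own centroid, stored pairwise at the front
        cx, cy = theta[2*i], theta[2*i + 1]
        #stand-in model: elliptical Gaussian evaluated on the flattened grid
        model = amplitude * np.exp(-0.5 * (((xx - cx) / width_x)**2 +
                                           ((yy - cy) / width_y)**2))
        #Gaussian log-likelihood weighted by the per-pixel noise model
        lnL += -0.5 * np.sum((images[i].flatten() - model)**2 /
                             noises[i].flatten())
    return lnL
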
def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,
                    datadir='/Users/smn2/EUCLID/CTItesting/uniform/',
                    thibautCDM03=False, beta=False, serial=1, parallel=1):
    """
    Test the impact of CTI in case of no noise and no correction.

    :param log: logger instance
    :param output: name of the output pickle file; a .csv with the same stem is also written
    :param bcgr: background in electrons for the CTI modelling
    :param sigma: size of the weighting function for the quadrupole moment
    :param iterations: number of iterations in the quadrupole moments estimation
    :param loc: location to which the galaxy will be placed [default=1900]
    :param galaxies: number of galaxies to use (must be < 10000)
    :param datadir: directory pointing to the galaxy images
    :param thibautCDM03: use Thibaut's CDM03 trap parameters instead of the MSSL ones
    :param beta: if True, set beta_p = beta_s = 0.6 in the trap parameters
    :param serial: serial CTI flag passed to the CDM03 parameter set
    :param parallel: parallel CTI flag passed to the CDM03 parameter set

    :return: dictionary of clean, CTI, and fixed-centroid shape measurements
    """
    files = g.glob(datadir + '*.fits')
    #pick randomly
    files = np.random.choice(files, galaxies, replace=False)

    #trap parameters: parallel
    if thibautCDM03:
        f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'
        f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'
        params = ThibautsCDM03params()
        params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))
    else:
        f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'
        f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'
        params = MSSLCDM03params()
        params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))
        if beta:
            params.update(dict(beta_p=0.6, beta_s=0.6))

    print f1, f2

    #store shapes
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    xclean = []
    yclean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    xCTI = []
    yCTI = []
    eCTIfixed = []
    e1CTIfixed = []
    e2CTIfixed = []
    R2CTIfixed = []
    xCTIfixed = []
    yCTIfixed = []

    fh = open(output.replace('.pk', '.csv'), 'w')
    fh.write('#files: %s and %s\n' % (f1, f2))
    for key in params:
        print key, params[key]
        fh.write('# %s = %s\n' % (key, str(params[key])))
    fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\n')
    for f in files:
        print 'Processing: ', f

        #load data
        nocti = pf.getdata(f)

        #scale to SNR about 10 (average galaxy, a single exposure)
        nocti /= np.sum(nocti)
        nocti *= 1500.

        #place it on canvas
        tmp = np.zeros((2066, 2048))
        ysize, xsize = nocti.shape
        ysize /= 2
        xsize /= 2
        tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = nocti.copy()
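        #(note: the integer halving above assumes even-sized postage stamps;
        # an odd-sized input would not fit the slice exactly)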

        #add background
        tmp += bcgr

        #run CDM03
        c = CTI.CDM03bidir(params, [])
        tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()

        #remove background and make a cutout
        CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]
        CTIdata -= bcgr
        CTIdata[CTIdata < 0.] = 0.

        #write files
        #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)
        #fileIO.writeFITS(CTIdata, f.replace('.fits', 'CTI.fits'), int=False)

        #reset settings
        settings = dict(sigma=sigma, iterations=iterations)

        #calculate shapes
        sh = shape.shapeMeasurement(nocti.copy(), log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        xclean.append(results['centreX'])
        yclean.append(results['centreY'])

        #CTI, fitted centroid
        sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)
        results2 = sh.measureRefinedEllipticity()

        eCTI.append(results2['ellipticity'])
        e1CTI.append(results2['e1'])
        e2CTI.append(results2['e2'])
        R2CTI.append(results2['R2'])
        xCTI.append(results2['centreX'])
        yCTI.append(results2['centreY'])

        #fixed centroid
        settings['fixedPosition'] = True
        settings['fixedX'] = results['centreX']
        settings['fixedY'] = results['centreY']
        settings['iterations'] = 1
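        #(with the centroid fixed to the clean-image value, a single iteration
        # of the moment estimation suffices)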
        sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)
        results3 = sh.measureRefinedEllipticity()

        eCTIfixed.append(results3['ellipticity'])
        e1CTIfixed.append(results3['e1'])
        e2CTIfixed.append(results3['e2'])
        R2CTIfixed.append(results3['R2'])
        xCTIfixed.append(results3['centreX'])
        yCTIfixed.append(results3['centreY'])

        text = '%s,%e,%e,%e,%e,%e,%e\n' % (f, results['ellipticity'] - results2['ellipticity'],
                                           results['e1'] - results2['e1'], results['e2'] - results2['e2'],
                                           results['R2'] - results2['R2'],
                                           results['centreX'] - results2['centreX'],
                                           results['centreY'] - results2['centreY'])
        fh.write(text)
        print text

    fh.close()

    results = {'eclean': np.asarray(eclean),
               'e1clean': np.asarray(e1clean),
               'e2clean': np.asarray(e2clean),
               'R2clean': np.asarray(R2clean),
               'xclean': np.asarray(xclean),
               'yclean': np.asarray(yclean),
               'eCTI': np.asarray(eCTI),
               'e1CTI': np.asarray(e1CTI),
               'e2CTI': np.asarray(e2CTI),
               'R2CTI': np.asarray(R2CTI),
               'xCTI': np.asarray(xCTI),
               'yCTI': np.asarray(yCTI),
               'eCTIfixed': np.asarray(eCTIfixed),
               'e1CTIfixed': np.asarray(e1CTIfixed),
               'e2CTIfixed': np.asarray(e2CTIfixed),
               'R2CTIfixed': np.asarray(R2CTIfixed),
               'xCTIfixed': np.asarray(xCTIfixed),
               'yCTIfixed': np.asarray(yCTIfixed)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, output)

    return results
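
#A minimal, hypothetical call of useThibautsData; the output name and galaxy
#count below are placeholders, and lg.setUpLogger is the same helper used in
#the other snippets on this page:
#
#    log = lg.setUpLogger('CTItesting.log')
#    results = useThibautsData(log, 'shapes.pk', galaxies=500)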

    #eR2
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_color_cycle(['r', 'k', 'm', 'c', 'g'])

    for i in sorted(set(res)):
        msk = res == i
        ax.plot(xpos[msk]-np.round(xpos[msk], decimals=0), R2[msk],
                marker=marker.next(), linestyle='', label='Sampling=%i' % i)

    ax.set_xlim(-0.6, 0.6)
    ax.set_xlabel('X position')
    ax.set_ylabel(r'$R^{2}$')

    plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='best')
    plt.savefig('R2.pdf')
    plt.close()


if __name__ == "__main__":
    log = lg.setUpLogger('resolutionTesting.log')

    #calculate, save and load results
    res = calculateShapes(log, glob.glob('Q0*stars*x.fits'), 'test.dat')
    fileIO.cPickleDumpDictionary(res, 'results.pk')
    res = cPickle.load(open('results.pk'))

    #plot results
    plotResults(res)

if __name__ == '__main__':
    run = True
    debug = False
    plots = True
    error = False

    #start the script
    log = lg.setUpLogger('flatfieldCalibration.log')
    log.info('Testing flat fielding calibration...')

    if error:
        res = findTolerableError(log)

        fileIO.cPickleDumpDictionary(res, 'errors/residuals.pk')
        res = cPickle.load(open('errors/residuals.pk'))

        plotTolerableErrorE(res, output='errors/FlatFieldingTolerableErrorE.pdf')
        plotTolerableErrorR2(res, output='errors/FlatFieldingTolerableErrorR2.pdf')

    if run:
        results = testFlatCalibration(log, flats=np.arange(5, 100, 9))
        fileIO.cPickleDumpDictionary(results, 'flatfieldResults.pk')

    if debug:
        #calculate RMS on image with x frames combined together
        combined = generateResidualFlatField(combine=30, plots=True, debug=True)
        print np.std(combined), np.std(combined[500:561, 500:561]), np.std(combined[300:361, 300:361])

        results = testNoFlatfieldingEffects(log, oversample=4.0, file='data/psf4x.fits', psfs=400)
Esempio n. 26
if __name__ == '__main__':
    run = True   #assumed default; the opening lines of this snippet are truncated and run is referenced below
    multi = False
    plot = True
    debug = False
    file = 'CosmicrayResults.pk'

    #start a logger
    log = lg.setUpLogger('CosmicrayRejection.log')
    log.info('Testing Cosmic Ray Rejection...')

    if debug:
        test(log)

    if run:
        if multi:
            resM = testCosmicrayRejectionMultiPSF(log, stars=2000, psfs=500)
            fileIO.cPickleDumpDictionary(resM,
                                         file.replace('.pk', 'Multi2000.pk'))

        res = testCosmicrayRejection(log)
        fileIO.cPickleDumpDictionary(res, file)

    if plot:
        if not run:
            if multi:
                resM = cPickle.load(open(file.replace('.pk', 'Multi2000.pk')))
            res = cPickle.load(open(file))

        plotResults(res, outdir='results')
        if multi:
            plotResults(resM, outdir='resultsMulti')

    log.info('Run finished...\n\n\n')


if __name__ == "__main__":
    log = lg.setUpLogger('centroidTesting.log')

    #res = testCentroidingImpact(log)
    #fileIO.cPickleDumpDictionary(res, 'centroidTesting.pk')
    #res = cPickle.load(open('centroidTesting.pk'))
    #plotCentroids(res)

    #PSF
    print 'Real PSF'
    xres, yres = testCentroidingImpactSingleDirection(log)
    fileIO.cPickleDumpDictionary(xres, 'centroidTestingX.pk')
    fileIO.cPickleDumpDictionary(yres, 'centroidTestingY.pk')
    xres = cPickle.load(open('centroidTestingX.pk'))
    yres = cPickle.load(open('centroidTestingY.pk'))
    plotCentroidsSingle(xres)
    plotCentroidsSingle(yres, output='Y')

    # #PSF interpolated
    # print 'Real PSF, interpolated'
    # xres, yres = testCentroidingImpactSingleDirection(log, interpolation=True)
    # fileIO.cPickleDumpDictionary(xres, 'centroidTestingXinterpolated.pk')
    # fileIO.cPickleDumpDictionary(yres, 'centroidTestingYinterpolated.pk')
    # xres = cPickle.load(open('centroidTestingXinterpolated.pk'))
    # yres = cPickle.load(open('centroidTestingYinterpolated.pk'))
    # plotCentroidsSingle(xres, output='Xinterpolated')
    # plotCentroidsSingle(yres, output='Yinterpolated')