def encircledEnergy(file='data/psf12x.fits'):
    """
    Calculates the encircled energy from a PSF.
    The default input PSF is 12 times over-sampled with 1 micron pixel.
    """
    #start the script
    log = lg.setUpLogger('PSFencircledEnergy.log')
    log.info('Reading data from %s' % file)

    data = readData(file)
    total = np.sum(data)

    #assume that centre is the same as the peak pixel (zero indexed)
    y, x = np.indices(data.shape)
    ycen, xcen = ndimage.measurements.maximum_position(data)
    log.info('Centre assumed to be (x, y) = (%i, %i)' % (xcen, ycen))

    #change the peak to be 0, 0 and calculate radius
    x -= xcen
    y -= ycen
    radius = np.sqrt(x**2 + y**2)

    #calculate flux in different apertures
    rads = np.arange(12, 600, 12)
    energy = []
    for radlimit in rads:
        mask = radius < radlimit
        energy.append(data[np.where(mask)].sum() / total)
    energy = np.asarray(energy)

    plotEncircledEnergy(rads, energy)
    log.info('Run finished...\n\n\n')
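#Illustration (not from the original source): a minimal, self-contained sketch
#of the encircled-energy computation above, using a synthetic circular Gaussian
#as a stand-in for data/psf12x.fits; the size, sigma and aperture radii below
#are illustrative assumptions.
def encircledEnergySketch(size=201, sigma=12.0):
    import numpy as np

    y, x = np.indices((size, size))
    cen = size // 2
    psf = np.exp(-((x - cen) ** 2 + (y - cen) ** 2) / (2.0 * sigma ** 2))
    total = psf.sum()

    #distance of every pixel from the peak, exactly as above
    radius = np.sqrt((x - cen) ** 2 + (y - cen) ** 2)

    #fraction of the total flux within growing circular apertures; for a 2D
    #Gaussian the expectation is 1 - exp(-r**2 / (2 sigma**2)), i.e. roughly
    #0.39, 0.86 and 0.99 at 1, 2 and 3 sigma
    return [psf[radius < r].sum() / total for r in (sigma, 2 * sigma, 3 * sigma)]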
def FoVanalysis(run=True, outfile='PSFdata.pk'):
    #start the script
    log = lg.setUpLogger('PSFproperties.log')

    #derive results for each file
    if run:
        log.info('Deriving PSF properties...')

        #find files
        fls = glob.glob('/Volumes/disk_xray10/smn2/euclid/PSFs/detector_jitter-1_TOL05_MC_T0133_Nim=*.fits')

        txt = 'Processing %i files...' % (len(fls))
        print txt
        log.info(txt)

        filedata = {}
        for file in fls:
            data = readData(file)
            info = parseName(file)
            values = measureChars(data, info, log)
            filedata[file] = dict(info=info, values=values)
            txt = 'File %s processed...' % file
            print txt
            log.info(txt)

        #save data
        fileIO.cPickleDumpDictionary(filedata, outfile)
    else:
        filedata = cPickle.load(open(outfile))

    #generate plots
    generatePlots(filedata)

    log.info('Run finished...\n\n\n')
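#Illustration (not from the original source): fileIO.cPickleDumpDictionary
#above is a thin convenience wrapper; the save/load round trip it performs is
#plain cPickle, roughly as sketched here (the filename is arbitrary):
import cPickle

def pickleRoundTripSketch(filedata, outfile='PSFdata.pk'):
    cPickle.dump(filedata, open(outfile, 'wb'), protocol=2)
    return cPickle.load(open(outfile, 'rb'))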
Example #3
def shapeComparisonToAST(oversample=3.):
    """
    To calculate shapes from AST PSFs.

    One of the actions from the PLM-SRR was 8941 (RID No: ENG-219), with the
    following wording:
    AST shall provide to the VIS team a PSF profile with associated R2
    with the sampling set to 4 microns and the VIS team will check that when
    applying the R2 processing the result is identical, to double check that
    the process is correct.
    """
    log = lg.setUpLogger('delete.log')

    files = glob.glob('*.fits')
    files = sorted(files)

    for file in files:
        data = pf.getdata(file)

        settings = dict(sampling=1.0/oversample, iterations=20)
        sh = shape.shapeMeasurement(data, log, **settings)
        reference = sh.measureRefinedEllipticity()

        R2 = reference['R2']  #in pixels
        R2a = reference['R2arcsec']

        print file, R2, R2a
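#Illustration (not from the original source): R2 above is reported both in
#(oversampled) pixels**2 and in arcsec**2. Assuming the nominal VIS plate
#scale of 0.1 arcsec per native pixel, the conversion is simply the squared
#pixel scale of the oversampled grid:
def R2toArcsecSketch(R2pix, oversample=3.0, platescale=0.1):
    return R2pix * (platescale / oversample) ** 2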
Example #5
def testFiles():
    #testing part, looks for blob?.fits and psf.fits to derive centroids and ellipticity
    import pyfits as pf
    import glob as g
    from support import logger as lg
    import sys

    files = g.glob('blob?.fits')

    log = lg.setUpLogger('shape.log')
    log.info('Testing shape measuring class...')

    for file in files:
        log.info('Processing file %s' % file)
        data = pf.getdata(file)
        sh = shape.shapeMeasurement(data, log)
        results = sh.measureRefinedEllipticity()
        sh.writeFITS(results['GaussianWeighted'],
                     file.replace('.fits', 'Gweighted.fits'))

        print file
        pprint.pprint(results)
        print

    file = 'psf1x.fits'
    log.info('Processing file %s' % file)
    data = pf.getdata(file)
    sh = shape.shapeMeasurement(data, log)
    results = sh.measureRefinedEllipticity()
    sh.writeFITS(results['GaussianWeighted'],
                 file.replace('.fits', 'Gweighted.fits'))
    print file
    pprint.pprint(results)
    print

    file = 'stamp.fits'
    log.info('Processing file %s' % file)
    data = pf.getdata(file)
    settings = dict(sigma=10.0)
    sh = shape.shapeMeasurement(data, log, **settings)
    results = sh.measureRefinedEllipticity()
    sh.writeFITS(results['GaussianWeighted'],
                 file.replace('.fits', 'Gweighted.fits'))
    print file
    pprint.pprint(results)
    print

    file = 'gaussian.fits'
    log.info('Processing file %s' % file)
    data = pf.getdata(file)
    settings = dict(sampling=0.2)
    sh = shape.shapeMeasurement(data, log, **settings)
    results = sh.measureRefinedEllipticity()
    sh.writeFITS(results['GaussianWeighted'],
                 file.replace('.fits', 'Gweighted.fits'))
    print file
    pprint.pprint(results)
    print

    log.info('All done\n\n')
Example #6
    def setUp(self):
        from support import logger as lg
        self.log = lg.setUpLogger('shapeTesting.log')

        self.psffile12x = '../data/psf12x.fits'
        self.psffile = '../data/psf1x.fits'
        self.tolerance = 1.e-7
        self.sigma = 40.0
        self.sigmax = 67.25
        self.sigmay = 24.15
        self.sigmax2 = 77.12343
        self.sigmay2 = 42.34543
        self.xcent = 500.
        self.ycent = 500.

        #create 2D Gaussians that will be used for testing
        self.GaussianCirc = shapeMeasurement(np.zeros(
            (1000,
             1000)), self.log).circular2DGaussian(self.xcent, self.ycent,
                                                  self.sigma)['Gaussian']
        self.Gaussian = shapeMeasurement(np.zeros(
            (1000, 1000)), self.log).Gaussian2D(self.xcent, self.ycent,
                                                self.sigmax,
                                                self.sigmay)['Gaussian']
        self.Gaussian2 = shapeMeasurement(np.zeros(
            (1000, 1000)), self.log).Gaussian2D(self.xcent, self.ycent,
                                                self.sigmax2,
                                                self.sigmay2)['Gaussian']
def testGaussian():
    from support import gaussians

    log = lg.setUpLogger('delete.me')

    data = gaussians.Gaussian2D(100, 100, 200, 200, 20, 20)['Gaussian']
    data /= np.max(data)
    data *= 2.e5

    #measure shape
    sh = shape.shapeMeasurement(data, log)
    reference = sh.measureRefinedEllipticity()
    print reference

    #non-linearity shape
    newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(data, 0.2)
    newdata[newdata < 0.] = 0.

    sh = shape.shapeMeasurement(newdata, log)
    nonlin = sh.measureRefinedEllipticity()
    print nonlin

    print reference['ellipticity'] - nonlin['ellipticity']
    print reference['e1'] - nonlin['e1']
    print reference['e2'] - nonlin['e2']
    print reference['R2'] - nonlin['R2']
Example #8
def runAll(deriveCDF=True, examplePlot=True):
    """
    Run all steps from finding suitable Gaia BAM files to analysing them.
    
    :return: None
    """
    log = lg.setUpLogger('analyse.log')
    log.info('\n\nStarting to analyse')

    if deriveCDF: deriveCumulativeFunctionsforBinning()
    if examplePlot: generateBAMdatagridImage()

    files = findFiles(log)
    data, info = readData(log, files)
    data = preProcessData(log, data, info)
    analyseData(log, files, data, info)
Example #12
def peakFraction(file='data/psf12x.fits', radius=0.65, oversample=12):
    """
    Calculates the fraction of energy in the peak pixel for a given PSF compared
    to an aperture of a given radius.
    """
    #start the script
    log = lg.setUpLogger('PSFpeakFraction.log')
    log.info('Reading data from %s' % file)

    #read data
    data = readData(file)

    #assume that centre is the same as the peak pixel (zero indexed)
    y, x = np.indices(data.shape)
    ycen, xcen = ndimage.measurements.maximum_position(data)
    log.info('Centre assumed to be (x, y) = (%i, %i)' % (xcen, ycen))

    #change the peak to be 0, 0 and calculate radius
    x -= xcen
    y -= ycen
    rad = np.sqrt(x**2 + y**2)

    #calculate flux in the apertures
    mask = rad < (radius * oversample * 10)
    energy = data[np.where(mask)].sum()

    #calculate the flux in the peak pixel
    if oversample > 1:
        shift = oversample / 2
        peak = data[ycen - shift:ycen + shift + 1,
                    xcen - shift:xcen + shift + 1].sum()
    else:
        peak = data[ycen, xcen]

    print peak / energy

    log.info('Run finished...\n\n\n')
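#Illustration (not from the original source): the peak-pixel flux above is
#emulated by summing an oversample-wide window of the oversampled PSF. The
#same idea via exact block-summing (this version assumes the window is exactly
#oversample x oversample; all names here are hypothetical):
import numpy as np

def nativePeakFractionSketch(psf, oversample=12):
    ny, nx = psf.shape
    trimmed = psf[:ny - ny % oversample, :nx - nx % oversample]
    #rebin: sum oversample x oversample blocks into native pixels
    native = trimmed.reshape(trimmed.shape[0] // oversample, oversample,
                             trimmed.shape[1] // oversample, oversample).sum(axis=(1, 3))
    return native.max() / psf.sum()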
Example #15
            os.remove(output)

        #create a new FITS file, using HDUList instance
        ofd = pf.HDUList(pf.PrimaryHDU())

        #new image HDU
        hdu = pf.ImageHDU(data=data)

        #add info
        for key, value in self.settings.iteritems():
            hdu.header.update(key.upper(), value)

        hdu.header.add_history('If questions, please contact Sami-Matias Niemi (smn2 at mssl.ucl.ac.uk).')
        hdu.header.add_history('This file has been created with the VISsim Python Package at %s' % datetime.datetime.isoformat(datetime.datetime.now()))
        hdu.verify('fix')

        ofd.append(hdu)

        #write the actual file
        ofd.writeto(output)


if __name__ == '__main__':
    log = lg.setUpLogger('generateFlat.log')

    settings = dict(sigma=0.01)
    flat = flatField(log, **settings)
    data = flat.generateFlat()
    flat.writeFITS(data, 'VISFlatField1percent.fits')

    log.info('Run finished...\n\n\n')
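#Illustration (not from the original source): the writeFITS fragment above uses
#the old pyfits header.update() call. With a current astropy.io.fits the same
#pattern looks roughly like this (the settings dict and filename are placeholders):
def writeFITSsketch(data, settings, output='example.fits'):
    import datetime
    from astropy.io import fits

    hdu = fits.ImageHDU(data=data)
    for key, value in settings.items():
        hdu.header[key.upper()] = value  #item assignment replaces header.update()
    hdu.header.add_history('Created at %s' % datetime.datetime.now().isoformat())
    fits.HDUList([fits.PrimaryHDU(), hdu]).writeto(output, overwrite=True)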
Example #16
def plotfile(filename='/Users/sammy/EUCLID/vissim-python/data/psf1x.fits',
             sigma=0.75,
             iterations=4,
             out='test.pdf',
             scale=False,
             log=False,
             zoom=30):
    """
    Calculate ellipticity from a given input file using quadrupole moments and
    plot the data.
    """
    settings = dict(sigma=sigma, iterations=iterations)

    l = lg.setUpLogger('CTItesting.log')

    data = pf.getdata(filename)
    if scale:
        data /= np.max(data)
        data *= 1.e5

    sh = shape.shapeMeasurement(data, l, **settings)
    results = sh.measureRefinedEllipticity()

    fig, axarr = plt.subplots(1, 2, sharey=True)
    ax1 = axarr[0]
    ax2 = axarr[1]
    fig.subplots_adjust(wspace=0)

    if log:
        ax1.set_title(r'$\log_{10}$(Image)')
        ax2.set_title(r'$\log_{10}$(Gaussian Weighted)')
    else:
        ax1.set_title(r'Image')
        ax2.set_title(r'Gaussian Weighted')

    #no ticks on the right hand side plot
    plt.setp(ax2.get_yticklabels(), visible=False)

    if log:
        im1 = ax1.imshow(np.log10(data), origin='lower')
        im2 = ax2.imshow(np.log10(results['GaussianWeighted']), origin='lower')
    else:
        im1 = ax1.imshow(data, origin='lower')
        im2 = ax2.imshow(results['GaussianWeighted'], origin='lower')

    ang = np.degrees(0.5 * np.arctan(results['e2'] / results['e1']))  #Ellipse.angle is in degrees

    e = Ellipse(xy=(results['centreX'] - 1, results['centreY'] - 1),
                width=results['a'],
                height=results['b'],
                angle=ang,
                facecolor='none',
                ec='k',
                lw=2)
    fig.gca().add_artist(e)

    if zoom is not None:
        ax1.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax2.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax1.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)
        ax2.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)

    plt.savefig(out)
    plt.close()
    res = ghostContribution(log, centered=True)
    fileIO.cPickleDumpDictionary(res, 'ghostContributionToStarCentered.pk')
    res = cPickle.load(open('ghostContributionToStarCentered.pk'))
    plotGhostContribution(res, r'Shape Bias: 24.5 mag$_{AB}$ Point Source', 'shapeBiasCentred.pdf',
                          r'Size Bias: 24.5 mag$_{AB}$ Point Source', 'sizeBiasCentred.pdf')
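#Illustration (not from the original source): shape.shapeMeasurement is not
#reproduced here, but the e1, e2 and R2 it returns follow the standard
#second-moment (quadrupole) definitions; an unweighted sketch (the real code
#iterates with a Gaussian weight of width sigma):
import numpy as np

def quadrupoleShapeSketch(img):
    img = np.asarray(img, dtype=float)
    y, x = np.indices(img.shape)
    flux = img.sum()
    xc = (x * img).sum() / flux
    yc = (y * img).sum() / flux
    Qxx = (((x - xc) ** 2) * img).sum() / flux
    Qyy = (((y - yc) ** 2) * img).sum() / flux
    Qxy = ((x - xc) * (y - yc) * img).sum() / flux
    e1 = (Qxx - Qyy) / (Qxx + Qyy)
    e2 = 2.0 * Qxy / (Qxx + Qyy)
    R2 = Qxx + Qyy  #size measure, in pixels**2
    return e1, e2, np.sqrt(e1 ** 2 + e2 ** 2), R2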


if __name__ == '__main__':
    run = False
    debug = False
    focus = False
    star = False

    #start the script
    log = lg.setUpLogger('ghosts.log')
    log.info('Analysing the impact of ghost images...')

    #impact on object detection
    log.info('Calculating the effect on object detection...')
    #objectDetection(log)
    #objectDetection(log, ghostlevels=(5.e-5, 5.e-7))
    objectDetection(log, covering=11500, ghostlevels=(5.e-5, 4.e-6, 5.e-6, 1.e-6, 6.e-7))

    #impact on shape measurement
    #log.info('Calculating the effect on shape measurements...')
    #shapeMeasurement(log)

    if debug:
         #out of focus ghosts
         res = analyseOutofFocusImpact(log, filename='data/psf1x.fits', maxdistance=100, samples=7,
    fileIO.writeFITS(rs, 'multitrap/ctiMSSLdivTs.fits', int=False)
    fileIO.writeFITS(rps, 'multitrap/ctiMSSLdivTps.fits', int=False)
    print 'Parallel Ratio [max, min]:', rp.max(), rp.min()
    print 'Serial Ratio [max, min]:', rs.max(), rs.min()
    print 'Serial+Parallel Ratio [max, min]:', rps.max(), rps.min()

    print 'Checking arrays, parallel'
    np.testing.assert_array_almost_equal(ctiMSSLp, Tp, decimal=6, err_msg='', verbose=True)
    print 'Checking arrays, serial'
    np.testing.assert_array_almost_equal(ctiMSSLs, Ts, decimal=6, err_msg='', verbose=True)
    print 'Checking arrays, serial + parallel'
    np.testing.assert_array_almost_equal(ctiMSSLps, Tps, decimal=6, err_msg='', verbose=True)


if __name__ == '__main__':
    log = lg.setUpLogger('CTItesting.log')

    #testShapeMeasurementAlgorithms(log)
    #testShapeMeasurementAlgorithms(log, iterations=10)
    #testShapeMeasurementAlgorithms(log, iterations=500)
    #testShapeMeasurementAlgorithms(log, iterations=1, fixedPosition=True, fixedX=83.829, fixedY=84.504, sigma=0.1)

    #testCTIinclusion(log)

    #when running these, remember to change CTI.py to include import cdm03bidirTest as cdm03bidir
    try:
        print '\n\n\nSingle Trap Species'
        fullQuadrantTestSingleTrapSpecies()
    except AssertionError:
        _,_,tb = sys.exc_info()
        traceback.print_tb(tb) # Fixed format
        sys.exit(8)

    #FITS extension
    if opts.ext is None:
        ext = 0
    else:
        ext = opts.ext

    #name of the output file
    if opts.output is None:
        output = 'VISFPA.fits'
    else:
        output = opts.output

    #logger
    log = lg.setUpLogger('tileFPA.log')

    #look for files
    files = g.glob(opts.files)
    files.sort()
    if len(files) / 36. > 1.0 or len(files) == 0:
        print 'Detected %i input files, but the current version does not support anything but tiling 36 CCDs...' % len(files)
        sys.exit(9)

    #write to the log what files were used
    log.info('Input files:')
    for file in files:
        log.info(file)

    #inputs
    inputs = dict(files=files, ext=ext, output=output)
Example #21
                   numpoints=1,
                   scatterpoints=1,
                   markerscale=1.8)
        plt.savefig(outdir + '/CosmicrayDeltaSize%i.pdf' % i)
        plt.close()


if __name__ == '__main__':
    run = True
    multi = False
    plot = True
    debug = False
    file = 'CosmicrayResults.pk'

    #start a logger
    log = lg.setUpLogger('CosmicrayRejection.log')
    log.info('Testing Cosmic Ray Rejection...')

    if debug:
        test(log)

    if run:
        if multi:
            resM = testCosmicrayRejectionMultiPSF(log, stars=2000, psfs=500)
            fileIO.cPickleDumpDictionary(resM,
                                         file.replace('.pk', 'Multi2000.pk'))

        res = testCosmicrayRejection(log)
        fileIO.cPickleDumpDictionary(res, file)

    if plot:
    plt.text(0.1, 0.85, r'$\sigma=$'+str(fits['sigma']), va='top',  transform=ax.transAxes, fontsize=11)
    plt.text(0.1, 0.80, r'$\tau_{r}=$'+str(fits['taur']), va='top',  transform=ax.transAxes, fontsize=11)
    plt.semilogy(prof[xstart:xstart+len+3], 'rD-', label='Fitted')
    plt.semilogy(np.arange(len)+3, values[:len], 'bo', label='Data')
    plt.xlabel('Pixels')
    plt.ylabel('electrons')
    plt.legend(loc='best')
    plt.savefig(output, numpoints=1)
    plt.close()


if __name__ == '__main__':
    electrons = 43500.

    #set up logger
    log = lg.setUpLogger('fitting.log')

    #input measurement values
    datafolder = '/Users/smn2/EUCLID/CTItesting/data/'
    gain1 = 1.17

    tmp = np.loadtxt(datafolder + 'CCD204_05325-03-02_Hopkinson_EPER_data_200kHz_one-output-mode_1.6e10-50MeV.txt',
                     usecols=(0, 6)) #6 = 152.55K
    ind = tmp[:, 0]
    values = tmp[:, 1]
    values = values[ind > 0.]
    values *= gain1
    vals = np.ones((2066, 1)) * 4.0
    ln = len(values)
    vals[1063:1063+ln, 0] = values
    vals[1075:, 0] = 3
        de.append(results['ellipticity'] - reference['ellipticity'])
        dR2.append(results['R2'] - reference['R2'])

    out[1] = [e1, e2, e, R2, de1, de2, de, dR2]

    return out, reference


if __name__ == '__main__':
    run = True
    debug = False
    plots = True
    error = False

    #start the script
    log = lg.setUpLogger('flatfieldCalibration.log')
    log.info('Testing flat fielding calibration...')

    if error:
        res = findTolerableError(log)

        fileIO.cPickleDumpDictionary(res, 'errors/residuals.pk')
        res = cPickle.load(open('errors/residuals.pk'))

        plotTolerableErrorE(res, output='errors/FlatFieldingTolerableErrorE.pdf')
        plotTolerableErrorR2(res, output='errors/FlatFieldingTolerableErrorR2.pdf')

    if run:
        results = testFlatCalibration(log, flats=np.arange(5, 100, 9))
        fileIO.cPickleDumpDictionary(results, 'flatfieldResults.pk')
    fileIO.writeFITS(ctiMSSL, 'tmp1.fits', int=False)
    fileIO.writeFITS(ctiThibault, 'tmp2.fits', int=False)
    fileIO.writeFITS(wcti/ctiMSSL, 'tmp3.fits', int=False)

    for key in wctiresults:
        tmp1 = wctiresults[key] - wMSSLctiresults[key]
        tmp2 = wctiresults[key] - wThibautctiresults[key]
        if 'Gaussian' in key:
            print key, np.max(np.abs(tmp1)), np.max(np.abs(tmp2))
        else:
            print key, tmp1, tmp2


if __name__ == '__main__':
    log = lg.setUpLogger('CTItesting.log')

    #simple test with Thibaut's files
    simpleTest(log)

    #use Thibaut's input galaxies
    galaxies = 800
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDataP.pk', galaxies=galaxies, serial=-1)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDatab6P.pk', beta=True, galaxies=galaxies, serial=-1)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDataThibautsCDM03P.pk', thibautCDM03=True, galaxies=galaxies, serial=-1)
    #plotResultsNoNoise('resultsNoNoiseThibautsDataP.pk', 'MSSL CDM03 Parameters (beta=0.29, 0.12) (parallel only)')
    #plotResultsNoNoise('resultsNoNoiseThibautsDatab6P.pk', 'MSSL CDM03 Parameters (beta=0.6, 0.6) (parallel only)')
    #plotResultsNoNoise('resultsNoNoiseThibautsDataThibautsCDM03P.pk', 'Thibaut CDM03 Parameters (parallel only)')
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsData.pk', galaxies=galaxies)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDatab6.pk', beta=True, galaxies=galaxies)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDataThibautsCDM03.pk', thibautCDM03=True, galaxies=galaxies)
Example #28
def analyseSpotsFitting(files,
                        gaussian=False,
                        pixelvalues=False,
                        bessel=True,
                        maxfev=10000):
    """
    Analyse spot measurements using different fitting methods.

    :param files: names of the FITS files to analyse (should match the IDs)
    :param gaussian: whether or not to do a simple Gaussian fitting analysis
    :param pixelvalues: whether or not to plot pixel values on a grid
    :param bessel: whether or not to do a Bessel + Gaussian convolution analysis
    :param maxfev: maximum number of iterations in the least squares fitting

    :return: None
    """
    log = lg.setUpLogger('spots.log')
    log.info('Starting...')
    over = 24
    settings = dict(iterations=8)
    ids = fileIDs()

    d = {}
    for filename in files:
        tmp = readData(filename, crop=False)
        f = filename.replace('small.fits', '')
        d[f] = tmp

    if pixelvalues:
        #plot different pixel values
        plotPixelValues(d, ids)

    if gaussian:
        #fit simple Gaussians
        Gaussians = {}
        for f, im in d.iteritems():
            #horizontal direction
            sumH = np.sum(im, axis=0)
            Hfit = gaussianFit(sumH,
                               initials=[
                                   np.max(sumH) - np.median(sumH), 8., 0.4,
                                   np.median(sumH)
                               ])
            plotLineFits(sumH, Hfit, f)

            #vertical direction
            sumV = np.sum(im, axis=1)
            Vfit = gaussianFit(sumV,
                               initials=[
                                   np.max(sumV) - np.median(sumV), 8., 0.4,
                                   np.median(sumV)
                               ])
            plotLineFits(sumH, Hfit, f, horizontal=False)

            #2D gaussian
            tmp = im.copy() - np.median(im)
            twoD = fit.Gaussian2D(tmp, intials=[np.max(tmp), 7, 7, 0.4, 0.4])

            print f, Hfit['sigma'], twoD[4], Vfit['sigma'], twoD[3], int(
                np.max(im))
            Gaussians[f] = [Hfit['sigma'], twoD[4], Vfit['sigma'], twoD[3]]

        fileIO.cPickleDumpDictionary(Gaussians, 'SpotmeasurementsGaussian.pk')

        plotGaussianResults(Gaussians, ids, output='line')
        plotGaussianResults(Gaussians, ids, output='twoD', vals=[1, 3])

    if bessel:
        Gaussians = {}
        #Bessel + Gaussian
        hf = 8 * over
        for f, im in d.iteritems():
            #if '21_59_31s' not in f:
            #    continue

            #oversample the data, needed for the convolution
            oversampled = ndimage.zoom(im.copy(), over, order=0)
            fileIO.writeFITS(oversampled, f + 'block.fits', int=False)

            #find the centre in the oversampled frame; needed for the Bessel model and as a starting point for the fit
            tmp = oversampled.copy() - np.median(oversampled)
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()
            midx = results['centreX'] - 1.
            midy = results['centreY'] - 1.

            #generate 2D bessel and re-centre using the above centroid, normalize to the maximum image value and
            #save to a FITS file.
            bes = generateBessel(radius=0.45, oversample=over, size=16 * over)
            shiftx = -midx + hf
            shifty = -midy + hf
            bes = ndimage.interpolation.shift(bes, [-shifty, -shiftx], order=0)
            bes /= np.max(bes)
            fileIO.writeFITS(bes, f + 'bessel.fits', int=False)

            #check the residual with only the bessel and save to a FITS file
            t = ndimage.zoom(bes.copy(), 1. / over, order=0)
            t /= np.max(t)
            fileIO.writeFITS(im.copy() - np.median(oversampled) -
                             t * np.max(tmp),
                             f + 'residual.fits',
                             int=False)
            fileIO.writeFITS(oversampled - bes.copy() * np.max(tmp),
                             f + 'residualOversampled.fits',
                             int=False)

            #best guesses for fitting parameters
            params = [1., results['centreX'], results['centreY'], 0.5, 0.5]

            biassubtracted = im.copy() - np.median(oversampled)
            #error function is a convolution between a bessel function and 2D gaussian - data
            #note that the error function must be on the low-res grid because it is the pixel values we try to match
            errfunc = lambda p: np.ravel(
                ndimage.zoom(signal.fftconvolve(
                    fitf(*p)(*np.indices(tmp.shape)), bes.copy(), mode='same'),
                             1. / over,
                             order=0) * np.max(tmp) - biassubtracted.copy())

            #fit
            res = sp.optimize.leastsq(errfunc,
                                      params,
                                      full_output=True,
                                      maxfev=maxfev)

            #save the fitted residuals
            t = signal.fftconvolve(fitf(*res[0])(*np.indices(tmp.shape)),
                                   bes.copy(),
                                   mode='same')
            fileIO.writeFITS(res[2]['fvec'].reshape(im.shape),
                             f + 'residualFit.fits',
                             int=False)
            fileIO.writeFITS(fitf(*res[0])(*np.indices(tmp.shape)),
                             f + 'gaussian.fits',
                             int=False)
            fileIO.writeFITS(t, f + 'BesselGausOversampled.fits', int=False)
            fileIO.writeFITS(ndimage.zoom(t, 1. / over, order=0),
                             f + 'BesselGaus.fits',
                             int=False)

            #print out the results and save to a dictionary
            print results['centreX'], results['centreY'], res[2]['nfev'], res[
                0]

            #sigmas are symmetric as the width of the fitting function is later squared...
            sigma1 = np.abs(res[0][3])
            sigma2 = np.abs(res[0][4])
            Gaussians[f] = [sigma1, sigma2]

        fileIO.cPickleDumpDictionary(Gaussians,
                                     'SpotmeasurementsBesselGaussian.pk')

        #plot the findings
        plotGaussianResults(Gaussians, ids, output='Bessel', vals=[0, 1])
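#Illustration (not from the original source): the core of the Bessel + Gaussian
#analysis above is a least-squares fit whose residual is (Gaussian model
#convolved with a fixed oversampled kernel, rebinned to the data grid) minus
#the data; a stripped-down sketch of that pattern (shapes, kernel and starting
#values are assumptions):
import numpy as np
from scipy import ndimage, optimize, signal

def fitConvolvedModelSketch(data, kernel, over=4):
    y, x = np.indices((data.shape[0] * over, data.shape[1] * over))

    def model(p):
        amp, xc, yc, sx, sy = p
        g = amp * np.exp(-(((x - xc) / sx) ** 2 + ((y - yc) / sy) ** 2) / 2.0)
        conv = signal.fftconvolve(g, kernel, mode='same')
        return ndimage.zoom(conv, 1.0 / over, order=0)  #back to the data grid

    errfunc = lambda p: np.ravel(model(p) - data)
    p0 = [data.max(), data.shape[1] * over / 2.0,
          data.shape[0] * over / 2.0, float(over), float(over)]
    popt, ier = optimize.leastsq(errfunc, p0, maxfev=10000)
    return popt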
    plt.close()

    #eR2
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_color_cycle(['r', 'k', 'm', 'c', 'g'])

    for i in sorted(set(res)):
        msk = res == i
        ax.plot(xpos[msk]-np.round(xpos[msk], decimals=0), R2[msk],
                marker=marker.next(), linestyle='', label='Sampling=%i' % i)

    ax.set_xlim(-0.6, 0.6)
    ax.set_xlabel('X position')
    ax.set_ylabel(r'$R^{2}$')

    plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='best')
    plt.savefig('R2.pdf')
    plt.close()


if __name__ == "__main__":
    log = lg.setUpLogger('resolutionTesting.log')

    #calculate, save and load results
    res = calculateShapes(log, glob.glob('Q0*stars*x.fits'), 'test.dat')
    fileIO.cPickleDumpDictionary(res, 'results.pk')
    res = cPickle.load(open('results.pk'))

    #plot results
    plotResults(res)


if __name__ == '__main__':
    log = lg.setUpLogger('gaussians.log')
    log.info('Testing gaussians...')

    xsize, ysize = 300, 300
    xcen, ycen = 150, 150
    sigmax = 27.25
    sigmay = 14.15

    #calculate ellipticity from Sigmas
    e = ellipticityFromSigmas(sigmax, sigmay)

    #generate a 2D gaussian with given properties...
    gaussian2d = Gaussian2D(xcen, ycen, xsize, ysize, sigmax, sigmay)

    #plot
    plot3D(gaussian2d)
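#Illustration (not from the original source): for an elliptical Gaussian the
#e1 component follows directly from the widths; ellipticityFromSigmas
#presumably implements something equivalent to this, e.g. sigmax=27.25,
#sigmay=14.15 gives e1 of about 0.575:
def ellipticityFromSigmasSketch(sigmax, sigmay):
    return (sigmax ** 2 - sigmay ** 2) / (sigmax ** 2 + sigmay ** 2)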
Example #32
                fail += 1

    print 'Failed %i times' % fail
    print np.mean(derived), np.median(derived), np.std(derived)


if __name__ == '__main__':
    run = False
    plot = False
    error = False
    debug = False

    simpleAnalytical()

    #start the script
    log = lg.setUpLogger('biasCalibration.log')
    log.info('Testing bias level calibration...')

    if error:
        if debug:
            resPiston = findTolerableErrorPiston(log,
                                                 file='data/psf1x.fits',
                                                 oversample=1.0,
                                                 iterations=4,
                                                 psfs=500,
                                                 samples=8)
            resSlope = findTolerableErrorSlope(log,
                                               file='data/psf1x.fits',
                                               oversample=1.0,
                                               iterations=4,
                                               psfs=500,
    parser.add_option('-a', '--sampling', dest='sampling',
                      help='Change the sampling in the shape measuring algorithm', metavar='sampling')

    if printHelp:
        parser.print_help()
    else:
        return parser.parse_args()


if __name__ == '__main__':
    opts, args = processArgs()

    if opts.input is None:
        processArgs(True)
        sys.exit(8)

    settings = {}
    if opts.sourcefile is None:
        settings['sourceFile'] = None
    else:
        settings['sourceFile'] = opts.sourcefile
    if not opts.sampling is None:
        settings['sampling'] = float(opts.sampling)

    log = lg.setUpLogger('analyse.log')
    log.info('\n\nStarting to analyse %s' % opts.input)

    analyse = analyseVISdata(opts.input, log, **settings)
    results = analyse.doAll()

    log.info('All done...')
Example #38
        sys.exit(8)

    #FITS extension
    if opts.ext is None:
        ext = 0
    else:
        ext = opts.ext

    #name of the output file
    if opts.output is None:
        output = 'VISCCD.fits'
    else:
        output = opts.output

    #logger
    log = lg.setUpLogger('tileCCDs.log')

    #look for files
    files = g.glob(opts.files)
    files.sort()
    if len(files) / 4. > 1.0 or len(files) == 0:
        print 'Detected %i input files, but the current version does not support anything but tiling four files...' \
              % len(files)
        sys.exit(9)

    #write to the log what files were used
    log.info('Input files:')
    for file in files:
        log.info(file)

    #inputs
        results = sh.measureRefinedEllipticity()

        e.append(results['ellipticity'])
        R2.append(results['R2'])

    return np.asarray(e), np.asarray(R2)


def plotDistribution(data, xlabel, output):
    """

    :param data:
    :return:
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(data, bins=15, color='g', normed=True, log=True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel('PDF')
    plt.savefig(output)
    plt.close()


if __name__ == '__main__':
    #start the script
    log = lg.setUpLogger('testShapeMeasurement.log')

    e, R2 = testShapeMeasurementAlgorithm(log)
    plotDistribution(e, 'Ellipticity', 'ellipticity.pdf')
    plotDistribution(R2, r'Size $(R^{2})$', 'size.pdf')
            dest='minimum',
            help="The minimum number of pixels a smallest object can cover",
            metavar='float')
        parser.add_option(
            '-a',
            '--above_background',
            dest='above',
            help=
            "The significance of the detection (x sigma above the background)",
            metavar='float')
        if printHelp:
            parser.print_help()
        else:
            return parser.parse_args()

    log = lg.setUpLogger('sourceFinding.log')

    opts, args = processArgs()

    if opts.file is None:
        processArgs(True)
        sys.exit(8)

    settings = dict()
    if opts.sigma is not None:
        settings.update({'sigma': float(opts.sigma)})
    if opts.large is not None:
        settings.update({'clean_size_max': float(opts.large)})
    if opts.minimum is not None:
        settings.update({'clean_size_min': float(opts.minimum)})
    if opts.above is not None:
    ax.axhline(y=0.0)
    ax.scatter(d, dr, c='m', marker='*', label=r'$R^{2}$')

    ax.set_xlim(-0.1, 12.1)
    ax.set_ylim(-0.005, 0.005)

    ax.set_xlabel('Centroid Offset [1/12 pixels]')
    ax.set_ylabel(r'$R^{2} - \bar{R}^{2} \quad$ [pixels$^{2}$]')

    plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8)
    plt.savefig('deltasize%s.pdf' % output)
    plt.close()


if __name__ == "__main__":
    log = lg.setUpLogger('centroidTesting.log')

    #res = testCentroidingImpact(log)
    #fileIO.cPickleDumpDictionary(res, 'centroidTesting.pk')
    #res = cPickle.load(open('centroidTesting.pk'))
    #plotCentroids(res)

    #PSF
    print 'Real PSF'
    xres, yres = testCentroidingImpactSingleDirection(log)
    fileIO.cPickleDumpDictionary(xres, 'centroidTestingX.pk')
    fileIO.cPickleDumpDictionary(yres, 'centroidTestingY.pk')
    xres = cPickle.load(open('centroidTestingX.pk'))
    yres = cPickle.load(open('centroidTestingY.pk'))
    plotCentroidsSingle(xres)
    plotCentroidsSingle(yres, output='Y')

    def runAll(self, nostars=True):
        """
        Run all methods sequentially.
        """
        if nostars:
            self.createStarlist()
            self.addObjects(inputlist='stars.dat')
        self.createGalaxylist()
        self.addObjects()
        self.maskCrazyValues()


if __name__ == '__main__':
    log = lg.setUpLogger('generateGalaxies.log')
    log.info('Starting to create fake galaxies')

    fakedata = generateFakeData(log)
    fakedata.runAll()

    #no noise or background
    settings = dict(rdnoise=0.0, background=0.0, output='nonoise.fits', poisson=iraf.no)
    fakedata = generateFakeData(log, **settings)
    fakedata.runAll()

    #postage stamp galaxy
    settings = dict(rdnoise=0.0, background=0.0, output='stamp.fits', poisson=iraf.no, xdim=200, ydim=200)
    fakedata = generateFakeData(log, **settings)
    fakedata.addObjects(inputlist='singlegalaxy.dat')
    fakedata.maskCrazyValues('stamp.fits')
Example #43
            print '============================================'
            print '  Merging quadrants of xCCD #%d and yCCD #%d' % (xCCD, yCCD)
            print '============================================'
            #
            files = glob.glob('Q*_x%d_y%d_d1.fits' % (xCCD, yCCD))
            #
            if len(files) > 0:
                output = 'CCD_x%d_y%d_d1.fits' % (xCCD, yCCD)
                inputs = dict(files=files, ext=1, output=output)
                #
                tileCCD.tileCCD(inputs,
                                lg.setUpLogger('tileCCDs.log')).runAll()
    """
    print
    print '=============================================='
    print '  Dither n.', dither
    print '  Merging all CCDs from same dither position'
    print '=============================================='
    #
    files = glob.glob('CCD*_d%d.fits' % (dither))
    print len(files)
    print files
    from time import sleep
    sleep(10)
    output = 'FPA_Dith%d.fits' % (dither)
    inputs = dict(files=files, ext=0, output=output)
    tileFPA.tileFPA(inputs, lg.setUpLogger('tileFPA.log')).runAll()

# Deleting all single CCD and log files
call('rm *log', shell=True)
Example #44
        parser.print_help()
    else:
        return parser.parse_args()


if __name__ == '__main__':
    opts, args = processArgs()

    if opts.input is None:
        processArgs(True)
        sys.exit(8)

    if opts.output is None:
        opts.output = '.'

    log = lg.setUpLogger(opts.output + '/BasisSet.log')
    log.info('\n\nStarting to derive basis set functions...')

    if opts.basis is None:
        opts.basis = 20
    else:
        opts.basis = int(opts.basis)
    log.info('%i basis sets will be derived' % opts.basis)

    files = glob.glob(opts.input)
    all = []
    sides = []
    for file in files:
        log.info('Processing %s' % file)
        #load data
        data = pf.getdata(file)
Example #46
def shapeMovie(
        filename='/Users/sammy/EUCLID/CTItesting/Reconciliation/damaged_image_parallel.fits',
        sigma=0.75,
        scale=False,
        zoom=30,
        frames=20,
        subtractMedian=False):
    settings = dict(sigma=sigma, iterations=1)
    #settings = dict(sigma=sigma, iterations=1, fixedPosition=True, fixedX=85.0, fixedY=85.)

    l = lg.setUpLogger('CTItesting.log')

    data = pf.getdata(filename)

    if scale:
        data /= np.max(data)
        data *= 1.e5

    if subtractMedian:
        data -= np.median(data)
        data = data[78:90, 78:90]  #also limit the area

    sh = shape.shapeMeasurement(data, l, **settings)
    results = sh.measureRefinedEllipticity()
    ang = np.degrees(0.5 * np.arctan(results['e2'] / results['e1']))  #Ellipse.angle is in degrees

    fig, axarr = plt.subplots(1, 2, sharey=True)
    ax1 = axarr[0]
    ax2 = axarr[1]
    fig.subplots_adjust(wspace=0)

    ax1.set_title(r'Image w/ CTI')
    ax2.set_title(r'Gaussian Weighted')

    #no ticks on the right hand side plot
    plt.setp(ax2.get_yticklabels(), visible=False)

    ax1.imshow(data, origin='lower')
    ax2.imshow(results['GaussianWeighted'], origin='lower')

    if zoom is not None:
        ax1.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax2.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax1.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)
        ax2.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)

    text = ax2.text(0.02, 0.95, '', transform=ax2.transAxes, color='white')

    e = Ellipse(xy=(results['centreX'] - 1, results['centreY'] - 1),
                width=results['a'],
                height=results['b'],
                angle=ang,
                facecolor='none',
                ec='white',
                lw=2)

    def init():
        # initialization function: plot the background of each frame
        ax2.imshow([[], []])
        fig.gca().add_artist(e)
        text.set_text(' ')
        return ax2, text, e

    def animate(i):
        settings = dict(sigma=sigma, iterations=i + 1)
        #settings = dict(sigma=sigma, iterations=i+1, fixedPosition=True, fixedX=85.0, fixedY=85.)
        sh = shape.shapeMeasurement(data, l, **settings)
        results = sh.measureRefinedEllipticity()

        text.set_text(r'%i iterations, $e \sim %.4f$' %
                      (i + 1, results['ellipticity']))

        ax2.imshow(results['GaussianWeighted'], origin='lower')
        #position angle in degrees, as above
        ang = 0.5 * np.degrees(np.arctan2(results['e2'], results['e1']))

        e.center = (results['centreX'] - 1, results['centreY'] - 1)
        e.width = results['a']
        e.height = results['b']
        e.angle = ang

        return ax2, text, e

    #note that frames defines the number of times the animate function is called
    anim = animation.FuncAnimation(fig,
                                   animate,
                                   init_func=init,
                                   frames=frames,
                                   interval=2,
                                   blit=True)
    anim.save('shapeMovie.mp4', fps=0.7)
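

#A minimal usage sketch, not part of the original snippet: render the
#movie with more smoothing iterations shown; all keyword arguments are
#the ones defined in the signature above.
shapeMovie(frames=30, zoom=20, subtractMedian=True)
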
if __name__ == '__main__':
    import pyfits as pf
    import glob as g
    import math
    import scipy.ndimage.measurements
    from scipy.ndimage import interpolation
    from support import files as fileIO
    from analysis import shape
    from support import logger as lg

    #inputs
    cut = 500
    files = g.glob('TOL*')

    log = lg.setUpLogger('centroidPSF.log')
    log.info('\n\nStarting to derive centred cutouts...')

    all = []
    for i, file in enumerate(files):
        #load data
        data = pf.getdata(file)

        #find the centroid pixel with fwcentroid
        #midy, midx = fwcentroid(data)
        #midx += 1.
        #midy += 1.

        #scipy centre-of-mass
        #midy, midx = scipy.ndimage.measurements.center_of_mass(data)
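        #A hedged continuation sketch; the original snippet is truncated
        #here. Centroid via centre-of-mass, shift the PSF so the centroid
        #lands at the array centre, and store a 2*cut pixel wide cutout
        #(the exact recipe of the original is not shown).
        midy, midx = scipy.ndimage.measurements.center_of_mass(data)
        shifted = interpolation.shift(data,
                                      [data.shape[0] / 2. - midy,
                                       data.shape[1] / 2. - midx],
                                      order=3)
        my, mx = shifted.shape[0] / 2, shifted.shape[1] / 2
        all.append(shifted[my - cut:my + cut, mx - cut:mx + cut])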
    parser.add_option('-e', '--extension', dest='extension',
                      help='Number of the FITS extension from which to read the data [default=1]', metavar='int')
    if printHelp:
        parser.print_help()
    else:
        return parser.parse_args()


if __name__ == '__main__':
    opts, args = processArgs()

    if opts.input is None:
        processArgs(True)
        sys.exit(8)

    log = lg.setUpLogger('reduction.log')
    log.info('\n\nStarting to reduce data...')

    if opts.output is None:
        output = opts.input.replace('.fits', '') + 'Reduced.fits'
    else:
        output = opts.output

    if opts.extension is None:
        ext = 1
    else:
        ext = int(opts.extension)

    #input values that are used in processing and save to the FITS headers
    values = dict(rnoise=4.5, dob=0.0, rdose=3e10, trapfile='data/cdm_euclid.dat', bias=1000.0, beta=0.6, fwc=175000,
                  vth=1.168e7, t=1.024e-2, vg=6.e-11, st=5.e-6, sfwc=730000., svg=1.0e-10, output=output,
        parser.add_option('-f', '--file', dest='file',
                          help="Input file e.g. 'Q0_00_00stars245ver2.fits'", metavar='string')
        parser.add_option('-s', '--sigma', dest='sigma',
                          help="Size of the Gaussian smoothing kernel", metavar='float')
        parser.add_option('-l', '--largest', dest='large',
                          help="The maximum number of pixels a largest object can cover", metavar='float')
        parser.add_option('-m', '--minimum', dest='minimum',
                          help="The minimum number of pixels a smallest object can cover", metavar='float')
        parser.add_option('-a', '--above_background', dest='above',
                          help="The significance of the detection (x sigma above the background)", metavar='float')
        if printHelp:
            parser.print_help()
        else:
            return parser.parse_args()

    log = lg.setUpLogger('sourceFinding.log')

    opts, args = processArgs()

    if opts.file is None:
        processArgs(True)
        sys.exit(8)

    settings = dict()
    if opts.sigma is not None:
        settings.update({'sigma': float(opts.sigma)})
    if opts.large is not None:
        settings.update({'clean_size_max': float(opts.large)})
    if opts.minimum is not None:
        settings.update({'clean_size_min': float(opts.minimum)})
    if opts.above is not None:
    #different runs
    runs = {
        'run1': dict(multiplier=1.5),
        'run2': dict(multiplier=0.5),
        'run3': dict(multiplier=2.0),
        'run4': dict(multiplier=3.0),
        'run5': dict(multiplier=4.0)
    }

    for key, value in runs.iteritems():
        if not os.path.exists(key):
            os.makedirs(key)

        #start a logger
        log = lg.setUpLogger(key + '/nonlinearityModelTransfer.log')
        log.info('Testing non-linearity model transfer...')
        log.info('Multiplier = %f' % value['multiplier'])

        if run:
            if debug:
                res = testNonlinearityModelTransfer(log,
                                                    psfs=2000,
                                                    file='data/psf1x.fits',
                                                    oversample=1.0)
            else:
                res = testNonlinearityModelTransfer(log)

            fileIO.cPickleDumpDictionary(res, key + '/nonlinModelResults.pk')

        if plot:
Beispiel #51
    def run(self):
        """
        This is the method that will be called when multiprocessing.
        """
        while not self.kill_received:
            # get a task from the queue
            try:
                filename = self.work_queue.get_nowait()
            except Queue.Empty:
                break

            #capture start time
            start_time = time.time()

            #file to process, path and FITS extension removed
            file = filename.split('/')[-1].split('.fits')[0]

            #setup logger
            self.log = lg.setUpLogger('PostProcessing.log')

            txt = 'Started processing %s' % filename
            print txt
            self.log.info(txt)

            #load data and cut out a correct region
            info = self.loadFITS(filename)
            data = self.cutoutRegion(info['data'])

            #apply CTI model and calculate a CTI map
            CTIed = self.radiateFullCCD(data)
            CTImap = self.generateCTImap(CTIed, data)

            #apply readout noise to the CTIed image
            noised = self.applyReadoutNoise(CTIed)

            #apply the first order correction and generate a residual map
            corrected = self.applyLinearCorrection(noised['readnoised'])
            CTImap2 = self.generateCTImap(corrected, data)

            #convert the readout noised image to ADUs
            datai = self.discretisetoADUs(noised['readnoised'])

            #write the outputs and compress
            file = file.replace('.dat', '')
            self.writeFITSfile(datai, file + 'CTI.fits')
            self.compressAndRemoveFile(file + 'CTI.fits')
            self.writeFITSfile(corrected,
                               file + 'CTIcorrected.fits',
                               unsigned16bit=False)
            self.compressAndRemoveFile(file + 'CTIcorrected.fits')
            self.writeFITSfile(CTImap,
                               file + 'CTImap.fits',
                               unsigned16bit=False)
            self.compressAndRemoveFile(file + 'CTImap.fits')
            self.writeFITSfile(CTImap2,
                               file + 'CTIresidual.fits',
                               unsigned16bit=False)
            self.compressAndRemoveFile(file + 'CTIresidual.fits')

            # store the result, not really necessary in this case, but for info...
            txt = '\nFinished processing %s.fits, took about %.1f minutes to run' % (
                file, (time.time() - start_time) / 60.)
            self.result_queue.put(txt)
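
    #A hedged sketch of how a worker like this is typically driven; the
    #class name PostProcessor and the queue set-up below are assumptions,
    #not part of the original excerpt:
    #
    #    work_queue = multiprocessing.Queue()
    #    result_queue = multiprocessing.Queue()
    #    for f in g.glob('*.fits'):
    #        work_queue.put(f)
    #    workers = [PostProcessor(work_queue, result_queue) for _ in range(4)]
    #    for w in workers:
    #        w.start()
    #    for w in workers:
    #        w.join()
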
        # new image HDU
        hdu = pf.ImageHDU(data=data)

        # add info
        for key, value in self.settings.iteritems():
            hdu.header.update(key.upper(), value)

        hdu.header.add_history("If questions, please contact Sami-Matias Niemi (smn2 at mssl.ucl.ac.uk).")
        hdu.header.add_history(
            "This file has been created with the VISsim Python Package at %s"
            % datetime.datetime.isoformat(datetime.datetime.now())
        )
        hdu.verify("fix")

        ofd.append(hdu)

        # write the actual file
        ofd.writeto(output)


if __name__ == "__main__":
    log = lg.setUpLogger("generateFlat.log")

    settings = dict(sigma=0.01)
    flat = flatField(log, **settings)
    data = flat.generateFlat()
    flat.writeFITS(data, "VISFlatField1percent.fits")

    log.info("Run finished...\n\n\n")
    debug = True
    plot = True

    #different runs
    runs = {'run1': dict(multiplier=1.5),
            'run2': dict(multiplier=0.5),
            'run3': dict(multiplier=2.0),
            'run4': dict(multiplier=3.0),
            'run5': dict(multiplier=4.0)}

    for key, value in runs.iteritems():
        if not os.path.exists(key):
            os.makedirs(key)

        #start a logger
        log = lg.setUpLogger(key+'/nonlinearityModelTransfer.log')
        log.info('Testing non-linearity model transfer...')
        log.info('Multiplier = %f' % value['multiplier'])

        if run:
            if debug:
                res = testNonlinearityModelTransfer(log, psfs=2000, file='data/psf1x.fits', oversample=1.0)
            else:
                res = testNonlinearityModelTransfer(log)

            fileIO.cPickleDumpDictionary(res, key+'/nonlinModelResults.pk')

        if plot:
            if not run:
                res = cPickle.load(open(key+'/nonlinModelResults.pk'))
Beispiel #54
                                         limit=limit,
                                         verbose=verbose)

        #paste cosmic rays
        self.image += self.cosmicrayMap

        return self.image


if __name__ == "__main__":
    from support import logger as lg
    from support import files as fileIO
    from scipy import ndimage

    #set up logger
    log = lg.setUpLogger('VISsim.log')

    #test section
    crImage = np.zeros((2066, 2048), dtype=np.float64)

    #cosmic ray instance
    cosmics = cosmicrays(log, crImage)

    #add cosmic rays up to the covering fraction
    CCD_cr = cosmics.addUpToFraction(1.4, limit=None, verbose=True)

    affected = np.count_nonzero(CCD_cr)
    print affected, affected * 100. / (CCD_cr.shape[0] * CCD_cr.shape[1])

    #save to FITS
    fileIO.writeFITS(CCD_cr, 'cosmicrayTest.fits', int=False)
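
#A hedged sketch of the covering-fraction criterion exercised above:
#deposit charge in random pixels until the requested percentage of the
#frame is non-zero (the real cosmicrays class draws realistic track
#lengths, orientations and energies instead of single pixels).
def addUpToFractionSimple(image, coveringFraction):
    target = coveringFraction / 100. * image.size
    while np.count_nonzero(image) < target:
        y = np.random.randint(0, image.shape[0])
        x = np.random.randint(0, image.shape[1])
        image[y, x] += np.random.uniform(1.e2, 1.e4)  #electrons deposited
    return image
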
    #different runs
    runs = {'run1': dict(phase=0.0, multiplier=1.5),
            'run2': dict(phase=0.5, multiplier=1.5),
            'run3': dict(phase=1.0, multiplier=1.5),
            'run4': dict(phase=0.98, multiplier=0.5),
            'run5': dict(phase=0.98, multiplier=2.0),
            'run6': dict(phase=0.98, multiplier=3.0),
            'run7': dict(phase=0.98, multiplier=4.0)}

    for key, value in runs.iteritems():
        print key, value
        if not os.path.exists(key):
            os.makedirs(key)

        #start a logger
        log = lg.setUpLogger(key+'/nonlinearityCalibration.log')
        log.info('Testing non-linearity calibration...')
        log.info('Phase = %f' % value['phase'])
        log.info('Multiplier = %f' % value['multiplier'])

        if run:
            if debug:
                testNonlinearityModel(phase=value['phase'], outdir=key)
                res = testNonlinearity(log, psfs=2000, file='data/psf1x.fits', oversample=1.0, phs=value['phase'])
            else:
                res = testNonlinearity(log)

            fileIO.cPickleDumpDictionary(res, key+'/nonlinResults.pk')

        if plot:
            if not run: