Example #1
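# The snippets below come from Piff's test suite and assume its usual
# test-module imports. A minimal set, inferred from the names used in the
# code (the helpers make_average and params_to_stars are assumed to be
# defined elsewhere in the same test module, not imported from piff):
import glob
import os

import fitsio
import numpy as np

import piff
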
def test_meanify():

    if __name__ == '__main__':
        rtol = 4.e-1
        atol = 5.e-2
        bin_spacing = 30  # arcsec
    else:
        rtol = 1.e-1
        atol = 3.e-2
        bin_spacing = 150  # arcsec

    psf_file = 'test_mean_*.piff'
    average_file = 'average.fits'

    psfs_list = sorted(glob.glob(os.path.join('output', 'test_mean_*.piff')))

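    # config0: pass meanify an explicit list of PSF files and only the name
    # of the average file to write; every other 'hyper' option is left at
    # its default.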
    config0 = {
        'output': {
            'file_name': psfs_list,
        },
        'hyper': {
            'file_name': 'output/' + average_file,
        }
    }

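    # config1: use the glob pattern plus 'dir', take the mean in each spatial
    # bin, and average only parameters 0 and 2 (hlr and g2 in these tests),
    # so the g1 column of PARAMS0 should come back as zeros.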
    config1 = {
        'output': {
            'file_name': psf_file,
            'dir': 'output',
        },
        'hyper': {
            'file_name': average_file,
            'dir': 'output',
            'bin_spacing': bin_spacing,
            'statistic': 'mean',
            'params_fitted': [0, 2]
        }
    }

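    # config2: same files and bin spacing as config1, but use the median
    # statistic and average all parameters.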
    config2 = {
        'output': {
            'file_name': psf_file,
            'dir': 'output',
        },
        'hyper': {
            'file_name': average_file,
            'dir': 'output',
            'bin_spacing': bin_spacing,
            'statistic': 'median',
        }
    }

    for config in [config0, config1, config2]:
        piff.meanify(config)
        ## test if found initial average
        average = fitsio.read(os.path.join('output', average_file))
        params0 = make_average(coord=average['COORDS0'][0] / 0.26, gp=False)
        keys = ['hlr', 'g1', 'g2']
        for i, key in enumerate(keys):
            if config == config1 and i == 1:
                np.testing.assert_allclose(np.zeros(
                    len(average['PARAMS0'][0][:, i])),
                                           average['PARAMS0'][0][:, i],
                                           rtol=0,
                                           atol=0)
            else:
                np.testing.assert_allclose(params0[key],
                                           average['PARAMS0'][0][:, i],
                                           rtol=rtol,
                                           atol=atol)

    ## gaussian process testing of meanify
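    # Build a synthetic field of 1000 stars with positions drawn uniformly in
    # [0, 2048], split it 900/100 into training and validation sets, and check
    # that a GP interpolation seeded with the meanify average recovers the
    # validation parameters to within the tolerances above.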
    np.random.seed(68)
    x = np.random.uniform(0, 2048, size=1000)
    y = np.random.uniform(0, 2048, size=1000)
    coord = np.array([x, y]).T
    average = make_average(coord=coord)

    stars = params_to_stars(average, noise=0.0, rng=None)
    stars_training = stars[:900]
    stars_validation = stars[900:]

    fit_hyp = ['none', 'isotropic']

    for fit in fit_hyp:
        gp = piff.GPInterp(kernel="0.009 * RBF(300.*0.26)",
                           optimizer=fit,
                           white_noise=1e-5,
                           average_fits='output/average.fits')
        gp.initialize(stars_training)
        gp.solve(stars_training)
        stars_interp = gp.interpolateList(stars_validation)
        params_interp = np.array([s.fit.params for s in stars_interp])
        params_validation = np.array([s.fit.params for s in stars_validation])
        params_training = np.array([s.fit.params for s in stars_training])
        np.testing.assert_allclose(params_interp,
                                   params_validation,
                                   rtol=rtol,
                                   atol=atol)
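
# make_average and params_to_stars are helpers defined elsewhere in Piff's
# test module and are not shown in these snippets. Purely as a hypothetical
# sketch of the kind of object make_average has to return for the code above
# to work (the functional form and field names below are assumptions, not
# Piff's actual helper):
def make_average_sketch(coord, gp=True):
    # The real helper also takes a `gp` flag (passed as gp=False above); its
    # effect is not modeled in this sketch.
    # Smooth, deterministic PSF parameters as a function of position, so that
    # binned averages and GP interpolation have something coherent to recover.
    u, v = coord[:, 0], coord[:, 1]
    hlr = 0.5 + 1e-4 * (u + v)
    g1 = 1e-5 * u
    g2 = 1e-5 * v
    return {'u': u, 'v': v, 'hlr': hlr, 'g1': g1, 'g2': g2}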
Example #2
def test_invalid():

    psf_file = 'test_mean_*.piff'
    average_file = 'average.fits'

    psfs_list = os.path.join('output', 'test_mean_*.piff')

    if __name__ == '__main__':
        logger = piff.config.setup_logger(verbose=2)
    else:
        logger = piff.config.setup_logger(
            log_file='output/test_invalid_config.log')

    config = {
        'output': {
            'file_name': psfs_list,
        },
        'hyper': {
            'file_name': 'output/' + average_file,
            'bin_spacing': 30,
            'statistic': 'mean',
            'params_fitted': [0, 2]
        }
    }

    # Both output and hyper are required
    with np.testing.assert_raises(ValueError):
        piff.meanify(config={'output': config['output']}, logger=logger)
    with np.testing.assert_raises(ValueError):
        piff.meanify(config={'hyper': config['hyper']}, logger=logger)
    # Both require file_name
    with np.testing.assert_raises(ValueError):
        piff.meanify(config={
            'output': config['output'],
            'hyper': {}
        },
                     logger=logger)
    with np.testing.assert_raises(ValueError):
        piff.meanify(config={
            'hyper': config['hyper'],
            'output': {}
        },
                     logger=logger)
    # Invalid statistic
    config['hyper']['statistic'] = 'invalid'
    with np.testing.assert_raises(ValueError):
        piff.meanify(config=config, logger=logger)
    config['hyper']['statistic'] = 'mean'
    # Invalid params_fitted
    config['hyper']['params_fitted'] = 0
    with np.testing.assert_raises(TypeError):
        piff.meanify(config=config, logger=logger)
    config['hyper']['params_fitted'] = [0, 2]
    # Invalid file_name
    config['output']['file_name'] = []
    with np.testing.assert_raises(ValueError):
        piff.meanify(config=config, logger=logger)
    config['output']['file_name'] = os.path.join('output', 'invalid_*.piff')
    with np.testing.assert_raises(ValueError):
        piff.meanify(config=config, logger=logger)
    config['output']['file_name'] = 7
    with np.testing.assert_raises(ValueError):
        piff.meanify(config=config, logger=logger)
    config['output']['file_name'] = psfs_list
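
    # For contrast with the failure cases above: the minimal configuration
    # these checks accept is both an 'output' and a 'hyper' section, each
    # with a 'file_name' entry (the same shape as the config built at the
    # top of this function). A sketch, run only if the PSF files written by
    # the earlier tests are actually present in output/:
    valid_config = {
        'output': {'file_name': psfs_list},
        'hyper': {'file_name': 'output/' + average_file},
    }
    if glob.glob(psfs_list):
        piff.meanify(config=valid_config, logger=logger)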
Example #3
def test_meanify():

    if __name__ == '__main__':
        rtol = 1.e-1
        atol = 2.e-2
        bin_spacing = 30  # arcsec
    else:
        rtol = 1.e-1
        atol = 3.e-2
        bin_spacing = 150  # arcsec

    psf_file = 'test_mean_*.piff'
    average_file = 'average.fits'

    psfs_list = sorted(glob.glob(os.path.join('output', 'test_mean_*.piff')))

    config0 = {
        'output' : {
            'file_name' : psfs_list,
        },
        'hyper' : {
            'file_name' : 'output/'+average_file,
        }}

    config1 = {
        'output' : {
            'file_name' : psf_file,
            'dir': 'output',
        },
        'hyper' : {
            'file_name' : average_file,
            'dir': 'output',
            'bin_spacing' : bin_spacing,
            'statistic' : 'mean',
            'params_fitted': [0, 2]
        }}

    config2 = {
        'output' : {
            'file_name' : psf_file,
            'dir': 'output',
        },
        'hyper' : {
            'file_name' : average_file,
            'dir': 'output',
            'bin_spacing' : bin_spacing,
            'statistic' : 'median',
        }}

    for config in [config0, config1, config2]:
        piff.meanify(config)
        ## test if found initial average
        average = fitsio.read(os.path.join('output',average_file))
        params0 = make_average(coord=average['COORDS0'][0] / 0.26, gp=False)
        keys = ['hlr', 'g1', 'g2']
        for i,key in enumerate(keys):
            if config == config1 and i == 1:
                np.testing.assert_allclose(np.zeros(len(average['PARAMS0'][0][:,i])),
                                           average['PARAMS0'][0][:,i], rtol=0, atol=0)
            else:
                np.testing.assert_allclose(params0[key], average['PARAMS0'][0][:,i],
                                           rtol=rtol, atol=atol)

    ## gaussian process testing of meanify 
    np.random.seed(68)
    x = np.random.uniform(0, 2048, size=1000)
    y = np.random.uniform(0, 2048, size=1000)
    coord = np.array([x,y]).T
    average = make_average(coord=coord)

    stars = params_to_stars(average, noise=0.0, rng=None)
    stars_training = stars[:900]
    stars_validation = stars[900:]

    fit_hyp = [False, True]

    for fit in fit_hyp:
        gp = piff.GPInterp2pcf(kernel="0.009 * RBF(300.*0.26)",
                               optimize=fit, white_noise=1e-5,
                               average_fits='output/average.fits')
        gp.initialize(stars_training)
        gp.solve(stars_training)
        stars_interp = gp.interpolateList(stars_validation)
        params_interp = np.array([s.fit.params for s in stars_interp])
        params_validation = np.array([s.fit.params for s in stars_validation])
        params_training = np.array([s.fit.params for s in stars_training])
        np.testing.assert_allclose(params_interp, params_validation, rtol=rtol, atol=atol)
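
# The average file written by piff.meanify is read back with fitsio in the
# tests above. A short sketch of inspecting it directly, using only the
# column names that appear in those tests ('COORDS0', 'PARAMS0'); the array
# shapes are inferred from how the tests index them:
def inspect_average(file_name='output/average.fits'):
    average = fitsio.read(file_name)
    coords0 = average['COORDS0'][0]  # bin centers; the tests above divide
                                     # these by 0.26 before use
    params0 = average['PARAMS0'][0]  # one row of averaged PSF parameters
                                     # per spatial bin (hlr, g1, g2 here)
    print(coords0.shape, params0.shape)
    return coords0, params0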