Example #1
def test_add_and_clear_results():
    num_secondary_bodies = 1
    input_file = os.path.join(orbitize.DATADIR, 'test_val.csv')
    data_table = read_input.read_file(input_file)
    system_mass = 1.0
    plx = 10.0
    mass_err = 0.1
    plx_err = 1.0
    # Initialize System object
    test_system = system.System(num_secondary_bodies,
                                data_table,
                                system_mass,
                                plx,
                                mass_err=mass_err,
                                plx_err=plx_err)
    # Initialize dummy results.Results object
    test_results = results.Results()
    # Add one result object
    test_system.add_results(test_results)
    assert len(test_system.results) == 1
    # Adds second result object
    test_system.add_results(test_results)
    assert len(test_system.results) == 2
    # Clears result objects
    test_system.clear_results()
    assert len(test_system.results) == 0
    # Add one more result object
    test_system.add_results(test_results)
    assert len(test_system.results) == 1
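These snippets come from orbitize test modules, so their module-level imports are not shown. A minimal sketch of the imports they rely on (the exact grouping in the original test files may differ):

import os

import numpy as np
import pytest
from astropy.io import fits as pyfits  # only needed by the script fragments (Examples #14 and #16)

import orbitize
import orbitize.read_input  # some snippets call orbitize.read_input.read_file(...)
from orbitize import gaia, hipparcos, read_input, results, sampler, system
from orbitize.driver import Driver

DATADIR = orbitize.DATADIR  # several snippets use a bare DATADIR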
Example #2
def test_save_and_load_results(results_to_test, has_lnlike=True):
    """
    Tests saving and reloading of a results object
        has_lnlike: allows for tests with and without lnlike values
            (e.g. OFTI doesn't output lnlike)
    """
    results_to_save = results_to_test
    if not has_lnlike:  # manipulate object to remove lnlike (as in OFTI)
        results_to_save.lnlike = None
    save_filename = 'test_results.h5'
    # Save to file
    results_to_save.save_results(save_filename)
    # Create new blank results object and load from file
    loaded_results = results.Results()
    loaded_results.load_results(save_filename, append=False)
    # Check if loaded results equal saved results
    assert results_to_save.sampler_name == loaded_results.sampler_name
    assert np.array_equal(results_to_save.post, loaded_results.post)
    if has_lnlike:
        assert np.array_equal(results_to_save.lnlike, loaded_results.lnlike)
    # Try to load the saved results again, this time appending
    loaded_results.load_results(save_filename, append=True)
    # Now check that the loaded results object has the expected size
    original_length = results_to_save.post.shape[0]
    expected_length = original_length * 2
    assert loaded_results.post.shape == (expected_length, 8)
    assert loaded_results.labels == std_labels
    if has_lnlike:
        assert loaded_results.lnlike.shape == (expected_length, )

    # check tau reference epoch is stored
    assert loaded_results.tau_ref_epoch == 50000

    # Clean up: Remove save file
    os.remove(save_filename)
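Several of these tests also assume a `std_labels` list, a `std_param_idx` dictionary, and a `simulate_orbit_sampling()` helper defined elsewhere in the test module. A minimal sketch of what they might look like is below; the label list matches the 8-column standard basis checked by the shape assertions, while the sampling ranges are illustrative assumptions, not the original implementation:

# Hypothetical stand-ins for helpers defined in orbitize's test module.
std_labels = ['sma1', 'ecc1', 'inc1', 'aop1', 'pan1', 'tau1', 'plx', 'mtot']
std_param_idx = {label: i for i, label in enumerate(std_labels)}


def simulate_orbit_sampling(n_sim_orbits):
    """Draw `n_sim_orbits` fake posterior samples (one row per orbit,
    one column per parameter in `std_labels`)."""
    sma = np.random.uniform(1., 100., n_sim_orbits)         # semi-major axis [au]
    ecc = np.random.uniform(0., 1., n_sim_orbits)           # eccentricity
    inc = np.random.uniform(0., np.pi, n_sim_orbits)        # inclination [rad]
    aop = np.random.uniform(0., 2. * np.pi, n_sim_orbits)   # argument of periastron [rad]
    pan = np.random.uniform(0., 2. * np.pi, n_sim_orbits)   # position angle of nodes [rad]
    tau = np.random.uniform(0., 1., n_sim_orbits)           # epoch of periastron passage [fraction of period]
    plx = np.random.normal(50., 1., n_sim_orbits)           # parallax [mas]
    mtot = np.random.normal(1.2, 0.1, n_sim_orbits)         # total mass [M_sun]
    return np.column_stack([sma, ecc, inc, aop, pan, tau, plx, mtot])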
Example #3
def test_init_and_add_samples():
    """
    Tests object creation and add_samples() with some simulated posterior samples
    Returns results.Results object
    """
    # Create object
    results_obj = results.Results(sampler_name='testing',
                                  tau_ref_epoch=50000,
                                  labels=std_labels)
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Check shape of results.post
    expected_length = n_orbit_draws1 + n_orbit_draws2
    assert results_obj.post.shape == (expected_length, 8)
    assert results_obj.lnlike.shape == (expected_length, )
    assert results_obj.tau_ref_epoch == 50000
    assert results_obj.labels == std_labels

    return results_obj
Example #4
def test_init_and_add_samples():
    """
    Tests object creation and add_samples() with some simulated posterior samples
    Returns results.Results object
    """

    input_file = os.path.join(orbitize.DATADIR, 'GJ504.csv')
    data = orbitize.read_input.read_file(input_file)

    # Create object
    results_obj = results.Results(sampler_name='testing',
                                  tau_ref_epoch=50000,
                                  labels=std_labels,
                                  num_secondary_bodies=1,
                                  data=data)
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Check shape of results.post
    expected_length = n_orbit_draws1 + n_orbit_draws2
    assert results_obj.post.shape == (expected_length, 8)
    assert results_obj.lnlike.shape == (expected_length, )
    assert results_obj.tau_ref_epoch == 50000
    assert results_obj.labels == std_labels

    return results_obj
Example #5
def test_load_v1_results():
    """
    Tests that loading a posterior generated with v1.0.0 of the code works.
    """

    myResults = results.Results()
    myResults.load_results('{}v1_posterior.hdf5'.format(DATADIR))

    n_draws = 100

    assert myResults.post.shape == (n_draws, 8)
    assert myResults.lnlike.shape == (n_draws, )
    assert myResults.tau_ref_epoch == 0
    assert myResults.labels == std_labels
    assert myResults.fitting_basis == 'Standard'
Example #6
def results_to_test():
    results_obj = results.Results(sampler_name='testing')
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike)
    # Return object for testing
    return results_obj
Example #7
def results_to_test():
    results_obj = results.Results(sampler_name='testing',
                                  tau_ref_epoch=50000,
                                  labels=std_labels,
                                  num_secondary_bodies=1)
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Return object for testing
    return results_obj
Example #8
def test_save_and_load_gaia_and_hipparcos():
    """
    Test that a Results object for a Gaia+Hipparcos fit
    is saved and loaded properly.
    """

    hip_num = '027321'
    gaia_num = 4792774797545105664
    num_secondary_bodies = 1
    path_to_iad_file = '{}HIP{}.d'.format(DATADIR, hip_num)

    myHip = hipparcos.HipparcosLogProb(path_to_iad_file, hip_num,
                                       num_secondary_bodies)
    myGaia = gaia.GaiaLogProb(gaia_num, myHip)

    input_file = os.path.join(DATADIR, 'betaPic.csv')
    data_table_with_rvs = read_input.read_file(input_file)
    mySys = system.System(1,
                          data_table_with_rvs,
                          1.22,
                          56.95,
                          mass_err=0.08,
                          plx_err=0.26,
                          hipparcos_IAD=myHip,
                          gaia=myGaia,
                          fit_secondary_mass=True)

    mySamp = sampler.MCMC(mySys, num_temps=1, num_walkers=50)
    mySamp.run_sampler(1, burn_steps=0)

    save_name = 'test_results.h5'
    mySamp.results.save_results(save_name)

    loadedResults = results.Results()
    loadedResults.load_results(save_name)

    assert np.all(loadedResults.system.hipparcos_IAD.epochs ==
                  mySys.hipparcos_IAD.epochs)
    assert np.all(loadedResults.system.tau_ref_epoch == mySys.tau_ref_epoch)
    assert np.all(loadedResults.system.gaia.ra == mySys.gaia.ra)

    os.remove(save_name)
Example #9
def test_save_and_load_results(results_to_test, has_lnlike=True):
    """
    Tests saving and reloading of a results object
        has_lnlike: allows for tests with and without lnlike values
            (e.g. OFTI doesn't output lnlike)
    """
    results_to_save = results_to_test
    if not has_lnlike:  # manipulate object to remove lnlike (as in OFTI)
        results_to_save.lnlike = None
    save_filename = 'test_results.h5'
    # Save to file
    results_to_save.save_results(save_filename)
    # Create new blank results object and load from file
    loaded_results = results.Results()
    loaded_results.load_results(save_filename, append=False)
    # Check if loaded results equal saved results
    assert results_to_save.sampler_name == loaded_results.sampler_name
    assert results_to_save.version_number == loaded_results.version_number
    assert np.array_equal(results_to_save.post, loaded_results.post)
    if has_lnlike:
        assert np.array_equal(results_to_save.lnlike, loaded_results.lnlike)
    # Try to load the saved results again, this time appending
    loaded_results.load_results(save_filename, append=True)
    # Now check that the loaded results object has the expected size
    original_length = results_to_save.post.shape[0]
    expected_length = original_length * 2
    assert loaded_results.post.shape == (expected_length, 8)
    assert loaded_results.labels == std_labels
    assert loaded_results.param_idx == std_param_idx
    if has_lnlike:
        assert loaded_results.lnlike.shape == (expected_length, )

    # check tau reference epoch is stored
    assert loaded_results.tau_ref_epoch == 58849

    # check that str fields are indeed strs
    # checking just one str entry probably is good enough
    assert isinstance(loaded_results.data['quant_type'][0], str)

    # Clean up: Remove save file
    os.remove(save_filename)
Example #10
def results_to_test():

    input_file = os.path.join(orbitize.DATADIR, 'GJ504.csv')
    data = orbitize.read_input.read_file(input_file)

    test_system = system.System(1, data, 1, 1)

    results_obj = results.Results(test_system, sampler_name='testing')
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike)
    # Return object for testing
    return results_obj
Example #11
def test_init_and_add_samples(radec_input=False):
    """
    Tests object creation and add_samples() with some simulated posterior samples
    Returns results.Results object
    """

    if radec_input:
        input_file = os.path.join(orbitize.DATADIR, 'test_val_radec.csv')
    else:
        input_file = os.path.join(orbitize.DATADIR, 'GJ504.csv')

    data = read_input.read_file(input_file)

    test_system = system.System(1, data, 1, 1)

    # Create object
    results_obj = results.Results(test_system, sampler_name='testing')
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike)
    # Check shape of results.post
    expected_length = n_orbit_draws1 + n_orbit_draws2
    assert results_obj.post.shape == (expected_length, 8)
    assert results_obj.lnlike.shape == (expected_length, )
    assert results_obj.tau_ref_epoch == 58849
    assert results_obj.labels == std_labels

    return results_obj
Example #12
def test_save_and_load_results(results_to_test,
                               format='hdf5',
                               has_lnlike=True):
    """
    Tests saving and reloading of a results object
        has_lnlike: allows for tests with and without lnlike values
            (e.g. OFTI doesn't output lnlike)
    """
    results_to_save = results_to_test
    if not has_lnlike:  # manipulate object to remove lnlike (as in OFTI)
        results_to_save.lnlike = None
    file_ext_dict = {
        'hdf5': '.h5',
        'fits': '.fits',
    }
    save_filename = 'test_results' + file_ext_dict[format]
    # Save to file
    results_to_save.save_results(save_filename, format=format)
    # Create new blank results object and load from file
    loaded_results = results.Results()
    loaded_results.load_results(save_filename, format=format, append=False)
    # Check if loaded results equal saved results
    assert results_to_save.sampler_name == loaded_results.sampler_name
    assert np.array_equal(results_to_save.post, loaded_results.post)
    if has_lnlike:
        assert np.array_equal(results_to_save.lnlike, loaded_results.lnlike)
    # Try to load the saved results again, this time appending
    loaded_results.load_results(save_filename, format=format, append=True)
    # Now check that the loaded results object has the expected size
    original_length = results_to_save.post.shape[0]
    expected_length = original_length * 2
    assert loaded_results.post.shape == (expected_length, 8)
    if has_lnlike:
        assert loaded_results.lnlike.shape == (expected_length, )
    # Clean up: Remove save file
    os.remove(save_filename)
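A sketch of how this variant might be exercised for both supported formats, assuming `results_to_test` is called as a plain factory function rather than injected as a pytest fixture:

test_save_and_load_results(results_to_test(), format='hdf5', has_lnlike=True)
test_save_and_load_results(results_to_test(), format='fits', has_lnlike=False)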
Example #13
def results_to_test():

    input_file = os.path.join(orbitize.DATADIR, 'GJ504.csv')
    data = orbitize.read_input.read_file(input_file)

    results_obj = results.Results(sampler_name='testing',
                                  tau_ref_epoch=50000,
                                  labels=std_labels,
                                  num_secondary_bodies=1,
                                  data=data)
    # Simulate some sample draws, assign random likelihoods
    n_orbit_draws1 = 1000
    sim_post = simulate_orbit_sampling(n_orbit_draws1)
    sim_lnlike = np.random.uniform(size=n_orbit_draws1)
    # Test adding samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Simulate some more sample draws
    n_orbit_draws2 = 2000
    sim_post = simulate_orbit_sampling(n_orbit_draws2)
    sim_lnlike = np.random.uniform(size=n_orbit_draws2)
    # Test adding more samples
    results_obj.add_samples(sim_post, sim_lnlike, labels=std_labels)
    # Return object for testing
    return results_obj
Example #14
    # Fragment of a larger analysis script: assumes `data_table`, `filename`,
    # `planet`, `rv_str`, `suffix`, and `astrometry_DATADIR` were defined earlier.
    print(data_table)
    print(filename)

    hdf5_filename = os.path.join(
        astrometry_DATADIR, "figures", "HR_8799_" + planet,
        'posterior_{0}_{1}_{2}.hdf5'.format(rv_str, planet, suffix))
    print(hdf5_filename)
    from orbitize import results
    loaded_results = results.Results()  # Create blank results object for loading
    loaded_results.load_results(hdf5_filename)

    # `pyfits` is assumed to be astropy.io.fits, imported earlier in the script
    with pyfits.open(
            os.path.join(
                astrometry_DATADIR, "figures", "HR_8799_" + planet,
                'chain_{0}_{1}_{2}.hdf5'.format(rv_str, planet,
                                                suffix))) as hdulist:
        chains = hdulist[0].data[0, :, 1000::, :]

    print(chains.shape)
    chains = np.reshape(chains,
                        (chains.shape[0] * chains.shape[1], chains.shape[2]))
    print(chains.shape)
    chains_a = chains[:, 0]
    where_lt90 = np.where(chains_a < 100)
Example #15
def test_mcmc_runs(num_temps=0, num_threads=1):
    """
    Tests the MCMC sampler by making sure it even runs
    Args:
        num_temps: Number of temperatures to use
            Uses Parallel Tempering MCMC (ptemcee) if > 1,
            otherwise, uses the Affine-Invariant Ensemble Sampler (emcee)
        num_threads: number of threads to run
    """

    # use the test_csv dir
    input_file = os.path.join(orbitize.DATADIR, 'GJ504.csv')
    data_table = read_input.read_file(input_file)
    # Manually set 'object' column of data table
    data_table['object'] = 1

    # construct Driver
    n_walkers = 100
    myDriver = Driver(input_file,
                      'MCMC',
                      1,
                      1,
                      0.01,
                      mcmc_kwargs={
                          'num_temps': num_temps,
                          'num_threads': num_threads,
                          'num_walkers': n_walkers
                      })

    # run it a little (tests 0 burn-in steps)
    myDriver.sampler.run_sampler(100)
    assert myDriver.sampler.results.post.shape[0] == 100

    # run it a little more
    myDriver.sampler.run_sampler(1000, burn_steps=1)
    assert myDriver.sampler.results.post.shape[0] == 1100

    # run it a little more (tests adding to results object, and periodic saving)
    output_filename = os.path.join(orbitize.DATADIR, 'test_mcmc.hdf5')
    myDriver.sampler.run_sampler(400,
                                 burn_steps=1,
                                 output_filename=output_filename,
                                 periodic_save_freq=2)

    # test that the saved results object exists and has the expected 1500 samples
    assert os.path.exists(output_filename)
    saved_results = results.Results()
    saved_results.load_results(output_filename)
    assert saved_results.post.shape[0] == 1500
    assert saved_results.curr_pos is not None  # current positions should be saved
    assert np.all(saved_results.curr_pos == myDriver.sampler.curr_pos)
    # also check it is consistent with the internal results object in myDriver
    assert myDriver.sampler.results.post.shape[0] == 1500

    # run it a little more, testing that everything gets saved even if periodic_save_freq is not a multiple of the number of steps
    output_filename_2 = os.path.join(orbitize.DATADIR, 'test_mcmc_v1.hdf5')
    myDriver.sampler.run_sampler(500,
                                 burn_steps=1,
                                 output_filename=output_filename_2,
                                 periodic_save_freq=3)
    assert myDriver.sampler.results.post.shape[0] == 2000

    # test that lnlikes being saved are correct
    returned_lnlike_test = myDriver.sampler.results.lnlike[0]
    computed_lnlike_test = myDriver.sampler._logl(
        myDriver.sampler.results.post[0])

    assert returned_lnlike_test == pytest.approx(computed_lnlike_test,
                                                 abs=0.01)

    # test resuming and restarting from a previous save
    new_sampler = sampler.MCMC(myDriver.system,
                               num_temps=num_temps,
                               num_walkers=n_walkers,
                               num_threads=num_threads,
                               prev_result_filename=output_filename)
    assert new_sampler.results.post.shape[0] == 1500
    new_sampler.run_sampler(500, burn_steps=1)
    assert new_sampler.results.post.shape[0] == 2000
    assert new_sampler.results.post[0, 0] == myDriver.sampler.results.post[0, 0]
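Per the docstring, the same test body can exercise either sampler backend; a minimal sketch of invoking it directly (assuming the module-level imports sketched after Example #1):

if __name__ == '__main__':
    test_mcmc_runs(num_temps=0, num_threads=1)  # Affine-Invariant Ensemble Sampler (emcee)
    test_mcmc_runs(num_temps=2, num_threads=1)  # Parallel-Tempered MCMC (ptemcee)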
Example #16
    # Fragment of a larger plotting script: assumes `planet`, `suffix_withrvs`,
    # `astrometry_DATADIR`, and `out_pngs` were defined earlier.

    import matplotlib.pyplot as plt
    filename = "{0}/HR8799{1}_rvs.csv".format(astrometry_DATADIR, planet)
    data_table_withrvs = orbitize.read_input.read_file(filename)
    filename = "{0}/HR8799{1}.csv".format(astrometry_DATADIR, planet)
    data_table_norv = orbitize.read_input.read_file(filename)
    if 0:
        hdf5_filename = os.path.join(
            astrometry_DATADIR, "figures", "HR_8799_" + planet,
            'posterior_{0}_{1}_{2}.hdf5'.format("withrvs", planet,
                                                suffix_withrvs))
        print(hdf5_filename)
        # print("/data/osiris_data/astrometry/figures/HR_8799_bc/posterior_withrvs_bc_test_joint_16_512_1000_2_True_coplanar.hdf5")
        # exit()
        loaded_results_withrvs = results.Results()  # Create blank results object for loading
        loaded_results_withrvs.load_results(hdf5_filename)
        param_list = [
            "sma1", "ecc1", "inc1", "aop1", "pan1", "epp1", "sma2", "ecc2",
            "inc2", "aop2", "pan2", "epp2", "plx", "sysrv", "mtot"
        ]
        corner_plot_fig = loaded_results_withrvs.plot_corner(
            param_list=param_list)
        corner_plot_fig.savefig(
            os.path.join(
                out_pngs, "HR_8799_" + planet,
                "corner_plot_withrvs_{0}_{1}.png".format(
                    planet, suffix_withrvs)))
        plt.show()

    suffix_norv2 = "test_joint_16_512_1000_2_False_coplanar"