# Example #1
# 0
def main():
    """Run a masked-sky regular-field analysis with MultiNest.

    Builds masked mock data/covariance at Nside=2, 23 GHz, assembles the
    WMAP regular field, analytic CRE and YMW16 free-electron factories,
    samples the posterior under a flat prior, and saves the samples on
    MPI rank 0.
    """
    #log.basicConfig(filename='imagine.log', level=log.DEBUG)

    nside, freq = 2, 23

    # masked mock data/covariance; apply_mock would ignore masked input
    # because of the mismatch in keys
    mock_data, mock_cov = mock_errfix(nside, freq)
    mock_mask = mask_map(nside, freq)

    #likelihood = EnsembleLikelihood(mock_data, mock_cov, mock_mask)
    likelihood = SimpleLikelihood(mock_data, mock_cov, mock_mask)

    breg_factory = BregWMAPFactory(active_parameters=('b0', 'psi0', 'psi1',
                                                      'chi0'))
    breg_factory.parameter_ranges = dict(b0=(0., 10.),
                                         psi0=(0., 50.),
                                         psi1=(0., 2.),
                                         chi0=(0., 50.))
    cre_factory = CREAnaFactory(active_parameters=('alpha', 'r0', 'z0'))
    cre_factory.parameter_ranges = dict(alpha=(1., 5.),
                                        r0=(1., 10.),
                                        z0=(0.1, 5.))
    factory_list = [breg_factory, cre_factory, FEregYMW16Factory()]

    prior = FlatPrior()

    xmlpath = './params_masked_regular.xml'
    # a dummy Measurements object only triggers the simulator: with masked
    # input, simulator outputs would not be masked due to mismatch in keys
    dummy = np.zeros((1, 12 * nside**2))
    trigger = Measurements()
    for component in ('Q', 'U'):
        trigger.append(('sync', str(freq), str(nside), component), dummy)
    simer = Hammurabi(measurements=trigger, xml_path=xmlpath)

    ensemble_size = 1
    pipe = MultinestPipeline(simer, factory_list, likelihood, prior,
                             ensemble_size)
    pipe.random_type = 'free'
    pipe.sampling_controllers = dict(resume=False,
                                     verbose=True,
                                     n_live_points=4000)
    results = pipe()

    # only the MPI master writes the posterior samples to disk
    if mpirank == 0:
        np.savetxt('posterior_masked_regular.txt', results['samples'])
def testfield():
    """Toy end-to-end IMAGINE run: recover (a, b) of TestField.

    Mock 1D data are drawn from gaussian_random(mean=a, std=b)_x * cos(x)
    with x in (0, 2pi), wrapped in Measurements/Covariances, and sampled
    with pymultinest under a flat prior; MPI rank 0 renders a corner plot
    of the posterior.

    (original note: log.basicConfig(filename='imagine.log', level=log.INFO))
    """
    """
    # step 0, set 'a' and 'b', 'mea_std'

    TestField in LiSimulator is modeled as
        field = gaussian_random(mean=a,std=b)_x * cos(x)
        where x in (0,2pi)

    for generating mock data we need
    true values of a and b: true_a, true_b, mea_seed
    measurement uncertainty: mea_std
    measurement points, positioned in (0,2pi) evenly, due to TestField modelling
    """
    true_a = 3.
    true_b = 6.
    mea_std = 0.1  # std of gaussian measurement error
    mea_seed = 233
    mea_points = 10  # data points in measurements
    truths = [true_a, true_b]  # will be used in visualizing posterior
    """
    # step 1, prepare mock data
    """
    """
    # 1.1, generate measurements
    mea_field = signal_field + noise_field
    """
    x = np.linspace(0, 2. * np.pi, mea_points)
    np.random.seed(mea_seed)  # seed for signal field
    # signal amplitude is itself a Gaussian draw per point, modulated by cos(x)
    signal_field = np.multiply(
        np.cos(x), np.random.normal(loc=true_a, scale=true_b, size=mea_points))
    # add measurement noise; vstack yields the (1, mea_points) shape expected
    mea_field = np.vstack([
        signal_field + np.random.normal(loc=0., scale=mea_std, size=mea_points)
    ])
    """
    # 1.2, generate covariances
    """
    # pre-defined according to measurement error
    mea_cov = (mea_std**2) * np.eye(mea_points)
    """
    # 1.3 assemble in imagine convention
    """

    mock_data = Measurements()  # create empty Measrurements object
    mock_cov = Covariances()  # create empty Covariance object
    # pick up a measurement
    mock_data.append(('test', 'nan', str(mea_points), 'nan'), mea_field, True)
    mock_cov.append(('test', 'nan', str(mea_points), 'nan'), mea_cov, True)
    """
    # 1.4, visualize mock data
    """
    #if mpirank == 0:
    #matplotlib.pyplot.plot(x, mock_data[('test', 'nan', str(mea_points), 'nan')].to_global_data()[0])
    #matplotlib.pyplot.savefig('testfield_mock.pdf')
    """
    # step 2, prepare pipeline and execute analysis
    """
    """
    # 2.1, ensemble likelihood
    """
    likelihood = EnsembleLikelihood(
        mock_data, mock_cov)  # initialize likelihood with measured info
    """
    # 2.2, field factory list
    """
    factory = TestFieldFactory(
        active_parameters=('a', 'b'))  # factory with single active parameter
    factory.parameter_ranges = {
        'a': (0, 10),
        'b': (0, 10)
    }  # adjust parameter range for Bayesian analysis
    factory_list = [factory]  # likelihood requires a list/tuple of factories
    """
    # 2.3, flat prior
    """
    prior = FlatPrior()
    """
    # 2.4, simulator 
    """
    simer = LiSimulator(mock_data)
    """
    # 2.5, pipeline
    """
    ensemble_size = 10
    pipe = MultinestPipeline(simer, factory_list, likelihood, prior,
                             ensemble_size)
    pipe.random_type = 'free'
    pipe.sampling_controllers = {
        'n_iter_before_update': 1,
        'n_live_points': 400,
        'verbose': True,
        'resume': False
    }
    results = pipe()  # run with pymultinest
    """
    # step 3, visualize (with corner package)
    """
    if mpirank == 0:
        samples = results['samples']
        # samples come out in unit-cube variables; map each column back to
        # its physical parameter range before plotting
        for i in range(len(
                pipe.active_parameters)):  # convert variables into parameters
            low, high = pipe.active_ranges[pipe.active_parameters[i]]
            for j in range(samples.shape[0]):
                samples[j, i] = unity_mapper(samples[j, i], low, high)
        # corner plot
        corner.corner(samples[:, :len(pipe.active_parameters)],
                      range=[0.99] * len(pipe.active_parameters),
                      quantiles=[0.02, 0.5, 0.98],
                      labels=pipe.active_parameters,
                      show_titles=True,
                      title_kwargs={"fontsize": 15},
                      color='steelblue',
                      truths=truths,
                      truth_color='firebrick',
                      plot_contours=True,
                      hist_kwargs={'linewidth': 2},
                      label_kwargs={'fontsize': 15})
        matplotlib.pyplot.savefig('testfield_posterior.pdf')
# Example #3
# 0
def testfield(measure_size, simulation_size, make_plots=True, debug=False):
    """Toy IMAGINE run with the Dynesty sampler on TestField mock data.

    :param measure_size: number of 1D measurement points in (0, 2pi)
    :param simulation_size: ensemble size of the pipeline, also the number
        of repeated mock measurements used for covariance re-estimation
    :param make_plots: if True, MPI rank 0 saves mock-data/posterior plots
    :param debug: if True, log at DEBUG level
    """
    if debug:
        log.basicConfig(filename='imagine_li_dynesty.log', level=log.DEBUG)
    else:
        log.basicConfig(filename='imagine_li_dynesty.log')
    """

    :return:

    log.basicConfig(filename='imagine.log', level=log.INFO)
    """
    """
    # step 0, set 'a' and 'b', 'mea_std'

    TestField in LiSimulator is modeled as
        field = gaussian_random(mean=a,std=b)_x * cos(x)
        where x in (0,2pi)

    for generating mock data we need
    true values of a and b: true_a, true_b, mea_seed
    measurement uncertainty: mea_std
    measurement points, positioned in (0,2pi) evenly, due to TestField modelling
    """
    true_a = 3.
    true_b = 6.
    mea_std = 0.1  # std of gaussian measurement error
    mea_seed = 233
    truths = [true_a, true_b]  # will be used in visualizing posterior
    """
    # step 1, prepare mock data
    """
    """
    # 1.1, generate measurements
    mea_field = signal_field + noise_field
    """
    x = np.linspace(0, 2. * np.pi, measure_size)  # data points in measurements
    np.random.seed(mea_seed)  # seed for signal field
    signal_field = np.multiply(
        np.cos(x), np.random.normal(loc=true_a,
                                    scale=true_b,
                                    size=measure_size))
    mea_field = np.vstack([
        signal_field +
        np.random.normal(loc=0., scale=mea_std, size=measure_size)
    ])
    """
    # 1.2, generate covariances
    what's the difference between pre-define dan re-estimated?
    """
    # re-estimate according to measurement error
    mea_repeat = np.zeros((simulation_size, measure_size))
    for i in range(simulation_size):  # times of repeated measurements
        mea_repeat[i, :] = signal_field + np.random.normal(
            loc=0., scale=mea_std, size=measure_size)
    mea_cov = oas_mcov(mea_repeat)[1]

    print(mpirank, 're-estimated: \n', mea_cov, 'slogdet',
          mpi_slogdet(mea_cov))

    # pre-defined according to measurement error
    # NOTE(review): the re-estimated covariance above is only printed for
    # comparison; it is overwritten here by the pre-defined one
    mea_cov = (mea_std**2) * mpi_eye(measure_size)

    print(mpirank, 'pre-defined: \n', mea_cov, 'slogdet', mpi_slogdet(mea_cov))
    """
    # 1.3 assemble in imagine convention
    """

    mock_data = Measurements()  # create empty Measrurements object
    mock_cov = Covariances()  # create empty Covariance object
    # pick up a measurement
    mock_data.append(('test', 'nan', str(measure_size), 'nan'), mea_field,
                     True)
    mock_cov.append(('test', 'nan', str(measure_size), 'nan'), mea_cov, True)
    """
    # 1.4, visualize mock data
    """
    if mpirank == 0 and make_plots:
        plt.plot(x,
                 mock_data[('test', 'nan', str(measure_size), 'nan')].data[0])
        plt.savefig('testfield_mock_li.pdf')
    """
    # step 2, prepare pipeline and execute analysis
    """
    """
    # 2.1, ensemble likelihood
    """
    likelihood = EnsembleLikelihood(
        mock_data, mock_cov)  # initialize likelihood with measured info
    """
    # 2.2, field factory list
    """
    factory = TestFieldFactory(
        active_parameters=('a', 'b'))  # factory with single active parameter
    factory.parameter_ranges = {
        'a': (0, 10),
        'b': (0, 10)
    }  # adjust parameter range for Bayesian analysis
    factory_list = [factory]  # likelihood requires a list/tuple of factories
    """
    # 2.3, flat prior
    """
    prior = FlatPrior()
    """
    # 2.4, simulator
    """
    simer = LiSimulator(mock_data)
    """
    # 2.5, pipeline
    """
    pipe = DynestyPipeline(simer, factory_list, likelihood, prior,
                           simulation_size)
    pipe.random_type = 'controllable'  # 'fixed' random_type doesnt work for Dynesty pipeline, yet
    pipe.seed_tracer = int(23)
    pipe.sampling_controllers = {'nlive': 400}

    # time the sampling run; rank 0 reports elapsed wall time
    tmr = Timer()
    tmr.tick('test')
    results = pipe()
    tmr.tock('test')
    if mpirank == 0:
        print('\n elapse time ' + str(tmr.record['test']) + '\n')
    """
    # step 3, visualize (with corner package)
    """
    if mpirank == 0 and make_plots:
        samples = results['samples']
        # map unit-cube samples back to physical parameter ranges
        for i in range(len(
                pipe.active_parameters)):  # convert variables into parameters
            low, high = pipe.active_ranges[pipe.active_parameters[i]]
            for j in range(samples.shape[0]):
                samples[j, i] = unity_mapper(samples[j, i], low, high)
        # corner plot
        corner.corner(samples[:, :len(pipe.active_parameters)],
                      range=[0.99] * len(pipe.active_parameters),
                      quantiles=[0.02, 0.5, 0.98],
                      labels=pipe.active_parameters,
                      show_titles=True,
                      title_kwargs={"fontsize": 15},
                      color='steelblue',
                      truths=truths,
                      truth_color='firebrick',
                      plot_contours=True,
                      hist_kwargs={'linewidth': 2},
                      label_kwargs={'fontsize': 20})
        plt.savefig('testfield_posterior_li_dynesty.pdf')
    def test_dynesty(self):
        """Check DynestyPipeline construction, properties and seed control.

        Verifies default/assigned values of the pipeline properties, then
        exercises the three random_type modes: 'free' (no seeds),
        'controllable' (reproducible per seed_tracer, advancing per call)
        and 'fixed' (identical seeds on every call).
        """
        # mock measures
        arr = np.random.rand(1, 3)
        measuredict = Measurements()
        measuredict.append(('test', 'nan', '3', 'nan'), arr, True)
        # simulator
        simer = BiSimulator(measuredict)
        # mock factory list
        tf = TestFieldFactory(active_parameters=tuple('a'))
        flist = (tf, )
        # mock likelihood
        lh = EnsembleLikelihood(measuredict)
        # mock prior
        pr = FlatPrior()
        # pipeline
        pipe = DynestyPipeline(simer, flist, lh, pr, 5)

        # constructor wiring and property defaults / setters
        self.assertEqual(pipe.active_parameters, ('test_a', ))
        self.assertEqual(pipe.factory_list, (tf, ))
        self.assertEqual(pipe.simulator, simer)
        self.assertEqual(pipe.likelihood, lh)
        self.assertEqual(pipe.prior, pr)
        self.assertEqual(pipe.ensemble_size, 5)
        self.assertEqual(pipe.sampling_controllers, {})
        pipe.sampling_controllers = {'nlive': 1000}
        self.assertEqual(pipe.sampling_controllers, {'nlive': 1000})
        self.assertEqual(pipe.sample_callback, False)
        pipe.sample_callback = True
        self.assertEqual(pipe.sample_callback, True)
        self.assertEqual(pipe.likelihood_rescaler, 1.)
        pipe.likelihood_rescaler = 0.5
        self.assertEqual(pipe.likelihood_rescaler, 0.5)
        self.assertEqual(pipe.check_threshold, False)
        pipe.check_threshold = True
        self.assertEqual(pipe.check_threshold, True)
        self.assertEqual(pipe.likelihood_threshold, 0.)
        pipe.likelihood_threshold = -0.2
        self.assertEqual(pipe.likelihood_threshold, -0.2)
        self.assertEqual(pipe._ensemble_seeds, None)
        self.assertEqual(pipe.seed_tracer, int(0))
        self.assertEqual(pipe.random_type, 'free')

        # test free random seed
        pipe._randomness()
        s1 = pipe._ensemble_seeds
        self.assertTrue(s1 is None)
        # test controllable random seed
        pipe.random_type = 'controllable'
        pipe.seed_tracer = int(3)
        pipe._randomness()
        s1 = pipe._ensemble_seeds
        pipe._randomness()
        s2 = pipe._ensemble_seeds
        # a fresh pipeline with the same tracer must reproduce the sequence
        pipe = DynestyPipeline(simer, flist, lh, pr, 5)
        pipe.random_type = 'controllable'
        pipe.seed_tracer = int(3)
        pipe._randomness()
        s1re = pipe._ensemble_seeds
        pipe._randomness()
        s2re = pipe._ensemble_seeds
        self.assertListEqual(list(s1), list(s1re))
        self.assertListEqual(list(s2), list(s2re))
        # a different tracer must yield element-wise different seeds
        pipe = DynestyPipeline(simer, flist, lh, pr, 5)
        pipe.random_type = 'controllable'
        pipe.seed_tracer = int(4)
        pipe._randomness()
        s1new = pipe._ensemble_seeds
        for i in range(len(s1)):
            self.assertNotEqual(s1[i], s1new[i])
        # test fixed random seed
        pipe.random_type = 'fixed'
        pipe.seed_tracer = int(5)
        pipe._randomness()
        s1 = pipe._ensemble_seeds
        pipe._randomness()
        s1re = pipe._ensemble_seeds
        self.assertListEqual(list(s1), list(s1re))
    def test_multinest(self):
        """Check MultinestPipeline construction, properties and seed control.

        Mirrors test_dynesty for the MultiNest-based pipeline: verifies
        default/assigned property values, then the 'free', 'controllable'
        and 'fixed' random_type modes of ensemble-seed assignment.
        """
        # mock measures
        arr = np.random.rand(1, 3)
        measuredict = Measurements()
        measuredict.append(('test', 'nan', '3', 'nan'), arr, True)
        # simulator
        simer = LiSimulator(measuredict)
        # mock factory list
        tf = TestFieldFactory(active_parameters=tuple('a'))
        flist = (tf, )
        # mock likelihood
        lh = EnsembleLikelihood(measuredict)
        # mock prior
        pr = FlatPrior()
        # pipeline
        pipe = MultinestPipeline(simer, flist, lh, pr, 5)

        # constructor wiring and property defaults / setters
        self.assertEqual(pipe.active_parameters, ('test_a', ))
        self.assertEqual(pipe.factory_list, (tf, ))
        self.assertEqual(pipe.simulator, simer)
        self.assertEqual(pipe.likelihood, lh)
        self.assertEqual(pipe.prior, pr)
        self.assertEqual(pipe.ensemble_size, 5)
        self.assertEqual(pipe.sampling_controllers, {})
        pipe.sampling_controllers = {'verbose': False}
        self.assertEqual(pipe.sampling_controllers, {'verbose': False})
        self.assertEqual(pipe.sample_callback, False)
        pipe.sample_callback = True
        self.assertEqual(pipe.sample_callback, True)
        self.assertEqual(pipe.likelihood_rescaler, 1.)
        pipe.likelihood_rescaler = 0.5
        self.assertEqual(pipe.likelihood_rescaler, 0.5)
        self.assertEqual(pipe.check_threshold, False)
        pipe.check_threshold = True
        self.assertEqual(pipe.check_threshold, True)
        self.assertEqual(pipe.likelihood_threshold, 0.)
        pipe.likelihood_threshold = -0.2
        self.assertEqual(pipe.likelihood_threshold, -0.2)
        self.assertEqual(pipe._ensemble_seeds, None)
        self.assertEqual(pipe.seed_tracer, int(0))
        self.assertEqual(pipe.random_type, 'free')

        # test free random seed, full randomness
        pipe._randomness()
        s1 = pipe._ensemble_seeds
        self.assertTrue(s1 is None)
        # test controllable random seed, with top level seed controllable
        pipe.random_type = 'controllable'
        pipe.seed_tracer = int(3)  # controlling seed at top level
        pipe._randomness(
        )  # core func in assigning ensemble seeds, before calling simulator
        s1 = pipe._ensemble_seeds
        pipe._randomness()  # 2nd call of sampeler
        s2 = pipe._ensemble_seeds
        pipe = MultinestPipeline(simer, flist, lh, pr, 5)  # init a new sampler
        pipe.random_type = 'controllable'
        pipe.seed_tracer = int(3)  # repeat the controlling seed
        pipe._randomness()
        s1re = pipe._ensemble_seeds
        pipe._randomness()
        s2re = pipe._ensemble_seeds
        self.assertListEqual(list(s1), list(s1re))  # should get the same seeds
        self.assertListEqual(list(s2), list(s2re))
        pipe = MultinestPipeline(simer, flist, lh, pr, 5)
        pipe.random_type = 'controllable'
        pipe.seed_tracer = int(4)  # different controlling seed
        pipe._randomness()
        s1new = pipe._ensemble_seeds
        for i in range(len(s1)):
            self.assertNotEqual(s1[i], s1new[i])  # should get different seeds
        # test fixed random seed
        pipe.random_type = 'fixed'
        pipe.seed_tracer = int(5)
        pipe._randomness()  # 1st time seed assignment
        s1 = pipe._ensemble_seeds
        pipe._randomness()  # 2nd time seed assignment
        s1re = pipe._ensemble_seeds
        self.assertListEqual(list(s1), list(s1re))  # should get the same seeds
# Example #6
# 0
def wmap_errprop():
    #log.basicConfig(filename='imagine.log', level=log.DEBUG)
    """Full-sky WMAP regular-field error-propagation run at 23 GHz.

    Only the WMAP regular magnetic field model is in test; Faraday
    rotation is provided by the YMW16 free-electron model, with the full
    WMAP parameter set {b0, psi0, psi1, chi0} plus CRE {alpha, r0, z0}.
    Mock data are an ensemble of hammurabi simulations with parameters
    perturbed by 10% relative Gaussian errors; the ensemble mean and OAS
    covariance serve as measurement and uncertainty for the sampler.
    """
    # hammurabi parameter base file
    xmlpath = './params_fullsky_regular.xml'

    # we take three active parameters
    true_b0 = 6.0
    true_psi0 = 27.0
    true_psi1 = 0.9
    true_chi0 = 25.
    true_alpha = 3.0
    true_r0 = 5.0
    true_z0 = 1.0

    mea_nside = 2  # observable Nside
    mea_pix = 12 * mea_nside**2  # observable pixel number
    """
    # step 1, prepare mock data
    """
    x = np.zeros((1, mea_pix))  # only for triggering simulator
    trigger = Measurements()
    trigger.append(('sync', '23', str(mea_nside), 'I'), x)  # only I map
    # initialize simulator
    mocksize = 10  # ensemble of mock data (per node)
    error = 0.1  # theoretical raltive uncertainty for each (active) parameter
    mocker = Hammurabi(measurements=trigger, xml_path=xmlpath)
    # prepare theoretical uncertainty
    # draw one perturbed value per mock realization for every parameter
    b0_var = np.random.normal(true_b0, error * true_b0, mocksize)
    psi0_var = np.random.normal(true_psi0, error * true_psi0, mocksize)
    psi1_var = np.random.normal(true_psi1, error * true_psi1, mocksize)
    chi0_var = np.random.normal(true_chi0, error * true_chi0, mocksize)
    alpha_var = np.random.normal(true_alpha, error * true_alpha, mocksize)
    r0_var = np.random.normal(true_r0, error * true_r0, mocksize)
    z0_var = np.random.normal(true_z0, error * true_z0, mocksize)
    mock_ensemble = Simulations()
    # start simulation
    for i in range(mocksize):  # get one realization each time
        # BregWMAP field
        paramlist = {
            'b0': b0_var[i],
            'psi0': psi0_var[i],
            'psi1': psi1_var[i],
            'chi0': chi0_var[i]
        }  # inactive parameters at default
        breg_wmap = BregWMAP(paramlist, 1)
        # CREAna field
        paramlist = {
            'alpha': alpha_var[i],
            'beta': 0.0,
            'theta': 0.0,
            'r0': r0_var[i],
            'z0': z0_var[i],
            'E0': 20.6,
            'j0': 0.0217
        }  # inactive parameters at default
        cre_ana = CREAna(paramlist, 1)
        # FEregYMW16 field
        fereg_ymw16 = FEregYMW16(dict(), 1)
        # collect mock data and covariance
        outputs = mocker([breg_wmap, cre_ana, fereg_ymw16])
        mock_ensemble.append(('sync', '23', str(mea_nside), 'I'),
                             outputs[('sync', '23', str(mea_nside), 'I')])
    # collect mean and cov from simulated results
    mock_data = Measurements()
    mock_cov = Covariances()
    mean, cov = oas_mcov(mock_ensemble[('sync', '23', str(mea_nside), 'I')])
    mock_data.append(('sync', '23', str(mea_nside), 'I'), mean)
    mock_cov.append(('sync', '23', str(mea_nside), 'I'), cov)
    """
    # step 2, prepare pipeline and execute analysis
    """
    #likelihood = EnsembleLikelihood(mock_data, mock_cov)
    likelihood = SimpleLikelihood(mock_data, mock_cov)

    breg_factory = BregWMAPFactory(active_parameters=('b0', 'psi0', 'psi1',
                                                      'chi0'))
    breg_factory.parameter_ranges = {
        'b0': (0., 10.),
        'psi0': (0., 50.),
        'psi1': (0., 2.),
        'chi0': (0., 50.)
    }
    cre_factory = CREAnaFactory(active_parameters=('alpha', 'r0', 'z0'))
    cre_factory.parameter_ranges = {
        'alpha': (1., 5.),
        'r0': (1., 10.),
        'z0': (0.1, 5.)
    }
    fereg_factory = FEregYMW16Factory()
    factory_list = [breg_factory, cre_factory, fereg_factory]

    prior = FlatPrior()

    simer = Hammurabi(measurements=mock_data, xml_path=xmlpath)

    ensemble_size = 1
    pipe = MultinestPipeline(simer, factory_list, likelihood, prior,
                             ensemble_size)
    pipe.random_type = 'free'
    pipe.sampling_controllers = {
        'n_live_points': 4000,
        'resume': False,
        'verbose': True
    }
    results = pipe()
    """
    # step 3, visualize (with corner package)
    """
    # only the MPI master writes the posterior samples to disk
    if mpirank == 0:
        samples = results['samples']
        np.savetxt('posterior_fullsky_regular_errprop.txt', samples)
    """