Example No. 1
    def test_slogdet(self):
        # each rank holds its own block of rows; the distributed slogdet must
        # match numpy's result on the gathered full matrix
        np.random.seed(mpirank)
        arr = np.random.rand(2, 2 * mpisize)
        sign, logdet = mpi_slogdet(arr)
        full_arr = np.vstack(comm.allgather(arr))
        test_sign, test_logdet = np.linalg.slogdet(full_arr)
        self.assertEqual(sign, test_sign)
        self.assertAlmostEqual(logdet, test_logdet)
Example No. 2
    def test_slogdet_odd(self):
        # global size may not divide evenly across ranks;
        # mpi_arrange returns this rank's (begin, end) row indices
        cols = 32
        rows = mpi_arrange(cols)[1] - mpi_arrange(cols)[0]
        arr = np.random.rand(rows, cols)
        sign, logdet = mpi_slogdet(arr)
        full_arr = np.vstack(comm.allgather(arr))
        test_sign, test_logdet = np.linalg.slogdet(full_arr)
        self.assertEqual(sign, test_sign)
        self.assertAlmostEqual(logdet, test_logdet)
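Both tests rely on the row-block layout used throughout these examples: each MPI rank owns a contiguous block of rows, mpi_arrange(n) returns the (begin, end) indices of that block, and np.vstack(comm.allgather(arr)) rebuilds the full matrix for the serial reference. A minimal sketch of that layout, assuming mpi4py; block_rows below is a hypothetical stand-in for mpi_arrange and its remainder handling may differ from the real helper:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
mpisize = comm.Get_size()
mpirank = comm.Get_rank()

def block_rows(n_rows):
    # split n_rows into contiguous blocks, remainder spread over the first ranks
    base, extra = divmod(n_rows, mpisize)
    begin = mpirank * base + min(mpirank, extra)
    end = begin + base + (1 if mpirank < extra else 0)
    return begin, end

begin, end = block_rows(32)
local = np.random.rand(end - begin, 32)   # this rank's row block
full = np.vstack(comm.allgather(local))   # gathered serial reference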
Example No. 3
def pslogdet(data):
    """
    Dispatches to :py:func:`imagine.tools.mpi_helper.mpi_slogdet` or
    :py:func:`numpy.linalg.slogdet`, depending on
    :py:data:`imagine.rc['distributed_arrays']`.
    """
    if rc['distributed_arrays']:
        return m.mpi_slogdet(data)
    else:
        return np.linalg.slogdet(data)
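A hedged usage sketch of the dispatch: with distributed arrays disabled in the run-time configuration, pslogdet should reduce to the plain NumPy call. The `from imagine import rc` import path is an assumption based on the docstring above.

import numpy as np
from imagine import rc                  # run-time configuration (assumed import path)

rc['distributed_arrays'] = False        # force the serial numpy branch
mat = np.diag([2., 3., 4.])
sign, logdet = pslogdet(mat)            # same return contract as np.linalg.slogdet
assert sign == 1. and np.isclose(logdet, np.log(24.))  # det = 2 * 3 * 4 = 24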
Example No. 4
def mpi_slogdet_timing(data_size):
    local_row_size = mpi_arrange(data_size)[1] - mpi_arrange(data_size)[0]
    random_data = np.random.rand(local_row_size, data_size)
    tmr = Timer()
    tmr.tick('mpi_slogdet')
    sign, logdet = mpi_slogdet(random_data)
    tmr.tock('mpi_slogdet')
    if not mpirank:
        print('@ tools_profiles::mpi_slogdet_timing with ' + str(mpisize) +
              ' nodes')
        print('global matrix size (' + str(data_size) + ',' + str(data_size) +
              ')')
        print('elapsed time ' + str(tmr.record['mpi_slogdet']) + '\n')
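The profile is meant to be launched under MPI, one row block per rank; a small driver sketch (the mpiexec command and the size sweep are illustrative, not part of the original profile script):

# run with e.g.:  mpiexec -n 4 python tools_profiles.py
if __name__ == '__main__':
    for size in (256, 512, 1024):       # global matrix sizes to profile
        mpi_slogdet_timing(size)        # rank 0 prints the elapsed time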
Example No. 5
    def __call__(self, observable_dict):
        """
        SimpleLikelihood object call function

        Parameters
        ----------
        observable_dict : imagine.observables.observable_dict.Simulations
            Simulations object

        Returns
        -------
        likelicache : float
            log-likelihood value (copied to all nodes)
        """
        log.debug('@ simple_likelihood::__call__')
        assert isinstance(observable_dict, Simulations)
        # check dict entries
        assert (observable_dict.keys() == self._measurement_dict.keys())
        likelicache = np.float64(0)
        if self._covariance_dict is None:  # no covariance matrix
            for name in self._measurement_dict.keys():
                obs_mean = deepcopy(observable_dict[name].ensemble_mean
                                    )  # use mpi_mean, copied to all nodes
                data = deepcopy(
                    self._measurement_dict[name].data)  # to distributed data
                diff = np.nan_to_num(data - obs_mean)
                likelicache += -float(0.5) * float(np.vdot(
                    diff, diff))  # copied to all nodes
        else:  # with covariance matrix
            for name in self._measurement_dict.keys():
                obs_mean = deepcopy(observable_dict[name].ensemble_mean
                                    )  # use mpi_mean, copied to all nodes
                data = deepcopy(
                    self._measurement_dict[name].data)  # to distributed data
                diff = np.nan_to_num(data - obs_mean)
                if name in self._covariance_dict.keys():  # not all measurements have cov
                    cov = deepcopy(self._covariance_dict[name].data
                                   )  # to distributed data
                    (sign, logdet) = mpi_slogdet(cov * 2. * np.pi)
                    likelicache += -0.5 * (
                        np.vdot(diff, mpi_lu_solve(cov, diff)) + sign * logdet)
                else:
                    likelicache += -0.5 * np.vdot(diff, diff)
        return likelicache
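When a covariance matrix is available, the loop above accumulates the Gaussian log-likelihood term -0.5 * (diff . C^-1 diff + sign * logdet(2*pi*C)), with the solve and the slogdet performed on distributed data. A serial NumPy sketch of the same quantity, useful as a single-process cross-check (the function name is illustrative and it mirrors the sign * logdet convention of the code above):

import numpy as np

def serial_loglike(data, obs_mean, cov):
    # single-process counterpart of the covariance branch above
    diff = np.nan_to_num(data - obs_mean).ravel()
    sign, logdet = np.linalg.slogdet(cov * 2. * np.pi)
    return -0.5 * (diff @ np.linalg.solve(cov, diff) + sign * logdet)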
Example No. 6
def testfield(measure_size, simulation_size, make_plots=True, debug=False):
    if debug:
        log.basicConfig(filename='imagine_li_dynesty.log', level=log.DEBUG)
    else:
        log.basicConfig(filename='imagine_li_dynesty.log')
    """

    :return:

    log.basicConfig(filename='imagine.log', level=log.INFO)
    """
    """
    # step 0, set 'a' and 'b', 'mea_std'

    TestField in LiSimulator is modeled as
        field = gaussian_random(mean=a,std=b)_x * cos(x)
        where x in (0,2pi)

    for generating mock data we need:
    - true values of a and b (true_a, true_b) and the signal seed mea_seed
    - measurement uncertainty: mea_std
    - measurement points, positioned evenly in (0, 2pi) as required by the TestField modelling
    """
    true_a = 3.
    true_b = 6.
    mea_std = 0.1  # std of gaussian measurement error
    mea_seed = 233
    truths = [true_a, true_b]  # will be used in visualizing posterior
    """
    # step 1, prepare mock data
    """
    """
    # 1.1, generate measurements
    mea_field = signal_field + noise_field
    """
    x = np.linspace(0, 2. * np.pi, measure_size)  # data points in measurements
    np.random.seed(mea_seed)  # seed for signal field
    signal_field = np.multiply(
        np.cos(x), np.random.normal(loc=true_a,
                                    scale=true_b,
                                    size=measure_size))
    mea_field = np.vstack([
        signal_field +
        np.random.normal(loc=0., scale=mea_std, size=measure_size)
    ])
    """
    # 1.2, generate covariances
    what's the difference between pre-defined and re-estimated?
    """
    # re-estimate according to measurement error
    mea_repeat = np.zeros((simulation_size, measure_size))
    for i in range(simulation_size):  # times of repeated measurements
        mea_repeat[i, :] = signal_field + np.random.normal(
            loc=0., scale=mea_std, size=measure_size)
    mea_cov = oas_mcov(mea_repeat)[1]

    print(mpirank, 're-estimated: \n', mea_cov, 'slogdet',
          mpi_slogdet(mea_cov))

    # pre-defined according to measurement error
    mea_cov = (mea_std**2) * mpi_eye(measure_size)

    print(mpirank, 'pre-defined: \n', mea_cov, 'slogdet', mpi_slogdet(mea_cov))
    """
    # 1.3 assemble in imagine convention
    """

    mock_data = Measurements()  # create empty Measurements object
    mock_cov = Covariances()  # create empty Covariance object
    # pick up a measurement
    mock_data.append(('test', 'nan', str(measure_size), 'nan'), mea_field,
                     True)
    mock_cov.append(('test', 'nan', str(measure_size), 'nan'), mea_cov, True)
    """
    # 1.4, visualize mock data
    """
    if mpirank == 0 and make_plots:
        plt.plot(x,
                 mock_data[('test', 'nan', str(measure_size), 'nan')].data[0])
        plt.savefig('testfield_mock_li.pdf')
    """
    # step 2, prepare pipeline and execute analysis
    """
    """
    # 2.1, ensemble likelihood
    """
    likelihood = EnsembleLikelihood(
        mock_data, mock_cov)  # initialize likelihood with measured info
    """
    # 2.2, field factory list
    """
    factory = TestFieldFactory(
        active_parameters=('a', 'b'))  # factory with single active parameter
    factory.parameter_ranges = {
        'a': (0, 10),
        'b': (0, 10)
    }  # adjust parameter range for Bayesian analysis
    factory_list = [factory]  # likelihood requires a list/tuple of factories
    """
    # 2.3, flat prior
    """
    prior = FlatPrior()
    """
    # 2.4, simulator
    """
    simer = LiSimulator(mock_data)
    """
    # 2.5, pipeline
    """
    pipe = DynestyPipeline(simer, factory_list, likelihood, prior,
                           simulation_size)
    pipe.random_type = 'controllable'  # 'fixed' random_type doesn't work for the Dynesty pipeline yet
    pipe.seed_tracer = int(23)
    pipe.sampling_controllers = {'nlive': 400}

    tmr = Timer()
    tmr.tick('test')
    results = pipe()
    tmr.tock('test')
    if mpirank == 0:
        print('\n elapsed time ' + str(tmr.record['test']) + '\n')
    """
    # step 3, visualize (with corner package)
    """
    if mpirank == 0 and make_plots:
        samples = results['samples']
        for i in range(len(
                pipe.active_parameters)):  # convert variables into parameters
            low, high = pipe.active_ranges[pipe.active_parameters[i]]
            for j in range(samples.shape[0]):
                samples[j, i] = unity_mapper(samples[j, i], low, high)
        # corner plot
        corner.corner(samples[:, :len(pipe.active_parameters)],
                      range=[0.99] * len(pipe.active_parameters),
                      quantiles=[0.02, 0.5, 0.98],
                      labels=pipe.active_parameters,
                      show_titles=True,
                      title_kwargs={"fontsize": 15},
                      color='steelblue',
                      truths=truths,
                      truth_color='firebrick',
                      plot_contours=True,
                      hist_kwargs={'linewidth': 2},
                      label_kwargs={'fontsize': 20})
        plt.savefig('testfield_posterior_li_dynesty.pdf')
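The two covariance choices in step 1.2 should agree for this mock setup: with many repeated measurements, the covariance re-estimated from the repeats approaches the pre-defined mea_std**2 * identity. A small serial sketch of that check, with the plain sample covariance standing in for oas_mcov (an approximation; the imagine helper additionally applies OAS shrinkage and works on distributed data):

import numpy as np

mea_std, measure_size, repeats = 0.1, 16, 5000
noise = np.random.normal(0., mea_std, size=(repeats, measure_size))
sample_cov = np.cov(noise, rowvar=False)             # re-estimated from repeats
predefined = (mea_std ** 2) * np.eye(measure_size)   # pre-defined alternative
print(np.max(np.abs(sample_cov - predefined)))       # shrinks as repeats grows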
Example No. 7
    def __call__(self, observable_dict):
        """
        EnsembleLikelihood class call function

        Parameters
        ----------
        observable_dict : imagine.observables.observable_dict.Simulations
            Simulations object

        Returns
        -------
        likelicache : float
            log-likelihood value (copied to all nodes)
        """
        log.debug('@ ensemble_likelihood::__call__')
        assert isinstance(observable_dict, Simulations)
        # check dict entries
        assert (observable_dict.keys() == self._measurement_dict.keys())
        likelicache = float(0)
        if self._covariance_dict is None:
            for name in self._measurement_dict.keys():
                obs_mean, obs_cov = oas_mcov(
                    observable_dict[name].data)  # to distributed data
                data = deepcopy(
                    self._measurement_dict[name].data)  # to distributed data
                diff = np.nan_to_num(data - obs_mean)
                if (mpi_trace(obs_cov) <
                        1E-28):  # zero will not be reached, at most E-32
                    likelicache += -0.5 * np.vdot(diff, diff)
                else:
                    sign, logdet = mpi_slogdet(obs_cov * 2. * np.pi)
                    likelicache += -0.5 * (np.vdot(
                        diff, mpi_lu_solve(obs_cov, diff)) + sign * logdet)
        else:
            for name in self._measurement_dict.keys():
                obs_mean, obs_cov = oas_mcov(
                    observable_dict[name].data)  # to distributed data
                data = deepcopy(
                    self._measurement_dict[name].data)  # to distributed data
                diff = np.nan_to_num(data - obs_mean)
                if name in self._covariance_dict.keys(
                ):  # not all measurements have cov
                    full_cov = deepcopy(
                        self._covariance_dict[name].data) + obs_cov
                    if (mpi_trace(full_cov) <
                            1E-28):  # zero will not be reached, at most E-32
                        likelicache += -0.5 * np.vdot(diff, diff)
                    else:
                        sign, logdet = mpi_slogdet(full_cov * 2. * np.pi)
                        likelicache += -0.5 * (
                            np.vdot(diff, mpi_lu_solve(full_cov, diff)) +
                            sign * logdet)
                else:
                    if (mpi_trace(obs_cov) <
                            1E-28):  # zero will not be reached, at most E-32
                        likelicache += -0.5 * np.vdot(diff, diff)
                    else:
                        sign, logdet = mpi_slogdet(obs_cov * 2. * np.pi)
                        likelicache += -0.5 * (np.vdot(
                            diff, mpi_lu_solve(obs_cov, diff)) + sign * logdet)
        return likelicache
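Relative to SimpleLikelihood, the ensemble version estimates an observable covariance from the simulated ensemble via oas_mcov and adds it to the measured covariance before evaluating the same Gaussian term. A serial sketch with scikit-learn's OAS estimator standing in for oas_mcov (an approximation; the imagine helper has its own MPI-aware OAS implementation):

import numpy as np
from sklearn.covariance import OAS

def serial_ensemble_loglike(ensemble, data, meas_cov):
    # ensemble: (n_realisations, n_data) simulated observables
    obs_mean = ensemble.mean(axis=0)
    obs_cov = OAS().fit(ensemble).covariance_       # shrinkage-regularised ensemble covariance
    full_cov = meas_cov + obs_cov
    diff = np.nan_to_num(data - obs_mean).ravel()
    sign, logdet = np.linalg.slogdet(full_cov * 2. * np.pi)
    return -0.5 * (diff @ np.linalg.solve(full_cov, diff) + sign * logdet)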