Code example #1
def _extract(trace):
    # Pull the divergence flags out of the sample_stats group
    divergent_data = convert_to_dataset(trace, group="sample_stats")
    _, diverging_mask = xarray_to_ndarray(divergent_data,
                                          var_names=("diverging", ),
                                          combined=True)
    diverging_mask = np.squeeze(diverging_mask)

    # Flatten the selected posterior variables into a plain ndarray
    posterior_data = convert_to_dataset(trace, group="posterior")
    var_names = _var_names(["beta", "gamma", "tau_b", "tau_g"], posterior_data)
    var_names, _posterior = xarray_to_ndarray(get_coords(posterior_data, {}),
                                              var_names=var_names,
                                              combined=True)
    return diverging_mask, var_names, _posterior
Code example #2
File: fit_model.py Project: volkale/adv
def plot_model_comparison_CIs(model_res_dict):
    var_names = [
        'remr_lnVR', 'rema_lnVR', 'fema_lnVR', 'rema_lnCVR', 'fema_lnCVR'
    ]
    data = [
        az.convert_to_dataset(
            {model: np.exp(model_res_dict[model].posterior.mu.values)})
        for model in var_names
    ]
    _ = az.plot_forest(data,
                       combined=True,
                       hdi_prob=0.95,
                       quartiles=True,
                       colors='black',
                       figsize=(10, 4),
                       var_names=var_names,
                       model_names=len(var_names) * [''])
    plt.xlim(0.78, 1.23)
    plt.title('95% HDI for meta-analytic direct effect $e^\\mu$')
    plt.grid()
    plt.savefig(os.path.join(parent_dir_name,
                             'output/hdi_model_comparison.tiff'),
                format='tiff',
                dpi=500,
                bbox_inches="tight")
Code example #3
File: test_data.py Project: pawanakhil/arviz
def test_inference_data_other_groups():
    datadict = {"a": np.random.randn(100), "b": np.random.randn(1, 100, 10)}
    dataset = convert_to_dataset(datadict, coords={"c": np.arange(10)}, dims={"b": ["c"]})
    with pytest.warns(UserWarning, match="not.+in.+InferenceData scheme"):
        idata = InferenceData(other_group=dataset)
    fails = check_multiple_attrs({"other_group": ["a", "b"]}, idata)
    assert not fails
Code example #4
    def test_missing_coords(self, data):
        dataset = convert_to_dataset(data.datadict, coords=None, dims=data.dims)
        assert set(dataset.data_vars) == {"a", "b", "c"}
        assert set(dataset.coords) == {"chain", "draw", "c1", "c2", "b1"}

        assert set(dataset.a.coords) == {"chain", "draw"}
        assert set(dataset.b.coords) == {"chain", "draw", "b1"}
        assert set(dataset.c.coords) == {"chain", "draw", "c1", "c2"}
Code example #5
def test_dict_to_dataset():
    datadict = {"a": np.random.randn(100), "b": np.random.randn(1, 100, 10)}
    dataset = convert_to_dataset(datadict, coords={"c": np.arange(10)}, dims={"b": ["c"]})
    assert set(dataset.data_vars) == {"a", "b"}
    assert set(dataset.coords) == {"chain", "draw", "c"}

    assert set(dataset.a.coords) == {"chain", "draw"}
    assert set(dataset.b.coords) == {"chain", "draw", "c"}
Code example #6
File: test_data.py Project: gentlerainsky/arviz
    def test_use_all(self):
        dataset = convert_to_dataset(self.datadict, coords=self.coords, dims=self.dims)
        assert set(dataset.data_vars) == {"a", "b", "c"}
        assert set(dataset.coords) == {"chain", "draw", "c1", "c2", "b1"}

        assert set(dataset.a.coords) == {"chain", "draw"}
        assert set(dataset.b.coords) == {"chain", "draw", "b1"}
        assert set(dataset.c.coords) == {"chain", "draw", "c1", "c2"}
Code example #7
File: test_data.py Project: StanczakDominik/arviz
    def test_1d_dataset(self):
        size = 100
        dataset = convert_to_dataset(np.random.randn(size))
        assert len(dataset.data_vars) == 1

        assert set(dataset.coords) == {"chain", "draw"}
        assert dataset.chain.shape == (1, )
        assert dataset.draw.shape == (size, )
Code example #8
    def test_missing_dims(self, data):
        # missing dims
        coords = {"c_dim_0": np.arange(3), "c_dim_1": np.arange(4), "b_dim_0": np.arange(10)}
        dataset = convert_to_dataset(data.datadict, coords=coords, dims=None)
        assert set(dataset.data_vars) == {"a", "b", "c"}
        assert set(dataset.coords) == {"chain", "draw", "c_dim_0", "c_dim_1", "b_dim_0"}

        assert set(dataset.a.coords) == {"chain", "draw"}
        assert set(dataset.b.coords) == {"chain", "draw", "b_dim_0"}
        assert set(dataset.c.coords) == {"chain", "draw", "c_dim_0", "c_dim_1"}
Code example #9
    def test_skip_dim_0(self, data):
        dims = {"c": [None, "c2"]}
        coords = {"c_dim_0": np.arange(3), "c2": np.arange(4), "b_dim_0": np.arange(10)}
        dataset = convert_to_dataset(data.datadict, coords=coords, dims=dims)
        assert set(dataset.data_vars) == {"a", "b", "c"}
        assert set(dataset.coords) == {"chain", "draw", "c_dim_0", "c2", "b_dim_0"}

        assert set(dataset.a.coords) == {"chain", "draw"}
        assert set(dataset.b.coords) == {"chain", "draw", "b_dim_0"}
        assert set(dataset.c.coords) == {"chain", "draw", "c_dim_0", "c2"}
Code example #10
File: test_data.py Project: StanczakDominik/arviz
    def test_nd_to_dataset(self):
        shape = (1, 2, 3, 4, 5)
        dataset = convert_to_dataset(np.random.randn(*shape))
        assert len(dataset.data_vars) == 1
        var_name = list(dataset.data_vars)[0]

        assert len(dataset.coords) == len(shape)
        assert dataset.chain.shape == shape[:1]
        assert dataset.draw.shape == shape[1:2]
        assert dataset[var_name].shape == shape
Code example #11
File: test_data.py Project: StanczakDominik/arviz
    def test_1d_dataset(self):
        size = 100
        dataset = convert_to_dataset(
            xr.DataArray(np.random.randn(1, size),
                         name="plot",
                         dims=("chain", "draw")))
        assert len(dataset.data_vars) == 1
        assert "plot" in dataset.data_vars
        assert dataset.chain.shape == (1, )
        assert dataset.draw.shape == (size, )
Code example #12
def test_dict_to_dataset():
    datadict = {'a': np.random.randn(100), 'b': np.random.randn(1, 100, 10)}
    dataset = convert_to_dataset(datadict,
                                 coords={'c': np.arange(10)},
                                 dims={'b': ['c']})
    assert set(dataset.data_vars) == {'a', 'b'}
    assert set(dataset.coords) == {'chain', 'draw', 'c'}

    assert set(dataset.a.coords) == {'chain', 'draw'}
    assert set(dataset.b.coords) == {'chain', 'draw', 'c'}
Code example #13
    def test_missing_coords(self):
        dataset = convert_to_dataset(self.datadict,
                                     coords=None,
                                     dims=self.dims)
        assert set(dataset.data_vars) == {'a', 'b', 'c'}
        assert set(dataset.coords) == {'chain', 'draw', 'c1', 'c2', 'b1'}

        assert set(dataset.a.coords) == {'chain', 'draw'}
        assert set(dataset.b.coords) == {'chain', 'draw', 'b1'}
        assert set(dataset.c.coords) == {'chain', 'draw', 'c1', 'c2'}
Code example #14
File: test_data.py Project: StanczakDominik/arviz
    def test_nd_to_dataset(self):
        shape = (1, 2, 3, 4, 5)
        dataset = convert_to_dataset(
            xr.DataArray(np.random.randn(*shape),
                         dims=("chain", "draw", "dim_0", "dim_1", "dim_2")))
        var_name = list(dataset.data_vars)[0]

        assert len(dataset.data_vars) == 1
        assert dataset.chain.shape == shape[:1]
        assert dataset.draw.shape == shape[1:2]
        assert dataset[var_name].shape == shape
Code example #15
File: test_data.py Project: gentlerainsky/arviz
    def test_convert_to_dataset(self):
        data = convert_to_dataset(
            self.obj,
            group="posterior",
            coords={"school": np.arange(self.data["J"])},
            dims={"theta": ["school"], "theta_tilde": ["school"]},
        )
        assert data.draw.shape == (self.draws,)
        assert data.chain.shape == (self.chains,)
        assert data.school.shape == (self.data["J"],)
        assert data.theta.shape == (self.chains, self.draws, self.data["J"])
Code example #16
    def test_convert_to_dataset(self, eight_schools_params, draws, chains, data):
        dataset = convert_to_dataset(
            data.obj,
            group="posterior",
            coords={"school": np.arange(eight_schools_params["J"])},
            dims={"theta": ["school"], "eta": ["school"]},
        )
        assert dataset.draw.shape == (draws,)
        assert dataset.chain.shape == (chains,)
        assert dataset.school.shape == (eight_schools_params["J"],)
        assert dataset.theta.shape == (chains, draws, eight_schools_params["J"])
Code example #17
def gelman_rubin(data, var_names=None):
    r"""Compute estimate of R-hat for a set of traces.

    The Gelman-Rubin diagnostic tests for lack of convergence by comparing the variance between
    multiple chains to the variance within each chain. If convergence has been achieved, the
    between-chain and within-chain variances should be identical. To be most effective in
    detecting evidence for nonconvergence, each chain should have been initialized to starting
    values that are dispersed relative to the target distribution.

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
        At least 2 posterior chains are needed to compute this diagnostic of one or more
        stochastic parameters.
    var_names : list
      Names of variables to include in the rhat report

    Returns
    -------
    r_hat : dict of floats (MultiTrace) or float (trace object)
      Returns dictionary of the potential scale reduction factors, :math:`\hat{R}`

    Notes
    -----
    The diagnostic is computed by:

      .. math:: \hat{R} = \frac{\hat{V}}{W}

    where :math:`W` is the within-chain variance and :math:`\hat{V}` is the posterior variance
    estimate for the pooled traces. This is the potential scale reduction factor, which converges
    to unity when each of the traces is a sample from the target posterior. Values greater than one
    indicate that one or more chains have not yet converged.

    References
    ----------
    Brooks and Gelman (1998)
    Gelman and Rubin (1992)
    """
    if isinstance(data, np.ndarray):
        return _get_rhat(data)

    dataset = convert_to_dataset(data, group='posterior')
    if var_names is None:
        var_names = list(dataset.data_vars)
    dataset = dataset[var_names]
    return xr.apply_ufunc(_rhat_ufunc,
                          dataset,
                          input_core_dims=((
                              'chain',
                              'draw',
                          ), ))
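The docstring above defines R-hat as the ratio of the pooled variance estimate V-hat to the within-chain variance W. Below is a minimal NumPy sketch of that formula for a single (chain, draw) array; `rhat_sketch` is a hypothetical helper written for illustration, not ArviZ's internal `_rhat_ufunc`.

import numpy as np

def rhat_sketch(samples):
    """Basic (non-split) R-hat for an array of shape (chain, draw)."""
    n = samples.shape[1]
    # W: average of the per-chain sample variances
    w = samples.var(axis=1, ddof=1).mean()
    # B/n: variance of the per-chain means
    b_over_n = samples.mean(axis=1).var(ddof=1)
    # Pooled posterior variance estimate V-hat
    v_hat = (n - 1) / n * w + b_over_n
    # Many implementations report the square root of this ratio
    return v_hat / w

# Two well-mixed chains should give a value close to 1
print(rhat_sketch(np.random.randn(2, 1000)))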
Code example #18
    def test_convert_to_dataset(self):
        data = convert_to_dataset(
            self.obj,
            group='posterior',
            coords={'school': np.arange(self.data['J'])},
            dims={
                'theta': ['school'],
                'theta_tilde': ['school']
            },
        )
        assert data.draw.shape == (self.draws, )
        assert data.chain.shape == (self.chains, )
        assert data.school.shape == (self.data['J'], )
        assert data.theta.shape == (self.chains, self.draws, self.data['J'])
Code example #19
File: test_data.py Project: StanczakDominik/arviz
    def test_dataset_conversion_idempotent(self):
        inference_data = load_arviz_data("centered_eight")
        data_set = convert_to_dataset(inference_data.posterior)
        assert isinstance(data_set, xr.Dataset)
        assert set(data_set.coords["school"].values) == {
            "Hotchkiss",
            "Mt. Hermon",
            "Choate",
            "Deerfield",
            "Phillips Andover",
            "St. Paul's",
            "Lawrenceville",
            "Phillips Exeter",
        }
        assert data_set["theta"].dims == ("chain", "draw", "school")
Code example #20
    def test_skip_dim_0(self):
        dims = {'c': [None, 'c2']}
        coords = {
            'c_dim_0': np.arange(3),
            'c2': np.arange(4),
            'b_dim_0': np.arange(10)
        }
        dataset = convert_to_dataset(self.datadict, coords=coords, dims=dims)
        assert set(dataset.data_vars) == {'a', 'b', 'c'}
        assert set(
            dataset.coords) == {'chain', 'draw', 'c_dim_0', 'c2', 'b_dim_0'}

        assert set(dataset.a.coords) == {'chain', 'draw'}
        assert set(dataset.b.coords) == {'chain', 'draw', 'b_dim_0'}
        assert set(dataset.c.coords) == {'chain', 'draw', 'c_dim_0', 'c2'}
Code example #21
def effective_n(data, *, var_names=None):
    r"""Calculate estimate of the effective sample size.

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
        At least 2 posterior chains are needed to compute this diagnostic of one or more
        stochastic parameters.
    var_names : list
      Names of variables to include in the effective_n report

    Returns
    -------
    n_eff : dictionary of floats (MultiTrace) or float (trace object)
        Return the effective sample size, :math:`\hat{n}_{eff}`

    Notes
    -----
    The diagnostic is computed by:

    .. math:: \hat{n}_{eff} = \frac{mn}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}

    where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t, and T
    is the first odd positive integer for which the sum
    :math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+2}` is negative.

    The current implementation is similar to Stan, which uses Geyer's initial monotone sequence
    criterion (Geyer, 1992; Geyer, 2011).

    References
    ----------
    Gelman et al. BDA (2014)
    """
    if isinstance(data, np.ndarray):
        return _get_neff(data)

    dataset = convert_to_dataset(data, group='posterior')
    if var_names is None:
        var_names = list(dataset.data_vars)
    dataset = dataset[var_names]
    return xr.apply_ufunc(_neff_ufunc,
                          dataset,
                          input_core_dims=((
                              'chain',
                              'draw',
                          ), ))
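The docstring above divides the total number of draws by 1 + 2 * sum of the autocorrelations, truncated by Geyer's initial monotone sequence criterion. A rough single-chain sketch of that computation follows; `ess_sketch` is a hypothetical helper for illustration under the simplifying assumption of one chain, not ArviZ's `_neff_ufunc`.

import numpy as np

def ess_sketch(chain):
    """Simplified effective sample size for one 1-D chain."""
    n = len(chain)
    x = chain - chain.mean()
    # Autocovariance at lags 0..n-1, normalized to autocorrelation
    acov = np.correlate(x, x, mode="full")[n - 1:] / n
    rho = acov / acov[0]
    # Geyer's pairing rule: add 2 * (rho[t] + rho[t+1]) while the pair
    # sum stays positive, starting from lag t = 1
    tau = 1.0
    t = 1
    while t + 1 < n and rho[t] + rho[t + 1] > 0:
        tau += 2 * (rho[t] + rho[t + 1])
        t += 2
    return n / tau

# Independent draws: ESS should come out near the number of draws
print(ess_sketch(np.random.randn(1000)))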
Code example #22
    def test_missing_dims(self):
        # missing dims
        coords = {
            'c_dim_0': np.arange(3),
            'c_dim_1': np.arange(4),
            'b_dim_0': np.arange(10)
        }
        dataset = convert_to_dataset(self.datadict, coords=coords, dims=None)
        assert set(dataset.data_vars) == {'a', 'b', 'c'}
        assert set(dataset.coords) == {
            'chain', 'draw', 'c_dim_0', 'c_dim_1', 'b_dim_0'
        }

        assert set(dataset.a.coords) == {'chain', 'draw'}
        assert set(dataset.b.coords) == {'chain', 'draw', 'b_dim_0'}
        assert set(dataset.c.coords) == {'chain', 'draw', 'c_dim_0', 'c_dim_1'}
Code example #23
def plot_model_comparison_CIs(model_res_dict):
    fig, ax = plt.subplots(nrows=1)
    datasets = [
        az.convert_to_dataset({
            drug_class:
            np.exp(model_res_dict[drug_class].posterior.mu.values)
        }) for drug_class in DRUG_CLASSES
    ]
    _ = az.plot_forest(datasets,
                       combined=True,
                       credible_interval=0.95,
                       quartiles=True,
                       colors='black',
                       var_names=DRUG_CLASSES,
                       model_names=['', '', '', ''],
                       ax=ax)
    ax.set_title('95% HDI $e^\\mu$')
    plt.tight_layout()
    plt.savefig(os.path.join(parent_dir_name,
                             'output/hdi_drug_class_comparison.tiff'),
                format='tiff',
                dpi=500,
                bbox_inches="tight")
    return plt
Code example #24
File: test_data.py Project: StanczakDominik/arviz
    def test_warns_bad_shape(self):
        # Shape should be (chain, draw, *shape)
        with pytest.warns(UserWarning):
            convert_to_dataset(np.random.randn(100, 4))
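The comment in this test spells out the layout `convert_to_dataset` assumes for raw arrays: axis 0 is chain, axis 1 is draw. A quick sketch of both cases, grounded in the test above (variable names are illustrative):

import numpy as np
from arviz import convert_to_dataset

# 4 chains of 100 draws: matches the (chain, draw, *shape) convention
ok = convert_to_dataset(np.random.randn(4, 100))

# Interpreted as 100 chains of 4 draws; ArviZ emits a UserWarning
# because more chains than draws usually means the axes are swapped
suspicious = convert_to_dataset(np.random.randn(100, 4))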
Code example #25
File: test_data.py Project: StanczakDominik/arviz
def test_convert_to_dataset_idempotent():
    first = convert_to_dataset(np.random.randn(100))
    second = convert_to_dataset(first)
    assert first.equals(second)
Code example #26
import arviz as az
import matplotlib.pyplot as plt
import numpy
import torch
from torch.distributions import Normal

# HarmonicTrialFunction and metropolis_symmetric are defined elsewhere in this project.

# First we sample from a 1D scalar field.
# We use a simple Gaussian proposal with one parameter (sigma).
# In fact, we just use the harmonic oscillator ansatz.
# We also compute the effective sample size with az.ess() from the arviz package.

for sigma in numpy.linspace(0.01, 3, 30):
    def normal_proposal(old_point):
        # Symmetric proposal: draw from a normal centered on the current point.
        return Normal(old_point, sigma * torch.ones_like(old_point)).sample()

    tf = HarmonicTrialFunction(torch.ones(1))
    n_walkers = 2
    init_config = torch.ones(n_walkers, 1)
    results = metropolis_symmetric(tf, init_config, normal_proposal, num_walkers=n_walkers, num_steps=100000)
    dataset1 = az.convert_to_dataset(results.numpy())
    dataset2 = az.convert_to_inference_data(results.numpy())

    az.plot_ess(dataset2, kind="local")
    plt.savefig("Local")
    az.plot_ess(dataset2, kind="quantile")
    plt.savefig("quantile")
    az.plot_ess(dataset2, kind="evolution")
    plt.savefig("Evolution")
    print(az.ess(dataset1).data_vars)

# The Output_of_run array below is in units of 1000.
#Output_of_run = numpy.array([0.02366, 1.087, 3.579, 7.21, 11.32, 15.9, 20.19, 25.2, 29.98, 32.94, 36.67, 39.41, 38.68, 42.96, 44.4, 45.35, 44.83, 45.94, 43.73, 46.34, 44.69, 45.15, 41.88, 41.41, 41.33, 41, 38.46, 38.3, 37.49, 36.02])
#y_data = Output_of_run
#x_data = numpy.linspace(0.01, 3, 30)
#plt.scatter(x_data, y_data, c='r', label='ess scatter')
Code example #27
File: test_data.py Project: StanczakDominik/arviz
def test_convert_to_dataset_bad(tmpdir):
    first = convert_to_inference_data(np.random.randn(100), group="prior")
    filename = str(tmpdir.join("test_file.nc"))
    first.to_netcdf(filename)
    with pytest.raises(ValueError):
        convert_to_dataset(filename, group="bar")
Code example #28
File: conv.py Project: heroxbd/waveform-analysis
    .set_index(["TriggerNo", "ChannelID"])
    .groupby(level=[0, 1])
    for ipt in args.ipt
]


t0 = []
s0 = []
for sample in samples:
    t0.append(np.vstack(sample.apply(lambda ent: ent["t0"].values)).T)
    s0.append(np.vstack(sample.apply(lambda ent: ent["s0"].values)).T)

t0 = np.array(t0)
s0 = np.array(s0)
# chain, draw, x_dim_0
t0 = az.convert_to_dataset(t0)
s0 = az.convert_to_dataset(s0)

trials = range(100, 2500, 100)
res_t0 = []
res_s0 = []
for i in trials:
    result = az.rhat(t0.sel(draw=slice(i, i * 2), x_dim_0=slice(None, 10)))
    res_t0.append(np.array(result.x))

    result = az.rhat(s0.sel(draw=slice(i, i * 2), x_dim_0=slice(None, 10)))
    res_s0.append(np.array(result.x))

res_t0 = np.array(res_t0)
res_s0 = np.array(res_s0)