print(sampler.noisy_logjoint(kind='pf', pf='paris', N=1000,
    return_loglike=True, tqdm=tqdm))
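
# The call above estimates the log-joint with a particle filter (kind='pf',
# N=1000 particles).  As background, the sketch below shows how a plain
# bootstrap particle filter estimates the log-likelihood of a toy 1-D
# linear-Gaussian state space model.  It is illustrative only: the model,
# parameter values, and function name are assumptions, and it does not
# reproduce the library's 'paris' particle smoother.
import numpy as np

def bootstrap_pf_loglike(y, A, Q, C, R, N=1000, seed=0):
    """Bootstrap particle filter estimate of log p(y_{1:T}) for a 1-D LGSSM."""
    rng = np.random.default_rng(seed)
    particles = rng.normal(0.0, 1.0, size=N)            # x_0 ~ N(0, 1)
    loglike = 0.0
    for t in range(len(y)):
        # Propagate: x_t = A x_{t-1} + N(0, Q)
        particles = A * particles + rng.normal(0.0, np.sqrt(Q), size=N)
        # Weight by the observation density: y_t = C x_t + N(0, R)
        logw = -0.5 * ((y[t] - C * particles) ** 2 / R + np.log(2 * np.pi * R))
        m = logw.max()
        loglike += m + np.log(np.exp(logw - m).mean())   # log-mean of the weights
        # Multinomial resampling
        w = np.exp(logw - m)
        particles = rng.choice(particles, size=N, p=w / w.sum())
    return loglike

# Toy usage on simulated data
toy_rng = np.random.default_rng(1)
x_sim, y_sim = np.zeros(100), np.zeros(100)
for t in range(1, 100):
    x_sim[t] = 0.9 * x_sim[t - 1] + toy_rng.normal(0, 0.5)
    y_sim[t] = x_sim[t] + toy_rng.normal(0, 1.0)
print(bootstrap_pf_loglike(y_sim, A=0.9, Q=0.25, C=1.0, R=1.0))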

###############################################################################
# Evaluate Fit
###############################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
        sample_function_parameters,
        noisy_logjoint_loglike_metric,
        )
# Evaluate Loglikelihood on Training Set
metric_functions=[
    noisy_logjoint_loglike_metric(tqdm=tqdm, kind='pf', pf='paris', N=1000),
    ]
sample_functions=sample_function_parameters(['A', 'Q', 'R'])

# Evaluate SGLD samples
sgld_evaluator = OfflineEvaluator(sampler,
        parameters_list=sgld_parameters,
        parameters_times=sgld_time,
        metric_functions = metric_functions,
        sample_functions = sample_functions,
        )
sgld_evaluator.evaluate(40, tqdm=tqdm)
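
# `sgld_parameters` above were produced by stochastic gradient Langevin
# dynamics (SGLD).  For reference, this is a generic sketch of the SGLD update
# on a toy Gaussian-mean posterior; the step size, minibatch gradient
# estimator, and variable names are illustrative assumptions, not the
# library's implementation.
import numpy as np

def sgld_step(theta, noisy_grad_logpost, eps, rng):
    """One SGLD update: half a gradient step on a noisy gradient plus injected noise."""
    return (theta
            + 0.5 * eps * noisy_grad_logpost(theta)
            + np.sqrt(eps) * rng.normal(size=np.shape(theta)))

toy_rng = np.random.default_rng(0)
toy_data = toy_rng.normal(2.0, 1.0, size=1000)

def noisy_grad(theta, batch_size=50):
    # Minibatch estimate of the gradient of the log-posterior for a N(theta, 1) mean
    batch = toy_rng.choice(toy_data, size=batch_size, replace=False)
    return len(toy_data) * np.mean(batch - theta) - theta   # rescaled loglike grad + N(0,1) prior grad

theta, trace = 0.0, []
for _ in range(2000):
    theta = sgld_step(theta, noisy_grad, eps=1e-3, rng=toy_rng)
    trace.append(theta)
print(np.mean(trace[1000:]))   # close to the posterior mean (about 2.0)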

# Evaluate (full-gradient) LD samples
ld_evaluator = OfflineEvaluator(sampler,
        parameters_list=ld_parameters,
        parameters_times=ld_time,
        metric_functions = metric_functions,
        sample_functions = sample_functions,
        )
ld_evaluator.evaluate(40, tqdm=tqdm)

###############################################################################
# Example 2
###############################################################################
metric_functions = [
    noisy_logjoint_loglike_metric(),
    best_double_permutation_metric_function_parameter(
        parameter_name='pi',
        target_value=parameters.pi,
        metric_name='mse',
        best_function=min),
    best_permutation_metric_function_parameter(parameter_name='mu',
                                               target_value=parameters.mu,
                                               metric_name='mse',
                                               best_function=min),
    best_permutation_metric_function_parameter(parameter_name='R',
                                               target_value=parameters.R,
                                               metric_name='mse',
                                               best_function=min),
]
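
# The best_*permutation_metric_function_parameter metrics above deal with
# label switching: HMM/mixture components are only identified up to a
# relabeling, so the reported error is the minimum over permutations of the
# component labels (and, for the transition matrix `pi`, over simultaneous
# row/column permutations).  A minimal sketch of the single-permutation idea,
# with hypothetical helper and variable names:
import itertools
import numpy as np

def best_permutation_mse(estimate, target):
    """Minimum MSE between `estimate` and `target` over row (component) permutations."""
    K = target.shape[0]
    return min(
        np.mean((estimate[list(perm)] - target) ** 2)
        for perm in itertools.permutations(range(K))
    )

mu_true = np.array([[0.0], [5.0], [10.0]])
mu_est = np.array([[10.1], [0.2], [4.9]])     # same components, different label order
print(best_permutation_mse(mu_est, mu_true))  # small, because a relabeling matches them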
sample_functions = sample_function_parameters(
    ['logit_pi', 'expanded_pi', 'pi', 'mu', 'R', 'LRinv'], )

sampler = GaussHMMSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
    sampler=sampler,
    metric_functions=metric_functions,
    sample_functions=sample_functions,
)
print(evaluator.metrics)
print(evaluator.samples)

## Run a few Gibbs Sampler steps
for _ in range(10):
    evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])
print(evaluator.metrics)

###############################################################################
# Example 3
###############################################################################
from sgmcmc_ssm.metric_functions import (
        sample_function_parameters,
        noisy_logjoint_loglike_metric,
        metric_function_parameters,
        )

metric_functions = [
        noisy_logjoint_loglike_metric(),
        metric_function_parameters(
                parameter_names=['A', 'Q', 'C', 'R'],
                target_values=[parameters.A, parameters.Q,
                    parameters.C, parameters.R],
                metric_names = ['mse', 'mse', 'mse', 'mse'],
                )
        ]

sample_functions = sample_function_parameters(
        ['A', 'Q', 'LQinv', 'C', 'R', 'LRinv'],
        )

sampler = SLDSSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
sampler.init_sample_latent() ## THIS IS IMPORTANT
evaluator = SamplerEvaluator(
        sampler=sampler,
        metric_functions=metric_functions,
        sample_functions=sample_functions,
        )
print(evaluator.metrics)
print(evaluator.samples)

## Run a few Gibbs Sampler steps
for _ in range(10):
    evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])  # same steps as the Gauss HMM example above

###############################################################################
# Example 4
###############################################################################
my_metric_functions = [
    metric_function_parameters(
        parameter_names2,
        target_values=[
            getattr(my_data['parameters'], parameter_name)
            for parameter_name in parameter_names2
        ],
        metric_names=['mse' for parameter_name in parameter_names2],
        criteria=[min for parameter_name in parameter_names2],
    ),
    metric_function_parameters(
        parameter_names3,
        target_values=[
            getattr(my_data['parameters'], parameter_name)
            for parameter_name in parameter_names3
        ],
        metric_names=['mse' for parameter_name in parameter_names3],
    ),
    #        metric_compare_x(my_data['latent_vars']['x']),
    #        metric_compare_z(my_data['latent_vars']['z']),
    noisy_logjoint_loglike_metric(subsequence_length=50, buffer_length=10),
]
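
# noisy_logjoint_loglike_metric(subsequence_length=50, buffer_length=10) above
# evaluates the log-joint on a random contiguous subsequence, padded with a
# buffer on each side so that the subsequence is scored approximately
# conditional on its neighbours rather than in isolation.  A rough sketch of
# how such a buffered subsequence could be drawn (illustrative; the helper
# name and return convention are assumptions):
import numpy as np

def draw_buffered_subsequence(T, subsequence_length=50, buffer_length=10, rng=None):
    """Return (buffered_slice, inner_slice); only the inner slice is scored."""
    rng = rng or np.random.default_rng()
    start = int(rng.integers(0, T - subsequence_length + 1))
    end = start + subsequence_length
    return (slice(max(0, start - buffer_length), min(T, end + buffer_length)),
            slice(start, end))

buffered, inner = draw_buffered_subsequence(T=1000)
print(buffered, inner)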
my_sample_functions = [
    sample_function_parameters(parameter_names2 + ['LRinv', 'LQinv']),
]
my_evaluators = {
    "{0}_{1}".format(*key):
    SamplerEvaluator(sampler,
                     my_metric_functions,
                     my_sample_functions,
                     sampler_name="{0}_{1}".format(*key))
    for key, sampler in my_samplers.items()
}

keys = my_evaluators.keys()
for step in tqdm(range(1000)):
    for ii, key in enumerate(keys):
        my_evaluators[key].evaluate_sampler_step(
            *sampler_steps[key.split("_")[0]])
################################################################################
# Sampler Evaluation
################################################################################
from sgmcmc_ssm.evaluator import OfflineEvaluator, half_average_parameters_list
from sgmcmc_ssm.metric_functions import (
    sample_function_parameters,
    noisy_logjoint_loglike_metric,
    noisy_predictive_logjoint_loglike_metric,
)

# Evaluate Loglikelihood and Predictive Loglikelihood
metric_functions = [
    noisy_logjoint_loglike_metric(),
    noisy_predictive_logjoint_loglike_metric(num_steps_ahead=3),
]
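
# noisy_predictive_logjoint_loglike_metric(num_steps_ahead=3) scores how well
# the fitted model predicts observations several steps ahead.  For a
# discrete-state HMM the k-step-ahead predictive density is the filtered state
# distribution pushed through the transition matrix k times and mixed over the
# emission densities.  A small sketch for Gaussian emissions (illustrative
# only; variable names are assumptions, not the library's implementation):
import numpy as np

def gauss_pdf(y, mu, sigma):
    return np.exp(-0.5 * ((y - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

def k_step_predictive_logpdf(alpha_t, pi, mu, sigma, y_future, k=3):
    """log p(y_{t+k} | y_{1:t}) from filtered state probabilities alpha_t."""
    state_probs = alpha_t @ np.linalg.matrix_power(pi, k)   # p(z_{t+k} | y_{1:t})
    return np.log(np.sum(state_probs * gauss_pdf(y_future, mu, sigma)))

pi_example = np.array([[0.9, 0.1], [0.2, 0.8]])
alpha_t = np.array([0.7, 0.3])                       # filtered distribution at time t
print(k_step_predictive_logpdf(alpha_t, pi_example, mu=np.array([0.0, 4.0]),
                               sigma=np.array([1.0, 1.0]), y_future=3.5, k=3))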
sample_functions = sample_function_parameters(['pi', 'logit_pi', 'mu', 'R'])

# Evaluate Gibbs samples
gibbs_evaluator = OfflineEvaluator(
    sampler,
    parameters_list=half_average_parameters_list(gibbs_parameters),
    parameters_times=gibbs_time,
    metric_functions=metric_functions,
    sample_functions=sample_functions,
)
gibbs_evaluator.evaluate(16, tqdm=tqdm)
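
# half_average_parameters_list (imported above) smooths the parameter trace
# before evaluation: instead of scoring each raw draw, each entry is replaced
# by an average over the most recent half of the chain up to that point.  A
# conceptual sketch of that kind of trailing-half averaging on a list of numpy
# arrays (an assumed simplification, not the library's exact implementation):
import numpy as np

def half_average(parameters_list):
    """For each index t, average the trailing half of the trace up to t."""
    return [np.mean(parameters_list[t // 2: t + 1], axis=0)
            for t in range(len(parameters_list))]

param_trace = [np.array([1.0]), np.array([3.0]), np.array([2.0]), np.array([2.0])]
print(half_average(param_trace))   # later entries average over the recent half of the trace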

# Evaluate SGRLD samples
sgrld_evaluator = OfflineEvaluator(
    sampler,
    parameters_list=half_average_parameters_list(sgrld_parameters),
    parameters_times=sgrld_time,  # variable name assumed, by analogy with gibbs_time
    metric_functions=metric_functions,
    sample_functions=sample_functions,
)
sgrld_evaluator.evaluate(16, tqdm=tqdm)

###############################################################################
# Example 6
###############################################################################
metric_functions = [
        best_permutation_metric_function_parameter(
            parameter_name = 'Q',
            target_value = parameters.Q,
            metric_name = 'mse',
            best_function = min
            ),
        metric_function_parameters(
                parameter_names=['C', 'R'],
                target_values=[parameters.C, parameters.R],
                metric_names = ['mse', 'mse'],
                )
        ]

sample_functions = sample_function_parameters(
        ['pi', 'A', 'Q', 'C', 'R'],
        )

sampler = SLDSSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
sampler.init_sample_latent() ## THIS IS IMPORTANT
evaluator = SamplerEvaluator(
        sampler=sampler,
        metric_functions=metric_functions,
        sample_functions=sample_functions,
        )
print(evaluator.metrics)
print(evaluator.samples)

## Run a few Gibbs Sampler steps
for _ in tqdm(range(10)):
    evaluator.evaluate_sampler_step(['sample_gibbs', 'project_parameters'])  # same steps as the Gauss HMM example above

###############################################################################
# Example 7
###############################################################################
my_metric_functions = [
    metric_function_parameters(
        parameter_names2,
        target_values=[
            getattr(my_data['parameters'], parameter_name)
            for parameter_name in parameter_names2
        ],
        metric_names=['mse' for parameter_name in parameter_names2],
        criteria=[min for parameter_name in parameter_names2],
    ),
    metric_compare_z(my_data['latent_vars']),
    metric_function_from_sampler("predictive_loglikelihood"),
    noisy_logjoint_loglike_metric(),
]
my_sample_functions = [
    sample_function_parameters(parameter_names + parameter_names2 +
                               ['expanded_pi']),
]
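
# The 'expanded_pi' sample function above (and 'logit_pi' in other examples)
# records an unconstrained reparameterization of the transition matrix: each
# row of pi lives on the probability simplex, so gradient-based samplers move
# in an unconstrained space and map back row-wise with a softmax.  A rough
# sketch of that mapping (assumed form; the library's exact parameterization
# may differ):
import numpy as np

def softmax_rows(logits):
    """Map unconstrained rows back to transition-matrix rows on the simplex."""
    z = logits - logits.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

logit_pi_example = np.array([[ 2.0,  0.0, -1.0],
                             [ 0.0,  1.0,  0.0],
                             [-1.0, -1.0,  2.0]])
pi_rows = softmax_rows(logit_pi_example)
print(pi_rows, pi_rows.sum(axis=1))   # each row sums to 1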
my_evaluators = {
    "{0}_{1}".format(*key):
    SamplerEvaluator(sampler,
                     my_metric_functions,
                     my_sample_functions,
                     sampler_name="{0}_{1}".format(*key))
    for key, sampler in my_samplers.items()
}

keys = my_evaluators.keys()
for step in tqdm(range(1000)):
    for ii, key in enumerate(keys):
        my_evaluators[key].evaluate_sampler_step(
            *sampler_steps[key.split("_")[0]])

###############################################################################
# Example 8
###############################################################################
from sgmcmc_ssm.metric_functions import (
        sample_function_parameters,
        noisy_logjoint_loglike_metric,
        metric_function_parameters,
        )

metric_functions = [
        noisy_logjoint_loglike_metric(kind='pf', N=1000),
        metric_function_parameters(
                parameter_names=['alpha', 'beta', 'gamma', 'tau'],
                target_values=[parameters.alpha, parameters.beta,
                    parameters.gamma, parameters.tau],
                metric_names = ['mse', 'mse', 'mse', 'mse'],
                )
        ]

sample_functions = sample_function_parameters(
        ['alpha', 'beta', 'gamma', 'tau'],
        )
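
# The GARCH sampler below is parameterized by alpha, beta, gamma, tau.  In the
# usual GARCH(1,1)-with-observation-noise setup, alpha/beta/gamma drive the
# latent volatility recursion and tau scales the measurement noise; the
# simulation below uses that assumed parameterization for illustration (the
# library's may differ).
import numpy as np

def simulate_garch(T, alpha, beta, gamma, tau, seed=0):
    """x_t = sigma_t * eps_t with sigma_t^2 = alpha + beta * x_{t-1}^2 + gamma * sigma_{t-1}^2,
    observed as y_t = x_t + tau * noise."""
    rng = np.random.default_rng(seed)
    x = np.zeros(T)
    sigma2 = np.full(T, alpha / (1.0 - beta - gamma))   # start at the stationary variance
    for t in range(1, T):
        sigma2[t] = alpha + beta * x[t - 1] ** 2 + gamma * sigma2[t - 1]
        x[t] = np.sqrt(sigma2[t]) * rng.normal()
    return x + tau * rng.normal(size=T)

toy_garch_obs = simulate_garch(T=500, alpha=0.1, beta=0.2, gamma=0.7, tau=0.3)
print(toy_garch_obs[:5])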

sampler = GARCHSampler(**parameters.dim)
sampler.setup(data['observations'], prior)
evaluator = SamplerEvaluator(
        sampler=sampler,
        metric_functions=metric_functions,
        sample_functions=sample_functions,
        )
print(evaluator.metrics)
print(evaluator.samples)

## Run a few ADA_GRAD sampler steps
for _ in tqdm(range(100)):
    evaluator.evaluate_sampler_step(
            ['step_adagrad', 'project_parameters'],  # step name assumed from the ADA_GRAD comment above
            )
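
# "ADA_GRAD sampler steps" above refers to ADAGRAD-style preconditioning of
# the (noisy) gradient: each coordinate's step is scaled by the inverse square
# root of its accumulated squared gradients.  A generic sketch of the update
# rule on a toy objective (not the library's step_adagrad implementation):
import numpy as np

def adagrad_step(theta, grad, accum, learning_rate=0.5, eps=1e-8):
    """One ADAGRAD ascent step; `accum` is the running sum of squared gradients."""
    accum = accum + grad ** 2
    theta = theta + learning_rate * grad / (np.sqrt(accum) + eps)
    return theta, accum

theta_toy, accum_toy = 0.0, 0.0
for _ in range(500):
    grad = 3.0 - theta_toy               # gradient of the toy log-density -(theta - 3)^2 / 2
    theta_toy, accum_toy = adagrad_step(theta_toy, grad, accum_toy)
print(theta_toy)                         # moves toward the optimum at 3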