Code example #1
0
def get_mean_field_elbo(model, target, num_mc_samples, model_args, vi_kwargs):
    """Build a Monte-Carlo estimate of the mean-field ELBO for `model`.

    Constructs a mean-field variational program for `model`, draws
    `num_mc_samples` samples from it, and averages
    `target(sample) - log q(sample)` to form the ELBO estimate. Energy,
    entropy, and ELBO are also logged as TF summaries.

    Args:
        model: the probabilistic program to approximate.
        target: callable returning the target log joint given sampled values
            as positional arguments.
        num_mc_samples: number of Monte-Carlo samples in the estimate.
        model_args: positional arguments forwarded to the (variational) model.
        vi_kwargs: keyword arguments for `make_variational_model_special`.

    Returns:
        A `(elbo, variational_parameters)` tuple.
    """
    variational_model, variational_parameters = make_variational_model_special(
        model, *model_args, **vi_kwargs)

    log_joint_q = make_log_joint_fn(variational_model)

    def target_q(**parameters):
        return log_joint_q(*model_args, **parameters)

    # Accumulate per-sample terms, then average outside the loop.
    energy_terms = []
    cross_terms = []
    for _ in range(num_mc_samples):
        # tape() records every value sampled while running the program.
        with tape() as variational_tape:
            _ = variational_model(*model_args)

        sampled_values = variational_tape.values()
        energy_terms.append(target(*sampled_values))
        cross_terms.append(target_q(**variational_tape))

    denom = float(num_mc_samples)
    energy = sum(energy_terms) / denom
    entropy = -sum(cross_terms) / denom
    elbo = energy + entropy

    tf.summary.scalar('energy', energy)
    tf.summary.scalar('entropy', entropy)
    tf.summary.scalar('elbo', elbo)

    return elbo, variational_parameters
Code example #2
0
File: util.py  Project: mgorinova/autoreparam
    # One Monte-Carlo estimate of the ELBO integrand: draw a single sample
    # from the variational program and return energy + entropy for it.
    # `mc_sample` is the loop index supplied by the surrounding loop/map
    # construct and is not used in the body.
    def loop_body(mc_sample):
        # tape() presumably records every value sampled while the variational
        # program runs — TODO confirm against the tape implementation.
        with tape() as variational_tape:
            _ = variational_model(*model_args)

            # Sampled values in tape insertion order, passed positionally.
            # NOTE(review): everything below sits inside the `with` block,
            # so it executes while the tape is still active — confirm this
            # is intentional and does not re-record evaluations.
            params = variational_tape.values()

            # Target log joint evaluated at the sample ("energy" term).
            energy = target(*params)
            # Negated variational log joint ("entropy" contribution).
            entropy = tf.negative(target_q(**variational_tape))
            return energy + entropy
Code example #3
0
def mean_field_variational_inference(model, *args, **kwargs):
    """Run mean-field VI on `model` over several learning-rate restarts.

    Builds a 16-sample Monte-Carlo ELBO estimate for a mean-field
    variational approximation, then optimizes it with Adam from scratch
    once per learning rate in a fixed schedule, keeping the run with the
    best trailing-window ELBO.

    Args:
        model: the probabilistic program to approximate.
        *args: positional arguments forwarded to `model`.
        **kwargs: keyword arguments forwarded to `model`; may additionally
            contain `num_optimization_steps` (default 2000), which is
            consumed here and not forwarded.

    Returns:
        A tuple `(results, best_elbo, timeline, vi_time)` where `results`
        is an OrderedDict with key 'vp' mapping to the learned variational
        parameter values, `timeline` is the per-step ELBO trace of the best
        run, and `vi_time` is elapsed wall-clock time in seconds.
    """
    # BUG FIX: the original did `kwargs.get(...)` followed by an
    # unconditional `del kwargs['num_optimization_steps']`, which raised
    # KeyError whenever the caller relied on the default. `pop` reads and
    # removes the key in one step, honoring the default.
    num_optimization_steps = kwargs.pop('num_optimization_steps', 2000)

    (variational_model,
     variational_parameters) = program_transformations.make_variational_model(
         model, *args, **kwargs)

    log_joint = make_log_joint_fn(model)

    def target(**parameters):
        full_kwargs = dict(parameters, **kwargs)
        return log_joint(*args, **full_kwargs)

    log_joint_q = make_log_joint_fn(variational_model)

    def target_q(**parameters):
        return log_joint_q(*args, **parameters)

    # Monte-Carlo ELBO estimate with a fixed sample budget.
    num_mc_samples = 16
    elbo_sum = 0.
    for _ in range(num_mc_samples):
        with tape() as variational_tape:
            _ = variational_model(*args)

        params = variational_tape
        elbo_sum = elbo_sum + target(**params) - target_q(**params)

    elbo = elbo_sum / float(num_mc_samples)

    best_elbo = None

    # The learning rate is fed through a placeholder so each restart can
    # reuse the same graph with a different value.
    learning_rate_ph = tf.placeholder(shape=[], dtype=tf.float32)
    learning_rate = tf.Variable(learning_rate_ph, trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train = optimizer.minimize(-elbo)  # maximize ELBO
    init = tf.global_variables_initializer()

    start_time = time.time()
    for learning_rate_val in [0.01, 0.1, 0.01, 0.1, 0.01, 0.1]:
        feed_dict = {learning_rate_ph: learning_rate_val}
        with tf.Session() as sess:
            # Fresh variable initialization per restart.
            sess.run(init, feed_dict=feed_dict)

            this_timeline = []
            print(
                'VI with {} optimization steps'.format(num_optimization_steps))
            for _ in range(num_optimization_steps):
                _, e = sess.run([train, elbo], feed_dict=feed_dict)
                this_timeline.append(e)

            # Score the run by its average ELBO over the last 100 steps.
            this_elbo = np.mean(this_timeline[-100:])
            if best_elbo is None or best_elbo < this_elbo:
                timeline = this_timeline
                best_elbo = this_elbo

                vals = sess.run(list(variational_parameters.values()),
                                feed_dict=feed_dict)
                learned_variational_params = collections.OrderedDict(
                    zip(variational_parameters.keys(), vals))

            vi_time = time.time() - start_time

    results = collections.OrderedDict()
    results['vp'] = learned_variational_params
    print('ELBO: {}'.format(best_elbo))

    return results, best_elbo, timeline, vi_time