Code example #1
    def testReproducibility(self):
        def target_log_prob_fn(event):
            return tfd.Normal(loc=0., scale=1.).log_prob(event)

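        # With the same graph-level seed and the same kernel seed, the two
        # runs below should produce identical draws.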
        tf.compat.v1.set_random_seed(4)
        xs = no_u_turn_sampler.kernel(target_log_prob_fn=target_log_prob_fn,
                                      current_state=[0.],
                                      step_size=[0.3],
                                      seed=3)
        tf.compat.v1.set_random_seed(4)
        ys = no_u_turn_sampler.kernel(target_log_prob_fn=target_log_prob_fn,
                                      current_state=[0.],
                                      step_size=[0.3],
                                      seed=3)
        for x, y in zip(xs, ys):
            self.assertAllEqual(x, y)
Code example #2
    def testMultivariateNormalNd(self, event_size, num_samples):
        def target_log_prob_fn(event):
            return tfd.MultivariateNormalFullCovariance(
                loc=tf.zeros(event_size),
                covariance_matrix=tf.eye(event_size)).log_prob(event)

        state = tf.zeros(event_size)
        samples = []
        for seed in range(num_samples):
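            # Each kernel call returns the new state list, the target log-prob,
            # and its gradients; only the state is carried forward here.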
            [state], _, _ = no_u_turn_sampler.kernel(
                target_log_prob_fn=target_log_prob_fn,
                current_state=[state],
                step_size=[0.3],
                seed=seed)
            npstate = state.numpy()
            samples.append([npstate[0], npstate[1]])

        samples = np.array(samples)
        plt.scatter(samples[:, 0], samples[:, 1])
        savefig("projection_chain_{}d_normal_{}_steps.png".format(
            event_size, num_samples))
        plt.close()

        target_samples = tfd.MultivariateNormalFullCovariance(
            loc=tf.zeros(event_size),
            covariance_matrix=tf.eye(event_size)).sample(num_samples,
                                                         seed=4).numpy()
        plt.scatter(target_samples[:, 0], target_samples[:, 1])
        savefig("projection_independent_{}d_normal_{}_samples.png".format(
            event_size, num_samples))
        plt.close()
Code example #3
    def testLogitBeta(self):
        def target_log_prob_fn(event):
            return tfd.TransformedDistribution(
                distribution=tfd.Beta(concentration0=1.0, concentration1=3.0),
                bijector=tfb.Invert(tfb.Sigmoid())).log_prob(event)

        states = tfd.TransformedDistribution(
            distribution=tfd.Beta(concentration0=1.0, concentration1=3.0),
            bijector=tfb.Invert(tfb.Sigmoid())).sample(10, seed=7)
        plt.hist(states.numpy(), bins=30)
        savefig("logit_beta_start_positions.png")
        plt.close()

        samples = []
        for seed, state in enumerate(states):
            [state], _, _ = no_u_turn_sampler.kernel(
                target_log_prob_fn=target_log_prob_fn,
                current_state=[state],
                step_size=[0.3],
                seed=seed)
            samples.append(state)

        samples = np.array(samples)
        plt.hist(samples, bins=30)
        savefig("one_step_logit_beta_posterior_conservation.png")
        plt.close()

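        # Two-sample KS statistic comparing the states before and after one
        # step; the result is computed but not asserted on here.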
        _ = scipy.stats.ks_2samp(samples.flatten(), states.numpy().flatten())
Code example #4
    def testSkewedMultivariateNormal2d(self):
        def target_log_prob_fn(event):
            return tfd.MultivariateNormalFullCovariance(
                loc=tf.zeros(2),
                covariance_matrix=tf.linalg.tensor_diag(
                    [1., 10.])).log_prob(event)

        rng = np.random.RandomState(seed=7)
        states = tf.cast(rng.normal(scale=[1.0, 10.0], size=[10, 2]),
                         tf.float32)
        plt.scatter(states[:, 0], states[:, 1])
        savefig("skewed_start_positions_2d.png")
        plt.close()

        samples = []
        for seed, state in enumerate(states):
            [state], _, _ = no_u_turn_sampler.kernel(
                target_log_prob_fn=target_log_prob_fn,
                current_state=[state],
                step_size=[0.3],
                seed=seed)
            samples.append(state)

        samples = tf.stack(samples).numpy()
        plt.scatter(samples[:, 0], samples[:, 1])
        savefig("one_step_skewed_posterior_conservation_2d.png")
        plt.close()
        plot_with_expectation(
            samples[:, 0],
            dist=scipy.stats.norm(0, 1),
            suffix="one_step_skewed_posterior_conservation_2d_dim_0.png")
        plot_with_expectation(
            samples[:, 1],
            dist=scipy.stats.norm(0, 10),
            suffix="one_step_skewed_posterior_conservation_2d_dim_1.png")
Code example #5
File: nuts_test.py Project: asudomoeva/probability
  def testMultivariateNormalNd(self, event_size, num_samples):
    def target_log_prob_fn(event):
      return tfd.MultivariateNormalFullCovariance(
          loc=tf.zeros(event_size),
          covariance_matrix=tf.eye(event_size)).log_prob(event)

    state = tf.zeros(event_size)
    samples = []
    for seed in range(num_samples):
      [state], _, _ = no_u_turn_sampler.kernel(
          target_log_prob_fn=target_log_prob_fn,
          current_state=[state],
          step_size=[0.3],
          seed=seed)
      npstate = state.numpy()
      samples.append([npstate[0], npstate[1]])

    samples = np.array(samples)
    plt.scatter(samples[:, 0], samples[:, 1])
    savefig("projection_chain_{}d_normal_{}_steps.png".format(
        event_size, num_samples))
    plt.close()

    target_samples = tfd.MultivariateNormalFullCovariance(
        loc=tf.zeros(event_size),
        covariance_matrix=tf.eye(event_size)).sample(
            num_samples, seed=4).numpy()
    plt.scatter(target_samples[:, 0], target_samples[:, 1])
    savefig("projection_independent_{}d_normal_{}_samples.png".format(
        event_size, num_samples))
    plt.close()
Code example #6
File: nuts_test.py Project: asudomoeva/probability
  def testSkewedMultivariateNormal2d(self):
    def target_log_prob_fn(event):
      return tfd.MultivariateNormalFullCovariance(
          loc=tf.zeros(2),
          covariance_matrix=tf.linalg.tensor_diag([1., 10.])).log_prob(event)

    rng = np.random.RandomState(seed=7)
    states = tf.cast(rng.normal(scale=[1.0, 10.0], size=[10, 2]), tf.float32)
    plt.scatter(states[:, 0], states[:, 1])
    savefig("skewed_start_positions_2d.png")
    plt.close()

    samples = []
    for seed, state in enumerate(states):
      [state], _, _ = no_u_turn_sampler.kernel(
          target_log_prob_fn=target_log_prob_fn,
          current_state=[state],
          step_size=[0.3],
          seed=seed)
      samples.append(state)

    samples = tf.stack(samples).numpy()
    plt.scatter(samples[:, 0], samples[:, 1])
    savefig("one_step_skewed_posterior_conservation_2d.png")
    plt.close()
    plot_with_expectation(
        samples[:, 0],
        dist=scipy.stats.norm(0, 1),
        suffix="one_step_skewed_posterior_conservation_2d_dim_0.png")
    plot_with_expectation(
        samples[:, 1],
        dist=scipy.stats.norm(0, 10),
        suffix="one_step_skewed_posterior_conservation_2d_dim_1.png")
Code example #7
File: nuts_test.py Project: asudomoeva/probability
  def testLogitBeta(self):
    def target_log_prob_fn(event):
      return tfd.TransformedDistribution(
          distribution=tfd.Beta(concentration0=1.0, concentration1=3.0),
          bijector=tfb.Invert(tfb.Sigmoid())).log_prob(event)

    states = tfd.TransformedDistribution(
        distribution=tfd.Beta(concentration0=1.0, concentration1=3.0),
        bijector=tfb.Invert(tfb.Sigmoid())).sample(10, seed=7)
    plt.hist(states.numpy(), bins=30)
    savefig("logit_beta_start_positions.png")
    plt.close()

    samples = []
    for seed, state in enumerate(states):
      [state], _, _ = no_u_turn_sampler.kernel(
          target_log_prob_fn=target_log_prob_fn,
          current_state=[state],
          step_size=[0.3],
          seed=seed)
      samples.append(state)

    samples = np.array(samples)
    plt.hist(samples, bins=30)
    savefig("one_step_logit_beta_posterior_conservation.png")
    plt.close()

    _ = scipy.stats.ks_2samp(samples.flatten(), states.numpy().flatten())
Code example #8
File: nuts_test.py Project: asudomoeva/probability
  def testReproducibility(self):
    def target_log_prob_fn(event):
      return tfd.Normal(loc=0., scale=1.).log_prob(event)

    tf.compat.v1.set_random_seed(4)
    xs = no_u_turn_sampler.kernel(
        target_log_prob_fn=target_log_prob_fn,
        current_state=[0.],
        step_size=[0.3],
        seed=3)
    tf.compat.v1.set_random_seed(4)
    ys = no_u_turn_sampler.kernel(
        target_log_prob_fn=target_log_prob_fn,
        current_state=[0.],
        step_size=[0.3],
        seed=3)
    for x, y in zip(xs, ys):
      self.assertAllEqual(x, y)
Code example #9
    def testOneStepFromOrigin(self):
        def target_log_prob_fn(event):
            return tfd.Normal(loc=0., scale=1.).log_prob(event)

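        # Take one NUTS step from the origin under ten different seeds and
        # histogram the resulting states.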
        samples = []
        for seed in range(10):
            [state], _, _ = no_u_turn_sampler.kernel(
                target_log_prob_fn=target_log_prob_fn,
                current_state=[0.],
                step_size=[0.3],
                seed=seed)
            samples.append(state)

        samples = np.array(samples)
        plt.hist(samples, bins=30)
        savefig("one_step_from_origin.png")
        plt.close()
Code example #10
File: nuts_test.py Project: asudomoeva/probability
  def testOneStepFromOrigin(self):
    def target_log_prob_fn(event):
      return tfd.Normal(loc=0., scale=1.).log_prob(event)

    samples = []
    for seed in range(10):
      [state], _, _ = no_u_turn_sampler.kernel(
          target_log_prob_fn=target_log_prob_fn,
          current_state=[0.],
          step_size=[0.3],
          seed=seed)
      samples.append(state)

    samples = np.array(samples)
    plt.hist(samples, bins=30)
    savefig("one_step_from_origin.png")
    plt.close()
Code example #11
  def testNormal(self):
    def target_log_prob_fn(event):
      return tfd.Normal(loc=0., scale=1.).log_prob(event)

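    # Start ten chains at draws from the standard normal; after a single NUTS
    # step the samples should still be distributed like N(0, 1).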
    rng = np.random.RandomState(seed=7)
    states = tf.cast(rng.normal(size=10), dtype=tf.float32)
    tf.compat.v1.set_random_seed(2)
    samples = []
    for seed, state in enumerate(states):
      [state], _, _ = no_u_turn_sampler.kernel(
          target_log_prob_fn=target_log_prob_fn,
          current_state=[state],
          step_size=[0.3],
          seed=seed)
      samples.append(state)

    samples = np.array(samples)
    plot_with_expectation(samples,
                          dist=scipy.stats.norm(0, 1),
                          suffix="one_step_posterior_conservation_normal.png")
Code example #12
def main(argv):
    del argv  # unused
    if not FLAGS.skip_plots:
        if tf.io.gfile.exists(FLAGS.model_dir):
            tf.compat.v1.logging.warning(
                "Warning: deleting old log directory at {}".format(
                    FLAGS.model_dir))
            tf.io.gfile.rmtree(FLAGS.model_dir)
        tf.io.gfile.makedirs(FLAGS.model_dir)

    tf.compat.v1.enable_eager_execution()
    print("Number of available GPUs", tf.contrib.eager.num_gpus())

    if FLAGS.fake_data:
        features = tf.random.normal([20, 55])
        labels = tf.random.uniform([20], minval=0, maxval=2, dtype=tf.int32)
    else:
        features, labels = covertype()
    print("Data set size", features.shape[0])
    print("Number of features", features.shape[1])

    log_joint = ed.make_log_joint_fn(logistic_regression)

    @tf.function
    def target_log_prob_fn(coeffs):
        return log_joint(features=features, coeffs=coeffs, labels=labels)

    # Initialize using a sample from 20 steps of NUTS. This is roughly a MAP
    # estimate and is written explicitly to avoid differences in warm-starts
    # between different implementations (e.g., Stan, PyMC3).
    coeffs = tf.constant([
        +2.03420663e+00, -3.53567265e-02, -1.49223924e-01, -3.07049364e-01,
        -1.00028366e-01, -1.46827862e-01, -1.64167881e-01, -4.20344204e-01,
        +9.47479829e-02, -1.12681836e-02, +2.64442056e-01, -1.22087866e-01,
        -6.00568838e-02, -3.79419506e-01, -1.06668741e-01, -2.97053963e-01,
        -2.05253899e-01, -4.69537191e-02, -2.78072730e-02, -1.43250525e-01,
        -6.77954629e-02, -4.34899796e-03, +5.90927452e-02, +7.23133609e-02,
        +1.38526391e-02, -1.24497898e-01, -1.50733739e-02, -2.68872194e-02,
        -1.80925727e-02, +3.47936489e-02, +4.03552800e-02, -9.98773426e-03,
        +6.20188080e-02, +1.15002751e-01, +1.32145107e-01, +2.69109547e-01,
        +2.45785132e-01, +1.19035013e-01, -2.59744357e-02, +9.94279515e-04,
        +3.39266285e-02, -1.44057125e-02, -6.95222765e-02, -7.52013028e-02,
        +1.21171586e-01, +2.29205526e-02, +1.47308692e-01, -8.34354162e-02,
        -9.34122875e-02, -2.97472421e-02, -3.03937674e-01, -1.70958012e-01,
        -1.59496680e-01, -1.88516974e-01, -1.20889175e+00
    ])

    # Initialize step size via result of 50 warmup steps from Stan.
    step_size = 0.00167132

    coeffs_samples = []
    target_log_prob = None
    grads_target_log_prob = None
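    # Carry the target log-prob and its gradients from one kernel call to the
    # next; the kernel accepts them as optional inputs alongside the state.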
    for step in range(FLAGS.max_steps):
        print("Step", step)
        [
            [coeffs],
            target_log_prob,
            grads_target_log_prob,
        ] = no_u_turn_sampler.kernel(
            target_log_prob_fn=target_log_prob_fn,
            current_state=[coeffs],
            step_size=[step_size],
            seed=step,
            current_target_log_prob=target_log_prob,
            current_grads_target_log_prob=grads_target_log_prob)
        coeffs_samples.append(coeffs)

    if not FLAGS.skip_plots:
        for coeffs_sample in coeffs_samples:
            plt.plot(coeffs_sample.numpy())

        filename = os.path.join(FLAGS.model_dir, "coeffs_samples.png")
        plt.savefig(filename)
        print("Figure saved as", filename)
        plt.close()