import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm
from edward.models import Categorical
from edward.util import get_dims


def _test_log_prob(logits, n):
    # Categorical.log_prob should match a NumPy reference log-pmf.
    rv = Categorical(logits=logits)
    x = rv.sample(n).eval()
    x_tf = tf.constant(x, dtype=tf.int32)
    logits_val = logits.eval()
    assert np.allclose(rv.log_prob(x_tf).eval(),
                       categorical_logpmf_vec(x, logits_val))
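
# `categorical_logpmf_vec` is not defined in this excerpt. Below is a minimal
# NumPy sketch of what it presumably computes, assuming 1-D logits and a
# vector of integer samples (the real helper may also handle batched logits):
def categorical_logpmf_vec(x, logits):
    # Stable log-softmax, then gather the log-probability of each sample.
    z = logits - np.max(logits)
    log_probs = z - np.log(np.sum(np.exp(z)))
    return log_probs[x]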

def _test_sample_shape(logits, n):
    # The sample's shape should be n prepended to the logits' batch dims.
    x = Categorical(logits=logits)
    val_est = get_dims(x.sample(n))
    val_true = n + get_dims(logits)[:-1]
    assert val_est == val_true
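
# Example invocation of the two tests above (a sketch; running them under a
# default TensorFlow session is an assumption, following Edward's graph-mode
# test style):
with tf.Session().as_default():
    _test_log_prob(tf.constant([0.2, 0.5, 0.3]), [10])
    _test_sample_shape(tf.constant([0.2, 0.5, 0.3]), [10])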
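
# For context: `inference`, `x_`, `y_`, `keep_prob`, and the data arrays used
# below come from model code outside this excerpt. In Edward, `inference` is
# typically built and initialized along the lines of (an assumption):
#   inference = ed.KLqp({W: qW, b: qb}, data={y: y_})
#   inference.initialize(n_iter=N // batch, scale={y: float(N) / batch})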
for epoch in tqdm(range(EPOCH_NUM)):
    # Train: feed the data in minibatches and update the variational
    # inference with each new batch.
    perm = np.random.permutation(N)  # shuffle the data each epoch
    for i in range(inference.n_iter):  # one update per minibatch
        batch_x = train_x[perm[i * batch:(i + 1) * batch]]
        batch_y = train_y2[perm[i * batch:(i + 1) * batch]]
        info_dict = inference.update(feed_dict={
            x_: batch_x,
            y_: batch_y,
            keep_prob: 0.5
        })
        inference.print_progress(info_dict)

# Draw posterior-predictive samples of the labels for both splits
# (dropout disabled at prediction time via keep_prob = 1).
y_samples_train = y.sample(samples_num).eval(feed_dict={
    x_: train_x,
    keep_prob: 1
})
y_samples_test = y.sample(samples_num).eval(feed_dict={
    x_: test_x,
    keep_prob: 1
})

# Accuracy of each posterior-predictive sample against the true labels.
accy_test, accy_train = [], []
for i in range(samples_num):
    acc_test = (y_samples_test[i] == test_y2).mean() * 100
    acc_train = (y_samples_train[i] == train_y2).mean() * 100
    accy_test.append(acc_test)
    accy_train.append(acc_train)
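
# A convenience summary of the Monte Carlo accuracy estimates (an addition,
# not in the original excerpt):
print("test accuracy:  %.2f%% +/- %.2f%%" % (np.mean(accy_test), np.std(accy_test)))
print("train accuracy: %.2f%% +/- %.2f%%" % (np.mean(accy_train), np.std(accy_train)))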

plt.hist(accy_test)
plt.title("Histogram of prediction accuracies on the test data")
plt.xlabel("Accuracy")
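# Finish the plot (an addition: a y-axis label and an explicit show()).
plt.ylabel("Frequency")
plt.show()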