def nn_predict(params, inputs):
    # Forward pass: affine layer followed by a sigmoid nonlinearity at each layer.
    for W, b in params:
        outputs = np.dot(inputs, W) + b  # [N, D]
        # inputs = rbf(outputs)
        inputs = sigmoid(outputs)
        # inputs = relu(outputs)
    return outputs  # pre-activation outputs of the last layer, [N, dim_last]
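
A usage sketch for nn_predict. Everything here is illustrative: the layer sizes are made up, and sigmoid is assumed to be the numerically stable logistic commonly paired with autograd.numpy in code like this. params is a list of per-layer (W, b) pairs.

import autograd.numpy as np

def sigmoid(x):
    return 0.5 * (np.tanh(x / 2.0) + 1.0)  # numerically stable logistic

rng = np.random.RandomState(0)
# Two-layer net, 784 -> 128 -> 10 (illustrative sizes)
params = [(0.01 * rng.randn(784, 128), np.zeros(128)),
          (0.01 * rng.randn(128, 10), np.zeros(10))]
inputs = rng.randn(5, 784)           # batch of 5 flattened images
logits = nn_predict(params, inputs)  # [5, 10] pre-activation outputs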
Example no. 2
def print_perf(params, iter, gradient):
    if iter % 30 == 0:
        save_images(sigmoid(params),
                    'results/4/thetas.png',
                    vmin=0.0,
                    vmax=1.0)
        print(batched_loss(params, iter))
Example no. 3
def print_perf(params, iter, gradient):
    if iter % 30 == 0:
        # save_images(sigmoid(params), 'q4plot.png')
        fig = plt.figure(1)
        fig.clf()  # reuse figure 1, clearing the previous frame
        ax = fig.add_subplot(111)
        plot_images(sigmoid(params), ax)
        print(batched_loss(params, iter))
Example no. 4
def neglogprob(params, data):
    # Implement this as the solution for 4c!
    params = sigmoid(params)  # squash unconstrained params to Bernoulli means
    # Per-example, per-component likelihood: exp of the Bernoulli
    # log-likelihood, shape [N, K]
    result = np.exp(np.dot(data, np.log(params).T) +
                    np.dot(1 - data, np.log(1 - params).T))
    result = np.mean(result, axis=1)  # uniform mixture over the K components
    result = np.log(result)
    return -np.mean(result)  # average negative log marginal likelihood
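
Exponentiating hundreds of pixel log-likelihoods as neglogprob does underflows easily. A numerically safer sketch of the same quantity, computing the log-mean-exp over components directly in log space (it assumes the same sigmoid, np, and data shapes as above; neglogprob_stable is a name introduced here):

def neglogprob_stable(params, data):
    theta = sigmoid(params)
    # Per-component Bernoulli log-likelihoods, [N, K]
    loglik = (np.dot(data, np.log(theta).T) +
              np.dot(1 - data, np.log(1 - theta).T))
    # log-mean-exp over components, shifted by the per-row max for stability
    m = np.max(loglik, axis=1, keepdims=True)
    log_marginal = m[:, 0] + np.log(np.mean(np.exp(loglik - m), axis=1))
    return -np.mean(log_marginal)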
Example no. 5
def print_perf(params, iter, gradient):
    if iter % 30 == 0:
        save_images(sigmoid(params), 'q4plot.png')
        print(batched_loss(params, iter))
Example no. 6
def generate_from_prior(gen_params, num_samples, noise_dim, rs):
    latents = rs.randn(num_samples, noise_dim)  # z ~ N(0, I)
    return sigmoid(neural_net_predict(gen_params, latents))  # pixel means in (0, 1)
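
A usage sketch; gen_params, the noise_dim -> 784 generator shape, and the output filename are assumptions, while save_images is the helper used in the other examples:

import numpy.random as npr

rs = npr.RandomState(0)  # rs is a RandomState in these snippets
samples = generate_from_prior(gen_params, num_samples=20, noise_dim=10, rs=rs)
save_images(samples, 'prior_samples.png')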
Example no. 8
    # Per-component Bernoulli likelihood of a half-image (this snippet begins
    # inside a helper; it matches the top_prob_list used below).
    # Note: products of hundreds of probabilities can underflow; working in
    # log space (as in the stable neglogprob sketch above) is safer.
    results = []
    for c in range(theta.shape[0]):
        cur_theta = theta[c, 0:half_image.shape[0]]
        results.append(
            np.prod(cur_theta**half_image * (1 - cur_theta)**(1 - half_image)))
    return np.array(results)


# The optimizers provided by autograd can optimize lists, tuples, or dicts of parameters.
# You may use these optimizers for Q4, but implement your own gradient descent optimizer
# for Q3! (A minimal gradient-descent sketch follows this example.)
optimized_params = adam(objective_grad,
                        init_params,
                        step_size=0.2,
                        num_iters=10000,
                        callback=print_perf)

# Complete the bottom half of 20 training images from their observed top halves.
num_images = 20
result = np.zeros((num_images, 784))
images_from_train = train_images[0:num_images, :]
result[:, 0:392] = images_from_train[:, 0:392]  # keep the observed top half
for im in range(num_images):
    top_probs = top_prob_list(sigmoid(optimized_params),
                              images_from_train[im, 0:392])
    top_prob_sum = np.sum(top_probs)
    for i in range(392, 784):
        # Likelihood-weighted average of each component's pixel probability.
        numerator = 0.0
        for c in range(K):
            numerator += sigmoid(optimized_params[c, i]) * top_probs[c]
        result[im, i] = numerator / top_prob_sum
save_images(result, "q4d.jpg")
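
The comment in this example asks for a hand-rolled gradient descent optimizer for Q3 rather than autograd's adam. A minimal sketch that follows the same calling convention as adam, i.e. grad(params, iter) and callback(params, iter, gradient), so it can be swapped in where adam appears; that interface match is the only assumption:

def gradient_descent(grad, init_params, callback=None, num_iters=100, step_size=0.1):
    # Plain full-batch gradient descent with the autograd-optimizer interface.
    params = init_params
    for i in range(num_iters):
        g = grad(params, i)
        if callback:
            callback(params, i, g)
        params = params - step_size * g
    return params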
Example no. 9
    def print_perf(params, iter, gradient):
        if iter % 30 == 0:
            save_images(sigmoid(params),
                        'results/4/thetas.png',
                        vmin=0.0,
                        vmax=1.0)
            print(batched_loss(params, iter))

    # The optimizers provided by autograd can optimize lists, tuples, or dicts of parameters.
    # You may use these optimizers for Q4, but implement your own gradient descent optimizer for Q3!
    optimized_params = adam(objective_grad,
                            theta,
                            step_size=0.2,
                            num_iters=10000,
                            callback=print_perf)
    theta = sigmoid(optimized_params)
    np.save('results/4/theta.npy', theta)

    def xi_given_xtop(xtop):
        # p(x_top | c) for each component, via exp of the Bernoulli log-likelihood
        p_top_c = np.ndarray(shape=(K))
        for c in range(K):
            p_top_c[c] = np.exp(
                np.log(theta[c, :392]**xtop *
                       (1 - theta[c, :392])**(1 - xtop)).sum())
        # Likelihood-weighted average of the bottom-half pixel means
        p = (theta[:, 392:] * p_top_c.reshape((-1, 1))).sum(axis=0) / p_top_c.sum()
        return p
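    # For reference: with uniform mixture weights over the K components,
    # xi_given_xtop computes the posterior predictive mean of each
    # bottom-half pixel i,
    #   p(x_i = 1 | x_top) = sum_c theta[c, i] * p(x_top | c) / sum_c p(x_top | c)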

    samples = train_images[:20]
    p_bottom = np.ndarray(shape=(20, 392))
    for i in range(20):
        ...  # (snippet truncated in the source)
Example no. 10
        # (This snippet begins mid-loop.) Broadcast the observed top half
        # across all mixture components, then take each component's Bernoulli
        # likelihood of that top half.
        x_top_temp = np.full(theta_top_temp.shape, x_top_temp)
        first = theta_top_temp**x_top_temp * (1 - theta_top_temp)**(1 - x_top_temp)
        first = np.prod(first, axis=1)
        first = np.full(theta_bottom_temp.T.shape, first).T
        x_bottom_temp = np.full(theta_bottom_temp.shape, x_bottom_temp)  # unused below
        second = theta_bottom_temp
        # Likelihood-weighted average of the bottom-half pixel means.
        second = np.sum(first * second, axis=0)
        first = np.sum(first, axis=0)
        result[i, int(theta.shape[1] / 2):] = second / first
        result[i, :int(theta.shape[1] / 2)] = x[:int(theta.shape[1] / 2)]
    return result


if __name__ == '__main__':
    N_data, train_images, train_labels, test_images, test_labels = load_mnist()

    # objective_grad, init_params, and print_perf are presumably defined
    # earlier in the source file (not shown in this snippet).
    optimized_params = adam(objective_grad,
                            init_params,
                            step_size=0.2,
                            num_iters=10000,
                            callback=print_perf)
    optimized_params = sigmoid(optimized_params)  # map to Bernoulli means

    save_images(optimized_params, '4_c.jpg')

    # Complete the bottom halves of 20 randomly chosen training images.
    picked_images = train_images[
        np.random.permutation(train_images.shape[0])[:20], :]
    images = plot_bottom_half(picked_images, optimized_params)
    save_images(images, '4_d.jpg')