Example #1
def main():
    logging.basicConfig(level=logging.INFO)

    (x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()

    x_train = x_train / 255
    x_test = x_test / 255

    dataset = tf.data.Dataset.from_tensor_slices(x_train.reshape(-1, 28 * 28).astype(np.float32))
    dataset = dataset.shuffle(1024, reshuffle_each_iteration=True)

    rbm = BBRBM(n_visible=28 * 28, n_hidden=64)
    rbm.fit(dataset, epoches=100, batch_size=10)

    for i in np.random.choice(np.arange(x_test.shape[0]), 5, replace=False):
        x = x_test[i]
        x_tensor = tf.convert_to_tensor(x.reshape(1, 28 * 28), dtype=tf.float32)
        x_reconstructed_tensor = rbm.reconstruct(x_tensor)
        x_reconstructed = x_reconstructed_tensor.numpy().reshape(28, 28)

        Image.fromarray((x * 255).astype(np.uint8)).save(f'{i}_original.png')
        Image.fromarray((x_reconstructed * 255).astype(np.uint8)).save(f'{i}_reconstructed.png')
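
# Imports and entry point assumed by this example (they would normally sit at
# the top of the file; the tfrbm module path is an assumption, not shown in
# the excerpt above):
import logging

import numpy as np
import tensorflow as tf
from PIL import Image
from tfrbm import BBRBM  # assumed module for the TF2 BBRBM implementation

if __name__ == '__main__':
    main()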
Example #2
    #show_digit(image.reshape(28, 28), "Original image")
    #print("image label", mnist.test.labels[j])
    a = image.reshape(28, 28)
    c = image.reshape(28, 28)
    #show_digit(image.reshape(28,28))
    # img = a[0:16,0:28] #crop the image
    img = a * mask_b  # mask_b (defined outside this excerpt) zeroes the region to be reconstructed

    # t1 (the first 10 units, defined outside this excerpt) is prepended to the
    # flattened image to form the 794-dimensional visible vector
    img = np.concatenate((t1, img.flatten()), axis=0)
    img_org = img
    #print("shape of org img", img_org.shape)
    #imga = random_image  #imga = img
    #show_digit(img_org[10:794].reshape(28, 28), "cropped input")
    #reconstruct the image over N Monte Carlo (Gibbs) steps
    for i in range(400):
        image_rec1 = bbrbm.reconstruct(img.reshape(1, -1), 0)
        #print("shape of rec1", image_rec1.shape)
        image_rec1 = image_rec1.reshape(794)
        if i > 400 - num_avg - 1:  # keep the last num_avg samples for averaging
            store_recon_vu[i - (400 - num_avg)] = image_rec1
            #print("stored labels : ", store_labels)
            #print("index i : ", i)

        #print("new shape of rec1", image_rec1.shape)
        t1 = image_rec1[0:10]  # reconstructed first 10 units
        rec_backup = image_rec1
        image_rec1 = image_rec1[10:794].reshape(28, 28)
        #print("size of a", a.size)
        # clamp the known pixels (img_org) and take the rest from the
        # reconstruction, selected by mask_c
        img = img_org + np.concatenate(
            (t1, (image_rec1 * mask_c).flatten()), axis=0)
        #show_digit(image_rec1.reshape(30, 30), "returned image")
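
    # After the loop, the stored reconstructions can be averaged into a final
    # estimate (a sketch; store_recon_vu is assumed to hold num_avg vectors of
    # length 794, as filled in above):
    x_avg = store_recon_vu.mean(axis=0)
    show_digit(x_avg[10:794].reshape(28, 28), "averaged reconstruction")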
Example #3
#crop dimensions
x = 6
y = 6
a = image.reshape(28, 28)
img = a[0:16, 0:28]  #crop the image
#img = cropND(a,(x,y))
#show cropped image
#print(img)
plt.imshow(a)
plt.show()
plt.imshow(img)
plt.show()
#pad the image back to 28x28 (784 values) before feeding it to the RBM
imge = np.pad(img, [(6, ), (0, )], mode='constant')
#print(imge)
plt.imshow(imge)
plt.show()
#reconstruct
image_rec = bbrbm.reconstruct(imge.reshape(1, -1))
#show_digit(image)
#show_digit(image_rec)
#plot reconstructed image
plt.imshow(image_rec.reshape(28, 28))
plt.show()

############################
#save the weights
filename = 'weights'
name = 'bbrbm'
bbrbm.save_weights(filename, name)
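
# The saved weights can later be restored into a model with the same layout
# (a sketch; it assumes the implementation exposes a matching load_weights and
# that the hidden-layer size below matches the trained model):
bbrbm_restored = BBRBM(n_visible=784, n_hidden=64)
bbrbm_restored.load_weights(filename, name)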
Example #4
digits_total_counts = np.ones(10, dtype=np.int32) * 10
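
# The arrays below are referenced but not defined in this excerpt; a plausible
# set-up, assuming 28x28 images flattened to 784 values and 10 samples per digit:
digits_current_counts = np.zeros(10, dtype=np.int32)
mnist_test_images_samples = np.zeros((100, 784))
mnist_test_images_samples_rec = np.zeros((100, 784))
mnist_test_images_samples_plt = np.zeros((200, 784))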

for idx in range(mnist_test_images.shape[0]):
    image = mnist_test_images[idx, ]
    label = mnist_test_labels[idx, ]

    # labels are one-hot vectors; collect up to 10 samples of each digit
    for digit in range(10):
        digit_label = np.zeros(10)
        digit_label[digit] = 1

        if (label == digit_label).all() and digits_current_counts[digit] < 10:
            nrow = digits_current_counts[digit]
            sample_idx = nrow * 10 + digit
            mnist_test_images_samples[sample_idx, ] = image
            mnist_test_images_samples_rec[sample_idx, ] = \
                bbrbm.reconstruct(image.reshape([1, -1]))
            mnist_test_images_samples_plt[sample_idx * 2, ] = \
                mnist_test_images_samples[sample_idx, ]
            mnist_test_images_samples_plt[sample_idx * 2 + 1, ] = \
                mnist_test_images_samples_rec[sample_idx, ]
            digits_current_counts[digit] += 1

    if (digits_current_counts == digits_total_counts).all():
        break


# %%
# function for plotting MNIST data
def plot_mnist(mnist_images, nrows, ncols, cmap='gray'):
    fig = plt.figure(figsize=(ncols, nrows))
    gs = gridspec.GridSpec(nrows, ncols)
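    # Sketch of a plausible continuation (the original function body is cut off
    # in this excerpt):
    for i, image in enumerate(mnist_images):
        ax = fig.add_subplot(gs[i])
        ax.imshow(image.reshape(28, 28), cmap=cmap)
        ax.axis('off')
    plt.show()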
Example #5
#names in case saving reconstruction is desired
fname = ["1-3", "2-3", "3-3", "4-3", "5-3", "6-3", "7-3", "8-3", "9-3", "10-3",
         "11-3", "12-3", "13-3", "14-3", "15-3", "16-3", "17-3", "18-3", "19-3",
         "20-3"]

#802936312
for j in range(1):

    #reconstruct the image over N Monte Carlo (Gibbs) steps
    mnist_labeled = mnist_labeled_backup * mask_1a  # clamp the known units via mask_1a
    show_digit(mask_1a[0][0:784].reshape(28, 28), "mask_1a")
    show_digit(mask_1a[0][784:794].reshape(1, -1), "mask_1a labels")
    show_digit(mask_1b[0][0:784].reshape(28, 28), "mask_1b")
    show_digit(mnist_labeled[0][0:784].reshape(28, 28), "cropped input image")
    show_digit(mask_1b[0][784:794].reshape(1, -1), "mask_1b labels")
    show_digit(mnist_labeled[0][784:794].reshape(1, -1), "cropped labels")
    for i in range(100):
        image_rec1 = bbrbm.reconstruct(mnist_labeled)

        #print("shape of rec1", image_rec1.shape)
        #mnist_labeled = image_rec1  # image_rec1.reshape(794,)

        # keep the clamped units from the original and take the rest from the
        # reconstruction
        mnist_labeled = mnist_labeled_backup * mask_1a + image_rec1 * mask_1b
        #show_digit(image_rec1[0][784:794].reshape(1, -1), "returned labels")
        #show_digit(image_rec1[0][0:784].reshape(28, 28), "recreated image")
        #show_digit(mnist_labeled[0][0:784].reshape(28, 28), "to feed back image")
        #show_digit(mnist_labeled[0][784:794].reshape(1, -1), "to feed back labels")
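
    # After the Gibbs iterations the inferred class can be read from the label
    # units (a sketch; it assumes the 784-pixel + 10-label layout used above):
    predicted_digit = np.argmax(mnist_labeled[0][784:794])
    print("predicted digit:", predicted_digit)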

#plot the reconstruction results
for n in range(20):
    fig = plt.figure(figsize=(10, 10))

    # set the row and column counts for the figure
Example #6
def run(first_hidden, second_hidden, learning_rate, epochs, batch_size):

    x_train, x_test, y_train, y_test = get_datasets()

    # ------------------FIRST HIDDEN-------------------

    weight_matrix = generate_weight_matrix(x_train.shape[1], first_hidden)
    visible_biases = np.zeros(shape=(784))
    hidden_biases = np.zeros(shape=(first_hidden))

    bbrbm = BBRBM(
        n_visible=784,
        n_hidden=first_hidden,
        learning_rate=learning_rate,
        momentum=0,
        # momentum=0.95,
        use_tqdm=True)

    bbrbm.set_weights(weight_matrix, visible_biases, hidden_biases)
    errs = bbrbm.fit(x_train, n_epoches=epochs, batch_size=batch_size)

    first_weight_matrix = np.asarray(bbrbm.get_weight_matrix())
    first_visible_biases = bbrbm.get_visible_biases()
    first_hidden_biases = bbrbm.get_hidden_biases()

    # hidden pre-activations of the test set under the trained first layer
    first_output = np.dot(x_test, first_weight_matrix)

    for i in range(first_output.shape[0]):

        first_output[i, :] += first_hidden_biases

        for j in range(first_output.shape[1]):

            # sigmoid(relu(x))
            first_output[i, j] = sigmoid(np.maximum(0, first_output[i, j]))
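
    # Equivalent vectorized form of the two loops above (a sketch; it assumes
    # the local sigmoid broadcasts over arrays):
    # first_output = sigmoid(np.maximum(0, np.dot(x_test, first_weight_matrix)
    #                                      + first_hidden_biases))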

    # ------------------SECOND HIDDEN-------------------

    bbrbm_second = BBRBM(
        n_visible=first_hidden,
        n_hidden=784,
        learning_rate=learning_rate,
        momentum=0,
        # momentum=0.95,
        use_tqdm=True)

    trained_outputs = np.ndarray(shape=(x_train.shape[0], x_train.shape[1]))
    for i in range(trained_outputs.shape[0]):

        trained_outputs[i, :] = bbrbm.reconstruct(x_train[i].reshape(1, -1))
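
    # If the implementation accepts batches (an assumption about the API), the
    # loop above can be replaced by a single call:
    # trained_outputs = bbrbm.reconstruct(x_train)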

    sgd = optimizers.SGD(lr=0.3, decay=0, momentum=0, nesterov=True)
    model = Sequential()
    model.add(
        Dense(10,
              input_dim=784,
              activation="softmax",
              kernel_initializer=keras.initializers.RandomNormal(mean=0.0,
                                                                 stddev=0.01),
              bias_initializer='zeros'))
    model.compile(optimizer=sgd,
                  loss='mean_squared_error',
                  metrics=['accuracy'],
                  loss_weights=None,
                  sample_weight_mode=None,
                  weighted_metrics=None,
                  target_tensors=None)

    earlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0,
                                                  patience=0,
                                                  verbose=0,
                                                  mode='auto')

    model.fit(trained_outputs,
              np.asarray(y_train),
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=True,
              callbacks=[earlyStopping],
              validation_data=(x_test, y_test))

    score = model.evaluate(x_test, np.asarray(y_test), verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # vis(x_train, trained_outputs, 20)

    # recompute the hidden activations with the trained first-layer parameters
    trained_outputs = np.dot(x_train, first_weight_matrix)
    for i in range(trained_outputs.shape[0]):

        trained_outputs[i, :] += first_hidden_biases

    debug = 0

    # test = np.dot(x_test, weight_matrix)
    #
    # preds = []
    # for i in range(test.shape[0]):
    #
    #     test[i,:]+=hidden_biases

    # output_weight_matrix = generate_output_weight_matrix(test.shape[1])
    # sgd = optimizers.SGD(lr=0.01, decay=0, momentum=0, nesterov=True)
    # model = Model(inputs=test, outputs=y_test)

    model.compile(optimizer=sgd,
                  loss='mean_squared_error',
                  metrics=['accuracy'],
                  loss_weights=None,
                  sample_weight_mode=None,
                  weighted_metrics=None,
                  target_tensors=None)

    debug = 0