Exemplo n.º 1
0
def train_entangled_pong_network():
    """Train the entangled-latent convolutional VAE on recorded Pong frames.

    Builds the model, compiles it with Adam, and fits it over the recorded
    Atari frame generators, writing summaries under the experiment's
    log directory.
    """
    # training configuration
    input_shape = (1, 84, 84)
    filters = 32
    kernel_size = 6
    beta = 1.0  # entangled latent space
    epochs = 10
    batch_size = 1

    # run name used in the log-directory path
    name = 'cvae_atari_entangled_pong'

    # hyperparameters recorded in the log-directory name
    hp_dictionary = dict(
        epochs=epochs,
        batch_size=batch_size,
        beta=beta,
        filters=filters,
        kernel_size=kernel_size,
        loss='vae_loss',
        optimizer='adam',
    )

    # where TensorBoard summaries are written
    hp_string = utils.build_hyperparameter_string(name, hp_dictionary)
    log_dir = './summaries/' + experiment + '/' + hp_string + '/'

    # build and compile the model
    vae = PongEntangledConvolutionalLatentVAE(input_shape,
                                              log_dir,
                                              filters=filters,
                                              kernel_size=kernel_size,
                                              beta=beta)
    from keras import optimizers
    vae.compile(optimizer=optimizers.Adam(lr=1e-1))

    # frame generators over the recorded Atari episodes
    train_directory = './atari_agents/record/train/'
    test_directory = './atari_agents/record/test/'
    train_generator = utils.atari_generator(train_directory, batch_size=batch_size)
    test_generator = utils.atari_generator(test_directory, batch_size=batch_size)
    train_size = utils.count_images(train_directory)
    test_size = utils.count_images(test_directory)

    vae.print_model_summaries()

    # one full pass over each set per epoch
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=int(train_size / batch_size),
                      validation_data=test_generator,
                      validation_steps=int(test_size / batch_size))
def train_dense_latent_pong_no_batchnorm(beta):
    """Train the dense-latent Pong VAE without batch normalisation.

    Args:
        beta: KL-divergence weight for the VAE loss; it is also baked
            into the run name so runs with different weights get
            distinct log directories.
    """
    # training configuration
    input_shape = (1, 84, 84)
    epochs = 10
    batch_size = 1
    filters = 32
    kernel_size = 6
    pre_latent_size = 128
    latent_size = 32

    # run name encodes beta to keep log directories distinct
    name = 'cvae_atari_dense_latent_pong_no_batchnorm_beta_' + str(beta)

    # hyperparameters recorded in the log-directory name
    hp_dictionary = dict(
        epochs=epochs,
        batch_size=batch_size,
        beta=beta,
        filters=filters,
        kernel_size=kernel_size,
        loss='vae_loss',
        optimizer='adam',
    )

    # where TensorBoard summaries are written
    log_dir = ('./summaries/' + experiment + '/'
               + utils.build_hyperparameter_string(name, hp_dictionary) + '/')

    # build and compile the model
    vae = DenseLatentPongNoBatchNorm(input_shape,
                                     log_dir,
                                     filters=filters,
                                     kernel_size=kernel_size,
                                     pre_latent_size=pre_latent_size,
                                     latent_size=latent_size,
                                     beta=beta)
    from keras import optimizers
    vae.compile(optimizer=optimizers.Adam(lr=1e-3))

    # frame generators over the recorded Atari episodes
    train_directory = './atari_agents/record/train/'
    test_directory = './atari_agents/record/test/'
    train_generator = utils.atari_generator(train_directory, batch_size=batch_size)
    test_generator = utils.atari_generator(test_directory, batch_size=batch_size)
    train_size = utils.count_images(train_directory)
    test_size = utils.count_images(test_directory)

    vae.print_model_summaries()

    # one full pass over each set per epoch
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=int(train_size / batch_size),
                      validation_data=test_generator,
                      validation_steps=int(test_size / batch_size))
Exemplo n.º 3
0
def load_dataset():
    """Load and preprocess the train/test image folders.

    Walks each class sub-folder of ``train_images/`` and ``test_images/``
    (sorted, so label indices are assigned deterministically), loads every
    image, preprocesses it, and fills preallocated dense arrays.

    Returns:
        Tuple ``(train_data, test_data, train_labels, test_labels)`` where
        the data arrays have shape (n, 224, 224, 3) float32 and the label
        arrays hold the class index of each image.
    """
    train_data_dir = 'train_images/'
    test_data_dir = 'test_images/'
    width = 224
    height = 224
    channels = 3

    # assumes count_images returns (n_train, n_test) matching the
    # folders walked below -- TODO confirm
    n_train_images, n_test_images = count_images(train_data_dir, test_data_dir)

    train_data = np.zeros((n_train_images, width, height, channels), dtype=np.float32)
    test_data = np.zeros((n_test_images, width, height, channels), dtype=np.float32)
    train_labels = np.empty((n_train_images))
    test_labels = np.empty((n_test_images))

    def _fill(data_dir, data, labels, split):
        # Read every image of every class folder into the preallocated
        # arrays; the label is the folder's position in sorted order.
        i = 0
        for label, folder in enumerate(sorted(os.listdir(data_dir))):
            # FIX: converted Python 2 print statements to print() calls,
            # consistent with the rest of the file; output is unchanged.
            print('currently reading ' + split + ' images from folder ', label, ' :', folder)
            for files in os.listdir(data_dir + folder):
                current_image = load_img(data_dir + folder + '/' + files)
                current_image = img_to_array(current_image)
                current_image = preprocess(current_image)
                data[i] = current_image
                labels[i] = label
                i += 1

    # FIX: the original duplicated this loop for train and test; a single
    # helper removes the duplication without changing behavior.
    _fill(train_data_dir, train_data, train_labels, 'train')
    _fill(test_data_dir, test_data, test_labels, 'test')
    return train_data, test_data, train_labels, test_labels
Exemplo n.º 4
0
def train_average_filter(beta):
    """Train the shallow average-filter convolutional-latent VAE on Atari frames.

    Args:
        beta: KL-divergence weight for the VAE loss, recorded in the
            hyperparameter string of the log directory.
    """
    # training configuration
    input_shape = (1, 84, 84)
    filters = 32
    latent_filters = 8
    kernel_size = 7
    epochs = 12
    batch_size = 1
    lr = 1e-4

    # run name used in the log-directory path
    name = 'cvae_atari'

    # hyperparameters recorded in the log-directory name
    hp_dictionary = dict(
        epochs=epochs,
        batch_size=batch_size,
        beta=beta,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        lr=lr,
        loss='vae_loss',
        optimizer='adam',
    )

    # where TensorBoard summaries are written
    hp_string = utils.build_hyperparameter_string(name, hp_dictionary)
    log_dir = './summaries/' + experiment + '/' + hp_string + '/'

    # build and compile the model
    vae = ConvolutionalLatentAverageFilterShallowVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        beta=beta)
    from keras import optimizers
    vae.compile(optimizer=optimizers.Adam(lr=lr))

    # frame generators over the recorded Atari episodes
    train_directory = './atari_agents/record/train/'
    test_directory = './atari_agents/record/test/'
    train_generator = utils.atari_generator(train_directory, batch_size=batch_size)
    test_generator = utils.atari_generator(test_directory, batch_size=batch_size)
    train_size = utils.count_images(train_directory)
    test_size = utils.count_images(test_directory)

    vae.print_model_summaries()

    # one full pass over each set per epoch
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=int(train_size / batch_size),
                      validation_data=test_generator,
                      validation_steps=int(test_size / batch_size))
def train_winner_takes_all(beta):
    """Train the winner-takes-all VAE on recorded Atari frames.

    Unlike the other trainers in this file, data and summaries live under
    /vol/bitbucket/dgs13/ and the generators are given img_channels.

    Args:
        beta: KL-divergence weight for the VAE loss, recorded in the
            hyperparameter string of the log directory.
    """
    # training configuration
    input_shape = (1, 84, 84)
    filters = 32
    latent_filters = 8
    kernel_size = 6
    epochs = 5
    batch_size = 1
    lr = 1e-4
    img_channels = 1

    # run name used in the log-directory path
    name = 'cvae_atari_winner_takes_all'

    # hyperparameters recorded in the log-directory name
    hp_dictionary = dict(
        epochs=epochs,
        batch_size=batch_size,
        beta=beta,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        img_channels=img_channels,
        lr=lr,
        loss='vae_loss',
        optimizer='adam',
    )

    # where TensorBoard summaries are written
    hp_string = utils.build_hyperparameter_string(name, hp_dictionary)
    log_dir = '/vol/bitbucket/dgs13/summaries/' + experiment + '/' + hp_string + '/'

    # build and compile the model
    vae = WinnerTakesAll(input_shape,
                         log_dir,
                         filters=filters,
                         latent_filters=latent_filters,
                         kernel_size=kernel_size,
                         img_channels=img_channels,
                         beta=beta)
    from keras import optimizers
    vae.compile(optimizer=optimizers.Adam(lr=lr))

    # frame generators over the recorded Atari episodes
    train_directory = '/vol/bitbucket/dgs13/record/train/'
    test_directory = '/vol/bitbucket/dgs13/record/test/'
    train_generator = utils.atari_generator(train_directory,
                                            batch_size=batch_size,
                                            img_channels=img_channels)
    test_generator = utils.atari_generator(test_directory,
                                           batch_size=batch_size,
                                           img_channels=img_channels)
    train_size = utils.count_images(train_directory)
    test_size = utils.count_images(test_directory)

    vae.print_model_summaries()

    # one full pass over each set per epoch
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=int(train_size / batch_size),
                      validation_data=test_generator,
                      validation_steps=int(test_size / batch_size))
Exemplo n.º 6
0
def main(save_dir):
    """Interactive capture tool: preview Zense depth/IR and PureThermal2
    thermal streams, and save synchronized frames on demand.

    Keys: 's' (or the "Save Result Image" button) saves the current
    images; the "Clear" button empties save_dir; 'q' quits.

    Args:
        save_dir: directory where captured images are written.
    """
    make_save_dir(save_dir)
    zense_camera = PyZenseManager()
    pt2_camera = PyPureThermal2()
    image_width, image_height = zense_camera.image_size

    # previews are 3/4-scale; the window holds a 3 x 1.5 grid of them
    # plus a 540-px wide side panel
    res_image_width = int(image_width * 0.75)
    res_image_height = int(image_height * 0.75)
    window_image_width = int(res_image_width * 3)
    window_image_height = int(res_image_height * 1.5)

    cvui.init("capture")
    frame = np.zeros((window_image_height, window_image_width + 540, 3),
                     np.uint8)
    # resume the counter from images already present in save_dir
    captured_frame_count = count_images(save_dir)

    while True:
        key = cv2.waitKey(10)
        frame[:] = (49, 52, 49)  # dark-grey background

        status = zense_camera.update(verbose=UPDATE_VERBOSE)
        status &= pt2_camera.update()
        if status:
            # Get Images
            ir_image = zense_camera.ir_image.copy()
            depth_image = zense_camera.depth_image.copy()

            thermal_image = pt2_camera.thermal_image.copy()
            thermal_image_colorized = pt2_camera.thermal_image_colorized.copy()

            # Visualize Images
            frame = draw_frames(frame, depth_image, ir_image,
                                thermal_image_colorized, res_image_width,
                                res_image_height)
            if cvui.button(frame, 50, window_image_height - 50, 130, 50,
                           "Save Result Image") or key & 0xFF == ord("s"):
                save_images(
                    depth_image,
                    ir_image,
                    thermal_image,
                    thermal_image_colorized,
                    save_dir,
                )
                captured_frame_count += 1

            if cvui.button(frame, 200, window_image_height - 50, 130, 50,
                           "Clear"):
                clean_save_dir(save_dir)
                captured_frame_count = 0

            cvui.printf(
                frame,
                900,
                window_image_height - 30,
                0.8,
                0x00FF00,
                "Number of Captured Images : %d",
                captured_frame_count,
            )

        # FIX: the quit check and the window refresh used to sit inside
        # `if status:`; when a camera update failed the window froze and
        # 'q' could never be detected, leaving an inescapable busy loop.
        if key & 0xFF == ord("q"):
            break

        cvui.update()
        cvui.imshow("capture", frame)

    cv2.destroyAllWindows()
Exemplo n.º 7
0
# Classifier head: dropout for regularisation, then a softmax over the classes.
model.add(Dropout(rate=0.5))
model.add(Dense(units=len(classes)))
model.add(Activation('softmax'))

# Plain SGD with categorical cross-entropy; per-batch accuracy is tracked.
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

# Write an architecture diagram next to the script.
plot_model(model, to_file='model.png', show_shapes=True)
"""
    Training the network
"""

# Steps are derived from the image counts so each epoch covers (roughly,
# due to integer division) every image once.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=count_images(train_dir, classes) // batch_size,
    epochs=200,
    validation_data=validation_generator,
    validation_steps=count_images(test_dir, classes) // batch_size,
    workers=4,
    use_multiprocessing=True)
model.save(
    model_path)  # always save your weights after training or during training
"""
    Visualize training
"""

# history.history maps metric names (e.g. 'loss', 'acc') to per-epoch lists.
print(history.history.keys())

# Figure set up here; the actual plotting presumably continues below this chunk.
plt.figure(1, figsize=(10, 10))
Exemplo n.º 8
0
# --- training configuration ---
BATCH_SIZE = 50
N_LABELS = 2
DROPOUT = 0.50  
LOGS_PATH = './tensorflow_logs/'
WEIGHT_PATH = 'vgg16_weights.npz'  # pretrained VGG16 weights (npz archive)
TRAINSET_PATH = 'train.csv'
VALSET_PATH ='val.csv'
ckpt_dir = "./ckpt_dir_config1"

# Input pipelines: training batches cycle for N_EPOCHS, validation for one pass.
# NOTE(review): shuffle=False on the training set -- confirm this is intended.
train_image_batch, train_label_batch = make_batches(TRAINSET_PATH, N_EPOCHS, IMAGE_HEIGHT,
                                                    IMAGE_WIDTH, BATCH_SIZE, shuffle=False, training = True)
val_image_batch, val_label_batch = make_batches(VALSET_PATH, 1, IMAGE_HEIGHT,
                                                IMAGE_WIDTH, BATCH_SIZE, shuffle=False, training = False)
                                                
# Count the number of training and test examples
num_train = count_images(TRAINSET_PATH)
num_val = count_images(VALSET_PATH)
print ('Train_images: ', num_train)
print ('Validation_images: ', num_val)

# Placeholders for tensorflow graph (TF1-style graph mode; fed at session run time)
learning_rate = tf.placeholder( tf.float32, [])
images_tf = tf.placeholder( tf.float32, [None, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name="images")
labels_tf = tf.placeholder( tf.int64, [None], name='labels')  # the dimensions could be [None,N_CLASSES]

# Build VGG16 and run inference in training mode (dropout active).
network = VGG16(N_LABELS, WEIGHT_PATH)
output = network.inference(images_tf, is_training=True, dropout=DROPOUT)

with tf.name_scope('Loss'):
    # NOTE(review): positional logits/labels and tf.scalar_summary are
    # pre-TF1.0 APIs; TF>=1.0 requires keyword args (labels=..., logits=...)
    # and tf.summary.scalar -- confirm the pinned TensorFlow version.
    loss_tf = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( output, labels_tf ), name='loss_tf')
    loss_summary = tf.scalar_summary("loss", loss_tf)