Example #1
def get_data_and_labels():
    test_dataset, train_dataset, validation_dataset = load_test_train_validation_ds(
        reshape=(-1, image_size, image_size, num_channels))
    train_labels = convert_from_one_dim_labels(train_dataset.label, NUM_LABELS)

    train_labels = train_labels.astype(np.float32)
    train_data_resized = resize_all_img_in_dataset(
        train_dataset.data.astype(np.float32), (32, 32))

    validation_labels = convert_from_one_dim_labels(
        validation_dataset.label, NUM_LABELS).astype(np.float32)

    validation_data_resized = resize_all_img_in_dataset(
        validation_dataset.data.astype(np.float32), (32, 32))

    test_labels = convert_from_one_dim_labels(
        test_dataset.label, NUM_LABELS).astype(np.float32)
    test_data_resized = resize_all_img_in_dataset(
        test_dataset.data.astype(np.float32), (32, 32))
    return {
        'test_labels': test_labels,
        'train_labels': train_labels,
        'validation_labels': validation_labels,
        'validation_data_resized': validation_data_resized,
        'test_data_resized': test_data_resized,
        'train_data_resized': train_data_resized
    }
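
The two helpers above appear only by name on this page. Here is a minimal sketch of what they plausibly do (one-hot encoding and per-image resizing), assuming 4-D NumPy image batches of shape (N, H, W, C); both bodies are assumptions, not the original implementations:

import numpy as np
from scipy.ndimage import zoom


def convert_from_one_dim_labels(labels, num_labels):
    # Assumed behaviour: integer class ids -> one-hot rows.
    one_hot = np.zeros((len(labels), num_labels), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels.astype(int)] = 1.0
    return one_hot


def resize_all_img_in_dataset(images, size):
    # Assumed behaviour: resize every (N, H, W, C) image to `size` = (H, W).
    factors = (1, size[0] / images.shape[1], size[1] / images.shape[2], 1)
    return zoom(images, factors, order=1)  # order=1: bilinear interpolation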
Example #2
def one_layer_relu(learning_rate, num_steps, train_subset):
    # Load dataset:
    test_dataset, train_dataset, validation_dataset = load_test_train_validation_ds()

    # Variables
    tf_train_data, tf_train_labels = construct_tf_placeholder(train_subset)

    train_labels = convert_from_one_dim_labels(train_dataset.label, NUM_LABELS)

    # Constants validation set
    tf_valid_data, tf_valid_labels = construct_tf_constant(validation_dataset.data,
                                                           validation_dataset.label)
    valid_labels = convert_from_one_dim_labels(validation_dataset.label, NUM_LABELS)

    # Constant test set
    tf_test_data, tf_test_label = construct_tf_constant(test_dataset.data, test_dataset.label)
    test_labels = convert_from_one_dim_labels(test_dataset.label, NUM_LABELS)

    # Construct architecture and get predictions:
    logits = inference(tf_train_data)

    # Get loss:
    loss = loss_func(logits, tf_train_labels)
    # loss += 0.01*regularizers
    training_op = training(loss, learning_rate)
    eval_op = evaluate(logits, tf_train_labels)

    # Predictions:
    # test_prediction = inference(tf_test_data)

    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver()

    test_dict = {tf_train_data: test_dataset.data, tf_train_labels: test_labels}

    with tf.Session() as session:
        summary_writer = tf.summary.FileWriter('logs/', session.graph)
        tf.global_variables_initializer().run()
        logging.info('Initialization')
        for step in range(num_steps):
            # Draw a random mini-batch; low=0 so the first example can be sampled.
            random_indx = np.random.randint(0, train_dataset.data.shape[0], size=train_subset)

            batch_data = train_dataset.data[random_indx]
            batch_labels = train_labels[random_indx]

            feed_dict = {tf_train_data: batch_data, tf_train_labels: batch_labels}
            _, loss_value = session.run([training_op, loss], feed_dict=feed_dict)
            if step % 500 == 0:

                val_feed_dict = {
                    tf_train_data: validation_dataset.data,
                    tf_train_labels: valid_labels
                }
                simple_accuracy = session.run(eval_op, feed_dict=val_feed_dict)
                logging.info('Loss at step %d: %f, accuracy: %f', step, loss_value, simple_accuracy)

                summary_str = session.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                saver.save(session, 'logs/model-checkpoint', global_step=step)
        simple_accuracy = session.run(eval_op, feed_dict=test_dict)
        logging.info('Test accuracy: %.1f%%', 100 * simple_accuracy)
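
The graph-building helpers used above (construct_tf_placeholder, inference, loss_func, training) are not shown on this page. A minimal sketch under stated assumptions: flattened 784-pixel inputs (matching the placeholder in Example #3), one hidden ReLU layer, softmax cross-entropy, and plain gradient descent, written against the TensorFlow 1.x API the example targets:

import tensorflow as tf

IMAGE_PIXELS = 784  # assumption: flattened 28x28 inputs, as in Example #3


def construct_tf_placeholder(batch_size):
    # `None` leading dimension so the same placeholders accept the full
    # validation/test sets as well as mini-batches of size batch_size.
    data = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
    labels = tf.placeholder(tf.float32, shape=(None, NUM_LABELS))
    return data, labels


def inference(data, hidden_units=1024):
    # One hidden ReLU layer followed by a linear output layer.
    w1 = tf.Variable(tf.truncated_normal([IMAGE_PIXELS, hidden_units], stddev=0.1))
    b1 = tf.Variable(tf.zeros([hidden_units]))
    hidden = tf.nn.relu(tf.matmul(data, w1) + b1)
    w2 = tf.Variable(tf.truncated_normal([hidden_units, NUM_LABELS], stddev=0.1))
    b2 = tf.Variable(tf.zeros([NUM_LABELS]))
    return tf.matmul(hidden, w2) + b2  # unscaled logits


def loss_func(logits, labels):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    tf.summary.scalar('loss', loss)
    return loss


def training(loss, learning_rate):
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)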
Example #3
def evaluate(label, y):
    res = tf.equal(tf.argmax(label, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(res, tf.float32))
    tf.summary.scalar('validation_error', (1.0 - accuracy))
    return accuracy
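
A quick sanity check for evaluate (hypothetical standalone snippet, TF 1.x): feeding identical one-hot tensors as both arguments should score 1.0:

labels = tf.constant([[0., 1.], [1., 0.]])
acc = evaluate(labels, labels)
with tf.Session() as s:
    print(s.run(acc))  # 1.0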


def chunks(data, label, idx, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(data), n):
        shuffled_index = idx[i:i + n]
        yield data[shuffled_index], label[shuffled_index]
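
chunks is meant to be paired with a freshly shuffled index array each epoch; a hypothetical usage sketch (training_op and the y_ placeholder are assumed names, not defined on this page):

idx = np.random.permutation(len(train_data))  # reshuffle once per epoch
for batch_data, batch_labels in chunks(train_data, train_labels, idx, 128):
    sess.run(training_op, feed_dict={x: batch_data, y_: batch_labels})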


if __name__ == '__main__':
    test_dataset, train_dataset, validation_dataset = load_test_train_validation_ds()
    train_labels = convert_from_one_dim_labels(train_dataset.label, NUM_LABELS)

    train_labels = train_labels.astype(np.float32)
    train_data = train_dataset.data.astype(np.float32)

    validation_labels = convert_from_one_dim_labels(validation_dataset.label,
                                                    NUM_LABELS)

    validation_labels = validation_labels.astype(np.float32)
    validation_data = validation_dataset.data.astype(np.float32)

    test_labels = convert_from_one_dim_labels(test_dataset.label, NUM_LABELS)

    sess = tf.Session()
    x = tf.placeholder(shape=[None, 784], dtype='float32')
Example #4
def accuracy(predictions, labels):
    # Percentage of rows where the predicted class matches the label.
    return 100.0 * np.sum(
        np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]
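
The division originally sat inside np.sum's argument (dividing the boolean comparison elementwise), which silently broke the percentage; with the parenthesis fixed, a quick check:

preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
labs = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]])
print(accuracy(preds, labs))  # 100.0: all three argmax predictions match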


def chunks(data, label, idx, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(data), n):
        shuffled_index = idx[i:i + n]
        yield data[shuffled_index], label[shuffled_index]


if __name__ == '__main__':
    test_dataset, train_dataset, validation_dataset = load_test_train_validation_ds(
        reshape=(-1, image_size, image_size, num_channels))
    train_labels = convert_from_one_dim_labels(train_dataset.label, NUM_LABELS)

    train_labels = train_labels.astype(np.float32)
    train_data = train_dataset.data.astype(np.float32)

    validation_labels = convert_from_one_dim_labels(validation_dataset.label,
                                                    NUM_LABELS)

    validation_labels = validation_labels.astype(np.float32)
    validation_data = validation_dataset.data.astype(np.float32)

    test_labels = convert_from_one_dim_labels(test_dataset.label, NUM_LABELS)

    sess = tf.Session()
    # Shape matches the reshape passed to load_test_train_validation_ds above.
    tf_train_data = tf.placeholder(shape=(batch_size, image_size, image_size,
                                          num_channels), dtype='float32')