Example #1
    if use_gpu:
        trained_cnn = trained_cnn.cuda()

    print("=CNN state loaded=")
    print("Extracting distractors features...")

    # Dump the features to disk so they can be loaded later
    distractors_features_folder_name = save_features(trained_cnn,
                                                     target_shapes_dataset,
                                                     cnn_dump_id)
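
save_features is defined outside this snippet; a minimal sketch of what such a feature-dumping helper could look like, assuming a PyTorch model and a DataLoader of (image, label) batches (the function name, folder layout and batch format below are assumptions):

import os
import numpy as np
import torch

def save_features_sketch(model, loader, dump_folder):
    """Run the frozen CNN over every batch and dump all features to one .npy file."""
    os.makedirs(dump_folder, exist_ok=True)
    model.eval()
    features = []
    with torch.no_grad():
        for images, _labels in loader:        # assumes (image, label) batches
            feats = model(images)             # forward pass only, no gradients
            features.append(feats.cpu().numpy())
    np.save(os.path.join(dump_folder, 'features.npy'), np.concatenate(features))
    return dump_folder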

# Load data
if should_train_visual:
        assert False  # guard: this branch is not expected to be reached
    _train_data, _valid_data, _test_data = load_images(
        'shapes/{}'.format(target_shapes_dataset), BATCH_SIZE, K)
else:
    n_pretrained_image_features, _t, _v, test_data = load_pretrained_features_zero_shot(
        target_features_folder_name, distractors_features_folder_name,
        BATCH_SIZE, K)
    assert n_pretrained_image_features == n_image_features

# Create onehot metadata if not created yet - only target is needed
if not does_shapes_onehot_metadata_exist(target_shapes_dataset):
    create_shapes_onehot_metadata(target_shapes_dataset)

# Load metadata - only target is needed
_train_metadata, _valid_metadata, target_test_metadata = load_shapes_onehot_metadata(
    target_shapes_dataset)
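
create_shapes_onehot_metadata and load_shapes_onehot_metadata are also external to this snippet; what they presumably handle is a per-image one-hot encoding of categorical attributes. A rough, hypothetical sketch of that encoding, assuming attributes such as shape, color and size (the attribute names and sizes are illustrative, not taken from the project):

import numpy as np

def onehot_metadata_sketch(attribute_indices, attribute_sizes):
    """Concatenate one one-hot block per categorical attribute into a single vector."""
    blocks = []
    for index, size in zip(attribute_indices, attribute_sizes):
        block = np.zeros(size, dtype=np.float32)
        block[index] = 1.0
        blocks.append(block)
    return np.concatenate(blocks)

# e.g. shape 2-of-3, color 0-of-3, size 1-of-2 -> a length-8 vector
vector = onehot_metadata_sketch([2, 0, 1], [3, 3, 2])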

Example #2

# Settings

# Load metadata
if shapes_dataset is not None:  # guard assumed; mirrors the data-loading check below
    train_metadata, valid_metadata, test_metadata, noise_metadata = load_shapes_onehot_metadata(
        shapes_dataset)
else:
    train_metadata = None
    valid_metadata = None
    test_metadata = None
    noise_metadata = None
print("loaded metadata")
print("loading data")
# Load data
if shapes_dataset is not None:
    if not use_symbolic_input:
        if should_train_visual:
            train_data, valid_data, test_data, noise_data = load_images(
                'shapes/{}'.format(shapes_dataset), BATCH_SIZE, K)
        else:
            n_pretrained_image_features, train_data, valid_data, test_data, noise_data = load_pretrained_features(
                features_folder_name, BATCH_SIZE, K)
            assert n_pretrained_image_features == n_image_features
    else:
        n_image_features, train_data, valid_data, test_data, noise_data = load_pretrained_features(
            'shapes/{}'.format(shapes_dataset),
            BATCH_SIZE,
            K,
            use_symbolic=True)
else:
    n_image_features, train_data, valid_data, test_data, noise_data = load_pretrained_features(
        'data/mscoco', BATCH_SIZE, K)
    print('\nUsing {} image features\n'.format(n_image_features))
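
load_pretrained_features is defined elsewhere in the project; judging from the call sites above, it returns the feature dimensionality followed by train/valid/test/noise loaders. A minimal sketch of that contract, assuming the features were dumped as per-split .npy arrays (the file names and layout are assumptions, and the K distractor argument is omitted):

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

def load_pretrained_features_sketch(folder, batch_size):
    """Load dumped feature arrays and wrap each split in a DataLoader."""
    loaders = []
    n_features = None
    for split in ('train', 'valid', 'test', 'noise'):
        feats = np.load('{}/{}_features.npy'.format(folder, split))
        n_features = feats.shape[1]
        dataset = TensorDataset(torch.from_numpy(feats).float())
        loaders.append(DataLoader(dataset, batch_size=batch_size, shuffle=(split == 'train')))
    return (n_features, *loaders)
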
Example #3
import neonetwork as nn
import dataloader as dl


def foo(net, images, labels, which):
    """Run a single image through the network and print its output next to the true label."""
    net.set_input(dl.img_to_array(images[which]))
    net.forward_prop()
    # dl.show_img_arr(dl.img_to_array(images[which]))
    print(net.get_out())
    print(labels[which])


if __name__ == "__main__":
    labels = dl.load_labels(
        "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz", 60000)
    images = dl.load_images(
        "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", 60000)
    d = dl.create_data(images, labels)
    train, test = dl.divide_train_test(d, 48000)  # split the 60,000 examples, 48,000 of them for training

    net = nn.Network([0 for i in range(784)])  # one input per pixel of a 28x28 MNIST image
    net.train(train, 10)
    net.test(test)
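
The dataloader module itself is not shown here; for context, the MNIST files fetched above are gzipped IDX archives, and a hypothetical reader for already-downloaded local copies could look like this (the function names are assumptions, not the module's real API):

import gzip
import struct
import numpy as np

def read_idx_labels(path):
    """Read a gzipped IDX1 label file: an 8-byte header, then one byte per label."""
    with gzip.open(path, 'rb') as f:
        _magic, count = struct.unpack('>II', f.read(8))
        return np.frombuffer(f.read(count), dtype=np.uint8)

def read_idx_images(path):
    """Read a gzipped IDX3 image file: a 16-byte header, then rows*cols bytes per image."""
    with gzip.open(path, 'rb') as f:
        _magic, count, rows, cols = struct.unpack('>IIII', f.read(16))
        data = np.frombuffer(f.read(count * rows * cols), dtype=np.uint8)
        return data.reshape(count, rows, cols) / 255.0  # scale pixel values to [0, 1]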