Example #1
import os

import numpy as np

import import_data

def get_data(conf):
    """Return data to be used in this session.

    Args:
        conf: Configuration dictionary
    Returns:
        X_train: numpy array of floats with shape [input_dimension, num train examples] in [0, 1].
        Y_train: numpy array of integers with shape [output_dimension, num train examples].
        X_devel: numpy array of floats with shape [input_dimension, num devel examples] in [0, 1].
        Y_devel: numpy array of integers with shape [output_dimension, num devel examples].
        X_test: numpy array of floats with shape [input_dimension, num test examples] in [0, 1].
        Y_test: numpy array of integers with shape [output_dimension, num test examples].
    """

    data_dir = os.path.join(conf['data_root_dir'], conf['dataset'])
    if conf['dataset'] == 'cifar10':
        conf['input_dimension'] = 32*32*3
        conf['output_dimension'] = 10
        X_train, Y_train, X_devel, Y_devel, X_test, Y_test = import_data.load_cifar10(
            data_dir, conf['devel_size'])
    elif conf['dataset'] == 'mnist':
        conf['input_dimension'] = 28*28*1
        conf['output_dimension'] = 10
        X_train, Y_train, X_devel, Y_devel, X_test, Y_test = import_data.load_mnist(
            data_dir, conf['devel_size'])
    elif conf['dataset'] == 'svhn':
        conf['input_dimension'] = 32*32*3
        conf['output_dimension'] = 10
        X_train, Y_train, X_devel, Y_devel, X_test, Y_test = import_data.load_svhn(
            data_dir, conf['devel_size'])

    conf['layer_dimensions'] = ([conf['input_dimension']] +
                                conf['hidden_dimensions'] +
                                [conf['output_dimension']])

    if conf['verbose']:
        print("Train dataset:")
        print("  shape = {}, data type = {}, min val = {}, max val = {}".format(X_train.shape,
                                                                                X_train.dtype,
                                                                                np.min(
                                                                                    X_train),
                                                                                np.max(X_train)))
        print("Development dataset:")
        print("  shape = {}, data type = {}, min val = {}, max val = {}".format(X_devel.shape,
                                                                                X_devel.dtype,
                                                                                np.min(
                                                                                    X_devel),
                                                                                np.max(X_devel)))
        print("Test dataset:")
        print("  shape = {}, data type = {}, min val = {}, max val = {}".format(X_test.shape,
                                                                                X_test.dtype,
                                                                                np.min(
                                                                                    X_test),
                                                                                np.max(X_test)))

    return X_train, Y_train, X_devel, Y_devel, X_test, Y_test
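
# A minimal usage sketch (an assumption, not part of the example): the conf
# keys below are exactly the ones get_data reads above; the concrete values
# are placeholders.
if __name__ == "__main__":
    conf = {
        'data_root_dir': 'data',      # directory that holds the dataset folders
        'dataset': 'mnist',           # one of 'cifar10', 'mnist', 'svhn'
        'devel_size': 5000,           # examples held out for the development split
        'hidden_dimensions': [128],   # hidden layer sizes; input/output dims are set by get_data
        'verbose': True,
    }
    X_train, Y_train, X_devel, Y_devel, X_test, Y_test = get_data(conf)
    print(conf['layer_dimensions'])   # [784, 128, 10] for this MNIST configuration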
Example #2
import os

import numpy as np

import import_data

def get_data(conf):
    """
    Function for loading data from import_data.py and specifying correct dimensions.
    This function returns training, development and test data.
    """
    data_dir = os.path.join(conf["data_root_dir"], conf["dataset"])
    print(data_dir)
    if conf["dataset"] == "cifar10":
        conf["channels_x"] = 3
        conf["height_x"] = 32
        conf["width_x"] = 32
        conf["input_dimension"] = conf["channels_x"] * conf["height_x"] * conf[
            "width_x"]
        conf["output_dimension"] = 10
        X_train, Y_train, X_devel, Y_devel, X_test, Y_test = import_data.load_cifar10(
            conf, data_dir, conf["devel_size"])
    elif conf["dataset"] == "mnist":
        conf["channels_x"] = 1
        conf["height_x"] = 28
        conf["width_x"] = 28
        conf["input_dimension"] = conf["channels_x"] * conf["height_x"] * conf[
            "width_x"]
        conf["output_dimension"] = 10
        X_train, Y_train, X_devel, Y_devel, X_test, Y_test = import_data.load_mnist(
            conf, data_dir, conf["devel_size"])
    elif conf["dataset"] == "svhn":
        conf["channels_x"] = 3
        conf["height_x"] = 32
        conf["width_x"] = 32
        conf["input_dimension"] = conf["channels_x"] * conf["height_x"] * conf[
            "width_x"]
        conf["output_dimension"] = 10
        X_train, Y_train, X_devel, Y_devel, X_test, Y_test = import_data.load_svhn(
            conf, data_dir, conf["devel_size"])

    conf["layer_dimensions"] = [
        conf["input_dimension"]
    ] + conf["hidden_dimensions"] + [conf["output_dimension"]]

    if conf["verbose"]:
        print("Train dataset:")
        print(
            "  shape = {}, data type = {}, min val = {}, max val = {}".format(
                X_train.shape, X_train.dtype, np.min(X_train),
                np.max(X_train)))
        print("Development dataset:")
        print(
            "  shape = {}, data type = {}, min val = {}, max val = {}".format(
                X_devel.shape, X_devel.dtype, np.min(X_devel),
                np.max(X_devel)))
        print("Test dataset:")
        print(
            "  shape = {}, data type = {}, min val = {}, max val = {}".format(
                X_test.shape, X_test.dtype, np.min(X_test), np.max(X_test)))

    return X_train, Y_train, X_devel, Y_devel, X_test, Y_test
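
# A small follow-up sketch (assumed, not from the example): because this
# version of get_data also records channels_x, height_x and width_x in conf,
# a flattened batch can be reshaped back into image form. This assumes the
# loaders return X with shape [input_dimension, num_examples] as in Example #1
# and that the flattening was done channel-first.
def to_images(X, conf):
    # [input_dimension, num_examples] -> [num_examples, channels, height, width]
    num_examples = X.shape[1]
    return X.T.reshape(num_examples,
                       conf["channels_x"],
                       conf["height_x"],
                       conf["width_x"])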
Example #3
        print('Only numerics are legal')
        print(e)
else:
    dims = [784, 128, 10]

conf = {
    'layer_dimensions': dims,
    'learning_rate': float(options.learningrate),
    'epochs': int(options.epoch),
    'batch_size': int(options.batchsize),
    'activation_function': 'relu'
}

model = net(layerdimensions=conf['layer_dimensions'])

X, y, X_test, y_test = load_mnist('data_mnist')
b = len(X) // conf['batch_size']

print('\nModel')
print('-' * 50)
print('Layer dimensions: {}'.format(conf['layer_dimensions']))
print('Learning rate: {}'.format(conf['learning_rate']))
print('Epochs: {}'.format(conf['epochs']))
print('Batch size: {}\n'.format(conf['batch_size']))
print('-' * 50)

print('One hot encoding target values..\n')
y = one_hot_encode(y)
y_test = one_hot_encode(y_test)

print('Scaling pixel data..\n')
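# A minimal sketch of the scaling step announced above, assuming 8-bit pixel
# values: dividing by 255 maps the data into [0, 1].
X = X / 255.0
X_test = X_test / 255.0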
import os

import numpy as np
import torch

from import_data import obtain, load_mnist
from VAEs import VAE_q2

# In[Load and import data]

# Download the binarized MNIST dataset if it is not already present
if not os.path.isdir(os.path.join(os.getcwd(), 'binarized_mnist')):
    obtain(os.path.join(os.getcwd(), 'binarized_mnist'))

batch_size = 32
valid_batch_size = 200
test_batch_size = 200
train_MNIST, val_MNIST, test_MNIST = load_mnist(batch_size, valid_batch_size,
                                                test_batch_size)

# In[Train function]


def train(epochs, l_r):
    model = VAE_q2()
    if torch.cuda.is_available():
        print("Using the GPU")
        device = torch.device("cuda")
        cuda_available = True
    else:
        print("WARNING: You are about to run on the CPU, which will likely run "
              "out of memory.\nYou can try setting batch_size=1 to reduce "
              "memory usage.")
        device = torch.device("cpu")
        cuda_available = False  # mirror the flag set on the GPU branch