def load_data(dataset_name, data_path, batch_size):
    if dataset_name == 'mnist':
        train_data, _, test_data, _ = data.load_mnist(data_path)
    elif dataset_name == 'cifar10':
        train_data, _, test_data, _ = data.load_cifar10(data_path)
    else:
        raise ValueError("dataset_name must be 'mnist' or 'cifar10'")
    data_shape = (batch_size, ) + train_data.shape[1:]
    batches_per_epoch = train_data.shape[0] // batch_size
    train_gen = data.data_generator(train_data, batch_size)
    test_gen = data.data_generator(test_data, batch_size)
    return train_gen, test_gen, batches_per_epoch, data_shape
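
# Illustrative usage of load_data (a sketch; assumes the sounds_deep `data`
# module is importable and that MNIST is available under './data/'):
train_gen, test_gen, batches_per_epoch, data_shape = load_data(
    'mnist', './data/', batch_size=32)
for _ in range(batches_per_epoch):
    batch = next(train_gen)  # one minibatch with shape data_shape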
Example #2
# sampled img save directory
if args.output_dir == '' and 'SLURM_JOB_ID' in os.environ:
    job_id = os.environ['SLURM_JOB_ID']
    output_directory = 'glow_{}'.format(job_id)
    os.makedirs(output_directory, exist_ok=True)
else:
    if args.output_dir == '':
        args.output_dir = './'
        output_directory = './'
    else:
        output_directory = args.output_dir
        os.makedirs(output_directory, exist_ok=True)


def unison_shuffled_copies(arrays):
    """Shuffle several equal-length arrays with one shared permutation."""
    assert all(len(a) == len(arrays[0]) for a in arrays), \
        'all arrays must have the same length'
    p = np.random.permutation(len(arrays[0]))
    return [a[p] for a in arrays]
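
# Illustrative check: both arrays receive the same permutation, so row i of
# the shuffled data still carries its original label.
xs = np.arange(6).reshape(3, 2)   # rows [0, 1], [2, 3], [4, 5]
ys = np.array([0, 1, 2])          # ys[i] records the original row index
xs_s, ys_s = unison_shuffled_copies([xs, ys])
assert all((xs_s[i] == xs[ys_s[i]]).all() for i in range(3))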


# load the data
if args.dataset == 'cifar10':
    train_data, train_labels, test_data, test_labels = data.load_cifar10(
        './data/')
elif args.dataset == 'mnist':
    train_data, train_labels, test_data, test_labels = data.load_mnist(
        './data/')
    # restore the (height, width, channel) image layout
    train_data = np.reshape(train_data, [-1, 28, 28, 1])
    test_data = np.reshape(test_data, [-1, 28, 28, 1])
else:
    raise ValueError("args.dataset must be 'mnist' or 'cifar10'")
data_shape = train_data.shape[1:]
label_shape = train_labels.shape[1:]
train_batches_per_epoch = train_data.shape[0] // args.unlabeled_batch_size
test_batches_per_epoch = test_data.shape[0] // args.labeled_batch_size

# choose labeled training data
train_data, train_labels = unison_shuffled_copies([train_data, train_labels])
labeled_train_data = train_data[:args.num_labeled_data]
labeled_train_labels = train_labels[:args.num_labeled_data]
# sampled img save directory
if args.output_dir == '' and 'SLURM_JOB_ID' in os.environ:
    job_id = os.environ['SLURM_JOB_ID']
    output_directory = 'glow_{}'.format(job_id)
    os.makedirs(output_directory, exist_ok=True)
else:
    if args.output_dir == '':
        args.output_dir = './'
        output_directory = './'
    else:
        output_directory = args.output_dir
        os.makedirs(output_directory, exist_ok=True)

# load the data
if args.dataset == 'cifar10':
    train_data, train_labels, _, _ = data.load_cifar10('./data/')
    train_data *= 255  # scale pixel values up to the [0, 255] range
elif args.dataset == 'mnist':
    train_data, train_labels, test_data, test_labels = data.load_mnist(
        './data/')
    train_data *= 255  # scale pixel values up to the [0, 255] range
    train_data = np.reshape(train_data, [-1, 28, 28, 1])
else:
    raise ValueError("args.dataset must be 'mnist' or 'cifar10'")
data_shape = (args.batch_size, ) + train_data.shape[1:]
label_shape = (args.batch_size, ) + train_labels.shape[1:]
batches_per_epoch = train_data.shape[0] // (args.batch_size * args.num_gpus)  # one batch per GPU per step
train_gen = data.parallel_data_generator([train_data, train_labels],
                                         args.batch_size)
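
# `data.parallel_data_generator` comes from sounds_deep and its source is not
# shown here. A plausible sketch of the behaviour the script relies on,
# assuming it cycles endlessly over aligned minibatches from several arrays:
def parallel_data_generator_sketch(arrays, batch_size):
    """Yield tuples of aligned batches, reshuffling after each full pass."""
    n = arrays[0].shape[0]
    while True:
        p = np.random.permutation(n)
        for i in range(n // batch_size):
            idx = p[i * batch_size:(i + 1) * batch_size]
            yield tuple(a[idx] for a in arrays)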

# build model
glow = GlowFlow(args.levels,
                args.depth_per_level)  # ... remaining constructor arguments truncated in the source

import sounds_deep.contrib.util.scaling as scaling
import sounds_deep.contrib.util.util as util
import sounds_deep.contrib.models.vae as vae
import sounds_deep.contrib.parameterized_distributions.discretized_logistic as discretized_logistic

parser = argparse.ArgumentParser(description='Train a VAE model.')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--latent_dimension', type=int, default=64)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--learning_rate', type=float, default=3e-5)
parser.add_argument('--dataset', type=str, default='cifar10')
args = parser.parse_args()

# load the data
if args.dataset == 'cifar10':
    train_data, _, _, _ = data.load_cifar10('./data/')
elif args.dataset == 'mnist':
    train_data, _, _, _ = data.load_mnist('./data/')
    train_data = np.reshape(train_data, [-1, 28, 28, 1])
else:
    raise ValueError("args.dataset must be 'mnist' or 'cifar10'")
data_shape = (args.batch_size, ) + train_data.shape[1:]
batches_per_epoch = train_data.shape[0] // args.batch_size
train_gen = data.data_generator(train_data, args.batch_size)

# build the model
if args.dataset == 'cifar10':
    encoder_module = snt.Sequential([
        snt.Conv2D(16, 3),
        snt.Residual(snt.Conv2D(16, 3)),
        snt.Residual(snt.Conv2D(16, 3)), scaling.squeeze2d,
        snt.Conv2D(64, 3),
        snt.Residual(snt.Conv2D(64, 3)),
        # ... (the rest of the encoder definition is truncated in the source)
    ])
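
# `scaling.squeeze2d` is assumed to be the usual Glow-style space-to-depth
# squeeze: each 2x2 spatial block is folded into the channel axis, halving
# height and width and quadrupling channels. A numpy sketch of that operation:
def squeeze2d_sketch(x):
    """(N, H, W, C) -> (N, H//2, W//2, 4*C) by folding 2x2 blocks into channels."""
    n, h, w, c = x.shape
    x = x.reshape(n, h // 2, 2, w // 2, 2, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h // 2, w // 2, 4 * c)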