Example #1
import argparse
import os

import numpy as np

# Note: `models` and the dataset modules (mnist, svhn, hairdata, ...) are
# provided by the surrounding project and are not shown here.


def main():
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--datasize', type=int, default=-1)
    parser.add_argument('--output', default='output')
    parser.add_argument('--zdims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--testmode', action='store_true')

    args = parser.parse_args()

    # Select GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Make output directory if it does not exist
    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    # Load datasets
    if args.dataset == 'mnist':
        datasets = mnist.load_data()
    elif args.dataset == 'svhn':
        datasets = svhn.load_data()
    elif args.dataset == 'hair':
        datasets = hairdata.load_data()
    elif args.dataset == 'hair_4tags':
        datasets = hairdata_4tags.load_data()
    else:
        datasets = load_data(args.dataset)

    # Construct model
    if args.model not in models:
        raise Exception(f'Unknown model: {args.model}')

    model = models[args.model](input_shape=datasets.images.shape[1:],
                               num_attrs=len(datasets.attr_names),
                               z_dims=args.zdims,
                               output=args.output)

    if args.resume is not None:
        model.load_model(args.resume)

    # Training loop
    datasets.images = datasets.images * 2.0 - 1.0
    samples = np.random.normal(size=(10, args.zdims)).astype(np.float32)
    model.main_loop(datasets,
                    samples,
                    datasets.attr_names,
                    epochs=args.epoch,
                    batchsize=args.batchsize,
                    reporter=[
                        'loss', 'g_loss', 'd_loss', 'g_acc', 'd_acc', 'c_loss',
                        'ae_loss'
                    ])
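Example #1 expects a module-level models registry that maps the --model argument to a constructor. Below is a minimal, hypothetical sketch of the interface the script assumes; DummyModel and its method bodies are stand-ins, not the project's real GAN/VAE classes.

# Hypothetical stand-in matching the constructor and methods Example #1 calls;
# the real project registers its own model classes in `models`.
class DummyModel:
    def __init__(self, input_shape, num_attrs, z_dims, output):
        self.input_shape = input_shape  # e.g. image shape without batch axis
        self.num_attrs = num_attrs      # number of attribute/label dimensions
        self.z_dims = z_dims            # latent vector size
        self.output = output            # output directory for checkpoints/samples

    def load_model(self, path):
        pass  # would restore weights from the checkpoint at `path`

    def main_loop(self, datasets, samples, attr_names,
                  epochs=200, batchsize=50, reporter=None):
        pass  # would run the training loop and log the `reporter` metrics

models = {'dummy': DummyModel}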
Example #2
import argparse
import os

import tensorflow as tf

# Note: `models`, `load_images`, and the dataset modules are provided by the
# surrounding project and are not shown here.


def main(_):
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--datasize', type=int, default=-1)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--output', default='output')
    parser.add_argument('--zdims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--testmode', action='store_true')

    args = parser.parse_args()

    # Select GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Make output directory if it does not exist
    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    # Load datasets
    if args.dataset == 'mnist':
        datasets = mnist.load_data()
    elif args.dataset == 'svhn':
        datasets = svhn.load_data()
    else:
        datasets = load_images(args.dataset)

    # Construct model
    if args.model not in models:
        raise Exception(f'Unknown model: {args.model}')

    model = models[args.model](batchsize=args.batchsize,
                               input_shape=datasets.shape[1:],
                               attr_names=datasets.attr_names,
                               z_dims=args.zdims,
                               output=args.output,
                               resume=args.resume)

    if args.testmode:
        model.test_mode = True

    tf.set_random_seed(12345)

    # Training loop
    datasets.images = datasets.images.astype('float32') * 2.0 - 1.0

    model.main_loop(datasets, epochs=args.epoch)
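Both scripts rescale pixel values from [0, 1] to [-1, 1] before training, which matches the output range of a tanh-activated generator. A quick standalone check of the mapping:

import numpy as np

# The scripts map pixel values from [0, 1] to [-1, 1] via x * 2 - 1.
x = np.array([0.0, 0.25, 0.5, 1.0], dtype=np.float32)
print(x * 2.0 - 1.0)  # [-1.  -0.5  0.   1. ]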
Example #3
def load_dataset(dataset_name):
    if dataset_name == 'mnist':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data()
    elif dataset_name == 'mnist-original':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            original=True)
    elif dataset_name == 'mnist-rgb':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            use_rgb=True)
    elif dataset_name == 'svhn':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data()
    elif dataset_name == 'svhn-extra':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data(
            include_extra=True)
    elif dataset_name == 'mnist-svhn':
        anchor = load_dataset('mnist-rgb')
        mirror = load_dataset('svhn')
        dataset = CrossDomainDatasets(dataset_name.replace('-', ''), anchor,
                                      mirror)
    elif dataset_name == 'cifar10':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = cifar10.load_data()
    elif dataset_name == 'moving-mnist':
        data = moving_mnist.load_data()
        dataset = TimeCorelatedDataset(dataset_name.replace('-', ''), data)
    elif dataset_name == 'lsun-bedroom':
        datapath = lsun.load_data()
        dataset = LargeDataset(datapath)
    elif dataset_name == 'celeba':
        datapath = celeba.load_data()
        dataset = LargeDataset(datapath)
    else:
        raise KeyError(f"Dataset not implemented: {dataset_name}")

    return dataset
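A hedged usage sketch, assuming the project's dataset modules (mnist, svhn, and so on) are importable:

# Usage sketch: the loader returns a dataset object whose fields are
# populated by the branches shown above (exact shapes are project-specific).
dataset = load_dataset('mnist-rgb')
print(dataset.images.shape, len(dataset.attr_names))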
Example #4
def load_dataset(dataset_name):
    if dataset_name == 'mnist':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data()
    elif dataset_name == 'stacked-mnist':
        dataset = StackedDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data()
    elif dataset_name == 'mnist-original':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            original=True)
    elif dataset_name == 'mnist-rgb':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            use_rgb=True)
    elif dataset_name == 'svhn':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data()
    elif dataset_name == 'svhn-extra':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data(
            include_extra=True)
    elif dataset_name == 'mnist-svhn':
        anchor = load_dataset('mnist-rgb')
        mirror = load_dataset('svhn')
        dataset = CrossDomainDatasets(dataset_name.replace('-', ''), anchor,
                                      mirror)
    elif dataset_name == 'cifar10':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = cifar10.load_data()
    elif dataset_name == 'moving-mnist':
        data = moving_mnist.load_data()
        dataset = TimeCorelatedDataset(dataset_name.replace('-', ''), data)
    elif dataset_name == 'lsun-bedroom':
        datapath = lsun.load_data()
        dataset = LargeDataset(datapath)
    elif dataset_name == 'celeba':
        datapath = celeba.load_data()
        dataset = LargeDataset(datapath, buffer_size=20000)
    elif dataset_name == 'celeba-128':
        datapath = celeba.load_data(image_size=128)
        dataset = LargeDataset(datapath, buffer_size=5000)
    elif dataset_name == 'celeba-crop-128':
        datapath = celeba.load_data(image_size=128, center_crop=True)
        dataset = LargeDataset(datapath, buffer_size=5000)
    elif dataset_name == 'celeba-crop-64':
        datapath = celeba.load_data(image_size=64, center_crop=True)
        dataset = LargeDataset(datapath, buffer_size=20000)
    elif dataset_name == 'synthetic-8ring':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.attr_names = mixture.load_data(
            type="ring", n=8, std=.05, r=1, density=5000)
    elif dataset_name == 'synthetic-25grid':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.attr_names = mixture.load_data(
            type="grid", n=25, std=.05, density=2500)
    elif dataset_name == 'synthetic-high-dim':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.attr_names = mixture.load_data(
            type="high-dim", n=10, d=1200, std=1., density=5000)
    elif dataset_name == 'mnist-anomaly':
        x, y, x_t, y_t, y_names = mnist.load_data()
        dataset = SimulatedAnomalyDetectionDataset(dataset_name.replace('-', ''),
                                                   x, y,
                                                   anomaly_class=0,
                                                   test_set=(x_t, y_t))
    elif dataset_name == 'svhn-anomaly':
        x, y, x_t, y_t, y_names = svhn.load_data()
        dataset = SimulatedAnomalyDetectionDataset(dataset_name.replace('-', ''),
                                                   x, y,
                                                   anomaly_class=0,
                                                   test_set=(x_t, y_t))
    elif dataset_name == 'cifar10-anomaly':
        x, y, x_t, y_t, y_names = cifar10.load_data()
        dataset = SimulatedAnomalyDetectionDataset(dataset_name.replace('-', ''),
                                                   x, y,
                                                   anomaly_class=0,
                                                   test_set=(x_t, y_t))
    else:
        raise KeyError(f"Dataset not implemented: {dataset_name}")

    return dataset
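The branches above populate a common set of fields on each dataset object. Below is a minimal, hypothetical stand-in for ConditionalDataset, inferred from that usage; the real class lives in the project's dataset package.

# Hypothetical stand-in inferred from how load_dataset() fills the fields;
# not the project's actual implementation.
class ConditionalDataset:
    def __init__(self, name):
        self.name = name
        self.images = None      # training images
        self.attrs = None       # per-image attribute/label matrix
        self.attr_names = None  # human-readable attribute names
        self.x_test = None      # held-out test images
        self.y_test = None      # held-out test labels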