# Example #1
# 0
import argparse
import torch
from model import VAE
from data import DATASETS, DATASET_CONFIGS
from train import train_model


# Command-line interface for the VAE training/evaluation script.
parser = argparse.ArgumentParser('VAE pytorch implementation')
# NOTE(review): store_false makes args.no_gpu default to True, and passing
# --no-gpu sets it to False. Combined with the negated flag name this is a
# double negative that is easy to misread — confirm how the consumer
# interprets args.no_gpu before relying on it.
parser.add_argument('--no-gpu', action='store_false')
parser.add_argument('--test', action='store_true')
# Dataset selection; valid names come from the DATASETS registry in data.py.
parser.add_argument(
    '--dataset', default='mnist',
    choices=list(DATASETS.keys())
)
# Model architecture hyperparameters (defaults sized for 32x32 single-channel input).
parser.add_argument('--image-size', type=int, default=32)
parser.add_argument('--channel-num', type=int, default=1)
parser.add_argument('--kernel-num', type=int, default=128)
parser.add_argument('--kernel-size', type=int, default=4)
parser.add_argument('--z-size', type=int, default=128)
# Optimization hyperparameters.
parser.add_argument('-b', '--batch-size', type=int, default=32)
parser.add_argument('-e', '--epoch', type=int, default=10)
parser.add_argument('-l', '--lr', type=float, default=3e-6)
parser.add_argument('-m', '--momentum', type=float, default=0.2)
parser.add_argument('--log-interval', type=int, default=10)


def patch_dataset_specific_configs(config):
    """Overwrite attributes on *config* with the dataset-specific settings.

    Looks up ``DATASET_CONFIGS[config.dataset]`` and copies every key/value
    pair onto the parsed-arguments namespace in place; returns nothing.
    """
    overrides = DATASET_CONFIGS[config.dataset]
    for name, value in overrides.items():
        setattr(config, name, value)
                                 batch_size=batch_size).fit(pixels)
    else:
        kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(pixels)
    return kmeans.cluster_centers_


def main(args):
    """Download the dataset, quantize its pixels, and persist the centroids.

    Writes ``data/<dataset>_centroids.npy`` containing the cluster centers
    returned by ``find_centroids``.
    """
    out_dir = Path("data")
    out_dir.mkdir(exist_ok=True)

    images = download(args.dataset, out_dir)
    centers = find_centroids(images, args.num_clusters, args.batch_size)
    np.save(out_dir / f"{args.dataset}_centroids.npy", centers)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset",
                        type=str,
                        choices=DATASETS.keys(),
                        default="mnist")
    parser.add_argument("--num_clusters", default=16, type=int)
    parser.add_argument(
        "--batch_size",
        default=1024,
        type=int,
        help="batch size for mini batch kmeans to quantize images",
    )
    args = parser.parse_args()
    main(args)
# Example #3
# 0
# Size of the hidden layer in the posterior latent-code approximator network.
parser.add_argument(
    '--q-hidden-size',
    type=int,
    default=128,
    help='posterior latent code approximator network\'s hidden layer size')
# Adam optimizer hyperparameters.
parser.add_argument('--learning-rate',
                    type=float,
                    default=0.00002,
                    help='learning rate for Adam [0.00002]')
parser.add_argument('--beta1',
                    type=float,
                    default=0.5,
                    help='momentum term of Adam [0.5]')
parser.add_argument('--dataset',
                    default='mnist',
                    help='dataset to use {}'.format(DATASETS.keys()))
# args.resize defaults to False; passing --resize enables it.
parser.add_argument('--resize',
                    action='store_true',
                    help='whether to resize images on the fly or not')
# NOTE(review): store_false means args.crop defaults to True and passing
# --crop DISABLES cropping — the opposite of what the flag name and help
# text suggest. Confirm against the consumer before changing the action.
parser.add_argument('--crop',
                    action='store_false',
                    help='whether to use crop for image resizing or not')
# Training loop configuration.
parser.add_argument('--iterations',
                    type=int,
                    default=5000,
                    help='training iteration number')
parser.add_argument('--batch-size',
                    type=int,
                    default=64,
                    help='training batch size')
parser.add_argument('--sample-size',
# Example #4
# 0
#!/usr/bin/env python3
import os.path
import pprint
import tensorflow as tf
from data import DATASETS
from model import WGAN
from train import train, train_original
import utils
import random
import numpy as np


# Command-line flags for the WGAN script (TF1-style tf.app.flags API).
flags = tf.app.flags
flags.DEFINE_string('dataset', 'mnist', 'dataset to use {}'.format(
    DATASETS.keys()
))
# Preprocessing switches: both default to True here (unlike the argparse
# variant above, where the effective defaults differ).
flags.DEFINE_bool('resize', True, 'whether to resize images on the fly or not')
flags.DEFINE_bool(
    'crop', True,
    'whether to use crop for image resizing or not'
)

# Model architecture hyperparameters.
flags.DEFINE_integer('z_size', 100, 'size of latent code z [100]')
flags.DEFINE_integer('image_size', 32, 'size of image [32]')
flags.DEFINE_integer('channel_size', 1, 'size of channel [1]')
flags.DEFINE_integer(
    'g_filter_number', 64,
    'number of generator\'s filters at the last transposed conv layer'
)
flags.DEFINE_integer(
    'c_filter_number', 64,