Example #1
from neon.data import MNIST

def load_mnist(path=".", normalize=True):
    # build the neon MNIST dataset object and return the raw data arrays
    mnist_dataset = MNIST(path=path, normalize=normalize)
    return mnist_dataset.load_data()
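A minimal usage sketch (the unpacking below assumes neon's MNIST.load_data() convention of returning the train and test tuples plus the class count):

(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=".")
print(X_train.shape, nclass)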
Example #2
from neon.data import MNIST
from neon.initializers import Gaussian
from neon.transforms import Rectlin
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--kbatch',
                    type=int,
                    default=1,
                    help='number of data batches per noise batch in training')
parser.add_argument(
    "--original_cost",
    action='store_true',
    help="generator cost log(1-D(G(z))) rather than -log(D(G(z)))")
args = parser.parse_args()

# load up the mnist data set
dataset = MNIST(path=args.data_dir, size=27)
train_set = dataset.train_iter
valid_set = dataset.valid_iter

# setup weight initialization function
init = Gaussian(scale=0.05)

# generator using "deconvolution" layers
relu = Rectlin(slope=0)  # relu for generator
conv = dict(init=init, batch_norm=True, activation=relu)
convp1 = dict(init=init, batch_norm=True, activation=relu, padding=1)
convp2 = dict(init=init, batch_norm=True, activation=relu, padding=2)
convp1s2 = dict(init=init,
                batch_norm=True,
                activation=relu,
                padding=1,
                strides=2)  # 'p1s2' naming: padding 1, strides 2
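These config dicts are meant to be unpacked into the generator's layers; a sketch of that pattern with neon's Deconv layer (the filter shapes here are illustrative, not from the original):

from neon.layers import Deconv

# hypothetical generator stack built from the dicts above
G_layers = [Deconv(fshape=(3, 3, 192), **convp1),
            Deconv(fshape=(3, 3, 192), **convp1s2),
            Deconv(fshape=(3, 3, 96), **convp2)]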
Example #3
from neon.data import MNIST
from neon.initializers import Gaussian
from neon.transforms import Rectlin
from neon.util.argparser import NeonArgparser
from neon.util.persist import ensure_dirs_exist

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--kbatch',
                    type=int,
                    default=1,
                    help='number of data batches per noise batch in training')
parser.add_argument('--subset_pct',
                    type=float,
                    default=100,
                    help='subset percentage of training dataset to use')
args = parser.parse_args()

# load up the mnist data set
dataset = MNIST(path=args.data_dir, subset_pct=args.subset_pct, size=27)
train_set = dataset.train_iter
valid_set = dataset.valid_iter

# setup weight initialization function
init = Gaussian(scale=0.05)

# generator using "deconvolution" layers
relu = Rectlin(slope=0)  # relu for generator
conv = dict(init=init, batch_norm=True, activation=relu)
convp1 = dict(init=init, batch_norm=True, activation=relu, padding=1)
convp2 = dict(init=init, batch_norm=True, activation=relu, padding=2)
convp1s2 = dict(init=init,
                batch_norm=True,
                activation=relu,
                padding=1,
                strides=2)  # 'p1s2' naming: padding 1, strides 2
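ensure_dirs_exist is imported above but not reached in this truncated snippet; in neon it creates any missing parent directories of a path and returns the path, e.g. (the output file name below is hypothetical):

out_file = ensure_dirs_exist('results/mnist_gan/checkpoint.prm')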
Example #4
from neon.data import MNIST
from neon.util.argparser import NeonArgparser

# create_model is defined in the companion network file of neon's GAN
# examples; the module name below is an assumption
from network_gan import create_model

# parse the command line arguments
parser = NeonArgparser(__doc__)
# the opening of this argument list was truncated in the source; --dmodel and
# the '--gmodel' line are reconstructed from the args.dmodel/args.gmodel usage below
parser.add_argument('--dmodel',
                    type=str,
                    default='dc',
                    help='discriminator model type: dc or mlp, default dc')
parser.add_argument('--gmodel',
                    type=str,
                    default='dc',
                    help='generator model type: dc or mlp, default dc')
parser.add_argument('--n_dis_ftr',
                    type=int,
                    default=64,
                    help='base discriminator feature number, default 64')
parser.add_argument('--n_gen_ftr',
                    type=int,
                    default=64,
                    help='base generator feature number, default 64')
args = parser.parse_args()
random_seed = args.rng_seed if args.rng_seed else 0

# load up the mnist data set, padding images to size 32
dataset = MNIST(path=args.data_dir, sym_range=True, size=32, shuffle=True)
train = dataset.train_iter

# create a GAN
model, cost = create_model(dis_model=args.dmodel,
                           gen_model=args.gmodel,
                           cost_type='wasserstein',
                           noise_type='normal',
                           im_size=32,
                           n_chan=1,
                           n_noise=128,
                           n_gen_ftr=args.n_gen_ftr,
                           n_dis_ftr=args.n_dis_ftr,
                           depth=4,
                           n_extra_layers=4,
                           batch_norm=True)  # call truncated in the source; any further keyword arguments are omitted
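Training the returned model then follows neon's usual Model.fit pattern; a hedged sketch (the optimizer, learning rate, and epoch count are assumptions, not from the original):

from neon.optimizers import RMSProp
from neon.callbacks.callbacks import Callbacks

optimizer = RMSProp(learning_rate=5e-5)  # small learning rate, as is typical for WGAN
callbacks = Callbacks(model)
model.fit(train, optimizer=optimizer, num_epochs=32, cost=cost, callbacks=callbacks)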