Example #1
# Dataset Setup
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32),
    'target': dataset['train']['target'].astype(np.int32)
}
test_data_dict = {
    'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32)
}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)

train_data.hook_preprocess(mnist_preprocess)
test_data.hook_preprocess(mnist_preprocess)


# Model Setup
model = models.ClassifierModel(cnn_models[args.model]())
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()


# Optimizer Setup
optimizer = optimizers.Adam()
optimizer.setup(model)


# Trainer Setup
updates = int(N_train / args.batch)
trainer = trainers.SupervisedTrainer(optimizer, logger, train_data, test_data, args.gpu)

trainer.train(int(args.epoch * N_train / args.batch),
              log_interval=1,
              test_interval=updates,
              test_nitr=N_test / args.valbatch)
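
The snippet above relies on names defined elsewhere in the project (DataFeeder, models, trainers, cnn_models, logger, mnist_preprocess). The hook below is only a minimal sketch of what mnist_preprocess might do, assuming DataFeeder hands each minibatch to the hook as a dict with 'data' and 'target' keys; the project's real implementation may differ.

# Hypothetical preprocessing hook (assumption: batch is a dict produced by DataFeeder):
# scale raw pixel values from [0, 255] to [0, 1].
def mnist_preprocess(batch):
    batch['data'] = batch['data'] / 255.0
    return batch
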
Example #2
# Dataset Setup
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].reshape(N_train, dim).astype(np.float32),
    'target': dataset['train']['target'].astype(np.int32)
}
test_data_dict = {
    'data': dataset['test']['data'].reshape(N_test, dim).astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32)
}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)

train_data.hook_preprocess(mnist_preprocess)
test_data.hook_preprocess(mnist_preprocess)

# Model Setup
h_units = 1200
model = models.ClassifierModel(
    Mlp(train_data['data'][0].size, h_units, h_units,
        np.max(train_data['target']) + 1))
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# Optimizer Setup
optimizer = optimizers.Adam()
optimizer.setup(model)

trainer = trainers.SupervisedTrainer(optimizer, logger, (train_data, ),
                                     test_data, args.gpu)
trainer.train(int(args.epoch * N_train / args.batch),
              log_interval=1,
              test_interval=N_train / args.batch,
              test_nitr=N_test / args.valbatch)
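
For reference, the Mlp above is constructed with the flattened input size, two hidden-layer widths (h_units), and the number of classes (np.max(target) + 1). A minimal sketch of such a chain in old-style Chainer, assuming two fully connected ReLU hidden layers; the project's actual Mlp may differ.

import chainer
import chainer.functions as F
import chainer.links as L

class Mlp(chainer.Chain):
    # Hypothetical 4-argument MLP matching the call Mlp(n_in, n_h1, n_h2, n_out).
    def __init__(self, n_in, n_h1, n_h2, n_out):
        super(Mlp, self).__init__(
            l1=L.Linear(n_in, n_h1),
            l2=L.Linear(n_h1, n_h2),
            l3=L.Linear(n_h2, n_out))

    def __call__(self, x):
        h = F.relu(self.l1(x))
        h = F.relu(self.l2(h))
        return self.l3(h)  # logits; the classifier wrapper presumably applies the loss
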
Example #3
# Dataset Setup
dim = dataset['train']['data'][0].size
N_train = len(dataset['train']['target'])
N_test = len(dataset['test']['target'])
train_data_dict = {
    'data': dataset['train']['data'].astype(np.float32),
    'target': dataset['train']['target'].astype(np.int32)
}
test_data_dict = {
    'data': dataset['test']['data'].astype(np.float32),
    'target': dataset['test']['target'].astype(np.int32)
}
train_data = DataFeeder(train_data_dict, batchsize=args.batch)
test_data = DataFeeder(test_data_dict, batchsize=args.valbatch)

train_data.hook_preprocess(cifar_preprocess)
test_data.hook_preprocess(cifar_preprocess)


# Model Setup
model = models.ClassifierModel(AllConvNet())
#model = models.ClassifierModel(AllConvNetBN())
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()


# Optimizer Setup
optimizer = optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00002))


trainer = trainers.SupervisedTrainer(optimizer, logger, (train_data,), test_data, args.gpu)
trainer.train(int(args.epoch * N_train / args.batch),
              log_interval=1,
              test_interval=N_train / args.batch,
              test_nitr=N_test / args.valbatch)
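
To make the trainer.train bookkeeping concrete, here is the arithmetic with illustrative CIFAR-10 sizes; the batch sizes and epoch count are placeholder values, not taken from the example.

N_train, N_test = 50000, 10000           # CIFAR-10 train/test split sizes
batch, valbatch, epoch = 100, 100, 30    # hypothetical command-line arguments

total_itr = int(epoch * N_train / batch)   # 15000 parameter updates in total
test_interval = N_train // batch           # evaluate once per epoch (every 500 updates)
test_nitr = N_test // valbatch             # 100 validation minibatches per evaluation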