Example #1
# `dataset`, `drcn`, and `utils` are this project's local modules;
# `Updater` and `LossAndAccuracy` are defined elsewhere in it as well.
import chainer
from chainer.iterators import SerialIterator
from chainer.training import Trainer, extensions
from chainer.training.triggers import MaxValueTrigger

import dataset
import drcn
import utils


def main(args):
    # Source domain: SVHN; target domain: MNIST
    s_train, s_test = dataset.load_svhn()
    t_train, t_test = dataset.load_mnist()

    s_train_iter = SerialIterator(
        s_train, args.batchsize, shuffle=True, repeat=True)
    t_train_iter = SerialIterator(
        t_train, args.batchsize, shuffle=True, repeat=True)
    s_test_iter = SerialIterator(
        s_test, args.batchsize, shuffle=False, repeat=False)
    t_test_iter = SerialIterator(
        t_test, args.batchsize, shuffle=False, repeat=False)

    model = drcn.DRCN()
    # Wrapper that reports classification loss and accuracy for evaluation
    target_model = LossAndAccuracy(model)
    # Keys reported during training: source/target classification losses
    # and the reconstruction loss
    loss_list = ['loss_cla_s', 'loss_cla_t', 'loss_rec']
    optimizer = chainer.optimizers.RMSprop(args.lr)
    optimizer.setup(model)
    optimizers = {
        'model': optimizer
    }

    updater = Updater(s_train_iter, t_train_iter, optimizers, args)
    out_dir = utils.prepare_dir(args)
    trainer = Trainer(updater, (args.max_iter, 'iteration'), out=out_dir)
    trainer.extend(extensions.LogReport(trigger=(args.interval, args.unit)))
    trainer.extend(
        extensions.snapshot_object(model, filename='model'),
        trigger=MaxValueTrigger('acc_t', (args.interval, args.unit)))
    trainer.extend(extensions.Evaluator(t_test_iter, target_model,
                                        device=args.device), trigger=(args.interval, args.unit))
    trainer.extend(extensions.PrintReport(
        [args.unit, *loss_list, 'acc_s', 'acc_t', 'elapsed_time']))
    trainer.extend(extensions.PlotReport(
        [*loss_list], x_key=args.unit, file_name='loss.png',
        trigger=(args.interval, args.unit)))
    trainer.extend(extensions.PlotReport(
        ['acc_s', 'acc_t'], x_key=args.unit, file_name='accuracy.png',
        trigger=(args.interval, args.unit)))
    trainer.extend(extensions.ProgressBar(update_interval=1))
    trainer.run()
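

# main() expects an `args` namespace with at least the fields referenced
# above. A minimal argparse sketch; the defaults are illustrative, and
# utils.prepare_dir may expect additional fields not listed here.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', type=int, default=128)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--max_iter', type=int, default=50000)
    parser.add_argument('--interval', type=int, default=100)
    parser.add_argument('--unit', type=str, default='iteration')
    parser.add_argument('--device', type=int, default=-1)
    main(parser.parse_args())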
Example #2
import numpy as np


# load_svhn and random_split_train_val are this project's local helpers.
# The function below is truncated in the source; its opening lines
# (flatten and scale, mirroring prepare_for_neural_network in Example #3)
# are a reconstruction.
def prepare_for_linear_classifier(train_X, test_X):
    train_flat = train_X.reshape(train_X.shape[0], -1).astype(float) / 255.0
    test_flat = test_X.reshape(test_X.shape[0], -1).astype(float) / 255.0

    # Append a column of ones so the bias folds into the weight matrix
    train_flat_with_ones = np.hstack(
        [train_flat, np.ones((train_X.shape[0], 1))])
    test_flat_with_ones = np.hstack([test_flat, np.ones((test_X.shape[0], 1))])
    return train_flat_with_ones, test_flat_with_ones


def square(x):
    # f(x) = x^2; returns the value and its analytic gradient
    return float(x * x), 2 * x


def array_sum(x):
    # Sum of a length-2 vector; the gradient is all ones
    assert x.shape == (2, ), x.shape
    return np.sum(x), np.ones_like(x)


def array_2d_sum(x):
    # Sum of a 2x2 matrix; the gradient is all ones
    assert x.shape == (2, 2)
    return np.sum(x), np.ones_like(x)
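

# `check_gradient` is called below but not defined in this snippet. A
# minimal sketch using central differences, assuming f returns
# (value, analytic_gradient) as the helpers above do; the parameter
# names and tolerances are illustrative.
def check_gradient(f, x, delta=1e-5, tol=1e-4):
    x = x.astype(float)
    _, analytic_grad = f(x)
    numeric_grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        orig = x[ix]
        # Perturb one coordinate in each direction and restore it
        x[ix] = orig + delta
        f_plus, _ = f(x)
        x[ix] = orig - delta
        f_minus, _ = f(x)
        x[ix] = orig
        numeric_grad[ix] = (f_plus - f_minus) / (2 * delta)
        it.iternext()
    assert np.allclose(numeric_grad, analytic_grad, atol=tol), (
        "numeric %s != analytic %s" % (numeric_grad, analytic_grad))
    return True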


train_X, train_y, test_X, test_y = load_svhn("data",
                                             max_train=10000,
                                             max_test=1000)
train_X, test_X = prepare_for_linear_classifier(train_X, test_X)
# Split train into train and val
train_X, train_y, val_X, val_y = random_split_train_val(train_X,
                                                        train_y,
                                                        num_val=1000)
check_gradient(square, np.array([3.0]))
check_gradient(array_sum, np.array([3.0, 2.0]))
check_gradient(array_2d_sum, np.array([[3.0, 2.0], [1.0, 0.0]]))
Example #3
import numpy as np

# load_svhn, random_split_train_val, TwoLayerNet, Dataset, and Trainer
# are the project's local helpers (their imports are not shown in the
# source).
if __name__ == '__main__':

    def prepare_for_neural_network(train_X, test_X):
        # Flatten images and scale pixel values to [0, 1]; np.float was
        # removed from NumPy, so use the builtin float
        train_flat = train_X.reshape(train_X.shape[0],
                                     -1).astype(float) / 255.0
        test_flat = test_X.reshape(test_X.shape[0], -1).astype(float) / 255.0

        # Subtract mean
        mean_image = np.mean(train_flat, axis=0)
        train_flat -= mean_image
        test_flat -= mean_image

        return train_flat, test_flat

    train_X, train_y, test_X, test_y = load_svhn(
        "./assignments/assignment2/data", max_train=10000, max_test=1000)
    train_X, test_X = prepare_for_neural_network(train_X, test_X)
    # Split train into train and val
    train_X, train_y, val_X, val_y = random_split_train_val(train_X,
                                                            train_y,
                                                            num_val=1000)

    data_size = 32
    model = TwoLayerNet(n_input=train_X.shape[1],
                        n_output=10,
                        hidden_layer_size=100,
                        reg=0)
    dataset = Dataset(train_X[:data_size], train_y[:data_size],
                      val_X[:data_size], val_y[:data_size])
    trainer = Trainer(model,
                      dataset,
Example #4
import pickle

import numpy as np

# `dataset` is the project's local data module and `args` comes from the
# script's argument parser (not shown). The branch header below is
# reconstructed from the parallel 'svhn' branch, including the 2-D
# im_size that the grid shape requires (assumption).
if args.dataset == 'mnist':
    size = 28
    im_size = (size, size)
    n_x = size * size
    n_hidden = [500, 500]
    n_z = 50
    n_y = 10
    n_batch_w = 7
    x_size = size * size
    output_f = 'sigmoid'
    n_layers_recog = n_layers_gen = len(n_hidden)
    output_image = np.zeros((n_batch_w * im_size[0], n_y * im_size[1]))

if args.dataset == 'svhn':
    size = 32
    im_size = (size, size, 3)
    train_x, train_y, test_x, test_y = dataset.load_svhn(args.data_dir,
                                                         binarize_y=True)
    # pca = pickle.load(open(args.data_dir+"/SVHN/pca.pkl"))
    n_x = train_x.shape[1]
    n_hidden = [500, 500]
    n_z = 300
    n_y = 10
    n_batch_w = 7
    x_size = size * size * 3
    output_f = 'sigmoid'
    n_layers_recog = n_layers_gen = len(n_hidden)
    output_image = np.zeros(
        (n_batch_w * im_size[0], n_y * im_size[1], im_size[2]))

with open(args.model, "rb") as f:
    model = pickle.load(f)

output_dir = "%s_%s" % (args.output_dir, args.dataset)
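
# output_image above is a (n_batch_w * H) x (n_y * W) grid: one row per
# batch slot, one column per class. A hedged sketch of filling it, where
# `samples[i][j]` is a hypothetical image of shape im_size:
for i in range(n_batch_w):
    for j in range(n_y):
        output_image[i * im_size[0]:(i + 1) * im_size[0],
                     j * im_size[1]:(j + 1) * im_size[1]] = samples[i][j]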