Example #1
File: run.py Project: ybin/tinynn
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    if args.model_type == "cnn":
        train_x = train_x.reshape((-1, 28, 28, 1))
        test_x = test_x.reshape((-1, 28, 28, 1))

    if args.model_type == "cnn":
        net = Net([
            Conv2D(kernel=[5, 5, 1, 8], stride=[2, 2], padding="SAME"),
            ReLU(),
            Conv2D(kernel=[5, 5, 8, 16], stride=[2, 2], padding="SAME"),
            ReLU(),
            Conv2D(kernel=[5, 5, 16, 32], stride=[2, 2], padding="SAME"),
            ReLU(),
            Flatten(),
            Dense(10)
        ])
    elif args.model_type == "dense":
        net = Net([
            Dense(200),
            ReLU(),
            Dense(100),
            ReLU(),
            Dense(70),
            ReLU(),
            Dense(30),
            ReLU(),
            Dense(10)
        ])
    else:
        raise ValueError(
            "Invalid argument model_type! Must be 'cnn' or 'dense'")

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
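Note: main() above only reads a handful of attributes from args (data_dir, model_type, lr, batch_size, num_ep). A minimal sketch of how it could be driven is shown below; the flag names and defaults are assumptions, since the real run.py defines its own argparse options.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./data")   # assumed default
    parser.add_argument("--model_type", type=str, default="cnn")    # "cnn" or "dense"
    parser.add_argument("--lr", type=float, default=1e-3)           # assumed default
    parser.add_argument("--batch_size", type=int, default=128)      # assumed default
    parser.add_argument("--num_ep", type=int, default=10)           # assumed default
    main(parser.parse_args())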
Example #2
File: run.py Project: ybin/tinynn
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    # train_y = get_one_hot(train_y, 2)

    net = Net([Dense(100), ReLU(), Dense(30), ReLU(), Dense(1)])

    model = Model(net=net,
                  loss=SigmoidCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        for timer in model.timers.values():
            timer.report()
        # evaluate
        model.set_phase("TEST")
        test_y_idx = np.asarray(test_y).reshape(-1)
        test_pred = model.forward(test_x)
        test_pred[test_pred > 0] = 1
        test_pred[test_pred <= 0] = 0
        test_pred_idx = test_pred.reshape(-1)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #3
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    train_x = Tensor(train_x)
    train_y = Tensor(train_y)
    test_x = Tensor(test_x)
    test_y = Tensor(test_y)

    net = Net([
        Dense(200),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(70),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(10)
    ])

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))
    loss_layer = SoftmaxCrossEntropyLoss()
    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            model.zero_grad()
            pred = model.forward(batch.inputs)
            loss = loss_layer.loss(pred, batch.targets)
            loss.backward()
            model.step()
            loss_list.append(loss.values)
        print("Epoch %d tim cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = test_y.values
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #4
File: run.py Project: t-k-/nbnp
def activation_maximazation(model, init_grads, layer_idx, fig):
    # create a random starting image (these mean/std values are generic
    # image-normalization constants rather than true MNIST statistics)
    img_mean = 0.456
    img_std = 0.224
    img = np.random.normal(img_mean, img_std, (1, 28, 28, 1))
    disp_mnist_batch(img, fig)
    # create optimizer
    opt = Adam(lr=1e-2)
    sigma = 0.30

    for iteration in range(500 + 1):
        # forward pass until interested layer
        outputs = img
        for layer in model.net.layers[0:layer_idx + 1]:
            outputs = layer.forward(outputs)
        # backward from interested layer
        grads = init_grads
        for layer in model.net.layers[0:layer_idx + 1][::-1]:
            grads = layer.backward(grads)
        # flatten the gradients and apply steps
        flat_grads = np.ravel(grads)
        flat_steps = opt._compute_step(flat_grads)
        steps = flat_steps.reshape(img.shape)
        img += steps
        # blur the image to regularize the optimization process
        img = gaussian_filter(img, sigma, order=0)
        # ensure image is still in [0, 1] range
        mean, max_, min_ = img.mean(), img.max(), img.min()
        img = (img - min_) / (max_ - min_)

        if iteration % 100 == 0:
            cells_idx = (-init_grads).astype(int).astype(bool)
            loss = -outputs[cells_idx].sum()
            stats = img.mean(), img.std(), img.min(), img.max()
            print('Iteration#%d, loss: %.3f' % (iteration, loss), end=" ")
            print('image: u=%.3f, std=%.3f, range=(%.3f, %.3f)' % stats)
            disp_mnist_batch(img, fig)
    return img
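Note on init_grads: judging from cells_idx = (-init_grads).astype(int).astype(bool) and loss = -outputs[cells_idx].sum(), the function expects a seed gradient that is -1 at the output cells being maximized and 0 elsewhere, so the Adam steps perform gradient ascent on those activations. Below is a rough sketch of how a caller (e.g. am_visualize_conv_layer in the next example) might build it for one feature map; the helper name and shapes here are assumptions, not the project's actual code.

import numpy as np

def make_init_grads(model, layer_idx, channel, img_shape=(1, 28, 28, 1)):
    # Dummy forward pass to discover the output shape of the target layer.
    outputs = np.zeros(img_shape)
    for layer in model.net.layers[:layer_idx + 1]:
        outputs = layer.forward(outputs)
    # Seed gradient: -1 on the chosen feature map, 0 elsewhere.
    init_grads = np.zeros_like(outputs)
    init_grads[..., channel] = -1.0
    return init_grads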
Example #5
File: run.py Project: t-k-/nbnp
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # create output directory for saving result images
    if not os.path.exists('./output'):
        os.mkdir('./output')

    # define network we are going to load
    net = Net([
        Conv2D(kernel=[5, 5, 1, 6], stride=[1, 1], padding="SAME"),
        ReLU(),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Conv2D(kernel=[5, 5, 6, 16], stride=[1, 1], padding="SAME"),
        ReLU(),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Flatten(),
        Dense(120),
        ReLU(),
        Dense(84),
        ReLU(),
        Dense(10)
    ])

    # load the model
    model = Model(net=net, loss=SoftmaxCrossEntropyLoss(), optimizer=Adam())
    print('loading pre-trained model file', args.model_path)
    model.load(args.model_path)

    # create pyplot window for on-the-fly visualization
    img = np.ones((1, 28, 28, 1))
    fig = disp_mnist_batch(img)

    # generate the actual visualizations

    layer_name = 'conv-layer-1'
    print('[ ' + layer_name + ' ]')
    images = am_visualize_conv_layer(model, 0, fig)
    save_batch_as_images('output/{}.png'.format(layer_name),
                         images,
                         title='visualized feature maps for ' + layer_name)

    layer_name = 'conv-layer-2'
    print('[ ' + layer_name + ' ]')
    images = am_visualize_conv_layer(model, 3, fig)
    save_batch_as_images('output/{}.png'.format(layer_name),
                         images,
                         title='visualized feature maps for ' + layer_name)
Example #6
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # data preparing
    data_path = os.path.join(args.data_dir, args.file_name)
    train_x, train_y, img_shape = prepare_dataset(data_path)

    net = Net([
        Dense(30),
        ReLU(),
        Dense(60),
        ReLU(),
        Dense(60),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(3),
        Sigmoid()
    ])

    model = Model(net=net, loss=MSELoss(), optimizer=Adam())
    mse_evaluator = MSEEvaluator()
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            preds = model.forward(batch.inputs)
            loss, grads = model.backward(preds, batch.targets)
            model.apply_grad(grads)

        # evaluate
        preds = net.forward(train_x)
        mse = mse_evaluator.evaluate(preds, train_y)
        print(mse)

        if args.paint:
            # generate painting
            preds = preds.reshape(img_shape[0], img_shape[1], -1)
            preds = (preds * 255.0).astype("uint8")
            filename, ext = os.path.splitext(args.file_name)
            output_filename = "output" + ext
            output_path = os.path.join(args.data_dir, output_filename)
            Image.fromarray(preds).save(output_path)
        print("Epoch %d time cost: %.2f" % (epoch, time.time() - t_start))
Example #7
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # data preparing
    train_x, train_y, img_shape = prepare_dataset(args.img)

    net = Net([
        Dense(30),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(3),
        Sigmoid()
    ])

    model = Model(net=net, loss=MSE(), optimizer=Adam())
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for batch in iterator(train_x, train_y):
            preds = model.forward(batch.inputs)
            loss, grads = model.backward(preds, batch.targets)
            model.apply_grad(grads)

        # evaluate
        preds = net.forward(train_x)
        mse = mean_square_error(preds, train_y)
        print("Epoch %d %s" % (epoch, mse))

        # generate painting
        if epoch % 5 == 0:
            preds = preds.reshape(img_shape[0], img_shape[1], -1)
            preds = (preds * 255.0).astype("uint8")
            name, ext = os.path.splitext(args.img)
            filename = os.path.basename(name)
            out_filename = filename + "-paint-epoch" + str(epoch) + ext
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)
            out_path = os.path.join(args.output_dir, out_filename)
            Image.fromarray(preds).save(out_path)
            print("save painting to %s" % out_path)
Example #8
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    net = Net([
        Dense(784, 200),
        ReLU(),
        Dense(200, 100),
        ReLU(),
        Dense(100, 70),
        ReLU(),
        Dense(70, 30),
        ReLU(),
        Dense(30, 10)
    ])

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        t_end = time.time()
        # evaluate
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print("Epoch %d time cost: %.4f\t %s" % (epoch, t_end - t_start, res))
Example #9
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = mnist(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    if args.model_type == "cnn":
        train_x = train_x.reshape((-1, 28, 28, 1))
        test_x = test_x.reshape((-1, 28, 28, 1))

    if args.model_type == "cnn":
        # a LeNet-5 model with activation function changed to ReLU
        net = Net([
            Conv2D(kernel=[5, 5, 1, 6], stride=[1, 1], padding="SAME"),
            ReLU(),
            MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
            Conv2D(kernel=[5, 5, 6, 16], stride=[1, 1], padding="SAME"),
            ReLU(),
            MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
            Flatten(),
            Dense(120),
            ReLU(),
            Dense(84),
            ReLU(),
            Dense(10)
        ])
    elif args.model_type == "dense":
        net = Net([
            Dense(200),
            ReLU(),
            Dense(100),
            ReLU(),
            Dense(70),
            ReLU(),
            Dense(30),
            ReLU(),
            Dense(10)
        ])
    else:
        raise ValueError("Invalid argument: model_type")

    model = Model(net=net,
                  loss=SoftmaxCrossEntropy(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = accuracy(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #10
def train(args):
    # prepare dataset
    train_, valid, test = mnist(args.data_dir)
    X = np.concatenate([train_[0], valid[0], test[0]])
    y = np.concatenate([train_[1], valid[1], test[1]])

    fix_noise = get_noise(size=(args.batch_size, args.nz))

    loss = SigmoidCrossEntropy()
    # TODO: replace mlp with cnn
    G = Model(net=mlp_G(),
              loss=loss,
              optimizer=Adam(args.lr_g, beta1=args.beta1))
    D = Model(net=mlp_D(),
              loss=loss,
              optimizer=Adam(args.lr_d, beta1=args.beta1))

    running_g_err, running_d_err = 0, 0
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for i, batch in enumerate(iterator(X, y)):
            # --- Train Discriminator ---
            # feed with real data (maximize log(D(x)))
            d_pred_real = D.forward(batch.inputs)
            label_real = np.ones_like(d_pred_real)
            d_real_err, d_real_grad = D.backward(d_pred_real, label_real)

            # feed with fake data (maximize log(1 - D(G(z))))
            noise = get_noise(size=(len(batch.inputs), args.nz))
            g_out = G.forward(noise)
            d_pred_fake = D.forward(g_out)
            label_fake = np.zeros_like(d_pred_fake)
            d_fake_err, d_fake_grad = D.backward(d_pred_fake, label_fake)

            # train D
            d_err = d_real_err + d_fake_err
            d_grads = d_real_grad + d_fake_grad
            D.apply_grad(d_grads)

            # --- Train Generator ---
            # maximize log(D(G(z)))
            d_pred_fake = D.forward(g_out)
            g_err, d_grad = D.backward(d_pred_fake, label_real)
            g_grads = G.net.backward(d_grad.wrt_input)
            G.apply_grad(g_grads)

            running_d_err = 0.9 * running_d_err + 0.1 * d_err
            running_g_err = 0.9 * running_g_err + 0.1 * g_err
            if i % 100 == 0:
                print("epoch-%d iter-%d d_err: %.4f g_err: %.4f" %
                      (epoch + 1, i + 1, running_d_err, running_g_err))

        # sampling
        print("epoch: %d/%d d_err: %.4f g_err: %.4f" %
              (epoch + 1, args.num_ep, running_d_err, running_g_err))
        samples = G.forward(fix_noise)
        img_name = "ep%d.png" % (epoch + 1)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        save_path = os.path.join(args.output_dir, img_name)
        save_batch_as_images(save_path, samples)

        # save generator
        model_path = os.path.join(args.output_dir, args.model_name)
        G.save(model_path)
        print("Saving generator ", model_path)
Example #11
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # create output directory for saving result images
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # prepare and read dataset
    train_set, _, test_set = mnist(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set

    # specify the encoder and decoder net structure
    encoder_net = Net([Dense(256), ReLU(), Dense(64)])
    decoder_net = Net([ReLU(), Dense(256), Tanh(), Dense(784), Tanh()])
    nets = (encoder_net, decoder_net)
    optimizers = (Adam(args.lr), Adam(args.lr))
    model = AutoEncoder(nets, loss=MSE(), optimizer=optimizers)

    # for pre-trained model, test generated images from latent space
    if args.load_model is not None:
        # load pre-trained model
        model.load(os.path.join(args.output_dir, args.load_model))
        print("Loaded model fom %s" % args.load_model)

        # transition from test[from_idx] to test[to_idx] in n steps
        idx_arr, n = [2, 4, 32, 12, 82], 160
        print("Transition in numbers", [test_y[i] for i in idx_arr],
              "in %d steps ..." % n)
        stops = [model.en_net.forward(test_x[i]) for i in idx_arr]
        k = int(n / (len(idx_arr) - 1))  # number of codes per transition
        # generate all transition codes
        code_arr = []
        for i in range(len(stops) - 1):
            t = [c.copy() for c in transition(stops[i], stops[i + 1], k)]
            code_arr += t
        # decode all n codes from the latent space
        batch = None
        for code in code_arr:
            # translate latent space to image
            genn = model.de_net.forward(code)
            # save decoded results in a batch
            if batch is None:
                batch = np.array(genn)
            else:
                batch = np.concatenate((batch, genn))
        output_path = os.path.join(args.output_dir, "genn-latent.png")
        save_batch_as_images(output_path, batch)
        quit()

    # train the auto-encoder
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for batch in iterator(train_x, train_y):
            origin_in = batch.inputs

            # make noisy inputs
            m = origin_in.shape[0]  # batch size
            mu = args.gaussian_mean  # mean
            sigma = args.gaussian_std  # standard deviation
            noises = np.random.normal(mu, sigma, (m, 784))
            noises_in = origin_in + noises  # noisy inputs

            # forward
            genn = model.forward(noises_in)
            # back-propagate
            loss, grads = model.backward(genn, origin_in)

            # apply gradients
            model.apply_grad(grads)
        print("Epoch: %d Loss: %.3f" % (epoch, loss))

        # save all the generated images and original inputs for this batch
        noises_in_path = os.path.join(args.output_dir,
                                      "ep%d-input.png" % epoch)
        genn_path = os.path.join(args.output_dir, "ep%d-genn.png" % epoch)
        save_batch_as_images(noises_in_path, noises_in, titles=batch.targets)
        save_batch_as_images(genn_path, genn, titles=batch.targets)

    # save the model after training
    model.save(os.path.join(args.output_dir, args.save_model))
Example #12
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # create output directory for saving result images
    if not os.path.exists('./output'):
        os.mkdir('./output')

    # prepare and read dataset
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set

    # batch iterator
    iterator = BatchIterator(batch_size=args.batch_size)

    # specify the encoder and decoder net structure
    encoder = Net([Dense(256), ReLU(), Dense(64)])

    decoder = Net([ReLU(), Dense(256), Tanh(), Dense(784), Tanh()])

    # create AutoEncoder model
    model = AutoEncoder(encoder=encoder,
                        decoder=decoder,
                        loss=MSELoss(),
                        optimizer=Adam(args.lr))

    # for pretrained model, test generated images from latent space
    if args.load_model is not None:
        # load pretrained model
        model.load(args.load_model)
        print('Loaded model from %s' % args.load_model)
        # transition from test[from_idx] to test[to_idx] in n steps
        idx_arr, n = [2, 4, 32, 12, 82], 160
        print("Transition in numbers", [test_y[i] for i in idx_arr],
              "in %d steps ..." % n)
        stops = [model.encoder.forward(test_x[i]) for i in idx_arr]
        k = int(n / (len(idx_arr) - 1))  # number of codes per transition
        # generate all transition codes
        code_arr = []
        for i in range(len(stops) - 1):
            t = [c.copy() for c in transition(stops[i], stops[i + 1], k)]
            code_arr += t
        # decode all n codes from the latent space
        batch = None
        for code in code_arr:
            # translate latent space to image
            genn = model.decoder.forward(code)
            # save decoded results in a batch
            if batch is None:
                batch = np.array(genn)
            else:
                batch = np.concatenate((batch, genn))
        save_batch_as_images('output/genn-latent.png', batch)
        quit()

    # train the autoencoder
    for epoch in range(args.num_ep):
        print('epoch %d ...' % epoch)
        for batch in iterator(train_x, train_y):
            origin_in = batch.inputs
            # make noisy inputs
            m = origin_in.shape[0]  # batch size
            mu = args.guassian_mean  # mean
            sigma = args.guassian_std  # standard deviation
            noises = np.random.normal(mu, sigma, (m, 784))
            noises_in = origin_in + noises  # noisy inputs
            # train the representation
            genn = model.forward(noises_in)
            loss, grads = model.backward(genn, origin_in)
            model.apply_grad(grads)
        print('Loss: %.3f' % loss)
        # save all the generated images and original inputs for this batch
        save_batch_as_images('output/ep%d-input.png' % epoch,
                             noises_in,
                             titles=batch.targets)
        save_batch_as_images('output/ep%d-genn.png' % epoch,
                             genn,
                             titles=batch.targets)

    # save the model after training
    model.save('output/model.pkl')