Example #1
File: run.py Project: ybin/tinynn
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    # train_y = get_one_hot(train_y, 2)

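    # single-logit MLP; with SigmoidCrossEntropyLoss this performs binary classification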
    net = Net([Dense(100), ReLU(), Dense(30), ReLU(), Dense(1)])

    model = Model(net=net,
                  loss=SigmoidCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        for timer in model.timers.values():
            timer.report()
        # evaluate
        model.set_phase("TEST")
        test_y_idx = np.asarray(test_y).reshape(-1)
        test_pred = model.forward(test_x)
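        # threshold raw logits at 0 (sigmoid(0) = 0.5) to get hard 0/1 labels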
        test_pred[test_pred > 0] = 1
        test_pred[test_pred <= 0] = 0
        test_pred_idx = test_pred.reshape(-1)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #2
File: run.py Project: ybin/tinynn
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    if args.model_type == "cnn":
        train_x = train_x.reshape((-1, 28, 28, 1))
        test_x = test_x.reshape((-1, 28, 28, 1))

    if args.model_type == "cnn":
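        # three stride-2 5x5 convs downsample 28x28 -> 14 -> 7 -> 4 ("SAME" padding)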
        net = Net([
            Conv2D(kernel=[5, 5, 1, 8], stride=[2, 2], padding="SAME"),
            ReLU(),
            Conv2D(kernel=[5, 5, 8, 16], stride=[2, 2], padding="SAME"),
            ReLU(),
            Conv2D(kernel=[5, 5, 16, 32], stride=[2, 2], padding="SAME"),
            ReLU(),
            Flatten(),
            Dense(10)
        ])
    elif args.model_type == "dense":
        net = Net([
            Dense(200),
            ReLU(),
            Dense(100),
            ReLU(),
            Dense(70),
            ReLU(),
            Dense(30),
            ReLU(),
            Dense(10)
        ])
    else:
        raise ValueError(
            "Invalid argument model_type! Must be 'cnn' or 'dense'")

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
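        # argmax over the 10 class scores gives the predicted digit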
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #3
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # data preparing
    data_path = os.path.join(args.data_dir, args.file_name)
    train_x, train_y, img_shape = prepare_dataset(data_path)

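    # the network emits 3 sigmoid outputs per sample, scaled to RGB bytes below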
    net = Net([
        Dense(30),
        ReLU(),
        Dense(60),
        ReLU(),
        Dense(60),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(3),
        Sigmoid()
    ])

    model = Model(net=net, loss=MSELoss(), optimizer=Adam())
    mse_evaluator = MSEEvaluator()
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            preds = model.forward(batch.inputs)
            loss, grads = model.backward(preds, batch.targets)
            model.apply_grad(grads)

        # evaluate
        preds = net.forward(train_x)
        mse = mse_evaluator.evaluate(preds, train_y)
        print(mse)

        if args.paint:
            # generate painting
            preds = preds.reshape(img_shape[0], img_shape[1], -1)
            preds = (preds * 255.0).astype("uint8")
            filename, ext = os.path.splitext(args.file_name)
            output_filename = "output" + ext
            output_path = os.path.join(args.data_dir, output_filename)
            Image.fromarray(preds).save(output_path)
        print("Epoch %d time cost: %.2f" % (epoch, time.time() - t_start))
Example #4
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # data preparing
    train_x, train_y, img_shape = prepare_dataset(args.img)

    net = Net([
        Dense(30),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(3),
        Sigmoid()
    ])

    model = Model(net=net, loss=MSE(), optimizer=Adam())
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for batch in iterator(train_x, train_y):
            preds = model.forward(batch.inputs)
            loss, grads = model.backward(preds, batch.targets)
            model.apply_grad(grads)

        # evaluate
        preds = net.forward(train_x)
        mse = mean_square_error(preds, train_y)
        print("Epoch %d %s" % (epoch, mse))

        # generate painting
        if epoch % 5 == 0:
            preds = preds.reshape(img_shape[0], img_shape[1], -1)
            preds = (preds * 255.0).astype("uint8")
            name, ext = os.path.splitext(args.img)
            filename = os.path.basename(name)
            out_filename = filename + "-paint-epoch" + str(epoch) + ext
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)
            out_path = os.path.join(args.output_dir, out_filename)
            Image.fromarray(preds).save(out_path)
            print("save painting to %s" % out_path)
Example #5
def test_parameters_change(fake_dataset):
    # make sure the parameters change after applying gradients

    # fake dataset
    X, y = fake_dataset
    # simple model
    net = Net([Dense(10), Dense(1)])
    loss = MSE()
    opt = SGD(lr=1.0)
    model = Model(net, loss, opt)

    # forward and backward
    pred = model.forward(X)
    loss, grads = model.backward(pred, y)

    # parameters change test: deep-copy the "before" snapshot (via `import
    # copy`) so it does not alias the arrays that apply_grad updates in place;
    # assumes net.params is a list of per-layer {name: array} dicts
    params_before = copy.deepcopy(model.net.params)
    model.apply_grad(grads)
    params_after = model.net.params
    for layer_before, layer_after in zip(params_before, params_after):
        for key in layer_before:
            assert np.all(layer_before[key] != layer_after[key])
Example #6
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

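    # unlike the other examples, Dense here takes explicit (input_dim, output_dim) sizes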
    net = Net([
        Dense(784, 200),
        ReLU(),
        Dense(200, 100),
        ReLU(),
        Dense(100, 70),
        ReLU(),
        Dense(70, 30),
        ReLU(),
        Dense(30, 10)
    ])

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        t_end = time.time()
        # evaluate
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print("Epoch %d time cost: %.4f\t %s" % (epoch, t_end - t_start, res))
Example #7
class DQN(object):
    def __init__(self, env, args):
        self.args = args
        # Init replay buffer
        self.replay_buffer = deque(maxlen=args.buffer_size)

        # Init parameters
        self.global_step = 0
        self.epsilon = args.init_epsilon
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.n

        self.gamma = args.gamma
        self.learning_rate = args.lr
        self.batch_size = args.batch_size

        self.target_network_update_interval = args.target_network_update

    def build_net(self):
        q_net = Net([Dense(100), ReLU(), Dense(self.action_dim)])
        return q_net

    def construct_model(self):
        self.q_net = self.build_net()
        self.model = Model(net=self.q_net,
                           loss=MSELoss(),
                           optimizer=RMSProp(self.args.lr))

        # Target network
        self.target_q_net = self.build_net()
        self.target_q_net.initialize()

    def sample_action(self, state, policy):
        self.global_step += 1
        # Q value of all actions
        output_q = self.model.forward([state])[0]

        if policy == "egreedy":
            if random.random() <= self.epsilon:  # random action
                return random.randint(0, self.action_dim - 1)
            else:  # greedy action
                return np.argmax(output_q)
        elif policy == "greedy":
            return np.argmax(output_q)
        elif policy == "random":
            return random.randint(0, self.action_dim - 1)

    def learn(self, state, action, reward, next_state, done):
        onehot_action = np.zeros(self.action_dim)
        onehot_action[action] = 1
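        # the one-hot action vector is used in update_model to mask Q-values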

        # Store experience in deque
        self.replay_buffer.append(
            np.array([state, onehot_action, reward, next_state, done],
                     dtype=object))  # object dtype: entries differ in shape
        if len(self.replay_buffer) > self.batch_size:
            self.update_model()

    def update_model(self):
        if self.global_step % self.target_network_update_interval == 0:
            # Update target network. Assign params in q_net to target_q_net
            q_net_params = self.q_net.get_parameters()
            self.target_q_net.set_parameters(q_net_params)

        # Sample experience
        minibatch = random.sample(self.replay_buffer, self.batch_size)

        # Transpose minibatch
        s_batch, a_batch, r_batch, next_s_batch, done_batch = \
            np.array(minibatch).T.tolist()

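        # bootstrap target: max over actions of the target network's Q(s', a)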
        next_s_all_action_Q = self.target_q_net.forward(next_s_batch)
        next_s_Q_batch = np.max(next_s_all_action_Q, 1)

        # Calculate target_Q_batch
        target_Q_batch = []
        for i in range(self.batch_size):
            done_state = done_batch[i]
            if done_state:
                target_Q_batch.append(r_batch[i])
            else:
                target_Q_batch.append(r_batch[i] +
                                      self.gamma * next_s_Q_batch[i])

        # Train the network
        preds = self.model.forward(np.asarray(s_batch))
        preds = np.multiply(preds, a_batch)

        # broadcast targets across actions (tile by action_dim rather than a
        # hard-coded column count), then mask to the taken action
        targets = np.reshape(target_Q_batch, (-1, 1))
        targets = np.tile(targets, (1, self.action_dim))
        targets = np.multiply(targets, a_batch)
        loss, grads = self.model.backward(preds, targets)

        self.model.apply_grad(grads)
Example #8
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = mnist(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    if args.model_type == "cnn":
        train_x = train_x.reshape((-1, 28, 28, 1))
        test_x = test_x.reshape((-1, 28, 28, 1))

    if args.model_type == "cnn":
        # a LeNet-5 model with activation function changed to ReLU
        net = Net([
            Conv2D(kernel=[5, 5, 1, 6], stride=[1, 1], padding="SAME"),
            ReLU(),
            MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
            Conv2D(kernel=[5, 5, 6, 16], stride=[1, 1], padding="SAME"),
            ReLU(),
            MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
            Flatten(),
            Dense(120),
            ReLU(),
            Dense(84),
            ReLU(),
            Dense(10)
        ])
    elif args.model_type == "dense":
        net = Net([
            Dense(200),
            ReLU(),
            Dense(100),
            ReLU(),
            Dense(70),
            ReLU(),
            Dense(30),
            ReLU(),
            Dense(10)
        ])
    else:
        raise ValueError("Invalid argument: model_type")

    model = Model(net=net,
                  loss=SoftmaxCrossEntropy(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = accuracy(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #9
def train(args):
    # prepare dataset
    train_, valid, test = mnist(args.data_dir)
    X = np.concatenate([train_[0], valid[0], test[0]])
    y = np.concatenate([train_[1], valid[1], test[1]])

    fix_noise = get_noise(size=(args.batch_size, args.nz))

    loss = SigmoidCrossEntropy()
    # TODO: replace mlp with cnn
    G = Model(net=mlp_G(),
              loss=loss,
              optimizer=Adam(args.lr_g, beta1=args.beta1))
    D = Model(net=mlp_D(),
              loss=loss,
              optimizer=Adam(args.lr_d, beta1=args.beta1))

    running_g_err, running_d_err = 0, 0
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for i, batch in enumerate(iterator(X, y)):
            # --- Train Discriminator ---
            # feed with real data (maximize log(D(x)))
            d_pred_real = D.forward(batch.inputs)
            label_real = np.ones_like(d_pred_real)
            d_real_err, d_real_grad = D.backward(d_pred_real, label_real)

            # feed with fake data (maximize log(1 - D(G(z))))
            noise = get_noise(size=(len(batch.inputs), args.nz))
            g_out = G.forward(noise)
            d_pred_fake = D.forward(g_out)
            label_fake = np.zeros_like(d_pred_fake)
            d_fake_err, d_fake_grad = D.backward(d_pred_fake, label_fake)

            # train D
            d_err = d_real_err + d_fake_err
            d_grads = d_real_grad + d_fake_grad
            D.apply_grad(d_grads)

            # --- Train Generator ---
            # maximize log(D(G(z)))
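            # reuse label_real: the generator is trained to make D call its
            # fakes real; D's gradient w.r.t. its input is then pushed
            # back through G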
            d_pred_fake = D.forward(g_out)
            g_err, d_grad = D.backward(d_pred_fake, label_real)
            g_grads = G.net.backward(d_grad.wrt_input)
            G.apply_grad(g_grads)

            running_d_err = 0.9 * running_d_err + 0.1 * d_err
            running_g_err = 0.9 * running_g_err + 0.1 * g_err
            if i % 100 == 0:
                print("epoch-%d iter-%d d_err: %.4f g_err: %.4f" %
                      (epoch + 1, i + 1, running_d_err, running_g_err))

        # sampling
        print("epoch: %d/%d d_err: %.4f g_err: %.4f" %
              (epoch + 1, args.num_ep, running_d_err, running_g_err))
        samples = G.forward(fix_noise)
        img_name = "ep%d.png" % (epoch + 1)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        save_path = os.path.join(args.output_dir, img_name)
        save_batch_as_images(save_path, samples)

        # save generator
        model_path = os.path.join(args.output_dir, args.model_name)
        G.save(model_path)
        print("Saving generator ", model_path)