Example #1
    def trainer(self):

        steps = 0

        loss_deque = deque(maxlen=100)
        train_loss = []

        last_epoch = 0
        if self.opts.resume:
            last_epoch, loss = self.load_progress()

        for e in range(self.opts.epoch - last_epoch):
            '''Adaptive LR Change'''
            for param_group in self.RNN_optim.param_groups:
                param_group['lr'] = util.linear_LR(e, self.opts)
                print('epoch: {}, RNN_LR: {:.4}'.format(e, param_group['lr']))

            if self.opts.save_progress:
                '''Save the progress just before the LR starts to decay'''
                if e == self.opts.const_epoch:
                    self.save_progress(self.opts.const_epoch,
                                       np.mean(loss_deque))

                if e % self.opts.save_every == 0:
                    self.save_progress(e, np.mean(loss_deque))

            for data, labels, lengths in self.data_loader:
                steps += 1

                data, labels, lengths = util.sort_batch(data, labels, lengths)

                #data = data[:lengths]
                #labels = labels[:lengths]

                self.RNN_optim.zero_grad()

                loss = self.RNN(data.to(device), labels.to(device),
                                lengths.to(device))

                loss.backward()
                self.RNN_optim.step()

                loss_deque.append(loss.cpu().item())
                train_loss.append(np.mean(loss_deque))

                if steps % self.opts.print_every == 0:
                    print('Epoch: {}, Steps: {}, Loss: {:.4}'.format(
                        e, steps, loss.item()))
                    util.raw_score_plotter(train_loss)

        if self.opts.save_progress:
            '''Save the final progress after training completes'''
            self.save_progress(-1, np.mean(loss_deque))

        util.raw_score_plotter(train_loss)
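All five examples lean on a util.linear_LR(epoch, opts) helper that is not shown. The opts.const_epoch option and the comments about saving just before the LR starts to decay suggest a schedule that holds the learning rate constant for the first const_epoch epochs and then decays it linearly to zero at opts.epoch. Below is a minimal sketch under that assumption; the exact formula and the opts.lr field are guesses, not the project's code:

import argparse

def linear_LR(epoch, opts):
    """Assumed reconstruction of util.linear_LR: hold opts.lr constant for
    opts.const_epoch epochs, then decay it linearly to 0 at opts.epoch."""
    if epoch < opts.const_epoch:
        return opts.lr
    decay_epochs = float(opts.epoch - opts.const_epoch)
    progress = (epoch - opts.const_epoch) / decay_epochs
    return opts.lr * max(0.0, 1.0 - progress)

# hypothetical option values, for illustration only
opts = argparse.Namespace(lr=2e-4, const_epoch=100, epoch=200)
print([linear_LR(e, opts) for e in (0, 100, 150, 199)])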
Example #2
File: train.py Project: yk287/NLP
def trainer(opts, RNN, RNN_optim, criterion, loader):

    last_100_loss = deque(maxlen=100)
    last_100_g_loss = []

    iter_count = 0

    for epoch in range(opts.epoch):

        for param_group in RNN_optim.param_groups:
            param_group['lr'] = util.linear_LR(epoch, opts)
            print('Epoch: {}, RNN_LR: {:.4}'.format(epoch, param_group['lr']))

        for image, label in loader:
            '''Images'''
            image = image.view(-1, 28, 28)
            image = image.to(device)

            label = label.to(device)
            '''run the data through RNN'''
            output = RNN(image)
            loss = criterion(output, label)
            '''take a gradient step'''
            RNN_optim.zero_grad()
            loss.backward()
            RNN_optim.step()  # One step Descent into loss
            '''plot the loss'''
            last_100_loss.append(loss.item())
            last_100_g_loss.append(np.mean(last_100_loss))
            util.raw_score_plotter(last_100_g_loss)
            iter_count += 1

            if iter_count % opts.print_every == 0:
                print('Epoch: {}, Iter: {}, Loss: {:.4}'.format(
                    epoch, iter_count, loss.item()))
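Example #2 is a plain classification loop: the image.view(-1, 28, 28) reshape suggests 28x28 images fed to the RNN one row per time step, with a criterion such as cross entropy. Below is a self-contained sketch of how such a model and the call to trainer might be wired up; the SimpleRNN module, the optimizer choice, and the learning rate are illustrative assumptions rather than the project's code:

import torch
import torch.nn as nn

class SimpleRNN(nn.Module):
    """Hypothetical row-by-row RNN classifier for 28x28 images."""
    def __init__(self, input_size=28, hidden_size=128, num_classes=10):
        super().__init__()
        self.gru = nn.GRU(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # x: (batch, 28, 28); each image row is one time step
        _, h_n = self.gru(x)
        return self.fc(h_n[-1])

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
RNN = SimpleRNN().to(device)
RNN_optim = torch.optim.Adam(RNN.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
# trainer(opts, RNN, RNN_optim, criterion, loader)  # loader: e.g. an MNIST DataLoader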
Example #3
def train_CycleGan(train_step,
                   loss,
                   reconstruction,
                   show_every=opts.show_every,
                   print_every=opts.print_every,
                   batch_size=128,
                   num_epoch=10):
    """
    Function that trains a CycleGAN.
    :param train_step: an op that defines what to do with the loss function (minimize or maximize)
    :param loss: an op that defines the loss function to be minimized
    :param reconstruction: an op that defines how to reconstruct a target image
    :param show_every: how often to show an image to gauge training progress
    :param print_every: how often to print the loss
    :param batch_size: batch size of the training samples
    :param num_epoch: how many times to iterate over the training samples
    :return:
    """

    image_dir = '/home/youngwook/Downloads/edges2shoes'
    folder_names = get_folders(image_dir)

    train_folder = folder_names[2]
    val_folder = folder_names[1]

    train_data = AB_Combined_ImageLoader(train_folder,
                                         size=opts.resize,
                                         num_images=opts.num_images,
                                         randomcrop=opts.image_shape)
    train_loader = DataLoader(train_data,
                              batch_size=opts.batch,
                              shuffle=True,
                              num_workers=12)

    step = 0
    target_pred_list = []
    input_pred_list = []
    input_true_list = []
    target_true_list = []
    last_100_loss_dq = deque(maxlen=100)
    last_100_loss = []

    # replay buffers for previously generated images; the original excerpt
    # uses them below without defining them, so a bounded deque is assumed
    input_memory = deque(maxlen=100)
    target_memory = deque(maxlen=100)

    checkpoint_dir = './model'
    saver = tf.train.Saver()

    if opts.resume:
        #print('Loading Saved Checkpoint')
        tf_util.load_session(checkpoint_dir,
                             saver,
                             session,
                             model_name=opts.model_name)

    for epoch in range(num_epoch):
        # every show_every steps, show a sample result

        lr = util.linear_LR(epoch, opts)

        for (minibatch, minbatch_y) in train_loader:
            # run a batch of data through the network
            # logits= sess.run(logits_real, feed_dict={x:minibatch})

            target_pred, input_pred = session.run(
                [target_image_prediction, input_image_prediction],
                feed_dict={
                    input_image: minibatch,
                    target_image: minbatch_y,
                    adaptive_lr: lr
                })

            input_memory.append(input_pred)
            target_memory.append(target_pred)

            target_replay_images = np.vstack(target_memory)
            input_replay_images = np.vstack(input_memory)

            #train the Generator
            _, G_loss_curr = session.run(
                [train_step[2], loss[1]],
                feed_dict={
                    input_image: minibatch,
                    target_image: minbatch_y,
                    input_replay: input_replay_images,
                    target_replay: target_replay_images,
                    input_image_pred: input_pred,
                    target_image_pred: target_pred,
                    adaptive_lr: lr
                })

            #train the discriminator
            _, D_loss_curr = session.run(
                [train_step[0], loss[0][0]],
                feed_dict={
                    input_image: minibatch,
                    input_replay: input_replay_images,
                    adaptive_lr: lr
                })
            _, D_loss_curr = session.run(
                [train_step[1], loss[0][1]],
                feed_dict={
                    target_image: minbatch_y,
                    target_replay: target_replay_images,
                    adaptive_lr: lr
                })

            last_100_loss_dq.append(G_loss_curr)
            last_100_loss.append(np.mean(last_100_loss_dq))

            step += 1
            if step % show_every == 0:
                '''for every show_every step, show reconstructed images from the training iteration'''

                target_name = './img/target_pred_%s.png' % step
                input_name = './img/input_pred_%s.png' % step
                input_true_name = './img/true_input_%s.png' % step
                target_true_name = './img/true_target_%s.png' % step

                #translate the image
                target_pred, input_pred = session.run(
                    [target_image_prediction, input_image_prediction],
                    feed_dict={
                        input_image: minibatch,
                        target_image: minbatch_y
                    })

                target_pred_list.append(target_name)
                input_pred_list.append(input_name)
                input_true_list.append(input_true_name)
                target_true_list.append(target_true_name)

                util.show_images(target_pred[:opts.batch], opts, target_name)
                util.plt.show()
                util.show_images(minbatch_y[:opts.batch], opts,
                                 target_true_name)
                util.plt.show()

                util.show_images(input_pred[:opts.batch], opts, input_name)
                util.plt.show()
                util.show_images(minibatch[:opts.batch], opts, input_true_name)
                util.plt.show()

            if step % print_every == 0:
                print('Epoch: {}, G_loss: {:.4}'.format(epoch, G_loss_curr))
                util.raw_score_plotter(last_100_loss)

        #save the model after every epoch
        if opts.save_progress:
            tf_util.save_session(saver,
                                 session,
                                 checkpoint_dir,
                                 epoch,
                                 model_name=opts.model_name)

    util.raw_score_plotter(last_100_loss)

    image_to_gif('', target_pred_list, duration=0.5, gifname='target_pred')
    image_to_gif('', input_pred_list, duration=0.5, gifname='input_pred')
    image_to_gif('', input_true_list, duration=0.5, gifname='input_true')
    image_to_gif('', target_true_list, duration=0.5, gifname='target_true')
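Example #3 keeps bounded replay buffers (input_memory, target_memory) of past generator outputs and trains the discriminators on the stacked history, a trick commonly used to stabilise CycleGAN training. The sketch below packages the same idea as a small reusable class; the ImagePool name, the 50-image capacity, and the push_and_stack method are illustrative assumptions, not the project's code:

from collections import deque
import numpy as np

class ImagePool:
    """Bounded history of generated images, in the spirit of the
    input_memory/target_memory buffers used above."""
    def __init__(self, capacity=50):  # capacity of 50 is an assumption
        self.buffer = deque(maxlen=capacity)

    def push_and_stack(self, batch):
        # batch: ndarray of shape (N, H, W, C) produced by the generator
        self.buffer.append(batch)
        # the discriminator then sees the whole stacked history
        return np.vstack(self.buffer)

# usage mirroring the training loop above (pool names are illustrative):
# input_replay_images = input_pool.push_and_stack(input_pred)
# target_replay_images = target_pool.push_and_stack(target_pred)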
Example #4
File: train.py Project: yk287/ML
    def train(self):
        """
        Trains self.D with a binary cross-entropy classification loss.

        Everything is read from attributes on the class rather than passed in:
        self.D is the network being trained, self.D_solver its optimizer,
        self.loader a Torch dataloader, and self.opts holds the epoch count,
        the LR schedule, print/save frequencies, and the resume flag.
        :return:
        """
        last_100_loss = deque(maxlen=100)
        last_100_g_loss = []

        iter_count = 0

        last_epoch = 0
        if self.opts.resume:
            last_epoch, loss = self.load_progress()

        for epoch in range(self.opts.epoch - last_epoch):
            '''Adaptive LR Change'''
            for param_group in self.D_solver.param_groups:
                param_group['lr'] = util.linear_LR(epoch, self.opts)
                print('epoch: {}, D_LR: {:.4}'.format(epoch,
                                                      param_group['lr']))

            if self.opts.save_progress:
                '''Save the progress just before the LR starts to decay'''
                if epoch == self.opts.const_epoch:
                    self.save_progress(self.opts.const_epoch,
                                       np.mean(last_100_loss))

            for image, label in self.loader:
                '''Real Images'''
                image = image.to(device)
                '''cast the label to float for the BCE-with-logits loss'''
                label = label.float().to(device)
                '''Train Discriminator'''
                '''Get the logits'''

                real_logits_cls = self.D(image.to(device))

                loss = self.opts.cls_lambda * F.binary_cross_entropy_with_logits(
                    real_logits_cls, label,
                    reduction='sum') / real_logits_cls.size(0)

                self.D_solver.zero_grad()
                loss.backward()
                self.D_solver.step()  # one gradient descent step on the loss

                iter_count += 1

                last_100_loss.append(loss.cpu().item())
                last_100_g_loss.append(np.mean(last_100_loss))

                if iter_count % self.opts.print_every == 0:
                    print('Epoch: {}, Iter: {}, D: {:.4} '.format(
                        epoch, iter_count, loss.item()))
                    util.raw_score_plotter(last_100_g_loss)

                if self.opts.save_progress:
                    if iter_count % self.opts.save_every == 0:
                        self.save_progress(epoch, np.mean(last_100_loss))

        if self.opts.save_progress:
            '''Save the final progress after training completes'''
            self.save_progress(-1, np.mean(last_100_loss))
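Examples #1, #4, and #5 call self.save_progress(epoch, mean_loss) and self.load_progress() without showing them. Below is a minimal sketch of what such checkpoint helpers typically look like in PyTorch; the mixin name, the self.D / self.D_solver attribute names (matching Example #4), and the self.opts.checkpoint_path option are assumptions:

import torch

class CheckpointMixin:
    """Assumed shape of the save_progress/load_progress helpers used by the
    class-based trainers; the checkpoint path option is hypothetical."""

    def save_progress(self, epoch, loss):
        # persist the model, optimizer, epoch number and running loss
        torch.save({
            'epoch': epoch,
            'loss': loss,
            'model_state_dict': self.D.state_dict(),
            'optim_state_dict': self.D_solver.state_dict(),
        }, self.opts.checkpoint_path)

    def load_progress(self):
        # restore state and return (last_epoch, loss), as the trainers expect
        checkpoint = torch.load(self.opts.checkpoint_path)
        self.D.load_state_dict(checkpoint['model_state_dict'])
        self.D_solver.load_state_dict(checkpoint['optim_state_dict'])
        return checkpoint['epoch'], checkpoint['loss']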
Example #5
File: train.py Project: yk287/NLP
    def trainer(self):

        steps = 0
        correct = 0
        total = 0

        loss_deque = deque(maxlen=100)
        train_loss = []

        last_epoch = 0
        if self.opts.resume:
            last_epoch, loss = self.load_progress()

        for e in range(self.opts.epoch - last_epoch):
            '''Adaptive LR Change'''
            for param_group in self.RNN_optim.param_groups:
                param_group['lr'] = util.linear_LR(e, self.opts)
                print('epoch: {}, RNN_LR: {:.4}'.format(e, param_group['lr']))

            if self.opts.save_progress:
                '''Save the progress just before the LR starts to decay'''
                if e == self.opts.const_epoch:
                    self.save_progress(self.opts.const_epoch,
                                       np.mean(loss_deque))

                if e % self.opts.save_every == 0:
                    self.save_progress(e, np.mean(loss_deque))

            for data, labels, lengths in self.data_loader:
                steps += 1

                data, labels, lengths = util.sort_batch(data, labels, lengths)

                self.RNN_optim.zero_grad()
                pred = self.RNN(data, lengths)
                loss = self.criterion(pred, labels.to(device))
                loss.backward()
                self.RNN_optim.step()

                # pick the argmax as the predicted class
                predicted = torch.max(pred, 1)[1]

                for prediction, label in zip(predicted, labels):
                    if prediction.cpu().item() == label.item():
                        correct += 1
                    total += 1

                loss_deque.append(loss.cpu().item())
                train_loss.append(np.mean(loss_deque))

                if steps % self.opts.print_every == 0:
                    print(
                        'Epoch: {}, Steps: {}, Loss: {:.4}, Train Accuracy: {:.4}'
                        .format(e, steps, loss.item(), correct / float(total)))
                    correct = 0
                    total = 0
                    util.raw_score_plotter(train_loss)

        if self.opts.save_progress:
            '''Save the final progress after training completes'''
            self.save_progress(-1, np.mean(loss_deque))

        util.raw_score_plotter(train_loss)
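Examples #1 and #5 reorder every batch with util.sort_batch(data, labels, lengths) before it reaches the RNN, which is the usual preparation for torch.nn.utils.rnn.pack_padded_sequence on padded variable-length sequences. A sketch of what that helper most likely does; the body is an assumption, not the project's code:

import torch

def sort_batch(data, labels, lengths):
    """Assumed reconstruction of util.sort_batch: order a padded batch by
    descending sequence length, as pack_padded_sequence expects by default."""
    lengths, sorted_idx = lengths.sort(descending=True)
    return data[sorted_idx], labels[sorted_idx], lengths

# inside the RNN's forward, the sorted batch would then typically be packed:
# packed = torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True)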