Example #1
    def train(self):
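        # Set up the generator/discriminator pair, their optimizers, and the
        # reconstruction loss, then run the epoch loop below.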
        generator = Generator(self.embedding, self.vocab_size,
                              config.att_embedding_size, 2, config.ber_prob)
        generator = generator.to(config.device)
        discriminator = Discriminator(2, config.dec_hidden_size,
                                      config.enc_hidden_size)
        discriminator = discriminator.to(config.device)
        self.g_optim = optim.Adam(params=generator.parameters(),
                                  lr=config.g_lr)
        self.d_optim = optim.Adam(params=discriminator.parameters(),
                                  lr=config.d_lr)
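        # targets equal to 0 (assumed to be the padding index) are excluded from the loss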
        criterion = nn.CrossEntropyLoss(ignore_index=0)

        num_step = 0
        best_loss = 1e10
        batch_nb = len(self.train_loader)
        for epoch in range(1, config.num_epochs + 1):
            start = time.time()
            for i, train_data in enumerate(self.train_loader):
                batch_idx = i + 1
                num_step += 1
                recon_loss, errG, errD = self.step(generator, discriminator,
                                                   criterion, train_data)

                # generator objective: reconstruction loss plus adversarial term scaled by loss_lambda
                generator_loss = recon_loss + errG * config.loss_lambda
                # generator_loss = recon_loss
                msg = "{}/{} {} - ETA : {} - recon : {:.4f}, loss G: {:.4f}, loss D: {:.4f}".format(
                    batch_idx, batch_nb, progress_bar(batch_idx, batch_nb),
                    eta(start, batch_idx, batch_nb), recon_loss, errG, errD)
                print(msg)
            # validation tracks the reconstruction loss only
            dev_loss = self.evaluate(generator)
            msg = "Epoch {} took {} - final loss : {:.4f} - validation loss : {:.4f}" \
                .format(epoch, user_friendly_time(time_since(start)), generator_loss, dev_loss)
            print(msg)
            if dev_loss < best_loss:
                best_loss = dev_loss
                generator.save(config.save_dir, epoch, num_step)
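The adversarial update in self.step is called above but not shown. Below is a minimal sketch of what such a step might look like, assuming hypothetical interfaces generator(src) -> (logits, hidden) and discriminator(hidden) -> per-sequence realness logit; none of these signatures come from the source, and the caller is assumed to recombine the returned floats for logging as in train() above.

import torch
import torch.nn as nn

def step_sketch(generator, discriminator, criterion, batch,
                g_optim, d_optim, loss_lambda=1.0):
    bce = nn.BCEWithLogitsLoss()
    src, tgt = batch  # hypothetical batch layout

    logits, hidden = generator(src)

    # Discriminator update: "real" encodings vs. detached generator states.
    d_optim.zero_grad()
    with torch.no_grad():
        _, real_hidden = generator(tgt)  # stand-in for a real encoding
    d_real = discriminator(real_hidden)
    d_fake = discriminator(hidden.detach())  # no gradient into G here
    errD = (bce(d_real, torch.ones_like(d_real)) +
            bce(d_fake, torch.zeros_like(d_fake)))
    errD.backward()
    d_optim.step()

    # Generator update: reconstruction plus weighted adversarial term.
    # Gradients that leak into D here are cleared at the next d_optim.zero_grad().
    g_optim.zero_grad()
    recon_loss = criterion(logits.view(-1, logits.size(-1)), tgt.view(-1))
    errG = bce(discriminator(hidden), torch.ones_like(d_fake))
    (recon_loss + loss_lambda * errG).backward()
    g_optim.step()

    return recon_loss.item(), errG.item(), errD.item()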
Example #2
        # log data for visualization later
        log_parameters = train_utils.log_epoch_stats(
            log_parameters,
            epoch,
            train_loss,
            train_r_sq,
            train_accu,
            val_loss,
            val_r_sq,
            val_accu
        )

        # print loss
        if epoch % PRINT_EVERY == 0:
            print('='*20 + '\nEpoch %d / %d\n' % (epoch, NUM_EPOCHS) + '='*20)
            print('[%s (%d %.1f%%)]' % (train_utils.time_since(START), epoch, float(epoch) / NUM_EPOCHS * 100))
            print('[Train Loss: %0.5f,  R-sq: %0.5f,  Accu: %0.5f]' %
                  (train_loss, train_r_sq, train_accu))
            print('[Valid Loss: %0.5f,  R-sq: %0.5f,  Accu: %0.5f]' %
                  (val_loss, val_r_sq, val_accu))

        # save model if best validation loss
        if val_loss < best_val_loss:
            n = file_info + '_best'
            train_utils.save(n, perf_model)
            best_val_loss = val_loss

    print("Saving...")
    train_utils.save(file_info, perf_model, log_parameters)

except KeyboardInterrupt:
    print("Saving before quit...")
    train_utils.save(file_info, perf_model, log_parameters)
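train_utils.save is project-specific and not shown on this page. A minimal sketch of what such a checkpoint helper could look like; the function name, argument order, and file layout are all assumptions, with torch.save doing the actual serialization:

import os
import torch

def save(file_info, model, log_parameters=None, save_dir='checkpoints'):
    # Write model weights (and optional training logs) under a run-specific name.
    os.makedirs(save_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(save_dir, file_info + '.pt'))
    if log_parameters is not None:
        torch.save(log_parameters, os.path.join(save_dir, file_info + '_log.pt'))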
Example #3
print("----------------")
print()

# start training
try:
    for it in range(1, n_iter + 1):
        batch_input, target = get_batch_set()
        loss = train(batch_input, target)
        total_loss += loss

        # report current training progress
        if it % print_every == 0:
            avg_loss = total_loss / print_every
            sys.stdout.write(
                "%d %d%% (%s) %.4f\n" %
                (it, it / n_iter * 100, time_since(start), avg_loss))
            losses.append(avg_loss)
            total_loss = 0
            # seed keywords: '사랑' (love), '발라드' (ballad)
            lyrics = generate_lyrics(['사랑', '발라드'])
            print(lyrics)
            print()

    sys.stdout.write("Training complete.\n")

# handle keyboard interrupt
except KeyboardInterrupt:
    print("Training was interrupted.")

# plot the loss curve
plt.figure()
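The snippet cuts off right after plt.figure(). A plausible continuation, sketched here under the assumption that losses holds the averaged values appended every print_every iterations above:

plt.plot(losses)
plt.xlabel('checkpoint (every %d iterations)' % print_every)
plt.ylabel('average loss')
plt.title('Training loss')
plt.show()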
Example #4
            perf_model, criterion, perf_optimizer, aug_training_data,
            aug_validation_data, METRIC, MTYPE, CTYPE)
        # adjust learning rate
        # train_utils.adjust_learning_rate(perf_optimizer, epoch, ADJUST_EVERY)
        # log data for visualization later
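        # log_value(name, value, step): presumably tensorboard_logger's scalar logger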
        log_value('train_loss', train_loss, epoch)
        log_value('val_loss', val_loss, epoch)
        log_value('train_r_sq', train_r_sq, epoch)
        log_value('val_r_sq', val_r_sq, epoch)
        log_value('train_accu', train_accu, epoch)
        log_value('val_accu', val_accu, epoch)
        log_value('train_accu2', train_accu2, epoch)
        log_value('val_accu2', val_accu2, epoch)
        # print loss
        if epoch % PRINT_EVERY == 0:
            print('[%s (%d %.1f%%)]' % (train_utils.time_since(START), epoch,
                                        float(epoch) / NUM_EPOCHS * 100))
            print('[Train Loss: %0.5f,  R-sq: %0.5f,  Accu: %0.5f %0.5f]' %
                  (train_loss, train_r_sq, train_accu, train_accu2))
            print('[Valid Loss: %0.5f,  R-sq: %0.5f,  Accu: %0.5f %0.5f]' %
                  (val_loss, val_r_sq, val_accu, val_accu2))
        # save model if best validation loss
        if val_loss < best_val_loss:
            n = file_info + '_best'
            train_utils.save(n, perf_model)
            best_val_loss = val_loss
    print("Saving...")
    train_utils.save(file_info, perf_model)
except KeyboardInterrupt: