Ejemplo n.º 1
0
def main():
    """Train, validate, and test a Runner model, reporting R^2 coefficients.

    Relies on module-level `data_manager`, `Runner`, `hparams`, and
    `r2_score` (sklearn convention: ``r2_score(y_true, y_pred)``).
    Trains for up to ``hparams.num_epochs`` epochs with early stopping on
    validation loss, then evaluates once on the test split.
    """
    train_loader, valid_loader, test_loader = data_manager.get_dataloader(hparams)
    runner = Runner(hparams)

    for epoch in range(hparams.num_epochs):
        train_y_pred_mid, train_y_true_mid, train_y_pred_emo, train_y_true_emo, train_loss = \
            runner.run(train_loader, 'train')
        valid_y_pred_mid, valid_y_true_mid, valid_y_pred_emo, valid_y_true_emo, val_loss = \
            runner.run(valid_loader, 'eval')

        train_Coeff_mid = r2_score(train_y_true_mid, train_y_pred_mid)
        val_Coeff_mid = r2_score(valid_y_true_mid, valid_y_pred_mid)

        # BUG FIX: arguments were swapped as (y_pred, y_true). r2_score
        # expects (y_true, y_pred) and R^2 is not symmetric; now consistent
        # with the *_mid computations above.
        train_Coeff_emo = r2_score(train_y_true_emo, train_y_pred_emo)
        val_Coeff_emo = r2_score(valid_y_true_emo, valid_y_pred_emo)
        print("[Epoch %d/%d] [Train Loss: %.4f] [Train Coeff: %.4f] [Valid Loss: %.4f] [Valid Coeff: %.4f]" %
              (epoch + 1, hparams.num_epochs, train_loss, train_Coeff_emo, val_loss, val_Coeff_emo))

        if runner.early_stop(val_loss, epoch + 1):
            break

    print("Training Finished")

    test_y_pred_mid, test_y_true_mid, test_y_pred_emo, test_y_true_emo, test_loss = runner.run(test_loader, 'eval')
    test_Coeff_mid = r2_score(test_y_true_mid, test_y_pred_mid)
    # BUG FIX: same swapped-argument defect as above.
    test_Coeff_emo = r2_score(test_y_true_emo, test_y_pred_emo)
    print('Test Coeff emo: {:.4f} \n'.format(test_Coeff_emo))
Ejemplo n.º 2
0
def main(test_epoch: int):
    """Train a Runner when ``test_epoch == -1``, then evaluate on the test set.

    Relies on module-level `data_manager`, `Runner`, `hp`, `print_eval`, and
    `print_to_file`. Checkpoints every epoch and logs finite losses to
    TensorBoard; test metrics are printed and written to a text file.
    """
    train_loader, valid_loader, test_loader = data_manager.get_dataloader(hp)

    if test_epoch == -1:
        runner = Runner(hp)

        # TODO: add all the evaluation metrics
        loss_layout = {'loss': ['Multiline', ['loss/train', 'loss/valid']]}
        eval_layout = {
            'PESQ': ['Multiline', ['eval/PESQ_out', 'eval/PESQ_x']],
            'STOI': ['Multiline', ['eval/STOI_out', 'eval/STOI_x']],
            'SegSNR': ['Multiline', ['eval/SegSNR_out', 'eval/SegSNR_x']],
            'fwSegSNR': ['Multiline', ['eval/fwSegSNR_out', 'eval/fwSegSNR_x']],
        }
        runner.writer.add_custom_scalars({'train': loss_layout,
                                          'valid': eval_layout})

        epoch = 0
        test_epoch_or_zero = 0
        print(f'Training on {runner.str_device}')
        for epoch in range(hp.num_epochs):
            # training; only log losses that exist and are finite (no NaN/inf).
            train_loss, train_eval = runner.run(train_loader, 'train', epoch)
            if train_loss is not None and torch.isfinite(torch.tensor(train_loss)):
                runner.writer.add_scalar('loss/train', train_loss, epoch)

            # checkpoint after every epoch
            ckpt_path = Path(runner.writer.log_dir, f'{epoch}.pt')
            torch.save(runner.model.module.state_dict(), ckpt_path)

            # validation, with the same finiteness guard
            valid_loss, valid_eval = runner.run(valid_loader, 'valid', epoch)
            if valid_loss is not None and torch.isfinite(torch.tensor(valid_loss)):
                runner.writer.add_scalar('loss/valid', valid_loss, epoch)

        print('Training Finished')
        test_epoch = test_epoch_or_zero if test_epoch_or_zero > 0 else epoch
    else:
        runner = Runner(hp)

    # test
    test_eval_outs, test_eval_xs = runner.test(test_loader, test_epoch)

    # TODO: write test result
    metric_names = ['SegSNR', 'fwSegSNR', 'PESQ', 'STOI']
    print_eval_outs = dict(zip(metric_names, test_eval_outs))
    print_eval_xs = dict(zip(metric_names, test_eval_xs))

    print(f'Test - Input Eval: {print_eval_xs}')
    print(f'Test - Out Eval: {print_eval_outs}')

    path_eval = Path(hp.logdir, f'test_{test_epoch}', 'test_eval.txt')
    if not path_eval.exists():
        # `print_eval` is presumably a module-level formatter — not visible here.
        print_to_file(path_eval, print_eval, (print_eval_xs, print_eval_outs))

    runner.writer.close()
Ejemplo n.º 3
0
def main(test_epoch: int):
    """Train (when ``test_epoch == -1``) and evaluate a binary classifier.

    Logs train/valid losses to TensorBoard, early-stops via ``runner.step``,
    saves the final checkpoint, then reports ROC-AUC, a ROC curve figure,
    a confusion matrix figure, and a text classification report on the
    test split. Relies on module-level `data_manager`, `Runner`, `hparams`,
    `evaluation`, `sklearn`, `np`, `nn`, `torch`, and `Path`.
    """
    train_loader, valid_loader, test_loader = data_manager.get_dataloader(
        hparams)
    if test_epoch == -1:
        runner = Runner(hparams, len(train_loader.dataset))

        dict_custom_scalars = dict(
            loss=['Multiline', ['loss/train', 'loss/valid']], )
        test_dict_custom_scalars = dict(spectrogram=[
            'Multiline', ['spectrogram/input', 'spectrogram/reconstructed']
        ], )

        runner.writer.add_custom_scalars(dict(training=dict_custom_scalars))
        runner.writer.add_custom_scalars(dict(test=test_dict_custom_scalars))

        epoch = 0
        test_epoch_or_zero = 0
        print(f'Training on {runner.str_device}')
        for epoch in range(hparams.num_epochs):
            # training
            train_loss, _ = runner.run(train_loader, 'train', epoch)
            runner.writer.add_scalar('loss/train', train_loss, epoch)

            # validation
            valid_loss, _ = runner.run(valid_loader, 'valid', epoch)
            runner.writer.add_scalar('loss/valid', valid_loss, epoch)

            # check stopping criterion; a positive return selects the epoch to test
            test_epoch_or_zero = runner.step(valid_loss, epoch)
            if test_epoch_or_zero > 0:
                break

        # Unwrap nn.DataParallel (if used) so the checkpoint keys carry no
        # 'module.' prefix.
        if isinstance(runner.model, nn.DataParallel):
            state_dict = runner.model.module.state_dict()
        else:
            state_dict = runner.model.state_dict()
        torch.save(state_dict, Path(runner.writer.logdir, f'{epoch}.pt'))

        print('Training Finished')
        test_epoch = test_epoch_or_zero if test_epoch_or_zero > 0 else epoch
    else:
        runner = Runner(hparams, len(test_loader.dataset))

    # test
    _, evaluate = runner.run(test_loader, 'test', test_epoch)
    y, y_est, pred_prob = evaluate
    # Expand to per-class probabilities: column 0 = pred_prob, column 1 = 1 - pred_prob.
    pred_prob = np.stack((pred_prob, 1 - pred_prob), axis=1)

    roc_auc = sklearn.metrics.roc_auc_score(y, pred_prob[:, 0])
    fig_roc = evaluation.draw_roc_curve(y, pred_prob)
    fig_confusion_mat = evaluation.draw_confusion_mat(y, y_est)

    # Plain strings: the tags contain no placeholders (ruff F541 — useless f-prefix).
    runner.writer.add_scalar('roc auc', roc_auc)
    runner.writer.add_figure('roc curve', fig_roc)
    runner.writer.add_figure('confusion matrix', fig_confusion_mat)
    runner.writer.close()

    print(sklearn.metrics.classification_report(y, y_est))
def main(test_epoch: int):
    """Train (when ``test_epoch == -1``) with per-metric logging, then test.

    Logs loss and each metric in ``runner.metrics`` for train/valid splits,
    early-stops via ``runner.step`` on ``valid_eval[2]``, saves the final
    checkpoint, and writes the test-set evaluation to TensorBoard.
    Relies on module-level `data_manager`, `Runner`, `hparams`, `np`,
    `nn`, `torch`, and `Path`.
    """
    train_loader, valid_loader, test_loader = data_manager.get_dataloader(
        hparams)
    if test_epoch == -1:
        runner = Runner(hparams, len(train_loader.dataset),
                        train_loader.dataset.class_weight)
        dict_custom_scalars = dict(
            loss=['Multiline', ['loss/train', 'loss/valid']])
        for name in runner.metrics:
            dict_custom_scalars[name] = [
                'Multiline', [f'{name}/train', f'{name}/valid']
            ]
        runner.writer.add_custom_scalars(dict(training=dict_custom_scalars))

        epoch = 0
        test_epoch_or_zero = 0
        print(f'Training on {runner.str_device}')
        for epoch in range(hparams.num_epochs):
            # training
            train_loss, train_eval = runner.run(train_loader, 'train', epoch)
            runner.writer.add_scalar('loss/train', train_loss, epoch)
            for idx, name in enumerate(runner.metrics):
                runner.writer.add_scalar(f'{name}/train', train_eval[idx],
                                         epoch)

            # validation
            valid_loss, valid_eval = runner.run(valid_loader, 'valid', epoch)
            runner.writer.add_scalar('loss/valid', valid_loss, epoch)
            for idx, name in enumerate(runner.metrics):
                runner.writer.add_scalar(f'{name}/valid', valid_eval[idx],
                                         epoch)

            # check stopping criterion (keyed on the third validation metric)
            test_epoch_or_zero = runner.step(valid_eval[2], epoch)
            if test_epoch_or_zero > 0:
                break

        # BUG FIX: accessing runner.model.module unconditionally crashes for
        # plain (non-DataParallel) models; guard as the sibling entry point
        # in this file does, so checkpoints also lose the 'module.' prefix.
        if isinstance(runner.model, nn.DataParallel):
            state_dict = runner.model.module.state_dict()
        else:
            state_dict = runner.model.state_dict()
        torch.save(state_dict, Path(runner.writer.logdir, f'{epoch}.pt'))
        print('Training Finished')
        test_epoch = test_epoch_or_zero if test_epoch_or_zero > 0 else epoch
    else:
        runner = Runner(hparams, len(test_loader.dataset))

    # test
    _, test_eval = runner.run(test_loader, 'test', test_epoch)

    str_eval = np.array2string(test_eval, precision=4)
    print(f'Testset Evaluation: {str_eval}')
    runner.writer.add_text('Testset Evaluation', str_eval, test_epoch)

    runner.writer.close()
Ejemplo n.º 5
0
def main():
    """Train for a fixed number of epochs, then report test accuracy.

    Relies on module-level `data_manager`, `Runner`, `hparams`, and
    `device_name`. Prints per-epoch loss/accuracy for the train and
    validation splits.
    """
    loaders = data_manager.get_dataloader(hparams)
    train_loader, valid_loader, test_loader = loaders
    runner = Runner(hparams)

    print('Training on ' + device_name(hparams.device))
    # Hoisted once; same format string as before.
    epoch_fmt = ("[Epoch %d/%d] [Train Loss: %.4f] [Train Acc: %.4f]"
                 " [Valid Loss: %.4f] [Valid Acc: %.4f]")
    for epoch_idx in range(hparams.num_epochs):
        tr_loss, tr_acc = runner.run(train_loader, 'train')
        va_loss, va_acc = runner.run(valid_loader, 'eval')
        print(epoch_fmt % (epoch_idx + 1, hparams.num_epochs,
                           tr_loss, tr_acc, va_loss, va_acc))

    test_loss, test_acc = runner.run(test_loader, 'eval')
    print("Training Finished")
    print("Test Accuracy: %.2f%%" % (test_acc * 100))
Ejemplo n.º 6
0
def main():
    """Train with early stopping while logging progress to a timestamped file.

    Appends the model summary and per-epoch metrics to
    ``./experiments/<timestamp>.txt``, saves a loss-curve image via
    ``get_img``, and prints/logs the final test accuracy. Relies on
    module-level `data_manager`, `Runner`, `hparams`, `summary`,
    `device_name`, and `get_img`.
    """
    cur = time.localtime()
    cur_time = "%04d-%02d-%02d_%02d:%02d" % (
        cur.tm_year, cur.tm_mon, cur.tm_mday, cur.tm_hour, cur.tm_min)

    # FIX: context manager guarantees the log file is closed even if
    # training raises; previously the handle leaked on any exception.
    # NOTE(review): ':' in the filename is invalid on Windows — confirm target OS.
    with open('./experiments/' + cur_time + '.txt', 'a') as f:
        train_loader, valid_loader, test_loader = data_manager.get_dataloader(
            hparams)
        runner = Runner(hparams)
        summary(runner.model)
        f.write(str(runner.model))
        f.write('\n')

        print('Training on ' + device_name(hparams.device))
        f.write('Training on ' + device_name(hparams.device) + '\n')
        train_loss_list = []
        valid_loss_list = []

        for epoch in range(hparams.num_epochs):
            train_loss, train_acc = runner.run(train_loader, 'train')
            valid_loss, valid_acc = runner.run(valid_loader, 'eval')
            train_loss_list.append(train_loss)
            valid_loss_list.append(valid_loss)
            # Build the message once; previously duplicated for print and write.
            msg = (
                "[Epoch %d/%d] [Train Loss: %.4f] [Train Acc: %.4f] [Valid Loss: %.4f] [Valid Acc: %.4f]"
                % (epoch + 1, hparams.num_epochs, train_loss, train_acc,
                   valid_loss, valid_acc))
            print(msg)
            f.write(msg)
            f.write('\n')

            # Stop when validation loss no longer improves.
            if runner.early_stop(valid_loss, epoch + 1):
                break

        get_img(cur_time, train_loss_list, valid_loss_list)
        test_loss, test_acc, prediction = runner.test_run(test_loader)

        print("Training Finished")
        print("Test Accuracy: %.2f%%" % (100 * test_acc))
        f.write("Test Accuracy: %.2f%%" % (100 * test_acc))