def main():
    """Run the WGAN training loop, select the best epoch by validation
    micro-F1, evaluate that epoch on the test set, and append one results
    row to the result CSV.

    NOTE(review): a second ``def main()`` later in this file redefines this
    name at module level, so this definition is shadowed and never called
    unless the duplicate is removed — confirm which one is intended.

    Relies on module-level globals (``config``, ``opt``, ``scores``,
    ``all_metrics``, ``train``, ``eval``, ``test``, ``logging``, ``utils``,
    ``Recorder``, the loss printers, and the ``start_*`` timing globals) —
    presumably initialized elsewhere in this file; not visible here.
    """
    end_epoch = 0  # last epoch actually entered (0 only if config.epoch < 1)
    for i in range(1, config.epoch + 1):
        end_epoch = i
        try:
            if not opt.notrain:
                # A truthy return from train() signals early stopping.
                flag = train(i)
                if flag:
                    break
            else:
                eval(i, 'Valid')
        except KeyboardInterrupt:
            # Fixed typo in the log message (was 'Interupt').
            logging('Interrupt\n')
            break
    # Select the epoch with the best validation micro-F1 score.
    # idx is 0-based; epochs are 1-based, hence the +1.
    idx = np.argmax(scores['overall_micro_f1'])
    best_epoch = idx + 1

    logging("Summary (validation):\n")
    d = {}  # best-epoch validation metrics, keyed by metric name
    for metric in all_metrics:
        logging("{}:{:.3f}\n".format(metric, scores[metric][idx]))
        d[metric] = scores[metric][idx]

    # Re-evaluate the chosen epoch on the test set.
    test_d = test(best_epoch)

    val_losses = utils.combine_dict(valid_loss_printer.losses[best_epoch], d)
    test_losses = utils.combine_dict(test_loss_printer.losses[best_epoch],
                                     test_d)
    results = utils.combine_results(start_local_time, time.localtime(),
                                    time.time() - start_time, best_epoch,
                                    'wgan', end_epoch, threshold, config,
                                    train_loss_printer.losses[best_epoch],
                                    val_losses, test_losses)

    # Recorder: persist the run summary as one CSV row.
    recorder = Recorder(opt.result_csv)
    recorder.add_result(results)
    recorder.write_csv()
    print(log_path)
def main():
    """Run the baseline training loop, select the best epoch by validation
    overall AUC, evaluate that epoch on the test set, and append one
    results row to the result CSV.

    NOTE(review): this redefines ``main()`` declared earlier in the file;
    at module level only this definition takes effect — confirm the
    duplicate is intentional.

    Relies on module-level globals (``config``, ``opt``, ``scores``,
    ``all_metrics``, ``train``, ``eval``, ``test``, ``train_loss``,
    ``logging``, ``utils``, ``Recorder``, and the ``start_*`` timing
    globals) — presumably initialized elsewhere in this file.
    """
    end_epoch = 0  # last epoch actually entered (0 only if config.epoch < 1)
    for i in range(1, config.epoch + 1):
        end_epoch = i
        try:
            if not opt.notrain:
                train(i)
            else:
                eval(i)
        except KeyboardInterrupt:
            # Fixed typo in the log message (was 'Interupt').
            logging('Interrupt\n')
            break
    # Select the epoch with the best validation overall AUC.
    # idx is 0-based; epochs are 1-based, hence the +1.
    idx = np.argmax(scores['overall_auc'])
    best_epoch = idx + 1

    logging("Summary (validation):\n")
    # Log each validation metric once and keep it for the CSV row.
    # (The original logged this identical loop a second time after the
    # "Performance on test set" header, printing validation numbers under
    # the test heading — the duplicate is removed here. Assumes test()
    # does not mutate scores[...][idx]; verify if that changes.)
    d = {}  # best-epoch validation metrics, keyed by metric name
    for metric in all_metrics:
        logging("{}:{:.3f}\n".format(metric, scores[metric][idx]))
        d[metric] = scores[metric][idx]

    logging("\nPerformance on test set:\n")
    test_d = test(best_epoch)

    # Baseline tracks a single training loss series.
    train_loss_d = {'logit_loss': train_loss[idx]}

    results = utils.combine_results(start_local_time, time.localtime(),
                                    time.time() - start_time, best_epoch,
                                    'baseline', end_epoch, threshold, config,
                                    train_loss_d, d, test_d)

    # Recorder: persist the run summary as one CSV row.
    recorder = Recorder(opt.result_csv)
    recorder.add_result(results)
    recorder.write_csv()
    print(log_path)