Exemple #1
0
# Standard fine-tuning loop: decay the LR at 50% / 75% of training, train one
# epoch, evaluate, and checkpoint while tracking the best accuracy so far.
best_prec1 = 0.
# FIX: the original `epoch in [args.epochs * 0.5, args.epochs * 0.75]`
# compared the int `epoch` against floats, so the decay was silently skipped
# whenever args.epochs was not divisible by 4.  Integer milestones behave
# identically for the divisible case and also fire for odd epoch counts.
lr_milestones = {args.epochs // 2, args.epochs * 3 // 4}
for epoch in range(args.start_epoch, args.epochs):
    if epoch in lr_milestones:
        # Step every parameter group's learning rate down by 10x.
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.1
    avg_loss, train_acc = train(model,
                                optimizer,
                                epoch=epoch,
                                device=device,
                                train_loader=train_loader,
                                valid=args.valid,
                                valid_size=valid_size,
                                log_interval=args.log_interval)
    if args.valid:
        # Model selection on the validation split; test is reported only.
        prec1 = valid(model, device, valid_loader, valid_size=valid_size)
        test(model, device, test_loader)
    else:
        # No validation split: select directly on test accuracy.
        prec1 = test(model, device, test_loader)
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    # Checkpoint every epoch; `is_best` lets save_checkpoint keep a copy of
    # the best-performing weights.
    save_checkpoint(
        {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
            'cfg': model.cfg
        },
        is_best,
        filepath=args.save)
Exemple #2
0
                                      linear_lr=0.001)
            # Fresh SGD optimizer over the pruned model's parameter groups
            # (optim_set is built just above with per-group learning rates).
            optimizer = optim.SGD(optim_set, momentum=args.momentum)

            # Fine-tune the pruned model until its accuracy recovers past the
            # prune threshold (args.pruneT) or the epoch budget runs out.
            for epoch in range(args.start_epoch, args.epochs):
                avg_loss, train_acc = train(newmodel,
                                            optimizer,
                                            epoch=epoch,
                                            device=device,
                                            train_loader=train_loader,
                                            valid=args.valid,
                                            valid_len=valid_len,
                                            log_interval=args.log_interval)
                # test(newmodel, device, test_loader)
                if args.valid:
                    # Score on the held-out validation split.
                    prec = valid(newmodel,
                                 device,
                                 valid_loader,
                                 valid_len=valid_len)
                else:
                    # No validation split: fall back to training accuracy.
                    prec = train_acc
                if prec > args.pruneT:
                    # Accuracy recovered; stop fine-tuning early.
                    break

            # NOTE(review): `prec` is undefined if the loop above never runs
            # (args.start_epoch >= args.epochs) — presumably never the case.
            prec_list.append(prec)
            drop_threshold = args.dropT
            print('(last_prec1 - prec): %.3f' % (last_prec1 - prec))
            # Accept the pruned model only if it cleared the prune threshold
            # AND lost no more than drop_threshold accuracy relative to the
            # last accepted model; otherwise the old `model` is kept.
            if prec > args.pruneT and (last_prec1 - prec) <= drop_threshold:
                last_prec1 = prec
                success_flag_list.append(1)
                del model
                model = newmodel
                prune_success = True
Exemple #3
0
                                      linear_lr=0.001)
            # New SGD optimizer for the pruned model (optim_set carries the
            # per-group learning rates set up above).
            optimizer = optim.SGD(optim_set, momentum=args.momentum)

            # Fine-tune until accuracy recovers past args.pruneT or the
            # epoch budget is exhausted.
            for epoch in range(args.start_epoch, args.epochs):
                avg_loss, train_acc = train(newmodel,
                                            optimizer,
                                            epoch=epoch,
                                            device=device,
                                            train_loader=train_loader,
                                            valid=args.valid,
                                            valid_size=valid_size,
                                            log_interval=args.log_interval)
                # test(newmodel, device, test_loader)
                if args.valid:
                    # Evaluate on the held-out validation split.
                    prec = valid(newmodel,
                                 device,
                                 valid_loader,
                                 valid_size=valid_size)
                else:
                    # Without a validation split, use training accuracy.
                    prec = train_acc
                if prec > args.pruneT:
                    # Threshold reached — stop fine-tuning early.
                    break

            # NOTE(review): `prec` is undefined if the loop never executes
            # (args.start_epoch >= args.epochs) — confirm callers prevent this.
            prec_list.append(prec)
            drop_threshold = args.dropT
            print('(last_prec1 - prec): %.3f' % (last_prec1 - prec))
            # Accept the pruned model only if it both clears the prune
            # threshold and drops at most drop_threshold accuracy versus the
            # previously accepted model.
            if prec > args.pruneT and (last_prec1 - prec) <= drop_threshold:
                last_prec1 = prec
                success_flag_list.append(1)
                del model
                model = newmodel
                prune_success = True
        help="Select dataset to train. expected value: [cifar10, imagenet, lsun, celeba]")
    parser.add_argument("--batch_size", type=int, default=64, \
        help="Number of data to learn at a time")
    parser.add_argument("--excute_mode", type=str, default="valid", \
        help="Select execute mode. expected value: [train, valid, sampling]")
    parser.add_argument("--recall_iter", type=int, default=100000, \
        help="Select the number of recall iter. 0 to start new iter.")

    arguments = parser.parse_args()

    # Ternary-search-style probe: evaluate validation loss at the right end
    # of the iteration range (recall_iter), the left end (0), and the middle.
    max_iter = arguments.recall_iter
    min_loss = float('inf')

    loss_list = [0, 0, 0]

    right_valid_loss = train.valid(arguments)
    # NOTE(review): loss_list stores arguments.recall_iter rather than the
    # returned *_valid_loss values — this only makes sense if train.valid()
    # mutates arguments.recall_iter; otherwise the losses should be stored.
    # Verify against train.valid's implementation.
    loss_list[2] = arguments.recall_iter

    arguments.recall_iter = 0
    left_valid_loss = train.valid(arguments)
    loss_list[0] = arguments.recall_iter

    arguments.recall_iter = max_iter//2
    mid_valid_loss = train.valid(arguments)
    loss_list[1] = arguments.recall_iter
    

    # Narrowing loop — body continues beyond this excerpt.
    while 1:
        min_loss = min(loss_list)
        min_iter = loss_list.index(min_loss)
                                    load_backboe_weight=False)

    # load weight
    # Restore trained parameters onto CPU first, then move to target device.
    model_file = args.weight_path
    chkpt = torch.load(model_file, map_location='cpu')  # load checkpoint
    model.load_state_dict(chkpt['model'])
    print('load weights from ' + model_file)

    model = model.to(device)

    # Wrap for multi-GPU evaluation when launched via torch.distributed.
    if args.distributed:
        model = nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )

    # Validation loader: fixed order (shuffle=False); data_sampler handles
    # the distributed vs. single-process sampling strategy.
    valid_loader = DataLoader(
        valid_set,
        batch_size=args.batch,
        sampler=data_sampler(valid_set,
                             shuffle=False,
                             distributed=args.distributed),
        num_workers=args.num_workers,
        collate_fn=collate_fn(args),
    )

    # Run validation once (step/epoch 0); saving prediction images is
    # disabled by default.
    predictions = valid(args, 0, valid_loader, valid_set, model, device)
    #save_predictions_to_images(valid_set, predictions)
Exemple #6
0
# Fine-tuning loop: decay the LR at 50% / 75% of training, train one epoch,
# evaluate, and checkpoint while tracking the best accuracy.
best_prec1 = 0.
# FIX: the original `epoch in [args.epochs * 0.5, args.epochs * 0.75]`
# compared the int `epoch` against floats, so the LR decay silently never
# fired when args.epochs was not divisible by 4.  Integer milestones are
# identical for the divisible case and also work for odd epoch counts.
lr_milestones = {args.epochs // 2, args.epochs * 3 // 4}
for epoch in range(args.start_epoch, args.epochs):
    if epoch in lr_milestones:
        # Step every parameter group's learning rate down by 10x.
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.1
    avg_loss, train_acc = train(model,
                                optimizer,
                                epoch=epoch,
                                device=device,
                                train_loader=train_loader,
                                valid=args.valid,
                                valid_len=valid_len,
                                log_interval=args.log_interval)
    if args.valid:
        # Select on the validation split; the test run is informational.
        prec1 = valid(model, device, valid_loader, valid_len=valid_len)
        test(model, device, test_loader)
    else:
        # No validation split: select directly on test accuracy.
        prec1 = test(model, device, test_loader)
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    # Checkpoint every epoch; `is_best` marks the best weights so far.
    save_checkpoint(
        {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
            'cfg': model.cfg
        },
        is_best,
        filepath=args.save)
Exemple #7
0
def main():
    """Train ConvNet3 on CIFAR-10 (or reuse cached results), evaluate on the
    test set, persist all metrics as JSON, and plot loss/accuracy curves."""
    model_id = str(FLAGS.model_num)
    save_dir = os.path.join('output', "{model}").format(model=model_id)
    results_path = os.path.join(save_dir, 'results')
    file_exists = os.path.isfile(_get_model_file(save_dir, model_id))

    # Reuse previously aggregated results unless --overwrite was given.
    if file_exists and not FLAGS.overwrite:
        print("Model file of \"%s\" already exists. Skipping training..." %
              FLAGS.model_num)
        return aggregate_json_results(results_path)
    if file_exists:
        print("Model file exists, but will be overwritten...")

    # Training-time augmentation; test images are only normalized.
    transform_aug_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(FLAGS.resize_val),
        #transforms.RandomCrop(FLAGS.resize_val, padding=4),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    transform_aug_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    data_root = './data/cifar-10-batches-py/'
    cif10 = CIFAR10_loader(root=data_root,
                           transform=transform_aug_train,
                           train=True)
    # Stratified 80/20 train/validation split over the sample indices.
    train_idx, val_idx = train_test_split(np.arange(cif10.labels.shape[1]),
                                          train_size=0.8,
                                          shuffle=True,
                                          stratify=cif10.labels.reshape(
                                              (-1, 1)))
    mytrainloader = DataLoader(Subset(cif10, train_idx),
                               batch_size=FLAGS.batch_size,
                               shuffle=True,
                               num_workers=0)
    myvalloader = DataLoader(Subset(cif10, val_idx),
                             batch_size=FLAGS.batch_size,
                             shuffle=False)
    cif10_test = CIFAR10_loader(root=data_root,
                                transform=transform_aug_test,
                                train=False)
    mytestloader = DataLoader(cif10_test,
                              batch_size=FLAGS.batch_size,
                              shuffle=False,
                              num_workers=0)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    convnet = ConvNet3()
    experiment_timer = time.time()
    results = custom_train(convnet,
                           mytrainloader,
                           valloader=myvalloader,
                           testloader=mytestloader,
                           dir=save_dir,
                           model_num=FLAGS.model_num,
                           epochs=FLAGS.epochs,
                           device=device,
                           lr=FLAGS.learning_rate)
    results['final_time'] = time.time() - experiment_timer

    # Persist run configuration and per-epoch training metrics.
    save_dict(os.path.join(save_dir, 'flags.json'), vars(FLAGS))
    save_dict(os.path.join(results_path, "train.json"), results)

    # Final test-set evaluation: overall metrics plus per-class accuracy.
    overall_metrics = valid(convnet, mytestloader, device=device)
    per_class_metrics = valid_class(convnet,
                                    mytestloader,
                                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                                    device=device)
    save_dict(os.path.join(results_path, "test.json"),
              {**overall_metrics, **per_class_metrics})

    # Loss and accuracy curves, one figure each.
    for series, title, fname in (
            (results['train_losses'], 'Losses per epoch', 'loss.png'),
            (results['train_scores'], 'Accuracy per epoch', 'acc.png')):
        plt.plot(series)
        plt.title(title)
        plt.savefig(os.path.join(results_path, fname))
        plt.close()
Exemple #8
0
def main(**kwargs):
    """Entry point: build datasets and model from Config (overridden by any
    kwargs), optionally resume from a checkpoint, then run the per-epoch
    train/valid/test loop with logging, checkpointing and LR decay."""
    opt = Config()
    # Every keyword argument overrides the matching Config attribute.
    for key, value in kwargs.items():
        setattr(opt, key, value)

    vis = Visualizer(opt.env) if opt.vis else None

    init_loss_file(opt)
    if opt.data_source == "statics":
        opt.fold_dataset = True
    train_path, valid_path, test_path = init_file_path(opt)
    print(opt.fold_dataset)

    # random_state = random.randint(1, 50)
    # print("random_state:", random_state)
    train_dataset, valid_dataset, test_dataset = (
        KTData(path, fold_dataset=opt.fold_dataset, q_numbers=opt.output_dim, opt='None')
        for path in (train_path, valid_path, test_path))

    # print(train_path, valid_path, test_path)
    print(len(train_dataset), len(valid_dataset), len(test_dataset))

    def _make_loader(dataset):
        # All three splits share identical DataLoader settings.
        return DataLoader(dataset, batch_size=opt.batch_size, shuffle=True,
                          num_workers=opt.num_workers, drop_last=True,
                          collate_fn=myutils.collate_fn)

    train_loader = _make_loader(train_dataset)
    valid_loader = _make_loader(valid_dataset)
    test_loader = _make_loader(test_dataset)

    # Model dispatch on the configured architecture name.
    if opt.model_name == "CNN":
        model = CNN(opt.input_dim, opt.embed_dim, opt.hidden_dim,
                    opt.num_layers, opt.output_dim, opt.batch_size, opt.device)
    elif opt.model_name == "CNN_3D":
        model = CNN_3D(opt.k_frames, opt.input_dim, opt.embed_dim,
                       opt.hidden_dim, opt.num_layers, opt.output_dim,
                       opt.batch_size, opt.device)
    else:
        model = RNN_DKT(opt.input_dim, opt.embed_dim, opt.hidden_dim,
                        opt.num_layers, opt.output_dim, opt.batch_size,
                        opt.device)

    lr = opt.lr
    last_epoch = -1

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=lr,
                                 weight_decay=opt.weight_decay,
                                 betas=(0.9, 0.99))

    # Resume: restore weights, epoch counter, lr and optimizer state.
    if opt.model_path:
        def map_location(storage, loc):
            # Keep tensors on their original (CPU) storage when loading.
            return storage
        checkpoint = torch.load(opt.model_path, map_location=map_location)
        model.load_state_dict(checkpoint["model"])
        last_epoch = checkpoint["epoch"]
        lr = checkpoint["lr"]
        optimizer.load_state_dict(checkpoint["optimizer"])

    model = model.to(opt.device)

    loss_result = {}
    auc_result = {}
    best_test_auc = 0

    # START TRAIN
    for epoch in range(opt.max_epoch):
        # Skip epochs already completed by a resumed run.
        if epoch < last_epoch:
            continue

        if opt.model_name == "CNN_3D":
            train_loss_meter, train_auc_meter, train_loss_list = train.train_3d(
                opt, vis, model, train_loader, epoch, lr, optimizer)
            val_loss_meter, val_auc_meter, val_loss_list = train.valid_3d(
                opt, vis, model, valid_loader, epoch)
            test_loss_meter, test_auc_meter, test_loss_list = test.test_3d(
                opt, vis, model, test_loader, epoch)
        else:
            train_loss_meter, train_auc_meter, train_loss_list = train.train(
                opt, vis, model, train_loader, epoch, lr, optimizer)
            val_loss_meter, val_auc_meter, val_loss_list = train.valid(
                opt, vis, model, valid_loader, epoch)
            test_loss_meter, test_auc_meter, test_loss_list = test.test(
                opt, vis, model, test_loader, epoch)

        loss_result["train_loss"] = train_loss_meter.value()[0]
        auc_result["train_auc"] = train_auc_meter.value()[0]
        loss_result["val_loss"] = val_loss_meter.value()[0]
        auc_result["val_auc"] = val_auc_meter.value()[0]
        loss_result["test_loss"] = test_loss_meter.value()[0]
        auc_result["test_auc"] = test_auc_meter.value()[0]

        def _report(metrics, win):
            # Print each metric; when visdom is enabled, append it to the
            # chart window named `win`.
            for metric_name, metric_value in metrics.items():
                print("epoch:{epoch}, {k}:{v:.5f}".format(
                    epoch=epoch, k=metric_name, v=metric_value))
                if opt.vis:
                    vis.line(X=np.array([epoch]), Y=np.array([metric_value]),
                             win=win,
                             opts=dict(title=win, showlegend=True),
                             name=metric_name,
                             update='append')

        _report(loss_result, "loss")
        _report(auc_result, "auc")

        best_test_auc = max(best_test_auc, test_auc_meter.value()[0],
                            val_auc_meter.value()[0])
        print("best_test_auc is: ", best_test_auc)

        # Append this epoch's losses to the loss log file.
        myutils.save_loss_file(opt, epoch, train_loss_list, val_loss_list,
                               test_loss_list)

        # Periodic checkpoint: model + optimizer state every save_every epochs.
        if epoch % opt.save_every == 0:
            myutils.save_model_weight(opt, model, optimizer, epoch, lr)

        # Learning-rate decay driven by this epoch's training loss.
        lr = myutils.adjust_lr(opt, optimizer, epoch,
                               train_loss_meter.value()[0])

    # Save the final model once training finishes.
    myutils.save_model_weight(opt, model, optimizer, epoch, lr, is_final=True)
Exemple #9
0
def main():
    file_name = "./flood_graph/150_250/128/500/ji_sort/1_conf/sample-wised/default/{}/".format(
        args.b)
    start = time.time()
    # set GPU ID
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    cudnn.benchmark = True

    # check save path
    save_path = file_name
    # save_path = args.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # make dataloader
    if args.valid == True:
        train_loader, valid_loader, test_loader, test_onehot, test_label = dataset.get_valid_loader(
            args.data, args.data_path, args.batch_size)

    else:
        train_loader, train_onehot, train_label, test_loader, test_onehot, test_label = dataset.get_loader(
            args.data, args.data_path, args.batch_size)

    # set num_class
    if args.data == 'cifar100':
        num_class = 100
    else:
        num_class = 10

    # set num_classes
    model_dict = {
        "num_classes": num_class,
    }

    # set model
    if args.model == 'res':
        model = resnet.resnet110(**model_dict).cuda()
    elif args.model == 'dense':
        model = densenet_BC.DenseNet3(depth=100,
                                      num_classes=num_class,
                                      growth_rate=12,
                                      reduction=0.5,
                                      bottleneck=True,
                                      dropRate=0.0).cuda()
    elif args.model == 'vgg':
        model = vgg.vgg16(**model_dict).cuda()

    # set criterion
    if args.loss == 'MS':
        cls_criterion = losses.MultiSimilarityLoss().cuda()
    elif args.loss == 'Contrastive':
        cls_criterion = losses.ContrastiveLoss().cuda()
    elif args.loss == 'Triplet':
        cls_criterion = losses.TripletLoss().cuda()
    elif args.loss == 'NPair':
        cls_criterion = losses.NPairLoss().cuda()
    elif args.loss == 'Focal':
        cls_criterion = losses.FocalLoss(gamma=3.0).cuda()
    else:
        if args.mode == 0:
            cls_criterion = nn.CrossEntropyLoss().cuda()
        else:
            cls_criterion = nn.CrossEntropyLoss(reduction="none").cuda()

    ranking_criterion = nn.MarginRankingLoss(margin=0.0).cuda()

    # set optimizer (default:sgd)
    optimizer = optim.SGD(
        model.parameters(),
        lr=0.1,
        momentum=0.9,
        weight_decay=5e-4,
        # weight_decay=0.0001,
        nesterov=False)

    # optimizer = optim.SGD(model.parameters(),
    #                       lr=float(args.lr),
    #                       momentum=0.9,
    #                       weight_decay=args.weight_decay,
    #                       nesterov=False)

    # set scheduler
    # scheduler = MultiStepLR(optimizer,
    #                         milestones=[500, 750],
    #                         gamma=0.1)

    scheduler = MultiStepLR(optimizer, milestones=[150, 250], gamma=0.1)

    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay_gamma)

    # make logger
    train_logger = utils.Logger(os.path.join(save_path, 'train.log'))
    result_logger = utils.Logger(os.path.join(save_path, 'result.log'))

    # make History Class
    correctness_history = crl_utils.History(len(train_loader.dataset))

    ## define matrix
    if args.data == 'cifar':
        matrix_idx_confidence = [[_] for _ in range(50000)]
        matrix_idx_iscorrect = [[_] for _ in range(50000)]
    else:
        matrix_idx_confidence = [[_] for _ in range(73257)]
        matrix_idx_iscorrect = [[_] for _ in range(73257)]

    # write csv
    #'''
    import csv
    f = open('{}/logs_{}_{}.txt'.format(file_name, args.b, args.epochs),
             'w',
             newline='')
    f.write("location = {}\n\n".format(file_name) + str(args))

    f0 = open('{}/Test_confidence_{}_{}.csv'.format(file_name, args.b,
                                                    args.epochs),
              'w',
              newline='')
    # f0 = open('./baseline_graph/150_250/128/500/Test_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
    # f0 = open('./CRL_graph/150_250/Test_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')

    wr_conf_test = csv.writer(f0)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr_conf_test.writerows([header])

    f1 = open('{}/Train_confidence_{}_{}.csv'.format(file_name, args.b,
                                                     args.epochs),
              'w',
              newline='')
    # f1 = open('./baseline_graph/150_250/128/500/Train_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
    # f1 = open('./CRL_graph/150_250/Train_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')

    wr = csv.writer(f1)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr.writerows([header])

    f2 = open('{}/Train_Flood_{}_{}_{}.csv'.format(file_name, args.data,
                                                   args.b, args.epochs),
              'w',
              newline='')
    # f2 = open('./baseline_graph/150_250/128/500/Train_Base_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
    # f2 = open('./CRL_graph/150_250/Train_Flood_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')

    wr_train = csv.writer(f2)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr_train.writerows([header])

    f3 = open('{}/Test_Flood_{}_{}_{}.csv'.format(file_name, args.data, args.b,
                                                  args.epochs),
              'w',
              newline='')
    # f3 = open('./baseline_graph/150_250/128/500/Test_Base_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
    # f3 = open('./CRL_graph/150_250/Test_Flood_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')

    wr_test = csv.writer(f3)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr_test.writerows([header])
    #'''

    # start Train
    best_valid_acc = 0
    test_ece_report = []
    test_acc_report = []
    test_nll_report = []
    test_over_con99_report = []
    test_e99_report = []
    test_cls_loss_report = []

    train_ece_report = []
    train_acc_report = []
    train_nll_report = []
    train_over_con99_report = []
    train_e99_report = []
    train_cls_loss_report = []
    train_rank_loss_report = []
    train_total_loss_report = []

    for epoch in range(1, args.epochs + 1):
        scheduler.step()

        matrix_idx_confidence, matrix_idx_iscorrect, idx, iscorrect, confidence, target, cls_loss_tr, rank_loss_tr, batch_correctness, total_confidence, total_correctness = \
            train.train(matrix_idx_confidence, matrix_idx_iscorrect, train_loader,
                    model,
                    wr,
                    cls_criterion,
                    ranking_criterion,
                    optimizer,
                    epoch,
                    correctness_history,
                    train_logger,
                    args)

        if args.rank_weight != 0.0:
            print("RANK ", rank_loss_tr)
            total_loss_tr = cls_loss_tr + rank_loss_tr

        if args.valid == True:
            idx, iscorrect, confidence, target, cls_loss_val, acc = train.valid(
                valid_loader, model, cls_criterion, ranking_criterion,
                optimizer, epoch, correctness_history, train_logger, args)
            if acc > best_valid_acc:
                best_valid_acc = acc
                print("*** Update Best Acc ***")

        # save model
        if epoch == args.epochs:
            torch.save(model.state_dict(),
                       os.path.join(save_path, 'model.pth'))

        print("########### Train ###########")
        acc_tr, aurc_tr, eaurc_tr, aupr_tr, fpr_tr, ece_tr, nll_tr, brier_tr, E99_tr, over_99_tr, cls_loss_tr = metrics.calc_metrics(
            train_loader, train_label, train_onehot, model, cls_criterion,
            args)

        if args.sort == True and epoch == 260:
            #if args.sort == True:
            train_loader = dataset.sort_get_loader(
                args.data, args.data_path, args.batch_size, idx,
                np.array(target), iscorrect,
                batch_correctness, total_confidence, total_correctness,
                np.array(confidence), epoch, args)

        train_acc_report.append(acc_tr)
        train_nll_report.append(nll_tr * 10)
        train_ece_report.append(ece_tr)
        train_over_con99_report.append(over_99_tr)
        train_e99_report.append(E99_tr)
        train_cls_loss_report.append(cls_loss_tr)

        if args.rank_weight != 0.0:
            train_total_loss_report.append(total_loss_tr)
            train_rank_loss_report.append(rank_loss_tr)
        print("CLS ", cls_loss_tr)

        # finish train
        print("########### Test ###########")
        # calc measure
        acc_te, aurc_te, eaurc_te, aupr_te, fpr_te, ece_te, nll_te, brier_te, E99_te, over_99_te, cls_loss_te = metrics.calc_metrics(
            test_loader, test_label, test_onehot, model, cls_criterion, args)
        test_ece_report.append(ece_te)
        test_acc_report.append(acc_te)
        test_nll_report.append(nll_te * 10)
        test_over_con99_report.append(over_99_te)
        test_e99_report.append(E99_te)
        test_cls_loss_report.append(cls_loss_te)

        print("CLS ", cls_loss_te)
        print("############################")

    # for idx in matrix_idx_confidence:
    #     wr.writerow(idx)

    #'''
    # draw graph
    df = pd.DataFrame()
    df['epoch'] = [i for i in range(1, args.epochs + 1)]
    df['test_ece'] = test_ece_report
    df['train_ece'] = train_ece_report
    fig_loss = plt.figure(figsize=(35, 35))
    fig_loss.set_facecolor('white')
    ax = fig_loss.add_subplot()

    ax.plot(df['epoch'],
            df['test_ece'],
            df['epoch'],
            df['train_ece'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] ECE per epoch', fontsize=80)
    # plt.title('[BASE] ECE per epoch', fontsize=80)
    # plt.title('[CRL] ECE per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('ECE', fontsize=70)
    plt.ylim([0, 1])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_ECE_lr_{}.png'.format(file_name, args.model, args.b,
                                                args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_ECE_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_ECE_lr_{}.png'.format(args.model, args.b, args.epochs))

    df2 = pd.DataFrame()
    df2['epoch'] = [i for i in range(1, args.epochs + 1)]
    df2['test_acc'] = test_acc_report
    df2['train_acc'] = train_acc_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df2['epoch'],
            df2['test_acc'],
            df2['epoch'],
            df2['train_acc'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] Accuracy per epoch', fontsize=80)
    # plt.title('[BASE] Accuracy per epoch', fontsize=80)
    # plt.title('[CRL] Accuracy per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('Accuracy', fontsize=70)
    plt.ylim([0, 100])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_acc_lr_{}.png'.format(file_name, args.model, args.b,
                                                args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_acc_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_acc_lr_{}.png'.format(args.model, args.b, args.epochs))

    df3 = pd.DataFrame()
    df3['epoch'] = [i for i in range(1, args.epochs + 1)]
    df3['test_nll'] = test_nll_report
    df3['train_nll'] = train_nll_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df3['epoch'],
            df3['test_nll'],
            df3['epoch'],
            df3['train_nll'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] NLL per epoch', fontsize=80)
    # plt.title('[BASE] NLL per epoch', fontsize=80)
    # plt.title('[CRL] NLL per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('NLL', fontsize=70)
    plt.ylim([0, 45])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_nll_lr_{}.png'.format(file_name, args.model, args.b,
                                                args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_nll_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_nll_lr_{}.png'.format(args.model, args.b, args.epochs))

    df4 = pd.DataFrame()
    df4['epoch'] = [i for i in range(1, args.epochs + 1)]
    df4['test_over_con99'] = test_over_con99_report
    df4['train_over_con99'] = train_over_con99_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df4['epoch'],
            df4['test_over_con99'],
            df4['epoch'],
            df4['train_over_con99'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] Over conf99 per epoch', fontsize=80)
    # plt.title('[BASE] Over conf99 per epoch', fontsize=80)
    # plt.title('[CRL] Over conf99 per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('Over con99', fontsize=70)
    if args.data == 'cifar10' or args.data == 'cifar100':
        plt.ylim([0, 50000])
    else:
        plt.ylim([0, 73257])

    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_over_conf99_lr_{}.png'.format(
        file_name, args.model, args.b, args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_over_conf99_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_over_conf99_lr_{}.png'.format(args.model, args.b, args.epochs))

    # --- E99 per epoch, test vs. train (E99: metric computed at the 0.99
    # confidence threshold — confirm exact definition against its producer) ---
    df5 = pd.DataFrame()
    df5['epoch'] = list(range(1, args.epochs + 1))
    df5['test_e99'] = test_e99_report
    df5['train_e99'] = train_e99_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df5['epoch'],
            df5['test_e99'],
            df5['epoch'],
            df5['train_e99'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] E99 per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('E99', fontsize=70)
    plt.ylim([0, 0.2])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_E99_flood_lr_{}.png'.format(file_name, args.model,
                                                      args.b, args.epochs))
    # Release the figure: this script opens several large figures in sequence
    # and matplotlib keeps every one alive (and warns) unless it is closed.
    plt.close(fig_acc)

    # --- classification loss per epoch, test vs. train ---
    df5 = pd.DataFrame()
    df5['epoch'] = list(range(1, args.epochs + 1))
    df5['test_cls_loss'] = test_cls_loss_report
    df5['train_cls_loss'] = train_cls_loss_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df5['epoch'],
            df5['test_cls_loss'],
            df5['epoch'],
            df5['train_cls_loss'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] CLS_loss per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('Loss', fontsize=70)
    plt.ylim([0, 5])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_cls_loss_flood_lr_{}.png'.format(
        file_name, args.model, args.b, args.epochs))
    # Close the figure so repeated plotting does not accumulate open figures.
    plt.close(fig_acc)

    if args.rank_weight != 0.0:
        # --- training-loss decomposition (CLS / Rank / Total) per epoch ---
        df6 = pd.DataFrame()
        df6['epoch'] = list(range(1, args.epochs + 1))
        df6['train_cls_loss'] = train_cls_loss_report
        df6['train_rank_loss'] = train_rank_loss_report
        df6['train_total_loss'] = train_total_loss_report
        fig_acc = plt.figure(figsize=(35, 35))
        fig_acc.set_facecolor('white')
        ax = fig_acc.add_subplot()

        ax.plot(df6['epoch'],
                df6['train_cls_loss'],
                df6['epoch'],
                df6['train_rank_loss'],
                df6['epoch'],
                df6['train_total_loss'],
                linewidth=10)
        ax.legend(['CLS', 'Rank', 'Total'], loc=2, prop={'size': 60})
        # BUG FIX: title was copy-pasted from the CLS-only figure; this one
        # shows all three training losses.
        plt.title('[FL] Losses per epoch', fontsize=80)
        plt.xlabel('Epoch', fontsize=70)
        plt.ylabel('Loss', fontsize=70)
        plt.setp(ax.get_xticklabels(), fontsize=30)
        plt.setp(ax.get_yticklabels(), fontsize=30)
        # BUG FIX: previously saved to a hard-coded './CRL_graph/150_250/'
        # directory (leftover from another experiment; may not exist and
        # would raise FileNotFoundError) and reused the cls-loss filename,
        # which would overwrite the figure saved above. Save into
        # `file_name` like every other figure, under a distinct name.
        plt.savefig('{}/{}_{}_rank_loss_flood_lr_{}.png'.format(
            file_name, args.model, args.b, args.epochs))
        plt.close(fig_acc)

    # Prepend a row label to each metric series and emit one CSV row per
    # metric, first for the test report, then for the train report.
    test_rows = [
        ('ACC', test_acc_report),
        ('ECE', test_ece_report),
        ('NLL', test_nll_report),
        ('Over_conf99', test_over_con99_report),
        ('E99', test_e99_report),
        ('CLS', test_cls_loss_report),
    ]
    for label, series in test_rows:
        series.insert(0, label)
        wr_test.writerow(series)

    train_rows = [
        ('ACC', train_acc_report),
        ('ECE', train_ece_report),
        ('NLL', train_nll_report),
        ('Over_conf99', train_over_con99_report),
        ('E99', train_e99_report),
        ('CLS', train_cls_loss_report),
    ]
    # The ranking losses are only tracked when rank_weight is active.
    if args.rank_weight != 0.0:
        train_rows.append(('Rank', train_rank_loss_report))
        train_rows.append(('Total', train_total_loss_report))
    for label, series in train_rows:
        series.insert(0, label)
        wr_train.writerow(series)

    # Persist the final test-set metrics (scaled for readability in the log)
    # and echo the run configuration and wall-clock time to stdout.
    result_logger.write([
        acc_te, aurc_te * 1000, eaurc_te * 1000, aupr_te * 100, fpr_te * 100,
        ece_te * 100, nll_te * 10, brier_te * 100, E99_te * 100
    ])
    # Idiomatic truthiness check instead of `args.valid == True`.
    if args.valid:
        print("Best Valid Acc : {}".format(acc))
    print("Flood Level: {}".format(args.b))
    print("Sort : {}".format(args.sort))
    print("Sort Mode : {}".format(args.sort_mode))
    print("TIME : ", time.time() - start)