Example #1
def prepare_optimizee(args, sgd_in_names, obs_shape, hidden_size, actor_critic, current_optimizee_step, prev_optimizee_step):
    prev_optimizee_step += current_optimizee_step
    current_optimizee_step = 0

    ##### Vgg16 #####
    vgg = vgg16(pretrained=True)
    model = FCN_Vgg(n_class=args.num_class)
    model.copy_params_from_vgg16(vgg)
    ###################

    # Setup optimizer
    sgd_in = [
        {'params': get_params(model, ["conv1_1", "conv1_2"]), 'lr': args.lr},
        {'params': get_params(model, ["conv2_1", "conv2_2"]), 'lr': args.lr},
        {'params': get_params(model, ["conv3_1", "conv3_2", "conv3_3"]), 'lr': args.lr},
        {'params': get_params(model, ["conv4_1", "conv4_2", "conv4_3"]), 'lr': args.lr},
        {'params': get_params(model, ["conv5_1", "conv5_2", "conv5_3"]), 'lr': args.lr},
        {'params': get_params(model, ["fc6", "fc7"]), 'lr': args.lr},
        {'params': get_params(model, ["score_fr", "score_pool3", "score_pool4", "upscore2", "upscore8", "upscore_pool4"]), 'lr': args.lr},
    ]
    optimizer = torch.optim.SGD(sgd_in, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Move the model to GPU
    model = model.cuda()
    model.eval()

    return model, optimizer, current_optimizee_step, prev_optimizee_step
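Each layer block gets its own parameter group so that a controller can later rescale every block's learning rate independently (Example #7 does exactly that). A minimal runnable sketch of the pattern, with a toy two-layer model standing in for FCN_Vgg and hypothetical scaling factors:

import torch
import torch.nn as nn

# Sketch: per-layer parameter groups allow per-layer learning rates.
model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
sgd_in = [{'params': layer.parameters(), 'lr': 0.01} for layer in model]
optimizer = torch.optim.SGD(sgd_in, lr=0.01, momentum=0.9)

actions = [0.5, 2.0]  # hypothetical per-group scaling factors
for group, scale in zip(optimizer.param_groups, actions):
    group['lr'] = 0.01 * scale
print([g['lr'] for g in optimizer.param_groups])  # [0.005, 0.02]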
Example #2
def get_architecture(arch: str) -> torch.nn.Module:
    """ Return a neural network (with random weights)
    :param arch: the architecture - should be in the ARCHITECTURES list above
    :return: a Pytorch module
    """
    if arch == 'lenet':
        model = lenet()
    elif arch == 'alexnet':
        model = alexnet()
    elif arch == 'resnet20':
        model = resnet20()
    elif arch == 'resnet26':
        model = resnet26()
    elif arch == 'resnet32':
        model = resnet32()
    elif arch == 'resnet110':
        model = resnet110()
    elif arch == 'densenet':
        model = densenet_BC_cifar(depth=100, k=12)
    elif arch == 'vgg16':
        model = vgg16()
    elif arch == 'vgg19':
        model = vgg19()
    else:
        raise ValueError('arch not in ARCHITECTURES')
    return model
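A dict-based registry is an equivalent, easier-to-extend shape for this dispatch; a self-contained sketch in which the lambdas stand in for the real constructors such as resnet20() or vgg16():

import torch.nn as nn

# Sketch: name-to-constructor registry replacing the if/elif chain.
ARCHITECTURES = {
    'lenet': lambda: nn.Linear(1, 1),
    'resnet20': lambda: nn.Linear(1, 1),
    'vgg16': lambda: nn.Linear(1, 1),
}

def get_architecture(arch: str) -> nn.Module:
    if arch not in ARCHITECTURES:
        raise ValueError('arch not in ARCHITECTURES')
    return ARCHITECTURES[arch]()

model = get_architecture('resnet20')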
Example #3
def main():
    # set GPU ID
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    cudnn.benchmark = True

    # check save path
    save_path = args.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # make dataloader
    train_loader, test_loader, \
    test_onehot, test_label = dataset.get_loader(args.data,
                                                 args.data_path,
                                                 args.batch_size)

    # set num_class
    if args.data == 'cifar100':
        num_class = 100
    else:
        num_class = 10

    # set num_classes
    model_dict = {
        "num_classes": num_class,
    }

    # set network
    if args.model == 'res':
        model = resnet.resnet110(**model_dict).cuda()
    elif args.model == 'dense':
        model = densenet_BC.DenseNet3(depth=100,
                                      num_classes=num_class,
                                      growth_rate=12,
                                      reduction=0.5,
                                      bottleneck=True,
                                      dropRate=0.0).cuda()
    elif args.model == 'vgg':
        model = vgg.vgg16(**model_dict).cuda()

    # set criterion
    cls_criterion = nn.CrossEntropyLoss().cuda()

    # make logger
    result_logger = utils.Logger(os.path.join(save_path, 'result.log'))

    # load pretrained model
    model_state_dict = torch.load(os.path.join(args.save_path,
                                               '{0}.pth'.format(args.file_name)))
    model.load_state_dict(model_state_dict)

    # calc measure
    acc, aurc, eaurc, aupr, fpr, ece, nll, brier = metrics.calc_metrics(test_loader,
                                                                        test_label,
                                                                        test_onehot,
                                                                        model,
                                                                        cls_criterion)
    # result write
    result_logger.write([acc, aurc * 1000, eaurc * 1000, aupr * 100,
                         fpr * 100, ece * 100, nll * 10, brier * 100])
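Note that torch.load above uses no map_location, so a checkpoint saved on GPU is mapped back to the device it was saved from. A self-contained sketch of the device-agnostic pattern (the /tmp path is illustrative):

import torch
import torch.nn as nn

# Sketch: load a state dict onto CPU regardless of the device it was saved from.
net = nn.Linear(4, 2)
torch.save(net.state_dict(), '/tmp/model.pth')
state = torch.load('/tmp/model.pth', map_location='cpu')
net.load_state_dict(state)  # then move the model with .cuda() / .to(device)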
Example #4
    def __init__(self, device):
        super().__init__()
        self.faster = fr.FasterRCNN()
        self.lower_module = lm.lower_module(device)
        self.upper_module = um.upper_module()
        self.vgg = vgg.vgg16()
        self.transform = transforms.Compose([
            transforms.ToTensor(),
        ])
        self.predict = nn.Linear(c.Concat_vec_len * 2, c.HOIlen)

        with open('data/train_bbox.json', 'r') as f:
            self.train_bboxes = json.load(f)
        with open('data/test_bbox.json', 'r') as f:
            self.test_bboxes = json.load(f)
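The constructor expects precomputed bounding boxes serialized as JSON; a self-contained sketch of that load pattern, with a hypothetical file layout:

import json
import os
import tempfile

# Write a stand-in bbox file, then read it back the way the constructor does.
# The actual layout of data/train_bbox.json is an assumption.
bboxes = {'000000035005.jpg': [[10, 20, 110, 220]]}  # hypothetical entry
path = os.path.join(tempfile.gettempdir(), 'train_bbox.json')
with open(path, 'w') as f:
    json.dump(bboxes, f)
with open(path, 'r') as f:
    train_bboxes = json.load(f)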
Example #5
def main():
    # set GPU ID
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    cudnn.benchmark = True

    # check save path
    save_path = args.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # make dataloader
    train_loader, test_loader, \
    test_onehot, test_label = dataset.get_loader(args.data,
                                                 args.data_path,
                                                 args.batch_size)

    # set num_class
    if args.data == 'cifar100':
        num_class = 100
    else:
        num_class = 10

    # set num_classes
    model_dict = {
        "num_classes": num_class,
    }

    # set model
    if args.model == 'res':
        model = resnet.resnet110(**model_dict).cuda()
    elif args.model == 'dense':
        model = densenet_BC.DenseNet3(depth=100,
                                      num_classes=num_class,
                                      growth_rate=12,
                                      reduction=0.5,
                                      bottleneck=True,
                                      dropRate=0.0).cuda()
    elif args.model == 'vgg':
        model = vgg.vgg16(**model_dict).cuda()

    # set criterion
    cls_criterion = nn.CrossEntropyLoss().cuda()
    ranking_criterion = nn.MarginRankingLoss(margin=0.0).cuda()

    # set optimizer (default:sgd)
    optimizer = optim.SGD(model.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=0.0001,
                          nesterov=False)

    # set scheduler
    scheduler = MultiStepLR(optimizer, milestones=[150, 250], gamma=0.1)

    # make logger
    train_logger = utils_orig.Logger(os.path.join(save_path, 'train.log'))
    result_logger = utils_orig.Logger(os.path.join(save_path, 'result.log'))

    # make History Class
    correctness_history = crl_utils.History(len(train_loader.dataset))

    # start Train
    for epoch in range(1, args.epochs + 1):
        train.train(train_loader, model, cls_criterion, ranking_criterion,
                    optimizer, epoch, correctness_history, train_logger, args)
        # step the LR scheduler after the epoch's optimizer updates (PyTorch >= 1.1 order)
        scheduler.step()

        # save model
        if epoch == args.epochs:
            torch.save(model.state_dict(),
                       os.path.join(save_path, 'model.pth'))
    # finish train

    # calc measure
    acc, aurc, eaurc, aupr, fpr, ece, nll, brier = metrics.calc_metrics(
        test_loader, test_label, test_onehot, model, cls_criterion)
    # result write
    result_logger.write([
        acc, aurc * 1000, eaurc * 1000, aupr * 100, fpr * 100, ece * 100,
        nll * 10, brier * 100
    ])
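For reference, the nn.MarginRankingLoss(margin=0.0) used above computes mean(max(0, -y * (x1 - x2) + margin)), penalizing pairs where x1 fails to rank above x2 when y = 1. A quick numeric check:

import torch
import torch.nn as nn

crit = nn.MarginRankingLoss(margin=0.0)
x1 = torch.tensor([0.9, 0.2])
x2 = torch.tensor([0.1, 0.8])
y = torch.tensor([1.0, 1.0])  # x1 should rank above x2
# per pair: max(0, -1*(0.8)) = 0 and max(0, -1*(-0.6)) = 0.6; mean = 0.3
print(crit(x1, x2, y))  # tensor(0.3000)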
Example #6
import model.vgg as vgg
import cv2
import torch
from torchvision import transforms

model = vgg.vgg16().cuda(2)
model.eval()  # inference mode: freeze dropout / batch-norm statistics
transform = transforms.Compose([
    transforms.ToTensor(),
])
data = cv2.imread('input_img/000000035005.jpg')  # returns None if the path is wrong
data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)  # OpenCV decodes BGR; torch models conventionally take RGB
data = transform(data).cuda(2)
print(data.shape)
with torch.no_grad():  # forward-only check, no gradients needed
    print(model(data.unsqueeze(0)))
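If the weights are ImageNet-pretrained, the tensor usually also needs ImageNet normalization before the forward pass; the statistics below are the standard ones (they also appear in Examples #7 and #8), but whether this particular vgg.vgg16() expects them is an assumption:

from torchvision import transforms

# Standard ImageNet statistics, the same values the SegEvaluator uses elsewhere.
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])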
Example #7
def main():
    args = get_args()
    pid = os.getpid()
    device = torch.device("cuda:0" if args.cuda else "cpu")

    if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    best_mIoU = 0

    #### preparation ###########################################
    from config_seg import config as data_setting
    data_setting.batch_size = args.batch_size
    # if args.src_list is not None:
    #     data_setting.train_source = args.src_list
    # if args.tgt_list is not None:
    #     data_setting.eval_source = args.tgt_list
    train_loader = get_train_loader(data_setting, GTA5, test=False)
    train_loader_iter = iter(train_loader)
    current_optimizee_step, prev_optimizee_step = 0, 0

    model_old = None
    if args.lwf:
        # create a fixed model copy for Life-long learning
        ##### Vgg16 #####
        model_old = vgg16(pretrained=True)
        ###################
        model_old.eval()
        model_old.to(device)
    ############################################################

    ### Agent Settings ########################################
    RANDOM = False # False | True | 'init'
    action_space = np.arange(0, 1.1, 0.1)
    # action_space = np.arange(0, 3); granularity = 0.01
    obs_avg = True
    _window_size = 1
    window_size = 1 if obs_avg else _window_size
    window_shrink_size = 20 # larger: controller will be updated more frequently w.r.t. optimizee_step
    sgd_in_names = ["conv1", "conv2", "conv3", "conv4", "conv5", "FC", "fc_new"]
    coord_size = len(sgd_in_names)
    ob_name_lstm = ["loss", "loss_kl", "step", "fc_mean", "fc_std"]
    ob_name_scalar = []
    obs_shape = (len(ob_name_lstm) * window_size + len(ob_name_scalar) + coord_size, )
    _hidden_size = 20
    hidden_size = _hidden_size * len(ob_name_lstm)
    actor_critic = Policy(coord_size, input_size=(len(ob_name_lstm), len(ob_name_scalar)), action_space=len(action_space), hidden_size=_hidden_size, window_size=window_size)
    actor_critic.to(device)
    actor_critic.eval()

    partial = torch.load("./pretrained/policy_vgg16_segmentation.pth", map_location=lambda storage, loc: storage)
    state = actor_critic.state_dict()
    state.update(partial)
    actor_critic.load_state_dict(state)

    if args.algo == 'reinforce':
        agent = algo.REINFORCE(
            actor_critic,
            args.entropy_coef,
            lr=args.lr_meta,
            eps=args.eps,
            alpha=args.alpha,
            max_grad_norm=args.max_grad_norm)
    elif args.algo == 'a2c':
        agent = algo.A2C_ACKTR(
            actor_critic,
            args.value_loss_coef,
            args.entropy_coef,
            lr=args.lr_meta,
            eps=args.eps,
            alpha=args.alpha,
            max_grad_norm=args.max_grad_norm)
    elif args.algo == 'ppo':
        agent = algo.PPO(
            actor_critic,
            args.clip_param,
            args.ppo_epoch,
            args.num_mini_batch,
            args.value_loss_coef,
            args.entropy_coef,
            lr=args.lr_meta,
            eps=args.eps,
            max_grad_norm=args.max_grad_norm)
    elif args.algo == 'acktr':
        agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True)
    ################################################################

    _min_iter = 20
    # reset the optimizee
    model, optimizer, current_optimizee_step, prev_optimizee_step = prepare_optimizee(args, sgd_in_names, obs_shape, hidden_size, actor_critic, current_optimizee_step, prev_optimizee_step)

    ##### Logging ###########################
    # Log outputs
    if RANDOM:
        args.name = "Random_GTA5_Min%diter.Step%d.Window%d_batch%d_Epoch%d_LR%.1e.warmpoly_lwf.%d"%\
            (_min_iter, args.num_steps, window_shrink_size, args.batch_size, args.epochs, args.lr, args.lwf)
    else:
        args.name = "metatrain_GTA5_%s.SGD.Gamma%.1f.LRmeta.%.1e.Hidden%d.Loss.avg.exp.Earlystop.%d.Min%diter.Step%d.Window%d_batch%d_Epoch%d_LR%.1e.warmpoly_lwf.%d"%\
            (args.algo, args.gamma, args.lr_meta, _hidden_size, args.early_stop, _min_iter, args.num_steps, window_shrink_size, args.batch_size, args.epochs, args.lr, args.lwf)
        if args.resume:
            args.name += "_resumed"

    # Log outputs
    directory = "runs/%s/"%(args.name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = directory + 'train.log'
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    rootLogger = logging.getLogger()
    logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s]  %(message)s")
    fileHandler = logging.FileHandler(filename)
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.INFO)

    writer = SummaryWriter(directory)
    ###########################################

    threds = 1
    evaluator = SegEvaluator(Cityscapes(data_setting, 'val', None), args.num_class, np.array([0.485, 0.456, 0.406]),
                    np.array([0.229, 0.224, 0.225]), model, [1, ], False, devices=0, config=data_setting, threds=threds,
                    verbose=False, save_path=None, show_image=False)

    epoch_size = len(train_loader)
    total_steps = epoch_size*args.epochs
    bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
    pbar = tqdm(range(int(epoch_size*args.epochs)), file=sys.stdout, bar_format=bar_format, ncols=100)
    _window_size = max(_min_iter, current_optimizee_step + prev_optimizee_step // window_shrink_size)
    train_loader_iter, obs, loss, loss_kl, fc_mean, fc_std = train_step(args, _window_size, train_loader_iter, train_loader, model, optimizer, obs_avg, args.lr, pbar, current_optimizee_step + prev_optimizee_step, total_steps, model_old=model_old)
    writer.add_scalar("loss/ce", loss, current_optimizee_step + prev_optimizee_step)
    writer.add_scalar("loss/kl", loss_kl, current_optimizee_step + prev_optimizee_step)
    writer.add_scalar("loss/total", loss + loss_kl, current_optimizee_step + prev_optimizee_step)
    writer.add_scalar("fc/mean", fc_mean, current_optimizee_step + prev_optimizee_step)
    writer.add_scalar("fc/std", fc_std, current_optimizee_step + prev_optimizee_step)
    current_optimizee_step += _window_size
    prev_obs = obs.unsqueeze(0)
    prev_hidden = torch.zeros(actor_critic.net.num_recurrent_layers, 1, hidden_size).cuda()
    for epoch in range(args.epochs):
        print("\n===== Epoch %d / %d ====="%(epoch+1, args.epochs))
        print("============= " + args.name + " ================")
        print("============= PID: " + str(pid) + " ================")
        while current_optimizee_step < epoch_size:
            # Sample actions
            with torch.no_grad():
                if not RANDOM:
                    value, action, action_log_prob, recurrent_hidden_states, distribution = actor_critic.act(prev_obs, prev_hidden, deterministic=False)
                    action = action.squeeze(0)
                    action_log_prob = action_log_prob.squeeze(0)
                    value = value.squeeze(0)
                    for idx in range(len(action)):
                        writer.add_scalar("action/%s"%(sgd_in_names[idx]), action[idx], current_optimizee_step + prev_optimizee_step)
                        writer.add_scalar("entropy/%s"%(sgd_in_names[idx]), distribution.distributions[idx].entropy(), current_optimizee_step + prev_optimizee_step)
                        optimizer.param_groups[idx]['lr'] = float(action_space[action[idx]]) * args.lr
                        writer.add_scalar("LR/%s"%(sgd_in_names[idx]), optimizer.param_groups[idx]['lr'], current_optimizee_step + prev_optimizee_step)
                else:
                    if RANDOM is True or RANDOM == 'init':
                        for idx in range(coord_size):
                            optimizer.param_groups[idx]['lr'] = float(choice(action_space)) * args.lr
                    if RANDOM == 'init':
                        RANDOM = 'done'
                    for idx in range(coord_size):
                        writer.add_scalar("LR/%s"%sgd_in_names[idx], optimizer.param_groups[idx]['lr'], current_optimizee_step + prev_optimizee_step)

            # Observe reward and next obs
            _window_size = max(_min_iter, current_optimizee_step + prev_optimizee_step // window_shrink_size)
            _window_size = min(_window_size, epoch_size - current_optimizee_step)
            train_loader_iter, obs, loss, loss_kl, fc_mean, fc_std = train_step(args, _window_size, train_loader_iter, train_loader, model, optimizer, obs_avg, args.lr, pbar, current_optimizee_step + prev_optimizee_step, total_steps, model_old=model_old)
            writer.add_scalar("loss/ce", loss, current_optimizee_step + prev_optimizee_step)
            writer.add_scalar("loss/kl", loss_kl, current_optimizee_step + prev_optimizee_step)
            writer.add_scalar("loss/total", loss + loss_kl, current_optimizee_step + prev_optimizee_step)
            writer.add_scalar("fc/mean", fc_mean, current_optimizee_step + prev_optimizee_step)
            writer.add_scalar("fc/std", fc_std, current_optimizee_step + prev_optimizee_step)
            current_optimizee_step += _window_size
            prev_obs = obs.unsqueeze(0)
            if not RANDOM: prev_hidden = recurrent_hidden_states
        prev_optimizee_step += current_optimizee_step
        current_optimizee_step = 0

        # evaluate on validation set
        torch.cuda.empty_cache()
        mIoU = validate(evaluator, model)
        writer.add_scalar("mIoU", mIoU, epoch)

        # remember best prec@1 and save checkpoint
        is_best = mIoU > best_mIoU
        best_mIoU = max(mIoU, best_mIoU)
        save_checkpoint(args.name, {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_mIoU': best_mIoU,
        }, is_best)

        logging.info('Best mIoU: {mIoU:.3f}'.format(mIoU=best_mIoU))
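The controller's discrete action indexes into action_space, and the selected multiplier rescales the base learning rate of each parameter group. A toy trace of that mapping with a hypothetical sampled action:

import numpy as np

action_space = np.arange(0, 1.1, 0.1)  # 11 multipliers: 0.0, 0.1, ..., 1.0
base_lr = 1e-3
action = 7  # hypothetical index sampled by the policy
print(float(action_space[action]) * base_lr)  # ~0.0007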
Example #8
def main():
    global args, best_mIoU
    args = parser.parse_args()
    pid = os.getpid()

    # Log outputs
    args.name = "GTA5_Vgg16_batch%d_512x512_Poly_LR%.1e_1to%.1f_all_lwf.%d_epoch%d" % (
        args.batch_size, args.lr, args.factor, args.lwf, args.epochs)
    if args.resume:
        args.name += "_resumed"
    directory = "runs/%s/" % (args.name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = directory + 'train.log'
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    rootLogger = logging.getLogger()
    logFormatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s]  %(message)s")
    fileHandler = logging.FileHandler(filename)
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.INFO)

    writer = SummaryWriter(directory)

    from config_seg import config as data_setting
    data_setting.batch_size = args.batch_size
    train_loader = get_train_loader(data_setting, GTA5, test=False)

    ##### Vgg16 #####
    vgg = vgg16(pretrained=True)
    model = FCN_Vgg(n_class=args.num_class)
    model.copy_params_from_vgg16(vgg)
    ###################
    threds = 1
    evaluator = SegEvaluator(Cityscapes(data_setting, 'val', None),
                             args.num_class,
                             np.array([0.485, 0.456, 0.406]),
                             np.array([0.229, 0.224, 0.225]),
                             model, [
                                 1,
                             ],
                             False,
                             devices=args.gpus,
                             config=data_setting,
                             threds=threds,
                             verbose=False,
                             save_path=None,
                             show_image=False)

    # Setup optimizer
    ##### Vgg16 #####
    sgd_in = [
        {
            'params': get_params(model, ["conv1_1", "conv1_2"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv2_1", "conv2_2"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv3_1", "conv3_2", "conv3_3"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv4_1", "conv4_2", "conv4_3"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv5_1", "conv5_2", "conv5_3"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["fc6", "fc7"]),
            'lr': args.factor * args.lr
        },
        {
            'params':
            get_params(model, [
                "score_fr", "score_pool3", "score_pool4", "upscore2",
                "upscore8", "upscore_pool4"
            ]),
            'lr':
            args.lr
        },
    ]
    base_lrs = [group['lr'] for group in sgd_in]
    optimizer = torch.optim.SGD(sgd_in,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_mIoU = checkpoint['best_mIoU']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=ImageClassdata> no checkpoint found at '{}'".format(
                args.resume))

    model = model.cuda()
    model_old = None
    if args.lwf > 0:
        # create a fixed model copy for Life-long learning
        model_old = vgg16(pretrained=True)
        ###################
        for param in model_old.parameters():
            param.requires_grad = False
        model_old.eval()
        model_old.cuda()

    if args.evaluate:
        mIoU = validate(evaluator, model)
        print(mIoU)

    # Main training loop
    iter_max = args.epochs * math.ceil(len(train_loader) / args.iter_size)
    iter_stat = IterNums(iter_max)
    for epoch in range(args.start_epoch, args.epochs):
        logging.info("============= " + args.name + " ================")
        logging.info("============= PID: " + str(pid) + " ================")
        logging.info("Epoch: %d" % (epoch + 1))
        # train for one epoch
        train(args,
              train_loader,
              model,
              optimizer,
              base_lrs,
              iter_stat,
              epoch,
              writer,
              model_old=model_old,
              adjust_lr=epoch < args.epochs)
        # evaluate on validation set
        torch.cuda.empty_cache()
        mIoU = validate(evaluator, model)
        writer.add_scalar("mIoU", mIoU, epoch)
        # remember best mIoU and save checkpoint
        is_best = mIoU > best_mIoU
        best_mIoU = max(mIoU, best_mIoU)
        save_checkpoint(
            directory, {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_mIoU': best_mIoU,
            }, is_best)

    logging.info('Best mIoU: {mIoU:.3f}'.format(mIoU=best_mIoU))
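The resume logic depends on the checkpoint dict written by save_checkpoint; a minimal round-trip sketch of that layout (the /tmp path and numbers are illustrative):

import torch
import torch.nn as nn

# Sketch: save and restore the same checkpoint fields the resume path reads.
model = nn.Linear(4, 2)
ckpt = {'epoch': 3, 'state_dict': model.state_dict(), 'best_mIoU': 0.42}
torch.save(ckpt, '/tmp/checkpoint.pth')

loaded = torch.load('/tmp/checkpoint.pth')
model.load_state_dict(loaded['state_dict'])
start_epoch, best_mIoU = loaded['epoch'], loaded['best_mIoU']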
Example #9
def main():
    # set GPU ID
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    cudnn.benchmark = True

    # check save path
    save_path = args.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # set num_class
    if args.data == 'cifar100':
        num_class = 100
    else:
        num_class = 10

    # set num_classes
    model_dict = {
        "num_classes": num_class,
    }

    _, test_loader, \
        test_onehot, test_label = dataset.get_loader(args.data,
                                        args.data_path,
                                        args.batch_size)

    train_set = dataset.get_dataset(args.data, args.data_path, mode='train')
    unlabeled_pool = dataset.get_dataset(args.data,
                                         args.data_path,
                                         mode='unlabeled')
    num_train = len(train_set)

    indices = list(range(num_train))
    random.shuffle(indices)

    labeled_set = indices[:args.initial_budget]
    unlabeled_set = indices[args.initial_budget:]

    labeled_dataloader = DataLoader(train_set,
                                    sampler=SubsetRandomSampler(labeled_set),
                                    batch_size=args.batch_size,
                                    drop_last=True)

    now = datetime.datetime.now()
    formatted_date = now.strftime('%Y%m%d_%H_%M_')
    result_logger = utils.Logger(
        os.path.join(args.save_path, formatted_date + 'result.log'))

    arguments = []
    for key, val in args.__dict__.items():
        arguments.append("{} : {}\n".format(key, val))
    result_logger.write(arguments)
    result_logger = utils.Logger(
        os.path.join(args.save_path, formatted_date + 'result.log'))
    # make logger
    train_logger = utils.Logger(
        os.path.join(save_path, formatted_date + 'train.log'))
    test_epoch_logger = utils.Logger(
        os.path.join(save_path, formatted_date + 'test_epoch.log'))

    current_train = len(labeled_set)
    while current_train <= args.max_budget:
        # set model
        if args.model == 'res':
            model = resnet.ResNet152(**model_dict).cuda()
        elif args.model == 'dense':
            model = densenet_BC.DenseNet3(depth=100,
                                          num_classes=num_class,
                                          growth_rate=12,
                                          reduction=0.5,
                                          bottleneck=True,
                                          dropRate=0.0).cuda()
        elif args.model == 'vgg':
            model = vgg.vgg16(**model_dict).cuda()

        # set criterion
        cls_criterion = nn.CrossEntropyLoss().cuda()
        ranking_criterion = nn.MarginRankingLoss(margin=0.0).cuda()

        # set optimizer (default:sgd)
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=0.9,
                              weight_decay=0.0005,
                              nesterov=False)

        # set scheduler
        scheduler = MultiStepLR(optimizer, milestones=[120, 160], gamma=0.1)

        # make History Class
        correctness_history = crl_utils.History(len(
            labeled_dataloader.dataset))

        # start Train
        for epoch in range(1, args.epochs + 1):
            train.train(labeled_dataloader, model, cls_criterion,
                        ranking_criterion, optimizer, epoch,
                        correctness_history, train_logger, args)
            test_acc, test_loss = metrics.evaluate(test_loader, model,
                                                   cls_criterion, args.budget,
                                                   epoch, test_epoch_logger)
            scheduler.step()
            # save model
            if epoch == args.epochs:
                torch.save(model.state_dict(),
                           os.path.join(save_path, 'model.pth'))
        # finish train

        # calc measure
        acc, aurc, eaurc, aupr, fpr, ece, nll, brier = metrics.calc_metrics(
            test_loader, test_label, test_onehot, model, cls_criterion)
        # result write
        result_logger.write([
            current_train, test_acc, aurc * 1000, eaurc * 1000, aupr * 100,
            fpr * 100, ece * 100, nll * 10, brier * 100
        ])
        random.shuffle(unlabeled_set)
        subset = unlabeled_set[:args.subset]
        unlabeled_poolloader = DataLoader(
            unlabeled_pool,
            sampler=SubsetSequentialSampler(subset),
            batch_size=args.batch_size,
            drop_last=False)
        all_confidence = get_confidence(model, unlabeled_poolloader)
        print(len(all_confidence))
        arg = np.argsort(all_confidence)
        labeled_set = list(
            set(labeled_set) | set(np.array(unlabeled_set)[arg][:args.budget]))
        unlabeled_set = list(set(unlabeled_set) - set(labeled_set))
        current_train = len(labeled_set)

        #unlabeled_set = list(torch.tensor(unlabeled_set)[arg][args.budget:].numpy()) \
        #                            + unlabeled_set[args.subset:]
        print("after acquistiion")
        print('current labeled :', len(labeled_set))
        print('current unlabeled :', len(unlabeled_set))

        labeled_dataloader = DataLoader(
            train_set,
            sampler=SubsetRandomSampler(labeled_set),
            batch_size=args.batch_size,
            drop_last=True)
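The acquisition step sorts the pooled samples by ascending confidence and labels the least confident ones first. A toy trace with made-up numbers:

import numpy as np

unlabeled_set = [10, 11, 12, 13]              # sample indices in the pool
confidence = np.array([0.9, 0.2, 0.6, 0.4])   # model confidence per sample
budget = 2
order = np.argsort(confidence)                # ascending: least confident first
picked = np.array(unlabeled_set)[order][:budget]
print(picked.tolist())  # [11, 13]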
Example #10
def main():
    file_name = "./flood_graph/150_250/128/500/ji_sort/1_conf/sample-wised/default/{}/".format(
        args.b)
    start = time.time()
    # set GPU ID
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    cudnn.benchmark = True

    # check save path
    save_path = file_name
    # save_path = args.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # make dataloader
    if args.valid:
        train_loader, valid_loader, test_loader, test_onehot, test_label = dataset.get_valid_loader(
            args.data, args.data_path, args.batch_size)

    else:
        train_loader, train_onehot, train_label, test_loader, test_onehot, test_label = dataset.get_loader(
            args.data, args.data_path, args.batch_size)

    # set num_class
    if args.data == 'cifar100':
        num_class = 100
    else:
        num_class = 10

    # set num_classes
    model_dict = {
        "num_classes": num_class,
    }

    # set model
    if args.model == 'res':
        model = resnet.resnet110(**model_dict).cuda()
    elif args.model == 'dense':
        model = densenet_BC.DenseNet3(depth=100,
                                      num_classes=num_class,
                                      growth_rate=12,
                                      reduction=0.5,
                                      bottleneck=True,
                                      dropRate=0.0).cuda()
    elif args.model == 'vgg':
        model = vgg.vgg16(**model_dict).cuda()

    # set criterion
    if args.loss == 'MS':
        cls_criterion = losses.MultiSimilarityLoss().cuda()
    elif args.loss == 'Contrastive':
        cls_criterion = losses.ContrastiveLoss().cuda()
    elif args.loss == 'Triplet':
        cls_criterion = losses.TripletLoss().cuda()
    elif args.loss == 'NPair':
        cls_criterion = losses.NPairLoss().cuda()
    elif args.loss == 'Focal':
        cls_criterion = losses.FocalLoss(gamma=3.0).cuda()
    else:
        if args.mode == 0:
            cls_criterion = nn.CrossEntropyLoss().cuda()
        else:
            cls_criterion = nn.CrossEntropyLoss(reduction="none").cuda()

    ranking_criterion = nn.MarginRankingLoss(margin=0.0).cuda()

    # set optimizer (default:sgd)
    optimizer = optim.SGD(
        model.parameters(),
        lr=0.1,
        momentum=0.9,
        weight_decay=5e-4,
        # weight_decay=0.0001,
        nesterov=False)

    # optimizer = optim.SGD(model.parameters(),
    #                       lr=float(args.lr),
    #                       momentum=0.9,
    #                       weight_decay=args.weight_decay,
    #                       nesterov=False)

    # set scheduler
    # scheduler = MultiStepLR(optimizer,
    #                         milestones=[500, 750],
    #                         gamma=0.1)

    scheduler = MultiStepLR(optimizer, milestones=[150, 250], gamma=0.1)

    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay_gamma)

    # make logger
    train_logger = utils.Logger(os.path.join(save_path, 'train.log'))
    result_logger = utils.Logger(os.path.join(save_path, 'result.log'))

    # make History Class
    correctness_history = crl_utils.History(len(train_loader.dataset))

    ## define matrix
    if args.data == 'cifar10' or args.data == 'cifar100':
        matrix_idx_confidence = [[_] for _ in range(50000)]
        matrix_idx_iscorrect = [[_] for _ in range(50000)]
    else:
        matrix_idx_confidence = [[_] for _ in range(73257)]
        matrix_idx_iscorrect = [[_] for _ in range(73257)]

    # write csv
    #'''
    import csv
    f = open('{}/logs_{}_{}.txt'.format(file_name, args.b, args.epochs),
             'w',
             newline='')
    f.write("location = {}\n\n".format(file_name) + str(args))

    f0 = open('{}/Test_confidence_{}_{}.csv'.format(file_name, args.b,
                                                    args.epochs),
              'w',
              newline='')
    # f0 = open('./baseline_graph/150_250/128/500/Test_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
    # f0 = open('./CRL_graph/150_250/Test_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')

    wr_conf_test = csv.writer(f0)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr_conf_test.writerows([header])

    f1 = open('{}/Train_confidence_{}_{}.csv'.format(file_name, args.b,
                                                     args.epochs),
              'w',
              newline='')
    # f1 = open('./baseline_graph/150_250/128/500/Train_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')
    # f1 = open('./CRL_graph/150_250/Train_confidence_{}_{}.csv'.format(args.b, args.epochs), 'w', newline='')

    wr = csv.writer(f1)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr.writerows([header])

    f2 = open('{}/Train_Flood_{}_{}_{}.csv'.format(file_name, args.data,
                                                   args.b, args.epochs),
              'w',
              newline='')
    # f2 = open('./baseline_graph/150_250/128/500/Train_Base_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
    # f2 = open('./CRL_graph/150_250/Train_Flood_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')

    wr_train = csv.writer(f2)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr_train.writerows([header])

    f3 = open('{}/Test_Flood_{}_{}_{}.csv'.format(file_name, args.data, args.b,
                                                  args.epochs),
              'w',
              newline='')
    # f3 = open('./baseline_graph/150_250/128/500/Test_Base_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')
    # f3 = open('./CRL_graph/150_250/Test_Flood_{}_{}_{}.csv'.format(args.data, args.b, args.epochs), 'w', newline='')

    wr_test = csv.writer(f3)
    header = [_ for _ in range(args.epochs + 1)]
    header[0] = 'Epoch'
    wr_test.writerows([header])
    #'''

    # start Train
    best_valid_acc = 0
    test_ece_report = []
    test_acc_report = []
    test_nll_report = []
    test_over_con99_report = []
    test_e99_report = []
    test_cls_loss_report = []

    train_ece_report = []
    train_acc_report = []
    train_nll_report = []
    train_over_con99_report = []
    train_e99_report = []
    train_cls_loss_report = []
    train_rank_loss_report = []
    train_total_loss_report = []

    for epoch in range(1, args.epochs + 1):
        matrix_idx_confidence, matrix_idx_iscorrect, idx, iscorrect, confidence, target, cls_loss_tr, rank_loss_tr, batch_correctness, total_confidence, total_correctness = \
            train.train(matrix_idx_confidence, matrix_idx_iscorrect, train_loader,
                    model,
                    wr,
                    cls_criterion,
                    ranking_criterion,
                    optimizer,
                    epoch,
                    correctness_history,
                    train_logger,
                    args)
        # step the LR scheduler after the epoch's optimizer updates (PyTorch >= 1.1 order)
        scheduler.step()

        if args.rank_weight != 0.0:
            print("RANK ", rank_loss_tr)
            total_loss_tr = cls_loss_tr + rank_loss_tr

        if args.valid:
            idx, iscorrect, confidence, target, cls_loss_val, acc = train.valid(
                valid_loader, model, cls_criterion, ranking_criterion,
                optimizer, epoch, correctness_history, train_logger, args)
            if acc > best_valid_acc:
                best_valid_acc = acc
                print("*** Update Best Acc ***")

        # save model
        if epoch == args.epochs:
            torch.save(model.state_dict(),
                       os.path.join(save_path, 'model.pth'))

        print("########### Train ###########")
        acc_tr, aurc_tr, eaurc_tr, aupr_tr, fpr_tr, ece_tr, nll_tr, brier_tr, E99_tr, over_99_tr, cls_loss_tr = metrics.calc_metrics(
            train_loader, train_label, train_onehot, model, cls_criterion,
            args)

        if args.sort and epoch == 260:
            # if args.sort:
            train_loader = dataset.sort_get_loader(
                args.data, args.data_path, args.batch_size, idx,
                np.array(target), iscorrect,
                batch_correctness, total_confidence, total_correctness,
                np.array(confidence), epoch, args)

        train_acc_report.append(acc_tr)
        train_nll_report.append(nll_tr * 10)
        train_ece_report.append(ece_tr)
        train_over_con99_report.append(over_99_tr)
        train_e99_report.append(E99_tr)
        train_cls_loss_report.append(cls_loss_tr)

        if args.rank_weight != 0.0:
            train_total_loss_report.append(total_loss_tr)
            train_rank_loss_report.append(rank_loss_tr)
        print("CLS ", cls_loss_tr)

        # finish train
        print("########### Test ###########")
        # calc measure
        acc_te, aurc_te, eaurc_te, aupr_te, fpr_te, ece_te, nll_te, brier_te, E99_te, over_99_te, cls_loss_te = metrics.calc_metrics(
            test_loader, test_label, test_onehot, model, cls_criterion, args)
        test_ece_report.append(ece_te)
        test_acc_report.append(acc_te)
        test_nll_report.append(nll_te * 10)
        test_over_con99_report.append(over_99_te)
        test_e99_report.append(E99_te)
        test_cls_loss_report.append(cls_loss_te)

        print("CLS ", cls_loss_te)
        print("############################")

    # for idx in matrix_idx_confidence:
    #     wr.writerow(idx)

    #'''
    # draw graph
    df = pd.DataFrame()
    df['epoch'] = [i for i in range(1, args.epochs + 1)]
    df['test_ece'] = test_ece_report
    df['train_ece'] = train_ece_report
    fig_loss = plt.figure(figsize=(35, 35))
    fig_loss.set_facecolor('white')
    ax = fig_loss.add_subplot()

    ax.plot(df['epoch'],
            df['test_ece'],
            df['epoch'],
            df['train_ece'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] ECE per epoch', fontsize=80)
    # plt.title('[BASE] ECE per epoch', fontsize=80)
    # plt.title('[CRL] ECE per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('ECE', fontsize=70)
    plt.ylim([0, 1])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_ECE_lr_{}.png'.format(file_name, args.model, args.b,
                                                args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_ECE_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_ECE_lr_{}.png'.format(args.model, args.b, args.epochs))

    df2 = pd.DataFrame()
    df2['epoch'] = [i for i in range(1, args.epochs + 1)]
    df2['test_acc'] = test_acc_report
    df2['train_acc'] = train_acc_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df2['epoch'],
            df2['test_acc'],
            df2['epoch'],
            df2['train_acc'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] Accuracy per epoch', fontsize=80)
    # plt.title('[BASE] Accuracy per epoch', fontsize=80)
    # plt.title('[CRL] Accuracy per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('Accuracy', fontsize=70)
    plt.ylim([0, 100])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_acc_lr_{}.png'.format(file_name, args.model, args.b,
                                                args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_acc_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_acc_lr_{}.png'.format(args.model, args.b, args.epochs))

    df3 = pd.DataFrame()
    df3['epoch'] = [i for i in range(1, args.epochs + 1)]
    df3['test_nll'] = test_nll_report
    df3['train_nll'] = train_nll_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df3['epoch'],
            df3['test_nll'],
            df3['epoch'],
            df3['train_nll'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] NLL per epoch', fontsize=80)
    # plt.title('[BASE] NLL per epoch', fontsize=80)
    # plt.title('[CRL] NLL per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('NLL', fontsize=70)
    plt.ylim([0, 45])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_nll_lr_{}.png'.format(file_name, args.model, args.b,
                                                args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_nll_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_nll_lr_{}.png'.format(args.model, args.b, args.epochs))

    df4 = pd.DataFrame()
    df4['epoch'] = [i for i in range(1, args.epochs + 1)]
    df4['test_over_con99'] = test_over_con99_report
    df4['train_over_con99'] = train_over_con99_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df4['epoch'],
            df4['test_over_con99'],
            df4['epoch'],
            df4['train_over_con99'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] Over conf99 per epoch', fontsize=80)
    # plt.title('[BASE] Over conf99 per epoch', fontsize=80)
    # plt.title('[CRL] Over conf99 per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('Over con99', fontsize=70)
    if args.data == 'cifar10' or args.data == 'cifar100':
        plt.ylim([0, 50000])
    else:
        plt.ylim([0, 73257])

    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_over_conf99_lr_{}.png'.format(
        file_name, args.model, args.b, args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_over_conf99_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_over_conf99_lr_{}.png'.format(args.model, args.b, args.epochs))

    df5 = pd.DataFrame()
    df5['epoch'] = [i for i in range(1, args.epochs + 1)]
    df5['test_e99'] = test_e99_report
    df5['train_e99'] = train_e99_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df5['epoch'],
            df5['test_e99'],
            df5['epoch'],
            df5['train_e99'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] E99 per epoch', fontsize=80)
    # plt.title('[BASE] E99 per epoch', fontsize=80)
    # plt.title('[CRL] E99 per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('E99', fontsize=70)
    plt.ylim([0, 0.2])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_E99_flood_lr_{}.png'.format(file_name, args.model,
                                                      args.b, args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_E99_flood_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_E99_flood_lr_{}.png'.format(args.model, args.b, args.epochs))

    df5 = pd.DataFrame()
    df5['epoch'] = [i for i in range(1, args.epochs + 1)]
    df5['test_cls_loss'] = test_cls_loss_report
    df5['train_cls_loss'] = train_cls_loss_report
    fig_acc = plt.figure(figsize=(35, 35))
    fig_acc.set_facecolor('white')
    ax = fig_acc.add_subplot()

    ax.plot(df5['epoch'],
            df5['test_cls_loss'],
            df5['epoch'],
            df5['train_cls_loss'],
            linewidth=10)
    ax.legend(['Test', 'Train'], loc=2, prop={'size': 60})
    plt.title('[FL] CLS_loss per epoch', fontsize=80)
    # plt.title('[BASE] CLS_loss per epoch', fontsize=80)
    # plt.title('[CRL] CLS_loss per epoch', fontsize=80)
    plt.xlabel('Epoch', fontsize=70)
    plt.ylabel('Loss', fontsize=70)
    plt.ylim([0, 5])
    plt.setp(ax.get_xticklabels(), fontsize=30)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    plt.savefig('{}/{}_{}_cls_loss_flood_lr_{}.png'.format(
        file_name, args.model, args.b, args.epochs))
    # plt.savefig('./baseline_graph/150_250/128/500/{}_{}_cls_loss_flood_lr_{}.png'.format(args.model, args.b, args.epochs))
    # plt.savefig('./CRL_graph/150_250/{}_{}_cls_loss_flood_lr_{}.png'.format(args.model, args.b, args.epochs))

    if args.rank_weight != 0.0:
        df6 = pd.DataFrame()
        df6['epoch'] = [i for i in range(1, args.epochs + 1)]
        df6['train_cls_loss'] = train_cls_loss_report
        df6['train_rank_loss'] = train_rank_loss_report
        df6['train_total_loss'] = train_total_loss_report
        fig_acc = plt.figure(figsize=(35, 35))
        fig_acc.set_facecolor('white')
        ax = fig_acc.add_subplot()

        ax.plot(df6['epoch'],
                df6['train_cls_loss'],
                df6['epoch'],
                df6['train_rank_loss'],
                df6['epoch'],
                df6['train_total_loss'],
                linewidth=10)
        ax.legend(['CLS', 'Rank', 'Total'], loc=2, prop={'size': 60})
        plt.title('[FL] CLS_loss per epoch', fontsize=80)
        plt.xlabel('Epoch', fontsize=70)
        plt.ylabel('Loss', fontsize=70)
        # plt.ylim([0, 5])
        plt.setp(ax.get_xticklabels(), fontsize=30)
        plt.setp(ax.get_yticklabels(), fontsize=30)
        plt.savefig('{}/{}_{}_rank_total_loss_lr_{}.png'.format(
            file_name, args.model, args.b, args.epochs))

    test_acc_report.insert(0, 'ACC')
    test_ece_report.insert(0, 'ECE')
    test_nll_report.insert(0, 'NLL')
    test_over_con99_report.insert(0, 'Over_conf99')
    test_e99_report.insert(0, 'E99')
    test_cls_loss_report.insert(0, 'CLS')
    wr_test.writerow(test_acc_report)
    wr_test.writerow(test_ece_report)
    wr_test.writerow(test_nll_report)
    wr_test.writerow(test_over_con99_report)
    wr_test.writerow(test_e99_report)
    wr_test.writerow(test_cls_loss_report)

    train_acc_report.insert(0, 'ACC')
    train_ece_report.insert(0, 'ECE')
    train_nll_report.insert(0, 'NLL')
    train_over_con99_report.insert(0, 'Over_conf99')
    train_e99_report.insert(0, 'E99')
    train_cls_loss_report.insert(0, 'CLS')

    wr_train.writerow(train_acc_report)
    wr_train.writerow(train_ece_report)
    wr_train.writerow(train_nll_report)
    wr_train.writerow(train_over_con99_report)
    wr_train.writerow(train_e99_report)
    wr_train.writerow(train_cls_loss_report)

    if args.rank_weight != 0.0:
        train_rank_loss_report.insert(0, 'Rank')
        train_total_loss_report.insert(0, 'Total')
        wr_train.writerow(train_rank_loss_report)
        wr_train.writerow(train_total_loss_report)

    #'''

    # result write
    result_logger.write([
        acc_te, aurc_te * 1000, eaurc_te * 1000, aupr_te * 100, fpr_te * 100,
        ece_te * 100, nll_te * 10, brier_te * 100, E99_te * 100
    ])
    if args.valid:
        print("Best Valid Acc : {}".format(best_valid_acc))
    print("Flood Level: {}".format(args.b))
    print("Sort : {}".format(args.sort))
    print("Sort Mode : {}".format(args.sort_mode))
    print("TIME : ", time.time() - start)