Example #1
def main():
    opt = parser.parse_args()
    model = ChannelNet(v=opt.version, num_class=100).cuda(opt.gpu)
    print(model)

    # if opt.gpu is not None:
    # model = nn.parallel.DataParallel(model, device_ids=opt.gpu)

    criterion = nn.CrossEntropyLoss().cuda(opt.gpu)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    trainloader = train_loader(opt.data_path, opt.batchsize, opt.num_workers)
    testloader = test_loader(opt.data_path, opt.batchsize, opt.num_workers)

    best_acc = 0.0
    for epoch in range(opt.max_epoch):
        if epoch > 30:
            adjust_learning_rate(optimizer, epoch, opt)
        train(trainloader, model, optimizer, criterion, epoch, opt)
        acc = test(testloader, model, criterion, opt)
        if acc > best_acc:
            best_acc = acc
            state = {
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(
                state, os.path.join(opt.save_path,
                                    '%d_checkpoint.ckpt' % epoch))
        print(' Best accuracy so far : %.4f%%' % best_acc)
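adjust_learning_rate is called but not defined in this snippet; a minimal sketch, assuming the usual step-decay schedule (the actual schedule may differ):

def adjust_learning_rate(optimizer, epoch, opt):
    # Step decay: scale the base learning rate down by 10x every 30 epochs.
    lr = opt.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
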
Example #2
def main():
    global args, best_loss
    args = parser.parse_args()

    configure("runs/%s" % (args.name))

    model = MarketLSTM(args.layers)

    # get the number of model parameters

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    kwargs = {'num_workers': 1, 'pin_memory': True}

    model = model.cuda().double()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.MSELoss().cuda()
    if args.adam:
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     weight_decay=args.weight_decay)
    elif args.yf:
        optimizer = YFOptimizer(model.parameters(), lr=args.lr)
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    for epoch in range(args.epochs):
        train(train_loader(), model, criterion, optimizer, epoch)
        loss = validate(val_loader(), model, criterion, epoch)

        # remember best loss and save checkpoint
        is_best = loss < best_loss
        best_loss = min(loss, best_loss)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_loss': best_loss,
            }, is_best)
    print('Best loss: ', best_loss)
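save_checkpoint is likewise undefined here; a minimal sketch in the style of the classic PyTorch ImageNet example (the filename conventions are assumptions):

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Persist the latest state; keep a separate copy of the best model so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
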
Example #3
def main():
    opt = parser.parse_args()
    model = resnet56(num_classes=100, dropblock=opt.dropblock).cuda()

    # if opt.gpu is not None:
    # model = nn.parallel.DataParallel(model, device_ids=opt.gpu)

    criterion = nn.CrossEntropyLoss().cuda(opt.gpu)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    trainloader = train_loader(opt.data_path, opt.batchsize, opt.num_workers)
    testloader = test_loader(opt.data_path, opt.batchsize, opt.num_workers)

    best_acc = 0.0

    keep_prob = np.linspace(1.0, 0.85, opt.max_epoch - 30)

    for epoch in range(opt.max_epoch):
        if epoch > 30:
            if opt.dropblock:
                model.keepprob_update(keep_prob[epoch - 30])
            adjust_learning_rate(optimizer, epoch, opt)
        train(trainloader, model, optimizer, criterion, epoch, opt)
        acc = test(testloader, model, criterion, opt)
        if acc > best_acc:
            best_acc = acc
            state = {
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(state, os.path.join(opt.save_path,
                                           '%d_checkpoint.ckpt' % epoch))
        print(' Best accuracy so far : %.4f%%' % best_acc)
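keepprob_update presumably pushes the scheduled keep probability into the model's DropBlock layers; a hypothetical sketch (the layer class and attribute names are assumptions, not this repo's code):

def keepprob_update(self, keep_prob):
    # Walk all submodules and update every DropBlock layer's keep probability.
    for module in self.modules():
        if isinstance(module, DropBlock):
            module.keep_prob = keep_prob
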
Example #4
if args.device == 'cpu':
	device = torch.device('cpu')
else:
	device = torch.device('cuda:{}'.format(args.device))
	torch.cuda.set_device(args.device)

config_list = [args.name, args.epochs, args.batch_size, args.lr, 
				args.input_h, args.input_w, 
				args.hidden_size, args.latent_size,
				args.L, args.binarize, args.mc]
if args.sample:
	config_list.append('sample')
config = ""
for i in map(str, config_list):
	config = config + '_' + i
print("Config:", config)

train_loader = dataloader.train_loader('mnist', args.data_directory, args.batch_size)
test_loader = dataloader.test_loader('mnist', args.data_directory, args.batch_size)

encoder = model.Encoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(device)
decoder = model.Decoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(device)
if args.load_model != '000000000000':
	encoder.load_state_dict(torch.load(args.log_directory + args.name + '/' + args.load_model+ '/{}_encoder.pt'.format(args.name)))
	decoder.load_state_dict(torch.load(args.log_directory + args.name + '/' + args.load_model + '/{}_decoder.pt'.format(args.name)))
	args.time_stamp = args.load_model[:12]

log = args.log_directory + args.name + '/' + args.time_stamp + config + '/'
writer = SummaryWriter(log)

optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=args.lr)

def binarize(data):
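The function body is truncated here; for dynamically binarized MNIST (suggested by args.binarize above), a common choice is Bernoulli sampling, e.g. (an assumption, not the original body):

def binarize(data):
    # Sample each pixel as a Bernoulli draw with probability equal to its intensity.
    return torch.bernoulli(data)
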
Example #5
config_list = [
    args.input_h,
    args.input_w,
    args.filter_size,
    args.kernel_size,
    args.stride_size,
    args.layer_size,
    args.latent_size,
    # args.hidden_size,
    args.L,
    args.beta
]
config = '_'.join(map(str, config_list))
print("Config:", config)

train_loader = dataloader.train_loader(args.dataset, args.data_directory,
                                       args.batch_size, args.input_h,
                                       args.input_w, args.cpu_num)
test_loader = dataloader.test_loader(args.dataset, args.data_directory,
                                     args.batch_size, args.input_h,
                                     args.input_w, args.cpu_num)

# encoder = model.Encoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(args.device)
# decoder = model.Decoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(args.device)
encoder = model.Encoder(args.channel_size, args.filter_size, args.kernel_size,
                        args.stride_size, args.layer_size,
                        args.latent_size).to(args.device)
decoder = model.Decoder(args.channel_size, args.filter_size, args.kernel_size,
                        args.stride_size, args.layer_size,
                        args.latent_size).to(args.device)

if args.load_model != '000000000000':
Example #6
if args.device == 'cpu':
	device = torch.device('cpu')
else:
	device = torch.device('cuda:{}'.format(args.device))
	torch.cuda.set_device(args.device)

config_list = [args.epochs, args.batch_size, args.lr, 
				args.input_h, args.input_w, 
				args.channel_size, args.content_code_h, args.content_code_w, args.style_code_num,
				args.lx, args.lc, args.ls, args.device]
config = ""
for i in map(str, config_list):
	config = config + '_' + i
print("Config:", config)

train_loader = dataloader.train_loader('celeba', args.data_directory, args.batch_size)
test_loader = dataloader.test_loader('celeba', args.data_directory, args.batch_size)

if args.load_model != '000000000000':
	ce1 = torch.load(args.log_directory + args.load_model + '/content_encoder1.pt')
	ce2 = torch.load(args.log_directory + args.load_model + '/content_encoder2.pt')
	se1 = torch.load(args.log_directory + args.load_model + '/style_encoder1.pt')
	se2 = torch.load(args.log_directory + args.load_model + '/style_encoder2.pt')
	de1 = torch.load(args.log_directory + args.load_model + '/decoder1.pt')
	de2 = torch.load(args.log_directory + args.load_model + '/decoder2.pt')
	dis1 = torch.load(args.log_directory + args.load_model + '/discriminator1.pt')
	dis2 = torch.load(args.log_directory + args.load_model + '/discriminator2.pt')
	args.time_stamp = args.load_model[:12]
else:
	ce1 = model.Content_encoder(args.channel_size, args.content_code_h, args.content_code_w).to(device)
	ce2 = model.Content_encoder(args.channel_size, args.content_code_h, args.content_code_w).to(device)
Example #7
import os
import time
import torch.optim as optim
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence
from tensorboardX import SummaryWriter
from build_model import build_model
from utils import *
from collections import defaultdict
import cv2
from configuration import get_config
import dataloader

args = get_config()
device = args.device

train_loader = dataloader.train_loader(args.dataset, args.data_directory,
                                       args.batch_size, args.data_config)
test_loader = dataloader.test_loader(args.dataset, args.data_directory,
                                     args.batch_size, args.data_config)
args.label_size = train_loader.dataset.a_size
args.q_size = train_loader.dataset.q_size
args.c_size = train_loader.dataset.c_size

models = build_model(args)

if args.load_model != '000000000000':
    for model_name, model in models.items():
        model.load_state_dict(
            torch.load(
                os.path.join(args.log_directory + args.project,
                             args.load_model, model_name)))
    args.time_stamp = args.load_model[:12]
Example #8
def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    cudnn.benchmark = True

    if args.model == 'res18':
        net = resnet.ResNet18(num_classes=40).cuda()
    elif args.model == 'resnext':
        net = resnext.ResNeXt(cardinality=args.cardinality,
                              depth=args.depth,
                              nlabels=40,
                              base_width=args.base_width,
                              widen_factor=args.widen_factor).cuda()
    elif args.model == 'res_cifar':
        net = resnet_cifar.resnet20(num_classes=40).cuda()

    state_dict = torch.load(f'{args.model_path}/model_200.pth')
    net.load_state_dict(state_dict)

    criterion = nn.CrossEntropyLoss().cuda()
    metric_logger = utils.Logger(os.path.join(args.save_path, 'metric.log'))
    ''' Open Set Recognition '''
    ''' validation '''
    print('')
    print('Open Set Recognition/Out of Distribution Detection-Validation')
    print('known data: CIFAR40')
    print('unknown data: new-TinyImageNet158')
    print('')

    train_loader = dataloader.train_loader(args.data_root, args.data,
                                           args.batch_size)

    in_valid_loader = dataloader.in_dist_loader(args.data_root, args.in_data,
                                                args.batch_size, 'valid')
    ood_valid_loader = dataloader.out_dist_loader(args.data_root,
                                                  'new-tinyimagenet158',
                                                  args.batch_size, 'valid')
    alpha_list = [40]
    eta_list = [5, 10, 20, 30, 40]

    for alpha in alpha_list:
        for eta in eta_list:
            args.weibull_alpha = alpha
            args.weibull_tail = eta

            in_softmax, in_openmax, in_softlogit, in_openlogit,\
                _, _, _ = test(net, train_loader, in_valid_loader)
            out_softmax, out_openmax, out_softlogit, out_openlogit,\
                _, _, _ = test(net, train_loader, ood_valid_loader)


            f1, li_f1, li_thresholds, \
            li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                                      pos_label=0)
            ood_scores = metrics.ood_metrics(1 - np.array(in_openmax),
                                             1 - np.array(out_openmax))

            if not os.path.exists(args.save_path):
                os.makedirs(args.save_path)

            metric_logger.write([
                'VAL CIFAR40-Tiny158', '\t', 'FPR@95%TPR', '\t', 'DET ERR',
                '\t', 'AUROC', '\t\t', 'AUPR-IN', '\t', 'AUPR-OUT', '\t',
                'F1 SCORE', '\t', ''
            ])
            metric_logger.write([
                '', '\t\t\t', 100 * ood_scores['FPR95'], '\t',
                100 * ood_scores['DTERR'], '\t', 100 * ood_scores['AUROC'],
                '\t', 100 * ood_scores['AUIN'], '\t',
                100 * ood_scores['AUOUT'], '\t', f1, '\t', ''
            ])

            # save to .csv
            with open(f'{args.save_path}/openmax-scores.csv', 'a',
                      newline='') as f:
                columns = [
                    "", "FPR@95%TPR", "DET ERR", "AUROC", "AUPR-IN",
                    "AUPR-OUT", "F1 SCORE",
                    "alpha",
                    "eta"
                ]
                writer = csv.writer(f)
                if args.weibull_alpha == 40 and args.weibull_tail == 5:
                    writer.writerow([
                        '* Open Set Recognition/Out of Distribution Detection Validation-new-TinyImageNet158'
                    ])
                    writer.writerow(columns)
                writer.writerow([
                    '', 100 * ood_scores['FPR95'], 100 * ood_scores['DTERR'],
                    100 * ood_scores['AUROC'], 100 * ood_scores['AUIN'],
                    100 * ood_scores['AUOUT'], f1, args.weibull_alpha,
                    args.weibull_tail
                ])
                # writer.writerow([''])
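metrics.f1_score above reports the best F1 over a sweep of decision thresholds; a standalone illustration of that idea using scikit-learn (not the repo's implementation, which also returns per-threshold precision/recall lists):

import numpy as np
from sklearn.metrics import precision_recall_curve

def best_f1(in_scores, out_scores):
    # Treat in-distribution samples as the positive class; higher score = more in-dist.
    y_true = np.concatenate([np.ones_like(in_scores), np.zeros_like(out_scores)])
    y_score = np.concatenate([in_scores, out_scores])
    precision, recall, thresholds = precision_recall_curve(y_true, y_score)
    f1 = 2 * precision * recall / np.clip(precision + recall, 1e-12, None)
    best = int(np.argmax(f1))
    return f1[best], thresholds[min(best, len(thresholds) - 1)]
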
Example #9
# argparse's type=list would split a command-line value into single characters;
# nargs=4 with type=float parses four numbers instead.
parser.add_argument('--parameters',
                    nargs=4,
                    type=float,
                    default=[256, 512, 10, 0.25],
                    metavar='N',
                    help='vqvae parameters [hidden_size, K, D, beta]')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

writer = SummaryWriter(args.log_directory + '/' + args.time_stamp + '/')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

train_loader = dataloader.train_loader(args.data, args.data_directory,
                                       args.batch_size)
test_loader = dataloader.test_loader(args.data, args.data_directory,
                                     args.batch_size)

hidden_size, K, D, beta = args.parameters
hidden_size, K, D = int(hidden_size), int(K), int(D)  # layer sizes must be ints

if args.load_model != '000000':
    vqvae = torch.load(args.log_directory + '/' + args.load_model +
                       '/vqvae.pt')
else:
    vqvae = model.VQVAE(hidden_size, K, D, beta)
if args.cuda:
    vqvae.cuda()

optimizer = optim.Adam(vqvae.parameters(), lr=args.lr)
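A minimal sketch of how these pieces are typically wired into a training step, assuming the VQVAE forward pass returns the reconstruction together with its codebook/commitment loss (the actual model.VQVAE interface is not shown in this snippet):

import torch.nn.functional as F

def train_step(batch):
    optimizer.zero_grad()
    recon, vq_loss = vqvae(batch)              # assumed forward signature
    loss = F.mse_loss(recon, batch) + vq_loss  # reconstruction + VQ terms
    loss.backward()
    optimizer.step()
    return loss.item()
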
Example #10
def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    cudnn.benchmark = True

    if args.model == 'res18':
        net = resnet.ResNet18(num_classes=40).cuda()
    elif args.model == 'resnext':
        net = resnext.ResNeXt(cardinality=args.cardinality,
                              depth=args.depth,
                              nlabels=40,
                              base_width=args.base_width,
                              widen_factor=args.widen_factor).cuda()
    elif args.model == 'res_cifar':
        net = resnet_cifar.resnet20(num_classes=40).cuda()

    state_dict = torch.load(f'{args.model_path}/model_200.pth')
    net.load_state_dict(state_dict)

    criterion = nn.CrossEntropyLoss().cuda()
    metric_logger = utils.Logger(os.path.join(args.save_path, 'test_metric.log'))

    ''' Misclassification Detection '''
    print('')
    print('Misclassification Detection')
    print('data: CIFAR40')
    print('')
    
    train_loader = dataloader.train_loader(args.data_root,
                                           args.data,
                                           args.batch_size)

    test_loader, test_targets = dataloader.test_loader(args.data_root,
                                                       args.in_data,
                                                       args.batch_size,
                                                       mode='test')
    
    in_softmax, in_openmax, in_softlogit, in_openlogit, in_open_pred, \
                correct, labels = test(net, train_loader, test_loader)

    acc, auroc, aurc, eaurc, \
    fpr, aupr, ece, li_acc, li_count = metrics.md_metrics_om(in_openlogit,
                                                            in_openmax,
                                                            correct,
                                                            labels)
                                                          

    plot.draw_reliability_diagrams(args.save_path, li_acc, li_count, ece)
    metric_logger.write(['Miscls Detect', '\t\t',
                         'ACCURACY', '\t',
                         'AUROC', '\t\t',
                         'AURC', '\t\t',
                         'E-AURC', '\t\t',
                         'AUPR', '\t\t',
                         'FPR@95%TPR', '\t',
                         'ECE'])
    metric_logger.write(['\t', '\t\t',
                         acc * 100, '\t',
                         auroc * 100, '\t',
                         aurc * 1000, '\t',
                         eaurc * 1000, '\t',
                         aupr * 100, '\t',
                         fpr * 100, '\t',
                         ece * 100])

    with open(f'{args.save_path}/base-scores.csv', 'w', newline='') as f:
        columns = ["",
                   "ACC",
                   "AUROC",
                   "AURC",
                   "E-AURC",
                   "AUPR",
                   "FPR@95%TPR",
                   "ECE"]
        writer = csv.writer(f)
        writer.writerow(['* Misclassification Detection'])
        writer.writerow(columns)
        writer.writerow(
            ['',
             acc * 100,
             auroc * 100,
             aurc * 1000,
             eaurc * 1000,
             aupr * 100,
             fpr * 100,
             ece * 100])
        writer.writerow([''])


    ''' test '''
    print('')
    print('Open Set Recognition-Test')
    print('known data: CIFAR40')
    print('unknown data: CIFAR60')
    print('')
    in_test_loader = dataloader.in_dist_loader(args.data_root,
                                               args.in_data,
                                               args.batch_size,
                                               'test')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'cifar60',
                                                 args.batch_size,
                                                 'test')

    in_softmax, in_openmax, in_softlogit, in_openlogit,\
        _, _, _ = test(net, train_loader, in_test_loader)
    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-CIFAR60', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='CIFAR60',
                 mode='test', task='OsR')


    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Open Set Recognition Test-CIFAR60'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
            f1])
        writer.writerow([''])




    ''' Out of Distribution Detection '''
    ''' test '''
    print('')
    print('Out of Distribution Detection-Test')
    print('known data: CIFAR40')
    print('unknown data: SVHN')
    print('')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'svhn',
                                                 args.batch_size,
                                                 'test')

    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-SVHN', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='SVHN',
                 mode='test', task='OoD')


    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Out of Distribution Detection Test-SVHN'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
            f1])
        writer.writerow([''])



    print('')
    print('Out of Distribution Detection-Test')
    print('known data: CIFAR40')
    print('unknown data: LSUN-FIX')
    print('')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'lsun-fix',
                                                 args.batch_size,
                                                 'test')

    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-LSUNFIX', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='LSUN-FIX',
                 mode='test', task='OoD')

    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Out of Distribution Detection Test-LSUN-FIX'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
            f1])
        writer.writerow([''])



    print('')
    print('Out of Distribution Detection-Test')
    print('known data: CIFAR40')
    print('unknown data: new-TinyImageNet158')
    print('')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'new-tinyimagenet158',
                                                 args.batch_size,
                                                 'test')

    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-Tiny158', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='new-TinyImageNet158',
                 mode='test', task='OoD')

    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Out of Distribution Detection Test-new-TinyImageNet158'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
             f1])
        writer.writerow([''])
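FPR@95%TPR, reported throughout these tables, has a compact standalone definition (an illustration, not the repo's metrics.ood_metrics):

import numpy as np

def fpr_at_95_tpr(in_scores, out_scores):
    # Pick the threshold that accepts 95% of in-distribution samples,
    # then measure how many out-of-distribution samples still pass it.
    threshold = np.percentile(in_scores, 5)
    return float(np.mean(out_scores >= threshold))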