Example #1
def main():
    opt = parser.parse_args()
    model = ChannelNet(v=opt.version, num_class=100).cuda(opt.gpu)
    print(model)

    # if opt.gpu is not None:
    # model = nn.parallel.DataParallel(model, device_ids=opt.gpu)

    criterion = nn.CrossEntropyLoss().cuda(opt.gpu)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    trainloader = train_loader(opt.data_path, opt.batchsize, opt.num_workers)
    testloader = test_loader(opt.data_path, opt.batchsize, opt.num_workers)

    best_acc = 0.0
    for epoch in range(opt.max_epoch):
        # decay the learning rate only after the 30-epoch warm-up
        if epoch > 30:
            adjust_learning_rate(optimizer, epoch, opt)
        train(trainloader, model, optimizer, criterion, epoch, opt)
        acc = test(testloader, model, criterion, opt)
        if acc > best_acc:
            best_acc = acc
            state = {
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(
                state, os.path.join(opt.save_path,
                                    '%d_checkpoint.ckpt' % epoch))
        print(' Best accuracy so far : %.4f%%' % best_acc)
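Both this example and the next call an adjust_learning_rate helper that is not shown in the snippet. A minimal sketch of what such a helper commonly looks like, assuming a simple step decay driven by the parsed options; the decay factor and interval are illustrative assumptions, not taken from the source:

def adjust_learning_rate(optimizer, epoch, opt):
    # hypothetical step decay: halve the base LR every 30 epochs
    lr = opt.lr * (0.5 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr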
Example #2
def main():
    opt = parser.parse_args()
    model = resnet56(num_classes=100, dropblock=opt.dropblock).cuda()

    # if opt.gpu is not None:
    # model = nn.parallel.DataParallel(model, device_ids=opt.gpu)

    criterion = nn.CrossEntropyLoss().cuda(opt.gpu)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    trainloader = train_loader(opt.data_path, opt.batchsize, opt.num_workers)
    testloader = test_loader(opt.data_path, opt.batchsize, opt.num_workers)

    best_acc = 0.0

    # linearly anneal the DropBlock keep probability from 1.0 to 0.85 across
    # the post-warm-up epochs (indexed by epoch - 30 in the loop below)
    keep_prob = np.linspace(1.0, 0.85, opt.max_epoch - 30)

    for epoch in range(opt.max_epoch):
        if epoch > 30:
            if opt.dropblock:
                model.keepprob_update(keep_prob[epoch - 30])
            adjust_learning_rate(optimizer, epoch, opt)
        train(trainloader, model, optimizer, criterion, epoch, opt)
        acc = test(testloader, model, criterion, opt)
        if acc > best_acc:
            best_acc = acc
            state = {
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(state, os.path.join(opt.save_path,
                                           '%d_checkpoint.ckpt' % epoch))
        print(' Best accuracy so far : %.4f%%' % best_acc)
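The keep_prob schedule above anneals the DropBlock keep probability linearly from 1.0 down to 0.85 over the post-warm-up epochs. A quick self-contained check of the endpoints (the max_epoch value is illustrative, not from the source):

import numpy as np

max_epoch = 90  # illustrative value
keep_prob = np.linspace(1.0, 0.85, max_epoch - 30)
print(keep_prob[0], keep_prob[-1])  # -> 1.0 0.85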
Example #3
	device = torch.device('cuda:{}'.format(args.device))
	torch.cuda.set_device(args.device)

config_list = [args.name, args.epochs, args.batch_size, args.lr, 
				args.input_h, args.input_w, 
				args.hidden_size, args.latent_size,
				args.L, args.binarize, args.mc]
if args.sample:
	config_list.append('sample')
config = ""
for i in map(str, config_list):
	config = config + '_' + i
print("Config:", config)

train_loader = dataloader.train_loader('mnist', args.data_directory, args.batch_size)
test_loader = dataloader.test_loader('mnist', args.data_directory, args.batch_size)

encoder = model.Encoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(device)
decoder = model.Decoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(device)
if args.load_model != '000000000000':
	encoder.load_state_dict(torch.load(args.log_directory + args.name + '/' + args.load_model + '/{}_encoder.pt'.format(args.name)))
	decoder.load_state_dict(torch.load(args.log_directory + args.name + '/' + args.load_model + '/{}_decoder.pt'.format(args.name)))
	args.time_stamp = args.load_model[:12]

log = args.log_directory + args.name + '/' + args.time_stamp + config + '/'
writer = SummaryWriter(log)

optimizer = optim.Adam(list(encoder.parameters())+list(decoder.parameters()), lr = args.lr)

def binarize(data):
	# threshold pixels at 0.5 and return a float tensor for the model
	return (data > 0.5).float()
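A quick self-contained check of the fixed-threshold binarization above (note that a pixel value of exactly 0.5 maps to 0):

import torch

def binarize(data):
    return (data > 0.5).float()

x = torch.tensor([0.1, 0.5, 0.7])
print(binarize(x))  # tensor([0., 0., 1.])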
Example #4
    args.kernel_size,
    args.stride_size,
    args.layer_size,
    args.latent_size,
    # args.hidden_size,
    args.L,
    args.beta
]
config = '_'.join(map(str, config_list))
print("Config:", config)

train_loader = dataloader.train_loader(args.dataset, args.data_directory,
                                       args.batch_size, args.input_h,
                                       args.input_w, args.cpu_num)
test_loader = dataloader.test_loader(args.dataset, args.data_directory,
                                     args.batch_size, args.input_h,
                                     args.input_w, args.cpu_num)

# encoder = model.Encoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(args.device)
# decoder = model.Decoder(args.input_h, args.input_w, args.hidden_size, args.latent_size).to(args.device)
encoder = model.Encoder(args.channel_size, args.filter_size, args.kernel_size,
                        args.stride_size, args.layer_size,
                        args.latent_size).to(args.device)
decoder = model.Decoder(args.channel_size, args.filter_size, args.kernel_size,
                        args.stride_size, args.layer_size,
                        args.latent_size).to(args.device)

if args.load_model != '000000000000':
    # load_state_dict() returns a result object, not the module; the encoder
    # was already moved to args.device when it was constructed above
    encoder.load_state_dict(
        torch.load(args.log_directory + args.name + '/' + args.load_model +
                   '/bvae_encoder.pt'))
Example #5
	device = torch.device('cpu')
else:
	device = torch.device('cuda:{}'.format(args.device))
	torch.cuda.set_device(args.device)

config_list = [args.epochs, args.batch_size, args.lr, 
				args.input_h, args.input_w, 
				args.channel_size, args.content_code_h, args.content_code_w, args.style_code_num,
				args.lx, args.lc, args.ls, args.device]
config = ""
for i in map(str, config_list):
	config = config + '_' + i
print("Config:", config)

train_loader = dataloader.train_loader('celeba', args.data_directory, args.batch_size)
test_loader = dataloader.test_loader('celeba', args.data_directory, args.batch_size)

if args.load_model != '000000000000':
	ce1 = torch.load(args.log_directory + args.load_model + '/content_encoder1.pt')
	ce2 = torch.load(args.log_directory + args.load_model + '/content_encoder2.pt')
	se1 = torch.load(args.log_directory + args.load_model + '/style_encoder1.pt')
	se2 = torch.load(args.log_directory + args.load_model + '/style_encoder2.pt')
	de1 = torch.load(args.log_directory + args.load_model + '/decoder1.pt')
	de2 = torch.load(args.log_directory + args.load_model + '/decoder2.pt')
	dis1 = torch.load(args.log_directory + args.load_model + '/discriminator1.pt')
	dis2 = torch.load(args.log_directory + args.load_model + '/discriminator2.pt')
	args.time_stamp = args.load_model[:12]
else:
	ce1 = model.Content_encoder(args.channel_size, args.content_code_h, args.content_code_w).to(device)
	ce2 = model.Content_encoder(args.channel_size, args.content_code_h, args.content_code_w).to(device)
	se1 = model.Style_encoder(args.channel_size, args.style_code_num).to(device)
Example #6
import torch.optim as optim
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence
from tensorboardX import SummaryWriter
from build_model import build_model
from utils import *
from collections import defaultdict
import cv2
from configuration import get_config
import dataloader

args = get_config()
device = args.device

train_loader = dataloader.train_loader(args.dataset, args.data_directory,
                                       args.batch_size, args.data_config)
test_loader = dataloader.test_loader(args.dataset, args.data_directory,
                                     args.batch_size, args.data_config)
args.label_size = train_loader.dataset.a_size
args.q_size = train_loader.dataset.q_size
args.c_size = train_loader.dataset.c_size

models = build_model(args)

if args.load_model != '000000000000':
    for model_name, model in models.items():
        model.load_state_dict(
            torch.load(
                os.path.join(args.log_directory + args.project,
                             args.load_model, model_name)))
    args.time_stamp = args.load_model[:12]
    print('Model {} loaded.'.format(args.load_model))
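models here is a dict mapping a file name to a module, which is why loading iterates over models.items(). The matching save step would presumably mirror the same directory layout; a sketch under that assumption, reusing only names from this snippet:

for model_name, model in models.items():
    torch.save(model.state_dict(),
               os.path.join(args.log_directory + args.project,
                            args.time_stamp, model_name))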
Example #7
                    default=[256, 512, 10, 0.25],
                    metavar='N',
                    help='vqvae parameters [hidden_size, K, D, beta]')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

writer = SummaryWriter(args.log_directory + '/' + args.time_stamp + '/')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

train_loader = dataloader.train_loader(args.data, args.data_directory,
                                       args.batch_size)
test_loader = dataloader.test_loader(args.data, args.data_directory,
                                     args.batch_size)

hidden_size, K, D, beta = args.parameters

if args.load_model != '000000':
    vqvae = torch.load(args.log_directory + '/' + args.load_model +
                       '/vqvae.pt')
else:
    vqvae = model.VQVAE(hidden_size, K, D, beta)
if args.cuda:
    vqvae.cuda()

optimizer = optim.Adam(vqvae.parameters(), lr=args.lr)


def train(epoch):
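    # hedged sketch of a typical VQ-VAE training step (the original body is not
    # shown); assumes `import torch.nn.functional as F` is available and that
    # vqvae returns the reconstruction plus a combined VQ/commitment loss
    vqvae.train()
    for data, _ in train_loader:
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        recon, vq_loss = vqvae(data)              # assumed return signature
        loss = F.mse_loss(recon, data) + vq_loss  # reconstruction + codebook terms
        loss.backward()
        optimizer.step()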
Example #8
def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    cudnn.benchmark = True

    if args.model == 'res18':
        net = resnet.ResNet18(num_classes=40).cuda()
    elif args.model == 'resnext':
        net = resnext.ResNeXt(cardinality=args.cardinality,
                              depth=args.depth,
                              nlabels=40,
                              base_width=args.base_width,
                              widen_factor=args.widen_factor).cuda()
    elif args.model == 'res_cifar':
        net = resnet_cifar.resnet20(num_classes=40).cuda()

    state_dict = torch.load(f'{args.model_path}/model_200.pth')
    net.load_state_dict(state_dict)

    criterion = nn.CrossEntropyLoss().cuda()
    metric_logger = utils.Logger(os.path.join(args.save_path, 'test_metric.log'))

    ''' Misclassification Detection '''
    print('')
    print('Misclassification Detection')
    print('data: CIFAR40')
    print('')
    
    train_loader = dataloader.train_loader(args.data_root,
                                           args.data,
                                           args.batch_size)

    test_loader, test_targets = dataloader.test_loader(args.data_root,
                                                       args.in_data,
                                                       args.batch_size,
                                                       mode='test')
    
    in_softmax, in_openmax, in_softlogit, in_openlogit, in_open_pred, \
                correct, labels = test(net, train_loader, test_loader)

    acc, auroc, aurc, eaurc, \
    fpr, aupr, ece, li_acc, li_count = metrics.md_metrics_om(in_openlogit,
                                                            in_openmax,
                                                            correct,
                                                            labels)
                                                          

    plot.draw_reliability_diagrams(args.save_path, li_acc, li_count, ece)
    metric_logger.write(['Miscls Detect', '\t\t',
                         'ACCURACY', '\t',
                         'AUROC', '\t\t',
                         'AURC', '\t\t',
                         'E-AURC', '\t\t',
                         'AUPR', '\t\t',
                         'FPR@95%TPR', '\t',
                         'ECE'])
    metric_logger.write(['\t', '\t\t',
                         acc * 100, '\t',
                         auroc * 100, '\t',
                         aurc * 1000, '\t',
                         eaurc * 1000, '\t',
                         aupr * 100, '\t',
                         fpr * 100, '\t',
                         ece * 100])

    with open(f'{args.save_path}/base-scores.csv', 'w', newline='') as f:
        columns = ["",
                   "ACC",
                   "AUROC",
                   "AURC",
                   "E-AURC",
                   "AUPR",
                   "FPR@95%TPR",
                   "ECE"]
        writer = csv.writer(f)
        writer.writerow(['* Misclassification Detection'])
        writer.writerow(columns)
        writer.writerow(
            ['',
             acc * 100,
             auroc * 100,
             aurc * 1000,
             eaurc * 1000,
             aupr * 100,
             fpr * 100,
             ece * 100])
        writer.writerow([''])


    ''' test '''
    print('')
    print('Open Set Recognition-Test')
    print('known data: CIFAR40')
    print('unknown data: CIFAR60')
    print('')
    in_test_loader = dataloader.in_dist_loader(args.data_root,
                                               args.in_data,
                                               args.batch_size,
                                               'test')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'cifar60',
                                                 args.batch_size,
                                                 'test')

    in_softmax, in_openmax, in_softlogit, in_openlogit,\
        _, _, _ = test(net, train_loader, in_test_loader)
    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-CIFAR60', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='CIFAR60',
                 mode='test', task='OsR')


    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Open Set Recognition Test-CIFAR60'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
            f1])
        writer.writerow([''])




    ''' Out of Distribution Detection '''
    ''' test '''
    print('')
    print('Out of Distribution Detection-Test')
    print('known data: CIFAR40')
    print('unknown data: SVHN')
    print('')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'svhn',
                                                 args.batch_size,
                                                 'test')

    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-SVHN', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='SVHN',
                 mode='test', task='OoD')


    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Out of Distribution Detection Test-SVHN'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
            f1])
        writer.writerow([''])



    print('')
    print('Out of Distribution Detection-Test')
    print('known data: CIFAR40')
    print('unknown data: LSUN-FIX')
    print('')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'lsun-fix',
                                                 args.batch_size,
                                                 'test')

    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-LSUNFIX', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='LSUN-FIX',
                 mode='test', task='OoD')

    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Out of Distribution Detection Test-LSUN-FIX'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
            f1])
        writer.writerow([''])



    print('')
    print('Out of Distribution Detection-Test')
    print('known data: CIFAR40')
    print('unknown data: new-TinyImageNet158')
    print('')
    ood_test_loader = dataloader.out_dist_loader(args.data_root,
                                                 'new-tinyimagenet158',
                                                 args.batch_size,
                                                 'test')

    out_softmax, out_openmax, out_softlogit, out_openlogit,\
        _, _, _ = test(net, train_loader, ood_test_loader)
        
        
    f1, li_f1, li_thresholds, \
    li_precision, li_recall = metrics.f1_score(1-np.array(in_openmax), 1-np.array(out_openmax),
                                              pos_label=0)
    ood_scores = metrics.ood_metrics(1-np.array(in_openmax), 1-np.array(out_openmax))

    metric_logger.write(['TEST CIFAR40-Tiny158', '\t',
                         'FPR@95%TPR', '\t',
                         'DET ERR', '\t',
                         'AUROC', '\t\t',
                         'AUPR-IN', '\t',
                         'AUPR-OUT', '\t',
                         'F1 SCORE', '\t',
                         ''])
    metric_logger.write(['', '\t\t\t',
                         100 * ood_scores['FPR95'], '\t',
                         100 * ood_scores['DTERR'], '\t',
                         100 * ood_scores['AUROC'], '\t',
                         100 * ood_scores['AUIN'], '\t',
                         100 * ood_scores['AUOUT'], '\t',
                         f1, '\t',
                         ''])

    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data='new-TinyImageNet158',
                 mode='test', task='OoD')

    with open(f'{args.save_path}/base-scores.csv', 'a', newline='') as f:
        columns = ["",
                   "FPR@95%TPR",
                   "DET ERR",
                   "AUROC",
                   "AUPR-IN",
                   "AUPR-OUT",
                   "F1 SCORE"]
        writer = csv.writer(f)
        writer.writerow(['* Out of Distribution Detection Test-new-TinyImageNet158'])
        writer.writerow(columns)
        writer.writerow(
            ['', 100 * ood_scores['FPR95'],
             100 * ood_scores['DTERR'],
             100 * ood_scores['AUROC'],
             100 * ood_scores['AUIN'],
             100 * ood_scores['AUOUT'],
             f1])
        writer.writerow([''])
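The three out-of-distribution blocks above differ only in the dataset name passed to out_dist_loader and the label passed to plot.draw_f1. A compact equivalent of the score computation and plotting, reusing only the helpers already present in this example (the CSV/logger output is omitted for brevity):

for ood_name, label in [('svhn', 'SVHN'),
                        ('lsun-fix', 'LSUN-FIX'),
                        ('new-tinyimagenet158', 'new-TinyImageNet158')]:
    ood_test_loader = dataloader.out_dist_loader(args.data_root, ood_name,
                                                 args.batch_size, 'test')
    _, out_openmax, _, _, _, _, _ = test(net, train_loader, ood_test_loader)
    f1, li_f1, li_thresholds, _, _ = metrics.f1_score(
        1 - np.array(in_openmax), 1 - np.array(out_openmax), pos_label=0)
    ood_scores = metrics.ood_metrics(
        1 - np.array(in_openmax), 1 - np.array(out_openmax))
    plot.draw_f1(args.save_path, f1, li_f1, li_thresholds, data=label,
                 mode='test', task='OoD')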
Example #9
def main(model, path):
	# note: `model` is the module that defines Rnn_Lstm (it is rebound to the
	# instantiated network below); `path` is accepted but unused in this snippet
	t1 = time.time()

	in_features = 300
	hidden_size = 256
	layer_num = 2

	print("\n")
	print(" Loading test Data ... ")
	print("="*30)
	print("\n")

	test_dl, tst = dataloader.test_loader("Data/test.csv")

	print(" Got test_dataloader ... ")
	print("="*30)
	print("\n")

	print(" Loading LSTM Model ...")
	print("="*30)
	print("\n")
	model = model.Rnn_Lstm(in_features, hidden_size, layer_num, 391, phase='Test')

	print(" Loading Weights on the Model ...")
	print("="*30)
	print("\n")

	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	model.to(device)

	# map_location keeps the load working when CUDA is unavailable
	state_dict = torch.load('Model_Checkpoints/checkpoint_5.pth', map_location=device)
	model.load_state_dict(state_dict)

	model.eval()

	print(" Predicting on the test data ...")
	print("="*30)
	print("\n")

	predictions = []
	
	# run inference without tracking gradients
	with torch.no_grad():
		for x, _ in tqdm.tqdm(test_dl):
			x = x.to(device)
			preds = model(x)
			predictions.extend(preds.cpu().numpy())

	with open("Data/labels.txt", "rb") as fp:
		labels = pickle.load(fp)


	test_df = pd.read_csv("Data/test.csv")
	result_df = pd.DataFrame(data=predictions, columns=labels)
	test_results = pd.concat([test_df, result_df], axis=1)

	print("\n Saving Results to test_results.csv .")
	print("="*30)
	print("\n")

	result_folder = "Results"
	project_path = os.getcwd()
	save_path = os.path.join(project_path, result_folder)

	if not os.path.exists(result_folder):
		os.makedirs(result_folder)
	else:
		shutil.rmtree(save_path)
		os.makedirs(result_folder)

	test_results.to_csv(save_path+'/test_results.csv', index=False)

	print("Completed \n")