def main():
    """Render a Grad-CAM heatmap for one preprocessed image through a trained network.

    Loads a searched Network from "weights-3.pt", forwards a tensor loaded from
    ``args.tens_path``, back-propagates from one predicted class, and writes a
    32x32 heatmap/image blend to "blend_4.png".
    """
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # 10 output classes are hard-coded here (CIFAR-10-sized head) — TODO confirm
    # this matches the checkpoint restored below.
    model = Network(args.init_channels, 10, args.layers, criterion, spaces_dict[args.search_space]).cuda()
    model.load_state_dict(torch.load("weights-3.pt"))
    model.eval()
    #for module in model.named_modules():
    #    print (module[0])
    # Open image: raw pixels (for blending) plus a pre-saved tensor (for the model).
    raw_image = cv2.imread(args.img_path)
    tens = np.load(args.tens_path, allow_pickle=True)
    # Add a batch dimension; assumes the saved array is already CHW — TODO confirm.
    image = torch.from_numpy(tens).unsqueeze(0)#.unsqueeze(0)
    image = image.cuda()
    print (image.size())
    # Plain forward pass, printed only for debugging next to the GradCAM pass below.
    pred = model(image)
    print (pred)
    # GCAM: forward through the wrapper, then backprop from predictions[0][1]
    # (presumably a (score, class-index) pair from a ranked list — verify
    # against the GradCAM implementation).
    gcam = GradCAM(model=model)
    predictions = gcam.forward(image)
    top_idx = predictions[0][1]
    print(predictions, len(predictions), top_idx)
    # Attribution is taken at the last cell of a 20-cell network.
    target_layer = "cells.19"
    gcam.backward(idx=top_idx)
    region = gcam.generate(target_layer=target_layer)
    # Colorize the region map with the reversed jet colormap, drop the alpha
    # channel, and scale into 8-bit range.
    cmap = cm.jet_r(region)[..., :3] * 255.0
    cmap = cv2.resize(cmap, (32, 32))
    # Simple 50/50 blend of heatmap and original image.
    blend = (cmap+raw_image)/2
    cv2.imwrite("blend_4.png", blend)
    print (region.shape, cmap.shape, raw_image.shape)
def main():
    """Run architecture search, training the supernet and an auxiliary ResNet-18.

    The Architect optimizes with respect to ``model2`` (the ResNet-18) on the
    validation split, while ``model`` (the searchable supernet) and ``model2``
    each have their own SGD optimizer and cosine LR schedule.  Metrics go to
    the module-level TensorBoard ``writer``; supernet weights are checkpointed
    each epoch.
    """
    torch.set_num_threads(3)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility / device setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    # Supernet whose architecture parameters are being searched.
    model = Network(args.init_channels, n_classes, args.layers, criterion,
                    spaces_dict[args.search_space])
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Auxiliary ResNet-18 trained alongside the supernet; the Architect below
    # is constructed over this model.
    model2 = ResNet18()
    model2 = model2.cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    optimizer2 = torch.optim.SGD(model2.parameters(), args.learning_rate2,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay2)

    if args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
        train_data = dset.CIFAR10(root=args.data, train=True, download=True,
                                  transform=train_transform)
    elif args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
        train_data = dset.CIFAR100(root=args.data, train=True, download=True,
                                   transform=train_transform)
    elif args.dataset == 'svhn':
        train_transform, valid_transform = utils._data_transforms_svhn(args)
        train_data = dset.SVHN(root=args.data, split='train', download=True,
                               transform=train_transform)
    else:
        # BUG FIX: an unsupported dataset previously fell through silently and
        # crashed later with a NameError on train_data.
        raise ValueError('unsupported dataset: %s' % args.dataset)

    # Split the training set into search-train / search-valid portions.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    if 'debug' in args.save:
        split = args.batch_size
        num_train = 2 * args.batch_size

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:num_train]),
        pin_memory=True)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer2, float(args.epochs), eta_min=args.learning_rate_min)

    # architect = Architect(model, args)
    architect = Architect(model2, args)

    for epoch in range(args.epochs):
        scheduler.step()
        # BUG FIX: scheduler2 was created but never stepped, so lr2 stayed at
        # its base value and model2's learning rate never annealed.
        scheduler2.step()
        lr = scheduler.get_lr()[0]
        lr2 = scheduler2.get_lr()[0]

        if args.cutout:
            # increase the cutout probability linearly throughout search
            train_transform.transforms[-1].cutout_prob = args.cutout_prob * epoch / (args.epochs - 1)
            logging.info('epoch %d lr %e cutout_prob %e', epoch, lr,
                         train_transform.transforms[-1].cutout_prob)
        else:
            logging.info('epoch %d lr %e', epoch, lr)

        genotype = model.genotype()
        logging.info('genotype = %s', genotype)
        print(F.softmax(model.alphas_normal, dim=-1))
        print(F.softmax(model.alphas_reduce, dim=-1))

        # training
        train_acc, train_obj = train(train_queue, valid_queue, model, architect,
                                     criterion, optimizer, optimizer2, lr, lr2,
                                     model2, epoch)
        logging.info('train_acc %f', train_acc)
        writer.add_scalar('Acc/train', train_acc, epoch)
        writer.add_scalar('Obj/train', train_obj, epoch)

        # validation — note it evaluates the auxiliary model2, not the supernet.
        valid_acc, valid_obj = infer(valid_queue, model2, criterion)
        logging.info('valid_acc %f', valid_acc)
        writer.add_scalar('Acc/valid', valid_acc, epoch)
        writer.add_scalar('Obj/valid', valid_obj, epoch)

        utils.save(model, os.path.join(args.save, 'weights.pt'))

    writer.close()
def main():
    """Run architecture search with optional perturbation of the alpha weights.

    Warm-starts the supernet from "weights-9.pt", selects a perturbation
    strategy from ``args.perturb_alpha`` ('none' / 'pgd_linf' / 'random'),
    and trains with a torchvision ResNet-18 as an auxiliary model.  Validation
    is measured on the ResNet-18; supernet weights are checkpointed per epoch.
    """
    torch.set_num_threads(3)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility / device setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Select the alpha-perturbation strategy.
    if args.perturb_alpha == 'none':
        perturb_alpha = None
    elif args.perturb_alpha == 'pgd_linf':
        perturb_alpha = Linf_PGD_alpha
    elif args.perturb_alpha == 'random':
        perturb_alpha = Random_alpha
    else:
        # BUG FIX: an unrecognized value previously fell through and crashed
        # later with a NameError on perturb_alpha.
        raise ValueError('unknown perturb_alpha: %s' % args.perturb_alpha)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    # Auxiliary torchvision ResNet-18, used both in train() and for validation.
    resnet18 = models.resnet18(num_classes=10)
    resnet18 = resnet18.cuda()
    model2 = resnet18

    model = Network(args.init_channels, n_classes, args.layers, criterion,
                    spaces_dict[args.search_space])
    model = model.cuda()
    # Warm-start the supernet from a previous search checkpoint.
    model.load_state_dict(torch.load("weights-9.pt"))
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
        train_data = dset.CIFAR10(root=args.data, train=True, download=True,
                                  transform=train_transform)
    elif args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
        train_data = dset.CIFAR100(root=args.data, train=True, download=True,
                                   transform=train_transform)
    elif args.dataset == 'svhn':
        train_transform, valid_transform = utils._data_transforms_svhn(args)
        train_data = dset.SVHN(root=args.data, split='train', download=True,
                               transform=train_transform)
    else:
        # BUG FIX: an unsupported dataset previously fell through silently and
        # crashed later with a NameError on train_data.
        raise ValueError('unsupported dataset: %s' % args.dataset)

    # Split the training set into search-train / search-valid portions.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:num_train]),
        pin_memory=True)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    architect = Architect(model, args)

    # BUG FIX: epsilon_alpha was only bound inside the conditional in the loop
    # body but is passed unconditionally to train(); give it a defined default
    # so the call can never raise NameError.
    epsilon_alpha = None

    for epoch in range(args.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]

        if args.cutout:
            # increase the cutout probability linearly throughout search
            train_transform.transforms[-1].cutout_prob = args.cutout_prob * epoch / (args.epochs - 1)
            logging.info('epoch %d lr %e cutout_prob %e', epoch, lr,
                         train_transform.transforms[-1].cutout_prob)
        else:
            logging.info('epoch %d lr %e', epoch, lr)

        if args.perturb_alpha:
            # Ramp the perturbation radius linearly from 0.03 up to
            # args.epsilon_alpha over the course of the search.
            epsilon_alpha = 0.03 + (args.epsilon_alpha - 0.03) * epoch / args.epochs
            logging.info('epoch %d epsilon_alpha %e', epoch, epsilon_alpha)

        genotype = model.genotype()
        logging.info('genotype = %s', genotype)
        print(F.softmax(model.alphas_normal, dim=-1))
        print(F.softmax(model.alphas_reduce, dim=-1))

        # training
        train_acc, train_obj = train(train_queue, valid_queue, model,
                                     architect, criterion, optimizer, lr,
                                     perturb_alpha, epsilon_alpha, model2,
                                     epoch)
        logging.info('train_acc %f', train_acc)
        writer.add_scalar('Acc/train', train_acc, epoch)
        writer.add_scalar('Obj/train', train_obj, epoch)

        # validation — note it evaluates the auxiliary ResNet-18, not the
        # supernet (the infer(valid_queue, model, criterion) variant was the
        # original behavior before this experiment).
        valid_acc, valid_obj = infer(valid_queue, resnet18, criterion)
        logging.info('valid_acc %f', valid_acc)
        writer.add_scalar('Acc/valid', valid_acc, epoch)
        writer.add_scalar('Obj/valid', valid_obj, epoch)

        utils.save(model, os.path.join(args.save, 'weights.pt'))

    writer.close()
def main():
    """Retrain a searched network on the reweighted dataset.

    Restores compatible weights from "weights-11.pt" (checkpoint keys absent
    from the current model keep their fresh initialization), trains on
    ReweightDataset, and tracks the best top-1 / top-5 validation accuracy,
    checkpointing each epoch.
    """
    torch.set_num_threads(3)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility / device setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # NOTE(review): eval() on a CLI-provided name is acceptable for trusted
    # local runs, but args.arch must never come from untrusted input.
    genotype = eval("genotypes.%s" % args.arch)

    criterion = nn.CrossEntropyLoss().cuda()
    # (criterion was previously constructed twice and .cuda() applied to the
    # model twice; both redundancies removed.)
    model = Network(args.init_channels, n_classes, args.layers, criterion,
                    spaces_dict[args.search_space]).cuda()

    # Partial checkpoint restore: keep only checkpoint keys that exist in
    # this model's state dict, then merge them over the fresh weights.
    pretrained_dict = torch.load("weights-11.pt")
    model_dict = model.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    # Data comes from the project's reweighted datasets rather than the CIFAR
    # loaders used elsewhere in this file.
    train_data = ReweightDataset("newdata", "target_train.csv")
    valid_data = ReweightDataset("newdata_valid", "target_valid.csv")

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    best_acc_top1 = 0
    best_acc_top5 = 0
    for epoch in range(args.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]

        if args.cutout:
            # BUG FIX: this variant builds no torchvision transform pipeline
            # (ReweightDataset supplies its own data), so the previous
            # train_transform.transforms[-1] access always raised NameError.
            # Log and continue instead of crashing.
            logging.info('epoch %d lr %e (cutout requested but no transform '
                         'pipeline to adjust)', epoch, lr)
        else:
            logging.info('epoch %d lr %e', epoch, lr)

        # Linearly ramp drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_acc_5, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc top 1 %f top 5 %f', train_acc, train_acc_5)
        writer.add_scalar('Acc/train', train_acc, epoch)
        writer.add_scalar('Obj/train', train_obj, epoch)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc top 1 %f top 5 %f', valid_acc_top1, valid_acc_top5)
        writer.add_scalar('Acc/valid', valid_acc_top1, epoch)
        writer.add_scalar('Obj/valid', valid_obj, epoch)

        # "Best" checkpoints are keyed on top-1 accuracy only.
        is_best = False
        if valid_acc_top5 > best_acc_top5:
            best_acc_top5 = valid_acc_top5
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)
        utils.save(model, os.path.join(args.save, 'weights.pt'))

    writer.close()