def test():
    """Run the checkpointed ResNeXt over a CIFAR split and dump the raw
    per-sample network outputs and their targets to ``.npy`` files.

    Reads all configuration from ``get_args()``: dataset/split selection,
    checkpoint path (``args.load``), output path (``args.savename``),
    batch size and GPU count.
    """
    args = get_args()
    # CIFAR channel statistics, rescaled from [0, 255] to [0, 1].
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    state = {k: v for k, v in args._get_kwargs()}  # kept for parity with the train script

    # train_set == 0 selects the held-out test split, anything else the train split.
    phase = args.train_set != 0
    test_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mean, std)])
    if args.dataset == 'cifar10':
        test_data = dset.CIFAR10(args.data_path, train=phase,
                                 transform=test_transform, download=True)
        nlabels = 10
    else:
        test_data = dset.CIFAR100(args.data_path, train=phase,
                                  transform=test_transform, download=True)
        nlabels = 100
    test_loader = torch.utils.data.DataLoader(
        test_data, batch_size=args.test_bs, shuffle=False,
        num_workers=args.prefetch, pin_memory=True)

    # Build the model and restore weights. Checkpoints written by a
    # DataParallel run prefix every key with 'module.', which we strip.
    net = CifarResNeXt(args.cardinality, args.depth, nlabels, args.widen_factor)
    loaded_state_dict = torch.load(args.load, map_location='cpu')
    loaded_state_dict = {key[7:]: val for key, val in loaded_state_dict.items()}
    net.load_state_dict(loaded_state_dict)

    # Record the output width BEFORE any DataParallel wrap, which would
    # otherwise hide .stages behind .module.
    feature_dim = net.stages[3]

    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
    use_cuda = args.ngpu > 0
    if use_cuda:
        net.cuda()
    net.eval()

    # time.clock() was removed in Python 3.8; perf_counter is the replacement.
    start = time.perf_counter()
    result = np.zeros((len(test_data), feature_dim), dtype=np.float32)
    result_target = np.zeros(len(test_data))
    print(result.shape)
    with torch.no_grad():  # inference only: no autograd graph needed
        for batch_idx, (data, target) in enumerate(test_loader):
            lo = batch_idx * args.test_bs
            hi = lo + data.size(0)  # the final batch may be short
            result_target[lo:hi] = target.numpy()
            if use_cuda:
                data = data.cuda()  # keep inputs on the same device as the net
            output = net(data)
            result[lo:hi, :] = output.cpu().numpy()
            elapsed = time.perf_counter() - start
            avg = elapsed / (batch_idx + 1)
            print("batch: {0}, average_time: {1}s, time_left:{2}s".format(
                batch_idx, avg, avg * (len(test_loader) - batch_idx - 1)))
            if batch_idx % 1000 == 0:
                # Periodic checkpoint so a long run can be resumed/inspected.
                np.save(args.savename + ".npy", result)
    np.save(args.savename, result)
    np.save(args.savename + "_target", result_target)
def test():
    """Evaluate the checkpointed ResNeXt on the CIFAR test split.

    Computes the mean cross-entropy loss and top-1 accuracy, stores them
    into the ``state`` dict under ``'test_loss'`` / ``'test_accuracy'``,
    and prints the final state. All configuration comes from ``get_args()``.
    """
    args = get_args()
    # CIFAR channel statistics, rescaled from [0, 255] to [0, 1].
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    state = {k: v for k, v in args._get_kwargs()}

    test_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mean, std)])
    if args.dataset == 'cifar10':
        test_data = dset.CIFAR10(args.data_path, train=False,
                                 transform=test_transform, download=True)
        nlabels = 10
    else:
        test_data = dset.CIFAR100(args.data_path, train=False,
                                  transform=test_transform, download=True)
        nlabels = 100
    test_loader = torch.utils.data.DataLoader(
        test_data, batch_size=args.test_bs, shuffle=False,
        num_workers=args.prefetch, pin_memory=True)

    # Restore weights; map_location lets GPU-saved checkpoints load on CPU.
    # DataParallel checkpoints prefix every key with 'module.' — strip it.
    net = CifarResNeXt(args.cardinality, args.depth, nlabels, args.widen_factor)
    loaded_state_dict = torch.load(args.load, map_location='cpu')
    loaded_state_dict = {key[7:]: val for key, val in loaded_state_dict.items()}
    net.load_state_dict(loaded_state_dict)

    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
    use_cuda = args.ngpu > 0
    if use_cuda:
        net.cuda()
    net.eval()

    loss_avg = 0.0
    correct = 0
    with torch.no_grad():  # inference only: no autograd graph needed
        for data, target in test_loader:
            if use_cuda:  # only move to GPU when one was requested
                data, target = data.cuda(), target.cuda()
            output = net(data)
            loss = F.cross_entropy(output, target)
            # top-1 accuracy: index of the max logit per sample
            pred = output.max(1)[1]
            correct += pred.eq(target).sum().item()
            # .item() replaces the pre-0.4 `loss.data[0]` idiom
            loss_avg += loss.item()

    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
    print(state)
num_workers=args.prefetch, pin_memory=True) # Init checkpoints if not os.path.isdir(args.save): os.makedirs(args.save) # Init model, criterion, and optimizer net = CifarResNeXt(args.cardinality, args.depth, nlabels, args.widen_factor) print(net) if args.ngpu > 1: net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.ngpu > 0: net.cuda() optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'], weight_decay=state['decay'], nesterov=True) # train function (forward, backward, update) def train(): net.train() loss_avg = 0.0 for batch_idx, (data, target) in enumerate(train_loader): data, target = torch.autograd.Variable( data.cuda()), torch.autograd.Variable(target.cuda())