Example #1
0
def test():
    """Evaluate a saved CifarResNeXt checkpoint on the CIFAR test set.

    Builds the dataset selected by ``args.dataset`` ('cifar10', otherwise
    CIFAR-100), restores the network weights from ``args.load`` (stripping
    the ``module.`` prefix left by ``nn.DataParallel``), runs a single pass
    over the test loader and prints ``state`` with the averaged test loss
    and accuracy.
    """
    # define default variables
    args = get_args()  # argument parsing factored out into get_args()
    # Per-channel CIFAR statistics, rescaled from [0, 255] to [0, 1].
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    state = {k: v for k, v in args._get_kwargs()}

    # prepare test data parts
    test_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])
    # NOTE(fix): the unconditional CIFAR-100 construction that preceded this
    # if/else was dead code (its result was always overwritten) and forced a
    # needless CIFAR-100 download even for cifar10 runs -- removed.
    if args.dataset == 'cifar10':
        test_data = dset.CIFAR10(args.data_path,
                                 train=False,
                                 transform=test_transform,
                                 download=True)
        nlabels = 10
    else:
        test_data = dset.CIFAR100(args.data_path,
                                  train=False,
                                  transform=test_transform,
                                  download=True)
        nlabels = 100

    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=args.test_bs,
                                              shuffle=False,
                                              num_workers=args.prefetch,
                                              pin_memory=True)

    # initialize model and load from checkpoint
    net = CifarResNeXt(args.cardinality, args.depth, nlabels,
                       args.widen_factor)
    loaded_state_dict = torch.load(args.load)
    # Strip the 'module.' prefix that nn.DataParallel prepends to every key.
    # (fix: dict.iteritems() is Python 2 only -- use items() on Python 3.)
    loaded_state_dict = {key[7:]: val
                         for key, val in loaded_state_dict.items()}
    net.load_state_dict(loaded_state_dict)

    # parallelize model across the requested GPUs
    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
    use_cuda = args.ngpu > 0
    if use_cuda:
        net.cuda()

    # use network for evaluation
    net.eval()

    # calculation part
    loss_avg = 0.0
    correct = 0.0
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for data, target in test_loader:
            # fix: only move batches to the GPU when one is in use; the
            # original called .cuda() unconditionally and crashed on CPU.
            if use_cuda:
                data, target = data.cuda(), target.cuda()

            # forward
            output = net(data)
            loss = F.cross_entropy(output, target)

            # accuracy
            pred = output.data.max(1)[1]
            # .item() keeps `correct` a Python number; accumulating integer
            # tensors would make the final division integer-truncate on
            # older PyTorch versions.
            correct += pred.eq(target.data).sum().item()

            # test loss average
            # fix: loss.data[0] indexing was removed in PyTorch >= 0.5;
            # loss.item() is the supported scalar accessor.
            loss_avg += loss.item()

    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)

    # finally print state dictionary
    print(state)
def test():
    """Extract per-sample network outputs over a CIFAR split and save them.

    Runs the checkpointed model over the split selected by
    ``args.train_set`` (0 -> test split, otherwise train split) and writes
    two numpy files: ``args.savename``(.npy) with the feature matrix and
    ``args.savename + "_target"``(.npy) with the matching labels.
    """
    # define default variables
    args = get_args()  # argument parsing factored out into get_args()
    # Per-channel CIFAR statistics, rescaled from [0, 255] to [0, 1].
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # (fix: removed the unused local `state` dict built from args.)

    # prepare test data parts: train_set == 0 -> test split, else train split
    phase = args.train_set != 0

    test_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mean, std)])
    # NOTE(fix): removed the unconditional CIFAR-100 construction that was
    # always overwritten by the if/else below (it only wasted a download).
    if args.dataset == 'cifar10':
        test_data = dset.CIFAR10(args.data_path, train=phase,
                                 transform=test_transform, download=True)
        nlabels = 10
    else:
        test_data = dset.CIFAR100(args.data_path, train=phase,
                                  transform=test_transform, download=True)
        nlabels = 100

    test_loader = torch.utils.data.DataLoader(
        test_data, batch_size=args.test_bs, shuffle=False,
        num_workers=args.prefetch, pin_memory=True)

    # initialize model and load from checkpoint
    net = CifarResNeXt(args.cardinality, args.depth, nlabels,
                       args.widen_factor)
    # fix: read the feature width BEFORE any DataParallel wrapping below,
    # which would hide the attribute behind net.module and raise.
    # net.stages[3] is presumably the recorded layer's width -- TODO confirm
    # against CifarResNeXt.
    feat_dim = net.stages[3]
    loaded_state_dict = torch.load(args.load, map_location='cpu')
    # Strip the 'module.' prefix that nn.DataParallel prepends to every key.
    # (fix: dict.iteritems() is Python 2 only -- use items() on Python 3.)
    loaded_state_dict = {key[7:]: val
                         for key, val in loaded_state_dict.items()}
    net.load_state_dict(loaded_state_dict)

    # parallelize model across the requested GPUs
    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
    use_cuda = args.ngpu > 0
    if use_cuda:
        net.cuda()

    # use network for evaluation
    net.eval()

    # calculation part
    # fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    start = time.perf_counter()
    result = np.zeros((len(test_data), feat_dim), dtype=np.float32)
    result_target = np.zeros(len(test_data))

    print(result.shape)
    bs = args.test_bs
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for batch_idx, (data, target) in enumerate(test_loader):
            # numpy clips the slice on the final (possibly partial) batch
            result_target[batch_idx * bs:(batch_idx + 1) * bs] = target.numpy()
            # fix: keep the batch on the same device as the model; the
            # original left data on the CPU even after net.cuda().
            if use_cuda:
                data = data.cuda()

            # forward (move back to CPU before converting to numpy)
            output = net(data).cpu().numpy()
            result[batch_idx * bs:(batch_idx + 1) * bs, :] = output
            elapsed = time.perf_counter() - start
            avg = elapsed / (batch_idx + 1)
            print("batch: {0}, average_time: {1}s, time_left:{2}s".format(
                batch_idx, avg, avg * (len(test_loader) - batch_idx - 1)))
            # periodic checkpoint of the partial feature matrix
            if batch_idx % 1000 == 0:
                np.save(args.savename + ".npy", result)
    np.save(args.savename, result)
    np.save(args.savename + "_target", result_target)
Example #3
0
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.prefetch,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=args.test_bs,
                                              shuffle=False,
                                              num_workers=args.prefetch,
                                              pin_memory=True)

    # Init checkpoints
    if not os.path.isdir(args.save):
        os.makedirs(args.save)

    # Init model, criterion, and optimizer
    net = CifarResNeXt(args.cardinality, args.depth, nlabels,
                       args.widen_factor)
    print(net)
    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.ngpu > 0:
        net.cuda()

    optimizer = torch.optim.SGD(net.parameters(),
                                state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'],
                                nesterov=True)

    # train function (forward, backward, update)
    def train():
Example #4
0
                 ops_std, ops_mean, ops_std, inference_time_mean,
                 inference_time_std, len(policy_set)))


#--------------------------------------------------------------------------------------------------------#
trainset, testset = utils.get_dataset(args.model, args.data_dir)
testloader = torchdata.DataLoader(testset,
                                  batch_size=1,
                                  shuffle=False,
                                  num_workers=4)

num_blocks = (args.depth - 2) // 3 * args.cardinality
agent = utils.get_budget_constraint_agent(num_blocks)
dataset = args.model.split('_')[1]
if dataset == 'C10':
    rnet = CifarResNeXt(args.cardinality, args.depth, 10, args.base_width,
                        args.widen_factor)
elif dataset == 'C100':
    rnet = CifarResNeXt(args.cardinality, args.depth, 100, args.base_width,
                        args.widen_factor)

if args.load is not None:
    if args.agent_state == "finetune":
        checkpoint = torch.load(args.load)
        rnet.load_state_dict(checkpoint['resnet'])
        agent.load_state_dict(checkpoint['agent'])
    else:
        loaded_state_dict = torch.load(args.model_dir)
        temp = {}
        for key, val in list(loaded_state_dict.items()):
            temp[key] = val
        loaded_state_dict = temp
Example #5
0
                                               shuffle=True,
                                               num_workers=args.prefetch,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=args.test_bs,
                                              shuffle=False,
                                              num_workers=args.prefetch,
                                              pin_memory=True)

    # Init checkpoints
    if not os.path.isdir(args.save):
        os.makedirs(args.save)

    # Init model, criterion, and optimizer
    net = CifarResNeXt(args.model, args.cardinality, args.depth, nlabels,
                       args.base_width, args.widen_factor, args.band_width,
                       args.preact)
    print(net)
    print(args.preact)

    # initialize model and load from checkpoint
    #net = CifarResNeXt(args.model, args.cardinality, args.depth, nlabels, args.base_width, args.widen_factor)
    if args.load:
        print("loading existing model")
        loaded_state_dict = torch.load(args.load)
        temp = {}
        #for key, val in list(loaded_state_dict.items()):
        # parsing keys for ignoring 'module.' in keys
        #    temp[key[7:]] = val
        #loaded_state_dict = temp
        net.load_state_dict(loaded_state_dict)
Example #6
0
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.prefetch,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=args.test_bs,
                                              shuffle=False,
                                              num_workers=args.prefetch,
                                              pin_memory=True)

    # Init checkpoints
    if not os.path.isdir(args.save):
        os.makedirs(args.save)

    # Init model, criterion, and optimizer
    net = CifarResNeXt(args.cardinality, args.depth, nlabels, args.base_width,
                       args.widen_factor)
    log.write(f'{net}{nextline}')
    log.flush()

    device_ids = list(range(args.ngpu))
    if args.ngpu > 1:
        if args.gpu_id_list:
            # device_ids = list(map(int, args.gpu_id_list.split(',')))
            # os.environ['CUDA_VISIBLE_DEVICES']作用是只允许gpu gpu_id_list='3,5'可用,
            # 然后使用Model = nn.DataParallel(Model, device_ids=[0,1]),作用是从可用的两个gpu中搜索第0和第1个位置的gpu。
            os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id_list
        net = torch.nn.DataParallel(net, device_ids=device_ids)

    if args.ngpu > 0:
        # choose gpu to load model,defalt cuda:0
        net.cuda()