Example #1
def main(check_model, mm=1):

    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    '''trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )'''
    if args.dataset == 'beijing':
        query = ImageDataset_forBeijing(dataset.query, transform=transform_test)
    else:
        query = ImageDataset(dataset.query, transform=transform_test)

    #gallery = ImageDatasetLazy(dataset.gallery, transform=transform_test)
    if args.dataset == 'beijing':
        gallery = ImageDataset_forBeijing(dataset.gallery, transform=transform_test)
    else:
        gallery = ImageDataset(dataset.gallery, transform=transform_test)

    if args.evaluate:
        #print("Evaluate only")
        if mm == 1:
            cost, recall, precision = test(query, gallery, check_model, mm)
            return cost, recall, precision
        else:
            cost, recall, precision, delay = test(query, gallery, check_model,
                                                  mm)
            return cost, recall, precision, delay
Example #2
def train_teacher():
    """
    This function trains a teacher (teacher id) among an ensemble of nb_teachers
    models for the dataset specified.
    :param dataset: string corresponding to dataset (svhn, cifar10)
    :param nb_teachers: total number of teachers in the ensemble
    :param teacher_id: id of the teacher being trained
    :return: True if everything went well
    """
    # If working directories do not exist, create them
    #assert utils.mkdir_if_missing(config.data_dir)
    #assert utils.mkdir_if_missing(config.train_dir)
    print("Initializing dataset {}".format(config.dataset))

    dataset = data_manager.init_img_dataset(
        root=config.data_dir, name=config.dataset,
    )

    # Load the dataset


    for i in range(config.nb_teachers):
        # Retrieve subset of data for this teacher

        if config.dataset == 'celeba':
            data, labels = dataset._data_partition(config.nb_teachers, i)
        else:
            raise ValueError('Unsupported dataset: {}'.format(config.dataset))

        print("Length of training data: " + str(len(data)))

        print('data.shape for each teacher')

        # Define teacher checkpoint filename and full path
        dir_path = os.path.join(config.save_model, 'pate_' + config.dataset + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        #filename = os.path.join(dir_path, str(config.nb_teachers) + '_teachers_' + str(i) + '_resnet.checkpoint.pth.tar')
        filename = os.path.join(dir_path, str(config.nb_teachers) + '_teachers_' + str(i) + config.arch + '.checkpoint.pth.tar')
        print('save path for teacher {} is {}'.format(i, filename))

        network.train_each_teacher(config.teacher_epoch, data, labels, dataset.test_data, dataset.test_label, filename)


    return True
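The per-teacher split above relies on dataset._data_partition, which is specific to that repo. As a rough illustration of the idea, here is a minimal sketch of splitting a dataset into nb_teachers disjoint shards; the helper name and the equal-size contiguous split are assumptions, not the repo's actual implementation.

import numpy as np

def partition_for_teacher(data, labels, nb_teachers, teacher_id):
    """Return the disjoint shard of (data, labels) assigned to one teacher.

    Hypothetical helper: every teacher gets a contiguous, equal-sized slice,
    mirroring what dataset._data_partition presumably does.
    """
    assert 0 <= teacher_id < nb_teachers
    batch_len = len(data) // nb_teachers          # examples per teacher
    start = teacher_id * batch_len
    end = (teacher_id + 1) * batch_len
    return data[start:end], labels[start:end]

# Example: 10 teachers over 1000 fake examples with 40-dim binary labels
data = np.random.rand(1000, 3, 32, 32)
labels = np.random.randint(0, 2, size=(1000, 40))
shard_x, shard_y = partition_for_teacher(data, labels, nb_teachers=10, teacher_id=3)
print(shard_x.shape, shard_y.shape)   # (100, 3, 32, 32) (100, 40)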
Example #3
            img = Image.open(img_path).convert('RGB')
            got_img = True
        except IOError:
            print("IOError when reading '{}'. Will retry.".format(img_path))
            pass
    return img


# Re-implement torch's Dataset
class ImageDataset(Dataset):
    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        img = read_img(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid


# img = Image.open(img_path).convert('RGB')

if __name__ == '__main__':
    import data_manager
    dataset = data_manager.init_img_dataset(root='C:\\Users\\surface\\Desktop',
                                            name='market1501')  # name is required by init_img_dataset; 'market1501' assumed here
    train_loader = ImageDataset(dataset.train)
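ImageDataset only indexes single samples; batching, shuffling and worker processes come from wrapping it in torch.utils.data.DataLoader. A minimal sketch (the transform, batch size and worker count here are arbitrary choices, not values from the snippet above):

import torchvision.transforms as T
from torch.utils.data import DataLoader

transform = T.Compose([
    T.Resize((256, 128)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

train_loader = DataLoader(
    ImageDataset(dataset.train, transform=transform),  # dataset from data_manager above
    batch_size=32, shuffle=True, num_workers=4, pin_memory=False, drop_last=True,
)

for batch_idx, (imgs, pids, camids) in enumerate(train_loader):
    print(batch_idx, imgs.shape, pids[:4], camids[:4])
    break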
Example #4
def prepare_student_data(nb_teachers, save=False):
    """
  Takes a dataset name and the size of the teacher ensemble and prepares
  training data for the student model, according to parameters indicated
  in flags above.
  :param dataset: string corresponding to mnist, cifar10, or svhn
  :param nb_teachers: number of teachers (in the ensemble) to learn from
  :param save: if set to True, will dump student training labels predicted by
               the ensemble of teachers (with Laplacian noise) as npy files.
               It also dumps the clean votes for each class (without noise) and
               the labels assigned by teachers
  :return: pairs of (data, labels) to be used for student training and testing
  """

    # Load the dataset
    if config.dataset == 'celeba':
        dataset = data_manager.init_img_dataset(root=config.data_dir,
                                                name=config.dataset)
        test_data = dataset.test_data
        test_labels = dataset.test_label

    else:
        print("Check value of dataset flag")
        return False

    # Make sure there is data leftover to be used as a test set
    assert config.stdnt_share < len(test_data)

    # Prepare [unlabeled] student training data (subset of test set)
    stdnt_data = test_data[:config.stdnt_share]
    # Compute teacher predictions for student training data
    if config.reuse_vote:
        # reuse previously saved clean votes; stdnt_share may differ between runs
        #dir_path = os.path.join(config.save_model,'pate_'+str(config.nb_teachers))
        dir_path = os.path.join(config.save_model, config.dataset)
        dir_path = os.path.join(dir_path,
                                'pate_num_teacher_' + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filepath = dir_path + '/_teacher_votes.npy'
        # Prepare filepath for numpy dump of labels produced by noisy aggregation
        teachers_preds = np.load(filepath)
        teachers_preds = teachers_preds[:config.stdnt_share]
        ori_filepath = dir_path + '_ori_teacher_votes.npy'
        ori_teachers_preds = np.load(ori_filepath)
    else:
        teachers_preds = ensemble_preds(nb_teachers, stdnt_data)
        ori_teachers_preds = teachers_preds  # in the shape of (nb_teacher, nb_data, dim)
        teachers_preds = np.sum(teachers_preds, axis=0)
        dir_path = os.path.join(config.save_model, config.dataset)
        dir_path = os.path.join(dir_path,
                                'pate_num_teacher_' + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filepath = dir_path + '/_teacher_votes.npy'
        ori_filepath = dir_path + '_ori_teacher_votes.npy'
        with open(filepath, mode='wb') as file_obj:
            np.save(file_obj, teachers_preds)
        with open(ori_filepath, mode='wb') as file_obj:
            np.save(file_obj, ori_teachers_preds)

    if config.use_tau:
        tau_teachers_preds = np.zeros(teachers_preds.shape)
        for idx in range(len(tau_teachers_preds)):
            tau_teachers_preds[idx] = tau_limit(ori_teachers_preds[:, idx, :])

        preds_tau = np.asarray(tau_teachers_preds, dtype=np.float32)
        print('preds_tau', preds_tau[1, ])
        count_zero_list = config.nb_teachers * np.ones(
            [config.stdnt_share, config.nb_labels]) - teachers_preds
        print('shape of count_zero', count_zero_list.shape)
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale, count_zero_list=count_zero_list)
        acct.compose_mechanism(gaussian, coeff=config.stdnt_share)
    else:
        acct.compose_mechanism(gaussian, coeff=config.stdnt_share)
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale)
    print('shape of teachers_pred', teachers_preds.shape)
    # Aggregate teacher predictions to get student training labels

    # Print accuracy of aggregated label
    ac_ag_labels = hamming_accuracy(stdnt_labels,
                                    test_labels[:config.stdnt_share],
                                    torch=False)
    print("Accuracy of the aggregated labels: " + str(ac_ag_labels))
    current_eps = acct.get_eps(config.delta)
    print('eps after data independent composition', current_eps)
    # Store unused part of test set for use as a test set after student training
    stdnt_test_data = test_data[config.stdnt_share:]
    stdnt_test_labels = test_labels[config.stdnt_share:]

    return stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels
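aggregation.aggregation_knn is repo-specific, but the underlying noisy-vote idea for a multi-label task can be sketched in a few lines: sum the teachers' binary votes per label, add Gaussian noise with scale gau_scale, and assign a label when a noisy majority voted for it. This is an illustrative sketch of the mechanism, not the repo's exact aggregation.

import numpy as np

def noisy_threshold_aggregation(votes, nb_teachers, gau_scale, rng=None):
    """votes: (num_queries, nb_labels) array of summed binary teacher votes."""
    rng = np.random.default_rng(rng)
    noisy = votes + rng.normal(scale=gau_scale, size=votes.shape)
    # a label is assigned when a (noisy) majority of teachers voted for it
    return (noisy > nb_teachers / 2.0).astype(np.int32)

# 3 queries, 5 labels, 50 teachers
votes = np.array([[48,  2, 30, 25, 0],
                  [10, 40, 26,  5, 1],
                  [ 0,  0, 49, 49, 3]])
print(noisy_threshold_aggregation(votes, nb_teachers=50, gau_scale=4.0, rng=0))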
Example #5
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))

    # tensorboardX
    # writer = SummaryWriter(log_dir=osp.join(args.save_dir,'summary'))

    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root, name=args.dataset, split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled, cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    if args.random_erasing:
        transform_train = T.Compose([
            T.Random2DTranslation(args.height, args.width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=args.probability, mean=[0.0, 0.0, 0.0]),
        ])
        

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    if args.loss == 'xent,htri':
        trainloader = DataLoader(
            ImageDataset(dataset.train, transform=transform_train),
            sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
            batch_size=args.train_batch, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=True,
        )
    elif args.loss == 'xent':
        trainloader = DataLoader(
            ImageDataset(dataset.train, transform=transform_train),
            batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=True,
        )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss=args.loss)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)
    
    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    if args.stepsize > 0:
        if not args.warmup:
            scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return
    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            #lr = 1e-3 * len(args.gpu_devices)
            lr = 1e-3
        elif ep < 180:
            #lr = 1e-4 * len(args.gpu_devices)
            lr = 1e-4
        elif ep < 300:
            #lr = 1e-5 * len(args.gpu_devices)
            lr = 1e-5
        elif ep < 320:
            #lr = 1e-5 * 0.1 ** ((ep - 320) / 80) * len(args.gpu_devices)
            lr = 1e-5 * 0.1 ** ((ep - 320) / 80)
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            #lr = 1e-4 * len(args.gpu_devices)
            lr = 1e-4
        else:
            #lr = 1e-5 * len(args.gpu_devices)
            lr = 1e-5
        for p in optimizer.param_groups:
            p['lr'] = lr
    
    length = len(trainloader)
    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    #best_rerank1 = -np.inf
    #best_rerankepoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        if args.stepsize > 0:
            if args.warmup:
                adjust_lr(optimizer, epoch + 1)
            else:
                scheduler.step()
        train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu=use_gpu, summary=None, length=length)
        train_time += round(time.time() - start_train_time)
        
        if ((epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(epoch, model, queryloader, galleryloader, use_gpu=True, summary=None)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
            ####### Best Rerank
            #is_rerankbest = rerank1 > best_rerank1
            #if is_rerankbest:
            #    best_rerank1 = rerank1
            #    best_rerankepoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))

    # writer.close()  # SummaryWriter above is commented out, so there is no writer to close
    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))
    #print("==> Best Rerank-1 {:.1%}, achieved at epoch {}".format(best_rerank1, best_rerankepoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
Example #6
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop([args.height, args.width]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        torchvision.transforms.RandomErasing(p=0.5,
                                             scale=(0.02, 0.4),
                                             ratio=(0.3, 3.33),
                                             value=(0.4914, 0.4822, 0.4465))
        # T.RandomErasing(probability=0.5, sh=0.4, mean=(0.4914, 0.4822, 0.4465)),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'cent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_cent = CenterLoss(num_classes=dataset.num_train_pids,
                                feat_dim=model.feat_dim,
                                use_gpu=use_gpu)

    optimizer_model = torch.optim.Adam(model.parameters(),
                                       lr=args.lr,
                                       weight_decay=args.weight_decay)
    optimizer_cent = torch.optim.SGD(criterion_cent.parameters(),
                                     lr=args.lr_cent)

    # only the optimizer_model uses a learning rate schedule
    # if args.stepsize > 0:
    #     scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)
    # ------ Modify lr_schedule here ------
    current_schedule = init_lr_schedule(schedule=args.schedule,
                                        warm_up_epoch=args.warm_up_epoch,
                                        half_cos_period=args.half_cos_period,
                                        lr_milestone=args.lr_milestone,
                                        gamma=args.gamma,
                                        stepsize=args.stepsize)

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer_model,
                                                  lr_lambda=current_schedule)
    # ------ Please refer to the args.xxx for details of the hyperparameters ------
    # embed()
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_cent, optimizer_model,
              optimizer_cent, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.schedule: scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (
                epoch + 1) % args.eval_step == 0 or (epoch +
                                                     1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
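init_lr_schedule is repo-specific; the key point is that LambdaLR multiplies the optimizer's initial lr by whatever lr_lambda(epoch) returns. A minimal sketch of a warm-up-then-step factor is shown below; the schedule shape, breakpoints and the stand-in model are illustrative assumptions, not the repo's values.

import torch

model = torch.nn.Linear(10, 2)                       # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)

def warmup_then_step(epoch, warm_up_epoch=10, stepsize=40, gamma=0.1):
    if epoch < warm_up_epoch:                        # linear warm-up to a factor of 1.0
        return (epoch + 1) / warm_up_epoch
    return gamma ** ((epoch - warm_up_epoch) // stepsize)

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=warmup_then_step)

for epoch in range(100):
    optimizer.step()          # a real training loop would compute gradients first
    scheduler.step()
    if epoch % 20 == 0:
        print(epoch, scheduler.get_last_lr())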
Example #7
        sys.stdout = Logger(
            osp.join(args.save_dir,
                     'log_train.txt'))  # Log file is saved in log_train.txt
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Uncomment when gpu is used
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    dataset = data_manager.init_img_dataset(root=args.root, name=args.dataset)

    # 3 dataloader: train, query, gallery
    # Train needs augmentation
    transform_train = T.Compose([
        transform.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
Example #8
def main():
    global Temporal_corr_prob
    global paths, sorted_frame_list_all, max_frame

    Temporal_corr_prob = np.zeros((9, 9, 5))

    Temporal_corr_prob = np.load('./t_5_corr.npy')
    Temporal_corr_prob[8, :, -1] = [
        0.51415094, 0.20754717, 0.07075472, 0.25, 0.08962264, 0.45283019,
        0.0754717, 0.49528302, 1
    ]
    #Temporal_corr_prob[8,:,-1] = [0.96295517, 0.96295517, 0.64197011, 0.96295517, 0.77036413, 0.96295517,0.64197011, 0.96295517, 1]
    #800 Temporal_corr_prob[8,:,-1] = [0.10991234, 0.03641268, 0.01213756, 0.04517869, 0.01416049, 0.09844909,0.01213756, 0.10249494,1]
    #480 Temporal_corr_prob[8,:,-1] = [0.34993271 , 0.12651413 , 0.04306864,  0.16689098 , 0.05114401,  0.30417227, 0.04306864, 0.32570659,1]
    #720 Temporal_corr_prob[8,:,-1] = [0.45967742, 0.1733871,  0.06451613, 0.22983871, 0.07258065, 0.40725806, 0.06451613, 0.43951613,1]
    #600 Temporal_corr_prob[8,:,-1] = [0.4169468,0.15467384,0.0537996,0.19166106,0.0672495,0.35642233,0.05716207,0.39340955,1]
    #1080 Temporal_corr_prob[8,:,-1] = [0.55533199, 0.23541247, 0.09054326, 0.28973843, 0.11468813, 0.52515091,0.09657948, 0.58551308,1]
    #Temporal_corr_prob[8,:,-1] = [0.49708912, 0.17913121, 0.06717421, 0.2507837,  0.08060905, 0.43439319, 0.07165249, 0.46574116, 1]
    #Temporal_corr_prob[8,2,29] += 0.1
    #Temporal_corr_prob[8,8,29] -= 0.1
    #Temporal_corr_prob[:,:,0:-1] = 0
    #Temporal_corr_prob[:,:,-1] = Spatial_corr_prob
    #Temporal_corr_prob[8,:,-1] = [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    #gallery = ImageDatasetLazy(dataset.gallery, transform=transform_test)
    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    paths = dict()
    life_frequency = []
    stand_frequency = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}

    frame_list = {
        0: dict(),
        1: dict(),
        2: dict(),
        3: dict(),
        4: dict(),
        5: dict(),
        6: dict(),
        7: dict()
    }

    change = np.zeros((9, 9))
    cal_v = np.zeros(9)
    total_number = 0
    min_frame = 10000000
    max_frame = -1
    with torch.no_grad():
        for batch_idx, (names, imgs, pids, camids,
                        fids) in enumerate(trainloader):
            for s_index in range(len(names)):
                if paths.get(int(pids[s_index])) is None:
                    paths[int(pids[s_index])] = []

                fid_ = fids[s_index] + cam_offsets[camids[s_index]]
                #if paths[int(pids[s_index])] != []:
                #    if paths
                paths[int(pids[s_index])].append((fid_, camids[s_index]))

                if int(fid_) >= max_frame:
                    max_frame = int(fid_)
                if int(fid_) <= min_frame:
                    min_frame = int(fid_)

                if frame_list[int(camids[s_index])].get(int(fid_)) is None:
                    frame_list[int(camids[s_index])][int(fid_)] = []

                frame_list[int(camids[s_index])][int(fid_)].append(
                    int(pids[s_index]))

                cal_v[camids[s_index]] += 1
                total_number += 1
    print("Max frame and min frame : ", max_frame, min_frame)
    print("")
    sorted_frame_list_all = []  #{ 0 : list(), 1:list(),2:list(),3:list(),4:list(),5:list(),6:list(),7:list()}
    for ind in range(8):
        for key in list(frame_list[ind].keys()):
            tmp_list = frame_list[ind][key]
            tmp_list.insert(0, ind)
            tmp_list.insert(0, key)

            #sorted_frame_list[ind].append(tmp_list)
            sorted_frame_list_all.append(tmp_list)
        #sorted_frame_list[ind] = sorted(sorted_frame_list[ind], key=lambda x: x[0])
        #print("Sorted Index : ",ind, "With term : ",len(sorted_frame_list[ind]))
    sorted_frame_list_all = sorted(sorted_frame_list_all, key=lambda x: x[0])
    #input()
    print("Sorted Index : ", ind, "With term : ", len(sorted_frame_list_all))
    #227540 49700

    #version1(paths,sorted_frame_list)
    #baseline(paths,sorted_frame_list)
    '''global frame_window_size
    global threshold_value
    a = np.zeros(100)
    b = np.zeros(100)
    for i in range(100):
        a[i] = 10*i + 10
        b[i] = pow(0.42,i)

    result = []
    for i in range(100):
        for j in range(100):
            frame_window_size = a[i]
            threshold_value = b[j]
            t,f = version3(paths,sorted_frame_list_all,max_frame)
            print("Time : ",i,j,"with tot : ",t, " and f : ",f, " setting : ",frame_window_size,threshold_value)

            if f < 50 and t < 600000:
                result.append((t,f))
    print(result)'''
    from bayes_opt import BayesianOptimization

    # Bounded region of parameter space

    #pbounds = {'t0':(0.6,1.5),'t1':(0.6,1.5),'t2':(0.2,0.8),'t3':(0.6,1.2),'t4':(0.2,0.8),'t5':(0.5,1.2),'t6':(0.2,0.8),'t7':(0.3,1.2),}

    #wrapper =

    for i in [
            0.96, 0.965, 0.97, 0.975, 0.98, 0.99
    ]:  #1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0,2.1,2.2,2.3,2.4,2.8,3,4,5,6,10,100,1e5,1e6]:#0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.735,0.80,0.85,0.9,
        #for k in [7,13,17,20,23,27,31,71,103,143]:
        j = i
        t, f = wrapper(j)
        print(j, ",", t, ",", f)

    #version2(paths,sorted_frame_list_all,max_frame,j)
    '''optimizer = BayesianOptimization(
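The example is cut off inside a commented-out BayesianOptimization block. For reference, typical bayes_opt usage looks like the sketch below; the objective wrapper and the bounds are placeholders, not the ones from this example.

from bayes_opt import BayesianOptimization

def objective(threshold):
    # placeholder objective: in the example above this would call something
    # like version2/version3 on the frame lists and return a score to maximize
    return -(threshold - 0.97) ** 2

optimizer = BayesianOptimization(
    f=objective,
    pbounds={'threshold': (0.9, 1.0)},
    random_state=1,
)
optimizer.maximize(init_points=3, n_iter=10)
print(optimizer.max)   # best threshold found and its target value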
Example #9
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))

    # tensorboardX
    writer = SummaryWriter(log_dir=osp.join(args.save_dir,'summary'))

    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root, name=args.dataset, split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled, cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss=args.loss)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

        if use_gpu:
            model = nn.DataParallel(model).cuda()

    start_time = time.time()
    if args.resume_all:
        print("Loading all checkpoints from '{}'".format(args.resume_all))
        pths = glob.glob(osp.join(args.resume_all,'checkpoint_ep*.tar'))
        best_epoch = 0
        best_rank1 = -np.inf
        # best_rerankepoch = 0
        # best_rerank1 = -np.inf
        for pth in pths:
            epoch = list(map(int, re.findall(pattern=r'ep(\d+)\.pth',string=pth)))
            print("Test epoch {}".format(epoch[0]))
            checkpoint = torch.load(pth)
            model.load_state_dict(checkpoint['state_dict'])
            if use_gpu:
                model = nn.DataParallel(model).cuda()
            rank1 = test(epoch, model, queryloader, galleryloader, use_gpu=True, summary=writer)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch[0]
            model = model.module
        shutil.copyfile(osp.join(args.resume_all, 'checkpoint_ep' + str(best_epoch) + '.pth.tar'),
                        osp.join(args.resume_all, 'best_checkpoint.pth.tar'))
        print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))


    test_time = round(time.time() - start_time)
    test_time = str(datetime.timedelta(seconds=test_time))  
    print("Finished. Testtime (h:m:s): {}.".format(test_time))
# img = Image.open(img_path).convert('RGB')


class ImageDataset(Dataset):
    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        img = read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid


if __name__ == '__main__':
    import data_manager
    dataset = data_manager.init_img_dataset(root='/home/ls', name='market1501')
    train_loader = ImageDataset(dataset.train)
    from IPython import embed
    embed()

# for batch_id, (imgs, pid, camid) in enumerate(train_loader):
#    ...:     print(batch_id, imgs, pid, camid)
#    ...:     if batch_id > 10:
#    ...:         break
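The --resume_all branch above scans a directory of checkpoint_ep*.pth.tar files and parses the epoch out of each filename with a regex. A small self-contained sketch of that filename parsing follows; the directory name is made up.

import glob
import os.path as osp
import re

save_dir = './log'                         # hypothetical checkpoint directory
pths = glob.glob(osp.join(save_dir, 'checkpoint_ep*.pth.tar'))

epochs = []
for pth in pths:
    m = re.search(r'ep(\d+)\.pth', pth)    # e.g. 'checkpoint_ep60.pth.tar' -> 60
    if m:
        epochs.append((int(m.group(1)), pth))

# evaluate checkpoints in epoch order instead of glob order
for epoch, pth in sorted(epochs):
    print('would test epoch', epoch, 'from', pth)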
Example #11
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    if opt.use_dense:
        model_structure = ft_net_dense(n_classe)
    else:
        model_structure = ft_net(n_classe)
    model = load_network(model_structure)
    # Change to test mode
    model = model.eval()
    if use_gpu:
        model = model.cuda()

    dataset = data_manager.init_img_dataset(root=opt.dataset,
                                            name='cuhk03',
                                            split_id=0,
                                            cuhk03_classic_split=True)

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=data_transforms),
        batch_size=32,
        shuffle=False,
        num_workers=4,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=data_transforms),
        batch_size=32,
        shuffle=False,
        num_workers=4,
Example #12
# args_root, args_dataset and args_split_id are not defined in this snippet;
# the values below are placeholders, adjust them to your own setup.
args_root = '/path/to/data'
args_dataset = 'market1501'
args_split_id = 0
args_height = 32
args_width = 16
args_train_batch = 128
args_workers = 4
use_gpu = torch.cuda.is_available()
pin_memory = True if use_gpu else False

transform_train = T.Compose([
    T.Resize((args_height, args_width)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

dataset = data_manager.init_img_dataset(
    root=args_root,
    name=args_dataset,
    split_id=args_split_id,
)

data_loader = DataLoader(
    ImageDataset(dataset.train, transform=transform_train),
    batch_size=args_train_batch,
    shuffle=True,
    num_workers=args_workers,
    pin_memory=pin_memory,
    drop_last=True,
)

# for batch_idx, (imgs, pids, _) in enumerate(data_loader):
#     print('batch_idx:',batch_idx,'imgs:',imgs.size())
Example #13
def prepare_student_data(nb_teachers, save=False):
    """
    Takes a dataset name and the size of the teacher ensemble and prepares
    training data for the student model, according to parameters indicated
    in flags above.
    :param dataset: string corresponding to mnist, cifar10, or svhn
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param save: if set to True, will dump student training labels predicted by
                 the ensemble of teachers (with Laplacian noise) as npy files.
                 It also dumps the clean votes for each class (without noise) and
                 the labels assigned by teachers
    :return: pairs of (data, labels) to be used for student training and testing

    """

    # Load the dataset
    if config.dataset == 'celeba':
        dataset = data_manager.init_img_dataset(root=config.data_dir,
                                                name=config.dataset)
        test_data = dataset.test_data
        test_labels = dataset.test_label
        train_data = dataset.train_data
        train_labels = dataset.train_label

    else:
        return False

    # Make sure there is data leftover to be used as a test set
    assert config.stdnt_share < len(test_data)
    ori_test_data = test_data
    # map raw images into feature space; the features drive the kNN teacher selection below
    train_data, test_data = extract_feature(train_data, test_data)

    # draw a random subset of the (featurized) test set as unlabeled student data
    share_index = np.random.choice(test_data[:-1000].shape[0],
                                   config.stdnt_share)
    stdnt_data = test_data[share_index]
    picked_stdnt_data = [ori_test_data[idx] for idx in share_index]
    print('choose original stdnt data', len(picked_stdnt_data))
    num_train = train_data.shape[0]
    # Difference here: in the celeba case we have a 40-dim binary label, so for private aggregation
    # we simply add up the teachers' predictions; teachers_preds then has shape (num_queries, dim).
    teachers_preds = np.zeros([stdnt_data.shape[0], config.nb_labels])

    tau_teachers_preds = []
    # a weighted teacher prediction with clipping
    for idx in range(len(stdnt_data)):
        if idx % 100 == 0:
            print('idx=', idx)
        query_data = stdnt_data[idx]
        select_teacher = np.random.choice(train_data.shape[0],
                                          int(prob * num_train))
        dis = np.linalg.norm(train_data[select_teacher] - query_data, axis=1)
        k_index = select_teacher[np.argsort(dis)[:config.nb_teachers]]
        # sum over the number of teachers, which makes it easy to compute their votes
        if config.use_tau:
            tau_teachers_preds.append(tau_limit(train_labels[k_index, :]))
        teachers_preds[idx] = np.sum(train_labels[k_index, :], axis=0)

    teachers_preds = np.asarray(teachers_preds, dtype=np.int32)
    if config.use_tau:

        preds_tau = np.asarray(tau_teachers_preds, dtype=np.float32)
        acct.compose_poisson_subsampled_mechanisms(gaussian,
                                                   prob,
                                                   coeff=config.stdnt_share)
        count_zero_list = config.nb_teachers * np.ones(
            [config.stdnt_share, config.nb_labels]) - teachers_preds
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale, count_zero_list=count_zero_list)
    else:
        acct.compose_poisson_subsampled_mechanisms(gaussian,
                                                   prob,
                                                   coeff=config.stdnt_share)
        idx, stdnt_labels = aggregation.aggregation_knn(
            teachers_preds, config.gau_scale)
    # compute privacy loss
    print("Composition of student  subsampled Gaussian mechanisms gives ",
          (acct.get_eps(delta), delta))

    # Print accuracy of aggregated label
    #ac_ag_labels = hamming_accuracy(stdnt_labels, test_labels[:config.stdnt_share], torch=False)
    ac_ag_labels = hamming_accuracy(stdnt_labels,
                                    test_labels[share_index],
                                    torch=False)
    precision = hamming_precision(stdnt_labels,
                                  test_labels[share_index],
                                  torch=False)
    print("Accuracy of the aggregated labels: " + str(ac_ag_labels))
    print('Precision of the aggregated labels: ' + str(precision))
    current_eps = acct.get_eps(config.delta)
    # Store unused part of test set for use as a test set after student training
    stdnt_test_data = ori_test_data[-1000:]
    stdnt_test_labels = test_labels[-1000:]

    if save:
        # Prepare filepath for numpy dump of labels produced by noisy aggregation
        dir_path = os.path.join(config.save_model,
                                'knn_num_neighbor_' + str(config.nb_teachers))
        utils.mkdir_if_missing(dir_path)
        filepath = dir_path + '_knn_voting.npy'  #NOLINT(long-line)

        # Dump student noisy labels array
        with open(filepath, 'wb') as file_obj:
            np.save(file_obj, teachers_preds)

    return picked_stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels
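The loop above picks, for every student query, the k nearest training points in feature space (after subsampling the candidate pool) and sums their multi-hot labels as votes. The distance/argsort core of that step in isolation is sketched below; all array shapes and rates here are made-up toy values.

import numpy as np

rng = np.random.default_rng(0)
train_feat = rng.normal(size=(500, 128))            # training features
train_labels = rng.integers(0, 2, size=(500, 40))   # 40-dim binary labels
query = rng.normal(size=(128,))                     # one student query
k = 25                                              # plays the role of config.nb_teachers
prob = 0.2                                          # subsampling rate of candidate teachers

# subsample candidate "teachers", then take the k nearest to the query
select = rng.choice(train_feat.shape[0], int(prob * train_feat.shape[0]), replace=False)
dis = np.linalg.norm(train_feat[select] - query, axis=1)
k_index = select[np.argsort(dis)[:k]]

votes = train_labels[k_index].sum(axis=0)           # per-label vote counts, shape (40,)
print(votes.shape, votes[:10])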
Example #14
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    # optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    # if args.stepsize > 0:
    #     scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    # start_epoch = args.start_epoch
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    print("Evaluate only")
    test(model, queryloader, galleryloader, use_gpu)
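Resuming above assumes the checkpoint was saved from the bare model (model.module.state_dict() when DataParallel is used), so the keys carry no 'module.' prefix. A minimal sketch of the save/load convention these examples appear to use; a toy nn.Linear stands in for the re-ID model, and it writes a small file in the working directory.

import torch
import torch.nn as nn

model = nn.Linear(10, 2)                     # stand-in for models.init_model(...)

# save: always unwrap DataParallel so the state_dict keys are prefix-free
state_dict = model.module.state_dict() if isinstance(model, nn.DataParallel) else model.state_dict()
torch.save({'state_dict': state_dict, 'rank1': 0.0, 'epoch': 0}, 'checkpoint_ep1.pth.tar')

# load: restore into the bare model first, then wrap for multi-GPU if needed
checkpoint = torch.load('checkpoint_ep1.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
if torch.cuda.is_available():
    model = nn.DataParallel(model).cuda()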
def main():
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if use_gpu:
        pin_memory = True
    else:
        pin_memory = False

    if not args.evaluate:   # If not test model
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))   # Log file is saved in log_train.txt
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Uncomment when gpu is used
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    dataset = data_manager.init_img_dataset(root=args.root, name=args.dataset)

    # 3 dataloader: train, query, gallery
    # Train needs augmentation
    transform_train = T.Compose([
        transform.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'softmax', 'metric'})
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_class = nn.CrossEntropyLoss()
    criterion_metric = TriHardLoss(margin=args.margin)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Learning Rate Decay
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    # Uncomment when gpu is used
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only!")
        test(model, queryloader, galleryloader, use_gpu)
        return 0

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Strat training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_class, criterion_metric, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0:
            scheduler.step()

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
            epoch + 1) == args.max_epoch:
            torch.save(model, 'logs/model_metric.pkl')
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                        use_gpu=use_gpu)
    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0: scheduler.step()

        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Example #17
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        img = read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid


if __name__ == '__main__':
    import data_manager
    dataset = data_manager.init_img_dataset(root='/data2', name='market1501')
    train_loader = ImageDataset(dataset.train)
    from IPython import embed
    #embed()


class VideoDataset(Dataset):
    """Video Person ReID Dataset.
    Note batch data has shape (batch, seq_len, channel, height, width).
    """
    sample_methods = ['evenly', 'random', 'all']

    def __init__(self, dataset, seq_len=15, sample='evenly', transform=None):
        self.dataset = dataset
        self.seq_len = seq_len
        self.sample = sample