Code example #1
# Imports and class wrapper added for completeness; the original snippet shows only __init__.
import torch
import torchvision.transforms as T

from torchreid.models import build_model
from torchreid.utils import (check_isfile, compute_model_complexity,
                             load_pretrained_weights)


class FeatureExtractor(object):  # class name is assumed
    def __init__(self,
                 model_name='',
                 model_path='',
                 image_size=(256, 128),
                 pixel_mean=[0.485, 0.456, 0.406],
                 pixel_std=[0.229, 0.224, 0.225],
                 pixel_norm=True,
                 device='cuda',
                 verbose=True):
        # Build model
        model = build_model(model_name,
                            num_classes=1,
                            pretrained=True,
                            use_gpu=device.startswith('cuda'))
        model.eval()

        num_params, flops = compute_model_complexity(
            model, (1, 3, image_size[0], image_size[1]))

        if verbose:
            print('Model: {}'.format(model_name))
            print('- params: {:,}'.format(num_params))
            print('- flops: {:,}'.format(flops))

        if model_path and check_isfile(model_path):
            load_pretrained_weights(model, model_path)

        # Build transform functions
        transforms = []
        transforms += [T.Resize(image_size)]
        transforms += [T.ToTensor()]
        if pixel_norm:
            transforms += [T.Normalize(mean=pixel_mean, std=pixel_std)]
        preprocess = T.Compose(transforms)

        to_pil = T.ToPILImage()

        device = torch.device(device)
        model.to(device)

        # Class attributes
        self.model = model
        self.preprocess = preprocess
        self.to_pil = to_pil
        self.device = device
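
The attributes stored at the end of __init__ are what a caller would use to run the model; a minimal sketch of such a method, assuming a single PIL image as input (this method is not part of the original snippet):

    def __call__(self, image):
        # Preprocess to a normalized tensor, add a batch dimension, move to the target device.
        batch = self.preprocess(image).unsqueeze(0).to(self.device)
        with torch.no_grad():
            features = self.model(batch)
        return features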
Code example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str)
    parser.add_argument('-d', '--dataset', type=str, default='market1501')
    parser.add_argument('-m', '--model', type=str, default='osnet_x1_0')
    parser.add_argument('--weights', type=str)
    parser.add_argument('--save-dir', type=str, default='log')
    parser.add_argument('--height', type=int, default=256)
    parser.add_argument('--width', type=int, default=128)
    args = parser.parse_args()

    use_gpu = torch.cuda.is_available()

    datamanager = PersonReid.torchreid.data.ImageDataManager(
        root=args.root,
        sources=args.dataset,
        height=args.height,
        width=args.width,
        batch_size_train=100,
        batch_size_test=100,
        transforms=None,
        train_sampler='SequentialSampler')
    test_loader = datamanager.test_loader

    model = PersonReid.torchreid.models.build_model(
        name=args.model,
        num_classes=datamanager.num_train_pids,
        use_gpu=use_gpu)

    if use_gpu:
        model = model.cuda()

    if args.weights and check_isfile(args.weights):
        load_pretrained_weights(model, args.weights)

    visactmap(model, test_loader, args.save_dir, args.width, args.height,
              use_gpu)
Code example #3
File: main.py  Project: nguyenbaviet/Reid
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to config file')
    parser.add_argument('-s',
                        '--sources',
                        type=str,
                        nargs='+',
                        help='source datasets (delimited by space)')
    parser.add_argument('-t',
                        '--targets',
                        type=str,
                        nargs='+',
                        help='target datasets (delimited by space)')
    parser.add_argument('--transforms',
                        type=str,
                        nargs='+',
                        help='data augmentation')
    parser.add_argument('--root',
                        type=str,
                        default='',
                        help='path to data root')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)
    check_cfg(cfg)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer,
                                                       scheduler=scheduler)

    print('Building {}-engine for {}-reid'.format(cfg.loss.name,
                                                  cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
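
reset_config and check_cfg come from elsewhere in the project and are not shown here; a plausible sketch of reset_config, inferred from the command-line arguments defined above (an assumption, not the project's verbatim code):

def reset_config(cfg, args):
    # Let explicit command-line arguments override the corresponding config fields.
    if args.root:
        cfg.data.root = args.root
    if args.sources:
        cfg.data.sources = args.sources
    if args.targets:
        cfg.data.targets = args.targets
    if args.transforms:
        cfg.data.transforms = args.transforms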
Code example #4
File: main.py  Project: nguyenbaviet/Reid
    network, class_names, class_colors = load_network(d_config_file,
                                                      d_data_file,
                                                      d_weights,
                                                      batch_size=1)

    reid_datamanager = ImageDataManager(root='dataset',
                                        sources=['ghtk_dataset'],
                                        height=256,
                                        width=128,
                                        combineall=False,
                                        train_sampler='RandomIdentitySampler')
    model = models.build_model(name='resnet50',
                               num_classes=reid_datamanager.num_train_pids,
                               loss='triplet')
    utils.load_pretrained_weights(model, r_weights)

    infer_model = Inference(datamanager=reid_datamanager, model=model)

    frame_queue = Queue(maxsize=1)
    detections_queue = Queue(maxsize=1)
    crop_queue = Queue(maxsize=1)
    ids_queue = Queue(maxsize=1)

    Thread(target=video_capture, args=(frame_queue, cap)).start()
    Thread(target=get_detections,
           args=(frame_queue, detections_queue, network, class_names,
                 class_colors, d_thresh, cap)).start()
    Thread(target=crop_image, args=(detections_queue, crop_queue, cap)).start()
    Thread(target=reid, args=(crop_queue, ids_queue, infer_model, cap)).start()
    Thread(target=draw, args=(ids_queue, cap)).start()
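
The worker functions themselves are not part of this snippet; a hypothetical skeleton of one stage, using the names and signatures from the Thread calls above (the queue payloads and box format are assumptions):

def crop_image(detections_queue, crop_queue, cap):
    # Consume (frame, detections) pairs from the detection stage, cut out the
    # detected boxes, and hand the crops to the re-id stage.
    while cap.isOpened():
        frame, detections = detections_queue.get()
        crops = [frame[top:bottom, left:right]
                 for (left, top, right, bottom) in detections]
        crop_queue.put((frame, crops))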
Code example #5
File: actmap.py  Project: nguyenbaviet/Reid
datamanager = data.ImageDataManager(  # opening line restored; the fragment begins mid-call (import path assumed)
    root='dataset',
    sources=['ghtk_dataset', 'market1501'],
    targets='ghtk_dataset',
    height=256,
    width=128,
    combineall=False,
    batch_size_train=128,
    batch_size_test=128,
    num_instances=4,
    train_sampler='RandomIdentitySampler'
)

model = models.build_model(
    name='resnet50',
    num_classes=datamanager.num_train_pids,
    loss='triplet'
)

weights = '/home/vietnb/reid/log/resnet50-triplet-ghtk-plus/model/model.pth.tar-100'

use_gpu = torch.cuda.is_available()

if use_gpu:
    model = model.cuda()
load_pretrained_weights(model, weights)

visactmap(
    model, datamanager.test_loader, 'heatmap', 128, 256, use_gpu
)

Code example #6
File: inference.py  Project: nguyenbaviet/Reid
        distmat = metrics.compute_distance_matrix(features, self.g_features[0])
        distmat = distmat.cpu().detach().numpy()
        indices = np.argsort(distmat, axis=1)
        for i, gidx in enumerate(indices[:, 0]):  # top-1 gallery index for each query
            ids.append(self.gallery[gidx][1])
            # d.append(distmat[i][gidx])
        return ids


if __name__ == '__main__':
    datamanager = data.ImageDataManager(root='dataset',
                                        sources='market1501',
                                        height=256,
                                        width=128,
                                        combineall=False,
                                        batch_size_train=128,
                                        batch_size_test=32,
                                        num_instances=4,
                                        train_sampler='RandomIdentitySampler')
    model = models.build_model(name='resnet50',
                               num_classes=datamanager.num_train_pids,
                               loss='triplet')
    weights = '/home/vietnb/reid/log/resnet50-triplet-market1501/model/model.pth.tar-10'
    utils.load_pretrained_weights(model, weight_path=weights)

    query = [
        '/home/vietnb/reid/1000_c1s1_003.jpg.jpg',
        '/home/vietnb/reid/1000_c1s1_003.jpg.jpg'
    ]
    infer = Inference(datamanager=datamanager, model=model)
    infer.inference(query)
Code example #7
scheduler = optim.build_lr_scheduler(optimizer,  # opening line restored; the fragment begins mid-call (import path assumed)
                                     stepsize=20)

my_engine = engine.ImageTripletEngine(datamanager,
                                      model,
                                      optimizer,
                                      margin=0.3,
                                      weight_t=0.7,
                                      weight_x=1,
                                      scheduler=scheduler)
# If you want to do transfer learning after adding more data to the dataset, set transfer_learning to True;
# if you just want to re-train the model without adding data, set transfer_learning to False.

path = '/home/vietnb/reid/log/resnet50-triplet-ghtk/model/model.pth.tar-1000'
start_epoch = utils.load_pretrained_weights(model,
                                            path,
                                            optimizer,
                                            scheduler,
                                            transfer_learning=False)

# Note: this overrides the start_epoch returned by load_pretrained_weights above.
start_epoch = utils.resume_from_checkpoint(path, model, optimizer)

my_engine.run(save_dir='log/resnet50-triplet-ghtk',
              max_epoch=50,
              start_epoch=start_epoch,
              print_freq=20,
              eval_freq=10,
              fixbase_epoch=20,
              open_layers='classifier',
              test_only=False,
              visrank_topk=5,
              visrank=True)