Example #1
def visual_batch_eval(cam, image, length, k):
    cam = torch.stack(cam, 0).contiguous()  # torch.Size([240, 16, 8])
    image = torch.stack(image,
                        0).contiguous()  # torch.Size([240, 1, 3, 256, 128])

    cam = cam.view(30, 8, 16, -1)  # 30, 8, 16, 8

    image = image.view(30, 8, 1, 3, 256, -1)
    path = PATH_EVAL + "fenzhi{}".format(k)
    mkdir_if_missing(path)
    for i in range(cam.size(0)):
        fig = plt.figure(figsize=(15, 15))
        for j in range(cam.size(1)):
            ax1 = plt.subplot(3, 8, j + 1)
            ax1.axis('off')
            plt.title('cam', fontsize=18)
            plt.imshow(cam[i][j].detach().cpu().numpy(), alpha=0.6, cmap='jet')

            ax3 = plt.subplot(3, 8, j + 17)
            ax3.axis('off')
            plt.title('cam+img', fontsize=18)
            cam_ij = cam[i][j].unsqueeze(0)
            cam_ij = cam_ij.unsqueeze(0)
            images_ij = image[i][j]
            heatmap, raw_image = visualize(images_ij, cam_ij)
            heatmap = heatmap.squeeze().cpu().numpy().transpose(1, 2, 0)
            plt.imshow(heatmap)

            ax4 = plt.subplot(3, 8, j + 9)
            ax4.axis('off')
            plt.title('raw_image', fontsize=18)
            raw_image = raw_image.squeeze().cpu().numpy().transpose(1, 2, 0)
            plt.imshow(raw_image)
        # fig.tight_layout()
        fig.savefig(PATH_EVAL + "fenzhi{}/cambatch_{}.jpg".format(k, i))
        plt.close(fig)  # close the figure so open figures do not accumulate across iterations
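Every example on this page calls a mkdir_if_missing helper that is not shown here. A minimal sketch of such a helper, assuming it simply wraps os.makedirs and tolerates a directory that already exists:

import errno
import os
import os.path as osp


def mkdir_if_missing(directory):
    """Create the directory (and any parents) if it does not already exist."""
    if not osp.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as e:
            # another process may have created it in the meantime
            if e.errno != errno.EEXIST:
                raise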
Example #2
    def download(self):

        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        raw_dir = osp.join(self.root, 'raw')
        mkdir_if_missing(raw_dir)

        fpath1 = osp.join(raw_dir, datasetname + '.tar')
        fpath2 = osp.join(raw_dir, flowname + '.tar')
        if osp.isfile(fpath1) and osp.isfile(fpath2):
            print("Using the download file:" + fpath1 + " " + fpath2)
        else:
            print("Please firstly download the files")
            raise RuntimeError("Downloaded file missing!")
Example #4
def _cp_img_to(src, dst, rank, prefix):
    """
    Args:
        src: image path or tuple (for vidreid)
        dst: target directory  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg'
        rank: int, denoting ranked position, starting from 1
        prefix: string (query or gallery)
    """
    if isinstance(src, tuple) or isinstance(src, list):  # video reid
        dst = osp.join(
            dst, prefix + '_top' + str(rank).zfill(3)
        )  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg/query_top000'
        mkdir_if_missing(dst)
        for img_path in src:  # copy every frame into the target directory
            shutil.copy(img_path, dst)
    else:
        dst = osp.join(
            dst, prefix + '_top' + str(rank).zfill(3) + '_name_' +
            osp.basename(src))
        shutil.copy(src, dst)
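A hypothetical call for the video-reid branch, assuming a tracklet given as a list of frame paths (all paths below are made up for illustration):

# copies both frames into <dst>/gallery_top001/
_cp_img_to(['frames/0016C1T0006F001.jpg', 'frames/0016C1T0006F002.jpg'],
           dst='log/debug_for_eval/split0visual/0016C1T0006F001.jpg',
           rank=1,
           prefix='gallery')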
Example #5
def visual_batch(cam, image, k, save_dir, mode):
    cam = cam.squeeze()  # b, t, 16, 8
    b, t, h, w = cam.size()
    image = torch.stack(image, 0).contiguous()  # [b*t, 1, 3, 256, 128], e.g. torch.Size([240, 1, 3, 256, 128])
    # cam = cam.view(8, 8, *cam.size()[1:])  # 8,8,16,8

    image = image.view(b, t, 1, *image.size()[-3:])  # b, t, 1, 3, 256, 128
    path = PATH + save_dir
    mkdir_if_missing(path)
    for i in range(cam.size(0)):  # b
        fig = plt.figure(figsize=(15, 15))
        for j in range(cam.size(1)):  # t
            ax1 = plt.subplot(3, 8, j + 1)
            ax1.axis('off')
            plt.title('cam', fontsize=18)
            plt.imshow(cam[i][j].detach().cpu().numpy(), alpha=0.6, cmap='jet')

            ax3 = plt.subplot(3, 8, j + 17)
            ax3.axis('off')
            plt.title('cam+img', fontsize=18)
            cam_ij = cam[i][j].unsqueeze(0)
            cam_ij = cam_ij.unsqueeze(0)
            images_ij = image[i][j]
            heatmap, raw_image = visualize(images_ij, cam_ij)
            heatmap = heatmap.squeeze().cpu().numpy().transpose(1, 2, 0)
            plt.imshow(heatmap)

            ax4 = plt.subplot(3, 8, j + 9)
            ax4.axis('off')
            plt.title('raw_image', fontsize=18)
            raw_image = raw_image.squeeze().cpu().numpy().transpose(1, 2, 0)
            plt.imshow(raw_image)
        # fig.tight_layout()
        fig.savefig(path + "/iter_{}index_{}_{}.jpg".format(k, i, mode))
        plt.close(fig)  # close the figure so open figures do not accumulate across iterations
Example #6
    def imgextract(self):

        raw_dir = osp.join(self.root, 'raw')
        exdir1 = osp.join(raw_dir, datasetname)
        exdir2 = osp.join(raw_dir, flowname)
        fpath1 = osp.join(raw_dir, datasetname + '.tar')
        fpath2 = osp.join(raw_dir, flowname + '.tar')

        if not osp.isdir(exdir1):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar = tarfile.open(fpath1)
            mkdir_if_missing(exdir1)
            os.chdir(exdir1)
            tar.extractall()
            tar.close()
            os.chdir(cwd)

        if not osp.isdir(exdir2):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar = tarfile.open(fpath2)
            mkdir_if_missing(exdir2)
            os.chdir(exdir2)
            tar.extractall()
            tar.close()
            os.chdir(cwd)

            # reorganizing the dataset
            # Format

            temp_images_dir = osp.join(self.root, 'temp_images')
            mkdir_if_missing(temp_images_dir)

            temp_others_dir = osp.join(self.root, 'temp_others')
            mkdir_if_missing(temp_others_dir)

            images_dir = osp.join(self.root, 'images')
            mkdir_if_missing(images_dir)

            others_dir = osp.join(self.root, 'others')
            mkdir_if_missing(others_dir)

            fpaths1 = sorted(glob(osp.join(exdir1, 'i-LIDS-VID/sequences', '*/*/*.png')))
            fpaths2 = sorted(glob(osp.join(exdir2, flowname, '*/*/*.png')))

            identities_imgraw = [[[] for _ in range(2)] for _ in range(319)]
            identities_otherraw = [[[] for _ in range(2)] for _ in range(319)]

            # image information
            for fpath in fpaths1:
                fname = osp.basename(fpath)
                fname_list = fname.split('_')
                cam_name = fname_list[0]
                pid_name = fname_list[1]
                cam = int(cam_name[-1])
                pid = int(pid_name[-3:])
                temp_fname = ('{:08d}_{:02d}_{:04d}.png'
                              .format(pid, cam, len(identities_imgraw[pid - 1][cam - 1])))
                identities_imgraw[pid - 1][cam - 1].append(temp_fname)
                shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))

            identities_temp = [x for x in identities_imgraw if x != [[], []]]
            identities_images = identities_temp

            for pid in range(len(identities_temp)):
                for cam in range(2):
                    for img in range(len(identities_images[pid][cam])):
                        temp_fname = identities_temp[pid][cam][img]
                        fname = ('{:08d}_{:02d}_{:04d}.png'
                                 .format(pid, cam, img))
                        identities_images[pid][cam][img] = fname
                        shutil.copy(osp.join(temp_images_dir, temp_fname), osp.join(images_dir, fname))

            shutil.rmtree(temp_images_dir)

            # flow information

            for fpath in fpaths2:
                fname = osp.basename(fpath)
                fname_list = fname.split('_')
                cam_name = fname_list[0]
                pid_name = fname_list[1]
                cam = int(cam_name[-1])
                pid = int(pid_name[-3:])
                temp_fname = ('{:08d}_{:02d}_{:04d}.png'
                              .format(pid, cam, len(identities_otherraw[pid - 1][cam - 1])))
                identities_otherraw[pid - 1][cam - 1].append(temp_fname)
                shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))

            identities_temp = [x for x in identities_otherraw if x != [[], []]]
            identities_others = identities_temp

            for pid in range(len(identities_temp)):
                for cam in range(2):
                    for img in range(len(identities_others[pid][cam])):
                        temp_fname = identities_temp[pid][cam][img]
                        fname = ('{:08d}_{:02d}_{:04d}.png'
                                 .format(pid, cam, img))
                        identities_others[pid][cam][img] = fname
                        shutil.copy(osp.join(temp_others_dir, temp_fname), osp.join(others_dir, fname))

            shutil.rmtree(temp_others_dir)

            meta = {'name': 'iLIDS-sequence', 'shot': 'sequence', 'num_cameras': 2,
                    'identities': identities_images}

            write_json(meta,  osp.join(self.root, 'meta.json'))

            # Consider fixed training and testing split
            splitmat_name = osp.join(exdir1, 'train-test people splits', 'train_test_splits_ilidsvid.mat')
            data = sio.loadmat(splitmat_name)
            person_list = data['ls_set']
            num = len(identities_images)
            splits = []

            for i in range(10):
                pids = (person_list[i] - 1).tolist()
                trainval_pids = sorted(pids[:num // 2])
                test_pids = sorted(pids[num // 2:])
                split = {'trainval': trainval_pids,
                         'query': test_pids,
                         'gallery': test_pids}
                splits.append(split)
            write_json(splits, osp.join(self.root, 'splits.json'))
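Both reorganization loops above rename frames with the pattern '{:08d}_{:02d}_{:04d}.png' (person id, camera index, and a per-camera frame counter, all zero-padded). A quick standalone check of that format, with illustrative values:

pid, cam, frame = 3, 1, 12
print('{:08d}_{:02d}_{:04d}.png'.format(pid, cam, frame))  # -> 00000003_01_0012.png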
Example #7
def main():
    global best_prec
    global opt

    if opt['id'] != '':
        model_id = opt['id']
    else:
        model_id = time.strftime("%m_%d_%H-%M-%S")
    sys.stdout = Logger(osp.join(opt['log_dir'], 'log.' + model_id + '.txt'))

    # initialize
    checkpoint_dir = osp.join(opt['checkpoint_dir'], model_id)
    mkdir_if_missing(checkpoint_dir)

    # check gpu
    assert opt['gpus'] is not None

    # set random seed
    cudnn.benchmark = False
    cudnn.deterministic = True
    random.seed(opt['seed'])
    np.random.seed(opt['seed'])
    torch.manual_seed(opt['seed'])
    torch.cuda.manual_seed_all(opt['seed'])

    # load imdb
    train_refdb = get_db('refvg_train_' + opt['model_method'])
    vocab = train_refdb.load_dictionary()
    opt['vocab_size'] = len(vocab)
    val_refdb = get_db('refvg_val_' + opt['model_method'])

    # model, criterion, optimizer
    model = SGReason(opt)
    model = torch.nn.DataParallel(model).cuda()
    criterion = SoftmaxLoss().cuda()

    optimizer = torch.optim.Adam(list(model.parameters()) +
                                 list(criterion.parameters()),
                                 lr=opt['learning_rate'],
                                 betas=(opt['optim_alpha'], opt['optim_beta']),
                                 eps=opt['optim_epsilon'])

    scheduler = ReduceLROnPlateau(optimizer,
                                  factor=0.1,
                                  patience=3,
                                  mode='max')

    if opt['evaluate']:
        if osp.isfile(opt['model']):
            model, criterion = load_checkpoint(model, criterion, opt['model'])
            test_refdb = get_db('refvg_test_' + opt['model_method'])
            test_dataset = RefDataset(test_refdb, vocab, opt)
            test_loader = torch.utils.data.DataLoader(
                test_dataset,
                batch_size=opt['batch_size'],
                shuffle=False,
                num_workers=opt['workers'],
                pin_memory=True)
            test_loss, test_prec = validate(test_loader, model, criterion)
            print(test_loss, test_prec)
        else:
            print("=> no checkpoint found at '{}'".format(opt['model']))
        return

    # start training
    epoch_cur = 0
    train_dataset = RefDataset(train_refdb, vocab, opt)
    val_dataset = RefDataset(val_refdb, vocab, opt)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt['batch_size'],
                                               shuffle=True,
                                               num_workers=opt['workers'],
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=opt['batch_size'],
                                             shuffle=False,
                                             num_workers=opt['workers'],
                                             pin_memory=True)

    for epoch in range(epoch_cur, opt['max_epochs']):
        train(train_loader, model, criterion, optimizer, epoch)
        val_loss, prec = validate(val_loader, model, criterion, epoch)
        scheduler.step(prec)
        for i, param_group in enumerate(optimizer.param_groups):
            print(float(param_group['lr']))

        is_best = prec >= best_prec
        best_prec = max(best_prec, prec)
        save_checkpoint(
            {
                'model_state_dict': model.state_dict(),
                'crit_state_dict': criterion.state_dict(),
                'optimizer': optimizer.state_dict()
            }, is_best, checkpoint_dir, str(epoch))
Example #8
def visualize_ranked_results(distmat,
                             queryloader,
                             galleryloader,
                             save_dir='',
                             visual_id=2,
                             topk=10):
    """Visualizes ranked results. 存放在一个文件夹中

    Supports both image-reid and video-reid.

    Args:
        distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
        queryloader (tuple): tuples of (img_path(s), pid, camid).
        galleryloader (tuple): tuples of (img_path(s), pid, camid).
        save_dir (str): directory to save output images.
        visual_id (int, optional): index of the single query to visualize.
        topk (int, optional): denoting top-k images in the rank list to be visualized.
    """
    num_q, num_g = distmat.shape

    print('Visualizing top-{} ranks'.format(topk))
    print('# query: {}\n# gallery {}'.format(num_q, num_g))
    print('Saving images to "{}"'.format(save_dir))

    query = queryloader  # 1980 tuples of (img_path(s), pid, camid)
    gallery = galleryloader  # 9330 tuples of (img_path(s), pid, camid)
    assert num_q == len(query)
    assert num_g == len(gallery)

    indices = np.argsort(distmat, axis=1)  # shape: (1980, 9330)
    mkdir_if_missing(
        save_dir
    )  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual'

    def _cp_img_to(src, dst, rank, prefix):
        """
        Args:
            src: image path or tuple (for vidreid)
            dst: target directory  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg'
            rank: int, denoting ranked position, starting from 1
            prefix: string (query or gallery)
        """
        if isinstance(src, tuple) or isinstance(src, list):  # video reid
            dst = osp.join(
                dst, prefix + '_top' + str(rank).zfill(3)
            )  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg/query_top000'
            mkdir_if_missing(dst)
            for img_path in src:  # copy every frame into the target directory
                shutil.copy(img_path, dst)
        else:
            dst = osp.join(
                dst, prefix + '_top' + str(rank).zfill(3) + '_name_' +
                osp.basename(src))
            shutil.copy(src, dst)

    for q_idx in range(num_q):
        # For speed, only output the rank list for a single query. This index is the
        # position in the query list, not the actual person id.
        if q_idx == visual_id:  # 14
            qimg_path, qpid, qcamid = query[q_idx]  # qpid = 16, camid = 0

            if isinstance(qimg_path, tuple) or isinstance(qimg_path, list):
                # the directory holding this query's rank list is named after the first query image
                qdir = osp.join(
                    save_dir, osp.basename(qimg_path[0])
                )  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg'
            else:
                qdir = osp.join(save_dir, osp.basename(qimg_path))
            mkdir_if_missing(qdir)  # create the directory that will hold this rank list
            _cp_img_to(qimg_path, qdir, rank=0,
                       prefix='query')  # copy the query images into the result directory

            rank_idx = 1
            for g_idx in indices[q_idx, :]:  # 3291, 3288, 3289, 3290, 3293
                gimg_path, gpid, gcamid = gallery[g_idx]
                invalid = (qpid == gpid) & (qcamid == gcamid)  # skip gallery samples with the same pid and camera as the query
                if not invalid:
                    _cp_img_to(gimg_path,
                               qdir,
                               rank=rank_idx,
                               prefix='gallery')
                    rank_idx += 1
                    if rank_idx > topk:
                        break
    print("Done")
Example #9
def visualize_in_pic(distmat,
                     queryloader,
                     galleryloader,
                     save_dir='',
                     visual_id=2,
                     topk=9):
    """

        distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
        queryloader (tuple): tuples of (img_path(s), pid, camid).
        galleryloader (tuple): tuples of (img_path(s), pid, camid).
        save_dir (str): directory to save output images.
        visual_id(int, optional): only show 1 id
        topk (int, optional): denoting top-k images in the rank list to be visualized.
    """
    num_q, num_g = distmat.shape

    print('Visualizing top-{} ranks'.format(topk + 1))
    print('# query: {}\n# gallery {}'.format(num_q, num_g))
    print('Saving images to "{}"'.format(save_dir))

    query = queryloader  # 1980 tuples of (img_path(s), pid, camid)
    gallery = galleryloader  # 9330 tuples of (img_path(s), pid, camid)
    assert num_q == len(query)
    assert num_g == len(gallery)

    indices = np.argsort(distmat, axis=1)  # shape: (1980, 9330)
    mkdir_if_missing(
        save_dir
    )  # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual'

    def imshow(path, title=None):
        """Imshow for Tensor."""
        im = plt.imread(path)
        plt.imshow(im)
        if title is not None:
            plt.title(title)
        # plt.pause(0.001)  # pause a bit so that plots are updated

    flag = 0
    for q_idx in range(num_q):  # for speed, only output the rank list for one person id (query pids: 2, 4, 6, 8, 10, ...)
        qimg_path, qpid, qcamid = query[q_idx]  # qpid = 16, camid = 0

        if qpid == visual_id:  # 14
            flag = 1
            fig = plt.figure(figsize=(25, 8))
            ax = plt.subplot(1, 11, 1)
            ax.axis('off')
            imshow(qimg_path[0], 'query, pid:{}'.format(qpid))

            rank_idx = 0
            for g_idx in indices[q_idx, :]:  # 3291, 3288, 3289, 3290, 3293
                gimg_path, gpid, gcamid = gallery[g_idx]
                # invalid = (qpid == gpid) & (qcamid == gcamid)  # would skip same-camera matches of the same person
                invalid = False
                if not invalid:
                    rank_idx += 1
                    ax = plt.subplot(1, 11, rank_idx + 1)
                    ax.axis('off')
                    imshow(gimg_path[0])
                    if qpid == gpid:
                        ax.set_title('rank:{},pid{}_{}'.format(
                            rank_idx, gpid, gcamid),
                                     color='green')
                    else:
                        ax.set_title('rank:{},pid{}_{}'.format(
                            rank_idx, gpid, gcamid),
                                     color='red')

                    if rank_idx > topk:
                        break
            fig.savefig("show_{}_{}.png".format(qpid, qcamid))
            break
    if flag == 1:
        print("Done")
    else:
        print("No matched person in query_dataset, try another id")
Example #10
    def imgextract(self):

        raw_dir = osp.join(self.root, 'raw')
        # raw_dir = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw
        exdir1 = osp.join(raw_dir, datasetname)
        # exdir1 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid_2011
        exdir2 = osp.join(raw_dir, flowname)
        # exdir2 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid2011flow
        fpath1 = osp.join(raw_dir, datasetname + '.tar')
        # fpath1 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid_2011.tar
        fpath2 = osp.join(raw_dir, flowname + '.tar')
        # fpath2 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid2011flow.tar

        if not osp.isdir(exdir1):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar_ref = tarfile.open(fpath1)
            mkdir_if_missing(exdir1)
            os.chdir(exdir1)
            tar_ref.extractall()
            tar_ref.close()
            os.chdir(cwd)

        if not osp.isdir(exdir2):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar_ref = tarfile.open(fpath2)
            mkdir_if_missing(exdir2)
            os.chdir(exdir2)
            tar_ref.extractall()
            tar_ref.close()
            os.chdir(cwd)

        # reorganizing the dataset
        # Format
        temp_images_dir = osp.join(self.root, 'temp_images')
        mkdir_if_missing(temp_images_dir)

        temp_others_dir = osp.join(self.root, 'temp_others')
        mkdir_if_missing(temp_others_dir)

        images_dir = osp.join(self.root, 'images')
        mkdir_if_missing(images_dir)
        # images_dir = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/images

        others_dir = osp.join(self.root, 'others')
        mkdir_if_missing(others_dir)
        # others_dir = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/others

        fpaths1 = sorted(glob(osp.join(exdir1, 'prid_2011/multi_shot', '*/*/*.png')))
        fpaths2 = sorted(glob(osp.join(exdir2, 'prid2011flow', '*/*/*.png')))

        identities_imgraw = [[[] for _ in range(2)] for _ in range(200)]
        identities_otherraw = [[[] for _ in range(2)] for _ in range(200)]

        for fpath in fpaths1:
            fname = fpath
            fname_list = fname.split('/')
            cam_name = fname_list[-3]  # cam_a  / cam_b
            pid_name = fname_list[-2]  # person_001
            frame_name = fname_list[-1]  # 0001.png
            cam_id = 1 if cam_name == 'cam_a' else 2  # cam_id = 1 / 2
            pid_id = int(pid_name.split('_')[-1])  # pid_id = 001
            if pid_id > 200:
                continue
            frame_id = int(frame_name.split('.')[-2])  # frame_id = 0001
            temp_fname = ('{:08d}_{:02d}_{:04d}.png'
                          .format(pid_id-1, cam_id-1, frame_id-1))
            identities_imgraw[pid_id - 1][cam_id - 1].append(temp_fname)
            shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))

        identities_temp = [x for x in identities_imgraw if x != [[], []]]
        identities_images = identities_temp

        for pid in range(len(identities_temp)):
            for cam in range(2):
                for img in range(len(identities_images[pid][cam])):
                    temp_fname = identities_temp[pid][cam][img]
                    fname = ('{:08d}_{:02d}_{:04d}.png'.format(pid, cam, img))
                    identities_images[pid][cam][img] = fname
                    shutil.copy(osp.join(temp_images_dir, temp_fname), osp.join(images_dir, fname))

        shutil.rmtree(temp_images_dir)

        for fpath in fpaths2:
            fname = fpath
            fname_list = fname.split('/')
            cam_name = fname_list[-3]  # cam_a  / cam_b
            pid_name = fname_list[-2]  # person_001
            frame_name = fname_list[-1]  # 0001.png
            cam_id = 1 if cam_name == 'cam_a' else 2  # cam_id = 1 / 2
            pid_id = int(pid_name.split('_')[-1])  # pid_id = 001
            if pid_id > 200:
                continue
            frame_id = int(frame_name.split('.')[-2])  # frame_id = 0001
            temp_fname = ('{:08d}_{:02d}_{:04d}.png'
                          .format(pid_id-1, cam_id-1, frame_id-1))
            identities_otherraw[pid_id - 1][cam_id - 1].append(temp_fname)
            shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))

        identities_temp = [x for x in identities_otherraw if x != [[], []]]
        identities_others = identities_temp

        for pid in range(len(identities_temp)):
            for cam in range(2):
                for img in range(len(identities_others[pid][cam])):
                    temp_fname = identities_temp[pid][cam][img]
                    fname = ('{:08d}_{:02d}_{:04d}.png'.format(pid, cam, img))
                    identities_others[pid][cam][img] = fname
                    shutil.copy(osp.join(temp_others_dir, temp_fname), osp.join(others_dir, fname))

        shutil.rmtree(temp_others_dir)

        meta = {'name': 'prid-sequence', 'shot': 'sequence', 'num_cameras': 2,
                'identities': identities_images}

        write_json(meta, osp.join(self.root, 'meta.json'))
        # Consider fixed training and testing split
        num = 200
        splits = []
        for i in range(10):
            pids = np.random.permutation(num).tolist()  # permutation already yields 0-based pids
            trainval_pids = pids[:num // 2]
            test_pids = pids[num // 2:]
            split = {'trainval': trainval_pids,
                     'query': test_pids,
                     'gallery': test_pids}

            splits.append(split)
        write_json(splits, osp.join(self.root, 'splits.json'))
Example #11
    def imgextract(self):

        raw_dir = osp.join(self.root, 'raw')
        # raw_dir = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw
        exdir1 = osp.join(raw_dir, datasetname)
        # exdir1 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid_2011
        exdir2 = osp.join(raw_dir, flowname)
        # exdir2 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid2011flow
        fpath1 = osp.join(raw_dir, datasetname + '.tar')
        # fpath1 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid_2011.tar
        fpath2 = osp.join(raw_dir, flowname + '.tar')
        # fpath2 = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/raw/prid2011flow.tar

        if not osp.isdir(exdir1):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar_ref = tarfile.open(fpath1)
            mkdir_if_missing(exdir1)
            os.chdir(exdir1)
            tar_ref.extractall()
            tar_ref.close()
            os.chdir(cwd)

        if not osp.isdir(exdir2):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar_ref = tarfile.open(fpath2)
            mkdir_if_missing(exdir2)
            os.chdir(exdir2)
            tar_ref.extractall()
            tar_ref.close()
            os.chdir(cwd)

        # reorganizing the dataset
        # Format
        temp_images_dir = osp.join(self.root, 'temp_images')
        mkdir_if_missing(temp_images_dir)

        temp_others_dir = osp.join(self.root, 'temp_others')
        mkdir_if_missing(temp_others_dir)

        images_dir = osp.join(self.root, 'images')
        mkdir_if_missing(images_dir)
        # images_dir = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/images

        others_dir = osp.join(self.root, 'others')
        mkdir_if_missing(others_dir)
        # others_dir = /media/ying/0BDD17830BDD1783/video_reid _prid/data/prid2011sequence/others

        fpaths1 = sorted(
            glob(osp.join(exdir1, 'prid_2011/multi_shot',
                          '*/*/*.png')))  # absolute paths of all image files
        fpaths2 = sorted(glob(osp.join(exdir2, 'prid2011flow', '*/*/*.png')))

        identities_imgraw = [[[] for _ in range(2)]
                             for _ in range(200)]  # 200 entries, one [[], []] per identity
        identities_otherraw = [[[] for _ in range(2)] for _ in range(200)]

        for fpath in fpaths1:
            fname = fpath
            fname_list = fname.split('/')
            cam_name = fname_list[-3]  # cam_a  / cam_b
            pid_name = fname_list[-2]  # person_001
            frame_name = fname_list[-1]  # 0001.png
            cam_id = 1 if cam_name == 'cam_a' else 2  # cam_id = 1 / 2
            pid_id = int(pid_name.split('_')[-1])  # pid_id = 001
            if pid_id > 200:
                continue
            frame_id = int(frame_name.split('.')[-2])  # frame_id = 0001
            temp_fname = ('{:08d}_{:02d}_{:04d}.png'.format(
                pid_id - 1, cam_id - 1, frame_id - 1))
            identities_imgraw[pid_id - 1][cam_id - 1].append(temp_fname)
            shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))

        identities_temp = [x for x in identities_imgraw if x != [[], []]]
        identities_images = identities_temp

        for pid in range(len(identities_temp)):
            for cam in range(2):
                for img in range(len(identities_images[pid][cam])):
                    temp_fname = identities_temp[pid][cam][img]
                    fname = ('{:08d}_{:02d}_{:04d}.png'.format(pid, cam, img))
                    identities_images[pid][cam][img] = fname
                    shutil.copy(osp.join(temp_images_dir, temp_fname),
                                osp.join(images_dir, fname))

        shutil.rmtree(temp_images_dir)

        for fpath in fpaths2:
            fname = fpath
            fname_list = fname.split('/')
            cam_name = fname_list[-3]  # cam_a  / cam_b
            pid_name = fname_list[-2]  # person_001
            frame_name = fname_list[-1]  # 0001.png
            cam_id = 1 if cam_name == 'cam_a' else 2  # cam_id = 1 / 2
            pid_id = int(pid_name.split('_')[-1])  # pid_id = 001
            if pid_id > 200:
                continue
            frame_id = int(frame_name.split('.')[-2])  # frame_id = 0001
            temp_fname = ('{:08d}_{:02d}_{:04d}.png'.format(
                pid_id - 1, cam_id - 1, frame_id - 1))
            identities_otherraw[pid_id - 1][cam_id - 1].append(temp_fname)
            shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))

        identities_temp = [x for x in identities_otherraw if x != [[], []]]
        identities_others = identities_temp

        for pid in range(len(identities_temp)):
            for cam in range(2):
                for img in range(len(identities_others[pid][cam])):
                    temp_fname = identities_temp[pid][cam][img]
                    fname = ('{:08d}_{:02d}_{:04d}.png'.format(pid, cam, img))
                    identities_others[pid][cam][img] = fname
                    shutil.copy(osp.join(temp_others_dir, temp_fname),
                                osp.join(others_dir, fname))

        shutil.rmtree(temp_others_dir)

        meta = {
            'name': 'prid-sequence',
            'shot': 'sequence',
            'num_cameras': 2,
            'identities': identities_images
        }

        write_json(meta, osp.join(self.root, 'meta.json'))
        # Consider fixed training and testing split
        num = len(identities_images)
        splits = []
        for i in range(20):
            pids = np.random.permutation(num).tolist()  # permutation already yields 0-based pids
            trainval_pids = pids[:num // 2]
            test_pids = pids[num // 2:]
            split = {
                'trainval': trainval_pids,
                'query': test_pids,
                'gallery': test_pids
            }

            splits.append(split)
        write_json(splits, osp.join(self.root, 'splits.json'))
Example #12
    def imgextract(self):

        raw_dir = osp.join(self.root, 'raw')
        exdir1 = osp.join(raw_dir, datasetname)
        exdir2 = osp.join(raw_dir, flowname)
        fpath1 = osp.join(raw_dir, datasetname + '.tar')
        fpath2 = osp.join(raw_dir, flowname + '.tar')

        if not osp.isdir(exdir1):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar = tarfile.open(fpath1)
            mkdir_if_missing(exdir1)
            os.chdir(exdir1)
            tar.extractall()
            tar.close()
            os.chdir(cwd)

        if not osp.isdir(exdir2):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar = tarfile.open(fpath2)
            mkdir_if_missing(exdir2)
            os.chdir(exdir2)
            tar.extractall()
            tar.close()
            os.chdir(cwd)

            # reorganizing the dataset
            # Format

            temp_images_dir = osp.join(self.root, 'temp_images')
            mkdir_if_missing(temp_images_dir)

            temp_others_dir = osp.join(self.root, 'temp_others')
            mkdir_if_missing(temp_others_dir)

            images_dir = osp.join(self.root, 'images')
            mkdir_if_missing(images_dir)

            others_dir = osp.join(self.root, 'others')
            mkdir_if_missing(others_dir)

            fpaths1 = sorted(
                glob(osp.join(exdir1, 'i-LIDS-VID/sequences', '*/*/*.png')))
            fpaths2 = sorted(glob(osp.join(exdir2, flowname, '*/*/*.png')))

            identities_imgraw = [[[] for _ in range(2)] for _ in range(319)]
            identities_otherraw = [[[] for _ in range(2)] for _ in range(319)]

            # image information
            for fpath in fpaths1:
                fname = osp.basename(fpath)
                fname_list = fname.split('_')
                cam_name = fname_list[0]
                pid_name = fname_list[1]
                cam = int(cam_name[-1])
                pid = int(pid_name[-3:])
                temp_fname = ('{:08d}_{:02d}_{:04d}.png'.format(
                    pid, cam, len(identities_imgraw[pid - 1][cam - 1])))
                identities_imgraw[pid - 1][cam - 1].append(temp_fname)
                shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))

            identities_temp = [x for x in identities_imgraw if x != [[], []]]
            identities_images = identities_temp

            for pid in range(len(identities_temp)):
                for cam in range(2):
                    for img in range(len(identities_images[pid][cam])):
                        temp_fname = identities_temp[pid][cam][img]
                        fname = ('{:08d}_{:02d}_{:04d}.png'.format(
                            pid, cam, img))
                        identities_images[pid][cam][img] = fname
                        shutil.copy(osp.join(temp_images_dir, temp_fname),
                                    osp.join(images_dir, fname))

            shutil.rmtree(temp_images_dir)

            # flow information

            for fpath in fpaths2:
                fname = osp.basename(fpath)
                fname_list = fname.split('_')
                cam_name = fname_list[0]
                pid_name = fname_list[1]
                cam = int(cam_name[-1])
                pid = int(pid_name[-3:])
                temp_fname = ('{:08d}_{:02d}_{:04d}.png'.format(
                    pid, cam, len(identities_otherraw[pid - 1][cam - 1])))
                identities_otherraw[pid - 1][cam - 1].append(temp_fname)
                shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))

            identities_temp = [x for x in identities_otherraw if x != [[], []]]
            identities_others = identities_temp

            for pid in range(len(identities_temp)):
                for cam in range(2):
                    for img in range(len(identities_others[pid][cam])):
                        temp_fname = identities_temp[pid][cam][img]
                        fname = ('{:08d}_{:02d}_{:04d}.png'.format(
                            pid, cam, img))
                        identities_others[pid][cam][img] = fname
                        shutil.copy(osp.join(temp_others_dir, temp_fname),
                                    osp.join(others_dir, fname))

            shutil.rmtree(temp_others_dir)

            meta = {
                'name': 'iLIDS-sequence',
                'shot': 'sequence',
                'num_cameras': 2,
                'identities': identities_images
            }

            write_json(meta, osp.join(self.root, 'meta.json'))

            # Consider fixed training and testing split
            splitmat_name = osp.join(exdir1, 'train-test people splits',
                                     'train_test_splits_ilidsvid.mat')
            data = sio.loadmat(splitmat_name)
            person_list = data['ls_set']
            num = len(identities_images)
            splits = []

            for i in range(10):
                pids = (person_list[i] - 1).tolist()
                trainval_pids = sorted(pids[:num // 2])
                test_pids = sorted(pids[num // 2:])
                split = {
                    'trainval': trainval_pids,
                    'query': test_pids,
                    'gallery': test_pids
                }
                splits.append(split)
            write_json(splits, osp.join(self.root, 'splits.json'))
Example #13
    def imgextract(self):

        raw_dir = osp.join(self.root, 'raw')
        exdir1 = osp.join(raw_dir, datasetname)
        exdir2 = osp.join(raw_dir, flowname)
        fpath1 = osp.join(raw_dir, datasetname + '.zip')
        fpath2 = osp.join(raw_dir, flowname + '.tar')

        if not osp.isdir(exdir1):
            print("Extracting tar file")
            cwd = os.getcwd()
            zip_ref = zipfile.ZipFile(fpath1, 'r')
            mkdir_if_missing(exdir1)
            zip_ref.extractall(exdir1)
            zip_ref.close()
            os.chdir(cwd)

        if not osp.isdir(exdir2):
            print("Extracting tar file")
            cwd = os.getcwd()
            tar_ref = tarfile.open(fpath2)
            mkdir_if_missing(exdir2)
            os.chdir(exdir2)
            tar_ref.extractall()
            tar_ref.close()
            os.chdir(cwd)

        # reorganizing the dataset
        # Format

        images_dir = osp.join(self.root, 'images')
        mkdir_if_missing(images_dir)

        others_dir = osp.join(self.root, 'others')
        mkdir_if_missing(others_dir)

        fpaths1 = sorted(glob(osp.join(exdir1, 'multi_shot', '*/*/*.png')))
        fpaths2 = sorted(glob(osp.join(exdir2, '*/*.png')))

        identities_images = [[[] for _ in range(2)] for _ in range(200)]

        for fpath in fpaths1:
            fname = fpath
            fname_list = fname.split('/')
            cam_name = fname_list[-3]
            pid_name = fname_list[-2]
            frame_name = fname_list[-1]
            cam_id = 1 if cam_name =='cam_a' else 2
            pid_id = int(pid_name.split('_')[-1])
            if pid_id > 200:
                continue
            frame_id = int(frame_name.split('.')[-2])
            imagefname = ('{:08d}_{:02d}_{:04d}.png'
                          .format(pid_id-1, cam_id-1, frame_id-1))
            identities_images[pid_id - 1][cam_id - 1].append(imagefname)
            shutil.copy(fpath, osp.join(images_dir, imagefname))

        for fpath in fpaths2:
            fname = fpath
            fname_list = fname.split('/')
            fname_img = fname_list[-1]
            shutil.copy(fname, osp.join(others_dir, fname_img))




        meta = {'name': 'iLIDS-sequence', 'shot': 'sequence', 'num_cameras': 2,
                'identities': identities_images}

        write_json(meta, osp.join(self.root, 'meta.json'))
        # Consider fixed training and testing split
        num = 200
        splits = []
        for i in range(10):
            pids = np.random.permutation(num).tolist()  # permutation already yields 0-based pids
            trainval_pids = pids[:num // 2]
            test_pids = pids[num // 2:]
            split = {'trainval': trainval_pids,
                     'query': test_pids,
                     'gallery': test_pids}

            splits.append(split)
        write_json(splits, osp.join(self.root, 'splits.json'))
Example #14
def main(args):
    if not torch.cuda.is_available():
        args.cpu_only = True
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if not args.cpu_only:
        torch.cuda.manual_seed_all(args.seed)
        cudnn.benchmark = True

    # Logs directory
    mkdir_if_missing(args.logs_dir)
    if not args.eval_only:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Data
    train_dataset, test_dataset, num_classes = get_datasets(
        args.dataset, args.data_dir)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=args.workers,
                              shuffle=True,
                              pin_memory=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             num_workers=args.workers,
                             shuffle=False,
                             pin_memory=True)

    # Model
    model = WideResNet(args.depth,
                       args.width,
                       num_classes,
                       dropout_rate=args.dropout)
    criterion = nn.CrossEntropyLoss()

    start_epoch, best_prec1 = 0, 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['model'])
        start_epoch = checkpoint['epoch'] + 1
        best_prec1 = checkpoint['best_prec1']
        print("=> Load from {}, start epoch {}, best prec1 {:.2%}".format(
            args.resume, start_epoch, best_prec1))

    if not args.cpu_only:
        model = DataParallel(model).cuda()
        criterion = criterion.cuda()

    # Optimizer
    if args.optim_method == 'sgd':
        optimizer = SGD(model.parameters(),
                        lr=args.lr,
                        nesterov=True,
                        momentum=0.9,
                        weight_decay=args.weight_decay)
    else:
        optimizer = Adam(model.parameters(), lr=args.lr)

    # Evaluation only
    if args.eval_only:
        evaluate(start_epoch - 1, test_loader, model, criterion, args.cpu_only)
        return

    # Training
    epoch_steps = json.loads(args.epoch_steps)[::-1]
    for epoch in range(start_epoch, args.epochs):
        # Adjust learning rate
        power = 0
        for i, step in enumerate(epoch_steps):
            if epoch >= step:
                power = len(epoch_steps) - i
        lr = args.lr * (args.lr_decay_ratio**power)
        for g in optimizer.param_groups:
            g['lr'] = lr

        # Training
        train(epoch, train_loader, model, criterion, optimizer, args.cpu_only)
        prec1 = evaluate(epoch, test_loader, model, criterion, args.cpu_only)
        is_best = prec1 > best_prec1
        best_prec1 = max(best_prec1, prec1)

        # Save checkpoint
        checkpoint = {'epoch': epoch, 'best_prec1': best_prec1}
        if args.cpu_only:
            checkpoint['model'] = model.state_dict()
        else:
            checkpoint['model'] = model.module.state_dict()
        save_checkpoint(checkpoint, is_best,
                        osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {}  Prec1: {:.2%}  Best: {:.2%}{}\n'.format(
            epoch, prec1, best_prec1, ' *' if is_best else ''))
Example #15
        diff = np.absolute(img2 - img1)

    # diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    diff = cv2.normalize(diff, None, mmin, mmax, norm_type=cv2.NORM_MINMAX)
    diff = cv2.resize(diff, (240, 240))
    diff = np.stack((diff, ) * 3, axis=-1)
    cv2.imwrite(diff_path + '/diff.png', diff)

    return is_align


if __name__ == '__main__':
    working_dir = osp.dirname(osp.abspath(__file__))
    data_dir = "data"
    dataset = 'GTOS_256'
    dataset_dir = osp.join(data_dir, dataset)
    img_dir = osp.join(dataset_dir, 'images')
    diff_dir = osp.join(dataset_dir, 'diff')
    mkdir_if_missing(diff_dir)
    cnt = 0
    for idx, scene in enumerate(sorted(os.listdir(img_dir))):
        img_scene_dir = osp.join(img_dir, scene)
        if not osp.isdir(img_scene_dir):
            continue
        diff_scene_dir = osp.join(diff_dir, scene)
        mkdir_if_missing(diff_scene_dir)
        if not make_diff(img_scene_dir, diff_scene_dir):
            cnt += 1

    print(cnt, " images cannot be successfully aligned")