Example 1
 def _cp_img_to(src, dst, rank, prefix):
     """Copy a ranked result image (or image sequence) into a target directory.

     Args:
         src: image path (image-reid) or tuple/list of paths (video-reid).
         dst: target directory.
         rank: int, denoting ranked position, starting from 1.
         prefix: string tag, e.g. 'query' or 'gallery'.
     """
     # idiom: a single isinstance with a tuple of types replaces the
     # chained `isinstance(...) or isinstance(...)`
     if isinstance(src, (tuple, list)):
         # video-reid: copy every frame into a per-rank sub-directory
         dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
         make_dirs(dst)
         for img_path in src:
             shutil.copy(img_path, dst)
     else:
         # image-reid: encode rank and original file name in the copy's name
         dst = osp.join(
             dst, prefix + '_top' + str(rank).zfill(3) + '_name_' +
             osp.basename(src))
         shutil.copy(src, dst)
Example 2
    def __init__(self, config, loader):
        """Keep references to the run configuration and data loader, build the
        output directory tree, and initialize all sub-components (device,
        model, criterion, optimizer, optional fixed inputs, optional apex
        mixed-precision wrapping)."""
        self.config = config
        self.loader = loader

        # model configuration
        self.mode = config.mode
        self.cnnbackbone = config.cnnbackbone
        self.pid_num = config.pid_num
        self.t_margin = config.t_margin

        # logger configuration
        self.max_save_model_num = config.max_save_model_num
        self.output_path = config.output_path
        self.output_dirs_dict = {
            name: os.path.join(self.output_path, name + '/')
            for name in ('logs', 'models', 'images', 'features')
        }
        # create every output directory up front
        for directory in self.output_dirs_dict.values():
            make_dirs(directory)

        # train configuration: directory to resume training from
        self.resume_train_dir = config.resume_train_dir

        # initialize sub-components in dependency order
        self._init_device()
        self._init_model()
        self._init_criterion()
        self._init_optimizer()
        if config.output_featuremaps:
            self._init_fixed_values()

        # optional apex mixed-precision (fp16) setup
        if not config.fp_16:
            self.amp = None
            self.model_list = None
            self.optimizer_list = None
        else:
            self.amp = amp
            self.model_list = list(self.model_dict.values())
            self.optimizer_list = list(self.optimizer_dict.values())
            self.model_list, self.optimizer_list = self.amp.initialize(
                self.model_list, self.optimizer_list, opt_level="O1")
Example 3
def visualize_ranked_results2(distmat, dataset, save_dir='', topk=20):
    """Visualizes ranked results by copying images into per-query directories.

    Supports both image-reid and video-reid.

    Args:
        distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
        dataset (tuple): a 2-tuple containing (query, gallery), each of which contains
            tuples of (img_path(s), pid, camid).
        save_dir (str): directory to save output images.
        topk (int, optional): denoting top-k images in the rank list to be visualized.
    """
    num_q, num_g = distmat.shape

    print('Visualizing top-{} ranks'.format(topk))
    print('# query: {}\n# gallery {}'.format(num_q, num_g))
    print('Saving images to "{}"'.format(save_dir))

    query, gallery = dataset
    assert num_q == len(query)
    assert num_g == len(gallery)

    # smaller distance = more similar, so ranks are ascending distances
    indices = np.argsort(distmat, axis=1)
    make_dirs(save_dir)

    def _cp_img_to(src, dst, rank, prefix):
        """Copy one ranked result into the query's directory.

        Args:
            src: image path (image-reid) or tuple/list of paths (video-reid).
            dst: target directory.
            rank: int, denoting ranked position, starting from 1
                (0 is used for the query image itself).
            prefix: string tag, e.g. 'query' or 'gallery'.
        """
        if isinstance(src, (tuple, list)):
            # video-reid: copy every frame into a per-rank sub-directory
            dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
            make_dirs(dst)
            for img_path in src:
                shutil.copy(img_path, dst)
        else:
            # image-reid: encode rank and original name in the copied file name
            dst = osp.join(
                dst, prefix + '_top' + str(rank).zfill(3) + '_name_' +
                osp.basename(src))
            shutil.copy(src, dst)

    for q_idx in range(num_q):
        qimg_path, qpid, qcamid = query[q_idx]
        # one output directory per query, named after the query image
        if isinstance(qimg_path, (tuple, list)):
            qdir = osp.join(save_dir, osp.basename(qimg_path[0]))
        else:
            qdir = osp.join(save_dir, osp.basename(qimg_path))
        make_dirs(qdir)
        _cp_img_to(qimg_path, qdir, rank=0, prefix='query')

        rank_idx = 1
        for g_idx in indices[q_idx, :]:
            gimg_path, gpid, gcamid = gallery[g_idx]
            # skip gallery entries of the same identity seen by the same
            # camera; use logical 'and', not bitwise '&', on booleans
            invalid = (qpid == gpid) and (qcamid == gcamid)
            if not invalid:
                _cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery')
                rank_idx += 1
                if rank_idx > topk:
                    break

    print("Done")
Example 4
def visualize_ranked_results(distmat,
                             dataset,
                             save_dir='',
                             topk=20,
                             sort='descend',
                             mode='inter-camera',
                             only_show=None):
    """Visualizes ranked results as one annotated image strip per query.

    Args:
        distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
        dataset (tuple): a 2-tuple containing (query, gallery), each of which
            contains tuples of (img_paths, pids, camids).
        save_dir (str): directory to save output images.
        topk (int, optional): denoting top-k images in the rank list to be visualized.
        sort (str): 'ascend' means a small value is similar, otherwise 'descend'.
        mode (str): intra-camera/inter-camera/all
            intra-camera only visualizes results in the same camera as the query
            inter-camera only visualizes results in a different camera than the query
            all visualizes all results
        only_show (str, optional): 'pos' keeps only correct matches, 'neg' keeps
            only wrong ones, None keeps both.
    """
    num_q, num_g = distmat.shape

    print('Visualizing top-{} ranks'.format(topk))
    print('# query: {}\n# gallery {}'.format(num_q, num_g))
    print('Saving images to "{}"'.format(save_dir))

    query, gallery = dataset
    assert num_q == len(query)
    assert num_g == len(gallery)
    assert sort in ['descend', 'ascend']
    assert mode in ['intra-camera', 'inter-camera', 'all']

    # BUGFIX: compare strings with '==', not 'is'. Identity comparison with a
    # literal relies on CPython string interning and raises a SyntaxWarning on
    # modern interpreters.
    if sort == 'ascend':
        indices = np.argsort(distmat, axis=1)
    else:  # 'descend': larger value = more similar
        indices = np.argsort(distmat, axis=1)[:, ::-1]

    make_dirs(save_dir)

    def cat_imgs_to(image_list, hit_list, text_list, target_dir):
        """Concatenate the images horizontally into one strip and save it.

        Each image is annotated with its distance value; a green border marks
        a correct match, a red border a wrong one.
        """
        images = []
        for img, hit, text in zip(image_list, hit_list, text_list):
            img = Image.open(img).resize((64, 128))
            d = ImageDraw.Draw(img)
            d.text((3, 1), "{:.3}".format(text), fill=(255, 255, 0))
            img = ImageOps.expand(img, border=4, fill='green' if hit else 'red')
            images.append(img)

        widths, heights = zip(*(i.size for i in images))
        total_width = sum(widths)
        max_height = max(heights)
        new_im = Image.new('RGB', (total_width, max_height))
        x_offset = 0
        for im in images:
            new_im.paste(im, (x_offset, 0))
            x_offset += im.size[0]

        new_im.save(target_dir)

    counts = 0
    for q_idx in range(num_q):

        image_list = []
        hit_list = []
        text_list = []

        # the query image itself always comes first, marked as a hit
        qimg_path, qpid, qcamid = query[q_idx]
        image_list.append(qimg_path)
        hit_list.append(True)
        text_list.append(0.0)

        # output file is named after the query image
        if isinstance(qimg_path, (tuple, list)):
            qdir = osp.join(save_dir, osp.basename(qimg_path[0]))
        else:
            qdir = osp.join(save_dir, osp.basename(qimg_path))

        # collect up to topk valid gallery matches
        rank_idx = 1
        for g_idx in indices[q_idx, :]:
            gimg_path, gpid, gcamid = gallery[g_idx]
            if mode == 'intra-camera':
                valid = qcamid == gcamid
            elif mode == 'inter-camera':
                # a different camera is always valid; the same camera only
                # counts when the identity differs
                valid = (qpid != gpid
                         and qcamid == gcamid) or (qcamid != gcamid)
            else:  # 'all' (guaranteed by the assert above)
                valid = True
            if valid:
                if only_show == 'pos' and qpid != gpid: continue
                if only_show == 'neg' and qpid == gpid: continue
                image_list.append(gimg_path)
                hit_list.append(qpid == gpid)
                text_list.append(distmat[q_idx, g_idx])
                rank_idx += 1
                if rank_idx > topk:
                    break

        counts += 1
        cat_imgs_to(image_list, hit_list, text_list, qdir)
        print(counts, qdir)
Example 5
    def featuremaps2heatmaps(self,
                             original_images,
                             featuremaps,
                             image_paths,
                             current_epoch,
                             if_save=False,
                             if_fixed=False,
                             if_fake=False):
        """Render CNN feature maps as heat-map grids next to their images.

        For each sample, builds a horizontal grid of
        [original image | activation map | overlay] (uint8, BGR channel
        order) and returns all grids stacked as a tensor. Optionally also
        writes each grid to disk under ``images/<current_epoch>`` (or its
        ``fake``/``true`` sub-directory for fixed inputs).

        Args:
            original_images: image batch of shape (N, C, H, W); assumed
                normalized with IMAGENET_MEAN/IMAGENET_STD, judging by the
                de-normalization loop below -- TODO confirm with caller.
            featuremaps: feature tensor of shape (N, C', h, w); channels are
                summed into one activation map per sample.
            image_paths: file path per sample; basenames name the outputs.
            current_epoch: epoch index used as the output sub-directory name.
            if_save: when True, write each grid to disk as a .jpg.
            if_fixed: when saving, use the 'fake'/'true' sub-directory split.
            if_fake: selects 'fake' over 'true' when if_fixed is True.

        Returns:
            torch.Tensor: uint8 tensor of shape (N, 3, H, 3*W + 2*GRID_SPACING).
        """
        height = original_images.size(2)
        width = original_images.size(3)
        imgs = original_images
        # collapse the channel dimension into one activation map per sample
        outputs = featuremaps.sum(1)
        # outputs = (outputs ** 2).sum(1)
        #b, h, w = outputs.size()
        #outputs = outputs.view(b, h * w)
        # outputs = F.normalize(outputs, p=2, dim=1)
        #outputs = outputs.view(b, h, w)

        imgs, outputs = imgs.cpu(), outputs.cpu()
        grid_img_tensor = []
        if if_save:
            # choose the output directory; the 'fake'/'true' split only
            # applies when visualizing the fixed inputs
            save_dir = osp.join(self.output_dirs_dict['images'],
                                str(current_epoch))
            make_dirs(save_dir)
            if if_fixed:
                if if_fake:
                    save_dir = osp.join(self.output_dirs_dict['images'],
                                        str(current_epoch), 'fake')
                    make_dirs(save_dir)
                else:
                    save_dir = osp.join(self.output_dirs_dict['images'],
                                        str(current_epoch), 'true')
                    make_dirs(save_dir)
            else:
                save_dir = osp.join(self.output_dirs_dict['images'],
                                    str(current_epoch))
        for j in range(outputs.size(0)):
            # get image name (basename without extension)
            path = image_paths[j]
            imname = osp.basename(osp.splitext(path)[0])

            # RGB image: undo the per-channel normalization in place, then
            # convert to uint8 HWC
            img = imgs[j, ...]
            for t, m, s in zip(img, IMAGENET_MEAN, IMAGENET_STD):
                t.mul_(s).add_(m).clamp_(0, 1)
            img_np = np.uint8(np.floor(img.numpy() * 255))
            img_np = img_np.transpose((1, 2, 0))  # (c, h, w) -> (h, w, c)

            # activation map: upscale to image size, min-max normalize to
            # [0, 255] (epsilon guards a constant map) and apply JET colors
            am = outputs[j, ...].numpy()
            am = cv2.resize(am, (width, height))
            am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)
            am = np.uint8(np.floor(am))
            am = cv2.applyColorMap(am, cv2.COLORMAP_JET)

            # overlapped: blend image and activation map, clip into uint8
            overlapped = img_np * 0.3 + am * 0.7
            overlapped[overlapped > 255] = 255
            overlapped = overlapped.astype(np.uint8)

            # save images in a single figure (add white spacing between images)
            # from left to right: original image, activation map, overlapped image
            grid_img = 255 * np.ones(
                (height, 3 * width + 2 * GRID_SPACING, 3), dtype=np.uint8)
            # RGB -> BGR flip so the grid matches cv2's channel convention
            grid_img[:, :width, :] = img_np[:, :, ::-1]
            grid_img[:, width + GRID_SPACING:2 * width + GRID_SPACING, :] = am
            grid_img[:, 2 * width + 2 * GRID_SPACING:, :] = overlapped
            grid_img_tensor.append(grid_img)
            if if_save:
                cv2.imwrite(osp.join(save_dir, imname + '.jpg'), grid_img)
        # (N, H, W, 3) -> (N, 3, H, W): channel-first for the torch consumer
        grid_img_tensor = np.transpose(np.stack(grid_img_tensor, axis=0),
                                       (0, 3, 1, 2))
        return torch.from_numpy(grid_img_tensor)
Example 6
    def save_model(self, save_step, save_epoch):
        """Save model/optimizer (and optional amp) states, then prune old ones.

        Checkpoints are written under ``models/<save_step>/`` as
        ``model_<name>_<epoch>.pkl`` / ``optimizer_<name>_<epoch>.pkl``
        (plus ``amp_<epoch>.pkl`` for fp16 runs). Afterwards, incomplete
        checkpoints and the oldest complete ones beyond
        ``self.max_save_model_num`` are deleted.

        Args:
            save_step: sub-directory name (training step/stage) to save into.
            save_epoch: epoch index embedded in the checkpoint file names.
        """
        models_steps_path = os.path.join(self.output_dirs_dict['models'],
                                         str(save_step))
        if not osp.exists(models_steps_path):
            make_dirs(models_steps_path)

        # save every state dict for this epoch
        for module_name, module in self.model_dict.items():
            torch.save(
                module.state_dict(),
                os.path.join(models_steps_path,
                             f'model_{module_name}_{save_epoch}.pkl'))
        for optimizer_name, optimizer in self.optimizer_dict.items():
            torch.save(
                optimizer.state_dict(),
                os.path.join(models_steps_path,
                             f'optimizer_{optimizer_name}_{save_epoch}.pkl'))
        if self.config.fp_16 and self.amp:
            torch.save(
                self.amp.state_dict(),
                os.path.join(models_steps_path, f'amp_{save_epoch}.pkl'))

        # retention disabled: keep everything
        if self.max_save_model_num <= 0:
            return

        def _remove_epoch_files(root, epoch):
            """Best-effort removal of every checkpoint file of one epoch."""
            names = [f'model_{name}_{epoch}.pkl' for name in self.model_dict]
            names += [
                f'optimizer_{name}_{epoch}.pkl'
                for name in self.optimizer_dict
            ]
            if self.config.fp_16 and self.amp:
                names.append(f'amp_{epoch}.pkl')
            for name in names:
                try:
                    os.remove(os.path.join(root, name))
                except OSError:
                    # the file may never have been written (incomplete save)
                    # or was removed already; narrow except instead of the
                    # original bare 'except: pass'
                    pass

        root, _, files = os_walk(models_steps_path)

        # epoch index of every saved file (trailing '_<epoch>.pkl')
        indexes = [
            int(file.replace('.pkl', '').split('_')[-1]) for file in files
        ]

        # an epoch is complete only if every model AND optimizer file is
        # present (the optional amp file only adds to the count)
        expected = len(self.model_dict) + len(self.optimizer_dict)
        available_indexes = sorted(
            {index for index in indexes if indexes.count(index) >= expected},
            reverse=True)
        unavailable_indexes = list(set(indexes) - set(available_indexes))

        # delete all incomplete checkpoints
        for unavailable_index in unavailable_indexes:
            _remove_epoch_files(root, unavailable_index)

        # delete the oldest complete checkpoints beyond the retention limit
        # (slice is empty when there is nothing to prune)
        for extra_available_index in available_indexes[
                self.max_save_model_num:]:
            _remove_epoch_files(root, extra_available_index)