Example #1
    def get_all_checkpoint_files(self):
        """
        Returns:
            list: All available checkpoint files (.pth files) in target
                directory.
        """
        all_model_checkpoints = [
            os.path.join(self.save_dir, file)
            for file in PathManager.ls(self.save_dir)
            if PathManager.isfile(os.path.join(self.save_dir, file))
            and file.endswith(".pth")
        ]
        return all_model_checkpoints
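
A minimal usage sketch (hypothetical model and save directory), assuming this method lives on fvcore's Checkpointer, as it does in detectron2/fastreid:

# Hypothetical usage: list every .pth checkpoint in a save directory.
import torch.nn as nn
from fvcore.common.checkpoint import Checkpointer

model = nn.Linear(4, 2)  # placeholder model
checkpointer = Checkpointer(model, save_dir="./output")
for ckpt_path in checkpointer.get_all_checkpoint_files():
    print(ckpt_path)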
Example #2
def read_image(file_name, format=None):
    """
    Read an image into the given format.
    Will apply rotation and flipping if the image has such exif information.
    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR"
    Returns:
        image (PIL.Image.Image): the image in the given format
    """
    with PathManager.open(file_name, "rb") as f:
        image = Image.open(f)

        # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973
        try:
            image = ImageOps.exif_transpose(image)
        except Exception:
            pass

        if format is not None:
            # PIL only supports RGB, so convert to RGB and flip channels over below
            conversion_format = format
            if format == "BGR":
                conversion_format = "RGB"
            image = image.convert(conversion_format)
        image = np.asarray(image)
        if format == "BGR":
            # flip channels if needed
            image = image[:, :, ::-1]
        # PIL squeezes out the channel dimension for "L"; do not expand it back
        # here, since Image.fromarray cannot handle (H, W, 1) arrays.
        image = Image.fromarray(image)
        return image
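
A short usage sketch (file name hypothetical). Unlike detectron2's version of this helper, this variant converts the array back to a PIL image before returning:

# Hypothetical usage: load an image with BGR channel order (OpenCV convention).
img = read_image("query.jpg", format="BGR")
print(img.size)  # PIL reports (width, height)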
Example #3
    def load(self, path: str):
        """
        Load from the given checkpoint. When the path points to a network file,
        this function has to be called on all ranks.
        Args:
            path (str): path or url to the checkpoint. If empty, will not load
                anything.
        Returns:
            dict:
                extra data loaded from the checkpoint that has not been
                processed. For example, those saved with
                :meth:`.save(**extra_data)`.
        """
        if not path:
            # no checkpoint provided
            self.logger.info(
                "No checkpoint found. Training model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(path))
        if not os.path.isfile(path):
            path = PathManager.get_local_path(path)
            assert os.path.isfile(path), "Checkpoint {} not found!".format(
                path)

        checkpoint = self._load_file(path)
        self._load_model(checkpoint)
        # for key, obj in self.checkpointables.items():
        #     if key in checkpoint:
        #         self.logger.info("Loading {} from {}".format(key, path))
        #         obj.load_state_dict(checkpoint.pop(key))

        # return any further checkpoint data
        return checkpoint
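
A hedged usage sketch, continuing the Checkpointer from Example #1; the "iteration" key follows detectron2's convention of calling save(iteration=...) during training, assumed here:

# Hypothetical usage: resume from the latest checkpoint if one exists;
# get_checkpoint_file() returns "" when there is none, so load() is a no-op.
extra = checkpointer.load(checkpointer.get_checkpoint_file())
start_iter = extra.get("iteration", -1) + 1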
Example #4
    def has_checkpoint(self):
        """
        Returns:
            bool: whether a checkpoint exists in the target directory.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        return PathManager.exists(save_file)
Example #5
    def tag_last_checkpoint(self, last_filename_basename: str):
        """
        Tag the last checkpoint.
        Args:
            last_filename_basename (str): the basename of the last filename.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        with PathManager.open(save_file, "w") as f:
            f.write(last_filename_basename)
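
Together with has_checkpoint (Example #4) and get_checkpoint_file (Example #7 below), this implements a simple tagging protocol: "last_checkpoint" is a one-line text file holding the basename of the newest checkpoint. A sketch of the round trip, with a hypothetical file name:

# Hypothetical round trip through the "last_checkpoint" tag file.
checkpointer.tag_last_checkpoint("model_0001.pth")
assert checkpointer.has_checkpoint()
print(checkpointer.get_checkpoint_file())  # e.g. ./output/model_0001.pth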
Example #6
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:
    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory
    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(
        rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info("Contents of args.config_file={}:\n{}".format(
            args.config_file,
            PathManager.open(args.config_file, "r").read()))

    logger.info("Running with full config:\n{}".format(cfg))
    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(os.path.abspath(path)))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng()

    # cudnn benchmark has a large overhead; it shouldn't be used, given the
    # small size of a typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
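
A minimal sketch of how this is typically wired up in a launch script, assuming detectron2's stock get_cfg and default_argument_parser helpers:

# Hypothetical launch-script setup.
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser

args = default_argument_parser().parse_args()
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)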
Example #7
    def get_checkpoint_file(self):
        """
        Returns:
            str: The latest checkpoint file in target directory.
        """
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        try:
            with PathManager.open(save_file, "r") as f:
                last_saved = f.read().strip()
        except IOError:
            # if file doesn't exist, maybe because it has just been
            # deleted by a separate process
            return ""
        return os.path.join(self.save_dir, last_saved)
Example #8
    def after_step(self):
        if self._profiler is None:
            return
        self._profiler.__exit__(None, None, None)
        out_file = os.path.join(
            self._output_dir,
            "profiler-trace-iter{}.json".format(self.trainer.iter))
        if "://" not in out_file:
            self._profiler.export_chrome_trace(out_file)
        else:
            # Support non-posix filesystems
            with tempfile.TemporaryDirectory(
                    prefix="detectron2_profiler") as d:
                tmp_file = os.path.join(d, "tmp.json")
                self._profiler.export_chrome_trace(tmp_file)
                with open(tmp_file) as f:
                    content = f.read()
            with PathManager.open(out_file, "w") as f:
                f.write(content)
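
This after_step belongs to a profiler hook; a hedged sketch of registering such a hook, assuming detectron2's hooks.AutogradProfiler and DefaultTrainer (cfg as in Example #6):

# Hypothetical registration: profile only iteration 100 and write
# profiler-trace-iter100.json under cfg.OUTPUT_DIR.
from detectron2.engine import DefaultTrainer, hooks

trainer = DefaultTrainer(cfg)
trainer.register_hooks([
    hooks.AutogradProfiler(lambda t: t.iter == 100, cfg.OUTPUT_DIR)
])
trainer.train()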
Example #9
    def save(self, name: str, **kwargs: dict):
        """
        Dump model and checkpointables to a file.
        Args:
            name (str): name of the file.
            kwargs (dict): extra arbitrary data to save.
        """
        if not self.save_dir or not self.save_to_disk:
            return

        data = {}
        data["model"] = self.model.state_dict()
        for key, obj in self.checkpointables.items():
            data[key] = obj.state_dict()
        data.update(kwargs)

        basename = "{}.pth".format(name)
        save_file = os.path.join(self.save_dir, basename)
        assert os.path.basename(save_file) == basename, basename
        self.logger.info("Saving checkpoint to {}".format(save_file))
        with PathManager.open(save_file, "wb") as f:
            torch.save(data, f)
        self.tag_last_checkpoint(basename)
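
A short usage sketch, continuing the Checkpointer from Example #1; the extra keyword arguments are round-tripped by load (Example #3):

# Hypothetical usage: periodic checkpointing with extra metadata.
checkpointer.save("model_0000099", iteration=99)
# -> writes ./output/model_0000099.pth and re-tags ./output/last_checkpoint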
Example #10
    def preprocess_split(self):
        # This function is a bit complex; it does three things:
        # 1. extract data from cuhk-03.mat and save it as png images
        # 2. create the 20 classic splits (Li et al. CVPR'14)
        # 3. create the new split (Zhong et al. CVPR'17)
        if osp.exists(self.imgs_labeled_dir) \
                and osp.exists(self.imgs_detected_dir) \
                and osp.exists(self.split_classic_det_json_path) \
                and osp.exists(self.split_classic_lab_json_path) \
                and osp.exists(self.split_new_det_json_path) \
                and osp.exists(self.split_new_lab_json_path):
            return

        import h5py
        from imageio import imwrite
        from scipy.io import loadmat

        PathManager.mkdirs(self.imgs_detected_dir)
        PathManager.mkdirs(self.imgs_labeled_dir)

        print('Extract image data from "{}" and save as png'.format(
            self.raw_mat_path))
        mat = h5py.File(self.raw_mat_path, 'r')

        def _deref(ref):
            return mat[ref][:].T

        def _process_images(img_refs, campid, pid, save_dir):
            img_paths = []  # Note: some persons only have images for one view
            for imgid, img_ref in enumerate(img_refs):
                img = _deref(img_ref)
                if img.size == 0 or img.ndim < 3:
                    continue  # skip empty cell
                # images are saved with the following name format (all indices
                # below are 1-based, which ensures uniqueness):
                # campid: index of camera pair (1-5)
                # pid: index of person in 'campid'-th camera pair
                # viewid: index of view, {1, 2}
                # imgid: index of image, (1-10)
                viewid = 1 if imgid < 5 else 2
                img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(
                    campid + 1, pid + 1, viewid, imgid + 1)
                img_path = osp.join(save_dir, img_name)
                if not osp.isfile(img_path):
                    imwrite(img_path, img)
                img_paths.append(img_path)
            return img_paths

        def _extract_img(image_type):
            print('Processing {} images ...'.format(image_type))
            meta_data = []
            imgs_dir = self.imgs_detected_dir if image_type == 'detected' else self.imgs_labeled_dir
            for campid, camp_ref in enumerate(mat[image_type][0]):
                camp = _deref(camp_ref)
                num_pids = camp.shape[0]
                for pid in range(num_pids):
                    img_paths = _process_images(camp[pid, :], campid, pid,
                                                imgs_dir)
                    assert len(img_paths) > 0, \
                        'campid{}-pid{} has no images'.format(campid, pid)
                    meta_data.append((campid + 1, pid + 1, img_paths))
                print('- done camera pair {} with {} identities'.format(
                    campid + 1, num_pids))
            return meta_data

        meta_detected = _extract_img('detected')
        meta_labeled = _extract_img('labeled')

        def _extract_classic_split(meta_data, test_split):
            train, test = [], []
            num_train_pids, num_test_pids = 0, 0
            num_train_imgs, num_test_imgs = 0, 0
            for i, (campid, pid, img_paths) in enumerate(meta_data):
                if [campid, pid] in test_split:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')[2]) - 1  # make it 0-based
                        test.append((img_path, num_test_pids, camid))
                    num_test_pids += 1
                    num_test_imgs += len(img_paths)
                else:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')[2]) - 1  # make it 0-based
                        train.append((img_path, num_train_pids, camid))
                    num_train_pids += 1
                    num_train_imgs += len(img_paths)
            return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs

        print('Creating classic splits (# = 20) ...')
        splits_classic_det, splits_classic_lab = [], []
        for split_ref in mat['testsets'][0]:
            test_split = _deref(split_ref).tolist()

            # create split for detected images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_detected, test_split)
            splits_classic_det.append({
                'train': train,
                'query': test,
                'gallery': test,
                'num_train_pids': num_train_pids,
                'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids,
                'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids,
                'num_gallery_imgs': num_test_imgs
            })

            # create split for labeled images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_labeled, test_split)
            splits_classic_lab.append({
                'train': train,
                'query': test,
                'gallery': test,
                'num_train_pids': num_train_pids,
                'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids,
                'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids,
                'num_gallery_imgs': num_test_imgs
            })

        with PathManager.open(self.split_classic_det_json_path, 'w') as f:
            json.dump(splits_classic_det, f, indent=4, separators=(',', ': '))
        with PathManager.open(self.split_classic_lab_json_path, 'w') as f:
            json.dump(splits_classic_lab, f, indent=4, separators=(',', ': '))

        def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):
            tmp_set = []
            unique_pids = set()
            for idx in idxs:
                img_name = filelist[idx][0]
                camid = int(img_name.split('_')[2]) - 1  # make it 0-based
                pid = pids[idx]
                if relabel:
                    pid = pid2label[pid]
                img_path = osp.join(img_dir, img_name)
                tmp_set.append((img_path, int(pid), camid))
                unique_pids.add(pid)
            return tmp_set, len(unique_pids), len(idxs)

        def _extract_new_split(split_dict, img_dir):
            train_idxs = split_dict['train_idx'].flatten() - 1  # index-0
            pids = split_dict['labels'].flatten()
            train_pids = set(pids[train_idxs])
            pid2label = {pid: label for label, pid in enumerate(train_pids)}
            query_idxs = split_dict['query_idx'].flatten() - 1
            gallery_idxs = split_dict['gallery_idx'].flatten() - 1
            filelist = split_dict['filelist'].flatten()
            train_info = _extract_set(filelist,
                                      pids,
                                      pid2label,
                                      train_idxs,
                                      img_dir,
                                      relabel=True)
            query_info = _extract_set(filelist,
                                      pids,
                                      pid2label,
                                      query_idxs,
                                      img_dir,
                                      relabel=False)
            gallery_info = _extract_set(filelist,
                                        pids,
                                        pid2label,
                                        gallery_idxs,
                                        img_dir,
                                        relabel=False)
            return train_info, query_info, gallery_info

        print('Creating new split for detected images (767/700) ...')
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_det_mat_path), self.imgs_detected_dir)
        split = [{
            'train': train_info[0],
            'query': query_info[0],
            'gallery': gallery_info[0],
            'num_train_pids': train_info[1],
            'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1],
            'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1],
            'num_gallery_imgs': gallery_info[2]
        }]

        with PathManager.open(self.split_new_det_json_path, 'w') as f:
            json.dump(split, f, indent=4, separators=(',', ': '))

        print('Creating new split for labeled images (767/700) ...')
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_lab_mat_path), self.imgs_labeled_dir)
        split = [{
            'train': train_info[0],
            'query': query_info[0],
            'gallery': gallery_info[0],
            'num_train_pids': train_info[1],
            'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1],
            'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1],
            'num_gallery_imgs': gallery_info[2]
        }]
        with PathManager.open(self.split_new_lab_json_path, 'w') as f:
            json.dump(split, f, indent=4, separators=(',', ': '))
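
A hedged sketch of inspecting the generated split files (path hypothetical): each JSON file holds a list of split dicts whose 'train'/'query'/'gallery' entries are (img_path, pid, camid) triplets, alongside the pid/image counts written above:

# Hypothetical inspection of one generated split record.
import json

with open('datasets/cuhk03/splits_new_detected.json') as f:
    splits = json.load(f)
record = splits[0]
print(record['num_train_pids'], record['num_query_imgs'])
img_path, pid, camid = record['train'][0]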
Example #11
    def __init__(self,
                 root='datasets',
                 split_id=0,
                 cuhk03_labeled=False,
                 cuhk03_classic_split=False,
                 **kwargs):
        # self.root = osp.abspath(osp.expanduser(root))
        self.root = root
        self.dataset_dir = osp.join(self.root, self.dataset_dir)

        self.data_dir = osp.join(self.dataset_dir, 'cuhk03_release')
        self.raw_mat_path = osp.join(self.data_dir, 'cuhk-03.mat')

        self.imgs_detected_dir = osp.join(self.dataset_dir, 'images_detected')
        self.imgs_labeled_dir = osp.join(self.dataset_dir, 'images_labeled')

        self.split_classic_det_json_path = osp.join(
            self.dataset_dir, 'splits_classic_detected.json')
        self.split_classic_lab_json_path = osp.join(
            self.dataset_dir, 'splits_classic_labeled.json')

        self.split_new_det_json_path = osp.join(self.dataset_dir,
                                                'splits_new_detected.json')
        self.split_new_lab_json_path = osp.join(self.dataset_dir,
                                                'splits_new_labeled.json')

        self.split_new_det_mat_path = osp.join(
            self.dataset_dir, 'cuhk03_new_protocol_config_detected.mat')
        self.split_new_lab_mat_path = osp.join(
            self.dataset_dir, 'cuhk03_new_protocol_config_labeled.mat')

        required_files = [
            self.dataset_dir, self.data_dir, self.raw_mat_path,
            self.split_new_det_mat_path, self.split_new_lab_mat_path
        ]
        self.check_before_run(required_files)

        self.preprocess_split()

        if cuhk03_labeled:
            split_path = self.split_classic_lab_json_path if cuhk03_classic_split else self.split_new_lab_json_path
        else:
            split_path = self.split_classic_det_json_path if cuhk03_classic_split else self.split_new_det_json_path

        with PathManager.open(split_path) as f:
            splits = json.load(f)
        assert split_id < len(
            splits
        ), 'Condition split_id ({}) < len(splits) ({}) is false'.format(
            split_id, len(splits))
        split = splits[split_id]

        train = split['train']
        tmp_train = []
        for img_path, pid, camid in train:
            new_pid = self.dataset_name + "_" + str(pid)
            tmp_train.append((img_path, new_pid, camid))
        train = tmp_train
        del tmp_train
        query = split['query']
        gallery = split['gallery']

        super(CUHK03, self).__init__(train, query, gallery, **kwargs)
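
A minimal instantiation sketch; the flag semantics follow the code above (the defaults select the new 767/700 protocol on detected images):

# Hypothetical usage: build the CUHK03 dataset under the new protocol.
dataset = CUHK03(root='datasets',
                 split_id=0,
                 cuhk03_labeled=False,
                 cuhk03_classic_split=False)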
Example #12
    parser.add_argument("--output",
                        default='demo_output',
                        help='path to save features')
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    demo = FeatureExtractionDemo(cfg, parallel=args.parallel)

    PathManager.mkdirs(args.output)
    if args.input:
        if PathManager.isdir(args.input[0]):
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) were not found"
        for path in tqdm.tqdm(args.input):
            img = cv2.imread(path)
            feat = demo.run_on_image(img)
            feat = feat.numpy()
            np.save(
                os.path.join(args.output,
                             os.path.basename(path).replace('.jpg', '.npy')),
                feat)
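
A hedged follow-up sketch: the saved .npy feature files can be loaded back for similarity search (file names hypothetical, derived from the input image names):

# Hypothetical usage: load two saved features and compare them by cosine.
import numpy as np

a = np.load('demo_output/person_01.npy').ravel()
b = np.load('demo_output/person_02.npy').ravel()
cos = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
print(cos)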
Example #13
                        help='path to save converted caffe model')
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    args = get_parser().parse_args()
    cfg = setup_cfg(args)

    cfg.defrost()
    cfg.MODEL.BACKBONE.PRETRAIN = False
    cfg.MODEL.HEADS.POOL_LAYER = "identity"
    cfg.MODEL.BACKBONE.WITH_NL = False

    model = build_model(cfg)
    Checkpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()
    print(model)

    inputs = torch.randn(1, 3, cfg.INPUT.SIZE_TEST[0],
                         cfg.INPUT.SIZE_TEST[1]).cuda()
    PathManager.mkdirs(args.output)
    pytorch_to_caffe.trans_net(model, inputs, args.name)
    pytorch_to_caffe.save_prototxt(f"{args.output}/{args.name}.prototxt")
    pytorch_to_caffe.save_caffemodel(f"{args.output}/{args.name}.caffemodel")
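
A small sanity-check sketch one might append after the conversion, using only names already defined in the script above; it assumes pytorch_to_caffe writes both artifacts to args.output:

# Hypothetical sanity check: both conversion outputs should exist on disk.
import os

for ext in ('prototxt', 'caffemodel'):
    out_path = f"{args.output}/{args.name}.{ext}"
    assert os.path.isfile(out_path), f"missing {out_path}"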