Code example #1
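Likely detectron2's own `DatasetMapper.from_config`: it builds the default augmentation list, prepends a `RandomCrop` when cropping is enabled during training, and assembles the mapper's keyword arguments; `keypoint_hflip_indices` is only created when keypoints are enabled.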
    @classmethod
    def from_config(cls, cfg, is_train: bool = True):
        augs = utils.build_augmentation(cfg, is_train)
        if cfg.INPUT.CROP.ENABLED and is_train:
            augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
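            # Cropping can invalidate mask-derived boxes, so when masks are on
            # the mapper recomputes tight boxes from the (cropped) masks.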
            recompute_boxes = cfg.MODEL.MASK_ON
        else:
            recompute_boxes = False

        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "use_instance_mask": cfg.MODEL.MASK_ON,
            "instance_mask_format": cfg.INPUT.MASK_FORMAT,
            "use_keypoint": cfg.MODEL.KEYPOINT_ON,
            "recompute_boxes": recompute_boxes,
        }
        if cfg.MODEL.KEYPOINT_ON:
            ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)

        if cfg.MODEL.LOAD_PROPOSALS:
            ret["precomputed_proposal_topk"] = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        return ret
Code example #2
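A unit test for keypoint flipping: after an `HFlipTransform` over a 400-pixel-wide image, the nose x-coordinate must be mirrored and the 8 left-right keypoint pairs must be swapped, which is exactly the mapping that `create_keypoint_hflip_indices` encodes.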
    def test_flip_keypoints(self):
        transforms = T.TransformList([T.HFlipTransform(400)])
        anno = {
            "bbox": np.asarray([10, 10, 200, 300]),
            "bbox_mode": BoxMode.XYXY_ABS,
            "keypoints": np.random.rand(17, 3) * 50 + 15,
        }

        output = detection_utils.transform_instance_annotations(
            copy.deepcopy(anno),
            transforms,
            (400, 400),
            keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(
                ["keypoints_coco_2017_train"]
            ),
        )
        # The first keypoint is nose
        self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0]))
        # The last 16 keypoints are 8 left-right pairs
        self.assertTrue(
            np.allclose(
                output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1],
                400 - anno["keypoints"][1:, 0].reshape(-1, 2),
            )
        )
        self.assertTrue(
            np.allclose(
                output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],
                anno["keypoints"][1:, 1:].reshape(-1, 2, 2),
            )
        )
Code example #3
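A mapper `__init__` in the older eager style: it builds the augmentations up front and creates the flip indices only for training, since horizontal flipping is a train-time augmentation.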
    def __init__(self, cfg, is_train=True):
        self.augmentation = utils.build_augmentation(cfg, is_train)
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.augmentation.insert(
                0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
            logging.getLogger(__name__).info("Cropping used in training: " +
                                             str(self.augmentation[0]))
            self.compute_tight_boxes = True
        else:
            self.compute_tight_boxes = False

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train
Code example #4
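A variant that keeps the crop generator separate from the other transform gens; note that, unlike most examples here, `crop_gen` is created whenever `INPUT.CROP.ENABLED` is set, not only for training.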
    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen))
        else:
            self.crop_gen = None

        self.tfm_gens = utils.build_transform_gen(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        self.is_train = is_train
Code example #5
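A DensePose mapper: besides the keypoint flip indices, it loads `DensePoseTransformData` (the symmetry data needed to flip DensePose annotations) from the first dataset's `densepose_transform_src`.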
    def __init__(self, cfg, is_train=True):
        self.tfm_gens = utils.build_transform_gen(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.densepose_on = cfg.MODEL.DENSEPOSE_ON
        assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.densepose_on:
            densepose_transform_srcs = [
                MetadataCatalog.get(ds).densepose_transform_src
                for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
            ]
            assert len(densepose_transform_srcs) > 0
            # TODO: check that DensePose transformation data is the same for
            # all the datasets. Otherwise one would have to pass DB ID with
            # each entry to select proper transformation data. For now, since
            # all DensePose annotated data uses the same data semantics, we
            # omit this check.
            densepose_transform_data_fpath = PathManager.get_local_path(
                densepose_transform_srcs[0])
            self.densepose_transform_data = DensePoseTransformData.load(
                densepose_transform_data_fpath)

        self.is_train = is_train
Code example #6
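A mapper that inlines what `build_transform_gen` would normally do: it reads the min/max size settings itself and builds a single `ResizeShortestEdge`, with optional color augmentations left commented out.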
    def __init__(self, cfg, is_train=True):

        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE,
                                         cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " +
                                             str(self.crop_gen))
        else:
            self.crop_gen = None

        if is_train:
            min_size = cfg.INPUT.MIN_SIZE_TRAIN
            max_size = cfg.INPUT.MAX_SIZE_TRAIN
            sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
        else:
            min_size = cfg.INPUT.MIN_SIZE_TEST
            max_size = cfg.INPUT.MAX_SIZE_TEST
            sample_style = "choice"
        if sample_style == "range":
            assert len(
                min_size
            ) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(
                len(min_size))

        logger = logging.getLogger(__name__)
        self.tfm_gens = []
        self.tfm_gens.append(
            T.ResizeShortestEdge(min_size, max_size, sample_style))
        # if self.is_train:
        #     self.tfm_gens.append(T.RandomBrightness())
        #     self.tfm_gens.append(T.RandomContrast())
        #     self.tfm_gens.append(T.RandomLighting())
        #     self.tfm_gens.append(T.RandomSaturation())

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train
Code example #7
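A DensePose/CondInst variant: `mask_on` is also enabled when the coarse segmentation head is trained from masks, and several CondInst-specific options (GT instances, skeletons, auxiliary body semantics) are read at the end.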
    def __init__(self, cfg, is_train=True):
        self.augmentation = build_augmentation(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = (
            cfg.MODEL.MASK_ON
            or (cfg.MODEL.DENSEPOSE_ON
                and cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS))
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.densepose_on = cfg.MODEL.DENSEPOSE_ON
        assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.densepose_on:
            densepose_transform_srcs = [
                MetadataCatalog.get(ds).densepose_transform_src
                for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
            ]
            assert len(densepose_transform_srcs) > 0
            # TODO: check that DensePose transformation data is the same for
            # all the datasets. Otherwise one would have to pass DB ID with
            # each entry to select proper transformation data. For now, since
            # all DensePose annotated data uses the same data semantics, we
            # omit this check.
            densepose_transform_data_fpath = PathManager.get_local_path(
                densepose_transform_srcs[0])
            self.densepose_transform_data = DensePoseTransformData.load(
                densepose_transform_data_fpath)

        self.is_train = is_train
        self.use_gt_ins = cfg.MODEL.CONDINST.IUVHead.GT_INSTANCES
        self.mask_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE
        self.use_gt_skeleton = cfg.MODEL.CONDINST.IUVHead.GT_SKELETON
        if self.use_gt_skeleton:
            self.keypoint_on = True
        self.use_aux_body_semantics = cfg.MODEL.CONDINST.AUX_SUPERVISION_BODY_SEMANTICS

        self.infer_smooth_frame_num = cfg.MODEL.INFERENCE_SMOOTH_FRAME_NUM
Code example #8
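The d2go mapper: it additionally accepts a pluggable `image_loader` and externally supplied `tfm_gens`, tracks error counts for failed images, and asserts that no `RandomCrop` is duplicated in the transform gens when `INPUT.CROP.ENABLED` is set.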
    def __init__(self, cfg, is_train=True, image_loader=None, tfm_gens=None):
        self.tfm_gens = (tfm_gens if tfm_gens is not None else
                         utils.build_transform_gen(cfg, is_train))

        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE,
                                         cfg.INPUT.CROP.SIZE)
            # D2GO NOTE: when INPUT.CROP.ENABLED, don't allow using RandomCropOp
            assert all(not isinstance(gen, T.RandomCrop)
                       for gen in self.tfm_gens)
        else:
            self.crop_gen = None

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT  # noqa
        self.mask_on = cfg.MODEL.MASK_ON  # noqa
        self.mask_format = cfg.INPUT.MASK_FORMAT  # noqa
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON  # noqa
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        if self.load_proposals:
            self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)

        self.is_train = is_train

        # Setup image loader:
        self.image_loader = image_loader
        self.backfill_size = cfg.D2GO_DATA.MAPPER.BACKFILL_SIZE
        self.retry = cfg.D2GO_DATA.MAPPER.RETRY
        self.catch_exception = cfg.D2GO_DATA.MAPPER.CATCH_EXCEPTION

        if self.backfill_size:
            if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
                logger.warning(
                    "ASPECT_RATIO_GROUPING may not work if image's width & height"
                    " are not given in json dataset when calling extended_coco_load,"
                    " if you encounter issue, consider disable ASPECT_RATIO_GROUPING."
                )

        self._error_count = 0
        self._total_counts = 0
        self._error_types = {}
Code example #9
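A WiderFace face-detection mapper: it swaps in a project-specific `WiderFace_RandomCrop` and applies both horizontal and vertical random flips during training.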
    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            # self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
            self.crop_gen = T.WiderFace_RandomCrop()
            logging.getLogger(__name__).info("CropGen used in training: " +
                                             str(self.crop_gen))
        else:
            self.crop_gen = None

        if is_train:
            min_size = cfg.INPUT.MIN_SIZE_TRAIN
            max_size = cfg.INPUT.MAX_SIZE_TRAIN
            sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING

            self.tfm_gens = []
            # self.tfm_gens.append(T.WiderFace_ResizeShortestEdge(min_size, max_size, sample_style))
            self.tfm_gens.append(
                T.ResizeShortestEdge(min_size, max_size, sample_style))
            self.tfm_gens.append(
                T.RandomFlip(prob=0.5, horizontal=True, vertical=False))
            self.tfm_gens.append(
                T.RandomFlip(prob=0.5, horizontal=False, vertical=True))
            # self.tfm_gens.append(T.RandomContrast(0.7, 3.2))
            # self.tfm_gens.append(T.RandomBrightness(0.6, 1.8))
            # self.tfm_gens.append(T.RandomSaturation(0.6, 1.4))
            # self.tfm_gens.append(T.RandomLighting(0.1))
            logging.getLogger(__name__).info(
                "TransformGens used in training: " + str(self.tfm_gens))
        else:
            self.tfm_gens = []
            self.tfm_gens.append(T.WiderFace_NoOpTransform())

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train
Code example #10
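A few-shot detection mapper: in addition to the usual options, it loads a pickled support-set DataFrame at train time and remaps its COCO category ids to contiguous ids via the `coco_2017_train` metadata.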
    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE,
                                         cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " +
                                             str(self.crop_gen))
        else:
            self.crop_gen = None

        self.tfm_gens = utils.build_transform_gen(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS

        self.few_shot = cfg.INPUT.FS.FEW_SHOT
        self.support_way = cfg.INPUT.FS.SUPPORT_WAY
        self.support_shot = cfg.INPUT.FS.SUPPORT_SHOT
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train

        if self.is_train:
            # support_df
            self.support_on = True
            if self.few_shot:
                self.support_df = pd.read_pickle(
                    "./datasets/coco/10_shot_support_df.pkl")
            else:
                self.support_df = pd.read_pickle(
                    "./datasets/coco/train_support_df.pkl")

            metadata = MetadataCatalog.get('coco_2017_train')
            # unmap the category mapping ids for COCO
            reverse_id_mapper = lambda dataset_id: metadata.thing_dataset_id_to_contiguous_id[
                dataset_id]  # noqa
            self.support_df['category_id'] = self.support_df[
                'category_id'].map(reverse_id_mapper)
Code example #11
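A mapper extended for test-time image manipulations: after the standard setup it looks up a manipulation function by name from `get_testing_augmentations()` and reads related options (opponent space, contrast, mosaic pattern) from the config.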
    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE,
                                         cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " +
                                             str(self.crop_gen))
        else:
            self.crop_gen = None

        self.tfm_gens = utils.build_transform_gen(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train
        self.vision_type = cfg.INPUT.VISION_TYPE
        self.manipulation_type = cfg.INPUT.INFER_MANIPULATION_TYPE
        self.manipulation_fun = None
        if self.manipulation_type != "":
            supported_manipulations = get_testing_augmentations()
            self.manipulation_fun = supported_manipulations[
                self.manipulation_type]
        self.manipulation_value = cfg.INPUT.INFER_MANIPULATION_VALUE
        self.opponent_space = cfg.INPUT.OPPONENT_SPACE
        self.contrast = cfg.INPUT.CONTRAST
        self.mosaic_pattern = cfg.INPUT.MOSAIC_PATTERN
        if self.contrast == 1.0:
            self.contrast = None
Code example #12
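A mapper with a hard-coded augmentation pipeline (brightness/contrast/saturation jitter, a fixed 90° rotation, vertical flips, a relative-range crop, and a custom `CutMix`) in place of `build_transform_gen`.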
    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE,
                                         cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " +
                                             str(self.crop_gen))
        else:
            self.crop_gen = None


        # self.tfm_gens = utils.build_transform_gen(cfg, is_train)
        self.tfm_gens = [
            T.RandomBrightness(0.1, 1.6),
            T.RandomContrast(0.1, 3),
            T.RandomSaturation(0.1, 2),
            T.RandomRotation(angle=[90, 90]),
            T.RandomFlip(prob=0.4, horizontal=False, vertical=True),
            T.RandomCrop('relative_range', (0.4, 0.6)),
            CutMix()
        ]

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train
Code example #13
File: dataset_mapper.py  Project: zyg11/centerX
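The centerX mapper: the standard pattern plus CenterNet-specific options (imgaug probability, knowledge distillation without labels, minimum box size) and an `eval_with_gt` switch.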
    def __init__(self, cfg, is_train=True):

        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE,
                                         cfg.INPUT.CROP.SIZE)
            logging.getLogger('detectron2').info("CropGen used in training: " +
                                                 str(self.crop_gen))
        else:
            self.crop_gen = None

        self.eval_with_gt = cfg.TEST.get("WITH_GT", False)

        self.tfm_gens = build_transform_gen(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
                cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                                  if is_train else
                                  cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        self.is_train = is_train
        self.imgaug_prob = cfg.MODEL.CENTERNET.IMGAUG_PROB
        self.kd_without_label = cfg.MODEL.CENTERNET.KD.KD_WITHOUT_LABEL
        self.BOX_MINSIZE = cfg.MODEL.CENTERNET.BOX_MINSIZE
Code example #14
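A lazy-config example: the COCO dataloader is switched to the keypoint datasets and its mapper is updated with `use_keypoint=True` and precomputed `keypoint_hflip_indices` for the training dataset.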
from detectron2.data.detection_utils import create_keypoint_hflip_indices

from .coco import dataloader

dataloader.train.dataset.min_keypoints = 1
dataloader.train.dataset.names = "keypoints_coco_2017_train"
dataloader.test.dataset.names = "keypoints_coco_2017_val"

dataloader.train.mapper.update(
    use_instance_mask=False,
    use_keypoint=True,
    keypoint_hflip_indices=create_keypoint_hflip_indices(
        dataloader.train.dataset.names),
)
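
For reference, here is a minimal standalone sketch (assuming detectron2 is installed and the COCO keypoint metadata is available) of how the indices returned by `create_keypoint_hflip_indices` are consumed when an annotation is flipped; it mirrors the test in code example #2:

    import copy
    import numpy as np
    from detectron2.data import detection_utils, transforms as T
    from detectron2.structures import BoxMode

    # Maps each keypoint index to its left/right counterpart
    # (identity for unpaired keypoints such as the nose).
    hflip_indices = detection_utils.create_keypoint_hflip_indices(
        ["keypoints_coco_2017_train"])

    anno = {
        "bbox": np.asarray([10, 10, 200, 300]),
        "bbox_mode": BoxMode.XYXY_ABS,
        "keypoints": np.random.rand(17, 3) * 50 + 15,  # (x, y, visibility)
    }
    transforms = T.TransformList([T.HFlipTransform(400)])  # image width = 400

    # x-coordinates are mirrored and left/right keypoints are swapped.
    flipped = detection_utils.transform_instance_annotations(
        copy.deepcopy(anno), transforms, (400, 400),
        keypoint_hflip_indices=hflip_indices)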