Example #1
    def test_augmentation_list(self):
        input_shape = (100, 100)
        image = np.random.rand(*input_shape).astype("float32")
        sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
        inputs = T.StandardAugInput(image, sem_seg=sem_seg)  # provide two args

        augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)])
        _ = T.AugmentationList([augs, T.Resize(30)])(inputs)
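After __call__, the AugInput is modified in place and a TransformList comes back, which can replay the same geometric ops on other data. A short sketch continuing the test above (the boxes array is an assumed extra, not part of the test):

        # Sketch: inputs.image / inputs.sem_seg now hold the augmented data,
        # and the returned TransformList replays the same ops on boxes.
        tfms = T.AugmentationList([augs, T.Resize(30)])(inputs)
        new_image, new_sem_seg = inputs.image, inputs.sem_seg
        boxes = np.array([[10.0, 10.0, 50.0, 50.0]])  # assumed XYXY boxes
        new_boxes = tfms.apply_box(boxes)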
Example #2
    def test_apply_rotated_boxes_unequal_scaling_factor(self):
        np.random.seed(125)
        h, w = 400, 200
        newh, neww = 800, 800
        image = np.random.rand(h, w)
        augs = []
        augs.append(T.Resize(shape=(newh, neww)))
        image, transforms = T.apply_augmentations(augs, image)
        image_shape = image.shape[:2]  # h, w
        assert image_shape == (newh, neww)

        boxes = np.array(
            [
                [150, 100, 40, 20, 0],
                [150, 100, 40, 20, 30],
                [150, 100, 40, 20, 90],
                [150, 100, 40, 20, -90],
            ],
            dtype=np.float64,
        )
        transformed_boxes = transforms.apply_rotated_box(boxes)

        expected_bboxes = np.array(
            [
                [600, 200, 160, 40, 0],
                [600, 200, 144.22205102, 52.91502622, 49.10660535],
                [600, 200, 80, 80, 90],
                [600, 200, 80, 80, -90],
            ],
            dtype=np.float64,
        )
        err_msg = "transformed_boxes = {}, expected {}".format(
            transformed_boxes, expected_bboxes)
        assert np.allclose(transformed_boxes, expected_bboxes), err_msg
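The 30-degree row can be verified by hand: per-axis scaling (sx = 800/200 = 4, sy = 800/400 = 2) stretches the box's axis vectors independently, so the new width/height are the norms of the scaled axes and the angle is recomputed with atan2. A standalone sketch of that arithmetic:

# Hand-check of the 30-degree expected row under per-axis scaling.
import math

sx, sy = 800 / 200, 800 / 400                         # 4.0, 2.0
c, s = math.cos(math.radians(30)), math.sin(math.radians(30))
new_w = 40 * math.hypot(sx * c, sy * s)               # ~144.22205
new_h = 20 * math.hypot(sy * c, sx * s)               # ~52.91503
new_angle = math.degrees(math.atan2(sx * s, sy * c))  # ~49.10661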
Example #3
def custom_mapper(dataset_dict, size, flip_prob, min_brightness, max_brightness,
                  min_contrast, max_contrast, min_saturation, max_saturation):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = detection_utils.read_image(dataset_dict["file_name"], format="BGR")
    transform_list = [
        T.Resize(size),
        T.RandomBrightness(min_brightness, max_brightness),
        T.RandomContrast(min_contrast, max_contrast),
        T.RandomSaturation(min_saturation, max_saturation),
        T.RandomFlip(prob=flip_prob, horizontal=False, vertical=True),
        T.RandomFlip(prob=flip_prob, horizontal=True, vertical=False),
    ]
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))

    annos = [
        detection_utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = detection_utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = detection_utils.filter_empty_instances(instances)
    return dataset_dict
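Because this mapper takes extra parameters, it cannot be handed to the data loader directly; one option (a sketch, assuming detectron2's standard loader API, with illustrative values) is to bind them with functools.partial:

# Sketch: bind the extra arguments so the loader sees a one-argument mapper.
# All concrete values below are illustrative, not from the example.
from functools import partial
from detectron2.data import build_detection_train_loader

bound_mapper = partial(
    custom_mapper,
    size=(800, 800), flip_prob=0.5,
    min_brightness=0.8, max_brightness=1.2,
    min_contrast=0.8, max_contrast=1.2,
    min_saturation=0.8, max_saturation=1.2,
)
# train_loader = build_detection_train_loader(cfg, mapper=bound_mapper)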
Example #4
def mapper(dataset_dict):
    # Custom mapper
    dataset_dict = copy.deepcopy(dataset_dict)  # the dict is modified below, so copy it first
    image = utils.read_image(dataset_dict["file_name"], format="BGR")  # read the image as a numpy array
#     image, transforms = T.apply_transform_gens(
#         [T.Resize((800, 800)), T.RandomContrast(0.1, 3), T.RandomSaturation(0.1, 2), T.RandomRotation(angle=[0, 180]),
#          T.RandomFlip(prob=0.4, horizontal=False, vertical=True), T.RandomCrop('relative_range', (0.4, 0.6))], image)  # image augmentations

#     image, transforms = T.apply_transform_gens(
#         [T.Resize((800, 800)), T.RandomContrast(0.1, 3), T.RandomSaturation(0.1, 2),
#          T.RandomFlip(prob=0.4, horizontal=True, vertical=False), T.RandomCrop('relative_range', (0.4, 0.6))], image)
    image, transforms = T.apply_transform_gens(
        [T.Resize((800, 800)), T.RandomContrast(0.1, 3), T.RandomSaturation(0.1, 2),
         T.RandomFlip(prob=0.4, horizontal=True, vertical=False)], image)  # image augmentations

    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))  # convert to Tensor

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]  # the annotations must be transformed in sync with the image
    instances = utils.annotations_to_instances(annos, image.shape[:2])  # convert annotations to Instances (Tensors)
    dataset_dict["instances"] = utils.filter_empty_instances(instances)  # drop empty instances
    return dataset_dict
Example #5
def custom_mapper(dataset_dict):
    # it will be modified by code below
    dataset_dict = copy.deepcopy(dataset_dict)
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    transform_list = [
        T.Resize((512, 512)),
        T.RandomBrightness(0.8, 1.8),
        T.RandomContrast(0.6, 1.3),
        T.RandomSaturation(0.8, 1.4),
        T.RandomRotation(angle=[30, 30]),
        T.RandomLighting(0.7),
        T.RandomFlip(prob=0.4, horizontal=False, vertical=True),
    ]
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Example #6
def build_augmentation(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.

    Returns:
        list[Augmentation]
    """
    augmentation = []

    if is_train:
        if cfg.INPUT.COLOR_AUG:
            augmentation.append(ColorAugTransform(img_format=cfg.INPUT.FORMAT))

        if cfg.INPUT.SWAP_CHANNELS:
            augmentation.append(RandomSwapChannelsTransform())

        if cfg.INPUT.EXPAND:
            augmentation.append(Expand(img_value=cfg.MODEL.PIXEL_MEAN))

        if cfg.INPUT.MIN_IOU_CROP:
            augmentation.append(MinIoURandomCrop())

    if cfg.INPUT.RESIZE.ENABLED:
        shape = cfg.INPUT.RESIZE.SIZE
        augmentation.append(T.Resize(shape))

    if is_train:
        augmentation.append(T.RandomFlip())

    return augmentation
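The returned list is meant to be consumed by a DatasetMapper; a minimal usage sketch, assuming cfg carries the custom INPUT keys referenced above:

# Sketch: plug the augmentation list into a mapper and a train loader.
from detectron2.data import DatasetMapper, build_detection_train_loader

def make_train_loader(cfg):
    mapper = DatasetMapper(cfg, is_train=True,
                           augmentations=build_augmentation(cfg, is_train=True))
    return build_detection_train_loader(cfg, mapper=mapper)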
Example #7
 def build_train_loader(cls, cfg):
     print(cfg.INPUT.MIN_SIZE_TRAIN)
     mapper = DatasetMapper(cfg,
                            is_train=True,
                            augmentations=[
                                transforms.Resize(cfg.INPUT.MIN_SIZE_TEST),
                                transforms.RandomFlip()
                            ])
     return build_detection_train_loader(cfg, mapper)
Example #8
def build_test_loader(cfg, dataset_name):
    input_size = cfg.MODEL.CLSNET.INPUT_SIZE
    return build_detection_test_loader(
        cfg,
        dataset_name,
        mapper=DatasetMapper(
            cfg,
            is_train=False,
            augmentations=[T.Resize((input_size, input_size))]))
Example #9
def build_train_loader(cfg):
    input_size = cfg.MODEL.CLSNET.INPUT_SIZE
    return build_detection_train_loader(
        cfg,
        mapper=DatasetMapper(cfg,
                             is_train=True,
                             augmentations=[
                                 T.Resize((input_size, input_size)),
                                 T.RandomContrast(0.5, 1.5),
                                 T.RandomBrightness(0.5, 1.5),
                                 T.RandomSaturation(0.5, 1.5)
                             ]))
Example #10
def mapper(dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    image, transforms = T.apply_transform_gens([T.Resize((800, 800))], image)
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Example #11
def mapper(dataset_dict):
    dataset_dict = copy.deepcopy(dataset_dict)
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    image, transforms = T.apply_transform_gens([T.Resize((1152, 1152))], image)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances_rotated(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Example #12
 def build_hooks(self):
     hooks = super().build_hooks()
     hooks.insert(-1, LossEvalHook(
         self.cfg.TEST.EVAL_PERIOD,
         self.model,
         build_detection_test_loader(
             self.cfg,
             self.cfg.DATASETS.TEST[0],
             MyMapper(False, augmentations=[T.Resize(self.cfg.INPUT_SIZE)], image_format="RGB")
         ),
         self,
         self.train_process,
         self.cfg.PATIENCE
     ))
     return hooks
Example #13
def ResizeShortestEdgeSquareOp(cfg: CfgNode, arg_str: str,
                               is_train: bool) -> List[d2T.Transform]:
    """Resize the input to square using INPUT.MIN_SIZE_TRAIN or INPUT.MIN_SIZE_TEST
    without keeping aspect ratio
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        assert (isinstance(min_size, (list, tuple))
                and len(min_size) == 1), "Only a single size is supported"
        min_size = min_size[0]
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST

    tfm_gens = []
    if min_size != 0:  # set to zero to disable resize
        tfm_gens.append(d2T.Resize(shape=[min_size, min_size]))
    return tfm_gens
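A usage sketch, assuming INPUT.MIN_SIZE_TRAIN holds a single value; the cfg below is built inline purely for illustration:

# Sketch: a minimal cfg exercising the op (values assumed).
from detectron2.config import CfgNode

cfg = CfgNode({"INPUT": {"MIN_SIZE_TRAIN": (512,), "MIN_SIZE_TEST": 512}})
tfm_gens = ResizeShortestEdgeSquareOp(cfg, None, is_train=True)
# -> [d2T.Resize(shape=[512, 512])]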
Example #14
def customMapper(dataset_dict):
  dataset_dict = copy.deepcopy(dataset_dict)
  image = utils.read_image(dataset_dict["file_name"], format="BGR")

  transform_list = [
                    T.Resize((600, 800)),
                    T.RandomFlip(prob=0.6, horizontal=True, vertical=False),
                    T.RandomFlip(prob=0.6, horizontal=False, vertical=True),
                    ]
  image, transforms = T.apply_transform_gens(transform_list, image)
  dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
  annos = [
      utils.transform_instance_annotations(obj, transforms, image.shape[:2])
      for obj in dataset_dict.pop("annotations")
      if obj.get("iscrowd", 0) == 0
  ]
  instances = utils.annotations_to_instances(annos, image.shape[:2])
  dataset_dict["instances"] = utils.filter_empty_instances(instances)
  return dataset_dict
Example #15
    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
            # logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen))
        else:
            self.crop_gen = None

        self.tfm_gens = [
                         T.RandomBrightness(0.5, 1.6),
                         T.RandomContrast(0.5, 1),
                         T.RandomSaturation(0.5, 1),
                         T.RandomRotation(angle=[-90, 90]),
                         T.RandomFlip(horizontal=True, vertical=False),
                         T.RandomCrop('relative_range', (0.4, 0.6)),
                         T.Resize((640,640)),
                         # CutOut()
                         ]

        # self.tfm_gens = utils.build_transform_gen(cfg, is_train)

        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        self.is_train = is_train
Example #16
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.
    Now it includes resizing and flipping.

    Returns:
        list[TransformGen]
    """
    input_size = cfg.MODEL.CLSNET.INPUT_SIZE

    logger = logging.getLogger("detectron2.data.classification_utils")
    tfm_gens = []
    tfm_gens.append(T.Resize((input_size, input_size)))
    if is_train:
        tfm_gens.append(T.RandomContrast(0.5, 1.5))
        tfm_gens.append(T.RandomBrightness(0.5, 1.5))
        tfm_gens.append(T.RandomSaturation(0.5, 1.5))
        tfm_gens.append(T.RandomFlip())
        logger.info("TransformGens used in training[Updated]: " +
                    str(tfm_gens))
    return tfm_gens
Example #17
 def from_config(cls, cfg, is_train):
     ret = super().from_config(cfg, is_train)
     del ret['augmentations']
     if cfg.INPUT.RESIZE_META:
         augmentations = [
             T.Resize((cfg.INPUT.META_MIN_SIZE, cfg.INPUT.META_MIN_SIZE))
         ]
     else:
         augmentations = [
             T.ResizeShortestEdge(cfg.INPUT.META_MIN_SIZE,
                                  cfg.INPUT.META_MAX_SIZE, "choice")
         ]
     if is_train and cfg.INPUT.RANDOM_FLIP != "none":
         augmentations.append(
             T.RandomFlip(
                 horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
                 vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
             ))
     ret['augmentations'] = augmentations
     ret['is_train'] = True
     return ret
Example #18
 def __call__(self, dataset_dict):
     dataset_dict = copy.deepcopy(dataset_dict)
     # it will be modified by code below
     # can use other ways to read image
     image = utils.read_image(dataset_dict["file_name"], format="BGR")
     # See "Data Augmentation" tutorial for details usage
     auginput = T.AugInput(image)
     transform = T.Resize((800, 800))(auginput)
     print(f'resized image {dataset_dict["file_name"]}')
     image = torch.from_numpy(auginput.image.transpose(2, 0, 1))
     annos = [
         utils.transform_instance_annotations(annotation, [transform],
                                              image.shape[1:])
         for annotation in dataset_dict.pop("annotations")
     ]
     return {
         # create the format that the model expects
         "image": image,
         "instances":
         utils.annotations_to_instances(annos, image.shape[1:])
     }
Example #19
def custom_mapper(dataset_dict):
    dataset_dict = copy.deepcopy(dataset_dict)
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    image, transforms = T.apply_transform_gens([
        T.Resize((1920, 1080)),
        T.RandomFlip(0.1),
        T.RandomSaturation(0.9, 1.1),
        T.RandomBrightness(0.9, 1.1),
        T.RandomContrast(0.9, 1.1)
    ], image)

    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
Example #20
    def __init__(
        self,
        resize_type="resize_shortest",
        resize_short=None,
        resize_max=None,
        box_scale_factor=1.0,
    ):
        super().__init__()

        assert resize_type in ["resize_shortest", "resize", "None", None]

        resizer = None
        if resize_type == "resize_shortest":
            resizer = transforms.ResizeShortestEdge(resize_short, resize_max)
        elif resize_type == "resize":
            resizer = transforms.Resize(resize_short)

        self.aug = [
            tfm_tensor.Tensor2Array(),
            tfm_crop.CropBoxAug(box_scale_factor=box_scale_factor),
            *([resizer] if resizer else []),
            tfm_tensor.Array2Tensor(),
        ]
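Note the *([resizer] if resizer else []) splat: when resize_type is None or "None", the resize step is dropped from the pipeline entirely rather than inserted as a no-op.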
Example #21
def custom_mapper(dataset_dict):
    dataset_dict = copy.deepcopy(
        dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    transform_list = [
        T.Resize((300, 800)),
        InvertColors(),
        T.PadTransform(10, 10, 10, 10),
        T.RandomBrightness(0.8, 1.8),
        T.RandomContrast(0.6, 1.3),
        T.RandomSaturation(0.8, 1.4),
        T.RandomLighting(0.7),
    ]
    image, transforms = T.apply_transform_gens(transform_list, image)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict
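InvertColors here is project-local and its definition is not shown in these examples; a plausible minimal implementation (an assumption, for uint8 images in [0, 255]) subclasses T.Augmentation and returns a pixel-wise ColorTransform:

# Hypothetical sketch of InvertColors; the real utils.trainer version may differ.
class InvertColors(T.Augmentation):
    def get_transform(self, image):
        # ColorTransform applies a pixel-wise op and leaves coordinates alone,
        # so boxes, masks and keypoints pass through unchanged.
        return T.ColorTransform(lambda img: 255 - img)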
Example #22
 def build_train_loader(cls, cfg):
     return build_detection_train_loader(cfg, mapper=MyMapper(True, augmentations=[T.Resize(cfg.INPUT_SIZE)], image_format="RGB"))
Example #23
from detectron2.utils.visualizer import Visualizer, ColorMode
import detectron2.data.transforms as T
import numpy as np

import torch
from utils.trainer import InvertColors

augs = T.AugmentationList(
    [
        InvertColors(),
        T.Resize((300, 800)),
#         T.RandomContrast(1.5, 2.5),
        T.PadTransform(100, 100, 100, 100),
    ]
)


def augment(im):
    aug_input = T.AugInput(im)
    transform = augs(aug_input)  # type: T.Transform
    x = aug_input.image  # new image
    return x

def sort_predictions(outputs):
    pred_classes = []
    scores = []
    for out in outputs:
        idxs = np.argsort(out["instances"].pred_boxes.tensor.to('cpu')[:, 0])
        pred_classes.append(out["instances"].pred_classes[idxs])
        scores.append(out["instances"].scores[idxs])

    return pred_classes, scores
Example #24
 def build_test_loader(cls, cfg, dataset_name):
     mapper = DatasetMapper(
         cfg,
         is_train=False,
         augmentations=[transforms.Resize(cfg.INPUT.MIN_SIZE_TEST)])
     return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
Example #25
 def __call__(self, aug_input):
     oldh, oldw, _ = aug_input.image.shape  # image is (h, w, c)
     scaler = T.RandomExtent(self.scale_range, self.shift_range)(aug_input)
     resizer = T.Resize((oldh, oldw))(aug_input)  # restore original resolution
     return T.TransformList([scaler, resizer])
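The enclosing class is not shown; assuming it is an Augmentation subclass with scale_range and shift_range attributes (named ZoomAug below as a placeholder), it applies like any other augmentation:

# Sketch: RandomExtent samples a random sub/super-window of the image, then
# Resize snaps the result back to the original resolution.
# aug = ZoomAug(scale_range=(0.8, 1.2), shift_range=(0.1, 0.1))  # name assumed
# aug_input = T.AugInput(image)
# tfms = aug(aug_input)  # aug_input.image is augmented in place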
Example #26
File: build.py Project: yeonh2/d2go
def ResizeOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[d2T.Transform]:
    kwargs = _json_load(arg_str) if arg_str is not None else {}
    assert isinstance(kwargs, dict)
    return [d2T.Resize(**kwargs)]
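ResizeOp decodes its kwargs from a JSON string; a usage sketch with illustrative values:

# Sketch: arg_str is JSON-decoded into d2T.Resize keyword arguments.
# ResizeOp(cfg, '{"shape": [224, 224]}', is_train=True)
# -> [d2T.Resize(shape=[224, 224])]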
Example #27
def rotated_mapper(original_dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with our own customizations

    dataset_dict = copy.deepcopy(
        original_dataset_dict)  # it will be modified by code below
    original_gsd = dataset_dict["gsd"]
    target_gsd = np.random.uniform(0.09, 0.13)  # randomize target gsd
    scale = original_gsd / target_gsd

    target_size = 400
    target_crop = int(target_size / scale)
    target_crop = (target_crop, target_crop)

    image_np = detection_utils.read_image(dataset_dict["file_name"],
                                          format="BGR")

    boxes = np.asarray([anno['bbox'] for anno in dataset_dict['annotations']])

    # select anno at random
    # draw random center

    # h, w = image_np.shape[:2]
    # rand_box = boxes[np.random.randint(len(boxes))]
    # ch, cw = rand_box[:2]
    # xmin = np.min()
    # xmax = np.max()
    # ymin = 3
    # ymax = 4

    # h0 = np.random.randint(min(h, ymin), min(h, ymax) + 1)
    # w0 = np.random.randint(min(w, xmin), min(w, xmax) + 1)
    # assert h >= target_crop[1] and w >= target_crop[0], "Shape computation has bugs."

    # crop = T.CropTransform(w0, h0, target_crop)

    # make sure random crop contains annotations
    i = 0
    while True:
        random_crop = T.RandomCrop('absolute',
                                   target_crop).get_transform(image_np)
        cropped_boxes = RotatedBoxes(
            random_crop.apply_coords(copy.deepcopy(boxes)))
        inside_ind = cropped_boxes.inside_box(target_crop)
        if 1 < sum(inside_ind) <= 100:
            break
        i += 1
        if i > 150:
            return None

    image, transforms = T.apply_transform_gens([
        random_crop,
        T.Resize((target_size, target_size)),
    ], image_np)
    dataset_dict["image"] = torch.as_tensor(
        image.transpose(2, 0, 1).astype("float32"))

    annos = [
        rotated_transform_instance_annotations(obj, transforms,
                                               image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = detection_utils.annotations_to_instances_rotated(
        annos, image.shape[:2])
    instances = detection_utils.filter_empty_instances(instances)
    inside_ind = instances.gt_boxes.inside_box(image.shape[:2])
    instances = instances[inside_ind]

    assert ((instances.gt_boxes.tensor.numpy()[:, 2] > 0).all().item()
            ), "width not > 0\n\n" + str(instances.gt_boxes.tensor.numpy())

    dataset_dict["instances"] = instances
    return dataset_dict
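Returning None after 150 failed crop attempts is safe with detectron2's default training loader: the MapDataset wrapper skips samples whose mapper returns None and draws a replacement sample instead.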
Example #28
from detectron2.utils.visualizer import Visualizer, ColorMode
import detectron2.data.transforms as T
import numpy as np

import torch
from utils.trainer import InvertColors

augs = T.AugmentationList([
    T.Resize((600, 400)),
    T.RandomContrast(1.5, 2.5),
    T.PadTransform(100, 100, 100, 100),
])


def augment(im):
    aug_input = T.AugInput(im)
    transform = augs(aug_input)  # type: T.Transform
    x = aug_input.image  # new image
    return x


def sort_predictions(outputs):
    pred_classes = []
    scores = []
    for out in outputs:
        idxs = np.argsort(out["instances"].pred_boxes.tensor.to('cpu')[:, 0])
        pred_classes.append(out["instances"].pred_classes[idxs])
        scores.append(out["instances"].scores[idxs])

    return pred_classes, scores
Example #29
import cv2
import json
import random
import numpy as np
import torch
from detectron2.data import (DatasetCatalog, DatasetMapper, MetadataCatalog,
                             build_detection_train_loader, transforms)
from detectron2.engine import DefaultTrainer
import Params as P
print(torch.__version__)

USE_SAVED_MODEL = True
SHOW_INPUTS = False

# DatasetMapper wraps this in an AugmentationList itself, so pass a plain
# list of augmentations rather than a pre-built AugmentationList.
augs = [
    transforms.RandomBrightness(0.5, 1.5),
    transforms.RandomContrast(0.5, 1.5),
    transforms.RandomSaturation(0.5, 1.5),
    transforms.RandomFlip(prob=0.5),
    transforms.RandomExtent(scale_range=(0.1, 3), shift_range=(0.5, 0.5)),
    transforms.Resize(P.CNN_INPUT_SHAPE)
]


class Trainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        mapper = DatasetMapper(cfg, is_train=True, augmentations=augs)
        return build_detection_train_loader(cfg, mapper=mapper)


for d in ["train"]:  #, "valid"
    with open(P.DATASET_DIR + d + "/labels.json", 'r') as fp:
        dataset_dicts = json.load(fp)
    DatasetCatalog.register(P.DATASET_DIR + d, lambda d=d: dataset_dicts)
    MetadataCatalog.get(P.DATASET_DIR + d).set(thing_classes=["scallop"])