Example #1
 def write_predictions_json(self):
     try:
         with open(self.result_json_path, 'w+') as outfile:
             json.dump(self.pred_dataset, outfile, indent=4)
         printj.cyan(f'Created inference result file: {self.result_json_path}')
     except NotADirectoryError:
         printj.red(f'Not a directory: {self.result_json_path}')
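A minimal usage sketch; `inferer`, the path, and the payload below are placeholders, with attribute names taken from the method body above.

    inferer.result_json_path = 'output/inference_result.json'  # placeholder path
    inferer.pred_dataset = [{'image_id': 0, 'score': 0.93}]    # illustrative payload
    inferer.write_predictions_json()
    # Created inference result file: output/inference_result.json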
Example #2
 def to_point3d_list(self) -> Point3D_List:
     if self.dimensionality != 3:
         printj.red(
             f'Cannot convert polygon to Point3D_List because self.dimensionality=={self.dimensionality}!=3'
         )
         raise TypeError
     return Point3D_List.from_list(self.to_list(demarcation=True))
Example #3
 def __sub__(self, other) -> BBox:
     if isinstance(other, BBox):
         # Translate by the other box's top-left corner only, so the result
         # keeps this box's width and height.
         return BBox(xmin=self.xmin - other.xmin,
                     ymin=self.ymin - other.ymin,
                     xmax=self.xmax - other.xmin,
                     ymax=self.ymax - other.ymin)
     elif isinstance(other, (int, float)):
         return BBox(xmin=self.xmin - other,
                     ymin=self.ymin - other,
                     xmax=self.xmax - other,
                     ymax=self.ymax - other)
     elif isinstance(other, Point2D):
         return BBox(xmin=self.xmin - other.x,
                     ymin=self.ymin - other.y,
                     xmax=self.xmax - other.x,
                     ymax=self.ymax - other.y)
     elif isinstance(other, Keypoint2D):
         return BBox(xmin=self.xmin - other.point.x,
                     ymin=self.ymin - other.point.y,
                     xmax=self.xmax - other.point.x,
                     ymax=self.ymax - other.point.y)
     else:
         printj.red(f'Cannot subtract {type(other)} from BBox')
         raise TypeError
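The BBox branch above translates rather than subtracting corner-for-corner: only the other box's xmin/ymin are removed from all four coordinates, so the result keeps this box's width and height. A minimal sketch of the semantics with illustrative values; the `BBox` keyword constructor mirrors the calls above, and `Point2D(x=..., y=...)` is assumed from the attribute access in the `Point2D` branch.

    box = BBox(xmin=10, ymin=20, xmax=110, ymax=220)
    box - 5                         # BBox(5, 15, 105, 215): uniform shift
    box - Point2D(x=10, y=20)       # BBox(0, 0, 100, 200): moved into a local frame
    box - BBox(xmin=10, ymin=20, xmax=50, ymax=60)
    # BBox(0, 0, 100, 200): translated by the other box's top-left corner;
    # this box's 100x200 size is preserved regardless of the other box's size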
Example #4
    def run(self):
        if not path_exists(self.ndds_dir):
            printj.red(f"Input ndds_dir path does not exist./n{self.ndds_dir}")
        make_dir_if_not_exists(dir_path=self.output_dir)
        coco_data_dir = os.path.join(
            self.output_dir,
            f"{get_filename_from_path(self.ndds_dir)}_coco_data")
        delete_dir_if_exists(dir_path=coco_data_dir)
        make_dir(dir_path=coco_data_dir)

        ndds_dataset = NDDS_Dataset.load_from_dir(json_dir=self.ndds_dir)
Example #5
    def __init__(
            self,
            weights_path: str,
            class_names: List[str] = None, num_classes: int = None,
            keypoint_names: List[str] = None, num_keypoints: int = None,
            model: str = "mask_rcnn_R_50_FPN_1x",
            confidence_threshold: float = 0.5,
            size_min: int = None,
            size_max: int = None,
            key_seg_together: bool = False,
            gray_on=False,
            crop_mode: int = None,
            crop_mode2_rec: Union[int, List[int]] = None,
            crop_mode3_sizes: Union[int, List[int]] = None,
            crop_mode3_overlaps: Union[int, List[int]] = None,
            detectron2_dir_path: str = "/home/jitesh/detectron/detectron2"
    ):
        """
        D2Inferer
        =========

        Parameters:
        ------
        - weights_path: str
        - class_names: List[str] = None, num_classes: int = None
        - keypoint_names: List[str] = None, num_keypoints: int = None
        - model: str = "mask_rcnn_R_50_FPN_1x"
        - confidence_threshold: float = 0.5
        - size_min: int = None
        - size_max: int = None
        - key_seg_together: bool = False
        - detectron2_dir_path: str = "/home/jitesh/detectron/detectron2"

        - crop_mode = 1: crop between points (0, 0) and (a, a), where a is min(height, width)
        - crop_mode = 2: crop between points crop_mode2_rec[0] and crop_mode2_rec[1], where crop_mode2_rec is the user-defined crop rectangle
        - crop_mode = 3: chop, infer and merge
        """
        self.df = pd.DataFrame(data=[], columns=[])
        self.gray_on = gray_on
        self.crop_mode = crop_mode
        self.crop_rec = crop_mode2_rec
        self.crop_mode3_sizes = crop_mode3_sizes
        self.crop_mode3_overlaps = crop_mode3_overlaps
        if class_names is None:
            class_names = ['']
        if keypoint_names is None:
            keypoint_names = ['']
        self.key_seg_together = key_seg_together
        self.weights_path = weights_path
        self.class_names = class_names
        if num_classes is None:
            self.num_classes = len(class_names)
        else:
            assert num_classes == len(class_names)
            self.num_classes = num_classes
        self.keypoint_names = keypoint_names
        if num_keypoints is None:
            self.num_keypoints = len(keypoint_names)
        else:
            assert num_keypoints == len(keypoint_names)
            self.num_keypoints = num_keypoints
        self.confidence_threshold = confidence_threshold
        self.model = model
        if "COCO-Detection" in self.model:
            self.model = self.model
        elif "COCO-Keypoints" in self.model:
            self.model = self.model
        elif "COCO-InstanceSegmentation" in self.model:
            self.model = self.model
        elif "COCO-PanopticSegmentation" in self.model:
            self.model = self.model
        elif "LVIS-InstanceSegmentation" in self.model:
            self.model = self.model
        elif "Misc" in model:
            self.model = model
            # train_type = 'seg'
        elif "rpn" in model:
            self.model = "COCO-Detection/" + model
        elif "keypoint" in model:
            self.model = "COCO-Keypoints/" + model
        elif "mask" in model:
            self.model = "COCO-InstanceSegmentation/" + model
        else:
            printj.red.bold_on_black(f'{model} is not in the dictionary.\
                Choose the correct model.')
            raise Exception

        if ".yaml" in self.model:
            self.model = self.model
        else:
            self.model = self.model + ".yaml"

        self.cfg = get_cfg()
        model_conf_path = f"{detectron2_dir_path}/configs/{self.model}"
        if not file_exists(model_conf_path):
            printj.red(f"Invalid model: {model}\nOr")
            printj.red(f"File not found: {model_conf_path}")
            raise Exception
        self.cfg.merge_from_file(model_conf_path)
        self.cfg.MODEL.WEIGHTS = self.weights_path
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = self.num_classes
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.confidence_threshold
        self.cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = self.num_keypoints
        if "mask" in self.model.lower() or "segmentation" in self.model.lower():
            self.cfg.MODEL.MASK_ON = True
        # self.cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT=0.5
        if size_min is not None:
            self.cfg.INPUT.MIN_SIZE_TRAIN = size_min
            self.cfg.INPUT.MIN_SIZE_TEST = size_min
        if size_max is not None:
            self.cfg.INPUT.MAX_SIZE_TRAIN = size_max
            self.cfg.INPUT.MAX_SIZE_TEST = size_max
        self.predictor = DefaultPredictor(self.cfg)
        self.pred_dataset = []
        self.palette = np.array(color_palette(
            palette='hls', n_colors=self.num_classes+1))*255
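A hedged instantiation sketch with placeholder paths and class names; it exercises the model-name resolution above, where a bare "mask" model is prefixed with COCO-InstanceSegmentation/ and ".yaml" is appended.

    inferer = D2Inferer(
        weights_path='output/model_final.pth',      # placeholder weights file
        class_names=['bolt'],                       # placeholder class list
        model='mask_rcnn_R_50_FPN_1x',              # resolved to
        # 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml'
        confidence_threshold=0.7,
        detectron2_dir_path='/path/to/detectron2',  # local clone holding configs/
    )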
Example #6
    def mapper(self, dataset_dict, train_type: str = 'kpt'):
        if train_type != 'kpt':
            for item in dataset_dict["annotations"]:
                if 'keypoints' in item:
                    del item['keypoints']

        image = utils.read_image(dataset_dict["file_name"], format="BGR")
        img_h, img_w = image.shape[:2]
        num_pixels = img_w * img_h

        ann_dict = Detectron2_Annotation_Dict.from_dict(dataset_dict)
        bbox_list = [ann.bbox for ann in ann_dict.annotations]
        image = self.aug(image=np.array(image))['image']
        seq_aug_for_no_seg = almost_always(iaa.Sequential(
            [
                # iaa.Rot90(ia.ALL, keep_size=False)
            ]
        ))
        seq_aug_for_seg = sometimes(iaa.Sequential(
            [
                iaa.Rot90(ia.ALL, keep_size=False),
                iaa.Affine(
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                    rotate=(-180, 180),
                    order=[0, 1],
                    # cval=(0, 255),
                    cval=255,
                    mode=ia.ALL
                )
            ]
        ))
        imgaug_kpts = KeypointsOnImage(keypoints=[], shape=image.shape)
        imgaug_bboxes = BoundingBoxesOnImage(
            bounding_boxes=[], shape=image.shape)
        imgaug_polys = PolygonsOnImage(polygons=[], shape=image.shape)

        num_ann = len(ann_dict.annotations)
        num_kpts = None
        seg_len_list = []
        for ann in ann_dict.annotations:
            if num_kpts is None:
                num_kpts = len(ann.keypoints)
            if len(ann.keypoints.to_imgaug(img_shape=image.shape).keypoints) != len(ann_dict.annotations[0].keypoints):
                printj.red(
                    f'len(ann.keypoints.to_imgaug(img_shape=image.shape).keypoints) == {len(ann.keypoints.to_imgaug(img_shape=image.shape).keypoints)} != {len(ann_dict.annotations[0].keypoints)} == len(ann_dict.annotations[0].keypoints)')
                raise Exception
            imgaug_kpts.keypoints.extend(
                ann.keypoints.to_imgaug(img_shape=image.shape).keypoints)
            if ann.bbox.to_imgaug() is None:
                printj.red(f'ann.bbox.to_imgaug() is None')
                printj.red(f'ann.bbox: {ann.bbox}')
                raise Exception
            imgaug_bboxes.bounding_boxes.append(ann.bbox.to_imgaug())
            if ann.segmentation.to_imgaug(img_shape=image.shape).polygons is None:
                printj.red(
                    f'ann.segmentation.to_imgaug(img_shape=image.shape).polygons is None')
                printj.red(f'ann.segmentation:\n{ann.segmentation}')
                raise Exception
            seg_len_list.append(len(ann.segmentation))

            imgaug_polys.polygons.extend(
                ann.segmentation.to_imgaug(img_shape=image.shape).polygons)
        if num_kpts is None:
            # No annotations on this record; avoid None > 0 comparisons below.
            num_kpts = 0
        if len(imgaug_polys.polygons) > 0:
            if num_kpts > 0:
                image, imgaug_kpts_aug, imgaug_polys_aug = seq_aug_for_seg(
                    image=image, keypoints=imgaug_kpts, polygons=imgaug_polys)
            else:
                image, imgaug_polys_aug = seq_aug_for_seg(
                    image=image, polygons=imgaug_polys)
                imgaug_kpts_aug = None
            imgaug_bboxes_aug = None
        else:
            if num_kpts > 0:
                image, imgaug_kpts_aug, imgaug_bboxes_aug = seq_aug_for_no_seg(
                    image=image, keypoints=imgaug_kpts, bounding_boxes=imgaug_bboxes)
            else:
                image, imgaug_bboxes_aug = seq_aug_for_no_seg(
                    image=image, bounding_boxes=imgaug_bboxes)
                imgaug_kpts_aug = None
            imgaug_polys_aug = None

        kpts_aug0 = Keypoint2D_List.from_imgaug(
            imgaug_kpts=imgaug_kpts_aug) if num_kpts > 0 else Keypoint2D_List()
        kpts_aug_list = kpts_aug0.to_numpy(demarcation=True)[:, :2].reshape(
            num_ann, num_kpts, 2) if num_kpts > 0 else []
        kpts_aug_list = [[[x, y, 2] for x, y in kpts_aug]
                         for kpts_aug in kpts_aug_list]
        kpts_aug_list = [Keypoint2D_List.from_list(
            kpts_aug, demarcation=True) for kpts_aug in kpts_aug_list]

        if imgaug_polys_aug is not None and imgaug_bboxes_aug is None:
            poly_aug_list = [Polygon.from_imgaug(
                imgaug_polygon) for imgaug_polygon in imgaug_polys_aug.polygons]
            poly_aug_list_list = unflatten_list(
                poly_aug_list, part_sizes=seg_len_list)
            seg_aug_list = [Segmentation(poly_aug_list)
                            for poly_aug_list in poly_aug_list_list]
            bbox_aug_list = [seg_aug.to_bbox() for seg_aug in seg_aug_list]
            # Adjust BBoxes when Segmentation BBox does not contain all keypoints
            for i in range(len(bbox_aug_list)):
                kpt_points_aug = [
                    kpt_aug.point for kpt_aug in kpts_aug_list[i]] if num_kpts > 0 else []
                kpt_points_aug_contained = [kpt_point_aug.within(
                    bbox_aug_list[i]) for kpt_point_aug in kpt_points_aug]
                if len(kpt_points_aug) > 0:
                    if not np.any(np.array(kpt_points_aug_contained)):
                        printj.red(
                            f"Keypoints not contained in corresponding bbox.")
                    elif not np.all(np.array(kpt_points_aug_contained)):
                        pass
                    else:
                        break
        elif imgaug_polys_aug is None and imgaug_bboxes_aug is not None:
            bbox_aug_list = [BBox.from_imgaug(
                bbox_aug) for bbox_aug in imgaug_bboxes_aug.bounding_boxes]
            seg_aug_list = [None] * len(bbox_aug_list)
        else:
            printj.red(f'Unexpected error')
            raise Exception

        if num_kpts > 0:
            for ann, kpts_aug, bbox_aug, seg_aug in zip(ann_dict.annotations, kpts_aug_list, bbox_aug_list, seg_aug_list):
                ann.keypoints = kpts_aug
                ann.bbox = bbox_aug
                ann.segmentation = seg_aug if seg_aug is not None else Segmentation.from_list([
                ])
        else:
            for ann, bbox_aug, seg_aug in zip(ann_dict.annotations, bbox_aug_list, seg_aug_list):
                ann.keypoints = Keypoint2D_List()
                ann.bbox = bbox_aug
                ann.segmentation = seg_aug if seg_aug is not None else Segmentation.from_list([
                ])

        dataset_dict = ann_dict.to_dict()

        image, transforms = T.apply_transform_gens([], image)

        annots = []
        for item in dataset_dict["annotations"]:
            if 'keypoints' in item and num_kpts == 0:
                del item['keypoints']
            elif 'keypoints' in item:
                item['keypoints'] = np.array(
                    item['keypoints']).reshape(-1, 3).tolist()
            annots.append(item)
        dataset_dict["image"] = torch.as_tensor(
            image.transpose(2, 0, 1).astype("float32"))
        instances = utils.annotations_to_instances(annots, image.shape[:2])
        dataset_dict["instances"] = utils.filter_empty_instances(
            instances, by_box=True, by_mask=False)

        return dataset_dict
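The mapper is shaped for detectron2's data-loader hook. A minimal wiring sketch, assuming a hypothetical `d2_trainer` instance that owns the `mapper` above and its `self.aug` pipeline; `build_detection_train_loader(cfg, mapper=...)` and the `DefaultTrainer.build_train_loader` override are standard detectron2 entry points.

    from detectron2.data import build_detection_train_loader
    from detectron2.engine import DefaultTrainer

    class TrainerWithMapper(DefaultTrainer):
        @classmethod
        def build_train_loader(cls, cfg):
            # d2_trainer is a hypothetical instance exposing the mapper above.
            return build_detection_train_loader(
                cfg, mapper=lambda d: d2_trainer.mapper(d, train_type='seg'))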
Example #7
import printj

printj.red('YOUR TEXT')                    # red text
printj.bold('YOUR TEXT')                   # bold text
printj.blue.italic_on_yellow('YOUR TEXT')  # blue italics on a yellow background
Example #8
    def __init__(
        self,
        coco_ann_path: str,
        img_path: str,
        val_coco_ann_path: str,
        val_img_path: str,
        output_dir_path: str,
        resume: bool = True,
        class_names: List[str] = None,
        num_classes: int = None,
        keypoint_names: List[str] = None,
        num_keypoints: int = None,
        model: str = "mask_rcnn_R_50_FPN_1x",
        instance_train: str = "training_instance1",
        min_size_train: int = None,
        max_size_train: int = None,
        min_size_test: int = None,
        max_size_test: int = None,
        max_iter: int = 10000,
        batch_size_per_image: int = 512,
        checkpoint_period: int = None,
        score_thresh: int = None,
        key_seg_together: bool = False,
        aug_on: bool = True,
        train_val: bool = False,
        aug_settings_file_path: str = None,
        aug_vis_save_path: str = 'aug_vis.png',
        show_aug_seg: bool = False,
        aug_n_rows: int = 3,
        aug_n_cols: int = 5,
        aug_save_dims: Tuple[int] = (3 * 500, 5 * 500),
        device: str = 'cuda',
        num_workers: int = 2,
        images_per_batch: int = 2,
        base_lr: float = 0.003,
        decrease_lr_by_ratio: float = 0.1,
        lr_steps: tuple = (30000, ),
        detectron2_dir_path: str = None,
        val_on: bool = False,
        instance_test: str = "test_instance1",
        val_eval_period: int = 100,
        vis_period: int = 0,
        train_type: str = None,
    ):
        """
        D2Trainer
        =========

        Parameters:
        ------
        output_dir_path: str
        class_names: List[str] = None, num_classes: int = None
        keypoint_names: List[str] = None, num_keypoints: int = None
        model: str = "mask_rcnn_R_50_FPN_1x"
        score_thresh: int = None
        min_size_train: int = None
        max_size_train: int = None
        key_seg_together: bool = False
        detectron2_dir_path: str = None (None falls back to the detectron2 model zoo configs)
        """
        self.key_seg_together = key_seg_together
        self.coco_ann_path = coco_ann_path
        self.img_path = img_path
        self.val_coco_ann_path = val_coco_ann_path
        self.val_img_path = val_img_path
        self.output_dir_path = output_dir_path
        self.instance_train = instance_train
        self.resume = resume
        self.device = device
        self.num_workers = num_workers
        self.images_per_batch = images_per_batch
        self.batch_size_per_image = batch_size_per_image
        self.checkpoint_period = checkpoint_period
        self.score_thresh = score_thresh
        self.base_lr = base_lr
        self.decrease_lr_by_ratio = decrease_lr_by_ratio
        self.lr_steps = lr_steps
        self.max_iter = max_iter
        self.val_on = val_on
        self.instance_test = instance_test
        self.val_eval_period = val_eval_period
        self.vis_period = vis_period
        """ Load annotations json """
        with open(self.coco_ann_path) as json_file:
            self.coco_ann_data = json.load(json_file)
            self.categories = self.coco_ann_data["categories"]

        if class_names is None:
            # self.class_names = ['']
            self.class_names = [
                category["name"] for category in self.categories
            ]
        else:
            self.class_names = class_names
        if num_classes is None:
            self.num_classes = len(self.class_names)
        else:
            assert num_classes == len(self.class_names), (
                f'num_classes: {num_classes} != '
                f'len(self.class_names): {len(self.class_names)}')
            self.num_classes = num_classes
        if keypoint_names is None:
            self.keypoint_names = ['']
        else:
            self.keypoint_names = keypoint_names
        if num_keypoints is None:
            if keypoint_names == ['']:
                self.num_keypoints = 0
            else:
                self.num_keypoints = len(self.keypoint_names)
        else:
            assert num_keypoints == len(self.keypoint_names)
            self.num_keypoints = num_keypoints

        self.model = model
        # Resolve a bare model name to its model-zoo config path and infer the
        # training type; names that already carry a zoo prefix are used as-is.
        if "COCO-Detection" in self.model:
            train_type = 'bbox'
        elif "COCO-Keypoints" in self.model:
            train_type = 'kpt'
        elif "COCO-InstanceSegmentation" in self.model:
            train_type = 'seg'
        elif "COCO-PanopticSegmentation" in self.model:
            train_type = 'seg'
        elif "LVIS-InstanceSegmentation" in self.model:
            train_type = 'seg'
        elif "Misc" in model:
            train_type = 'seg'
        elif "rpn" in model or "fast" in model:
            self.model = "COCO-Detection/" + model
            train_type = 'bbox'
        elif "keypoint" in model:
            self.model = "COCO-Keypoints/" + model
            train_type = 'kpt'
        elif "mask" in model:
            self.model = "COCO-InstanceSegmentation/" + model
            train_type = 'seg'
        elif train_type:
            pass  # keep the user-supplied train_type and model path as given
        else:
            printj.red.bold_on_black(
                f'{model} is not in the dictionary. Choose the correct model.')
            raise Exception

        if ".yaml" in self.model:
            self.model = self.model
        else:
            self.model = self.model + ".yaml"

        if detectron2_dir_path:
            model_conf_path = f"{detectron2_dir_path}/configs/{self.model}"
        else:
            model_conf_path = model_zoo.get_config_file(self.model)
        if not file_exists(model_conf_path):
            printj.red(f"Invalid model: {model}\nOr")
            printj.red(f"File not found: {model_conf_path}")
            raise Exception
        """ register """
        register_coco_instances(name=self.instance_train,
                                metadata={},
                                json_file=self.coco_ann_path,
                                image_root=self.img_path)
        MetadataCatalog.get(
            self.instance_train).thing_classes = self.class_names
        if val_on:
            register_coco_instances(name=self.instance_test,
                                    metadata={},
                                    json_file=self.val_coco_ann_path,
                                    image_root=self.val_img_path)
            MetadataCatalog.get(
                self.instance_test).thing_classes = self.class_names
        """ cfg """
        self.cfg = get_cfg()
        self.cfg.merge_from_file(model_conf_path)
        self.cfg.DATASETS.TRAIN = tuple([self.instance_train])
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(self.model)
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = self.num_classes
        self.cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = self.num_keypoints
        self.cfg.DATALOADER.NUM_WORKERS = self.num_workers
        self.cfg.SOLVER.IMS_PER_BATCH = self.images_per_batch
        self.cfg.SOLVER.BASE_LR = self.base_lr
        self.cfg.MODEL.DEVICE = self.device
        self.cfg.OUTPUT_DIR = self.output_dir_path
        if self.lr_steps:
            self.cfg.SOLVER.GAMMA = self.decrease_lr_by_ratio
            self.cfg.SOLVER.STEPS = self.lr_steps
        if self.max_iter:
            self.cfg.SOLVER.MAX_ITER = self.max_iter
        if self.batch_size_per_image:
            self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = self.batch_size_per_image
        if self.checkpoint_period:
            self.cfg.SOLVER.CHECKPOINT_PERIOD = self.checkpoint_period
        if self.vis_period:
            self.cfg.VIS_PERIOD = self.vis_period
        if score_thresh:
            self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thresh
        if self.val_on:
            self.cfg.DATASETS.TEST = tuple([self.instance_test])
            self.cfg.TEST.EVAL_PERIOD = self.val_eval_period
        make_dir_if_not_exists(self.cfg.OUTPUT_DIR)
        if not self.resume:
            delete_dir_if_exists(self.cfg.OUTPUT_DIR)
            make_dir_if_not_exists(self.cfg.OUTPUT_DIR)
        if "mask" in self.model.lower() or "segmentation" in self.model.lower(
        ):
            self.cfg.MODEL.MASK_ON = True
        else:
            self.cfg.MODEL.MASK_ON = False
        # self.cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT=0.5
        # Train Size Parameters
        if min_size_train is not None:
            self.cfg.INPUT.MIN_SIZE_TRAIN = min_size_train
        if max_size_train is not None:
            self.cfg.INPUT.MAX_SIZE_TRAIN = max_size_train
        # Test Size Parameters
        if min_size_test is not None:
            self.cfg.INPUT.MIN_SIZE_TEST = min_size_test
        elif min_size_train is not None:
            self.cfg.INPUT.MIN_SIZE_TEST = min_size_train
        if max_size_test is not None:
            self.cfg.INPUT.MAX_SIZE_TEST = max_size_test
        elif max_size_train is not None:
            self.cfg.INPUT.MAX_SIZE_TEST = max_size_train
        """ def train()  """
        self.aug_settings_file_path = aug_settings_file_path
        self.aug_on = aug_on
        self.train_val = train_val
        self.train_type = train_type
        self.aug_vis_save_path = aug_vis_save_path
        self.show_aug_seg = show_aug_seg

        self.aug_n_rows = aug_n_rows
        self.aug_n_cols = aug_n_cols
        self.aug_save_dims = aug_save_dims
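A hedged instantiation sketch with placeholder paths; the model string exercises the resolution above ("keypoint" in the name selects the COCO-Keypoints/ prefix and train_type='kpt'), and val_on=True registers the validation instance.

    trainer = D2Trainer(
        coco_ann_path='data/train/coco.json',       # placeholder dataset paths
        img_path='data/train/images',
        val_coco_ann_path='data/val/coco.json',
        val_img_path='data/val/images',
        output_dir_path='output/training_instance1',
        model='keypoint_rcnn_R_50_FPN_1x',          # -> COCO-Keypoints/..., train_type='kpt'
        keypoint_names=['head', 'tail'],            # placeholder keypoint names
        max_iter=20000,
        val_on=True,
    )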