def prepare_for_launch():
    # runner = Detectron2GoRunner()
    runner = GeneralizedRCNNRunner()
    cfg = runner.get_default_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("mask_rcnn_fbnetv3a_C4.yaml"))
    cfg.MODEL_EMA.ENABLED = False
    cfg.DATASETS.TRAIN = ("my_dataset_train",)
    cfg.DATASETS.TEST = ("my_dataset_val",)
    cfg.DATALOADER.NUM_WORKERS = 2  # number of CPU workers for the data loader
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("mask_rcnn_fbnetv3a_C4.yaml")  # let training initialize from the model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.INPUT.CROP.ENABLED = True
    cfg.QUANTIZATION.BACKEND = 'qnnpack'
    cfg.INPUT.MAX_SIZE_TEST = 160
    cfg.INPUT.MIN_SIZE_TEST = 112
    # cfg.INPUT.RANDOM_FLIP = 'horizontal'
    # cfg.INPUT.RANDOM_FLIP = 'vertical'
    cfg.SOLVER.BASE_LR = 0.003  # pick a good LR
    cfg.SOLVER.MAX_ITER = 20  # you will need to train longer for a practical dataset
    cfg.SOLVER.STEPS = []  # do not decay learning rate
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 40  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (mahesh). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
    # NOTE: this config means the number of classes, but a few popular unofficial tutorials incorrectly use num_classes+1 here.
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    return cfg, runner
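The helper above only prepares the config and runner; in the D2Go demo workflow it is followed by building the model and launching training. A minimal sketch, assuming the runner exposes build_model and do_train as in the official D2Go demo notebook:

# Sketch: consume the config and runner returned above (assumes the D2Go
# runner API build_model()/do_train() as used in the demo notebook).
cfg, runner = prepare_for_launch()
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=False)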
def setup_custom_test(self):
    """
    Override this when using a different runner, a different base config file,
    or when setting specific config for a certain test.
    """
    self.runner = GeneralizedRCNNRunner()
    self.cfg = self.runner.get_default_cfg()
def test_default_cfg_deprecated_keys(self):
    default_cfg = GeneralizedRCNNRunner().get_default_cfg()
    # a warning will be printed for deprecated keys
    default_cfg.merge_from_list(["QUANTIZATION.QAT.LOAD_PRETRAINED", True])
    # an exception will be raised for renamed keys
    self.assertRaises(
        KeyError,
        default_cfg.merge_from_list,
        ["QUANTIZATION.QAT.BACKEND", "fbgemm"],
    )
def test_export_torchvision_format(self):
    runner = GeneralizedRCNNRunner()
    cfg = runner.get_default_cfg()
    cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
    cfg.merge_from_list(get_quick_test_config_opts())
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])

    pytorch_model = runner.build_model(cfg, eval_only=True)

    from typing import Dict, List

    class Wrapper(torch.nn.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inputs: List[torch.Tensor]):
            x = inputs[0].unsqueeze(0) * 255
            scale = 320.0 / min(x.shape[-2], x.shape[-1])
            x = torch.nn.functional.interpolate(
                x,
                scale_factor=scale,
                mode="bilinear",
                align_corners=True,
                recompute_scale_factor=True,
            )
            out = self.model(x[0])
            res: Dict[str, torch.Tensor] = {}
            res["boxes"] = out[0] / scale
            res["labels"] = out[2]
            res["scores"] = out[1]
            return inputs, [res]

    size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
    h, w = size_divisibility, size_divisibility * 2
    with create_detection_data_loader_on_toy_dataset(
        cfg, h, w, is_train=False
    ) as data_loader:
        with make_temp_directory("test_export_torchvision_format") as tmp_dir:
            predictor_path = convert_and_export_predictor(
                cfg,
                copy.deepcopy(pytorch_model),
                "torchscript",
                tmp_dir,
                data_loader,
            )
            orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
            wrapped_model = Wrapper(orig_model)
            # optionally do a forward
            wrapped_model([torch.rand(3, 600, 600)])
            scripted_model = torch.jit.script(wrapped_model)
            scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
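When the scripted wrapper is intended for on-device use, a common follow-up is to optimize it for mobile. The sketch below assumes it runs inside the make_temp_directory block above (so tmp_dir and scripted_model are in scope) and uses a hypothetical output file name:

# Sketch: optimize the scripted wrapper for mobile and save it in
# lite-interpreter format ("d2go_optimized.ptl" is a hypothetical name).
from torch.utils.mobile_optimizer import optimize_for_mobile

optimized_model = optimize_for_mobile(scripted_model)
optimized_model._save_for_lite_interpreter(os.path.join(tmp_dir, "d2go_optimized.ptl"))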
def setUp(self):
    runner = GeneralizedRCNNRunner()
    self.cfg = runner.get_default_cfg()
    self.is_mcs = False
    self.setup_custom_test()

    # NOTE: change some config to make the model run fast
    self.cfg.merge_from_list(get_quick_test_config_opts())
    self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    self.test_model = runner.build_model(self.cfg, eval_only=True)
def test_default_cfg_dump_and_load(self):
    default_cfg = GeneralizedRCNNRunner().get_default_cfg()
    cfg = default_cfg.clone()
    with make_temp_directory("detectron2go_tmp") as tmp_dir:
        file_name = os.path.join(tmp_dir, "config.yaml")
        # this is the same as the one in fblearner_launch_utils_detectron2go.py
        with open(file_name, "w") as f:
            f.write(cfg.dump(default_flow_style=False))

        # check if the dumped config file can be merged
        cfg.merge_from_file(file_name)
def test_load_arch_defs(self):
    """Test arch def str-to-dict conversion compatible with merging"""
    default_cfg = GeneralizedRCNNRunner().get_default_cfg()
    cfg = default_cfg.clone()
    cfg.merge_from_file(get_resource_path("arch_def_merging.yaml"))

    with make_temp_directory("detectron2go_tmp") as tmp_dir:
        # Dump out config with arch def
        file_name = os.path.join(tmp_dir, "test_archdef_config.yaml")
        with open(file_name, "w") as f:
            f.write(cfg.dump())

        # Attempt to reload the config
        another_cfg = default_cfg.clone()
        another_cfg.merge_from_file(file_name)
def test_build_rpn_heads_with_rotated_anchor_generator(self):
    """Make sure rpn heads work with rotated anchor generator"""
    self.assertGreater(len(rpn.RPN_HEAD_REGISTRY._obj_map), 0)

    for name, builder in rpn.RPN_HEAD_REGISTRY._obj_map.items():
        logger.info("Testing {}...".format(name))
        cfg = GeneralizedRCNNRunner().get_default_cfg()
        if name in RPN_CFGS:
            cfg.merge_from_file(RPN_CFGS[name])

        cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"

        backbone = build_backbone(cfg)
        backbone_shape = backbone.output_shape()
        rpn_input_shape = [backbone_shape[x] for x in cfg.MODEL.RPN.IN_FEATURES]
        rpn_head = builder(cfg, rpn_input_shape)

        in_channels = list(backbone_shape.values())[0].channels
        anchor_generator = build_anchor_generator(cfg, rpn_input_shape)
        num_anchors = anchor_generator.num_cell_anchors[0]
        box_dim = anchor_generator.box_dim

        N, C_in, H, W = 2, in_channels, 24, 32
        input = torch.rand([N, C_in, H, W], dtype=torch.float32)
        LAYERS = len(cfg.MODEL.RPN.IN_FEATURES)
        out = rpn_head([input] * LAYERS)
        self.assertEqual(len(out), 2)
        logits, bbox_reg = out
        for idx in range(LAYERS):
            self.assertEqual(
                logits[idx].shape,
                torch.Size(
                    [input.shape[0], num_anchors, input.shape[2], input.shape[3]]
                ),
            )
            self.assertEqual(
                bbox_reg[idx].shape,
                torch.Size(
                    [
                        logits[idx].shape[0],
                        num_anchors * box_dim,
                        logits[idx].shape[2],
                        logits[idx].shape[3],
                    ]
                ),
            )
def test_base_reroute(self):
    default_cfg = GeneralizedRCNNRunner().get_default_cfg()

    # use a rerouted file as base
    cfg = default_cfg.clone()
    cfg.merge_from_file(get_resource_path("rerouted_base.yaml"))
    self.assertEqual(cfg.MODEL.MASK_ON, True)  # base is loaded
    self.assertEqual(cfg.MODEL.FBNET_V2.ARCH, "test")  # non-base is loaded

    # use multiple files as base
    cfg = default_cfg.clone()
    cfg.merge_from_file(get_resource_path("rerouted_multi_base.yaml"))
    self.assertEqual(cfg.MODEL.MASK_ON, True)  # base is loaded
    self.assertEqual(cfg.MODEL.FBNET_V2.ARCH, "FBNetV3_A")  # second base is loaded
    self.assertEqual(cfg.OUTPUT_DIR, "test")  # non-base is loaded
def test_configs_load(self):
    """Make sure configs are loadable"""
    for location in ["detectron2", "detectron2go"]:
        root_dir = os.path.abspath(reroute_config_path(f"{location}://."))
        files = glob.glob(os.path.join(root_dir, "**/*.yaml"), recursive=True)
        self.assertGreater(len(files), 0)
        for fn in sorted(files):
            logger.info("Loading {}...".format(fn))
            GeneralizedRCNNRunner().get_default_cfg().merge_from_file(fn)
def test_8gpu_to_1gpu(self):
    """When scaling an 8-GPU config to a 1-GPU one, the batch size is reduced by 8x"""
    cfg = GeneralizedRCNNRunner().get_default_cfg()
    self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 8)
    batch_size_x8 = cfg.SOLVER.IMS_PER_BATCH
    assert batch_size_x8 % 8 == 0, "default batch size is not a multiple of 8"
    auto_scale_world_size(cfg, new_world_size=1)
    self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 1)
    self.assertEqual(cfg.SOLVER.IMS_PER_BATCH * 8, batch_size_x8)
def test_not_scale_for_zero_world_size(self):
    """When the reference world size is 0, no scaling should happen"""
    cfg = GeneralizedRCNNRunner().get_default_cfg()
    self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 8)
    cfg.SOLVER.REFERENCE_WORLD_SIZE = 0
    batch_size_x8 = cfg.SOLVER.IMS_PER_BATCH
    auto_scale_world_size(cfg, new_world_size=1)
    self.assertEqual(cfg.SOLVER.REFERENCE_WORLD_SIZE, 0)
    self.assertEqual(cfg.SOLVER.IMS_PER_BATCH, batch_size_x8)
class TemplateTestCase(unittest.TestCase):  # TODO: maybe subclass from TestMetaArch
    def setUp(self):
        # Add APIs to D2's meta arch; this is usually called in the runner's setup,
        # however in unittest it needs to be called separately.
        # TODO: maybe we should apply this by default
        patch_d2_meta_arch()

        self.setup_test_dir()
        assert hasattr(self, "test_dir")

        self.setup_custom_test()
        assert hasattr(self, "runner")
        assert hasattr(self, "cfg")
        self.force_apply_overwrite_opts()

        self.test_model = self.runner.build_model(self.cfg, eval_only=True)

    def setup_test_dir(self):
        self.test_dir = tempfile.mkdtemp(prefix="test_export_")
        self.addCleanup(shutil.rmtree, self.test_dir)

    def _get_test_image_sizes_default(self, is_train):
        # the model should work for any size, so don't always use a power of 2 or a
        # multiple of size_divisibility for testing.
        side_length = max(self.test_model.backbone.size_divisibility, 10)
        # make it non-square to cover errors caused by mixing up width & height
        h, w = side_length, side_length * 2
        return h, w

    def _get_test_image_size_no_resize(self, is_train):
        # use cfg.INPUT to make sure the data loader doesn't resize the image
        if is_train:
            assert len(self.cfg.INPUT.MAX_SIZE_TRAIN) == 1
            h = self.cfg.INPUT.MIN_SIZE_TRAIN[0]
            w = self.cfg.INPUT.MAX_SIZE_TRAIN
        else:
            h = self.cfg.INPUT.MIN_SIZE_TEST
            w = self.cfg.INPUT.MAX_SIZE_TEST
        return h, w

    def _get_test_image_sizes(self, is_train):
        """override this method to use another image size strategy"""
        return self._get_test_image_sizes_default(is_train)

    def setup_custom_test(self):
        """
        Override this when using a different runner, a different base config file,
        or when setting specific config for a certain test.
        """
        self.runner = GeneralizedRCNNRunner()
        self.cfg = self.runner.get_default_cfg()
        # subclass can call: self.cfg.merge_from_file(...)

    def force_apply_overwrite_opts(self):
        """
        Recommend only overriding this for a group of tests, while an individual
        test should have its own `setup_custom_test`.
        """
        # update config to make the model run fast
        self.cfg.merge_from_list(get_quick_test_config_opts())
        # force the test to run on CPU
        self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])

    @contextlib.contextmanager
    def _create_data_loader(self, is_train):
        """
        Create the data loader used for the test case. Note that it's better to
        use "fake" data for a quick test and to isolate I/O.
        """
        image_height, image_width = self._get_test_image_sizes(is_train=False)
        with create_detection_data_loader_on_toy_dataset(
            self.cfg,
            image_height,
            image_width,
            is_train=is_train,
            runner=self.runner,
        ) as data_loader:
            yield data_loader

    def _test_export(self, predictor_type, compare_match=True):
        with self._create_data_loader(is_train=False) as data_loader:
            inputs = next(iter(data_loader))

            # TODO: the export may change the model itself, need to fix this
            model_to_export = copy.deepcopy(self.test_model)
            predictor_path = convert_and_export_predictor(
                self.cfg,
                model_to_export,
                predictor_type,
                self.test_dir,
                data_loader,
            )

            predictor = create_predictor(predictor_path)
            predictor_outputs = predictor(inputs)
            _validate_outputs(inputs, predictor_outputs)

            if compare_match:
                with torch.no_grad():
                    pytorch_outputs = self.test_model(inputs)

                assert_instances_allclose(
                    predictor_outputs[0]["instances"],
                    pytorch_outputs[0]["instances"],
                    size_as_tensor=True,
                )

        return predictor_path

    # TODO: add test_train

    def _test_inference(self):
        with self._create_data_loader(is_train=False) as data_loader:
            inputs = next(iter(data_loader))

        with torch.no_grad():
            outputs = self.test_model(inputs)
        _validate_outputs(inputs, outputs)
def initialize_runner(runner, cfg):
    runner = runner or GeneralizedRCNNRunner()
    runner._initialize(cfg)
    return runner
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode
from d2go.runner import Detectron2GoRunner
from matplotlib import pyplot as plt
# from google.colab.patches import cv2_imshow
from d2go.utils.demo_predictor import DemoPredictor
from mobile_cv.predictor.api import create_predictor
from d2go.runner import GeneralizedRCNNRunner
import time
import os  # needed for os.path.join below

runner = GeneralizedRCNNRunner()
cfg = runner.get_default_cfg()
# cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"))
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml")
# cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml"))
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml")
cfg.merge_from_file(r"/content/config1.yml")
cfg.MODEL.WEIGHTS = os.path.join("/content/output/model_final.pth")
cfg.MODEL.DEVICE = "cuda"
# cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml"))
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml")
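The snippet above only builds the config. A hedged usage sketch follows: it assumes that runner.build_model(cfg, eval_only=True) also loads the weights pointed to by cfg.MODEL.WEIGHTS, that DemoPredictor wraps the model and accepts an OpenCV image (as in the D2Go demo notebook), and that /content/test.jpg is a hypothetical input image.

import cv2

model = runner.build_model(cfg, eval_only=True)  # assumption: loads cfg.MODEL.WEIGHTS
predictor = DemoPredictor(model)                 # assumption: DemoPredictor(model) as in the D2Go demo

im = cv2.imread("/content/test.jpg")             # hypothetical input image
start = time.time()
outputs = predictor(im)
print("inference took {:.3f}s".format(time.time() - start))

v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TEST[0]))
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
plt.imshow(out.get_image())
plt.show()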
def setUp(self):
    self.runner = GeneralizedRCNNRunner()
class TestKmeansAnchors(unittest.TestCase):
    def setUp(self):
        self.runner = GeneralizedRCNNRunner()

    def _get_default_cfg(self):
        cfg = self.runner.get_default_cfg()
        add_kmeans_anchors_cfg(cfg)
        return cfg

    @unittest.skip("This can only run locally and takes a significant amount of time")
    def test_matching_previous_results(self):
        cfg = self._get_default_cfg()
        cfg.INPUT.MIN_SIZE_TRAIN = (144,)
        cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
        cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 10
        cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 512
        cfg.MODEL.KMEANS_ANCHORS.DATASETS = ()

        # NOTE: create a data loader that samples exactly the same as the previous
        # implementation. In D2Go, we will rely on the train loader instead.
        # NOTE: in order to load the OV580_XRM dataset, change the IM_DIR to:
        # "/mnt/vol/gfsai-east/aml/mobile-vision//dataset/oculus/hand_tracking//torch/Segmentation/OV580_XRM_640x480_V3_new_rerun/images"  # noqa
        data_loader = build_sequence_loader(
            cfg,
            # dataset_name="coco_2014_valminusminival",
            # dataset_name="OV580_XRM_640x480_V3_train",
            dataset_name="OV580_XRM_640x480_V3_heldOut_small_512",
            mapper=self.runner.get_mapper(cfg, is_train=True),
            total_samples=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
            batch_size=3,
        )

        kmeans_anchors = compute_kmeans_anchors(
            cfg, data_loader, sort_by_area=False, _stride=16, _legacy_plus_one=True
        )

        # Taken from D9849940
        reference_anchors = np.array(
            [
                [-15.33554182, -15.29361029, 31.33554182, 31.29361029],  # noqa
                [-9.34156693, -9.32553548, 25.34156693, 25.32553548],  # noqa
                [-6.03052776, -6.02034167, 22.03052776, 22.02034167],  # noqa
                [-2.25951741, -2.182888, 18.25951741, 18.182888],  # noqa
                [-18.93553378, -18.93553403, 34.93553378, 34.93553403],  # noqa
                [-12.69068356, -12.73989029, 28.69068356, 28.73989029],  # noqa
                [-24.73489189, -24.73489246, 40.73489189, 40.73489246],  # noqa
                [-4.06014466, -4.06014469, 20.06014466, 20.06014469],  # noqa
                [-7.61036119, -7.60467538, 23.61036119, 23.60467538],  # noqa
                [-10.88200579, -10.87634414, 26.88200579, 26.87634414],  # noqa
            ]
        )
        np.testing.assert_allclose(kmeans_anchors, reference_anchors, atol=1e-6)

    def test_build_model(self):
        cfg = self._get_default_cfg()
        cfg.INPUT.MIN_SIZE_TRAIN = (60,)
        cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
        cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 3
        cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 5
        cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("toy_dataset",)
        cfg.MODEL.DEVICE = "cpu"
        cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"

        with register_toy_coco_dataset(
            "toy_dataset",
            image_size=(80, 60),  # w, h
            num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
        ):
            model = self.runner.build_model(cfg)
            trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
            trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
            trainer.register_hooks(trainer_hooks)
            trainer.before_train()
            anchor_generator = model.proposal_generator.anchor_generator
            cell_anchors = list(anchor_generator.cell_anchors)
            gt_anchors = np.array(
                [
                    [-20, -15, 20, 15]  # toy_dataset's bbox is half the size of the image
                    for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
                ]
            )
            np.testing.assert_allclose(cell_anchors[0], gt_anchors)
import os

from d2go.runner import Detectron2GoRunner
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode
from lockit import lock, unlock
from mobile_cv.predictor.api import create_predictor
from d2go.utils.demo_predictor import DemoPredictor
from d2go.runner import GeneralizedRCNNRunner

runner = GeneralizedRCNNRunner()
cfg = runner.get_default_cfg()
# runner = Detectron2GoRunner()
# cfg = runner.get_default_cfg()
# cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"))
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_C4_1x.yaml")
# cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml"))
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml")
cfg.merge_from_file(r"/home/pi/Desktop/project xcv/torchscript_int8@tracing/config2.yml")
cfg.MODEL.WEIGHTS = os.path.join(r"/home/pi/Desktop/project xcv/torchscript_int8@tracing/data.pth")
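On the Raspberry Pi, the exported int8 TorchScript model is typically loaded as a predictor bundle with create_predictor rather than rebuilt from the config. A minimal sketch, assuming the directory above is a complete d2go predictor bundle, that DemoPredictor accepts the loaded predictor (as in the D2Go demo), and using a hypothetical test image path:

import cv2

model = create_predictor(r"/home/pi/Desktop/project xcv/torchscript_int8@tracing")  # assumption: exported predictor bundle
predictor = DemoPredictor(model)  # assumption: DemoPredictor(model) as in the D2Go demo

frame = cv2.imread(r"/home/pi/Desktop/test.jpg")  # hypothetical test image
outputs = predictor(frame)
print(outputs["instances"])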
class TemplateTestCase(unittest.TestCase):  # TODO: maybe subclass from TestMetaArch
    def setUp(self):
        # Add APIs to D2's meta arch; this is usually called in the runner's setup,
        # however in unittest it needs to be called separately.
        # TODO: maybe we should apply this by default
        patch_d2_meta_arch()

        self.setup_test_dir()
        assert hasattr(self, "test_dir")

        self.setup_custom_test()
        assert hasattr(self, "runner")
        assert hasattr(self, "cfg")
        self.force_apply_overwrite_opts()

        self.test_model = self.runner.build_model(self.cfg, eval_only=True)

    def setup_test_dir(self):
        self.test_dir = tempfile.mkdtemp(prefix="test_export_")
        self.addCleanup(shutil.rmtree, self.test_dir)

    def setup_custom_test(self):
        """
        Override this when using a different runner, a different base config file,
        or when setting specific config for a certain test.
        """
        self.runner = GeneralizedRCNNRunner()
        self.cfg = self.runner.get_default_cfg()
        # subclass can call: self.cfg.merge_from_file(...)

    def force_apply_overwrite_opts(self):
        """
        Recommend only overriding this for a group of tests, while an individual
        test should have its own `setup_custom_test`.
        """
        # update config to make the model run fast
        self.cfg.merge_from_list(get_quick_test_config_opts())
        # force the test to run on CPU
        self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])

    def _test_export(self, predictor_type, compare_match=True):
        size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
        h, w = size_divisibility, size_divisibility * 2
        with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
            inputs = next(iter(data_loader))

            # TODO: the export may change the model itself, need to fix this
            model_to_export = copy.deepcopy(self.test_model)
            predictor_path = convert_and_export_predictor(
                self.cfg,
                model_to_export,
                predictor_type,
                self.test_dir,
                data_loader,
            )

            predictor = create_predictor(predictor_path)
            predictor_outputs = predictor(inputs)
            _validate_outputs(inputs, predictor_outputs)

            if compare_match:
                with torch.no_grad():
                    pytorch_outputs = self.test_model(inputs)

                assert_instances_allclose(
                    predictor_outputs[0]["instances"],
                    pytorch_outputs[0]["instances"],
                )

        return predictor_path

    # TODO: add test_train

    def _test_inference(self):
        size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
        h, w = size_divisibility, size_divisibility * 2
        with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
            inputs = next(iter(data_loader))

        with torch.no_grad():
            outputs = self.test_model(inputs)
        _validate_outputs(inputs, outputs)