    def test_recursive_iter_dict(self):
        values = {"k1": "v1", "k2": ["v2", 3, "v4"], "k3": 5}
        val_list = list(iu.recursive_iterate(values))
        self.assertEqual(val_list, ["v1", "v2", 3, "v4", 5])

        val_list = list(iu.recursive_iterate(values, iter_types=str))
        self.assertEqual(val_list, ["v1", "v2", "v4"])
    def test_recursive_iter_send(self):
        values = {"k1": "v1", "k2": ["v2", 3, "v4"], "k3": 5}
        gt_values = {"k1": "v1_ret", "k2": ["v2_ret", 4, "v4_ret"], "k3": 6}

        iters = iu.recursive_iterate(values)
        for x in iters:
            iters.send(x + "_ret" if isinstance(x, str) else x + 1)
        result = iters.value
        self.assertEqual(result, gt_values)
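
The pattern above is the send-based protocol of recursive_iterate: iterating yields leaf values, send() supplies the replacement for the value that was just yielded, and .value rebuilds the original nesting with the replacements. Below is a minimal sketch of wrapping this into a generic "map over leaves" helper; map_leaves is a hypothetical name, only iu.recursive_iterate itself is assumed.

def map_leaves(fn, values):
    # Hypothetical helper built on the send/value protocol shown above.
    it = iu.recursive_iterate(values)
    for leaf in it:
        it.send(fn(leaf))  # replace the leaf that was just yielded
    return it.value  # dict/list nesting is preserved

# Example: double ints, upper-case strings.
nested = {"a": [1, "x"], "b": {"c": 2}}
mapped = map_leaves(lambda v: v * 2 if isinstance(v, int) else v.upper(), nested)
# mapped == {"a": [2, "X"], "b": {"c": 4}}
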
    def test_recursive_iter_seq_check_func(self):
        def _is_int_list(x):
            return isinstance(x, list) and all(isinstance(y, int) for y in x)

        values = [{"k1": [1, 2, 3], "k2": ["v2", 3, "v4"], "k3": 5}]
        # A list of ints is not treated as a sequence here, so it is yielded whole
        val_list = list(
            iu.recursive_iterate(values, seq_check_func=lambda x: not _is_int_list(x))
        )
        self.assertEqual(val_list, [[1, 2, 3], "v2", 3, "v4", 5])
Example #4
def _get_computed_tensor_to_list(op_name, op_cfg_name, result):
    riter = iu.recursive_iterate(result)
    for item in riter:
        item_str = (
            f"({list(item.shape)}, "
            f"{[float('%.5f' % o) for o in item.contiguous().view(-1).tolist()]})"
        )
        riter.send(item_str)
    ret = f'("{op_name}", "{op_cfg_name}"): {_to_str(riter.value)},  # noqa'
    return ret
    def test_recursive_iter_send_None(self):
        values = {"k1": "v1", "k2": ["v2", 3, "v4"], "k3": 5}
        gt_values = {
            "k1": "v1_ret",
            "k2": ["v2_ret", None, "v4_ret"],
            "k3": None,
        }

        iters = iu.recursive_iterate(values, wait_on_send=True)
        for x in iters:
            iters.send(x + "_ret" if isinstance(x, str) else None)
        result = iters.value
        self.assertEqual(result, gt_values)
Example #6
def build_upsample(name=None, scales=None, **kwargs):
    if name is None or scales is None:
        return None

    if all(x == 1 for x in iu.recursive_iterate(scales)):
        return None

    if name == "default":
        ret = Upsample(scale_factor=scales, **kwargs)
    else:
        ret = UPSAMPLE_REGISTRY.get(name)(scales, **kwargs)

    return ret
Example #7
def post_training_quantize(cfg, model, data_loader):
    """Calibrate a model, convert it to a quantized pytorch model"""
    model = copy.deepcopy(model)
    model.eval()
    # TODO: check why some parameters still require gradients
    for param in model.parameters():
        param.requires_grad = False

    if hasattr(model, "prepare_for_quant"):
        model = model.prepare_for_quant(cfg)
    else:
        logger.info("Using default implementation for prepare_for_quant")
        model = default_prepare_for_quant(cfg, model)

    if cfg.QUANTIZATION.EAGER_MODE:
        torch.quantization.prepare(model, inplace=True)
    logger.info("Prepared the PTQ model for calibration:\n{}".format(model))

    # Optionally force calibration to run on GPU; this only works when both the
    # model and its inputs can be cast to the target device.
    calibration_force_on_gpu = (
        cfg.QUANTIZATION.PTQ.CALIBRATION_FORCE_ON_GPU and torch.cuda.is_available()
    )
    if calibration_force_on_gpu:
        # NOTE: model.to(device) may not handle cases such as the normalizer or FPN,
        # so only move the model to GPU when explicitly requested.
        _cast_detection_model(model, "cuda")

    calibration_iters = cfg.QUANTIZATION.PTQ.CALIBRATION_NUM_IMAGES
    for idx, inputs in enumerate(data_loader):
        logger.info("Running calibration iter: {}/{}".format(idx, calibration_iters))

        if calibration_force_on_gpu:
            iters = recursive_iterate(inputs)
            for x in iters:
                if isinstance(x, torch.Tensor):
                    iters.send(x.to("cuda"))
            inputs = iters.value

        with torch.no_grad():
            model(inputs)
        if idx + 1 == calibration_iters:
            break
    else:
        logger.warning("Can't run enough calibration iterations")

    # cast model back to the original device
    if calibration_force_on_gpu:
        _cast_detection_model(model, cfg.MODEL.DEVICE)

    return model
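
A hedged usage sketch for the calibration helper above; the cfg keys follow those read in the function body, and the final torch.quantization.convert call is an assumed follow-up step for eager-mode PTQ, not something shown in this snippet.

# Hypothetical usage (cfg, model and data_loader come from the surrounding setup).
calibrated = post_training_quantize(cfg, model, data_loader)
if cfg.QUANTIZATION.EAGER_MODE:
    # Assumed follow-up: convert the calibrated model to a quantized one.
    quantized = torch.quantization.convert(calibrated, inplace=False)
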
    def test_recursive_iter_map_check_func(self):
        def _is_int_dict_key(x):
            return isinstance(x, dict) and all(isinstance(yk, int) for yk in x.keys())

        values = [{"k1": {1: 1, 2: 2, 3: 3}, "k2": ["v2", 3, "v4"], "k3": 5}]
        # A dict with int keys is not treated as a map here, so it is yielded whole
        val_list = list(
            iu.recursive_iterate(
                values,
                iter_types=dict,
                map_check_func=lambda x: not _is_int_dict_key(x),
            )
        )
        self.assertEqual(val_list, [{1: 1, 2: 2, 3: 3}])
    def test_recursive_iter_simple(self):
        self.assertEqual(next(iter(iu.recursive_iterate(1))), 1)
        self.assertEqual(next(iter(iu.recursive_iterate("str"))), "str")
        self.assertEqual(list(iu.recursive_iterate(1)), [1])
        self.assertEqual(list(iu.recursive_iterate("str")), ["str"])
        # special cases: for an empty list or dict the function returns an
        # empty iterable
        self.assertEqual(list(iu.recursive_iterate([])), [])
        self.assertEqual(list(iu.recursive_iterate({})), [])
Example #10
def _get_expected_output_to_tensor(outputs):
    # a valid tensor is represented by a tuple with two lists (shape and value)
    def _is_shape_value_tuple(obj):
        if isinstance(obj, tuple) and len(obj) == 2:
            if isinstance(obj[0], list) and isinstance(obj[1], list):
                return True
        return False

    # get all tensors
    riter = iu.recursive_iterate(
        outputs, seq_check_func=lambda x: not _is_shape_value_tuple(x)
    )
    for item in riter:
        item_tensor = torch.FloatTensor(item[1]).reshape(item[0])
        riter.send(item_tensor)

    # get all tensor shapes
    shape_iter = iu.recursive_iterate(
        outputs, seq_check_func=lambda x: not _is_shape_value_tuple(x)
    )
    for item in shape_iter:
        shape_iter.send(item[0])

    return riter.value, shape_iter.value
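
For reference, a small illustration of the (shape, value) tuple convention that _get_expected_output_to_tensor consumes; the values below are made up.

# Each leaf is a ([shape], [flat values]) tuple.
outputs = {"scores": ([2, 2], [0.1, 0.2, 0.3, 0.4]), "ids": ([2], [1.0, 2.0])}
tensors, shapes = _get_expected_output_to_tensor(outputs)
# tensors["scores"] is a 2x2 FloatTensor, shapes["scores"] == [2, 2]
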
Example #11
def convert_torch_script(
    model, inputs, fuse_bn=True, verify_output=True, use_get_traceable=False
):
    assert isinstance(inputs, (tuple, list)), f"Invalid input types {inputs}"
    if verify_output:
        print("Run pytorch model")
        with torch.no_grad():
            output_before = model(*inputs)

    if fuse_bn:
        print("Fusing bn...")
        fused_model = fuse_utils.fuse_model(model)
        if fuse_utils.check_bn_exist(fused_model):
            print(f"WARNING: BN existed after fusing, {fused_model}")
    else:
        fused_model = copy.deepcopy(model)

    for x in fused_model.parameters():
        x.requires_grad = False

    if use_get_traceable:
        print("Get traceable model...")
        fused_model = ju.get_traceable_model(fused_model)

    print("Start tracing...")
    with torch.no_grad():
        traced_model = torch.jit.trace(fused_model, inputs, strict=False)
    # print(f"Traced model {traced_model}")
    # print(f"Traced model {traced_model.code}")

    # print("Optimizing traced model...")
    # traced_model = optimize_for_mobile(traced_model)
    print("Generating traced model lints...")
    print(generate_mobile_module_lints(traced_model))

    print("Run traced model")
    with torch.no_grad():
        outputs = traced_model(*inputs)

    if verify_output:
        paired_outputs = iu.create_pair(output_before, outputs)
        for x in iu.recursive_iterate(paired_outputs, iter_types=torch.Tensor):
            np.testing.assert_allclose(
                x.lhs.detach(), x.rhs.detach(), rtol=0, atol=1e-4
            )

    return traced_model, outputs
    def test_paired(self):
        self.assertIsInstance(iu.PairedDict({}, {}), iu.cabc.Mapping)
        self.assertIsInstance(iu.PairedSeq([], []), iu.cabc.Sequence)

        lhs = {"alist": [1, 2, 3], "bdict": {"c": "d", "e": ["f", "g", "h"]}}
        rhs = {"alist": ["1", "2", "3"], "bdict": {"c": 4, "e": [5, 6, 7]}}
        paired = iu.create_pair(lhs, rhs)
        self.assertIsInstance(paired, iu.PairedDict)

        paired_list = paired["alist"]
        self.assertIsInstance(paired_list, iu.PairedSeq)

        paired_obj = paired_list[0]
        self.assertIsInstance(paired_obj, iu.Pair)

        iter = iu.recursive_iterate(paired)
        for x in iter:
            iter.send(x.to_tuple())

        merged_gt = {
            "alist": [(1, "1"), (2, "2"), (3, "3")],
            "bdict": {"c": ("d", 4), "e": [("f", 5), ("g", 6), ("h", 7)]},
        }
        self.assertEqual(iter.value, merged_gt)
Example #13
def _compare_output_shape(self, outputs, gt_shapes):
    for item in iu.recursive_iterate(iu.create_pair(outputs, gt_shapes)):
        self.assertEqual(item.lhs.shape, torch.Size(item.rhs))
Example #14
def get_neg_stride(stride):
    iters = iu.recursive_iterate(stride)
    for ss in iters:
        assert ss is not None
        iters.send(-ss)
    return iters.value
Example #15
def is_neg_stride(stride):
    return all(x < 0 for x in iu.recursive_iterate(stride))
Example #16
def export_optimize_and_save_torchscript(
    model: nn.Module,
    inputs: Optional[Tuple[Any]],
    output_path: str,
    *,
    jit_mode: Optional[str] = DEFAULT_JIT_MODE,
    torchscript_filename: str = "model.jit",
    mobile_optimization: Optional[MobileOptimizationConfig] = None,
    _extra_files: Optional[Dict[str, bytes]] = None,
) -> str:
    """
    The primary function for exporting PyTorch model to TorchScript.

    Args:
        model (nn.Module): the model to export. When given a ScriptModule, the export
            is skipped and the model is only optimized and saved.
        inputs (tuple or None): input arguments of the model, such that it can be
            called as model(*inputs). Not used when scripting the model.
        output_path (str): directory where the model will be saved.
        jit_mode (str): trace/script or None if the model is already a ScriptModule.
        torchscript_filename (str): the filename of non-mobile-optimized model.
        mobile_optimization (MobileOptimizationConfig): when provided, the mobile optimization
            will be applied.
        _extra_files (Dict[str, bytes]): when provided, extra files will be saved.

    Returns:
        (str): filename of the final model, whether optimized or not.
    """

    logger.info("Export, optimize and saving TorchScript to {} ...".format(
        output_path))
    PathManager.mkdirs(output_path)
    if _extra_files is None:
        _extra_files = {}

    if isinstance(model, torch.jit.ScriptModule):
        if jit_mode is not None:
            logger.info("The input model is already a ScriptModule, skip the jit step")
        script_model = model
    elif jit_mode == "trace":
        logger.info("Tracing the model ...")
        with torch.no_grad():
            script_model = torch.jit.trace(model, inputs)
    elif jit_mode == "script":
        logger.info("Scripting the model ...")
        script_model = torch.jit.script(model)
    else:
        raise ValueError("Unsupported jit_mode: {}".format(jit_mode))

    with make_temp_directory(
            "export_optimize_and_save_torchscript") as tmp_dir:

        @contextlib.contextmanager
        def _synced_local_file(rel_path):
            remote_file = os.path.join(output_path, rel_path)
            local_file = os.path.join(tmp_dir, rel_path)
            yield local_file
            PathManager.copy_from_local(local_file,
                                        remote_file,
                                        overwrite=True)

        with _synced_local_file(torchscript_filename) as model_file:
            logger.info(f"Saving torchscript model to: {torchscript_filename}")
            torch.jit.save(script_model, model_file, _extra_files=_extra_files)
        dump_torchscript_IR(script_model,
                            os.path.join(output_path, "torchscript_IR"))

        data_filename = "data.pth"
        with _synced_local_file(data_filename) as data_file:
            logger.info(f"Saving example data to: {data_filename}")
            torch.save(inputs, data_file)

        if mobile_optimization is not None:
            logger.info("Applying optimize_for_mobile ...")
            liteopt_model = optimize_for_mobile(
                script_model,
                optimization_blocklist=mobile_optimization.optimization_blocklist,
                preserved_methods=mobile_optimization.preserved_methods,
                backend=mobile_optimization.backend,
            )
            torchscript_filename = mobile_optimization.torchscript_filename
            with _synced_local_file(torchscript_filename) as lite_path:
                logger.info(
                    f"Saving mobile optimized model to: {torchscript_filename}"
                )
                liteopt_model._save_for_lite_interpreter(
                    lite_path, _extra_files=_extra_files)

            op_names = torch.jit.export_opnames(liteopt_model)
            logger.info("Operator names from lite interpreter:\n{}".format(
                "\n".join(op_names)))

            logger.info("Applying augment_model_with_bundled_inputs ...")
            # make all tensors zero-like to save storage
            iters = recursive_iterate(inputs)
            for x in iters:
                if isinstance(x, torch.Tensor):
                    iters.send(torch.zeros_like(x).contiguous())
            inputs = iters.value
            augment_model_with_bundled_inputs(liteopt_model, [inputs])

            # For non-CPU backends (e.g. Metal, Vulkan) the bundled inputs need to be
            # converted with `.to(<device>)` in order to predict successfully.
            # This is a temporary bypass until PT Edge supports automatic backend
            # conversion in the bundled inputs interface, or until we can auto-add an
            # input tensor conversion op to Metal and Vulkan models.
            target_backend = mobile_optimization.backend.lower()
            if target_backend == "cpu":
                # Sanity check by running
                logger.info(
                    "Running sanity check for the mobile optimized model ...")
                liteopt_model(*liteopt_model.get_all_bundled_inputs()[0])
            name, ext = os.path.splitext(torchscript_filename)
            input_bundled_path = name + "_bundled" + ext
            with _synced_local_file(input_bundled_path) as lite_path:
                logger.info(
                    f"Saving input bundled model to: {input_bundled_path}")
                liteopt_model._save_for_lite_interpreter(lite_path)

        return torchscript_filename
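
A hedged usage sketch for the export helper above; the toy module and output directory are placeholders, and only the tracing path (no mobile optimization) is exercised.

# Hypothetical usage with a toy module and a placeholder output directory.
toy_model = torch.nn.Linear(4, 2).eval()
example_inputs = (torch.randn(1, 4),)
saved_name = export_optimize_and_save_torchscript(
    toy_model, example_inputs, output_path="/tmp/ts_export", jit_mode="trace"
)
# saved_name == "model.jit"; the traced model and example data are written under output_path.
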
Example #17
def _compare_outputs(outputs, gt_outputs, error_msg):
    riter = iu.recursive_iterate(iu.create_pair(outputs, gt_outputs))
    for item in riter:
        np.testing.assert_allclose(
            item.lhs, item.rhs, rtol=0, atol=1e-4, err_msg=error_msg
        )
    def test_recursive_iter_simple_no_wait_on_send(self):
        self.assertEqual(list(iu.recursive_iterate(None, wait_on_send=False)), [None])
        self.assertEqual(
            list(iu.recursive_iterate([1, 2, None], wait_on_send=False)),
            [1, 2, None],
        )
Example #19
def trace_and_save_torchscript(
    model: nn.Module,
    inputs: Tuple[torch.Tensor],
    output_path: str,
    torchscript_filename: str = "model.jit",
    mobile_optimization: Optional[MobileOptimizationConfig] = None,
    _extra_files: Optional[Dict[str, bytes]] = None,
):
    logger.info("Tracing and saving TorchScript to {} ...".format(output_path))
    PathManager.mkdirs(output_path)
    if _extra_files is None:
        _extra_files = {}

    with torch.no_grad():
        script_model = torch.jit.trace(model, inputs)

    with make_temp_directory("trace_and_save_torchscript") as tmp_dir:

        @contextlib.contextmanager
        def _synced_local_file(rel_path):
            remote_file = os.path.join(output_path, rel_path)
            local_file = os.path.join(tmp_dir, rel_path)
            yield local_file
            PathManager.copy_from_local(local_file,
                                        remote_file,
                                        overwrite=True)

        with _synced_local_file(torchscript_filename) as model_file:
            torch.jit.save(script_model, model_file, _extra_files=_extra_files)

        with _synced_local_file("data.pth") as data_file:
            torch.save(inputs, data_file)

        if mobile_optimization is not None:
            logger.info("Applying optimize_for_mobile ...")
            liteopt_model = optimize_for_mobile(
                script_model,
                optimization_blocklist=mobile_optimization.optimization_blocklist,
                preserved_methods=mobile_optimization.preserved_methods,
                backend=mobile_optimization.backend,
            )
            torchscript_filename = mobile_optimization.torchscript_filename
            with _synced_local_file(torchscript_filename) as lite_path:
                liteopt_model._save_for_lite_interpreter(
                    lite_path, _extra_files=_extra_files)
            # liteopt_model(*inputs)  # sanity check
            op_names = torch.jit.export_opnames(liteopt_model)
            logger.info("Operator names from lite interpreter:\n{}".format(
                "\n".join(op_names)))

            logger.info("Applying augment_model_with_bundled_inputs ...")
            # make all tensors zero-like to save storage
            iters = recursive_iterate(inputs)
            for x in iters:
                if isinstance(x, torch.Tensor):
                    iters.send(torch.zeros_like(x).contiguous())
            inputs = iters.value
            augment_model_with_bundled_inputs(liteopt_model, [inputs])
            liteopt_model(
                *liteopt_model.get_all_bundled_inputs()[0])  # sanity check
            name, ext = os.path.splitext(torchscript_filename)
            with _synced_local_file(name + "_bundled" + ext) as lite_path:
                liteopt_model._save_for_lite_interpreter(lite_path)

        return torchscript_filename
Example #20
def forward(self, x):
    assert len(list(iu.recursive_iterate(x))) == len(self.stubs)
    data_iter = iu.recursive_iterate(x, wait_on_send=True)
    for data, stub in zip(data_iter, self.stubs):
        data_iter.send(stub(data))
    return data_iter.value