Example #1
    def gen_graph_optimization_yaml(self, model_obj):
        default_yaml_template = {'model': {'framework': 'tensorflow', 'name': 'resnet50'},
                                 'device': 'cpu',
                                 'graph_optimization': {'precisions': ['bf16', 'fp32']}}
        fwk_name = get_model_fwk_name(model_obj.root)
        if fwk_name != 'tensorflow':
            logger.info('Graph optimization supports TensorFlow only at the current stage.')
            sys.exit(0)
        default_yaml_template['model']['framework'] = fwk_name

        if self._precisions == ['bf16'] and not CpuInfo().bf16:
            if os.getenv('FORCE_BF16') == '1':
                logger.warning("Graph optimization will be enforced even " \
                                "the hardware platform doesn't support bf16 instruction.")
            else:
                logger.info("Graph optimization exits due to the hardware " \
                            "doesn't support bf16 instruction.")
                sys.exit(0)

        default_yaml_template['graph_optimization']['precisions'] = self._precisions
        default_yaml_template['model']['inputs'] = self._input
        default_yaml_template['model']['outputs'] = self._output

        fd, graph_optimization_yaml_path = tempfile.mkstemp(suffix='.yaml')
        os.close(fd)  # mkstemp returns an open descriptor; close it before reopening
        with open(graph_optimization_yaml_path, 'w', encoding='utf-8') as f:
            yaml.dump(default_yaml_template, f)
        self.conf = Conf(graph_optimization_yaml_path)
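
For reference, a standalone sketch (not lpot code) of the YAML this method ends up writing, assuming self._precisions == ['bf16'] and hypothetical input/output tensor names:

    import yaml

    template = {'model': {'framework': 'tensorflow', 'name': 'resnet50',
                          'inputs': ['input_tensor'], 'outputs': ['softmax_tensor']},
                'device': 'cpu',
                'graph_optimization': {'precisions': ['bf16']}}
    print(yaml.dump(template))  # this is the config that Conf() then parses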
Example #2
    def test_model(self):
        self.assertEqual('onnxruntime',
                         get_model_fwk_name(self.cnn_export_path))
        model = MODELS['onnxrt_integerops'](self.cnn_model)
        self.assertIsInstance(model, LpotModel.ONNXModel)
        self.assertIsInstance(model.model, onnx.ModelProto)

        self.assertRaises(ValueError, model.save, './lpot/test/test.onnx')
        model.save('test.onnx')
        self.assertTrue(os.path.exists('test.onnx'))
        os.remove('test.onnx')
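
For context, the cnn_export_path and cnn_model fixtures come from the test's setUp, which is not shown; a minimal sketch of what they plausibly are, assuming an exported ONNX file already exists at the hypothetical path:

    import onnx

    cnn_export_path = 'cnn.onnx'            # hypothetical path written by setUp
    cnn_model = onnx.load(cnn_export_path)  # an onnx.ModelProto, as the assertions expect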
Example #3
    def test_model(self):
        self.assertEqual('mxnet', get_model_fwk_name(self.net))
        model = MODELS['mxnet'](self.net)
        self.assertIsInstance(model, LpotModel.MXNetModel)
        self.assertIsInstance(model.model, mx.gluon.HybridBlock)

        self.assertRaises(ValueError, model.save, './lpot/test/')
        model.save('./test')
        self.assertTrue(os.path.exists('test-symbol.json'))
        self.assertTrue(os.path.exists('test-0000.params'))

        net = load_mxnet_model('test-symbol.json', 'test-0000.params')
        model.model = net
        self.assertIsInstance(model.model[0], mx.symbol.Symbol)
        model.save('./test2')
        self.assertTrue(os.path.exists('test2-symbol.json'))
        self.assertTrue(os.path.exists('test2-0000.params'))
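
The load_mxnet_model helper is not shown above. Since the test expects model.model[0] to be an mx.symbol.Symbol, it plausibly returns a (symbol, arg_params, aux_params) tuple; a minimal sketch under that assumption (the real lpot helper may differ):

    import mxnet as mx

    def load_mxnet_model(symbol_file, params_file):
        # Load the exported symbol, then split the saved NDArrays back into
        # arg/aux parameter dicts, mirroring mx.model.load_checkpoint().
        symbol = mx.sym.load(symbol_file)
        save_dict = mx.nd.load(params_file)
        arg_params, aux_params = {}, {}
        for key, value in save_dict.items():
            tp, name = key.split(':', 1)
            if tp == 'arg':
                arg_params[name] = value
            elif tp == 'aux':
                aux_params[name] = value
        return symbol, arg_params, aux_params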
Example #4
    def testPyTorch(self):
        import torchvision
        from lpot.model.model import PyTorchModel, PyTorchIpexModel
        fwk = {'workspace_path': './pytorch'}
        ori_model = torchvision.models.mobilenet_v2()
        self.assertEqual('pytorch', get_model_fwk_name(ori_model))
        pt_model = PyTorchModel(ori_model)
        pt_model.model = ori_model
        with self.assertRaises(AssertionError):
            pt_model = PyTorchModel(torchvision.models.mobilenet_v2(), fwk)

        ipex_model = PyTorchIpexModel(ori_model)
        self.assertTrue(ipex_model.model)
        ipex_model.model = ori_model
        with self.assertRaises(AssertionError):
            ipex_model = PyTorchIpexModel(torchvision.models.mobilenet_v2(), fwk)
        ipex_model.save('./')
        os.remove('./best_configure.json')
Example #5
    def test_keras_saved_model(self):
        from packaging import version
        # A plain string comparison breaks for versions like '2.10.0'; parse properly.
        if version.parse(tf.version.VERSION) < version.parse('2.2.0'):
            return
        keras_model = build_keras()
        self.assertEqual('tensorflow', get_model_fwk_name(keras_model))

        model = TensorflowModel(keras_model)
        self.assertGreaterEqual(len(model.output_node_names), 1)
        self.assertGreaterEqual(len(model.input_node_names), 1)
        keras_model.save('./simple_model')
        # load from path
        model = TensorflowModel('./simple_model')
        self.assertGreaterEqual(len(model.output_node_names), 1)
        self.assertGreaterEqual(len(model.input_node_names), 1)

        os.makedirs('./keras_model', exist_ok=True)
        model.save('./keras_model')
        import shutil
        shutil.rmtree('simple_model', ignore_errors=True)
        shutil.rmtree('keras_model', ignore_errors=True)
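
build_keras() is defined elsewhere in the test module; any small Keras model with named inputs and outputs will do. A minimal stand-in (the real helper may differ):

    import tensorflow as tf

    def build_keras():
        # A tiny functional model with named input/output tensors so that
        # TensorflowModel can infer input_node_names and output_node_names.
        inputs = tf.keras.Input(shape=(28, 28), name='input')
        x = tf.keras.layers.Flatten()(inputs)
        outputs = tf.keras.layers.Dense(10, name='output')(x)
        return tf.keras.Model(inputs=inputs, outputs=outputs)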
Example #6
    def model(self, user_model):
        """Set the user model and dispatch to framework specific internal model object

        Args:
           user_model: user are supported to set model from original framework model format
                       (eg, tensorflow frozen_pb or path to a saved model), but not recommended.
                       Best practice is to set from a initialized lpot.common.Model.
                       If tensorflow model is used, model's inputs/outputs will be auto inferred,
                       but sometimes auto inferred inputs/outputs will not meet your requests,
                       set them manually in config yaml file. Another corner case is slim model
                       of tensorflow, be careful of the name of model configured in yaml file,
                       make sure the name is in supported slim model list.

        """
        from .common import Model as LpotModel
        from ..model import MODELS
        if not isinstance(user_model, LpotModel):
            logger.warning(
                'Force converting the raw user model to an lpot model; '
                'consider initializing lpot.experimental.common.Model and setting that instead.')
            user_model = LpotModel(user_model)

        fwk_name = get_model_fwk_name(user_model.root)
        if fwk_name != self.framework:
            logger.info(
                'Model conversion supports TensorFlow only at the current stage.')
            sys.exit(0)

        if not self.conf:
            self._gen_yaml()

        framework_model_info = {}
        cfg = self.conf.usr_cfg
        framework_model_info.update({
            'name': cfg.model.name,
            'input_tensor_names': [],
            'output_tensor_names': [],
            'workspace_path': cfg.tuning.workspace.path
        })

        self._model = MODELS[self.framework](
            user_model.root, framework_model_info, **user_model.kwargs)
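
Following the docstring's best practice, a hedged usage sketch; ModelConversion is assumed here as the class owning this setter (the log message mentions model conversion), and the frozen pb path is hypothetical:

    from lpot.experimental import ModelConversion, common

    conversion = ModelConversion()
    # Wrapping the raw model in an lpot Model avoids the force-convert warning above.
    conversion.model = common.Model('./frozen_model.pb')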