Code Example #1
    def load_hub_modules(self):
        # Drop any previously imported hubconf so it is re-imported fresh.
        if 'hubconf' in sys.modules:
            sys.modules.pop('hubconf')

        sys.path.insert(0, self.path)
        try:
            # Parse hubconf.py and extend sys.path with the directories implied
            # by its `from ... import ...` statements, so the import below can
            # resolve the packages that define the modules.
            with open(os.path.join(self.path, 'hubconf.py'), 'r') as file:
                pycode = file.read()
                ast_module = ast.parse(pycode)
                for _body in ast_module.body:
                    # Only `from x import y` nodes (ast.ImportFrom) carry a
                    # `.module` attribute; plain `import x` nodes do not.
                    if not isinstance(_body, ast.ImportFrom):
                        continue

                    if not _body.module or not _body.module.endswith('module'):
                        continue

                    subpath = '.'.join(_body.module.split('.')[:-2])
                    subpath = os.path.join(self.path, subpath)
                    sys.path.insert(0, subpath)

            # Import hubconf and register every top-level RunModule subclass.
            py_module = importlib.import_module('hubconf')
            for _item, _cls in inspect.getmembers(py_module, inspect.isclass):
                _item = py_module.__dict__[_item]
                if issubclass(_item, RunModule):
                    self.hub_modules[_item.name] = _item
        except Exception:
            self.hub_modules = OrderedDict()
            utils.record_exception('An error occurred while loading {}'.format(
                self.path))

        sys.path.remove(self.path)
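For reference, the loader above expects a hubconf.py at the repo root whose `from ... import ...` statements name dotted paths ending in `module`; every RunModule subclass visible in that file's namespace gets registered. A minimal sketch of such a file, with a hypothetical `lac` package and `LAC` class:

    # hubconf.py (hypothetical) -- load_hub_modules() imports this file and
    # registers every class in its namespace that subclasses RunModule.
    from lac.module import LAC  # 'lac.module' ends in 'module', so the loader
                                # also derives a sys.path entry from it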
Code Example #2
    def checkout(self, branch: str):
        '''Checkout the current repo to the specified branch.'''
        try:
            self.repo.git.checkout(branch)
            # reload modules
            self.load_hub_modules()
        except Exception:
            utils.record_exception(
                'An error occurred while checking out {}'.format(self.path))
Code Example #3
    def update(self):
        '''Update the current repo.'''
        try:
            self.repo.remote().pull(self.repo.branches[0])
            # reload modules
            self.load_hub_modules()
        except Exception:
            self.hub_modules = OrderedDict()
            utils.record_exception('An error occurred while updating {}'.format(
                self.path))
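Both wrappers are typically driven through a repo-source object. A minimal usage sketch, assuming a GitSource-style class exposing the two methods above (the class name, path, and branch are hypothetical):

    source = GitSource('/path/to/local/repo')  # hypothetical constructor
    source.checkout('develop')                 # switch branch, then reload modules
    source.update()                            # pull the tracked branch and reload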
Code Example #4
File: manager.py  Project: xiaoyangyang2/PaddleHub
    def list(self) -> List[HubModule]:
        '''List all installed HubModules.'''
        for subdir in os.listdir(self.home):
            fulldir = os.path.join(self.home, subdir)

            try:
                self._local_modules[subdir] = HubModule.load(fulldir)
            except Exception:
                utils.record_exception('An error was encountered while loading {}'.format(subdir))

        return list(self._local_modules.values())
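A short usage sketch; the manager construction below is hypothetical, and the snippet only shows that list() yields HubModule objects:

    manager = LocalModuleManager()  # hypothetical construction, arguments omitted
    for module in manager.list():   # every HubModule that loaded successfully
        print(module.name)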
Code Example #5
File: manager.py  Project: xiaoyangyang2/PaddleHub
    def search(self, name: str, source: str = None, branch: str = None) -> HubModule:
        '''Return the HubModule if a module with the given name is found, otherwise None.'''
        module = None

        if name in self._local_modules:
            module = self._local_modules[name]
        else:
            module_dir = self._get_normalized_path(name)
            if os.path.exists(module_dir):
                try:
                    module = self._local_modules[name] = HubModule.load(module_dir)
                except Exception:
                    utils.record_exception('An error was encountered while loading {}'.format(name))

        if not module:
            return None

        if source and source != module.source:
            return None

        if branch and branch != module.branch:
            return None

        return module
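Usage sketch for search; the module name and the source/branch filter values are placeholders:

    module = manager.search('lac')  # look up by name only
    module = manager.search('lac', source='https://github.com/user/repo', branch='main')
    if module is None:
        print('no matching module installed')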
Code Example #6
    def export_onnx_model(self,
                          dirname: str,
                          input_spec: List[paddle.static.InputSpec] = None,
                          include_sub_modules: bool = True,
                          **kwargs):
        '''
        Export the model to ONNX format.

        Args:
            dirname(str): The directory to save the onnx model.
            input_spec(list): Describes the input of the saved model's forward method, which can be described by
                InputSpec or example Tensor. If None, all input variables of the original Layer's forward method
                will be the inputs of the saved model. Defaults to None.
            include_sub_modules(bool): Whether to export sub modules. Defaults to True.
            **kwargs(dict|optional): Other export configuration options kept for compatibility; some may be removed
                in the future. Don't use them if not necessary. Refer to https://github.com/PaddlePaddle/paddle2onnx
                for more information.
        '''
        if include_sub_modules:
            for key, _sub_module in self.sub_modules().items():
                try:
                    sub_dirname = os.path.normpath(os.path.join(dirname, key))
                    _sub_module.export_onnx_model(
                        sub_dirname,
                        include_sub_modules=include_sub_modules,
                        **kwargs)
                except Exception:
                    utils.record_exception(
                        'Failed to export sub module {}'.format(
                            _sub_module.name))

        if isinstance(self, paddle.nn.Layer):
            save_file = os.path.join(dirname, '{}'.format(self.name))
            if not input_spec:
                if hasattr(self, 'input_spec'):
                    input_spec = self.input_spec
                else:
                    _type = self.type.lower()
                    if _type.startswith('cv/image'):
                        input_spec = [
                            paddle.static.InputSpec(
                                shape=[None, 3, None, None], dtype='float32')
                        ]
                    else:
                        raise RuntimeError(
                            'Module {} lacks `input_spec`, please specify it when calling `export_onnx_model`.'
                            .format(self.name))

            paddle.onnx.export(self,
                               save_file,
                               input_spec=input_spec,
                               **kwargs)
            return

        if not self._pretrained_model_path:
            raise RuntimeError(
                'Module {} does not support exporting models in ONNX format.'.
                format(self.name))
        elif not os.path.exists(self._pretrained_model_path):
            log.logger.warning(
                'The model path of Module {} does not exist.'.format(
                    self.name))
            return

        # Static-graph path: load the pretrained inference model from disk and
        # convert it with paddle2onnx.
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)

        # Detect how the pretrained model was serialized on disk.
        model_filename = None
        params_filename = None

        if os.path.exists(os.path.join(self._pretrained_model_path, 'model')):
            model_filename = 'model'

        if os.path.exists(os.path.join(self._pretrained_model_path, 'params')):
            params_filename = 'params'

        if os.path.exists(
                os.path.join(self._pretrained_model_path, '__params__')):
            params_filename = '__params__'

        save_file = os.path.join(dirname, '{}.onnx'.format(self.name))

        program, inputs, outputs = paddle.fluid.io.load_inference_model(
            dirname=self._pretrained_model_path,
            model_filename=model_filename,
            params_filename=params_filename,
            executor=exe)

        paddle2onnx.program2onnx(program=program,
                                 scope=paddle.static.global_scope(),
                                 feed_var_names=inputs,
                                 target_vars=outputs,
                                 save_file=save_file,
                                 **kwargs)
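A hedged usage sketch; hub.Module(name=...) is the standard PaddleHub entry point, but the module name and output directory below are placeholders:

    import paddlehub as hub

    module = hub.Module(name='lac')  # placeholder module name
    # Exports the model (and, by default, its sub-modules) in ONNX format
    # under ./onnx_models.
    module.export_onnx_model(dirname='./onnx_models')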
Code Example #7
    def save_inference_model(self,
                             dirname: str,
                             model_filename: str = None,
                             params_filename: str = None,
                             input_spec: List[paddle.static.InputSpec] = None,
                             include_sub_modules: bool = True,
                             combined: bool = True):
        '''
        Export the model to Paddle Inference format.

        Args:
            dirname(str): The directory to save the paddle inference model.
            model_filename(str): The name of the saved model file. Defaults to `__model__`.
            params_filename(str): The name of the saved parameters file, only takes effect when `combined` is True.
                Defaults to `__params__`.
            input_spec(list): Describes the input of the saved model's forward method, which can be described by
                InputSpec or example Tensor. If None, all input variables of the original Layer's forward method
                will be the inputs of the saved model. Defaults to None.
            include_sub_modules(bool): Whether to export sub modules. Defaults to True.
            combined(bool): Whether to save all parameters in a combined file. Defaults to True.
        '''
        if include_sub_modules:
            for key, _sub_module in self.sub_modules().items():
                try:
                    sub_dirname = os.path.normpath(os.path.join(dirname, key))
                    _sub_module.save_inference_model(
                        sub_dirname,
                        include_sub_modules=include_sub_modules,
                        model_filename=model_filename,
                        params_filename=params_filename,
                        combined=combined)
                except Exception:
                    utils.record_exception(
                        'Failed to save sub module {}'.format(
                            _sub_module.name))

        if isinstance(self, paddle.nn.Layer):
            save_file = os.path.join(dirname, '{}'.format(self.name))
            if not input_spec:
                if hasattr(self, 'input_spec'):
                    input_spec = self.input_spec
                else:
                    _type = self.type.lower()
                    if _type.startswith('cv/image'):
                        input_spec = [
                            paddle.static.InputSpec(
                                shape=[None, 3, None, None], dtype='float32')
                        ]
                    else:
                        raise RuntimeError(
                            'Module {} lacks `input_spec`, please specify it when calling `save_inference_model`.'
                            .format(self.name))

            net = paddle.jit.to_static(self, input_spec)
            paddle.jit.save(net, save_file)

            log.logger.info(
                'Paddle Inference model saved in {}.'.format(dirname))
            return

        if not self._pretrained_model_path:
            raise RuntimeError(
                'Module {} does not support exporting models in Paddle Inference format.'
                .format(self.name))
        elif not os.path.exists(self._pretrained_model_path):
            log.logger.warning(
                'The model path of Module {} does not exist.'.format(
                    self.name))
            return

        model_filename = '__model__' if not model_filename else model_filename
        if combined:
            params_filename = '__params__' if not params_filename else params_filename

        # Static-graph path: load the pretrained inference model and re-save it
        # with the requested file names.
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)

        # Detect how the pretrained model was serialized on disk.
        _model_filename = None
        _params_filename = None

        if os.path.exists(os.path.join(self._pretrained_model_path, 'model')):
            _model_filename = 'model'

        if os.path.exists(os.path.join(self._pretrained_model_path, 'params')):
            _params_filename = 'params'

        if os.path.exists(
                os.path.join(self._pretrained_model_path, '__params__')):
            _params_filename = '__params__'

        program, feeded_var_names, target_vars = paddle.fluid.io.load_inference_model(
            dirname=self._pretrained_model_path,
            executor=exe,
            model_filename=_model_filename,
            params_filename=_params_filename,
        )

        paddle.fluid.io.save_inference_model(dirname=dirname,
                                             main_program=program,
                                             executor=exe,
                                             feeded_var_names=feeded_var_names,
                                             target_vars=target_vars,
                                             model_filename=model_filename,
                                             params_filename=params_filename)

        log.logger.info('Paddle Inference model saved in {}.'.format(dirname))
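Usage follows the same pattern as the ONNX export above; the module name and output directory are placeholders:

    import paddlehub as hub

    module = hub.Module(name='lac')  # placeholder module name
    # Saves the model (combined __model__ / __params__ files by default)
    # under ./inference_model.
    module.save_inference_model(dirname='./inference_model')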