Example #1
 def restore(self, dump):
     """ if dump is in zip format, will unzip it. expects the following dir structure in the unzipped file:
     <root>
      - <task>
        - <id>-reporting.log
        - <id>.yml
     """
     # unzip_files is assumed to return the path unchanged if the input is
     # not an archive (see the cleanup check at the end)
     dump_dir = unzip_files(dump)
     for task in os.listdir(dump_dir):
         task_dir = os.path.join(dump_dir, task)
         for exp in os.listdir(task_dir):
             exp_dir = os.path.join(task_dir, exp)
             # There must be exactly one meta, reporting, and config file per experiment
             meta = [os.path.join(exp_dir, x) for x in os.listdir(exp_dir) if x.endswith('meta.yml')]
             reporting = [os.path.join(exp_dir, x) for x in os.listdir(exp_dir) if x.endswith('reporting.log')]
             config = [os.path.join(exp_dir, x) for x in os.listdir(exp_dir) if x.endswith('config.yml')]
             if len(config) != 1 or len(reporting) != 1 or len(meta) != 1:
                 raise RuntimeError('There should be exactly one meta file, one config file and one reporting log '
                                    'in {}'.format(exp_dir))
             config = read_config_file(config[0])
             meta = read_config_file(meta[0])
             reporting = log2json(reporting[0])
             self._put_result(task, config_obj=config, events_obj=reporting, **meta)
     # Only delete the directory if it was created by unzipping
     if dump_dir != dump:
         shutil.rmtree(dump_dir)
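
A minimal usage sketch for restore, assuming the method lives on a results-store class; ExperimentRepo and the archive name are illustrative, not from the source:

# Hypothetical caller; 'ExperimentRepo' and the archive path are illustrative.
repo = ExperimentRepo()
repo.restore('experiments-dump.zip')  # unzips, ingests every task/experiment, cleans up
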
Example #2
    def load(cls, bundle, **kwargs):
        """Load a model from a bundle.

        This can be either a local model or a remote, exported model.

        :returns: a Service implementation
        """
        import onnxruntime as ort

        # Use a directory as-is; otherwise try to unzip it as a zip file
        if os.path.isdir(bundle):
            directory = bundle
        else:
            directory = unzip_files(bundle)

        model_basename = find_model_basename(directory)
        # The exported ONNX graph sits next to the model files
        model_name = f"{model_basename}.onnx"

        vocabs = load_vocabs(directory)
        vectorizers = load_vectorizers(directory)

        model = ort.InferenceSession(model_name)
        return cls(vocabs, vectorizers, model)
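
For reference, a sketch of driving an onnxruntime InferenceSession like the one created above; the input name and shape are assumptions about the exported graph, not taken from the source:

import numpy as np
import onnxruntime as ort

# 'model.onnx' and 'input_ids' are illustrative; a real graph defines its own names.
session = ort.InferenceSession('model.onnx')
print([i.name for i in session.get_inputs()])  # inspect the graph's expected inputs
outputs = session.run(None, {'input_ids': np.zeros((1, 128), dtype=np.int64)})
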
Example #3
    def load(cls, bundle, **kwargs):
        """Load a model from a bundle.

        This can be either a local model or a remote, exported model.

        :returns: a Service implementation
        """
        # Use a directory as-is; otherwise unzip the bundle
        if os.path.isdir(bundle):
            directory = bundle
        else:
            directory = unzip_files(bundle)

        model_basename = find_model_basename(directory)
        vocabs = load_vocabs(directory)
        vectorizers = load_vectorizers(directory)

        be = normalize_backend(kwargs.get('backend', 'tf'))

        remote = kwargs.get("remote", None)
        name = kwargs.get("name", None)
        if remote:
            beam = kwargs.get('beam', 10)
            model = Service._create_remote_model(directory, be, remote, name, cls.signature_name(), beam, preproc=kwargs.get('preproc', False))
            return cls(vocabs, vectorizers, model)

        # Register backend-specific embeddings and task modules before loading
        import_user_module('baseline.{}.embeddings'.format(be))
        import_user_module('baseline.{}.{}'.format(be, cls.task_name()))
        model = load_model_for(cls.task_name(), model_basename, **kwargs)
        return cls(vocabs, vectorizers, model)
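
A usage sketch contrasting the local and remote paths of this load; the service class, endpoint, and kwarg values are illustrative assumptions:

# Hypothetical calls; 'EncoderDecoderService' and the endpoint are illustrative.
local = EncoderDecoderService.load('model.zip', backend='tf')
served = EncoderDecoderService.load('model.zip',
                                    backend='tf',
                                    remote='localhost:8500',  # serving endpoint
                                    name='seq2seq',
                                    beam=10)
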
Example #4
    def load(cls, bundle, **kwargs):
        """Load a model from a bundle.

        This can be either a local model or a remote, exported model.

        :returns: a Service implementation
        """
        # Resolve the bundle: a directory is used as-is, a file is unzipped,
        # and anything else is treated as a path prefix (dir + model basename)
        basehead = None

        if os.path.isdir(bundle):
            directory = bundle
        elif os.path.isfile(bundle):
            directory = unzip_files(bundle)
        else:
            directory = os.path.dirname(bundle)
            basehead = os.path.basename(bundle)
        model_basename = find_model_basename(directory, basehead)
        # Vocab files share the model's trailing id: '<name>-<id>' -> '<id>.json'
        suffix = model_basename.split('-')[-1] + ".json"
        vocabs = load_vocabs(directory, suffix)

        be = normalize_backend(kwargs.get('backend', 'tf'))

        remote = kwargs.get("remote", None)
        name = kwargs.get("name", None)
        if remote:
            logging.debug("loading remote model")
            beam = int(kwargs.get('beam', 30))
            model, preproc = Service._create_remote_model(
                directory,
                be,
                remote,
                name,
                cls.task_name(),
                cls.signature_name(),
                beam,
                preproc=kwargs.get('preproc', 'client'),
                version=kwargs.get('version'),
                remote_type=kwargs.get('remote_type'),
            )
            vectorizers = load_vectorizers(directory)
            return cls(vocabs, vectorizers, model, preproc)

        import_user_module('baseline.{}.embeddings'.format(be))
        try:
            import_user_module('baseline.{}.{}'.format(be, cls.task_name()))
        except ImportError:
            # The backend-specific task module is optional
            pass
        model = load_model_for(cls.task_name(), model_basename, **kwargs)
        vectorizers = load_vectorizers(directory)
        return cls(vocabs, vectorizers, model, 'client')
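
This variant also accepts a bare path prefix that is neither a directory nor a file; a sketch of that case, with all names illustrative:

# Hypothetical: '/models/tagger/tagger-1234' is neither a dir nor a file, so it
# resolves to directory '/models/tagger' with basehead 'tagger-1234'.
svc = TaggerService.load('/models/tagger/tagger-1234', backend='pytorch')
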
Example #5
    def load(cls, bundle, **kwargs):
        """Load a model from a bundle.

        This can be either a local model or a remote, exported model.

        :returns: a Service implementation
        """
        # Use a directory as-is; otherwise unzip the bundle
        if os.path.isdir(bundle):
            directory = bundle
        else:
            directory = unzip_files(bundle)

        model_basename = find_model_basename(directory)
        vocabs = load_vocabs(directory)
        vectorizers = load_vectorizers(directory)

        be = normalize_backend(kwargs.get('backend', 'tf'))

        remote = kwargs.get("remote", None)
        name = kwargs.get("name", None)
        if remote:
            logging.debug("loading remote model")
            beam = kwargs.get('beam', 30)
            model, preproc = Service._create_remote_model(
                directory, be, remote, name, cls.signature_name(), beam,
                preproc=kwargs.get('preproc', 'client'),
                version=kwargs.get('version')
            )
            return cls(vocabs, vectorizers, model, preproc)

        import_user_module('baseline.{}.embeddings'.format(be))
        try:
            import_user_module('baseline.{}.{}'.format(be, cls.task_name()))
        except ImportError:
            # The backend-specific task module is optional
            pass
        model = load_model_for(cls.task_name(), model_basename, **kwargs)
        return cls(vocabs, vectorizers, model, 'client')
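
Because this variant threads the preproc mode through to the constructor, a caller can push preprocessing to the serving side when using a remote; the class and values below are illustrative:

# Hypothetical remote call; endpoint, name, and version are illustrative.
svc = MyService.load('model.zip',
                     remote='localhost:8500',
                     name='model',
                     preproc='server',  # default is 'client'
                     version='1')
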
Example #6
    def load(cls, bundle, **kwargs):
        """Load a model from a bundle.

        This can be either a local model or a remote, exported model.

        :returns: a Service implementation
        """
        import onnxruntime as ort
        # Use a directory as-is; otherwise unzip the bundle
        if os.path.isdir(bundle):
            directory = bundle
        else:
            directory = unzip_files(bundle)

        model_basename = find_model_basename(directory)
        model_name = f"{model_basename}.onnx"

        vocabs = load_vocabs(directory)
        vectorizers = load_vectorizers(directory)

        # Load the label set exported alongside the model
        labels = read_json(model_basename + '.labels')

        model = ort.InferenceSession(model_name)
        return cls(vocabs, vectorizers, model, labels)
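
A short usage sketch for this ONNX classifier-style variant; ClassifierService and the bundle name are illustrative assumptions:

# Hypothetical usage; the class and bundle names are illustrative.
svc = ClassifierService.load('sst2-model.zip')
# svc now wraps vocabs, vectorizers, an ort.InferenceSession, and the label list.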