Example No. 1
    def from_checkpoint(
        cls,
        checkpoint: Checkpoint,
        model_definition: Union[Callable[[], tf.keras.Model], Type[tf.keras.Model]],
        use_gpu: bool = False,
    ) -> "TensorflowPredictor":
        """Instantiate the predictor from a Checkpoint.

        The checkpoint is expected to be a result of ``TensorflowTrainer``.

        Args:
            checkpoint: The checkpoint to load the model and
                preprocessor from. It is expected to be from the result of a
                ``TensorflowTrainer`` run.
            model_definition: A callable that returns a TensorFlow Keras model
                to use. Model weights will be loaded from the checkpoint.
            use_gpu: If set, the model will be moved to GPU on instantiation and
                prediction happens on GPU.
        """
        checkpoint = TensorflowCheckpoint.from_checkpoint(checkpoint)
        model_weights = checkpoint.get_model_weights()
        preprocessor = checkpoint.get_preprocessor()
        return cls(
            model_definition=model_definition,
            model_weights=model_weights,
            preprocessor=preprocessor,
            use_gpu=use_gpu,
        )
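
A minimal usage sketch for context, assuming the Ray AIR 2.x API (in particular that ``TensorflowCheckpoint.from_model`` builds a checkpoint directly from a Keras model); the toy model and data are placeholders:

    import numpy as np
    import tensorflow as tf
    from ray.train.tensorflow import TensorflowCheckpoint, TensorflowPredictor

    def build_model() -> tf.keras.Model:
        # Toy single-layer model; stands in for whatever TensorflowTrainer trained.
        return tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])

    # `from_model` is assumed from the Ray AIR 2.x checkpoint API.
    checkpoint = TensorflowCheckpoint.from_model(build_model())
    predictor = TensorflowPredictor.from_checkpoint(
        checkpoint, model_definition=build_model
    )
    predictions = predictor.predict(np.array([[1.0], [2.0]], dtype=np.float32))
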
Example No. 2
    def from_checkpoint(cls, checkpoint: Checkpoint,
                        **kwargs) -> "DummyPredictor":
        with checkpoint.as_directory():
            # Simulate reading model data from the checkpoint directory.
            time.sleep(1)
        checkpoint_data = checkpoint.to_dict()
        preprocessor = checkpoint.get_preprocessor()
        return cls(checkpoint_data["factor"], preprocessor=preprocessor)
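
A matching call-site sketch: ``Checkpoint.from_dict`` is the Ray AIR dict-checkpoint constructor, and ``"factor"`` is the only key this ``from_checkpoint`` reads.

    from ray.air.checkpoint import Checkpoint

    # Dict checkpoint holding the single value DummyPredictor.from_checkpoint expects.
    checkpoint = Checkpoint.from_dict({"factor": 2.0})
    predictor = DummyPredictor.from_checkpoint(checkpoint)
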
Example No. 3
    def from_checkpoint(cls,
                        checkpoint: Checkpoint,
                        use_gpu: bool = False,
                        **kwargs) -> "DummyPredictor":
        checkpoint_data = checkpoint.to_dict()
        preprocessor = checkpoint.get_preprocessor()
        return cls(checkpoint_data["factor"],
                   preprocessor=preprocessor,
                   use_gpu=use_gpu)
Example No. 4
    def from_checkpoint(cls, checkpoint: Checkpoint) -> "SklearnPredictor":
        """Instantiate the predictor from a Checkpoint.

        The checkpoint is expected to be a result of ``SklearnTrainer``.

        Args:
            checkpoint: The checkpoint to load the model and
                preprocessor from. It is expected to be from the result of a
                ``SklearnTrainer`` run.
        """
        checkpoint = SklearnCheckpoint.from_checkpoint(checkpoint)
        estimator = checkpoint.get_estimator()
        preprocessor = checkpoint.get_preprocessor()
        return cls(estimator=estimator, preprocessor=preprocessor)
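
A possible end-to-end sketch, assuming the Ray AIR 2.x ``SklearnCheckpoint.from_estimator`` helper (its ``path`` keyword, the directory the estimator is pickled into, is part of that assumption):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from ray.train.sklearn import SklearnCheckpoint, SklearnPredictor

    X, y = make_classification(n_samples=100, n_features=4, random_state=0)
    estimator = RandomForestClassifier(n_estimators=10).fit(X, y)

    # `from_estimator` and its `path` keyword are assumed from the Ray AIR 2.x API.
    checkpoint = SklearnCheckpoint.from_estimator(estimator, path=".")
    predictor = SklearnPredictor.from_checkpoint(checkpoint)
    predictions = predictor.predict(X)
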
Example No. 5
    def from_checkpoint(cls, checkpoint: Checkpoint) -> "XGBoostPredictor":
        """Instantiate the predictor from a Checkpoint.

        The checkpoint is expected to be a result of ``XGBoostTrainer``.

        Args:
            checkpoint: The checkpoint to load the model and
                preprocessor from. It is expected to be from the result of an
                ``XGBoostTrainer`` run.

        """
        checkpoint = XGBoostCheckpoint.from_checkpoint(checkpoint)
        model = checkpoint.get_model()
        preprocessor = checkpoint.get_preprocessor()
        return cls(model=model, preprocessor=preprocessor)
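
An end-to-end sketch under a similar assumption (``XGBoostCheckpoint.from_model`` wrapping an existing ``xgboost.Booster`` is taken from the Ray AIR 2.x API):

    import xgboost
    from sklearn.datasets import load_breast_cancer
    from ray.train.xgboost import XGBoostCheckpoint, XGBoostPredictor

    data, labels = load_breast_cancer(return_X_y=True, as_frame=True)
    booster = xgboost.train(
        {"objective": "binary:logistic"},
        xgboost.DMatrix(data, label=labels),
        num_boost_round=5,
    )

    # `from_model` is assumed from the Ray AIR 2.x checkpoint API.
    checkpoint = XGBoostCheckpoint.from_model(booster)
    predictor = XGBoostPredictor.from_checkpoint(checkpoint)
    predictions = predictor.predict(data)
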
Example No. 6
    def from_checkpoint(
        cls,
        checkpoint: Checkpoint,
        env: Optional[EnvType] = None,
        **kwargs,
    ) -> "Predictor":
        """Create RLPredictor from checkpoint.

        This method requires that the checkpoint was created with the Ray AIR
        RLTrainer.

        Args:
            checkpoint: The checkpoint to load the model and
                preprocessor from.
            env: Optional environment to instantiate the trainer with. If not given,
                it is parsed from the saved trainer configuration instead.

        """
        checkpoint = RLCheckpoint.from_checkpoint(checkpoint)
        policy = checkpoint.get_policy(env)
        preprocessor = checkpoint.get_preprocessor()
        return cls(policy=policy, preprocessor=preprocessor)
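
A small helper sketch showing how this classmethod is typically consumed; the checkpoint is assumed to come from an earlier ``RLTrainer`` run (for example PPO on ``CartPole-v1``), which is not reproduced here:

    import numpy as np
    from ray.air import Checkpoint
    from ray.train.rl import RLPredictor

    def act(checkpoint: Checkpoint, obs_batch: np.ndarray) -> np.ndarray:
        # obs_batch: a batch of environment observations, shape (batch, obs_dim).
        predictor = RLPredictor.from_checkpoint(checkpoint)
        return predictor.predict(obs_batch)
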
Example No. 7
    def from_checkpoint(
        cls,
        checkpoint: Checkpoint,
        *,
        pipeline_cls: Optional[Type[Pipeline]] = None,
        **pipeline_kwargs,
    ) -> "HuggingFacePredictor":
        """Instantiate the predictor from a Checkpoint.

        The checkpoint is expected to be a result of ``HuggingFaceTrainer``.

        Args:
            checkpoint: The checkpoint to load the model, tokenizer and
                preprocessor from. It is expected to be from the result of a
                ``HuggingFaceTrainer`` run.
            pipeline_cls: A ``transformers.pipelines.Pipeline`` class to use.
                If not specified, will use the ``pipeline`` abstraction
                wrapper.
            **pipeline_kwargs: Any kwargs to pass to the pipeline
                initialization. If ``pipeline_cls`` is None, this must contain
                the 'task' argument. Cannot contain 'model'. Can be used
                to override the tokenizer with 'tokenizer'.
        """
        if not pipeline_cls and "task" not in pipeline_kwargs:
            raise ValueError(
                "If `pipeline_cls` is not specified, 'task' must be passed as a kwarg."
            )
        pipeline_cls = pipeline_cls or pipeline_factory
        preprocessor = checkpoint.get_preprocessor()
        with checkpoint.as_directory() as checkpoint_path:
            # Tokenizer will be loaded automatically (no need to specify
            # `tokenizer=checkpoint_path`)
            pipeline = pipeline_cls(model=checkpoint_path, **pipeline_kwargs)
        return cls(
            pipeline=pipeline,
            preprocessor=preprocessor,
        )
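
A usage sketch, assuming the Ray AIR 2.x ``HuggingFaceCheckpoint.from_model`` constructor; the tiny GPT-2 model name, the ``path`` argument, and the DataFrame input format are illustrative assumptions:

    import pandas as pd
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from ray.train.huggingface import HuggingFaceCheckpoint, HuggingFacePredictor

    model_name = "sshleifer/tiny-gpt2"
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # `from_model` and its `path` argument are assumed from the Ray AIR 2.x API.
    checkpoint = HuggingFaceCheckpoint.from_model(model, tokenizer=tokenizer, path=".")
    # No pipeline_cls is given, so the 'task' kwarg is required (see the check above).
    predictor = HuggingFacePredictor.from_checkpoint(checkpoint, task="text-generation")
    predictions = predictor.predict(pd.DataFrame(["Ray AIR is"], columns=["text"]))
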
Example No. 8
    def from_checkpoint(
        cls,
        checkpoint: Checkpoint,
        model: Optional[torch.nn.Module] = None,
        use_gpu: bool = False,
    ) -> "TorchPredictor":
        """Instantiate the predictor from a Checkpoint.

        The checkpoint is expected to be a result of ``TorchTrainer``.

        Args:
            checkpoint: The checkpoint to load the model and
                preprocessor from. It is expected to be from the result of a
                ``TorchTrainer`` run.
            model: If the checkpoint contains a model state dict, and not
                the model itself, then the state dict will be loaded to this
                ``model``.
            use_gpu: If set, the model will be moved to GPU on instantiation and
                prediction happens on GPU.
        """
        checkpoint = TorchCheckpoint.from_checkpoint(checkpoint)
        model = checkpoint.get_model(model)
        preprocessor = checkpoint.get_preprocessor()
        return cls(model=model, preprocessor=preprocessor, use_gpu=use_gpu)
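
A sketch exercising the documented ``model`` argument: the checkpoint holds only a state dict (via ``TorchCheckpoint.from_state_dict``, assumed from the Ray AIR 2.x API), so the architecture is supplied at load time:

    import numpy as np
    import torch
    from ray.train.torch import TorchCheckpoint, TorchPredictor

    model = torch.nn.Linear(1, 1)

    # The checkpoint stores only weights, so `model=` provides the architecture.
    checkpoint = TorchCheckpoint.from_state_dict(model.state_dict())
    predictor = TorchPredictor.from_checkpoint(checkpoint, model=torch.nn.Linear(1, 1))
    predictions = predictor.predict(np.array([[1.0], [2.0]], dtype=np.float32))
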
Example No. 9
    def from_checkpoint(cls, checkpoint: Checkpoint,
                        **kwargs) -> "DummyPredictor":
        checkpoint_data = checkpoint.to_dict()
        preprocessor = checkpoint.get_preprocessor()
        return cls(checkpoint_data["factor"], preprocessor)
Example No. 10
    def _load_checkpoint(
        self, checkpoint: Checkpoint
    ) -> Tuple[lightgbm.Booster, Optional["Preprocessor"]]:
        checkpoint = LightGBMCheckpoint.from_checkpoint(checkpoint)
        return checkpoint.get_model(), checkpoint.get_preprocessor()
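
The same checkpoint accessors can be exercised end to end through ``LightGBMPredictor``; a sketch assuming the Ray AIR 2.x ``LightGBMCheckpoint.from_model`` helper:

    import lightgbm
    from sklearn.datasets import load_breast_cancer
    from ray.train.lightgbm import LightGBMCheckpoint, LightGBMPredictor

    X, y = load_breast_cancer(return_X_y=True, as_frame=True)
    booster = lightgbm.train(
        {"objective": "binary"}, lightgbm.Dataset(X, label=y), num_boost_round=5
    )

    # `from_model` is assumed from the Ray AIR 2.x checkpoint API.
    checkpoint = LightGBMCheckpoint.from_model(booster)
    predictor = LightGBMPredictor.from_checkpoint(checkpoint)
    predictions = predictor.predict(X)
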
Example No. 11
    def _load_checkpoint(
        self, checkpoint: Checkpoint
    ) -> Tuple[xgboost.Booster, Optional["Preprocessor"]]:
        checkpoint = XGBoostCheckpoint.from_checkpoint(checkpoint)
        return checkpoint.get_model(), checkpoint.get_preprocessor()