    def run(self, model_path: Path):
        """The model is downloaded when the run method is invoked.

        Arguments:
            model_path: The path to the trained model.
        """
        # 1: Use the host and port at runtime so it works in the cloud.
        # $ML_SERVER_URL/blob/master/mlserver/settings.py#L50
        if self.version == 1:
            # TODO: Reload the next version of the model.

            self.settings.update({"host": self.host, "http_port": self.port})

            with open("settings.json", "w") as f:
                json.dump(self.settings, f)

            # 2: Store the model-settings
            # $ML_SERVER_URL/blob/master/mlserver/settings.py#L120
            self.model_settings["parameters"] = {
                "version": f"v0.0.{self.version}",
                "uri": str(model_path.absolute()),
            }
            with open("model-settings.json", "w") as f:
                json.dump(self.model_settings, f)

            # 3: Launch the Model Server
            subprocess.Popen("mlserver start .", shell=True)

            # 4: Increment the version for the next time run is called.
            self.version += 1

        else:
            # TODO: Load the next model and unload the previous one.
            pass
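
A quick way to exercise the launched server is to send a V2 inference request to its REST endpoint. The sketch below is for illustration only: the model name "mnist-svm", the 64-feature input shape and the host/port are assumptions that must match what was written to model-settings.json and settings.json above.

import requests

inference_request = {
    "inputs": [
        {
            "name": "input-0",
            "shape": [1, 64],
            "datatype": "FP32",
            "data": [0.0] * 64,  # one flattened 8x8 digit image
        }
    ]
}
response = requests.post(
    "http://127.0.0.1:8080/v2/models/mnist-svm/infer",  # placeholder host/port
    json=inference_request,
)
print(response.json()["outputs"])
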
Example #2
    def run(self):
        # 2: Load the Digits
        digits = datasets.load_digits()

        # 3: To apply a classifier to this data, we need to flatten the
        # images, turning the data into a (samples, features) matrix:
        n_samples = len(digits.images)
        data = digits.images.reshape((n_samples, -1))

        # 4: Create a classifier: a support vector classifier
        classifier = svm.SVC(gamma=0.001)

        # 5: Split data into train and test subsets
        X_train, _, y_train, _ = train_test_split(data, digits.target, test_size=0.5, shuffle=False)

        # 6: Fit the classifier on the first half of the digits.
        classifier.fit(X_train, y_train)

        # 7: Save the Sklearn model with `joblib`.
        model_file_name = "mnist-svm.joblib"
        joblib.dump(classifier, model_file_name)

        # 8: Keep a reference to the generated model.
        self.best_model_path = Path("mnist-svm.joblib")
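
To sanity-check the saved artifact, it can be reloaded with joblib and scored on the held-out half of the split. This is a small sketch, not part of the original example; it simply mirrors the split performed above.

import joblib
from sklearn import datasets
from sklearn.model_selection import train_test_split

digits = datasets.load_digits()
data = digits.images.reshape((len(digits.images), -1))
_, X_test, _, y_test = train_test_split(
    data, digits.target, test_size=0.5, shuffle=False
)
classifier = joblib.load("mnist-svm.joblib")
print(f"Test accuracy: {classifier.score(X_test, y_test):.3f}")
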
Example #3
    def setup(
        self,
        trainer: "pl.Trainer",
        pl_module: "pl.LightningModule",
        stage: Optional[str] = None,
    ) -> None:
        log_dir = self._get_logdir(trainer)
        self.work.log_dir = Path(log_dir) if log_dir is not None else None
        self._collect_logger_metadata(trainer)
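
The helper _get_logdir is not shown in this snippet. A minimal sketch consistent with the test in Example #4 below (an assumption, not the actual implementation) falls back to the trainer's default root dir when no logger is attached, and otherwise uses the first logger's resolved log_dir.

    @staticmethod
    def _get_logdir(trainer: "pl.Trainer") -> Optional[str]:
        # No logger configured: use the trainer's default root dir.
        if not trainer.loggers:
            return trainer.default_root_dir
        # Otherwise take the resolved log_dir of the primary logger,
        # e.g. save_dir/lightning_logs/version_0 for TensorBoardLogger.
        return trainer.logger.log_dir
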
Example #4
def test_artifacts_tracker(tmpdir):
    work = ScriptRunner(root_path=os.path.dirname(__file__), script_path=__file__)
    tracker = PLAppArtifactsTracker(work=work)
    trainer = Mock()

    trainer.loggers = []
    trainer.default_root_dir = "default_root_dir"
    tracker.setup(trainer=trainer, pl_module=Mock())
    assert work.log_dir == Path("default_root_dir")
    assert not work.logger_metadatas

    trainer.loggers = [TensorBoardLogger(save_dir=tmpdir)]
    trainer.logger = trainer.loggers[0]
    tracker.setup(trainer=trainer, pl_module=Mock())
    assert work.log_dir == Path(tmpdir / "lightning_logs" / "version_0")
    assert len(work.logger_metadatas) == 1
    assert work.logger_metadatas[0] == {"class_name": "TensorBoardLogger"}

    # call setup a second time and the metadata length should not change
    tracker.setup(trainer=trainer, pl_module=Mock())
    assert len(work.logger_metadatas) == 1
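
The tracker's _collect_logger_metadata is not shown above. A sketch that would satisfy these assertions (an assumption about the real code, not a copy of it) records one small dict per logger and skips loggers it has already seen, which is why calling setup twice leaves the list unchanged.

    def _collect_logger_metadata(self, trainer: "pl.Trainer") -> None:
        for logger in trainer.loggers:
            metadata = {"class_name": logger.__class__.__name__}
            # Record each logger only once so repeated setup() calls are idempotent.
            if metadata not in self.work.logger_metadatas:
                self.work.logger_metadatas.append(metadata)
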
Example #5
    @staticmethod
    def _get_build_config(root_path: str) -> Optional[BuildConfig]:
        # These are the requirements for the script runner itself
        requirements = [
            "protobuf<4.21.0",
            "pytorch-lightning<=1.6.3",
            "pydantic<=1.9.0",
        ]
        if Path(root_path, "requirements.txt").exists():
            # Requirements from the user's code folder
            requirements.extend(
                load_requirements(root_path, file_name="requirements.txt"))

        return BuildConfig(requirements=requirements)
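
load_requirements and BuildConfig are imported elsewhere in the example. If you need a local stand-in for experimentation, a minimal hypothetical version of the helper could look like this (an assumption, not the library function):

from pathlib import Path
from typing import List


def load_requirements(root_path: str, file_name: str = "requirements.txt") -> List[str]:
    # Hypothetical stand-in: read a pip requirements file, dropping blank
    # lines and comment lines. The helper actually used may behave differently.
    lines = Path(root_path, file_name).read_text().splitlines()
    return [ln.strip() for ln in lines if ln.strip() and not ln.strip().startswith("#")]
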
Example #6
    def run(self) -> None:
        if not self.triggered:
            return

        if self.script_runner is None:
            self.script_runner = ScriptRunner(
                root_path=str(Path(__file__).parent / "source"),
                script_path=str(
                    Path(__file__).parent / "source" / self.script_path),
                script_args=self.script_args,
                env=self._prepare_environment(),
                parallel=True,
                cloud_compute=CloudCompute(**self.cloud_compute_args),
                raise_exception=False,
            )
            self.script_runner.run()

        self.running = self.script_runner is not None and self.script_runner.has_started
        self.succeeded = self.script_runner is not None and self.script_runner.has_succeeded
        self.failed = self.script_runner is not None and self.script_runner.has_failed

        if self.succeeded or self.failed:
            self.triggered = False
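
Note the control flow here: triggered acts as a one-shot latch, so repeated calls to run() do nothing until something external sets it again. The running, succeeded and failed flags mirror the child ScriptRunner's state so a parent component can poll them, and triggered is cleared once a terminal state is reached, which allows the script to be launched again later.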
Example #7
    def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if trainer.checkpoint_callback and trainer.checkpoint_callback.dirpath is not None:
            self.work.checkpoint_dir = Path(trainer.checkpoint_callback.dirpath)
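
The checkpoint directory is captured in on_train_start rather than at construction time because the trainer only resolves ModelCheckpoint's dirpath once it starts running; the guard also covers runs that have no checkpoint callback configured.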
Example #8
    def configure_layout(self):
        return StaticWebFrontend(str(Path(__file__).parent / "ui/build"))
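
StaticWebFrontend serves a prebuilt static bundle, here whatever has been compiled into the ui/build folder next to this file, so configure_layout only needs to point Lightning at that directory.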