Example #1
    def test_dask_runtime_with_resources(self, db: Session,
                                         client: TestClient):
        runtime: mlrun.runtimes.DaskCluster = self._generate_runtime()

        expected_requests = generate_resources(mem="2G", cpu=3)
        runtime.with_requests(mem=expected_requests["memory"],
                              cpu=expected_requests["cpu"])
        gpu_type = "nvidia.com/gpu"
        expected_gpus = 2
        expected_scheduler_limits = generate_resources(mem="4G",
                                                       cpu=5,
                                                       gpus=expected_gpus,
                                                       gpu_type=gpu_type)
        expected_worker_limits = generate_resources(mem="4G",
                                                    cpu=5,
                                                    gpus=expected_gpus,
                                                    gpu_type=gpu_type)
        runtime.with_scheduler_limits(
            mem=expected_scheduler_limits["memory"],
            cpu=expected_scheduler_limits["cpu"],
        )
        runtime.with_worker_limits(
            mem=expected_worker_limits["memory"],
            cpu=expected_worker_limits["cpu"],
        )
        runtime.gpus(expected_gpus, gpu_type)
        # accessing the client property is what triggers the (mocked) cluster deployment
        _ = runtime.client

        self.kube_cluster_mock.assert_called_once()

        self._assert_pod_creation_config(
            expected_runtime_class_name="dask",
            assert_create_pod_called=False,
            assert_namespace_env_variable=False,
        )
        self._assert_v3io_mount_configured(self.v3io_user,
                                           self.v3io_access_key)
        # the same requests are expected for both the worker and the scheduler pods,
        # each paired with its own limits
        self._assert_pods_resources(
            expected_requests,
            expected_worker_limits,
            expected_requests,
            expected_scheduler_limits,
        )
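
These examples build their expected values with a generate_resources test helper that is not shown here. Judging from how its return value is indexed ("memory", "cpu", and the GPU type string), it presumably returns a plain dict keyed by resource name; a minimal sketch under that assumption:

def generate_resources(mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"):
    # Assumed shape: a dict keyed exactly the way the tests read it back,
    # e.g. {"memory": "2G", "cpu": 3, "nvidia.com/gpu": 2}.
    resources = {}
    if mem is not None:
        resources["memory"] = mem
    if cpu is not None:
        resources["cpu"] = cpu
    if gpus is not None:
        resources[gpu_type] = gpus
    return resources

The positional call generate_resources(2, 4, 4, gpu_type) in Example #2 fits the same assumed signature (mem=2, cpu=4, gpus=4).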
Example #2
    def test_run_with_resource_limits_and_requests(self, db: Session,
                                                   client: TestClient):
        runtime = self._generate_runtime()

        gpu_type = "test/gpu"
        expected_limits = generate_resources(2, 4, 4, gpu_type)
        runtime.with_limits(
            mem=expected_limits["memory"],
            cpu=expected_limits["cpu"],
            gpus=expected_limits[gpu_type],
            gpu_type=gpu_type,
        )

        expected_requests = generate_resources(mem=2, cpu=3)
        runtime.with_requests(mem=expected_requests["memory"],
                              cpu=expected_requests["cpu"])

        # run the function and verify the created pod carries the configured
        # limits and requests
        self._execute_run(runtime)
        self._assert_pod_creation_config(expected_limits=expected_limits,
                                         expected_requests=expected_requests)
Example #3
    def test_dask_runtime(self, db: Session, client: TestClient):
        runtime = self._generate_runtime()

        expected_requests = generate_resources(mem="2G", cpu=3)
        runtime.with_requests(mem=expected_requests["memory"],
                              cpu=expected_requests["cpu"])
        _ = runtime.client

        self.kube_cluster_mock.assert_called_once()

        self._assert_pod_creation_config(
            expected_runtime_class_name="dask",
            assert_create_pod_called=False,
            assert_namespace_env_variable=False,
            expected_requests=expected_requests,
        )
        self._assert_v3io_mount_configured(self.v3io_user,
                                           self.v3io_access_key)
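
For orientation, the requests and limits configured through with_requests, with_scheduler_limits, with_worker_limits and gpus end up in the pod spec as Kubernetes resource requirements. An illustrative sketch (not MLRun's actual internals) of what Example #1's scheduler resources would amount to, using the official kubernetes client:

from kubernetes import client as k8s_client

# Hypothetical rendering of Example #1's scheduler resources:
# requests mem="2G"/cpu=3, limits mem="4G"/cpu=5 plus 2 "nvidia.com/gpu".
scheduler_resources = k8s_client.V1ResourceRequirements(
    requests={"memory": "2G", "cpu": "3"},
    limits={"memory": "4G", "cpu": "5", "nvidia.com/gpu": "2"},
)

The _assert_pods_resources and _assert_pod_creation_config helpers in these tests presumably compare the generated pod specs against this kind of structure.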