async def start_scheduler(self):
    """Launch the scheduler VM and block until the scheduler is reachable.

    Creates the GCE instance via ``create_vm`` and then picks the address
    workers/clients should use:

    * external IP when ``public_ingress`` is enabled (default) and we are
      NOT already running inside GCE — requires a firewall rule for 8786;
    * internal IP otherwise, which needs no firewall setup.

    Side effects: sets ``self.internal_ip``/``self.external_ip``,
    ``self.address``, and mirrors the IPs onto ``self.cluster`` so workers
    can resolve the scheduler.
    """
    self.cluster._log(
        f"Launching cluster with the following configuration: "
        f"\n Source Image: {self.source_image} "
        f"\n Docker Image: {self.docker_image} "
        f"\n Machine Type: {self.machine_type} "
        # fixed typo in log output: "Filesytsem" -> "Filesystem"
        f"\n Filesystem Size: {self.filesystem_size} "
        f"\n N-GPU Type: {self.ngpus} {self.gpu_type}"
        f"\n Zone: {self.zone} "
    )
    self.cluster._log("Creating scheduler instance")
    self.internal_ip, self.external_ip = await self.create_vm()

    if self.config.get("public_ingress", True) and not is_inside_gce():
        # scheduler must be publicly available, and firewall
        # needs to be in place to allow access to 8786 on
        # the external IP
        self.address = f"{self.cluster.protocol}://{self.external_ip}:8786"
    else:
        # if the client is running inside GCE environment
        # it's better to use internal IP, which doesn't
        # require firewall setup
        self.address = f"{self.cluster.protocol}://{self.internal_ip}:8786"
    await self.wait_for_scheduler()

    # need to reserve internal IP for workers
    # gcp docker containers can't see resolve ip address
    self.cluster.scheduler_internal_ip = self.internal_ip
    self.cluster.scheduler_external_ip = self.external_ip
@pytest.mark.xfail(
    is_inside_gce(), reason="Fails if you run this test on GCE environment"
)
def test_is_gce_env():
    """Smoke-test ``is_inside_gce``: off-GCE it must report False.

    Marked xfail when actually running inside GCE, matching the sibling
    definition of this test, so the assertion below doesn't break CI there.
    """
    # Note: this test isn't super valuable, but at least we run the code
    assert is_inside_gce() is False
import pytest

from dask_cloudprovider.gcp.utils import build_request, is_inside_gce


def test_build_request():
    """The factory returns a callable request builder that yields a truthy request."""
    make = build_request()
    request = make(None, lambda x: x, "https://example.com")
    assert request


@pytest.mark.xfail(
    is_inside_gce(), reason="Fails if you run this test on GCE environment"
)
def test_is_gce_env():
    # Note: this test isn't super valuable, but at least we run the code
    outside_gce = is_inside_gce() is False
    assert outside_gce