async def test_render_cloud_init():
    """Rendered cloud-init should include docker args and extra bootstrap commands."""
    rendered = AzureVMCluster.get_cloud_init(docker_args="--privileged")
    assert " --privileged " in rendered

    commands = ["echo 'hello world'", "echo 'foo bar'"]
    rendered = AzureVMCluster.get_cloud_init(extra_bootstrap=commands)
    # Each bootstrap command is emitted as a YAML list item in the template.
    for command in commands:
        assert f"- {command}" in rendered
async def test_create_cluster_sync():
    """Create a cluster synchronously, scale to one worker, and run a task."""

    def inc(x):
        return x + 1

    with AzureVMCluster() as cluster, Client(cluster) as client:
        cluster.scale(1)
        client.wait_for_workers(1)
        assert len(cluster.workers) == 1
        assert client.submit(inc, 10).result() == 11
async def test_create_cluster():
    """Create a cluster asynchronously, scale to two workers, and run a task."""

    def inc(x):
        return x + 1

    async with AzureVMCluster(asynchronous=True) as cluster:
        assert cluster.status == Status.running

        cluster.scale(2)
        # Awaiting the cluster blocks until the requested workers are up.
        await cluster
        assert len(cluster.workers) == 2

        async with Client(cluster, asynchronous=True) as client:
            assert await client.submit(inc, 10).result() == 11
async def test_create_rapids_cluster_sync():
    """Spin up a GPU cluster with dask-cuda workers and query GPU memory on each."""

    def gpu_mem():
        # Runs on the worker: report free/total GPU memory via NVML.
        from pynvml.smi import nvidia_smi

        nvsmi = nvidia_smi.getInstance()
        return nvsmi.DeviceQuery("memory.free, memory.total")

    cluster_kwargs = {
        "vm_size": "Standard_NC12s_v3",
        "docker_image": "rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.8",
        "worker_class": "dask_cuda.CUDAWorker",
        "worker_options": {"rmm_pool_size": "15GB"},
    }
    with AzureVMCluster(**cluster_kwargs) as cluster:
        with Client(cluster) as client:
            cluster.scale(1)
            client.wait_for_workers(1)
            results = client.run(gpu_mem)
            for worker, result in results.items():
                assert "total" in result["gpu"][0]["fb_memory_usage"]
                print(result)
async def test_init():
    """A cluster built with ``asynchronous=True`` starts in the ``created`` state."""
    assert AzureVMCluster(asynchronous=True).status == Status.created
async def test_render_cloud_init_docker_args():
    """Docker args passed to ``get_cloud_init`` appear in the rendered template.

    NOTE(review): renamed from ``test_render_cloud_init`` — a test of that name
    is already defined earlier in this module, so this duplicate definition
    shadowed it at import time and pytest silently collected only one of the
    two. The rename lets both tests run.
    """
    cloud_init = AzureVMCluster.get_cloud_init(docker_args="--privileged")
    assert " --privileged " in cloud_init