def __init__(self,
             custom_scripts: List[CustomScript] = None,
             file_shares: List[FileShare] = None,
             cluster_id: str = None,
             vm_count=None,
             vm_low_pri_count=None,
             vm_size=None,
             subnet_id=None,
             docker_repo: str = None,
             spark_configuration: SparkConfiguration = None,
             user_configuration: UserConfiguration = None):
    """Build a Spark cluster configuration.

    Forwards the generic cluster settings to the parent initializer, then
    records the Spark-specific pieces and two derived flags.

    Args:
        custom_scripts: scripts to run on cluster nodes.
        file_shares: file shares to mount on the nodes.
        cluster_id: unique id for the cluster.
        vm_count: number of dedicated nodes.
        vm_low_pri_count: number of low-priority nodes.
        vm_size: Azure VM size; also determines GPU support.
        subnet_id: virtual-network subnet to deploy into.
        docker_repo: docker image repository to use on the nodes.
        spark_configuration: Spark-specific configuration object.
        user_configuration: user account configuration.
    """
    super().__init__(custom_scripts=custom_scripts,
                     cluster_id=cluster_id,
                     vm_count=vm_count,
                     vm_low_pri_count=vm_low_pri_count,
                     vm_size=vm_size,
                     docker_repo=docker_repo,
                     subnet_id=subnet_id,
                     file_shares=file_shares,
                     user_configuration=user_configuration)
    self.spark_configuration = spark_configuration
    # GPU support is inferred purely from the VM size string.
    self.gpu_enabled = helpers.is_gpu_enabled(vm_size)
    # Mixed mode: the cluster requests both dedicated and low-priority nodes.
    # (Was `True if ... else False`, which is the boolean expression itself.)
    # NOTE(review): raises TypeError when vm_count/vm_low_pri_count are None
    # (so does the original) — confirm callers always pass integers here.
    self.mixed_mode = (self.vm_count > 0) and (self.vm_low_pri_count > 0)
def __init__(self,
             id=None,
             applications=None,
             vm_size=None,
             spark_configuration=None,
             toolkit=None,
             max_dedicated_nodes=0,
             max_low_pri_nodes=0,
             subnet_id=None,
             scheduling_target: SchedulingTarget = None,
             worker_on_master=None):
    """Capture a job/cluster specification.

    All arguments are stored verbatim on the instance; `gpu_enabled` is
    derived from `vm_size` (and left as None when no size was given).

    Args:
        id: unique identifier for this specification.
        applications: applications to run.
        vm_size: Azure VM size; also determines GPU support.
        spark_configuration: Spark-specific configuration object.
        toolkit: toolkit/runtime selection.
        max_dedicated_nodes: cap on dedicated nodes (default 0).
        max_low_pri_nodes: cap on low-priority nodes (default 0).
        subnet_id: virtual-network subnet to deploy into.
        scheduling_target: where tasks may be scheduled.
        worker_on_master: whether a worker also runs on the master node.
    """
    self.id = id
    self.applications = applications
    self.spark_configuration = spark_configuration
    self.vm_size = vm_size
    # None when no VM size was supplied; otherwise derived from the size.
    self.gpu_enabled = helpers.is_gpu_enabled(vm_size) if vm_size else None
    self.toolkit = toolkit
    self.max_dedicated_nodes = max_dedicated_nodes
    self.max_low_pri_nodes = max_low_pri_nodes
    self.subnet_id = subnet_id
    self.worker_on_master = worker_on_master
    self.scheduling_target = scheduling_target
def __init__(self,
             id,
             applications,
             vm_size,
             custom_scripts=None,
             spark_configuration=None,
             docker_repo=None,
             max_dedicated_nodes=None,
             max_low_pri_nodes=None):
    """Capture a job specification.

    Stores every argument on the instance unchanged and derives the
    `gpu_enabled` flag from the (required) VM size.

    Args:
        id: unique identifier for the job.
        applications: applications to run.
        vm_size: Azure VM size; also determines GPU support.
        custom_scripts: scripts to run on the nodes.
        spark_configuration: Spark-specific configuration object.
        docker_repo: docker image repository to use on the nodes.
        max_dedicated_nodes: cap on dedicated nodes.
        max_low_pri_nodes: cap on low-priority nodes.
    """
    # Required identity / workload settings.
    self.id = id
    self.applications = applications
    self.vm_size = vm_size
    # GPU support is inferred purely from the VM size string.
    self.gpu_enabled = helpers.is_gpu_enabled(vm_size)
    # Optional customization.
    self.custom_scripts = custom_scripts
    self.spark_configuration = spark_configuration
    self.docker_repo = docker_repo
    # Node-count caps.
    self.max_dedicated_nodes = max_dedicated_nodes
    self.max_low_pri_nodes = max_low_pri_nodes
def __init__(self,
             custom_scripts: List[CustomScript] = None,
             file_shares: List[FileShare] = None,
             cluster_id: str = None,
             vm_count=None,
             vm_low_pri_count=None,
             vm_size=None,
             docker_repo: str = None,
             spark_configuration: SparkConfiguration = None):
    """Build a Spark cluster configuration.

    Hands the generic cluster settings to the parent initializer, then
    records the Spark configuration and the derived GPU flag.

    Args:
        custom_scripts: scripts to run on cluster nodes.
        file_shares: file shares to mount on the nodes.
        cluster_id: unique id for the cluster.
        vm_count: number of dedicated nodes.
        vm_low_pri_count: number of low-priority nodes.
        vm_size: Azure VM size; also determines GPU support.
        docker_repo: docker image repository to use on the nodes.
        spark_configuration: Spark-specific configuration object.
    """
    base_kwargs = dict(custom_scripts=custom_scripts,
                       cluster_id=cluster_id,
                       vm_count=vm_count,
                       vm_low_pri_count=vm_low_pri_count,
                       vm_size=vm_size,
                       docker_repo=docker_repo,
                       file_shares=file_shares)
    super().__init__(**base_kwargs)
    self.spark_configuration = spark_configuration
    # GPU support is inferred purely from the VM size string.
    self.gpu_enabled = helpers.is_gpu_enabled(vm_size)
def gpu_enabled(self):
    """Return whether this instance's VM size has GPU support.

    Delegates the decision entirely to `helpers.is_gpu_enabled`, applied
    to `self.vm_size`.
    """
    size = self.vm_size
    return helpers.is_gpu_enabled(size)
def __init__(self,
             pool: batch_models.CloudPool = None,
             nodes: batch_models.ComputeNodePaged = None):
    """Wrap a Batch pool/nodes pair with cluster-level derived fields.

    Args:
        pool: the underlying Azure Batch pool (may be None per the default).
        nodes: the pool's compute nodes (may be None per the default).
    """
    super().__init__(pool, nodes)
    self.master_node_id = self.__get_master_node_id()
    # BUG FIX: pool defaults to None, but the original unconditionally
    # dereferenced pool.vm_size, raising AttributeError for the declared
    # default. Mirror the sibling initializer's convention of leaving
    # gpu_enabled as None when no VM size is available.
    self.gpu_enabled = helpers.is_gpu_enabled(pool.vm_size) if pool is not None else None
def __init__(self, cluster: aztk.models.Cluster):
    """Build a cluster wrapper from an existing aztk Cluster model.

    Args:
        cluster: source cluster whose pool and nodes are forwarded to the
            parent initializer; its pool's VM size drives the GPU flag.
    """
    pool = cluster.pool
    nodes = cluster.nodes
    super().__init__(pool, nodes)
    self.master_node_id = self.__get_master_node_id()
    # GPU support is inferred purely from the pool's VM size string.
    self.gpu_enabled = helpers.is_gpu_enabled(pool.vm_size)