# Shared module imports (from the aztk_cli source layout) used by the
# execute() variants below.
import typing

import aztk.spark
from aztk.spark.models import ClusterConfiguration, UserConfiguration
from aztk.utils import deprecate
from aztk_cli import config, log, utils
from aztk_cli.config import load_aztk_spark_config


def execute(args: typing.NamedTuple):
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    cluster_conf = ClusterConfiguration()
    cluster_conf.spark_configuration = load_aztk_spark_config(args.spark_conf)

    # read cluster.yaml configuration file, overwrite values with args
    file_config, wait = config.read_cluster_config() if args.cluster_path is None \
        else config.read_cluster_config(args.cluster_path)
    cluster_conf.merge(file_config)
    cluster_conf.merge(
        ClusterConfiguration(
            cluster_id=args.cluster_id,
            size=args.size,
            size_low_priority=args.size_low_priority,
            vm_size=args.vm_size,
            subnet_id=args.subnet_id,
            user_configuration=UserConfiguration(username=args.username, password=args.password),
        ))

    if cluster_conf.toolkit:
        if args.docker_repo:
            cluster_conf.toolkit.docker_repo = args.docker_repo
        if args.docker_run_options:
            cluster_conf.toolkit.docker_run_options = args.docker_run_options

    wait = wait if args.wait is None else args.wait

    user_configuration = cluster_conf.user_configuration
    if user_configuration and user_configuration.username:
        ssh_key, password = utils.get_ssh_key_or_prompt(
            spark_client.secrets_configuration.ssh_pub_key,
            user_configuration.username,
            user_configuration.password,
            spark_client.secrets_configuration,
        )
        cluster_conf.user_configuration = aztk.spark.models.UserConfiguration(
            username=user_configuration.username, password=password, ssh_key=ssh_key)
    else:
        cluster_conf.user_configuration = None

    cluster_conf.validate()
    utils.print_cluster_conf(cluster_conf, wait)
    with utils.Spinner():
        # create spark cluster
        cluster = spark_client.cluster.create(cluster_configuration=cluster_conf, wait=wait)

    if wait:
        log.info("Cluster %s created successfully.", cluster.id)
    else:
        log.info("Cluster %s is being provisioned.", cluster.id)
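
# Usage sketch (illustrative, not from the source): execute() reads every input
# as an attribute of a single args object, normally produced by argparse. The
# field names below mirror exactly the attributes the function body accesses;
# the values are placeholders and assume a valid aztk secrets file is present.
if __name__ == "__main__":
    from collections import namedtuple

    CreateArgs = namedtuple("CreateArgs", [
        "spark_conf", "cluster_path", "cluster_id", "size", "size_low_priority",
        "vm_size", "subnet_id", "username", "password", "docker_repo",
        "docker_run_options", "wait",
    ])
    execute(
        CreateArgs(
            spark_conf=None,
            cluster_path=None,
            cluster_id="my-spark-cluster",
            size=2,
            size_low_priority=0,
            vm_size="standard_f2",
            subnet_id=None,
            username="spark",
            password=None,
            docker_repo=None,
            docker_run_options=None,
            wait=True,
        ))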
def execute(args: typing.NamedTuple):
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    cluster_conf = ClusterConfiguration()
    cluster_conf.spark_configuration = load_aztk_spark_config()

    # read cluster.yaml configuration file, overwrite values with args
    file_config, wait = config.read_cluster_config()
    cluster_conf.merge(file_config)

    if args.size_low_pri is not None:
        deprecate("0.9.0", "--size-low-pri has been deprecated.", "Please use --size-low-priority.")
        args.size_low_priority = args.size_low_pri

    cluster_conf.merge(
        ClusterConfiguration(
            cluster_id=args.cluster_id,
            size=args.size,
            size_low_priority=args.size_low_priority,
            vm_size=args.vm_size,
            subnet_id=args.subnet_id,
            user_configuration=UserConfiguration(
                username=args.username,
                password=args.password,
            )))

    if args.docker_repo and cluster_conf.toolkit:
        cluster_conf.toolkit.docker_repo = args.docker_repo

    wait = wait if args.wait is None else args.wait

    user_configuration = cluster_conf.user_configuration
    if user_configuration and user_configuration.username:
        ssh_key, password = utils.get_ssh_key_or_prompt(
            spark_client.secrets_config.ssh_pub_key,
            user_configuration.username,
            user_configuration.password,
            spark_client.secrets_config,
        )
        cluster_conf.user_configuration = aztk.spark.models.UserConfiguration(
            username=user_configuration.username, password=password, ssh_key=ssh_key)
    else:
        cluster_conf.user_configuration = None

    cluster_conf.validate()
    utils.print_cluster_conf(cluster_conf, wait)
    with utils.Spinner():
        # create spark cluster
        cluster = spark_client.create_cluster(cluster_conf, wait=wait)

    if wait:
        log.info("Cluster %s created successfully.", cluster.id)
    else:
        log.info("Cluster %s is being provisioned.", cluster.id)
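
# Minimal sketch of the deprecate() helper used above, assuming it simply emits
# a DeprecationWarning; _deprecate_sketch is a hypothetical stand-in, and the
# real aztk.utils implementation may differ in wording and behavior.
import warnings


def _deprecate_sketch(version: str, message: str, advice: str = ""):
    # Warn that the flag still works today but disappears in `version`.
    warnings.warn(
        "{0} It will be removed in version {1}. {2}".format(message, version, advice),
        category=DeprecationWarning,
        stacklevel=2,
    )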
def execute(args: typing.NamedTuple):
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    cluster_id = args.cluster_id
    cluster = spark_client.get_cluster(cluster_id)
    utils.print_cluster(spark_client, cluster)

    configuration = spark_client.get_cluster_config(cluster_id)
    if configuration and args.show_config:
        log.info("-------------------------------------------")
        log.info("Cluster configuration:")
        utils.print_cluster_conf(configuration, False)
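
# Usage sketch (illustrative, not from the source): this cluster-get variant
# only reads args.cluster_id and args.show_config, so any attribute container
# works; the cluster id is a placeholder.
if __name__ == "__main__":
    from collections import namedtuple

    GetArgs = namedtuple("GetArgs", ["cluster_id", "show_config"])
    execute(GetArgs(cluster_id="my-spark-cluster", show_config=True))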
def execute(args: typing.NamedTuple):
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    cluster_conf = ClusterConfiguration()
    cluster_conf.spark_configuration = load_aztk_spark_config()

    # read cluster.yaml configuration file, overwrite values with args
    file_config, wait = config.read_cluster_config()
    cluster_conf.merge(file_config)
    cluster_conf.merge(
        ClusterConfiguration(
            cluster_id=args.cluster_id,
            vm_count=args.size,
            vm_low_pri_count=args.size_low_pri,
            vm_size=args.vm_size,
            subnet_id=args.subnet_id,
            user_configuration=UserConfiguration(
                username=args.username,
                password=args.password,
            ),
            docker_repo=args.docker_repo))

    wait = wait if args.wait is None else args.wait

    user_configuration = cluster_conf.user_configuration
    if user_configuration and user_configuration.username:
        ssh_key, password = utils.get_ssh_key_or_prompt(
            spark_client.secrets_config.ssh_pub_key,
            user_configuration.username,
            user_configuration.password,
            spark_client.secrets_config,
        )
        cluster_conf.user_configuration = aztk.spark.models.UserConfiguration(
            username=user_configuration.username, password=password, ssh_key=ssh_key)
    else:
        cluster_conf.user_configuration = None

    utils.print_cluster_conf(cluster_conf, wait)

    spinner = utils.Spinner()
    spinner.start()
    # create spark cluster
    cluster = spark_client.create_cluster(cluster_conf, wait=wait)
    spinner.stop()

    if wait:
        log.info("Cluster %s created successfully.", cluster.id)
    else:
        log.info("Cluster %s is being provisioned.", cluster.id)
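
# Why the newer variants use `with utils.Spinner():` rather than the manual
# start()/stop() pair above: if create_cluster() raises, stop() is never
# reached and the spinner thread keeps drawing over the traceback. A context
# manager guarantees cleanup. This wrapper is illustrative only, not the
# actual aztk utils implementation.
import contextlib


@contextlib.contextmanager
def spinner_scope(spinner):
    spinner.start()
    try:
        yield spinner
    finally:
        spinner.stop()  # always runs, even when the body raises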