Code Example #1
File: cluster_add_user.py  Project: skepticatgit/aztk
import typing

import aztk.spark
from aztk_cli import config, log, utils  # assumed module path for the CLI helpers


def execute(args: typing.NamedTuple):
    # "load_aztk_screts" (sic) is the helper's actual name in the aztk CLI config module.
    spark_client = aztk.spark.Client(config.load_aztk_screts())

    log.info('-------------------------------------------')
    log.info('spark cluster id:    {}'.format(args.cluster_id))
    log.info('username:            {}'.format(args.username))
    log.info('-------------------------------------------')

    if args.ssh_key:
        ssh_key = args.ssh_key
    else:
        ssh_key = spark_client.secrets_config.ssh_pub_key

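    # Prompt interactively when neither a key nor a password was supplied.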
    ssh_key, password = utils.get_ssh_key_or_prompt(ssh_key, args.username, args.password, spark_client.secrets_config)

    spark_client.create_user(
        cluster_id=args.cluster_id,
        username=args.username,
        password=password,
        ssh_key=ssh_key
    )

    if password:
        log.info('password:            %s', '*' * len(password))
    elif ssh_key:
        log.info('ssh public key:      %s', ssh_key)

    log.info('-------------------------------------------')
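For context, here is a minimal sketch of how such an execute handler is typically wired to argparse in the aztk CLI; the flag names and dispatch below are assumptions, not taken from these examples:

import argparse

# Hypothetical wiring: the CLI parses flags into a namespace and hands it to execute().
parser = argparse.ArgumentParser(prog='aztk spark cluster add-user')
parser.add_argument('--id', dest='cluster_id', required=True)
parser.add_argument('--username', '-u', required=True)
parser.add_argument('--ssh-key', dest='ssh_key', default=None)
parser.add_argument('--password', '-p', default=None)

args = parser.parse_args(['--id', 'my-cluster', '-u', 'spark'])
# execute(args)  # would prompt for credentials, then create the user on the cluster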
Code Example #2
File: cluster_create.py  Project: skepticatgit/aztk
import typing

import aztk.spark
import aztk.spark.models
from aztk.spark.models import ClusterConfiguration, UserConfiguration
from aztk_cli import config, log, utils  # assumed module path for the CLI helpers
from aztk_cli.config import load_aztk_spark_config  # assumed location of this helper


def execute(args: typing.NamedTuple):
    # "load_aztk_screts" (sic) is the helper's actual name in the aztk CLI config module.
    spark_client = aztk.spark.Client(config.load_aztk_screts())
    cluster_conf = ClusterConfiguration()
    cluster_conf.spark_configuration = load_aztk_spark_config()

    # read the cluster.yaml configuration file, then overwrite its values with CLI args
    file_config, wait = config.read_cluster_config()
    cluster_conf.merge(file_config)
    cluster_conf.merge(
        ClusterConfiguration(cluster_id=args.cluster_id,
                             vm_count=args.size,
                             vm_low_pri_count=args.size_low_pri,
                             vm_size=args.vm_size,
                             subnet_id=args.subnet_id,
                             user_configuration=UserConfiguration(
                                 username=args.username,
                                 password=args.password,
                             ),
                             docker_repo=args.docker_repo))
    wait = wait if args.wait is None else args.wait

    user_configuration = cluster_conf.user_configuration

    if user_configuration and user_configuration.username:
        ssh_key, password = utils.get_ssh_key_or_prompt(
            spark_client.secrets_config.ssh_pub_key,
            user_configuration.username, user_configuration.password,
            spark_client.secrets_config)
        cluster_conf.user_configuration = aztk.spark.models.UserConfiguration(
            username=user_configuration.username,
            password=password,
            ssh_key=ssh_key)
    else:
        cluster_conf.user_configuration = None

    print_cluster_conf(cluster_conf, wait)
    spinner = utils.Spinner()
    spinner.start()

    # create spark cluster
    cluster = spark_client.create_cluster(cluster_conf, wait=wait)

    spinner.stop()

    if wait:
        log.info("Cluster %s created successfully.", cluster.id)
    else:
        log.info("Cluster %s is being provisioned.", cluster.id)
Code Example #3
File: cluster_create.py  Project: Pariyat/aztk
import typing

import aztk.spark
import aztk.spark.models
from aztk_cli import config, log, utils  # assumed module path; older forks used a plain "cli" package
from aztk_cli.config import ClusterConfig, load_aztk_spark_config  # assumed location of these helpers


def execute(args: typing.NamedTuple):
    # "load_aztk_screts" (sic) is the helper's actual name in the aztk CLI config module.
    spark_client = aztk.spark.Client(config.load_aztk_screts())

    # read the cluster.yaml configuration file, then overwrite its values with CLI args
    cluster_conf = ClusterConfig()

    cluster_conf.merge(uid=args.cluster_id,
                       size=args.size,
                       size_low_pri=args.size_low_pri,
                       vm_size=args.vm_size,
                       subnet_id=args.subnet_id,
                       wait=args.wait,
                       username=args.username,
                       password=args.password,
                       docker_repo=args.docker_repo)

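    # Convert the raw dict entries from cluster.yaml into typed model objects.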
    if cluster_conf.custom_scripts:
        custom_scripts = []
        for custom_script in cluster_conf.custom_scripts:
            custom_scripts.append(
                aztk.spark.models.CustomScript(script=custom_script['script'],
                                               run_on=custom_script['runOn']))
    else:
        custom_scripts = None

    if cluster_conf.file_shares:
        file_shares = []
        for file_share in cluster_conf.file_shares:
            file_shares.append(
                aztk.spark.models.FileShare(
                    storage_account_name=file_share['storage_account_name'],
                    storage_account_key=file_share['storage_account_key'],
                    file_share_path=file_share['file_share_path'],
                    mount_path=file_share['mount_path']))
    else:
        file_shares = None

    if cluster_conf.username:
        ssh_key, password = utils.get_ssh_key_or_prompt(
            spark_client.secrets_config.ssh_pub_key, cluster_conf.username,
            cluster_conf.password, spark_client.secrets_config)
        user_conf = aztk.spark.models.UserConfiguration(
            username=cluster_conf.username, password=password, ssh_key=ssh_key)
    else:
        user_conf = None

    print_cluster_conf(cluster_conf)
    spinner = utils.Spinner()
    spinner.start()

    # create spark cluster
    cluster = spark_client.create_cluster(
        aztk.spark.models.ClusterConfiguration(
            cluster_id=cluster_conf.uid,
            vm_count=cluster_conf.size,
            vm_low_pri_count=cluster_conf.size_low_pri,
            vm_size=cluster_conf.vm_size,
            subnet_id=cluster_conf.subnet_id,
            custom_scripts=custom_scripts,
            file_shares=file_shares,
            docker_repo=cluster_conf.docker_repo,
            spark_configuration=load_aztk_spark_config(),
            user_configuration=user_conf),
        wait=cluster_conf.wait)

    spinner.stop()

    if cluster_conf.wait:
        log.info("Cluster %s created successfully.", cluster.id)
    else:
        log.info("Cluster %s is being provisioned.", cluster.id)
Code Example #4
import os
import typing

import aztk.spark
import aztk.spark.models
import aztk.utils.constants
from aztk_cli import log, utils  # assumed module path; older forks used a plain "cli" package
from aztk_cli.config import ClusterConfig, load_spark_client  # assumed location of these helpers


def execute(args: typing.NamedTuple):
    spark_client = load_spark_client()

    # read the cluster.yaml configuration file, then overwrite its values with CLI args
    cluster_conf = ClusterConfig()

    cluster_conf.merge(uid=args.cluster_id,
                       size=args.size,
                       size_low_pri=args.size_low_pri,
                       vm_size=args.vm_size,
                       wait=args.wait,
                       username=args.username,
                       password=args.password,
                       docker_repo=args.docker_repo)

    print_cluster_conf(cluster_conf)

    if cluster_conf.custom_scripts:
        custom_scripts = []
        for custom_script in cluster_conf.custom_scripts:
            custom_scripts.append(
                aztk.spark.models.CustomScript(script=custom_script['script'],
                                               run_on=custom_script['runOn']))
    else:
        custom_scripts = None

    if cluster_conf.file_shares:
        file_shares = []
        for file_share in cluster_conf.file_shares:
            file_shares.append(
                aztk.spark.models.FileShare(
                    storage_account_name=file_share['storage_account_name'],
                    storage_account_key=file_share['storage_account_key'],
                    file_share_path=file_share['file_share_path'],
                    mount_path=file_share['mount_path']))
    else:
        file_shares = None

    jars_src = aztk.utils.constants.DEFAULT_SPARK_JARS_SOURCE

    # create spark cluster
    cluster = spark_client.create_cluster(
        aztk.spark.models.ClusterConfiguration(
            cluster_id=cluster_conf.uid,
            vm_count=cluster_conf.size,
            vm_low_pri_count=cluster_conf.size_low_pri,
            vm_size=cluster_conf.vm_size,
            custom_scripts=custom_scripts,
            file_shares=file_shares,
            docker_repo=cluster_conf.docker_repo,
            spark_configuration=aztk.spark.models.SparkConfiguration(
                spark_defaults_conf=os.path.join(
                    aztk.utils.constants.DEFAULT_SPARK_CONF_SOURCE,
                    'spark-defaults.conf'),
                spark_env_sh=os.path.join(
                    aztk.utils.constants.DEFAULT_SPARK_CONF_SOURCE,
                    'spark-env.sh'),
                core_site_xml=os.path.join(
                    aztk.utils.constants.DEFAULT_SPARK_CONF_SOURCE,
                    'core-site.xml'),
                jars=[
                    os.path.join(jars_src, path)
                    for path in os.listdir(jars_src)
                ])),
        wait=cluster_conf.wait)

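    # Unlike Code Examples #2 and #3, the user is added in a separate call after cluster creation.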
    if cluster_conf.username:
        ssh_key = spark_client.secrets_config.ssh_pub_key

        ssh_key, password = utils.get_ssh_key_or_prompt(
            ssh_key, cluster_conf.username, cluster_conf.password,
            spark_client.secrets_config)

        spark_client.create_user(cluster_id=cluster_conf.uid,
                                 username=cluster_conf.username,
                                 password=password,
                                 ssh_key=ssh_key)

    if cluster_conf.wait:
        log.info("Cluster %s created successfully.", cluster.id)
    else:
        log.info("Cluster %s is being provisioned.", cluster.id)