Beispiel #1
0
def create_directory(dest_path: str, environment: str):
    """Initialize *dest_path* with the default configuration file tree.

    Copies the template configuration directory into ``dest_path``, promotes
    ``secrets.yaml.template`` to ``secrets.yaml`` when no real secrets file
    exists yet, and substitutes the ``{environment}`` placeholder inside
    ``cluster.yaml`` with the given *environment* name.
    """
    source_dir = constants.INIT_DIRECTORY_SOURCE
    target_dir = dest_path

    # Never clobber a directory that was initialized before.
    if os.path.isdir(target_dir):
        log.warning("This directory has already been initialized.")
        return

    copy_tree(source_dir, target_dir, update=1)

    template_file = os.path.join(dest_path, "secrets.yaml.template")
    secrets_file = os.path.join(dest_path, "secrets.yaml")

    # A real secrets file takes precedence over the freshly copied template.
    if os.path.isfile(secrets_file):
        os.remove(template_file)

    # Otherwise promote the template to become the live secrets file.
    if os.path.isfile(template_file) and not os.path.isfile(secrets_file):
        os.rename(template_file, secrets_file)

    cluster_file = os.path.join(dest_path, "cluster.yaml")

    if os.path.isfile(cluster_file):
        with open(cluster_file, "r", encoding="UTF-8") as handle:
            contents = handle.read()
        contents = contents.replace("{environment}",
                                    "{}\n".format(environment))
        with open(cluster_file, "w", encoding="UTF-8") as handle:
            handle.write(contents)
Beispiel #2
0
def execute(args: typing.NamedTuple):
    """Delete the clusters listed in ``args.cluster_ids``.

    Unless ``args.force`` is set, each deletion requires the user to re-type
    the cluster id as confirmation; a mismatch aborts the whole command.
    """
    spark_client = aztk.spark.Client(config.load_aztk_secrets())

    for cluster_id in args.cluster_ids:
        if not args.force:
            if not args.keep_logs:
                log.warning(
                    "All logs persisted for this cluster will be deleted.")

            # Ask the user to echo the id back as a safety check.
            confirmation = input(
                "Please confirm the id of the cluster you wish to delete [{}]: "
                .format(cluster_id))
            if confirmation != cluster_id:
                log.error(
                    "Confirmation cluster id does not match. Please try again."
                )
                return

        deleted = spark_client.cluster.delete(id=cluster_id,
                                              keep_logs=args.keep_logs)
        if deleted:
            log.info("Deleting cluster %s", cluster_id)
        else:
            log.error(
                "Cluster with id '%s' doesn't exist or was already deleted.",
                cluster_id)
Beispiel #3
0
def native_python_ssh_into_master(spark_client, cluster, cluster_configuration,
                                  ssh_conf, password):
    """Open a pure-python SSH session to the cluster master.

    Forwards the standard Spark UI ports (web UI, job UI, job history UI)
    plus any plugin ports flagged as publicly exposed.
    """
    if not ssh_conf.connect:
        log.warning("No ssh client found, using pure python connection.")
        return

    # Gather plugin ports that should be tunnelled to the local machine.
    plugin_ports = []
    if cluster_configuration and cluster_configuration.plugins:
        for plugin in cluster_configuration.plugins:
            for port in plugin.ports:
                if port.expose_publicly:
                    plugin_ports.append(
                        PortForwardingSpecification(port.internal,
                                                    port.public_port))

    print("Press ctrl+c to exit...")
    standard_forwards = [
        PortForwardingSpecification(remote_port=8080,
                                    local_port=8080),  # web ui
        PortForwardingSpecification(remote_port=4040,
                                    local_port=4040),  # job ui
        PortForwardingSpecification(remote_port=18080,
                                    local_port=18080),  # job history ui
    ]
    spark_client.cluster.ssh_into_master(
        cluster.id,
        ssh_conf.username,
        ssh_key=None,
        password=password,
        port_forward_list=standard_forwards + plugin_ports,
        internal=ssh_conf.internal,
    )
Beispiel #4
0
def execute(args: typing.NamedTuple):
    """Delete the job identified by ``args.job_id``.

    Unless ``args.force`` is set, the job's existence is verified and the
    user must re-type the id as confirmation before deletion proceeds.
    """
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    job_id = args.job_id

    if not args.force:
        # check if job exists before prompting for confirmation
        spark_client.get_job(job_id)

        if not args.keep_logs:
            log.warning("All logs persisted for this job will be deleted.")

        # Ask the user to echo the id back as a safety check.
        answer = input("Please confirm the id of the cluster you wish to delete: ")
        if answer != job_id:
            log.error("Confirmation cluster id does not match. Please try again.")
            return

    deleted = spark_client.delete_job(job_id, args.keep_logs)
    if deleted:
        log.info("Deleting Job %s", job_id)
    else:
        log.error("Job with id '%s' doesn't exist or was already deleted.", job_id)
Beispiel #5
0
def _show_warn(message, *_args):
    """Forward *message* to the shared logger as a warning; extra positional
    arguments are accepted but ignored (adapter-style signature)."""
    text = str(message)
    log.warning(text)
Beispiel #6
0
def _merge_secrets_dict(secrets: SecretsConfiguration, secrets_config):
    """Populate *secrets* in place from a parsed secrets.yaml dictionary.

    Supports the current layout ('service_principal', 'shared_key', 'docker',
    'default') as well as the deprecated top-level 'batch:'/'storage:' layout,
    which is still accepted but triggers a deprecation warning.

    Args:
        secrets: the SecretsConfiguration object to fill in (mutated).
        secrets_config: dict parsed from secrets.yaml.

    Raises:
        aztk.error.AztkError: if both 'shared_key' and the deprecated
            'batch'/'storage' sections are present.
    """
    service_principal_config = secrets_config.get('service_principal')
    if service_principal_config:
        secrets.service_principal = ServicePrincipalConfiguration(
            tenant_id=service_principal_config.get('tenant_id'),
            client_id=service_principal_config.get('client_id'),
            credential=service_principal_config.get('credential'),
            batch_account_resource_id=service_principal_config.get(
                'batch_account_resource_id'),
            storage_account_resource_id=service_principal_config.get(
                'storage_account_resource_id'),
        )

    shared_key_config = secrets_config.get('shared_key')
    batch = secrets_config.get('batch')
    storage = secrets_config.get('storage')

    # The new and the deprecated shared-key layouts are mutually exclusive.
    if shared_key_config and (batch or storage):
        raise aztk.error.AztkError(
            "Shared keys must be configured either under 'sharedKey:' or under 'batch:' and 'storage:', not both."
        )

    if shared_key_config:
        secrets.shared_key = SharedKeyConfiguration(
            batch_account_name=shared_key_config.get('batch_account_name'),
            batch_account_key=shared_key_config.get('batch_account_key'),
            batch_service_url=shared_key_config.get('batch_service_url'),
            storage_account_name=shared_key_config.get('storage_account_name'),
            storage_account_key=shared_key_config.get('storage_account_key'),
            storage_account_suffix=shared_key_config.get(
                'storage_account_suffix'),
        )
    elif batch or storage:
        # FIX: the deprecation warning used to fire only when 'batch' was
        # present; a file using only the deprecated 'storage:' section got no
        # warning at all. Warn once for either deprecated section.
        log.warning(
            "Your secrets.yaml format is deprecated. To use shared key authentication use the shared_key key. See config/secrets.yaml.template"
        )
        secrets.shared_key = SharedKeyConfiguration()
        if batch:
            secrets.shared_key.batch_account_name = batch.get(
                'batchaccountname')
            secrets.shared_key.batch_account_key = batch.get('batchaccountkey')
            secrets.shared_key.batch_service_url = batch.get('batchserviceurl')

        if storage:
            secrets.shared_key.storage_account_name = storage.get(
                'storageaccountname')
            secrets.shared_key.storage_account_key = storage.get(
                'storageaccountkey')
            secrets.shared_key.storage_account_suffix = storage.get(
                'storageaccountsuffix')

    docker_config = secrets_config.get('docker')
    if docker_config:
        secrets.docker = DockerConfiguration(
            endpoint=docker_config.get('endpoint'),
            username=docker_config.get('username'),
            password=docker_config.get('password'),
        )

    default_config = secrets_config.get('default')
    # Check for ssh keys if they are provided
    if default_config:
        secrets.ssh_priv_key = default_config.get('ssh_priv_key')
        secrets.ssh_pub_key = default_config.get('ssh_pub_key')