Example 1
def execute_sample(global_config, sample_config):
    """Executes the sample with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param sample_config: The sample specific configuration to use.
    :type sample_config: `configparser.ConfigParser`
    """
    # Set up the configuration
    batch_account_key = global_config.get('Batch', 'batchaccountkey')
    batch_account_name = global_config.get('Batch', 'batchaccountname')
    batch_service_url = global_config.get('Batch', 'batchserviceurl')

    should_delete_job = sample_config.getboolean('DEFAULT', 'shoulddeletejob')
    pool_vm_size = sample_config.get('DEFAULT', 'poolvmsize')
    pool_vm_count = sample_config.getint('DEFAULT', 'poolvmcount')

    # Print the settings we are running with
    common.helpers.print_configuration(global_config)
    common.helpers.print_configuration(sample_config)

    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    client_configuration = batch.BatchServiceClientConfiguration(
        credentials, base_url=batch_service_url)

    # Retry 5 times -- default is 3
    client_configuration.retry_policy.retries = 5
    batch_client = batch.BatchServiceClient(client_configuration)

    job_id = common.helpers.generate_unique_resource_name("HelloWorld")

    try:
        submit_job_and_add_task(batch_client, job_id, pool_vm_size,
                                pool_vm_count)

        common.helpers.wait_for_tasks_to_complete(
            batch_client, job_id, datetime.timedelta(minutes=25))

        tasks = batch_client.task.list(job_id)
        task_ids = [task.id for task in tasks]

        common.helpers.print_task_output(batch_client, job_id, task_ids)
    finally:
        if should_delete_job:
            print("Deleting job: ", job_id)
            batch_client.job.delete(job_id)
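All of these samples read their settings through configparser from two INI files whose sections and option names can be inferred from the get() calls above. A minimal sketch of loading them; the filenames and the sample values are assumptions, not part of the samples:

import configparser

global_config = configparser.ConfigParser()
global_config.read('configuration.cfg')  # hypothetical filename
# Expected layout, per the get() calls above:
# [Batch]
# batchaccountkey = <your account key>
# batchaccountname = <your account name>
# batchserviceurl = https://<account>.<region>.batch.azure.com

sample_config = configparser.ConfigParser()
sample_config.read('sample.cfg')  # hypothetical filename
# [DEFAULT]
# shoulddeletejob = true
# poolvmsize = STANDARD_D1_V2
# poolvmcount = 1

execute_sample(global_config, sample_config)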
Example 2
def execute_sample(global_config, sample_config):
    """Executes the sample with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param sample_config: The sample specific configuration to use.
    :type sample_config: `configparser.ConfigParser`
    """
    # Set up the configuration
    batch_account_key = global_config.get('Batch', 'batchaccountkey')
    batch_account_name = global_config.get('Batch', 'batchaccountname')
    batch_service_url = global_config.get('Batch', 'batchserviceurl')

    storage_account_key = global_config.get('Storage', 'storageaccountkey')
    storage_account_name = global_config.get('Storage', 'storageaccountname')
    storage_account_suffix = global_config.get(
        'Storage',
        'storageaccountsuffix')

    should_delete_container = sample_config.getboolean(
        'DEFAULT',
        'shoulddeletecontainer')
    should_delete_job = sample_config.getboolean(
        'DEFAULT',
        'shoulddeletejob')
    should_delete_pool = sample_config.getboolean(
        'DEFAULT',
        'shoulddeletepool')
    pool_vm_size = sample_config.get(
        'DEFAULT',
        'poolvmsize')
    pool_vm_count = sample_config.getint(
        'DEFAULT',
        'poolvmcount')

    # Print the settings we are running with
    common.helpers.print_configuration(global_config)
    common.helpers.print_configuration(sample_config)

    credentials = batchauth.SharedKeyCredentials(
        batch_account_name,
        batch_account_key)
    client_configuration = batch.BatchServiceClientConfiguration(
        credentials,
        base_url=batch_service_url)

    # Retry 5 times -- default is 3
    client_configuration.retry_policy.retries = 5
    batch_client = batch.BatchServiceClient(client_configuration)

    block_blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key,
        endpoint_suffix=storage_account_suffix)

    job_id = common.helpers.generate_unique_resource_name(
        "PoolsAndResourceFilesJob")
    pool_id = "PoolsAndResourceFilesPool"
    try:
        create_pool(
            batch_client,
            block_blob_client,
            pool_id,
            pool_vm_size,
            pool_vm_count)

        submit_job_and_add_task(
            batch_client,
            block_blob_client,
            job_id, pool_id)

        common.helpers.wait_for_tasks_to_complete(
            batch_client,
            job_id,
            datetime.timedelta(minutes=25))

        tasks = batch_client.task.list(job_id)
        task_ids = [task.id for task in tasks]

        common.helpers.print_task_output(batch_client, job_id, task_ids)
    finally:
        # clean up
        if should_delete_container:
            block_blob_client.delete_container(
                _CONTAINER_NAME,
                fail_not_exist=False)
        if should_delete_job:
            print("Deleting job: ", job_id)
            batch_client.job.delete(job_id)
        if should_delete_pool:
            print("Deleting pool: ", pool_id)
            batch_client.pool.delete(pool_id)
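submit_job_and_add_task is defined elsewhere in each sample. A minimal sketch of the Batch calls such a helper makes, using the azure.batch.models types that pair with the client configuration above; the task id and command line are placeholders, and the real helper also stages resource files through block_blob_client, which is omitted here:

def submit_job_and_add_task(batch_client, block_blob_client, job_id, pool_id):
    # Minimal sketch, not the sample's actual helper. Resource-file
    # staging via block_blob_client is omitted.
    import azure.batch.models as batchmodels

    # Create the job on the existing pool.
    batch_client.job.add(batchmodels.JobAddParameter(
        id=job_id,
        pool_info=batchmodels.PoolInformation(pool_id=pool_id)))

    # Add a single task to the job.
    batch_client.task.add(job_id, batchmodels.TaskAddParameter(
        id='SampleTask',  # hypothetical task id
        command_line='/bin/bash -c "echo hello from Batch"'))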
Example 3
def execute_sample(global_config, sample_config):
    """Executes the sample with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param sample_config: The sample specific configuration to use.
    :type sample_config: `configparser.ConfigParser`
    """
    # Set up the configuration
    batch_account_key = global_config.get('Batch', 'batchaccountkey')
    batch_account_name = global_config.get('Batch', 'batchaccountname')
    batch_service_url = global_config.get('Batch', 'batchserviceurl')

    storage_account_key = global_config.get('Storage', 'storageaccountkey')
    storage_account_name = global_config.get('Storage', 'storageaccountname')
    storage_account_suffix = global_config.get('Storage',
                                               'storageaccountsuffix')

    should_delete_container = sample_config.getboolean(
        'DEFAULT', 'shoulddeletecontainer')
    should_delete_job = sample_config.getboolean('DEFAULT', 'shoulddeletejob')
    should_delete_pool = sample_config.getboolean('DEFAULT',
                                                  'shoulddeletepool')
    should_delete_cert = sample_config.getboolean('DEFAULT',
                                                  'shoulddeletecert')
    pool_vm_size = sample_config.get('DEFAULT', 'poolvmsize')
    pool_vm_count = sample_config.getint('DEFAULT', 'poolvmcount')

    # Print the settings we are running with
    common.helpers.print_configuration(global_config)
    common.helpers.print_configuration(sample_config)

    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    client_configuration = batch.BatchServiceClientConfiguration(
        credentials, base_url=batch_service_url)

    # Retry 5 times -- default is 3
    client_configuration.retry_policy.retries = 5
    batch_client = batch.BatchServiceClient(client_configuration)

    block_blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key,
        endpoint_suffix=storage_account_suffix)

    job_id = common.helpers.generate_unique_resource_name(
        'EncryptedResourceFiles')
    pool_id = common.helpers.generate_unique_resource_name(
        'EncryptedResourceFiles')
    sha1_cert_tp = None
    try:
        # encrypt local file and upload to blob storage via blobxfer
        rsapfxfile, sha1_cert_tp = encrypt_localfile_to_blob_storage(
            storage_account_name, storage_account_key, _CONTAINER_NAME,
            _RESOURCE_TO_ENCRYPT)

        # add certificate to account
        add_certificate_to_account(batch_client, rsapfxfile, _PFX_PASSPHRASE,
                                   sha1_cert_tp)

        # create pool and wait for node idle
        create_pool_and_wait_for_node(batch_client, pool_id, pool_vm_size,
                                      pool_vm_count, sha1_cert_tp)

        # submit job and add a task
        submit_job_and_add_task(batch_client, block_blob_client,
                                storage_account_name, storage_account_key,
                                _CONTAINER_NAME, _RESOURCE_NAME, job_id,
                                pool_id, sha1_cert_tp)

        # wait for tasks to complete
        common.helpers.wait_for_tasks_to_complete(
            batch_client, job_id, datetime.timedelta(minutes=20))

        tasks = batch_client.task.list(job_id)
        task_ids = [task.id for task in tasks]

        common.helpers.print_task_output(batch_client, job_id, task_ids)
    finally:
        # perform clean up
        if should_delete_container:
            print('Deleting container: {}'.format(_CONTAINER_NAME))
            block_blob_client.delete_container(_CONTAINER_NAME,
                                               fail_not_exist=False)
        if should_delete_job:
            print('Deleting job: {}'.format(job_id))
            batch_client.job.delete(job_id)
        if should_delete_pool:
            print('Deleting pool: {}'.format(pool_id))
            batch_client.pool.delete(pool_id)
        if should_delete_cert and sha1_cert_tp is not None:
            # cert deletion requires no active references to cert, so
            # override any config settings for preserving job/pool
            if not should_delete_job:
                print('Deleting job: {}'.format(job_id))
                batch_client.job.delete(job_id)
            if not should_delete_pool:
                print('Deleting pool: {}'.format(pool_id))
                batch_client.pool.delete(pool_id)
            print('Deleting cert: {}'.format(sha1_cert_tp))
            batch_client.certificate.delete('sha1', sha1_cert_tp)
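add_certificate_to_account wraps the certificate upload that pairs with the certificate.delete call above. A minimal sketch assuming the azure.batch.models types; the PFX payload must be base64-encoded, and this is not the sample's actual helper:

def add_certificate_to_account(batch_client, pfx_path, passphrase, sha1_tp):
    # Minimal sketch: upload a PFX certificate so that pool nodes can
    # decrypt the encrypted resource files.
    import base64
    import azure.batch.models as batchmodels

    with open(pfx_path, 'rb') as f:
        data = base64.b64encode(f.read()).decode('utf-8')
    batch_client.certificate.add(batchmodels.CertificateAddParameter(
        thumbprint=sha1_tp,
        thumbprint_algorithm='sha1',
        data=data,
        certificate_format=batchmodels.CertificateFormat.pfx,
        password=passphrase))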
Example 4
def execute_sample(global_config, sample_config):
    """Executes the sample with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param sample_config: The sample specific configuration to use.
    :type sample_config: `configparser.ConfigParser`
    """
    # Set up the configuration
    batch_account_key = global_config.get('Batch', 'batchaccountkey')
    batch_account_name = global_config.get('Batch', 'batchaccountname')
    batch_service_url = global_config.get('Batch', 'batchserviceurl')

    storage_account_key = global_config.get('Storage', 'storageaccountkey')
    storage_account_name = global_config.get('Storage', 'storageaccountname')
    storage_account_suffix = global_config.get('Storage',
                                               'storageaccountsuffix')

    should_delete_job = sample_config.getboolean('DEFAULT', 'shoulddeletejob')
    should_delete_pool = sample_config.getboolean('DEFAULT',
                                                  'shoulddeletepool')

    pool_vm_size = sample_config.get('DEFAULT', 'poolvmsize')
    pool_vm_count = sample_config.getint('DEFAULT', 'poolvmcount')

    # Print the settings we are running with
    common.helpers.print_configuration(global_config)
    common.helpers.print_configuration(sample_config)

    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    client_configuration = batch.BatchServiceClientConfiguration(
        credentials, base_url=batch_service_url)

    # Retry 5 times -- default is 3
    client_configuration.retry_policy.retries = 5
    batch_client = batch.BatchServiceClient(client_configuration)

    block_blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key,
        endpoint_suffix=storage_account_suffix)

    job_id = common.helpers.generate_unique_resource_name('DockerBatchTask')
    pool_id = common.helpers.generate_unique_resource_name('DockerBatchTask')

    try:
        # create pool
        create_pool(batch_client, block_blob_client, pool_id, pool_vm_size,
                    pool_vm_count)

        # submit job and add a task
        print('submitting docker run tasks via Azure Batch...')
        add_docker_batch_task(batch_client, block_blob_client, job_id, pool_id)

        # wait for tasks to complete
        common.helpers.wait_for_tasks_to_complete(
            batch_client, job_id, datetime.timedelta(minutes=25))

    finally:
        # perform clean up
        if should_delete_job:
            print('Deleting job: {}'.format(job_id))
            batch_client.job.delete(job_id)
        if should_delete_pool:
            print('Deleting pool: {}'.format(pool_id))
            batch_client.pool.delete(pool_id)
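add_docker_batch_task turns each docker run into an ordinary Batch task whose command line invokes docker on the node. A minimal sketch of that idea; the task id and image are placeholders, this is not the sample's helper, and it assumes the pool's start task installed docker:

def add_docker_batch_task(batch_client, block_blob_client, job_id, pool_id):
    # Minimal sketch. Resource-file staging via block_blob_client and the
    # docker installation performed by the pool start task are omitted.
    import azure.batch.models as batchmodels

    batch_client.job.add(batchmodels.JobAddParameter(
        id=job_id,
        pool_info=batchmodels.PoolInformation(pool_id=pool_id)))
    task_id = 'DockerRunTask'  # hypothetical task id
    batch_client.task.add(job_id, batchmodels.TaskAddParameter(
        id=task_id,
        command_line='docker run --rm busybox echo hello'))
    return task_id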
Example 5
def execute_sample(global_config, sample_config):
    """Executes the sample with the specified configurations.

    :param global_config: The global configuration to use.
    :type global_config: `configparser.ConfigParser`
    :param sample_config: The sample specific configuration to use.
    :type sample_config: `configparser.ConfigParser`
    """
    # Set up the configuration
    batch_account_key = global_config.get('Batch', 'batchaccountkey')
    batch_account_name = global_config.get('Batch', 'batchaccountname')
    batch_service_url = global_config.get('Batch', 'batchserviceurl')

    storage_account_key = global_config.get('Storage', 'storageaccountkey')
    storage_account_name = global_config.get('Storage', 'storageaccountname')
    storage_account_suffix = global_config.get('Storage',
                                               'storageaccountsuffix')

    should_delete_container = sample_config.getboolean(
        'DEFAULT', 'shoulddeletecontainer')
    should_delete_job = sample_config.getboolean('DEFAULT', 'shoulddeletejob')
    should_delete_pool = sample_config.getboolean('DEFAULT',
                                                  'shoulddeletepool')
    generate_ssh_tunnel_script = sample_config.getboolean(
        'DEFAULT', 'generatesshtunnelscript')
    pool_vm_size = sample_config.get('DEFAULT', 'poolvmsize')
    pool_vm_count = sample_config.getint('DEFAULT', 'poolvmcount')

    # Print the settings we are running with
    common.helpers.print_configuration(global_config)
    common.helpers.print_configuration(sample_config)

    credentials = batchauth.SharedKeyCredentials(batch_account_name,
                                                 batch_account_key)
    client_configuration = batch.BatchServiceClientConfiguration(
        credentials, base_url=batch_service_url)

    # Retry 5 times -- default is 3
    client_configuration.retry_policy.retries = 5
    batch_client = batch.BatchServiceClient(client_configuration)

    block_blob_client = azureblob.BlockBlobService(
        account_name=storage_account_name,
        account_key=storage_account_key,
        endpoint_suffix=storage_account_suffix)

    job_id = common.helpers.generate_unique_resource_name('DockerSwarm')
    pool_id = common.helpers.generate_unique_resource_name('DockerSwarm')
    public_key = None
    private_key = None
    try:
        # create pool and wait for node idle
        nodes = create_pool_and_wait_for_nodes(batch_client, block_blob_client,
                                               pool_id, pool_vm_size,
                                               pool_vm_count)

        # generate ssh key pair
        private_key, public_key = generate_ssh_keypair('batch_id_rsa')

        # add compute node user to nodes with ssh key
        for node in nodes:
            add_admin_user_to_compute_node(batch_client, pool_id, node,
                                           _NODE_USERNAME, public_key)

        # designate a swarm master node
        master_node = designate_master_docker_swarm_node(
            batch_client, pool_id, nodes, job_id)

        # connect to docker remotely
        connect_to_remote_docker_swarm_master(batch_client, pool_id, nodes,
                                              master_node[1], _NODE_USERNAME,
                                              private_key,
                                              generate_ssh_tunnel_script)

        # submit job and add a task
        print('submitting a docker run task via Azure Batch...')
        task_id = add_docker_batch_task(batch_client, block_blob_client,
                                        job_id, pool_id)

        # wait for tasks to complete
        common.helpers.wait_for_tasks_to_complete(
            batch_client, job_id, datetime.timedelta(minutes=5))

        common.helpers.print_task_output(batch_client, job_id, [task_id])
    finally:
        # perform clean up
        if public_key is not None:
            try:
                os.remove(public_key)
            except OSError:
                pass
        if private_key is not None:
            if generate_ssh_tunnel_script:
                print('not deleting ssh private key due to ssh tunnel script!')
            else:
                try:
                    os.remove(private_key)
                except OSError:
                    pass
        if should_delete_container:
            print('Deleting container: {}'.format(_CONTAINER_NAME))
            block_blob_client.delete_container(_CONTAINER_NAME,
                                               fail_not_exist=False)
        if should_delete_job:
            print('Deleting job: {}'.format(job_id))
            batch_client.job.delete(job_id)
        if should_delete_pool:
            print('Deleting pool: {}'.format(pool_id))
            batch_client.pool.delete(pool_id)
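generate_ssh_keypair produces the key files that are handed to add_admin_user_to_compute_node and cleaned up in the finally block above. A minimal sketch shelling out to ssh-keygen; the sample's real implementation may differ:

def generate_ssh_keypair(key_filename):
    # Minimal sketch: create an RSA key pair with an empty passphrase and
    # return (private_key_path, public_key_path), matching the unpacking
    # order used above.
    import os
    import subprocess

    for path in (key_filename, key_filename + '.pub'):
        if os.path.exists(path):
            os.remove(path)  # avoid the ssh-keygen overwrite prompt
    subprocess.check_call(
        ['ssh-keygen', '-t', 'rsa', '-f', key_filename, '-N', ''])
    return key_filename, key_filename + '.pub'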
Example 6
    # Upload the input files to the input container and collect the
    # returned references for use when the tasks are added below.
    input_files = [
        upload_file_to_container(blob_client, input_container_name, file_path)
        for file_path in input_file_paths
    ]

    # Obtain a shared access signature that provides write access to the output
    # container to which the tasks will upload their output.
    output_container_sas_token = get_container_sas_token(
        blob_client, output_container_name, azureblob.BlobPermissions.WRITE)

    # Create a Batch service client. We'll now be interacting with the Batch
    # service in addition to Storage.
    credentials = batchauth.SharedKeyCredentials(_BATCH_ACCOUNT_NAME,
                                                 _BATCH_ACCOUNT_KEY)

    batch_client = batch.BatchServiceClient(
        batch.BatchServiceClientConfiguration(credentials,
                                              base_url=_BATCH_ACCOUNT_URL))

    # Create the pool that will contain the compute nodes that will execute the
    # tasks. The resource files we pass in are used for configuring the pool's
    # start task, which is executed each time a node first joins the pool (or
    # is rebooted or re-imaged).
    create_pool(batch_client, _POOL_ID, application_files, _NODE_OS_DISTRO,
                _NODE_OS_VERSION)

    # Create the job that will run the tasks.
    create_job(batch_client, _JOB_ID, _POOL_ID)

    # Add the tasks to the job. We need to supply a container shared access
    # signature (SAS) token for the tasks so that they can upload their output
    # to Azure Storage.
    add_tasks(batch_client, _JOB_ID, input_files, output_container_name,
              output_container_sas_token)
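common.helpers.wait_for_tasks_to_complete, used throughout these examples, polls the job's task list until every task reaches the completed state or the timeout expires. A minimal sketch of that pattern, assuming the behavior seen in the calls above rather than the helper's actual source:

import datetime
import time

import azure.batch.models as batchmodels


def wait_for_tasks_to_complete(batch_client, job_id, timeout):
    # Minimal sketch of the polling loop, not the helper's real code.
    deadline = datetime.datetime.now() + timeout
    while datetime.datetime.now() < deadline:
        tasks = list(batch_client.task.list(job_id))
        if tasks and all(
                t.state == batchmodels.TaskState.completed for t in tasks):
            return
        time.sleep(5)
    raise RuntimeError('tasks did not complete within {}'.format(timeout))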