Example #1
0
def wait_for_blob_copy_operation(blob_name, target_container_name, target_storage_account_name,
                                 azure_pool_frequency, location):
    """Poll a storage blob until its server-side copy operation completes.

    Repeatedly runs `az storage blob show` every *azure_pool_frequency*
    seconds, reporting progress through the CLI progress controller.
    Raises CLIError if the copy finishes in any state other than 'success'.
    Returns silently if the user interrupts the wait with Ctrl-C.
    """
    progress_controller = APPLICATION.get_progress_controller()
    copy_status = "pending"
    prev_progress = -1
    while copy_status == "pending":
        cmd = prepare_cli_command(['storage', 'blob', 'show',
                                   '--name', blob_name,
                                   '--container-name', target_container_name,
                                   '--account-name', target_storage_account_name])

        json_output = run_cli_command(cmd, return_as_json=True)
        copy_status = json_output["properties"]["copy"]["status"]
        # progress is reported as "<copied bytes>/<total bytes>"
        copy_progress_1, copy_progress_2 = json_output["properties"]["copy"]["progress"].split("/")
        # BUGFIX: the ratio must be scaled by 100 — it is rendered with a '%'
        # suffix below (the other variants of this function already do * 100).
        current_progress = round(int(copy_progress_1) / int(copy_progress_2) * 100, 1)

        if current_progress != prev_progress:
            msg = "{0} - copy progress: {1}%"\
                .format(location, str(current_progress))\
                .ljust(PROGRESS_LINE_LENGTH)  # need to justify since messages override each other
            progress_controller.add(message=msg)

        prev_progress = current_progress

        try:
            time.sleep(azure_pool_frequency)
        except KeyboardInterrupt:
            progress_controller.stop()
            return

    if copy_status == 'success':
        progress_controller.stop()
    else:
        logger.error("The copy operation didn't succeed. Last status: %s", copy_status)
        raise CLIError('Blob copy failed')
def create_resource_group(resource_group_name, location):
    """Create *resource_group_name* in *location* if it does not already exist.

    No-op when `az group exists` reports the group is already present.
    """
    # check if target resource group exists
    cli_cmd = prepare_cli_command(['group', 'exists',
                                   '--name', resource_group_name], output_as_json=False)

    cmd_output = run_cli_command(cli_cmd)

    if 'true' in cmd_output:
        return

    # create the target resource group
    # logger.warn is a deprecated alias since Python 3.3 — use warning()
    logger.warning("Creating resource group: %s", resource_group_name)
    cli_cmd = prepare_cli_command(['group', 'create',
                                   '--name', resource_group_name,
                                   '--location', location])

    run_cli_command(cli_cmd)
Example #3
0
def create_snapshot_from_vhd(location, target_blob_path, target_snapshot_name,
                             transient_resource_group_name):
    """Create a managed snapshot in *location* from a VHD blob.

    Returns the parsed JSON output of `az snapshot create`.
    """
    snapshot_cmd = prepare_cli_command([
        'snapshot', 'create',
        '--resource-group', transient_resource_group_name,
        '--name', target_snapshot_name,
        '--location', location,
        '--source', target_blob_path,
    ])
    return run_cli_command(snapshot_cmd, return_as_json=True)
def create_resource_group(resource_group_name, location, subscription=None):
    """Create *resource_group_name* in *location* if it does not already exist.

    *subscription* (optional) is forwarded to the CLI command so the group is
    checked/created in that subscription. No-op when the group already exists.
    """
    # check if target resource group exists
    cli_cmd = prepare_cli_command(['group', 'exists',
                                   '--name', resource_group_name],
                                  output_as_json=False,
                                  subscription=subscription)

    cmd_output = run_cli_command(cli_cmd)

    if 'true' in cmd_output:
        return

    # create the target resource group
    # logger.warn is a deprecated alias since Python 3.3 — use warning()
    logger.warning("Creating resource group: %s", resource_group_name)
    cli_cmd = prepare_cli_command(['group', 'create',
                                   '--name', resource_group_name,
                                   '--location', location],
                                  subscription=subscription)

    run_cli_command(cli_cmd)
Example #5
0
def delete_all_created_images(dict_manifest, parallel_degree, target_locations_count, target_resource_group_name):
    """Delete every image listed in *dict_manifest* ({location: image_name}).

    Deletions are fanned out over a process pool sized by init_process_pool
    and the function blocks until all of them have been submitted and joined.
    """
    pool = init_process_pool(parallel_degree, target_locations_count)

    image_delete_cmds = []
    for location, image in dict_manifest.items():
        logger.warning("create delete image command for image: %s, location: %s", image, location)
        image_delete_cmds.append(prepare_cli_command(
            ['image', 'delete', '--name', image, '--resource-group', target_resource_group_name]))

    for delete_cmd in image_delete_cmds:
        # BUGFIX: apply_async expects an argument *tuple*; passing the command
        # list directly unpacks every CLI token as a separate positional
        # argument to run_cli_command.
        pool.apply_async(run_cli_command, (delete_cmd,))

    pool.close()
    pool.join()
Example #6
0
def wait_for_blob_copy_operation(blob_name, target_container_name,
                                 target_storage_account_name,
                                 azure_pool_frequency, location, subscription):
    """Poll the target blob until its server-side copy leaves 'pending'.

    Transient `blob show` failures are retried up to 10 times, 20 seconds
    apart, before the last exception is re-raised. Raises CLIError when the
    copy ends in any state other than 'success'; returns silently on Ctrl-C.
    """
    copy_status = "pending"
    prev_progress = -1
    retries = 0
    while copy_status == "pending":
        try:
            cli_cmd = prepare_cli_command([
                'storage', 'blob', 'show', '--name', blob_name,
                '--container-name', target_container_name, '--account-name',
                target_storage_account_name
            ],
                                          subscription=subscription)

            json_output = run_cli_command(cli_cmd, return_as_json=True)
            copy_status = json_output["properties"]["copy"]["status"]
            # progress is reported as "<copied bytes>/<total bytes>"
            copy_progress_1, copy_progress_2 = json_output["properties"][
                "copy"]["progress"].split("/")
            current_progress = int(
                int(copy_progress_1) / int(copy_progress_2) * 100)
        except Exception as ex:
            logger.warn(
                "Got exception when trying to get blob: %s state, \n ex %s",
                blob_name, str(ex))
            if retries < 10:
                time.sleep(20)
                retries += 1
                # BUGFIX: restart the poll — falling through would read
                # current_progress (and json_output), which are undefined
                # when the very first iteration fails.
                continue
            raise ex

        if current_progress != prev_progress:
            msg = "{0} - Copy progress: {1}%"\
                .format(location, str(current_progress))
            logger.warn(msg)

        prev_progress = current_progress

        try:
            time.sleep(azure_pool_frequency)
        except KeyboardInterrupt:
            return

    if copy_status != 'success':
        logger.error("The copy operation didn't succeed. Last status: %s",
                     copy_status)
        logger.error("Command run: %s", cli_cmd)
        logger.error("Command output: %s", json_output)

        raise CLIError('Blob copy failed')
def wait_for_blob_copy_operation(blob_name, target_container_name,
                                 target_storage_account_name,
                                 azure_pool_frequency, location):
    """Poll the target blob every *azure_pool_frequency* seconds until its
    server-side copy leaves the 'pending' state.

    Logs progress changes, returns on success or Ctrl-C, and raises CLIError
    when the copy ends in any other state.
    """
    copy_status = "pending"
    prev_progress = -1
    while copy_status == "pending":
        show_cmd = prepare_cli_command([
            'storage', 'blob', 'show', '--name', blob_name, '--container-name',
            target_container_name, '--account-name',
            target_storage_account_name
        ])

        blob_info = run_cli_command(show_cmd, return_as_json=True)
        copy_props = blob_info["properties"]["copy"]
        copy_status = copy_props["status"]
        # progress is reported as "<copied bytes>/<total bytes>"
        copied_bytes, total_bytes = copy_props["progress"].split("/")
        current_progress = int(int(copied_bytes) / int(total_bytes) * 100)

        if current_progress != prev_progress:
            msg = "{0} - Copy progress: {1}%"\
                .format(location, str(current_progress))
            logger.warn(msg)

        prev_progress = current_progress

        try:
            time.sleep(azure_pool_frequency)
        except KeyboardInterrupt:
            return

    if copy_status == 'success':
        return

    logger.error("The copy operation didn't succeed. Last status: %s",
                 copy_status)
    raise CLIError('Blob copy failed')
def create_target_image(cmd, location, transient_resource_group_name,
                        source_type, source_object_name,
                        source_os_disk_snapshot_name,
                        source_os_disk_snapshot_url, source_os_type,
                        target_resource_group_name, azure_pool_frequency, tags,
                        target_name, target_subscription, export_as_snapshot,
                        timeout):
    """Copy the source OS-disk snapshot into *location* and build an image there.

    Sequence: create a transient storage account in the target region,
    fetch its key and a SAS token, create a 'snapshots' container, copy the
    snapshot VHD across with `az storage blob copy start`, wait for the copy,
    create a managed snapshot from the copied blob, and — unless
    export_as_snapshot is truthy — create the final managed image in
    target_resource_group_name.

    NOTE(review): transient resources (storage account, container, blob) are
    left behind here; presumably cleaned up by the caller deleting the
    transient resource group — confirm against imagecopy().
    """

    random_string = get_random_string(STORAGE_ACCOUNT_NAME_LENGTH -
                                      len(location))

    # create the target storage account. storage account name must be lowercase.
    logger.warning(
        "%s - Creating target storage account (can be slow sometimes)",
        location)
    target_storage_account_name = location.lower() + random_string
    cli_cmd = prepare_cli_command([
        'storage', 'account', 'create', '--name', target_storage_account_name,
        '--resource-group', transient_resource_group_name, '--location',
        location, '--sku', 'Standard_LRS'
    ],
                                  subscription=target_subscription)

    json_output = run_cli_command(cli_cmd, return_as_json=True)
    target_blob_endpoint = json_output['primaryEndpoints']['blob']

    # Setup the target storage account
    cli_cmd = prepare_cli_command([
        'storage', 'account', 'keys', 'list', '--account-name',
        target_storage_account_name, '--resource-group',
        transient_resource_group_name
    ],
                                  subscription=target_subscription)

    json_output = run_cli_command(cli_cmd, return_as_json=True)

    # use the first of the account's keys for the SAS generation below
    target_storage_account_key = json_output[0]['value']
    logger.debug("storage account key: %s", target_storage_account_key)

    expiry_format = "%Y-%m-%dT%H:%MZ"
    expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout)
    logger.debug("create target storage sas using timeout seconds: %d",
                 timeout)

    cli_cmd = prepare_cli_command([
        'storage', 'account', 'generate-sas', '--account-name',
        target_storage_account_name, '--account-key',
        target_storage_account_key, '--expiry',
        expiry.strftime(expiry_format), '--permissions', 'aclrpuw',
        '--resource-types', 'sco', '--services', 'b', '--https-only'
    ],
                                  output_as_json=False,
                                  subscription=target_subscription)

    sas_token = run_cli_command(cli_cmd)
    # the raw CLI output carries a trailing newline that would corrupt the URL
    sas_token = sas_token.rstrip("\n\r")  # STRANGE
    logger.debug("sas token: %s", sas_token)

    # create a container in the target blob storage account
    logger.warning("%s - Creating container in the target storage account",
                   location)
    target_container_name = 'snapshots'
    cli_cmd = prepare_cli_command([
        'storage', 'container', 'create', '--name', target_container_name,
        '--account-name', target_storage_account_name
    ],
                                  subscription=target_subscription)

    run_cli_command(cli_cmd)

    # Copy the snapshot to the target region using the SAS URL
    blob_name = source_os_disk_snapshot_name + '.vhd'
    logger.warning("%s - Copying blob to target storage account", location)
    cli_cmd = prepare_cli_command([
        'storage', 'blob', 'copy', 'start', '--source-uri',
        source_os_disk_snapshot_url, '--destination-blob', blob_name,
        '--destination-container', target_container_name, '--account-name',
        target_storage_account_name, '--sas-token', sas_token
    ],
                                  subscription=target_subscription)

    run_cli_command(cli_cmd)

    # Wait for the copy to complete
    start_datetime = datetime.datetime.now()
    wait_for_blob_copy_operation(blob_name, target_container_name,
                                 target_storage_account_name,
                                 azure_pool_frequency, location,
                                 target_subscription)
    msg = "{0} - Copy time: {1}".format(
        location,
        datetime.datetime.now() - start_datetime)
    logger.warning(msg)

    # Create the snapshot in the target region from the copied blob
    logger.warning(
        "%s - Creating snapshot in target region from the copied blob",
        location)
    target_blob_path = target_blob_endpoint + \
        target_container_name + '/' + blob_name
    target_snapshot_name = source_os_disk_snapshot_name + '-' + location
    if export_as_snapshot:
        # the snapshot is the end product: keep it in the target group
        snapshot_resource_group_name = target_resource_group_name
    else:
        # the snapshot is an intermediate: keep it with the transient resources
        snapshot_resource_group_name = transient_resource_group_name

    source_storage_account_id = get_storage_account_id_from_blob_path(
        cmd, target_blob_path, transient_resource_group_name,
        target_subscription)

    cli_cmd = prepare_cli_command([
        'snapshot', 'create', '--resource-group', snapshot_resource_group_name,
        '--name', target_snapshot_name, '--location', location, '--source',
        target_blob_path, '--source-storage-account-id',
        source_storage_account_id
    ],
                                  subscription=target_subscription)

    json_output = run_cli_command(cli_cmd, return_as_json=True)
    target_snapshot_id = json_output['id']

    # Optionally create the final image
    if export_as_snapshot:
        logger.warning("%s - Skipping image creation", location)
    else:
        logger.warning("%s - Creating final image", location)
        if target_name is None:
            # derive a default image name from the source object and location
            target_image_name = source_object_name
            if source_type != 'image':
                target_image_name += '-image'
            target_image_name += '-' + location
        else:
            target_image_name = target_name

        cli_cmd = prepare_cli_command([
            'image', 'create', '--resource-group', target_resource_group_name,
            '--name', target_image_name, '--location', location, '--os-type',
            source_os_type, '--source', target_snapshot_id
        ],
                                      tags=tags,
                                      subscription=target_subscription)

        run_cli_command(cli_cmd)
def imagecopy(source_resource_group_name,
              source_object_name,
              target_location,
              target_resource_group_name,
              source_type='image',
              cleanup='false',
              parallel_degree=-1,
              tags=None,
              target_name=None):
    """Copy a VM/image/snapshot across regions by snapshotting its OS disk.

    Creates a source snapshot, grants SAS access, then fans out
    create_target_image over a process pool (one task per target location).
    When *cleanup* is 'true'/True, transient resources and the source
    snapshot are deleted afterwards.
    """
    # BUGFIX: cleanup arrives as the *string* 'false'/'true'; any non-empty
    # string is truthy, so `if cleanup:` used to run the cleanup path even
    # when cleanup was 'false'. Normalize to a real bool (True stays True).
    cleanup = str(cleanup).lower() == 'true'

    # get the os disk id from source vm/image
    logger.warn("Getting os disk id of the source vm/image")
    cli_cmd = prepare_cli_command([
        source_type, 'show', '--name', source_object_name, '--resource-group',
        source_resource_group_name
    ])

    json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)

    if json_cmd_output['storageProfile']['dataDisks']:
        logger.warn(
            "Data disks in the source detected, but are ignored by this extension!"
        )

    source_os_disk_id = None
    source_os_disk_type = None

    # Probe the three supported OS-disk shapes in order: managed disk,
    # unmanaged blob URI, then snapshot reference.
    try:
        source_os_disk_id = json_cmd_output['storageProfile']['osDisk'][
            'managedDisk']['id']
        if source_os_disk_id is None:
            raise TypeError
        source_os_disk_type = "DISK"
        logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
    except TypeError:
        try:
            source_os_disk_id = json_cmd_output['storageProfile']['osDisk'][
                'blobUri']
            if source_os_disk_id is None:
                raise TypeError
            source_os_disk_type = "BLOB"
            logger.debug("found %s: %s", source_os_disk_type,
                         source_os_disk_id)
        except TypeError:
            try:  # images created by e.g. image-copy extension
                source_os_disk_id = json_cmd_output['storageProfile'][
                    'osDisk']['snapshot']['id']
                if source_os_disk_id is None:
                    raise TypeError
                source_os_disk_type = "SNAPSHOT"
                logger.debug("found %s: %s", source_os_disk_type,
                             source_os_disk_id)
            except TypeError:
                pass

    if source_os_disk_type is None or source_os_disk_id is None:
        logger.error(
            'Unable to locate a supported os disk type in the provided source object'
        )
        raise CLIError('Invalid OS Disk Source Type')

    source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
    logger.debug(
        "source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
        source_os_disk_type, source_os_disk_id, source_os_type)

    # create source snapshots
    # TODO: skip creating another snapshot when the source is a snapshot
    logger.warn("Creating source snapshot")
    source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
    cli_cmd = prepare_cli_command([
        'snapshot', 'create', '--name', source_os_disk_snapshot_name,
        '--resource-group', source_resource_group_name, '--source',
        source_os_disk_id
    ])

    run_cli_command(cli_cmd)

    # Get SAS URL for the snapshotName
    logger.warn("Getting sas url for the source snapshot")
    cli_cmd = prepare_cli_command([
        'snapshot', 'grant-access', '--name', source_os_disk_snapshot_name,
        '--resource-group', source_resource_group_name,
        '--duration-in-seconds', '3600'
    ])

    json_output = run_cli_command(cli_cmd, return_as_json=True)

    source_os_disk_snapshot_url = json_output['accessSas']
    logger.debug("source os disk snapshot url: %s",
                 source_os_disk_snapshot_url)

    # Start processing in the target locations

    transient_resource_group_name = 'image-copy-rg'
    # pick the first location for the temp group
    transient_resource_group_location = target_location[0].strip()
    create_resource_group(transient_resource_group_name,
                          transient_resource_group_location)

    target_locations_count = len(target_location)
    logger.warn("Target location count: %s", target_locations_count)

    create_resource_group(target_resource_group_name,
                          target_location[0].strip())

    if parallel_degree == -1:
        pool = Pool(target_locations_count)
    else:
        pool = Pool(min(parallel_degree, target_locations_count))

    # try to get a handle on arm's 409s
    azure_pool_frequency = 5
    if target_locations_count >= 5:
        azure_pool_frequency = 15
    elif target_locations_count >= 3:
        azure_pool_frequency = 10

    tasks = []
    for location in target_location:
        location = location.strip()
        tasks.append((location, transient_resource_group_name, source_type,
                      source_object_name, source_os_disk_snapshot_name,
                      source_os_disk_snapshot_url, source_os_type,
                      target_resource_group_name, azure_pool_frequency, tags,
                      target_name))

    logger.warn("Starting async process for all locations")

    for task in tasks:
        pool.apply_async(create_target_image, task)

    try:
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        logger.warn('User cancelled the operation')
        if cleanup:
            logger.warn(
                'To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
                'You can use the following command: az resource list --tag created_by=image-copy-extension'
            )
        pool.terminate()
        return

    # Cleanup
    if cleanup:
        logger.warn('Deleting transient resources')

        # Delete resource group
        cli_cmd = prepare_cli_command([
            'group', 'delete', '--no-wait', '--yes', '--name',
            transient_resource_group_name
        ])
        run_cli_command(cli_cmd)

        # Revoke sas for source snapshot
        cli_cmd = prepare_cli_command([
            'snapshot', 'revoke-access', '--name',
            source_os_disk_snapshot_name, '--resource-group',
            source_resource_group_name
        ])
        run_cli_command(cli_cmd)

        # Delete source snapshot
        # TODO: skip this if source is snapshot and not creating a new one
        cli_cmd = prepare_cli_command([
            'snapshot', 'delete', '--name', source_os_disk_snapshot_name,
            '--resource-group', source_resource_group_name
        ])
        run_cli_command(cli_cmd)
def get_subscription_id():
    """Return the id of the currently active Azure subscription."""
    account_cmd = prepare_cli_command(['account', 'show'])
    account_info = run_cli_command(account_cmd, return_as_json=True)
    return account_info['id']
def imagecopy(source_resource_group_name, source_object_name, target_location,
              target_resource_group_name, source_type='image', cleanup='false', parallel_degree=-1):
    """Copy a VM/image across regions by snapshotting its managed OS disk.

    Creates a source snapshot, grants SAS access, then fans out
    create_target_image over a process pool (one task per target location).
    When *cleanup* is 'true'/True, transient resources and the source
    snapshot are deleted afterwards.
    """
    # BUGFIX: cleanup arrives as the *string* 'false'/'true'; any non-empty
    # string is truthy, so `if cleanup:` used to run the cleanup path even
    # when cleanup was 'false'. Normalize to a real bool (True stays True).
    cleanup = str(cleanup).lower() == 'true'

    # get the os disk id from source vm/image
    logger.warn("Getting os disk id of the source vm/image")
    cli_cmd = prepare_cli_command([source_type, 'show',
                                   '--name', source_object_name,
                                   '--resource-group', source_resource_group_name])

    json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)

    source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']
    source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
    logger.debug("source_os_disk_id: %s. source_os_type: %s",
                 source_os_disk_id, source_os_type)

    # create source snapshots
    logger.warn("Creating source snapshot")
    source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
    cli_cmd = prepare_cli_command(['snapshot', 'create',
                                   '--name', source_os_disk_snapshot_name,
                                   '--resource-group', source_resource_group_name,
                                   '--source', source_os_disk_id])

    run_cli_command(cli_cmd)

    # Get SAS URL for the snapshotName
    logger.warn("Getting sas url for the source snapshot")
    cli_cmd = prepare_cli_command(['snapshot', 'grant-access',
                                   '--name', source_os_disk_snapshot_name,
                                   '--resource-group', source_resource_group_name,
                                   '--duration-in-seconds', '3600'])

    json_output = run_cli_command(cli_cmd, return_as_json=True)

    source_os_disk_snapshot_url = json_output['accessSas']
    logger.debug("source os disk snapshot url: %s",
                 source_os_disk_snapshot_url)

    # Start processing in the target locations

    transient_resource_group_name = 'image-copy-rg'
    create_resource_group(transient_resource_group_name, 'eastus')

    target_locations_count = len(target_location)
    logger.warn("Target location count: %s", target_locations_count)

    create_resource_group(target_resource_group_name,
                          target_location[0].strip())

    if parallel_degree == -1:
        pool = Pool(target_locations_count)
    else:
        pool = Pool(min(parallel_degree, target_locations_count))

    # try to get a handle on arm's 409s
    azure_pool_frequency = 5
    if target_locations_count >= 5:
        azure_pool_frequency = 15
    elif target_locations_count >= 3:
        azure_pool_frequency = 10

    tasks = []
    for location in target_location:
        location = location.strip()
        tasks.append((location, transient_resource_group_name, source_type,
                      source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
                      source_os_type, target_resource_group_name, azure_pool_frequency))

    logger.warn("Starting async process for all locations")

    for task in tasks:
        pool.apply_async(create_target_image, task)

    try:
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        logger.warn('User cancelled the operation')
        if cleanup:
            logger.warn('To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
                        'You can use the following command: az resource list --tag created_by=image-copy-extension')
        pool.terminate()
        return

    # Cleanup
    if cleanup:
        logger.warn('Deleting transient resources')

        # Delete resource group
        cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',
                                       '--name', transient_resource_group_name])
        run_cli_command(cli_cmd)

        # Revoke sas for source snapshot
        cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',
                                       '--name', source_os_disk_snapshot_name,
                                       '--resource-group', source_resource_group_name])
        run_cli_command(cli_cmd)

        # Delete source snapshot
        cli_cmd = prepare_cli_command(['snapshot', 'delete',
                                       '--name', source_os_disk_snapshot_name,
                                       '--resource-group', source_resource_group_name])
        run_cli_command(cli_cmd)
Example #12
0
def imagecopy(cmd,
              source_resource_group_name,
              source_object_name,
              target_location,
              target_resource_group_name,
              temporary_resource_group_name='image-copy-rg',
              source_type='image',
              cleanup=False,
              parallel_degree=-1,
              tags=None,
              target_name=None,
              target_subscription=None,
              export_as_snapshot='false',
              timeout=3600):
    """Copy a VM/image/snapshot across regions by snapshotting its OS disk.

    Locates the source OS disk (managed disk, blob URI or snapshot), creates
    a source snapshot with SAS access, then runs create_target_image for each
    target location — sequentially for a single target, otherwise over a
    process pool. When *cleanup* is set, transient resources and the source
    snapshot are removed afterwards.
    """
    if cleanup:
        # If --cleanup is set, forbid using an existing temporary resource group name.
        # It is dangerous to clean up an existing resource group.
        cli_cmd = prepare_cli_command(
            ['group', 'exists', '-n', temporary_resource_group_name],
            output_as_json=False)
        cmd_output = run_cli_command(cli_cmd)
        if 'true' in cmd_output:
            raise CLIError(
                'Don\'t specify an existing resource group in --temporary-resource-group-name '
                'when --cleanup is set')

    # get the os disk id from source vm/image
    logger.warning("Getting OS disk ID of the source VM/image")
    cli_cmd = prepare_cli_command([
        source_type, 'show', '--name', source_object_name, '--resource-group',
        source_resource_group_name
    ])

    json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)

    if json_cmd_output['storageProfile']['dataDisks']:
        logger.warning(
            "Data disks in the source detected, but are ignored by this extension!"
        )

    source_os_disk_id = None
    source_os_disk_type = None

    # Probe the three supported OS-disk shapes in order: managed disk,
    # unmanaged blob URI, then snapshot reference.
    try:
        source_os_disk_id = json_cmd_output['storageProfile']['osDisk'][
            'managedDisk']['id']
        if source_os_disk_id is None:
            raise TypeError
        source_os_disk_type = "DISK"
        logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
    except TypeError:
        try:
            source_os_disk_id = json_cmd_output['storageProfile']['osDisk'][
                'blobUri']
            if source_os_disk_id is None:
                raise TypeError
            source_os_disk_type = "BLOB"
            logger.debug("found %s: %s", source_os_disk_type,
                         source_os_disk_id)
        except TypeError:
            try:  # images created by e.g. image-copy extension
                source_os_disk_id = json_cmd_output['storageProfile'][
                    'osDisk']['snapshot']['id']
                if source_os_disk_id is None:
                    raise TypeError
                source_os_disk_type = "SNAPSHOT"
                logger.debug("found %s: %s", source_os_disk_type,
                             source_os_disk_id)
            except TypeError:
                pass

    if source_os_disk_type is None or source_os_disk_id is None:
        logger.error(
            'Unable to locate a supported OS disk type in the provided source object'
        )
        raise CLIError('Invalid OS Disk Source Type')

    source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
    logger.debug(
        "source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
        source_os_disk_type, source_os_disk_id, source_os_type)

    # create source snapshots
    # TODO: skip creating another snapshot when the source is a snapshot
    logger.warning("Creating source snapshot")
    source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'

    if source_os_disk_type == "BLOB":
        source_storage_account_id = get_storage_account_id_from_blob_path(
            cmd, source_os_disk_id, source_resource_group_name)
        cli_cmd = prepare_cli_command([
            'snapshot', 'create', '--name', source_os_disk_snapshot_name,
            '--resource-group', source_resource_group_name, '--source',
            source_os_disk_id, '--source-storage-account-id',
            source_storage_account_id
        ])
    else:
        cli_cmd = prepare_cli_command([
            'snapshot', 'create', '--name', source_os_disk_snapshot_name,
            '--resource-group', source_resource_group_name, '--source',
            source_os_disk_id
        ])

    run_cli_command(cli_cmd)

    # Get SAS URL for the snapshotName
    logger.warning(
        "Getting sas url for the source snapshot with timeout: %d seconds",
        timeout)
    if timeout < 3600:
        logger.error("Timeout should be greater than 3600 seconds")
        raise CLIError('Invalid Timeout')

    cli_cmd = prepare_cli_command([
        'snapshot', 'grant-access', '--name', source_os_disk_snapshot_name,
        '--resource-group', source_resource_group_name,
        '--duration-in-seconds',
        str(timeout)
    ])

    json_output = run_cli_command(cli_cmd, return_as_json=True)

    source_os_disk_snapshot_url = json_output['accessSas']
    logger.debug("source os disk snapshot url: %s",
                 source_os_disk_snapshot_url)

    # Start processing in the target locations

    transient_resource_group_name = temporary_resource_group_name
    # pick the first location for the temp group
    transient_resource_group_location = target_location[0].strip()
    create_resource_group(transient_resource_group_name,
                          transient_resource_group_location,
                          target_subscription)

    target_locations_count = len(target_location)
    logger.warning("Target location count: %s", target_locations_count)

    create_resource_group(target_resource_group_name,
                          target_location[0].strip(), target_subscription)

    # BUGFIX: pool must exist before the try block — in the sequential path
    # no pool is created, and a KeyboardInterrupt would hit pool.terminate()
    # with 'pool' undefined, raising NameError instead of exiting cleanly.
    pool = None

    try:

        # try to get a handle on arm's 409s
        azure_pool_frequency = 5
        if target_locations_count >= 5:
            azure_pool_frequency = 15
        elif target_locations_count >= 3:
            azure_pool_frequency = 10

        if (target_locations_count == 1) or (parallel_degree == 1):
            # Going to copy to targets one-by-one
            logger.debug("Starting sync process for all locations")
            for location in target_location:
                location = location.strip()
                create_target_image(
                    cmd, location, transient_resource_group_name, source_type,
                    source_object_name, source_os_disk_snapshot_name,
                    source_os_disk_snapshot_url, source_os_type,
                    target_resource_group_name, azure_pool_frequency, tags,
                    target_name, target_subscription, export_as_snapshot,
                    timeout)
        else:
            if parallel_degree == -1:
                pool = Pool(target_locations_count)
            else:
                pool = Pool(min(parallel_degree, target_locations_count))

            tasks = []
            for location in target_location:
                location = location.strip()
                tasks.append(
                    (location, transient_resource_group_name, source_type,
                     source_object_name, source_os_disk_snapshot_name,
                     source_os_disk_snapshot_url, source_os_type,
                     target_resource_group_name, azure_pool_frequency, tags,
                     target_name, target_subscription, export_as_snapshot,
                     timeout))

            logger.warning("Starting async process for all locations")

            for task in tasks:
                pool.apply_async(create_target_image, task)

            pool.close()
            pool.join()

    except KeyboardInterrupt:
        logger.warning('User cancelled the operation')
        if cleanup:
            logger.warning(
                'To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
                'You can use the following command: az resource list --tag created_by=image-copy-extension'
            )
        if pool is not None:
            pool.terminate()
        return

    # Cleanup
    if cleanup:
        logger.warning('Deleting transient resources')

        # Delete resource group
        cli_cmd = prepare_cli_command([
            'group', 'delete', '--no-wait', '--yes', '--name',
            transient_resource_group_name
        ],
                                      subscription=target_subscription)
        run_cli_command(cli_cmd)

        # Revoke sas for source snapshot
        cli_cmd = prepare_cli_command([
            'snapshot', 'revoke-access', '--name',
            source_os_disk_snapshot_name, '--resource-group',
            source_resource_group_name
        ])
        run_cli_command(cli_cmd)

        # Delete source snapshot
        # TODO: skip this if source is snapshot and not creating a new one
        cli_cmd = prepare_cli_command([
            'snapshot', 'delete', '--name', source_os_disk_snapshot_name,
            '--resource-group', source_resource_group_name
        ])
        run_cli_command(cli_cmd)
Example #13
0
def imagecopy(source_resource_group_name, source_object_name, target_location,
              target_resource_group_name, temporary_resource_group_name, source_type='image',
              cleanup='false', parallel_degree=-1, tags=None, target_name=None,
              target_subscription=None, timeout=3600, verify=False, manifest_file=None):
    """Copy the os disk of a source vm/image to images in one or more target locations.

    Steps: locate the source os disk, snapshot it, grant a timed SAS on the
    snapshot, then fan the per-location work (``create_target_image``) out over a
    process pool. A ``Manager`` dict shared with the workers collects a
    location -> result manifest, which can optionally be written to
    ``manifest_file`` and used to verify that every location produced an image.

    Raises ``CLIError`` when the source os disk cannot be determined or when
    ``timeout`` is below 3600 seconds.
    """

    # get the os disk id from source vm/image
    logger.warning("Getting os disk id of the source vm/image")
    cli_cmd = prepare_cli_command([source_type, 'show',
                                   '--name', source_object_name,
                                   '--resource-group', source_resource_group_name])

    json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)

    if json_cmd_output['storageProfile']['dataDisks']:
        logger.warning(
            "Data disks in the source detected, but are ignored by this extension!")

    source_os_disk_id = None
    source_os_disk_type = None

    # The os disk may be referenced as a managed disk, a blob uri or a snapshot,
    # depending on how the source object was created; probe each shape in turn.
    try:
        source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']
        if source_os_disk_id is None:
            raise TypeError
        source_os_disk_type = "DISK"
        logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
    except TypeError:
        try:
            source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']
            if source_os_disk_id is None:
                raise TypeError
            source_os_disk_type = "BLOB"
            logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
        except TypeError:
            try:  # images created by e.g. image-copy extension
                source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']
                if source_os_disk_id is None:
                    raise TypeError
                source_os_disk_type = "SNAPSHOT"
                logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
            except TypeError:
                pass

    if source_os_disk_type is None or source_os_disk_id is None:
        logger.error(
            'Unable to locate a supported os disk type in the provided source object')
        raise CLIError('Invalid OS Disk Source Type')

    source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
    logger.debug("source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
                 source_os_disk_type, source_os_disk_id, source_os_type)

    # create source snapshots
    # TODO: skip creating another snapshot when the source is a snapshot
    logger.warning("Creating source snapshot")
    source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
    cli_cmd = prepare_cli_command(['snapshot', 'create',
                                   '--name', source_os_disk_snapshot_name,
                                   '--resource-group', source_resource_group_name,
                                   '--source', source_os_disk_id])

    run_cli_command(cli_cmd)

    # Get SAS URL for the snapshotName
    logger.warning("Getting sas url for the source snapshot with timeout seconds: %d", timeout)
    if timeout < 3600:
        # log at error level and use a correctly spelled message, consistent
        # with the sibling imagecopy variant in this file
        logger.error("Timeout should be greater than 3600 seconds")
        raise CLIError('Invalid Timeout')

    cli_cmd = prepare_cli_command(['snapshot', 'grant-access',
                                   '--name', source_os_disk_snapshot_name,
                                   '--resource-group', source_resource_group_name,
                                   '--duration-in-seconds', str(timeout)])

    json_output = run_cli_command(cli_cmd, return_as_json=True)

    source_os_disk_snapshot_url = json_output['accessSas']
    logger.debug("source os disk snapshot url: %s",
                 source_os_disk_snapshot_url)

    # Start processing in the target locations

    transient_resource_group_name = temporary_resource_group_name
    logger.info("temp resource group name is %s", transient_resource_group_name)

    # pick the first location for the temp group
    transient_resource_group_location = target_location[0].strip()
    create_resource_group(transient_resource_group_name,
                          transient_resource_group_location,
                          target_subscription)

    target_locations_count = len(target_location)
    logger.warning("Target location count: %s", target_locations_count)

    create_resource_group(target_resource_group_name,
                          target_location[0].strip(),
                          target_subscription)

    try:

        # try to get a handle on arm's 409s: poll less often as fan-out grows
        azure_pool_frequency = 5
        if target_locations_count >= 5:
            azure_pool_frequency = 15
        elif target_locations_count >= 3:
            azure_pool_frequency = 10

        pool = init_process_pool(parallel_degree, target_locations_count)

        tasks = []
        m = Manager()
        # shared across worker processes: location -> created image details
        manifest = m.dict()
        for location in target_location:
            logger.warning("Creating task for location: %s", location)
            location = location.strip()
            tasks.append((location, transient_resource_group_name, source_type,
                          source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
                          source_os_type, target_resource_group_name, azure_pool_frequency,
                          tags, target_name, target_subscription, timeout, manifest))

        logger.warning("Starting async process of %d tasks for all locations", len(tasks))

        for task in tasks:
            pool.apply_async(create_target_image, task)

        pool.close()
        pool.join()
    except KeyboardInterrupt:
        logger.warning('User cancelled the operation')
        if cleanup:
            logger.warning('To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
                           'You can use the following command: az resource list --tag created_by=image-copy-extension')
        pool.terminate()
        return

    # Cleanup
    # NOTE(review): `cleanup` defaults to the *string* 'false', which is truthy.
    # Presumably the CLI argument layer converts it to a bool before this runs -- confirm.
    if cleanup:
        logger.warning('Deleting transient resources')

        # Delete resource group
        cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',
                                       '--name', transient_resource_group_name],
                                      subscription=target_subscription)
        run_cli_command(cli_cmd)

        # Revoke sas for source snapshot
        cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',
                                       '--name', source_os_disk_snapshot_name,
                                       '--resource-group', source_resource_group_name])
        run_cli_command(cli_cmd)

        # Delete source snapshot
        # TODO: skip this if source is snapshot and not creating a new one
        cli_cmd = prepare_cli_command(['snapshot', 'delete',
                                       '--name', source_os_disk_snapshot_name,
                                       '--resource-group', source_resource_group_name])
        run_cli_command(cli_cmd)

    if manifest_file is not None:
        dict_manifest = dict(manifest)
        logger.warning("Writing manifest %s to %s", pprint.pformat(dict_manifest), manifest_file)
        with open(manifest_file, "w+") as f:
            f.write(json.dumps(dict_manifest))

    # Verify
    if verify:
        logger.warning("verifying images created on all regions")
        dict_manifest = dict(manifest)
        for location in target_location:
            location = location.strip()
            if location not in manifest:
                logger.error("location: %s not found in manifest", location)
                logger.error("verification failed try to delete all images")
                delete_all_created_images(dict_manifest, parallel_degree, target_locations_count, target_resource_group_name)
                # equivalent to exit(1) but does not rely on the `site` module
                raise SystemExit(1)
def imagecopy(source_resource_group_name, source_object_name, target_location,
              target_resource_group_name, temporary_resource_group_name='image-copy-rg',
              source_type='image', cleanup='false', parallel_degree=-1, tags=None, target_name=None,
              target_subscription=None, export_as_snapshot='false', timeout=3600):
    """Copy the os disk of a source vm/image to one or more target locations.

    Steps: locate the source os disk, snapshot it, grant a timed SAS on the
    snapshot, then fan the per-location work (``create_target_image``) out over
    a ``multiprocessing.Pool``, sized by ``parallel_degree`` (-1 means one
    worker per target location). ``export_as_snapshot`` is forwarded to the
    workers unchanged.

    Raises ``CLIError`` when the source os disk cannot be determined or when
    ``timeout`` is below 3600 seconds.
    """

    # get the os disk id from source vm/image
    logger.warning("Getting os disk id of the source vm/image")
    cli_cmd = prepare_cli_command([source_type, 'show',
                                   '--name', source_object_name,
                                   '--resource-group', source_resource_group_name])

    json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)

    if json_cmd_output['storageProfile']['dataDisks']:
        logger.warning(
            "Data disks in the source detected, but are ignored by this extension!")

    source_os_disk_id = None
    source_os_disk_type = None

    # The os disk may be referenced as a managed disk, a blob uri or a snapshot,
    # depending on how the source object was created; probe each shape in turn.
    try:
        source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']
        if source_os_disk_id is None:
            raise TypeError
        source_os_disk_type = "DISK"
        logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
    except TypeError:
        try:
            source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']
            if source_os_disk_id is None:
                raise TypeError
            source_os_disk_type = "BLOB"
            logger.debug("found %s: %s", source_os_disk_type,
                         source_os_disk_id)
        except TypeError:
            try:  # images created by e.g. image-copy extension
                source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']
                if source_os_disk_id is None:
                    raise TypeError
                source_os_disk_type = "SNAPSHOT"
                logger.debug("found %s: %s", source_os_disk_type,
                             source_os_disk_id)
            except TypeError:
                pass

    if source_os_disk_type is None or source_os_disk_id is None:
        logger.error(
            'Unable to locate a supported os disk type in the provided source object')
        raise CLIError('Invalid OS Disk Source Type')

    source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
    logger.debug("source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
                 source_os_disk_type, source_os_disk_id, source_os_type)

    # create source snapshots
    # TODO: skip creating another snapshot when the source is a snapshot
    logger.warning("Creating source snapshot")
    source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
    cli_cmd = prepare_cli_command(['snapshot', 'create',
                                   '--name', source_os_disk_snapshot_name,
                                   '--resource-group', source_resource_group_name,
                                   '--source', source_os_disk_id])

    run_cli_command(cli_cmd)

    # Get SAS URL for the snapshotName
    logger.warning(
        "Getting sas url for the source snapshot with timeout: %d seconds", timeout)
    if timeout < 3600:
        logger.error("Timeout should be greater than 3600 seconds")
        raise CLIError('Invalid Timeout')

    cli_cmd = prepare_cli_command(['snapshot', 'grant-access',
                                   '--name', source_os_disk_snapshot_name,
                                   '--resource-group', source_resource_group_name,
                                   '--duration-in-seconds', str(timeout)])

    json_output = run_cli_command(cli_cmd, return_as_json=True)

    source_os_disk_snapshot_url = json_output['accessSas']
    logger.debug("source os disk snapshot url: %s",
                 source_os_disk_snapshot_url)

    # Start processing in the target locations

    transient_resource_group_name = temporary_resource_group_name
    # pick the first location for the temp group
    transient_resource_group_location = target_location[0].strip()
    create_resource_group(transient_resource_group_name,
                          transient_resource_group_location,
                          target_subscription)

    target_locations_count = len(target_location)
    logger.warning("Target location count: %s", target_locations_count)

    create_resource_group(target_resource_group_name,
                          target_location[0].strip(),
                          target_subscription)

    try:

        # try to get a handle on arm's 409s: poll less often as fan-out grows
        azure_pool_frequency = 5
        if target_locations_count >= 5:
            azure_pool_frequency = 15
        elif target_locations_count >= 3:
            azure_pool_frequency = 10

        # -1 means "one worker per location"; otherwise cap at the location count
        if parallel_degree == -1:
            pool = Pool(target_locations_count)
        else:
            pool = Pool(min(parallel_degree, target_locations_count))

        tasks = []
        for location in target_location:
            location = location.strip()
            tasks.append((location, transient_resource_group_name, source_type,
                          source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
                          source_os_type, target_resource_group_name, azure_pool_frequency,
                          tags, target_name, target_subscription, export_as_snapshot, timeout))

        logger.warning("Starting async process for all locations")

        for task in tasks:
            pool.apply_async(create_target_image, task)

        pool.close()
        pool.join()
    except KeyboardInterrupt:
        logger.warning('User cancelled the operation')
        if cleanup:
            logger.warning('To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
                           'You can use the following command: az resource list --tag created_by=image-copy-extension')
        pool.terminate()
        return

    # Cleanup
    # NOTE(review): `cleanup` defaults to the *string* 'false', which is truthy.
    # Presumably the CLI argument layer converts it to a bool before this runs -- confirm.
    if cleanup:
        logger.warning('Deleting transient resources')

        # Delete resource group
        cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',
                                       '--name', transient_resource_group_name],
                                      subscription=target_subscription)
        run_cli_command(cli_cmd)

        # Revoke sas for source snapshot
        cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',
                                       '--name', source_os_disk_snapshot_name,
                                       '--resource-group', source_resource_group_name])
        run_cli_command(cli_cmd)

        # Delete source snapshot
        # TODO: skip this if source is snapshot and not creating a new one
        cli_cmd = prepare_cli_command(['snapshot', 'delete',
                                       '--name', source_os_disk_snapshot_name,
                                       '--resource-group', source_resource_group_name])
        run_cli_command(cli_cmd)
Exemple #15
0
def create_target_image(location, transient_resource_group_name, source_type, source_object_name,
                        source_os_disk_snapshot_name, source_os_disk_snapshot_url, source_os_type,
                        target_resource_group_name, azure_pool_frequency):
    """Create the final image in a single target location.

    Per-location worker: creates a transient storage account in ``location``,
    copies the source snapshot blob into it via the SAS url, creates a snapshot
    from the copied blob, and finally creates the image from that snapshot in
    ``target_resource_group_name``.
    """

    subscription_id = get_subscription_id()

    # short deterministic per-subscription suffix keeps storage account names
    # unique across subscriptions while staying within the name-length limit
    subscription_hash = hashlib.sha1(subscription_id.encode("UTF-8")).hexdigest()
    unique_subscription_string = subscription_hash[:7]

    # create the target storage account
    logger.warning("%s - Creating target storage account (can be slow sometimes)", location)
    target_storage_account_name = location + unique_subscription_string
    cmd = prepare_cli_command(['storage', 'account', 'create',
                               '--name', target_storage_account_name,
                               '--resource-group', transient_resource_group_name,
                               '--location', location,
                               '--sku', 'Standard_LRS'])

    json_output = run_cli_command(cmd, return_as_json=True)
    target_blob_endpoint = json_output['primaryEndpoints']['blob']

    # Setup the target storage account
    cmd = prepare_cli_command(['storage', 'account', 'keys', 'list',
                               '--account-name', target_storage_account_name,
                               '--resource-group', transient_resource_group_name])

    json_output = run_cli_command(cmd, return_as_json=True)

    target_storage_account_key = json_output[0]['value']
    # NOTE(review): this writes the storage account key to the debug log --
    # consider whether that is acceptable for this tool's threat model
    logger.debug(target_storage_account_key)

    # one-hour SAS on the transient account, long enough for the blob copy
    expiry_format = "%Y-%m-%dT%H:%MZ"
    expiry = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=1)

    cmd = prepare_cli_command(['storage', 'account', 'generate-sas',
                               '--account-name', target_storage_account_name,
                               '--account-key', target_storage_account_key,
                               '--expiry', expiry.strftime(expiry_format),
                               '--permissions', 'aclrpuw', '--resource-types',
                               'sco', '--services', 'b', '--https-only'],
                              output_as_json=False)

    sas_token = run_cli_command(cmd)
    sas_token = sas_token.rstrip("\n\r")  # CLI output carries a trailing newline
    logger.debug("sas token: %s", sas_token)

    # create a container in the target blob storage account
    logger.warning("%s - Creating container in the target storage account", location)
    target_container_name = 'snapshots'
    cmd = prepare_cli_command(['storage', 'container', 'create',
                               '--name', target_container_name,
                               '--account-name', target_storage_account_name])

    run_cli_command(cmd)

    # Copy the snapshot to the target region using the SAS URL
    blob_name = source_os_disk_snapshot_name + '.vhd'
    logger.warning("%s - Copying blob to target storage account", location)
    cmd = prepare_cli_command(['storage', 'blob', 'copy', 'start',
                               '--source-uri', source_os_disk_snapshot_url,
                               '--destination-blob', blob_name,
                               '--destination-container', target_container_name,
                               '--account-name', target_storage_account_name,
                               '--sas-token', sas_token])

    run_cli_command(cmd)

    # Wait for the copy to complete
    start_datetime = datetime.datetime.now()
    wait_for_blob_copy_operation(blob_name, target_container_name,
                                 target_storage_account_name, azure_pool_frequency, location)
    msg = "{0} - Copy time: {1}".format(location, datetime.datetime.now() - start_datetime).ljust(PROGRESS_LINE_LENGTH)
    logger.warning(msg)

    # Create the snapshot in the target region from the copied blob
    logger.warning("%s - Creating snapshot in target region from the copied blob", location)
    target_blob_path = target_blob_endpoint + target_container_name + '/' + blob_name
    target_snapshot_name = source_os_disk_snapshot_name + '-' + location
    cmd = prepare_cli_command(['snapshot', 'create',
                               '--resource-group', transient_resource_group_name,
                               '--name', target_snapshot_name,
                               '--location', location,
                               '--source', target_blob_path])

    json_output = run_cli_command(cmd, return_as_json=True)
    target_snapshot_id = json_output['id']

    # Create the final image
    logger.warning("%s - Creating final image", location)
    target_image_name = source_object_name
    if source_type != 'image':
        target_image_name += '-image'
    target_image_name += '-' + location

    # The original passed `--source` twice (blob path, then snapshot id); the
    # last value won, so the image was built from the snapshot. Pass only the
    # snapshot id to make that explicit.
    cmd = prepare_cli_command(['image', 'create',
                               '--resource-group', target_resource_group_name,
                               '--name', target_image_name,
                               '--location', location,
                               '--os-type', source_os_type,
                               '--source', target_snapshot_id])

    run_cli_command(cmd)