Example 1
    def validator(namespace):
        type_field_name = '{}_type'.format(property_name)
        property_val = getattr(namespace, property_name, None)
        parent_val = getattr(namespace, parent_name, None) if parent_name else None

        # Check for the different scenarios (order matters)
        # 1) provided value indicates None (pair of empty quotes)
        if property_val in ('', '""', "''") or (property_val is None and default_none):
            if not allow_none:
                raise CLIError('{} cannot be None.'.format(property_option))
            setattr(namespace, type_field_name, 'none')
            setattr(namespace, property_name, None)
            if parent_name and parent_val:
                logger = azlogging.get_az_logger(__name__)
                logger.warning('Ignoring: %s %s', parent_option, parent_val)
                setattr(namespace, parent_name, None)
            return  # SUCCESS

        # Create a resource ID we can check for existence.
        (resource_id_parts, value_was_id) = _validate_name_or_id(
            namespace.resource_group_name, property_val, property_type, parent_val, parent_type)

        # 2) resource exists
        if resource_exists(**resource_id_parts):
            setattr(namespace, type_field_name, 'existingId')
            setattr(namespace, property_name, resource_id(**resource_id_parts))
            if parent_val:
                if value_was_id:
                    logger = azlogging.get_az_logger(__name__)
                    logger.warning('Ignoring: %s %s', parent_option, parent_val)
                setattr(namespace, parent_name, None)
            return  # SUCCESS

        # if a parent name was required but not specified, raise a usage error
        if has_parent and not value_was_id and not parent_val and not allow_new:
            raise ValueError('incorrect usage: {0} ID | {0} NAME {1} NAME'.format(
                property_option, parent_option))

        # if a non-existent ID was supplied, raise an error depending on whether a new resource
        # can be created.
        if value_was_id:
            usage_message = '{} NAME'.format(property_option) if not has_parent \
                else '{} NAME [{} NAME]'.format(property_option, parent_option)
            action_message = 'Specify ( {} ) to create a new resource.'.format(usage_message) if \
                allow_new else 'Create the required resource and try again.'
            raise CLIError('{} {} does not exist. {}'.format(
                property_name, property_val, action_message))

        # 3) try to create new resource
        if allow_new:
            setattr(namespace, type_field_name, 'new')
        else:
            raise CLIError(
                '{} {} does not exist. Create the required resource and try again.'.format(
                    property_name, property_val))
Example 2
def storage_blob_download_batch(client, source, destination, source_container_name, pattern=None,
                                dryrun=False):
    """
    Download blobs in a container recursively

    :param str source:
        The source of this download operation. It can be the container URL or the container
        name. When the source is the container URL, the storage account name is parsed from
        the URL.

    :param str destination:
        The destination folder of this download operation. The folder must exist.

    :param bool dryrun:
        Show a summary of the operations to be taken instead of actually downloading the file(s).

    :param str pattern:
        The pattern used for file globbing. The supported patterns are '*', '?', '[seq]',
        and '[!seq]'.
    """
    source_blobs = list(collect_blobs(client, source_container_name, pattern))

    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('download action: from %s to %s', source, destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('  container %s', source_container_name)
        logger.warning('      total %d', len(source_blobs))
        logger.warning(' operations')
        for b in source_blobs or []:
            logger.warning('  - %s', b)
        return []

    return list(_download_blob(client, source_container_name, destination, blob) for blob in source_blobs)
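
The patterns listed in the docstring above are fnmatch-style globs. As a quick illustration of their semantics using only the standard library (this shows the pattern syntax, not how collect_blobs itself applies it):

from fnmatch import fnmatch

blob_names = ['logs/app.log', 'logs/app.1.log', 'data/2017-01.csv']

# '[!seq]' excludes any character in seq; here it skips blobs whose suffix starts with a digit
for name in blob_names:
    if fnmatch(name, 'logs/app.[!0-9]*'):
        print(name)  # prints only 'logs/app.log'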
Example 3
def storage_file_upload_batch(client, destination, source, pattern=None, dryrun=False,
                              validate_content=False, content_settings=None, max_connections=1,
                              metadata=None):
    """
    Upload local files to Azure Storage File Share in batch
    """

    from .util import glob_files_locally
    source_files = [c for c in glob_files_locally(source, pattern)]

    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('upload files to file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', destination)
        logger.warning('      total %d', len(source_files or []))
        logger.warning(' operations')
        for f in source_files or []:
            logger.warning('  - %s => %s', *f)

        return []

    # TODO: Performance improvement
    # 1. Upload files in parallel
    def _upload_action(source_pair):
        dir_name = os.path.dirname(source_pair[1])
        file_name = os.path.basename(source_pair[1])

        _make_directory_in_files_share(client, destination, dir_name)
        create_file_args = {
            'share_name': destination,
            'directory_name': dir_name,
            'file_name': file_name,
            'local_file_path': source_pair[0],
            'content_settings': content_settings,
            'metadata': metadata,
            'max_connections': max_connections,
        }

        if supported_api_version(ResourceType.DATA_STORAGE, min_api='2016-05-31'):
            create_file_args['validate_content'] = validate_content

        client.create_file_from_path(**create_file_args)

        return client.make_file_url(destination, dir_name, file_name)

    return list(_upload_action(f) for f in source_files)
Example 4
def storage_file_download_batch(client, source, destination, pattern=None, dryrun=False,
                                validate_content=False, max_connections=1):
    """
    Download files from file share to local directory in batch
    """

    from .util import glob_files_remotely, mkdir_p

    source_files = glob_files_remotely(client, source, pattern)

    if dryrun:
        source_files_list = list(source_files)

        logger = get_az_logger(__name__)
        logger.warning('download files from file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', source)
        logger.warning('destination %s', destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('      total %d', len(source_files_list))
        logger.warning(' operations')
        for f in source_files_list:
            logger.warning('  - %s/%s => %s', f[0], f[1], os.path.join(destination, *f))

        return []

    def _download_action(pair):
        destination_dir = os.path.join(destination, pair[0])
        mkdir_p(destination_dir)

        get_file_args = {
            'share_name': source,
            'directory_name': pair[0],
            'file_name': pair[1],
            'file_path': os.path.join(destination, *pair),
            'max_connections': max_connections
        }

        if supported_api_version(ResourceType.DATA_STORAGE, min_api='2016-05-31'):
            get_file_args['validate_content'] = validate_content

        client.get_file_to_path(**get_file_args)
        return client.make_file_url(source, *pair)

    return list(_download_action(f) for f in source_files)
Example 5
def storage_file_upload_batch(client, destination, source, pattern=None, dryrun=False, validate_content=False,
                              content_settings=None, max_connections=1, metadata=None):
    """ Upload local files to Azure Storage File Share in batch """

    from .util import glob_files_locally
    source_files = [c for c in glob_files_locally(source, pattern)]
    logger = get_az_logger(__name__)
    settings_class = get_sdk(ResourceType.DATA_STORAGE, 'file.models#ContentSettings')

    if dryrun:
        logger.info('upload files to file share')
        logger.info('    account %s', client.account_name)
        logger.info('      share %s', destination)
        logger.info('      total %d', len(source_files or []))
        return [{'File': client.make_file_url(destination, os.path.dirname(src), os.path.basename(dst)),
                 'Type': guess_content_type(src, content_settings, settings_class).content_type}
                for src, dst in source_files]

    # TODO: Performance improvement
    # 1. Upload files in parallel
    def _upload_action(src, dst):
        dir_name = os.path.dirname(dst)
        file_name = os.path.basename(dst)

        _make_directory_in_files_share(client, destination, dir_name)
        create_file_args = {
            'share_name': destination,
            'directory_name': dir_name,
            'file_name': file_name,
            'local_file_path': src,
            'content_settings': guess_content_type(src, content_settings, settings_class),
            'metadata': metadata,
            'max_connections': max_connections,
        }

        if supported_api_version(ResourceType.DATA_STORAGE, min_api='2016-05-31'):
            create_file_args['validate_content'] = validate_content

        logger.warning('uploading %s', src)
        client.create_file_from_path(**create_file_args)

        return client.make_file_url(destination, dir_name, file_name)

    return list(_upload_action(src, dst) for src, dst in source_files)
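
guess_content_type above falls back to a content type derived from the file name when the caller did not supply one. A minimal sketch of that idea, assuming mimetypes-based guessing (illustrative only; the CLI's actual helper may differ):

import mimetypes

def guess_content_type(file_path, given_settings, settings_class):
    """Return given_settings if it already carries a content type, else guess one from the extension."""
    if given_settings and given_settings.content_type is not None:
        return given_settings
    content_type, _ = mimetypes.guess_type(file_path)
    return settings_class(content_type=content_type)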
Example 6
    def __call__(self, poller):
        from msrest.exceptions import ClientException
        correlation_message = ''
        self.progress_controller.begin()
        correlation_id = None

        az_logger = azlogging.get_az_logger()
        is_verbose = any(handler.level <= logs.INFO for handler in az_logger.handlers)

        while not poller.done():
            self.progress_controller.add(message='Running')
            try:
                # pylint: disable=protected-access
                correlation_id = json.loads(
                    poller._response.__dict__['_content'].decode())['properties']['correlationId']

                correlation_message = 'Correlation ID: {}'.format(correlation_id)
            except:  # pylint: disable=bare-except
                pass

            current_time = datetime.datetime.now()
            if is_verbose and current_time - self.last_progress_report >= datetime.timedelta(seconds=10):
                self.last_progress_report = current_time
                try:
                    self._generate_template_progress(correlation_id)
                except Exception as ex:  # pylint: disable=broad-except
                    logger.warning('%s during progress reporting: %s', getattr(type(ex), '__name__', type(ex)), ex)
            try:
                self._delay()
            except KeyboardInterrupt:
                self.progress_controller.stop()
                logger.error('Long running operation wait cancelled.  %s', correlation_message)
                raise

        try:
            result = poller.result()
        except ClientException as client_exception:
            from azure.cli.core.commands.arm import handle_long_running_operation_exception
            self.progress_controller.stop()
            handle_long_running_operation_exception(client_exception)

        self.progress_controller.end()
        return result
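
For reference, the correlationId lookup in the polling loop expects the raw response body to be JSON shaped roughly like the following (a hypothetical ARM deployment payload; values are illustrative):

import json

# Hypothetical bytes payload mirroring poller._response._content for a deployment operation
raw = b'{"properties": {"correlationId": "00000000-0000-0000-0000-000000000000", "provisioningState": "Running"}}'

correlation_id = json.loads(raw.decode())['properties']['correlationId']
print('Correlation ID: {}'.format(correlation_id))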
Example 7
def storage_file_upload_batch(client, destination, source, pattern=None, dryrun=False,
                              validate_content=False, content_settings=None, max_connections=1,
                              metadata=None):
    """
    Upload local files to Azure Storage File Share in batch
    """

    from .util import glob_files_locally
    source_files = [c for c in glob_files_locally(source, pattern)]

    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('upload files to file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', destination)
        logger.warning('      total %d', len(source_files or []))
        logger.warning(' operations')
        for f in source_files or []:
            logger.warning('  - %s => %s', *f)

        return []

    # TODO: Performance improvement
    # 1. Upload files in parallel
    def _upload_action(source_pair):
        dir_name = os.path.dirname(source_pair[1])
        file_name = os.path.basename(source_pair[1])

        _make_directory_in_files_share(client, destination, dir_name)
        client.create_file_from_path(share_name=destination,
                                     directory_name=dir_name,
                                     file_name=file_name,
                                     local_file_path=source_pair[0],
                                     content_settings=content_settings,
                                     metadata=metadata,
                                     max_connections=max_connections,
                                     validate_content=validate_content)

        return client.make_file_url(destination, dir_name, file_name)

    return list(_upload_action(f) for f in source_files)
Example 8
def storage_file_download_batch(client, source, destination, pattern=None, dryrun=False,
                                validate_content=False, max_connections=1):
    """
    Download files from file share to local directory in batch
    """

    from .util import glob_files_remotely, mkdir_p

    source_files = glob_files_remotely(client, source, pattern)

    if dryrun:
        source_files_list = list(source_files)

        logger = get_az_logger(__name__)
        logger.warning('download files from file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', source)
        logger.warning('destination %s', destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('      total %d', len(source_files_list))
        logger.warning(' operations')
        for f in source_files_list:
            logger.warning('  - %s/%s => %s', f[0], f[1], os.path.join(destination, *f))

        return []

    def _download_action(pair):
        destination_dir = os.path.join(destination, pair[0])
        mkdir_p(destination_dir)
        client.get_file_to_path(source,
                                directory_name=pair[0],
                                file_name=pair[1],
                                file_path=os.path.join(destination, *pair),
                                validate_content=validate_content,
                                max_connections=max_connections)
        return client.make_file_url(source, *pair)

    return list(_download_action(f) for f in source_files)
Example 9
def storage_blob_copy_batch(client,
                            source_client,
                            destination_container=None,
                            source_container=None,
                            source_share=None,
                            source_sas=None,
                            pattern=None,
                            dryrun=False):
    """Copy a group of blob or files to a blob container."""
    logger = None
    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('copy files or blobs to blob container')
        logger.warning('    account %s', client.account_name)
        logger.warning('  container %s', destination_container)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s',
                       'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs for blob container

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(
            client)

        if not source_sas and client.account_name != source_client.account_name:
            # when the blob is copied across storage accounts without a SAS, generate a
            # short-lived SAS for it
            source_sas = create_short_lived_container_sas(
                source_client.account_name, source_client.account_key,
                source_container)

        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _copy_blob_to_blob_container(client, source_client,
                                                    destination_container,
                                                    source_container,
                                                    source_sas, blob_name)

        return list(
            filter_none(
                action_blob_copy(blob) for blob in collect_blobs(
                    source_client, source_container, pattern)))

    elif source_share:
        # copy blob from file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_file_share_from_storage_client(
            client)

        if not source_sas and client.account_name != source_client.account_name:
            # when the file is copied across storage accounts without a SAS, generate a short-lived SAS
            source_sas = create_short_lived_share_sas(
                source_client.account_name, source_client.account_key,
                source_share)

        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s',
                               os.path.join(dir_name, file_name))
            else:
                return _copy_file_to_blob_container(client, source_client,
                                                    destination_container,
                                                    source_share, source_sas,
                                                    dir_name, file_name)

        return list(
            filter_none(
                action_file_copy(file) for file in collect_files(
                    source_client, source_share, pattern)))
    else:
        raise ValueError(
            'Failed to find source. Neither blob container nor file share is specified.')
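
create_short_lived_container_sas is not shown here. A sketch of the idea, assuming the legacy azure-storage SDK used by this generation of the CLI (the real helper may pick different permissions or expiry):

from datetime import datetime, timedelta

from azure.storage.blob import BlockBlobService, ContainerPermissions

def create_short_lived_container_sas(account_name, account_key, container):
    """Return a read-only SAS token for the container that expires in one hour."""
    service = BlockBlobService(account_name=account_name, account_key=account_key)
    expiry = datetime.utcnow() + timedelta(hours=1)
    return service.generate_container_shared_access_signature(
        container, permission=ContainerPermissions.READ, expiry=expiry)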
Example 10
def storage_blob_upload_batch(client,
                              source,
                              destination,
                              pattern=None,
                              source_files=None,
                              destination_container_name=None,
                              blob_type=None,
                              content_settings=None,
                              metadata=None,
                              validate_content=False,
                              maxsize_condition=None,
                              max_connections=2,
                              lease_id=None,
                              if_modified_since=None,
                              if_unmodified_since=None,
                              if_match=None,
                              if_none_match=None,
                              timeout=None,
                              dryrun=False):
    """
    Upload files to storage container as blobs

    :param str source:
        The directory containing the files to be uploaded.

    :param str destination:
        The destination of this upload operation. It can be the container URL or the container
        name. When the destination is the container URL, the storage account name is parsed
        from the URL.

    :param str pattern:
        The pattern used for file globbing. The supported patterns are '*', '?', '[seq]',
        and '[!seq]'.

    :param bool dryrun:
        Show a summary of the operations to be taken instead of actually uploading the file(s).

    :param string if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the operation
        only if the resource's ETag matches the value specified.

    :param string if_none_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the
        operation only if the resource's ETag does not match the value specified. Specify the
        wildcard character (*) to perform the operation only if the resource does not exist,
        and fail the operation if it does exist.
    """
    def _append_blob(file_path, blob_name):
        if not client.exists(destination_container_name, blob_name):
            client.create_blob(container_name=destination_container_name,
                               blob_name=blob_name,
                               content_settings=content_settings,
                               metadata=metadata,
                               lease_id=lease_id,
                               if_modified_since=if_modified_since,
                               if_match=if_match,
                               if_none_match=if_none_match,
                               timeout=timeout)

        return client.append_blob_from_path(
            container_name=destination_container_name,
            blob_name=blob_name,
            file_path=file_path,
            progress_callback=lambda c, t: None,
            validate_content=validate_content,
            maxsize_condition=maxsize_condition,
            lease_id=lease_id,
            timeout=timeout)

    def _upload_blob(file_path, blob_name):
        return client.create_blob_from_path(
            container_name=destination_container_name,
            blob_name=blob_name,
            file_path=file_path,
            progress_callback=lambda c, t: None,
            content_settings=content_settings,
            metadata=metadata,
            validate_content=validate_content,
            max_connections=max_connections,
            lease_id=lease_id,
            if_modified_since=if_modified_since,
            if_unmodified_since=if_unmodified_since,
            if_match=if_match,
            if_none_match=if_none_match,
            timeout=timeout)

    upload_action = _upload_blob if blob_type == 'block' or blob_type == 'page' else _append_blob

    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('upload action: from %s to %s', source, destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('  container %s', destination_container_name)
        logger.warning('       type %s', blob_type)
        logger.warning('      total %d', len(source_files))
        logger.warning(' operations')
        for f in source_files or []:
            logger.warning('  - %s => %s', *f)
    else:
        for f in source_files or []:
            print('uploading {}'.format(f[0]))
            upload_action(*f)
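
source_files is expected to be an iterable of (local_path, blob_name) pairs, typically prepared by a validator before this command runs. A sketch of how such pairs could be built from a source directory (a hypothetical helper, not part of the code above):

import os
from fnmatch import fnmatch

def make_source_file_pairs(source_dir, pattern=None):
    """Yield (local_path, blob_name) pairs for every file under source_dir."""
    for root, _, files in os.walk(source_dir):
        for name in files:
            local_path = os.path.join(root, name)
            # blob names are relative to source_dir and use forward slashes
            blob_name = os.path.relpath(local_path, source_dir).replace(os.sep, '/')
            if pattern is None or fnmatch(blob_name, pattern):
                yield (local_path, blob_name)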
Example 11
def storage_file_copy_batch(client, source_client,
                            destination_share=None, destination_path=None,
                            source_container=None, source_share=None, source_sas=None,
                            pattern=None, dryrun=False, metadata=None, timeout=None):
    """
    Copy a group of files asynchronously
    """
    logger = None
    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('copy files or blobs to file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', destination_share)
        logger.warning('       path %s', destination_path)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s', 'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs to file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(client)

        # cache of directories known to exist in the destination file share; avoids creating
        # the same directory repeatedly and speeds up the copy.
        existing_dirs = set([])

        if not source_sas and client.account_name != source_client.account_name:
            # when a blob is copied across storage accounts without a SAS, generate a
            # short-lived SAS for it
            source_sas = create_short_lived_container_sas(source_client.account_name,
                                                          source_client.account_key,
                                                          source_container)

        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _create_file_and_directory_from_blob(
                    client, source_client, destination_share, source_container, source_sas,
                    blob_name, destination_dir=destination_path, metadata=metadata, timeout=timeout,
                    existing_dirs=existing_dirs)

        return list(filter_none(action_blob_copy(blob) for blob in
                                collect_blobs(source_client, source_container, pattern)))

    elif source_share:
        # copy files from share to share

        # if the source client is None, assume the file share is in the same storage account as
        # destination, therefore client is reused.
        source_client = source_client or client

        # cache of directories known to exist in the destination file share; avoids creating
        # the same directory repeatedly and speeds up the copy.
        existing_dirs = set([])

        if not source_sas and client.account_name != source_client.account_name:
            # when a file is copied across storage accounts without a SAS, generate a
            # short-lived SAS for it
            source_sas = create_short_lived_share_sas(source_client.account_name,
                                                      source_client.account_key,
                                                      source_share)

        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s', os.path.join(dir_name, file_name))
            else:
                return _create_file_and_directory_from_file(
                    client, source_client, destination_share, source_share, source_sas, dir_name,
                    file_name, destination_dir=destination_path, metadata=metadata,
                    timeout=timeout, existing_dirs=existing_dirs)

        return list(filter_none(action_file_copy(file) for file in
                                collect_files(source_client, source_share, pattern)))
    else:
        # won't happen, the validator should ensure either source_container or source_share is set
        raise ValueError('Failed to find source. Neither blob container nor file share is specified.')
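
The existing_dirs set memoizes directories already created in the destination share so each one is created at most once per batch. A sketch of that caching idea (hypothetical helper name; the CLI's actual directory helper may differ):

def ensure_directory(file_service, share_name, directory_name, existing_dirs):
    """Create directory_name (and its parents) once per batch, using existing_dirs as a cache."""
    if not directory_name or directory_name in existing_dirs:
        return
    # create parents first, e.g. 'a/b/c' -> 'a', then 'a/b', then 'a/b/c'
    parts = directory_name.split('/')
    for i in range(1, len(parts) + 1):
        subdir = '/'.join(parts[:i])
        if subdir not in existing_dirs:
            file_service.create_directory(share_name, subdir)
            existing_dirs.add(subdir)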
Example 12
import os
import random
import socket
import string
import sys
import threading
from subprocess import PIPE, CalledProcessError, Popen, check_output
from time import sleep

import azure.cli.command_modules.project.naming as naming
import azure.cli.command_modules.project.settings as settings
import azure.cli.core.azlogging as azlogging  # pylint: disable=invalid-name
from azure.cli.core._util import CLIError

logger = azlogging.get_az_logger(__name__)  # pylint: disable=invalid-name


def get_random_registry_name():
    """
    Gets a random name for the Azure
    Container Registry
    """
    return get_random_string(only_letters=True)


def get_random_string(length=6, only_letters=False):
    """
    Gets a random lowercase string made
    from ascii letters and digits
    """
Example 13
    def test_get_az_logger(self):
        az_logger = azlogging.get_az_logger()
        self.assertEqual(az_logger.name, 'az')
Example 14
def storage_file_copy_batch(client, source_client,
                            destination_share=None, destination_path=None,
                            source_container=None, source_share=None, source_sas=None,
                            pattern=None, dryrun=False, metadata=None, timeout=None):
    """
    Copy a group of files asynchronously
    """
    logger = None
    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('copy files or blobs to file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', destination_share)
        logger.warning('       path %s', destination_path)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s', 'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs to file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(client)

        # cache of directories known to exist in the destination file share; avoids creating
        # the same directory repeatedly and speeds up the copy.
        existing_dirs = set([])

        if not source_sas and client.account_name != source_client.account_name:
            # when a blob is copied across storage accounts without a SAS, generate a
            # short-lived SAS for it
            source_sas = create_short_lived_container_sas(source_client.account_name,
                                                          source_client.account_key,
                                                          source_container)

        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _create_file_and_directory_from_blob(
                    client, source_client, destination_share, source_container, source_sas,
                    blob_name, destination_dir=destination_path, metadata=metadata, timeout=timeout,
                    existing_dirs=existing_dirs)

        return list(filter_none(action_blob_copy(blob) for blob in
                                collect_blobs(source_client, source_container, pattern)))

    elif source_share:
        # copy files from share to share

        # if the source client is None, assume the file share is in the same storage account as
        # destination, therefore client is reused.
        source_client = source_client or client

        # cache of directories known to exist in the destination file share; avoids creating
        # the same directory repeatedly and speeds up the copy.
        existing_dirs = set([])

        if not source_sas and client.account_name != source_client.account_name:
            # when a file is copied across storage accounts without a SAS, generate a
            # short-lived SAS for it
            source_sas = create_short_lived_share_sas(source_client.account_name,
                                                      source_client.account_key,
                                                      source_share)

        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s', os.path.join(dir_name, file_name))
            else:
                return _create_file_and_directory_from_file(
                    client, source_client, destination_share, source_share, source_sas, dir_name,
                    file_name, destination_dir=destination_path, metadata=metadata,
                    timeout=timeout, existing_dirs=existing_dirs)

        return list(filter_none(action_file_copy(file) for file in
                                collect_files(source_client, source_share, pattern)))
    else:
        # won't happen, the validator should ensure either source_container or source_share is set
        raise ValueError('Failed to find source. Neither blob container nor file share is specified.')
Example 15
def storage_file_upload_batch(client,
                              destination,
                              source,
                              pattern=None,
                              dryrun=False,
                              validate_content=False,
                              content_settings=None,
                              max_connections=1,
                              metadata=None):
    """ Upload local files to Azure Storage File Share in batch """

    from .util import glob_files_locally
    source_files = [c for c in glob_files_locally(source, pattern)]
    logger = get_az_logger(__name__)
    settings_class = get_sdk(ResourceType.DATA_STORAGE,
                             'file.models#ContentSettings')

    if dryrun:
        logger.info('upload files to file share')
        logger.info('    account %s', client.account_name)
        logger.info('      share %s', destination)
        logger.info('      total %d', len(source_files or []))
        return [{'File': client.make_file_url(destination, os.path.dirname(src), os.path.basename(dst)),
                 'Type': guess_content_type(src, content_settings, settings_class).content_type}
                for src, dst in source_files]

    # TODO: Performance improvement
    # 1. Upload files in parallel
    def _upload_action(src, dst):
        dir_name = os.path.dirname(dst)
        file_name = os.path.basename(dst)

        _make_directory_in_files_share(client, destination, dir_name)
        create_file_args = {
            'share_name': destination,
            'directory_name': dir_name,
            'file_name': file_name,
            'local_file_path': src,
            'content_settings': guess_content_type(src, content_settings, settings_class),
            'metadata': metadata,
            'max_connections': max_connections,
        }

        if supported_api_version(ResourceType.DATA_STORAGE,
                                 min_api='2016-05-31'):
            create_file_args['validate_content'] = validate_content

        logger.warning('uploading %s', src)
        client.create_file_from_path(**create_file_args)

        return client.make_file_url(destination, dir_name, file_name)

    return list(_upload_action(src, dst) for src, dst in source_files)
Example 16
    def validator(namespace):
        type_field_name = '{}_type'.format(property_name)
        property_val = getattr(namespace, property_name, None)
        parent_val = getattr(namespace, parent_name,
                             None) if parent_name else None

        # Check for the different scenarios (order matters)
        # 1) provided value indicates None (pair of empty quotes)
        if property_val in ('', '""', "''") or (property_val is None
                                                and default_none):
            if not allow_none:
                raise CLIError('{} cannot be None.'.format(property_option))
            setattr(namespace, type_field_name, 'none')
            setattr(namespace, property_name, None)
            if parent_name and parent_val:
                logger = azlogging.get_az_logger(__name__)
                logger.warning('Ignoring: %s %s', parent_option, parent_val)
                setattr(namespace, parent_name, None)
            return  # SUCCESS

        # Create a resource ID we can check for existence.
        (resource_id_parts,
         value_was_id) = _validate_name_or_id(namespace.resource_group_name,
                                              property_val, property_type,
                                              parent_val, parent_type)

        # 2) resource exists
        if resource_exists(**resource_id_parts):
            setattr(namespace, type_field_name, 'existingId')
            setattr(namespace, property_name, resource_id(**resource_id_parts))
            if parent_val:
                if value_was_id:
                    logger = azlogging.get_az_logger(__name__)
                    logger.warning('Ignoring: %s %s', parent_option,
                                   parent_val)
                setattr(namespace, parent_name, None)
            return  # SUCCESS

        # if a parent name was required but not specified, raise a usage error
        if has_parent and not value_was_id and not parent_val and not allow_new:
            raise ValueError(
                'incorrect usage: {0} ID | {0} NAME {1} NAME'.format(
                    property_option, parent_option))

        # if a non-existent ID was supplied, raise an error depending on whether a new resource
        # can be created.
        if value_was_id:
            usage_message = '{} NAME'.format(property_option) if not has_parent \
                else '{} NAME [{} NAME]'.format(property_option, parent_option)
            action_message = 'Specify ( {} ) to create a new resource.'.format(usage_message) if \
                allow_new else 'Create the required resource and try again.'
            raise CLIError('{} {} does not exist. {}'.format(
                property_name, property_val, action_message))

        # 3) try to create new resource
        if allow_new:
            setattr(namespace, type_field_name, 'new')
        else:
            raise CLIError(
                '{} {} does not exist. Create the required resource and try again.'
                .format(property_name, property_val))
Example 17
def storage_blob_upload_batch(
        client,
        source,
        destination,
        pattern=None,
        source_files=None,  # pylint: disable=too-many-locals
        destination_container_name=None,
        blob_type=None,
        content_settings=None,
        metadata=None,
        validate_content=False,
        maxsize_condition=None,
        max_connections=2,
        lease_id=None,
        if_modified_since=None,
        if_unmodified_since=None,
        if_match=None,
        if_none_match=None,
        timeout=None,
        dryrun=False):
    """
    Upload files to storage container as blobs

    :param str source:
        The directory containing the files to be uploaded.

    :param str destination:
        The destination of this upload operation. It can be the container URL or the container
        name. When the destination is the container URL, the storage account name is parsed
        from the URL.

    :param str pattern:
        The pattern used for file globbing. The supported patterns are '*', '?', '[seq]',
        and '[!seq]'.

    :param bool dryrun:
        Show a summary of the operations to be taken instead of actually uploading the file(s).

    :param string if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the operation
        only if the resource's ETag matches the value specified.

    :param string if_none_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the
        operation only if the resource's ETag does not match the value specified. Specify the
        wildcard character (*) to perform the operation only if the resource does not exist,
        and fail the operation if it does exist.
    """
    def _append_blob(file_path, blob_name, blob_content_settings):
        if not client.exists(destination_container_name, blob_name):
            client.create_blob(container_name=destination_container_name,
                               blob_name=blob_name,
                               content_settings=blob_content_settings,
                               metadata=metadata,
                               lease_id=lease_id,
                               if_modified_since=if_modified_since,
                               if_match=if_match,
                               if_none_match=if_none_match,
                               timeout=timeout)

        append_blob_args = {
            'container_name': destination_container_name,
            'blob_name': blob_name,
            'file_path': file_path,
            'progress_callback': lambda c, t: None,
            'maxsize_condition': maxsize_condition,
            'lease_id': lease_id,
            'timeout': timeout
        }

        if supported_api_version(ResourceType.DATA_STORAGE,
                                 min_api='2016-05-31'):
            append_blob_args['validate_content'] = validate_content

        return client.append_blob_from_path(**append_blob_args)

    def _upload_blob(file_path, blob_name, blob_content_settings):
        create_blob_args = {
            'container_name': destination_container_name,
            'blob_name': blob_name,
            'file_path': file_path,
            'progress_callback': lambda c, t: None,
            'content_settings': blob_content_settings,
            'metadata': metadata,
            'max_connections': max_connections,
            'lease_id': lease_id,
            'if_modified_since': if_modified_since,
            'if_unmodified_since': if_unmodified_since,
            'if_match': if_match,
            'if_none_match': if_none_match,
            'timeout': timeout
        }

        if supported_api_version(ResourceType.DATA_STORAGE,
                                 min_api='2016-05-31'):
            create_blob_args['validate_content'] = validate_content

        return client.create_blob_from_path(**create_blob_args)

    def _create_return_result(blob_name, blob_content_settings, upload_result=None):
        return {
            'Blob': client.make_blob_url(destination_container_name, blob_name),
            'Type': blob_content_settings.content_type,
            'Last Modified': upload_result.last_modified if upload_result else None,
            'eTag': upload_result.etag if upload_result else None
        }

    upload_action = _upload_blob if blob_type == 'block' or blob_type == 'page' else _append_blob
    logger = get_az_logger(__name__)
    settings_class = get_sdk(ResourceType.DATA_STORAGE,
                             'blob.models#ContentSettings')

    results = []
    if dryrun:
        logger.info('upload action: from %s to %s', source, destination)
        logger.info('    pattern %s', pattern)
        logger.info('  container %s', destination_container_name)
        logger.info('       type %s', blob_type)
        logger.info('      total %d', len(source_files))
        results = []
        for src, dst in source_files or []:
            results.append(
                _create_return_result(
                    dst,
                    guess_content_type(src, content_settings, settings_class)))
    else:
        for src, dst in source_files or []:
            logger.warning('uploading {}'.format(src))
            guessed_content_settings = guess_content_type(
                src, content_settings, settings_class)
            results.append(
                _create_return_result(
                    dst, guessed_content_settings,
                    upload_action(src, dst, guessed_content_settings)))

    return results
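
For reference, each entry returned above has the shape produced by _create_return_result; in a dry run the upload-dependent fields remain None (values below are illustrative):

# Illustrative shape of a single dry-run entry (no upload has happened, so result fields are None)
dry_run_entry = {
    'Blob': 'https://myaccount.blob.core.windows.net/mycontainer/data/report.csv',
    'Type': 'text/csv',
    'Last Modified': None,
    'eTag': None
}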
Example 18
    def test_get_az_logger_module(self):
        az_module_logger = azlogging.get_az_logger('azure.cli.module')
        self.assertEqual(az_module_logger.name, 'az.azure.cli.module')
Example 19
# AZURE CLI RBAC TEST DEFINITIONS
import json
import os
import tempfile
import time
import unittest

from azure.cli.testsdk import (LiveScenarioTest, ScenarioTest, ResourceGroupPreparer, KeyVaultPreparer,
                               JMESPathCheck as JMESPathCheckV2)
import azure.cli.core.azlogging as azlogging
from azure.cli.testsdk.vcr_test_base import (VCRTestBase, JMESPathCheck, ResourceGroupVCRTestBase, NoneCheck,
                                             MOCKED_SUBSCRIPTION_ID)

logger = azlogging.get_az_logger(__name__)


class RbacSPSecretScenarioTest(LiveScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_minimal')
    def test_create_for_rbac_with_secret(self, resource_group):

        sp_name = 'http://{}'.format(resource_group)
        try:
            self.cmd('ad sp create-for-rbac -n {}2'.format(sp_name), checks=[
                JMESPathCheckV2('name', sp_name)
            ])
        finally:
            self.cmd('ad app delete --id {}2'.format(sp_name))

    @ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_with_password')
Example 20
def _log_hostname():
    import socket
    import azure.cli.core.azlogging as azlogging
    logger = azlogging.get_az_logger(__name__)
    logger.warning("A Cloud Shell credential problem occurred. When you report the issue with the error "
                   "below, please mention the hostname '%s'", socket.gethostname())
Example 21
def storage_blob_upload_batch(
        client,
        source,
        destination,
        pattern=None,
        source_files=None,  # pylint: disable=too-many-locals
        destination_container_name=None,
        blob_type=None,
        content_settings=None,
        metadata=None,
        validate_content=False,
        maxsize_condition=None,
        max_connections=2,
        lease_id=None,
        if_modified_since=None,
        if_unmodified_since=None,
        if_match=None,
        if_none_match=None,
        timeout=None,
        dryrun=False):
    def _create_return_result(blob_name, blob_content_settings, upload_result=None):
        return {
            'Blob': client.make_blob_url(destination_container_name, blob_name),
            'Type': blob_content_settings.content_type,
            'Last Modified': upload_result.last_modified if upload_result else None,
            'eTag': upload_result.etag if upload_result else None
        }

    logger = get_az_logger(__name__)
    settings_class = get_sdk(ResourceType.DATA_STORAGE,
                             'blob.models#ContentSettings')

    results = []
    if dryrun:
        logger.info('upload action: from %s to %s', source, destination)
        logger.info('    pattern %s', pattern)
        logger.info('  container %s', destination_container_name)
        logger.info('       type %s', blob_type)
        logger.info('      total %d', len(source_files))
        results = []
        for src, dst in source_files or []:
            results.append(
                _create_return_result(
                    dst,
                    guess_content_type(src, content_settings, settings_class)))
    else:
        for src, dst in source_files or []:
            logger.warning('uploading {}'.format(src))
            guessed_content_settings = guess_content_type(
                src, content_settings, settings_class)
            result = upload_blob(client,
                                 destination_container_name,
                                 dst,
                                 src,
                                 blob_type=blob_type,
                                 content_settings=guessed_content_settings,
                                 metadata=metadata,
                                 validate_content=validate_content,
                                 maxsize_condition=maxsize_condition,
                                 max_connections=max_connections,
                                 lease_id=lease_id,
                                 if_modified_since=if_modified_since,
                                 if_unmodified_since=if_unmodified_since,
                                 if_match=if_match,
                                 if_none_match=if_none_match,
                                 timeout=timeout)
            results.append(
                _create_return_result(dst, guessed_content_settings, result))
    return results
Example 22
def storage_blob_upload_batch(client, source, destination, pattern=None, source_files=None,
                              destination_container_name=None, blob_type=None,
                              content_settings=None, metadata=None, validate_content=False,
                              maxsize_condition=None, max_connections=2, lease_id=None,
                              if_modified_since=None, if_unmodified_since=None, if_match=None,
                              if_none_match=None, timeout=None, dryrun=False):
    """
    Upload files to storage container as blobs

    :param str source:
        The directory containing the files to be uploaded.

    :param str destination:
        The destination of this upload operation. It can be the container URL or the container
        name. When the destination is the container URL, the storage account name is parsed
        from the URL.

    :param str pattern:
        The pattern used for file globbing. The supported patterns are '*', '?', '[seq]',
        and '[!seq]'.

    :param bool dryrun:
        Show a summary of the operations to be taken instead of actually uploading the file(s).

    :param string if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the operation
        only if the resource's ETag matches the value specified.

    :param string if_none_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the
        operation only if the resource's ETag does not match the value specified. Specify the
        wildcard character (*) to perform the operation only if the resource does not exist,
        and fail the operation if it does exist.
    """
    def _append_blob(file_path, blob_name):
        if not client.exists(destination_container_name, blob_name):
            client.create_blob(
                container_name=destination_container_name,
                blob_name=blob_name,
                content_settings=content_settings,
                metadata=metadata,
                lease_id=lease_id,
                if_modified_since=if_modified_since,
                if_match=if_match,
                if_none_match=if_none_match,
                timeout=timeout)

        return client.append_blob_from_path(
            container_name=destination_container_name,
            blob_name=blob_name,
            file_path=file_path,
            progress_callback=lambda c, t: None,
            validate_content=validate_content,
            maxsize_condition=maxsize_condition,
            lease_id=lease_id,
            timeout=timeout)

    def _upload_blob(file_path, blob_name):
        return client.create_blob_from_path(
            container_name=destination_container_name,
            blob_name=blob_name,
            file_path=file_path,
            progress_callback=lambda c, t: None,
            content_settings=content_settings,
            metadata=metadata,
            validate_content=validate_content,
            max_connections=max_connections,
            lease_id=lease_id,
            if_modified_since=if_modified_since,
            if_unmodified_since=if_unmodified_since,
            if_match=if_match,
            if_none_match=if_none_match,
            timeout=timeout)

    upload_action = _upload_blob if blob_type == 'block' or blob_type == 'page' else _append_blob

    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('upload action: from %s to %s', source, destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('  container %s', destination_container_name)
        logger.warning('       type %s', blob_type)
        logger.warning('      total %d', len(source_files))
        logger.warning(' operations')
        for f in source_files or []:
            logger.warning('  - %s => %s', *f)
    else:
        for f in source_files or []:
            print('uploading {}'.format(f[0]))
            upload_action(*f)
Example 23
def storage_blob_copy_batch(client, source_client,
                            destination_container=None, source_container=None, source_share=None,
                            source_sas=None, pattern=None, dryrun=False):
    """Copy a group of blob or files to a blob container."""
    logger = None
    if dryrun:
        logger = get_az_logger(__name__)
        logger.warning('copy files or blobs to blob container')
        logger.warning('    account %s', client.account_name)
        logger.warning('  container %s', destination_container)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s', 'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs for blob container

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(client)

        if not source_sas and client.account_name != source_client.account_name:
            # when the blob is copied across storage accounts without a SAS, generate a
            # short-lived SAS for it
            source_sas = create_short_lived_container_sas(source_client.account_name,
                                                          source_client.account_key,
                                                          source_container)

        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _copy_blob_to_blob_container(client, source_client, destination_container,
                                                    source_container, source_sas, blob_name)

        return list(filter_none(action_blob_copy(blob) for blob in collect_blobs(source_client,
                                                                                 source_container,
                                                                                 pattern)))

    elif source_share:
        # copy blob from file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_file_share_from_storage_client(client)

        if not source_sas and client.account_name != source_client.account_name:
            # when the file is copied across storage accounts without a SAS, generate a short-lived SAS
            source_sas = create_short_lived_share_sas(source_client.account_name,
                                                      source_client.account_key,
                                                      source_share)

        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s', os.path.join(dir_name, file_name))
            else:
                return _copy_file_to_blob_container(client, source_client, destination_container,
                                                    source_share, source_sas, dir_name, file_name)

        return list(filter_none(action_file_copy(file) for file in collect_files(source_client,
                                                                                 source_share,
                                                                                 pattern)))
    else:
        raise ValueError('Failed to find source. Neither blob container nor file share is specified.')
Example 24
import sys
import os
import uuid
import argparse
from azure.cli.core.parser import AzCliCommandParser, enable_autocomplete
from azure.cli.core._output import CommandResultItem
import azure.cli.core.extensions
import azure.cli.core._help as _help
import azure.cli.core.azlogging as azlogging
from azure.cli.core.util import todict, truncate_text, CLIError, read_file_content
from azure.cli.core._config import az_config
import azure.cli.core.commands.progress as progress

import azure.cli.core.telemetry as telemetry

logger = azlogging.get_az_logger(__name__)

ARGCOMPLETE_ENV_NAME = '_ARGCOMPLETE'


class Configuration(object):  # pylint: disable=too-few-public-methods
    """The configuration object tracks session specific data such
    as output formats, available commands etc.
    """

    def __init__(self):
        self.output_format = None

    def get_command_table(self, argv=None):  # pylint: disable=no-self-use
        import azure.cli.core.commands as commands
        # Find the first noun on the command line and only load commands from that
Example 25
def storage_blob_upload_batch(client, source, destination, pattern=None, source_files=None,  # pylint: disable=too-many-locals
                              destination_container_name=None, blob_type=None,
                              content_settings=None, metadata=None, validate_content=False,
                              maxsize_condition=None, max_connections=2, lease_id=None,
                              if_modified_since=None, if_unmodified_since=None, if_match=None,
                              if_none_match=None, timeout=None, dryrun=False):
    """
    Upload files to storage container as blobs

    :param str source:
        The directory containing the files to be uploaded.

    :param str destination:
        The destination of this upload operation. It can be the container URL or the container
        name. When the destination is the container URL, the storage account name is parsed
        from the URL.

    :param str pattern:
        The pattern used for file globbing. The supported patterns are '*', '?', '[seq]',
        and '[!seq]'.

    :param bool dryrun:
        Show a summary of the operations to be taken instead of actually uploading the file(s).

    :param string if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the operation
        only if the resource's ETag matches the value specified.

    :param string if_none_match:
        An ETag value, or the wildcard character (*). Specify this header to perform the
        operation only if the resource's ETag does not match the value specified. Specify the
        wildcard character (*) to perform the operation only if the resource does not exist,
        and fail the operation if it does exist.
    """

    def _append_blob(file_path, blob_name, blob_content_settings):
        if not client.exists(destination_container_name, blob_name):
            client.create_blob(
                container_name=destination_container_name,
                blob_name=blob_name,
                content_settings=blob_content_settings,
                metadata=metadata,
                lease_id=lease_id,
                if_modified_since=if_modified_since,
                if_match=if_match,
                if_none_match=if_none_match,
                timeout=timeout)

        append_blob_args = {
            'container_name': destination_container_name,
            'blob_name': blob_name,
            'file_path': file_path,
            'progress_callback': lambda c, t: None,
            'maxsize_condition': maxsize_condition,
            'lease_id': lease_id,
            'timeout': timeout
        }

        if supported_api_version(ResourceType.DATA_STORAGE, min_api='2016-05-31'):
            append_blob_args['validate_content'] = validate_content

        return client.append_blob_from_path(**append_blob_args)

    def _upload_blob(file_path, blob_name, blob_content_settings):
        create_blob_args = {
            'container_name': destination_container_name,
            'blob_name': blob_name,
            'file_path': file_path,
            'progress_callback': lambda c, t: None,
            'content_settings': blob_content_settings,
            'metadata': metadata,
            'max_connections': max_connections,
            'lease_id': lease_id,
            'if_modified_since': if_modified_since,
            'if_unmodified_since': if_unmodified_since,
            'if_match': if_match,
            'if_none_match': if_none_match,
            'timeout': timeout
        }

        if supported_api_version(ResourceType.DATA_STORAGE, min_api='2016-05-31'):
            create_blob_args['validate_content'] = validate_content

        return client.create_blob_from_path(**create_blob_args)

    def _create_return_result(blob_name, blob_content_settings, upload_result=None):
        return {
            'Blob': client.make_blob_url(destination_container_name, blob_name),
            'Type': blob_content_settings.content_type,
            'Last Modified': upload_result.last_modified if upload_result else None,
            'eTag': upload_result.etag if upload_result else None}

    upload_action = _upload_blob if blob_type == 'block' or blob_type == 'page' else _append_blob
    logger = get_az_logger(__name__)
    settings_class = get_sdk(ResourceType.DATA_STORAGE, 'blob.models#ContentSettings')

    results = []
    if dryrun:
        logger.info('upload action: from %s to %s', source, destination)
        logger.info('    pattern %s', pattern)
        logger.info('  container %s', destination_container_name)
        logger.info('       type %s', blob_type)
        logger.info('      total %d', len(source_files))
        results = []
        for src, dst in source_files or []:
            results.append(_create_return_result(dst, guess_content_type(src, content_settings, settings_class)))
    else:
        for src, dst in source_files or []:
            logger.warning('uploading {}'.format(src))
            guessed_content_settings = guess_content_type(src, content_settings, settings_class)
            results.append(
                _create_return_result(dst, guessed_content_settings, upload_action(src, dst, guessed_content_settings)))

    return results