Example #1
    def init_command_file_logging(cli_ctx, **kwargs):
        def _delete_old_logs(log_dir):
            """
            Periodically delete the 5 oldest command log files, ensuring that only the history of the last
            25 commands (or fewer) is kept.
            """

            # get log file names and sort them from newest to oldest file.
            log_file_names = [file for file in os.listdir(log_dir) if file.endswith(".log")]
            sorted_files = sorted(log_file_names, reverse=True)

            # if we have too many files, delete the 5 oldest command log files.
            if len(sorted_files) > 25:
                for file in sorted_files[-5:]:
                    try:
                        os.remove(os.path.join(log_dir, file))
                    except OSError:  # FileNotFoundError introduced in Python 3
                        continue

        # if tab completion is active (i.e. this is not a command), don't log to file.
        if not cli_ctx.data.get('completer_active', False):
            self = cli_ctx.logging
            args = kwargs['args']

            cmd_logger = logging.getLogger(AzCliLogging._COMMAND_METADATA_LOGGER)

            self._init_command_logfile_handlers(cmd_logger, args)  # pylint: disable=protected-access
            get_logger(__name__).debug("metadata file logging enabled - writing logs to '%s'.", self.command_log_dir)

            _delete_old_logs(self.command_log_dir)
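The retention policy above can also be expressed as a standalone helper. A minimal sketch (the function name trim_command_logs, the keep/batch parameters, and the assumption that log file names sort chronologically are illustrative, not part of the original code):

import os

def trim_command_logs(log_dir, keep=25, batch=5):
    """Keep at most `keep` .log files in log_dir; once exceeded, drop the `batch` oldest.
    Assumes file names sort chronologically (e.g. they start with a timestamp)."""
    log_files = sorted(f for f in os.listdir(log_dir) if f.endswith('.log'))
    if len(log_files) > keep:
        for name in log_files[:batch]:
            try:
                os.remove(os.path.join(log_dir, name))
            except OSError:
                continue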
Example #2
 def _wrapped_func(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except Exception as ex:  # noqa pylint: disable=broad-except
         get_logger(__name__).info('Suppress exception %s', ex)
         if fallback_return is not None:
             return fallback_return
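The _wrapped_func above is the inner function of an exception-suppressing decorator; the enclosing decorator factory falls outside the excerpt. A minimal, self-contained sketch of what such a factory could look like, assuming the name suppress_all_exceptions and using the standard logging module in place of knack (both are assumptions, not the original source):

import functools
import logging

def suppress_all_exceptions(fallback_return=None):
    """Hypothetical decorator factory: swallow any exception from the wrapped call
    and return fallback_return instead of propagating."""
    def _decorator(func):
        @functools.wraps(func)
        def _wrapped_func(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as ex:  # pylint: disable=broad-except
                logging.getLogger(__name__).info('Suppress exception %s', ex)
                if fallback_return is not None:
                    return fallback_return
        return _wrapped_func
    return _decorator

@suppress_all_exceptions(fallback_return={})
def read_settings(text):
    import json
    return json.loads(text)

print(read_settings('not valid json'))  # prints {} instead of raising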
Example #3
def storage_file_delete_batch(cmd, client, source, pattern=None, dryrun=False, timeout=None):
    """
    Delete files from file share in batch
    """

    def delete_action(file_pair):
        delete_file_args = {'share_name': source, 'directory_name': file_pair[0], 'file_name': file_pair[1],
                            'timeout': timeout}

        return client.delete_file(**delete_file_args)

    from azure.cli.command_modules.storage.util import glob_files_remotely
    source_files = list(glob_files_remotely(cmd, client, source, pattern))

    if dryrun:
        logger = get_logger(__name__)
        logger.warning('delete files from %s', source)
        logger.warning('    pattern %s', pattern)
        logger.warning('      share %s', source)
        logger.warning('      total %d', len(source_files))
        logger.warning(' operations')
        for f in source_files:
            logger.warning('  - %s/%s', f[0], f[1])
        return []

    for f in source_files:
        delete_action(f)
Example #4
def force_progress_logging():
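    # NOTE: the yield below hands test_io to the caller, so in context this generator is
    # presumably exposed as a context manager (e.g. via contextlib.contextmanager outside
    # this excerpt); the handler and log levels are restored once the caller is done.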
    from six import StringIO
    import logging
    from knack.log import get_logger
    from .reverse_dependency import get_commands_loggers

    cmd_logger = get_commands_loggers()

    # register a progress logger handler to get the content to verify
    test_io = StringIO()
    test_handler = logging.StreamHandler(test_io)
    cmd_logger.addHandler(test_handler)
    old_cmd_level = cmd_logger.level
    cmd_logger.setLevel(logging.INFO)

    # this tells progress logger we are under verbose, so should log
    az_logger = get_logger()
    old_az_level = az_logger.handlers[0].level
    az_logger.handlers[0].level = logging.INFO

    yield test_io

    # restore old logging level and unplug the test handler
    cmd_logger.removeHandler(test_handler)
    cmd_logger.setLevel(old_cmd_level)
    az_logger.handlers[0].level = old_az_level
Example #5
def storage_blob_delete_batch(client, source, source_container_name, pattern=None, lease_id=None,
                              delete_snapshots=None, if_modified_since=None, if_unmodified_since=None, if_match=None,
                              if_none_match=None, timeout=None, dryrun=False):
    @check_precondition_success
    def _delete_blob(blob_name):
        delete_blob_args = {
            'container_name': source_container_name,
            'blob_name': blob_name,
            'lease_id': lease_id,
            'delete_snapshots': delete_snapshots,
            'if_modified_since': if_modified_since,
            'if_unmodified_since': if_unmodified_since,
            'if_match': if_match,
            'if_none_match': if_none_match,
            'timeout': timeout
        }
        return client.delete_blob(**delete_blob_args)

    source_blobs = list(collect_blobs(client, source_container_name, pattern))

    if dryrun:
        logger = get_logger(__name__)
        logger.warning('delete action: from %s', source)
        logger.warning('    pattern %s', pattern)
        logger.warning('  container %s', source_container_name)
        logger.warning('      total %d', len(source_blobs))
        logger.warning(' operations')
        for blob in source_blobs:
            logger.warning('  - %s', blob)
        return []

    return [result for include, result in (_delete_blob(blob) for blob in source_blobs) if include]
Example #6
def validate_client_parameters(cmd, namespace):
    """ Retrieves storage connection parameters from environment variables and parses out connection string into
    account name and key """
    n = namespace

    def get_config_value(section, key, default):
        return cmd.cli_ctx.config.get(section, key, default)

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value('storage', 'account', None)
        if auth_mode == 'login':
            n.token_credential = _create_token_credential(cmd.cli_ctx)

            # give warning if there are account key args being ignored
            account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
                                n.connection_string and "--connection-string"]
            account_key_args = [arg for arg in account_key_args if arg]

            if account_key_args:
                from knack.log import get_logger

                logger = get_logger(__name__)
                logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                               ', '.join(account_key_args))
            return

    if not n.connection_string:
        n.connection_string = get_config_value('storage', 'connection_string', None)

    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        if not n.account_name or not n.account_key:
            from knack.util import CLIError
            raise CLIError('Connection-string: %s, is malformed. Some shell environments require the '
                           'connection string to be surrounded by quotes.' % n.connection_string)

    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        n.account_name = get_config_value('storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value('storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value('storage', 'sas_token', None)

    # strip the '?' from the sas token. the portal and the command line return the sas token in
    # different forms
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
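For reference, the connection-string branch above only splits 'Key=Value;Key=Value' pairs and picks out AccountName and AccountKey. A rough, hypothetical stand-in for validate_key_value_pairs (not the actual helper) to show the shape of that parsing:

def parse_key_value_pairs(text):
    # split on ';' and on the first '=' only, so values containing '=' survive
    return dict(part.split('=', 1) for part in text.split(';') if '=' in part)

conn = 'DefaultEndpointsProtocol=https;AccountName=mystorage;AccountKey=abc123=='
pairs = parse_key_value_pairs(conn)
print(pairs.get('AccountName'), pairs.get('AccountKey'))  # mystorage abc123==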
Example #7
def storage_blob_upload_batch(cmd, client, source, destination, pattern=None,  # pylint: disable=too-many-locals
                              source_files=None, destination_path=None,
                              destination_container_name=None, blob_type=None,
                              content_settings=None, metadata=None, validate_content=False,
                              maxsize_condition=None, max_connections=2, lease_id=None, progress_callback=None,
                              if_modified_since=None, if_unmodified_since=None, if_match=None,
                              if_none_match=None, timeout=None, dryrun=False):
    def _create_return_result(blob_name, blob_content_settings, upload_result=None):
        blob_name = normalize_blob_file_path(destination_path, blob_name)
        return {
            'Blob': client.make_blob_url(destination_container_name, blob_name),
            'Type': blob_content_settings.content_type,
            'Last Modified': upload_result.last_modified if upload_result else None,
            'eTag': upload_result.etag if upload_result else None}

    logger = get_logger(__name__)
    t_content_settings = cmd.get_models('blob.models#ContentSettings')

    results = []
    if dryrun:
        logger.info('upload action: from %s to %s', source, destination)
        logger.info('    pattern %s', pattern)
        logger.info('  container %s', destination_container_name)
        logger.info('       type %s', blob_type)
        logger.info('      total %d', len(source_files))
        results = []
        for src, dst in source_files or []:
            results.append(_create_return_result(dst, guess_content_type(src, content_settings, t_content_settings)))
    else:
        @check_precondition_success
        def _upload_blob(*args, **kwargs):
            return upload_blob(*args, **kwargs)

        for src, dst in source_files or []:
            logger.warning('uploading %s', src)
            guessed_content_settings = guess_content_type(src, content_settings, t_content_settings)

            include, result = _upload_blob(cmd, client, destination_container_name,
                                           normalize_blob_file_path(destination_path, dst), src,
                                           blob_type=blob_type, content_settings=guessed_content_settings,
                                           metadata=metadata, validate_content=validate_content,
                                           maxsize_condition=maxsize_condition, max_connections=max_connections,
                                           lease_id=lease_id, progress_callback=progress_callback,
                                           if_modified_since=if_modified_since,
                                           if_unmodified_since=if_unmodified_since, if_match=if_match,
                                           if_none_match=if_none_match, timeout=timeout)
            if include:
                results.append(_create_return_result(dst, guessed_content_settings, result))

        num_failures = len(source_files) - len(results)
        if num_failures:
            logger.warning('%s of %s files not uploaded due to "Failed Precondition"', num_failures, len(source_files))
    return results
Example #8
    def load(self, filename, max_age=0):
        self.filename = filename
        self.data = {}
        try:
            if max_age > 0:
                st = os.stat(self.filename)
                if st.st_mtime + max_age < time.time():  # st_mtime is wall-clock epoch seconds
                    self.save()
            with codecs_open(self.filename, 'r', encoding=self._encoding) as f:
                self.data = json.load(f)
        except (OSError, IOError, t_JSONDecodeError) as load_exception:
            # OSError / IOError should imply file not found issues which are expected on fresh runs (e.g. on build
            # agents or new systems). A parse error indicates invalid/bad data in the file. We do not wish to warn
            # on missing files since we expect that, but do if the data isn't parsing as expected.
            log_level = logging.INFO
            if isinstance(load_exception, t_JSONDecodeError):
                log_level = logging.WARNING

            get_logger(__name__).log(log_level,
                                     "Failed to load or parse file %s. It will be overridden by default settings.",
                                     self.filename)
            self.save()
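The same load-or-fall-back pattern can be shown without the surrounding session class. A minimal sketch (the function name and the use of the standard logging module are illustrative): a missing file is expected and logged at INFO, unparsable contents are logged at WARNING, and both cases fall back to defaults.

import json
import logging

def load_json_or_default(path, default=None):
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except OSError:
        # expected on fresh runs: the file has simply not been created yet
        logging.getLogger(__name__).info("File %s not found; using defaults.", path)
    except json.JSONDecodeError:
        # unexpected: the file exists but holds invalid data
        logging.getLogger(__name__).warning("File %s could not be parsed; using defaults.", path)
    return {} if default is None else default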
Example #9
def handler(ctx, **kwargs):
    cmd = kwargs.get('command', None)
    if cmd and cmd.startswith('iot'):
        if not extension_exists('azure-cli-iot-ext'):
            ran_before = ctx.config.getboolean('iot', 'first_run', fallback=False)
            if not ran_before:
                extension_text = """
Comprehensive IoT data-plane functionality is available
in the Azure IoT CLI Extension. For more info and install guide
go to https://github.com/Azure/azure-iot-cli-extension
"""
                logger = get_logger(__name__)
                logger.warning(extension_text)
                ctx.config.set_value('iot', 'first_run', 'yes')
Example #10
    def __call__(self, poller):
        import colorama
        from msrest.exceptions import ClientException

        # https://github.com/azure/azure-cli/issues/3555
        colorama.init()

        correlation_message = ''
        self.cli_ctx.get_progress_controller().begin()
        correlation_id = None

        cli_logger = get_logger()  # get CLI logger which has the level set through command lines
        is_verbose = any(handler.level <= logs.INFO for handler in cli_logger.handlers)

        while not poller.done():
            self.cli_ctx.get_progress_controller().add(message='Running')
            try:
                # pylint: disable=protected-access
                correlation_id = json.loads(
                    poller._response.__dict__['_content'].decode())['properties']['correlationId']

                correlation_message = 'Correlation ID: {}'.format(correlation_id)
            except:  # pylint: disable=bare-except
                pass

            current_time = datetime.datetime.now()
            if is_verbose and current_time - self.last_progress_report >= datetime.timedelta(seconds=10):
                self.last_progress_report = current_time
                try:
                    self._generate_template_progress(correlation_id)
                except Exception as ex:  # pylint: disable=broad-except
                    logger.warning('%s during progress reporting: %s', getattr(type(ex), '__name__', type(ex)), ex)
            try:
                self._delay()
            except KeyboardInterrupt:
                self.cli_ctx.get_progress_controller().stop()
                logger.error('Long-running operation wait cancelled.  %s', correlation_message)
                raise

        try:
            result = poller.result()
        except ClientException as client_exception:
            from azure.cli.core.commands.arm import handle_long_running_operation_exception
            self.cli_ctx.get_progress_controller().stop()
            handle_long_running_operation_exception(client_exception)

        self.cli_ctx.get_progress_controller().end()
        colorama.deinit()

        return result
Example #11
 def wrapper(*args, **kwargs):
     from azure.common import AzureHttpError
     try:
         return True, func(*args, **kwargs)
     except AzureHttpError as ex:
         # Precondition failed error
         # https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/412
         # Not modified error
         # https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/304
         if ex.status_code not in [304, 412]:
             raise
         from knack.log import get_logger
         logger = get_logger(__name__)
         logger.warning('Failed precondition')
         return False, None
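The wrapper above is the body of the check_precondition_success decorator used in Examples #5 and #7: callers receive a (succeeded, result) pair and keep only the successful results. A minimal sketch of the full shape, with a local FakeHttpError standing in for azure.common.AzureHttpError (every name here other than the wrapper logic is an assumption for illustration):

import functools
from knack.log import get_logger

class FakeHttpError(Exception):
    """Stand-in for azure.common.AzureHttpError in this sketch."""
    def __init__(self, status_code):
        super().__init__('HTTP %d' % status_code)
        self.status_code = status_code

def check_precondition_success(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return True, func(*args, **kwargs)
        except FakeHttpError as ex:
            if ex.status_code not in (304, 412):  # only swallow precondition failures
                raise
            get_logger(__name__).warning('Failed precondition')
            return False, None
    return wrapper

@check_precondition_success
def delete_one(name):
    if name == 'skip-me':
        raise FakeHttpError(412)
    return name

kept = [result for ok, result in (delete_one(n) for n in ['a', 'skip-me', 'b']) if ok]
print(kept)  # ['a', 'b']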
Example #12
def storage_file_upload_batch(cmd, client, destination, source, destination_path=None, pattern=None, dryrun=False,
                              validate_content=False, content_settings=None, max_connections=1, metadata=None,
                              progress_callback=None):
    """ Upload local files to Azure Storage File Share in batch """

    from azure.cli.command_modules.storage.util import glob_files_locally, normalize_blob_file_path

    source_files = [c for c in glob_files_locally(source, pattern)]
    logger = get_logger(__name__)
    settings_class = cmd.get_models('file.models#ContentSettings')

    if dryrun:
        logger.info('upload files to file share')
        logger.info('    account %s', client.account_name)
        logger.info('      share %s', destination)
        logger.info('      total %d', len(source_files))
        return [{'File': client.make_file_url(destination, os.path.dirname(dst) or None, os.path.basename(dst)),
                 'Type': guess_content_type(src, content_settings, settings_class).content_type} for src, dst in
                source_files]

    # TODO: Performance improvement
    # 1. Upload files in parallel
    def _upload_action(src, dst):
        dst = normalize_blob_file_path(destination_path, dst)
        dir_name = os.path.dirname(dst)
        file_name = os.path.basename(dst)

        _make_directory_in_files_share(client, destination, dir_name)
        create_file_args = {'share_name': destination, 'directory_name': dir_name, 'file_name': file_name,
                            'local_file_path': src, 'progress_callback': progress_callback,
                            'content_settings': guess_content_type(src, content_settings, settings_class),
                            'metadata': metadata, 'max_connections': max_connections}

        if cmd.supported_api_version(min_api='2016-05-31'):
            create_file_args['validate_content'] = validate_content

        logger.warning('uploading %s', src)
        client.create_file_from_path(**create_file_args)

        return client.make_file_url(destination, dir_name, file_name)

    return list(_upload_action(src, dst) for src, dst in source_files)
Example #13
def storage_file_download_batch(cmd, client, source, destination, pattern=None, dryrun=False, validate_content=False,
                                max_connections=1, progress_callback=None):
    """
    Download files from file share to local directory in batch
    """

    from azure.cli.command_modules.storage.util import glob_files_remotely, mkdir_p

    source_files = glob_files_remotely(cmd, client, source, pattern)

    if dryrun:
        source_files_list = list(source_files)

        logger = get_logger(__name__)
        logger.warning('download files from file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', source)
        logger.warning('destination %s', destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('      total %d', len(source_files_list))
        logger.warning(' operations')
        for f in source_files_list:
            logger.warning('  - %s/%s => %s', f[0], f[1], os.path.join(destination, *f))

        return []

    def _download_action(pair):
        destination_dir = os.path.join(destination, pair[0])
        mkdir_p(destination_dir)

        get_file_args = {'share_name': source, 'directory_name': pair[0], 'file_name': pair[1],
                         'file_path': os.path.join(destination, *pair), 'max_connections': max_connections,
                         'progress_callback': progress_callback}

        if cmd.supported_api_version(min_api='2016-05-31'):
            get_file_args['validate_content'] = validate_content

        client.get_file_to_path(**get_file_args)
        return client.make_file_url(source, *pair)

    return list(_download_action(f) for f in source_files)
Example #14
        def handler(ex):
            from azure.cli.core.profiles import get_sdk
            from knack.log import get_logger

            logger = get_logger(__name__)
            t_error = get_sdk(self.command_loader.cli_ctx,
                              CUSTOM_DATA_STORAGE,
                              'common._error#AzureHttpError')
            if isinstance(ex, t_error) and ex.status_code == 403:
                message = """
You do not have the required permissions needed to perform this operation.
Depending on your operation, you may need to be assigned one of the following roles:
    "Storage Blob Data Contributor (Preview)"
    "Storage Blob Data Reader (Preview)"
    "Storage Queue Data Contributor (Preview)"
    "Storage Queue Data Reader (Preview)"

If you want to use the old authentication method and allow querying for the right account key, please use the "--auth-mode" parameter and "key" value.
                """
                logger.error(message)
                return
            raise ex
Example #15
def storage_blob_download_batch(client, source, destination, source_container_name, pattern=None, dryrun=False,
                                progress_callback=None, max_connections=2):

    def _download_blob(blob_service, container, destination_folder, normalized_blob_name, blob_name):
        # TODO: try catch IO exception
        destination_path = os.path.join(destination_folder, normalized_blob_name)
        destination_folder = os.path.dirname(destination_path)
        if not os.path.exists(destination_folder):
            mkdir_p(destination_folder)

        blob = blob_service.get_blob_to_path(container, blob_name, destination_path, max_connections=max_connections,
                                             progress_callback=progress_callback)
        return blob.name

    source_blobs = collect_blobs(client, source_container_name, pattern)
    blobs_to_download = {}
    for blob_name in source_blobs:
        # remove starting path separator and normalize
        normalized_blob_name = normalize_blob_file_path(None, blob_name)
        if normalized_blob_name in blobs_to_download:
            from knack.util import CLIError
            raise CLIError('Multiple blobs with download path: `{}`. As a solution, use the `--pattern` parameter '
                           'to select for a subset of blobs to download OR utilize the `storage blob download` '
                           'command instead to download individual blobs.'.format(normalized_blob_name))
        blobs_to_download[normalized_blob_name] = blob_name

    if dryrun:
        logger = get_logger(__name__)
        logger.warning('download action: from %s to %s', source, destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('  container %s', source_container_name)
        logger.warning('      total %d', len(source_blobs))
        logger.warning(' operations')
        for b in source_blobs:
            logger.warning('  - %s', b)
        return []

    return list(_download_blob(client, source_container_name, destination, blob_normed, blobs_to_download[blob_normed])
                for blob_normed in blobs_to_download)
Example #16
 def _listen(self):
     self.sock.listen(100)
     index = 0
     basic_auth_string = self.create_basic_auth()
     while True:
         self.client, _address = self.sock.accept()
         self.client.settimeout(60 * 60)
         host = 'wss://{}{}'.format(self.remote_addr, '.scm.azurewebsites.net/AppServiceTunnel/Tunnel.ashx')
         basic_auth_header = 'Authorization: Basic {}'.format(basic_auth_string)
         cli_logger = get_logger()  # get CLI logger which has the level set through command lines
         is_verbose = any(handler.level <= logs.INFO for handler in cli_logger.handlers)
         if is_verbose:
             logger.info('Websocket tracing enabled')
             websocket.enableTrace(True)
         else:
             logger.info('Websocket tracing disabled, use --verbose flag to enable')
             websocket.enableTrace(False)
         self.ws = create_connection(host,
                                     sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),),
                                     class_=TunnelWebSocket,
                                     header=[basic_auth_header],
                                     sslopt={'cert_reqs': ssl.CERT_NONE},
                                     timeout=60 * 60,
                                     enable_multithread=True)
         logger.info('Websocket, connected status: %s', self.ws.connected)
         index = index + 1
         logger.info('Got debugger connection... index: %s', index)
         debugger_thread = Thread(target=self._listen_to_client, args=(self.client, self.ws, index))
         web_socket_thread = Thread(target=self._listen_to_web_socket, args=(self.client, self.ws, index))
         debugger_thread.start()
         web_socket_thread.start()
         logger.info('Both debugger and websocket threads started...')
         logger.info('Successfully connected to local server..')
         debugger_thread.join()
         web_socket_thread.join()
         logger.info('Both debugger and websocket threads stopped...')
         logger.info('Stopped local server..')
Example #17
def create(cmd,
           vm_name,
           resource_group_name,
           repair_password=None,
           repair_username=None,
           repair_vm_name=None,
           copy_disk_name=None,
           repair_group_name=None):
    is_verbose = any(handler.level == logging.INFO
                     for handler in get_logger().handlers)
    # begin progress reporting for long running operation if not verbose
    if not is_verbose:
        cmd.cli_ctx.get_progress_controller().begin()
        cmd.cli_ctx.get_progress_controller().add(message='Running')
    # Function param for telemetry
    func_params = _get_function_param_dict(inspect.currentframe())
    # Start timer for custom telemetry
    start_time = timeit.default_timer()
    # Initialize return variables
    return_message = ''
    return_error_detail = ''
    return_status = ''

    source_vm = get_vm(cmd, resource_group_name, vm_name)
    is_linux = _is_linux_os(source_vm)
    target_disk_name = source_vm.storage_profile.os_disk.name
    is_managed = _uses_managed_disk(source_vm)

    copy_disk_id = None
    resource_tag = _get_repair_resource_tag(resource_group_name, vm_name)

    # Overall success flag
    command_succeeded = False
    # List of created resources
    created_resources = []

    # Main command calling block
    try:
        # Fetch OS image urn
        if is_linux:
            os_image_urn = "UbuntuLTS"
        else:
            os_image_urn = _fetch_compatible_windows_os_urn(source_vm)

        # Set up base create vm command
        create_repair_vm_command = 'az vm create -g {g} -n {n} --tag {tag} --image {image} --admin-username {username} --admin-password {password}' \
                                   .format(g=repair_group_name, n=repair_vm_name, tag=resource_tag, image=os_image_urn, username=repair_username, password=repair_password)
        # fetch VM size of repair VM
        sku = _fetch_compatible_sku(source_vm)
        if not sku:
            raise SkuNotAvailableError(
                'Failed to find compatible VM size for source VM\'s OS disk within given region and subscription.'
            )
        create_repair_vm_command += ' --size {sku}'.format(sku=sku)

        # Create new resource group
        create_resource_group_command = 'az group create -l {loc} -n {group_name}' \
                                        .format(loc=source_vm.location, group_name=repair_group_name)
        logger.info(
            'Creating resource group for repair VM and its resources...')
        _call_az_command(create_resource_group_command)

        # MANAGED DISK
        if is_managed:
            logger.info(
                'Source VM uses managed disks. Creating repair VM with managed disks.\n'
            )
            # Copy OS disk command
            disk_sku = source_vm.storage_profile.os_disk.managed_disk.storage_account_type
            if not disk_sku:
                # VM is deallocated so fetch disk_sku info from disk itself
                disk_sku = _fetch_disk_sku(resource_group_name,
                                           target_disk_name)
            copy_disk_command = 'az disk create -g {g} -n {n} --source {s} --sku {sku} --location {loc} --query id -o tsv' \
                                .format(g=resource_group_name, n=copy_disk_name, s=target_disk_name, sku=disk_sku, loc=source_vm.location)
            # Validate the vm create command parameters before running the copy disk command
            validate_create_vm_command = create_repair_vm_command + ' --validate'

            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password, repair_username])
            logger.info('Copying OS disk of source VM...')
            copy_disk_id = _call_az_command(copy_disk_command).strip('\n')

            attach_disk_command = 'az vm disk attach -g {g} --vm-name {repair} --name {id}' \
                                  .format(g=repair_group_name, repair=repair_vm_name, id=copy_disk_id)

            logger.info('Creating repair VM...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password, repair_username])
            logger.info('Attaching copied disk to repair VM...')
            _call_az_command(attach_disk_command)
        # UNMANAGED DISK
        else:
            logger.info(
                'Source VM uses unmanaged disks. Creating repair VM with unmanaged disks.\n'
            )
            os_disk_uri = source_vm.storage_profile.os_disk.vhd.uri
            copy_disk_name = copy_disk_name + '.vhd'
            storage_account = StorageResourceIdentifier(
                cmd.cli_ctx.cloud, os_disk_uri)
            # Validate the vm create command parameters before running the copy disk commands
            validate_create_vm_command = create_repair_vm_command + ' --validate'
            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password, repair_username])

            # get storage account connection string
            get_connection_string_command = 'az storage account show-connection-string -g {g} -n {n} --query connectionString -o tsv' \
                                            .format(g=resource_group_name, n=storage_account.account_name)
            logger.debug('Fetching storage account connection string...')
            connection_string = _call_az_command(
                get_connection_string_command).strip('\n')

            # Create Snapshot of Unmanaged Disk
            make_snapshot_command = 'az storage blob snapshot -c {c} -n {n} --connection-string "{con_string}" --query snapshot -o tsv' \
                                    .format(c=storage_account.container, n=storage_account.blob, con_string=connection_string)
            logger.info('Creating snapshot of OS disk...')
            snapshot_timestamp = _call_az_command(make_snapshot_command,
                                                  secure_params=[
                                                      connection_string
                                                  ]).strip('\n')
            snapshot_uri = os_disk_uri + '?snapshot={timestamp}'.format(
                timestamp=snapshot_timestamp)

            # Copy Snapshot into unmanaged Disk
            copy_snapshot_command = 'az storage blob copy start -c {c} -b {name} --source-uri {source} --connection-string "{con_string}"' \
                                    .format(c=storage_account.container, name=copy_disk_name, source=snapshot_uri, con_string=connection_string)
            logger.info('Creating a copy disk from the snapshot...')
            _call_az_command(copy_snapshot_command,
                             secure_params=[connection_string])
            # Generate the copied disk uri
            copy_disk_id = os_disk_uri.rstrip(
                storage_account.blob) + copy_disk_name

            # Create new repair VM with copied unmanaged disk command
            create_repair_vm_command = create_repair_vm_command + ' --use-unmanaged-disk'
            logger.info('Creating repair VM while disk copy is in progress...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password, repair_username])

            logger.info('Checking if disk copy is done...')
            copy_check_command = 'az storage blob show -c {c} -n {name} --connection-string "{con_string}" --query properties.copy.status -o tsv' \
                                 .format(c=storage_account.container, name=copy_disk_name, con_string=connection_string)
            copy_result = _call_az_command(copy_check_command,
                                           secure_params=[connection_string
                                                          ]).strip('\n')
            if copy_result != 'success':
                raise UnmanagedDiskCopyError('Unmanaged disk copy failed.')

            # Attach copied unmanaged disk to new vm
            logger.info('Attaching copied disk to repair VM as data disk...')
            attach_disk_command = "az vm unmanaged-disk attach -g {g} -n {disk_name} --vm-name {vm_name} --vhd-uri {uri}" \
                                  .format(g=repair_group_name, disk_name=copy_disk_name, vm_name=repair_vm_name, uri=copy_disk_id)
            _call_az_command(attach_disk_command)

        command_succeeded = True
        created_resources = _list_resource_ids_in_rg(repair_group_name)

    # Some error happened. Stop command and clean-up resources.
    except KeyboardInterrupt:
        return_error_detail = "Command interrupted by user input."
        return_message = "Command interrupted by user input. Cleaning up resources."
    except AzCommandError as azCommandError:
        return_error_detail = str(azCommandError)
        return_message = "Repair create failed. Cleaning up created resources."
    except SkuNotAvailableError as skuNotAvailableError:
        return_error_detail = str(skuNotAvailableError)
        return_message = "Please check if the current subscription can create more VM resources. Cleaning up created resources."
    except UnmanagedDiskCopyError as unmanagedDiskCopyError:
        return_error_detail = str(unmanagedDiskCopyError)
        return_message = "Repair create failed. Please try again at another time. Cleaning up created resources."
    except WindowsOsNotAvailableError:
        return_error_detail = 'Compatible Windows OS image not available.'
        return_message = 'A compatible Windows OS image is not available at this time, please check subscription.'
    except Exception as exception:
        return_error_detail = str(exception)
        return_message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        # end long running op for process if not verbose
        if not is_verbose:
            cmd.cli_ctx.get_progress_controller().end()

    # Command failed block. Output right error message and return dict
    if not command_succeeded:
        return_status = STATUS_ERROR
        return_dict = _handle_command_error(return_error_detail,
                                            return_message)
        _clean_up_resources(repair_group_name, confirm=False)
    else:
        # Construct return dict
        return_status = STATUS_SUCCESS
        created_resources.append(copy_disk_id)
        return_dict = {}
        return_dict['status'] = return_status
        return_dict['message'] = 'Your repair VM \'{n}\' has been created in the resource group \'{repair_rg}\' with disk \'{d}\' attached as data disk. ' \
                                 'Please use this VM to troubleshoot and repair. Once the repairs are complete use the command ' \
                                 '\'az vm repair restore -n {source_vm} -g {rg} --verbose\' to restore disk to the source VM. ' \
                                 'Note that the copied disk is created within the original resource group \'{rg}\'.' \
                                 .format(n=repair_vm_name, repair_rg=repair_group_name, d=copy_disk_name, rg=resource_group_name, source_vm=vm_name)
        return_dict['repair_vm_name'] = repair_vm_name
        return_dict['copied_disk_name'] = copy_disk_name
        return_dict['copied_disk_uri'] = copy_disk_id
        return_dict['repair_resouce_group'] = repair_group_name
        return_dict['resource_tag'] = resource_tag
        return_dict['created_resources'] = created_resources

        logger.info('\n%s\n', return_dict['message'])

    # Track telemetry data
    elapsed_time = timeit.default_timer() - start_time
    _track_command_telemetry('vm repair create', func_params, return_status,
                             return_message, return_error_detail, elapsed_time,
                             get_subscription_id(cmd.cli_ctx), return_dict)
    return return_dict
Example #18
def cf_sa_for_keys(cli_ctx, _):
    from knack.log import get_logger
    logger = get_logger(__name__)
    logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
    client = storage_client_factory(cli_ctx)
    return client.storage_accounts
Example #19
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

# pylint: disable=missing-docstring

import logging
from functools import lru_cache

from knack.log import get_logger

logger = get_logger()  # pylint: disable=invalid-name


@lru_cache(maxsize=None)
def is_verbose():
    return any(handler.level <= logging.INFO for handler in logger.handlers)
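A hypothetical caller of the cached helper above, gating verbose-only work on whether a --verbose/--debug run pushed a handler level down to INFO (report_progress and its message are illustrative and assume the is_verbose definition above):

progress_logger = get_logger(__name__)

def report_progress(state):
    # only emit detailed progress when the CLI is running verbosely
    if is_verbose():
        progress_logger.info('Detailed progress: %s', state)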
Example #20
def run(cmd,
        vm_name,
        resource_group_name,
        run_id=None,
        repair_vm_id=None,
        custom_script_file=None,
        parameters=None,
        run_on_repair=False):
    is_verbose = any(handler.level == logging.INFO
                     for handler in get_logger().handlers)
    # begin progress reporting for long running operation if not verbose
    if not is_verbose:
        cmd.cli_ctx.get_progress_controller().begin()
        cmd.cli_ctx.get_progress_controller().add(message='Running')
    # Function param for telemetry
    func_params = _get_function_param_dict(inspect.currentframe())
    # Start timer and params for custom telemetry
    start_time = timeit.default_timer()
    # Initialize return variables
    return_message = ''
    return_error_detail = ''
    return_status = ''

    # Overall success flag
    command_succeeded = False
    return_dict = {}
    try:
        source_vm = get_vm(cmd, resource_group_name, vm_name)

        # Build absolute path of driver script
        loader = pkgutil.get_loader('azext_vm_repair')
        mod = loader.load_module('azext_vm_repair')
        rootpath = os.path.dirname(mod.__file__)
        is_linux = _is_linux_os(source_vm)
        if is_linux:
            run_script = os.path.join(rootpath, 'scripts',
                                      'linux-run-driver.sh')
            command_id = 'RunShellScript'
        else:
            run_script = os.path.join(rootpath, 'scripts',
                                      'win-run-driver.ps1')
            command_id = 'RunPowerShellScript'

        # If run_on_repair is False, then repair_vm is the source_vm (scripts run directly on source vm)
        repair_vm_id = parse_resource_id(repair_vm_id)
        repair_vm_name = repair_vm_id['name']
        repair_resource_group = repair_vm_id['resource_group']

        repair_run_command = 'az vm run-command invoke -g {rg} -n {vm} --command-id {command_id} ' \
                             '--scripts "@{run_script}"' \
                             .format(rg=repair_resource_group, vm=repair_vm_name, command_id=command_id, run_script=run_script)

        # Normal scenario with run id
        if not custom_script_file:
            # Fetch run path from GitHub
            repair_script_path = _fetch_run_script_path(run_id)
            repair_run_command += ' --parameters script_path="./{repair_script}"'.format(
                repair_script=repair_script_path)
        # Custom script scenario for script testers
        else:
            # no-op run id
            repair_run_command += ' "@{custom_file}" --parameters script_path=no-op'.format(
                custom_file=custom_script_file)
        # Append Parameters
        if parameters:
            if is_linux:
                param_string = _process_bash_parameters(parameters)
            else:
                param_string = _process_ps_parameters(parameters)
            # Workaround for a run-command bug: unexpected behavior with space characters
            param_string = param_string.replace(' ', '%20')
            repair_run_command += ' params="{}"'.format(param_string)
        if run_on_repair:
            vm_string = 'repair VM'
        else:
            vm_string = 'VM'
        logger.info('Running script on %s: %s', vm_string, repair_vm_name)
        script_start_time = timeit.default_timer()
        return_str = _call_az_command(repair_run_command)
        script_duration = timeit.default_timer() - script_start_time
        # Extract stdout and stderr; if stderr is non-empty there may be an error
        run_command_return = json.loads(return_str)

        if is_linux:
            run_command_message = run_command_return['value'][0][
                'message'].split('[stdout]')[1].split('[stderr]')
            stdout = run_command_message[0].strip('\n')
            stderr = run_command_message[1].strip('\n')
        else:
            stdout = run_command_return['value'][0]['message']
            stderr = run_command_return['value'][1]['message']

        run_script_succeeded = _check_script_succeeded(stdout)
        # Parse through logs to populate log properties: 'level', 'message'
        logs = _parse_run_script_raw_logs(stdout)

        # Process log-start and log-end
        # Log is cutoff at the start if over 4k bytes
        log_cutoff = True
        log_fullpath = ''
        for log in logs:
            if log['level'] == 'Log-Start':
                log_cutoff = False
            if log['level'] == 'Log-End':
                split_log = log['message'].split(']')
                if len(split_log) == 2:
                    log_fullpath = split_log[1]
        if log_cutoff:
            logger.warning(
                'Log file is too large and has been cut off at the start of the file. Please locate the log file within the %s using the logFullpath to check the full logs.',
                vm_string)

        # Output 'output' or 'error' level logs depending on status
        if run_script_succeeded:
            script_status = STATUS_SUCCESS
            return_status = STATUS_SUCCESS
            message = 'Script completed without error.'
            output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'output'
            ])
            logger.info('\nScript returned with output:\n%s\n', output)
        else:
            script_status = STATUS_ERROR
            return_status = STATUS_SUCCESS
            message = 'Script successfully run but returned with possible errors.'
            output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'error'
            ])
            logger.error('\nScript returned with error:\n%s\n', output)

        logger.debug("stderr: %s", stderr)
        return_message = message
        return_dict['status'] = return_status
        return_dict['script_status'] = script_status
        return_dict['message'] = message
        return_dict['logs'] = stdout
        return_dict['log_full_path'] = log_fullpath
        return_dict['output'] = output
        return_dict['vm_name'] = repair_vm_name
        return_dict['resouce_group'] = repair_resource_group
        command_succeeded = True
    except KeyboardInterrupt:
        return_error_detail = "Command interrupted by user input."
        return_message = "Repair run failed. Command interrupted by user input."
    except AzCommandError as azCommandError:
        return_error_detail = str(azCommandError)
        return_message = "Repair run failed."
    except requests.exceptions.RequestException as exception:
        return_error_detail = str(exception)
        return_message = "Failed to fetch run script data from GitHub. Please check this repository is reachable: https://github.com/Azure/repair-script-library"
    except RunScriptNotFoundForIdError as exception:
        return_error_detail = str(exception)
        return_message = "Repair run failed. Run ID not found."
    except Exception as exception:
        return_error_detail = str(exception)
        return_message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        # end long running op for process if not verbose
        if not is_verbose:
            cmd.cli_ctx.get_progress_controller().end()

    if not command_succeeded:
        script_duration = ''
        output = 'Repair run failed.'
        script_status = STATUS_ERROR
        return_status = STATUS_ERROR
        return_dict = _handle_command_error(return_error_detail,
                                            return_message)
        return_dict['script_status'] = script_status

    # Track telemetry data
    elapsed_time = timeit.default_timer() - start_time
    _track_run_command_telemetry('vm repair run', func_params, return_status,
                                 return_message,
                                 return_error_detail, elapsed_time,
                                 get_subscription_id(cmd.cli_ctx), return_dict,
                                 run_id, script_status, output,
                                 script_duration)
    return return_dict
Example #21
def storage_file_copy_batch(cmd, client, source_client, destination_share=None, destination_path=None,
                            source_container=None, source_share=None, source_sas=None, pattern=None, dryrun=False,
                            metadata=None, timeout=None):
    """
    Copy a group of files asynchronously
    """
    logger = None
    if dryrun:
        logger = get_logger(__name__)
        logger.warning('copy files or blobs to file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', destination_share)
        logger.warning('       path %s', destination_path)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s', 'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs to file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(cmd, client)

        # cache of existing directories in the destination file share; it helps avoid repeatedly
        # creating directories that already exist, as a performance optimization.
        existing_dirs = set([])

        if not source_sas:
            source_sas = create_short_lived_container_sas(cmd, source_client.account_name, source_client.account_key,
                                                          source_container)

        # pylint: disable=inconsistent-return-statements
        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _create_file_and_directory_from_blob(client, source_client, destination_share, source_container,
                                                            source_sas, blob_name, destination_dir=destination_path,
                                                            metadata=metadata, timeout=timeout,
                                                            existing_dirs=existing_dirs)

        return list(
            filter_none(action_blob_copy(blob) for blob in collect_blobs(source_client, source_container, pattern)))

    elif source_share:
        # copy files from share to share

        # if the source client is None, assume the file share is in the same storage account as
        # destination, therefore client is reused.
        source_client = source_client or client

        # cache of existing directories in the destination file share; it helps avoid repeatedly
        # creating directories that already exist, as a performance optimization.
        existing_dirs = set([])

        if not source_sas:
            source_sas = create_short_lived_share_sas(cmd, source_client.account_name, source_client.account_key,
                                                      source_share)

        # pylint: disable=inconsistent-return-statements
        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s', os.path.join(dir_name, file_name))
            else:
                return _create_file_and_directory_from_file(client, source_client, destination_share, source_share,
                                                            source_sas, dir_name, file_name,
                                                            destination_dir=destination_path, metadata=metadata,
                                                            timeout=timeout, existing_dirs=existing_dirs)

        return list(filter_none(
            action_file_copy(file) for file in collect_files(cmd, source_client, source_share, pattern)))
    else:
        # won't happen, the validator should ensure either source_container or source_share is set
        raise ValueError('Failed to find source. Neither blob container nor file share is specified.')
Example #22
def validate_client_parameters(cmd, namespace):
    """ Retrieves storage connection parameters from environment variables and parses out connection string into
    account name and key """
    n = namespace

    def get_config_value(section, key, default):
        return cmd.cli_ctx.config.get(section, key, default)

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode',
                                                    None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value('storage', 'account', None)
        if auth_mode == 'login':
            n.token_credential = _create_token_credential(cmd.cli_ctx)

            # give warning if there are account key args being ignored
            account_key_args = [
                n.account_key and "--account-key", n.sas_token
                and "--sas-token", n.connection_string
                and "--connection-string"
            ]
            account_key_args = [arg for arg in account_key_args if arg]

            if account_key_args:
                from knack.log import get_logger

                logger = get_logger(__name__)
                logger.warning(
                    'In "login" auth mode, the following arguments are ignored: %s',
                    ', '.join(account_key_args))
            return

    if not n.connection_string:
        n.connection_string = get_config_value('storage', 'connection_string',
                                               None)

    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        if not n.account_name or not n.account_key:
            from knack.util import CLIError
            raise CLIError(
                'Connection-string: %s, is malformed. Some shell environments require the '
                'connection string to be surrounded by quotes.' %
                n.connection_string)

    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        n.account_name = get_config_value('storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value('storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value('storage', 'sas_token', None)

    # strip the '?' from the sas token. the portal and the command line return the sas token in
    # different forms
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
Example #23
def storage_file_copy_batch(cmd,
                            client,
                            source_client,
                            destination_share=None,
                            destination_path=None,
                            source_container=None,
                            source_share=None,
                            source_sas=None,
                            pattern=None,
                            dryrun=False,
                            metadata=None,
                            timeout=None):
    """
    Copy a group of files asynchronously
    """
    logger = None
    if dryrun:
        logger = get_logger(__name__)
        logger.warning('copy files or blobs to file share')
        logger.warning('    account %s', client.account_name)
        logger.warning('      share %s', destination_share)
        logger.warning('       path %s', destination_path)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s',
                       'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs to file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(
            cmd, client)

        # cache of existing directories in the destination file share; it helps avoid repeatedly
        # creating directories that already exist, as a performance optimization.
        existing_dirs = set([])

        if not source_sas:
            source_sas = create_short_lived_container_sas(
                cmd, source_client.account_name, source_client.account_key,
                source_container)

        # pylint: disable=inconsistent-return-statements
        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _create_file_and_directory_from_blob(
                    client,
                    source_client,
                    destination_share,
                    source_container,
                    source_sas,
                    blob_name,
                    destination_dir=destination_path,
                    metadata=metadata,
                    timeout=timeout,
                    existing_dirs=existing_dirs)

        return list(
            filter_none(
                action_blob_copy(blob) for blob in collect_blobs(
                    source_client, source_container, pattern)))

    elif source_share:
        # copy files from share to share

        # if the source client is None, assume the file share is in the same storage account as
        # destination, therefore client is reused.
        source_client = source_client or client

        # cache of existing directories in the destination file share; it helps avoid repeatedly
        # creating directories that already exist, as a performance optimization.
        existing_dirs = set([])

        if not source_sas:
            source_sas = create_short_lived_share_sas(
                cmd, source_client.account_name, source_client.account_key,
                source_share)

        # pylint: disable=inconsistent-return-statements
        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s',
                               os.path.join(dir_name, file_name))
            else:
                return _create_file_and_directory_from_file(
                    client,
                    source_client,
                    destination_share,
                    source_share,
                    source_sas,
                    dir_name,
                    file_name,
                    destination_dir=destination_path,
                    metadata=metadata,
                    timeout=timeout,
                    existing_dirs=existing_dirs)

        return list(
            filter_none(
                action_file_copy(file) for file in collect_files(
                    cmd, source_client, source_share, pattern)))
    else:
        # This branch should not be reached: the validator ensures that either source_container
        # or source_share is set.
        raise ValueError(
            'Failed to find a source. Neither a blob container nor a file share was specified.')
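
filter_none is a helper used above but not shown in these excerpts. Assuming it simply drops the None values produced by the dry-run branches, a minimal stand-in could look like this:

def filter_none(iterable):
    # Drop None entries, e.g. the ones returned by the dry-run branches above.
    return [item for item in iterable if item is not None]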
Example #24
def storage_blob_copy_batch(cmd, client, source_client, destination_container=None,
                            destination_path=None, source_container=None, source_share=None,
                            source_sas=None, pattern=None, dryrun=False):
    """Copy a group of blob or files to a blob container."""
    logger = None
    if dryrun:
        logger = get_logger(__name__)
        logger.warning('copy files or blobs to blob container')
        logger.warning('    account %s', client.account_name)
        logger.warning('  container %s', destination_container)
        logger.warning('     source %s', source_container or source_share)
        logger.warning('source type %s', 'blob' if source_container else 'file')
        logger.warning('    pattern %s', pattern)
        logger.warning(' operations')

    if source_container:
        # copy blobs to the blob container

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_blob_service_from_storage_client(cmd, client)

        if not source_sas:
            source_sas = create_short_lived_container_sas(cmd, source_client.account_name, source_client.account_key,
                                                          source_container)

        # pylint: disable=inconsistent-return-statements
        def action_blob_copy(blob_name):
            if dryrun:
                logger.warning('  - copy blob %s', blob_name)
            else:
                return _copy_blob_to_blob_container(client, source_client, destination_container, destination_path,
                                                    source_container, source_sas, blob_name)

        return list(filter_none(action_blob_copy(blob) for blob in collect_blobs(source_client,
                                                                                 source_container,
                                                                                 pattern)))

    elif source_share:
        # copy blobs from a file share

        # if the source client is None, recreate one from the destination client.
        source_client = source_client or create_file_share_from_storage_client(cmd, client)

        if not source_sas:
            source_sas = create_short_lived_share_sas(cmd, source_client.account_name, source_client.account_key,
                                                      source_share)

        # pylint: disable=inconsistent-return-statements
        def action_file_copy(file_info):
            dir_name, file_name = file_info
            if dryrun:
                logger.warning('  - copy file %s', os.path.join(dir_name, file_name))
            else:
                return _copy_file_to_blob_container(client, source_client, destination_container, destination_path,
                                                    source_share, source_sas, dir_name, file_name)

        return list(filter_none(action_file_copy(file) for file in collect_files(cmd,
                                                                                 source_client,
                                                                                 source_share,
                                                                                 pattern)))
    else:
        raise ValueError('Failed to find a source. Neither a blob container nor a file share was specified.')
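
For illustration only, a dry-run invocation might look like the following; cmd and blob_client are placeholders for objects supplied by the CLI command context and are not defined in this excerpt:

# Hypothetical dry-run call: logs the planned operations instead of starting any copies.
storage_blob_copy_batch(cmd, blob_client, None,
                        destination_container='backup',
                        source_container='images',
                        pattern='*.png',
                        dryrun=True)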
Example #25
def storage_file_upload_batch(cmd,
                              client,
                              destination,
                              source,
                              destination_path=None,
                              pattern=None,
                              dryrun=False,
                              validate_content=False,
                              content_settings=None,
                              max_connections=1,
                              metadata=None,
                              progress_callback=None):
    """ Upload local files to Azure Storage File Share in batch """

    from azure.cli.command_modules.storage.util import glob_files_locally, normalize_blob_file_path

    source_files = list(glob_files_locally(source, pattern))
    logger = get_logger(__name__)
    settings_class = cmd.get_models('file.models#ContentSettings')

    if dryrun:
        logger.info('upload files to file share')
        logger.info('    account %s', client.account_name)
        logger.info('      share %s', destination)
        logger.info('      total %d', len(source_files))
        return [{
            'File': client.make_file_url(destination,
                                         os.path.dirname(dst) or None,
                                         os.path.basename(dst)),
            'Type': guess_content_type(src, content_settings, settings_class).content_type
        } for src, dst in source_files]

    # TODO: Performance improvement
    # 1. Upload files in parallel (a possible approach is sketched after this function)
    def _upload_action(src, dst):
        dst = normalize_blob_file_path(destination_path, dst)
        dir_name = os.path.dirname(dst)
        file_name = os.path.basename(dst)

        _make_directory_in_files_share(client, destination, dir_name)
        create_file_args = {
            'share_name': destination,
            'directory_name': dir_name,
            'file_name': file_name,
            'local_file_path': src,
            'progress_callback': progress_callback,
            'content_settings': guess_content_type(src, content_settings, settings_class),
            'metadata': metadata,
            'max_connections': max_connections
        }

        if cmd.supported_api_version(min_api='2016-05-31'):
            create_file_args['validate_content'] = validate_content

        logger.warning('uploading %s', src)
        client.create_file_from_path(**create_file_args)

        return client.make_file_url(destination, dir_name, file_name)

    return [_upload_action(src, dst) for src, dst in source_files]
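
One possible way to approach the TODO above (a sketch only, not the module's implementation), assuming _upload_action is safe to call from multiple threads:

from concurrent.futures import ThreadPoolExecutor

def _upload_files_in_parallel(source_files, max_workers=4):
    # Run _upload_action concurrently; results come back in the original order.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(lambda pair: _upload_action(*pair), source_files))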
Example #26
import time
from importlib import import_module
import six

from knack.arguments import CLICommandArgument, ignore_type, ArgumentsContext
from knack.commands import CLICommand, CommandGroup
from knack.invocation import CommandInvoker
from knack.log import get_logger
from knack.parser import ARGPARSE_SUPPORTED_KWARGS
from knack.util import CLIError

from azure.cli.core import EXCLUDED_PARAMS
from azure.cli.core.extension import get_extension
import azure.cli.core.telemetry as telemetry

logger = get_logger(__name__)

CLI_COMMON_KWARGS = [
    'min_api', 'max_api', 'resource_type', 'operation_group',
    'custom_command_type', 'command_type'
]

CLI_COMMAND_KWARGS = [
    'transform', 'table_transformer', 'confirmation', 'exception_handler',
    'client_factory', 'operations_tmpl', 'no_wait_param', 'supports_no_wait',
    'validator', 'client_arg_name', 'doc_string_source', 'deprecate_info'
] + CLI_COMMON_KWARGS
CLI_PARAM_KWARGS = \
    ['id_part', 'completer', 'validator', 'options_list', 'configured_default', 'arg_group', 'arg_type'] \
    + CLI_COMMON_KWARGS + ARGPARSE_SUPPORTED_KWARGS
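
These whitelists are typically used to decide which keyword arguments a given layer accepts. A minimal, hypothetical illustration (the helper below is not part of azure-cli-core):

def _filter_kwargs(kwargs, allowed):
    # Keep only the keyword arguments that appear in the given whitelist.
    return {key: value for key, value in kwargs.items() if key in allowed}

# _filter_kwargs({'confirmation': True, 'id_part': 'name'}, CLI_COMMAND_KWARGS)
# keeps 'confirmation' and drops 'id_part', which is a parameter-level kwarg.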
Example #27
from __future__ import print_function
from sys import exc_info
from datetime import datetime, timedelta
import adal
from knack.util import CLIError
from knack.log import get_logger
from azure.servicefabric.service_fabric_client_ap_is import ServiceFabricClientAPIs
from msrest import ServiceClient, Configuration
from sfctl.config import client_endpoint, SF_CLI_VERSION_CHECK_INTERVAL, get_cluster_auth, set_aad_cache, set_aad_metadata # pylint: disable=line-too-long
from sfctl.state import get_sfctl_version
from sfctl.custom_exceptions import SFCTLInternalException
from sfctl.auth import ClientCertAuthentication, AdalAuthentication


logger = get_logger(__name__)  # pylint: disable=invalid-name

def select_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify):  # pylint: disable=invalid-name,too-many-arguments
    """Verify arguments for select command"""

    if not (endpoint.lower().startswith('http')
            or endpoint.lower().startswith('https')):
        raise CLIError('Endpoint must be HTTP or HTTPS')

    usage = ('Valid syntax : --endpoint [ [ --key --cert | --pem | --aad] '
             '[ --ca | --no-verify ] ]')

    if ca and not (pem or all([key, cert])):
        raise CLIError(usage)

    if no_verify and not (pem or all([key, cert]) or aad):
        raise CLIError(usage)  # assumed completion; mirrors the --ca check above (snippet truncated here)
Example #28
def _log_hostname():
    import socket
    from knack.log import get_logger
    logger = get_logger(__name__)
    logger.warning("A Cloud Shell credential problem occurred. When you report the issue with the error "
                   "below, please mention the hostname '%s'", socket.gethostname())
Example #29
def restore(cmd,
            vm_name,
            resource_group_name,
            disk_name=None,
            repair_vm_id=None,
            yes=False):
    is_verbose = any(handler.level == logging.INFO
                     for handler in get_logger().handlers)
    # begin progress reporting for long running operation if not verbose
    if not is_verbose:
        cmd.cli_ctx.get_progress_controller().begin()
        cmd.cli_ctx.get_progress_controller().add(message='Running')
    # Function param for telemetry
    func_params = _get_function_param_dict(inspect.currentframe())
    # Start timer for custom telemetry
    start_time = timeit.default_timer()
    # Initialize return variables
    return_message = ''
    return_error_detail = ''
    return_status = ''

    source_vm = get_vm(cmd, resource_group_name, vm_name)
    is_managed = _uses_managed_disk(source_vm)

    repair_vm_id = parse_resource_id(repair_vm_id)
    repair_vm_name = repair_vm_id['name']
    repair_resource_group = repair_vm_id['resource_group']

    # Overall success flag
    command_succeeded = False
    source_disk = None
    try:
        if is_managed:
            source_disk = source_vm.storage_profile.os_disk.name
            # Detach repaired data disk command
            detach_disk_command = 'az vm disk detach -g {g} --vm-name {repair} --name {disk}' \
                                  .format(g=repair_resource_group, repair=repair_vm_name, disk=disk_name)
            # Update OS disk with repaired data disk
            attach_fixed_command = 'az vm update -g {g} -n {n} --os-disk {disk}' \
                                   .format(g=resource_group_name, n=vm_name, disk=disk_name)

            # Maybe run attach and delete concurrently
            logger.info('Detaching repaired data disk from repair VM...')
            _call_az_command(detach_disk_command)
            logger.info(
                'Attaching repaired data disk to source VM as an OS disk...')
            _call_az_command(attach_fixed_command)
        else:
            source_disk = source_vm.storage_profile.os_disk.vhd.uri
            # Get disk uri from disk name
            repair_vm = get_vm(cmd, repair_vm_id['resource_group'],
                               repair_vm_id['name'])
            data_disks = repair_vm.storage_profile.data_disks
            # The params went through the validator, so no existence checks are needed
            disk_uri = [
                disk.vhd.uri for disk in data_disks if disk.name == disk_name
            ][0]

            detach_unmanaged_command = 'az vm unmanaged-disk detach -g {g} --vm-name {repair} --name {disk}' \
                                       .format(g=repair_resource_group, repair=repair_vm_name, disk=disk_name)
            # Update OS disk with disk
            # storageProfile.osDisk.name="{disk}"
            attach_unmanaged_command = 'az vm update -g {g} -n {n} --set storageProfile.osDisk.vhd.uri="{uri}"' \
                                       .format(g=resource_group_name, n=vm_name, uri=disk_uri)
            logger.info('Detaching repaired data disk from repair VM...')
            _call_az_command(detach_unmanaged_command)
            logger.info(
                'Attaching repaired data disk to source VM as an OS disk...')
            _call_az_command(attach_unmanaged_command)
        # Clean
        _clean_up_resources(repair_resource_group, confirm=not yes)
        command_succeeded = True
    except KeyboardInterrupt:
        return_error_detail = "Command interrupted by user input."
        return_message = "Command interrupted by user input. If the restore command fails at retry, please rerun the repair process from \'az vm repair create\'."
    except AzCommandError as azCommandError:
        return_error_detail = str(azCommandError)
        return_message = "Repair restore failed. If the restore command fails at retry, please rerun the repair process from \'az vm repair create\'."
    except Exception as exception:
        return_error_detail = str(exception)
        return_message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        # end long running op for process if not verbose
        if not is_verbose:
            cmd.cli_ctx.get_progress_controller().end()

    if not command_succeeded:
        return_status = STATUS_ERROR
        return_dict = _handle_command_error(return_error_detail,
                                            return_message)
    else:
        # Construct return dict
        return_status = STATUS_SUCCESS
        return_dict = {}
        return_dict['status'] = return_status
        return_dict['message'] = '\'{disk}\' successfully attached to \'{n}\' as an OS disk. Please test your repairs and once confirmed, ' \
                                 'you may choose to delete the source OS disk \'{src_disk}\' within resource group \'{rg}\' manually if you no longer need it, to avoid any undesired costs.' \
                                 .format(disk=disk_name, n=vm_name, src_disk=source_disk, rg=resource_group_name)

        logger.info('\n%s\n', return_dict['message'])

    # Track telemetry data
    elapsed_time = timeit.default_timer() - start_time
    _track_command_telemetry('vm repair restore', func_params, return_status,
                             return_message, return_error_detail, elapsed_time,
                             get_subscription_id(cmd.cli_ctx), return_dict)
    return return_dict
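
_call_az_command and the telemetry helpers are not shown in this excerpt. As a rough, assumed stand-in, a helper that shells out to the CLI and raises AzCommandError on failure could look like this:

import subprocess

def _call_az_command_sketch(command_string):
    # Run an az command, return its stdout, and raise on a non-zero exit code.
    # AzCommandError is the exception type caught in restore() above; it is assumed to accept a message.
    completed = subprocess.run(command_string, shell=True, capture_output=True, text=True)
    if completed.returncode != 0:
        raise AzCommandError(completed.stderr)
    return completed.stdout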
Example #30
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from knack.log import get_logger


logger = get_logger(__name__)


DEFAULT_PROFILE_NAME = 'default'


def scaffold_autoscale_settings_parameters(client):  # pylint: disable=unused-argument
    """Scaffold fully formed autoscale-settings' parameters as json template """

    import os.path
    from knack.util import CLIError
    from azure.cli.core.util import get_file_json

    # Autoscale settings parameter scaffold file path
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    autoscale_settings_parameter_file_path = os.path.join(
        curr_dir, 'autoscale-parameters-template.json')

    if not os.path.exists(autoscale_settings_parameter_file_path):
        raise CLIError('File {} not found.'.format(autoscale_settings_parameter_file_path))

    return get_file_json(autoscale_settings_parameter_file_path)
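
A possible way to consume the scaffolded template (illustrative only; the client argument is unused by the function, so None is passed here):

import json

template = scaffold_autoscale_settings_parameters(None)
print(json.dumps(template, indent=2))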
Example #31
import platform
import shutil
import stat
import subprocess
import tarfile
import zipfile

import requests

import yaml

from azure.cli.core.api import get_config_dir
from knack.log import get_logger
from knack.util import CLIError
from tabulate import tabulate

LOGGER = get_logger(__name__)


def _fzf_get_system():
    """
    Returns platform.system().

    Exists to be mocked in testing; if we just mock platform.system() it breaks the rest of
    azure-cli.
    """
    return platform.system()


def _fzf_get_filename():
    """
    Stub function to return the right command for each platform