# Example #1
# 0
def _detect_ade_status(vm):
    """Return a (has_new_ade, has_old_ade) pair for the given VM.

    Legacy ("old") ADE is detected either by encryption settings recorded in
    the OS disk's storage profile, or by an installed ADE extension whose
    major version matches the legacy extension version.
    """
    # Legacy ADE persists its secrets in the OS disk encryption settings.
    if vm.storage_profile.os_disk.encryption_settings:
        return False, True

    os_key = 'Linux' if _is_linux_os(vm) else 'Windows'
    ext_info = vm_extension_info[os_key]
    ade = _find_existing_ade(vm, ade_ext_info=ext_info)
    if ade is None:
        return False, False

    # Same major version as the legacy extension -> old ADE.
    legacy_major = ext_info['legacy_version'].split('.')[0]
    if ade.type_handler_version.split('.')[0] == legacy_major:
        return False, True

    return True, False   # we believe impossible to have both old & new ADE
# Example #2
# 0
def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None):
    """Locate the Azure Disk Encryption extension on *vm*, or return None.

    :param vm: virtual machine model (with instance view when requested).
    :param use_instance_view: search vm.instance_view.extensions instead of
        the vm.resources extension models.
    :param ade_ext_info: extension metadata dict ('publisher', 'name');
        defaults to the OS-appropriate entry of vm_extension_info.
    """
    if not ade_ext_info:
        os_key = 'Linux' if _is_linux_os(vm) else 'Windows'
        ade_ext_info = vm_extension_info[os_key]

    publisher = ade_ext_info['publisher'].lower()
    ext_name = ade_ext_info['name'].lower()

    if use_instance_view:
        # Instance-view entries carry "publisher.type" in e.type and the
        # extension's name in e.name.
        candidates = vm.instance_view.extensions or []
        matches = (e for e in candidates
                   if e.type and e.type.lower().startswith(publisher) and
                   e.name.lower() == ext_name)
    else:
        candidates = vm.resources or []
        matches = (e for e in candidates
                   if e.publisher.lower() == publisher and
                   e.virtual_machine_extension_type.lower() == ext_name)
    return next(matches, None)
def validate_create(cmd, namespace):
    """Validate and default the parameters for the repair-VM create command."""
    check_extension_version(EXTENSION_NAME)

    # The source VM must exist and must not be a classic (ASM) VM.
    source_vm = _validate_and_get_vm(cmd, namespace.resource_group_name,
                                     namespace.vm_name)
    is_linux = _is_linux_os(source_vm)

    # Repair VM name: validate when given, otherwise derive one (15-char cap).
    if namespace.repair_vm_name:
        _validate_vm_name(namespace.repair_vm_name, is_linux)
    else:
        namespace.repair_vm_name = ('repair-' + namespace.vm_name)[:15]

    # Copy-disk name: validate when given, otherwise timestamp a default.
    timestamp = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f')
    if namespace.copy_disk_name:
        _validate_disk_name(namespace.copy_disk_name)
    else:
        namespace.copy_disk_name = namespace.vm_name + '-DiskCopy-' + timestamp

    # Repair resource group: must differ from the source VM's group.
    if namespace.repair_group_name:
        if namespace.repair_group_name == namespace.resource_group_name:
            raise CLIError(
                'The repair resource group name cannot be the same as the source VM resource group.'
            )
        _validate_resource_group_name(namespace.repair_group_name)
    else:
        namespace.repair_group_name = 'repair-' + namespace.vm_name + '-' + timestamp

    # Warn when the source OS disk is encrypted.
    if _uses_encrypted_disk(source_vm):
        # TODO, validate this with encrypted VMs
        logger.warning('The source VM\'s OS disk is encrypted.')

    # Auth parameters: prompt for anything missing, then validate both.
    if not namespace.repair_username:
        _prompt_repair_username(namespace)
    validate_vm_username(namespace.repair_username, is_linux)
    if not namespace.repair_password:
        _prompt_repair_password(namespace)
    validate_vm_password(namespace.repair_password, is_linux)
# Example #4
# 0
def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None):
    """Return the ADE extension attached to *vm*, or None when absent.

    :param use_instance_view: search the instance view's extension list
        instead of the VM model's resources.
    :param ade_ext_info: extension metadata dict; when omitted, chosen from
        vm_extension_info by the VM's OS.
    """
    if not ade_ext_info:
        key = 'Linux' if _is_linux_os(vm) else 'Windows'
        ade_ext_info = vm_extension_info[key]

    if use_instance_view:
        for ext in (vm.instance_view.extensions or []):
            # Instance view reports "publisher.type" in ext.type.
            if (ext.type and
                    ext.type.lower().startswith(ade_ext_info['publisher'].lower()) and
                    ext.name.lower() == ade_ext_info['name'].lower()):
                return ext
    else:
        for ext in (vm.resources or []):
            if (ext.publisher.lower() == ade_ext_info['publisher'].lower() and
                    ext.virtual_machine_extension_type.lower() == ade_ext_info['name'].lower()):
                return ext
    return None
# Example #5
# 0
def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
    """Disable Azure Disk Encryption on a VM scale set.

    Replaces the installed ADE extension with one whose EncryptionOperation is
    'DisableEncryption', then updates the scale set and waits for completion.

    :param volume_type: which volumes to decrypt; defaulted per-OS when omitted.
    :param force: passed through to the volume-type defaulting/safety check.
    :raises CLIError: when no ADE extension is installed on the scale set.
    """
    UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models('UpgradeMode', 'VirtualMachineScaleSetExtension')
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. be nice, figure out the default volume type
    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)

    # 2. update the disk encryption extension
    public_config = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
    }

    # NOTE(review): this SDK model version spells the extension-type property
    # 'type1'; the randomized force_update_tag presumably makes the extension
    # re-run even when the settings are unchanged — confirm against SDK docs.
    ext = VirtualMachineScaleSetExtension(name=extension['name'],
                                          publisher=extension['publisher'],
                                          type1=extension['name'],
                                          type_handler_version=extension['version'],
                                          settings=public_config,
                                          auto_upgrade_minor_version=True,
                                          force_update_tag=uuid.uuid4())
    if (not vmss.virtual_machine_profile.extension_profile or
            not vmss.virtual_machine_profile.extension_profile.extensions):
        extensions = []
    else:
        extensions = vmss.virtual_machine_profile.extension_profile.extensions

    # Find the currently-installed ADE extension; decryption requires one.
    ade_extension = [x for x in extensions if
                     x.type1.lower() == extension['name'].lower() and x.publisher.lower() == extension['publisher'].lower()]  # pylint: disable=line-too-long
    if not ade_extension:
        from knack.util import CLIError
        raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))

    # Swap the old ADE extension for the disable-encryption one in place.
    index = vmss.virtual_machine_profile.extension_profile.extensions.index(ade_extension[0])
    vmss.virtual_machine_profile.extension_profile.extensions[index] = ext

    # Avoid unnecessary permission error
    vmss.virtual_machine_profile.storage_profile.image_reference = None

    poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, False)
def validate_create(cmd, namespace):
    """Validate and default the parameters for the repair-VM create command."""
    # The source VM must exist and must not be a classic (ASM) VM.
    source_vm = _validate_and_get_vm(cmd, namespace.resource_group_name,
                                     namespace.vm_name)
    is_linux = _is_linux_os(source_vm)

    # Repair VM name: validate when given, otherwise derive one (15-char cap).
    if namespace.repair_vm_name:
        _validate_vm_name(namespace.repair_vm_name, is_linux)
    else:
        namespace.repair_vm_name = ('repair-' + namespace.vm_name)[:15]

    # Copy-disk name: validate when given, otherwise timestamp a default.
    timestamp = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    if namespace.copy_disk_name:
        _validate_disk_name(namespace.copy_disk_name)
    else:
        namespace.copy_disk_name = namespace.vm_name + '-DiskCopy-' + timestamp

    # Repair resource group: must differ from the source VM's group.
    if namespace.repair_group_name:
        if namespace.repair_group_name == namespace.resource_group_name:
            raise CLIError(
                'The repair resource group name cannot be the same as the source VM resource group.'
            )
        _validate_resource_group_name(namespace.repair_group_name)
    else:
        namespace.repair_group_name = 'repair-' + namespace.vm_name + '-' + timestamp

    # Warn when the source OS disk is encrypted.
    if _uses_encrypted_disk(source_vm):
        # TODO, validate this with encrypted VMs
        logger.warning('The source VM\'s OS disk is encrypted.')

    # Auth parameters: a custom admin username is not applicable to Linux.
    if is_linux and namespace.repair_username:
        logger.warning(
            "Setting admin username property is not allowed for Linux VMs. Ignoring the given repair-username parameter."
        )
    if not is_linux and not namespace.repair_username:
        _prompt_repair_username(namespace)
    if not namespace.repair_password:
        _prompt_repair_password(namespace)
# Example #7
# 0
def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
    """Disable Azure Disk Encryption on a VM scale set.

    Replaces the installed ADE extension with one whose EncryptionOperation is
    'DisableEncryption', then updates the scale set and waits for completion.

    :param volume_type: which volumes to decrypt; defaulted per-OS when omitted.
    :param force: passed through to the volume-type defaulting/safety check.
    :raises CLIError: when no ADE extension is installed on the scale set.
    """
    UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models('UpgradeMode', 'VirtualMachineScaleSetExtension')
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. be nice, figure out the default volume type
    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)

    # 2. update the disk encryption extension
    public_config = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
    }

    # The randomized force_update_tag presumably makes the extension re-run
    # even when its settings are unchanged — confirm against SDK docs.
    ext = VirtualMachineScaleSetExtension(name=extension['name'],
                                          publisher=extension['publisher'],
                                          type=extension['name'],
                                          type_handler_version=extension['version'],
                                          settings=public_config,
                                          auto_upgrade_minor_version=True,
                                          force_update_tag=uuid.uuid4())
    if (not vmss.virtual_machine_profile.extension_profile or
            not vmss.virtual_machine_profile.extension_profile.extensions):
        extensions = []
    else:
        extensions = vmss.virtual_machine_profile.extension_profile.extensions

    # Find the currently-installed ADE extension; decryption requires one.
    ade_extension = [x for x in extensions if
                     x.type.lower() == extension['name'].lower() and x.publisher.lower() == extension['publisher'].lower()]  # pylint: disable=line-too-long
    if not ade_extension:
        from knack.util import CLIError
        raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))

    # Swap the old ADE extension for the disable-encryption one in place.
    index = vmss.virtual_machine_profile.extension_profile.extensions.index(ade_extension[0])
    vmss.virtual_machine_profile.extension_profile.extensions[index] = ext
    poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, False)
# Example #8
# 0
def decrypt_vm(cmd,
               resource_group_name,
               vm_name,
               volume_type=None,
               force=False):
    """Disable Azure Disk Encryption on a single VM.

    Installs the ADE extension with EncryptionOperation 'DisableEncryption'
    (using the legacy extension version when only old ADE is present) and,
    for old ADE, clears the encryption settings from the VM's storage profile.

    :param volume_type: volumes to decrypt; Linux defaults to data disks only.
    :param force: allow decrypting non-data volumes on a Linux VM.
    :raises CLIError: when the Linux volume-type check fails or the extension
        update does not reach the 'Succeeded' provisioning state.
    """
    from knack.util import CLIError

    compute_client = _compute_client_factory(cmd.cli_ctx)
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    has_new_ade, has_old_ade = _detect_ade_status(vm)
    if not has_new_ade and not has_old_ade:
        logger.warning('Azure Disk Encryption is not enabled')
        return
    is_linux = _is_linux_os(vm)
    # pylint: disable=no-member

    # 1. be nice, figure out the default volume type and also verify VM will not be busted
    if is_linux:
        if volume_type:
            if not force and volume_type != _DATA_VOLUME_TYPE:
                raise CLIError(
                    "Only Data disks can have encryption disabled in a Linux VM. "
                    "Use '--force' to ignore the warning")
        else:
            volume_type = _DATA_VOLUME_TYPE
    elif volume_type is None:
        volume_type = _ALL_VOLUME_TYPE

    extension = vm_extension_info['Linux' if is_linux else 'Windows']
    # sequence_version should be incremented since encryptions occurred before
    sequence_version = uuid.uuid4()

    # 2. update the disk encryption extension
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
        'SequenceVersion': sequence_version,
    }

    VirtualMachineExtension, DiskEncryptionSettings = cmd.get_models(
        'VirtualMachineExtension', 'DiskEncryptionSettings')

    # Old ADE must be driven through the legacy extension version.
    ext = VirtualMachineExtension(
        location=vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        type_handler_version=extension['version']
        if has_new_ade else extension['legacy_version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()
    # Re-read the extension's instance view to confirm the update succeeded.
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError("Extension updating didn't succeed")

    if not has_new_ade:
        # 3. Remove the secret from VM's storage profile
        vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
        disk_encryption_settings = DiskEncryptionSettings(enabled=False)
        vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
        set_vm(cmd, vm)
def validate_create(cmd, namespace):
    """Validate and default the parameters for the repair-VM create command."""
    check_extension_version(EXTENSION_NAME)

    # The source VM must exist and must not be a classic (ASM) VM.
    source_vm = _validate_and_get_vm(cmd, namespace.resource_group_name,
                                     namespace.vm_name)
    is_linux = _is_linux_os(source_vm)

    # Repair VM name: validate when given, otherwise derive one (15-char cap).
    if namespace.repair_vm_name:
        _validate_vm_name(namespace.repair_vm_name, is_linux)
    else:
        namespace.repair_vm_name = ('repair-' + namespace.vm_name)[:14] + '_'

    # Copy-disk name: validate when given, otherwise timestamp a default.
    timestamp = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f')
    if namespace.copy_disk_name:
        _validate_disk_name(namespace.copy_disk_name)
    else:
        namespace.copy_disk_name = namespace.vm_name + '-DiskCopy-' + timestamp

    # Repair resource group: must differ from the source VM's group.
    if namespace.repair_group_name:
        if namespace.repair_group_name == namespace.resource_group_name:
            raise CLIError(
                'The repair resource group name cannot be the same as the source VM resource group.'
            )
        _validate_resource_group_name(namespace.repair_group_name)
    else:
        namespace.repair_group_name = 'repair-' + namespace.vm_name + '-' + timestamp

    # Encryption: only single-pass ADE is supported; dual pass is rejected.
    encryption_type, _, _, _ = _fetch_encryption_settings(source_vm)
    if encryption_type in (Encryption.SINGLE_WITH_KEK,
                           Encryption.SINGLE_WITHOUT_KEK):
        if not namespace.unlock_encrypted_vm:
            _prompt_encrypted_vm(namespace)
    elif encryption_type is Encryption.DUAL:
        logger.warning(
            'The source VM\'s OS disk is encrypted using dual pass method.')
        raise CLIError(
            'The current command does not support VMs which were encrypted using dual pass.'
        )
    else:
        logger.debug('The source VM\'s OS disk is not encrypted')

    # Nested virtualization is Windows-only.
    if namespace.enable_nested and is_linux:
        raise CLIError('Nested VM is not supported for Linux VM')

    # Auth parameters: prompt for anything missing, then validate both.
    if not namespace.repair_username:
        _prompt_repair_username(namespace)
    validate_vm_username(namespace.repair_username, is_linux)
    if not namespace.repair_password:
        _prompt_repair_password(namespace)
    validate_vm_password(namespace.repair_password, is_linux)

    # Ask about public-IP association when it was not already requested.
    if not namespace.associate_public_ip:
        _prompt_public_ip(namespace)
def validate_run(cmd, namespace):
    """Validate parameters for the repair 'run' command and resolve the target VM."""
    check_extension_version(EXTENSION_NAME)

    # A repair-VM id implies the script should run on the repair VM.
    if namespace.repair_vm_id:
        namespace.run_on_repair = True

    # Exactly one of --run-id / --custom-run-file must be supplied.
    if not namespace.run_id and not namespace.custom_script_file:
        raise CLIError('Please specify the run id with --run-id.')
    if namespace.run_id and namespace.custom_script_file:
        raise CLIError(
            'Cannot continue with both the run-id and the custom-run-file. Please specify just one.'
        )
    # The source VM must exist and must not be a classic (ASM) VM.
    source_vm = _validate_and_get_vm(cmd, namespace.resource_group_name,
                                     namespace.vm_name)
    is_linux = _is_linux_os(source_vm)

    if namespace.custom_script_file:
        # The script's file extension must match the target OS.
        if is_linux and not namespace.custom_script_file.endswith(('.sh', '.bash')):
            raise CLIError(
                'Only .sh or .bash scripts are supported for repair run on a Linux VM.'
            )
        if not is_linux and not namespace.custom_script_file.endswith(('.ps1', '.ps2')):
            raise CLIError(
                'Only PowerShell scripts are supported for repair run on a Windows VM.'
            )
        # The file itself must exist locally.
        import os.path
        if not os.path.isfile(namespace.custom_script_file):
            raise CLIError(
                'Custom script file cannot be found. Please check if the file exists.'
            )
        # Current limitations of custom run files.
        if namespace.parameters:
            raise CLIError(
                'Parameter passing does not work for custom run files yet. Please remove --parameters arguments.'
            )
        with open(namespace.custom_script_file, 'r') as script_file:
            if script_file.readline().lower().startswith('param('):
                raise CLIError(
                    'Powershell param() statement does not work for custom script files yet. Please remove the param() line in the file.'
                )

        namespace.run_id = 'no-op'

    # The run-id prefix encodes the script's OS; it must match the VM's OS.
    if not is_linux and namespace.run_id.startswith('linux'):
        raise CLIError(
            'Script IDs that start with \'linux\' are Linux Shell scripts. You cannot run Linux Shell scripts on a Windows VM.'
        )
    if is_linux and namespace.run_id.startswith('win'):
        raise CLIError(
            'Script IDs that start with \'win\' are Windows PowerShell scripts. You cannot run Windows PowerShell scripts on a Linux VM.'
        )

    # Resolve the repair VM when running on it without an explicit id.
    if namespace.run_on_repair and not namespace.repair_vm_id:
        fetch_repair_vm(namespace)

    # Otherwise scripts run directly on the source VM.
    if not namespace.run_on_repair:
        namespace.repair_vm_id = source_vm.id

    if not is_valid_resource_id(namespace.repair_vm_id):
        raise CLIError('Repair resource id is not valid.')
# Example #11
# 0
def encrypt_vmss(cmd, resource_group_name, vmss_name,  # pylint: disable=too-many-locals, too-many-statements
                 disk_encryption_keyvault,
                 key_encryption_keyvault=None,
                 key_encryption_key=None,
                 key_encryption_algorithm='RSA-OAEP',
                 volume_type=None,
                 force=False):
    """Enable Azure Disk Encryption on a VM scale set.

    Validates the volume type and key-vault configuration, then installs
    (or replaces) the ADE extension with EncryptionOperation 'EnableEncryption'.

    :param disk_encryption_keyvault: resource id of the vault that receives
        the disk encryption key.
    :param key_encryption_keyvault: vault holding the key-encryption key;
        defaults to disk_encryption_keyvault when a KEK is named.
    :param key_encryption_key: optional KEK name or full key URL.
    :param key_encryption_algorithm: algorithm used to wrap the DEK with the KEK.
    :param volume_type: volumes to encrypt; defaulted per-OS when omitted.
    :param force: skip safety checks in volume-type and vault verification.
    """
    from msrestazure.tools import parse_resource_id

    # pylint: disable=no-member
    UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')

    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. First validate arguments
    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
                                                          (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault

    #  to avoid bad server errors, ensure the vault has the right configurations
    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)

    # if key name and not key url, get url.
    if key_encryption_key and '://' not in key_encryption_key:
        key_encryption_key = _get_keyvault_key_url(
            cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'KeyEncryptionKeyURL': key_encryption_key or '',
        "KeyVaultResourceId": disk_encryption_keyvault,
        "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
        'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption'
    }

    # The randomized force_update_tag presumably makes the extension re-run
    # even when its settings are unchanged — confirm against SDK docs.
    ext = VirtualMachineScaleSetExtension(name=extension['name'],
                                          publisher=extension['publisher'],
                                          type=extension['name'],
                                          type_handler_version=extension['version'],
                                          settings=public_config,
                                          auto_upgrade_minor_version=True,
                                          force_update_tag=uuid.uuid4())
    exts = [ext]

    # remove any old ade extensions set by this command and add the new one.
    vmss_ext_profile = vmss.virtual_machine_profile.extension_profile
    if vmss_ext_profile and vmss_ext_profile.extensions:
        exts.extend(old_ext for old_ext in vmss.virtual_machine_profile.extension_profile.extensions
                    if old_ext.type != ext.type or old_ext.name != ext.name)
    vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=exts)

    poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
# Example #12
# 0
def run(cmd,
        vm_name,
        resource_group_name,
        run_id=None,
        repair_vm_id=None,
        custom_script_file=None,
        parameters=None,
        run_on_repair=False):
    """Run a repair script (by run id or custom file) on a VM via az run-command.

    Builds an 'az vm run-command invoke' call around the OS-appropriate driver
    script, executes it, parses the script's logs out of stdout, and records
    telemetry. Returns a dict with status/message/logs/output details.

    :param run_id: id of a script from the repair-script library ('no-op' when
        a custom file is used).
    :param repair_vm_id: resource id of the VM to run on (set by validate_run).
    :param custom_script_file: local script path for the custom-script scenario.
    :param parameters: extra script parameters; not supported with custom files.
    :param run_on_repair: True when the script targets the repair VM rather
        than the source VM.
    """
    # begin progress reporting for long running operation
    cmd.cli_ctx.get_progress_controller().begin()
    cmd.cli_ctx.get_progress_controller().add(message='Running')
    # Function param for telemetry
    func_params = _get_function_param_dict(inspect.currentframe())
    # Start timer and params for custom telemetry
    start_time = timeit.default_timer()
    # Initialize return variables
    return_message = ''
    return_error_detail = ''
    return_status = ''

    # Overall success flag
    command_succeeded = False
    return_dict = {}
    try:
        source_vm = get_vm(cmd, resource_group_name, vm_name)

        # Build absolute path of driver script
        loader = pkgutil.get_loader('azext_vm_repair')
        mod = loader.load_module('azext_vm_repair')
        rootpath = os.path.dirname(mod.__file__)
        is_linux = _is_linux_os(source_vm)
        if is_linux:
            run_script = os.path.join(rootpath, 'scripts',
                                      'linux-run-driver.sh')
            command_id = 'RunShellScript'
        else:
            run_script = os.path.join(rootpath, 'scripts',
                                      'win-run-driver.ps1')
            command_id = 'RunPowerShellScript'

        # If run_on_repair is False, then repair_vm is the source_vm (scripts run directly on source vm)
        repair_vm_id = parse_resource_id(repair_vm_id)
        repair_vm_name = repair_vm_id['name']
        repair_resource_group = repair_vm_id['resource_group']

        repair_run_command = 'az vm run-command invoke -g {rg} -n {vm} --command-id {command_id} ' \
                             '--scripts "@{run_script}"' \
                             .format(rg=repair_resource_group, vm=repair_vm_name, command_id=command_id, run_script=run_script)

        # Normal scenario with run id
        if not custom_script_file:
            # Fetch run path from GitHub
            repair_script_path = _fetch_run_script_path(run_id)
            repair_run_command += ' --parameters script_path="./{repair_script}"'.format(
                repair_script=repair_script_path)
        # Custom script scenario for script testers
        else:
            # no-op run id
            repair_run_command += ' "@{custom_file}" --parameters script_path=no-op'.format(
                custom_file=custom_script_file)
        # Append Parameters
        if parameters:
            if is_linux:
                param_string = _process_bash_parameters(parameters)
            else:
                param_string = _process_ps_parameters(parameters)
            # Work around for run-command bug, unexpected behavior with space characters
            param_string = param_string.replace(' ', '%20')
            repair_run_command += ' params="{}"'.format(param_string)
        if run_on_repair:
            vm_string = 'repair VM'
        else:
            vm_string = 'VM'
        logger.info('Running script on %s: %s', vm_string, repair_vm_name)
        script_start_time = timeit.default_timer()
        return_str = _call_az_command(repair_run_command)
        script_duration = timeit.default_timer() - script_start_time
        # Extract stdout and stderr, if stderr exists then possible error
        run_command_return = json.loads(return_str)

        # Linux run-command interleaves stdout/stderr in a single message;
        # Windows returns them as separate entries.
        if is_linux:
            run_command_message = run_command_return['value'][0][
                'message'].split('[stdout]')[1].split('[stderr]')
            stdout = run_command_message[0].strip('\n')
            stderr = run_command_message[1].strip('\n')
        else:
            stdout = run_command_return['value'][0]['message']
            stderr = run_command_return['value'][1]['message']

        run_script_succeeded = _check_script_succeeded(stdout)
        # Parse through logs to populate log properties: 'level', 'message'
        logs = _parse_run_script_raw_logs(stdout)

        # Process log-start and log-end
        # Log is cutoff at the start if over 4k bytes
        log_cutoff = True
        log_fullpath = ''
        for log in logs:
            if log['level'] == 'Log-Start':
                log_cutoff = False
            if log['level'] == 'Log-End':
                split_log = log['message'].split(']')
                if len(split_log) == 2:
                    log_fullpath = split_log[1]
        if log_cutoff:
            logger.warning(
                'Log file is too large and has been cutoff at the start of file. Please locate the log file within the %s using the logFullpath to check full logs.',
                vm_string)

        # Output 'output' or 'error' level logs depending on status
        if run_script_succeeded:
            script_status = STATUS_SUCCESS
            return_status = STATUS_SUCCESS
            message = 'Script completed without error.'
            output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'output'
            ])
            logger.info('\nScript returned with output:\n%s\n', output)
        else:
            script_status = STATUS_ERROR
            return_status = STATUS_ERROR
            message = 'Script returned with possible errors.'
            output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'error'
            ])
            logger.error('\nScript returned with error:\n%s\n', output)

        logger.debug("stderr: %s", stderr)
        return_message = message
        return_dict['status'] = return_status
        return_dict['message'] = message
        return_dict['logs'] = stdout
        return_dict['log_full_path'] = log_fullpath
        return_dict['output'] = output
        return_dict['vm_name'] = repair_vm_name
        return_dict['resouce_group'] = repair_resource_group
        command_succeeded = True
    except KeyboardInterrupt:
        return_error_detail = "Command interrupted by user input."
        return_message = "Repair run failed. Command interrupted by user input."
    except AzCommandError as azCommandError:
        return_error_detail = str(azCommandError)
        return_message = "Repair run failed."
    except requests.exceptions.RequestException as exception:
        return_error_detail = str(exception)
        return_message = "Failed to fetch run script data from GitHub. Please check this repository is reachable: https://github.com/Azure/repair-script-library"
    except RunScriptNotFoundForIdError as exception:
        return_error_detail = str(exception)
        return_message = "Repair run failed. Run ID not found."
    except Exception as exception:
        return_error_detail = str(exception)
        return_message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        # end long running op for process
        cmd.cli_ctx.get_progress_controller().end()

    # Any exception path lands here with command_succeeded still False.
    if not command_succeeded:
        script_duration = ''
        output = 'Script returned with possible errors.'
        script_status = STATUS_ERROR
        return_status = STATUS_ERROR
        return_dict = _handle_command_error(return_error_detail,
                                            return_message)

    # Track telemetry data
    elapsed_time = timeit.default_timer() - start_time
    _track_run_command_telemetry('vm repair run', func_params, return_status,
                                 return_message,
                                 return_error_detail, elapsed_time,
                                 get_subscription_id(cmd.cli_ctx), return_dict,
                                 run_id, script_status, output,
                                 script_duration)
    return return_dict
# Example #13
# 0
def show_vm_encryption_status(cmd, resource_group_name, vm_name):
    """Report the Azure Disk Encryption state of a VM's OS and data disks.

    Returns None (with a warning) when ADE is not enabled; otherwise a dict
    with 'osDisk', 'dataDisk', 'osType', 'osDiskEncryptionSettings' and,
    when the extension reports one, a 'progressMessage'.
    """
    import json

    client = _compute_client_factory(cmd.cli_ctx)
    vm = client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')

    has_new_ade, has_old_ade = _detect_ade_status(vm)
    if not (has_new_ade or has_old_ade):
        logger.warning('Azure Disk Encryption is not enabled')
        return None
    if has_new_ade:
        # The AAD-less ADE extension carries its own status reporting.
        return _show_vm_encryption_status_thru_new_ade(vm)

    is_linux = _is_linux_os(vm)
    platform = 'Linux' if is_linux else 'Windows'

    status = {
        'osDisk': 'NotEncrypted',
        'osDiskEncryptionSettings': None,
        'dataDisk': 'NotEncrypted',
        'osType': platform
    }

    # pylint: disable=no-member
    # The following logic was mostly ported from xplat-cli
    ext_info = vm_extension_info[platform]
    ext_result = client.virtual_machine_extensions.get(resource_group_name,
                                                       vm_name,
                                                       ext_info['name'],
                                                       'instanceView')
    logger.debug(ext_result)
    if ext_result.instance_view.statuses:
        status['progressMessage'] = ext_result.instance_view.statuses[0].message

    substatus = None
    if getattr(ext_result.instance_view, 'substatuses', None):
        substatus = ext_result.instance_view.substatuses[0].message

    status['osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings

    if is_linux:
        # Linux ADE publishes per-volume state as JSON in the substatus.
        try:
            parsed = json.loads(substatus)
        except Exception:  # pylint: disable=broad-except
            parsed = None  # might be from outdated extension

        status['osDisk'] = parsed['os'] if parsed and ('os' in parsed) else 'Unknown'
        status['dataDisk'] = parsed['data'] if parsed and ('data' in parsed) else 'Unknown'
    else:
        # Windows - get os and data volume encryption state from the vm model
        settings = status['osDiskEncryptionSettings']
        if (settings and settings.enabled and settings.disk_encryption_key and
                settings.disk_encryption_key.secret_url):
            status['osDisk'] = _STATUS_ENCRYPTED
        else:
            status['osDisk'] = 'Unknown'

        if ext_result.provisioning_state == 'Succeeded':
            volume_type = ext_result.settings.get('VolumeType', None)
            about_data_disk = not volume_type or volume_type.lower() != 'os'
            if about_data_disk and ext_result.settings.get('EncryptionOperation', None) == 'EnableEncryption':
                status['dataDisk'] = _STATUS_ENCRYPTED

    return status
Example #14
0
def encrypt_vmss(
        cmd,
        resource_group_name,
        vmss_name,  # pylint: disable=too-many-locals, too-many-statements
        disk_encryption_keyvault,
        key_encryption_keyvault=None,
        key_encryption_key=None,
        key_encryption_algorithm='RSA-OAEP',
        volume_type=None,
        force=False):
    """Enable Azure Disk Encryption across all instances of a scale set by
    installing the ADE extension into the VMSS model."""
    from msrestazure.tools import parse_resource_id

    # pylint: disable=no-member
    UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')

    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    ade_ext = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. First validate arguments
    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)

    # Encryption is not supported on every Linux distro and the service never
    # rejects unsupported ones, so warn from the client side.
    if is_linux:
        image_reference = getattr(vmss.virtual_machine_profile.storage_profile, 'image_reference', None)
        if image_reference:
            supported, message = _check_encrypt_is_supported(image_reference, volume_type)
            if not supported:
                logger.warning(message)

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears a key name
            key_encryption_key = _get_keyvault_key_url(
                cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)

    # to avoid bad server errors, ensure the vault has the right configurations
    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault,
                                         key_encryption_keyvault, vmss, force)

    # 2. we are ready to provision/update the disk encryption extension
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'KeyEncryptionKeyURL': key_encryption_key or '',
        "KeyVaultResourceId": disk_encryption_keyvault,
        "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
        'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption'
    }

    # force_update_tag makes the service re-run the extension even if the
    # settings are unchanged.
    ade = VirtualMachineScaleSetExtension(name=ade_ext['name'],
                                          publisher=ade_ext['publisher'],
                                          type=ade_ext['name'],
                                          type_handler_version=ade_ext['version'],
                                          settings=public_config,
                                          auto_upgrade_minor_version=True,
                                          force_update_tag=uuid.uuid4())

    profile = vmss.virtual_machine_profile
    if not profile.extension_profile:
        profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
    profile.extension_profile.extensions.append(ade)

    poller = client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    _show_post_action_message(resource_group_name, vmss.name,
                              vmss.upgrade_policy.mode == UpgradeMode.manual, True)
Example #15
0
def encrypt_vm(cmd, resource_group_name, vm_name,  # pylint: disable=too-many-locals, too-many-statements
               disk_encryption_keyvault,
               aad_client_id=None,
               aad_client_secret=None, aad_client_cert_thumbprint=None,
               key_encryption_keyvault=None,
               key_encryption_key=None,
               key_encryption_algorithm='RSA-OAEP',
               volume_type=None,
               encrypt_format_all=False):
    """Enable Azure Disk Encryption on a VM.

    Installs the ADE VM extension. For the new (AAD-less) flow that is all
    that is needed; for the legacy (AAD-based) flow the secret URL reported
    by the extension is additionally written back into the VM's
    ``storage_profile.os_disk.encryption_settings``.

    :param cmd: CLI command context (supplies ``cli_ctx`` and model types).
    :param resource_group_name: resource group of the VM.
    :param vm_name: name of the VM to encrypt.
    :param disk_encryption_keyvault: resource id of the vault that will hold
        the disk encryption secret.
    :param aad_client_id: AAD client id; providing it selects the legacy flow.
    :param aad_client_secret: AAD client secret (legacy flow).
    :param aad_client_cert_thumbprint: certificate thumbprint alternative to
        ``aad_client_secret`` (legacy flow).
    :param key_encryption_keyvault: vault of the key-encryption key; defaults
        to ``disk_encryption_keyvault`` when a KEK is given.
    :param key_encryption_key: KEK URL, or a bare key name to be resolved.
    :param key_encryption_algorithm: algorithm used to wrap with the KEK.
    :param volume_type: volume scope; defaulted below when omitted.
    :param encrypt_format_all: use EnableEncryptionFormatAll instead of
        in-place encryption.
    :raises CLIError: missing AAD credentials, missing --volume-type for a
        Linux VM with data disks, or failed extension provisioning.
    """
    from msrestazure.tools import parse_resource_id
    from knack.util import CLIError

    # pylint: disable=no-member
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    is_linux = _is_linux_os(vm)
    backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
    vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
    _, has_old_ade = _detect_ade_status(vm)
    # New ADE requires no AAD credentials; stick to legacy ADE when the caller
    # passed an AAD client id or the VM already runs the legacy extension.
    use_new_ade = not aad_client_id and not has_old_ade
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    if not use_new_ade and not aad_client_id:
        raise CLIError('Please provide --aad-client-id')

    # 1. First validate arguments
    if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
        raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret')

    if volume_type is None:
        if not is_linux:
            volume_type = _ALL_VOLUME_TYPE
        elif vm.storage_profile.data_disks:
            # Ambiguous on Linux: encrypting data disks is not always wanted.
            raise CLIError('VM has data disks, please supply --volume-type')
        else:
            volume_type = 'OS'

    # encryption is not supported on all linux distros, but service never tells you
    # so let us verify at the client side
    if is_linux:
        image_reference = getattr(vm.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(image_reference, volume_type)
            if not result:
                logger.warning(message)

    # sequence_version should be unique
    sequence_version = uuid.uuid4()

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears a key name
            key_encryption_key = _get_keyvault_key_url(
                cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll',
        'KeyEncryptionKeyURL': key_encryption_key,
        'KeyEncryptionAlgorithm': key_encryption_algorithm,
        'SequenceVersion': sequence_version,
    }
    if use_new_ade:
        public_config.update({
            "KeyVaultResourceId": disk_encryption_keyvault,
            "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
        })
    else:
        public_config.update({
            'AADClientID': aad_client_id,
            'AADClientCertThumbprint': aad_client_cert_thumbprint,
        })

    # Legacy ADE passes the AAD secret via protected settings. Note Windows
    # coerces a missing secret to '' while Linux keeps None.
    ade_legacy_private_config = {
        'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
    }

    VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
        cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
                       'KeyVaultKeyReference', 'SubResource')

    ext = VirtualMachineExtension(
        location=vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        protected_settings=None if use_new_ade else ade_legacy_private_config,
        type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # verify the extension was ok
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError('Extension needed for disk encryption was not provisioned correctly')

    if not use_new_ade:
        if not (extension_result.instance_view.statuses and
                extension_result.instance_view.statuses[0].message):
            raise CLIError('Could not find url pointing to the secret for disk encryption')

        # 3. update VM's storage profile with the secrets
        # Legacy ADE reports the secret URL in the first instance-view status.
        status_url = extension_result.instance_view.statuses[0].message

        # Re-fetch the VM so we patch the freshest model.
        vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
        secret_ref = KeyVaultSecretReference(secret_url=status_url,
                                             source_vault=SubResource(id=disk_encryption_keyvault))

        key_encryption_key_obj = None
        if key_encryption_key:
            key_encryption_key_obj = KeyVaultKeyReference(key_url=key_encryption_key,
                                                          source_vault=SubResource(id=key_encryption_keyvault))

        disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref,
                                                          key_encryption_key=key_encryption_key_obj,
                                                          enabled=True)
        if vm_encrypted:
            # stop the vm before update if the vm is already encrypted
            logger.warning("Deallocating the VM before updating encryption settings...")
            compute_client.virtual_machines.deallocate(resource_group_name, vm_name).result()
            vm = compute_client.virtual_machines.get(resource_group_name, vm_name)

        vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
        set_vm(cmd, vm)

        if vm_encrypted:
            # and start after the update
            logger.warning("Restarting the VM after the update...")
            compute_client.virtual_machines.start(resource_group_name, vm_name).result()

    if is_linux and volume_type != _DATA_VOLUME_TYPE:
        old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly"
        logger.warning("The encryption request was accepted. Please use 'show' command to monitor "
                       "the progress. %s", "" if use_new_ade else old_ade_msg)
Example #16
0
def create(cmd,
           vm_name,
           resource_group_name,
           repair_password=None,
           repair_username=None,
           repair_vm_name=None,
           copy_disk_name=None,
           repair_group_name=None,
           unlock_encrypted_vm=False,
           enable_nested=False,
           associate_public_ip=False,
           distro='ubuntu'):
    """Create a repair VM in its own resource group with a copy of the source
    VM's OS disk attached as a data disk.

    Handles managed and unmanaged source disks, encrypted source VMs
    (``unlock_encrypted_vm``) and optional nested Hyper-V setup
    (``enable_nested``). On any failure the created resources are cleaned up.

    Fixes relative to the previous revision:
    - the Windows + encrypted path no longer creates the repair VM twice;
    - the copied unmanaged-disk URI is built by slicing off the blob-name
      suffix instead of ``str.rstrip`` (which strips a character *set* and
      could over-strip the URI);
    - the SkuDoesNotSupportHyperV message now names the real flag
      ``--enable-nested``.

    :return: dict describing the command result and created resources.
    """
    # Init command helper object
    command = command_helper(logger, cmd, 'vm repair create')
    # Main command calling block
    try:
        # Fetch source VM data
        source_vm = get_vm(cmd, resource_group_name, vm_name)
        source_vm_instance_view = get_vm(cmd, resource_group_name, vm_name, 'instanceView')

        is_linux = _is_linux_os(source_vm)
        is_gen2 = _is_gen2(source_vm_instance_view)

        target_disk_name = source_vm.storage_profile.os_disk.name
        is_managed = _uses_managed_disk(source_vm)
        copy_disk_id = None
        resource_tag = _get_repair_resource_tag(resource_group_name, vm_name)
        created_resources = []

        # Fetch OS image urn and set OS type for disk create
        if is_linux:
            os_type = 'Linux'
            hyperV_generation_linux = _check_linux_hyperV_gen(source_vm)
            if hyperV_generation_linux == 'V2':
                logger.info(
                    'Generation 2 VM detected, RHEL/Centos/Oracle 6 distros not available to be used for rescue VM '
                )
                logger.debug('gen2 machine detected')
                os_image_urn = _select_distro_linux_gen2(distro)
            else:
                os_image_urn = _select_distro_linux(distro)
        else:
            os_image_urn = _fetch_compatible_windows_os_urn(source_vm)
            os_type = 'Windows'

        # Set up base create vm command (Linux additionally injects cloud-init)
        if is_linux:
            create_repair_vm_command = 'az vm create -g {g} -n {n} --tag {tag} --image {image} --admin-username {username} --admin-password {password} --public-ip-address {option} --custom-data {cloud_init_script}' \
                .format(g=repair_group_name, n=repair_vm_name, tag=resource_tag, image=os_image_urn, username=repair_username, password=repair_password, option=associate_public_ip, cloud_init_script=_get_cloud_init_script())
        else:
            create_repair_vm_command = 'az vm create -g {g} -n {n} --tag {tag} --image {image} --admin-username {username} --admin-password {password} --public-ip-address {option}' \
                .format(g=repair_group_name, n=repair_vm_name, tag=resource_tag, image=os_image_urn, username=repair_username, password=repair_password, option=associate_public_ip)

        # Fetch VM size of repair VM
        sku = _fetch_compatible_sku(source_vm, enable_nested)
        if not sku:
            raise SkuNotAvailableError(
                'Failed to find compatible VM size for source VM\'s OS disk within given region and subscription.'
            )
        create_repair_vm_command += ' --size {sku}'.format(sku=sku)

        # Set availability zone for vm
        if source_vm.zones:
            zone = source_vm.zones[0]
            create_repair_vm_command += ' --zone {zone}'.format(zone=zone)

        # Create new resource group
        create_resource_group_command = 'az group create -l {loc} -n {group_name}' \
                                        .format(loc=source_vm.location, group_name=repair_group_name)
        logger.info('Creating resource group for repair VM and its resources...')
        _call_az_command(create_resource_group_command)

        # MANAGED DISK
        if is_managed:
            logger.info('Source VM uses managed disks. Creating repair VM with managed disks.\n')

            # Copy OS disk command
            disk_sku, location, os_type, hyperV_generation = _fetch_disk_info(
                resource_group_name, target_disk_name)
            copy_disk_command = 'az disk create -g {g} -n {n} --source {s} --sku {sku} --location {loc} --os-type {os_type} --query id -o tsv' \
                                .format(g=resource_group_name, n=copy_disk_name, s=target_disk_name, sku=disk_sku, loc=location, os_type=os_type)

            # Only add hyperV variable when available
            if hyperV_generation:
                copy_disk_command += ' --hyper-v-generation {hyperV}'.format(hyperV=hyperV_generation)
            elif is_linux and hyperV_generation_linux == 'V2':
                # Disk metadata lacks the generation even though the source image is gen2.
                logger.info(
                    'The disk did not contain the info of gen2 , but the machine is created from gen2 image'
                )
                copy_disk_command += ' --hyper-v-generation {hyperV}'.format(hyperV=hyperV_generation_linux)
            # Set availability zone for vm when available
            if source_vm.zones:
                zone = source_vm.zones[0]
                copy_disk_command += ' --zone {zone}'.format(zone=zone)
            # Copy OS Disk
            logger.info('Copying OS disk of source VM...')
            copy_disk_id = _call_az_command(copy_disk_command).strip('\n')

            # Create the repair VM. Only for an unencrypted Linux VM is the
            # copied disk attached AFTER creation (with fix_uuid), which
            # prevents an incorrect boot due to an OS-disk UUID mismatch.
            # NOTE: previously the Windows + encrypted path created the repair
            # VM twice; a single creation is sufficient.
            if is_linux and not unlock_encrypted_vm:
                # linux without encryption
                _create_repair_vm(copy_disk_id,
                                  create_repair_vm_command,
                                  repair_password,
                                  repair_username,
                                  fix_uuid=True)
                logger.info('Attaching copied disk to repair VM as data disk...')
                attach_disk_command = "az vm disk attach -g {g} --name {disk_id} --vm-name {vm_name} ".format(
                    g=repair_group_name, disk_id=copy_disk_id, vm_name=repair_vm_name)
                _call_az_command(attach_disk_command)
            else:
                # windows (encrypted or not) and linux with encryption
                _create_repair_vm(copy_disk_id, create_repair_vm_command,
                                  repair_password, repair_username)
                if unlock_encrypted_vm:
                    _unlock_encrypted_vm_run(repair_vm_name, repair_group_name, is_linux)

        # UNMANAGED DISK
        else:
            logger.info('Source VM uses unmanaged disks. Creating repair VM with unmanaged disks.\n')
            os_disk_uri = source_vm.storage_profile.os_disk.vhd.uri
            copy_disk_name = copy_disk_name + '.vhd'
            storage_account = StorageResourceIdentifier(cmd.cli_ctx.cloud, os_disk_uri)
            # Validate create vm create command to validate parameters before running copy disk commands
            validate_create_vm_command = create_repair_vm_command + ' --validate'
            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password, repair_username])

            # get storage account connection string
            get_connection_string_command = 'az storage account show-connection-string -g {g} -n {n} --query connectionString -o tsv' \
                                            .format(g=resource_group_name, n=storage_account.account_name)
            logger.debug('Fetching storage account connection string...')
            connection_string = _call_az_command(get_connection_string_command).strip('\n')

            # Create Snapshot of Unmanaged Disk
            make_snapshot_command = 'az storage blob snapshot -c {c} -n {n} --connection-string "{con_string}" --query snapshot -o tsv' \
                                    .format(c=storage_account.container, n=storage_account.blob, con_string=connection_string)
            logger.info('Creating snapshot of OS disk...')
            snapshot_timestamp = _call_az_command(make_snapshot_command,
                                                  secure_params=[connection_string]).strip('\n')
            snapshot_uri = os_disk_uri + '?snapshot={timestamp}'.format(timestamp=snapshot_timestamp)

            # Copy Snapshot into unmanaged Disk
            copy_snapshot_command = 'az storage blob copy start -c {c} -b {name} --source-uri {source} --connection-string "{con_string}"' \
                                    .format(c=storage_account.container, name=copy_disk_name, source=snapshot_uri, con_string=connection_string)
            logger.info('Creating a copy disk from the snapshot...')
            _call_az_command(copy_snapshot_command, secure_params=[connection_string])
            # Generate the copied disk uri by swapping the blob-name suffix.
            # (str.rstrip strips a character set, not a suffix, and could
            # over-strip the URI, so slice instead.)
            copy_disk_id = os_disk_uri[:len(os_disk_uri) - len(storage_account.blob)] + copy_disk_name

            # Create new repair VM with copied unmanaged disk command
            create_repair_vm_command = create_repair_vm_command + ' --use-unmanaged-disk'
            logger.info('Creating repair VM while disk copy is in progress...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password, repair_username])

            logger.info('Checking if disk copy is done...')
            copy_check_command = 'az storage blob show -c {c} -n {name} --connection-string "{con_string}" --query properties.copy.status -o tsv' \
                                 .format(c=storage_account.container, name=copy_disk_name, con_string=connection_string)
            copy_result = _call_az_command(copy_check_command,
                                           secure_params=[connection_string]).strip('\n')
            if copy_result != 'success':
                raise UnmanagedDiskCopyError('Unmanaged disk copy failed.')

            # Attach copied unmanaged disk to new vm
            logger.info('Attaching copied disk to repair VM as data disk...')
            attach_disk_command = "az vm unmanaged-disk attach -g {g} -n {disk_name} --vm-name {vm_name} --vhd-uri {uri}" \
                                  .format(g=repair_group_name, disk_name=copy_disk_name, vm_name=repair_vm_name, uri=copy_disk_id)
            _call_az_command(attach_disk_command)

        # invoke enable-NestedHyperV.ps1 again to attach Disk to Nested
        if enable_nested:
            logger.info("Running Script win-enable-nested-hyperv.ps1 to install HyperV")

            run_hyperv_command = "az vm repair run -g {g} -n {name} --run-id win-enable-nested-hyperv --parameters gen={gen}" \
                .format(g=repair_group_name, name=repair_vm_name, gen=is_gen2)
            ret_enable_nested = _call_az_command(run_hyperv_command)

            logger.debug("az vm repair run hyperv command returned: %s", ret_enable_nested)

            if str.find(ret_enable_nested, "SuccessRestartRequired") > -1:
                # HyperV install needs a reboot before the nested VM can be created
                restart_cmd = 'az vm restart -g {rg} -n {vm}'.format(
                    rg=repair_group_name, vm=repair_vm_name)
                logger.info("Restarting Repair VM")
                restart_ret = _call_az_command(restart_cmd)
                logger.debug(restart_ret)

                # invoking hyperv script again
                logger.info("Running win-enable-nested-hyperv.ps1 again to create nested VM")
                run_hyperv_command = "az vm repair run -g {g} -n {name} --run-id win-enable-nested-hyperv" \
                    .format(g=repair_group_name, name=repair_vm_name)
                ret_enable_nested_again = _call_az_command(run_hyperv_command)

                logger.debug("stderr: %s", ret_enable_nested_again)

        created_resources = _list_resource_ids_in_rg(repair_group_name)
        command.set_status_success()

    # Some error happened. Stop command and clean-up resources.
    except KeyboardInterrupt:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = "Command interrupted by user input."
        command.message = "Command interrupted by user input. Cleaning up resources."
    except AzCommandError as azCommandError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(azCommandError)
        command.message = "Repair create failed. Cleaning up created resources."
    except SkuDoesNotSupportHyperV as skuDoesNotSupportHyperV:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(skuDoesNotSupportHyperV)
        command.message = "v2 sku does not support nested VM in hyperv. Please run command without --enable-nested."
    except ScriptReturnsError as scriptReturnsError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(scriptReturnsError)
        command.message = "Error returned from script when enabling hyperv."
    except SkuNotAvailableError as skuNotAvailableError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(skuNotAvailableError)
        command.message = "Please check if the current subscription can create more VM resources. Cleaning up created resources."
    except UnmanagedDiskCopyError as unmanagedDiskCopyError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(unmanagedDiskCopyError)
        command.message = "Repair create failed. Please try again at another time. Cleaning up created resources."
    except WindowsOsNotAvailableError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = 'Compatible Windows OS image not available.'
        command.message = 'A compatible Windows OS image is not available at this time, please check subscription.'
    except Exception as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'

    finally:
        if command.error_stack_trace:
            logger.debug(command.error_stack_trace)
    # Generate return results depending on command state
    if not command.is_status_success():
        command.set_status_error()
        return_dict = command.init_return_dict()
        _clean_up_resources(repair_group_name, confirm=False)
    else:
        created_resources.append(copy_disk_id)
        command.message = 'Your repair VM \'{n}\' has been created in the resource group \'{repair_rg}\' with disk \'{d}\' attached as data disk. ' \
                          'Please use this VM to troubleshoot and repair. Once the repairs are complete use the command ' \
                          '\'az vm repair restore -n {source_vm} -g {rg} --verbose\' to restore disk to the source VM. ' \
                          'Note that the copied disk is created within the original resource group \'{rg}\'.' \
                          .format(n=repair_vm_name, repair_rg=repair_group_name, d=copy_disk_name, rg=resource_group_name, source_vm=vm_name)
        return_dict = command.init_return_dict()
        # Add additional custom return properties
        return_dict['repair_vm_name'] = repair_vm_name
        return_dict['copied_disk_name'] = copy_disk_name
        return_dict['copied_disk_uri'] = copy_disk_id
        return_dict['repair_resource_group'] = repair_group_name
        return_dict['resource_tag'] = resource_tag
        return_dict['created_resources'] = created_resources

        logger.info('\n%s\n', command.message)
    return return_dict
Example #17
0
def encrypt_vmss(cmd, resource_group_name, vmss_name,  # pylint: disable=too-many-locals, too-many-statements
                 disk_encryption_keyvault,
                 key_encryption_keyvault=None,
                 key_encryption_key=None,
                 key_encryption_algorithm='RSA-OAEP',
                 volume_type=None,
                 force=False):
    """Enable Azure Disk Encryption on a virtual machine scale set.

    Validates the arguments and the keyvault configuration, then appends the ADE
    extension (with public settings describing the vault/KEK) to the scale set's
    virtual machine profile and waits for the model update to complete.

    :param disk_encryption_keyvault: resource id of the keyvault that will hold
        the disk encryption key; its base URL is resolved below.
    :param key_encryption_keyvault: keyvault holding the key-encryption-key;
        defaults to disk_encryption_keyvault when a KEK is supplied.
    :param key_encryption_key: KEK name or full key URL used to wrap the disk
        encryption key (a bare name is resolved to a URL below).
    :param key_encryption_algorithm: wrapping algorithm; only sent when a KEK is used.
    :param volume_type: OS/Data/All; defaulted per-OS by the helper below.
    :param force: skip client-side safety checks (distro support, volume type).
    """
    from msrestazure.tools import parse_resource_id

    # pylint: disable=no-member
    UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')

    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    # Pick the OS-specific ADE extension metadata (publisher/name/version).
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. First validate arguments
    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)

    # encryption is not supported on all linux distros, but service never tells you
    # so let us verify at the client side
    if is_linux:
        image_reference = getattr(vmss.virtual_machine_profile.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(image_reference, volume_type)
            if not result:
                # Warn (not fail) — the check is best-effort and may be outdated.
                logger.warning(message)

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
                                                          (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears a key name
            key_encryption_key = _get_keyvault_key_url(
                cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)

    #  to avoid bad server errors, ensure the vault has the right configurations
    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)

    # 2. we are ready to provision/update the disk encryption extensions
    # Public settings consumed by the ADE guest extension; KEK fields are blanked
    # when no key-encryption-key is in play.
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'KeyEncryptionKeyURL': key_encryption_key or '',
        "KeyVaultResourceId": disk_encryption_keyvault,
        "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
        'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption'
    }

    # NOTE(review): force_update_tag gets a fresh uuid each call — presumably so the
    # extension re-runs even when the settings are unchanged; confirm against the
    # compute API contract.
    ext = VirtualMachineScaleSetExtension(name=extension['name'],
                                          publisher=extension['publisher'],
                                          type=extension['name'],
                                          type_handler_version=extension['version'],
                                          settings=public_config,
                                          auto_upgrade_minor_version=True,
                                          force_update_tag=uuid.uuid4())
    if not vmss.virtual_machine_profile.extension_profile:
        vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
    vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
    poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    # Manual-upgrade-mode scale sets need a follow-up action from the user.
    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
Example #18
0
def run(cmd,
        vm_name,
        resource_group_name,
        run_id=None,
        repair_vm_id=None,
        custom_script_file=None,
        parameters=None,
        run_on_repair=False,
        preview=None):
    """Run a repair script on a VM (or on its repair VM) and return parsed results.

    :param run_id: id of a script from the repair-script-library; ignored when
        custom_script_file is given.
    :param repair_vm_id: resource id of the VM to run on; when run_on_repair is
        False this is the source VM itself (set by the validator).
    :param custom_script_file: local script to run instead of a library script.
    :param parameters: parameters forwarded to the script (bash or PowerShell
        formatting chosen by OS).
    :param preview: repair-map URL override passed to _set_repair_map_url —
        TODO confirm the expected format against that helper.
    :return: dict with command status, script status, logs, output and VM info.
    """

    # Init command helper object
    command = command_helper(logger, cmd, 'vm repair run')
    # OS-specific driver scripts that wrap the actual repair script.
    LINUX_RUN_SCRIPT_NAME = 'linux-run-driver.sh'
    WINDOWS_RUN_SCRIPT_NAME = 'win-run-driver.ps1'
    if preview:
        _set_repair_map_url(preview)

    try:
        # Fetch VM data
        source_vm = get_vm(cmd, resource_group_name, vm_name)
        is_linux = _is_linux_os(source_vm)

        if is_linux:
            script_name = LINUX_RUN_SCRIPT_NAME
        else:
            script_name = WINDOWS_RUN_SCRIPT_NAME

        # If run_on_repair is False, then repair_vm is the source_vm (scripts run directly on source vm)
        repair_vm_id = parse_resource_id(repair_vm_id)
        repair_vm_name = repair_vm_id['name']
        repair_resource_group = repair_vm_id['resource_group']

        run_command_params = []
        additional_scripts = []

        # Normal scenario with run id
        if not custom_script_file:
            # Fetch run path from GitHub
            repair_script_path = _fetch_run_script_path(run_id)
            run_command_params.append(
                'script_path="./{}"'.format(repair_script_path))

        # Custom script scenario for script testers
        else:
            # 'no-op' tells the driver to skip the library script and run the
            # uploaded custom script instead.
            run_command_params.append('script_path=no-op')
            additional_scripts.append(custom_script_file)

        # Append Parameters
        if parameters:
            if is_linux:
                param_string = _process_bash_parameters(parameters)
            else:
                param_string = _process_ps_parameters(parameters)
            run_command_params.append('params="{}"'.format(param_string))
        if run_on_repair:
            vm_string = 'repair VM'
        else:
            vm_string = 'VM'
        logger.info('Running script on %s: %s', vm_string, repair_vm_name)

        # Run script and measure script run-time
        script_start_time = timeit.default_timer()
        stdout, stderr = _invoke_run_command(script_name, repair_vm_name,
                                             repair_resource_group, is_linux,
                                             run_command_params,
                                             additional_scripts)
        command.script.run_time = timeit.default_timer() - script_start_time
        logger.debug("stderr: %s", stderr)

        # Parse through stdout to populate log properties: 'level', 'message'
        run_script_succeeded = _check_script_succeeded(stdout)
        logs = _parse_run_script_raw_logs(stdout)

        # Process log-start and log-end
        # Log is cutoff at the start if over 4k bytes
        log_cutoff = True
        log_fullpath = ''
        for log in logs:
            if log['level'] == 'Log-Start':
                log_cutoff = False
            if log['level'] == 'Log-End':
                # Expected format: "[<prefix>]<full path>" — keep the path part.
                split_log = log['message'].split(']')
                if len(split_log) == 2:
                    log_fullpath = split_log[1]
        if log_cutoff:
            logger.warning(
                'Log file is too large and has been cutoff at the start of file. Please locate the log file within the %s using the logFullpath to check full logs.',
                vm_string)

        # Output 'output' or 'error' level logs depending on status
        if run_script_succeeded:
            command.script.set_status_success()
            command.message = 'Script completed succesfully.'
            command.script.output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'output'
            ])
            logger.info('\nScript returned with output:\n%s\n',
                        command.script.output)
        else:
            command.script.set_status_error()
            command.message = 'Script completed with errors.'
            command.script.output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'error'
            ])
            logger.error('\nScript returned with error:\n%s\n',
                         command.script.output)

        # Overall command succeeded even if the script itself reported errors;
        # the script status is reported separately below.
        command.set_status_success()
    except KeyboardInterrupt:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = "Command interrupted by user input."
        command.message = "Repair run failed. Command interrupted by user input."
    except AzCommandError as azCommandError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(azCommandError)
        command.message = "Repair run failed."
    except requests.exceptions.RequestException as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = "Failed to fetch run script data from GitHub. Please check this repository is reachable: https://github.com/Azure/repair-script-library"
    except RunScriptNotFoundForIdError as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = "Repair run failed. Run ID not found."
    except Exception as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        if command.error_stack_trace:
            logger.debug(command.error_stack_trace)

    # Generate return results depending on command state
    if not command.is_status_success():
        command.set_status_error()
        command.script.output = 'Repair run failed.'
        return_dict = command.init_return_dict()
    else:
        # Build return Dict
        return_dict = command.init_return_dict()
        return_dict['script_status'] = command.script.status
        return_dict['logs'] = stdout
        return_dict['log_full_path'] = log_fullpath
        return_dict['output'] = command.script.output
        return_dict['vm_name'] = repair_vm_name
        return_dict['resource_group'] = repair_resource_group

    return return_dict
Example #19
0
def show_vm_encryption_status(cmd, resource_group_name, vm_name):
    """Show the Azure Disk Encryption status of a VM.

    Returns None (with a warning) when ADE is not enabled. For the new ADE the
    status is derived from the extension itself; for legacy ADE the os/data disk
    states come from the extension substatus (Linux) or from the VM's disk
    encryption settings plus extension settings (Windows).
    """

    # Defaults reported when no encryption information is found.
    encryption_status = {
        'osDisk': 'NotEncrypted',
        'osDiskEncryptionSettings': None,
        'dataDisk': 'NotEncrypted',
        'osType': None
    }
    compute_client = _compute_client_factory(cmd.cli_ctx)
    # 'instanceView' expand is required: the extension statuses are read below.
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name,
                                             'instanceView')
    has_new_ade, has_old_ade = _detect_ade_status(vm)
    if not has_new_ade and not has_old_ade:
        logger.warning('Azure Disk Encryption is not enabled')
        return None
    if has_new_ade:
        return _show_vm_encryption_status_thru_new_ade(vm)
    is_linux = _is_linux_os(vm)

    # pylint: disable=no-member
    # The following logic was mostly ported from xplat-cli
    os_type = 'Linux' if is_linux else 'Windows'
    encryption_status['osType'] = os_type
    extension = vm_extension_info[os_type]
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    logger.debug(extension_result)
    if extension_result.instance_view.statuses:
        # First status message carries the current encryption progress.
        encryption_status[
            'progressMessage'] = extension_result.instance_view.statuses[
                0].message

    substatus_message = None
    if getattr(extension_result.instance_view, 'substatuses', None):
        substatus_message = extension_result.instance_view.substatuses[
            0].message

    encryption_status[
        'osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings

    import json
    if is_linux:
        # Linux: the substatus message is a JSON blob with per-volume states.
        try:
            message_object = json.loads(substatus_message)
        except Exception:  # pylint: disable=broad-except
            message_object = None  # might be from outdated extension

        if message_object and ('os' in message_object):
            encryption_status['osDisk'] = message_object['os']
        else:
            encryption_status['osDisk'] = 'Unknown'

        if message_object and 'data' in message_object:
            encryption_status['dataDisk'] = message_object['data']
        else:
            encryption_status['dataDisk'] = 'Unknown'
    else:
        # Windows - get os and data volume encryption state from the vm model
        if (encryption_status['osDiskEncryptionSettings']
                and encryption_status['osDiskEncryptionSettings'].enabled
                and encryption_status['osDiskEncryptionSettings'].
                disk_encryption_key
                and encryption_status['osDiskEncryptionSettings'].
                disk_encryption_key.secret_url):
            encryption_status['osDisk'] = _STATUS_ENCRYPTED
        else:
            encryption_status['osDisk'] = 'Unknown'

        if extension_result.provisioning_state == 'Succeeded':
            # Data disks are considered encrypted when the extension targeted
            # them (VolumeType not 'OS') and the last operation was an enable.
            volume_type = extension_result.settings.get('VolumeType', None)
            about_data_disk = not volume_type or volume_type.lower() != 'os'
            if about_data_disk and extension_result.settings.get(
                    'EncryptionOperation', None) == 'EnableEncryption':
                encryption_status['dataDisk'] = _STATUS_ENCRYPTED

    return encryption_status
Example #20
0
def create(cmd,
           vm_name,
           resource_group_name,
           repair_password=None,
           repair_username=None,
           repair_vm_name=None,
           copy_disk_name=None,
           repair_group_name=None):
    """Create a repair VM with a copy of the source VM's OS disk attached as a data disk.

    Creates a new resource group, provisions a repair VM matching the source OS,
    copies the source VM's OS disk (managed or unmanaged path), and attaches the
    copy to the repair VM for offline troubleshooting. On failure all created
    resources are cleaned up. Telemetry is tracked in both cases.

    :return: dict with status, message and details of the created resources.
    """
    # begin progress reporting for long running operation
    cmd.cli_ctx.get_progress_controller().begin()
    cmd.cli_ctx.get_progress_controller().add(message='Running')
    # Function param for telemetry
    func_params = _get_function_param_dict(inspect.currentframe())
    # Start timer for custom telemetry
    start_time = timeit.default_timer()
    # Initialize return variables
    return_message = ''
    return_error_detail = ''
    return_status = ''

    source_vm = get_vm(cmd, resource_group_name, vm_name)
    is_linux = _is_linux_os(source_vm)
    target_disk_name = source_vm.storage_profile.os_disk.name
    is_managed = _uses_managed_disk(source_vm)

    copy_disk_id = None
    resource_tag = _get_repair_resource_tag(resource_group_name, vm_name)

    # Overall success flag
    command_succeeded = False
    # List of created resources
    created_resources = []

    # Main command calling block
    try:
        # Fetch OS image urn
        if is_linux:
            os_image_urn = "UbuntuLTS"
        else:
            os_image_urn = _fetch_compatible_windows_os_urn(source_vm)

        # Set up base create vm command
        create_repair_vm_command = 'az vm create -g {g} -n {n} --tag {tag} --image {image} --admin-password {password}' \
                                   .format(g=repair_group_name, n=repair_vm_name, tag=resource_tag, image=os_image_urn, password=repair_password)
        # Add username field only for Windows
        if not is_linux:
            create_repair_vm_command += ' --admin-username {username}'.format(
                username=repair_username)
        # fetch VM size of repair VM
        sku = _fetch_compatible_sku(source_vm)
        if not sku:
            raise SkuNotAvailableError(
                'Failed to find compatible VM size for source VM\'s OS disk within given region and subscription.'
            )
        create_repair_vm_command += ' --size {sku}'.format(sku=sku)

        # Create new resource group
        create_resource_group_command = 'az group create -l {loc} -n {group_name}' \
                                        .format(loc=source_vm.location, group_name=repair_group_name)
        logger.info(
            'Creating resource group for repair VM and its resources...')
        _call_az_command(create_resource_group_command)

        # MANAGED DISK
        if is_managed:
            logger.info(
                'Source VM uses managed disks. Creating repair VM with managed disks.\n'
            )
            # Copy OS disk command
            disk_sku = source_vm.storage_profile.os_disk.managed_disk.storage_account_type
            copy_disk_command = 'az disk create -g {g} -n {n} --source {s} --sku {sku} --location {loc} --query id -o tsv' \
                                .format(g=resource_group_name, n=copy_disk_name, s=target_disk_name, sku=disk_sku, loc=source_vm.location)
            # Validate create vm create command to validate parameters before running copy disk command
            validate_create_vm_command = create_repair_vm_command + ' --validate'

            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password])
            logger.info('Copying OS disk of source VM...')
            copy_disk_id = _call_az_command(copy_disk_command).strip('\n')

            attach_disk_command = 'az vm disk attach -g {g} --vm-name {repair} --name {id}' \
                                  .format(g=repair_group_name, repair=repair_vm_name, id=copy_disk_id)

            logger.info('Creating repair VM...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password])
            logger.info('Attaching copied disk to repair VM...')
            _call_az_command(attach_disk_command)
        # UNMANAGED DISK
        else:
            logger.info(
                'Source VM uses unmanaged disks. Creating repair VM with unmanaged disks.\n'
            )
            os_disk_uri = source_vm.storage_profile.os_disk.vhd.uri
            copy_disk_name = copy_disk_name + '.vhd'
            storage_account = StorageResourceIdentifier(
                cmd.cli_ctx.cloud, os_disk_uri)
            # Validate create vm create command to validate parameters before running copy disk commands
            validate_create_vm_command = create_repair_vm_command + ' --validate'
            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password])

            # get storage account connection string
            get_connection_string_command = 'az storage account show-connection-string -g {g} -n {n} --query connectionString -o tsv' \
                                            .format(g=resource_group_name, n=storage_account.account_name)
            logger.debug('Fetching storage account connection string...')
            connection_string = _call_az_command(
                get_connection_string_command).strip('\n')

            # Create Snapshot of Unmanaged Disk
            make_snapshot_command = 'az storage blob snapshot -c {c} -n {n} --connection-string "{con_string}" --query snapshot -o tsv' \
                                    .format(c=storage_account.container, n=storage_account.blob, con_string=connection_string)
            logger.info('Creating snapshot of OS disk...')
            snapshot_timestamp = _call_az_command(make_snapshot_command,
                                                  secure_params=[
                                                      connection_string
                                                  ]).strip('\n')
            snapshot_uri = os_disk_uri + '?snapshot={timestamp}'.format(
                timestamp=snapshot_timestamp)

            # Copy Snapshot into unmanaged Disk
            copy_snapshot_command = 'az storage blob copy start -c {c} -b {name} --source-uri {source} --connection-string "{con_string}"' \
                                    .format(c=storage_account.container, name=copy_disk_name, source=snapshot_uri, con_string=connection_string)
            logger.info('Creating a copy disk from the snapshot...')
            _call_az_command(copy_snapshot_command,
                             secure_params=[connection_string])
            # Generate the copied disk uri.
            # BUGFIX: str.rstrip(chars) strips a *character set*, not a suffix, so the
            # previous `os_disk_uri.rstrip(storage_account.blob)` could remove extra
            # trailing characters from the uri whenever they appeared in the blob name.
            # Slice the blob name off the end instead.
            if os_disk_uri.endswith(storage_account.blob):
                copy_disk_id = os_disk_uri[:-len(storage_account.blob)] + copy_disk_name
            else:
                # Unexpected: uri does not end with the parsed blob name; append as-is.
                copy_disk_id = os_disk_uri + copy_disk_name

            # Create new repair VM with copied unmanaged disk command
            create_repair_vm_command = create_repair_vm_command + ' --use-unmanaged-disk'
            logger.info('Creating repair VM while disk copy is in progress...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password])

            logger.info('Checking if disk copy is done...')
            copy_check_command = 'az storage blob show -c {c} -n {name} --connection-string "{con_string}" --query properties.copy.status -o tsv' \
                                 .format(c=storage_account.container, name=copy_disk_name, con_string=connection_string)
            copy_result = _call_az_command(copy_check_command,
                                           secure_params=[connection_string
                                                          ]).strip('\n')
            if copy_result != 'success':
                raise UnmanagedDiskCopyError('Unmanaged disk copy failed.')

            # Attach copied unmanaged disk to new vm
            logger.info('Attaching copied disk to repair VM as data disk...')
            attach_disk_command = "az vm unmanaged-disk attach -g {g} -n {disk_name} --vm-name {vm_name} --vhd-uri {uri}" \
                                  .format(g=repair_group_name, disk_name=copy_disk_name, vm_name=repair_vm_name, uri=copy_disk_id)
            _call_az_command(attach_disk_command)

        command_succeeded = True
        created_resources = _list_resource_ids_in_rg(repair_group_name)

    # Some error happened. Stop command and clean-up resources.
    except KeyboardInterrupt:
        return_error_detail = "Command interrupted by user input."
        return_message = "Command interrupted by user input. Cleaning up resources."
    except AzCommandError as azCommandError:
        return_error_detail = str(azCommandError)
        return_message = "Repair create failed. Cleaning up created resources."
    except SkuNotAvailableError as skuNotAvailableError:
        return_error_detail = str(skuNotAvailableError)
        return_message = "Please check if the current subscription can create more VM resources. Cleaning up created resources."
    except UnmanagedDiskCopyError as unmanagedDiskCopyError:
        return_error_detail = str(unmanagedDiskCopyError)
        return_message = "Repair create failed. Please try again at another time. Cleaning up created resources."
    except WindowsOsNotAvailableError:
        return_error_detail = 'Compatible Windows OS image not available.'
        return_message = 'A compatible Windows OS image is not available at this time, please check subscription.'
    except Exception as exception:
        return_error_detail = str(exception)
        return_message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        # end long running op for process
        cmd.cli_ctx.get_progress_controller().end()

    # Command failed block. Output right error message and return dict
    if not command_succeeded:
        return_status = STATUS_ERROR
        return_dict = _handle_command_error(return_error_detail,
                                            return_message)
        _clean_up_resources(repair_group_name, confirm=False)
    else:
        # Construct return dict
        return_status = STATUS_SUCCESS
        created_resources.append(copy_disk_id)
        return_dict = {}
        return_dict['status'] = return_status
        return_dict['message'] = 'Your repair VM \'{n}\' has been created in the resource group \'{repair_rg}\' with disk \'{d}\' attached as data disk. ' \
                                 'Please use this VM to troubleshoot and repair. Once the repairs are complete use the command ' \
                                 '\'az vm repair restore -n {source_vm} -g {rg} --verbose\' to restore disk to the source VM. ' \
                                 'Note that the copied disk is created within the original resource group \'{rg}\'.' \
                                 .format(n=repair_vm_name, repair_rg=repair_group_name, d=copy_disk_name, rg=resource_group_name, source_vm=vm_name)
        return_dict['repair_vm_name'] = repair_vm_name
        return_dict['copied_disk_name'] = copy_disk_name
        return_dict['copied_disk_uri'] = copy_disk_id
        # NOTE: key typo ('resouce') kept for backward compatibility with existing consumers.
        return_dict['repair_resouce_group'] = repair_group_name
        return_dict['resource_tag'] = resource_tag
        return_dict['created_resources'] = created_resources

        logger.info('\n%s\n', return_dict['message'])

    # Track telemetry data
    elapsed_time = timeit.default_timer() - start_time
    _track_command_telemetry('vm repair create', func_params, return_status,
                             return_message, return_error_detail, elapsed_time,
                             get_subscription_id(cmd.cli_ctx), return_dict)
    return return_dict
Example #21
0
def run(cmd,
        vm_name,
        resource_group_name,
        run_id=None,
        repair_vm_id=None,
        custom_script_file=None,
        parameters=None,
        run_on_repair=False):
    """Run a repair script on a VM via 'az vm run-command invoke' and return parsed results.

    :param run_id: id of a script from the repair-script-library; ignored when
        custom_script_file is given.
    :param repair_vm_id: resource id of the VM to run on; when run_on_repair is
        False this is the source VM itself.
    :param custom_script_file: local script to run instead of a library script.
    :param parameters: parameters forwarded to the script (bash or PowerShell
        formatting chosen by OS).
    :return: dict with command status, script status, logs, output and VM info.
    """

    # Init command helper object
    command = command_helper(logger, cmd, 'vm repair run')

    # Package/script locations and run-command ids per OS.
    REPAIR_DIR_NAME = 'azext_vm_repair'
    SCRIPTS_DIR_NAME = 'scripts'
    LINUX_RUN_SCRIPT_NAME = 'linux-run-driver.sh'
    WINDOWS_RUN_SCRIPT_NAME = 'win-run-driver.ps1'
    RUN_COMMAND_RUN_SHELL_ID = 'RunShellScript'
    RUN_COMMAND_RUN_PS_ID = 'RunPowerShellScript'

    try:
        # Fetch VM data
        source_vm = get_vm(cmd, resource_group_name, vm_name)

        # Build absoulte path of driver script
        # NOTE(review): loader.load_module is deprecated in importlib — consider
        # importlib.util when this is next touched.
        loader = pkgutil.get_loader(REPAIR_DIR_NAME)
        mod = loader.load_module(REPAIR_DIR_NAME)
        rootpath = os.path.dirname(mod.__file__)
        is_linux = _is_linux_os(source_vm)
        if is_linux:
            run_script = os.path.join(rootpath, SCRIPTS_DIR_NAME,
                                      LINUX_RUN_SCRIPT_NAME)
            command_id = RUN_COMMAND_RUN_SHELL_ID
        else:
            run_script = os.path.join(rootpath, SCRIPTS_DIR_NAME,
                                      WINDOWS_RUN_SCRIPT_NAME)
            command_id = RUN_COMMAND_RUN_PS_ID

        # If run_on_repair is False, then repair_vm is the source_vm (scripts run directly on source vm)
        repair_vm_id = parse_resource_id(repair_vm_id)
        repair_vm_name = repair_vm_id['name']
        repair_resource_group = repair_vm_id['resource_group']

        repair_run_command = 'az vm run-command invoke -g {rg} -n {vm} --command-id {command_id} ' \
                             '--scripts "@{run_script}" -o json' \
                             .format(rg=repair_resource_group, vm=repair_vm_name, command_id=command_id, run_script=run_script)

        # Normal scenario with run id
        if not custom_script_file:
            # Fetch run path from GitHub
            repair_script_path = _fetch_run_script_path(run_id)
            repair_run_command += ' --parameters script_path="./{repair_script}"'.format(
                repair_script=repair_script_path)
        # Custom script scenario for script testers
        else:
            # no-op run id
            # NOTE(review): the custom file is appended as an extra value after
            # --scripts' first argument — verify the CLI parses this as intended.
            repair_run_command += ' "@{custom_file}" --parameters script_path=no-op'.format(
                custom_file=custom_script_file)

        # Append Parameters
        if parameters:
            if is_linux:
                param_string = _process_bash_parameters(parameters)
            else:
                param_string = _process_ps_parameters(parameters)
            # Work around for run-command bug, unexpected behavior with space characters
            param_string = param_string.replace(' ', '%20')
            repair_run_command += ' params="{}"'.format(param_string)
        if run_on_repair:
            vm_string = 'repair VM'
        else:
            vm_string = 'VM'
        logger.info('Running script on %s: %s', vm_string, repair_vm_name)

        # Run script and measure script run-time
        script_start_time = timeit.default_timer()
        return_str = _call_az_command(repair_run_command)
        command.script.run_time = timeit.default_timer() - script_start_time

        # Extract stdout and stderr, if stderr exists then possible error
        run_command_return = json.loads(return_str)
        if is_linux:
            # Linux run-command packs both streams into one message, delimited
            # by [stdout]/[stderr] markers.
            run_command_message = run_command_return['value'][0][
                'message'].split('[stdout]')[1].split('[stderr]')
            stdout = run_command_message[0].strip('\n')
            stderr = run_command_message[1].strip('\n')
        else:
            stdout = run_command_return['value'][0]['message']
            stderr = run_command_return['value'][1]['message']
        logger.debug("stderr: %s", stderr)

        # Parse through stdout to populate log properties: 'level', 'message'
        run_script_succeeded = _check_script_succeeded(stdout)
        logs = _parse_run_script_raw_logs(stdout)

        # Process log-start and log-end
        # Log is cutoff at the start if over 4k bytes
        log_cutoff = True
        log_fullpath = ''
        for log in logs:
            if log['level'] == 'Log-Start':
                log_cutoff = False
            if log['level'] == 'Log-End':
                # Expected format: "[<prefix>]<full path>" — keep the path part.
                split_log = log['message'].split(']')
                if len(split_log) == 2:
                    log_fullpath = split_log[1]
        if log_cutoff:
            logger.warning(
                'Log file is too large and has been cutoff at the start of file. Please locate the log file within the %s using the logFullpath to check full logs.',
                vm_string)

        # Output 'output' or 'error' level logs depending on status
        if run_script_succeeded:
            command.script.set_status_success()
            command.message = 'Script completed succesfully.'
            command.script.output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'output'
            ])
            logger.info('\nScript returned with output:\n%s\n',
                        command.script.output)
        else:
            command.script.set_status_error()
            command.message = 'Script completed with errors.'
            command.script.output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'error'
            ])
            logger.error('\nScript returned with error:\n%s\n',
                         command.script.output)

        # Overall command succeeded even if the script itself reported errors;
        # the script status is reported separately below.
        command.set_status_success()
    except KeyboardInterrupt:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = "Command interrupted by user input."
        command.message = "Repair run failed. Command interrupted by user input."
    except AzCommandError as azCommandError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(azCommandError)
        command.message = "Repair run failed."
    except requests.exceptions.RequestException as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = "Failed to fetch run script data from GitHub. Please check this repository is reachable: https://github.com/Azure/repair-script-library"
    except RunScriptNotFoundForIdError as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = "Repair run failed. Run ID not found."
    except Exception as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        if command.error_stack_trace:
            logger.debug(command.error_stack_trace)

    # Generate return results depending on command state
    if not command.is_status_success():
        command.set_status_error()
        command.script.output = 'Repair run failed.'
        return_dict = command.init_return_dict()
    else:
        # Build return Dict
        return_dict = command.init_return_dict()
        return_dict['script_status'] = command.script.status
        return_dict['logs'] = stdout
        return_dict['log_full_path'] = log_fullpath
        return_dict['output'] = command.script.output
        return_dict['vm_name'] = repair_vm_name
        return_dict['resource_group'] = repair_resource_group

    return return_dict
Example #22
0
def decrypt_vm(cmd, resource_group_name, vm_name, volume_type=None, force=False):
    """Disable Azure Disk Encryption (ADE) on an existing VM.

    Detects whether the VM runs the new or the legacy ADE extension and pushes
    a 'DisableEncryption' update to the matching handler version. For legacy
    ADE the encryption secret is also cleared from the VM's storage profile.

    :param cmd: CLI command context (provides cli_ctx and model lookup).
    :param resource_group_name: Resource group containing the VM.
    :param vm_name: Name of the VM to decrypt.
    :param volume_type: Volumes to decrypt; defaults to data disks on Linux
        and all volumes on Windows.
    :param force: Allow decrypting non-data volumes on a Linux VM despite the
        risk of leaving the VM unbootable.
    :raises CLIError: If a risky Linux volume type is requested without
        --force, or the extension update does not reach 'Succeeded'.
    """
    from knack.util import CLIError

    client = _compute_client_factory(cmd.cli_ctx)
    vm = client.virtual_machines.get(resource_group_name, vm_name)
    has_new_ade, has_old_ade = _detect_ade_status(vm)
    if not (has_new_ade or has_old_ade):
        logger.warning('Azure Disk Encryption is not enabled')
        return

    is_linux = _is_linux_os(vm)
    # pylint: disable=no-member

    # 1. Resolve the default volume type and refuse risky Linux combinations
    #    unless the caller explicitly opted in with --force.
    if is_linux:
        if not volume_type:
            volume_type = _DATA_VOLUME_TYPE
        elif volume_type != _DATA_VOLUME_TYPE and not force:
            raise CLIError("Only Data disks can have encryption disabled in a Linux VM. "
                           "Use '--force' to ignore the warning")
    elif volume_type is None:
        volume_type = _ALL_VOLUME_TYPE

    ext_info = vm_extension_info['Linux' if is_linux else 'Windows']

    # 2. Update the disk encryption extension (logic ported from xplat-cli).
    #    A fresh SequenceVersion is required because encryption ran on this VM before.
    settings = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
        'SequenceVersion': uuid.uuid4(),
    }

    VirtualMachineExtension, DiskEncryptionSettings = cmd.get_models(
        'VirtualMachineExtension', 'DiskEncryptionSettings')

    handler_version = ext_info['version'] if has_new_ade else ext_info['legacy_version']
    vm_ext = VirtualMachineExtension(location=vm.location,  # pylint: disable=no-member
                                     publisher=ext_info['publisher'],
                                     virtual_machine_extension_type=ext_info['name'],
                                     type_handler_version=handler_version,
                                     settings=settings,
                                     auto_upgrade_minor_version=True)

    client.virtual_machine_extensions.create_or_update(resource_group_name,
                                                       vm_name,
                                                       ext_info['name'],
                                                       vm_ext).result()

    outcome = client.virtual_machine_extensions.get(resource_group_name, vm_name,
                                                    ext_info['name'], 'instanceView')
    if outcome.provisioning_state != 'Succeeded':
        raise CLIError("Extension updating didn't succeed")

    if not has_new_ade:
        # 3. Legacy ADE keeps the secret on the VM's storage profile; clear it
        #    now that decryption has been requested.
        vm = client.virtual_machines.get(resource_group_name, vm_name)
        vm.storage_profile.os_disk.encryption_settings = DiskEncryptionSettings(enabled=False)
        set_vm(cmd, vm)
Example #23
0
def encrypt_vm(
        cmd,
        resource_group_name,
        vm_name,  # pylint: disable=too-many-locals, too-many-statements
        disk_encryption_keyvault,
        aad_client_id=None,
        aad_client_secret=None,
        aad_client_cert_thumbprint=None,
        key_encryption_keyvault=None,
        key_encryption_key=None,
        key_encryption_algorithm='RSA-OAEP',
        volume_type=None,
        encrypt_format_all=False):
    """Enable Azure Disk Encryption (ADE) on a VM.

    Provisions (or updates) the ADE extension with an 'EnableEncryption'
    operation. The new (AAD-less) extension is used unless the caller supplies
    AAD credentials or the VM already runs the legacy extension; the legacy
    flow additionally writes the resulting secret into the VM's storage
    profile and may deallocate/restart the VM.

    :param cmd: CLI command context (provides cli_ctx and model lookup).
    :param resource_group_name: Resource group containing the VM.
    :param vm_name: Name of the VM to encrypt.
    :param disk_encryption_keyvault: Resource ID of the Key Vault storing the
        disk encryption key.
    :param aad_client_id: AAD client ID; supplying it forces the legacy flow.
    :param aad_client_secret: AAD client secret (legacy flow only).
    :param aad_client_cert_thumbprint: AAD client certificate thumbprint
        (legacy flow only).
    :param key_encryption_keyvault: Key Vault holding the key-encryption key;
        defaults to disk_encryption_keyvault when a KEK is given.
    :param key_encryption_key: Key-encryption-key URL, or a bare key name to
        be resolved against key_encryption_keyvault.
    :param key_encryption_algorithm: Algorithm used to wrap the disk key.
    :param volume_type: Volumes to encrypt; defaulted per-OS below.
    :param encrypt_format_all: Use the destructive EnableEncryptionFormatAll
        operation instead of EnableEncryption.
    :raises CLIError: On invalid argument combinations or when the extension
        fails to provision.
    """
    from msrestazure.tools import parse_resource_id
    from knack.util import CLIError

    # pylint: disable=no-member
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    is_linux = _is_linux_os(vm)
    backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
    vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
    # New ADE needs no AAD app; fall back to legacy when AAD creds are given
    # or the VM already carries the legacy extension.
    _, has_old_ade = _detect_ade_status(vm)
    use_new_ade = not aad_client_id and not has_old_ade
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    if not use_new_ade and not aad_client_id:
        raise CLIError('Please provide --aad-client-id')

    # 1. First validate arguments
    if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
        raise CLIError(
            'Please provide either --aad-client-cert-thumbprint or --aad-client-secret'
        )

    if volume_type is None:
        if not is_linux:
            volume_type = _ALL_VOLUME_TYPE
        elif vm.storage_profile.data_disks:
            # Ambiguous on Linux with data disks present: make the caller choose.
            raise CLIError('VM has data disks, please supply --volume-type')
        else:
            volume_type = 'OS'

    # encryption is not supported on all linux distros, but service never tells you
    # so let us verify at the client side
    if is_linux:
        image_reference = getattr(vm.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(
                image_reference, volume_type)
            if not result:
                logger.warning(message)

    # sequence_version should be unique
    sequence_version = uuid.uuid4()

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears a key name
            key_encryption_key = _get_keyvault_key_url(
                cmd.cli_ctx,
                (parse_resource_id(key_encryption_keyvault))['name'],
                key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption'
        if not encrypt_format_all else 'EnableEncryptionFormatAll',
        'KeyEncryptionKeyURL': key_encryption_key,
        'KeyEncryptionAlgorithm': key_encryption_algorithm,
        'SequenceVersion': sequence_version,
    }
    if use_new_ade:
        # New ADE takes vault resource IDs directly instead of AAD credentials.
        public_config.update({
            "KeyVaultResourceId":
            disk_encryption_keyvault,
            "KekVaultResourceId":
            key_encryption_keyvault if key_encryption_key else '',
        })
    else:
        public_config.update({
            'AADClientID':
            aad_client_id,
            'AADClientCertThumbprint':
            aad_client_cert_thumbprint,
        })

    # Legacy extension receives the AAD secret via protected settings only.
    ade_legacy_private_config = {
        'AADClientSecret':
        aad_client_secret if is_linux else (aad_client_secret or '')
    }

    VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
        cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
                       'KeyVaultKeyReference', 'SubResource')

    ext = VirtualMachineExtension(
        location=vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        protected_settings=None if use_new_ade else ade_legacy_private_config,
        type_handler_version=extension['version']
        if use_new_ade else extension['legacy_version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # verify the extension was ok
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError(
            'Extension needed for disk encryption was not provisioned correctly'
        )

    if not use_new_ade:
        # Legacy ADE reports the secret URL via the extension's first
        # instance-view status message.
        if not (extension_result.instance_view.statuses
                and extension_result.instance_view.statuses[0].message):
            raise CLIError(
                'Could not find url pointing to the secret for disk encryption'
            )

        # 3. update VM's storage profile with the secrets
        status_url = extension_result.instance_view.statuses[0].message

        vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
        secret_ref = KeyVaultSecretReference(
            secret_url=status_url,
            source_vault=SubResource(id=disk_encryption_keyvault))

        key_encryption_key_obj = None
        if key_encryption_key:
            key_encryption_key_obj = KeyVaultKeyReference(
                key_url=key_encryption_key,
                source_vault=SubResource(id=key_encryption_keyvault))

        disk_encryption_settings = DiskEncryptionSettings(
            disk_encryption_key=secret_ref,
            key_encryption_key=key_encryption_key_obj,
            enabled=True)
        if vm_encrypted:
            # stop the vm before update if the vm is already encrypted
            logger.warning(
                "Deallocating the VM before updating encryption settings...")
            compute_client.virtual_machines.deallocate(resource_group_name,
                                                       vm_name).result()
            # Re-fetch after deallocation to avoid a stale model on update.
            vm = compute_client.virtual_machines.get(resource_group_name,
                                                     vm_name)

        vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
        set_vm(cmd, vm)

        if vm_encrypted:
            # and start after the update
            logger.warning("Restarting the VM after the update...")
            compute_client.virtual_machines.start(resource_group_name,
                                                  vm_name).result()

    if is_linux and volume_type != _DATA_VOLUME_TYPE:
        old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly"
        logger.warning(
            "The encryption request was accepted. Please use 'show' command to monitor "
            "the progress. %s", "" if use_new_ade else old_ade_msg)
Example #24
0
def create(cmd,
           vm_name,
           resource_group_name,
           repair_password=None,
           repair_username=None,
           repair_vm_name=None,
           copy_disk_name=None,
           repair_group_name=None,
           unlock_encrypted_vm=False):
    """Create a repair VM with a copy of the source VM's OS disk attached.

    Builds a new resource group, copies the source VM's OS disk (managed disk
    copy or unmanaged VHD blob snapshot/copy depending on the source VM), and
    creates a repair VM with the copy attached as a data disk. On any failure
    the partially-created resources are cleaned up.

    :param cmd: CLI command context.
    :param vm_name: Name of the (broken) source VM.
    :param resource_group_name: Resource group of the source VM.
    :param repair_password: Admin password for the repair VM.
    :param repair_username: Admin username for the repair VM.
    :param repair_vm_name: Name for the repair VM.
    :param copy_disk_name: Name for the copied OS disk.
    :param repair_group_name: Resource group to create the repair VM in.
    :param unlock_encrypted_vm: Unlock a single-pass ADE-encrypted disk after
        attaching it (managed disks only).
    :return: Status dict; on success includes repair VM/disk/resource details.
    """
    # Init command helper object
    command = command_helper(logger, cmd, 'vm repair create')

    # Main command calling block
    try:
        # Fetch source VM data
        source_vm = get_vm(cmd, resource_group_name, vm_name)
        is_linux = _is_linux_os(source_vm)
        target_disk_name = source_vm.storage_profile.os_disk.name
        is_managed = _uses_managed_disk(source_vm)
        copy_disk_id = None
        resource_tag = _get_repair_resource_tag(resource_group_name, vm_name)
        created_resources = []

        # Fetch OS image urn and set OS type for disk create
        if is_linux:
            os_image_urn = "UbuntuLTS"
            os_type = 'Linux'
        else:
            os_image_urn = _fetch_compatible_windows_os_urn(source_vm)
            os_type = 'Windows'

        # Set up base create vm command
        create_repair_vm_command = 'az vm create -g {g} -n {n} --tag {tag} --image {image} --admin-username {username} --admin-password {password}' \
                                   .format(g=repair_group_name, n=repair_vm_name, tag=resource_tag, image=os_image_urn, username=repair_username, password=repair_password)
        # Fetch VM size of repair VM
        sku = _fetch_compatible_sku(source_vm)
        if not sku:
            raise SkuNotAvailableError(
                'Failed to find compatible VM size for source VM\'s OS disk within given region and subscription.'
            )
        create_repair_vm_command += ' --size {sku}'.format(sku=sku)

        # Create new resource group
        create_resource_group_command = 'az group create -l {loc} -n {group_name}' \
                                        .format(loc=source_vm.location, group_name=repair_group_name)
        logger.info(
            'Creating resource group for repair VM and its resources...')
        _call_az_command(create_resource_group_command)

        # MANAGED DISK
        if is_managed:
            logger.info(
                'Source VM uses managed disks. Creating repair VM with managed disks.\n'
            )

            # Copy OS disk command
            disk_sku, location, os_type, hyperV_generation = _fetch_disk_info(
                resource_group_name, target_disk_name)
            copy_disk_command = 'az disk create -g {g} -n {n} --source {s} --sku {sku} --location {loc} --os-type {os_type} --query id -o tsv' \
                                .format(g=resource_group_name, n=copy_disk_name, s=target_disk_name, sku=disk_sku, loc=location, os_type=os_type)
            # Only add hyperV variable when available
            if hyperV_generation:
                copy_disk_command += ' --hyper-v-generation {hyperV}'.format(
                    hyperV=hyperV_generation)
            # Validate create vm create command to validate parameters before running copy disk command
            validate_create_vm_command = create_repair_vm_command + ' --validate'

            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password, repair_username])
            logger.info('Copying OS disk of source VM...')
            copy_disk_id = _call_az_command(copy_disk_command).strip('\n')

            attach_disk_command = 'az vm disk attach -g {g} --vm-name {repair} --name {id}' \
                                  .format(g=repair_group_name, repair=repair_vm_name, id=copy_disk_id)

            logger.info('Creating repair VM...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password, repair_username])
            logger.info('Attaching copied disk to repair VM...')
            _call_az_command(attach_disk_command)

            # Handle encrypted VM cases
            if unlock_encrypted_vm:
                _unlock_singlepass_encrypted_disk(source_vm, is_linux,
                                                  repair_group_name,
                                                  repair_vm_name)

        # UNMANAGED DISK
        else:
            logger.info(
                'Source VM uses unmanaged disks. Creating repair VM with unmanaged disks.\n'
            )
            os_disk_uri = source_vm.storage_profile.os_disk.vhd.uri
            copy_disk_name = copy_disk_name + '.vhd'
            storage_account = StorageResourceIdentifier(
                cmd.cli_ctx.cloud, os_disk_uri)
            # Validate create vm create command to validate parameters before running copy disk commands
            validate_create_vm_command = create_repair_vm_command + ' --validate'
            logger.info('Validating VM template before continuing...')
            _call_az_command(validate_create_vm_command,
                             secure_params=[repair_password, repair_username])

            # get storage account connection string
            get_connection_string_command = 'az storage account show-connection-string -g {g} -n {n} --query connectionString -o tsv' \
                                            .format(g=resource_group_name, n=storage_account.account_name)
            logger.debug('Fetching storage account connection string...')
            connection_string = _call_az_command(
                get_connection_string_command).strip('\n')

            # Create Snapshot of Unmanaged Disk
            make_snapshot_command = 'az storage blob snapshot -c {c} -n {n} --connection-string "{con_string}" --query snapshot -o tsv' \
                                    .format(c=storage_account.container, n=storage_account.blob, con_string=connection_string)
            logger.info('Creating snapshot of OS disk...')
            snapshot_timestamp = _call_az_command(make_snapshot_command,
                                                  secure_params=[
                                                      connection_string
                                                  ]).strip('\n')
            snapshot_uri = os_disk_uri + '?snapshot={timestamp}'.format(
                timestamp=snapshot_timestamp)

            # Copy Snapshot into unmanaged Disk
            copy_snapshot_command = 'az storage blob copy start -c {c} -b {name} --source-uri {source} --connection-string "{con_string}"' \
                                    .format(c=storage_account.container, name=copy_disk_name, source=snapshot_uri, con_string=connection_string)
            logger.info('Creating a copy disk from the snapshot...')
            _call_az_command(copy_snapshot_command,
                             secure_params=[connection_string])
            # Generate the copied disk uri by replacing the exact blob-name
            # suffix of the source URI. NOTE: str.rstrip() strips a *character
            # set* rather than a suffix and could over-trim the URI (e.g. when
            # the character before the blob name also occurs in the blob name),
            # so slice off the suffix by length instead.
            copy_disk_id = (os_disk_uri[:len(os_disk_uri) - len(storage_account.blob)]
                            + copy_disk_name)

            # Create new repair VM with copied unmanaged disk command
            create_repair_vm_command = create_repair_vm_command + ' --use-unmanaged-disk'
            logger.info('Creating repair VM while disk copy is in progress...')
            _call_az_command(create_repair_vm_command,
                             secure_params=[repair_password, repair_username])

            logger.info('Checking if disk copy is done...')
            copy_check_command = 'az storage blob show -c {c} -n {name} --connection-string "{con_string}" --query properties.copy.status -o tsv' \
                                 .format(c=storage_account.container, name=copy_disk_name, con_string=connection_string)
            copy_result = _call_az_command(copy_check_command,
                                           secure_params=[connection_string
                                                          ]).strip('\n')
            if copy_result != 'success':
                raise UnmanagedDiskCopyError('Unmanaged disk copy failed.')

            # Attach copied unmanaged disk to new vm
            logger.info('Attaching copied disk to repair VM as data disk...')
            attach_disk_command = "az vm unmanaged-disk attach -g {g} -n {disk_name} --vm-name {vm_name} --vhd-uri {uri}" \
                                  .format(g=repair_group_name, disk_name=copy_disk_name, vm_name=repair_vm_name, uri=copy_disk_id)
            _call_az_command(attach_disk_command)

        created_resources = _list_resource_ids_in_rg(repair_group_name)
        command.set_status_success()

    # Some error happened. Stop command and clean-up resources.
    except KeyboardInterrupt:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = "Command interrupted by user input."
        command.message = "Command interrupted by user input. Cleaning up resources."
    except AzCommandError as azCommandError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(azCommandError)
        command.message = "Repair create failed. Cleaning up created resources."
    except SkuNotAvailableError as skuNotAvailableError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(skuNotAvailableError)
        command.message = "Please check if the current subscription can create more VM resources. Cleaning up created resources."
    except UnmanagedDiskCopyError as unmanagedDiskCopyError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(unmanagedDiskCopyError)
        command.message = "Repair create failed. Please try again at another time. Cleaning up created resources."
    except WindowsOsNotAvailableError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = 'Compatible Windows OS image not available.'
        command.message = 'A compatible Windows OS image is not available at this time, please check subscription.'
    except Exception as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'

    finally:
        if command.error_stack_trace:
            logger.debug(command.error_stack_trace)
    # Generate return results depending on command state
    if not command.is_status_success():
        command.set_status_error()
        return_dict = command.init_return_dict()
        _clean_up_resources(repair_group_name, confirm=False)
    else:
        created_resources.append(copy_disk_id)
        command.message = 'Your repair VM \'{n}\' has been created in the resource group \'{repair_rg}\' with disk \'{d}\' attached as data disk. ' \
                          'Please use this VM to troubleshoot and repair. Once the repairs are complete use the command ' \
                          '\'az vm repair restore -n {source_vm} -g {rg} --verbose\' to restore disk to the source VM. ' \
                          'Note that the copied disk is created within the original resource group \'{rg}\'.' \
                          .format(n=repair_vm_name, repair_rg=repair_group_name, d=copy_disk_name, rg=resource_group_name, source_vm=vm_name)
        return_dict = command.init_return_dict()
        # Add additional custom return properties
        return_dict['repair_vm_name'] = repair_vm_name
        return_dict['copied_disk_name'] = copy_disk_name
        return_dict['copied_disk_uri'] = copy_disk_id
        return_dict['repair_resource_group'] = repair_group_name
        return_dict['resource_tag'] = resource_tag
        return_dict['created_resources'] = created_resources

        logger.info('\n%s\n', command.message)
    return return_dict