Example #1
def disable_protection(cmd,
                       client,
                       resource_group_name,
                       vault_name,
                       item,
                       delete_backup_data=False,
                       **kwargs):
    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    # Trigger disable protection and wait for completion
    if delete_backup_data:
        result = sdk_no_wait(True, client.delete, vault_name,
                             resource_group_name, fabric_name, container_uri,
                             item_uri)
        return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                       resource_group_name)

    afs_item_properties = AzureFileshareProtectedItem()
    afs_item_properties.policy_id = ''
    afs_item_properties.protection_state = ProtectionState.protection_stopped
    afs_item_properties.source_resource_id = item.properties.source_resource_id
    afs_item = ProtectedItemResource(properties=afs_item_properties)
    result = sdk_no_wait(True, client.create_or_update, vault_name,
                         resource_group_name, fabric_name, container_uri,
                         item_uri, afs_item)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
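
Note: sdk_no_wait (from azure.cli.core.util) simply toggles SDK polling; a rough
sketch of its behavior, which is why the example above passes True as its first
argument and then tracks the job itself:

def sdk_no_wait(no_wait, func, *args, **kwargs):
    # Rough sketch of azure.cli.core.util.sdk_no_wait: when no_wait is truthy,
    # disable long-running-operation polling and return the raw SDK result.
    if no_wait:
        kwargs.update({'polling': False})
    return func(*args, **kwargs)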
Example #2
def disable_protection(cmd, client, resource_group_name, vault_name, item,
                       delete_backup_data):

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(
            backup_item_type):
        raise CLIError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    if delete_backup_data:
        result = sdk_no_wait(True, client.delete, vault_name,
                             resource_group_name, fabric_name, container_uri,
                             item_uri)
        return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                          resource_group_name)

    properties = _get_protected_item_instance(backup_item_type)
    properties.protection_state = 'ProtectionStopped'
    properties.policy_id = ''
    param = ProtectedItemResource(properties=properties)

    # Trigger disable protection and wait for completion
    result = sdk_no_wait(True, client.create_or_update, vault_name,
                         resource_group_name, fabric_name, container_uri,
                         item_uri, param)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
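
Note: the get_protection_container_uri_from_id / get_protected_item_uri_from_id
helpers extract URI segments from the item's ARM ID; an illustrative sketch
(not necessarily the module's exact code):

import re

def container_uri_from_id(arm_id):
    # e.g. ".../protectionContainers/<container-uri>/protectedItems/<item-uri>"
    return re.search(r'(?<=protectionContainers/)[^/]+', arm_id).group(0)

def item_uri_from_id(arm_id):
    # The item URI's first ';'-separated segment (e.g. 'SQLDataBase') is the
    # workload type that the is_sql / is_hana checks above inspect.
    return re.search(r'(?<=protectedItems/)[^/]+', arm_id).group(0)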
Example #3
def move_wl_recovery_points(cmd, resource_group_name, vault_name, item_name,
                            rp_name, source_tier, destination_tier):

    container_uri = cust_help.get_protection_container_uri_from_id(
        item_name.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item_name.id)

    if source_tier not in common.tier_type_map:
        raise InvalidArgumentValueError(
            'This source tier-type is not accepted by the move command at present.'
        )

    if destination_tier not in common.tier_type_map:
        raise InvalidArgumentValueError(
            'This destination tier-type is not accepted by the move command at present.'
        )

    parameters = MoveRPAcrossTiersRequest(
        source_tier_type=common.tier_type_map[source_tier],
        target_tier_type=common.tier_type_map[destination_tier])

    result = _backup_client_factory(cmd.cli_ctx).move_recovery_point(
        vault_name,
        resource_group_name,
        fabric_name,
        container_uri,
        item_uri,
        rp_name,
        parameters,
        raw=True,
        polling=False).result()

    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
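
Note: command-line tier names are translated to service enum values through
common.tier_type_map; a hypothetical illustration of that mapping (the real
values live in the backup module's shared helpers) and of guarding both
arguments before indexing:

tier_type_map = {
    'VaultStandard': 'HardenedRP',   # hypothetical stand-in values
    'VaultArchive': 'ArchivedRP',
    'Snapshot': 'InstantRP',
}

source_tier, destination_tier = 'VaultStandard', 'VaultArchive'
if source_tier in tier_type_map and destination_tier in tier_type_map:
    print(tier_type_map[source_tier], '->', tier_type_map[destination_tier])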
Example #4
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy, tenant_id=None,
                           is_critical_operation=False):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError(
            """
            The policy type should match the workload being protected.
            Use the relevant get-default policy command and use its output to update the policy for the workload.
            """)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(backup_item_type):
        raise InvalidArgumentValueError("Item must be either of type SQLDataBase or SAPHanaDatabase.")

    item_properties = _get_protected_item_instance(backup_item_type)
    item_properties.policy_id = policy.id

    param = ProtectedItemResource(properties=item_properties)
    if is_critical_operation:
        existing_policy_name = item.properties.policy_id.split('/')[-1]
        existing_policy = common.show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name,
                                             existing_policy_name)
        if cust_help.is_retention_duration_decreased(existing_policy, policy, "AzureWorkload"):
            # update the payload with critical operation and add auxiliary header for cross tenant case
            if tenant_id is not None:
                client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient,
                                                 aux_tenants=[tenant_id]).protected_items
            param.properties.resource_guard_operation_requests = [cust_help.get_resource_guard_operation_request(
                cmd.cli_ctx, resource_group_name, vault_name, "updateProtection")]
    # Update policy
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, param, cls=cust_help.get_pipeline_response)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
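
Note: cls=cust_help.get_pipeline_response asks the SDK to return the raw
pipeline response (whose headers the job-tracking helper reads) instead of a
deserialized model; a minimal sketch of such a callback, following the
azure-core cls convention:

def get_pipeline_response(pipeline_response, deserialized, response_headers):
    # The SDK invokes cls(pipeline_response, deserialized, response_headers);
    # returning the pipeline response keeps the LRO tracking headers available.
    return pipeline_response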
Example #5
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until, backup_type,
               enable_compression=False):
    message = "For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy"
    if retain_until is not None:
        logger.warning(message)
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and enable_compression:
        raise CLIError(
            """
            Enable compression is not applicable for the SAPHanaDatabase item type.
            """)

    if cust_help.is_hana(backup_item_type) and backup_type in ['Log', 'CopyOnlyFull']:
        raise CLIError(
            """
            Backup type cannot be Log or CopyOnlyFull for the SAPHanaDatabase item type.
            """)

    properties = AzureWorkloadBackupRequest(backup_type=backup_type, enable_compression=enable_compression,
                                            recovery_point_expiry_time_in_utc=retain_until)
    param = BackupRequestResource(properties=properties)

    # Trigger backup and wait for completion
    result = sdk_no_wait(True, client.trigger,
                         vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                         param)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Example #6
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item,
                           policy):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError("""
            The policy type should match the workload being protected.
            Use the relevant get-default policy command and use its output to update the policy for the workload.
            """)

    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    # Update policy request
    afs_item_properties = AzureFileshareProtectedItem()
    afs_item_properties.policy_id = policy.id
    afs_item_properties.source_resource_id = item.properties.source_resource_id
    afs_item = ProtectedItemResource(properties=afs_item_properties)

    # Update policy
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     afs_item,
                                     raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
Example #7
def enable_protection_for_azure_wl(cmd, client, resource_group_name,
                                   vault_name, policy_object,
                                   protectable_item):
    # Get protectable item.
    protectable_item_object = protectable_item
    protectable_item_type = protectable_item_object.properties.protectable_item_type
    if protectable_item_type.lower() not in [
            "sqldatabase", "sqlinstance", "saphanadatabase", "saphanasystem"
    ]:
        raise CLIError("""
            Protectable Item must be either of type SQLDataBase, HANADatabase, HANAInstance or SQLInstance.
            """)

    item_name = protectable_item_object.name
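    # The container name is the 13th '/'-separated segment of the ARM ID.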
    container_name = protectable_item_object.id.split('/')[12]
    cust_help.validate_policy(policy_object)
    policy_id = policy_object.id

    properties = _get_protected_item_instance(protectable_item_type)
    properties.backup_management_type = 'AzureWorkload'
    properties.policy_id = policy_id
    properties.workload_type = protectable_item_type
    param = ProtectionContainerResource(properties=properties)

    # Trigger enable protection and wait for completion
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_name,
                                     item_name,
                                     param,
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #8
def undelete_protection(cmd, client, resource_group_name, vault_name, item):
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(
            backup_item_type):
        raise ValidationError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    properties = _get_protected_item_instance(backup_item_type)
    properties.protection_state = 'ProtectionStopped'
    properties.policy_id = ''
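    # is_rehydrate marks this update as an undelete of the soft-deleted item.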
    properties.is_rehydrate = True
    param = ProtectedItemResource(properties=properties)

    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     param,
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #9
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item,
                           policy):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError("""
            The policy type should match the workload being protected.
            Use the relevant get-default policy command and use its output to update the policy for the workload.
            """)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(
            backup_item_type):
        raise InvalidArgumentValueError(
            "Item must be either of type SQLDataBase or SAPHanaDatabase.")

    item_properties = _get_protected_item_instance(backup_item_type)
    item_properties.policy_id = policy.id

    param = ProtectedItemResource(properties=item_properties)

    # Update policy
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     param,
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #10
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item,
                           policy):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError("""
            The policy type should match the workload being protected.
            Use the relevant get-default policy command and use its output to update the policy for the workload.
            """)
    item_properties = item.properties
    item_properties.policy_id = policy.id

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    param = ProtectedItemResource(properties=item_properties)

    # Update policy
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     param,
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #11
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until):
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)
    trigger_backup_request = _get_backup_request(retain_until)

    result = client.trigger(vault_name, resource_group_name, fabric_name,
                            container_uri, item_uri, trigger_backup_request, raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
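
Note: _get_backup_request is not shown in this listing; a hypothetical sketch,
assuming the AzureFileShareBackupRequest model and the BackupRequestResource
wrapper used elsewhere in these examples:

def _get_backup_request(retain_until):
    # Hypothetical sketch: wrap the recovery-point expiry time in a
    # file-share backup request.
    trigger_backup_properties = AzureFileShareBackupRequest(
        recovery_point_expiry_time_in_utc=retain_until)
    return BackupRequestResource(properties=trigger_backup_properties)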
Example #12
def enable_for_AzureFileShare(cmd, client, resource_group_name, vault_name, afs_name,
                              storage_account_name, policy_name):

    # get registered storage accounts
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    registered_containers = common.list_containers(containers_client, resource_group_name, vault_name, "AzureStorage")
    storage_account = _get_storage_account_from_list(registered_containers, storage_account_name)

    # get unregistered storage accounts
    if storage_account is None:
        unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name, vault_name)
        storage_account = _get_storage_account_from_list(unregistered_containers, storage_account_name)

        if storage_account is None:
            # refresh containers in the vault
            protection_containers_client = protection_containers_cf(cmd.cli_ctx)
            filter_string = helper.get_filter_string({'backupManagementType': "AzureStorage"})

            refresh_result = protection_containers_client.refresh(vault_name, resource_group_name, fabric_name,
                                                                  filter=filter_string, raw=True)
            helper.track_refresh_operation(cmd.cli_ctx, refresh_result, vault_name, resource_group_name)

            # refetch the protectable containers after refresh
            unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name, vault_name)
            storage_account = _get_storage_account_from_list(unregistered_containers, storage_account_name)

            if storage_account is None:
                raise CLIError("Storage account not found or not supported.")

        # register storage account
        protection_containers_client = protection_containers_cf(cmd.cli_ctx)
        properties = AzureStorageContainer(backup_management_type="AzureStorage",
                                           source_resource_id=storage_account.properties.container_id,
                                           workload_type="AzureFileShare")
        param = ProtectionContainerResource(properties=properties)
        result = protection_containers_client.register(vault_name, resource_group_name, fabric_name,
                                                       storage_account.name, param, raw=True)
        helper.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, storage_account.name)

    policy = common.show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)
    helper.validate_policy(policy)

    protectable_item = _get_protectable_item_for_afs(cmd.cli_ctx, vault_name, resource_group_name, afs_name,
                                                     storage_account)
    helper.validate_azurefileshare_item(protectable_item)

    container_uri = helper.get_protection_container_uri_from_id(protectable_item.id)
    item_uri = helper.get_protectable_item_uri_from_id(protectable_item.id)
    item_properties = AzureFileshareProtectedItem()

    item_properties.policy_id = policy.id
    item_properties.source_resource_id = protectable_item.properties.parent_container_fabric_id
    item = ProtectedItemResource(properties=item_properties)

    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, item, raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
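
Note: helper.get_filter_string builds the OData filter string the containers
API expects; an illustrative sketch (the real helper is somewhat more general):

def get_filter_string(filter_dict):
    # Illustrative sketch: {'backupManagementType': 'AzureStorage'} becomes
    # "backupManagementType eq 'AzureStorage'".
    return ' and '.join(
        "{} eq '{}'".format(key, value)
        for key, value in filter_dict.items() if value is not None)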
Example #13
def restore_AzureFileShare(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           rp_name,
                           item,
                           restore_mode,
                           resolve_conflict,
                           restore_request_type,
                           source_file_type=None,
                           source_file_path=None,
                           target_storage_account_name=None,
                           target_file_share_name=None,
                           target_folder=None):

    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    sa_name = item.properties.container_name
    source_resource_id = _get_storage_account_id(cmd.cli_ctx,
                                                 sa_name.split(';')[-1],
                                                 sa_name.split(';')[-2])
    target_resource_id = None

    afs_restore_request = AzureFileShareRestoreRequest()
    target_details = None

    afs_restore_request.copy_options = resolve_conflict
    afs_restore_request.recovery_type = restore_mode
    afs_restore_request.source_resource_id = source_resource_id
    afs_restore_request.restore_request_type = restore_request_type

    restore_file_specs = [
        RestoreFileSpecs(path=source_file_path,
                         file_spec_type=source_file_type,
                         target_folder_path=target_folder)
    ]

    if restore_mode == "AlternateLocation":
        target_resource_id = _get_storage_account_id(
            cmd.cli_ctx, target_storage_account_name, resource_group_name)
        target_details = TargetAFSRestoreInfo()
        target_details.name = target_file_share_name
        target_details.target_resource_id = target_resource_id
        afs_restore_request.target_details = target_details

    afs_restore_request.restore_file_specs = restore_file_specs

    trigger_restore_request = RestoreRequestResource(
        properties=afs_restore_request)

    result = sdk_no_wait(True, client.trigger, vault_name, resource_group_name,
                         fabric_name, container_uri, item_uri, rp_name,
                         trigger_restore_request)

    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
Example #14
def restore_azure_wl(cmd, client, resource_group_name, vault_name,
                     recovery_config):
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object[
        'alternate_directory_paths']

    # Construct trigger restore request object
    trigger_restore_properties = _get_restore_request_instance(
        item_type, log_point_in_time)
    trigger_restore_properties.recovery_type = restore_mode

    if restore_mode == 'AlternateLocation':
        setattr(trigger_restore_properties, 'source_resource_id',
                source_resource_id)
        setattr(
            trigger_restore_properties, 'target_info',
            TargetRestoreInfo(overwrite_option='Overwrite',
                              database_name=database_name,
                              container_id=container_id))
        if 'sql' in item_type.lower():
            directory_map = []
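            # Each alternate directory path is expected to be a 4-tuple:
            # (mapping_type, source_path, source_logical_name, target_path).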
            for i in alternate_directory_paths:
                directory_map.append(
                    SQLDataDirectoryMapping(mapping_type=i[0],
                                            source_path=i[1],
                                            source_logical_name=i[2],
                                            target_path=i[3]))
            setattr(trigger_restore_properties, 'alternate_directory_paths',
                    directory_map)

    if log_point_in_time is not None:
        setattr(trigger_restore_properties, 'point_in_time',
                datetime_type(log_point_in_time))

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties,
                'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)
    trigger_restore_request = RestoreRequestResource(
        properties=trigger_restore_properties)
    # Trigger restore and wait for completion
    result = sdk_no_wait(True, client.trigger, vault_name, resource_group_name,
                         fabric_name, container_uri, item_uri,
                         recovery_point_id, trigger_restore_request)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #15
def backup_now(cmd,
               client,
               resource_group_name,
               vault_name,
               item,
               retain_until,
               backup_type,
               enable_compression=False):
    if backup_type is None:
        raise RequiredArgumentMissingError(
            "Backup type missing. Please provide a valid backup type using "
            "--backup-type argument.")

    message = "For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy"

    if retain_until is not None and backup_type != 'CopyOnlyFull':
        logger.warning(message)
        retain_until = datetime.now(timezone.utc) + timedelta(days=30)

    if retain_until is None:
        retain_until = datetime.now(timezone.utc) + timedelta(days=30)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and enable_compression:
        raise CLIError("""
            Enable compression is not applicable for the SAPHanaDatabase item type.
            """)

    if cust_help.is_hana(backup_item_type) and backup_type in [
            'Log', 'CopyOnlyFull', 'Incremental'
    ]:
        raise CLIError("""
            Backup type cannot be Log, CopyOnlyFull or Incremental for a SAPHanaDatabase ad hoc backup.
            """)

    properties = AzureWorkloadBackupRequest(
        backup_type=backup_type,
        enable_compression=enable_compression,
        recovery_point_expiry_time_in_utc=retain_until)
    param = BackupRequestResource(properties=properties)

    # Trigger backup and wait for completion
    result = client.trigger(vault_name,
                            resource_group_name,
                            fabric_name,
                            container_uri,
                            item_uri,
                            param,
                            raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #16
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until):

    if retain_until is None:
        retain_until = datetime.now(timezone.utc) + timedelta(days=30)

    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)
    trigger_backup_request = _get_backup_request(retain_until)

    result = client.trigger(vault_name, resource_group_name, fabric_name,
                            container_uri, item_uri, trigger_backup_request, raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Example #17
def update_policy_for_item(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           item,
                           policy,
                           tenant_id=None,
                           is_critical_operation=False):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError("""
            The policy type should match the workload being protected.
            Use the relevant get-default policy command and use its output to update the policy for the workload.
            """)

    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    # Update policy request
    afs_item_properties = AzureFileshareProtectedItem()
    afs_item_properties.policy_id = policy.id
    afs_item_properties.source_resource_id = item.properties.source_resource_id
    afs_item = ProtectedItemResource(properties=afs_item_properties)
    if is_critical_operation:
        existing_policy_name = item.properties.policy_id.split('/')[-1]
        existing_policy = common.show_policy(
            protection_policies_cf(cmd.cli_ctx), resource_group_name,
            vault_name, existing_policy_name)
        if helper.is_retention_duration_decreased(existing_policy, policy,
                                                  "AzureStorage"):
            # update the payload with critical operation and add auxiliary header for cross tenant case
            if tenant_id is not None:
                client = get_mgmt_service_client(
                    cmd.cli_ctx,
                    RecoveryServicesBackupClient,
                    aux_tenants=[tenant_id]).protected_items
            afs_item.properties.resource_guard_operation_requests = [
                helper.get_resource_guard_operation_request(
                    cmd.cli_ctx, resource_group_name, vault_name,
                    "updateProtection")
            ]
    # Update policy
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     afs_item,
                                     cls=helper.get_pipeline_response)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
Example #18
def disable_protection(cmd, client, resource_group_name, vault_name, item):
    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    afs_item_properties = AzureFileshareProtectedItem()
    afs_item_properties.policy_id = ''
    afs_item_properties.protection_state = ProtectionState.protection_stopped
    afs_item_properties.source_resource_id = item.properties.source_resource_id
    afs_item = ProtectedItemResource(properties=afs_item_properties)
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     afs_item,
                                     cls=helper.get_pipeline_response)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
Example #19
def delete_protected_item(cmd,
                          client,
                          resource_group_name,
                          vault_name,
                          item,
                          tenant_id=None):
    container_uri = custom_help.get_protection_container_uri_from_id(item.id)
    item_uri = custom_help.get_protected_item_uri_from_id(item.id)
    if custom_help.has_resource_guard_mapping(cmd.cli_ctx, resource_group_name,
                                              vault_name, "deleteProtection"):
        resource_guard_proxy_client = resource_guard_proxy_cf(cmd.cli_ctx)
        # For Cross Tenant Scenario
        if tenant_id is not None:
            resource_guard_proxy_client = get_mgmt_service_client(
                cmd.cli_ctx,
                RecoveryServicesBackupClient,
                aux_tenants=[tenant_id]).resource_guard_proxy
            client = get_mgmt_service_client(
                cmd.cli_ctx, RecoveryServicesBackupClient,
                aux_tenants=[tenant_id]).protected_items
        # unlock delete
        resource_guard_operation_request = custom_help.get_resource_guard_operation_request(
            cmd.cli_ctx, resource_group_name, vault_name, "deleteProtection")
        resource_guard_proxy_client.unlock_delete(
            vault_name, resource_group_name, default_resource_guard,
            UnlockDeleteRequest(
                resource_guard_operation_requests=[resource_guard_operation_request],
                resource_to_be_deleted=item.id))

    result = client.delete(vault_name,
                           resource_group_name,
                           fabric_name,
                           container_uri,
                           item_uri,
                           cls=custom_help.get_pipeline_response)
    return custom_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                        resource_group_name)
Example #20
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None):

    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                vault_name, container_uri, item_uri, recovery_point_id, item_type,
                                                backup_management_type="AzureWorkload")

    rp_list = [recovery_point]
    common.fetch_tier(rp_list)

    if (rp_list[0].properties.recovery_point_tier_details is not None and rp_list[0].tier_type == 'VaultArchive' and
            rehydration_priority is None):
        raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
        parameters of rehydration duration and rehydration priority.""")

    if rp_list[0].properties.recovery_point_tier_details is not None and rp_list[0].tier_type == 'VaultArchive':
        # Construct trigger restore request object
        trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, rehydration_priority)

        # Rehydration retention must be an ISO 8601 duration, e.g. 'P15D' for 15 days
        rehyd_duration = 'P' + str(rehydration_duration) + 'D'
        rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                        rehydration_priority=rehydration_priority)

        trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    else:
        trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group, target_vault_name,
                                                 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id', target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        setattr(trigger_restore_properties, 'point_in_time', datetime_type(log_point_in_time))

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Trigger restore and wait for completion
    result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri,
                            item_uri, recovery_point_id, trigger_restore_request, raw=True, polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Example #21
def restore_AzureFileShare(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           rp_name,
                           item,
                           restore_mode,
                           resolve_conflict,
                           restore_request_type,
                           source_file_type=None,
                           source_file_path=None,
                           target_storage_account_name=None,
                           target_file_share_name=None,
                           target_folder=None):

    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    sa_name = item.properties.container_name

    afs_restore_request = AzureFileShareRestoreRequest()
    target_details = None

    afs_restore_request.copy_options = resolve_conflict
    afs_restore_request.recovery_type = restore_mode
    afs_restore_request.source_resource_id = _get_storage_account_id(
        cmd.cli_ctx,
        sa_name.split(';')[-1],
        sa_name.split(';')[-2])
    afs_restore_request.restore_request_type = restore_request_type

    restore_file_specs = None

    if source_file_path is not None:
        if len(source_file_path) > 99:
            raise ArgumentUsageError("""
            You can only recover a maximum of 99 files/folders.
            Please ensure you have provided fewer than 100 source file paths.
            """)
        restore_file_specs = []
        for filepath in source_file_path:
            restore_file_specs.append(
                RestoreFileSpecs(path=filepath,
                                 file_spec_type=source_file_type,
                                 target_folder_path=target_folder))

    if restore_mode == "AlternateLocation":
        target_sa_name, target_sa_rg = helper.get_resource_name_and_rg(
            resource_group_name, target_storage_account_name)
        target_details = TargetAFSRestoreInfo()
        target_details.name = target_file_share_name
        target_details.target_resource_id = _get_storage_account_id(
            cmd.cli_ctx, target_sa_name, target_sa_rg)
        afs_restore_request.target_details = target_details

    afs_restore_request.restore_file_specs = restore_file_specs

    trigger_restore_request = RestoreRequestResource(
        properties=afs_restore_request)

    # polling=False plus .result() returns the initial response without waiting;
    # track_backup_job below then follows the restore job to completion.
    result = client.begin_trigger(vault_name,
                                  resource_group_name,
                                  fabric_name,
                                  container_uri,
                                  item_uri,
                                  rp_name,
                                  trigger_restore_request,
                                  cls=helper.get_pipeline_response,
                                  polling=False).result()

    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
Example #22
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None, use_secondary_region=None):

    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)
    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                    vault_name, container_uri, item_uri, recovery_point_id,
                                                    workload_type, "AzureWorkload", use_secondary_region)

        if recovery_point is None:
            raise InvalidArgumentValueError("""
            Specified recovery point not found. Please check the recovery config file
            or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        if (recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive'):
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
                parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)

            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)

            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group, target_vault_name,
                                                 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id', target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name, container_uri,
                                                      item_uri, recovery_point_id, aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  recovery_point_id, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
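
Note: datetime_type (used above to parse log_point_in_time) is not shown in
this listing; a hypothetical sketch of such a parser, assuming the backup CLI's
documented DD-MM-YYYY[-HH-MM-SS] UTC formats:

from datetime import datetime

def datetime_type(string):
    # Hypothetical sketch: accept the documented UTC formats, fail otherwise.
    for fmt in ('%d-%m-%Y', '%d-%m-%Y-%H-%M-%S'):
        try:
            return datetime.strptime(string, fmt)
        except ValueError:
            continue
    raise ValueError('Input "{}" is not a valid UTC datetime.'.format(string))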