Example 1
def disable_protection(cmd, client, resource_group_name, vault_name, item,
                       delete_backup_data):

    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # Only SQL and SAP HANA database items support disabling protection here
    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(
            backup_item_type):
        raise CLIError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    if delete_backup_data:
        result = client.delete(vault_name,
                               resource_group_name,
                               fabric_name,
                               container_uri,
                               item_uri,
                               raw=True)
        return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                          resource_group_name)

    # Build the protected item payload with protection stopped and the policy cleared
    properties = _get_protected_item_instance(backup_item_type)
    properties.protection_state = 'ProtectionStopped'
    properties.policy_id = ''
    param = ProtectedItemResource(properties=properties)

    # Trigger disable protection and wait for completion
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     param,
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
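
Both this example and Example 5 depend on a _get_protected_item_instance helper that is not shown here. A minimal sketch of what such a helper could look like, assuming the AzureVmWorkloadSQLDatabaseProtectedItem and AzureVmWorkloadSAPHanaDatabaseProtectedItem models from the Recovery Services Backup SDK (illustrative, not the module's actual implementation):

def _get_protected_item_instance(backup_item_type):
    # Map the item type prefix (already validated as SQL or SAP HANA above)
    # to the matching workload-specific protected item model.
    if cust_help.is_sql(backup_item_type):
        return AzureVmWorkloadSQLDatabaseProtectedItem()
    return AzureVmWorkloadSAPHanaDatabaseProtectedItem()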
Example 2
def disable_protection(cmd,
                       client,
                       resource_group_name,
                       vault_name,
                       item,
                       delete_backup_data=False,
                       **kwargs):
    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    # Trigger disable protection and wait for completion
    if delete_backup_data:
        result = client.delete(vault_name,
                               resource_group_name,
                               fabric_name,
                               container_uri,
                               item_uri,
                               raw=True)
        return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                       resource_group_name)

    afs_item_properties = AzureFileshareProtectedItem()
    afs_item_properties.policy_id = ''
    afs_item_properties.protection_state = ProtectionState.protection_stopped
    afs_item_properties.source_resource_id = item.properties.source_resource_id
    afs_item = ProtectedItemResource(properties=afs_item_properties)
    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     afs_item,
                                     raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
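
These snippets are taken from a larger module, so names such as fabric_name, ProtectedItemResource, AzureFileshareProtectedItem and ProtectionState resolve at module scope. A rough sketch of the module-level setup they assume (the exact import path depends on the azure-mgmt-recoveryservicesbackup version, so treat this as illustrative):

from azure.mgmt.recoveryservicesbackup.models import (
    ProtectedItemResource,
    AzureFileshareProtectedItem,
    ProtectionState,
)

# Recovery Services exposes a single backup fabric, conventionally named "Azure".
fabric_name = "Azure"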
Example 3
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item,
                           policy):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError("""
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to update the policy for the workload.
            """)

    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    # Update policy request
    afs_item_properties = AzureFileshareProtectedItem()
    afs_item_properties.policy_id = policy.id
    afs_item_properties.source_resource_id = item.properties.source_resource_id
    afs_item = ProtectedItemResource(properties=afs_item_properties)

    # Update policy
    result = sdk_no_wait(True, client.create_or_update, vault_name,
                         resource_group_name, fabric_name, container_uri,
                         item_uri, afs_item)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
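
Unlike Example 2, this variant dispatches the request through sdk_no_wait from azure.cli.core.util with the no-wait flag hard-coded to True, so create_or_update returns immediately instead of blocking on the long-running operation; the response is then handed to track_backup_job, which polls the resulting backup job itself.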
Example 4
def list_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                         use_secondary_region=None, is_ready_for_move=None, target_tier=None, tier=None,
                         recommended_for_archive=None):
    if use_secondary_region:
        raise ArgumentUsageError(
            """
            --use-secondary-region flag is not supported for --backup-management-type AzureStorage.
            Please either remove the flag or query for any other backup-management-type.
            """)

    if is_ready_for_move is not None or target_tier is not None or tier is not None:
        raise ArgumentUsageError("""Invalid argument has been passed. --is-ready-for-move true, --target-tier
        and --tier flags are not supported for --backup-management-type AzureStorage.""")

    if recommended_for_archive is not None:
        raise ArgumentUsageError("""--recommended-for-archive is supported by AzureIaasVM backup management
        type only.""")

    if cmd.name.split()[2] == 'show-log-chain':
        raise ArgumentUsageError("show-log-chain is supported by AzureWorkload backup management type only.")

    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = helper.get_query_dates(end_date, start_date)

    filter_string = helper.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})

    # Get recovery points
    recovery_points = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, filter_string)
    paged_recovery_points = helper.get_list_from_paged_response(recovery_points)

    return paged_recovery_points
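
The paging helper used on the last line is not shown; conceptually it just drains the paged iterator returned by the SDK into a plain list. A minimal sketch under that assumption:

def get_list_from_paged_response(obj_list):
    # Materialize the paged iterator returned by the SDK list call.
    return list(obj_list) if obj_list else []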
Example 5
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy):
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError(
            """
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to update the policy for the workload.
            """)

    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(backup_item_type):
        raise InvalidArgumentValueError("Item must be either of type SQLDataBase or SAPHanaDatabase.")

    # Attach the new policy to a fresh protected item payload
    item_properties = _get_protected_item_instance(backup_item_type)
    item_properties.policy_id = policy.id

    param = ProtectedItemResource(properties=item_properties)

    # Update policy
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, param, cls=cust_help.get_pipeline_response)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Example 6
def restore_AzureFileShare(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           rp_name,
                           item,
                           restore_mode,
                           resolve_conflict,
                           restore_request_type,
                           source_file_type=None,
                           source_file_path=None,
                           target_storage_account_name=None,
                           target_file_share_name=None,
                           target_folder=None):

    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    # The container name encodes the source storage account and its resource group
    sa_name = item.properties.container_name
    source_resource_id = _get_storage_account_id(cmd.cli_ctx,
                                                 sa_name.split(';')[-1],
                                                 sa_name.split(';')[-2])
    target_resource_id = None

    # Build the file share restore request from the restore options
    afs_restore_request = AzureFileShareRestoreRequest()
    target_details = None

    afs_restore_request.copy_options = resolve_conflict
    afs_restore_request.recovery_type = restore_mode
    afs_restore_request.source_resource_id = source_resource_id
    afs_restore_request.restore_request_type = restore_request_type

    restore_file_specs = None

    # Item-level restore: build a file spec for each requested source path
    if source_file_path is not None:
        restore_file_specs = []
        for filepath in source_file_path:
            restore_file_specs.append(
                RestoreFileSpecs(path=filepath,
                                 file_spec_type=source_file_type,
                                 target_folder_path=target_folder))

    # Alternate-location restore: resolve the target file share details
    if restore_mode == "AlternateLocation":
        target_resource_id = _get_storage_account_id(
            cmd.cli_ctx, target_storage_account_name, resource_group_name)
        target_details = TargetAFSRestoreInfo()
        target_details.name = target_file_share_name
        target_details.target_resource_id = target_resource_id
        afs_restore_request.target_details = target_details

    afs_restore_request.restore_file_specs = restore_file_specs

    trigger_restore_request = RestoreRequestResource(
        properties=afs_restore_request)

    result = client.trigger(vault_name,
                            resource_group_name,
                            fabric_name,
                            container_uri,
                            item_uri,
                            rp_name,
                            trigger_restore_request,
                            raw=True)

    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
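
The _get_storage_account_id helper is also defined elsewhere in the module. A simplified, illustrative sketch using the storage management client (the real helper may also handle classic storage accounts and other edge cases):

def _get_storage_account_id(cli_ctx, storage_account_name, storage_account_rg):
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.cli.core.profiles import ResourceType

    # Resolve the storage account and return its fully qualified ARM resource ID.
    storage_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
    account = storage_client.storage_accounts.get_properties(
        storage_account_rg, storage_account_name)
    return account.id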
Example 7
def list_wl_recovery_points(cmd,
                            client,
                            resource_group_name,
                            vault_name,
                            item,
                            start_date=None,
                            end_date=None,
                            extended_info=None,
                            is_ready_for_move=None,
                            target_tier=None,
                            use_secondary_region=None,
                            tier=None,
                            recommended_for_archive=None):

    if recommended_for_archive is not None:
        raise ArgumentUsageError(
            """--recommended-for-archive is supported by AzureIaasVM backup management
        type only.""")

    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = cust_help.get_query_dates(
        end_date, start_date)

    if query_end_date and query_start_date:
        cust_help.is_range_valid(query_start_date, query_end_date)

    filter_string = cust_help.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date
    })

    if cmd.name.split()[2] == 'show-log-chain' or extended_info is not None:
        filter_string = cust_help.get_filter_string({
            'restorePointQueryType': 'Log',
            'startDate': query_start_date,
            'endDate': query_end_date,
            'extendedInfo': extended_info
        })

    if use_secondary_region:
        client = recovery_points_crr_cf(cmd.cli_ctx)

    # Get recovery points
    recovery_points = client.list(vault_name, resource_group_name, fabric_name,
                                  container_uri, item_uri, filter_string)
    paged_recovery_points = cust_help.get_list_from_paged_response(
        recovery_points)

    common.fetch_tier(paged_recovery_points)

    if use_secondary_region:
        # Filter out archive-tier recovery points when listing from the secondary region
        paged_recovery_points = [
            item for item in paged_recovery_points
            if item.properties.recovery_point_tier_details is None
            or item.tier_type != 'VaultArchive']

    recovery_point_list = common.check_rp_move_readiness(
        paged_recovery_points, target_tier, is_ready_for_move)
    recovery_point_list = common.filter_rp_based_on_tier(
        recovery_point_list, tier)
    return recovery_point_list
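
Two details worth noting: when --use-secondary-region is passed, the recovery points client is swapped for its cross-region-restore (CRR) counterpart, and recovery points already moved to the VaultArchive tier are filtered out of the secondary-region result. The remaining helpers, check_rp_move_readiness and filter_rp_based_on_tier, then narrow the list according to --target-tier, --is-ready-for-move and --tier.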
Example 8
def restore_AzureFileShare(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           rp_name,
                           item,
                           restore_mode,
                           resolve_conflict,
                           restore_request_type,
                           source_file_type=None,
                           source_file_path=None,
                           target_storage_account_name=None,
                           target_file_share_name=None,
                           target_folder=None):

    # Get container and item URIs
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    sa_name = item.properties.container_name

    afs_restore_request = AzureFileShareRestoreRequest()
    target_details = None

    afs_restore_request.copy_options = resolve_conflict
    afs_restore_request.recovery_type = restore_mode
    afs_restore_request.source_resource_id = _get_storage_account_id(
        cmd.cli_ctx,
        sa_name.split(';')[-1],
        sa_name.split(';')[-2])
    afs_restore_request.restore_request_type = restore_request_type

    restore_file_specs = None

    if source_file_path is not None:
        if len(source_file_path) > 99:
            raise ArgumentUsageError("""
            You can only recover a maximum of 99 Files/Folder.
            Please ensure you have provided less than 100 source file paths.
            """)
        restore_file_specs = []
        for filepath in source_file_path:
            restore_file_specs.append(
                RestoreFileSpecs(path=filepath,
                                 file_spec_type=source_file_type,
                                 target_folder_path=target_folder))

    if restore_mode == "AlternateLocation":
        target_sa_name, target_sa_rg = helper.get_resource_name_and_rg(
            resource_group_name, target_storage_account_name)
        target_details = TargetAFSRestoreInfo()
        target_details.name = target_file_share_name
        target_details.target_resource_id = _get_storage_account_id(
            cmd.cli_ctx, target_sa_name, target_sa_rg)
        afs_restore_request.target_details = target_details

    afs_restore_request.restore_file_specs = restore_file_specs

    trigger_restore_request = RestoreRequestResource(
        properties=afs_restore_request)

    result = client.begin_trigger(vault_name,
                                  resource_group_name,
                                  fabric_name,
                                  container_uri,
                                  item_uri,
                                  rp_name,
                                  trigger_restore_request,
                                  cls=helper.get_pipeline_response,
                                  polling=False).result()

    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
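
Compared with Example 6, this variant targets the track 2 SDK surface: client.begin_trigger(..., cls=helper.get_pipeline_response, polling=False).result() replaces the older client.trigger(..., raw=True) call. It also caps item-level restores at 99 source file paths and resolves the target storage account's resource group through get_resource_name_and_rg rather than assuming the vault's resource group.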