Ejemplo n.º 1
0
def undelete_protection(cmd, client, resource_group_name, vault_name, item):
    """Bring a soft-deleted workload item back to the 'ProtectionStopped' state.

    Only SQL and SAP HANA database items are supported; anything else raises
    ValidationError. Returns the tracked backup job for the undelete request.
    """
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # The item type is the first ';'-separated segment of the item URI.
    workload_type = item_uri.split(';')[0]
    supported = cust_help.is_sql(workload_type) or cust_help.is_hana(workload_type)
    if not supported:
        raise ValidationError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    # Rehydrate the item with protection stopped and no policy attached.
    props = _get_protected_item_instance(workload_type)
    props.protection_state = 'ProtectionStopped'
    props.policy_id = ''
    props.is_rehydrate = True

    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri,
                                     ProtectedItemResource(properties=props),
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Ejemplo n.º 2
0
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item,
                           policy):
    """Attach *policy* to the protected workload *item* and track the job.

    The policy's backup-management type must match the item's, and the item
    must be a SQL or SAP HANA database.
    """
    # A mismatched policy type cannot be applied to this workload.
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError("""
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to update the policy for the workload.
            """)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # The item type is encoded as the first ';'-separated segment of the URI.
    workload_type = item_uri.split(';')[0]
    if not (cust_help.is_sql(workload_type) or cust_help.is_hana(workload_type)):
        raise InvalidArgumentValueError(
            "Item must be either of type SQLDataBase or SAPHanaDatabase.")

    updated = _get_protected_item_instance(workload_type)
    updated.policy_id = policy.id

    # Update policy
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri,
                                     ProtectedItemResource(properties=updated),
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Ejemplo n.º 3
0
def disable_protection(cmd, client, resource_group_name, vault_name, item,
                       delete_backup_data):
    """Stop protection for a SQL/HANA workload item and track the resulting job.

    When *delete_backup_data* is truthy the item (and its backup data) is
    deleted; otherwise the item is moved to 'ProtectionStopped' with its
    policy cleared, retaining existing recovery points.
    """
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # The item type is the first ';'-separated segment of the item URI.
    workload_type = item_uri.split(';')[0]
    if not (cust_help.is_sql(workload_type) or cust_help.is_hana(workload_type)):
        raise CLIError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    if delete_backup_data:
        # Delete the protected item entirely, including its backup data.
        result = sdk_no_wait(True, client.delete, vault_name, resource_group_name,
                             fabric_name, container_uri, item_uri)
    else:
        # Trigger disable protection and wait for completion
        stopped = _get_protected_item_instance(workload_type)
        stopped.protection_state = 'ProtectionStopped'
        stopped.policy_id = ''
        result = sdk_no_wait(True, client.create_or_update, vault_name,
                             resource_group_name, fabric_name, container_uri,
                             item_uri, ProtectedItemResource(properties=stopped))

    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Ejemplo n.º 4
0
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until, backup_type,
               enable_compression=False):
    """Trigger an ad-hoc backup of a SQL/HANA workload item and track the job.

    *retain_until* is accepted but the underlying policy decides retention for
    these workload types, so a warning is logged when it is supplied.
    """
    override_warning = "For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy"
    if retain_until is not None:
        logger.warning(override_warning)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # The item type is the first ';'-separated segment of the item URI.
    workload_type = item_uri.split(';')[0]
    # Compression is only meaningful for SQL workloads.
    if enable_compression and not cust_help.is_sql(workload_type):
        raise CLIError(
            """
            Enable compression is not applicable for SAPHanaDatabase item type.
            """)

    # HANA ad-hoc backups do not support Log or CopyOnlyFull.
    if cust_help.is_hana(workload_type) and backup_type in ('Log', 'CopyOnlyFull'):
        raise CLIError(
            """
            Backup type cannot be Log or CopyOnlyFull for SAPHanaDatabase item type.
            """)

    request = BackupRequestResource(
        properties=AzureWorkloadBackupRequest(
            backup_type=backup_type,
            enable_compression=enable_compression,
            recovery_point_expiry_time_in_utc=retain_until))

    # Trigger backup and wait for completion
    result = sdk_no_wait(True, client.trigger, vault_name, resource_group_name,
                         fabric_name, container_uri, item_uri, request)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Ejemplo n.º 5
0
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy, tenant_id=None,
                           is_critical_operation=False):
    """Attach *policy* to the protected workload *item*, honouring Resource Guard.

    When *is_critical_operation* is set and the new policy shortens retention,
    the request is augmented with a Resource Guard operation request; for a
    cross-tenant guard, *tenant_id* selects the auxiliary tenant to
    authenticate against.
    """
    # A mismatched policy type cannot be applied to this workload.
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError(
            """
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to update the policy for the workload.
            """)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # The item type is the first ';'-separated segment of the item URI.
    workload_type = item_uri.split(';')[0]
    if not (cust_help.is_sql(workload_type) or cust_help.is_hana(workload_type)):
        raise InvalidArgumentValueError("Item must be either of type SQLDataBase or SAPHanaDatabase.")

    new_properties = _get_protected_item_instance(workload_type)
    new_properties.policy_id = policy.id
    param = ProtectedItemResource(properties=new_properties)

    if is_critical_operation:
        # Compare against the currently-attached policy to see if retention shrinks.
        current_policy_name = item.properties.policy_id.split('/')[-1]
        current_policy = common.show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name,
                                            vault_name, current_policy_name)
        if cust_help.is_retention_duration_decreased(current_policy, policy, "AzureWorkload"):
            # update the payload with critical operation and add auxiliary header for cross tenant case
            if tenant_id is not None:
                client = get_mgmt_service_client(cmd.cli_ctx, RecoveryServicesBackupClient,
                                                 aux_tenants=[tenant_id]).protected_items
            param.properties.resource_guard_operation_requests = [cust_help.get_resource_guard_operation_request(
                cmd.cli_ctx, resource_group_name, vault_name, "updateProtection")]

    # Update policy
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, param, cls=cust_help.get_pipeline_response)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Ejemplo n.º 6
0
def backup_now(cmd,
               client,
               resource_group_name,
               vault_name,
               item,
               retain_until,
               backup_type,
               enable_compression=False):
    """Trigger an ad-hoc backup of a SQL/HANA workload item and track the job.

    A user-supplied *retain_until* is honoured only for 'CopyOnlyFull'
    backups; every other case gets a 30-day expiry and, if the user supplied
    a value, a warning that the policy overrides it.
    """
    if backup_type is None:
        raise RequiredArgumentMissingError(
            "Backup type missing. Please provide a valid backup type using "
            "--backup-type argument.")

    # The user value survives only for CopyOnlyFull; otherwise default to 30 days.
    keep_user_retention = retain_until is not None and backup_type == 'CopyOnlyFull'
    if not keep_user_retention:
        if retain_until is not None:
            logger.warning("For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy")
        retain_until = datetime.now(timezone.utc) + timedelta(days=30)

    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    # The item type is the first ';'-separated segment of the item URI.
    workload_type = item_uri.split(';')[0]
    # Compression is only meaningful for SQL workloads.
    if enable_compression and not cust_help.is_sql(workload_type):
        raise CLIError("""
            Enable compression is not applicable for SAPHanaDatabase item type.
            """)

    # HANA ad-hoc backups do not support Log, CopyOnlyFull or Incremental.
    if cust_help.is_hana(workload_type) and backup_type in ('Log', 'CopyOnlyFull', 'Incremental'):
        raise CLIError("""
            Backup type cannot be Log, CopyOnlyFull, Incremental for SAPHanaDatabase Adhoc backup.
            """)

    request = BackupRequestResource(
        properties=AzureWorkloadBackupRequest(
            backup_type=backup_type,
            enable_compression=enable_compression,
            recovery_point_expiry_time_in_utc=retain_until))

    # Trigger backup and wait for completion
    result = client.trigger(vault_name, resource_group_name, fabric_name,
                            container_uri, item_uri, request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Ejemplo n.º 7
0
def show_recovery_config(cmd, client, resource_group_name, vault_name,
                         restore_mode, container_name, item_name, rp_name,
                         target_item, target_item_name, log_point_in_time,
                         from_full_rp_name, filepath, target_container):
    """Validate restore parameters and assemble the recovery-config dict.

    Supports three restore modes: 'OriginalWorkloadRestore',
    'AlternateWorkloadRestore' and 'RestoreAsFiles'. Exactly one of *rp_name*
    or *log_point_in_time* must be supplied (or *from_full_rp_name* for
    RestoreAsFiles). Returns a plain dict describing the restore request;
    raises CLIError on any validation failure.
    """
    # Validates the timestamp format; datetime_type raises on bad input.
    if log_point_in_time is not None:
        datetime_type(log_point_in_time)

    if restore_mode == 'AlternateWorkloadRestore':
        # Alternate-location restore needs a restore target.
        if target_item is None:
            raise CLIError("""
                Target Item must be provided.
                """)

        # The target must be a server-level item (an instance/system, not a DB).
        protectable_item_type = target_item.properties.protectable_item_type
        if protectable_item_type.lower() not in [
                "sqlinstance", "saphanasystem"
        ]:
            raise CLIError("""
                Target Item must be either of type HANAInstance or SQLInstance.
                """)

    if restore_mode == 'RestoreAsFiles' and target_container is None:
        raise CLIError("Target Container must be provided.")

    # At least one way of picking a recovery point is required.
    if rp_name is None and log_point_in_time is None:
        raise CLIError("""
            Log point in time or recovery point name must be provided.
            """)

    # Resolve the source item and switch to its canonical (internal) name.
    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx),
                            resource_group_name, vault_name, container_name,
                            item_name, "AzureWorkload")
    cust_help.validate_item(item)
    item_type = item.properties.workload_type
    # NOTE: item_name is deliberately replaced by the resolved item's name.
    item_name = item.name

    if not cust_help.is_sql(item_type) and not cust_help.is_hana(item_type):
        raise CLIError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    # Mapping of restore mode
    restore_mode_map = {
        'OriginalWorkloadRestore': 'OriginalLocation',
        'AlternateWorkloadRestore': 'AlternateLocation',
        'RestoreAsFiles': 'AlternateLocation'
    }

    # RestoreAsFiles from a log chain: restore relative to the given full RP.
    if rp_name is None and restore_mode == "RestoreAsFiles" and from_full_rp_name is not None:
        rp_name = from_full_rp_name
    # Point-in-time restores use the synthetic 'DefaultRangeRecoveryPoint'.
    rp_name = rp_name if rp_name is not None else 'DefaultRangeRecoveryPoint'

    if rp_name == 'DefaultRangeRecoveryPoint':
        # List log-range recovery points and keep only the synthetic default one.
        recovery_points = list_wl_recovery_points(cmd, client,
                                                  resource_group_name,
                                                  vault_name, item, None, None,
                                                  True)
        recovery_points = [rp for rp in recovery_points if rp.name == rp_name]

        if recovery_points == []:
            raise CLIError("""
                Invalid input.
                """)

        recovery_point = recovery_points[0]
    else:
        # A concrete recovery point was named; fetch it directly.
        recovery_point = common.show_recovery_point(
            cmd,
            client,
            resource_group_name,
            vault_name,
            container_name,
            item_name,
            rp_name,
            item_type,
            backup_management_type="AzureWorkload")

    # For SQL alternate-location restores, remap each source data-directory
    # path onto the target instance's directory layout.
    alternate_directory_paths = []
    if 'sql' in item_type.lower(
    ) and restore_mode == 'AlternateWorkloadRestore':
        items = list_workload_items(cmd, vault_name, resource_group_name,
                                    container_name)
        for titem in items:
            # Match the chosen target instance by friendly name and server name.
            if titem.properties.friendly_name == target_item.properties.friendly_name:
                if titem.properties.server_name == target_item.properties.server_name:
                    for path in recovery_point.properties.extended_info.data_directory_paths:
                        target_path = cust_help.get_target_path(
                            path.type, path.path, path.logical_name,
                            titem.properties.data_directory_paths)
                        alternate_directory_paths.append(
                            (path.type, path.path, path.logical_name,
                             target_path))
    # Restored database name is '<target friendly name>/<new db name>'.
    db_name = None
    if restore_mode == 'AlternateWorkloadRestore':
        friendly_name = target_item.properties.friendly_name
        db_name = friendly_name + '/' + target_item_name

    # Container id: for AWR, the target item's parent container resource id.
    container_id = None
    if restore_mode == 'AlternateWorkloadRestore':
        container_id = '/'.join(target_item.id.split('/')[:-2])

    # Directory remapping only applies to SQL alternate-location restores.
    if not ('sql' in item_type.lower()
            and restore_mode == 'AlternateWorkloadRestore'):
        alternate_directory_paths = None

    recovery_mode = None
    if restore_mode == 'RestoreAsFiles':
        # File restore overrides the container id with the staging container.
        recovery_mode = 'FileRecovery'
        container_id = target_container.id

    return {
        'restore_mode': restore_mode_map[restore_mode],
        'container_uri': item.properties.container_name,
        'item_uri': item_name,
        'recovery_point_id': recovery_point.name,
        'log_point_in_time': log_point_in_time,
        'item_type': 'SQL' if 'sql' in item_type.lower() else 'SAPHana',
        'source_resource_id': item.properties.source_resource_id,
        'database_name': db_name,
        'container_id': container_id,
        'recovery_mode': recovery_mode,
        'filepath': filepath,
        'alternate_directory_paths': alternate_directory_paths
    }