Example 1
def patch_volume(instance,
                 usage_threshold=None,
                 service_level=None,
                 tags=None,
                 vault_id=None,
                 backup_enabled=False,
                 backup_policy_id=None,
                 policy_enforced=False,
                 throughput_mibps=None):
    params = VolumePatch(
        usage_threshold=None if usage_threshold is None else int(usage_threshold) * gib_scale,
        service_level=service_level,
        data_protection=None if vault_id is None else VolumePatchPropertiesDataProtection(
            backup=VolumeBackupProperties(vault_id=vault_id,
                                          backup_enabled=backup_enabled,
                                          backup_policy_id=backup_policy_id,
                                          policy_enforced=policy_enforced)),
        tags=tags)
    if throughput_mibps is not None:
        params.throughput_mibps = throughput_mibps
    _update_mapper(
        instance, params,
        ['service_level', 'usage_threshold', 'tags', 'data_protection'])
    return params
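These CLI examples rely on a module-level gib_scale constant and an _update_mapper helper that are not shown on this page. A minimal sketch, assuming gib_scale is the number of bytes in one GiB and that _update_mapper simply back-fills any listed field left unset on the patch from the existing volume instance (the real Azure CLI helper may differ):

# Hypothetical reconstruction for illustration only.
gib_scale = 1024 * 1024 * 1024  # assumed: bytes per GiB


def _update_mapper(instance, new_object, keys):
    # For each tracked attribute, keep the caller-supplied value when present,
    # otherwise fall back to the value already on the existing resource.
    for key in keys:
        new_value = getattr(new_object, key, None)
        existing_value = getattr(instance, key, None)
        setattr(new_object, key, new_value if new_value is not None else existing_value)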
Example 2
    def test_patch_volume(self):
        raise unittest.SkipTest("Skipping Volume test")
        volume = create_volume(self.client,
                               TEST_RG,
                               TEST_ACC_1,
                               TEST_POOL_1,
                               TEST_VOL_1,
                               live=self.is_live)
        self.assertEqual("Premium", volume.service_level)
        self.assertEqual(100 * GIGABYTE, volume.usage_threshold)

        volume_patch = VolumePatch(usage_threshold=200 * GIGABYTE)
        volume = self.client.volumes.update(volume_patch, TEST_RG, TEST_ACC_1,
                                            TEST_POOL_1, TEST_VOL_1)
        self.assertEqual("Premium", volume.service_level)
        # unchanged
        self.assertEqual(200 * GIGABYTE, volume.usage_threshold)

        self.client.volumes.delete(TEST_RG, TEST_ACC_1, TEST_POOL_1,
                                   TEST_VOL_1).wait()
        wait_for_no_volume(self.client,
                           TEST_RG,
                           TEST_ACC_1,
                           TEST_POOL_1,
                           TEST_VOL_1,
                           live=self.is_live)
        delete_pool(self.client,
                    TEST_RG,
                    TEST_ACC_1,
                    TEST_POOL_1,
                    live=self.is_live)
        delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
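Example 2 (and Example 6 below) call the older volumes.update(body, resource_group, account, pool, volume) operation, while the later tests use the long-running volumes.begin_update(...) poller exposed by newer azure-mgmt-netapp releases. A hedged sketch of the newer call shape, assuming a management client named client plus the same TEST_* constants and GIGABYTE used in these tests:

# Resize a volume with the LRO-style API and wait for the operation to finish.
volume_patch = VolumePatch(usage_threshold=200 * GIGABYTE)
poller = client.volumes.begin_update(TEST_RG, TEST_ACC_1, TEST_POOL_1,
                                     TEST_VOL_1, volume_patch)
updated_volume = poller.result()  # blocks until the patch completes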
Example 3
    def test_patch_volume(self):
        # this can be reverted to set_bodiless_matcher() after tests are re-recorded and don't contain these headers
        set_custom_default_matcher(
            compare_bodies=False,
            excluded_headers="Authorization,Content-Length,x-ms-client-request-id,x-ms-request-id"
        )
        volume = create_volume(self.client,
                               TEST_RG,
                               TEST_ACC_1,
                               TEST_POOL_1,
                               TEST_VOL_1,
                               live=self.is_live)
        assert "Premium" == volume.service_level
        assert 100 * GIGABYTE == volume.usage_threshold

        volume_patch = VolumePatch(usage_threshold=200 * GIGABYTE)
        volume = self.client.volumes.begin_update(TEST_RG, TEST_ACC_1,
                                                  TEST_POOL_1, TEST_VOL_1,
                                                  volume_patch).result()
        assert "Premium" == volume.service_level
        assert 200 * GIGABYTE == volume.usage_threshold

        delete_volume(self.client,
                      TEST_RG,
                      TEST_ACC_1,
                      TEST_POOL_1,
                      TEST_VOL_1,
                      live=self.is_live)
        delete_pool(self.client,
                    TEST_RG,
                    TEST_ACC_1,
                    TEST_POOL_1,
                    live=self.is_live)
        delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
    def test_patch_volume(self):
        volume = create_volume(self.client,
                               TEST_RG,
                               TEST_ACC_1,
                               TEST_POOL_1,
                               TEST_VOL_1,
                               live=self.is_live)
        self.assertEqual("Premium", volume.service_level)
        self.assertEqual(100 * GIGABYTE, volume.usage_threshold)

        volume_patch = VolumePatch(usage_threshold=200 * GIGABYTE)
        volume = self.client.volumes.begin_update(TEST_RG, TEST_ACC_1,
                                                  TEST_POOL_1, TEST_VOL_1,
                                                  volume_patch).result()
        self.assertEqual("Premium", volume.service_level)
        self.assertEqual(200 * GIGABYTE, volume.usage_threshold)

        delete_volume(self.client,
                      TEST_RG,
                      TEST_ACC_1,
                      TEST_POOL_1,
                      TEST_VOL_1,
                      live=self.is_live)
        delete_pool(self.client,
                    TEST_RG,
                    TEST_ACC_1,
                    TEST_POOL_1,
                    live=self.is_live)
        delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
    def test_assign_snapshot_policy_to_volume(self):
        # create volume and snapshot policy
        create_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1,
                      TEST_VOL_1)
        snapshot_policy = create_snapshot_policy(self.client,
                                                 TEST_SNAPSHOT_POLICY_1)
        # assign the snapshot policy to the volume
        snapshot = VolumeSnapshotProperties(
            snapshot_policy_id=snapshot_policy.id)
        data_protection = VolumePatchPropertiesDataProtection(
            snapshot=snapshot)
        volume_patch = VolumePatch(data_protection=data_protection)
        volume = self.client.volumes.begin_update(TEST_RG, TEST_ACC_1,
                                                  TEST_POOL_1, TEST_VOL_1,
                                                  volume_patch).result()

        self.assertEqual(volume.data_protection.snapshot.snapshot_policy_id,
                         snapshot_policy.id)

        # cleanup
        delete_volume(self.client,
                      TEST_RG,
                      TEST_ACC_1,
                      TEST_POOL_1,
                      TEST_VOL_1,
                      live=self.is_live)
        delete_snapshot_policy(self.client,
                               TEST_SNAPSHOT_POLICY_1,
                               live=self.is_live)
        delete_pool(self.client,
                    TEST_RG,
                    TEST_ACC_1,
                    TEST_POOL_1,
                    live=self.is_live)
        delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
Example 6
    def test_patch_volume(self):
        volume = create_volume(self.client,
                               TEST_RG,
                               TEST_ACC_1,
                               TEST_POOL_1,
                               TEST_VOL_1,
                               live=self.is_live)
        self.assertEqual("Premium", volume.service_level)

        volume_patch = VolumePatch(service_level="Standard")
        volume = self.client.volumes.update(volume_patch, TEST_RG, TEST_ACC_1,
                                            TEST_POOL_1, TEST_VOL_1)
        self.assertEqual("Standard", volume.service_level)

        self.client.volumes.delete(TEST_RG, TEST_ACC_1, TEST_POOL_1,
                                   TEST_VOL_1).wait()
        wait_for_no_volume(self.client,
                           TEST_RG,
                           TEST_ACC_1,
                           TEST_POOL_1,
                           TEST_VOL_1,
                           live=self.is_live)
        delete_pool(self.client,
                    TEST_RG,
                    TEST_ACC_1,
                    TEST_POOL_1,
                    live=self.is_live)
        delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
Example 7
def patch_volume(cmd, instance, usage_threshold=None, service_level=None, protocol_types=None, tags=None):
    params = VolumePatch(
        usage_threshold=None if usage_threshold is None else int(usage_threshold) * gib_scale,
        service_level=service_level,
        protocol_types=protocol_types,
        tags=tags)
    _update_mapper(instance, params, ['service_level', 'usage_threshold', 'tags'])
    return params
Example 8
def patch_volume(instance,
                 usage_threshold=None,
                 service_level=None,
                 tags=None,
                 vault_id=None,
                 backup_enabled=False,
                 backup_policy_id=None,
                 policy_enforced=False,
                 throughput_mibps=None,
                 snapshot_policy_id=None,
                 is_def_quota_enabled=None,
                 default_user_quota=None,
                 default_group_quota=None):
    data_protection = None
    backup = None
    snapshot = None
    if vault_id is not None:
        backup = VolumeBackupProperties(vault_id=vault_id,
                                        backup_enabled=backup_enabled,
                                        backup_policy_id=backup_policy_id,
                                        policy_enforced=policy_enforced)
    if snapshot_policy_id is not None:
        snapshot = VolumeSnapshotProperties(
            snapshot_policy_id=snapshot_policy_id)

    if backup is not None or snapshot is not None:
        data_protection = VolumePatchPropertiesDataProtection(
            backup=backup, snapshot=snapshot)

    params = VolumePatch(usage_threshold=None if usage_threshold is None else
                         int(usage_threshold) * gib_scale,
                         service_level=service_level,
                         data_protection=data_protection,
                         tags=tags,
                         is_default_quota_enabled=is_def_quota_enabled,
                         default_user_quota_in_ki_bs=default_user_quota,
                         default_group_quota_in_ki_bs=default_group_quota)
    if throughput_mibps is not None:
        params.throughput_mibps = throughput_mibps
    _update_mapper(
        instance, params,
        ['service_level', 'usage_threshold', 'tags', 'data_protection'])
    return params
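For reference on the unit handling above: usage_threshold is supplied in GiB and scaled to bytes, while the default user/group quotas map to the *_in_ki_bs fields and are passed through unscaled because they are already expressed in KiB. A small worked example, assuming gib_scale is 1024**3:

gib_scale = 1024 ** 3                        # assumed: bytes per GiB
assert 100 * gib_scale == 107374182400       # a 100 GiB usage threshold, in bytes
# default_user_quota / default_group_quota are not scaled at all: the SDK fields
# default_user_quota_in_ki_bs / default_group_quota_in_ki_bs already take KiB values.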
Example 9
def add_export_policy_rule(cmd, instance, allowed_clients, rule_index, unix_read_only, unix_read_write, cifs, nfsv3, nfsv41):
    rules = []

    export_policy = ExportPolicyRule(rule_index=rule_index, unix_read_only=unix_read_only, unix_read_write=unix_read_write, cifs=cifs, nfsv3=nfsv3, nfsv41=nfsv41, allowed_clients=allowed_clients)

    rules.append(export_policy)
    for rule in instance.export_policy.rules:
        rules.append(rule)

    volume_export_policy = VolumePropertiesExportPolicy(rules=rules)

    params = VolumePatch(
        export_policy=volume_export_policy,
        service_level=instance.service_level,
        usage_threshold=instance.usage_threshold)
    _update_mapper(instance, params, ['export_policy'])
    return params
Example 10
def add_export_policy_rule(instance,
                           allowed_clients,
                           rule_index,
                           unix_read_only,
                           unix_read_write,
                           cifs,
                           nfsv3,
                           nfsv41,
                           kerberos5_r=None,
                           kerberos5_rw=None,
                           kerberos5i_r=None,
                           kerberos5i_rw=None,
                           kerberos5p_r=None,
                           kerberos5p_rw=None,
                           has_root_access=None,
                           chown_mode=None):
    rules = []

    export_policy = ExportPolicyRule(rule_index=rule_index,
                                     unix_read_only=unix_read_only,
                                     unix_read_write=unix_read_write,
                                     cifs=cifs,
                                     nfsv3=nfsv3,
                                     nfsv41=nfsv41,
                                     allowed_clients=allowed_clients,
                                     kerberos5_read_only=kerberos5_r,
                                     kerberos5_read_write=kerberos5_rw,
                                     kerberos5_i_read_only=kerberos5i_r,
                                     kerberos5_i_read_write=kerberos5i_rw,
                                     kerberos5_p_read_only=kerberos5p_r,
                                     kerberos5_p_read_write=kerberos5p_rw,
                                     has_root_access=has_root_access,
                                     chown_mode=chown_mode)

    rules.append(export_policy)
    for rule in instance.export_policy.rules:
        rules.append(rule)

    volume_export_policy = VolumePropertiesExportPolicy(rules=rules)

    params = VolumePatch(export_policy=volume_export_policy,
                         service_level=instance.service_level,
                         usage_threshold=instance.usage_threshold)
    _update_mapper(instance, params, ['export_policy'])
    return params
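A hedged usage sketch for the function above (hypothetical values): the new rule is placed ahead of the volume's existing rules, and the service level and size are carried over so the resulting VolumePatch stays valid. existing_volume stands in for the Volume object the CLI passes as instance:

# Pick the next free rule index and prepend an NFSv3 read/write rule.
new_rule_index = max((r.rule_index for r in existing_volume.export_policy.rules),
                     default=0) + 1
params = add_export_policy_rule(existing_volume,
                                allowed_clients="10.0.0.0/24",
                                rule_index=new_rule_index,
                                unix_read_only=False,
                                unix_read_write=True,
                                cifs=False,
                                nfsv3=True,
                                nfsv41=False)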
def disable_backup(client,
                   backup_name=TEST_BACKUP_1,
                   rg=BACKUP_RG,
                   account_name=TEST_ACC_1,
                   pool_name=TEST_POOL_1,
                   volume_name=TEST_VOL_1,
                   live=False):
    vaults = client.vaults.list(rg, account_name)
    volume_patch = VolumePatch(data_protection={
        "backup": {
            "vaultId": vaults.next().id,
            "backupEnabled": False
        }
    })
    client.volumes.begin_update(rg, account_name, pool_name, volume_name,
                                volume_patch).wait()
    wait_for_no_backup(client, rg, account_name, pool_name, volume_name,
                       backup_name, live)
Example 12
def remove_export_policy_rule(instance, rule_index):
    rules = []
    # Note: the commented-out approach below produced a patch request that
    # included some mount target properties, causing validation issues on the
    # server side. Keep this version for now and remove the commented code once
    # that has been investigated, before the next release.
    # look for the rule and remove
    # for rule in instance.export_policy.rules:
    #    if rule.rule_index == int(rule_index):
    #        instance.export_policy.rules.remove(rule)

    # return instance

    for rule in instance.export_policy.rules:
        if rule.rule_index != int(rule_index):
            rules.append(rule)

    volume_export_policy = VolumePropertiesExportPolicy(rules=rules)
    params = VolumePatch(export_policy=volume_export_policy,
                         service_level=instance.service_level,
                         usage_threshold=instance.usage_threshold)
    _update_mapper(instance, params, ['export_policy'])
    return params
Example 13
def patch_volume(cmd,
                 instance,
                 service_level=None,
                 usage_threshold=None,
                 tag=None,
                 export_policy=None):

    # the export policy provided replaces any existing export policy
    rules = build_export_policy_rules(export_policy)
    volume_export_policy = VolumePropertiesExportPolicy(
        rules=rules) if rules != [] else None

    params = VolumePatch(usage_threshold=None
                         if usage_threshold is None else int(usage_threshold),
                         service_level=service_level,
                         tags=generate_tags(tag),
                         export_policy=volume_export_policy)
    _update_mapper(
        instance, params,
        ['service_level', 'usage_threshold', 'tags', 'export_policy'])
    return params
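build_export_policy_rules and generate_tags are referenced above but not shown on this page. A minimal sketch of the rule builder, assuming the CLI hands it a list of rule dictionaries (the real helper may accept a different shape):

def build_export_policy_rules(export_policy):
    # Turn a list of rule dicts into ExportPolicyRule models; an empty or
    # missing input yields an empty list, which patch_volume treats as
    # "leave the export policy alone".
    rules = []
    for rule in export_policy or []:
        rules.append(ExportPolicyRule(rule_index=rule.get('rule_index'),
                                      allowed_clients=rule.get('allowed_clients'),
                                      unix_read_only=rule.get('unix_read_only'),
                                      unix_read_write=rule.get('unix_read_write'),
                                      cifs=rule.get('cifs'),
                                      nfsv3=rule.get('nfsv3'),
                                      nfsv41=rule.get('nfsv41')))
    return rules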
Example 14
    def test_assign_snapshot_policy_to_volume(self):
        # this can be reverted to set_bodiless_matcher() after tests are re-recorded and don't contain these headers
        set_custom_default_matcher(
            compare_bodies=False, excluded_headers="Authorization,Content-Length,x-ms-client-request-id,x-ms-request-id"
        )
        # create volume and snapshot policy
        create_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1)
        snapshot_policy = create_snapshot_policy(self.client, TEST_SNAPSHOT_POLICY_1)
        # assign the snapshot policy to the volume
        snapshot = VolumeSnapshotProperties(snapshot_policy_id=snapshot_policy.id)
        data_protection = VolumePatchPropertiesDataProtection(snapshot=snapshot)
        volume_patch = VolumePatch(data_protection=data_protection)
        volume = self.client.volumes.begin_update(TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, volume_patch).result()

        assert volume.data_protection.snapshot.snapshot_policy_id == snapshot_policy.id

        # cleanup
        delete_volume(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, TEST_VOL_1, live=self.is_live)
        delete_snapshot_policy(self.client, TEST_SNAPSHOT_POLICY_1, live=self.is_live)
        delete_pool(self.client, TEST_RG, TEST_ACC_1, TEST_POOL_1, live=self.is_live)
        delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
def create_backup(client,
                  backup_name=TEST_BACKUP_1,
                  rg=BACKUP_RG,
                  account_name=TEST_ACC_1,
                  pool_name=TEST_POOL_1,
                  volume_name=TEST_VOL_1,
                  location=BACKUP_LOCATION,
                  backup_only=False,
                  live=False):
    if not backup_only:
        create_volume(client,
                      rg,
                      account_name,
                      pool_name,
                      volume_name,
                      location,
                      vnet=BACKUP_VNET,
                      live=live)
        wait_for_volume(client, rg, account_name, pool_name, volume_name, live)

    vaults = client.vaults.list(rg, account_name)
    volume_patch = VolumePatch(data_protection={
        "backup": {
            "vaultId": vaults.next().id,
            "backupEnabled": True
        }
    })
    client.volumes.begin_update(rg, account_name, pool_name, volume_name,
                                volume_patch).result()
    backup_body = Backup(location=location)
    backup = client.backups.begin_create(rg, account_name, pool_name,
                                         volume_name, backup_name,
                                         backup_body).result()
    wait_for_backup_created(client, rg, account_name, pool_name, volume_name,
                            backup_name, live)
    return backup
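wait_for_backup_created is referenced above but not shown. A hedged sketch of such a poll, assuming the backup counts as created once its provisioning_state reports "Succeeded" and that sleeping is skipped during recorded-test playback (live=False):

import time


def wait_for_backup_created(client, rg, account_name, pool_name, volume_name,
                            backup_name, live=False):
    # Poll the backup until it is provisioned (or give up after ~10 minutes).
    for _ in range(60):
        backup = client.backups.get(rg, account_name, pool_name, volume_name,
                                    backup_name)
        if backup.provisioning_state == "Succeeded":
            break
        if live:
            time.sleep(10)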
def run_example():
    """Azure NetApp Files SDK management example."""

    print("Azure NetAppFiles Python SDK Sample")
    print("Sample project that performs CRUD management operations with Azure NetApp Files SDK with Python")
    print("-----------------------------------------------------------------------------------------------")

    # Creating the Azure NetApp Files Client with an Application
    # (service principal) token provider
    credentials, subscription_id = sample_utils.get_credentials()
    anf_client = AzureNetAppFilesManagementClient(
        credentials, subscription_id)

    # Creating an Azure NetApp Account
    console_output('Creating Azure NetApp Files account ...')
    account = None
    try:
        account = create_account(
            anf_client, RESOURCE_GROUP_NAME, ANF_ACCOUNT_NAME, LOCATION)
        console_output(
            '\tAccount successfully created, resource id: {}'.format(
                account.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Creating a Capacity Pool
    console_output('Creating Capacity Pool ...')
    capacity_pool = None
    try:
        capacity_pool = create_capacitypool_async(
            anf_client,
            RESOURCE_GROUP_NAME,
            account.name,
            CAPACITYPOOL_NAME,
            CAPACITYPOOL_SERVICE_LEVEL,
            CAPACITYPOOL_SIZE, LOCATION)

        console_output('\tCapacity Pool successfully created, resource id: {}'
                       .format(capacity_pool.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Creating a Volume
    #
    # Note: with the exception of Accounts, the Name property of every resource
    # is returned as a relative path that includes its parents, so before the
    # name can be used in other methods (Get, for example) it has to be
    # sanitized down to the actual name only.
    # Capacity Pool Name property example: "pmarques-anf01/pool01";
    # "pool01" is the actual name that must be used instead. Helper functions
    # such as resource_uri_utils.get_anf_capacity_pool() parse the name out of
    # the resource id; a minimal sketch of such a parser appears after this
    # sample.
    console_output('Creating a Volume ...')
    subnet_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'\
                .format(subscription_id,
                        VNET_RESOURCE_GROUP_NAME,
                        VNET_NAME,
                        SUBNET_NAME)
    volume = None
    try:
        pool_name = resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)

        volume = create_volume(anf_client,
                               RESOURCE_GROUP_NAME,
                               account.name, pool_name,
                               VOLUME_NAME,
                               VOLUME_USAGE_QUOTA,
                               CAPACITYPOOL_SERVICE_LEVEL,
                               subnet_id,
                               LOCATION)

        console_output('\tVolume successfully created, resource id: {}'
                       .format(volume.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Creating a snapshot
    console_output('Creating a Snapshot ...')
    snapshot = None
    try:
        volume_name = resource_uri_utils.get_anf_volume(volume.id)

        snapshot = create_snapshot(anf_client,
                                   RESOURCE_GROUP_NAME,
                                   account.name,
                                   pool_name,
                                   VOLUME_NAME,
                                   SNAPSHOT_NAME,
                                   LOCATION)

        sample_utils.wait_for_anf_resource(anf_client, snapshot.id)

        console_output(
            '\tSnapshot successfully created, resource id: {}'
            .format(snapshot.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Creating a new volume from snapshot
    #
    # Note: the value passed as the snapshot id here is not the snapshot's ARM
    # resource id; it is the snapshot's unique identifier (a GUID), exposed by
    # the snapshot's SnapshotId property.
    console_output('Creating New Volume from Snapshot ...')
    volume_from_snapshot = None
    try:
        new_volume_name = "Vol-{}".format(
            resource_uri_utils.get_anf_snapshot(snapshot.id))

        volume_from_snapshot = create_volume_from_snapshot(anf_client,
                                                           RESOURCE_GROUP_NAME,
                                                           account.name,
                                                           pool_name,
                                                           volume,
                                                           snapshot.snapshot_id,
                                                           new_volume_name)

        console_output('\tNew volume from snapshot successfully created, resource id: {}'.format(
            volume_from_snapshot.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Updating a Capacity Pool
    console_output('Performing updates on Capacity Pool and Volume...')
    new_capacity_pool_size_tib = 10
    console_output('\tChanging Capacity Pool size from {}TiB to {}TiB'.format(
        sample_utils.get_bytes_in_tib(capacity_pool.size),
        new_capacity_pool_size_tib))
    try:
        capacity_pool_patch = CapacityPoolPatch(location=capacity_pool.location,
                                                service_level=capacity_pool.service_level,
                                                size=sample_utils.get_tib_in_bytes(new_capacity_pool_size_tib))

        capacity_pool = anf_client.pools.update(capacity_pool_patch,
                                                RESOURCE_GROUP_NAME,
                                                account.name,
                                                resource_uri_utils.get_anf_capacity_pool(capacity_pool.id))

        console_output('\t\tCapacity Pool successfully updated, new size {}TiB, resource id: {}'.format(
            sample_utils.get_bytes_in_tib(capacity_pool.size), capacity_pool.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Volume updates: resize and adding a new export policy
    new_volume_size_tib = 1
    console_output('\tChanging volume size from {}TiB to {}TiB'.format(
        sample_utils.get_bytes_in_tib(volume.usage_threshold), new_volume_size_tib))

    # Getting the volume's export policy rules and adding a new one at the end
    rule_list = sorted(volume.export_policy.rules,
                       key=lambda r: r.rule_index, reverse=True)

    # Currently, ANF's volume export policy supports up to 5 rules
    export_policies_patch = None
    if len(rule_list) <= 4:
        rule_list.append(ExportPolicyRule(
            allowed_clients="10.0.0.4/32",
            cifs=False,
            nfsv3=True,
            nfsv41=False,
            rule_index=rule_list[0].rule_index + 1,
            unix_read_only=False,
            unix_read_write=True))

        export_policies_patch = VolumePatchPropertiesExportPolicy(
            rules=rule_list)

    if export_policies_patch is None:
        volume_patch = VolumePatch(
            location=volume.location,
            service_level=volume.service_level,
            usage_threshold=sample_utils.get_tib_in_bytes(new_volume_size_tib))
    else:
        volume_patch = VolumePatch(
            location=volume.location,
            service_level=volume.service_level,
            usage_threshold=sample_utils.get_tib_in_bytes(new_volume_size_tib),
            export_policy=export_policies_patch)

    try:
        updated_volume = anf_client.volumes.update(volume_patch,
                                                   RESOURCE_GROUP_NAME,
                                                   account.name,
                                                   resource_uri_utils.get_anf_capacity_pool(
                                                       capacity_pool.id),
                                                   resource_uri_utils.get_anf_volume(volume.id))

        console_output('\t\tVolume successfully updated, new size: {}TiB, export policy count: {}, resource id: {}'
                       .format(sample_utils.get_bytes_in_tib(updated_volume.usage_threshold),
                               len(updated_volume.export_policy.rules),
                               updated_volume.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Retrieving resources
    console_output('Performing retrieval operations ...')

    # Accounts
    # Getting a list of ANF Accounts
    console_output('\tListing accounts...')
    account_list = None
    try:
        account_list = list(anf_client.accounts.list(RESOURCE_GROUP_NAME))

        for i, retrieved_account in enumerate(account_list):
            console_output('\t\t{} - Account Name: {}, Id: {}'
                           .format(i,
                                   retrieved_account.name,
                                   retrieved_account.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single ANF Account
    console_output('\tGetting a single account...')
    try:
        retrieved_account = anf_client.accounts.get(
            RESOURCE_GROUP_NAME, account.name)

        console_output('\t\tAccount Name: {}, Id: {}'.format(
            retrieved_account.name, retrieved_account.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Capacity Pools
    # Getting a list of capacity pools from an account
    console_output('\tListing capacity pools from account {}...'
                   .format(account.name))
    capacitypool_list = None
    try:
        capacitypool_list = list(anf_client.pools.list(RESOURCE_GROUP_NAME,
                                                       resource_uri_utils.get_anf_account(account.id)))

        for i, retrieved_pool in enumerate(capacitypool_list):
            console_output('\t\t{} - Capacity Pool Name: {}, Id: {}'
                           .format(i,
                                   retrieved_pool.name,
                                   retrieved_pool.id))

    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single capacity pool
    console_output('\tGetting a single capacity pool...')
    try:
        retrieved_pool = anf_client.pools.get(RESOURCE_GROUP_NAME,
                                              resource_uri_utils.get_anf_account(
                                                  account.id),
                                              resource_uri_utils.get_anf_capacity_pool(capacity_pool.id))

        console_output('\t\tCapacity Pool Name: {}, Id: {}'.format(
            retrieved_pool.name, retrieved_pool.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Volumes
    # Getting a list of volumes from a capacity pool
    console_output('\tListing volumes from capacity pool {}...'.format(
        capacity_pool.name))
    volume_list = None
    try:
        volume_list = list(anf_client.volumes.list(RESOURCE_GROUP_NAME,
                                                   resource_uri_utils.get_anf_account(
                                                       account.id),
                                                   resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)))

        for i, retrieved_volume in enumerate(volume_list):
            console_output('\t\t{} - Volume Name: {}, Id: {}'
                           .format(i,
                                   retrieved_volume.name,
                                   retrieved_volume.id))

    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single volume
    console_output('\tGetting a single volume...')
    try:
        retrieved_volume = anf_client.volumes.get(RESOURCE_GROUP_NAME,
                                                  resource_uri_utils.get_anf_account(
                                                      account.id),
                                                  resource_uri_utils.get_anf_capacity_pool(
                                                      capacity_pool.id),
                                                  resource_uri_utils.get_anf_volume(volume.id))

        console_output('\t\tVolume Name: {}, Id: {}'.format(
            retrieved_volume.name, retrieved_volume.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Snapshots
    # Getting a list of snapshots from volume
    console_output(
        '\tListing snapshots from volume {}...'.format(volume.name))
    snapshot_list = None
    try:
        snapshot_list = list(anf_client.snapshots.list(RESOURCE_GROUP_NAME,
                                                       resource_uri_utils.get_anf_account(
                                                           account.id),
                                                       resource_uri_utils.get_anf_capacity_pool(
                                                           capacity_pool.id),
                                                       resource_uri_utils.get_anf_volume(volume.id)))

        for i, retrieved_snapshot in enumerate(snapshot_list):
            console_output('\t\t{} - Snapshot Name: {}, Id: {}'
                           .format(i,
                                   retrieved_snapshot.name,
                                   retrieved_snapshot.id))

    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single snapshot
    console_output('\tGetting a single snapshot...')
    try:
        retrieved_snapshot = anf_client.snapshots.get(RESOURCE_GROUP_NAME,
                                                      resource_uri_utils.get_anf_account(
                                                          account.id),
                                                      resource_uri_utils.get_anf_capacity_pool(
                                                          capacity_pool.id),
                                                      resource_uri_utils.get_anf_volume(
                                                          volume.id),
                                                      resource_uri_utils.get_anf_snapshot(snapshot.id))

        console_output('\t\tSnapshot Name: {}, Id: {}'.format(
            retrieved_snapshot.name, retrieved_snapshot.id))
    except CloudError as ex:
        console_output(
            'An error occurred. Error details: {}'.format(ex.message))
        raise

    # Cleaning up. Deletion has to start with the innermost resources and work
    # up the hierarchy chain, in this case:
    # Snapshots -> Volumes -> Capacity Pools -> Accounts
    if SHOULD_CLEANUP:
        console_output('Cleaning up...')

        # Cleaning up snapshot
        console_output(
            "\tWaiting for 1 minute to let the snapshot used to create a new \
            volume to complete the split operation therefore not being locked...")
        time.sleep(60)
        console_output("\tDeleting Snapshot {}...".format(
            resource_uri_utils.get_anf_snapshot(snapshot.id)))

        try:
            anf_client.snapshots.delete(RESOURCE_GROUP_NAME,
                                        account.name,
                                        resource_uri_utils.get_anf_capacity_pool(
                                            capacity_pool.id),
                                        resource_uri_utils.get_anf_volume(
                                            volume.id),
                                        resource_uri_utils.get_anf_snapshot(snapshot.id)).wait()

            # ARM workaround: wait for the deletion to complete and propagate
            sample_utils.wait_for_no_anf_resource(anf_client, snapshot.id)

            console_output('\t\tDeleted Snapshot: {}'.format(snapshot.id))
        except CloudError as ex:
            console_output(
                'An error occurred. Error details: {}'.format(ex.message))
            raise

        # Cleaning up volumes
        # Note: Volume deletion operations at the RP level are executed
        # serially
        console_output("\tDeleting Volumes...")
        try:
            volume_ids = [volume.id, volume_from_snapshot.id]
            for volume_id in volume_ids:
                console_output("\t\tDeleting {}".format(volume_id))
                anf_client.volumes.delete(RESOURCE_GROUP_NAME,
                                          account.name,
                                          resource_uri_utils.get_anf_capacity_pool(
                                              capacity_pool.id),
                                          resource_uri_utils.get_anf_volume(volume_id)).wait()

                sample_utils.wait_for_no_anf_resource(anf_client, volume_id)

                console_output('\t\tDeleted Volume: {}'.format(volume_id))
        except CloudError as ex:
            console_output(
                'An error occurred. Error details: {}'.format(ex.message))
            raise

        # Cleaning up Capacity Pool
        console_output("\tDeleting Capacity Pool {} ...".format(
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)))
        try:
            anf_client.pools.delete(RESOURCE_GROUP_NAME,
                                    account.name,
                                    resource_uri_utils.get_anf_capacity_pool(
                                        capacity_pool.id)).wait()

            sample_utils.wait_for_no_anf_resource(anf_client, capacity_pool.id)

            console_output(
                '\t\tDeleted Capacity Pool: {}'.format(capacity_pool.id))
        except CloudError as ex:
            console_output(
                'An error occurred. Error details: {}'.format(ex.message))
            raise

        # Cleaning up Account
        console_output("\tDeleting Account {} ...".format(account.name))
        try:
            anf_client.accounts.delete(RESOURCE_GROUP_NAME, account.name)
            sample_utils.wait_for_no_anf_resource(anf_client, account.id)
            console_output('\t\tDeleted Account: {}'.format(account.id))
        except CloudError as ex:
            console_output(
                'An error occurred. Error details: {}'.format(ex.message))
            raise
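Referenced by the naming note inside run_example(): the resource_uri_utils helpers are not included in this sample. A minimal sketch, assuming each helper only needs the trailing name segment of the resource id it is given (the real module may be more elaborate):

def get_resource_name(resource_id):
    # Hypothetical stand-in for helpers such as get_anf_capacity_pool(),
    # get_anf_volume() or get_anf_snapshot(), which are always called above
    # with the id of the resource whose short name is wanted.
    # '/subscriptions/.../capacityPools/pool01' -> 'pool01'
    return resource_id.rstrip('/').split('/')[-1]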