def validate_alba_backend_removal(alba_backend_info):
        """
        Validate whether the backend has been deleted properly
        alba_backend_info should be a dictionary containing:
            - guid
            - name
            - maintenance_service_names
        :param alba_backend_info: Information about the backend
        :return: None
        """
        Toolbox.verify_required_params(actual_params=alba_backend_info,
                                       required_params={'name': (str, None),
                                                        'guid': (str, Toolbox.regex_guid),
                                                        'maintenance_service_names': (list, None)},
                                       exact_match=True)

        alba_backend_guid = alba_backend_info['guid']
        alba_backend_name = alba_backend_info['name']
        backend = GeneralBackend.get_by_name(alba_backend_name)
        assert backend is None,\
            'Still found a backend in the model with name {0}'.format(alba_backend_name)

        # Validate services removed from model
        for service in GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.ALBA_MGR):
            assert service.name != '{0}-abm'.format(alba_backend_name),\
                'An AlbaManager service has been found with name {0}'.format(alba_backend_name)
        for service in GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.NS_MGR):
            assert service.name.startswith('{0}-nsm_'.format(alba_backend_name)) is False,\
                'An NamespaceManager service has been found with name {0}'.format(alba_backend_name)

        # Validate ALBA backend configuration structure
        alba_backend_key = '/ovs/alba/backends'
        actual_configuration_keys = [key for key in Configuration.list(alba_backend_key)]
        assert alba_backend_guid not in actual_configuration_keys,\
            'Configuration still contains an entry in {0} with guid {1}'.format(alba_backend_key, alba_backend_guid)

        # Validate Arakoon configuration structure
        arakoon_keys = [key for key in Configuration.list('/ovs/arakoon') if key.startswith(alba_backend_name)]
        assert len(arakoon_keys) == 0,\
            'Configuration still contains configurations for clusters: {0}'.format(', '.join(arakoon_keys))

        # Validate services
        for storagerouter in GeneralStorageRouter.get_storage_routers():
            root_client = SSHClient(endpoint=storagerouter, username='******')
            maintenance_services = alba_backend_info['maintenance_service_names']
            abm_arakoon_service_name = 'ovs-arakoon-{0}-abm'.format(alba_backend_name)
            nsm_arakoon_service_name = 'ovs-arakoon-{0}-nsm_0'.format(alba_backend_name)
            for service_name in [abm_arakoon_service_name, nsm_arakoon_service_name] + maintenance_services:
                assert GeneralService.has_service(name=service_name, client=root_client) is False,\
                    'Service {0} still deployed on Storage Router {1}'.format(service_name, storagerouter.name)
    def check_vpool_cleanup(vpool_info, storagerouters=None):
        """
        Check if everything related to a vPool has been cleaned up on the storagerouters provided
        vpool_info should be a dictionary containing:
            - type
            - guid
            - files
            - directories
            - name (optional)
            - vpool (optional)
            If vpool is provided:
                - storagerouters need to be provided, because on these Storage Routers, we check whether the vPool has been cleaned up
            If name is provided:
                - If storagerouters is NOT provided, all Storage Routers will be checked for a correct vPool removal
                - If storagerouters is provided, only these Storage Routers will be checked for a correct vPool removal

        :param vpool_info: Information about the vPool
        :param storagerouters: Storage Routers to check if vPool has been cleaned up
        :return: None
        """
        for required_param in ['type', 'guid', 'files', 'directories']:
            if required_param not in vpool_info:
                raise ValueError('Incorrect vpool_info provided')
        if 'vpool' in vpool_info and 'name' in vpool_info:
            raise ValueError('vpool and name are mutually exclusive')
        if 'vpool' not in vpool_info and 'name' not in vpool_info:
            raise ValueError('Either vpool or vpool_name needs to be provided')

        vpool = vpool_info.get('vpool')
        vpool_name = vpool_info.get('name')
        vpool_guid = vpool_info['guid']
        vpool_type = vpool_info['type']
        files = vpool_info['files']
        directories = vpool_info['directories']

        supported_backend_types = GeneralBackend.get_valid_backendtypes()
        if vpool_type not in supported_backend_types:
            raise ValueError('Unsupported Backend Type provided. Please choose from: {0}'.format(', '.join(supported_backend_types)))
        if storagerouters is None:
            storagerouters = GeneralStorageRouter.get_storage_routers()

        if vpool_name is not None:
            assert GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is None, 'A vPool with name {0} still exists'.format(vpool_name)

        # Prepare some fields to check
        vpool_name = vpool.name if vpool else vpool_name
        vpool_services = ['ovs-dtl_{0}'.format(vpool_name),
                          'ovs-volumedriver_{0}'.format(vpool_name)]
        if vpool_type == 'alba':
            vpool_services.append('ovs-albaproxy_{0}'.format(vpool_name))

        # Check etcd
        if vpool is None:
            assert EtcdConfiguration.exists('/ovs/vpools/{0}'.format(vpool_guid), raw=True) is False, 'vPool config still found in etcd'
        else:
            remaining_sd_ids = set([storagedriver.storagedriver_id for storagedriver in vpool.storagedrivers])
            current_sd_ids = set([item for item in EtcdConfiguration.list('/ovs/vpools/{0}/hosts'.format(vpool_guid))])
            assert not remaining_sd_ids.difference(current_sd_ids), 'There are more storagedrivers modelled than present in etcd'
            assert not current_sd_ids.difference(remaining_sd_ids), 'There are more storagedrivers in etcd than present in model'

        # Perform checks on all storagerouters where vpool was removed
        for storagerouter in storagerouters:
            # Check management center
            mgmt_center = GeneralManagementCenter.get_mgmt_center(pmachine=storagerouter.pmachine)
            if mgmt_center is not None:
                assert GeneralManagementCenter.is_host_configured(pmachine=storagerouter.pmachine) is False, 'Management Center is still configured on Storage Router {0}'.format(storagerouter.ip)

            # Check MDS services
            mds_services = GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.MD_SERVER)
            assert len([mds_service for mds_service in mds_services if mds_service.storagerouter_guid == storagerouter.guid]) == 0, 'There are still MDS services present for Storage Router {0}'.format(storagerouter.ip)

            # Check services
            root_client = SSHClient(storagerouter, username='******')
            for service in vpool_services:
                if ServiceManager.has_service(service, client=root_client):
                    raise RuntimeError('Service {0} is still configured on Storage Router {1}'.format(service, storagerouter.ip))

            # Check KVM vpool
            if storagerouter.pmachine.hvtype == 'KVM':
                vpool_overview = root_client.run('virsh pool-list --all').splitlines()
                vpool_overview.pop(1)
                vpool_overview.pop(0)
                for vpool_info in vpool_overview:
                    kvm_vpool_name = vpool_info.split()[0].strip()
                    if vpool_name == kvm_vpool_name:
                        raise ValueError('vPool {0} is still defined on Storage Router {1}'.format(vpool_name, storagerouter.ip))

            # Check file and directory existence
            if storagerouter.guid not in directories:
                raise ValueError('Could not find directory information for Storage Router {0}'.format(storagerouter.ip))
            if storagerouter.guid not in files:
                raise ValueError('Could not find file information for Storage Router {0}'.format(storagerouter.ip))

            for directory in directories[storagerouter.guid]:
                assert root_client.dir_exists(directory) is False, 'Directory {0} still exists on Storage Router {1}'.format(directory, storagerouter.ip)
            for file_name in files[storagerouter.guid]:
                assert root_client.file_exists(file_name) is False, 'File {0} still exists on Storage Router {1}'.format(file_name, storagerouter.ip)

            # Look for errors in storagedriver log
            for error_type in ['error', 'fatal']:
                cmd = "cat -vet /var/log/ovs/volumedriver/{0}.log | tail -1000 | grep ' {1} '; echo true > /dev/null".format(vpool_name, error_type)
                errors = []
                for line in root_client.run(cmd).splitlines():
                    if "HierarchicalArakoon" in line:
                        continue
                    errors.append(line)
                if len(errors) > 0:
                    if error_type == 'error':
                        print 'Volumedriver log file contains errors on Storage Router {0}\n - {1}'.format(storagerouter.ip, '\n - '.join(errors))
                    else:
                        raise RuntimeError('Fatal errors found in volumedriver log file on Storage Router {0}\n - {1}'.format(storagerouter.ip, '\n - '.join(errors)))
    def check_vpool_cleanup(vpool_info, storagerouters=None):
        """
        Check if everything related to a vPool has been cleaned up on the storagerouters provided
        vpool_info should be a dictionary containing:
            - type
            - guid
            - files
            - directories
            - name (optional)
            - vpool (optional)
            If vpool is provided:
                - storagerouters need to be provided, because on these Storage Routers, we check whether the vPool has been cleaned up
            If name is provided:
                - If storagerouters is NOT provided, all Storage Routers will be checked for a correct vPool removal
                - If storagerouters is provided, only these Storage Routers will be checked for a correct vPool removal

        :param vpool_info: Information about the vPool
        :param storagerouters: Storage Routers to check if vPool has been cleaned up
        :return: None
        """
        for required_param in ["type", "guid", "files", "directories"]:
            if required_param not in vpool_info:
                raise ValueError("Incorrect vpool_info provided")
        if "vpool" in vpool_info and "name" in vpool_info:
            raise ValueError("vpool and name are mutually exclusive")
        if "vpool" not in vpool_info and "name" not in vpool_info:
            raise ValueError("Either vpool or vpool_name needs to be provided")

        vpool = vpool_info.get("vpool")
        vpool_name = vpool_info.get("name")
        vpool_guid = vpool_info["guid"]
        vpool_type = vpool_info["type"]
        files = vpool_info["files"]
        directories = vpool_info["directories"]

        supported_backend_types = GeneralBackend.get_valid_backendtypes()
        if vpool_type not in supported_backend_types:
            raise ValueError(
                "Unsupported Backend Type provided. Please choose from: {0}".format(", ".join(supported_backend_types))
            )
        if storagerouters is None:
            storagerouters = GeneralStorageRouter.get_storage_routers()

        if vpool_name is not None:
            assert (
                GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is None
            ), "A vPool with name {0} still exists".format(vpool_name)

        # Prepare some fields to check
        vpool_name = vpool.name if vpool else vpool_name
        vpool_services = ["ovs-dtl_{0}".format(vpool_name), "ovs-volumedriver_{0}".format(vpool_name)]
        if vpool_type == "alba":
            vpool_services.append("ovs-albaproxy_{0}".format(vpool_name))

        # Check configuration
        if vpool is None:
            assert (
                Configuration.exists("/ovs/vpools/{0}".format(vpool_guid), raw=True) is False
            ), "vPool config still found in etcd"
        else:
            remaining_sd_ids = set([storagedriver.storagedriver_id for storagedriver in vpool.storagedrivers])
            current_sd_ids = set([item for item in Configuration.list("/ovs/vpools/{0}/hosts".format(vpool_guid))])
            assert not remaining_sd_ids.difference(
                current_sd_ids
            ), "There are more storagedrivers modelled than present in etcd"
            assert not current_sd_ids.difference(
                remaining_sd_ids
            ), "There are more storagedrivers in etcd than present in model"

        # Perform checks on all storagerouters where vpool was removed
        for storagerouter in storagerouters:

            # Check MDS services
            mds_services = GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.MD_SERVER)
            assert (
                len(
                    [
                        mds_service
                        for mds_service in mds_services
                        if mds_service.storagerouter_guid == storagerouter.guid
                    ]
                )
                == 0
            ), "There are still MDS services present for Storage Router {0}".format(storagerouter.ip)

            # Check services
            root_client = SSHClient(storagerouter, username="******")
            for service in vpool_services:
                if ServiceManager.has_service(service, client=root_client):
                    raise RuntimeError(
                        "Service {0} is still configured on Storage Router {1}".format(service, storagerouter.ip)
                    )

            # Check KVM vpool
            if GeneralHypervisor.get_hypervisor_type() == "KVM":
                vpool_overview = root_client.run(["virsh", "pool-list", "--all"]).splitlines()
                vpool_overview.pop(1)
                vpool_overview.pop(0)
                for vpool_info in vpool_overview:
                    kvm_vpool_name = vpool_info.split()[0].strip()
                    if vpool_name == kvm_vpool_name:
                        raise ValueError(
                            "vPool {0} is still defined on Storage Router {1}".format(vpool_name, storagerouter.ip)
                        )

            # Check file and directory existence
            if storagerouter.guid not in directories:
                raise ValueError("Could not find directory information for Storage Router {0}".format(storagerouter.ip))
            if storagerouter.guid not in files:
                raise ValueError("Could not find file information for Storage Router {0}".format(storagerouter.ip))

            for directory in directories[storagerouter.guid]:
                assert (
                    root_client.dir_exists(directory) is False
                ), "Directory {0} still exists on Storage Router {1}".format(directory, storagerouter.ip)
            for file_name in files[storagerouter.guid]:
                assert (
                    root_client.file_exists(file_name) is False
                ), "File {0} still exists on Storage Router {1}".format(file_name, storagerouter.ip)

            # Look for errors in storagedriver log
            for error_type in ["error", "fatal"]:
                cmd = "cat -vet /var/log/ovs/volumedriver/{0}.log | tail -1000 | grep ' {1} '; echo true > /dev/null".format(
                    vpool_name, error_type
                )
                errors = []
                for line in root_client.run(cmd, allow_insecure=True).splitlines():
                    if "HierarchicalArakoon" in line:
                        continue
                    errors.append(line)
                if len(errors) > 0:
                    if error_type == "error":
                        print "Volumedriver log file contains errors on Storage Router {0}\n - {1}".format(
                            storagerouter.ip, "\n - ".join(errors)
                        )
                    else:
                        raise RuntimeError(
                            "Fatal errors found in volumedriver log file on Storage Router {0}\n - {1}".format(
                                storagerouter.ip, "\n - ".join(errors)
                            )
                        )