Code example #1
 def get_instance(cls, connection_info, cache_store=None, version=6):
     """
     Retrieve an OVSClient instance for the connection information passed
     :param connection_info: Connection information, includes: 'host', 'port', 'client_id', 'client_secret'
     :type connection_info: dict
     :param cache_store: Store in which to keep the generated token for the client
     :type cache_store: object
     :param version: Version for the API
     :type version: int
     :return: An instance of the OVSClient class
     :rtype: ovs_extensions.api.client.OVSClient
     """
     ExtensionsToolbox.verify_required_params(
         actual_params=connection_info,
         required_params={
             'host': (str, ExtensionsToolbox.regex_ip),
             'port': (int, {
                 'min': 1,
                 'max': 65535
             }),
             'client_id': (str, None),
             'client_secret': (str, None),
             'local': (bool, None, False)
         })
     return cls(ip=connection_info['host'],
                port=connection_info['port'],
                credentials=(connection_info['client_id'],
                             connection_info['client_secret']),
                version=version,
                cache_store=cache_store)
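Every example in this listing funnels its validation through ExtensionsToolbox.verify_required_params, whose implementation is not part of the listing. The sketch below is a hypothetical reconstruction of the semantics implied by the call sites; the error type, the exact signature and the handling of each constraint kind are assumptions. Each spec is a tuple of (expected type, constraint) with an optional third element marking the key as mandatory (defaulting to True); the constraint may be None, a compiled regex such as regex_ip, a list of allowed values, a {'min': ..., 'max': ...} range for numbers, or a nested spec dict for dict values.

    def verify_required_params(required_params, actual_params, verify_keys=False):
        # Hypothetical sketch only; not the actual OpenvStorage implementation.
        errors = []
        for key, spec in required_params.items():
            expected_type, constraint = spec[0], spec[1]
            mandatory = spec[2] if len(spec) > 2 else True
            if key not in actual_params or actual_params[key] is None:
                if mandatory is True:
                    errors.append('Missing mandatory param: {0}'.format(key))
                continue
            value = actual_params[key]
            if not isinstance(value, expected_type):
                errors.append('Param {0} should be of type {1}'.format(key, expected_type))
            elif constraint is None:
                pass
            elif isinstance(constraint, dict) and expected_type in (int, float):
                # Range constraint, e.g. {'min': 1, 'max': 65535}
                if value < constraint.get('min', value) or value > constraint.get('max', value):
                    errors.append('Param {0} should be within {1}'.format(key, constraint))
            elif isinstance(constraint, dict):
                # Nested spec dict (see the 'backend_info' sub-dicts in example #20)
                try:
                    verify_required_params(constraint, value)
                except RuntimeError as ex:
                    errors.append(str(ex))
            elif hasattr(constraint, 'match'):
                # Compiled regex such as ExtensionsToolbox.regex_ip
                if not constraint.match(value):
                    errors.append('Param {0} does not match the expected pattern'.format(key))
            elif value not in constraint:
                # List of allowed values, e.g. ['tcp', 'udp', 'rdma']
                errors.append('Param {0} should be one of {1}'.format(key, list(constraint)))
        if verify_keys is True:
            for key in actual_params:
                if key not in required_params:
                    errors.append('Unexpected param: {0}'.format(key))
        if errors:
            raise RuntimeError('Invalid parameters:\n{0}'.format('\n'.join(errors)))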
Code example #2
 def create_hprm_config_files(self, local_storagerouter, storagerouter,
                              parameters):
     """
     DEPRECATED API CALL - USE /vpool/vpool_guid/create_hprm_config_files instead
     Create the required configuration files to be able to make use of HPRM (aka PRACC)
     This configuration will be zipped and made available for download
     :param local_storagerouter: StorageRouter this call is executed on
     :type local_storagerouter: ovs.dal.hybrids.storagerouter.StorageRouter
     :param storagerouter: The StorageRouter for which a HPRM manager needs to be deployed
     :type storagerouter: ovs.dal.hybrids.storagerouter.StorageRouter
     :param parameters: Additional information required for the HPRM configuration files
     :type parameters: dict
     :return: Asynchronous result of a CeleryTask
     :rtype: celery.result.AsyncResult
     """
     _ = storagerouter
     ExtensionsToolbox.verify_required_params(
         actual_params=parameters,
         required_params={
             'vpool_guid': (str, ExtensionsToolbox.regex_guid)
         })
     return VPoolController.create_hprm_config_files.delay(
         parameters=parameters,
         vpool_guid=parameters['vpool_guid'],
         local_storagerouter_guid=local_storagerouter.guid)
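Since this endpoint merely schedules the Celery task, callers get an AsyncResult handle back rather than the zip name itself. A hypothetical invocation (api, local_sr and the guid are made up for illustration):

    result = api.create_hprm_config_files(
        local_storagerouter=local_sr,
        storagerouter=None,  # Ignored by the implementation above
        parameters={'vpool_guid': '2f5e0f71-9e6a-42ab-a84a-52fe7e0d6e8e'})
    zip_name = result.get()  # Blocks until the Celery task has finished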
Code example #3
 def __init__(self, ip, username, password, client):
     # type: (str, str, str, SSHClient) -> None
     """
     Initialize an IPMIController
     :param ip: IP of the host to control through IPMI
     :type ip: str
     :param username: IPMI username of the host to control through IPMI
     :type username: str
     :param password: IPMI password of the host to control through IPMI
     :type password: str
     :param client: SSHClient to perform all IPMI commands on
     :type client: SSHClient
     """
     actual_params = {'ip': ip,
                      'username': username,
                      'password': password,
                      'client': client}
     required_params = {'ip': (str, ExtensionsToolbox.regex_ip, True),
                        'username': (str, None, True),
                        'password': (str, None, True),
                        'client': (SSHClient, None, True)}
     ExtensionsToolbox.verify_required_params(actual_params=actual_params,
                                              required_params=required_params)
     self.ip = ip
     self.username = username
     self._pwd = password
     self._client = client
     self._basic_command = ['ipmi-power', '-h', self.ip, '-u', self.username, '-p', self._pwd]
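A hypothetical construction of the controller (addresses and credentials are placeholders; the SSHClient usage mirrors the later examples):

    ssh_client = SSHClient('10.100.1.1', username='root')
    ipmi = IPMIController(ip='10.100.1.100',
                          username='admin',
                          password='admin_pwd',
                          client=ssh_client)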
Code example #4
    def create_blktap_device(client, diskname, edge_info, logger=LOGGER):
        """
        Creates a blktap device from a vdisk
        :return: blktap device location
        """
        required_edge_params = {
            'port': (int, {
                'min': 1,
                'max': 65535
            }),
            'protocol': (str, ['tcp', 'udp', 'rdma']),
            'ip': (str, Toolbox.regex_ip),
            'username': (str, None, False),
            'password': (str, None, False)
        }
        ExtensionsToolbox.verify_required_params(required_edge_params,
                                                 edge_info)
        if edge_info.get('username') and edge_info.get('password'):
            ovs_edge_connection = "openvstorage+{0}:{1}:{2}/{3}:username={4}:password={5}".format(
                edge_info['protocol'], edge_info['ip'], edge_info['port'],
                diskname, edge_info['username'], edge_info['password'])
        else:
            ovs_edge_connection = "openvstorage+{0}:{1}:{2}/{3}".format(
                edge_info['protocol'], edge_info['ip'], edge_info['port'],
                diskname)

        cmd = ["tap-ctl", "create", "-a", ovs_edge_connection]
        logger.debug('Creating blktap device: {}'.format(' '.join(cmd)))
        return client.run(cmd)
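For reference, the connection string assembled above follows the pattern openvstorage+<protocol>:<ip>:<port>/<diskname>, optionally suffixed with credentials. With hypothetical values it expands to:

    openvstorage+tcp:10.100.1.2:26203/mydisk:username=edgeuser:password=edgepass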
Code example #5
    def change_config(vpool_name, vpool_details, storagerouter_ip, *args,
                      **kwargs):
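        """
        Update the volumedriver configuration of a vPool on a given storagerouter.
        :param vpool_name: name of the vPool whose volumedriver configuration should be updated
        :type vpool_name: str
        :param vpool_details: vPool details, optionally containing a 'storagedriver' configuration dict
        :type vpool_details: dict
        :param storagerouter_ip: ip of the storagerouter on which the vPool is located
        :type storagerouter_ip: str
        """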

        # Settings volumedriver
        storagedriver_config = vpool_details.get('storagedriver')
        if storagedriver_config is not None:
            ExtensionsToolbox.verify_required_params(
                StoragedriverSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
            StoragedriverSetup.LOGGER.info(
                'Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'
                .format(vpool_name, storagerouter_ip))
            vpool = VPoolHelper.get_vpool_by_name(vpool_name)
            storagedrivers = [
                sd for sd in vpool.storagedrivers
                if sd.storagerouter.ip == storagerouter_ip
            ]
            if not storagedrivers:
                error_msg = 'Unable to find the storagedriver of vPool {0} on storagerouter {1}'.format(
                    vpool_name, storagerouter_ip)
                raise RuntimeError(error_msg)
            storagedriver = storagedrivers[0]
            StoragedriverHelper.change_config(storagedriver,
                                              storagedriver_config)
            vpool.invalidate_dynamics('configuration')
            StoragedriverSetup.LOGGER.info(
                'Updating volumedriver config of vPool `{0}` should have succeeded on storagerouter `{1}`'
                .format(vpool_name, storagerouter_ip))
Code example #6
 def convert_image(client,
                   image_location,
                   diskname,
                   edge_info,
                   logger=LOGGER):
     """
     Converts an image file with qemu over an edge connection
     :return: None
     """
     required_edge_params = {
         'port': (int, {
             'min': 1,
             'max': 65535
         }),
         'protocol': (str, ['tcp', 'udp', 'rdma']),
         'ip': (str, Toolbox.regex_ip),
         'username': (str, None, False),
         'password': (str, None, False)
     }
     ExtensionsToolbox.verify_required_params(required_edge_params,
                                              edge_info)
     if edge_info.get('username') and edge_info.get('password'):
         ovs_edge_connection = "openvstorage+{0}:{1}:{2}/{3}:username={4}:password={5}".format(
             edge_info['protocol'], edge_info['ip'], edge_info['port'],
             diskname, edge_info['username'], edge_info['password'])
     else:
         ovs_edge_connection = "openvstorage+{0}:{1}:{2}/{3}".format(
             edge_info['protocol'], edge_info['ip'], edge_info['port'],
             diskname)
     cmd = ["qemu-img", "convert", image_location, ovs_edge_connection]
     logger.debug('Converting an image with qemu using: {}'.format(
         ' '.join(cmd)))
     client.run(cmd)
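With the same hypothetical edge values as in the previous example, the executed command reduces to:

    qemu-img convert /tmp/image.raw openvstorage+tcp:10.100.1.2:26203/mydisk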
Code example #7
    def add_domain_to_sr(self, storagerouter_ip, name, recovery=False):
        """
        Add domains, present in the model, to a storage router.
        :param storagerouter_ip: ip of the storage router
        :type storagerouter_ip: str
        :param name: name of the domain to add to the storagerouter
        :type name: str
        :param recovery: whether the domain is a recovery domain
        :type recovery: bool
        """
        self._valid_storagerouter(storagerouter_ip)
        ExtensionsToolbox.verify_required_params(
            required_params={'name': (str, None, True)},
            actual_params={'name': name},
            verify_keys=True)

        if name not in self._domains:
            raise ValueError('Invalid domain passed: {0}'.format(name))

        path = self.config['setup']['storagerouters'][storagerouter_ip]
        if 'domains' not in path.keys():
            path['domains'] = {}
        path = path['domains']
        config_key = 'domain_guids' if recovery is False else 'recovery_domain_guids'
        if config_key not in path:
            path[config_key] = []
        path[config_key].append(name)
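For illustration, after a hypothetical call on a model instance:

    model.add_domain_to_sr('10.100.1.2', 'datacenter-1', recovery=True)
    # model.config['setup']['storagerouters']['10.100.1.2']['domains']
    # now equals {'recovery_domain_guids': ['datacenter-1']}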
Code example #8
 def add_disk_to_sr(self, storagerouter_ip, name, roles):
     """
     Add disk with given name and roles to a storagerouter in the model.
     :param storagerouter_ip: ip of the storagerouter to add the disk to
     :type storagerouter_ip: str
     :param name: name of the disk
     :type name: str
     :param roles: roles to assign to the disk
     :type roles: list
     """
     self._valid_storagerouter(storagerouter_ip)
     required_params = {
         'name': (str, None, True),
         'roles': (list, None, True)
     }
     ExtensionsToolbox.verify_required_params(
         required_params=required_params,
         actual_params={
             'name': name,
             'roles': roles
         },
         verify_keys=True)
     for role in roles:
         if role not in DiskPartition.ROLES:
             raise ValueError(
                 'Provided role {0} is not an allowed role for disk {1}.'.
                 format(role, name))
     disk_dict = {name: {'roles': roles}}
     sr_config = self.config['setup']['storagerouters'][storagerouter_ip]
     if 'disks' not in sr_config:
         sr_config['disks'] = {}
     sr_config['disks'].update(disk_dict)
Code example #9
 def add_storagerouter(self, storagerouter_ip, hostname):
     """
     Add a storagerouter to the model given the provided ip and hostname.
     :param storagerouter_ip: ip address of the storage router
     :type storagerouter_ip: str
     :param hostname: hostname of the storagerouter
     :type hostname: str
     """
     self._validate_ip(storagerouter_ip)
     required_params = {'hostname': (str, None, True)}
     ExtensionsToolbox.verify_required_params(
         required_params=required_params,
         actual_params={'hostname': hostname},
         verify_keys=True)
     if 'setup' not in self.config:
         self.config['setup'] = {}
     if 'storagerouters' not in self.config['setup']:
         self.config['setup']['storagerouters'] = {}
     if storagerouter_ip in self.config['setup']['storagerouters']:
         raise ValueError(
             'Storagerouter with given ip {0} already defined.'.format(
                 storagerouter_ip))
     self.config['setup']['storagerouters'][storagerouter_ip] = {
         'hostname': hostname
     }
Code example #10
    def validate(self, storagerouter=None, storagedriver=None):
        """
        Perform some validations before creating or extending a vPool
        :param storagerouter: StorageRouter on which the vPool will be created or extended
        :type storagerouter: ovs.dal.hybrids.storagerouter.StorageRouter
        :param storagedriver: When passing a StorageDriver, perform validations when shrinking a vPool
        :type storagedriver: ovs.dal.hybrids.storagedriver.StorageDriver
        :raises ValueError: If extending a vPool whose status is not RUNNING
                RuntimeError: If this vPool's configuration does not meet the requirements
                              If the vPool has already been extended on the specified StorageRouter
        :return: None
        :rtype: NoneType
        """
        if self.vpool is not None:
            if self.vpool.status != VPool.STATUSES.RUNNING:
                raise ValueError('vPool should be in {0} status'.format(
                    VPool.STATUSES.RUNNING))

            ExtensionsToolbox.verify_required_params(
                actual_params=self.vpool.configuration,
                required_params={
                    'sco_size': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.keys()),
                    'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys()),
                    'write_buffer': (float, None),
                    'dtl_transport': (str, StorageDriverClient.VPOOL_DTL_TRANSPORT_MAP.keys()),
                    'tlog_multiplier': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.values())
                })

            if storagerouter is not None:
                for vpool_storagedriver in self.vpool.storagedrivers:
                    if vpool_storagedriver.storagerouter_guid == storagerouter.guid:
                        raise RuntimeError(
                            'A StorageDriver is already linked to this StorageRouter for vPool {0}'
                            .format(self.vpool.name))
            if storagedriver is not None:
                VDiskController.sync_with_reality(vpool_guid=self.vpool.guid)
                storagedriver.invalidate_dynamics('vdisks_guids')
                if len(storagedriver.vdisks_guids) > 0:
                    raise RuntimeError(
                        'There are still vDisks served from the given StorageDriver'
                    )

                self.mds_services = [
                    mds_service for mds_service in self.vpool.mds_services
                    if mds_service.service.storagerouter_guid ==
                    storagedriver.storagerouter_guid
                ]
                for mds_service in self.mds_services:
                    if len(mds_service.storagedriver_partitions
                           ) == 0 or mds_service.storagedriver_partitions[
                               0].storagedriver is None:
                        raise RuntimeError(
                            'Failed to retrieve the linked StorageDriver to this MDS Service {0}'
                            .format(mds_service.service.name))
Code example #11
    def update_storagedriver_of_vpool(self, sr_ip, vpool_name, sr_params=None):
        """
        Update all or some data of a storagedriver, assigned to a vpool on a specific storagerouter.
        :param sr_ip: ip of the storagerouter on which the vpool is located
        :type sr_ip: str
        :param vpool_name: name of the vpool of which to update the storagedriver data
        :type vpool_name: str
        :param sr_params: parameters to update of the referenced storagedriver
        :type sr_params: dict
        """
        required_params = {
            'sco_size': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.keys()),
            'cluster_size': (int, StorageDriverClient.CLUSTER_SIZES),
            'volume_write_buffer': (int, {'min': 128, 'max': 10240}, False),
            'global_read_buffer': (int, {'min': 128, 'max': 10240}, False),
            'strategy': (str, None, False),
            'deduplication': (str, None, False),
            'dtl_transport': (str, StorageDriverClient.VPOOL_DTL_TRANSPORT_MAP.keys()),
            'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys())
        }

        default_params = {
            'sco_size': 4,
            'cluster_size': 4,
            'volume_write_buffer': 512,
            'strategy': 'none',
            'global_write_buffer': 128,
            'global_read_buffer': 128,
            'deduplication': 'non_dedupe',
            'dtl_transport': 'tcp',
            'dtl_mode': 'sync'
        }

        if sr_params is None:
            sr_params = {}
        default_params.update(sr_params)
        if not isinstance(default_params, dict):
            raise ValueError('Parameters should be of type "dict"')
        ExtensionsToolbox.verify_required_params(required_params,
                                                 default_params)
        if sr_ip not in self.config['setup']['storagerouters']:
            raise KeyError('Storagerouter with ip {0} is not defined'.format(sr_ip))
        if vpool_name not in self.config['setup']['storagerouters'][sr_ip]['vpools']:
            raise KeyError(
                'Vpool with name {0} is not defined on storagerouter with ip {1}'
                .format(vpool_name, sr_ip))
        self.config['setup']['storagerouters'][sr_ip]['vpools'][vpool_name]['storagedriver'] = default_params
Code example #12
 def _validate_ip(self, ip):
     required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True)}
     try:
         ExtensionsToolbox.verify_required_params(
             required_params=required_params,
             actual_params={'storagerouter_ip': ip},
             verify_keys=True)
     except RuntimeError as e:
         raise ValueError(e)
     if os.system('ping -c 1 {0}'.format(ip)) != 0:
         raise ValueError('No response from ip {0}'.format(ip))
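Note that os.system echoes the ping output to stdout. A quieter variant with the same exit-code semantics could look like this (a sketch using only the standard library):

    import os
    import subprocess

    with open(os.devnull, 'w') as devnull:
        if subprocess.call(['ping', '-c', '1', ip], stdout=devnull, stderr=devnull) != 0:
            raise ValueError('No response from ip {0}'.format(ip))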
Code example #13
    def change_cache(self,
                     storagerouter_ip,
                     vpool,
                     block_cache=True,
                     fragment_cache=True,
                     on_read=True,
                     on_write=True):
        """
        Change the caching parameters of a given vpool on a given storagerouter. By default, change parameters of both block cache and fragment cache.
        :param storagerouter_ip: search for vpool on this storagerouter
        :type storagerouter_ip: str
        :param vpool: change cache options of given vpool
        :type vpool: str
        :param block_cache: change block cache parameters, default True
        :type block_cache: bool
        :param fragment_cache: change fragment cache parameters, default True
        :type fragment_cache: bool
        :param on_read: change the cache_on_read parameter, default True
        :type on_read: bool
        :param on_write: change the cache_on_write parameter, default True
        :type on_write: bool
        """
        self._valid_storagerouter(storagerouter_ip=storagerouter_ip)

        required_params = {
            'vpool': (str, None, True),
            'block_cache': (bool, None, False),
            'fragment_cache': (bool, None, False),
            'on_read': (bool, None, False),
            'on_write': (bool, None, False)
        }
        actual_params = {
            'vpool': vpool,
            'block_cache': block_cache,
            'fragment_cache': fragment_cache,
            'on_read': on_read,
            'on_write': on_write
        }
        ExtensionsToolbox.verify_required_params(
            required_params=required_params,
            actual_params=actual_params,
            verify_keys=True)
        try:
            vpool = self.config['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool]
        except KeyError:
            raise ValueError('Vpool {0} not found'.format(vpool))
        if block_cache is True:
            vpool['block_cache']['strategy']['cache_on_read'] = on_read
            vpool['block_cache']['strategy']['cache_on_write'] = on_write
        if fragment_cache is True:
            vpool['fragment_cache']['strategy']['cache_on_read'] = on_read
            vpool['fragment_cache']['strategy']['cache_on_write'] = on_write
Code example #14
 def __init__(self, ip, user, password, type):
     required_params = {'ip': (str, Toolbox.regex_ip),
                        'user': (str, None),
                        'password': (str, None),
                        'type': (str, ['KVM', 'VMWARE'])}
     actual_params = {'ip': ip,
                      'user': user,
                      'password': password,
                      'type': type}
     ExtensionsToolbox.verify_required_params(required_params, actual_params)
     self.ip = ip
     self.user = user
     self.password = password
     self.type = type
Code example #15
    def add_backend(self, backend_name, domains=None, scaling='LOCAL'):
        """
        Add a backend with provided domains and scaling to the model.
        :param backend_name: name of the backend
        :type backend_name: str
        :param domains: domains the backend is linked to
        :type domains: list
        :param scaling: scaling of the backend
        :type scaling: str
        """
        if domains is None:
            domains = []
        else:
            for domain_name in domains:
                if domain_name not in self._domains:
                    raise ValueError(
                        'Invalid domain passed: {0}'.format(domain_name))

        ExtensionsToolbox.verify_required_params(
            required_params={
                'backend_name': (str, Toolbox.regex_backend, True),
                'domains': (list, self._domains, True),
                'scaling': (str, AlbaBackend.SCALINGS, True)
            },
            actual_params={
                'backend_name': backend_name,
                'domains': domains,
                'scaling': scaling
            },
            verify_keys=True)
        be_dict = {
            'name': backend_name,
            'domains': {
                'domain_guids': domains
            },
            'scaling': scaling
        }
        if 'setup' not in self.config.keys():
            self.config['setup'] = {}
        self._backends.append(be_dict['name'])
        if 'backends' not in self.config['setup']:
            self.config['setup']['backends'] = []
        self.config['setup']['backends'].append(be_dict)
Code example #16
 def configure_proxy(backend_name, proxy_configuration):
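     """
     Apply the given configuration to every ALBA proxy of the vPools backed by the given backend and restart those proxies.
     :param backend_name: name of the backend whose proxies should be reconfigured
     :type backend_name: str
     :param proxy_configuration: configuration keys and values to apply (only keys from ProxySetup.PARAMS are allowed)
     :type proxy_configuration: dict
     """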
     faulty_keys = [
         key for key in proxy_configuration.keys()
         if key not in ProxySetup.PARAMS
     ]
     if len(faulty_keys) > 0:
         raise ValueError(
             '{0} are unsupported keys for proxy configuration.'.format(
                 ', '.join(faulty_keys)))
     ExtensionsToolbox.verify_required_params(ProxySetup.PARAMS,
                                              proxy_configuration)
     vpools = VPoolList.get_vpools()
     service_manager = ServiceFactory.get_manager()
     with open('/root/old_proxies', 'w') as backup_file:
         for vpool in vpools:
             if vpool.metadata['backend']['backend_info']['name'] != backend_name:
                 continue
             for storagedriver in vpool.storagedrivers:
                 for proxy in storagedriver.alba_proxies:
                     config_loc = 'ovs/vpools/{0}/proxies/{1}/config/main'.format(
                         vpool.guid, proxy.guid)
                     proxy_service = Service(proxy.service_guid)
                     proxy_config = Configuration.get(config_loc)
                     old_proxy_config = dict(proxy_config)
                     backup_file.write('{} -- {}\n'.format(
                         config_loc, old_proxy_config))
                     proxy_config.update(proxy_configuration)
                     ProxySetup.LOGGER.info(
                         "Changed {0} to {1} for proxy {2}".format(
                             old_proxy_config, proxy_config, config_loc))
                     ProxySetup.LOGGER.info("Changed items {0}".format([
                         (key, value)
                         for key, value in proxy_config.iteritems()
                         if key not in old_proxy_config.keys()
                     ]))
                     Configuration.set(config_loc,
                                       json.dumps(proxy_config, indent=4),
                                       raw=True)
                     client = SSHClient(storagedriver.storage_ip,
                                        username='******')
                     service_manager.restart_service(proxy_service.name,
                                                     client=client)
Code example #17
    def configure_mds(self, config):
        """
        Configure the global MDS settings for this vPool
        :param config: MDS configuration settings (Can contain amount of tlogs to wait for during MDS checkup, MDS safety and the maximum load for an MDS)
        :type config: dict
        :raises RuntimeError: If specified safety not between 1 and 5
                              If specified amount of tlogs is less than 1
                              If specified maximum load is less than 10%
        :return: None
        :rtype: NoneType
        """
        if self.vpool is None:
            raise RuntimeError(
                'Cannot configure MDS settings when no vPool has been created yet'
            )

        ExtensionsToolbox.verify_required_params(
            verify_keys=True,
            actual_params=config,
            required_params={
                'mds_tlogs': (int, {'min': 1}, False),
                'mds_safety': (int, {'min': 1, 'max': 5}, False),
                'mds_maxload': (int, {'min': 10}, False)
            })

        # Don't set a default value here, because we need to know whether these values have been specifically set or were set at None
        self.mds_tlogs = config.get('mds_tlogs')
        self.mds_safety = config.get('mds_safety')
        self.mds_maxload = config.get('mds_maxload')
        Configuration.set(key='/ovs/vpools/{0}/mds_config'.format(
            self.vpool.guid),
                          value={
                              'mds_tlogs': self.mds_tlogs or 100,
                              'mds_safety': self.mds_safety or 3,
                              'mds_maxload': self.mds_maxload or 75
                          })
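A hypothetical call (installer stands in for the object exposing configure_mds); values that are not passed fall back to the defaults when the configuration is written:

    installer.configure_mds({'mds_safety': 2})
    # Writes {'mds_tlogs': 100, 'mds_safety': 2, 'mds_maxload': 75}
    # to /ovs/vpools/<vpool_guid>/mds_config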
Code example #18
    def __init__(self, ip=None, port=None, database=None):
        # type: (str, int, str) -> None
        """
        Create client instance for graphite and validate parameters
        :param ip: IP address of the client to send graphite data towards
        :type ip: str
        :param port: port of the UDP listening socket
        :type port: int
        :param database: name of the database
        :type database: str
        """
        graphite_data = {}
        if ip is None:
            # No IP specified: fall back to the configured graphite settings
            graphite_data = self.get_graphite_config()
            if not graphite_data:
                raise RuntimeError(
                    'No graphite data found in config path `{0}`'.format(
                        self.CONFIG_PATH))

        ip = ip or graphite_data['ip']
        port = port or graphite_data.get('port', 2003)

        ExtensionsToolbox.verify_required_params(
            verify_keys=True,
            actual_params={
                'host': ip,
                'port': port
            },
            required_params={
                'host': (str, ExtensionsToolbox.regex_ip, True),
                'port': (int, {
                    'min': 1025,
                    'max': 65535
                }, True)
            })

        super(GraphiteClient, self).__init__(ip=ip,
                                             port=port,
                                             database=database)
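Hypothetical usage; without arguments the client falls back to the configured graphite settings, while explicit values are validated as above:

    client = GraphiteClient(ip='10.100.1.5', port=2003, database='statistics')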
Code example #19
    def validate_and_retrieve_config(cls):
        """
        Retrieve and validate the configuration for StatsMonkey
        :return: The configuration set at /ovs/framework/monitoring/stats_monkey
        :rtype: dict
        """
        config_key = '/ovs/framework/monitoring/stats_monkey'
        config = cls._get_configuration()
        if not config.exists(config_key):
            raise ValueError(
                'StatsMonkey requires a configuration key at {0}'.format(
                    config_key))

        config = config.get(config_key)
        if not isinstance(config, dict):
            raise ValueError('StatsMonkey configuration must be of type dict')

        required_params = {
            'host': (str, ExtensionsToolbox.regex_ip),
            'port': (int, {
                'min': 1025,
                'max': 65535
            }),
            'interval': (int, {
                'min': 1
            }, False),
            'database': (str, None),
            'transport': (str, ['influxdb', 'redis', 'graphite']),
            'environment': (str, None)
        }
        if config.get('transport') == 'influxdb':
            required_params['username'] = (str, None)
        if config.get('transport') in ['influxdb', 'redis']:
            required_params['password'] = (str, None)

        ExtensionsToolbox.verify_required_params(
            actual_params=config, required_params=required_params)
        cls._config = config
        return cls._config
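A hypothetical configuration value at /ovs/framework/monitoring/stats_monkey that would pass this validation:

    {
        'host': '10.100.1.5',
        'port': 2003,
        'database': 'stats',
        'transport': 'graphite',
        'environment': 'production'
    }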
Code example #20
    def create_hprm_config_files(vpool_guid, local_storagerouter_guid,
                                 parameters):
        """
        Create the required configuration files to be able to make use of HPRM (aka PRACC)
        This configuration will be zipped and made available for download
        :param vpool_guid: The guid of the VPool for which a HPRM manager needs to be deployed
        :type vpool_guid: str
        :param local_storagerouter_guid: The guid of the StorageRouter the API was requested on
        :type local_storagerouter_guid: str
        :param parameters: Additional information required for the HPRM configuration files
        :type parameters: dict
        :return: Name of the zipfile containing the configuration files
        :rtype: str
        """
        # Validations
        required_params = {
            'port': (int, {
                'min': 1,
                'max': 65535
            }),
            'identifier': (str, ExtensionsToolbox.regex_vpool)
        }
        ExtensionsToolbox.verify_required_params(
            actual_params=parameters, required_params=required_params)
        vpool = VPool(vpool_guid)
        identifier = parameters['identifier']
        config_path = None
        local_storagerouter = StorageRouter(local_storagerouter_guid)
        for sd in vpool.storagedrivers:
            if len(sd.alba_proxies) == 0:
                raise ValueError(
                    'No ALBA proxies configured for vPool {0} on StorageRouter {1}'
                    .format(vpool.name, sd.storagerouter.name))
            config_path = '/ovs/vpools/{0}/proxies/{1}/config/{{0}}'.format(
                vpool.guid, sd.alba_proxies[0].guid)

        if config_path is None:
            raise ValueError(
                'vPool {0} has not been extended to any StorageRouter'.format(
                    vpool.name))
        proxy_cfg = Configuration.get(key=config_path.format('main'))

        cache_info = {}
        arakoons = {}
        cache_types = VPool.CACHES.values()
        if not any(ctype in parameters for ctype in cache_types):
            raise ValueError(
                'At least one cache type should be passed: {0}'.format(
                    ', '.join(cache_types)))
        for ctype in cache_types:
            if ctype not in parameters:
                continue
            required_dict = {'read': (bool, None), 'write': (bool, None)}
            required_params.update({ctype: (dict, required_dict)})
            ExtensionsToolbox.verify_required_params(
                actual_params=parameters, required_params=required_params)
            read = parameters[ctype]['read']
            write = parameters[ctype]['write']
            if read is False and write is False:
                cache_info[ctype] = ['none']
                continue
            path = parameters[ctype].get('path')
            if path is not None:
                path = path.strip()
                if not path or path.endswith(
                        '/.') or '..' in path or '/./' in path:
                    raise ValueError('Invalid path specified')
                required_dict.update({
                    'path': (str, None),
                    'size': (int, {
                        'min': 1,
                        'max': 10 * 1024
                    })
                })
                ExtensionsToolbox.verify_required_params(
                    actual_params=parameters, required_params=required_params)
                while '//' in path:
                    path = path.replace('//', '/')
                cache_info[ctype] = [
                    'local', {
                        'path': path,
                        'max_size': parameters[ctype]['size'] * 1024**3,
                        'cache_on_read': read,
                        'cache_on_write': write
                    }
                ]
            else:
                required_dict.update({
                    'backend_info': (dict, {
                        'preset': (str, ExtensionsToolbox.regex_preset),
                        'alba_backend_guid': (str, ExtensionsToolbox.regex_guid),
                        'alba_backend_name': (str, ExtensionsToolbox.regex_backend)
                    }),
                    'connection_info': (dict, {
                        'host': (str, ExtensionsToolbox.regex_ip, False),
                        'port': (int, {'min': 1, 'max': 65535}, False),
                        'client_id': (str, ExtensionsToolbox.regex_guid, False),
                        'client_secret': (str, None, False)
                    })
                })
                ExtensionsToolbox.verify_required_params(
                    actual_params=parameters, required_params=required_params)
                connection_info = parameters[ctype]['connection_info']
                if connection_info['host']:  # Remote Backend for accelerated Backend
                    alba_backend_guid = parameters[ctype]['backend_info']['alba_backend_guid']
                    ovs_client = OVSClient.get_instance(
                        connection_info=connection_info)
                    arakoon_config = VPoolShared.retrieve_alba_arakoon_config(
                        alba_backend_guid=alba_backend_guid,
                        ovs_client=ovs_client)
                    arakoons[ctype] = ArakoonClusterConfig.convert_config_to(
                        arakoon_config, return_type='INI')
                else:  # Local Backend for accelerated Backend
                    alba_backend_name = parameters[ctype]['backend_info']['alba_backend_name']
                    arakoon_key = '/ovs/arakoon/{0}-abm/config'.format(alba_backend_name)
                    if Configuration.exists(key=arakoon_key, raw=True) is False:
                        raise ValueError(
                            'Arakoon cluster for ALBA Backend {0} could not be retrieved'.format(
                                alba_backend_name))
                    arakoons[ctype] = Configuration.get(key=arakoon_key, raw=True)
                cache_info[ctype] = [
                    'alba', {
                        'albamgr_cfg_url': '/etc/hprm/{0}/{1}_cache_arakoon.ini'.format(identifier, ctype),
                        'bucket_strategy': [
                            '1-to-1', {
                                'prefix': vpool.guid,
                                'preset': parameters[ctype]['backend_info']['preset']
                            }
                        ],
                        'manifest_cache_size': proxy_cfg['manifest_cache_size'],
                        'cache_on_read': read,
                        'cache_on_write': write
                    }
                ]

        tgz_name = 'hprm_config_files_{0}_{1}.tgz'.format(identifier, vpool.name)
        config = {
            'ips': ['127.0.0.1'],
            'port': parameters['port'],
            'pracc': {
                'uds_path': '/var/run/hprm/{0}/uds_path'.format(identifier),
                'max_clients': 1000,
                'max_read_buf_size': 64 * 1024,  # Buffer size for incoming requests (in bytes)
                'thread_pool_size': 64  # Amount of threads
            },
            'transport': 'tcp',
            'log_level': 'info',
            'read_preference': proxy_cfg['read_preference'],
            'albamgr_cfg_url': '/etc/hprm/{0}/arakoon.ini'.format(identifier),
            'manifest_cache_size': proxy_cfg['manifest_cache_size']
        }
        file_contents_map = {}
        for ctype in cache_types:
            if ctype in cache_info:
                config['{0}_cache'.format(ctype)] = cache_info[ctype]
            if ctype in arakoons:
                arakoon_ini_path = '/opt/OpenvStorage/config/{0}/{1}_cache_arakoon.ini'.format(identifier, ctype)
                file_contents_map[arakoon_ini_path] = arakoons[ctype]
        file_contents_map.update({
            '/opt/OpenvStorage/config/{0}/config.json'.format(identifier): json.dumps(config, indent=4),
            '/opt/OpenvStorage/config/{0}/arakoon.ini'.format(identifier): Configuration.get(key=config_path.format('abm'), raw=True)
        })

        local_client = SSHClient(endpoint=local_storagerouter)
        local_client.dir_create(
            directories='/opt/OpenvStorage/config/{0}'.format(identifier))
        local_client.dir_create(
            directories='/opt/OpenvStorage/webapps/frontend/downloads')
        for file_name, contents in file_contents_map.iteritems():
            local_client.file_write(contents=contents, filename=file_name)
        local_client.run(command=[
            'tar', '--transform', 's#^config/{0}#{0}#'.format(identifier),
            '-czf', '/opt/OpenvStorage/webapps/frontend/downloads/{0}'.format(
                tgz_name), 'config/{0}'.format(identifier)
        ])
        local_client.dir_delete(
            directories='/opt/OpenvStorage/config/{0}'.format(identifier))
        return tgz_name
Code example #21
    def configure_support(support_info):
        """
        Configures support on all StorageRouters
        :param support_info: Information about which components should be configured
            {'stats_monkey': True,  # Enable/disable the stats monkey scheduled task
             'support_agent': True,  # Responsible for enabling the ovs-support-agent service, which collects heart beat data
             'remote_access': False,  # Cannot be True when support agent is False. Is responsible for opening an OpenVPN tunnel to allow for remote access
             'stats_monkey_config': {}}  # Dict with information on how to configure the stats monkey (Only required when enabling the stats monkey)
        :type support_info: dict
        :return: None
        :rtype: NoneType
        """
        ExtensionsToolbox.verify_required_params(
            actual_params=support_info,
            required_params={
                'stats_monkey': (bool, None, False),
                'remote_access': (bool, None, False),
                'support_agent': (bool, None, False),
                'stats_monkey_config': (dict, None, False)
            })
        # All settings are optional, so if nothing is specified, no need to change anything
        if len(support_info) == 0:
            StorageRouterController._logger.warning(
                'Configure support called without any specific settings. Doing nothing'
            )
            return

        # Collect information
        support_agent_key = '/ovs/framework/support|support_agent'
        support_agent_new = support_info.get('support_agent')
        support_agent_old = Configuration.get(key=support_agent_key)
        support_agent_change = support_agent_new is not None and support_agent_old != support_agent_new

        remote_access_key = '/ovs/framework/support|remote_access'
        remote_access_new = support_info.get('remote_access')
        remote_access_old = Configuration.get(key=remote_access_key)
        remote_access_change = remote_access_new is not None and remote_access_old != remote_access_new

        stats_monkey_celery_key = '/ovs/framework/scheduling/celery'
        stats_monkey_config_key = '/ovs/framework/monitoring/stats_monkey'
        stats_monkey_new_config = support_info.get('stats_monkey_config')
        stats_monkey_old_config = Configuration.get(
            key=stats_monkey_config_key, default={})
        stats_monkey_celery_config = Configuration.get(
            key=stats_monkey_celery_key, default={})
        stats_monkey_new = support_info.get('stats_monkey')
        stats_monkey_old = stats_monkey_celery_config.get(
            'ovs.stats_monkey.run_all'
        ) is not None or stats_monkey_celery_config.get(
            'alba.stats_monkey.run_all') is not None
        stats_monkey_change = stats_monkey_new is not None and (
            stats_monkey_old != stats_monkey_new
            or stats_monkey_new_config != stats_monkey_old_config)

        # Make sure support agent is enabled when trying to enable remote access
        if remote_access_new is True:
            if support_agent_new is False or (support_agent_new is None
                                              and support_agent_old is False):
                raise RuntimeError(
                    'Remote access cannot be enabled without the heart beat enabled'
                )

        # Collect root_client information
        root_clients = {}
        for storagerouter in StorageRouterList.get_storagerouters():
            try:
                root_clients[storagerouter] = SSHClient(endpoint=storagerouter,
                                                        username='******')
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')

        if stats_monkey_new is True:
            ExtensionsToolbox.verify_required_params(
                actual_params=stats_monkey_new_config,
                required_params={
                    'host': (str, ExtensionsToolbox.regex_ip),
                    'port': (int, {
                        'min': 1,
                        'max': 65535
                    }),
                    'database': (str, None),
                    'interval': (int, {
                        'min': 1,
                        'max': 86400
                    }),
                    'transport': (str, ['influxdb', 'redis', 'graphite']),
                    'environment': (str, None)
                })
            if stats_monkey_new_config['transport'] in ['influxdb', 'redis']:
                ExtensionsToolbox.verify_required_params(
                    actual_params=stats_monkey_new_config,
                    required_params={'password': (str, None)})

            if stats_monkey_new_config['transport'] == 'influxdb':
                ExtensionsToolbox.verify_required_params(
                    actual_params=stats_monkey_new_config,
                    required_params={'username': (str, None)})

        # Configure remote access
        if remote_access_change is True:
            Configuration.set(key=remote_access_key, value=remote_access_new)
            cid = Configuration.get('/ovs/framework/cluster_id').replace(
                r"'", r"'\''")
            for storagerouter, root_client in root_clients.iteritems():
                if remote_access_new is False:
                    StorageRouterController._logger.info(
                        'Un-configuring remote access on StorageRouter {0}'.
                        format(root_client.ip))
                    nid = storagerouter.machine_id.replace(r"'", r"'\''")
                    service_name = 'openvpn@ovs_{0}-{1}'.format(cid, nid)
                    if StorageRouterController._service_manager.has_service(
                            name=service_name, client=root_client):
                        StorageRouterController._service_manager.stop_service(
                            name=service_name, client=root_client)
                    root_client.file_delete(filenames=['/etc/openvpn/ovs_*'])

        # Configure support agent
        if support_agent_change is True:
            service_name = 'support-agent'
            Configuration.set(key=support_agent_key, value=support_agent_new)
            for root_client in root_clients.itervalues():
                if support_agent_new is True:
                    StorageRouterController._logger.info(
                        'Configuring support agent on StorageRouter {0}'.
                        format(root_client.ip))
                    if StorageRouterController._service_manager.has_service(
                            name=service_name, client=root_client) is False:
                        StorageRouterController._service_manager.add_service(
                            name=service_name, client=root_client)
                    StorageRouterController._service_manager.restart_service(
                        name=service_name, client=root_client)
                else:
                    StorageRouterController._logger.info(
                        'Un-configuring support agent on StorageRouter {0}'.
                        format(root_client.ip))
                    if StorageRouterController._service_manager.has_service(
                            name=service_name, client=root_client):
                        StorageRouterController._service_manager.stop_service(
                            name=service_name, client=root_client)
                        StorageRouterController._service_manager.remove_service(
                            name=service_name, client=root_client)

        # Configure stats monkey
        if stats_monkey_change is True:
            # 2 keys matter here:
            #    - /ovs/framework/scheduling/celery --> used to check whether the stats monkey is disabled or not
            #    - /ovs/framework/monitoring/stats_monkey --> contains the actual configuration parameters when enabling the stats monkey, such as host, port, username, ...
            service_name = 'scheduled-tasks'
            if stats_monkey_new is True:  # Enable the scheduled task by removing the key
                StorageRouterController._logger.info(
                    'Configuring stats monkey')
                interval = stats_monkey_new_config['interval']
                # The scheduled task cannot be configured to run more than once a minute, so for intervals < 60, the stats monkey task handles this itself
                StorageRouterController._logger.debug(
                    'Requested interval to run at: {0}'.format(interval))
                Configuration.set(key=stats_monkey_config_key,
                                  value=stats_monkey_new_config)
                if interval > 0:
                    days, hours, minutes, _ = ExtensionsToolbox.convert_to_days_hours_minutes_seconds(
                        seconds=interval)
                    if days == 1:  # Max interval is 24 * 60 * 60, so once every day at 3 AM
                        schedule = {'hour': '3'}
                    elif hours > 0:
                        schedule = {'hour': '*/{0}'.format(hours)}
                    else:
                        schedule = {'minute': '*/{0}'.format(minutes)}
                    stats_monkey_celery_config['ovs.stats_monkey.run_all'] = schedule
                    stats_monkey_celery_config['alba.stats_monkey.run_all'] = schedule
                    StorageRouterController._logger.debug(
                        'Configured schedule is: {0}'.format(schedule))
                else:
                    stats_monkey_celery_config.pop('ovs.stats_monkey.run_all',
                                                   None)
                    stats_monkey_celery_config.pop('alba.stats_monkey.run_all',
                                                   None)
            else:  # Disable the scheduled task by setting the values for the celery tasks to None
                StorageRouterController._logger.info(
                    'Un-configuring stats monkey')
                stats_monkey_celery_config['ovs.stats_monkey.run_all'] = None
                stats_monkey_celery_config['alba.stats_monkey.run_all'] = None

            Configuration.set(key=stats_monkey_celery_key,
                              value=stats_monkey_celery_config)
            for storagerouter in StorageRouterList.get_masters():
                root_client = root_clients[storagerouter]
                StorageRouterController._logger.debug(
                    'Restarting ovs-scheduled-tasks service on node with IP {0}'
                    .format(root_client.ip))
                StorageRouterController._service_manager.restart_service(
                    name=service_name, client=root_client)
Code example #22
    def create_vm(self,
                  name,
                  vcpus,
                  ram,
                  disks,
                  cdrom_iso=None,
                  os_type=None,
                  os_variant=None,
                  vnc_listen='0.0.0.0',
                  networks=None,
                  start=False,
                  autostart=False,
                  edge_configuration=None):
        """
        Creates a VM
        @TODO use Edge instead of fuse for disks
        :param name: name of the vm
        :param vcpus: number of cpus
        :param ram: amount of RAM (MB)
        :param disks: list of dicts : options see SdkOptionsMapping
        when using existing storage, size can be removed
        :param cdrom_iso: path to the iso to mount
        :param autostart: start vm when the hypervisor starts
        :param edge_configuration: virtual machine setup for ovs with edge configuration
        :param os_type: type of os
        :param os_variant: variant of the os
        :param vnc_listen:
        :param networks: list of tuples: ("network=default", "mac=RANDOM" or a valid mac, "model=e1000" or any model for vmachines)
        :param start: start the guest after creation
        :return:
        """
        try:
            self._conn.lookupByName(name)
        except libvirt.libvirtError:
            pass
        else:
            raise RuntimeError(
                'Name {0} is currently in use by another VM.'.format(name))

        ovs_vm = False
        if edge_configuration is not None:
            required_edge_params = {
                'port': (int, {
                    'min': 1,
                    'max': 65535
                }),
                'protocol': (str, ['tcp', 'udp', 'rdma']),
                'hostname': (str, None),
                'username': (str, None, False),
                'password': (str, None, False)
            }
            ExtensionsToolbox.verify_required_params(required_edge_params,
                                                     edge_configuration)
            ovs_vm = True
        command = ['virt-install']
        options = [
            '--connect=qemu+ssh://{0}@{1}/system'.format(self.login, self.host),
            '--name={0}'.format(name),
            '--vcpus={0}'.format(vcpus),
            '--ram={0}'.format(ram),
            '--graphics=vnc,listen={0}'.format(vnc_listen),  # Have to specify 0.0.0.0 else it will listen on 127.0.0.1 only
            '--noautoconsole',
            '--print-xml=1'
        ]

        if cdrom_iso is None:
            options.append('--import')
        else:
            options.append('--cdrom={0}'.format(cdrom_iso))
        for disk in disks:
            options.append('--disk={}'.format(
                self._extract_command(disk,
                                      SdkOptionMapping.disk_options_mapping)))
        if os_type is not None:
            if os_type not in SdkOptionMapping.optype_options:
                raise ValueError('Ostype {0} is not supported'.format(os_type))
            options.append('--os-type={0}'.format(os_type))
        if os_variant is not None:
            options.append('--os-variant={0}'.format(os_variant))
        if networks is None or networks == []:
            options.append('--network=none')
        if autostart is True:
            options.append('--autostart')
        if edge_configuration is not None:
            options.append('--dry-run')
        elif networks:
            for network in networks:
                options.append('--network={0}'.format(
                    self._extract_command(
                        network, SdkOptionMapping.network_option_mapping)))
        try:
            logger.info('Creating vm {0} with command {1}'.format(
                name, ' '.join(command + options)))
            vm_xml = self.ssh_client.run(command + options)
            if ovs_vm is True:
                vm_xml = self._update_xml_for_ovs(vm_xml, edge_configuration)
            self._conn.defineXML(vm_xml)
            if start is True:
                self.power_on(name)
            logger.info('Vm {0} has been created.'.format(name))
        except subprocess.CalledProcessError as ex:
            msg = 'Error during creation of VM. Got {0}'.format(str(ex))
            logger.exception(msg)
            print ' '.join(command + options)
            raise RuntimeError(msg)
Code example #23
    def _refresh_backend_metadata(self, backend_info, connection_info):
        """
        Returns data about the backend. Used to store all required data in the metadata property of a vPool
        :param backend_info: Information about the backend (should contain the alba_backend_guid and the preset_name)
        :param connection_info: Information about the cluster to find the backend on (host, port, client_id, client_secret)
        :return: Filled backend info
        Structure:
        {
            'name': <ALBA Backend name>,
            'preset': <preset name>,
            'scaling': LOCAL|GLOBAL,
            'policies': <policies>,
            'sco_size': <sco size>,
            'frag_size': <fragment cache size>,
            'total_size': <total ALBA Backend size>,
            'backend_guid': <Backend guid>,
            'alba_backend_guid': <ALBA Backend guid>,
            'connection_info': {
                'host': <ip>,
                'port': <port>,
                'local': <bool indicating local ALBA backend>,
                'client_id': <client_id>,
                'client_secret': <client_secret>
            }
        }
        :rtype: dict
        """
        # Validation
        if self.is_new is True and self.sd_installer is None:
            raise RuntimeError(
                'A StorageDriver installer is required when working with a new vPool'
            )
        ExtensionsToolbox.verify_required_params(actual_params=backend_info,
                                                 required_params={'alba_backend_guid': (str, None),
                                                                  'preset': (str, None)})
        ovs_client = OVSClient.get_instance(
            connection_info=connection_info,
            cache_store=VolatileFactory.get_client())

        new_backend_info = copy.deepcopy(backend_info)
        preset_name = backend_info['preset']
        alba_backend_guid = backend_info['alba_backend_guid']
        arakoon_config = VPoolShared.retrieve_alba_arakoon_config(
            alba_backend_guid=alba_backend_guid, ovs_client=ovs_client)

        # Requesting the remote stack for re-use when calculating the read preference
        backend_dict = ovs_client.get(
            '/alba/backends/{0}/'.format(alba_backend_guid),
            params={'contents': 'name,usages,presets,backend,remote_stack'})
        self.complete_backend_info[alba_backend_guid] = backend_dict

        preset_info = dict(
            (preset['name'], preset) for preset in backend_dict['presets'])
        if preset_name not in preset_info:
            raise RuntimeError(
                'Given preset {0} is not available in backend {1}'.format(
                    preset_name, backend_dict['name']))

        policies = []
        for policy_info in preset_info[preset_name]['policies']:
            policy = json.loads('[{0}]'.format(policy_info.strip('()')))
            policies.append(policy)
        # Get the sco_size
        if self.is_new is True:
            sco_size = self.sd_installer.sco_size * 1024.0**2
        else:
            sco_size = self.vpool.configuration['sco_size'] * 1024.0**2

        new_backend_info.update({
            'name': backend_dict['name'],
            'preset': preset_name,
            'scaling': backend_dict['scaling'],
            'policies': policies,
            'sco_size': sco_size,
            'frag_size': float(preset_info[preset_name]['fragment_size']),
            'total_size': float(backend_dict['usages']['size']),
            'backend_guid': backend_dict['backend_guid'],
            'alba_backend_guid': alba_backend_guid,
            'connection_info': connection_info,
            'arakoon_config': arakoon_config
        })

        return new_backend_info
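
The policy strings reported by an ALBA preset look like tuples; the loop above converts each one into a JSON list. A minimal sketch of that conversion (policy value hypothetical):

import json

policy_info = '(5, 4, 8, 3)'  # Tuple-like policy string as reported by a preset
policy = json.loads('[{0}]'.format(policy_info.strip('()')))
assert policy == [5, 4, 8, 3]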
Code Example #24
    def prepare_vm_disks(self,
                         source_storagedriver,
                         cloud_image_path,
                         cloud_init_loc,
                         vm_name,
                         data_disk_size,
                         edge_user_info=None,
                         logger=LOGGER):
        """
        Will create all necessary vdisks to create the bulk of vms
        :param source_storagedriver: storagedriver to create the disks on
        :param cloud_image_path: path to the cloud image
        :param cloud_init_loc: path to the cloud init script
        :param vm_name: name prefix for the vms
        :param data_disk_size: size of the data disk
        :param edge_user_info: user information for the edge. Optional
        :param logger: logging instance
        :return: 
        """
        logger.info('Starting with preparing vm disk(s)')
        vm_amount = self.amount_of_vms
        if isinstance(edge_user_info, dict):
            required_edge_params = {
                'username': (str, None, False),
                'password': (str, None, False)
            }
            ExtensionsToolbox.verify_required_params(required_params=required_edge_params,
                                                     actual_params=edge_user_info)
        if edge_user_info is None:
            edge_user_info = {}

        protocol = source_storagedriver.cluster_node_config[
            'network_server_uri'].split(':')[0]
        vpool = source_storagedriver.vpool
        client = SSHClient(source_storagedriver.storagerouter, username='******')

        edge_configuration = {
            'ip': source_storagedriver.storage_ip,
            'port': source_storagedriver.ports['edge'],
            'protocol': protocol
        }
        edge_configuration.update(edge_user_info)

        original_boot_disk_name = None  # Cloning purposes
        original_data_disk_name = None  # Cloning purposes

        connection_messages = []
        vm_info = {}
        volume_amount = 0

        base_vm_name = vm_name  # Keep the original prefix; vm_name is rebuilt every iteration
        for vm_number in xrange(0, vm_amount):
            filled_number = str(vm_number).zfill(3)
            vm_name = '{0}-{1}'.format(base_vm_name, filled_number)
            create_msg = '{0}_{1}'.format(str(uuid.uuid4()), vm_name)
            boot_vdisk_name = '{0}_vdisk_boot_{1}'.format(
                vm_name, filled_number)
            data_vdisk_name = '{0}_vdisk_data_{1}'.format(
                vm_name, filled_number)
            cd_vdisk_name = '{0}_vdisk_cd_{1}'.format(vm_name, filled_number)
            boot_vdisk_path = '/mnt/{0}/{1}.raw'.format(
                vpool.name, boot_vdisk_name)
            data_vdisk_path = '/mnt/{0}/{1}.raw'.format(
                vpool.name, data_vdisk_name)
            cd_vdisk_path = '/mnt/{0}/{1}.raw'.format(vpool.name,
                                                      cd_vdisk_name)
            if vm_number == 0:
                try:
                    # Create VDISKs
                    self.convert_image(client, cloud_image_path,
                                       boot_vdisk_name, edge_configuration)
                except RuntimeError as ex:
                    logger.error('Could not convert the image. Got {0}'.format(
                        str(ex)))
                    raise
                boot_vdisk = VDiskHelper.get_vdisk_by_name(
                    '{0}.raw'.format(boot_vdisk_name), vpool.name)
                original_boot_disk_name = boot_vdisk_name
                logger.info('Boot VDisk successfully created.')
                try:
                    data_vdisk = VDiskHelper.get_vdisk_by_guid(
                        VDiskSetup.create_vdisk(
                            data_vdisk_name, vpool.name, data_disk_size,
                            source_storagedriver.storage_ip))
                    logger.info('Data VDisk successfully created.')
                except TimeOutError:
                    logger.error(
                        'The creation of the data vdisk has timed out.')
                    raise
                except RuntimeError as ex:
                    logger.error(
                        'Could not create the data vdisk. Got {0}'.format(
                            str(ex)))
                    raise
                original_data_disk_name = data_vdisk_name
            else:
                # Rely on cloning to speed up the process
                boot_vdisk_info = VDiskSetup.create_clone(
                    vdisk_name=original_boot_disk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=boot_vdisk_name,
                    storagerouter_ip=source_storagedriver.storage_ip)
                boot_vdisk = VDiskHelper.get_vdisk_by_guid(
                    boot_vdisk_info['vdisk_guid'])
                data_vdisk_info = VDiskSetup.create_clone(
                    vdisk_name=original_data_disk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=data_vdisk_name,
                    storagerouter_ip=source_storagedriver.storage_ip)
                data_vdisk = VDiskHelper.get_vdisk_by_guid(
                    data_vdisk_info['vdisk_guid'])
            #######################
            # GENERATE CLOUD INIT #
            #######################
            iso_loc = self._generate_cloud_init(
                client=client,
                convert_script_loc=cloud_init_loc,
                create_msg=create_msg)
            self.convert_image(client, iso_loc, cd_vdisk_name,
                               edge_configuration)
            cd_creation_time = time.time()
            cd_vdisk = None
            while cd_vdisk is None:
                if time.time() - cd_creation_time > 60:
                    raise RuntimeError(
                        'Could not fetch the cd vdisk after {0}s'.format(
                            time.time() - cd_creation_time))
                try:
                    cd_vdisk = VDiskHelper.get_vdisk_by_name(
                        cd_vdisk_name, vpool.name)
                except VDiskNotFoundError:
                    logger.warning(
                        'Could not fetch the cd vdisk after {0}s.'.format(
                            time.time() - cd_creation_time))
                time.sleep(0.5)

            # Take snapshot to revert back to after every migrate scenario
            data_snapshot_guid = VDiskSetup.create_snapshot(
                '{0}_data'.format(vm_name),
                data_vdisk.devicename,
                vpool.name,
                consistent=False)
            vm_info[vm_name] = {
                'data_snapshot_guid': data_snapshot_guid,
                'vdisks': [boot_vdisk, data_vdisk, cd_vdisk],
                'cd_path': cd_vdisk_path,
                'disks': [{'mountpoint': boot_vdisk_path},
                          {'mountpoint': data_vdisk_path}],
                'networks': [{'network': 'default', 'mac': 'RANDOM', 'model': 'e1000'}],
                'created': False,
                'ip': '',
                'create_msg': create_msg
            }
            connection_messages.append(create_msg)
            volume_amount += len(vm_info[vm_name]['vdisks'])
            logger.info('Prepped everything for VM {0}.'.format(vm_name))

        self.vm_info = vm_info
        self.connection_messages = connection_messages
        self.volume_amount = volume_amount
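
For reference, the zero-padded naming scheme used above produces VM names such as these (prefix assumed):

base_vm_name = 'mig-test'  # Hypothetical prefix
vm_names = ['{0}-{1}'.format(base_vm_name, str(i).zfill(3)) for i in range(3)]
assert vm_names == ['mig-test-000', 'mig-test-001', 'mig-test-002']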
Code Example #25
    def create_service(self, volume_uri, block_size=MINIMAL_BLOCK_SIZE):
        # type: (str, int) -> str
        """
        Create NBD service
        :param volume_uri: tcp://user:pass@ip:port/volume-name
        :param block_size: block size in bytes
        :return: path /dev/nbdx
        :raises: RuntimeError if volume uri -ip:port does not match ip regex
                                            -tcp does not match tcp connection regex
                                 block size is too small or no integer
                                 volumedriver-nbd package is not installed
        """

        # Unittests
        if os.environ.get('RUNNING_UNITTESTS') == 'True':
            node_id = 'unittest_guid'
        else:
            node_id = System.get_my_machine_id().strip()

        # Parameter verification
        cache = apt.Cache()
        try:
            if not cache['volumedriver-nbd'].is_installed:
                raise RuntimeError('Package volumedriver-nbd is not yet installed')
        except KeyError:
            raise RuntimeError('Package volumedriver-nbd is not yet installed')
        if not isinstance(volume_uri, str):
            raise RuntimeError(
                'Invalid parameter: {0} should be of type `str`'.format(
                    volume_uri))
        if not isinstance(block_size, int) or block_size < self.MINIMAL_BLOCK_SIZE:
            raise RuntimeError(
                'Invalid parameter: {0} should be of type `int` and at least {1}'.format(
                    block_size, self.MINIMAL_BLOCK_SIZE))

        node_path = self.NODE_PATH.format(node_id)
        user_pass, ip_port = volume_uri.split('@')
        ip_port, vol_name = ip_port.split('/')

        ExtensionsToolbox.verify_required_params(
            required_params={'user_pass': (str, ExtensionsToolbox.regex_tcp_conn, True),
                             'ip_port': (str, ExtensionsToolbox.regex_ip_port, True),
                             'vol_name': (str, None, True)},
            actual_params={'user_pass': user_pass,
                           'ip_port': ip_port,
                           'vol_name': vol_name},
            verify_keys=True)

        nbd_number = self._find_first_free_device_number(node_path)
        config_path = os.path.join(node_path, nbd_number, 'config')

        # Set self._configuration keys and values in local config
        nbd_path = self.DEVICE_PATH.format(nbd_number)
        config_settings = {'volume_uri': volume_uri, 'nbd_path': nbd_path}
        if block_size > NBDManager.MINIMAL_BLOCK_SIZE:
            config_settings['libovsvoldrv_request_split_size'] = block_size
        self._configuration.set(key=config_path,
                                value=yaml.dump(config_settings,
                                                default_flow_style=False),
                                raw=True)

        # Add service
        opt_config_path = self.OPT_CONFIG_PATH.format(nbd_number)
        if not self._client.file_exists(opt_config_path):
            self._client.file_create(opt_config_path)
        self._service_manager.add_service(
            name='nbd',
            client=self._client,
            params={
                'NODE_ID': str(node_id),
                'NBDX': nbd_number,
                'SCRIPT': self.SERVICE_SCRIPT_PATH,
                'WD': self.WORKING_DIRECTORY,  # Module path and WD depend on the module the NBD service is called from, e.g. the iSCSI manager
                'MODULE_PATH': self.MODULE_PATH,
                'MGR_SERVICE': self.MANAGER_SERVICE_NAME
            },
            target_name=self.SERVICE_NAME.format(nbd_number, vol_name),
            path=self.SERVICE_FILE_PATH)
        return nbd_path
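
The volume URI format documented above is split into credentials, endpoint and volume name exactly as in the method body; a small sketch with made-up values:

volume_uri = 'tcp://user:pass@10.100.1.2:26203/myvolume'  # Hypothetical URI
user_pass, ip_port = volume_uri.split('@')
ip_port, vol_name = ip_port.split('/')
assert (user_pass, ip_port, vol_name) == ('tcp://user:pass', '10.100.1.2:26203', 'myvolume')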
Code Example #26
    def fill_slots(node_cluster_guid,
                   node_guid,
                   osd_information,
                   metadata=None):
        # type: (str, str, List[Dict[str, Any]], Optional[dict]) -> None
        """
        Creates 1 or more new OSDs
        :param node_cluster_guid: Guid of the node cluster to which the disks belong
        :type node_cluster_guid: basestring
        :param node_guid: Guid of the AlbaNode to act as the 'active' side
        :type node_guid: basestring
        :param osd_information: Information about the amount of OSDs to add to each Slot
        :type osd_information: list
        :param metadata: Metadata to add to the OSD (connection information for remote Backend, general Backend information)
        :type metadata: dict
        :return: None
        :rtype: NoneType
        """
        metadata_type_validation = {
            'integer': (int, None),
            'osd_type': (str, AlbaOSD.OSD_TYPES.keys()),
            'ip': (str, ExtensionsToolbox.regex_ip),
            'port': (int, {
                'min': 1,
                'max': 65535
            })
        }
        node_cluster = AlbaNodeCluster(node_cluster_guid)
        # Check for the active side if it's part of the cluster
        active_node = AlbaNode(node_guid)
        if active_node not in node_cluster.alba_nodes:
            raise ValueError(
                'The requested active AlbaNode is not part of AlbaNodeCluster {0}'
                .format(node_cluster.guid))
        required_params = {'slot_id': (str, None)}
        can_be_filled = False
        for flow in ['fill', 'fill_add']:
            if node_cluster.cluster_metadata[flow] is False:
                continue
            can_be_filled = True
            if flow == 'fill_add':
                required_params['alba_backend_guid'] = (str, None)
            for key, mtype in node_cluster.cluster_metadata[
                    '{0}_metadata'.format(flow)].iteritems():
                if mtype in metadata_type_validation:
                    required_params[key] = metadata_type_validation[mtype]
        if can_be_filled is False:
            raise ValueError(
                'The given node cluster does not support filling slots')

        validation_reasons = []
        for slot_info in osd_information:
            try:
                ExtensionsToolbox.verify_required_params(
                    required_params=required_params, actual_params=slot_info)
            except RuntimeError as ex:
                validation_reasons.append(str(ex))
        if len(validation_reasons) > 0:
            raise ValueError('Missing required parameters:\n* {0}'.format(
                '\n* '.join(validation_reasons)))

        for slot_info in osd_information:
            if node_cluster.cluster_metadata['fill'] is True:
                # Only filling is required
                active_node.client.fill_slot(
                    slot_id=slot_info['slot_id'],
                    extra=dict((key, slot_info[key]) for key in
                               node_cluster.cluster_metadata['fill_metadata']))
            elif node_cluster.cluster_metadata['fill_add'] is True:
                # Fill the slot
                active_node.client.fill_slot(
                    slot_id=slot_info['slot_id'],
                    extra=dict(
                        (key, slot_info[key]) for key in
                        node_cluster.cluster_metadata['fill_add_metadata']))

                # And add/claim the OSD
                AlbaController.add_osds(
                    alba_backend_guid=slot_info['alba_backend_guid'],
                    osds=[slot_info],
                    alba_node_guid=node_guid,
                    metadata=metadata)
        # Invalidate the stack and sync towards all passive sides
        active_node.invalidate_dynamics('stack')
        for node in node_cluster.alba_nodes:
            if node != active_node:
                try:
                    node.client.sync_stack(active_node.stack)
                except Exception:
                    AlbaNodeClusterController._logger.exception(
                        'Error while syncing stacks to the passive side')
        node_cluster.invalidate_dynamics('stack')
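
A sketch (all values hypothetical) of an osd_information payload that passes the validation above when the node cluster advertises a 'fill_add' flow with an 'integer' metadata key named 'count':

osd_information = [{'slot_id': 'slot-01',                                         # Always required
                    'alba_backend_guid': '8c1d2a34-0000-4000-8000-000000000000',  # Required for the 'fill_add' flow
                    'count': 2}]                                                  # Assumed 'integer' metadata key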
Code Example #27
    def __init__(self, vp_installer, configurations=None, storagedriver=None):
        """
        Initialize a StorageDriverInstaller class instance containing information about:
            - vPool information on which a new StorageDriver is going to be deployed, eg: global vPool configurations, vPool name, ...
            - Information about caching behavior
            - Information about which ALBA Backends to use as main Backend, fragment cache Backend, block cache Backend
            - Connection information about how to reach the ALBA Backends via the API
            - StorageDriver configuration settings
            - The storage IP address
        """
        if (configurations is None and storagedriver is None) or (configurations is not None and storagedriver is not None):
            raise RuntimeError('Configurations and storagedriver are mutually exclusive options')

        self.sd_service = 'ovs-volumedriver_{0}'.format(vp_installer.name)
        self.dtl_service = 'ovs-dtl_{0}'.format(vp_installer.name)
        self.sr_installer = None
        self.vp_installer = vp_installer
        self.storagedriver = storagedriver
        self.service_manager = ServiceFactory.get_manager()

        # Validations
        if configurations is not None:
            storage_ip = configurations.get('storage_ip')
            caching_info = configurations.get('caching_info')
            backend_info = configurations.get('backend_info')
            connection_info = configurations.get('connection_info')
            sd_configuration = configurations.get('sd_configuration')

            if not re.match(pattern=ExtensionsToolbox.regex_ip, string=storage_ip):
                raise ValueError('Incorrect storage IP provided')

            ExtensionsToolbox.verify_required_params(actual_params=caching_info,
                                                     required_params={'cache_quota_bc': (int, None, False),
                                                                      'cache_quota_fc': (int, None, False),
                                                                      'block_cache_on_read': (bool, None),
                                                                      'block_cache_on_write': (bool, None),
                                                                      'fragment_cache_on_read': (bool, None),
                                                                      'fragment_cache_on_write': (bool, None)})

            ExtensionsToolbox.verify_required_params(actual_params=sd_configuration,
                                                     required_params={'advanced': (dict, {'number_of_scos_in_tlog': (int, {'min': 4, 'max': 20}),
                                                                                          'non_disposable_scos_factor': (float, {'min': 1.5, 'max': 20})},
                                                                                   False),
                                                                      'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys()),
                                                                      'sco_size': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.keys()),
                                                                      'cluster_size': (int, StorageDriverClient.CLUSTER_SIZES),
                                                                      'write_buffer': (int, {'min': 128, 'max': 10240}),  # Volume write buffer
                                                                      'dtl_transport': (str, StorageDriverClient.VPOOL_DTL_TRANSPORT_MAP.keys())})

            for section, backend_information in backend_info.iteritems():
                if section == 'main' or backend_information is not None:  # For the main section we require the backend info to be filled out
                    ExtensionsToolbox.verify_required_params(actual_params=backend_information,
                                                             required_params={'preset': (str, ExtensionsToolbox.regex_preset),
                                                                              'alba_backend_guid': (str, ExtensionsToolbox.regex_guid)})
                    if backend_information is not None:  # For block and fragment cache we only need connection information when backend info has been passed
                        ExtensionsToolbox.verify_required_params(actual_params=connection_info[section],
                                                                 required_params={'host': (str, ExtensionsToolbox.regex_ip),
                                                                                  'port': (int, {'min': 1, 'max': 65535}),
                                                                                  'client_id': (str, None),
                                                                                  'client_secret': (str, None),
                                                                                  'local': (bool, None, False)})

            # General configurations
            self.storage_ip = storage_ip
            self.write_caches = []
            self.backend_info = backend_info['main']
            self.cache_size_local = None
            self.connection_info = connection_info['main']
            self.storagedriver_partition_dtl = None
            self.storagedriver_partition_tlogs = None
            self.storagedriver_partitions_caches = []
            self.storagedriver_partition_metadata = None
            self.storagedriver_partition_file_driver = None

            # StorageDriver configurations
            self.dtl_mode = sd_configuration['dtl_mode']
            self.sco_size = sd_configuration['sco_size']
            self.cluster_size = sd_configuration['cluster_size']
            self.write_buffer = sd_configuration['write_buffer']
            self.rdma_enabled = sd_configuration['dtl_transport'] == StorageDriverClient.FRAMEWORK_DTL_TRANSPORT_RSOCKET
            self.dtl_transport = sd_configuration['dtl_transport']
            self.tlog_multiplier = StorageDriverClient.TLOG_MULTIPLIER_MAP[self.sco_size]

            # Block cache behavior configurations
            self.block_cache_quota = caching_info.get('cache_quota_bc')
            self.block_cache_on_read = caching_info['block_cache_on_read']
            self.block_cache_on_write = caching_info['block_cache_on_write']
            self.block_cache_backend_info = backend_info[StorageDriverConfiguration.CACHE_BLOCK]
            self.block_cache_connection_info = connection_info[StorageDriverConfiguration.CACHE_BLOCK]
            self.block_cache_local = self.block_cache_backend_info is None and (self.block_cache_on_read is True or self.block_cache_on_write is True)

            # Fragment cache behavior configurations
            self.fragment_cache_quota = caching_info.get('cache_quota_fc')
            self.fragment_cache_on_read = caching_info['fragment_cache_on_read']
            self.fragment_cache_on_write = caching_info['fragment_cache_on_write']
            self.fragment_cache_backend_info = backend_info[StorageDriverConfiguration.CACHE_FRAGMENT]
            self.fragment_cache_connection_info = connection_info[StorageDriverConfiguration.CACHE_FRAGMENT]
            self.fragment_cache_local = self.fragment_cache_backend_info is None and (self.fragment_cache_on_read is True or self.fragment_cache_on_write is True)

            # Additional validations
            if (self.sco_size == 128 and self.write_buffer < 256) or not (128 <= self.write_buffer <= 10240):
                raise RuntimeError('Incorrect StorageDriver configuration settings specified')

            alba_backend_guid_main = self.backend_info['alba_backend_guid']
            if self.block_cache_backend_info is not None and alba_backend_guid_main == self.block_cache_backend_info['alba_backend_guid']:
                raise RuntimeError('Backend and block cache backend cannot be the same')
            if self.fragment_cache_backend_info is not None and alba_backend_guid_main == self.fragment_cache_backend_info['alba_backend_guid']:
                raise RuntimeError('Backend and fragment cache backend cannot be the same')

            if self.vp_installer.is_new is False:
                if alba_backend_guid_main != self.vp_installer.vpool.metadata['backend']['backend_info']['alba_backend_guid']:
                    raise RuntimeError('Incorrect ALBA Backend guid specified')

                current_vpool_configuration = self.vp_installer.vpool.configuration
                for key, value in sd_configuration.iteritems():
                    current_value = current_vpool_configuration.get(key)
                    if value != current_value:
                        raise RuntimeError('Specified StorageDriver config "{0}" with value {1} does not match the expected value {2}'.format(key, value, current_value))

            # Add some additional required information
            self.backend_info['sco_size'] = self.sco_size * 1024.0 ** 2
            if self.block_cache_backend_info is not None:
                self.block_cache_backend_info['sco_size'] = self.sco_size * 1024.0 ** 2
            if self.fragment_cache_backend_info is not None:
                self.fragment_cache_backend_info['sco_size'] = self.sco_size * 1024.0 ** 2

        # Cross reference
        self.vp_installer.sd_installer = self
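
A minimal sketch of the `configurations` argument, with keys taken from the validations above and all values assumed (including the 'block_cache'/'fragment_cache' keys, assumed here to be the values of StorageDriverConfiguration.CACHE_BLOCK and CACHE_FRAGMENT):

configurations = {
    'storage_ip': '10.100.1.2',
    'caching_info': {'block_cache_on_read': False,
                     'block_cache_on_write': False,
                     'fragment_cache_on_read': True,
                     'fragment_cache_on_write': True},
    'backend_info': {'main': {'preset': 'default',
                              'alba_backend_guid': '8c1d2a34-0000-4000-8000-000000000000'},
                     'block_cache': None,      # No block cache Backend
                     'fragment_cache': None},  # No fragment cache Backend
    'connection_info': {'main': {'host': '10.100.1.1',
                                 'port': 443,
                                 'client_id': 'client',
                                 'client_secret': 'secret'},
                        'block_cache': None,
                        'fragment_cache': None},
    'sd_configuration': {'dtl_mode': 'a_sync',    # Assumed key of VPOOL_DTL_MODE_MAP
                         'sco_size': 4,
                         'cluster_size': 4,
                         'write_buffer': 512,
                         'dtl_transport': 'tcp'}  # Assumed key of VPOOL_DTL_TRANSPORT_MAP
}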
Code Example #28
def _validate_and_retrieve_pre_config():
    """
    Validate whether the values in the pre-configuration file are valid
    :return: JSON contents
    """
    if not os.path.exists(PRECONFIG_FILE):
        return {}

    with open(PRECONFIG_FILE) as pre_config:
        try:
            config = json.loads(pre_config.read())
        except Exception:
            _print_and_log(
                level='exception',
                message='\n' + Interactive.boxed_message([
                    'JSON contents could not be retrieved from file {0}'.
                    format(PRECONFIG_FILE)
                ]))
            sys.exit(1)

    if 'asdmanager' not in config or not isinstance(config['asdmanager'],
                                                    dict):
        _print_and_log(
            level='error',
            message='\n' + Interactive.boxed_message([
                'The ASD manager pre-configuration file must contain an "asdmanager" key with a dictionary as value'
            ]))
        sys.exit(1)

    errors = []
    config = config['asdmanager']
    actual_keys = config.keys()
    allowed_keys = [
        'api_ip', 'api_port', 'asd_ips', 'asd_start_port',
        'configuration_store', 'ipmi'
    ]
    for key in actual_keys:
        if key not in allowed_keys:
            errors.append(
                'Key {0} is not supported by the ASD manager'.format(key))
    if len(errors) > 0:
        _print_and_log(
            level='error',
            message='\n' + Interactive.boxed_message([
                'Errors found while verifying pre-configuration:',
                ' - {0}'.format('\n - '.join(errors)), '', 'Allowed keys:\n'
                ' - {0}'.format('\n - '.join(allowed_keys))
            ]))
        sys.exit(1)

    try:
        ExtensionsToolbox.verify_required_params(
            actual_params=config,
            required_params={
                'api_ip': (str, ExtensionsToolbox.regex_ip, True),
                'asd_ips': (list, ExtensionsToolbox.regex_ip, False),
                'api_port': (int, {
                    'min': 1025,
                    'max': 65535
                }, False),
                'asd_start_port': (int, {
                    'min': 1025,
                    'max': 65435
                }, False),
                'configuration_store': (str, ['arakoon'], False)
            })
        if config.get('ipmi') is not None:
            ExtensionsToolbox.verify_required_params(
                actual_params=config.get('ipmi'),
                required_params={
                    'ip': (str, ExtensionsToolbox.regex_ip, True),
                    'username': (str, None, True),
                    'pwd': (str, None, True)
                })
    except RuntimeError:
        _print_and_log(message='\n' + Interactive.boxed_message([
            'The asd-manager pre-configuration file does not contain correct information'
        ]),
                       level='exception')
        sys.exit(1)
    return config
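
A pre-configuration file that passes the checks above could look as follows (all values assumed); printing it simply shows the JSON that would live in PRECONFIG_FILE:

import json

pre_config = {
    'asdmanager': {
        'api_ip': '10.100.1.2',            # Mandatory
        'api_port': 8500,                  # Optional, 1025-65535
        'asd_ips': ['10.100.1.2'],         # Optional
        'asd_start_port': 8600,            # Optional, 1025-65435
        'configuration_store': 'arakoon',  # Optional, only 'arakoon' is allowed
        'ipmi': {'ip': '10.100.1.250', 'username': 'admin', 'pwd': 'secret'}
    }
}
print(json.dumps(pre_config, indent=4))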
Code Example #29
    def fill_slots(node_guid, osd_information, metadata=None):
        """
        Creates 1 or more new OSDs
        :param node_guid: Guid of the node to which the disks belong
        :type node_guid: str
        :param osd_information: Information about the amount of OSDs to add to each Slot
        :type osd_information: list
        :param metadata: Metadata to add to the OSD (connection information for remote Backend, general Backend information)
        :type metadata: dict
        :return: None
        :rtype: NoneType
        """
        metadata_type_validation = {
            'integer': (int, None),
            'osd_type': (str, AlbaOSD.OSD_TYPES.keys()),
            'ip': (str, ExtensionsToolbox.regex_ip),
            'port': (int, {
                'min': 1,
                'max': 65535
            })
        }
        node = AlbaNode(node_guid)
        required_params = {'slot_id': (str, None)}
        can_be_filled = False
        for flow in ['fill', 'fill_add']:
            if node.node_metadata[flow] is False:
                continue
            can_be_filled = True
            if flow == 'fill_add':
                required_params['alba_backend_guid'] = (str, None)
            for key, mtype in node.node_metadata['{0}_metadata'.format(
                    flow)].iteritems():
                if mtype in metadata_type_validation:
                    required_params[key] = metadata_type_validation[mtype]
        if can_be_filled is False:
            raise ValueError('The given node does not support filling slots')

        validation_reasons = []
        for osd_info in osd_information:  # type: dict
            try:
                ExtensionsToolbox.verify_required_params(
                    required_params=required_params, actual_params=osd_info)
            except RuntimeError as ex:
                validation_reasons.append(str(ex))
        if len(validation_reasons) > 0:
            raise ValueError('Missing required parameters:\n* {0}'.format(
                '\n* '.join(validation_reasons)))

        for osd_info in osd_information:
            if node.node_metadata['fill'] is True:
                # Only filling is required
                AlbaNodeController._fill_slot(
                    node, osd_info['slot_id'],
                    dict((key, osd_info[key])
                         for key in node.node_metadata['fill_metadata']))
            elif node.node_metadata['fill_add'] is True:
                # Fill the slot
                created_osds = AlbaNodeController._fill_slot(
                    node, osd_info['slot_id'],
                    dict((key, osd_info[key])
                         for key in node.node_metadata['fill_add_metadata']))
                # And add/claim the OSD
                if node.type == AlbaNode.NODE_TYPES.S3:
                    # The S3 manager returns the information about the osd when filling it
                    for created_osd_info in created_osds:
                        osd_info.update(
                            created_osd_info
                        )  # Add additional information about the osd
                        AlbaController.add_osds(
                            alba_backend_guid=osd_info['alba_backend_guid'],
                            osds=[osd_info],
                            alba_node_guid=node_guid,
                            metadata=metadata)
                else:
                    AlbaController.add_osds(
                        alba_backend_guid=osd_info['alba_backend_guid'],
                        osds=[osd_info],
                        alba_node_guid=node_guid,
                        metadata=metadata)
        node.invalidate_dynamics('stack')
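
The validation schema above is assembled from the node's own metadata; a condensed sketch (metadata hypothetical) of how a declared 'integer' key ends up in required_params:

metadata_type_validation = {'integer': (int, None)}
node_metadata = {'fill': True, 'fill_metadata': {'count': 'integer'}}  # Hypothetical node metadata
required_params = {'slot_id': (str, None)}
for key, mtype in node_metadata['fill_metadata'].items():
    if mtype in metadata_type_validation:
        required_params[key] = metadata_type_validation[mtype]
assert required_params == {'slot_id': (str, None), 'count': (int, None)}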
Code Example #30
    def check_if_proxies_work(cls, result_handler):
        """
        Checks if all Alba Proxies work on a local machine, it creates a namespace and tries to put and object
        :param result_handler: logging object
        :type result_handler: ovs.extensions.healthcheck.result.HCResults
        :return: None
        :rtype: NoneType
        """
        namespace_params = {'bucket_count': (list, None),
                            'logical': (int, None),
                            'storage': (int, None),
                            'storage_per_osd': (list, None)}

        result_handler.info('Checking the ALBA proxies.', add_to_result=False)

        amount_of_presets_not_working = []
        # ignore possible subprocess output
        fnull = open(os.devnull, 'w')
        # try put/get/verify on all available proxies on the local node
        local_proxies = ServiceHelper.get_local_proxy_services()
        if len(local_proxies) == 0:
            result_handler.info('Found no proxies.', add_to_result=False)
            return amount_of_presets_not_working
        api_cache = {}
        for service in local_proxies:
            try:
                result_handler.info('Checking ALBA proxy {0}.'.format(service.name), add_to_result=False)
                ip = service.alba_proxy.storagedriver.storage_ip
                # Encapsulating try to determine test output
                try:
                    # Determine to what backend the proxy is connected
                    proxy_client_cfg = AlbaCLI.run(command='proxy-client-cfg', named_params={'host': ip, 'port': service.ports[0]})
                except AlbaException:
                    result_handler.failure('Fetching proxy info has failed. Please verify if {0}:{1} is the correct address for proxy {2}.'.format(ip, service.ports[0], service.name),
                                           code=ErrorCodes.alba_cmd_fail)
                    continue
                # Fetch arakoon information
                abm_name = proxy_client_cfg.get('cluster_id')
                # Check if proxy config is correctly setup
                if abm_name is None:
                    raise ConfigNotMatchedException('Proxy config for proxy {0} does not have the correct format on node {1} with port {2}.'.format(service.name, ip, service.ports[0]))
                abm_config = Configuration.get_configuration_path('/ovs/vpools/{0}/proxies/{1}/config/abm'.format(service.alba_proxy.storagedriver.vpool.guid, service.alba_proxy.guid))

                # Determine presets / backend
                try:
                    presets = AlbaCLI.run(command='list-presets', config=abm_config)
                except AlbaException:
                    result_handler.failure('Listing the presets has failed. Please check the arakoon config path. We used {0}'.format(abm_config),
                                           code=ErrorCodes.alba_cmd_fail)
                    continue

                for preset in presets:
                    # If preset is not in use, test will fail so add a skip
                    if preset['in_use'] is False:
                        result_handler.skip('Preset {0} is not in use and will not be checked'.format(preset['name']))
                        continue
                    preset_name = preset['name']
                    # Encapsulation try for cleanup
                    try:
                        # Generate new namespace name using the preset
                        namespace_key_prefix = 'ovs-healthcheck-ns-{0}-{1}'.format(preset_name, AlbaHealthCheck.LOCAL_ID)
                        namespace_key = '{0}_{1}'.format(namespace_key_prefix, uuid.uuid4())
                        object_key = 'ovs-healthcheck-obj-{0}'.format(str(uuid.uuid4()))
                        # Create namespace
                        AlbaCLI.run(command='proxy-create-namespace',
                                    named_params={'host': ip, 'port': service.ports[0]},
                                    extra_params=[namespace_key, preset_name])
                        # Wait until fully created
                        namespace_start_time = time.time()
                        for index in xrange(2):
                            # Running twice because the first run can give a false positive: the OSDs alert the NSM,
                            # and the NSM responds that it got messages, but not necessarily the ones we are after
                            AlbaCLI.run(command='deliver-messages', config=abm_config)
                        while True:
                            if time.time() - namespace_start_time > AlbaHealthCheck.NAMESPACE_TIMEOUT:
                                raise AlbaTimeOutException('Creating namespace has timed out after {0}s'.format(time.time() - namespace_start_time), 'deliver-messages')
                            list_ns_osds_output = AlbaCLI.run(command='list-ns-osds', config=abm_config, extra_params=[namespace_key])
                            # Example output: [[0, [u'Active']], [3, [u'Active']]]
                            namespace_ready = True
                            for osd_info in list_ns_osds_output:
                                if osd_info[1][0] != 'Active':
                                    # If we found an OSD not Active, check if preset is satisfiable
                                    namespace_ready = False
                                    break
                            if namespace_ready is True:
                                break
                            else:
                                result_handler.info('Not all OSDs have responded to the creation message. Fetching the safety', add_to_result=False)
                                try:
                                    # Fetch the preset information on the Framework
                                    # This also adds an extra delay, giving the messages time to propagate
                                    vpool = service.alba_proxy.storagedriver.vpool
                                    alba_backend_guid = vpool.metadata['backend']['backend_info']['alba_backend_guid']
                                    api_url = 'alba/backends/{0}'.format(alba_backend_guid)
                                    if api_url not in api_cache:
                                        connection_info = vpool.metadata['backend']['backend_info']['connection_info']
                                        api_client = OVSClient(connection_info['host'], connection_info['port'], (connection_info['client_id'], connection_info['client_secret']))
                                        start = time.time()
                                        _presets = api_client.get(api_url, params={'contents': 'presets'})['presets']
                                        api_cache[api_url] = _presets
                                        result_handler.info('Fetching the safety took {0} seconds'.format(time.time() - start))
                                    _presets = api_cache[api_url]
                                    _preset = filter(lambda p: p['name'] == preset_name, _presets)[0]
                                    if _preset['is_available'] is True:
                                        # Preset satisfiable, don't care about osds availability
                                        result_handler.info('Requested preset is available, no longer waiting on \'deliver_messages\'', add_to_result=False)
                                        break
                                    else:
                                        raise ValueError('Requested preset is marked as unavailable. Please check the disk safety')
                                except ValueError:
                                    raise
                                except Exception:
                                    msg = 'Could not query the preset data. Checking the preset might timeout'
                                    result_handler.warning(msg)
                                    cls.logger.exception(msg)
                                    # Sleep for syncing purposes
                                    time.sleep(1)
                        result_handler.success('Namespace successfully created on proxy {0} with preset {1}!'.format(service.name, preset_name),
                                               code=ErrorCodes.proxy_namespace_create)
                        namespace_info = AlbaCLI.run(command='show-namespace', config=abm_config, extra_params=[namespace_key])
                        ExtensionsToolbox.verify_required_params(required_params=namespace_params, actual_params=namespace_info)
                        result_handler.success('Namespace successfully fetched on proxy {0} with preset {1}!'.format(service.name, preset_name),
                                               code=ErrorCodes.proxy_namespace_fetch)

                        # Put test object to given dir
                        with open(AlbaHealthCheck.TEMP_FILE_LOC, 'wb') as output_file:
                            output_file.write(os.urandom(AlbaHealthCheck.TEMP_FILE_SIZE))
                        AlbaCLI.run(command='proxy-upload-object',
                                    named_params={'host': ip, 'port': service.ports[0]},
                                    extra_params=[namespace_key, AlbaHealthCheck.TEMP_FILE_LOC, object_key])
                        result_handler.success('Successfully uploaded the object to namespace {0}'.format(namespace_key),
                                               code=ErrorCodes.proxy_upload_obj)
                        # download object
                        AlbaCLI.run(command='proxy-download-object',
                                    named_params={'host': ip, 'port': service.ports[0]},
                                    extra_params=[namespace_key, object_key, AlbaHealthCheck.TEMP_FILE_FETCHED_LOC])
                        result_handler.success('Successfully downloaded the object to namespace {0}'.format(namespace_key),
                                               code=ErrorCodes.proxy_download_obj)
                        # check if files exists - issue #57
                        if not(os.path.isfile(AlbaHealthCheck.TEMP_FILE_FETCHED_LOC) and os.path.isfile(AlbaHealthCheck.TEMP_FILE_LOC)):
                            # creation of object failed
                            raise ObjectNotFoundException(ValueError('Creation of object has failed'))
                        hash_original = hashlib.md5(open(AlbaHealthCheck.TEMP_FILE_LOC, 'rb').read()).hexdigest()
                        hash_fetched = hashlib.md5(open(AlbaHealthCheck.TEMP_FILE_FETCHED_LOC, 'rb').read()).hexdigest()

                        if hash_original == hash_fetched:
                            result_handler.success('Fetched object {0} from namespace {1} on proxy {2} with preset {3} matches the created object!'.format(object_key, namespace_key, service.name, preset_name),
                                                   code=ErrorCodes.proxy_verify_obj)
                        else:
                            result_handler.failure('Fetched object {0} from namespace {1} on proxy {2} with preset {3} does not match the created object!'.format(object_key, namespace_key, service.name, preset_name),
                                                   code=ErrorCodes.proxy_verify_obj_fail)

                    except ValueError:
                        result_handler.failure('The preset is not available for use')
                    except ObjectNotFoundException as ex:
                        amount_of_presets_not_working.append(preset_name)
                        result_handler.failure('Failed to put object on namespace {0} on proxy {1} with preset {2}. Got error {3}'.format(namespace_key, service.name, preset_name, ex))
                    except AlbaTimeOutException as ex:
                        result_handler.failure(str(ex))
                    except AlbaException as ex:
                        code = ErrorCodes.alba_cmd_fail
                        if ex.alba_command == 'proxy-create-namespace':
                            result_handler.failure('Create namespace has failed with {0} on namespace {1} with proxy {2} with preset {3}'.format(str(ex), namespace_key, service.name, preset_name),
                                                   code=code)
                        elif ex.alba_command == 'show-namespace':
                            result_handler.failure('Show namespace has failed with {0} on namespace {1} with proxy {2} with preset {3}'.format(str(ex), namespace_key, service.name, preset_name),
                                                   code=code)
                        elif ex.alba_command == 'proxy-upload-object':
                            result_handler.failure('Uploading the object has failed with {0} on namespace {1} with proxy {2} with preset {3}'.format(str(ex), namespace_key, service.name, preset_name),
                                                   code=code)
                        elif ex.alba_command == 'proxy-download-object':
                            result_handler.failure('Downloading the object has failed with {0} on namespace {1} with proxy {2} with preset {3}'.format(str(ex), namespace_key, service.name, preset_name),
                                                   code=code)
                    finally:
                        # Delete the created namespace and preset
                        subprocess.call(['rm', str(AlbaHealthCheck.TEMP_FILE_LOC)], stdout=fnull, stderr=subprocess.STDOUT)
                        subprocess.call(['rm', str(AlbaHealthCheck.TEMP_FILE_FETCHED_LOC)], stdout=fnull, stderr=subprocess.STDOUT)
                        try:
                            namespaces = AlbaCLI.run(command='list-namespaces', config=abm_config)
                            namespaces_to_remove = []
                            proxy_named_params = {'host': ip, 'port': service.ports[0]}
                            for namespace in namespaces:
                                if namespace['name'].startswith(namespace_key_prefix):
                                    namespaces_to_remove.append(namespace['name'])
                            for namespace_name in namespaces_to_remove:
                                if namespace_name == namespace_key:
                                    result_handler.info('Deleting namespace {0}.'.format(namespace_name))
                                else:
                                    result_handler.warning('Deleting namespace {0} which was leftover from a previous run.'.format(namespace_name))

                                AlbaCLI.run(command='proxy-delete-namespace',
                                            named_params=proxy_named_params,
                                            extra_params=[namespace_name])

                                namespace_delete_start = time.time()
                                while True:
                                    try:
                                        AlbaCLI.run(command='show-namespace', config=abm_config, extra_params=[namespace_name])  # Will fail if the namespace does not exist
                                    except AlbaException:
                                        result_handler.success('Namespace {0} successfully removed.'.format(namespace_name))
                                        break
                                    if time.time() - namespace_delete_start > AlbaHealthCheck.NAMESPACE_TIMEOUT:
                                        raise AlbaTimeOutException('Delete namespace has timed out after {0}s'.format(time.time() - namespace_delete_start), 'show-namespace')

                                # be tidy, and make the proxy forget the namespace
                                try:
                                    AlbaCLI.run(command='proxy-statistics',
                                                named_params=proxy_named_params,
                                                extra_params=['--forget', namespace_name])
                                except:
                                    result_handler.warning('Failed to make proxy forget namespace {0}.'.format(namespace_name))
                        except AlbaException as ex:
                            if ex.alba_command == 'list-namespaces':
                                result_handler.failure(
                                    'List namespaces has failed with {0} on namespace {1} with proxy {2} with preset {3}'.format(
                                        str(ex), namespace_key, service.name, preset_name))
                            elif ex.alba_command == 'proxy-delete-namespace':
                                result_handler.failure(
                                    'Delete namespace has failed with {0} on namespace {1} with proxy {2} with preset {3}'.format(
                                        str(ex), namespace_key, service.name, preset_name))

            except subprocess.CalledProcessError as ex:
                # this should stay for the deletion of the remaining files
                amount_of_presets_not_working.append(service.name)
                result_handler.failure('Proxy {0} has some problems. Got {1} as error'.format(service.name, ex),
                                       code=ErrorCodes.proxy_problems)

            except ConfigNotMatchedException as ex:
                amount_of_presets_not_working.append(service.name)
                result_handler.failure('Proxy {0} has some problems. Got {1} as error'.format(service.name, ex),
                                       code=ErrorCodes.proxy_problems)
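
The namespace readiness loop above boils down to requiring every OSD in the `list-ns-osds` output to report 'Active'; a condensed sketch using the example output quoted in the code:

list_ns_osds_output = [[0, [u'Active']], [3, [u'Active']]]  # Example output, as noted above
namespace_ready = all(osd_info[1][0] == 'Active' for osd_info in list_ns_osds_output)
assert namespace_ready is True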
Code Example #31
    def add_vpool(cls,
                  vpool_name,
                  vpool_details,
                  storagerouter_ip,
                  proxy_amount=2,
                  timeout=ADD_VPOOL_TIMEOUT,
                  *args,
                  **kwargs):
        """
        Adds a VPool to a storagerouter

        :param vpool_name: name of the new vpool
        :type vpool_name: str
        :param vpool_details: dictionary with storagedriver settings
        :type vpool_details: dict
        :param storagerouter_ip: ip of the storagerouter to add the vpool to
        :type storagerouter_ip: str
        :param proxy_amount: amount of proxies for this vpool
        :type proxy_amount: int
        :param timeout: specify a timeout
        :type timeout: int
        :return: (storagerouter_ip, vpool_mountpoint)
        :rtype: tuple
        """

        # Build ADD_VPOOL parameters
        call_parameters = {
            'vpool_name': vpool_name,
            'backend_info': {
                'alba_backend_guid': BackendHelper.get_albabackend_by_name(vpool_details['backend_name']).guid,
                'preset': vpool_details['preset']
            },
            'connection_info': {'host': '', 'port': '', 'client_id': '', 'client_secret': ''},
            'storage_ip': vpool_details['storage_ip'],
            'storagerouter_ip': storagerouter_ip,
            'writecache_size': int(vpool_details['storagedriver']['global_write_buffer']),
            'fragment_cache_on_read': vpool_details['fragment_cache']['strategy']['cache_on_read'],
            'fragment_cache_on_write': vpool_details['fragment_cache']['strategy']['cache_on_write'],
            'config_params': {
                'dtl_mode': vpool_details['storagedriver']['dtl_mode'],
                'sco_size': int(vpool_details['storagedriver']['sco_size']),
                'cluster_size': int(vpool_details['storagedriver']['cluster_size']),
                'write_buffer': int(vpool_details['storagedriver']['volume_write_buffer']),
                'dtl_transport': vpool_details['storagedriver']['dtl_transport']
            },
            'parallelism': {'proxies': proxy_amount}
        }
        api_data = {'call_parameters': call_parameters}

        # Setting for mds_safety
        if vpool_details.get('mds_safety') is not None:
            call_parameters['mds_config_params'] = {
                'mds_safety': vpool_details['mds_safety']
            }

        # Setting possible alba accelerated alba
        if vpool_details['fragment_cache']['location'] == 'backend':
            call_parameters['backend_info_aa'] = {
                'alba_backend_guid': BackendHelper.get_albabackend_by_name(vpool_details['fragment_cache']['backend']['name']).guid,
                'preset': vpool_details['fragment_cache']['backend']['preset']
            }
            call_parameters['connection_info_aa'] = {'host': '', 'port': '', 'client_id': '', 'client_secret': ''}
        elif vpool_details['fragment_cache']['location'] == 'disk':
            pass
        else:
            error_msg = 'Wrong `fragment_cache->location` in vPool configuration, it should be `disk` or `backend`'
            VPoolSetup.LOGGER.error(error_msg)
            raise RuntimeError(error_msg)

        # Optional param
        if vpool_details.get('block_cache') is not None:
            call_parameters['block_cache_on_read'] = vpool_details['block_cache']['strategy']['cache_on_read']
            call_parameters['block_cache_on_write'] = vpool_details['block_cache']['strategy']['cache_on_write']
            if vpool_details['block_cache']['location'] == 'backend':
                call_parameters['backend_info_bc'] = {
                    'alba_backend_guid': BackendHelper.get_albabackend_by_name(vpool_details['block_cache']['backend']['name']).guid,
                    'preset': vpool_details['block_cache']['backend']['preset']
                }
                call_parameters['connection_info_bc'] = {'host': '', 'port': '', 'client_id': '', 'client_secret': ''}
            elif vpool_details['block_cache']['location'] == 'disk':  # Ignore disk
                pass
            else:
                # TODO: has to be removed for the development version
                error_msg = 'Wrong `block_cache->location` in vPool configuration, it should be `disk` or `backend`'
                VPoolSetup.LOGGER.error(error_msg)
                raise RuntimeError(error_msg)

        task_guid = cls.api.post(
            api='/storagerouters/{0}/add_vpool/'.format(StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid),
            data=api_data)
        task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
        if not task_result[0]:
            error_msg = 'vPool {0} has failed to create on storagerouter {1} because: {2}'.format(
                vpool_name, storagerouter_ip, task_result[1])
            VPoolSetup.LOGGER.error(error_msg)
            raise RuntimeError(error_msg)
        else:
            VPoolSetup.LOGGER.info(
                'Creation of vPool `{0}` should have succeeded on storagerouter `{1}`'
                .format(vpool_name, storagerouter_ip))

        # Settings volumedriver
        storagedriver_config = vpool_details.get('storagedriver')
        if storagedriver_config is not None:
            ExtensionsToolbox.verify_required_params(
                VPoolSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
            VPoolSetup.LOGGER.info(
                'Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'
                .format(vpool_name, storagerouter_ip))
            vpool = VPoolHelper.get_vpool_by_name(vpool_name)
            storagedrivers = [sd for sd in vpool.storagedrivers
                              if sd.storagerouter.ip == storagerouter_ip]
            if not storagedrivers:
                error_msg = 'Unable to find the storagedriver of vPool {0} on storagerouter {1}'.format(
                    vpool_name, storagerouter_ip)
                raise RuntimeError(error_msg)
            storagedriver = storagedrivers[0]
            StoragedriverHelper.change_config(storagedriver,
                                              storagedriver_config)
            VPoolSetup.LOGGER.info(
                'Updating volumedriver config of vPool `{0}` should have succeeded on storagerouter `{1}`'
                .format(vpool_name, storagerouter_ip))

        return storagerouter_ip, '/mnt/{0}'.format(vpool_name)
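
A hypothetical invocation of the call above (names, IPs and sizes assumed), showing the minimal vpool_details keys the method reads; this is a sketch only, since it requires a running test framework:

vpool_details = {
    'backend_name': 'mybackend',
    'preset': 'default',
    'storage_ip': '10.100.1.2',
    'storagedriver': {'global_write_buffer': 128,
                      'dtl_mode': 'a_sync',
                      'sco_size': 4,
                      'cluster_size': 4,
                      'volume_write_buffer': 512,
                      'dtl_transport': 'tcp'},
    'fragment_cache': {'location': 'disk',
                       'strategy': {'cache_on_read': True, 'cache_on_write': True}}
}
storagerouter_ip, mountpoint = VPoolSetup.add_vpool(vpool_name='myvpool',
                                                    vpool_details=vpool_details,
                                                    storagerouter_ip='10.100.1.2')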