Example #1
 def remove(license_guid):
     """
     Removes a license
     """
     clients = {}
     storagerouters = StorageRouterList.get_storagerouters()
     try:
         for storagerouter in storagerouters:
             clients[storagerouter] = SSHClient(storagerouter.ip)
     except UnableToConnectException:
         raise RuntimeError('Not all StorageRouters are reachable')
     lic = License(license_guid)
     if lic.can_remove is True:
         remove_functions = Toolbox.fetch_hooks(
             'license', '{0}.remove'.format(lic.component))
         result = remove_functions[0](component=lic.component,
                                      data=lic.data,
                                      valid_until=lic.valid_until,
                                      signature=lic.signature)
         if result is True:
             lic.delete()
             license_contents = []
             for lic in LicenseList.get_licenses():
                 license_contents.append(lic.hash)
             for storagerouter in storagerouters:
                 client = clients[storagerouter]
                 client.file_write(
                     '/opt/OpenvStorage/config/licenses',
                     '{0}\n'.format('\n'.join(license_contents)))
         return result
     return None
Example #2
 def _configure_arakoon_to_volumedriver(offline_node_ips=None):
     print 'Update existing vPools'
     logger.info('Update existing vPools')
     if offline_node_ips is None:
         offline_node_ips = []
     for storagerouter in StorageRouterList.get_storagerouters():
         config = ArakoonClusterConfig('voldrv')
         config.load_config()
         arakoon_nodes = []
         for node in config.nodes:
             arakoon_nodes.append({'host': node.ip,
                                   'port': node.client_port,
                                   'node_id': node.name})
         with Remote(storagerouter.ip, [os, RawConfigParser, EtcdConfiguration, StorageDriverConfiguration], 'ovs') as remote:
             configuration_dir = '{0}/storagedriver/storagedriver'.format(EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
             if not remote.os.path.exists(configuration_dir):
                 remote.os.makedirs(configuration_dir)
             for json_file in remote.os.listdir(configuration_dir):
                 vpool_name = json_file.replace('.json', '')
                 if json_file.endswith('.json'):
                     if remote.os.path.exists('{0}/{1}.cfg'.format(configuration_dir, vpool_name)):
                         continue  # There's also a .cfg file, so this is an alba_proxy configuration file
                     storagedriver_config = remote.StorageDriverConfiguration('storagedriver', vpool_name)
                     storagedriver_config.load()
                     storagedriver_config.configure_volume_registry(vregistry_arakoon_cluster_id='voldrv',
                                                                    vregistry_arakoon_cluster_nodes=arakoon_nodes)
                     storagedriver_config.configure_distributed_lock_store(dls_type='Arakoon',
                                                                           dls_arakoon_cluster_id='voldrv',
                                                                           dls_arakoon_cluster_nodes=arakoon_nodes)
                     storagedriver_config.save(reload_config=True)
Example #3
 def remove(license_guid):
     """
     Removes a license
     """
     clients = {}
     storagerouters = StorageRouterList.get_storagerouters()
     try:
         for storagerouter in storagerouters:
             clients[storagerouter] = SSHClient(storagerouter.ip)
     except UnableToConnectException:
         raise RuntimeError('Not all StorageRouters are reachable')
     lic = License(license_guid)
     if lic.can_remove is True:
         remove_functions = Toolbox.fetch_hooks('license', '{0}.remove'.format(lic.component))
         result = remove_functions[0](component=lic.component, data=lic.data, valid_until=lic.valid_until, signature=lic.signature)
         if result is True:
             lic.delete()
             license_contents = []
             for lic in LicenseList.get_licenses():
                 license_contents.append(lic.hash)
             for storagerouter in storagerouters:
                 client = clients[storagerouter]
                 client.file_write('/opt/OpenvStorage/config/licenses', '{0}\n'.format('\n'.join(license_contents)))
         return result
     return None
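
A pattern that recurs throughout these examples: enumerate all StorageRouters, open an SSHClient to each, and fail fast when any node is unreachable. A minimal sketch of just that pattern, using only names already shown in the examples above (this helper is illustrative, not part of the Open vStorage codebase):

 def build_clients_for_all_storagerouters():
     """
     Map every StorageRouter to an SSHClient, failing fast when any node is unreachable
     """
     clients = {}
     for storagerouter in StorageRouterList.get_storagerouters():
         try:
             clients[storagerouter] = SSHClient(storagerouter.ip)
         except UnableToConnectException:
             raise RuntimeError('Not all StorageRouters are reachable')
     return clients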
Example #4
def getVPoolByIPandPort(ip, port):
    for storagerouter in StorageRouterList.get_storagerouters():
        for storagedriver in storagerouter.storagedrivers:
            if storagedriver.ports["edge"] == port and storagedriver.storage_ip == ip:
                return storagedriver.vpool
    raise RuntimeError("Could not find vpool for {}:{}".format(ip, port))
Example #5
 def print_current_mds_layout():
     """
     Prints the current MDS layout
     """
     output = ['',
               'Open vStorage - MDS debug information',
               '=====================================',
               'timestamp: {0}'.format(time.time()),
               '']
     for storagerouter in StorageRouterList.get_storagerouters():
         output.append('+ {0} ({1})'.format(storagerouter.name, storagerouter.ip))
         vpools = set(sd.vpool for sd in storagerouter.storagedrivers)
         for vpool in vpools:
             output.append('  + {0}'.format(vpool.name))
             for mds_service in vpool.mds_services:
                 if mds_service.service.storagerouter_guid == storagerouter.guid:
                     masters, slaves = 0, 0
                     for junction in mds_service.vdisks:
                         if junction.is_master:
                             masters += 1
                         else:
                             slaves += 1
                     capacity = mds_service.capacity
                     if capacity == -1:
                         capacity = 'infinite'
                     load, _ = MDSServiceController.get_mds_load(mds_service)
                     if load == float('inf'):
                         load = 'infinite'
                     else:
                         load = '{0}%'.format(round(load, 2))
                     output.append('    + {0} - port {1} - {2} master(s), {3} slave(s) - capacity: {4}, load: {5}'.format(
                         mds_service.number, mds_service.service.ports[0], masters, slaves, capacity, load
                     ))
     print '\n'.join(output)
Example #6
 def list(self):
     """
     Overview of all StorageRouters
     :return: List of StorageRouters
     :rtype: list[ovs.dal.hybrids.storagerouter.StorageRouter]
     """
     return StorageRouterList.get_storagerouters()
Example #7
    def _run_and_validate_dtl_checkup(self, vdisk, validations):
        """
        Execute the DTL checkup for a vDisk and validate the settings afterwards
        """
        single_node = len(StorageRouterList.get_storagerouters()) == 1
        VDiskController.dtl_checkup(vdisk_guid=vdisk.guid)
        config = vdisk.storagedriver_client.get_dtl_config(vdisk.volume_id)
        config_mode = vdisk.storagedriver_client.get_dtl_config_mode(vdisk.volume_id)
        msg = '{0} node - {{0}} - Actual: {{1}} - Expected: {{2}}'.format('Single' if single_node is True else 'Multi')

        validations.append({'key': 'config_mode', 'value': DTLConfigMode.MANUAL})
        for validation in validations:
            key = validation['key']
            value = validation['value']
            if key == 'config':
                actual_value = config
            elif key == 'host':
                actual_value = config.host
            elif key == 'port':
                actual_value = config.port
            elif key == 'mode':
                actual_value = config.mode
            else:
                actual_value = config_mode

            if isinstance(value, list):
                self.assertTrue(expr=actual_value in value,
                                msg=msg.format(key.capitalize(), actual_value, ', '.join(value)))
            else:
                self.assertEqual(first=actual_value,
                                 second=value,
                                 msg=msg.format(key.capitalize(), actual_value, value))
        return config
Example #8
    def pulse():
        """
        Update the heartbeats for the current StorageRouters
        :return: None
        """
        logger = Logger('extensions-generic')
        machine_id = System.get_my_machine_id()
        current_time = int(time.time())

        routers = StorageRouterList.get_storagerouters()
        for node in routers:
            if node.machine_id == machine_id:
                with volatile_mutex('storagerouter_heartbeat_{0}'.format(node.guid)):
                    node_save = StorageRouter(node.guid)
                    node_save.heartbeats['process'] = current_time
                    node_save.save()
                StorageRouterController.ping.s(node.guid, current_time).apply_async(routing_key='sr.{0}'.format(machine_id))
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if node.heartbeats and 'process' in node.heartbeats:
                        if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                            check_output("/usr/sbin/arp -d '{0}'".format(node.name.replace(r"'", r"'\''")), shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
Example #9
    def pulse():
        """
        Update the heartbeats for the current StorageRouters
        :return: None
        """
        logger = LogHandler.get('extensions', name='heartbeat')
        machine_id = System.get_my_machine_id()
        current_time = int(time.time())

        routers = StorageRouterList.get_storagerouters()
        for node in routers:
            if node.machine_id == machine_id:
                with volatile_mutex('storagerouter_heartbeat_{0}'.format(node.guid)):
                    node_save = StorageRouter(node.guid)
                    node_save.heartbeats['process'] = current_time
                    node_save.save()
                StorageRouterController.ping.s(node.guid, current_time).apply_async(routing_key='sr.{0}'.format(machine_id))
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if node.heartbeats and 'process' in node.heartbeats:
                        if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                            check_output("/usr/sbin/arp -d '{0}'".format(node.name.replace(r"'", r"'\''")), shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
Example #10
 def _configure_arakoon_to_volumedriver():
     print 'Update existing vPools'
     logger.info('Update existing vPools')
     for storagerouter in StorageRouterList.get_storagerouters():
         with Remote(storagerouter.ip, [os, RawConfigParser, Configuration, StorageDriverConfiguration, ArakoonManagementEx], 'ovs') as remote:
             arakoon_cluster_config = remote.ArakoonManagementEx().getCluster('voldrv').getClientConfig()
             arakoon_nodes = []
             for node_id, node_config in arakoon_cluster_config.iteritems():
                 arakoon_nodes.append({'host': node_config[0][0],
                                       'port': node_config[1],
                                       'node_id': node_id})
             configuration_dir = '{0}/storagedriver/storagedriver'.format(
                 remote.Configuration.get('ovs.core.cfgdir'))
             if not remote.os.path.exists(configuration_dir):
                 remote.os.makedirs(configuration_dir)
             for json_file in remote.os.listdir(configuration_dir):
                 vpool_name = json_file.replace('.json', '')
                 if json_file.endswith('.json'):
                     if remote.os.path.exists('{0}/{1}.cfg'.format(configuration_dir, vpool_name)):
                         continue  # There's also a .cfg file, so this is an alba_proxy configuration file
                     storagedriver_config = remote.StorageDriverConfiguration('storagedriver', vpool_name)
                     storagedriver_config.load()
                     storagedriver_config.configure_volume_registry(vregistry_arakoon_cluster_id='voldrv',
                                                                    vregistry_arakoon_cluster_nodes=arakoon_nodes)
                     storagedriver_config.configure_distributed_lock_store(dls_type='Arakoon',
                                                                           dls_arakoon_cluster_id='voldrv',
                                                                           dls_arakoon_cluster_nodes=arakoon_nodes)
                     storagedriver_config.save()
Example #11
    def _dtl_status(self):
        """
        Retrieve the DTL status for a vDisk
        """
        sd_status = self.info.get('failover_mode', 'UNKNOWN').lower()
        if sd_status == '':
            sd_status = 'unknown'
        if sd_status != 'ok_standalone':
            return sd_status

        # Verify whether 'ok_standalone' is the correct status for this vDisk
        vpool_dtl = self.vpool.configuration['dtl_enabled']
        if self.has_manual_dtl is True or vpool_dtl is False:
            return sd_status

        domains = []
        possible_dtl_targets = set()
        for sr in StorageRouterList.get_storagerouters():
            if sr.guid == self.storagerouter_guid:
                domains = [junction.domain for junction in sr.domains]
            elif len(sr.storagedrivers) > 0:
                possible_dtl_targets.add(sr)

        if len(domains) > 0:
            possible_dtl_targets = set()
            for domain in domains:
                possible_dtl_targets.update(StorageRouterList.get_primary_storagerouters_for_domain(domain))

        if len(possible_dtl_targets) == 0:
            return sd_status
        return 'checkup_required'
Example #12
 def _configure_arakoon_to_volumedriver():
     print "Update existing vPools"
     logger.info("Update existing vPools")
     for storagerouter in StorageRouterList.get_storagerouters():
         config = ArakoonClusterConfig("voldrv")
         config.load_config()
         arakoon_nodes = []
         for node in config.nodes:
             arakoon_nodes.append({"host": node.ip, "port": node.client_port, "node_id": node.name})
         with Remote(
             storagerouter.ip, [os, RawConfigParser, EtcdConfiguration, StorageDriverConfiguration], "ovs"
         ) as remote:
             configuration_dir = "{0}/storagedriver/storagedriver".format(
                 EtcdConfiguration.get("/ovs/framework/paths|cfgdir")
             )
             if not remote.os.path.exists(configuration_dir):
                 remote.os.makedirs(configuration_dir)
             for json_file in remote.os.listdir(configuration_dir):
                 vpool_name = json_file.replace(".json", "")
                 if json_file.endswith(".json"):
                     if remote.os.path.exists("{0}/{1}.cfg".format(configuration_dir, vpool_name)):
                         continue  # There's also a .cfg file, so this is an alba_proxy configuration file
                     storagedriver_config = remote.StorageDriverConfiguration("storagedriver", vpool_name)
                     storagedriver_config.load()
                     storagedriver_config.configure_volume_registry(
                         vregistry_arakoon_cluster_id="voldrv", vregistry_arakoon_cluster_nodes=arakoon_nodes
                     )
                     storagedriver_config.configure_distributed_lock_store(
                         dls_type="Arakoon", dls_arakoon_cluster_id="voldrv", dls_arakoon_cluster_nodes=arakoon_nodes
                     )
                     storagedriver_config.save(reload_config=True)
Example #13
 def on_demote(cluster_ip, master_ip):
     """
     Handles the demote for the StorageDrivers
     :param cluster_ip: IP of the node to demote
     :param master_ip: IP of the master node
     """
     client = SSHClient(cluster_ip, username='******')
     servicetype = ServiceTypeList.get_by_name('Arakoon')
     current_service = None
     remaining_ips = []
     for service in servicetype.services:
         if service.name == 'arakoon-voldrv':
             if service.storagerouter.ip == cluster_ip:
                 current_service = service
             else:
                 remaining_ips.append(service.storagerouter.ip)
     if current_service is not None:
         print '* Shrink StorageDriver cluster'
         ArakoonInstaller.shrink_cluster(master_ip, cluster_ip, 'voldrv')
         if ServiceManager.has_service(current_service.name, client=client) is True:
             ServiceManager.stop_service(current_service.name, client=client)
             ServiceManager.remove_service(current_service.name, client=client)
         ArakoonInstaller.restart_cluster_remove('voldrv', remaining_ips)
         current_service.delete()
         for storagerouter in StorageRouterList.get_storagerouters():
             ArakoonInstaller.deploy_to_slave(master_ip, storagerouter.ip, 'voldrv')
         StorageDriverController._configure_arakoon_to_volumedriver()
Example #15
    def pulse():
        """
        Update the heartbeats for all Storage Routers
        :return: None
        """
        logger = LogHandler.get('extensions', name='heartbeat')

        current_time = int(time.time())
        machine_id = System.get_my_machine_id()
        amqp = '{0}://{1}:{2}@{3}//'.format(EtcdConfiguration.get('/ovs/framework/messagequeue|protocol'),
                                            EtcdConfiguration.get('/ovs/framework/messagequeue|user'),
                                            EtcdConfiguration.get('/ovs/framework/messagequeue|password'),
                                            EtcdConfiguration.get('/ovs/framework/hosts/{0}/ip'.format(machine_id)))

        celery_path = OSManager.get_path('celery')
        worker_states = check_output("{0} inspect ping -b {1} --timeout=5 2> /dev/null | grep OK | perl -pe 's/\x1b\[[0-9;]*m//g' || true".format(celery_path, amqp), shell=True)
        routers = StorageRouterList.get_storagerouters()
        for node in routers:
            if node.heartbeats is None:
                node.heartbeats = {}
            if 'celery@{0}: OK'.format(node.name) in worker_states:
                node.heartbeats['celery'] = current_time
            if node.machine_id == machine_id:
                node.heartbeats['process'] = current_time
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if node.heartbeats and 'process' in node.heartbeats:
                        if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                            check_output("/usr/sbin/arp -d {0}".format(node.name), shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
            node.save()
Example #17
    def get_stats_storagerouters(cls):
        """
        Retrieve amount of vDisks and some read/write statistics for all StorageRouters
        """
        if cls._config is None:
            cls.validate_and_retrieve_config()

        stats = []
        errors = False
        environment = cls._config['environment']
        for storagerouter in StorageRouterList.get_storagerouters():
            if len(storagerouter.storagedrivers) == 0:
                cls._logger.debug('StorageRouter {0} does not have any StorageDrivers linked to it, skipping'.format(storagerouter.name))
                continue
            try:
                statistics = storagerouter.statistics
                stats.append({'tags': {'environment': environment,
                                       'storagerouter_name': storagerouter.name},
                              'fields': {'read_byte': statistics['data_read'],
                                         'write_byte': statistics['data_written'],
                                         'operations': statistics['4k_operations'],
                                         'amount_vdisks': len(storagerouter.vdisks_guids),
                                         'read_operations': statistics['4k_read_operations'],
                                         'write_operations': statistics['4k_write_operations']},
                              'measurement': 'storagerouter'})
            except Exception:
                errors = True
                cls._logger.exception('Retrieving statistics for StorageRouter {0} failed'.format(storagerouter.name))
        return errors, stats
Example #18
 def process_response(self, request, response):
     """
     Processes responses
     """
     _ = self
     # Timings
     if isinstance(response, OVSResponse):
         if hasattr(request, '_entry_time'):
             # noinspection PyProtectedMember
             response.timings['total'] = [time.time() - request._entry_time, 'Total']
         response.build_timings()
     # Process CORS responses
     if 'HTTP_ORIGIN' in request.META:
         path = request.path
         storagerouters = StorageRouterList.get_storagerouters()
         allowed_origins = ['https://{0}'.format(storagerouter.ip) for storagerouter in storagerouters]
         if request.META['HTTP_ORIGIN'] in allowed_origins or '/swagger.json' in path:
             response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']
             response['Access-Control-Allow-Headers'] = 'x-requested-with, content-type, accept, origin, authorization'
             response['Access-Control-Allow-Methods'] = 'GET, POST, PUT, PATCH, DELETE, OPTIONS'
     return response
Example #19
 def test_ssh_connectivity():
     """
     Validates whether all nodes can SSH into each other
     """
     MonitoringController._logger.info('Starting SSH connectivity test...')
     ips = [sr.ip for sr in StorageRouterList.get_storagerouters()]
     for ip in ips:
         for primary_username in ['root', 'ovs']:
             try:
                 with remote(ip, [SSHClient], username=primary_username) as rem:
                     for local_ip in ips:
                         for username in ['root', 'ovs']:
                             message = '* Connection from {0}@{1} to {2}@{3}... {{0}}'.format(primary_username, ip, username, local_ip)
                             try:
                                 c = rem.SSHClient(local_ip, username=username)
                                 assert c.run(['whoami']).strip() == username
                                 message = message.format('OK')
                                 logger = MonitoringController._logger.info
                             except Exception as ex:
                                 message = message.format(ex.message)
                                 logger = MonitoringController._logger.error
                             logger(message)
             except Exception as ex:
                 MonitoringController._logger.error('* Could not connect to {0}@{1}: {2}'.format(primary_username, ip, ex.message))
     MonitoringController._logger.info('Finished')
Example #20
    def get_storagerouters():
        """
        Fetch the storagerouters

        :return: list with storagerouters
        :rtype: list
        """
        return StorageRouterList.get_storagerouters()
Example #21
 def get_available_actions(self):
     """
     Gets a list of all available actions
     """
     actions = []
     storagerouters = StorageRouterList.get_storagerouters()
     if len(storagerouters) > 1:
         actions.append('MOVE_AWAY')
     return Response(actions, status=status.HTTP_200_OK)
Example #23
 def get_available_actions(self):
     """
     Gets a list of all available actions
     """
     actions = []
     storagerouters = StorageRouterList.get_storagerouters()
     if len(storagerouters) > 1:
         actions.append('MOVE_AWAY')
     return actions
Example #25
 def apply(license_string):
     """
      Applies a license. It applies as many licenses as possible; invalid licenses do not cause a
      failure, they are simply skipped.
     """
     try:
         clients = {}
         storagerouters = StorageRouterList.get_storagerouters()
         try:
             for storagerouter in storagerouters:
                 clients[storagerouter] = SSHClient(storagerouter.ip)
         except UnableToConnectException:
             raise RuntimeError('Not all StorageRouters are reachable')
         data = LicenseController._decode(license_string)
         for component in data:
             cdata = data[component]
             name = cdata['name']
             data = cdata['data']
             token = cdata['token']
             valid_until = float(cdata['valid_until']) if 'valid_until' in cdata else None
             if valid_until is not None and valid_until <= time.time():
                 continue
             signature = cdata['signature'] if 'signature' in cdata else None
             validate_functions = Toolbox.fetch_hooks('license', '{0}.validate'.format(component))
             apply_functions = Toolbox.fetch_hooks('license', '{0}.apply'.format(component))
             if len(validate_functions) == 1 and len(apply_functions) == 1:
                 valid, metadata = validate_functions[0](component=component, data=data, signature=signature)
                 if valid is True:
                     success = apply_functions[0](component=component, data=data, signature=signature)
                     if success is True:
                         license_object = LicenseList.get_by_component(component)
                         if license_object is None:
                             license_object = License()
                         license_object.component = component
                         license_object.name = name
                         license_object.token = token
                         license_object.data = data
                         license_object.valid_until = valid_until
                         license_object.signature = signature
                         license_object.save()
         license_contents = []
         for lic in LicenseList.get_licenses():
             license_contents.append(lic.hash)
         for storagerouter in storagerouters:
             client = clients[storagerouter]
             client.file_write('/opt/OpenvStorage/config/licenses', '{0}\n'.format('\n'.join(license_contents)))
     except Exception, ex:
         logger.exception('Error applying license: {0}'.format(ex))
         return None
Example #26
 def list(self, query=None):
     """
     Overview of all Storage Routers
     """
     if query is None:
         return StorageRouterList.get_storagerouters()
     else:
         query = json.loads(query)
         return DataList(StorageRouter, query)
Example #28
 def list(self, query=None):
     """
     Overview of all Storage Routers
     """
     if query is None:
         return StorageRouterList.get_storagerouters()
     else:
         query = json.loads(query)
         query_result = DataList({"object": StorageRouter, "data": DataList.select.GUIDS, "query": query}).data
         return DataObjectList(query_result, StorageRouter)
Example #29
 def list(self, query=None):
     """
     Overview of all Storage Routers
     :param query: A query to filter the StorageRouters
     :type query: DataQuery
     """
     if query is None:
         return StorageRouterList.get_storagerouters()
     else:
         return DataList(StorageRouter, query)
Example #30
    def collapse_arakoon():
        """
        Collapse Arakoon's Tlogs
        :return: None
        """
        from ovs_extensions.generic.toolbox import ExtensionsToolbox

        GenericController._logger.info('Arakoon collapse started')
        cluster_info = []
        storagerouters = StorageRouterList.get_storagerouters()
        if os.environ.get('RUNNING_UNITTESTS') != 'True':
            cluster_info = [('cacc', storagerouters[0])]

        cluster_names = []
        for service in ServiceList.get_services():
            if service.is_internal is True and service.type.name in (ServiceType.SERVICE_TYPES.ARAKOON,
                                                                     ServiceType.SERVICE_TYPES.NS_MGR,
                                                                     ServiceType.SERVICE_TYPES.ALBA_MGR):
                cluster = ExtensionsToolbox.remove_prefix(service.name, 'arakoon-')
                if cluster in cluster_names and cluster not in [ARAKOON_NAME, ARAKOON_NAME_UNITTEST]:
                    continue
                cluster_names.append(cluster)
                cluster_info.append((cluster, service.storagerouter))
        workload = {}
        cluster_config_map = {}
        for cluster, storagerouter in cluster_info:
            GenericController._logger.debug('  Collecting info for cluster {0}'.format(cluster))
            ip = storagerouter.ip if cluster in [ARAKOON_NAME, ARAKOON_NAME_UNITTEST] else None
            try:
                config = ArakoonClusterConfig(cluster_id=cluster, source_ip=ip)
                cluster_config_map[cluster] = config
            except:
                GenericController._logger.exception('  Retrieving cluster information on {0} for {1} failed'.format(storagerouter.ip, cluster))
                continue
            for node in config.nodes:
                if node.ip not in workload:
                    workload[node.ip] = {'node_id': node.name,
                                         'clusters': []}
                workload[node.ip]['clusters'].append((cluster, ip))
        for storagerouter in storagerouters:
            try:
                if storagerouter.ip not in workload:
                    continue
                node_workload = workload[storagerouter.ip]
                client = SSHClient(storagerouter)
                for cluster, ip in node_workload['clusters']:
                    try:
                        GenericController._logger.debug('  Collapsing cluster {0} on {1}'.format(cluster, storagerouter.ip))
                        client.run(['arakoon', '--collapse-local', node_workload['node_id'], '2', '-config', cluster_config_map[cluster].external_config_path])
                        GenericController._logger.debug('  Collapsing cluster {0} on {1} completed'.format(cluster, storagerouter.ip))
                    except:
                        GenericController._logger.exception('  Collapsing cluster {0} on {1} failed'.format(cluster, storagerouter.ip))
            except UnableToConnectException:
                GenericController._logger.error('  Could not collapse any cluster on {0} (not reachable)'.format(storagerouter.name))
        GenericController._logger.info('Arakoon collapse finished')
Example #31
    def get(self, request, *args, **kwargs):
        """
        Fetches metadata
        """
        _ = args, kwargs
        data = {'authenticated': False,
                'authentication_state': None,
                'username': None,
                'userguid': None,
                'roles': [],
                'storagerouter_ips': [sr.ip for sr in StorageRouterList.get_storagerouters()],
                'versions': list(settings.VERSION),
                'plugins': {}}
        try:
            # Gather plugin metadata
            plugins = {}
            # - Backends. BackendType plugins must set the has_plugin flag on True
            for backend_type in BackendTypeList.get_backend_types():
                if backend_type.has_plugin is True:
                    if backend_type.code not in plugins:
                        plugins[backend_type.code] = []
                    plugins[backend_type.code] += ['backend', 'gui']
            data['plugins'] = plugins

            # Gather authorization metadata
            if 'HTTP_AUTHORIZATION' not in request.META:
                return HttpResponse, dict(data.items() + {'authentication_state': 'unauthenticated'}.items())
            authorization_type, access_token = request.META['HTTP_AUTHORIZATION'].split(' ')
            if authorization_type != 'Bearer':
                return HttpResponse, dict(data.items() + {'authentication_state': 'invalid_authorization_type'}.items())
            tokens = BearerTokenList.get_by_access_token(access_token)
            if len(tokens) != 1:
                return HttpResponse, dict(data.items() + {'authentication_state': 'invalid_token'}.items())
            token = tokens[0]
            if token.expiration < time.time():
                for junction in token.roles.itersafe():
                    junction.delete()
                token.delete()
                return HttpResponse, dict(data.items() + {'authentication_state': 'token_expired'}.items())

            # Gather user metadata
            user = token.client.user
            if not user.is_active:
                return HttpResponse, dict(data.items() + {'authentication_state': 'inactive_user'}.items())
            roles = [j.role.code for j in token.roles]

            return HttpResponse, dict(data.items() + {'authenticated': True,
                                                      'authentication_state': 'authenticated',
                                                      'username': user.username,
                                                      'userguid': user.guid,
                                                      'roles': roles,
                                                      'plugins': plugins}.items())
        except Exception as ex:
            logger.exception('Unexpected exception: {0}'.format(ex))
            return HttpResponse, dict(data.items() + {'authentication_state': 'unexpected_exception'}.items())
Example #32
    def get_storagerouter_ips():
        """
         Fetch all the ip addresses in this cluster

         :return: list with storagerouter ips
         :rtype: list
         """
        return [
            storagerouter.ip
            for storagerouter in StorageRouterList.get_storagerouters()
        ]
Example #33
 def monitor_mds_layout():
     """
     Prints the current MDS layout
     :return: None
     :rtype: NoneType
     """
     try:
         while True:
             output = ['',
                       'Open vStorage - MDS debug information',
                       '=====================================',
                       'timestamp: {0}'.format(datetime.datetime.now()),
                       '']
             vpools_deployed = False
             for storagerouter in sorted(StorageRouterList.get_storagerouters(), key=lambda k: k.name):
                 vpools = set(sd.vpool for sd in storagerouter.storagedrivers)
                 if len(vpools) > 0:
                     vpools_deployed = True
                     output.append('+ {0} ({1})'.format(storagerouter.name, storagerouter.ip))
                 for vpool in sorted(vpools, key=lambda k: k.name):
                     output.append('  + {0}'.format(vpool.name))
                     for mds_service in sorted(vpool.mds_services, key=lambda k: k.number):
                         if mds_service.service.storagerouter_guid == storagerouter.guid:
                             masters, slaves = 0, 0
                             for junction in mds_service.vdisks:
                                 if junction.is_master:
                                     masters += 1
                                 else:
                                     slaves += 1
                             capacity = mds_service.capacity
                             if capacity == -1:
                                 capacity = 'infinite'
                             load, _ = MDSServiceController.get_mds_load(mds_service)
                             if load == float('inf'):
                                 load = 'infinite'
                             else:
                                 load = '{0}%'.format(round(load, 2))
                             output.append('    + {0} - port {1} - {2} master(s), {3} slave(s) - capacity: {4}, load: {5}'.format(
                                 mds_service.number, mds_service.service.ports[0], masters, slaves, capacity, load))
             if vpools_deployed is False:
                 output.append('No vPools deployed')
             print '\x1b[2J\x1b[H' + '\n'.join(output)
             time.sleep(1)
     except KeyboardInterrupt:
         pass
Example #34
 def get_scrub_storagerouters(self):
     """
     Loads a list of suitable StorageRouters for scrubbing the given vDisk
     """
     storagerouters = []
     for storagerouter in StorageRouterList.get_storagerouters():
         scrub_partitions = storagerouter.partition_config.get(DiskPartition.ROLES.SCRUB, [])
         if len(scrub_partitions) == 0:
             continue
         storagerouters.append(storagerouter)
     return storagerouters
Example #35
    def _remote_stack(self):
        """
        Live list of information about remote linked OSDs of type ALBA BACKEND
        :return: Information about all linked OSDs
        :rtype: dict
        """
        # Import here to prevent from circular references
        from ovs.dal.hybrids.albaosd import AlbaOSD

        def _load_backend_info(_connection_info, _alba_backend_guid):
            client = OVSClient(ip=_connection_info['host'],
                               port=_connection_info['port'],
                               credentials=(_connection_info['username'], _connection_info['password']),
                               version=3)

            try:
                info = client.get('/alba/backends/{0}/'.format(_alba_backend_guid),
                                  params={'contents': 'local_summary'})
                with lock:
                    return_value[_alba_backend_guid].update(info['local_summary'])
            except NotFoundException:
                return_value[_alba_backend_guid]['error'] = 'backend_deleted'
            except ForbiddenException:
                return_value[_alba_backend_guid]['error'] = 'not_allowed'
            except Exception as ex:
                return_value[_alba_backend_guid]['error'] = 'unknown'
                AlbaBackend._logger.exception('Collecting remote ALBA backend information failed with error: {0}'.format(ex))

        # Retrieve local summaries of all related OSDs of type ALBA_BACKEND
        lock = Lock()
        threads = []
        return_value = {}
        cluster_ips = [sr.ip for sr in StorageRouterList.get_storagerouters()]
        for osd in self.osds:
            if osd.osd_type == AlbaOSD.OSD_TYPES.ALBA_BACKEND and osd.metadata is not None:
                backend_info = osd.metadata['backend_info']
                connection_info = osd.metadata['backend_connection_info']
                connection_host = connection_info['host']
                alba_backend_guid = backend_info['linked_guid']
                return_value[alba_backend_guid] = {'name': backend_info['linked_name'],
                                                   'error': '',
                                                   'domain': None if osd.domain is None else {'guid': osd.domain_guid,
                                                                                              'name': osd.domain.name},
                                                   'preset': backend_info['linked_preset'],
                                                   'osd_id': backend_info['linked_alba_id'],
                                                   'local_ip': connection_host in cluster_ips,
                                                   'remote_host': connection_host}
                thread = Thread(target=_load_backend_info, args=(connection_info, alba_backend_guid))
                thread.start()
                threads.append(thread)

        for thread in threads:
            thread.join()
        return return_value
Example #36
 def check_scrub_partition_present():
     """
     Checks whether at least 1 scrub partition is present on any StorageRouter
     :return: True if at least 1 SCRUB role present in the cluster else False
     :rtype: bool
     """
     for storage_router in StorageRouterList.get_storagerouters():
         for disk in storage_router.disks:
             for partition in disk.partitions:
                 if DiskPartition.ROLES.SCRUB in partition.roles:
                     return True
     return False
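
The loop above short-circuits on the first SCRUB role it finds; the same check can be written as a single generator expression. A behaviorally equivalent sketch, using the same names as the example above:

 def check_scrub_partition_present():
     # Sketch: any() short-circuits on the first SCRUB role found,
     # mirroring the early return in the loop version
     return any(DiskPartition.ROLES.SCRUB in partition.roles
                for storage_router in StorageRouterList.get_storagerouters()
                for disk in storage_router.disks
                for partition in disk.partitions)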
Example #37
 def list(self, query=None):
     """
     Overview of all Storage Routers
     """
     if query is None:
         return StorageRouterList.get_storagerouters()
     else:
         query = json.loads(query)
         query_result = DataList({'object': StorageRouter,
                                  'data': DataList.select.GUIDS,
                                  'query': query}).data
         return DataObjectList(query_result, StorageRouter)
Example #38
    def refresh_package_information():
        """
        Retrieve and store the package information of all StorageRouters
        :return: None
        """
        GenericController._logger.info('Updating package information')
        threads = []
        information = {}
        all_storagerouters = StorageRouterList.get_storagerouters()
        for storagerouter in all_storagerouters:
            information[storagerouter.ip] = {}
            for fct in Toolbox.fetch_hooks('update', 'get_package_info_multi'):
                try:
                    # We make use of these clients in Threads --> cached = False
                    client = SSHClient(endpoint=storagerouter,
                                       username='******',
                                       cached=False)
                except UnableToConnectException:
                    information[storagerouter.ip]['errors'] = ['StorageRouter {0} is inaccessible'.format(storagerouter.name)]
                    break
                thread = Thread(target=fct, args=(client, information))
                thread.start()
                threads.append(thread)

        for fct in Toolbox.fetch_hooks('update', 'get_package_info_single'):
            thread = Thread(target=fct, args=(information, ))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        errors = []
        copy_information = copy.deepcopy(information)
        for ip, info in information.iteritems():
            if len(info.get('errors', [])) > 0:
                errors.extend(['{0}: {1}'.format(ip, error) for error in info['errors']])
                copy_information.pop(ip)

        for storagerouter in all_storagerouters:
            info = copy_information.get(storagerouter.ip, {})
            if 'errors' in info:
                info.pop('errors')
            storagerouter.package_information = info
            storagerouter.save()

        if len(errors) > 0:
            errors = [str(error) for error in set(errors)]
            raise Exception(' - {0}'.format('\n - '.join(errors)))
Example #40
    def install_plugins():
        """
        (Re)load plugins
        """
        if ServiceManager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
            # If the watcher is running, 'ovs setup' was executed and we need to restart
            # everything to load the plugin. Otherwise the plugin will be loaded once
            # 'ovs setup' is executed
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            clients = []
            try:
                for storagerouter in StorageRouterList.get_storagerouters():
                    clients.append(SSHClient(storagerouter, username='******'))
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')

            for client in clients:
                for service_name in ['watcher-framework', 'memcached']:
                    ServiceManager.stop_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(service_name, client=client) is False:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError('Could not stop service: {0}'.format(service_name))

            for client in clients:
                for service_name in ['memcached', 'watcher-framework']:
                    ServiceManager.start_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(service_name, client=client) is True:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError('Could not start service: {0}'.format(service_name))

            from ovs.dal.helpers import Migration
            Migration.migrate()

            from ovs.lib.helpers.toolbox import Toolbox
            ip = System.get_my_storagerouter().ip
            functions = Toolbox.fetch_hooks('plugin', 'postinstall')
            for function in functions:
                function(ip=ip)
Example #41
 def build_clients(self):
     # type: () -> Dict[StorageRouter, SSHClient]
     """
     Builds SSHClients towards all StorageRouters
     :return: SSHClient mapped by storagerouter
     :rtype: dict((storagerouter, sshclient))
     """
     clients = {}
     for storagerouter in StorageRouterList.get_storagerouters():
         client = self.build_ssh_client(storagerouter)
         if client is not None:
             clients[storagerouter] = client
     return clients
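
A hypothetical caller for the helper above. build_ssh_client is assumed (given the None check) to swallow connection errors and return None for unreachable nodes; client.run() takes a command list and returns its output, as in the SSH connectivity test of Example #19:

 clients = self.build_clients()
 for storagerouter, client in clients.iteritems():
     # Run a trivial command on every reachable node
     print '{0} -> {1}'.format(storagerouter.name, client.run(['whoami']).strip())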
Example #42
 def get_scrub_storagerouters(self):
     """
     Loads a list of suitable StorageRouters for scrubbing the given vDisk
     :return: A list of StorageRouters which have the SCRUB role
     :rtype: list[ovs.dal.hybrids.storagerouter.StorageRouter]
     """
     storagerouters = []
     for storagerouter in StorageRouterList.get_storagerouters():
         scrub_partitions = storagerouter.partition_config.get(DiskPartition.ROLES.SCRUB, [])
         if len(scrub_partitions) == 0:
             continue
         storagerouters.append(storagerouter)
     return storagerouters
Example #43
    def get_my_storagerouter():
        """
        Returns the StorageRouter of the local machine
        """

        from ovs.dal.hybrids.storagerouter import StorageRouter
        from ovs.dal.lists.storagerouterlist import StorageRouterList

        if not System.my_storagerouter_guid:
            for storagerouter in StorageRouterList.get_storagerouters():
                if storagerouter.machine_id == System.get_my_machine_id():
                    System.my_storagerouter_guid = storagerouter.guid
        return StorageRouter(System.my_storagerouter_guid)
Example #45
def listEdgeclients():
    edgeclients = []
    protocol = getEdgeProtocol()
    for storagerouter in StorageRouterList.get_storagerouters():
        if storagerouter.status == 'FAILURE':
            continue
        for storagedriver in storagerouter.storagedrivers:
            edgeclient = {'vpool': storagedriver.vpool.name,
                          'protocol': protocol,
                          'ip': storagerouter.ip,
                          'port': storagedriver.ports['edge']}
            edgeclients.append(edgeclient)
    return edgeclients
Example #46
 def process_response(self, request, response):
     """
     Processes responses
     """
     _ = self
     # Process CORS responses
     if 'HTTP_ORIGIN' in request.META:
         storagerouters = StorageRouterList.get_storagerouters()
         allowed_origins = ['https://{0}'.format(storagerouter.ip) for storagerouter in storagerouters]
         if request.META['HTTP_ORIGIN'] in allowed_origins:
             response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']
             response['Access-Control-Allow-Headers'] = 'x-requested-with, content-type, accept, origin, authorization'
             response['Access-Control-Allow-Methods'] = 'GET, POST, PUT, PATCH, DELETE, OPTIONS'
     return response
Example #47
    def collapse_arakoon():
        """
        Collapse Arakoon's Tlogs
        :return: None
        """
        ScheduledTaskController._logger.info('Starting arakoon collapse')
        storagerouters = StorageRouterList.get_storagerouters()
        cluster_info = [('cacc', storagerouters[0], True)]
        cluster_names = []
        for service in ServiceList.get_services():
            if service.is_internal is True and service.type.name in (ServiceType.SERVICE_TYPES.ARAKOON,
                                                                     ServiceType.SERVICE_TYPES.NS_MGR,
                                                                     ServiceType.SERVICE_TYPES.ALBA_MGR):
                cluster = service.name.replace('arakoon-', '')
                if cluster in cluster_names:
                    continue
                cluster_names.append(cluster)
                cluster_info.append((cluster, service.storagerouter, False))
        workload = {}
        for cluster, storagerouter, filesystem in cluster_info:
            ScheduledTaskController._logger.debug('  Collecting info for cluster {0}'.format(cluster))
            config = ArakoonClusterConfig(cluster, filesystem=filesystem)
            config.load_config(storagerouter.ip)
            for node in config.nodes:
                if node.ip not in workload:
                    workload[node.ip] = {'node_id': node.name,
                                         'clusters': []}
                workload[node.ip]['clusters'].append((cluster, filesystem))
        for storagerouter in storagerouters:
            try:
                if storagerouter.ip not in workload:
                    continue
                node_workload = workload[storagerouter.ip]
                client = SSHClient(storagerouter)
                for cluster, filesystem in node_workload['clusters']:
                    try:
                        ScheduledTaskController._logger.debug('  Collapsing cluster {0} on {1}'.format(cluster, storagerouter.ip))
                        if filesystem is True:
                            config_path = ArakoonClusterConfig.CONFIG_FILE.format(cluster)
                        else:
                            config_path = Configuration.get_configuration_path(ArakoonClusterConfig.CONFIG_KEY.format(cluster))
                        client.run(['arakoon', '--collapse-local', node_workload['node_id'], '2', '-config', config_path])
                        ScheduledTaskController._logger.info('  Collapsing cluster {0} on {1} completed'.format(cluster, storagerouter.ip))
                    except Exception:
                        ScheduledTaskController._logger.exception('  Collapsing cluster {0} on {1} failed'.format(cluster, storagerouter.ip))
            except UnableToConnectException:
                ScheduledTaskController._logger.error('  Could not collapse any cluster on {0} (not reachable)'.format(storagerouter.name))

        ScheduledTaskController._logger.info('Arakoon collapse finished')
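
The collapse itself is delegated to the arakoon binary over SSH, the literal 2 presumably being the number of tlogs to keep. A small sketch of how that command list is assembled; the node id and config path below are illustrative:

def collapse_command(node_id, config_path, tlogs_to_keep=2):
    # Mirrors the client.run([...]) call above: collapse local tlogs for the
    # given node, keeping `tlogs_to_keep` of them, using the cluster config
    return ['arakoon', '--collapse-local', node_id, str(tlogs_to_keep), '-config', config_path]

print(collapse_command('node_1', '/tmp/voldrv.ini'))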
Example #48
0
 def apply(license_string):
     """
     Applies a license. It applies as many licenses as possible; invalid licenses do not make it
     fail, they are simply skipped.
     """
     try:
         clients = {}
         storagerouters = StorageRouterList.get_storagerouters()
         try:
             for storagerouter in storagerouters:
                 clients[storagerouter] = SSHClient(storagerouter.ip)
         except UnableToConnectException:
             raise RuntimeError('Not all StorageRouters are reachable')
         data = LicenseController._decode(license_string)
         for component in data:
             cdata = data[component]
             name = cdata['name']
             license_data = cdata['data']  # do not rebind the 'data' dict being iterated
             token = cdata['token']
             valid_until = float(cdata['valid_until']) if 'valid_until' in cdata else None
             if valid_until is not None and valid_until <= time.time():
                 continue
             signature = cdata['signature'] if 'signature' in cdata else None
             validate_functions = Toolbox.fetch_hooks('license', '{0}.validate'.format(component))
             apply_functions = Toolbox.fetch_hooks('license', '{0}.apply'.format(component))
             if len(validate_functions) == 1 and len(apply_functions) == 1:
                 valid, metadata = validate_functions[0](component=component, data=license_data, signature=signature)
                 if valid is True:
                     success = apply_functions[0](component=component, data=license_data, signature=signature)
                     if success is True:
                         license_object = LicenseList.get_by_component(component)
                         if license_object is None:
                             license_object = License()
                         license_object.component = component
                         license_object.name = name
                         license_object.token = token
                         license_object.data = license_data
                         license_object.valid_until = valid_until
                         license_object.signature = signature
                         license_object.save()
         license_contents = []
         for lic in LicenseList.get_licenses():
             license_contents.append(lic.hash)
         for storagerouter in storagerouters:
             client = clients[storagerouter]
             client.file_write('/opt/OpenvStorage/config/licenses', '{0}\n'.format('\n'.join(license_contents)))
     except Exception, ex:
         LicenseController._logger.exception('Error applying license: {0}'.format(ex))
         return None
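
The fields read per component (name, data, token, plus optional valid_until and signature) imply a decoded payload shaped roughly like the sketch below; this is inferred from the code above, with made-up values, not the actual output of LicenseController._decode:

decoded = {'alba': {'name': 'ALBA add-on',        # human-readable license name
                    'data': {'nodes': 3},         # component-specific payload
                    'token': 'abc123',            # registration token
                    'valid_until': '1735689600',  # optional, epoch seconds
                    'signature': 'deadbeef'}}     # optional, checked by the component's validate hook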
Example #49
0
    def refresh_package_information():
        """
        Retrieve and store the package information of all StorageRouters
        :return: None
        """
        GenericController._logger.info('Updating package information')
        threads = []
        information = {}
        all_storagerouters = StorageRouterList.get_storagerouters()
        for storagerouter in all_storagerouters:
            information[storagerouter.ip] = {}
            for function in Toolbox.fetch_hooks('update', 'get_package_info_multi'):
                try:
                    # We make use of these clients in Threads --> cached = False
                    client = SSHClient(endpoint=storagerouter, username='******', cached=False)
                except UnableToConnectException:
                    information[storagerouter.ip]['errors'] = ['StorageRouter {0} is inaccessible'.format(storagerouter.name)]
                    break
                thread = Thread(target=function,
                                args=(client, information))
                thread.start()
                threads.append(thread)

        for function in Toolbox.fetch_hooks('update', 'get_package_info_single'):
            thread = Thread(target=function,
                            args=(information,))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        errors = []
        copy_information = copy.deepcopy(information)
        for ip, info in information.iteritems():
            if len(info.get('errors', [])) > 0:
                errors.extend(['{0}: {1}'.format(ip, error) for error in info['errors']])
                copy_information.pop(ip)

        for storagerouter in all_storagerouters:
            info = copy_information.get(storagerouter.ip, {})
            if 'errors' in info:
                info.pop('errors')
            storagerouter.package_information = info
            storagerouter.save()

        if len(errors) > 0:
            errors = [str(error) for error in set(errors)]
            raise Exception(' - {0}'.format('\n - '.join(errors)))
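
Hooks registered under ('update', 'get_package_info_multi') are invoked as function(client, information), each in its own thread. A hedged sketch of what such a hook could look like; the package name, the dpkg-query call and the client.ip attribute are assumptions:

def get_package_info_multi(client, information):
    # Query one installed package version on the remote node and record it
    # under that node's IP in the shared information dict
    version = client.run(['dpkg-query', '--showformat=${Version}', '--show', 'openvstorage'])
    information[client.ip]['framework'] = {'openvstorage': version.strip()}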
Example #50
0
    def _default_starting_values(since=None,
                                 until=None,
                                 search_locations=None,
                                 hosts=None):

        if since is not None and until is not None and isinstance(
                since, str) and isinstance(until, str):
            # Test for times to be properly formatted, allow hh:mm or hh:mm:ss
            pattern = re.compile(
                r'(^[2][0-3]|[0-1][0-9]):[0-5][0-9](:[0-5][0-9])?$')
            # If only hours are supplied, match them to a day
            if pattern.match(since) or pattern.match(until):
                # Determine Time Range
                yesterday = date.fromordinal(date.today().toordinal() -
                                             1).strftime('%Y-%m-%d')
                today = datetime.now().strftime('%Y-%m-%d')
                now = datetime.now().strftime('%R')
                if since > now or since > until:
                    search_start = yesterday
                else:
                    search_start = today
                if until > since > now:
                    search_end = yesterday
                else:
                    search_end = today
                since = LogFileTimeParser._parse_date(search_start + ' ' +
                                                      since)
                until = LogFileTimeParser._parse_date(search_end + ' ' + until)
            else:
                # Set dates
                since = LogFileTimeParser._parse_date(since)
                until = LogFileTimeParser._parse_date(until)
        # Setup default times
        if since is None:
            since = (datetime.today() - timedelta(hours=1))
        if until is None:
            until = datetime.today()

        if search_locations is None:
            execution_mode = LogFileTimeParser._get_execution_mode()
            # Copy the class-level defaults so repeated calls do not append to them
            search_locations = list(LogFileTimeParser.STANDARD_SEARCH_LOCATIONS)
            for service, info in LogFileTimeParser.INTERNAL_MAPPING.iteritems():
                search_locations.append(info.get(execution_mode))

        if not hosts:
            hosts = [sr.ip for sr in StorageRouterList.get_storagerouters()]

        return since, until, search_locations, hosts
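
The hh:mm / hh:mm:ss pattern above only accepts zero-padded 24-hour times; a quick standalone check:

import re

pattern = re.compile(r'(^[2][0-3]|[0-1][0-9]):[0-5][0-9](:[0-5][0-9])?$')
for sample in ['08:00', '23:59:59', '24:00', '8:00']:
    print('{0} -> {1}'.format(sample, bool(pattern.match(sample))))
# 08:00 and 23:59:59 match; 24:00 (hour out of range) and 8:00 (not zero-padded) do not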
Example #51
0
def listEdgeclients():
    edgeclients = []
    protocol = getEdgeProtocol()
    for storagerouter in StorageRouterList.get_storagerouters():
        if storagerouter.status == "FAILURE":
            continue
        for storagedriver in storagerouter.storagedrivers:
            edgeclient = {
                "vpool": storagedriver.vpool.name,
                "protocol": protocol,
                "ip": storagerouter.ip,
                "port": storagedriver.ports["edge"],
            }
            edgeclients.append(edgeclient)
    return edgeclients
Example #52
0
 def _storage_router_layout(self):
     """
     Creates a dictionary with information about which Storage Routers use this domain as their regular or recovery domain
     :return: Information about Storage Routers using this domain
     :rtype: dict
     """
     layout = {'regular': [],
               'recovery': []}
     for sr in StorageRouterList.get_storagerouters():
         for junction in sr.domains:
             if junction.domain_guid == self.guid:
                 if junction.backup is True:
                     layout['recovery'].append(sr.guid)
                 else:
                     layout['regular'].append(sr.guid)
     return layout
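
A junction with backup set marks a recovery domain; any other junction counts as regular. The return value is thus a partition of StorageRouter guids along these lines (guids made up for illustration):

layout = {'regular': ['9f1b...'],    # StorageRouters with this domain as a normal domain
          'recovery': ['4d5e...']}   # StorageRouters with this domain as a recovery (backup) domain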
Example #53
0
    def install_plugins():
        """
        (Re)load plugins
        """
        if ServiceManager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
            # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
            # the plugin. Otherwise, the plugin will be loaded once 'ovs setup' is executed
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            clients = []
            try:
                for storagerouter in StorageRouterList.get_storagerouters():
                    clients.append(SSHClient(storagerouter, username='******'))
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')

            for client in clients:
                for service_name in ['watcher-framework', 'memcached']:
                    ServiceManager.stop_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(service_name, client=client) is False:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError('Could not stop service: {0}'.format(service_name))

            for client in clients:
                for service_name in ['memcached', 'watcher-framework']:
                    ServiceManager.start_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(service_name, client=client) is True:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError('Could not start service: {0}'.format(service_name))

            from ovs.dal.helpers import Migration
            Migration.migrate()

            from ovs.lib.helpers.toolbox import Toolbox
            ip = System.get_my_storagerouter().ip
            functions = Toolbox.fetch_hooks('plugin', 'postinstall')
            for function in functions:
                function(ip=ip)
Example #54
0
 def register(name, email, company, phone, newsletter):
     """
     Registers the environment
     """
     SupportAgent().run()  # Execute a single heartbeat run
     client = OVSClient('monitoring.openvstorage.com', 443, credentials=None, verify=True, version=1)
     task_id = client.post('/support/register/',
                           data={'cluster_id': Configuration.get('ovs.support.cid'),
                                 'name': name,
                                 'email': email,
                                 'company': company,
                                 'phone': phone,
                                 'newsletter': newsletter,
                                 'register_only': True})
     if task_id:
         client.wait_for_task(task_id, timeout=120)
     for storagerouter in StorageRouterList.get_storagerouters():
         client = SSHClient(storagerouter)
         client.config_set('ovs.core.registered', True)
Example #55
0
    def _default_starting_values(since=None, until=None, search_locations=None, hosts=None):

        if since is not None and until is not None and isinstance(since, str) and isinstance(until, str):
            # Test for times to be properly formatted, allow hh:mm or hh:mm:ss
            pattern = re.compile(r'(^[2][0-3]|[0-1][0-9]):[0-5][0-9](:[0-5][0-9])?$')
            # If only hours are supplied, match them to a day
            if pattern.match(since) or pattern.match(until):
                # Determine Time Range
                yesterday = date.fromordinal(date.today().toordinal() - 1).strftime('%Y-%m-%d')
                today = datetime.now().strftime('%Y-%m-%d')
                now = datetime.now().strftime('%R')
                if since > now or since > until:
                    search_start = yesterday
                else:
                    search_start = today
                if until > since > now:
                    search_end = yesterday
                else:
                    search_end = today
                since = LogFileTimeParser._parse_date(search_start + ' ' + since)
                until = LogFileTimeParser._parse_date(search_end + ' ' + until)
            else:
                # Set dates
                since = LogFileTimeParser._parse_date(since)
                until = LogFileTimeParser._parse_date(until)
        # Setup default times
        if since is None:
            since = (datetime.today() - timedelta(hours=1))
        if until is None:
            until = datetime.today()

        if search_locations is None:
            execution_mode = LogFileTimeParser._get_execution_mode()
            # Copy the class-level defaults so repeated calls do not append to them
            search_locations = list(LogFileTimeParser.STANDARD_SEARCH_LOCATIONS)
            for service, info in LogFileTimeParser.INTERNAL_MAPPING.iteritems():
                search_locations.append(info.get(execution_mode))

        if not hosts:
            hosts = [sr.ip for sr in StorageRouterList.get_storagerouters()]

        return since, until, search_locations, hosts
Example #56
0
    def sync_with_reality(storagerouter_guid=None):
        """
        Syncs the Disks from all StorageRouters with the reality.
        :param storagerouter_guid: Guid of the Storage Router to synchronize
        """
        storagerouters = []
        if storagerouter_guid is not None:
            storagerouters.append(StorageRouter(storagerouter_guid))
        else:
            storagerouters = StorageRouterList.get_storagerouters()
        for storagerouter in storagerouters:
            try:
                client = SSHClient(storagerouter, username='******')
            except UnableToConnectException:
                DiskController._logger.info('Could not connect to StorageRouter {0}, skipping'.format(storagerouter.ip))
                continue
            configuration = {}
            # Gather mount data
            mount_mapping = {}
            mount_data = client.run('mount')
            for mount in mount_data.splitlines():
                mount = mount.strip()
                match = re.search('(/dev/(.+?)) on (/.*?) type.*', mount)
                if match is not None:
                    dev_name = match.groups()[0]
                    uuid = client.run('blkid -o value -s UUID {0}'.format(dev_name))
                    if uuid:
                        mount_mapping[uuid] = match.groups()[2]
                    else:
                        mount_mapping[match.groups()[1]] = match.groups()[2]
            # Gather raid information
            try:
                md_information = client.run('mdadm --detail /dev/md*', suppress_logging=True)
            except CalledProcessError:
                md_information = ''
            raid_members = []
            for member in re.findall('(?: +[0-9]+){4} +[^/]+/dev/([a-z0-9]+)', md_information):
                raid_members.append(member)
            # Gather disk information
            with remote(storagerouter.ip, [Context, os]) as rem:
                context = rem.Context()
                devices = [device for device in context.list_devices(subsystem='block')
                           if ('ID_TYPE' in device and device['ID_TYPE'] == 'disk') or
                              ('DEVNAME' in device and ('loop' in device['DEVNAME'] or 'nvme' in device['DEVNAME'] or 'md' in device['DEVNAME']))]
                for device in devices:
                    is_partition = device['DEVTYPE'] == 'partition'
                    device_path = device['DEVNAME']
                    device_name = device_path.split('/')[-1]
                    partition_id = None
                    partition_name = None
                    extended_partition_info = None
                    if is_partition is True:
                        partition_name = device['ID_FS_UUID'] if 'ID_FS_UUID' in device else device_name
                        if 'ID_PART_ENTRY_NUMBER' in device:
                            extended_partition_info = True
                            partition_id = device['ID_PART_ENTRY_NUMBER']
                            if device_name.startswith('nvme') or device_name.startswith('loop'):
                                device_name = device_name[:-len(partition_id) - 1]  # strip the 'p<N>' suffix
                            elif device_name.startswith('md'):
                                device_name = device_name[:device_name.index('p')]
                            else:
                                device_name = device_name[:-len(partition_id)]  # strip the '<N>' suffix
                        else:
                            DiskController._logger.debug('Partition {0} has no partition metadata'.format(device_path))
                            extended_partition_info = False
                            match = re.match('^(\D+?)(\d+)$', device_name)
                            if match is None:
                                DiskController._logger.debug('Could not handle disk/partition {0}'.format(device_path))
                                continue  # Unable to handle this disk/partition
                            partition_id = match.groups()[1]
                            device_name = match.groups()[0]
                    sectors = int(client.run('cat /sys/block/{0}/size'.format(device_name)))
                    sector_size = int(client.run('cat /sys/block/{0}/queue/hw_sector_size'.format(device_name)))
                    rotational = int(client.run('cat /sys/block/{0}/queue/rotational'.format(device_name)))

                    if sectors == 0:
                        continue
                    if device_name in raid_members:
                        continue
                    if device_name not in configuration:
                        configuration[device_name] = {'partitions': {}}
                    path = None
                    for path_type in ['by-id', 'by-uuid']:
                        if path is not None:
                            break
                        if 'DEVLINKS' in device:
                            for item in device['DEVLINKS'].split(' '):
                                if path_type in item:
                                    path = item
                    if path is None:
                        path = device_path
                    if is_partition is True:
                        if 'ID_PART_ENTRY_TYPE' in device and device['ID_PART_ENTRY_TYPE'] == '0x5':
                            continue  # This is an extended partition, let's skip that one
                        if extended_partition_info is True:
                            offset = int(device['ID_PART_ENTRY_OFFSET']) * sector_size
                            size = int(device['ID_PART_ENTRY_SIZE']) * sector_size
                        else:
                            match = re.match('^(\D+?)(\d+)$', device_path)
                            if match is None:
                                DiskController._logger.debug('Could not handle disk/partition {0}'.format(device_path))
                                continue  # Unable to handle this disk/partition
                            partitions_info = DiskTools.get_partitions_info(match.groups()[0])
                            if device_path in partitions_info:
                                partition_info = partitions_info[device_path]
                                offset = int(partition_info['start'])
                                size = int(partition_info['size'])
                            else:
                                DiskController._logger.warning('Could not retrieve partition info for disk/partition {0}'.format(device_path))
                                continue
                        configuration[device_name]['partitions'][partition_id] = {'offset': offset,
                                                                                  'size': size,
                                                                                  'path': path,
                                                                                  'state': 'OK'}
                        partition_data = configuration[device_name]['partitions'][partition_id]
                        if partition_name in mount_mapping:
                            mountpoint = mount_mapping[partition_name]
                            partition_data['mountpoint'] = mountpoint
                            partition_data['inode'] = rem.os.stat(mountpoint).st_dev
                            del mount_mapping[partition_name]
                            try:
                                client.run('touch {0}/{1}; rm {0}/{1}'.format(mountpoint, str(time.time())))
                            except CalledProcessError:
                                partition_data['state'] = 'FAILURE'
                        if 'ID_FS_TYPE' in device:
                            partition_data['filesystem'] = device['ID_FS_TYPE']
                    else:
                        configuration[device_name].update({'name': device_name,
                                                           'path': path,
                                                           'vendor': device['ID_VENDOR'] if 'ID_VENDOR' in device else None,
                                                           'model': device['ID_MODEL'] if 'ID_MODEL' in device else None,
                                                           'size': sector_size * sectors,
                                                           'is_ssd': rotational == 0,
                                                           'state': 'OK'})
                    for partition_name in mount_mapping:
                        device_name = partition_name.split('/')[-1]
                        match = re.search('^(\D+?)(\d+)$', device_name)
                        if match is not None:
                            device_name = match.groups()[0]
                            partition_id = match.groups()[1]
                            if device_name not in configuration:
                                configuration[device_name] = {'partitions': {},
                                                              'state': 'MISSING'}
                            configuration[device_name]['partitions'][partition_id] = {'mountpoint': mount_mapping[partition_name],
                                                                                      'state': 'MISSING'}
            # Sync the model
            disk_names = []
            for disk in storagerouter.disks:
                if disk.name not in configuration:
                    for partition in disk.partitions:
                        partition.delete()
                    disk.delete()
                else:
                    disk_names.append(disk.name)
                    DiskController._update_disk(disk, configuration[disk.name])
                    partitions = []
                    partition_info = configuration[disk.name]['partitions']
                    for partition in disk.partitions:
                        if partition.id not in partition_info:
                            partition.delete()
                        else:
                            partitions.append(partition.id)
                            DiskController._update_partition(partition, partition_info[partition.id])
                    for partition_id in partition_info:
                        if partition_id not in partitions:
                            DiskController._create_partition(partition_id, partition_info[partition_id], disk)
            for disk_name in configuration:
                if disk_name not in disk_names and configuration[disk_name]['state'] not in ['MISSING']:
                    disk = Disk()
                    disk.storagerouter = storagerouter
                    disk.name = disk_name
                    DiskController._update_disk(disk, configuration[disk_name])
                    partition_info = configuration[disk_name]['partitions']
                    for partition_id in partition_info:
                        if partition_info[partition_id]['state'] not in ['MISSING']:
                            DiskController._create_partition(partition_id, partition_info[partition_id], disk)
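
The mount scan above keys each mountpoint by filesystem UUID when blkid returns one, falling back to the short device name. The core regex, run against a sample mount line:

import re

line = '/dev/sda1 on /mnt/hdd1 type ext4 (rw,relatime)'
match = re.search('(/dev/(.+?)) on (/.*?) type.*', line)
print(match.groups())  # ('/dev/sda1', 'sda1', '/mnt/hdd1')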
Example #57
0
    def _bootstrap_dal_models(self):
        """
        Load/hook dal models as snmp oids
        """
        _guids = set()

        enabled_key = "{0}_config_dal_enabled".format(STORAGE_PREFIX)
        self.instance_oid = 0
        try:
            enabled = self.persistent.get(enabled_key)
        except KeyNotFoundException:
            enabled = True  # Enabled by default, can be disabled by setting the key
        if enabled:
            from ovs.dal.lists.vdisklist import VDiskList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.dal.lists.pmachinelist import PMachineList
            from ovs.dal.lists.vmachinelist import VMachineList
            from ovs.dal.lists.vpoollist import VPoolList
            from ovs.dal.lists.storagedriverlist import StorageDriverList

            for storagerouter in StorageRouterList.get_storagerouters():
                _guids.add(storagerouter.guid)
                if not self._check_added(storagerouter):
                    self._register_dal_model(10, storagerouter, 'guid', "0")
                    self._register_dal_model(10, storagerouter, 'name', "1")
                    self._register_dal_model(10, storagerouter, 'pmachine', "3", key = 'host_status')
                    self._register_dal_model(10, storagerouter, 'description', "4")
                    self._register_dal_model(10, storagerouter, 'devicename', "5")
                    self._register_dal_model(10, storagerouter, 'dtl_mode', "6")
                    self._register_dal_model(10, storagerouter, 'ip', "8")
                    self._register_dal_model(10, storagerouter, 'machineid', "9")
                    self._register_dal_model(10, storagerouter, 'status', "10")
                    # Count/sum only vDisks actually served by one of this StorageRouter's own StorageDrivers
                    self._register_dal_model(10, storagerouter, '#vdisks', "11",
                                             func = lambda storagerouter: len([vdisk for storagedriver in storagerouter.storagedrivers for vdisk in storagedriver.vpool.vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
                                             atype = int)
                    self._register_dal_model(10, storagerouter, '#vmachines', "12",
                                             func = lambda storagerouter: len(set([vdisk.vmachine.guid for storagedriver in storagerouter.storagedrivers for vdisk in storagedriver.vpool.vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id])),
                                             atype = int)
                    self._register_dal_model(10, storagerouter, '#stored_data', "13",
                                             func = lambda storagerouter: sum([vdisk.vmachine.stored_data for storagedriver in storagerouter.storagedrivers for vdisk in storagedriver.vpool.vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
                                             atype = int)
                    self.instance_oid += 1

            for vm in VMachineList.get_vmachines():
                _guids.add(vm.guid)
                if not self._check_added(vm):
                    if vm.is_vtemplate:
                        self._register_dal_model(11, vm, 'guid', "0")
                        self._register_dal_model(11, vm, 'name', "1")

                        def _children(vmt):
                            # Count vDisks on non-template vMachines that were cloned from this template's vDisks
                            template_disk_guids = set(vd.guid for vd in vmt.vdisks)
                            children = 0
                            for machine in VMachineList.get_vmachines():
                                if machine.is_vtemplate:
                                    continue
                                for vdisk in machine.vdisks:
                                    if vdisk.parent_vdisk_guid in template_disk_guids:
                                        children += 1
                            return children
                        self._register_dal_model(11, vm, '#children', 2, func = _children, atype = int)
                        self.instance_oid += 1

            for vm in VMachineList.get_vmachines():
                _guids.add(vm.guid)
                if not self._check_added(vm):
                    if not vm.is_vtemplate:
                        self._register_dal_model(0, vm, 'guid', "0")
                        self._register_dal_model(0, vm, 'name', "1")
                        self._register_dal_model(0, vm, 'statistics', "2.0", key = "operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.2", key = "data_read", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.6", key = "write_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.11", key = "backend_data_read", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.12", key = "cache_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.16", key = "backend_data_written", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.17", key = "data_read_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.18", key = "read_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.20", key = "data_written_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.23", key = "timestamp", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.27", key = "data_written", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.30", key = "operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.34", key = "data_transferred", atype = int)
                        self._register_dal_model(0, vm, 'stored_data', "3", atype = int)
                        self._register_dal_model(0, vm, 'description', "4")
                        self._register_dal_model(0, vm, 'devicename', "5")
                        self._register_dal_model(0, vm, 'dtl_mode', "6")
                        self._register_dal_model(0, vm, 'hypervisorid', "7")
                        self._register_dal_model(0, vm, 'ip', "8")
                        self._register_dal_model(0, vm, 'status', "10")
                        self._register_dal_model(0, vm, 'stored_data', "10", atype = int)
                        self._register_dal_model(0, vm, 'snapshots', "11", atype = int)
                        self._register_dal_model(0, vm, 'vdisks', "12", atype = int)
                        self._register_dal_model(0, vm, 'DTL', '13',
                                                 func = lambda vm: 'DEGRADED' if all(item == 'DEGRADED' for item in [vd.info['failover_mode'] for vd in vm.vdisks]) else 'OK')
                    self.instance_oid += 1

            for vd in VDiskList.get_vdisks():
                _guids.add(vd.guid)
                if not self._check_added(vd):
                    self._register_dal_model(1, vd, 'guid', "0")
                    self._register_dal_model(1, vd, 'name', "1")
                    self._register_dal_model(1, vd, 'statistics', "2.0", key = "operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.1", key = "data_written_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.2", key = "data_read", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.6", key = "write_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.11", key = "backend_data_read", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.12", key = "cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.16", key = "backend_data_written", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.17", key = "data_read_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.18", key = "read_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.20", key = "cluster_cache_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.23", key = "timestamp", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.27", key = "data_written", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.30", key = "operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.34", key = "data_transferred", atype = int)
                    self._register_dal_model(1, vd, 'info', "3", key = 'stored', atype = int)
                    self._register_dal_model(1, vd, 'info', "4", key = 'failover_mode', atype = int)
                    self._register_dal_model(1, vd, 'snapshots', "5", atype = int)
                    self.instance_oid += 1

            for pm in PMachineList.get_pmachines():
                _guids.add(pm.guid)
                if not self._check_added(pm):
                    self._register_dal_model(2, pm, 'guid', "0")
                    self._register_dal_model(2, pm, 'name', "1")
                    self._register_dal_model(2, pm, 'host_status', "2")
                    self.instance_oid += 1

            for vp in VPoolList.get_vpools():
                _guids.add(vp.guid)
                if not self._check_added(vp):
                    self._register_dal_model(3, vp, 'guid', "0")
                    self._register_dal_model(3, vp, 'name', "1")
                    self._register_dal_model(3, vp, 'statistics', "2.0", key = "operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.2", key = "data_read", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.6", key = "write_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.11", key = "backend_data_read", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.12", key = "cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.16", key = "backend_data_written", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.17", key = "data_read_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.18", key = "read_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.20", key = "data_written_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.23", key = "timestamp", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.27", key = "data_written", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.30", key = "operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.34", key = "data_transferred", atype = int)
                    self._register_dal_model(3, vp, 'status', "3")
                    self._register_dal_model(3, vp, 'description', "4")
                    self._register_dal_model(3, vp, 'vdisks', "5", atype = int)
                    self._register_dal_model(3, vp, '#vmachines', "6",
                                             func = lambda vp: len(set([vd.vmachine.guid for vd in vp.vdisks])),
                                             atype = int)
                    self.instance_oid += 1

            for storagedriver in StorageDriverList.get_storagedrivers():
                _guids.add(storagedriver.guid)
                if not self._check_added(storagedriver):
                    self._register_dal_model(4, storagedriver, 'guid', "0")
                    self._register_dal_model(4, storagedriver, 'name', "1")
                    self._register_dal_model(4, storagedriver, 'stored_data', "2", atype = int)
                    self.instance_oid += 1

            try:
                # try to load OVS Backends
                from ovs.dal.lists.albabackendlist import AlbaBackendList
                for backend in AlbaBackendList.get_albabackends():
                    _guids.add(backend.guid)
                    if not self._check_added(backend):
                        self._register_dal_model(5, backend, 'guid', 0)
                        self._register_dal_model(5, backend, 'name', 1)
                        for disk_id in range(len(backend.all_disks)):
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.0'.format(disk_id), key = "name", index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.1'.format(disk_id), key = "usage.size", atype = long, index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.2'.format(disk_id), key = "usage.used", atype = long, index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.3'.format(disk_id), key = "usage.available", atype = long, index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.4'.format(disk_id), key = "state.state", index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.5'.format(disk_id), key = "node_id", index=disk_id)

                        self.instance_oid += 1
            except ImportError:
                print('OVS Backend not present')
            reload_required = False
            for object_guid in list(self.model_oids):
                if object_guid not in _guids:
                    self.model_oids.remove(object_guid)
                    reload_required = True
            if reload_required:
                self._reload_snmp()
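
Every _register_dal_model call above follows one shape: a type OID, a DAL object, an attribute, a sub-OID, and optionally a dict key, a derivation function and a result type. The real method lives in the SNMP agent and is not shown here; this hedged stand-in only records what such a call would expose:

def register_dal_model(registry, type_oid, obj, attribute, sub_oid, key=None, func=None, atype=str):
    # Resolve the value the OID would serve: derived via func when given,
    # otherwise a plain attribute lookup, optionally narrowed by a dict key,
    # then coerced to the advertised SNMP type
    value = func(obj) if func is not None else getattr(obj, attribute)
    if key is not None and isinstance(value, dict):
        value = value.get(key)
    registry[(type_oid, sub_oid)] = None if value is None else atype(value)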
Example #58
0
 def sync_with_reality(storagerouter_guid=None):
     """
     Syncs the Disks from all StorageRouters with the reality.
     """
     storagerouters = []
     if storagerouter_guid is not None:
         storagerouters.append(StorageRouter(storagerouter_guid))
     else:
         storagerouters = StorageRouterList.get_storagerouters()
     for storagerouter in storagerouters:
         try:
             client = SSHClient(storagerouter, username='******')
         except UnableToConnectException:
             logger.info('Could not connect to StorageRouter {0}, skipping'.format(storagerouter.ip))
             continue
         configuration = {}
         # Gather mount data
         mount_mapping = {}
         mount_data = client.run('mount')
         for mount in mount_data.splitlines():
             mount = mount.strip()
             match = re.search('/dev/(.+?) on (/.*?) type.*', mount)
             if match is not None:
                 mount_mapping[match.groups()[0]] = match.groups()[1]
         # Gather disk information
         with Remote(storagerouter.ip, [Context, os]) as remote:
             context = remote.Context()
             devices = [device for device in context.list_devices(subsystem='block')
                        if 'ID_TYPE' in device and device['ID_TYPE'] == 'disk']
             for device in devices:
                 is_partition = device['DEVTYPE'] == 'partition'
                 device_path = device['DEVNAME']
                 device_name = device_path.split('/')[-1]
                 partition_id = None
                 partition_name = None
                  extended_partition_info = None
                 if is_partition is True:
                     if 'ID_PART_ENTRY_NUMBER' in device:
                          extended_partition_info = True
                         partition_id = device['ID_PART_ENTRY_NUMBER']
                         partition_name = device_name
                          device_name = device_name[:-len(partition_id)]  # strip the '<N>' suffix
                     else:
                         logger.debug('Partition {0} has no partition metadata'.format(device_path))
                          extended_partition_info = False
                         match = re.match('^(\D+?)(\d+)$', device_name)
                         if match is None:
                             logger.debug('Could not handle disk/partition {0}'.format(device_path))
                             continue  # Unable to handle this disk/partition
                         partition_name = device_name
                         partition_id = match.groups()[1]
                         device_name = match.groups()[0]
                 if device_name not in configuration:
                     configuration[device_name] = {'partitions': {}}
                 path = None
                 for path_type in ['by-id', 'by-uuid']:
                     if path is not None:
                         break
                      if 'DEVLINKS' in device:
                          for item in device['DEVLINKS'].split(' '):
                              if path_type in item:
                                  path = item
                 if path is None:
                     path = device_path
                 sectors = int(client.run('cat /sys/block/{0}/size'.format(device_name)))
                 sector_size = int(client.run('cat /sys/block/{0}/queue/hw_sector_size'.format(device_name)))
                 rotational = int(client.run('cat /sys/block/{0}/queue/rotational'.format(device_name)))
                 if is_partition is True:
                     if 'ID_PART_ENTRY_TYPE' in device and device['ID_PART_ENTRY_TYPE'] == '0x5':
                         continue  # This is an extended partition, let's skip that one
                      if extended_partition_info is True:
                         offset = int(device['ID_PART_ENTRY_OFFSET']) * sector_size
                         size = int(device['ID_PART_ENTRY_SIZE']) * sector_size
                     else:
                         match = re.match('^(\D+?)(\d+)$', device_path)
                         if match is None:
                             logger.debug('Could not handle disk/partition {0}'.format(device_path))
                             continue  # Unable to handle this disk/partition
                         fdisk_info = client.run('fdisk -l {0} | grep {1}'.format(match.groups()[0], device_path)).strip()
                         fdisk_data = filter(None, fdisk_info.split(' '))
                         offset = int(fdisk_data[1]) * sector_size
                         size = (int(fdisk_data[2]) - int(fdisk_data[1])) * sector_size
                     configuration[device_name]['partitions'][partition_id] = {'offset': offset,
                                                                               'size': size,
                                                                               'path': path,
                                                                               'state': 'OK'}
                     partition_data = configuration[device_name]['partitions'][partition_id]
                     if partition_name in mount_mapping:
                         mountpoint = mount_mapping[partition_name]
                         partition_data['mountpoint'] = mountpoint
                         partition_data['inode'] = remote.os.stat(mountpoint).st_dev
                         del mount_mapping[partition_name]
                         try:
                             client.run('touch {0}/{1}; rm {0}/{1}'.format(mountpoint, str(time.time())))
                         except CalledProcessError:
                             partition_data['state'] = 'FAILURE'
                     if 'ID_FS_TYPE' in device:
                         partition_data['filesystem'] = device['ID_FS_TYPE']
                 else:
                     configuration[device_name].update({'name': device_name,
                                                        'path': path,
                                                        'vendor': device['ID_VENDOR'] if 'ID_VENDOR' in device else None,
                                                        'model': device['ID_MODEL'] if 'ID_MODEL' in device else None,
                                                        'size': sector_size * sectors,
                                                        'is_ssd': rotational == 0,
                                                        'state': 'OK'})
                 for partition_name in mount_mapping:
                     device_name = partition_name.split('/')[-1]
                     match = re.search('^(\D+?)(\d+)$', device_name)
                     if match is not None:
                         device_name = match.groups()[0]
                         partition_id = match.groups()[1]
                         if device_name not in configuration:
                             configuration[device_name] = {'partitions': {},
                                                           'state': 'MISSING'}
                         configuration[device_name]['partitions'][partition_id] = {'mountpoint': mount_mapping[partition_name],
                                                                                   'state': 'MISSING'}
         # Sync the model
         disk_names = []
         for disk in storagerouter.disks:
             if disk.name not in configuration:
                 for partition in disk.partitions:
                     partition.delete()
                 disk.delete()
             else:
                 disk_names.append(disk.name)
                 DiskController._update_disk(disk, configuration[disk.name])
                 partitions = []
                 partition_info = configuration[disk.name]['partitions']
                 for partition in disk.partitions:
                     if partition.id not in partition_info:
                         partition.delete()
                     else:
                         partitions.append(partition.id)
                         DiskController._update_partition(partition, partition_info[partition.id])
                 for partition_id in partition_info:
                     if partition_id not in partitions:
                         DiskController._create_partition(partition_id, partition_info[partition_id], disk)
         for disk_name in configuration:
             if disk_name not in disk_names and configuration[disk_name]['state'] not in ['MISSING']:
                 disk = Disk()
                 disk.storagerouter = storagerouter
                 disk.name = disk_name
                 DiskController._update_disk(disk, configuration[disk_name])
                 partition_info = configuration[disk_name]['partitions']
                 for partition_id in partition_info:
                     if partition_info[partition_id]['state'] not in ['MISSING']:
                         DiskController._create_partition(partition_id, partition_info[partition_id], disk)
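
The fdisk fallback above turns start/end sectors into byte offsets and sizes. The same arithmetic on a sample output line (values illustrative; this assumes no boot-flag asterisk shifting the columns):

sector_size = 512
fdisk_line = '/dev/sda1        2048     1050623      524288   83  Linux'
fields = [field for field in fdisk_line.split(' ') if field]
offset = int(fields[1]) * sector_size                    # start sector -> bytes
size = (int(fields[2]) - int(fields[1])) * sector_size   # (end - start) sectors -> bytes
print('offset={0} size={1}'.format(offset, size))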