Code example #1
    def get_unused_arakoon_metadata_and_claim(cluster_type, locked=True):
        """
        Retrieve arakoon cluster information based on its type
        :param cluster_type: Type of arakoon cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str

        :param locked: Execute this in a locked context
        :type locked: bool

        :return: An unused ArakoonClusterMetadata object that has just been claimed, or None when no such cluster is available
        :rtype: ArakoonClusterMetadata
        """
        cluster_type = cluster_type.upper()
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError('Unsupported arakoon cluster type provided. Please choose from {0}'.format(', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))
        if not EtcdConfiguration.dir_exists('/ovs/arakoon'):
            return None

        mutex = volatile_mutex('claim_arakoon_metadata', wait=10)
        try:
            if locked is True:
                mutex.acquire()

            for cluster_name in EtcdConfiguration.list('/ovs/arakoon'):
                metadata = ArakoonClusterMetadata(cluster_id=cluster_name)
                metadata.load_metadata()
                if metadata.cluster_type == cluster_type and metadata.in_use is False and metadata.internal is False:
                    metadata.claim()
                    return metadata
        finally:
            if locked is True:
                mutex.release()
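
A minimal usage sketch for the method above. It assumes the method is a staticmethod of ArakoonInstaller (the class the create_cluster examples further down belong to) and that the import path follows the Open vStorage layout; the cluster type value is purely illustrative.

    # Sketch only; the import path and the cluster type are assumptions, not taken from the example.
    from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller

    metadata = ArakoonInstaller.get_unused_arakoon_metadata_and_claim('SD')  # 'SD' is illustrative
    if metadata is None:
        # Either /ovs/arakoon does not exist, or no unclaimed, externally managed
        # cluster of this type is registered in etcd.
        pass
    else:
        cluster_name = metadata.cluster_id  # the claimed cluster can now be put to use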
Code example #2
 def _configure_arakoon_to_volumedriver():
     print 'Update existing vPools'
     logger.info('Update existing vPools')
     config = ArakoonClusterConfig('voldrv')
     config.load_config()
     arakoon_nodes = []
     for node in config.nodes:
         arakoon_nodes.append({
             'host': node.ip,
             'port': node.client_port,
             'node_id': node.name
         })
     if EtcdConfiguration.dir_exists('/ovs/vpools'):
         for vpool_guid in EtcdConfiguration.list('/ovs/vpools'):
             for storagedriver_id in EtcdConfiguration.list(
                     '/ovs/vpools/{0}/hosts'.format(vpool_guid)):
                 storagedriver_config = StorageDriverConfiguration(
                     'storagedriver', vpool_guid, storagedriver_id)
                 storagedriver_config.load()
                 storagedriver_config.configure_volume_registry(
                     vregistry_arakoon_cluster_id='voldrv',
                     vregistry_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.configure_distributed_lock_store(
                     dls_type='Arakoon',
                     dls_arakoon_cluster_id='voldrv',
                     dls_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.save(reload_config=True)
Code example #3
    def list(self, discover=False, ip=None, node_id=None):
        """
        Lists all available ALBA Nodes
        :param discover: If True and an IP is provided, return a list containing only that ALBA node; if True and no IP is provided, return all ALBA nodes registered in etcd that are not yet modeled; otherwise return the modeled ALBA nodes
        :param ip: IP of ALBA node to retrieve
        :param node_id: ID of the ALBA node
        """
        if discover is False and (ip is not None or node_id is not None):
            raise RuntimeError('Discover is mutually exclusive with IP and nodeID')
        if (ip is None and node_id is not None) or (ip is not None and node_id is None):
            raise RuntimeError('Both IP and nodeID need to be specified')

        if discover is False:
            return AlbaNodeList.get_albanodes()

        if ip is not None:
            node = AlbaNode(volatile=True)
            node.ip = ip
            node.type = 'ASD'
            node.node_id = node_id
            node.port = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|port'.format(node_id))
            node.username = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|username'.format(node_id))
            node.password = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|password'.format(node_id))
            data = node.client.get_metadata()
            if data['_success'] is False and data['_error'] == 'Invalid credentials':
                raise RuntimeError('Invalid credentials')
            if data['node_id'] != node_id:
                raise RuntimeError('Unexpected node identifier. {0} vs {1}'.format(data['node_id'], node_id))
            node_list = DataList(AlbaNode, {})
            node_list._executed = True
            node_list._guids = [node.guid]
            node_list._objects = {node.guid: node}
            node_list._data = {node.guid: {'guid': node.guid, 'data': node._data}}
            return node_list

        nodes = {}
        model_node_ids = [node.node_id for node in AlbaNodeList.get_albanodes()]
        found_node_ids = []
        asd_node_ids = []
        if EtcdConfiguration.dir_exists('/ovs/alba/asdnodes'):
            asd_node_ids = EtcdConfiguration.list('/ovs/alba/asdnodes')

        for node_id in asd_node_ids:
            node = AlbaNode(volatile=True)
            node.type = 'ASD'
            node.node_id = node_id
            node.ip = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|ip'.format(node_id))
            node.port = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|port'.format(node_id))
            node.username = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|username'.format(node_id))
            node.password = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|password'.format(node_id))
            if node.node_id not in model_node_ids and node.node_id not in found_node_ids:
                nodes[node.guid] = node
                found_node_ids.append(node.node_id)
        node_list = DataList(AlbaNode, {})
        node_list._executed = True
        node_list._guids = nodes.keys()
        node_list._objects = nodes
        node_list._data = dict([(node.guid, {'guid': node.guid, 'data': node._data}) for node in nodes.values()])
        return node_list
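
A hypothetical call sketch for the list() action above. The surrounding view class is not shown in the example, so `view` and all argument values below are placeholders; only the parameter rules come from the checks at the top of the method.

    # `view` stands for an instance of whatever class defines list() above.
    modeled = view.list()                          # discover=False: only nodes already in the model
    discovered = view.list(discover=True)          # all ASD nodes in etcd that are not modeled yet
    single = view.list(discover=True,
                       ip='10.100.1.5',            # illustrative IP
                       node_id='0a1b2c3d4e5f')     # illustrative node id; must match the node's metadata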
Code example #4
 def load(self):
     """
     Loads the configuration from etcd (a new one will be created when it does not exist yet)
     """
     self.configuration = {}
     if EtcdConfiguration.dir_exists(self.path.format('')):
         self.is_new = False
         for key in self.params[self.config_type]:
             if EtcdConfiguration.exists(self.path.format(key)):
                 self.configuration[key] = json.loads(EtcdConfiguration.get(self.path.format(key), raw=True))
     else:
         self._logger.debug('Could not find config {0}, a new one will be created'.format(self.path.format('')))
     self.dirty_entries = []
Code example #5
 def _get_free_ports(client):
     node_name = System.get_my_machine_id(client)
     clusters = []
     exclude_ports = []
     if EtcdConfiguration.dir_exists(ArakoonInstaller.ETCD_CONFIG_ROOT):
         for cluster_name in EtcdConfiguration.list(ArakoonInstaller.ETCD_CONFIG_ROOT):
             try:
                 config = ArakoonClusterConfig(cluster_name)
                 config.load_config()
                 for node in config.nodes:
                     if node.name == node_name:
                         clusters.append(cluster_name)
                         exclude_ports.append(node.client_port)
                         exclude_ports.append(node.messaging_port)
             except:
                 logger.error('  Could not load port information of cluster {0}'.format(cluster_name))
     ports = System.get_free_ports(EtcdConfiguration.get('/ovs/framework/hosts/{0}/ports|arakoon'.format(node_name)), exclude_ports, 2, client)
     logger.debug('  Loaded free ports {0} based on existing clusters {1}'.format(ports, clusters))
     return ports
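
A short call sketch for the helper above, assuming (based on the create_cluster examples further down) that it is the staticmethod ArakoonInstaller._get_free_ports and that the import paths follow the Open vStorage layout; the IP is illustrative.

    # Sketch only: create_cluster acquires a volatile mutex around this call so that two
    # concurrent installs targeting the same node cannot pick the same ports.
    from ovs.extensions.generic.sshclient import SSHClient                   # assumed path
    from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller  # assumed path

    client = SSHClient('10.100.1.1', username=ArakoonInstaller.SSHCLIENT_USER)  # illustrative IP
    client_port, messaging_port = ArakoonInstaller._get_free_ports(client)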
Code example #6
 def _configure_arakoon_to_volumedriver(cluster_name):
     print 'Update existing vPools'
     StorageDriverController._logger.info('Update existing vPools')
     config = ArakoonClusterConfig(cluster_name)
     config.load_config()
     arakoon_nodes = []
     for node in config.nodes:
         arakoon_nodes.append({'host': node.ip,
                               'port': node.client_port,
                               'node_id': node.name})
     if EtcdConfiguration.dir_exists('/ovs/vpools'):
         for vpool_guid in EtcdConfiguration.list('/ovs/vpools'):
             for storagedriver_id in EtcdConfiguration.list('/ovs/vpools/{0}/hosts'.format(vpool_guid)):
                 storagedriver_config = StorageDriverConfiguration('storagedriver', vpool_guid, storagedriver_id)
                 storagedriver_config.load()
                 storagedriver_config.configure_volume_registry(vregistry_arakoon_cluster_id=cluster_name,
                                                                vregistry_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.configure_distributed_lock_store(dls_type='Arakoon',
                                                                       dls_arakoon_cluster_id=cluster_name,
                                                                       dls_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.save(reload_config=True)
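
For reference, a sketch of the node mapping both _configure_arakoon_to_volumedriver variants build before pushing it into every storagedriver configuration; the keys come from the code above, the values are made up.

    # Illustrative shape of the list handed to configure_volume_registry() and
    # configure_distributed_lock_store(); one entry per Arakoon node.
    arakoon_nodes = [{'host': '10.100.1.1', 'port': 26400, 'node_id': '0a1b2c3d4e5f'},
                     {'host': '10.100.1.2', 'port': 26402, 'node_id': '5f4e3d2c1b0a'}]
    # Hypothetical direct call of the parameterized variant (judging by the logger it
    # uses, it lives on StorageDriverController):
    # StorageDriverController._configure_arakoon_to_volumedriver('voldrv')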
Code example #7
    def model_local_albanode(**kwargs):
        """
        Add all ALBA nodes known to etcd to the model
        :param kwargs: Kwargs containing information regarding the node
        :type kwargs: dict

        :return: None
        """
        _ = kwargs
        if EtcdConfiguration.dir_exists('/ovs/alba/asdnodes'):
            for node_id in EtcdConfiguration.list('/ovs/alba/asdnodes'):
                node = AlbaNodeList.get_albanode_by_node_id(node_id)
                if node is None:
                    node = AlbaNode()
                main_config = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
                node.type = 'ASD'
                node.node_id = node_id
                node.ip = main_config['ip']
                node.port = main_config['port']
                node.username = main_config['username']
                node.password = main_config['password']
                node.storagerouter = StorageRouterList.get_by_ip(main_config['ip'])
                node.save()
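
The ALBA examples on this page all read a per-node 'main' configuration document from etcd. Below is an illustrative sketch of its shape; the keys are exactly the ones model_local_albanode() and the ALBA node list() example read, while the values are made up.

    # Illustrative only; actual values depend on the deployment.
    main_config = {'ip': '10.100.1.5',
                   'port': 8500,
                   'username': 'root',
                   'password': 'secret'}
    # Comparing the two examples suggests that
    # EtcdConfiguration.get('/ovs/alba/asdnodes/<node_id>/config/main') returns this
    # whole document, while appending '|port' (or '|ip', ...) returns a single field.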
Code example #8
    def create_cluster(cluster_name, cluster_type, ip, base_dir, plugins=None, locked=True, internal=True, claim=False):
        """
        Always creates a cluster but marks its usage according to the internal flag
        :param cluster_name: Name of the cluster
        :type cluster_name: str

        :param cluster_type: Type of the cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str

        :param ip: IP address of the first node of the new cluster
        :type ip: str

        :param base_dir: Base directory that should contain the data and tlogs
        :type base_dir: str

        :param plugins: Plugins that should be added to the configuration file
        :type plugins: list

        :param locked: Indicates whether the create should run in a locked context (e.g. to prevent port conflicts)
        :type locked: bool

        :param internal: Is cluster internally managed by OVS
        :type internal: bool

        :param claim: Claim the cluster right away
        :type claim: bool

        :return: Ports used by the Arakoon cluster, and its metadata when a new cluster is deployed
        :rtype: dict
        """
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError('Cluster type {0} is not supported. Please choose from {1}'.format(cluster_type, ', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))

        if EtcdConfiguration.dir_exists('/ovs/arakoon/{0}'.format(cluster_name)):
            raise ValueError('An Arakoon cluster with name "{0}" already exists'.format(cluster_name))

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))
        base_dir = base_dir.rstrip('/')

        client = SSHClient(ip, username=ArakoonInstaller.SSHCLIENT_USER)
        if ArakoonInstaller.is_running(cluster_name, client):
            ArakoonInstaller._logger.info('Arakoon service running for cluster {0}'.format(cluster_name))
            config = ArakoonClusterConfig(cluster_name, plugins)
            config.load_config()
            for node in config.nodes:
                if node.ip == ip:
                    return {'client_port': node.client_port,
                            'messaging_port': node.messaging_port}

        node_name = System.get_my_machine_id(client)

        home_dir = ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name)
        tlog_dir = ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)
        ArakoonInstaller.clean_leftover_arakoon_data(ip, [home_dir, tlog_dir])

        port_mutex = None
        try:
            if locked is True:
                from ovs.extensions.generic.volatilemutex import volatile_mutex
                port_mutex = volatile_mutex('arakoon_install_ports_{0}'.format(ip))
                port_mutex.acquire(wait=60)
            ports = ArakoonInstaller._get_free_ports(client)
            config = ArakoonClusterConfig(cluster_name, plugins)
            config.nodes.append(ArakoonNodeConfig(name=node_name,
                                                  ip=ip,
                                                  client_port=ports[0],
                                                  messaging_port=ports[1],
                                                  log_sinks=LogHandler.get_sink_path('arakoon_server'),
                                                  crash_log_sinks=LogHandler.get_sink_path('arakoon_server_crash'),
                                                  home=home_dir,
                                                  tlog_dir=tlog_dir))
            ArakoonInstaller._deploy(config)

            metadata = ArakoonClusterMetadata(cluster_id=cluster_name)
            metadata.internal = internal
            metadata.cluster_type = cluster_type.upper()
            metadata.write()
            if claim is True:
                metadata.claim()
        finally:
            if port_mutex is not None:
                port_mutex.release()

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
        return {'metadata': metadata,
                'client_port': ports[0],
                'messaging_port': ports[1]}
Code example #9
    def create_cluster(cluster_name, cluster_type, ip, base_dir, plugins=None, locked=True, internal=True, claim=False):
        """
        Always creates a cluster but marks its usage according to the internal flag
        :param cluster_name: Name of the cluster
        :type cluster_name: str

        :param cluster_type: Type of the cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str

        :param ip: IP address of the first node of the new cluster
        :type ip: str

        :param base_dir: Base directory that should contain the data and tlogs
        :type base_dir: str

        :param plugins: Plugins that should be added to the configuration file
        :type plugins: list

        :param locked: Indicates whether the create should run in a locked context (e.g. to prevent port conflicts)
        :type locked: bool

        :param internal: Is cluster internally managed by OVS
        :type internal: bool

        :param claim: Claim the cluster right away
        :type claim: bool

        :return: Ports used by the Arakoon cluster, and its metadata when a new cluster is deployed
        :rtype: dict
        """
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError('Cluster type {0} is not supported. Please choose from {1}'.format(cluster_type, ', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))

        if EtcdConfiguration.dir_exists('/ovs/arakoon/{0}'.format(cluster_name)):
            raise ValueError('An Arakoon cluster with name "{0}" already exists'.format(cluster_name))

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))
        base_dir = base_dir.rstrip('/')

        client = SSHClient(ip, username=ArakoonInstaller.SSHCLIENT_USER)
        if ArakoonInstaller.is_running(cluster_name, client):
            ArakoonInstaller._logger.info('Arakoon service running for cluster {0}'.format(cluster_name))
            config = ArakoonClusterConfig(cluster_name, plugins)
            config.load_config()
            for node in config.nodes:
                if node.ip == ip:
                    return {'client_port': node.client_port,
                            'messaging_port': node.messaging_port}

        node_name = System.get_my_machine_id(client)

        home_dir = ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name)
        log_dir = ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name)
        tlog_dir = ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)
        ArakoonInstaller.clean_leftover_arakoon_data(ip, {log_dir: True,
                                                          home_dir: False,
                                                          tlog_dir: False})

        port_mutex = None
        try:
            if locked is True:
                from ovs.extensions.generic.volatilemutex import volatile_mutex
                port_mutex = volatile_mutex('arakoon_install_ports_{0}'.format(ip))
                port_mutex.acquire(wait=60)
            ports = ArakoonInstaller._get_free_ports(client)
            config = ArakoonClusterConfig(cluster_name, plugins)
            config.nodes.append(ArakoonNodeConfig(name=node_name,
                                                  ip=ip,
                                                  client_port=ports[0],
                                                  messaging_port=ports[1],
                                                  log_dir=log_dir,
                                                  home=home_dir,
                                                  tlog_dir=tlog_dir))
            ArakoonInstaller._deploy(config)

            metadata = ArakoonClusterMetadata(cluster_id=cluster_name)
            metadata.internal = internal
            metadata.cluster_type = cluster_type.upper()
            metadata.write()
            if claim is True:
                metadata.claim()
        finally:
            if port_mutex is not None:
                port_mutex.release()

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
        return {'metadata': metadata,
                'client_port': ports[0],
                'messaging_port': ports[1]}
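
Finally, a hedged usage sketch for create_cluster. The class and import path are inferred from the identifiers used inside the two variants above; the argument values are illustrative and the cluster type must be one of ServiceType.ARAKOON_CLUSTER_TYPES.

    # Sketch only: creates a single-node, internally managed cluster and claims it right away.
    from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller  # assumed path

    result = ArakoonInstaller.create_cluster(cluster_name='mycluster',   # illustrative name
                                             cluster_type='FWK',         # illustrative; see ServiceType.ARAKOON_CLUSTER_TYPES
                                             ip='10.100.1.1',            # first node of the new cluster
                                             base_dir='/mnt/storage',    # holds the home and tlog directories
                                             claim=True)
    client_port = result['client_port']
    messaging_port = result['messaging_port']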