Code example #1
 def save(self, client=None, reload_config=True):
     """
     Saves the configuration to a given file, optionally a remote one
     :param client: If provided, save remote configuration
     :param reload_config: Reload the running Storage Driver configuration
     """
     self._validate()
     contents = json.dumps(self.configuration, indent=4)
     EtcdConfiguration.set(self.path, contents, raw=True)
     if self.config_type == 'storagedriver' and reload_config is True:
         if len(self.dirty_entries) > 0:
             if client is None:
                 logger.info('Applying local storagedriver configuration changes')
                 changes = LSRClient(self.remote_path).update_configuration(self.remote_path)
             else:
                 logger.info('Applying storagedriver configuration changes on {0}'.format(client.ip))
                 with Remote(client.ip, [LSRClient]) as remote:
                     changes = copy.deepcopy(remote.LocalStorageRouterClient(self.remote_path).update_configuration(self.remote_path))
             for change in changes:
                 if change['param_name'] not in self.dirty_entries:
                     raise RuntimeError('Unexpected configuration change: {0}'.format(change['param_name']))
                 logger.info('Changed {0} from "{1}" to "{2}"'.format(change['param_name'], change['old_value'], change['new_value']))
                 self.dirty_entries.remove(change['param_name'])
             logger.info('Changes applied')
             if len(self.dirty_entries) > 0:
                 logger.warning('Following changes were not applied: {0}'.format(', '.join(self.dirty_entries)))
         else:
             logger.debug('No need to apply changes, nothing changed')
     self.is_new = False
     self.dirty_entries = []
Code example #2
    def register(node_id):
        """
        Adds a Node with a given node_id to the model
        :param node_id: ID of the ALBA node
        :type node_id: str

        :return: None
        """
        node = AlbaNodeList.get_albanode_by_node_id(node_id)
        if node is None:
            main_config = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
            node = AlbaNode()
            node.ip = main_config['ip']
            node.port = main_config['port']
            node.username = main_config['username']
            node.password = main_config['password']
            node.storagerouter = StorageRouterList.get_by_ip(main_config['ip'])
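        # Validate reachability and identity of the node before persisting it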
        data = node.client.get_metadata()
        if data['_success'] is False and data['_error'] == 'Invalid credentials':
            raise RuntimeError('Invalid credentials')
        if data['node_id'] != node_id:
            AlbaNodeController._logger.error('Unexpected node_id: {0} vs {1}'.format(data['node_id'], node_id))
            raise RuntimeError('Unexpected node identifier')
        node.node_id = node_id
        node.type = 'ASD'
        node.save()

        # increase maintenance agents count for all nodes by 1
        for backend in AlbaBackendList.get_albabackends():
            nr_of_agents_key = AlbaNodeController.NR_OF_AGENTS_ETCD_TEMPLATE.format(backend.guid)
            if EtcdConfiguration.exists(nr_of_agents_key):
                EtcdConfiguration.set(nr_of_agents_key, int(EtcdConfiguration.get(nr_of_agents_key) + 1))
            else:
                EtcdConfiguration.set(nr_of_agents_key, 1)
        AlbaNodeController.checkup_maintenance_agents()
Code example #3
 def register(name, email, company, phone, newsletter):
     """
     Registers the environment
     """
     SupportAgent().run()  # Execute a single heartbeat run
     client = OVSClient('monitoring.openvstorage.com',
                        443,
                        credentials=None,
                        verify=True,
                        version=1)
     task_id = client.post(
         '/support/register/',
         data={
             'cluster_id': EtcdConfiguration.get('/ovs/framework/cluster_id'),
             'name': name,
             'email': email,
             'company': company,
             'phone': phone,
             'newsletter': newsletter,
             'register_only': True
         })
     if task_id:
         client.wait_for_task(task_id, timeout=120)
     EtcdConfiguration.set('/ovs/framework/registered', True)
Code example #4
    def migrate(master_ips=None, extra_ips=None):
        """
        Executes all migrations. It keeps track of an internal "migration version" which always increases by one
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """

        data = EtcdConfiguration.get('/ovs/framework/versions') if EtcdConfiguration.exists('/ovs/framework/versions') else {}
        migrators = []
        path = os.path.join(os.path.dirname(__file__), 'migration')
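        # Collect (identifier, migrate) from every module in ./migration that defines a direct subclass of object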
        for filename in os.listdir(path):
            if os.path.isfile(os.path.join(path, filename)) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, os.path.join(path, filename))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) and member[1].__module__ == name and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append((member[1].identifier, member[1].migrate))

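        # Run each migrator from its stored base version; the highest version reached is recorded per identifier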
        end_version = 0
        for identifier, method in migrators:
            base_version = data[identifier] if identifier in data else 0
            version = method(base_version, master_ips, extra_ips)
            if version > end_version:
                end_version = version
            data[identifier] = end_version

        EtcdConfiguration.set('/ovs/framework/versions', data)
Code example #5
    def migrate(master_ips=None, extra_ips=None):
        """
        Executes all migrations. It keeps track of an internal "migration version" which always increases by one
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """

        data = EtcdConfiguration.get('/ovs/framework/versions') if EtcdConfiguration.exists('/ovs/framework/versions') else {}
        migrators = []
        path = os.path.join(os.path.dirname(__file__), 'migration')
        for filename in os.listdir(path):
            if os.path.isfile(os.path.join(path, filename)) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, os.path.join(path, filename))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) and member[1].__module__ == name and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append((member[1].identifier, member[1].migrate))

        end_version = 0
        for identifier, method in migrators:
            base_version = data[identifier] if identifier in data else 0
            version = method(base_version, master_ips, extra_ips)
            if version > end_version:
                end_version = version
            data[identifier] = end_version

        EtcdConfiguration.set('/ovs/framework/versions', data)
Code example #6
 def get_path(binary_name):
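     # Resolve the binary path from Etcd; fall back to 'which' and cache the result in Etcd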
     machine_id = System.get_my_machine_id()
     config_location = '/ovs/framework/hosts/{0}/paths|{1}'.format(machine_id, binary_name)
     path = EtcdConfiguration.get(config_location)
     if not path:
         try:
             path = check_output('which {0}'.format(binary_name), shell=True).strip()
             EtcdConfiguration.set(config_location, path)
         except CalledProcessError:
             return None
     return path
Code example #7
    def write(self):
        """
        Write the metadata to Etcd
        :return: None
        """
        if self.cluster_type is None or self.cluster_type == '':
            raise ValueError('Cluster type must be defined before being able to store the cluster metadata information')

        etcd_key = ArakoonClusterMetadata.ETCD_METADATA_KEY.format(self.cluster_id)
        EtcdConfiguration.set(key=etcd_key, value={'type': self.cluster_type,
                                                   'in_use': self.in_use,
                                                   'internal': self.internal})
Code example #8
    def write(self):
        """
        Write the metadata to Etcd
        :return: None
        """
        if self.cluster_type is None or self.cluster_type == '':
            raise ValueError('Cluster type must be defined before being able to store the cluster metadata information')

        etcd_key = ArakoonClusterMetadata.ETCD_METADATA_KEY.format(self.cluster_id)
        EtcdConfiguration.set(key=etcd_key, value={'type': self.cluster_type,
                                                   'in_use': self.in_use,
                                                   'internal': self.internal})
Code example #9
    def ovs_3977_maintenance_agent_test():
        """
        Test maintenance agent processes
        """
        def _get_agent_distribution(agent_name):
            result = {}
            total = 0
            for ip in alba_node_ips:
                count = General.execute_command_on_node(ip, 'ls /etc/init/alba-maintenance_{0}-* | wc -l'.format(agent_name))
                if count:
                    count = int(count)
                else:
                    count = 0
                total += count
                result[ip] = count
            result['total'] = total

            print 'Maintenance agent distribution: {0}'.format(result)
            for ip in alba_node_ips:
                assert (result[ip] == total / len(alba_node_ips) or result[ip] == (total / len(alba_node_ips)) + 1),\
                    "Agents not equally distributed!"

            return result

        backend = GeneralBackend.get_by_name(TestALBA.backend_name)
        if backend is None:
            backend = GeneralAlba.add_alba_backend(TestALBA.backend_name).backend
        name = backend.alba_backend.name

        alba_node_ips = [node.ip for node in GeneralAlba.get_alba_nodes()]

        etcd_key = '/ovs/alba/backends/{0}/maintenance/nr_of_agents'.format(backend.alba_backend.guid)
        nr_of_agents = EtcdConfiguration.get(etcd_key)
        print '1. - nr of agents: {0}'.format(nr_of_agents)

        actual_nr_of_agents = _get_agent_distribution(name)['total']
        assert nr_of_agents == actual_nr_of_agents, \
            'Actual {0} and requested {1} nr of agents do not match'.format(actual_nr_of_agents, nr_of_agents)

        # set nr to zero
        EtcdConfiguration.set(etcd_key, 0)
        GeneralAlba.checkup_maintenance_agents()
        actual_nr_of_agents = _get_agent_distribution(name)['total']
        assert actual_nr_of_agents == 0, \
            'Actual {0} and requested {1} nr of agents do not match'.format(actual_nr_of_agents, 0)
        print '2. - nr of agents: {0}'.format(actual_nr_of_agents)

        # set nr to 10
        EtcdConfiguration.set(etcd_key, 10)
        GeneralAlba.checkup_maintenance_agents()
        actual_nr_of_agents = _get_agent_distribution(name)['total']
        assert actual_nr_of_agents == 10, \
            'Actual {0} and requested {1} nr of agents do not match'.format(actual_nr_of_agents, 10)
        print '3. - nr of agents: {0}'.format(actual_nr_of_agents)
Code example #10
 def write_config(self):
     """
     Writes the configuration down in the format expected by Arakoon
     """
     contents = RawConfigParser()
     data = self.export()
     for section in data:
         contents.add_section(section)
         for item in data[section]:
             contents.set(section, item, data[section][item])
     config_io = StringIO()
     contents.write(config_io)
     EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(self.cluster_id), config_io.getvalue(), raw=True)
Code example #11
 def write_config(self):
     """
     Writes the configuration down in the format expected by Arakoon
     """
     contents = RawConfigParser()
     data = self.export()
     for section in data:
         contents.add_section(section)
         for item in data[section]:
             contents.set(section, item, data[section][item])
     config_io = StringIO()
     contents.write(config_io)
     EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(self.cluster_id), config_io.getvalue(), raw=True)
Code example #12
 def write_config(self):
     """
     Writes the configuration down in the format expected by Arakoon
     """
     (temp_handle, temp_filename) = tempfile.mkstemp()
     contents = RawConfigParser()
     data = self.export()
     for section in data:
         contents.add_section(section)
         for item in data[section]:
             contents.set(section, item, data[section][item])
     with open(temp_filename, 'wb') as config_file:
         contents.write(config_file)
     with open(temp_filename, 'r') as the_file:
         EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(self.cluster_id), the_file.read(), raw=True)
     os.remove(temp_filename)
Code example #13
File: license.py, Project: dawnpower/framework
 def register(name, email, company, phone, newsletter):
     """
     Registers the environment
     """
     SupportAgent().run()  # Execute a single heartbeat run
     client = OVSClient('monitoring.openvstorage.com', 443, credentials=None, verify=True, version=1)
     task_id = client.post('/support/register/',
                           data={'cluster_id': EtcdConfiguration.get('/ovs/framework/cluster_id'),
                                 'name': name,
                                 'email': email,
                                 'company': company,
                                 'phone': phone,
                                 'newsletter': newsletter,
                                 'register_only': True})
     if task_id:
         client.wait_for_task(task_id, timeout=120)
     EtcdConfiguration.set('/ovs/framework/registered', True)
Code example #14
File: ubuntu.py, Project: dawnpower/framework
 def get_path(binary_name):
     """
     Retrieve the absolute path for binary
     :param binary_name: Binary to get path for
     :return: Path
     """
     machine_id = System.get_my_machine_id()
     config_location = '/ovs/framework/hosts/{0}/paths|{1}'.format(machine_id, binary_name)
     if not EtcdConfiguration.exists(config_location):
         try:
             path = check_output('which {0}'.format(binary_name), shell=True).strip()
             EtcdConfiguration.set(config_location, path)
         except CalledProcessError:
             return None
     else:
         path = EtcdConfiguration.get(config_location)
     return path
Code example #15
File: migrator.py, Project: th3architect/framework
    def migrate(master_ips=None, extra_ips=None):
        """
        Executes all migrations. It keeps track of an internal "migration version" which always increases by one
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """
        machine_id = System.get_my_machine_id()
        key = '/ovs/framework/hosts/{0}/versions'.format(machine_id)
        try:
            data = EtcdConfiguration.get(key) if EtcdConfiguration.exists(key) else {}
        except EtcdConnectionFailed:
            import json  # Most likely 2.6 to 2.7 migration
            data = {}
            filename = '/opt/OpenvStorage/config/ovs.json'
            if os.path.exists(filename):
                with open(filename) as config_file:
                    data = json.load(config_file).get('core', {}).get('versions', {})
        migrators = []
        path = '/'.join([os.path.dirname(__file__), 'migration'])
        for filename in os.listdir(path):
            if os.path.isfile('/'.join([path, filename])) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, '/'.join([path, filename]))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) and member[1].__module__ == name and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append((member[1].identifier, member[1].migrate))

        end_version = 0
        for identifier, method in migrators:
            base_version = data[identifier] if identifier in data else 0
            version = method(base_version, master_ips, extra_ips)
            if version > end_version:
                end_version = version
            data[identifier] = end_version

        EtcdConfiguration.set(key, data)
Code example #16
 def save(self, client=None, reload_config=True):
     """
     Saves the configuration to a given file, optionally a remote one
     :param client: If provided, save remote configuration
     :param reload_config: Reload the running Storage Driver configuration
     """
     self._validate()
     for key in self.configuration:
         contents = json.dumps(self.configuration[key], indent=4)
         EtcdConfiguration.set(self.path.format(key), contents, raw=True)
     if self.config_type == 'storagedriver' and reload_config is True:
         if len(self.dirty_entries) > 0:
             if client is None:
                 self._logger.info('Applying local storagedriver configuration changes')
                 changes = LSRClient(self.remote_path).update_configuration(self.remote_path)
             else:
                 self._logger.info('Applying storagedriver configuration changes on {0}'.format(client.ip))
                 with remote(client.ip, [LSRClient]) as rem:
                     changes = copy.deepcopy(rem.LocalStorageRouterClient(self.remote_path).update_configuration(self.remote_path))
             for change in changes:
                 if change['param_name'] not in self.dirty_entries:
                     raise RuntimeError('Unexpected configuration change: {0}'.format(change['param_name']))
                 self._logger.info('Changed {0} from "{1}" to "{2}"'.format(change['param_name'], change['old_value'], change['new_value']))
                 self.dirty_entries.remove(change['param_name'])
             self._logger.info('Changes applied')
             if len(self.dirty_entries) > 0:
                 self._logger.warning('Following changes were not applied: {0}'.format(', '.join(self.dirty_entries)))
         else:
             self._logger.debug('No need to apply changes, nothing changed')
     self.is_new = False
     self.dirty_entries = []
Code example #17
 def write_config(self):
     """
     Writes the configuration down in the format expected by Arakoon
     """
     (temp_handle, temp_filename) = tempfile.mkstemp()
     contents = RawConfigParser()
     data = self.export()
     for section in data:
         contents.add_section(section)
         for item in data[section]:
             contents.set(section, item, data[section][item])
     with open(temp_filename, 'wb') as config_file:
         contents.write(config_file)
     with open(temp_filename, 'r') as the_file:
         EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(self.cluster_id), the_file.read(), raw=True)
     os.remove(temp_filename)
Code example #18
File: ubuntu.py, Project: tjdeadfish/openvstorage
 def get_path(binary_name):
     """
     Retrieve the absolute path for binary
     :param binary_name: Binary to get path for
     :return: Path
     """
     machine_id = System.get_my_machine_id()
     config_location = '/ovs/framework/hosts/{0}/paths|{1}'.format(machine_id, binary_name)
     if not EtcdConfiguration.exists(config_location):
         try:
             path = check_output('which {0}'.format(binary_name), shell=True).strip()
             EtcdConfiguration.set(config_location, path)
         except CalledProcessError:
             return None
     else:
         path = EtcdConfiguration.get(config_location)
     return path
Code example #19
    def verify_namespaces():
        """
        Verify namespaces for all backends
        """
        logger.info('verify namespace task scheduling started')

        job_factor = 10
        job_factor_key = '/ovs/alba/backends/job_factor'
        if EtcdConfiguration.exists(job_factor_key):
            job_factor = EtcdConfiguration.get(job_factor_key)
        else:
            EtcdConfiguration.set(job_factor_key, job_factor)

        for albabackend in AlbaBackendList.get_albabackends():
            config = 'etcd://127.0.0.1:2379/ovs/arakoon/{0}-abm/config'.format(albabackend.backend.name)
            namespaces = AlbaCLI.run('list-namespaces', config=config, as_json=True)
            for namespace in namespaces:
                logger.info('verifying namespace: {0} scheduled ...'.format(namespace['name']))
                AlbaCLI.run('verify-namespace {0} --factor={1}'.format(namespace['name'], job_factor))

        logger.info('verify namespace task scheduling finished')
Code example #20
    def verify_namespaces():
        """
        Verify namespaces for all backends
        """
        AlbaScheduledTaskController._logger.info('verify namespace task scheduling started')

        verification_factor = 10
        verification_factor_key = '/ovs/alba/backends/verification_factor'
        if EtcdConfiguration.exists(verification_factor_key):
            verification_factor = EtcdConfiguration.get(verification_factor_key)
        else:
            EtcdConfiguration.set(verification_factor_key, verification_factor)

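        # Schedule a verify-namespace job for every namespace of every ALBA backend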
        for albabackend in AlbaBackendList.get_albabackends():
            backend_name = albabackend.abm_services[0].service.name if albabackend.abm_services else albabackend.name + '-abm'
            config = 'etcd://127.0.0.1:2379/ovs/arakoon/{0}/config'.format(backend_name)
            namespaces = AlbaCLI.run('list-namespaces', config=config, as_json=True)
            for namespace in namespaces:
                AlbaScheduledTaskController._logger.info('verifying namespace: {0} scheduled ...'.format(namespace['name']))
                AlbaCLI.run('verify-namespace {0} --factor={1}'.format(namespace['name'], verification_factor))

        AlbaScheduledTaskController._logger.info('verify namespace task scheduling finished')
Code example #21
File: migrator.py, Project: dawnpower/framework
    def migrate(master_ips=None, extra_ips=None):
        """
        Executes all migrations. It keeps track of an internal "migration version" which always increases by one
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """
        machine_id = System.get_my_machine_id()
        key = '/ovs/framework/hosts/{0}/versions'.format(machine_id)
        try:
            data = EtcdConfiguration.get(key) if EtcdConfiguration.exists(key) else {}
        except EtcdConnectionFailed:
            import json  # Most likely 2.6 to 2.7 migration
            data = {}
            filename = '/opt/OpenvStorage/config/ovs.json'
            if os.path.exists(filename):
                with open(filename) as config_file:
                    data = json.load(config_file).get('core', {}).get('versions', {})
        migrators = []
        path = os.path.join(os.path.dirname(__file__), 'migration')
        for filename in os.listdir(path):
            if os.path.isfile(os.path.join(path, filename)) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, os.path.join(path, filename))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) and member[1].__module__ == name and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append((member[1].identifier, member[1].migrate))

        end_version = 0
        for identifier, method in migrators:
            base_version = data[identifier] if identifier in data else 0
            version = method(base_version, master_ips, extra_ips)
            if version > end_version:
                end_version = version
            data[identifier] = end_version

        EtcdConfiguration.set(key, data)
Code example #22
    def checkup_maintenance_agents():
        """
        Check whether the requested nr of maintenance agents per backend is actually present
        Add / remove as necessary
        :return: None
        """
        service_template_key = 'alba-maintenance_{0}-{1}'
        maintenance_agents_map = {}
        asd_nodes = AlbaNodeList.get_albanodes()
        nr_of_storage_nodes = len(asd_nodes)

        def _get_node_load(backend_name):
            highest_load = 0
            lowest_load = sys.maxint
            agent_load = {'high_load_node': asd_nodes[0] if asd_nodes else None,
                          'low_load_node': asd_nodes[0] if asd_nodes else None,
                          'total_load': 0}
            for asd_node in asd_nodes:
                actual_nr_of_agents = 0
                maint_services = asd_node.client.list_maintenance_services()
                for service_name in maint_services:
                    if service_template_key.format(backend_name, '') in service_name:
                        actual_nr_of_agents += 1
                if actual_nr_of_agents > highest_load:
                    agent_load['high_load_node'] = asd_node
                    highest_load = actual_nr_of_agents
                if actual_nr_of_agents < lowest_load:
                    agent_load['low_load_node'] = asd_node
                    lowest_load = actual_nr_of_agents
                agent_load['total_load'] += actual_nr_of_agents

            return agent_load

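        # Make sure every ALBA backend has a requested agent count in Etcd and compare it with the actual load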
        alba_backends = AlbaBackendList.get_albabackends()
        for alba_backend in alba_backends:
            nr_of_agents_key = AlbaNodeController.NR_OF_AGENTS_ETCD_TEMPLATE.format(alba_backend.guid)
            name = alba_backend.backend.name
            if not EtcdConfiguration.exists(nr_of_agents_key):
                EtcdConfiguration.set(nr_of_agents_key, nr_of_storage_nodes)
            required_nr = EtcdConfiguration.get(nr_of_agents_key)
            maintenance_agents_map[name] = {'required': required_nr,
                                            'actual': _get_node_load(name)['total_load'],
                                            'backend': alba_backend.backend}

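        # Add agents on the least loaded node or remove them from the most loaded node until actual matches requested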
        for name, values in maintenance_agents_map.iteritems():
            AlbaNodeController._logger.info('Checking backend: {0}'.format(name))
            to_process = values['required'] - values['actual']

            if to_process == 0:
                AlbaNodeController._logger.info('No action required for: {0}'.format(name))
            elif to_process >= 0:
                AlbaNodeController._logger.info('Adding {0} maintenance agent(s) for {1}'.format(to_process, name))
                for _ in xrange(to_process):
                    unique_hash = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(16))
                    node = _get_node_load(name)['low_load_node']
                    AlbaNodeController._logger.info('Service to add: ' + service_template_key.format(name, unique_hash))
                    if node and node.client:
                        node.client.add_maintenance_service(service_template_key.format(name, unique_hash),
                                                            values['backend'].alba_backend.guid,
                                                            AlbaController.get_abm_service_name(values['backend']))
                        AlbaNodeController._logger.info('Service added')
            else:
                to_process = abs(to_process)
                AlbaNodeController._logger.info('Removing {0} maintenance agent(s) for {1}'.format(to_process, name))
                for _ in xrange(to_process):
                    node = _get_node_load(name)['high_load_node']
                    services = node.client.list_maintenance_services()
                    if services and node and node.client:
                        for service in services:
                            if 'alba-maintenance_' + name in service:
                                node.client.remove_maintenance_service(service)
                                break
Code example #23
File: ovsmigrator.py, Project: th3architect/framework
    def migrate(previous_version, master_ips=None, extra_ips=None):
        """
        Migrates from any version to any version, running all migrations required
        If previous_version is for example 0 and this script is at
        version 3 it will execute two steps:
          - 1 > 2
          - 2 > 3
        :param previous_version: The previous version from which to start the migration.
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """

        logger = LogHandler.get('extensions', name='migration')
        working_version = previous_version

        # Version 1 introduced:
        # - Flexible SSD layout
        if working_version < 1:
            try:
                from ovs.extensions.generic.configuration import Configuration
                if Configuration.exists('ovs.arakoon'):
                    Configuration.delete('ovs.arakoon', remove_root=True)
                Configuration.set('ovs.core.ovsdb', '/opt/OpenvStorage/db')
            except:
                logger.exception('Error migrating to version 1')

            working_version = 1

        # Version 2 introduced:
        # - Registration
        if working_version < 2:
            try:
                import time
                from ovs.extensions.generic.configuration import Configuration
                if not Configuration.exists('ovs.core.registered'):
                    Configuration.set('ovs.core.registered', False)
                    Configuration.set('ovs.core.install_time', time.time())
            except:
                logger.exception('Error migrating to version 2')

            working_version = 2

        # Version 3 introduced:
        # - New arakoon clients
        if working_version < 3:
            try:
                from ovs.extensions.db.arakoon import ArakoonInstaller
                reload(ArakoonInstaller)
                from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller
                from ovs.extensions.generic.sshclient import SSHClient
                from ovs.extensions.generic.configuration import Configuration
                if master_ips is not None:
                    for ip in master_ips:
                        client = SSHClient(ip)
                        if client.dir_exists(ArakoonInstaller.ARAKOON_CONFIG_DIR):
                            for cluster_name in client.dir_list(ArakoonInstaller.ARAKOON_CONFIG_DIR):
                                try:
                                    ArakoonInstaller.deploy_cluster(cluster_name, ip)
                                except:
                                    pass
                if Configuration.exists('ovs.core.storage.persistent'):
                    Configuration.set('ovs.core.storage.persistent', 'pyrakoon')
            except:
                logger.exception('Error migrating to version 3')

            working_version = 3

        # Version 4 introduced:
        # - Etcd
        if working_version < 4:
            try:
                import os
                import json
                from ConfigParser import RawConfigParser
                from ovs.extensions.db.etcd import installer
                reload(installer)
                from ovs.extensions.db.etcd.installer import EtcdInstaller
                from ovs.extensions.db.etcd.configuration import EtcdConfiguration
                from ovs.extensions.generic.system import System
                host_id = System.get_my_machine_id()
                etcd_migrate = False
                if EtcdInstaller.has_cluster('127.0.0.1', 'config'):
                    etcd_migrate = True
                else:
                    if master_ips is not None and extra_ips is not None:
                        cluster_ip = None
                        for ip in master_ips + extra_ips:
                            if EtcdInstaller.has_cluster(ip, 'config'):
                                cluster_ip = ip
                                break
                        node_ip = None
                        path = '/opt/OpenvStorage/config/ovs.json'
                        if os.path.exists(path):
                            with open(path) as config_file:
                                config = json.load(config_file)
                                node_ip = config['grid']['ip']
                        if node_ip is not None:
                            if cluster_ip is None:
                                EtcdInstaller.create_cluster('config', node_ip)
                                EtcdConfiguration.initialize()
                                EtcdConfiguration.initialize_host(host_id)
                            else:
                                EtcdInstaller.extend_cluster(
                                    cluster_ip, node_ip, 'config')
                                EtcdConfiguration.initialize_host(host_id)
                            etcd_migrate = True
                if etcd_migrate is True:
                    # Migrating configuration files
                    path = '/opt/OpenvStorage/config/ovs.json'
                    if os.path.exists(path):
                        with open(path) as config_file:
                            config = json.load(config_file)
                            EtcdConfiguration.set('/ovs/framework/cluster_id', config['support']['cid'])
                            if not EtcdConfiguration.exists('/ovs/framework/install_time'):
                                EtcdConfiguration.set('/ovs/framework/install_time', config['core']['install_time'])
                            else:
                                EtcdConfiguration.set('/ovs/framework/install_time', min(EtcdConfiguration.get('/ovs/framework/install_time'), config['core']['install_time']))
                            EtcdConfiguration.set('/ovs/framework/registered', config['core']['registered'])
                            EtcdConfiguration.set('/ovs/framework/plugins/installed', config['plugins'])
                            EtcdConfiguration.set('/ovs/framework/stores', config['core']['storage'])
                            EtcdConfiguration.set('/ovs/framework/paths', {'cfgdir': config['core']['cfgdir'],
                                                                           'basedir': config['core']['basedir'],
                                                                           'ovsdb': config['core']['ovsdb']})
                            EtcdConfiguration.set('/ovs/framework/support', {'enablesupport': config['support']['enablesupport'],
                                                                             'enabled': config['support']['enabled'],
                                                                             'interval': config['support']['interval']})
                            EtcdConfiguration.set('/ovs/framework/storagedriver', {'mds_safety': config['storagedriver']['mds']['safety'],
                                                                                   'mds_tlogs': config['storagedriver']['mds']['tlogs'],
                                                                                   'mds_maxload': config['storagedriver']['mds']['maxload']})
                            EtcdConfiguration.set('/ovs/framework/webapps', {'html_endpoint': config['webapps']['html_endpoint'],
                                                                             'oauth2': config['webapps']['oauth2']})
                            EtcdConfiguration.set('/ovs/framework/messagequeue', {'endpoints': [],
                                                                                  'protocol': config['core']['broker']['protocol'],
                                                                                  'user': config['core']['broker']['login'],
                                                                                  'port': config['core']['broker']['port'],
                                                                                  'password': config['core']['broker']['password'],
                                                                                  'queues': config['core']['broker']['queues']})
                            host_key = '/ovs/framework/hosts/{0}{{0}}'.format(host_id)
                            EtcdConfiguration.set(host_key.format('/storagedriver'), {'rsp': config['storagedriver']['rsp'],
                                                                                      'vmware_mode': config['storagedriver']['vmware_mode']})
                            EtcdConfiguration.set(host_key.format('/ports'), config['ports'])
                            EtcdConfiguration.set(host_key.format('/setupcompleted'), config['core']['setupcompleted'])
                            EtcdConfiguration.set(host_key.format('/versions'), config['core'].get('versions', {}))
                            EtcdConfiguration.set(host_key.format('/type'), config['core']['nodetype'])
                            EtcdConfiguration.set(host_key.format('/ip'), config['grid']['ip'])
                    path = '{0}/memcacheclient.cfg'.format(
                        EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
                    if os.path.exists(path):
                        config = RawConfigParser()
                        config.read(path)
                        nodes = [
                            config.get(node.strip(), 'location').strip()
                            for node in config.get('main', 'nodes').split(',')
                        ]
                        EtcdConfiguration.set(
                            '/ovs/framework/memcache|endpoints', nodes)
                        os.remove(path)
                    path = '{0}/rabbitmqclient.cfg'.format(
                        EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
                    if os.path.exists(path):
                        config = RawConfigParser()
                        config.read(path)
                        nodes = [
                            config.get(node.strip(), 'location').strip()
                            for node in config.get('main', 'nodes').split(',')
                        ]
                        EtcdConfiguration.set(
                            '/ovs/framework/messagequeue|endpoints', nodes)
                        os.remove(path)
                    # Migrate arakoon configuration files
                    from ovs.extensions.db.arakoon import ArakoonInstaller
                    reload(ArakoonInstaller)
                    from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller, ArakoonClusterConfig
                    from ovs.extensions.generic.sshclient import SSHClient
                    if master_ips is not None:
                        config_dir = '/opt/OpenvStorage/config/arakoon/'
                        for ip in master_ips:
                            client = SSHClient(ip)
                            if client.dir_exists(config_dir):
                                for cluster_name in client.dir_list(config_dir):
                                    try:
                                        with open('{0}/{1}/{1}.cfg'.format(config_dir, cluster_name)) as config_file:
                                            EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(cluster_name),
                                                                  config_file.read(),
                                                                  raw=True)
                                            ArakoonInstaller.deploy_cluster(cluster_name, ip)
                                    except:
                                        logger.exception('Error migrating {0} on {1}'.format(cluster_name, ip))
                                client.dir_delete(config_dir)
            except:
                logger.exception('Error migrating to version 4')

            working_version = 4

        return working_version
Code example #24
    def _voldrv_arakoon_checkup(create_cluster):
        def add_service(service_storagerouter, arakoon_ports):
            """
            Add a service to the storage router
            :param service_storagerouter: Storage Router to add the service to
            :param arakoon_ports: Port information
            :return: The newly created and added service
            """
            new_service = Service()
            new_service.name = service_name
            new_service.type = service_type
            new_service.ports = arakoon_ports
            new_service.storagerouter = service_storagerouter
            new_service.save()
            return new_service

        service_name = 'arakoon-voldrv'
        service_type = ServiceTypeList.get_by_name(
            ServiceType.SERVICE_TYPES.ARAKOON)

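        # Gather the existing arakoon-voldrv services and the IPs of the internally managed ones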
        current_ips = []
        current_services = []
        for service in service_type.services:
            if service.name == service_name:
                current_services.append(service)
                if service.is_internal is True:
                    current_ips.append(service.storagerouter.ip)

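        # Collect the master Storage Routers with a DB role partition; these are candidates to host the voldrv cluster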
        all_sr_ips = [
            storagerouter.ip
            for storagerouter in StorageRouterList.get_slaves()
        ]
        available_storagerouters = {}
        for storagerouter in StorageRouterList.get_masters():
            storagerouter.invalidate_dynamics(['partition_config'])
            if len(storagerouter.partition_config[DiskPartition.ROLES.DB]) > 0:
                available_storagerouters[storagerouter] = DiskPartition(
                    storagerouter.partition_config[DiskPartition.ROLES.DB][0])
            all_sr_ips.append(storagerouter.ip)

        if create_cluster is True and len(current_services) == 0:  # Create new cluster
            metadata = ArakoonInstaller.get_unused_arakoon_metadata_and_claim(
                cluster_type=ServiceType.ARAKOON_CLUSTER_TYPES.SD)
            if metadata is None:  # No externally managed cluster found, we create 1 ourselves
                if not available_storagerouters:
                    raise RuntimeError(
                        'Could not find any Storage Router with a DB role')

                storagerouter, partition = available_storagerouters.items()[0]
                result = ArakoonInstaller.create_cluster(
                    cluster_name='voldrv',
                    cluster_type=ServiceType.ARAKOON_CLUSTER_TYPES.SD,
                    ip=storagerouter.ip,
                    base_dir=partition.folder,
                    claim=True)
                ports = [result['client_port'], result['messaging_port']]
                metadata = result['metadata']
                ArakoonInstaller.restart_cluster_add(cluster_name='voldrv',
                                                     current_ips=current_ips,
                                                     new_ip=storagerouter.ip)
                current_ips.append(storagerouter.ip)
            else:
                ports = []
                storagerouter = None

            cluster_name = metadata.cluster_id
            EtcdConfiguration.set('/ovs/framework/arakoon_clusters|voldrv',
                                  cluster_name)
            StorageDriverController._logger.info(
                'Claiming {0} managed arakoon cluster: {1}'.format(
                    'externally' if storagerouter is None else 'internally',
                    cluster_name))
            StorageDriverController._configure_arakoon_to_volumedriver(
                cluster_name=cluster_name)
            current_services.append(
                add_service(service_storagerouter=storagerouter,
                            arakoon_ports=ports))

        cluster_name = EtcdConfiguration.get(
            '/ovs/framework/arakoon_clusters').get('voldrv')
        if cluster_name is None:
            return
        metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(
            cluster_name=cluster_name)
        if 0 < len(current_services) < len(available_storagerouters) and metadata.internal is True:
            for storagerouter, partition in available_storagerouters.iteritems():
                if storagerouter.ip in current_ips:
                    continue
                result = ArakoonInstaller.extend_cluster(
                    master_ip=current_services[0].storagerouter.ip,
                    new_ip=storagerouter.ip,
                    cluster_name=cluster_name,
                    base_dir=partition.folder)
                add_service(storagerouter,
                            [result['client_port'], result['messaging_port']])
                current_ips.append(storagerouter.ip)
                ArakoonInstaller.restart_cluster_add(cluster_name, current_ips,
                                                     storagerouter.ip)
            StorageDriverController._configure_arakoon_to_volumedriver(
                cluster_name=cluster_name)
Code example #25
 def post_upgrade(client):
     """
     Upgrade actions after the new packages have actually been installed
     :param client: SSHClient object
     :return: None
     """
     # If we can reach Etcd with a valid config, and there's still an old config file present, delete it
     from ovs.extensions.db.etcd.configuration import EtcdConfiguration
     path = '/opt/OpenvStorage/config/ovs.json'
     if EtcdConfiguration.exists('/ovs/framework/cluster_id') and client.file_exists(path):
         client.file_delete(path)
     # Migrate volumedriver & albaproxy configuration files
     import uuid
     from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
     from ovs.dal.lists.storagedriverlist import StorageDriverList
     from ovs.extensions.generic.system import System
     with remote(client.ip,
                 [StorageDriverConfiguration, os, open, json, System],
                 username='******') as rem:
         configuration_dir = '{0}/storagedriver/storagedriver'.format(
             EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
         host_id = rem.System.get_my_machine_id()
         if rem.os.path.exists(configuration_dir):
             for storagedriver in StorageDriverList.get_storagedrivers_by_storagerouter(
                     rem.System.get_my_storagerouter().guid):
                 vpool = storagedriver.vpool
                 if storagedriver.alba_proxy is not None:
                     config_tree = '/ovs/vpools/{0}/proxies/{1}/config/{{0}}'.format(
                         vpool.guid, storagedriver.alba_proxy.guid)
                     # ABM config
                     abm_config = '{0}/{1}_alba.cfg'.format(
                         configuration_dir, vpool.name)
                     if rem.os.path.exists(abm_config):
                         with rem.open(abm_config) as config_file:
                             EtcdConfiguration.set(
                                 config_tree.format('abm'),
                                 config_file.read(),
                                 raw=True)
                         rem.os.remove(abm_config)
                     # Albaproxy config
                     alba_config = '{0}/{1}_alba.json'.format(
                         configuration_dir, vpool.name)
                     if rem.os.path.exists(alba_config):
                         with rem.open(alba_config) as config_file:
                             config = rem.json.load(config_file)
                             del config['albamgr_cfg_file']
                             config['albamgr_cfg_url'] = 'etcd://127.0.0.1:2379{0}'.format(config_tree.format('abm'))
                             EtcdConfiguration.set(
                                 config_tree.format('main'),
                                 json.dumps(config, indent=4),
                                 raw=True)
                         params = {
                             'VPOOL_NAME': vpool.name,
                             'VPOOL_GUID': vpool.guid,
                             'PROXY_ID': storagedriver.alba_proxy.guid
                         }
                         alba_proxy_service = 'ovs-albaproxy_{0}'.format(
                             vpool.name)
                         ServiceManager.add_service(
                             name='ovs-albaproxy',
                             params=params,
                             client=client,
                             target_name=alba_proxy_service)
                         rem.os.remove(alba_config)
                 # Volumedriver config
                 current_file = '{0}/{1}.json'.format(
                     configuration_dir, vpool.name)
                 if rem.os.path.exists(current_file):
                     readcache_size = 0
                     with rem.open(current_file) as config_file:
                         config = rem.json.load(config_file)
                     config['distributed_transaction_log'] = {}
                     config['distributed_transaction_log']['dtl_transport'] = config['failovercache']['failovercache_transport']
                     config['distributed_transaction_log']['dtl_path'] = config['failovercache']['failovercache_path']
                     config['volume_manager']['dtl_throttle_usecs'] = config['volume_manager']['foc_throttle_usecs']
                     del config['failovercache']
                     del config['volume_manager']['foc_throttle_usecs']
                     sdc = rem.StorageDriverConfiguration('storagedriver', vpool.guid, storagedriver.storagedriver_id)
                     sdc.configuration = config
                     sdc.save(reload_config=False)
                     for mountpoint in config['content_addressed_cache']['clustercache_mount_points']:
                         readcache_size += int(mountpoint['size'].replace('KiB', ''))
                     params = {'VPOOL_MOUNTPOINT': storagedriver.mountpoint,
                               'HYPERVISOR_TYPE': storagedriver.storagerouter.pmachine.hvtype,
                               'VPOOL_NAME': vpool.name,
                               'CONFIG_PATH': sdc.remote_path,
                               'UUID': str(uuid.uuid4()),
                               'OVS_UID': client.run('id -u ovs').strip(),
                               'OVS_GID': client.run('id -g ovs').strip(),
                               'KILL_TIMEOUT': str(int(readcache_size / 1024.0 / 1024.0 / 6.0 + 30))}
                     vmware_mode = EtcdConfiguration.get('/ovs/framework/hosts/{0}/storagedriver|vmware_mode'.format(host_id))
                     dtl_service = 'ovs-dtl_{0}'.format(vpool.name)
                     ServiceManager.add_service(name='ovs-dtl',
                                                params=params,
                                                client=client,
                                                target_name=dtl_service)
                     if vpool.backend_type.code == 'alba':
                         alba_proxy_service = 'ovs-albaproxy_{0}'.format(
                             vpool.name)
                         dependencies = [alba_proxy_service]
                     else:
                         dependencies = None
                     if vmware_mode == 'ganesha':
                         template_name = 'ovs-ganesha'
                     else:
                         template_name = 'ovs-volumedriver'
                     voldrv_service = 'ovs-volumedriver_{0}'.format(
                         vpool.name)
                     ServiceManager.add_service(
                         name=template_name,
                         params=params,
                         client=client,
                         target_name=voldrv_service,
                         additional_dependencies=dependencies)
                     rem.os.remove(current_file)
                 # Ganesha config, if available
                 current_file = '{0}/{1}_ganesha.conf'.format(
                     configuration_dir, vpool.name)
                 if rem.os.path.exists(current_file):
                     sdc = rem.StorageDriverConfiguration(
                         'storagedriver', vpool.guid,
                         storagedriver.storagedriver_id)
                     contents = ''
                     for template in ['ganesha-core', 'ganesha-export']:
                         contents += client.file_read(
                             '/opt/OpenvStorage/config/templates/{0}.conf'.
                             format(template))
                     params = {'VPOOL_NAME': vpool.name,
                               'VPOOL_MOUNTPOINT': '/mnt/{0}'.format(vpool.name),
                               'CONFIG_PATH': sdc.remote_path,
                               'NFS_FILESYSTEM_ID': storagedriver.storagerouter.ip.split('.', 2)[-1]}
                     for key, value in params.iteritems():
                         contents = contents.replace('<{0}>'.format(key), value)
                     client.file_write(current_file, contents)
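
Note on the KILL_TIMEOUT parameter used above: the read cache mount point sizes are given in KiB, summed, converted to GiB via two divisions by 1024, divided by 6 and padded with 30 seconds. A minimal standalone sketch of that arithmetic; the mount point layout below is made up for illustration:

def kill_timeout_seconds(mount_points):
    # Sum the read cache mount point sizes, expressed as '<n>KiB' strings
    readcache_kib = sum(int(mp['size'].replace('KiB', '')) for mp in mount_points)
    # KiB -> GiB, then 1 second per 6 GiB of read cache plus a 30 second floor
    return int(readcache_kib / 1024.0 / 1024.0 / 6.0 + 30)

# Hypothetical layout: two read cache mount points of 100 GiB each
print(kill_timeout_seconds([{'size': '104857600KiB'}, {'size': '104857600KiB'}]))  # -> 63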
Code example #26
0
    def _voldrv_arakoon_checkup(create_cluster):
        """
        Creates or extends the 'voldrv' Arakoon cluster used by the volumedriver
        :param create_cluster: If True, create the cluster when it does not exist yet
        """
        def add_service(service_storagerouter, arakoon_ports):
            """
            Add a service to the storage router
            :param service_storagerouter: Storage Router to add the service to
            :param arakoon_ports: Port information
            :return: The newly created and added service
            """
            new_service = Service()
            new_service.name = service_name
            new_service.type = service_type
            new_service.ports = arakoon_ports
            new_service.storagerouter = service_storagerouter
            new_service.save()
            return new_service

        service_name = 'arakoon-voldrv'
        service_type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ARAKOON)

        current_ips = []
        current_services = []
        for service in service_type.services:
            if service.name == service_name:
                current_services.append(service)
                if service.is_internal is True:
                    current_ips.append(service.storagerouter.ip)

        all_sr_ips = [storagerouter.ip for storagerouter in StorageRouterList.get_slaves()]
        available_storagerouters = {}
        for storagerouter in StorageRouterList.get_masters():
            storagerouter.invalidate_dynamics(['partition_config'])
            if len(storagerouter.partition_config[DiskPartition.ROLES.DB]) > 0:
                available_storagerouters[storagerouter] = DiskPartition(storagerouter.partition_config[DiskPartition.ROLES.DB][0])
            all_sr_ips.append(storagerouter.ip)

        if create_cluster is True and len(current_services) == 0:  # Create new cluster
            metadata = ArakoonInstaller.get_unused_arakoon_metadata_and_claim(cluster_type=ServiceType.ARAKOON_CLUSTER_TYPES.SD)
            if metadata is None:  # No externally managed cluster found, we create 1 ourselves
                if not available_storagerouters:
                    raise RuntimeError('Could not find any Storage Router with a DB role')

                storagerouter, partition = available_storagerouters.items()[0]
                result = ArakoonInstaller.create_cluster(cluster_name='voldrv',
                                                         cluster_type=ServiceType.ARAKOON_CLUSTER_TYPES.SD,
                                                         ip=storagerouter.ip,
                                                         base_dir=partition.folder,
                                                         claim=True)
                ports = [result['client_port'], result['messaging_port']]
                metadata = result['metadata']
                ArakoonInstaller.restart_cluster_add(cluster_name='voldrv', current_ips=current_ips, new_ip=storagerouter.ip)
                current_ips.append(storagerouter.ip)
            else:
                ports = []
                storagerouter = None

            cluster_name = metadata.cluster_id
            EtcdConfiguration.set('/ovs/framework/arakoon_clusters|voldrv', cluster_name)
            StorageDriverController._logger.info('Claiming {0} managed arakoon cluster: {1}'.format('externally' if storagerouter is None else 'internally', cluster_name))
            StorageDriverController._configure_arakoon_to_volumedriver(cluster_name=cluster_name)
            current_services.append(add_service(service_storagerouter=storagerouter, arakoon_ports=ports))

        cluster_name = EtcdConfiguration.get('/ovs/framework/arakoon_clusters').get('voldrv')
        if cluster_name is None:
            return
        metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name)
        if 0 < len(current_services) < len(available_storagerouters) and metadata.internal is True:
            for storagerouter, partition in available_storagerouters.iteritems():
                if storagerouter.ip in current_ips:
                    continue
                result = ArakoonInstaller.extend_cluster(master_ip=current_services[0].storagerouter.ip,
                                                         new_ip=storagerouter.ip,
                                                         cluster_name=cluster_name,
                                                         base_dir=partition.folder)
                add_service(storagerouter, [result['client_port'], result['messaging_port']])
                current_ips.append(storagerouter.ip)
                ArakoonInstaller.restart_cluster_add(cluster_name, current_ips, storagerouter.ip)
            StorageDriverController._configure_arakoon_to_volumedriver(cluster_name=cluster_name)
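
The checkup above follows a small decision tree: when asked to create a cluster and none is registered yet, it either claims an unused externally managed Arakoon cluster or creates an internal one on a master with a DB role; afterwards, an internal cluster is extended to every remaining DB-role master that is not yet a member. A condensed, hypothetical sketch of that flow (simplified stand-ins, not the framework API itself):

def plan_voldrv_checkup(create_cluster, current_ips, db_master_ips, external_available):
    # Returns the actions the checkup would take, in order (illustration only)
    actions = []
    if create_cluster and not current_ips:
        if external_available:
            actions.append('claim externally managed cluster')
        elif db_master_ips:
            actions.append('create internal cluster on {0}'.format(db_master_ips[0]))
            current_ips = [db_master_ips[0]]
        else:
            raise RuntimeError('Could not find any Storage Router with a DB role')
    for ip in db_master_ips:  # internal clusters only: extend to the remaining DB masters
        if current_ips and ip not in current_ips:
            actions.append('extend internal cluster to {0}'.format(ip))
    return actions

# Example: one master already runs the cluster, a second DB-role master exists
print(plan_voldrv_checkup(False, ['10.0.0.1'], ['10.0.0.1', '10.0.0.2'], False))
# -> ['extend internal cluster to 10.0.0.2']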
Code example #27
0
    def migrate(previous_version, master_ips=None, extra_ips=None):
        """
        Migrates from any version to any version, running all migrations required
        If previous_version is, for example, 0 and this script is at
        version 3, it will execute two steps:
          - 1 > 2
          - 2 > 3
        :param previous_version: The previous version from which to start the migration.
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """
        logger = LogHandler.get('extensions', name='albamigration')
        working_version = previous_version

        # Version 1 introduced:
        # - Etcd
        if working_version < 1:
            try:
                import os
                import json
                from ovs.extensions.db.etcd import installer
                reload(installer)
                from ovs.extensions.db.etcd.installer import EtcdInstaller
                from ovs.extensions.db.etcd.configuration import EtcdConfiguration
                from ovs.extensions.generic.system import System
                host_id = System.get_my_machine_id()
                etcd_migrate = False
                if EtcdInstaller.has_cluster('127.0.0.1', 'config'):
                    etcd_migrate = True
                else:
                    if master_ips is not None and extra_ips is not None:
                        cluster_ip = None
                        for ip in master_ips + extra_ips:
                            if EtcdInstaller.has_cluster(ip, 'config'):
                                cluster_ip = ip
                                break
                        node_ip = None
                        path = '/opt/OpenvStorage/config/ovs.json'
                        if os.path.exists(path):
                            with open(path) as config_file:
                                config = json.load(config_file)
                                node_ip = config['grid']['ip']
                        if node_ip is not None:
                            if cluster_ip is None:
                                EtcdInstaller.create_cluster('config', node_ip)
                                EtcdConfiguration.initialize()
                                EtcdConfiguration.initialize_host(host_id)
                            else:
                                EtcdInstaller.extend_cluster(cluster_ip, node_ip, 'config')
                                EtcdConfiguration.initialize_host(host_id)
                            etcd_migrate = True
                if etcd_migrate is True:
                    # At this point, there is an etcd cluster. Migrating alba.json
                    path = '/opt/OpenvStorage/config/alba.json'
                    if os.path.exists(path):
                        with open(path) as config_file:
                            config = json.load(config_file)
                            EtcdConfiguration.set('/ovs/framework/plugins/alba/config', config)
                        os.remove(path)
                        EtcdConfiguration.set('/ovs/alba/backends/global_gui_error_interval', 300)
            except:
                logger.exception('Error migrating to version 1')

            working_version = 1

        return working_version
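
This migrator and the one below share the same skeleton: each version bump is guarded by a "working_version < N" check, runs in its own try/except so that a failing step is logged without aborting the remaining steps, and working_version is advanced afterwards regardless. A stripped-down sketch of that pattern, with the step callables left as placeholders:

def run_migrations(previous_version, steps, logger):
    # steps: {target_version: callable} mapping, e.g. {1: step_1, 2: step_2}
    working_version = previous_version
    for version in sorted(steps):
        if working_version < version:
            try:
                steps[version]()
            except Exception:
                logger.exception('Error migrating to version {0}'.format(version))
            working_version = version
    return working_version

With previous_version set to 0 and steps registered for versions 1, 2 and 3, this executes 0 > 1, 1 > 2 and 2 > 3 in order, which is the behaviour described in the docstrings.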
Code example #28
0
File: ovsmigrator.py Project: dawnpower/framework
    def migrate(previous_version, master_ips=None, extra_ips=None):
        """
        Migrates from any version to any version, running all migrations required
        If previous_version is, for example, 0 and this script is at
        version 3, it will execute two steps:
          - 1 > 2
          - 2 > 3
        :param previous_version: The previous version from which to start the migration.
        :param master_ips: IP addresses of the MASTER nodes
        :param extra_ips: IP addresses of the EXTRA nodes
        """

        working_version = previous_version

        # Version 1 introduced:
        # - Flexible SSD layout
        if working_version < 1:
            try:
                from ovs.extensions.generic.configuration import Configuration
                if Configuration.exists('ovs.arakoon'):
                    Configuration.delete('ovs.arakoon', remove_root=True)
                Configuration.set('ovs.core.ovsdb', '/opt/OpenvStorage/db')
            except:
                logger.exception('Error migrating to version 1')

            working_version = 1

        # Version 2 introduced:
        # - Registration
        if working_version < 2:
            try:
                import time
                from ovs.extensions.generic.configuration import Configuration
                if not Configuration.exists('ovs.core.registered'):
                    Configuration.set('ovs.core.registered', False)
                    Configuration.set('ovs.core.install_time', time.time())
            except:
                logger.exception('Error migrating to version 2')

            working_version = 2

        # Version 3 introduced:
        # - New arakoon clients
        if working_version < 3:
            try:
                from ovs.extensions.db.arakoon import ArakoonInstaller
                reload(ArakoonInstaller)
                from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller
                from ovs.extensions.generic.sshclient import SSHClient
                from ovs.extensions.generic.configuration import Configuration
                if master_ips is not None:
                    for ip in master_ips:
                        client = SSHClient(ip)
                        if client.dir_exists(ArakoonInstaller.ARAKOON_CONFIG_DIR):
                            for cluster_name in client.dir_list(ArakoonInstaller.ARAKOON_CONFIG_DIR):
                                try:
                                    ArakoonInstaller.deploy_cluster(cluster_name, ip)
                                except:
                                    pass
                if Configuration.exists('ovs.core.storage.persistent'):
                    Configuration.set('ovs.core.storage.persistent', 'pyrakoon')
            except:
                logger.exception('Error migrating to version 3')

            working_version = 3

        # Version 4 introduced:
        # - Etcd
        if working_version < 4:
            try:
                import os
                import json
                from ConfigParser import RawConfigParser
                from ovs.extensions.db.etcd import installer
                reload(installer)
                from ovs.extensions.db.etcd.installer import EtcdInstaller
                from ovs.extensions.db.etcd.configuration import EtcdConfiguration
                from ovs.extensions.generic.system import System
                host_id = System.get_my_machine_id()
                etcd_migrate = False
                if EtcdInstaller.has_cluster('127.0.0.1', 'config'):
                    etcd_migrate = True
                else:
                    if master_ips is not None and extra_ips is not None:
                        cluster_ip = None
                        for ip in master_ips + extra_ips:
                            if EtcdInstaller.has_cluster(ip, 'config'):
                                cluster_ip = ip
                                break
                        node_ip = None
                        path = '/opt/OpenvStorage/config/ovs.json'
                        if os.path.exists(path):
                            with open(path) as config_file:
                                config = json.load(config_file)
                                node_ip = config['grid']['ip']
                        if node_ip is not None:
                            if cluster_ip is None:
                                EtcdInstaller.create_cluster('config', node_ip)
                                EtcdConfiguration.initialize()
                                EtcdConfiguration.initialize_host(host_id)
                            else:
                                EtcdInstaller.extend_cluster(cluster_ip, node_ip, 'config')
                                EtcdConfiguration.initialize_host(host_id)
                            etcd_migrate = True
                if etcd_migrate is True:
                    # Migrating configuration files
                    path = '/opt/OpenvStorage/config/ovs.json'
                    if os.path.exists(path):
                        with open(path) as config_file:
                            config = json.load(config_file)
                            EtcdConfiguration.set('/ovs/framework/cluster_id', config['support']['cid'])
                            if not EtcdConfiguration.exists('/ovs/framework/install_time'):
                                EtcdConfiguration.set('/ovs/framework/install_time', config['core']['install_time'])
                            else:
                                EtcdConfiguration.set('/ovs/framework/install_time', min(EtcdConfiguration.get('/ovs/framework/install_time'), config['core']['install_time']))
                            EtcdConfiguration.set('/ovs/framework/registered', config['core']['registered'])
                            EtcdConfiguration.set('/ovs/framework/plugins/installed', config['plugins'])
                            EtcdConfiguration.set('/ovs/framework/stores', config['core']['storage'])
                            EtcdConfiguration.set('/ovs/framework/paths', {'cfgdir': config['core']['cfgdir'],
                                                                           'basedir': config['core']['basedir'],
                                                                           'ovsdb': config['core']['ovsdb']})
                            EtcdConfiguration.set('/ovs/framework/support', {'enablesupport': config['support']['enablesupport'],
                                                                             'enabled': config['support']['enabled'],
                                                                             'interval': config['support']['interval']})
                            EtcdConfiguration.set('/ovs/framework/storagedriver', {'mds_safety': config['storagedriver']['mds']['safety'],
                                                                                   'mds_tlogs': config['storagedriver']['mds']['tlogs'],
                                                                                   'mds_maxload': config['storagedriver']['mds']['maxload']})
                            EtcdConfiguration.set('/ovs/framework/webapps', {'html_endpoint': config['webapps']['html_endpoint'],
                                                                             'oauth2': config['webapps']['oauth2']})
                            EtcdConfiguration.set('/ovs/framework/messagequeue', {'endpoints': [],
                                                                                  'protocol': config['core']['broker']['protocol'],
                                                                                  'user': config['core']['broker']['login'],
                                                                                  'port': config['core']['broker']['port'],
                                                                                  'password': config['core']['broker']['password'],
                                                                                  'queues': config['core']['broker']['queues']})
                            host_key = '/ovs/framework/hosts/{0}{{0}}'.format(host_id)
                            EtcdConfiguration.set(host_key.format('/storagedriver'), {'rsp': config['storagedriver']['rsp'],
                                                                                      'vmware_mode': config['storagedriver']['vmware_mode']})
                            EtcdConfiguration.set(host_key.format('/ports'), config['ports'])
                            EtcdConfiguration.set(host_key.format('/setupcompleted'), config['core']['setupcompleted'])
                            EtcdConfiguration.set(host_key.format('/versions'), config['core'].get('versions', {}))
                            EtcdConfiguration.set(host_key.format('/type'), config['core']['nodetype'])
                            EtcdConfiguration.set(host_key.format('/ip'), config['grid']['ip'])
                    path = '{0}/memcacheclient.cfg'.format(EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
                    if os.path.exists(path):
                        config = RawConfigParser()
                        config.read(path)
                        nodes = [config.get(node.strip(), 'location').strip()
                                 for node in config.get('main', 'nodes').split(',')]
                        EtcdConfiguration.set('/ovs/framework/memcache|endpoints', nodes)
                        os.remove(path)
                    path = '{0}/rabbitmqclient.cfg'.format(EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
                    if os.path.exists(path):
                        config = RawConfigParser()
                        config.read(path)
                        nodes = [config.get(node.strip(), 'location').strip()
                                 for node in config.get('main', 'nodes').split(',')]
                        EtcdConfiguration.set('/ovs/framework/messagequeue|endpoints', nodes)
                        os.remove(path)
                    # Migrate arakoon configuration files
                    from ovs.extensions.db.arakoon import ArakoonInstaller
                    reload(ArakoonInstaller)
                    from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller, ArakoonClusterConfig
                    from ovs.extensions.generic.sshclient import SSHClient
                    if master_ips is not None:
                        config_dir = '/opt/OpenvStorage/config/arakoon/'
                        for ip in master_ips:
                            client = SSHClient(ip)
                            if client.dir_exists(config_dir):
                                for cluster_name in client.dir_list(config_dir):
                                    try:
                                        with open('{0}/{1}/{1}.cfg'.format(config_dir, cluster_name)) as config_file:
                                            EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(cluster_name),
                                                                  config_file.read(),
                                                                  raw=True)
                                            ArakoonInstaller.deploy_cluster(cluster_name, ip)
                                    except:
                                        logger.exception('Error migrating {0} on {1}'.format(cluster_name, ip))
                                client.dir_delete(config_dir)
            except:
                logger.exception('Error migrating to version 4')

            working_version = 4

        return working_version
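
The memcache and RabbitMQ client migrations above both resolve an endpoint list from an INI file: the main section lists node names, and each named section carries a location. A small self-contained example of that lookup against a hypothetical config file (Python 2, as in the code above; the endpoint values are invented):

from ConfigParser import RawConfigParser
from StringIO import StringIO

sample = StringIO('[main]\nnodes = node1, node2\n\n'
                  '[node1]\nlocation = 10.0.0.1:11211\n\n'
                  '[node2]\nlocation = 10.0.0.2:11211\n')
config = RawConfigParser()
config.readfp(sample)
nodes = [config.get(node.strip(), 'location').strip()
         for node in config.get('main', 'nodes').split(',')]
print(nodes)  # -> ['10.0.0.1:11211', '10.0.0.2:11211']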
Code example #29
0
File: update.py Project: DarumasLegs/framework
 def post_upgrade(client):
     """
     Upgrade actions after the new packages have actually been installed
     :param client: SSHClient object
     :return: None
     """
     # If we can reach Etcd with a valid config, and there's still an old config file present, delete it
     from ovs.extensions.db.etcd.configuration import EtcdConfiguration
     path = '/opt/OpenvStorage/config/ovs.json'
     if EtcdConfiguration.exists('/ovs/framework/cluster_id') and client.file_exists(path):
         client.file_delete(path)
     # Migrate volumedriver & albaproxy configuration files
     import uuid
     from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
     from ovs.dal.lists.storagedriverlist import StorageDriverList
     from ovs.extensions.generic.system import System
     with remote(client.ip, [StorageDriverConfiguration, os, open, json, System], username='******') as rem:
         configuration_dir = '{0}/storagedriver/storagedriver'.format(EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
         host_id = rem.System.get_my_machine_id()
         if rem.os.path.exists(configuration_dir):
             for storagedriver in StorageDriverList.get_storagedrivers_by_storagerouter(rem.System.get_my_storagerouter().guid):
                 vpool = storagedriver.vpool
                 if storagedriver.alba_proxy is not None:
                     config_tree = '/ovs/vpools/{0}/proxies/{1}/config/{{0}}'.format(vpool.guid, storagedriver.alba_proxy.guid)
                     # ABM config
                     abm_config = '{0}/{1}_alba.cfg'.format(configuration_dir, vpool.name)
                     if rem.os.path.exists(abm_config):
                         with rem.open(abm_config) as config_file:
                             EtcdConfiguration.set(config_tree.format('abm'), config_file.read(), raw=True)
                         rem.os.remove(abm_config)
                     # Albaproxy config
                     alba_config = '{0}/{1}_alba.json'.format(configuration_dir, vpool.name)
                     if rem.os.path.exists(alba_config):
                         with rem.open(alba_config) as config_file:
                             config = rem.json.load(config_file)
                             del config['albamgr_cfg_file']
                             config['albamgr_cfg_url'] = 'etcd://127.0.0.1:2379{0}'.format(config_tree.format('abm'))
                             EtcdConfiguration.set(config_tree.format('main'), json.dumps(config, indent=4), raw=True)
                         params = {'VPOOL_NAME': vpool.name,
                                   'VPOOL_GUID': vpool.guid,
                                   'PROXY_ID': storagedriver.alba_proxy.guid}
                         alba_proxy_service = 'ovs-albaproxy_{0}'.format(vpool.name)
                         ServiceManager.add_service(name='ovs-albaproxy', params=params, client=client, target_name=alba_proxy_service)
                         rem.os.remove(alba_config)
                 # Volumedriver config
                 current_file = '{0}/{1}.json'.format(configuration_dir, vpool.name)
                 if rem.os.path.exists(current_file):
                     readcache_size = 0
                     with rem.open(current_file) as config_file:
                         config = rem.json.load(config_file)
                     config['distributed_transaction_log'] = {}
                     config['distributed_transaction_log']['dtl_transport'] = config['failovercache']['failovercache_transport']
                     config['distributed_transaction_log']['dtl_path'] = config['failovercache']['failovercache_path']
                     config['volume_manager']['dtl_throttle_usecs'] = config['volume_manager']['foc_throttle_usecs']
                     del config['failovercache']
                     del config['volume_manager']['foc_throttle_usecs']
                     sdc = rem.StorageDriverConfiguration('storagedriver', vpool.guid, storagedriver.storagedriver_id)
                     sdc.configuration = config
                     sdc.save(reload_config=False)
                     for mountpoint in config['content_addressed_cache']['clustercache_mount_points']:
                         readcache_size += int(mountpoint['size'].replace('KiB', ''))
                     params = {'VPOOL_MOUNTPOINT': storagedriver.mountpoint,
                               'HYPERVISOR_TYPE': storagedriver.storagerouter.pmachine.hvtype,
                               'VPOOL_NAME': vpool.name,
                               'CONFIG_PATH': sdc.remote_path,
                               'UUID': str(uuid.uuid4()),
                               'OVS_UID': client.run('id -u ovs').strip(),
                               'OVS_GID': client.run('id -g ovs').strip(),
                               'KILL_TIMEOUT': str(int(readcache_size / 1024.0 / 1024.0 / 6.0 + 30))}
                     vmware_mode = EtcdConfiguration.get('/ovs/framework/hosts/{0}/storagedriver|vmware_mode'.format(host_id))
                     dtl_service = 'ovs-dtl_{0}'.format(vpool.name)
                     ServiceManager.add_service(name='ovs-dtl', params=params, client=client, target_name=dtl_service)
                     if vpool.backend_type.code == 'alba':
                         alba_proxy_service = 'ovs-albaproxy_{0}'.format(vpool.name)
                         dependencies = [alba_proxy_service]
                     else:
                         dependencies = None
                     if vmware_mode == 'ganesha':
                         template_name = 'ovs-ganesha'
                     else:
                         template_name = 'ovs-volumedriver'
                     voldrv_service = 'ovs-volumedriver_{0}'.format(vpool.name)
                     ServiceManager.add_service(name=template_name, params=params, client=client, target_name=voldrv_service, additional_dependencies=dependencies)
                     rem.os.remove(current_file)
                 # Ganesha config, if available
                 current_file = '{0}/{1}_ganesha.conf'.format(configuration_dir, vpool.name)
                 if rem.os.path.exists(current_file):
                     sdc = rem.StorageDriverConfiguration('storagedriver', vpool.guid, storagedriver.storagedriver_id)
                     contents = ''
                     for template in ['ganesha-core', 'ganesha-export']:
                         contents += client.file_read('/opt/OpenvStorage/config/templates/{0}.conf'.format(template))
                     params = {'VPOOL_NAME': vpool.name,
                               'VPOOL_MOUNTPOINT': '/mnt/{0}'.format(vpool.name),
                               'CONFIG_PATH': sdc.remote_path,
                               'NFS_FILESYSTEM_ID': storagedriver.storagerouter.ip.split('.', 2)[-1]}
                     for key, value in params.iteritems():
                         contents = contents.replace('<{0}>'.format(key), value)
                     client.file_write(current_file, contents)
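
The Ganesha template handling at the end is plain string substitution: every parameter is looked up as an upper-case placeholder wrapped in angle brackets and replaced by its value. A minimal illustration with a made-up template line:

params = {'VPOOL_NAME': 'vpool01', 'VPOOL_MOUNTPOINT': '/mnt/vpool01'}
contents = 'Path = <VPOOL_MOUNTPOINT>;  # export for <VPOOL_NAME>'
for key, value in params.items():
    contents = contents.replace('<{0}>'.format(key), value)
print(contents)  # -> Path = /mnt/vpool01;  # export for vpool01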