def setUp(self):
    """Prepare an in-memory database and a mocked adapter config.

    Points setting.CONFIG_DIR at the local test ``data`` directory,
    replaces util.load_configs with a mock that returns a single
    fake adapter config, loads adapters and remembers the id of
    the ``openstack_icehouse`` adapter in ``self.adapter_id``.
    """
    super(AdapterTestCase, self).setUp()
    # Re-import settings so CONFIG_DIR below takes effect from a clean state.
    reload(setting)
    setting.CONFIG_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'data'
    )
    # Fresh in-memory sqlite database per test.
    database.init('sqlite://')
    database.create_db()
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    # Stub out config loading; the original is restored elsewhere via
    # self.backup_adapter_configs (presumably in tearDown — confirm).
    mock_config = mock.Mock()
    self.backup_adapter_configs = util.load_configs
    util.load_configs = mock_config
    configs = [{
        'NAME': 'openstack_test',
        # NOTE(review): 'DISLAY_NAME' looks like a typo for 'DISPLAY_NAME';
        # it is a runtime config key, so it is left untouched here — confirm
        # against the adapter config schema before fixing.
        'DISLAY_NAME': 'Test OpenStack Icehouse',
        'PACKAGE_INSTALLER': 'chef_installer',
        'OS_INSTALLER': 'cobbler',
        'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'],
        'DEPLOYABLE': True
    }]
    util.load_configs.return_value = configs
    with database.session() as session:
        adapter_api.add_adapters_internal(session)
    adapter.load_adapters()
    self.adapter_object = adapter.list_adapters(user=self.user_object)
    # Cache the id of the adapter most tests exercise.
    for adapter_obj in self.adapter_object:
        if adapter_obj['name'] == 'openstack_icehouse':
            self.adapter_id = adapter_obj['id']
            break
def setUp(self):
    """Prepare database, adapters and metadata for metadata tests.

    After loading, caches ``self.adapter_id``, ``self.os_id`` and
    ``self.flavor_id`` taken from the ``openstack_icehouse`` adapter.
    """
    super(MetadataTestCase, self).setUp()
    # Re-import settings so CONFIG_DIR below takes effect from a clean state.
    reload(setting)
    setting.CONFIG_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'data'
    )
    # Fresh in-memory sqlite database per test.
    database.init('sqlite://')
    database.create_db()
    adapter.load_adapters()
    metadata.load_metadatas()

    # Get a os_id and adapter_id
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    self.adapter_object = adapter.list_adapters(self.user_object)
    test_adapter = None
    for adapter_obj in self.adapter_object:
        if adapter_obj['name'] == 'openstack_icehouse':
            self.adapter_id = adapter_obj['id']
            test_adapter = adapter_obj
            break
    self.os_id = None
    # NOTE(review): raises TypeError if no 'openstack_icehouse' adapter was
    # found (test_adapter stays None) — assumed to always exist in test data.
    if test_adapter['flavors']:
        # Take the first supported os as the test os.
        for supported_os in test_adapter['supported_oses']:
            self.os_id = supported_os['os_id']
            break
        for flavor in test_adapter['flavors']:
            if flavor['name'] == 'HA-multinodes':
                self.flavor_id = flavor['id']
                break
def setUp(self):
    """Prepare database and mocked adapter configs for adapter tests.

    Caches ``self.adapter_obj``/``self.adapter_id`` for the
    ``openstack_icehouse`` adapter and ``self.flavor_id`` for its
    ``HA-multinodes`` flavor.
    """
    super(AdapterTestCase, self).setUp()
    # Environment overrides consumed by the settings module on reload.
    os.environ['COMPASS_IGNORE_SETTING'] = 'true'
    os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'data')
    reload(setting)
    # Fresh in-memory sqlite database per test.
    database.init('sqlite://')
    database.create_db()
    self.user_object = (user_api.get_user_object(
        setting.COMPASS_ADMIN_EMAIL))
    # Route util.load_configs through the test's own loader; original is
    # kept in self.backup_adapter_configs (presumably restored in tearDown).
    mock_config = mock.Mock(side_effect=self._mock_load_configs)
    self.backup_adapter_configs = util.load_configs
    util.load_configs = mock_config
    adapter.load_adapters(force_reload=True)
    adapter.load_flavors(force_reload=True)
    self.adapter_object = adapter.list_adapters(user=self.user_object)
    self.adapter_obj = None
    self.adapter_id = None
    self.flavor_id = None
    for adapter_obj in self.adapter_object:
        if adapter_obj['name'] == 'openstack_icehouse':
            self.adapter_obj = adapter_obj
            self.adapter_id = adapter_obj['id']
            break
    # NOTE(review): assumes the adapter above was found; self.adapter_obj
    # being None here would raise TypeError.
    for flavor in self.adapter_obj['flavors']:
        if flavor['name'] == 'HA-multinodes':
            self.flavor_id = flavor['id']
            break
def setUp(self):
    """Prepare database and mocked adapter configs for adapter tests.

    Same setup as the sibling AdapterTestCase.setUp: caches
    ``self.adapter_obj``/``self.adapter_id`` for ``openstack_icehouse``
    and ``self.flavor_id`` for its ``HA-multinodes`` flavor.
    """
    super(AdapterTestCase, self).setUp()
    # Environment overrides consumed by the settings module on reload.
    os.environ['COMPASS_IGNORE_SETTING'] = 'true'
    os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'data'
    )
    reload(setting)
    # Fresh in-memory sqlite database per test.
    database.init('sqlite://')
    database.create_db()
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    # Route util.load_configs through the test's own loader; original is
    # kept in self.backup_adapter_configs (presumably restored in tearDown).
    mock_config = mock.Mock(side_effect=self._mock_load_configs)
    self.backup_adapter_configs = util.load_configs
    util.load_configs = mock_config
    adapter.load_adapters(force_reload=True)
    adapter.load_flavors(force_reload=True)
    self.adapter_object = adapter.list_adapters(user=self.user_object)
    self.adapter_obj = None
    self.adapter_id = None
    self.flavor_id = None
    for adapter_obj in self.adapter_object:
        if adapter_obj['name'] == 'openstack_icehouse':
            self.adapter_obj = adapter_obj
            self.adapter_id = adapter_obj['id']
            break
    # NOTE(review): assumes the adapter above was found; self.adapter_obj
    # being None here would raise TypeError.
    for flavor in self.adapter_obj['flavors']:
        if flavor['name'] == 'HA-multinodes':
            self.flavor_id = flavor['id']
            break
def setUp(self):
    """Prepare database, adapters, metadata and flavors for metadata tests.

    Caches ``self.adapter_id``, ``self.os_id`` and ``self.flavor_id``
    from the ``openstack_icehouse`` adapter.
    """
    super(MetadataTestCase, self).setUp()
    # Environment overrides consumed by the settings module on reload.
    os.environ['COMPASS_IGNORE_SETTING'] = 'true'
    os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'data')
    reload(setting)
    # Fresh in-memory sqlite database per test.
    database.init('sqlite://')
    database.create_db()
    adapter.load_adapters(force_reload=True)
    metadata.load_metadatas(force_reload=True)
    adapter.load_flavors(force_reload=True)
    # Get a os_id and adapter_id
    self.user_object = (user_api.get_user_object(
        setting.COMPASS_ADMIN_EMAIL))
    self.adapter_object = adapter.list_adapters(self.user_object)
    test_adapter = None
    for adapter_obj in self.adapter_object:
        if adapter_obj['name'] == 'openstack_icehouse':
            self.adapter_id = adapter_obj['id']
            test_adapter = adapter_obj
            break
    self.os_id = None
    # NOTE(review): raises TypeError if no 'openstack_icehouse' adapter was
    # found (test_adapter stays None) — assumed to always exist in test data.
    if test_adapter['flavors']:
        # Take the first supported os as the test os.
        for supported_os in test_adapter['supported_oses']:
            self.os_id = supported_os['os_id']
            break
        for flavor in test_adapter['flavors']:
            if flavor['name'] == 'HA-multinodes':
                self.flavor_id = flavor['id']
                break
def test_list_adapters(self):
    """Listing adapters yields all of the expected adapter names."""
    listed = adapter.list_adapters(user=self.user_object)
    names = [entry['name'] for entry in listed]
    self.assertIsNotNone(listed)
    expected_names = [
        'openstack_icehouse',
        'os_only',
        'ceph(chef)',
    ]
    for expected in expected_names:
        self.assertIn(expected, names)
def test_list_adapters(self):
    """Listing adapters yields all of the expected adapter names."""
    found = adapter.list_adapters(
        user=self.user_object
    )
    found_names = [adapter_info['name'] for adapter_info in found]
    self.assertIsNotNone(found)
    for required in ('openstack_icehouse', 'os_only', 'ceph(chef)'):
        self.assertIn(required, found_names)
def clean_installers():
    """Clean the os/package installers selected by command-line flags.

    Reads comma-separated installer aliases from
    ``flags.OPTIONS.os_installers`` / ``flags.OPTIONS.package_installers``
    (empty list means "all"), collects the matching installers from the
    adapters visible to the admin user, then either dispatches cleaning
    to celery tasks (async mode) or cleans synchronously, logging and
    swallowing per-installer failures so one bad installer does not stop
    the rest.
    """
    # Empty flag values produce empty lists, i.e. "clean everything".
    os_installers = [
        os_installer
        for os_installer in flags.OPTIONS.os_installers.split(',')
        if os_installer
    ]
    package_installers = [
        package_installer
        for package_installer in flags.OPTIONS.package_installers.split(',')
        if package_installer
    ]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    adapters = adapter_api.list_adapters(user=user)
    filtered_os_installers = {}
    filtered_package_installers = {}
    for adapter in adapters:
        logging.info('got adapter: %s', adapter)
        if 'os_installer' in adapter:
            os_installer = adapter['os_installer']
            os_installer_name = os_installer['alias']
            if not os_installers or os_installer_name in os_installers:
                filtered_os_installers[os_installer_name] = os_installer
            else:
                logging.info('ignore os installer %s', os_installer_name)
        else:
            logging.info('cannot find os installer in adapter %s',
                         adapter['name'])
        if 'package_installer' in adapter:
            package_installer = adapter['package_installer']
            package_installer_name = package_installer['alias']
            if (not package_installers or
                    package_installer_name in package_installers):
                filtered_package_installers[package_installer_name] = (
                    package_installer)
            else:
                logging.info('ignore package installer %s',
                             package_installer_name)
        else:
            logging.info('cannot find package installer in adapter %s',
                         adapter['name'])
    logging.info('clean os installers: %s', filtered_os_installers.keys())
    logging.info('clean package installers: %s',
                 filtered_package_installers.keys())
    # BUGFIX: 'async' became a reserved keyword in Python 3.7, so the
    # original attribute access `flags.OPTIONS.async` is a SyntaxError on
    # modern interpreters; getattr keeps the same runtime behavior.
    if getattr(flags.OPTIONS, 'async'):
        # Fire-and-forget: let celery workers do the actual cleaning.
        for os_installer_name, os_installer in filtered_os_installers.items():
            celery.send_task('compass.tasks.clean_os_installer',
                             (os_installer['name'], os_installer['settings']))
        for package_installer_name, package_installer in (
            filtered_package_installers.items()
        ):
            celery.send_task(
                'compass.tasks.clean_package_installer',
                (package_installer['name'], package_installer['settings']))
    else:
        # Synchronous mode: clean in-process, best-effort per installer.
        for os_installer_name, os_installer in (
            filtered_os_installers.items()
        ):
            try:
                clean.clean_os_installer(os_installer['name'],
                                         os_installer['settings'])
            except Exception as error:
                logging.error('failed to clean os installer %s',
                              os_installer_name)
                logging.exception(error)
        for package_installer_name, package_installer in (
            filtered_package_installers.items()
        ):
            try:
                clean.clean_package_installer(package_installer['name'],
                                              package_installer['settings'])
            except Exception as error:
                logging.error('failed to clean package installer %s',
                              package_installer_name)
                logging.exception(error)
def update_progress():
    """Update status and installing progress of the given cluster.

    :param cluster_hosts: clusters and hosts in each cluster to update.
    :type cluster_hosts: dict of int or str to list of int or str

    .. note::
       The function should be called out of the database session scope.
       In the function, it will update the database cluster_state and
       host_state table for the deploying cluster and hosts.

       The function will also query log_progressing_history table to get
       the lastest installing progress and the position of log it has
       processed in the last run. The function uses these information to
       avoid recalculate the progress from the beginning of the log file.
       After the progress got updated, these information will be stored
       back to the log_progressing_history for next time run.
    """
    # Non-blocking lock: if another progress run is active, bail out.
    with util.lock('log_progressing', timeout=60, blocking=False) as lock:
        if not lock:
            logging.error(
                'failed to acquire lock to calculate installation progress'
            )
            return
        logging.info('update installing progress')
        user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)

        # Phase 1: collect hosts currently INSTALLING, along with their
        # state and per-logfile history keyed by filename.
        hosts = host_api.list_hosts(user=user)
        host_mapping = {}
        for host in hosts:
            if 'id' not in host:
                logging.error('id is not in host %s', host)
                continue
            host_id = host['id']
            if 'os_name' not in host:
                logging.error('os_name is not in host %s', host)
                continue
            if 'os_installer' not in host:
                logging.error('os_installer is not in host %s', host)
                continue
            host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
            if host_dirname not in host:
                logging.error(
                    '%s is not in host %s', host_dirname, host
                )
                continue
            host_state = host_api.get_host_state(host_id, user=user)
            if 'state' not in host_state:
                logging.error('state is not in host state %s', host_state)
                continue
            if host_state['state'] == 'INSTALLING':
                host_log_histories = host_api.get_host_log_histories(
                    host_id, user=user
                )
                host_log_history_mapping = {}
                for host_log_history in host_log_histories:
                    if 'filename' not in host_log_history:
                        logging.error(
                            'filename is not in host log '
                            'history %s', host_log_history
                        )
                        continue
                    host_log_history_mapping[
                        host_log_history['filename']
                    ] = host_log_history
                host_mapping[host_id] = (
                    host, host_state, host_log_history_mapping
                )
            else:
                logging.info(
                    'ignore host state %s since it is not in installing',
                    host_state
                )

        # Phase 2: index adapters that have a package installer by id.
        adapters = adapter_api.list_adapters(user=user)
        adapter_mapping = {}
        for adapter in adapters:
            if 'id' not in adapter:
                logging.error(
                    'id not in adapter %s', adapter
                )
                continue
            if 'package_installer' not in adapter:
                logging.info(
                    'package_installer not in adapter %s', adapter
                )
                continue
            adapter_id = adapter['id']
            adapter_mapping[adapter_id] = adapter

        # Phase 3: index clusters (with their state) by id.
        clusters = cluster_api.list_clusters(user=user)
        cluster_mapping = {}
        for cluster in clusters:
            if 'id' not in cluster:
                logging.error('id not in cluster %s', cluster)
                continue
            cluster_id = cluster['id']
            if 'adapter_id' not in cluster:
                logging.error(
                    'adapter_id not in cluster %s', cluster
                )
                continue
            cluster_state = cluster_api.get_cluster_state(
                cluster_id, user=user
            )
            if 'state' not in cluster_state:
                logging.error('state not in cluster state %s', cluster_state)
                continue
            cluster_mapping[cluster_id] = (cluster, cluster_state)

        # Phase 4: collect INSTALLING clusterhosts whose cluster and
        # adapter are known, annotating each with its package installer.
        clusterhosts = cluster_api.list_clusterhosts(user=user)
        clusterhost_mapping = {}
        for clusterhost in clusterhosts:
            if 'clusterhost_id' not in clusterhost:
                logging.error(
                    'clusterhost_id not in clusterhost %s', clusterhost
                )
                continue
            clusterhost_id = clusterhost['clusterhost_id']
            if 'distributed_system_name' not in clusterhost:
                logging.error(
                    'distributed_system_name is not in clusterhost %s',
                    clusterhost
                )
                continue
            # NOTE(review): 'INATALLATION' spelling matches the constant as
            # declared in the settings module — do not "fix" only here.
            clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
            if clusterhost_dirname not in clusterhost:
                logging.error(
                    '%s is not in clusterhost %s',
                    clusterhost_dirname, clusterhost
                )
                continue
            if 'cluster_id' not in clusterhost:
                logging.error(
                    'cluster_id not in clusterhost %s', clusterhost
                )
                continue
            cluster_id = clusterhost['cluster_id']
            if cluster_id not in cluster_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the cluster_id '
                    'is not in cluster_mapping %s',
                    clusterhost, cluster_mapping
                )
                continue
            cluster, _ = cluster_mapping[cluster_id]
            adapter_id = cluster['adapter_id']
            if adapter_id not in adapter_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the adapter_id %s '
                    'is not in adaper_mapping %s',
                    clusterhost, adapter_id, adapter_mapping
                )
                continue
            adapter = adapter_mapping[adapter_id]
            if 'package_installer' not in adapter:
                logging.info(
                    'ignore clusterhost %s '
                    'since the package_installer is not define '
                    'in adapter %s',
                    clusterhost, adapter
                )
                continue
            package_installer = adapter['package_installer']
            clusterhost['package_installer'] = package_installer
            clusterhost_state = cluster_api.get_clusterhost_self_state(
                clusterhost_id, user=user
            )
            if 'state' not in clusterhost_state:
                logging.error(
                    'state not in clusterhost_state %s', clusterhost_state
                )
                continue
            if clusterhost_state['state'] == 'INSTALLING':
                clusterhost_log_histories = (
                    cluster_api.get_clusterhost_log_histories(
                        clusterhost_id, user=user
                    )
                )
                clusterhost_log_history_mapping = {}
                for clusterhost_log_history in clusterhost_log_histories:
                    if 'filename' not in clusterhost_log_history:
                        logging.error(
                            'filename not in clusterhost_log_history %s',
                            clusterhost_log_history
                        )
                        continue
                    clusterhost_log_history_mapping[
                        clusterhost_log_history['filename']
                    ] = clusterhost_log_history
                clusterhost_mapping[clusterhost_id] = (
                    clusterhost, clusterhost_state,
                    clusterhost_log_history_mapping
                )
            else:
                logging.info(
                    'ignore clusterhost state %s '
                    'since it is not in installing',
                    clusterhost_state
                )

        # Phase 5: recalculate progress (mutates the mappings in place)
        # and write host/clusterhost/cluster state + log history back.
        progress_calculator.update_host_progress(
            host_mapping)
        for host_id, (host, host_state, host_log_history_mapping) in (
            host_mapping.items()
        ):
            host_api.update_host_state(
                host_id, user=user,
                percentage=host_state.get('percentage', 0),
                message=host_state.get('message', ''),
                severity=host_state.get('severity', 'INFO')
            )
            for filename, host_log_history in (
                host_log_history_mapping.items()
            ):
                host_api.add_host_log_history(
                    host_id, filename=filename, user=user,
                    position=host_log_history.get('position', 0),
                    percentage=host_log_history.get('percentage', 0),
                    partial_line=host_log_history.get('partial_line', ''),
                    message=host_log_history.get('message', ''),
                    severity=host_log_history.get('severity', 'INFO'),
                    line_matcher_name=host_log_history.get(
                        'line_matcher_name', 'start'
                    )
                )
        progress_calculator.update_clusterhost_progress(
            clusterhost_mapping)
        for (
            clusterhost_id,
            (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
        ) in (
            clusterhost_mapping.items()
        ):
            cluster_api.update_clusterhost_state(
                clusterhost_id, user=user,
                percentage=clusterhost_state.get('percentage', 0),
                message=clusterhost_state.get('message', ''),
                severity=clusterhost_state.get('severity', 'INFO')
            )
            for filename, clusterhost_log_history in (
                clusterhost_log_history_mapping.items()
            ):
                cluster_api.add_clusterhost_log_history(
                    clusterhost_id, user=user, filename=filename,
                    position=clusterhost_log_history.get('position', 0),
                    percentage=clusterhost_log_history.get('percentage', 0),
                    partial_line=clusterhost_log_history.get(
                        'partial_line', ''),
                    message=clusterhost_log_history.get('message', ''),
                    severity=clusterhost_log_history.get('severity', 'INFO'),
                    line_matcher_name=(
                        clusterhost_log_history.get(
                            'line_matcher_name', 'start'
                        )
                    )
                )
        progress_calculator.update_cluster_progress(
            cluster_mapping)
        for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
            cluster_api.update_cluster_state(
                cluster_id, user=user
            )
def setUp(self):
    """Build a full test fixture: clusters, switches, machines, hosts.

    Creates two clusters, a switch with two machines, cluster hosts,
    subnets, host networks and log histories, and prepares
    ``self.os_configs`` / ``self.package_configs`` templates used by
    the host API tests.
    """
    super(HostTestCase, self).setUp()
    # Environment overrides consumed by the settings module on reload.
    os.environ['COMPASS_IGNORE_SETTING'] = 'true'
    os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'data'
    )
    reload(setting)
    # Fresh in-memory sqlite database per test.
    database.init('sqlite://')
    database.create_db()
    adapter.load_adapters(force_reload=True)
    metadata.load_metadatas(force_reload=True)
    adapter.load_flavors(force_reload=True)
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    # get adapter information
    # NOTE(review): keeps the os_id of the first supported os of the LAST
    # adapter iterated, and the adapter/flavor ids from the 'allinone'
    # flavor's first role — relies on the test data shape.
    list_adapters = adapter.list_adapters(user=self.user_object)
    for list_adapter in list_adapters:
        for supported_os in list_adapter['supported_oses']:
            self.os_id = supported_os['os_id']
            break
        if list_adapter['flavors']:
            details = list_adapter['flavors']
            for detail in details:
                if detail['display_name'] == 'allinone':
                    roles = detail['roles']
                    for role in roles:
                        self.adapter_id = role['adapter_id']
                        self.flavor_id = role['flavor_id']
                        break
    # add cluster
    cluster_names = ['test_cluster1', 'test_cluster2']
    for cluster_name in cluster_names:
        cluster.add_cluster(
            user=self.user_object,
            adapter_id=self.adapter_id,
            os_id=self.os_id,
            flavor_id=self.flavor_id,
            name=cluster_name
        )
    clusters = cluster.list_clusters(user=self.user_object)
    self.roles = None
    # Remember the last flavor role seen and the id of test_cluster1.
    for list_cluster in clusters:
        for item in list_cluster['flavor']['roles']:
            self.roles = item
        if list_cluster['name'] == 'test_cluster1':
            self.cluster_id = list_cluster['id']
            break
    # add switch
    switch.add_switch(
        user=self.user_object,
        ip='172.29.8.40'
    )
    switches = switch.list_switches(user=self.user_object)
    self.switch_id = None
    for item in switches:
        self.switch_id = item['id']
    macs = ['28:6e:d4:46:c4:25', '00:0c:29:bf:eb:1d']
    for mac in macs:
        switch.add_switch_machine(
            self.switch_id,
            user=self.user_object,
            mac=mac,
            port='1'
        )
    # get machine information
    machines = machine.list_machines(user=self.user_object)
    self.machine_ids = []
    for item in machines:
        self.machine_ids.append(item['id'])
    # add cluster host
    name = ['newname1', 'newname2']
    for i in range(0, 2):
        cluster.add_cluster_host(
            self.cluster_id,
            user=self.user_object,
            machine_id=self.machine_ids[i],
            name=name[i]
        )
    self.host_ids = []
    clusterhosts = cluster.list_clusterhosts(user=self.user_object)
    for clusterhost in clusterhosts:
        self.host_ids.append(clusterhost['host_id'])
    # add subnet
    subnets = ['10.145.88.0/23', '192.168.100.0/23']
    for subnet in subnets:
        network.add_subnet(
            user=self.user_object,
            subnet=subnet
        )
    list_subnet = network.list_subnets(
        user=self.user_object
    )
    self.subnet_ids = []
    for item in list_subnet:
        self.subnet_ids.append(item['id'])
    # add host network: one management interface, one promiscuous.
    host.add_host_network(
        self.host_ids[0],
        user=self.user_object,
        interface='eth0',
        ip='10.145.88.0',
        subnet_id=self.subnet_ids[0],
        is_mgmt=True
    )
    host.add_host_network(
        self.host_ids[1],
        user=self.user_object,
        interface='eth1',
        ip='192.168.100.0',
        subnet_id=self.subnet_ids[1],
        is_promiscuous=True
    )
    # add log history
    filenames = ['log1', 'log2']
    for filename in filenames:
        host.add_host_log_history(
            self.host_ids[0],
            user=self.user_object,
            filename=filename
        )
    # Config templates used by the tests when updating host configs.
    self.os_configs = {
        'general': {
            'language': 'EN',
            'timezone': 'UTC',
            'http_proxy': 'http://127.0.0.1:3128',
            'https_proxy': 'http://127.0.0.1:3128',
            'no_proxy': [
                '127.0.0.1',
                'compass'
            ],
            'ntp_server': '127.0.0.1',
            'dns_servers': [
                '127.0.0.1'
            ],
            'domain': 'ods.com',
            'search_path': [
                'ods.com'
            ],
            'default_gateway': '127.0.0.1',
        },
        'server_credentials': {
            'username': '******',
            'password': '******',
        },
        'partition': {
            '/var': {
                'max_size': '100G',
                'percentage': 10,
                'size': '1G'
            }
        }
    }
    self.package_configs = {
        'security': {
            'service_credentials': {
                '$service': {
                    'username': '******',
                    'password': '******'
                }
            },
            'console_credentials': {
                '$console': {
                    'username': '******',
                    'password': '******'
                }
            }
        },
        'network_mapping': {
            '$interface_type': 'eth0'
        }
    }
def clean_installers():
    """Clean the os/package installers selected by command-line flags.

    Reads comma-separated installer aliases from
    ``flags.OPTIONS.os_installers`` / ``flags.OPTIONS.package_installers``
    (empty list means "all"), collects the matching installers from the
    adapters visible to the admin user, then either dispatches cleaning
    to celery tasks (async mode) or cleans synchronously, logging and
    swallowing per-installer failures so one bad installer does not stop
    the rest.
    """
    # Empty flag values produce empty lists, i.e. "clean everything".
    os_installers = [
        os_installer
        for os_installer in flags.OPTIONS.os_installers.split(',')
        if os_installer
    ]
    package_installers = [
        package_installer
        for package_installer in flags.OPTIONS.package_installers.split(',')
        if package_installer
    ]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    adapters = adapter_api.list_adapters(user=user)
    filtered_os_installers = {}
    filtered_package_installers = {}
    for adapter in adapters:
        logging.info(
            'got adapter: %s', adapter
        )
        if 'os_installer' in adapter:
            os_installer = adapter['os_installer']
            os_installer_name = os_installer['alias']
            if not os_installers or os_installer_name in os_installers:
                filtered_os_installers[os_installer_name] = os_installer
            else:
                # BUGFIX: log message typo 'isntaller' corrected.
                logging.info(
                    'ignore os installer %s', os_installer_name
                )
        else:
            logging.info(
                'cannot find os installer in adapter %s',
                adapter['name']
            )
        if 'package_installer' in adapter:
            package_installer = adapter['package_installer']
            package_installer_name = package_installer['alias']
            if (
                not package_installers or
                package_installer_name in package_installers
            ):
                filtered_package_installers[package_installer_name] = (
                    package_installer
                )
            else:
                logging.info(
                    'ignore package installer %s', package_installer_name
                )
        else:
            logging.info(
                'cannot find package installer in adapter %s',
                adapter['name']
            )
    logging.info(
        'clean os installers: %s', filtered_os_installers.keys()
    )
    logging.info(
        'clean package installers: %s', filtered_package_installers.keys()
    )
    # BUGFIX: 'async' became a reserved keyword in Python 3.7, so the
    # original attribute access `flags.OPTIONS.async` is a SyntaxError on
    # modern interpreters; getattr keeps the same runtime behavior.
    if getattr(flags.OPTIONS, 'async'):
        # Fire-and-forget: let celery workers do the actual cleaning.
        for os_installer_name, os_installer in filtered_os_installers.items():
            celery.send_task(
                'compass.tasks.clean_os_installer',
                (
                    os_installer['name'],
                    os_installer['settings']
                )
            )
        for package_installer_name, package_installer in (
            filtered_package_installers.items()
        ):
            celery.send_task(
                'compass.tasks.clean_package_installer',
                (
                    package_installer['name'],
                    package_installer['settings']
                )
            )
    else:
        # Synchronous mode: clean in-process, best-effort per installer.
        for os_installer_name, os_installer in (
            filtered_os_installers.items()
        ):
            try:
                clean.clean_os_installer(
                    os_installer['name'],
                    os_installer['settings']
                )
            except Exception as error:
                logging.error(
                    'failed to clean os installer %s',
                    os_installer_name
                )
                logging.exception(error)
        for package_installer_name, package_installer in (
            filtered_package_installers.items()
        ):
            try:
                clean.clean_package_installer(
                    package_installer['name'],
                    package_installer['settings']
                )
            except Exception as error:
                logging.error(
                    'failed to clean package installer %s',
                    package_installer_name
                )
                logging.exception(error)
def update_progress():
    """Update status and installing progress of the given cluster.

    :param cluster_hosts: clusters and hosts in each cluster to update.
    :type cluster_hosts: dict of int or str to list of int or str

    .. note::
       The function should be called out of the database session scope.
       In the function, it will update the database cluster_state and
       host_state table for the deploying cluster and hosts.

       The function will also query log_progressing_history table to get
       the lastest installing progress and the position of log it has
       processed in the last run. The function uses these information to
       avoid recalculate the progress from the beginning of the log file.
       After the progress got updated, these information will be stored
       back to the log_progressing_history for next time run.
    """
    # Non-blocking lock: if another progress run is active, bail out.
    with util.lock('log_progressing', timeout=60, blocking=False) as lock:
        if not lock:
            logging.error(
                'failed to acquire lock to calculate installation progress'
            )
            return
        logging.info('update installing progress')
        user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)

        # Phase 1: collect hosts currently INSTALLING, along with their
        # state and per-logfile history keyed by filename.
        hosts = host_api.list_hosts(user=user)
        host_mapping = {}
        for host in hosts:
            if 'id' not in host:
                logging.error('id is not in host %s', host)
                continue
            host_id = host['id']
            if 'os_name' not in host:
                logging.error('os_name is not in host %s', host)
                continue
            if 'os_installer' not in host:
                logging.error('os_installer is not in host %s', host)
                continue
            host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
            if host_dirname not in host:
                logging.error(
                    '%s is not in host %s', host_dirname, host
                )
                continue
            host_state = host_api.get_host_state(host_id, user=user)
            if 'state' not in host_state:
                logging.error('state is not in host state %s', host_state)
                continue
            if host_state['state'] == 'INSTALLING':
                host_log_histories = host_api.get_host_log_histories(
                    host_id, user=user
                )
                host_log_history_mapping = {}
                for host_log_history in host_log_histories:
                    if 'filename' not in host_log_history:
                        logging.error(
                            'filename is not in host log '
                            'history %s', host_log_history
                        )
                        continue
                    host_log_history_mapping[
                        host_log_history['filename']
                    ] = host_log_history
                host_mapping[host_id] = (
                    host, host_state, host_log_history_mapping
                )
            else:
                logging.info(
                    'ignore host state %s since it is not in installing',
                    host_state
                )

        # Phase 2: index adapters that have a package installer by id.
        adapters = adapter_api.list_adapters(user=user)
        adapter_mapping = {}
        for adapter in adapters:
            if 'id' not in adapter:
                logging.error(
                    'id not in adapter %s', adapter
                )
                continue
            if 'package_installer' not in adapter:
                logging.info(
                    'package_installer not in adapter %s', adapter
                )
                continue
            adapter_id = adapter['id']
            adapter_mapping[adapter_id] = adapter

        # Phase 3: index clusters (with their state) by id.
        clusters = cluster_api.list_clusters(user=user)
        cluster_mapping = {}
        for cluster in clusters:
            if 'id' not in cluster:
                logging.error('id not in cluster %s', cluster)
                continue
            cluster_id = cluster['id']
            if 'adapter_id' not in cluster:
                logging.error(
                    'adapter_id not in cluster %s', cluster
                )
                continue
            cluster_state = cluster_api.get_cluster_state(
                cluster_id, user=user
            )
            if 'state' not in cluster_state:
                logging.error('state not in cluster state %s', cluster_state)
                continue
            cluster_mapping[cluster_id] = (cluster, cluster_state)

        # Phase 4: collect INSTALLING clusterhosts whose cluster and
        # adapter are known, annotating each with its package installer
        # and adapter name.
        clusterhosts = cluster_api.list_clusterhosts(user=user)
        clusterhost_mapping = {}
        for clusterhost in clusterhosts:
            if 'clusterhost_id' not in clusterhost:
                logging.error(
                    'clusterhost_id not in clusterhost %s', clusterhost
                )
                continue
            clusterhost_id = clusterhost['clusterhost_id']
            if 'cluster_id' not in clusterhost:
                logging.error(
                    'cluster_id not in clusterhost %s', clusterhost
                )
                continue
            cluster_id = clusterhost['cluster_id']
            if cluster_id not in cluster_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the cluster_id '
                    'is not in cluster_mapping %s',
                    clusterhost, cluster_mapping
                )
                continue
            cluster, _ = cluster_mapping[cluster_id]
            if 'flavor_name' not in cluster:
                logging.error(
                    'flavor_name is not in clusterhost %s related cluster',
                    clusterhost
                )
                continue
            # NOTE(review): 'INATALLATION' spelling matches the constant as
            # declared in the settings module — do not "fix" only here.
            clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
            if clusterhost_dirname not in clusterhost:
                logging.error(
                    '%s is not in clusterhost %s',
                    clusterhost_dirname, clusterhost
                )
                continue
            adapter_id = cluster['adapter_id']
            if adapter_id not in adapter_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the adapter_id %s '
                    'is not in adaper_mapping %s',
                    clusterhost, adapter_id, adapter_mapping
                )
                continue
            adapter = adapter_mapping[adapter_id]
            if 'package_installer' not in adapter:
                logging.info(
                    'ignore clusterhost %s '
                    'since the package_installer is not define '
                    'in adapter %s',
                    clusterhost, adapter
                )
                continue
            package_installer = adapter['package_installer']
            clusterhost['package_installer'] = package_installer
            clusterhost['adapter_name'] = adapter['name']
            clusterhost_state = cluster_api.get_clusterhost_self_state(
                clusterhost_id, user=user
            )
            if 'state' not in clusterhost_state:
                logging.error(
                    'state not in clusterhost_state %s', clusterhost_state
                )
                continue
            if clusterhost_state['state'] == 'INSTALLING':
                clusterhost_log_histories = (
                    cluster_api.get_clusterhost_log_histories(
                        clusterhost_id, user=user
                    )
                )
                clusterhost_log_history_mapping = {}
                for clusterhost_log_history in clusterhost_log_histories:
                    if 'filename' not in clusterhost_log_history:
                        logging.error(
                            'filename not in clusterhost_log_history %s',
                            clusterhost_log_history
                        )
                        continue
                    clusterhost_log_history_mapping[
                        clusterhost_log_history['filename']
                    ] = clusterhost_log_history
                clusterhost_mapping[clusterhost_id] = (
                    clusterhost, clusterhost_state,
                    clusterhost_log_history_mapping
                )
            else:
                logging.info(
                    'ignore clusterhost state %s '
                    'since it is not in installing',
                    clusterhost_state
                )

        # Phase 5: recalculate progress (mutates the mappings in place)
        # and write host/clusterhost/cluster state + log history back.
        progress_calculator.update_host_progress(
            host_mapping)
        for host_id, (host, host_state, host_log_history_mapping) in (
            host_mapping.items()
        ):
            host_api.update_host_state(
                host_id, user=user,
                percentage=host_state.get('percentage', 0),
                message=host_state.get('message', ''),
                severity=host_state.get('severity', 'INFO')
            )
            for filename, host_log_history in (
                host_log_history_mapping.items()
            ):
                host_api.add_host_log_history(
                    host_id, filename=filename, user=user,
                    position=host_log_history.get('position', 0),
                    percentage=host_log_history.get('percentage', 0),
                    partial_line=host_log_history.get('partial_line', ''),
                    message=host_log_history.get('message', ''),
                    severity=host_log_history.get('severity', 'INFO'),
                    line_matcher_name=host_log_history.get(
                        'line_matcher_name', 'start'
                    )
                )
        progress_calculator.update_clusterhost_progress(
            clusterhost_mapping)
        for (
            clusterhost_id,
            (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
        ) in (
            clusterhost_mapping.items()
        ):
            cluster_api.update_clusterhost_state(
                clusterhost_id, user=user,
                percentage=clusterhost_state.get('percentage', 0),
                message=clusterhost_state.get('message', ''),
                severity=clusterhost_state.get('severity', 'INFO')
            )
            for filename, clusterhost_log_history in (
                clusterhost_log_history_mapping.items()
            ):
                cluster_api.add_clusterhost_log_history(
                    clusterhost_id, user=user, filename=filename,
                    position=clusterhost_log_history.get('position', 0),
                    percentage=clusterhost_log_history.get('percentage', 0),
                    partial_line=clusterhost_log_history.get(
                        'partial_line', ''),
                    message=clusterhost_log_history.get('message', ''),
                    severity=clusterhost_log_history.get('severity', 'INFO'),
                    line_matcher_name=(
                        clusterhost_log_history.get(
                            'line_matcher_name', 'start'
                        )
                    )
                )
        progress_calculator.update_cluster_progress(
            cluster_mapping)
        for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
            cluster_api.update_cluster_state(
                cluster_id, user=user
            )
def _prepare_database(self):
    """Populate the test database with one full deployment fixture.

    Finds the adapter/os/flavor matching ADAPTER_NAME / OS_NAME /
    'allinone', then creates a cluster, switch, machine, clusterhost,
    subnet and host network, and finally moves host, cluster and
    clusterhost into the INSTALLING state.

    Raises Exception when the expected adapter, os or flavor cannot
    be found.
    """
    adapter.load_adapters()
    metadata.load_metadatas()
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    self.adapter_id = None
    self.os_id = None
    self.flavor_id = None
    self.cluster_id = None
    # get adapter information
    list_adapters = adapter.list_adapters(user=self.user_object)
    for adptr in list_adapters:
        self.adapter_id = None
        if adptr['name'] != ADAPTER_NAME:
            continue
        self.adapter_id = adptr['id']
        self.os_id = None
        for supported_os in adptr['supported_oses']:
            if supported_os['name'] == OS_NAME:
                self.os_id = supported_os['os_id']
                break
        if not self.os_id:
            continue
        # Only openstack adapters with a package installer and at least
        # one flavor qualify; look for the 'allinone' flavor.
        if (
            'package_installer' in adptr.keys() and
            adptr['flavors'] != [] and
            adptr['distributed_system_name'] == 'openstack'
        ):
            self.flavor_id = None
            for flavor in adptr['flavors']:
                if flavor['name'] == 'allinone':
                    self.flavor_id = flavor['id']
                    break
            if not self.flavor_id:
                continue
        else:
            continue
        if self.adapter_id and self.os_id and self.flavor_id:
            break
    if not self.adapter_id:
        raise Exception('adapter id not found')
    if not self.os_id:
        raise Exception('os id not found')
    if not self.flavor_id:
        raise Exception('flavor id not found')
    # add cluster
    cluster.add_cluster(
        adapter_id=self.adapter_id,
        os_id=self.os_id,
        flavor_id=self.flavor_id,
        name='test_cluster',
        user=self.user_object,
    )
    list_clusters = cluster.list_clusters(user=self.user_object)
    for list_cluster in list_clusters:
        if list_cluster['name'] == 'test_cluster':
            self.cluster_id = list_cluster['id']
            break
    # NOTE(review): this second loop overwrites self.cluster_id with the
    # id of the LAST cluster, clobbering the 'test_cluster' lookup above —
    # looks unintentional; confirm before relying on cluster_id.
    for list_cluster in list_clusters:
        self.cluster_id = list_cluster['id']
    # add switch
    switch.add_switch(
        ip=SWITCH_IP,
        user=self.user_object,
    )
    list_switches = switch.list_switches(user=self.user_object)
    for list_switch in list_switches:
        self.switch_id = list_switch['id']
    switch.add_switch_machine(
        self.switch_id,
        user=self.user_object,
        mac=MACHINE_MAC,
        port='1'
    )
    # get machine information
    list_machines = machine.list_machines(user=self.user_object)
    for list_machine in list_machines:
        self.machine_id = list_machine['id']
    # add cluster host
    cluster.add_cluster_host(
        self.cluster_id,
        user=self.user_object,
        machine_id=self.machine_id,
        name='test_clusterhost'
    )
    list_clusterhosts = cluster.list_clusterhosts(user=self.user_object)
    for list_clusterhost in list_clusterhosts:
        self.host_id = list_clusterhost['host_id']
        self.clusterhost_id = list_clusterhost['clusterhost_id']
    # add subnet
    network.add_subnet(
        subnet=SUBNET,
        user=self.user_object,
    )
    list_subnets = network.list_subnets(
        user=self.user_object
    )
    for list_subnet in list_subnets:
        self.subnet_id = list_subnet['id']
    # add host network
    host.add_host_network(
        self.host_id,
        user=self.user_object,
        interface='eth0',
        ip=HOST_IP,
        subnet_id=self.subnet_id,
        is_mgmt=True
    )
    # get clusterhost
    # NOTE(review): this re-assigns self.clusterhost_id from the 'id' key,
    # whereas the loop above used 'clusterhost_id' — confirm which key the
    # state-update API below expects.
    list_clusterhosts = cluster.list_clusterhosts(
        user=self.user_object
    )
    for list_clusterhost in list_clusterhosts:
        self.clusterhost_id = list_clusterhost['id']
    # update host state
    self.list_hosts = host.list_hosts(user=self.user_object)
    for list_host in self.list_hosts:
        self.host_id = list_host['id']
    self.host_state = host.update_host_state(
        self.host_id,
        user=self.user_object,
        state='INSTALLING'
    )
    # update cluster state
    cluster.update_cluster_state(
        self.cluster_id,
        user=self.user_object,
        state='INSTALLING'
    )
    # update clusterhost state
    cluster.update_clusterhost_state(
        self.clusterhost_id,
        user=self.user_object,
        state='INSTALLING'
    )