def test_del_switch(self):
    """Deleting the only switch leaves the switch list empty."""
    switch.del_switch(
        1,
        user=self.user_object,
    )
    remaining = switch.list_switches(user=self.user_object)
    self.assertEqual([], remaining)
def test_del_switch(self):
    """After removal, list_switches returns no entries."""
    switch.del_switch(
        1,
        user=self.user_object,
    )
    switches_left = switch.list_switches(
        user=self.user_object
    )
    self.assertEqual([], switches_left)
def test_list_switches(self):
    """An added switch is listed with its ip rendered dotted-quad.

    2887583784 is the integer form of 172.29.8.40.
    """
    switch.add_switch(
        ip='2887583784',
        user=self.user_object,
    )
    result = switch.list_switches(user=self.user_object)
    self.assertIsNotNone(result)
    self.assertEqual('172.29.8.40', result[0]['ip'])
def poll_switch(poller_email, ip_addr, credentials,
                req_obj='mac', oper="SCAN"):
    """Query switch and update switch machines.

    .. note::
       When polling the switch succeeds, a Machine record is added to
       the database for each mac learned from the switch.

    :param poller_email: email identifying the polling user.
    :type poller_email: str
    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch credentials.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called out of database session scope.
    """
    poller = user_api.get_user_object(poller_email)
    ip_int = long(netaddr.IPAddress(ip_addr))
    with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
        if not lock:
            raise Exception('failed to acquire lock to poll switch %s' %
                            ip_addr)
        # TODO(grace): before repoll the switch, set the state to repolling.
        # and when the poll switch is timeout, set the state to error.
        # the frontend should only consider some main state like INTIALIZED,
        # ERROR and SUCCESSFUL, REPOLLING is as an intermediate state to
        # indicate the switch is in learning the mac of the machines connected
        # to it.
        logging.debug('poll switch: %s', ip_addr)
        switch_dict, machine_dicts = _poll_switch(
            ip_addr, credentials, req_obj=req_obj, oper=oper)
        found_switches = switch_api.list_switches(ip_int=ip_int, user=poller)
        if not found_switches:
            logging.error('no switch found for %s', ip_addr)
            return
        for found_switch in found_switches:
            switch_id = found_switch['id']
            for machine_dict in machine_dicts:
                logging.info('add machine: %s', machine_dict)
                # record which user discovered this machine
                machine_dict['owner_id'] = poller.id
                switch_api.add_switch_machine(
                    switch_id, False, user=poller, **machine_dict)
            switch_api.update_switch(
                switch_id, user=poller, **switch_dict)
def test_list_switches(self):
    """List switches after adding one.

    Pass ``user`` by keyword: sibling tests in this file consistently
    use ``user=self.user_object`` — passing the user object
    positionally binds it to the API's first positional parameter,
    which is not necessarily the user.
    """
    switch.add_switch(
        ip='2887583784',
        user=self.user_object,
    )
    list_switches = switch.list_switches(
        user=self.user_object
    )
    self.assertIsNotNone(list_switches)
def test_list_switches_ip_int_invalid(self):
    """Filtering by a non-numeric ip_int yields an empty list."""
    switch.add_switch(
        ip='2887583784',
        user=self.user_object,
    )
    filtered = switch.list_switches(
        ip_int='test',
        user=self.user_object,
    )
    self.assertEqual(filtered, [])
def list_switches():
    """Respond to a switch-list request with all visible switches.

    Request query-string arguments are normalized (`_filter_ip`) and
    forwarded to the switch API as filters.  The user is passed by
    keyword for consistency with the other `switch_api` call sites
    in this codebase, rather than relying on positional order.
    """
    data = _get_request_args()
    _filter_ip(data)
    return utils.make_json_response(
        200,
        switch_api.list_switches(
            user=current_user, **data
        )
    )
def poll_switch(poller_email, ip_addr, credentials,
                req_obj='mac', oper="SCAN"):
    """Query switch and update switch machines.

    .. note::
       When polling switch succeeds, for each mac it got from polling
       switch, a Machine record associated with the switch is added to
       the database.

    :param poller_email: email of the polling user.
    :type poller_email: str
    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch credentials.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called out of database session scope.
    """
    poller = user_api.get_user_object(poller_email)
    ip_int = long(netaddr.IPAddress(ip_addr))
    with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to poll switch %s' % ip_addr
            )
        # TODO(grace): before repoll the switch, set the state to repolling.
        # and when the poll switch is timeout, set the state to error.
        # the frontend should only consider some main state like INTIALIZED,
        # ERROR and SUCCESSFUL, REPOLLING is as an intermediate state to
        # indicate the switch is in learning the mac of the machines connected
        # to it.
        logging.debug('poll switch: %s', ip_addr)
        switch_dict, machine_dicts = _poll_switch(
            ip_addr, credentials, req_obj=req_obj, oper=oper
        )
        matched = switch_api.list_switches(ip_int=ip_int, user=poller)
        if not matched:
            logging.error('no switch found for %s', ip_addr)
            return
        for entry in matched:
            entry_id = entry['id']
            for machine in machine_dicts:
                logging.debug('add machine: %s', machine)
                switch_api.add_switch_machine(
                    entry_id, False, user=poller, **machine
                )
            switch_api.update_switch(
                entry_id, user=poller, **switch_dict
            )
def test_list_switches(self):
    """The stored switch comes back with ip in dotted-quad form."""
    switch.add_switch(
        ip='2887583784',
        user=self.user_object,
    )
    switches = switch.list_switches(
        user=self.user_object
    )
    self.assertIsNotNone(switches)
    # 2887583784 decimal == 172.29.8.40 dotted quad
    self.assertEqual('172.29.8.40', switches[0]['ip'])
def test_list_switches_with_ip_int(self):
    """Filtering by ip_int returns the matching switch.

    Fixes defects in the original test:
    * ``assertTrue`` was handed a bare generator expression, which is
      always truthy, so the assertion could never fail;
    * ``expected`` was a ``str``, which has no ``.items()``, so the
      intended subset check was unevaluable anyway;
    * ``user`` was passed positionally, unlike every sibling test.
    """
    switch.add_switch(
        ip='2887583784',
        user=self.user_object,
    )
    list_switches = switch.list_switches(
        ip_int='2887583784',
        user=self.user_object,
    )
    self.assertIsNotNone(list_switches)
    # 2887583784 decimal is 172.29.8.40 — check the expected key/value
    # pairs really appear in the returned switch dict.
    expected = {'ip': '172.29.8.40'}
    self.assertTrue(
        all(item in list_switches[0].items() for item in expected.items())
    )
def poll_switch(poller_email, ip_addr, credentials,
                req_obj='mac', oper="SCAN"):
    """Query switch and update switch machines.

    .. note::
       When polling switch succeeds, for each mac it got from polling
       switch, a Machine record associated with the switch is added to
       the database.

    :param poller_email: email of the polling user.
    :type poller_email: str
    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch credentials.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called out of database session scope.
    """
    poller = user_api.get_user_object(poller_email)
    ip_int = long(netaddr.IPAddress(ip_addr))
    lock_name = 'poll switch %s' % ip_addr
    with util.lock(lock_name, timeout=120) as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to poll switch %s' % ip_addr
            )
        logging.debug('poll switch: %s', ip_addr)
        switch_dict, machine_dicts = _poll_switch(
            ip_addr, credentials, req_obj=req_obj, oper=oper
        )
        known_switches = switch_api.list_switches(ip_int=ip_int, user=poller)
        if not known_switches:
            logging.error('no switch found for %s', ip_addr)
            return
        for known_switch in known_switches:
            for discovered in machine_dicts:
                logging.debug('add machine: %s', discovered)
                switch_api.add_switch_machine(
                    known_switch['id'], False, user=poller, **discovered
                )
            switch_api.update_switch(
                known_switch['id'], user=poller, **switch_dict
            )
def pollswitches(switch_ips): """poll switch.""" user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL) poll_switches = [] all_switches = dict([ (switch['ip'], switch['credentials']) for switch in switch_api.list_switches(user) ]) if switch_ips: poll_switches = dict([ (switch_ip, all_switches[switch_ip]) for switch_ip in switch_ips if switch_ip in all_switches ]) else: poll_switches = all_switches if flags.OPTIONS.async: for switch_ip, switch_credentials in poll_switches.items(): celery.send_task( 'compass.tasks.pollswitch', (user.email, switch_ip, switch_credentials) ) else: try: pool = Pool(processes=flags.OPTIONS.thread_pool_size) for switch_ip, switch_credentials in poll_switches.items(): pool.apply_async( poll_switch.poll_switch, (user.email, switch_ip, switch_credentials) ) pool.close() pool.join() except Exception as error: logging.error('failed to poll switches %s', poll_switches) logging.exception(error)
def pollswitches(switch_ips): """poll switch.""" user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL) poll_switches = [] all_switches = dict([ (switch['ip'], switch['credentials']) for switch in switch_api.list_switches(user=user) ]) if switch_ips: poll_switches = dict([ (switch_ip, all_switches[switch_ip]) for switch_ip in switch_ips if switch_ip in all_switches ]) else: poll_switches = all_switches if flags.OPTIONS.async: for switch_ip, switch_credentials in poll_switches.items(): celery.send_task( 'compass.tasks.pollswitch', (user.email, switch_ip, switch_credentials) ) else: try: pool = Pool(processes=flags.OPTIONS.thread_pool_size) for switch_ip, switch_credentials in poll_switches.items(): pool.apply_async( poll_switch.poll_switch, (user.email, switch_ip, switch_credentials) ) pool.close() pool.join() except Exception as error: logging.error('failed to poll switches %s', poll_switches) logging.exception(error)
def setUp(self):
    """Build the full fixture this test class needs.

    Creates an in-memory sqlite database, loads adapters/metadata/
    flavors, then populates: two clusters, one switch with two
    machines, two cluster hosts, two subnets, host networks and log
    history, plus reference os/package config dicts.
    NOTE(review): ordering matters — later steps read ids produced by
    earlier ones.
    """
    super(HostTestCase, self).setUp()
    os.environ['COMPASS_IGNORE_SETTING'] = 'true'
    os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'data'
    )
    reload(setting)
    # fresh in-memory database per test
    database.init('sqlite://')
    database.create_db()
    adapter.load_adapters(force_reload=True)
    metadata.load_metadatas(force_reload=True)
    adapter.load_flavors(force_reload=True)
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    # get adapter information
    # picks the os of the first supported os, and the adapter/flavor
    # ids from the 'allinone' flavor's roles
    list_adapters = adapter.list_adapters(user=self.user_object)
    for list_adapter in list_adapters:
        for supported_os in list_adapter['supported_oses']:
            self.os_id = supported_os['os_id']
            break
        if list_adapter['flavors']:
            details = list_adapter['flavors']
            for detail in details:
                if detail['display_name'] == 'allinone':
                    roles = detail['roles']
                    for role in roles:
                        self.adapter_id = role['adapter_id']
                        self.flavor_id = role['flavor_id']
                        break
    # add cluster
    cluster_names = ['test_cluster1', 'test_cluster2']
    for cluster_name in cluster_names:
        cluster.add_cluster(
            user=self.user_object,
            adapter_id=self.adapter_id,
            os_id=self.os_id,
            flavor_id=self.flavor_id,
            name=cluster_name
        )
    clusters = cluster.list_clusters(user=self.user_object)
    self.roles = None
    for list_cluster in clusters:
        for item in list_cluster['flavor']['roles']:
            self.roles = item
        if list_cluster['name'] == 'test_cluster1':
            self.cluster_id = list_cluster['id']
            break
    # add switch
    switch.add_switch(
        user=self.user_object,
        ip='172.29.8.40'
    )
    switches = switch.list_switches(user=self.user_object)
    self.switch_id = None
    for item in switches:
        self.switch_id = item['id']
    macs = ['28:6e:d4:46:c4:25', '00:0c:29:bf:eb:1d']
    for mac in macs:
        switch.add_switch_machine(
            self.switch_id,
            user=self.user_object,
            mac=mac,
            port='1'
        )
    # get machine information
    machines = machine.list_machines(user=self.user_object)
    self.machine_ids = []
    for item in machines:
        self.machine_ids.append(item['id'])
    # add cluster host
    name = ['newname1', 'newname2']
    for i in range(0, 2):
        cluster.add_cluster_host(
            self.cluster_id,
            user=self.user_object,
            machine_id=self.machine_ids[i],
            name=name[i]
        )
    self.host_ids = []
    clusterhosts = cluster.list_clusterhosts(user=self.user_object)
    for clusterhost in clusterhosts:
        self.host_ids.append(clusterhost['host_id'])
    # add subnet
    subnets = ['10.145.88.0/23', '192.168.100.0/23']
    for subnet in subnets:
        network.add_subnet(
            user=self.user_object,
            subnet=subnet
        )
    list_subnet = network.list_subnets(
        user=self.user_object
    )
    self.subnet_ids = []
    for item in list_subnet:
        self.subnet_ids.append(item['id'])
    # add host network
    host.add_host_network(
        self.host_ids[0],
        user=self.user_object,
        interface='eth0',
        ip='10.145.88.0',
        subnet_id=self.subnet_ids[0],
        is_mgmt=True
    )
    host.add_host_network(
        self.host_ids[1],
        user=self.user_object,
        interface='eth1',
        ip='192.168.100.0',
        subnet_id=self.subnet_ids[1],
        is_promiscuous=True
    )
    # add log history
    filenames = ['log1', 'log2']
    for filename in filenames:
        host.add_host_log_history(
            self.host_ids[0],
            user=self.user_object,
            filename=filename
        )
    # reference os config used by tests that update host configs
    self.os_configs = {
        'general': {
            'language': 'EN',
            'timezone': 'UTC',
            'http_proxy': 'http://127.0.0.1:3128',
            'https_proxy': 'http://127.0.0.1:3128',
            'no_proxy': [
                '127.0.0.1',
                'compass'
            ],
            'ntp_server': '127.0.0.1',
            'dns_servers': [
                '127.0.0.1'
            ],
            'domain': 'ods.com',
            'search_path': [
                'ods.com'
            ],
            'default_gateway': '127.0.0.1',
        },
        'server_credentials': {
            'username': '******',
            'password': '******',
        },
        'partition': {
            '/var': {
                'max_size': '100G',
                'percentage': 10,
                'size': '1G'
            }
        }
    }
    # reference package config used by tests that update package configs
    self.package_configs = {
        'security': {
            'service_credentials': {
                '$service': {
                    'username': '******',
                    'password': '******'
                }
            },
            'console_credentials': {
                '$console': {
                    'username': '******',
                    'password': '******'
                }
            }
        },
        'network_mapping': {
            '$interface_type': 'eth0'
        }
    }
def _prepare_database(self):
    """Populate the database with one of everything this test needs.

    Finds the adapter matching ADAPTER_NAME / OS_NAME / 'allinone'
    flavor, then creates a cluster, a switch with one machine, a
    cluster host, a subnet, a host network, and finally moves host,
    cluster, and clusterhost into the INSTALLING state.
    NOTE(review): steps are order-dependent — each consumes ids
    produced by the previous ones.
    """
    adapter.load_adapters()
    metadata.load_metadatas()
    self.user_object = (
        user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL
        )
    )
    self.adapter_id = None
    self.os_id = None
    self.flavor_id = None
    self.cluster_id = None
    # get adapter information
    # scan for the adapter whose name, supported os, and 'allinone'
    # flavor all match; bail out of the loop once all three are found
    list_adapters = adapter.list_adapters(user=self.user_object)
    for adptr in list_adapters:
        self.adapter_id = None
        if adptr['name'] != ADAPTER_NAME:
            continue
        self.adapter_id = adptr['id']
        self.os_id = None
        for supported_os in adptr['supported_oses']:
            if supported_os['name'] == OS_NAME:
                self.os_id = supported_os['os_id']
                break
        if not self.os_id:
            continue
        if (
            'package_installer' in adptr.keys() and
            adptr['flavors'] != [] and
            adptr['distributed_system_name'] == 'openstack'
        ):
            self.flavor_id = None
            for flavor in adptr['flavors']:
                if flavor['name'] == 'allinone':
                    self.flavor_id = flavor['id']
                    break
            if not self.flavor_id:
                continue
        else:
            continue
        if self.adapter_id and self.os_id and self.flavor_id:
            break
    if not self.adapter_id:
        raise Exception('adapter id not found')
    if not self.os_id:
        raise Exception('os id not found')
    if not self.flavor_id:
        raise Exception('flavor id not found')
    # add cluster
    cluster.add_cluster(
        adapter_id=self.adapter_id,
        os_id=self.os_id,
        flavor_id=self.flavor_id,
        name='test_cluster',
        user=self.user_object,
    )
    list_clusters = cluster.list_clusters(user=self.user_object)
    for list_cluster in list_clusters:
        if list_cluster['name'] == 'test_cluster':
            self.cluster_id = list_cluster['id']
            break
    # NOTE(review): this second loop overwrites cluster_id with the
    # last cluster's id regardless of name — looks redundant with the
    # loop above; confirm which id is intended before removing either.
    for list_cluster in list_clusters:
        self.cluster_id = list_cluster['id']
    # add switch
    switch.add_switch(
        ip=SWITCH_IP,
        user=self.user_object,
    )
    list_switches = switch.list_switches(user=self.user_object)
    for list_switch in list_switches:
        self.switch_id = list_switch['id']
    switch.add_switch_machine(
        self.switch_id,
        user=self.user_object,
        mac=MACHINE_MAC,
        port='1'
    )
    # get machine information
    list_machines = machine.list_machines(user=self.user_object)
    for list_machine in list_machines:
        self.machine_id = list_machine['id']
    # add cluster host
    cluster.add_cluster_host(
        self.cluster_id,
        user=self.user_object,
        machine_id=self.machine_id,
        name='test_clusterhost'
    )
    list_clusterhosts = cluster.list_clusterhosts(user=self.user_object)
    for list_clusterhost in list_clusterhosts:
        self.host_id = list_clusterhost['host_id']
        self.clusterhost_id = list_clusterhost['clusterhost_id']
    # add subnet
    network.add_subnet(
        subnet=SUBNET,
        user=self.user_object,
    )
    list_subnets = network.list_subnets(
        user=self.user_object
    )
    for list_subnet in list_subnets:
        self.subnet_id = list_subnet['id']
    # add host network
    host.add_host_network(
        self.host_id,
        user=self.user_object,
        interface='eth0',
        ip=HOST_IP,
        subnet_id=self.subnet_id,
        is_mgmt=True
    )
    # get clusterhost
    list_clusterhosts = cluster.list_clusterhosts(
        user=self.user_object
    )
    for list_clusterhost in list_clusterhosts:
        self.clusterhost_id = list_clusterhost['id']
    # update host state
    self.list_hosts = host.list_hosts(user=self.user_object)
    for list_host in self.list_hosts:
        self.host_id = list_host['id']
    self.host_state = host.update_host_state(
        self.host_id,
        user=self.user_object,
        state='INSTALLING'
    )
    # update cluster state
    cluster.update_cluster_state(
        self.cluster_id,
        user=self.user_object,
        state='INSTALLING'
    )
    # update clusterhost state
    cluster.update_clusterhost_state(
        self.clusterhost_id,
        user=self.user_object,
        state='INSTALLING'
    )