def __init__(self, args):
    """Set up the discovery server: HTTP routes, sandesh, and the DB.

    Registers every bottle REST endpoint (publish/subscribe/query plus the
    introspection pages), initializes sandesh logging, connects to the
    backend DB and rebuilds the in-memory subscriber state from it.

    :param args: parsed command-line/config options; must provide
        service_config, cassandra_config, listen_ip_addr, listen_port,
        worker_id, collectors, http_server_port, logging and reset_config
        attributes.
    """
    self._homepage_links = []
    self._args = args
    self.service_config = args.service_config
    self.cassandra_config = args.cassandra_config
    # Diagnostic counters exposed via the /stats endpoint.
    self._debug = {
        'hb_stray': 0,
        'msg_pubs': 0,
        'msg_subs': 0,
        'msg_query': 0,
        'msg_hbt': 0,
        'ttl_short': 0,
        'policy_rr': 0,
        'policy_lb': 0,
        'policy_fi': 0,
        'db_upd_hb': 0,
        'throttle_subs': 0,
        '503': 0,
        'count_lb': 0,
    }
    self._ts_use = 1
    # service-key -> short-TTL bookkeeping (see subscribe handling).
    self.short_ttl_map = {}
    # Serializes critical sections; single-token semaphore acts as a lock.
    self._sem = BoundedSemaphore(1)

    self._base_url = "http://%s:%s" % (self._args.listen_ip_addr,
                                       self._args.listen_port)
    self._pipe_start_app = None
    bottle.route('/', 'GET', self.homepage_http_get)

    # heartbeat
    bottle.route('/heartbeat', 'POST', self.api_heartbeat)

    # publish service
    bottle.route('/publish', 'POST', self.api_publish)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/publish',
                   'publish service'))
    bottle.route('/publish/<end_point>', 'POST', self.api_publish)

    # subscribe service
    bottle.route('/subscribe', 'POST', self.api_subscribe)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/subscribe',
                   'subscribe service'))

    # query service
    bottle.route('/query', 'POST', self.api_query)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/query',
                   'query service'))

    # collection - services
    bottle.route('/services', 'GET', self.show_all_services)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/services',
                   'show published services'))
    bottle.route('/services.json', 'GET', self.services_json)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/services.json',
                   'List published services in JSON format'))
    # show a specific service type
    bottle.route('/services/<service_type>', 'GET', self.show_all_services)
    # api to perform on-demand load-balance across available publishers
    bottle.route('/load-balance/<service_type>', 'POST', self.api_lb_service)

    # update service
    bottle.route('/service/<id>', 'PUT', self.service_http_put)
    # get service info
    bottle.route('/service/<id>', 'GET', self.service_http_get)
    bottle.route('/service/<id>/brief', 'GET', self.service_brief_http_get)
    # delete (un-publish) service
    bottle.route('/service/<id>', 'DELETE', self.service_http_delete)

    # collection - clients
    bottle.route('/clients', 'GET', self.show_all_clients)
    bottle.route('/clients/<service_type>/<service_id>', 'GET',
                 self.show_all_clients)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/clients',
                   'list all subscribers'))
    bottle.route('/clients.json', 'GET', self.clients_json)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/clients.json',
                   'list all subscribers in JSON format'))

    # show config
    bottle.route('/config', 'GET', self.show_config)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/config',
                   'show discovery service config'))

    # show debug
    bottle.route('/stats', 'GET', self.show_stats)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/stats',
                   'show discovery service stats'))

    # cleanup
    bottle.route('/cleanup', 'GET', self.cleanup_http_get)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/cleanup',
                   'Purge inactive publishers'))

    if not self._pipe_start_app:
        self._pipe_start_app = bottle.app()

    # sandesh init
    self._sandesh = Sandesh()
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit( \
            self._args.sandesh_send_rate_limit)
    module = Module.DISCOVERY_SERVICE
    module_name = ModuleNames[module]
    node_type = Module2NodeType[module]
    node_type_name = NodeTypeNames[node_type]
    instance_id = self._args.worker_id
    # Discovery server registers with itself on the loopback address so
    # sandesh can resolve collectors through discovery.
    disc_client = discovery_client.DiscoveryClient(
        '127.0.0.1', self._args.listen_port,
        ModuleNames[Module.DISCOVERY_SERVICE])
    self._sandesh.init_generator(
        module_name, socket.gethostname(), node_type_name, instance_id,
        self._args.collectors, 'discovery_context',
        int(self._args.http_server_port), ['sandesh'], disc_client,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    self._sandesh.set_logging_params(enable_local_log=self._args.log_local,
                                     category=self._args.log_category,
                                     level=self._args.log_level,
                                     enable_syslog=args.use_syslog,
                                     file=self._args.log_file)
    self._sandesh.trace_buffer_create(name="dsHeartBeatTraceBuf", size=1000)

    # DB interface initialization
    self._db_connect(self._args.reset_config)
    self._db_conn.db_update_service_entry_oper_state()

    # build in-memory subscriber data
    self._sub_data = {}
    for (client_id, service_type) in self._db_conn.subscriber_entries():
        self.create_sub_data(client_id, service_type)
def __init__(self, args=None):
    """Bring up the device manager: sandesh, API/AMQP links, DB resync.

    Initialization order matters: discovery client -> push-config pacing
    knobs -> sandesh -> VNC API (retried until reachable) -> rabbit/kombu
    -> cassandra -> full in-memory resync of config objects. Blocks at the
    end on the kombu greenlets, so this constructor does not return during
    normal operation.

    :param args: parsed options; must carry discovery, sandesh, keystone,
        API-server, rabbit and repush/push-delay attributes.
    """
    self._args = args

    # Initialize discovery client
    self._disc = None
    if self._args.disc_server_ip and self._args.disc_server_port:
        self._disc = client.DiscoveryClient(
            self._args.disc_server_ip,
            self._args.disc_server_port,
            ModuleNames[Module.DEVICE_MANAGER])

    # Pacing knobs for pushing config to physical routers.
    PushConfigState.set_repush_interval(int(self._args.repush_interval))
    PushConfigState.set_repush_max_interval(
        int(self._args.repush_max_interval))
    PushConfigState.set_push_delay_per_kb(
        float(self._args.push_delay_per_kb))
    PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
    PushConfigState.set_push_delay_enable(
        bool(self._args.push_delay_enable))

    self._sandesh = Sandesh()
    # Reset the sandesh send rate limit value
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    module = Module.DEVICE_MANAGER
    module_name = ModuleNames[module]
    node_type = Module2NodeType[module]
    node_type_name = NodeTypeNames[node_type]
    instance_id = INSTANCE_ID_DEFAULT
    hostname = socket.gethostname()
    self._sandesh.init_generator(module_name, hostname, node_type_name,
                                 instance_id, self._args.collectors,
                                 'to_bgp_context',
                                 int(args.http_server_port),
                                 ['cfgm_common', 'device_manager.sandesh'],
                                 self._disc)
    self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                     category=args.log_category,
                                     level=args.log_level,
                                     file=args.log_file,
                                     enable_syslog=args.use_syslog,
                                     syslog_facility=args.syslog_facility)
    PhysicalRouterDM._sandesh = self._sandesh
    ConnectionState.init(
        self._sandesh, hostname, module_name, instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus)

    # Retry till API server is up
    connected = False
    self.connection_state_update(ConnectionStatus.INIT)
    while not connected:
        try:
            self._vnc_lib = VncApi(
                args.admin_user, args.admin_password,
                args.admin_tenant_name, args.api_server_ip,
                args.api_server_port,
                api_server_use_ssl=args.api_server_use_ssl)
            connected = True
            self.connection_state_update(ConnectionStatus.UP)
        except requests.exceptions.ConnectionError as e:
            # Update connection info
            self.connection_state_update(ConnectionStatus.DOWN, str(e))
            time.sleep(3)
        except ResourceExhaustionError:  # haproxy throws 503
            time.sleep(3)

    rabbit_servers = self._args.rabbit_server
    rabbit_port = self._args.rabbit_port
    rabbit_user = self._args.rabbit_user
    rabbit_password = self._args.rabbit_password
    rabbit_vhost = self._args.rabbit_vhost
    rabbit_ha_mode = self._args.rabbit_ha_mode

    self._db_resync_done = gevent.event.Event()

    q_name = 'device_manager.%s' % (socket.gethostname())
    self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                     rabbit_user, rabbit_password,
                                     rabbit_vhost, rabbit_ha_mode, q_name,
                                     self._vnc_subscribe_callback,
                                     self.config_log)

    self._cassandra = DMCassandraDB.getInstance(self, _zookeeper_client)
    DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)

    # Resync: rebuild the in-memory object model from the config DB.
    for obj in GlobalSystemConfigDM.list_obj():
        GlobalSystemConfigDM.locate(obj['uuid'], obj)
    for obj in GlobalVRouterConfigDM.list_obj():
        GlobalVRouterConfigDM.locate(obj['uuid'], obj)
    for obj in VirtualNetworkDM.list_obj():
        vn = VirtualNetworkDM.locate(obj['uuid'], obj)
        if vn is not None and vn.routing_instances is not None:
            for ri_id in vn.routing_instances:
                ri_obj = RoutingInstanceDM.locate(ri_id)
    for obj in BgpRouterDM.list_obj():
        BgpRouterDM.locate(obj['uuid'], obj)
    pr_obj_list = PhysicalRouterDM.list_obj()
    pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
    # Drop cassandra state for physical routers no longer in config.
    self._cassandra.handle_pr_deletes(pr_uuid_set)
    for obj in PortTupleDM.list_obj():
        PortTupleDM.locate(obj['uuid'], obj)
    for obj in pr_obj_list:
        pr = PhysicalRouterDM.locate(obj['uuid'], obj)
        li_set = pr.logical_interfaces
        vmi_set = set()
        for pi_id in pr.physical_interfaces:
            pi = PhysicalInterfaceDM.locate(pi_id)
            if pi:
                li_set |= pi.logical_interfaces
                vmi_set |= pi.virtual_machine_interfaces
        for li_id in li_set:
            li = LogicalInterfaceDM.locate(li_id)
            if li and li.virtual_machine_interface:
                vmi_set |= set([li.virtual_machine_interface])
        for vmi_id in vmi_set:
            vmi = VirtualMachineInterfaceDM.locate(vmi_id)
    si_obj_list = ServiceInstanceDM.list_obj()
    si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
    self._cassandra.handle_pnf_resource_deletes(si_uuid_set)
    for obj in si_obj_list:
        ServiceInstanceDM.locate(obj['uuid'], obj)
    for obj in InstanceIpDM.list_obj():
        InstanceIpDM.locate(obj['uuid'], obj)
    for obj in FloatingIpDM.list_obj():
        FloatingIpDM.locate(obj['uuid'], obj)
    for vn in VirtualNetworkDM.values():
        vn.update_instance_ip_map()
    for pr in PhysicalRouterDM.values():
        pr.set_config_state()
    self._db_resync_done.set()
    # Blocks forever servicing AMQP notifications.
    gevent.joinall(self._vnc_kombu.greenlets())
def __init__(self, dm_logger=None, args=None):
    """Bring up the device manager (logger-injectable variant).

    Same initialization sequence as the legacy constructor but the logger
    can be supplied by the caller (e.g. from tests); otherwise a
    ConfigServiceLogger is built, optionally backed by a discovery client
    with SSL credentials. Blocks at the end on the AMQP greenlets.

    :param dm_logger: pre-built logger to reuse, or None to construct one.
    :param args: parsed options (discovery, keystone, API-server, AMQP,
        push-delay attributes).
    :raises KeyboardInterrupt: re-raised after closing the AMQP handle.
    """
    self._args = args

    # Pacing knobs for pushing config to physical routers.
    PushConfigState.set_repush_interval(int(self._args.repush_interval))
    PushConfigState.set_repush_max_interval(
        int(self._args.repush_max_interval))
    PushConfigState.set_push_delay_per_kb(
        float(self._args.push_delay_per_kb))
    PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
    PushConfigState.set_push_delay_enable(
        bool(self._args.push_delay_enable))

    if dm_logger is not None:
        self.logger = dm_logger
    else:
        # Initialize discovery client
        discovery_client = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            # Only pass the SSL material that was actually configured.
            dss_kwargs = {}
            if self._args.disc_server_ssl:
                if self._args.disc_server_cert:
                    dss_kwargs.update(
                        {'cert': self._args.disc_server_cert})
                if self._args.disc_server_key:
                    dss_kwargs.update({'key': self._args.disc_server_key})
                if self._args.disc_server_cacert:
                    dss_kwargs.update(
                        {'cacert': self._args.disc_server_cacert})
            discovery_client = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER], **dss_kwargs)
        # Initialize logger
        module = Module.DEVICE_MANAGER
        module_pkg = "device_manager"
        self.logger = ConfigServiceLogger(discovery_client, module,
                                          module_pkg, args)

    # Retry till API server is up
    connected = False
    self.connection_state_update(ConnectionStatus.INIT)
    while not connected:
        try:
            self._vnc_lib = VncApi(
                args.admin_user, args.admin_password,
                args.admin_tenant_name, args.api_server_ip,
                args.api_server_port,
                api_server_use_ssl=args.api_server_use_ssl)
            connected = True
            self.connection_state_update(ConnectionStatus.UP)
        except requests.exceptions.ConnectionError as e:
            # Update connection info
            self.connection_state_update(ConnectionStatus.DOWN, str(e))
            time.sleep(3)
        except ResourceExhaustionError:  # haproxy throws 503
            time.sleep(3)

    # Initialize amqp
    self._vnc_amqp = DMAmqpHandle(self.logger, self.REACTION_MAP,
                                  self._args)
    self._vnc_amqp.establish()

    # Initialize cassandra
    self._cassandra = DMCassandraDB.getInstance(self, _zookeeper_client)
    DBBaseDM.init(self, self.logger, self._cassandra)
    DBBaseDM._sandesh = self.logger._sandesh

    # Resync: rebuild the in-memory object model from the config DB.
    for obj in GlobalSystemConfigDM.list_obj():
        GlobalSystemConfigDM.locate(obj['uuid'], obj)
    for obj in GlobalVRouterConfigDM.list_obj():
        GlobalVRouterConfigDM.locate(obj['uuid'], obj)
    for obj in VirtualNetworkDM.list_obj():
        vn = VirtualNetworkDM.locate(obj['uuid'], obj)
        if vn is not None and vn.routing_instances is not None:
            for ri_id in vn.routing_instances:
                ri_obj = RoutingInstanceDM.locate(ri_id)
    for obj in BgpRouterDM.list_obj():
        BgpRouterDM.locate(obj['uuid'], obj)
    pr_obj_list = PhysicalRouterDM.list_obj()
    pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
    # Drop cassandra state for physical routers no longer in config.
    self._cassandra.handle_pr_deletes(pr_uuid_set)
    for obj in PortTupleDM.list_obj():
        PortTupleDM.locate(obj['uuid'], obj)
    for obj in pr_obj_list:
        pr = PhysicalRouterDM.locate(obj['uuid'], obj)
        li_set = pr.logical_interfaces
        vmi_set = set()
        for pi_id in pr.physical_interfaces:
            pi = PhysicalInterfaceDM.locate(pi_id)
            if pi:
                li_set |= pi.logical_interfaces
                vmi_set |= pi.virtual_machine_interfaces
        for li_id in li_set:
            li = LogicalInterfaceDM.locate(li_id)
            if li and li.virtual_machine_interface:
                vmi_set |= set([li.virtual_machine_interface])
        for vmi_id in vmi_set:
            vmi = VirtualMachineInterfaceDM.locate(vmi_id)
    si_obj_list = ServiceInstanceDM.list_obj()
    si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
    self._cassandra.handle_pnf_resource_deletes(si_uuid_set)
    for obj in si_obj_list:
        ServiceInstanceDM.locate(obj['uuid'], obj)
    for obj in InstanceIpDM.list_obj():
        InstanceIpDM.locate(obj['uuid'], obj)
    for obj in FloatingIpDM.list_obj():
        FloatingIpDM.locate(obj['uuid'], obj)
    for vn in VirtualNetworkDM.values():
        vn.update_instance_ip_map()
    for pr in PhysicalRouterDM.values():
        pr.set_config_state()
    self._vnc_amqp._db_resync_done.set()
    try:
        # Blocks forever servicing AMQP notifications.
        gevent.joinall(self._vnc_amqp._vnc_kombu.greenlets())
    except KeyboardInterrupt:
        self._vnc_amqp.close()
        raise
def __init__(self, args=None):
    """Bring up the schema transformer: sandesh, AMQP, DB, and resync.

    Hooks the sandesh introspect request handlers, initializes logging and
    connection-state reporting, connects to rabbit, then reads the full
    config from cassandra (reinit). If the DB read fails, the rabbit
    constructs are torn down before the error propagates.

    :param args: parsed options (discovery, sandesh, rabbit/kombu-SSL
        attributes).
    :raises Exception: whatever the cassandra/reinit path raised, after
        shutting down the kombu client.
    """
    self._args = args
    self._fabric_rt_inst_obj = None

    # Initialize discovery client
    self._disc = None
    if self._args.disc_server_ip and self._args.disc_server_port:
        self._disc = client.DiscoveryClient(
            self._args.disc_server_ip,
            self._args.disc_server_port,
            ModuleNames[Module.SCHEMA_TRANSFORMER])

    self._sandesh = Sandesh()
    # Reset the sandesh send rate limit value
    if args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            args.sandesh_send_rate_limit)
    # Wire up introspect request handlers.
    # NOTE(review): 'RoutintInstanceList' spelling comes from the
    # generated sandesh definitions — do not "fix" it here.
    sandesh.VnList.handle_request = self.sandesh_vn_handle_request
    sandesh.RoutintInstanceList.handle_request = \
        self.sandesh_ri_handle_request
    sandesh.ServiceChainList.handle_request = \
        self.sandesh_sc_handle_request
    sandesh.StObjectReq.handle_request = \
        self.sandesh_st_object_handle_request
    module = Module.SCHEMA_TRANSFORMER
    module_name = ModuleNames[module]
    node_type = Module2NodeType[module]
    node_type_name = NodeTypeNames[node_type]
    self.table = "ObjectConfigNode"
    instance_id = INSTANCE_ID_DEFAULT
    hostname = socket.gethostname()
    self._sandesh.init_generator(
        module_name, hostname, node_type_name, instance_id,
        self._args.collectors, 'to_bgp_context',
        int(args.http_server_port),
        ['cfgm_common', 'schema_transformer.sandesh'], self._disc,
        logger_class=args.logger_class,
        logger_config_file=args.logging_conf)
    self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                     category=args.log_category,
                                     level=args.log_level,
                                     file=args.log_file,
                                     enable_syslog=args.use_syslog,
                                     syslog_facility=args.syslog_facility)
    ConnectionState.init(
        self._sandesh, hostname, module_name, instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.table)

    self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                      size=1000)

    rabbit_servers = self._args.rabbit_server
    rabbit_port = self._args.rabbit_port
    rabbit_user = self._args.rabbit_user
    rabbit_password = self._args.rabbit_password
    rabbit_vhost = self._args.rabbit_vhost
    rabbit_ha_mode = self._args.rabbit_ha_mode

    self._db_resync_done = gevent.event.Event()

    q_name = 'schema_transformer.%s' % (socket.gethostname())
    self._vnc_kombu = VncKombuClient(
        rabbit_servers, rabbit_port, rabbit_user, rabbit_password,
        rabbit_vhost, rabbit_ha_mode, q_name,
        self._vnc_subscribe_callback, self.config_log,
        rabbit_use_ssl=self._args.rabbit_use_ssl,
        kombu_ssl_version=self._args.kombu_ssl_version,
        kombu_ssl_keyfile=self._args.kombu_ssl_keyfile,
        kombu_ssl_certfile=self._args.kombu_ssl_certfile,
        kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs)

    try:
        self._cassandra = SchemaTransformerDB(self, _zookeeper_client)
        DBBaseST.init(self, self._sandesh.logger(), self._cassandra)
        DBBaseST._sandesh = self._sandesh
        DBBaseST._vnc_lib = _vnc_lib
        ServiceChain.init()
        self.reinit()
        # create cpu_info object to send periodic updates
        sysinfo_req = False
        cpu_info = vnc_cpu_info.CpuInfo(module_name, instance_id,
                                        sysinfo_req, self._sandesh, 60)
        self._cpu_info = cpu_info
        self._db_resync_done.set()
    except Exception as e:
        # If any of the above tasks like CassandraDB read fails, cleanup
        # the RMQ constructs created earlier and then give up.
        self._vnc_kombu.shutdown()
        raise e
def get_discovery_client(self):
    """Build and return a DiscoveryClient bound to this object's
    discovery server address, port and module id."""
    return client.DiscoveryClient(self.discovery_server,
                                  self.discovery_port,
                                  self.module_id)
def get_discovery_client(self):
    """Build and return a DiscoveryClient for this service, using the
    module name recorded in ``self.type_info``."""
    return client.DiscoveryClient(self.discovery_server,
                                  self.discovery_port,
                                  self.type_info._module_name)
def parse(self):
    '''Parse config file(s) and command line into ``self._args``.

    Precedence (lowest to highest): built-in defaults, values from the
    config file [DEFAULTS]/[KEYSTONE]/[DISCOVERY] sections, then command
    line flags. Also creates ``self._disc``, the discovery client.

    command line example
contrail-snmp-scanner --log_level SYS_DEBUG
                      --logging_level DEBUG
                      --log_category test
                      --log_file <stdout>
                      --use_syslog
                      --syslog_facility LOG_USER
                      --disc_server_ip 127.0.0.1
                      --disc_server_port 5998
                      --conf_file /etc/contrail/contrail-snmp-scanner.conf

    conf file example:

    [DEFAULTS]
    log_local = 0
    log_level = SYS_DEBUG
    log_category =
    log_file = /var/log/contrail/contrail-analytics-api.log
    file = /etc/contrail/snmp-dev.ini

    /etc/contrail/snmp-dev.ini example:

    #snmp version 1 or 2
    [1.1.1.190]
    Community = public
    Version = 2

    #snmp version 3
    [1.1.1.191]
    Version = 3
    SecLevel = authPriv
    AuthProto = SHA
    AuthPass = foo
    PrivProto = AES
    PrivPass = foo
    SecName = snmpuser
    # Mibs default to all, to get a subset
    Mibs = LldpTable, ArpTable
    '''
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)

    kwargs = {'help': "Specify config file", 'metavar': "FILE",
              'action': 'append'}
    if os.path.exists(self.CONF_DEFAULT_PATH):
        kwargs['default'] = [self.CONF_DEFAULT_PATH]
    conf_parser.add_argument("-c", "--conf_file", **kwargs)
    args, remaining_argv = conf_parser.parse_known_args(self._argv.split())

    defaults = {
        'collectors': ['127.0.0.1:8086'],
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'scan_frequency': 600,
        'fast_scan_frequency': 60,
        'http_server_port': 5920,
        'zookeeper': '127.0.0.1:2181',
    }
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': 35357,
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'default-domain'
    }
    disc_opts = {
        'disc_server_ip': '127.0.0.1',
        'disc_server_port': 5998,
    }
    config = None
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        # Preserve option-name case (default lowercases keys).
        config.optionxform = str
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'DISCOVERY' in config.sections():
            disc_opts.update(dict(config.items('DISCOVERY')))

    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(ksopts)
    defaults.update(disc_opts)
    parser.set_defaults(**defaults)
    parser.add_argument("--collectors",
        help="List of Collector IP addresses in ip:port format",
        nargs="+")
    parser.add_argument("--log_file",
        help="Filename for the logs to be written to")
    parser.add_argument("--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument("--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument("--use_syslog", action="store_true",
        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
        help="Syslog facility to receive log lines")
    parser.add_argument("--scan_frequency", type=int,
        help="Time between snmp full poll")
    parser.add_argument("--fast_scan_frequency", type=int,
        help="Time between snmp interface status poll")
    parser.add_argument("--http_server_port", type=int,
        help="introspect server port")
    parser.add_argument("--auth_host",
        help="ip of keystone server")
    parser.add_argument("--auth_protocol",
        help="keystone authentication protocol")
    parser.add_argument("--auth_port", type=int,
        help="ip of keystone server")
    parser.add_argument("--admin_user",
        help="Name of keystone admin user")
    parser.add_argument("--admin_password",
        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
        help="Tenant name for keystone admin user")
    parser.add_argument("--zookeeper",
        help="ip:port of zookeeper server")
    parser.add_argument("--disc_server_ip",
        help="Discovery Server IP address")
    parser.add_argument("--disc_server_port", type=int,
        help="Discovery Server port")
    # Credentials come from exactly one place: a local file or api-server.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--device-config-file",
        help="where to look for snmp credentials")
    group.add_argument("--api_server",
        help="ip:port of api-server for snmp credentials")
    self._args = parser.parse_args(remaining_argv)
    # A value taken from the config file arrives as one string; split it
    # into the list form the rest of the code expects.
    if type(self._args.collectors) is str:
        self._args.collectors = self._args.collectors.split()
    self._args.config_sections = config
    self._disc = client.DiscoveryClient(*self.discovery_params())
def test_bug_1549243(self):
    """Regression test for bug 1549243: a DSA rule that pins both the
    publisher and subscriber sides to 77.77.2.0/24 must steer matching
    subscribers to the matching publisher, and the in-use distribution
    must stay stable across TTL-driven resubscribes."""
    puburl = '/publish'
    suburl = "/subscribe"
    service_type = 'pulkit-pub'
    subscriber_type = "pulkit-sub"

    # Rule: publishers and subscribers in 77.77.2.0/24 are tied together.
    dsa = DiscoveryServiceAssignment()
    rule_entry = build_dsa_rule_entry('77.77.2.0/24,%s 77.77.2.0/24,%s' %
        (service_type, subscriber_type))
    rule_uuid = uuid.uuid4()
    dsa_rule1 = DsaRule(name=str(rule_uuid), parent_obj=dsa,
                        dsa_rule_entry=rule_entry)
    dsa_rule1.set_uuid(str(rule_uuid))
    self._vnc_lib.dsa_rule_create(dsa_rule1)

    # publish 3 instances
    pub_tasks = []
    client_type = 'test-discovery'
    for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
        pub_id = 'test_discovery-%s' % ipaddr
        pub_data = {service_type: '%s-%s' % (service_type, ipaddr)}
        disc = client.DiscoveryClient(
            self._disc_server_ip, self._disc_server_port,
            client_type, pub_id)
        disc.set_remote_addr(ipaddr)
        task = disc.publish(service_type, pub_data)
        pub_tasks.append(task)
    time.sleep(1)

    # Verify all services are published.
    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), 3)

    # 6 subscribers outside the rule's subnet, 4 inside it.
    service_count = 2
    sub_tasks = []
    for remote, count in [("77.77.3.11", 6), ("77.77.2.11", 4)]:
        for i in range(count):
            subscriber_id = "client-%s-%d" % (remote, i)
            disc = client.DiscoveryClient(
                self._disc_server_ip, self._disc_server_port,
                subscriber_type, pub_id=subscriber_id)
            disc.set_remote_addr(remote)
            obj = disc.subscribe(service_type, service_count,
                                 info_callback, subscriber_id)
            sub_tasks.append(obj.task)
            # Stagger the subscriptions.
            time.sleep(1)
    print 'Started tasks to subscribe service %s, count %d' \
        % (service_type, service_count)

    # validate all clients have subscribed
    time.sleep(1)
    (code, msg) = self._http_get('/clients.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), 6 * 2 + 4)

    # verify service assignment is 4,4,8
    # (the 4 rule-matched clients get only the 77.77.2.10 publisher,
    #  doubling its in-use count)
    expected_in_use_counts = {
        'test_discovery-77.77.1.10': 4,
        'test_discovery-77.77.2.10': 8,
        'test_discovery-77.77.3.10': 4,
    }
    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), 3)
    success = validate_in_use_count(response, expected_in_use_counts,
        'In-use count after initial subscribe')
    self.assertEqual(success, True)

    # validate assignment remains same after resubscribe
    time.sleep(2 * 60)
    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), 3)
    success = validate_in_use_count(response, expected_in_use_counts,
        'In-use count after initial subscribe')
    self.assertEqual(success, True)
def __init__(self, args=None):
    """Bring up the (legacy) device manager against VncCassandraClient.

    Sequence: discovery client -> sandesh -> VNC API (retried until
    reachable) -> rabbit/kombu -> cassandra -> walk physical routers and
    their interface graph to rebuild in-memory state, then push config.
    Ends in an infinite sleep loop, so this constructor never returns.

    :param args: parsed options (discovery, keystone, API-server, rabbit,
        cassandra attributes).
    """
    self._args = args

    # Initialize discovery client
    self._disc = None
    if self._args.disc_server_ip and self._args.disc_server_port:
        self._disc = client.DiscoveryClient(
            self._args.disc_server_ip,
            self._args.disc_server_port,
            ModuleNames[Module.DEVICE_MANAGER])

    self._sandesh = Sandesh()
    module = Module.DEVICE_MANAGER
    module_name = ModuleNames[module]
    node_type = Module2NodeType[module]
    node_type_name = NodeTypeNames[node_type]
    instance_id = INSTANCE_ID_DEFAULT
    hostname = socket.gethostname()
    self._sandesh.init_generator(module_name, hostname, node_type_name,
                                 instance_id, self._args.collectors,
                                 'to_bgp_context',
                                 int(args.http_server_port),
                                 ['cfgm_common', 'device_manager.sandesh'],
                                 self._disc)
    self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                     category=args.log_category,
                                     level=args.log_level,
                                     file=args.log_file,
                                     enable_syslog=args.use_syslog,
                                     syslog_facility=args.syslog_facility)
    ConnectionState.init(
        self._sandesh, hostname, module_name, instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus)

    # Retry till API server is up
    connected = False
    self.connection_state_update(ConnectionStatus.INIT)
    while not connected:
        try:
            self._vnc_lib = VncApi(args.admin_user, args.admin_password,
                                   args.admin_tenant_name,
                                   args.api_server_ip,
                                   args.api_server_port)
            connected = True
            self.connection_state_update(ConnectionStatus.UP)
        except requests.exceptions.ConnectionError as e:
            # Update connection info
            self.connection_state_update(ConnectionStatus.DOWN, str(e))
            time.sleep(3)
        except ResourceExhaustionError:  # haproxy throws 503
            time.sleep(3)

    rabbit_servers = self._args.rabbit_server
    rabbit_port = self._args.rabbit_port
    rabbit_user = self._args.rabbit_user
    rabbit_password = self._args.rabbit_password
    rabbit_vhost = self._args.rabbit_vhost
    rabbit_ha_mode = self._args.rabbit_ha_mode

    self._db_resync_done = gevent.event.Event()

    q_name = 'device_manager.%s' % (socket.gethostname())
    self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                     rabbit_user, rabbit_password,
                                     rabbit_vhost, rabbit_ha_mode, q_name,
                                     self._vnc_subscribe_callback,
                                     self.config_log)

    cass_server_list = self._args.cassandra_server_list
    reset_config = self._args.reset_config
    self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                         self._args.cluster_id, None,
                                         self.config_log)
    DBBase.init(self, self._sandesh.logger(), self._cassandra)
    ok, pr_list = self._cassandra._cassandra_physical_router_list()
    if not ok:
        self.config_log('physical router list returned error: %s' %
                        pr_list)
    else:
        # Walk each physical router's interface graph to find every
        # virtual network it touches, then materialize those networks.
        vn_set = set()
        for fq_name, uuid in pr_list:
            pr = PhysicalRouterDM.locate(uuid)
            if pr.bgp_router:
                BgpRouterDM.locate(pr.bgp_router)
            vn_set |= pr.virtual_networks
            li_set = pr.logical_interfaces
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
            vmi_set = set()
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)
                if vmi:
                    vn_set |= set([vmi.virtual_network])
        for vn_id in vn_set:
            VirtualNetworkDM.locate(vn_id)
        for pr in PhysicalRouterDM.values():
            pr.push_config()
    self._db_resync_done.set()

    # Just wait indefinitely; AMQP callbacks drive further work.
    while 1:
        time.sleep(5)
def test_load_balance(self): # publish 3 instances of service foobar tasks = [] service_type = 'foobar' for i in range(3): client_type = 'test-discovery' pub_id = 'test_discovery-%d' % i pub_data = '%s-%d' % ('foobar', i) disc = client.DiscoveryClient(self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) time.sleep(1) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), 3) self.assertEqual(response['services'][0]['service_type'], 'foobar') # multiple subscribers for 2 instances each subcount = 20 service_count = 2 tasks = [] for i in range(subcount): disc = client.DiscoveryClient(self._disc_server_ip, self._disc_server_port, "test-load-balance-%d" % i) obj = disc.subscribe(service_type, service_count, info_callback) tasks.append(obj.task) time.sleep(1) print 'Started %d tasks to subscribe service %s, count %d' \ % (subcount, service_type, service_count) # validate all clients have subscribed time.sleep(1) (code, msg) = self._http_get('/clients.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), subcount * service_count) # start one more publisher pub_id = 'test_discovery-3' pub_data = 'foobar-3' disc = client.DiscoveryClient(self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) # verify 4th publisher is up (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), 4) print response # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 min)' time.sleep(1 * 60) # total subscriptions (must be subscount * service_count) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) subs = 
sum([item['in_use'] for item in response['services']]) self.assertEqual(subs, subcount * service_count) # verify newly added in-use count is 0 data = [ item for item in response['services'] if item['service_id'] == 'test_discovery-3:foobar' ] entry = data[0] self.assertEqual(len(data), 1) self.assertEqual(entry['in_use'], 0) # Issue load-balance command (code, msg) = self._http_post('/load-balance/foobar', '') self.assertEqual(code, 200) # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 min)' time.sleep(1 * 60) pass # total subscriptions (must still be subscount * service_count) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) subs = sum([item['in_use'] for item in response['services']]) self.assertEqual(subs, subcount * service_count) # verify newly added in-use count is 10 data = [ item for item in response['services'] if item['service_id'] == 'test_discovery-3:foobar' ] entry = data[0] self.assertEqual(len(data), 1) print 'After LB entry %s' % entry self.assertEqual(entry['in_use'], 10)
def test_load_balance_siul(self):
    """Verify load-balance for subscribers using a service-in-use-list.

    Publishes 2 instances, subscribes 20 clients that explicitly pin
    both publishers via 'service-in-use-list', adds a 3rd publisher
    (whose in-use count must start at 0), issues the load-balance
    command, re-subscribes everyone and validates assignments are fair.
    """
    # publish 2 instances
    tasks = []
    service_type = 'SvcLoadBalance'
    pubcount = 2
    for i in range(pubcount):
        client_type = 'test-discovery'
        pub_id = 'test_discovery-%d' % i
        pub_data = {service_type : '%s-%d' % (service_type, i)}
        disc = client.DiscoveryClient(
            self._disc_server_ip, self._disc_server_port,
            client_type, pub_id)
        task = disc.publish(service_type, pub_data)
        tasks.append(task)
    time.sleep(1)
    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), pubcount)
    self.assertEqual(response['services'][0]['service_type'], service_type)

    # multiple subscribers for 2 instances each, pinning both publishers
    subcount = 20
    service_count = 2
    suburl = "/subscribe"
    payload = {
        'service'             : '%s' % service_type,
        'instances'           : service_count,
        'client-type'         : 'Vrouter-Agent',
        'service-in-use-list' : {
            'publisher-id': ["test_discovery-0", 'test_discovery-1']
        }
    }
    for i in range(subcount):
        payload['client'] = "ut-client-%d" % i
        (code, msg) = self._http_post(suburl, json.dumps(payload))
        self.assertEqual(code, 200)
        response = json.loads(msg)
        self.assertEqual(len(response[service_type]), service_count)

    # validate both publishers are assigned fairly
    time.sleep(1)
    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    failure = validate_assignment_count(response,
        'In-use count after clients with service-in-use-list')
    self.assertEqual(failure, False)

    # start one more publisher
    pub_id = 'test_discovery-2'
    pub_data = {service_type : '%s-2' % service_type}
    disc = client.DiscoveryClient(
        self._disc_server_ip, self._disc_server_port,
        client_type, pub_id)
    task = disc.publish(service_type, pub_data)
    tasks.append(task)
    pubcount += 1

    # verify new publisher is up
    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), pubcount)
    subs = sum([item['in_use'] for item in response['services']])
    self.assertEqual(subs, subcount * service_count)

    # verify newly added publisher's in-use count is 0
    data = [item for item in response['services']
            if item['service_id'] == '%s:%s' % (pub_id, service_type)]
    # check the match count BEFORE indexing so a missing/duplicate entry
    # fails the assertion instead of raising IndexError
    self.assertEqual(len(data), 1)
    entry = data[0]
    self.assertEqual(entry['in_use'], 0)

    # Issue load-balance command
    (code, msg) = self._http_post('/load-balance/%s' % service_type, '')
    self.assertEqual(code, 200)

    # re-subscribe all clients so assignments can rebalance
    for i in range(subcount):
        payload['client'] = "ut-client-%d" % i
        (code, msg) = self._http_post(suburl, json.dumps(payload))
        self.assertEqual(code, 200)

    (code, msg) = self._http_get('/services.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    print(response)
    failure = validate_assignment_count(response,
        'In-use count after LB command')
    self.assertEqual(failure, False)
def test_active_load_balance(self):
    """Exercise active load-balancing for a service.

    With active LB enabled for the service type, in-use counts must
    remain fairly distributed as publishers come up, go administratively
    down, and come back up.
    """
    # Bring up 3 publishers of the service. Active LB must be enabled!
    tasks = []
    service_type = 'SvcActiveLoadBalance'
    for idx in range(3):
        client_type = 'test-discovery'
        pub_id = 'test_discovery-%d' % idx
        pub_data = {service_type : '%s-%d' % (service_type, idx)}
        disc = client.DiscoveryClient(
            self._disc_server_ip, self._disc_server_port,
            client_type, pub_id)
        tasks.append(disc.publish(service_type, pub_data))
    time.sleep(1)

    def services_json():
        # GET /services.json, assert success, return the parsed body
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        return json.loads(msg)

    def check_fairness(context):
        # in-use counts across publishers must be balanced
        failure = validate_assignment_count(services_json(), context)
        self.assertEqual(failure, False)

    def wait_for_ttl_expiry():
        print('Waiting for all client TTL to expire (1 min)')
        time.sleep(60)

    response = services_json()
    self.assertEqual(len(response['services']), 3)
    self.assertEqual(response['services'][0]['service_type'], service_type)
    failure = validate_assignment_count(response,
        'In-use count just after publishing')
    self.assertEqual(failure, False)

    # Fan out multiple subscribers asking for 2 instances each
    subcount = 20
    service_count = 2
    tasks = []
    for idx in range(subcount):
        client_id = "test-load-balance-%d" % idx
        disc = client.DiscoveryClient(
            self._disc_server_ip, self._disc_server_port, client_id)
        obj = disc.subscribe(
            service_type, service_count, info_callback, client_id)
        tasks.append(obj.task)
        time.sleep(1)

    # validate all clients have subscribed
    time.sleep(1)
    (code, msg) = self._http_get('/clients.json')
    self.assertEqual(code, 200)
    response = json.loads(msg)
    self.assertEqual(len(response['services']), subcount * service_count)
    check_fairness('In-use count just after initial subscribe')

    # Bring up a 4th publisher
    pub_id = 'test_discovery-3'
    pub_data = {service_type : '%s-3' % service_type}
    pub_url = '/service/%s' % pub_id
    disc = client.DiscoveryClient(
        self._disc_server_ip, self._disc_server_port,
        client_type, pub_id)
    tasks.append(disc.publish(service_type, pub_data))

    # wait for all TTL to expire before looking at publisher's counters
    wait_for_ttl_expiry()
    check_fairness('In-use count just after bringing up one more publisher')

    # Administratively take the new publisher down
    payload = {
        'service-type' : '%s' % service_type,
        'admin-state'  : 'down',
    }
    (code, msg) = self._http_put(pub_url, json.dumps(payload))
    self.assertEqual(code, 200)
    wait_for_ttl_expiry()
    check_fairness('In-use count just after publisher-3 down')

    # ... and bring it back up again
    payload = {
        'service-type' : '%s' % service_type,
        'admin-state'  : 'up',
    }
    (code, msg) = self._http_put(pub_url, json.dumps(payload))
    self.assertEqual(code, 200)
    wait_for_ttl_expiry()
    check_fairness('In-use count just after publisher-3 up again')