def _get_tenant_from_coordinator(self):
    """
    Retrieve this message's tenant from the coordinator.

    Raises CoordinatorCommunicationError when the coordinator cannot be
    reached or answers with an unexpected status, and
    ResourceNotFoundError when the tenant does not exist.
    """
    config_cache = ConfigCache()
    config = config_cache.get_config()
    token_header = {
        MESSAGE_TOKEN: self.message_token,
        "WORKER-ID": config.worker_id,
        "WORKER-TOKEN": config.worker_token
    }
    request_uri = "{0}/tenant/{1}".format(
        config.coordinator_uri, self.tenant_id)

    try:
        resp = http_request(request_uri, token_header, http_verb='GET')
    except requests.RequestException as ex:
        # str(ex) instead of ex.message: BaseException.message is
        # deprecated and missing on many exception classes, which would
        # itself raise AttributeError here
        _LOG.exception(str(ex))
        raise errors.CoordinatorCommunicationError

    if resp.status_code == httplib.OK:
        response_body = resp.json()
        tenant = load_tenant_from_dict(response_body['tenant'])
        return tenant
    elif resp.status_code == httplib.NOT_FOUND:
        message = 'unable to locate tenant.'
        _LOG.debug(message)
        raise errors.ResourceNotFoundError(message)
    else:
        raise errors.CoordinatorCommunicationError
def _get_tenant_from_coordinator(self):
    """Fetch and return this message's tenant from the coordinator."""
    config = ConfigCache().get_config()
    headers = {
        MESSAGE_TOKEN: self.message_token,
        "WORKER-ID": config.worker_id,
        "WORKER-TOKEN": config.worker_token
    }
    url = "{0}/tenant/{1}".format(config.coordinator_uri, self.tenant_id)

    try:
        resp = http_request(url, headers, http_verb='GET')
    except requests.RequestException:
        raise errors.CoordinatorCommunicationError

    # Guard-clause form: handle the failure statuses first, then the
    # success path falls through to the bottom.
    status = resp.status_code
    if status == httplib.NOT_FOUND:
        raise errors.ResourceNotFoundError('Unable to locate tenant.')
    if status != httplib.OK:
        raise errors.CoordinatorCommunicationError
    return load_tenant_from_dict(resp.json()['tenant'])
def _validate_token_with_coordinator(self):
    """
    Validate this message's token against the coordinator.

    Returns True when the coordinator accepts the token. Raises
    CoordinatorCommunicationError when the coordinator cannot be
    reached, and MessageAuthenticationError when validation fails.
    """
    config_cache = ConfigCache()
    config = config_cache.get_config()
    token_header = {
        MESSAGE_TOKEN: self.message_token,
        "WORKER-ID": config.worker_id,
        "WORKER-TOKEN": config.worker_token
    }
    request_uri = "{0}/tenant/{1}/token".format(
        config.coordinator_uri, self.tenant_id)

    try:
        # HEAD is sufficient: only the status code matters for validation
        resp = http_request(request_uri, token_header, http_verb='HEAD')
    except requests.RequestException as ex:
        # str(ex) instead of ex.message: BaseException.message is
        # deprecated and not present on all exception types
        _LOG.exception(str(ex))
        raise errors.CoordinatorCommunicationError

    if resp.status_code != httplib.OK:
        raise errors.MessageAuthenticationError(
            'Message not authenticated, check your tenant id '
            'and or message token for validity')
    return True
def test_get_config_calls_returns_none(self):
    """get_config returns None when no configuration is cached."""
    patched_exists = patch.object(
        NativeProxy, 'cache_exists', self.cache_false)
    with patched_exists:
        self.assertIsNone(ConfigCache().get_config())
def get_routes_from_coordinator():
    """
    Fetch this worker's routes from the coordinator and store them in
    the config cache.

    Returns True when the routes were retrieved and cached, False when
    the coordinator could not be reached or answered with a non-OK
    status.
    """
    config_cache = ConfigCache()
    config = config_cache.get_config()
    token_header = {"WORKER-ID": config.worker_id,
                    "WORKER-TOKEN": config.worker_token}
    request_uri = "{0}/worker/{1}/routes".format(
        config.coordinator_uri, config.worker_id)

    try:
        resp = http_request(request_uri, token_header, http_verb='GET')
    except requests.RequestException:
        return False

    # if the coordinator issues a response, cache the worker routes
    # and return true
    if resp.status_code == httplib.OK:
        routes = resp.json()['routes']
        config_cache.set_routes(routes)
        return True

    # previously fell through returning None on an unexpected status;
    # return False so callers always get a boolean
    return False
def _validate_token_with_coordinator(self):
    """
    Ask the coordinator to validate this message's token.

    Returns True on success; raises CoordinatorCommunicationError when
    the coordinator is unreachable and MessageAuthenticationError when
    the token is rejected.
    """
    config_cache = ConfigCache()
    config = config_cache.get_config()
    token_header = {
        MESSAGE_TOKEN: self.message_token,
        "WORKER-ID": config.worker_id,
        "WORKER-TOKEN": config.worker_token
    }
    request_uri = "{0}/tenant/{1}/token".format(config.coordinator_uri,
                                                self.tenant_id)

    try:
        resp = http_request(request_uri, token_header, http_verb='HEAD')
    except requests.RequestException as ex:
        # str(ex) instead of ex.message: BaseException.message is
        # deprecated and missing on many exception classes
        _LOG.exception(str(ex))
        raise errors.CoordinatorCommunicationError

    if resp.status_code != httplib.OK:
        raise errors.MessageAuthenticationError(
            'Message not authenticated, check your tenant id '
            'and or message token for validity')
    return True
def _register_with_coordinator(
        self, coordinator_uri, personality, registration, auth_header):
    """
    Register this worker with the coordinator and persist the returned
    configuration to the cache.

    Returns True on success, False when the coordinator is unreachable
    or rejects the registration.
    """
    try:
        resp = http_request(coordinator_uri + '/pairing', auth_header,
                            jsonutils.dumps(registration),
                            http_verb='POST')
    except requests.RequestException:
        _LOG.exception(
            'Pairing Process: error posting worker registration')
        return False

    _LOG.debug('resp.status_code: {0}'.format(resp.status_code))

    if resp.status_code == httplib.ACCEPTED:
        body = resp.json()['worker_identity']
        config = WorkerConfiguration(
            personality,
            body['personality_module'],
            body['worker_token'],
            body['worker_id'],
            coordinator_uri)
        config_cache = ConfigCache()
        config_cache.set_config(config)
        return True

    # previously fell through returning None on a rejected registration;
    # return False so callers get a consistent boolean
    return False
def _register_with_coordinator(self, coordinator_uri, personality,
                               registration, auth_header):
    """
    Register this worker with the coordinator and persist the returned
    configuration to the cache.

    Returns True on success, False when the coordinator is unreachable
    or rejects the registration.
    """
    try:
        resp = http_request(coordinator_uri + '/pairing', auth_header,
                            jsonutils.dumps(registration),
                            http_verb='POST')
    except requests.RequestException:
        _LOG.exception(
            'Pairing Process: error posting worker registration')
        return False

    _LOG.debug('resp.status_code: {0}'.format(resp.status_code))

    if resp.status_code == httplib.ACCEPTED:
        body = resp.json()['worker_identity']
        config = WorkerConfiguration(personality,
                                     body['personality_module'],
                                     body['worker_token'],
                                     body['worker_id'],
                                     coordinator_uri)
        config_cache = ConfigCache()
        config_cache.set_config(config)
        return True

    # previously fell through returning None on a rejected registration;
    # return False so callers get a consistent boolean
    return False
def test_delete_config_calls_cache_del(self):
    """delete_config removes the cached worker configuration."""
    with patch.object(NativeProxy, 'cache_exists', self.cache_true), \
            patch.object(NativeProxy, 'cache_del', self.cache_del):
        ConfigCache().delete_config()
    self.cache_del.assert_called_once_with(
        'worker_configuration', CACHE_CONFIG)
def test_delete_config_calls_cache_del(self):
    """Deleting an existing config issues a cache_del on the backend."""
    exists = patch.object(NativeProxy, 'cache_exists', self.cache_true)
    deleter = patch.object(NativeProxy, 'cache_del', self.cache_del)
    with exists, deleter:
        cache = ConfigCache()
        cache.delete_config()
    self.cache_del.assert_called_once_with(
        'worker_configuration', CACHE_CONFIG)
def test_set_routes_calls_cache_set(self):
    """set_routes serializes the routes and writes them to the cache."""
    with patch.object(NativeProxy, 'cache_exists', self.cache_false), \
            patch.object(NativeProxy, 'cache_set', self.cache_set):
        ConfigCache().set_routes(self.routes)
    serialized = jsonutils.dumps(self.routes)
    self.cache_set.assert_called_once_with(
        'routes', serialized, CONFIG_EXPIRES, CACHE_CONFIG)
def test_delete_config_does_not_call_cache_del(self):
    """cache_del is never invoked when no configuration is cached."""
    with patch.object(NativeProxy, 'cache_exists', self.cache_false), \
            patch.object(NativeProxy, 'cache_del', self.cache_del):
        ConfigCache().delete_config()
    with self.assertRaises(AssertionError):
        self.cache_del.assert_called_once_with(
            'worker_configuration', CACHE_CONFIG)
def test_set_config_calls_cache_set(self):
    """set_config stores the serialized configuration in the cache."""
    with patch.object(NativeProxy, 'cache_exists', self.cache_false), \
            patch.object(NativeProxy, 'cache_set', self.cache_set):
        ConfigCache().set_config(self.config)
    serialized = jsonutils.dumps(self.config.format())
    self.cache_set.assert_called_once_with(
        'worker_configuration', serialized, CONFIG_EXPIRES, CACHE_CONFIG)
def test_set_config_calls_cache_set(self):
    """Storing a config writes it through to the backing cache."""
    exists = patch.object(NativeProxy, 'cache_exists', self.cache_false)
    setter = patch.object(NativeProxy, 'cache_set', self.cache_set)
    with exists, setter:
        cache = ConfigCache()
        cache.set_config(self.config)
    self.cache_set.assert_called_once_with(
        'worker_configuration',
        jsonutils.dumps(self.config.format()),
        CONFIG_EXPIRES,
        CACHE_CONFIG)
def test_delete_config_does_not_call_cache_del(self):
    """No cache_del happens when the configuration is absent."""
    exists = patch.object(NativeProxy, 'cache_exists', self.cache_false)
    deleter = patch.object(NativeProxy, 'cache_del', self.cache_del)
    with exists, deleter:
        ConfigCache().delete_config()
    with self.assertRaises(AssertionError):
        self.cache_del.assert_called_once_with(
            'worker_configuration', CACHE_CONFIG)
def test_get_config_calls_returns_config(self):
    """A cached config round-trips back as a WorkerConfiguration."""
    exists = patch.object(NativeProxy, 'cache_exists', self.cache_true)
    getter = patch.object(
        NativeProxy, 'cache_get', self.cache_get_config)
    with exists, getter:
        config = ConfigCache().get_config()
    self.cache_get_config.assert_called_once_with(
        'worker_configuration', CACHE_CONFIG)
    self.assertIsInstance(config, WorkerConfiguration)
def test_get_routes_calls_returns_config(self):
    """Cached routes are fetched and deserialized by get_routes."""
    exists = patch.object(NativeProxy, 'cache_exists', self.cache_true)
    getter = patch.object(
        NativeProxy, 'cache_get', self.cache_get_routes)
    with exists, getter:
        routes = ConfigCache().get_routes()
    self.cache_get_routes.assert_called_once_with('routes', CACHE_CONFIG)
    self.assertEqual(routes, self.routes)
def test_get_config_calls_returns_config(self):
    """get_config deserializes the cached worker configuration."""
    with patch.object(NativeProxy, 'cache_exists', self.cache_true), \
            patch.object(NativeProxy, 'cache_get', self.cache_get_config):
        cache = ConfigCache()
        config = cache.get_config()
    self.cache_get_config.assert_called_once_with(
        'worker_configuration', CACHE_CONFIG)
    self.assertIsInstance(config, WorkerConfiguration)
def publish_worker_stats():
    """
    Publish this worker's status and system info to the Coordinator(s).

    Best effort: any failure is logged at INFO and swallowed so the
    periodic publisher keeps running.
    """
    try:
        cache = ConfigCache()
        config = cache.get_config()
        request_uri = "{0}/worker/{1}/status".format(
            config.coordinator_uri, config.worker_id)
        req_body = {"worker_status": {"status": "online",
                                      "system_info": SystemInfo().format()}}
        http_request(url=request_uri,
                     json_payload=jsonutils.dumps(req_body),
                     http_verb="PUT")
    except Exception as ex:
        # str(ex) instead of ex.message: BaseException.message is
        # deprecated and absent on many exception types, which would
        # itself raise AttributeError here
        _LOG.info(str(ex))
def publish_worker_stats():
    """
    Publish this worker's status to the Coordinator(s) at set times.

    Best effort: any failure is logged at INFO and swallowed so the
    periodic publisher keeps running.
    """
    try:
        cache = ConfigCache()
        config = cache.get_config()
        request_uri = "{0}/worker/{1}/status".format(
            config.coordinator_uri, config.hostname)
        req_body = {
            'worker_status': Worker(personality=config.personality).format()
        }
        http_request(url=request_uri,
                     json_payload=jsonutils.dumps(req_body),
                     http_verb='PUT')
    except Exception as ex:
        # str(ex) instead of ex.message: BaseException.message is
        # deprecated and absent on many exception types
        _LOG.info(str(ex))
def _send_stats(self, load_ave_interval, disk_usage_interval):
    """
    Send system usage data to the coordinator on specified intervals.

    Sleeps load_ave_interval seconds between reports; every report
    carries the load average, and once time_lapsed accumulates to
    exactly disk_usage_interval the disk usage is included as well.
    Runs forever unless self.run_once is set (used by tests).

    NOTE(review): the disk-usage report assumes disk_usage_interval is
    a multiple of load_ave_interval, otherwise the == check never
    fires -- confirm with callers.
    """
    time_lapsed = 0
    while True:
        sleep(load_ave_interval)
        time_lapsed += load_ave_interval
        # config may not exist yet (worker not paired); skip the report
        cache = ConfigCache()
        config = cache.get_config()
        if config:
            token_header = {
                "WORKER-ID": config.worker_id,
                "WORKER-TOKEN": config.worker_token
            }
            request_uri = "{0}/worker/{1}/status".format(
                config.coordinator_uri, config.worker_id)
            req_body = {'load_average': sys_assist.get_load_average()}
            # piggyback disk usage on the report that completes the
            # longer interval, then restart the accumulator
            if time_lapsed == disk_usage_interval:
                time_lapsed = 0
                req_body.update(
                    {'disk_usage': sys_assist.get_disk_usage()})
            try:
                http_request(request_uri, token_header,
                             jsonutils.dumps(req_body), http_verb='PUT')
            except requests.RequestException:
                # best effort: a missed report is not fatal
                pass
        if self.run_once:
            break
def publish_worker_stats():
    """
    Publish this worker's online status and system info to the
    Coordinator(s).

    Best effort: any failure is logged at INFO and swallowed so the
    periodic publisher keeps running.
    """
    try:
        cache = ConfigCache()
        config = cache.get_config()
        request_uri = "{0}/worker/{1}/status".format(
            config.coordinator_uri, config.worker_id)
        req_body = {
            'worker_status': {
                'status': 'online',
                'system_info': SystemInfo().format()
            }
        }
        http_request(url=request_uri,
                     json_payload=jsonutils.dumps(req_body),
                     http_verb='PUT')
    except Exception as ex:
        # str(ex) instead of ex.message: BaseException.message is
        # deprecated and absent on many exception types
        _LOG.info(str(ex))
def _register_worker_online(self, status):
    """
    Report the given worker status to the coordinator.

    Returns True when the coordinator acknowledges the update with OK,
    False when it is unreachable or answers with any other status.
    """
    cache = ConfigCache()
    config = cache.get_config()
    token_header = {"WORKER-TOKEN": config.worker_token}
    request_uri = "{0}/worker/{1}/status".format(
        config.coordinator_uri, config.worker_id)
    status = {"worker_status": status}

    try:
        resp = http_request(request_uri, token_header,
                            jsonutils.dumps(status), http_verb='PUT')
    except requests.RequestException:
        return False

    if resp.status_code == httplib.OK:
        return True
    # previously fell through returning None; make the failure explicit
    return False
def test_get_config_calls_returns_none(self):
    """When the cache is empty, get_config yields None."""
    with patch.object(NativeProxy, 'cache_exists', self.cache_false):
        cache = ConfigCache()
        result = cache.get_config()
    self.assertIs(result, None)
cfg.StrOpt('coordinator_uri', default='http://localhost:8080/v1', help="""The URI of the Coordinator (can be a load balancer)""") ] get_config().register_opts(_NODE_OPTIONS, group=_node_group) try: init_config() conf = get_config() except cfg.ConfigFilesNotFoundError: conf = get_config() PERSONALITY = conf.node.personality COORDINATOR_URI = conf.node.coordinator_uri config_cache = ConfigCache() def bootstrap_api(): # Persist the coordinator_uri and personality to ConfigCache config = WorkerConfiguration(PERSONALITY, platform.node(), COORDINATOR_URI) config_cache.set_config(config) personality_module = 'meniscus.personas.{0}.app'.format(PERSONALITY) _LOG.info( 'loading default personality module: {0}'.format(personality_module)) #load the personality module as a plug in plugin_mod = import_module(personality_module) #start up the api from the specified personality_module
class Router(object):
    """
    Routes messages to a downstream worker over a raw TCP socket,
    blacklisting workers that cannot be reached or fail dispatch.
    """

    def __init__(self):
        self._config_cache = ConfigCache()
        self._blacklist_cache = BlacklistCache()
        # personality decides which service domain messages go to next
        self._personality = self._config_cache.get_config().personality
        # one cached (worker, socket) pair per service domain
        self._active_worker_socket = dict()
        self._dispatch = Dispatch()

    def _get_next_service_domain(self):
        """Return the downstream service domain for this personality."""
        # correlation, syslog and normalization all forward to storage;
        # any other personality has no downstream hop
        if self._personality == personalities.CORRELATION:
            return personalities.STORAGE
        if self._personality == personalities.SYSLOG:
            return personalities.STORAGE
        if self._personality == personalities.NORMALIZATION:
            return personalities.STORAGE
        return None

    def _get_route_targets(self, service_domain):
        """Return the cached target workers for service_domain, or None."""
        routes = self._config_cache.get_routes()
        for domain in routes:
            if domain['service_domain'] == service_domain:
                return domain['targets']
        return None

    def _blacklist_worker(self, service_domain, worker_id):
        """
        Drop the cached socket for service_domain, blacklist worker_id
        locally, and (best effort) notify the coordinator.
        """
        self._active_worker_socket[service_domain] = None
        self._blacklist_cache.add_blacklist_worker(worker_id)
        config = self._config_cache.get_config()
        if config:
            token_header = {
                "WORKER-ID": config.worker_id,
                "WORKER-TOKEN": config.worker_token
            }
            request_uri = "{0}/worker/{1}".format(
                config.coordinator_uri, worker_id)
            try:
                http_request(request_uri, token_header, http_verb='PUT')
            except requests.RequestException:
                # Todo log failure to contact coordinator
                pass

    def _get_worker_socket(self, service_domain):
        """
        Return a connected (worker, socket) pair for service_domain,
        reusing a cached connection when one exists; None when no
        non-blacklisted route target can be reached.
        """
        worker_socket = self._active_worker_socket.get(service_domain)
        if worker_socket:
            return worker_socket
        for worker in self._get_route_targets(service_domain):
            if not self._blacklist_cache.is_worker_blacklisted(
                    worker['worker_id']):
                # prefer IPv6 when the route advertises one; port 9001
                # appears to be the fixed worker port -- TODO confirm
                if worker['ip_address_v6']:
                    protocol = socket.AF_INET6
                    address = (worker['ip_address_v6'], 9001, 0, 0)
                else:
                    protocol = socket.AF_INET
                    address = (worker['ip_address_v4'], 9001)
                sock = socket.socket(protocol, socket.SOCK_STREAM)
                try:
                    sock.connect(address)
                    worker_socket = (worker, sock)
                    self._active_worker_socket[service_domain] = \
                        worker_socket
                    return worker_socket
                except socket.error as ex:
                    # unreachable worker: blacklist it and try the next
                    self._blacklist_worker(
                        service_domain, worker['worker_id'])
        return None

    def route_message(self, message):
        """
        Dispatch message to the next service domain, failing over to
        other workers when dispatch fails.

        Raises RoutingException when no worker accepts the message.
        """
        next_service_domain = self._get_next_service_domain()
        worker_socket = self._get_worker_socket(next_service_domain)
        while worker_socket:
            worker, sock = worker_socket
            try:
                self._dispatch.dispatch_message(message, sock)
                return
            except DispatchException:
                # TODO(dmend) log this and report to coordinator
                self._blacklist_worker(next_service_domain,
                                       worker['worker_id'])
                worker_socket = self._get_worker_socket(
                    next_service_domain)
        raise RoutingException()
def __init__(self):
    """Set up caches, the dispatcher, and this worker's personality."""
    self._dispatch = Dispatch()
    self._active_worker_socket = {}
    self._blacklist_cache = BlacklistCache()
    self._config_cache = ConfigCache()
    self._personality = self._config_cache.get_config().personality
def test_clear_calls_cache_clear(self):
    """clear() wipes the entire config cache namespace."""
    cleaner = patch.object(NativeProxy, 'cache_clear', self.cache_clear)
    with cleaner:
        ConfigCache().clear()
    self.cache_clear.assert_called_once_with(CACHE_CONFIG)