def do_update(request):
    _logger.info("{} plugin update started...".format(request._name))
    code, link = update_repo_sources_and_plugin(request._type, request._name)
    if code != 0:
        _logger.error("{} plugin update failed. Logs available at {}".format(request._name, link))
    else:
        _logger.info("{} plugin update completed. Logs available at {}".format(request._name, link))
        # PKGUP audit log entry
        storage_client = connect.get_storage_async()
        audit = AuditLogger(storage_client)
        audit_detail = {'packageName': "fledge-{}-{}".format(request._type, request._name.replace("_", "-"))}
        asyncio.ensure_future(audit.information('PKGUP', audit_detail))

    # Restart the services which were disabled before the plugin update
    for s in request._sch_list:
        asyncio.ensure_future(server.Server.scheduler.enable_schedule(uuid.UUID(s)))

    # The case below applies to the notification plugins ONLY:
    # re-enable the configuration categories which were disabled during the update process
    if request._type in ['notify', 'rule']:
        storage_client = connect.get_storage_async()
        config_mgr = ConfigurationManager(storage_client)
        for n in request._notification_list:
            asyncio.ensure_future(config_mgr.set_category_item_value_entry(n, "enable", "true"))
def execute_backup(self):
    """ Executes the backup functionality

    Args:
    Returns:
    Raises:
        exceptions.BackupFailed
    """
    self._logger.debug("{func}".format(func="execute_backup"))

    self._purge_old_backups()

    backup_file = self._generate_file_name()
    self._backup_lib.sl_backup_status_create(backup_file, lib.BackupType.FULL, lib.BackupStatus.RUNNING)
    status, exit_code = self._run_backup_command(backup_file)

    backup_information = self._backup_lib.sl_get_backup_details_from_file_name(backup_file)
    self._backup_lib.sl_backup_status_update(backup_information['id'], status, exit_code)

    audit = AuditLogger(self._storage_async)
    loop = asyncio.get_event_loop()
    if status != lib.BackupStatus.COMPLETED:
        self._logger.error(self._MESSAGES_LIST["e000007"])
        loop.run_until_complete(audit.information('BKEXC', {'status': 'failed'}))
        raise exceptions.BackupFailed
    else:
        loop.run_until_complete(audit.information('BKEXC', {'status': 'completed'}))
async def test_purge_error_storage_response(self, conf, expected_return):
    """Test that purge_data logs an error when the storage purge returns an error response"""
    @asyncio.coroutine
    def mock_audit_info():
        return ""

    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mockAuditLogger, "__init__", return_value=None):
            p = Purge()
            p._logger = logger
            p._logger.info = MagicMock()
            p._logger.error = MagicMock()
            p._storage_async = MagicMock(spec=StorageClientAsync)
            p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
            audit = p._audit
            with patch.object(p._storage_async, "query_tbl_with_payload",
                              return_value=q_result('streams')) as patch_storage:
                with patch.object(p._readings_storage_async, 'purge', side_effect=self.store_purge):
                    with patch.object(audit, 'information', return_value=mock_audit_info()):
                        assert expected_return == await p.purge_data(conf)
            assert patch_storage.called
            assert 1 == patch_storage.call_count
async def test_set_configuration(self):
    """Test that purge's set_configuration returns configuration item with key 'PURGE_READ' """
    @asyncio.coroutine
    def mock_cm_return():
        return ""

    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mockAuditLogger, "__init__", return_value=None):
            p = Purge()
            p._storage = MagicMock(spec=StorageClientAsync)
            mock_cm = ConfigurationManager(p._storage)
            with patch.object(mock_cm, 'create_category', return_value=mock_cm_return()) as mock_create_cat:
                with patch.object(mock_cm, 'create_child_category',
                                  return_value=mock_cm_return()) as mock_create_child_cat:
                    with patch.object(mock_cm, 'get_category_all_items',
                                      return_value=mock_cm_return()) as mock_get_cat:
                        await p.set_configuration()
                    mock_get_cat.assert_called_once_with('PURGE_READ')
                mock_create_child_cat.assert_called_once_with('Utilities', ['PURGE_READ'])
            args, kwargs = mock_create_cat.call_args
            assert 4 == len(args)
            assert 5 == len(args[1].keys())
            assert 'PURGE_READ' == args[0]
            assert 'Purge the readings, log, statistics history table' == args[2]
            assert args[3] is True
async def test_run_exception(self, event_loop):
    """Test that run calls all units of the purge process and checks the exception handling"""
    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)

    @asyncio.coroutine
    def mock_config():
        return "Some config"

    @asyncio.coroutine
    def mock_purge():
        raise Exception()

    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mockAuditLogger, "__init__", return_value=None):
            p = Purge()
            p._logger.exception = MagicMock()
            with patch.object(p, 'set_configuration', return_value=mock_config()):
                with patch.object(p, 'purge_data', return_value=mock_purge()):
                    with patch.object(p, 'write_statistics'):
                        await p.run()
            # Test the negative case when the purge_data function raises an exception
            p._logger.exception.assert_called_once_with("")
async def test_run(self):
    """Test that run calls all units of purge process"""
    @asyncio.coroutine
    def mock_config():
        return "Some config"

    @asyncio.coroutine
    def mock_purge():
        return 1, 2

    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mockAuditLogger, "__init__", return_value=None):
            p = Purge()
            p._logger.exception = MagicMock()
            with patch.object(p, 'set_configuration', return_value=mock_config()) as mock_set_config:
                with patch.object(p, 'purge_data', return_value=mock_purge()) as mock_purge_data:
                    with patch.object(p, 'write_statistics') as mock_write_stats:
                        await p.run()
                        # Test the positive case when no error in try block
                        mock_write_stats.assert_called_once_with(1, 2)
                    mock_purge_data.assert_called_once_with("Some config")
                mock_set_config.assert_called_once_with()
async def remove_plugin(request):
    """ Remove installed plugin from fledge

    type: installed plugin type
    name: installed plugin name

    Example:
        curl -X DELETE http://localhost:8081/fledge/plugins/south/sinusoid
        curl -X DELETE http://localhost:8081/fledge/plugins/north/http_north
        curl -X DELETE http://localhost:8081/fledge/plugins/filter/expression
        curl -X DELETE http://localhost:8081/fledge/plugins/notificationDelivery/alexa
        curl -X DELETE http://localhost:8081/fledge/plugins/notificationRule/Average
    """
    plugin_type = request.match_info.get('type', None)
    name = request.match_info.get('name', None)
    try:
        plugin_type = str(plugin_type).lower() if not str(plugin_type).startswith('notification') else plugin_type
        if plugin_type not in valid_plugin_types:
            raise ValueError("Invalid plugin type. Please provide valid type: {}".format(valid_plugin_types))
        installed_plugin = PluginDiscovery.get_plugins_installed(plugin_type, False)
        if name not in [plugin['name'] for plugin in installed_plugin]:
            raise KeyError("Invalid plugin name {} or plugin is not installed".format(name))
        if plugin_type in ['notificationDelivery', 'notificationRule']:
            notification_instances_plugin_used_in = await check_plugin_usage_in_notification_instances(name)
            if notification_instances_plugin_used_in:
                raise RuntimeError("{} cannot be removed. This is being used by {} instances".format(
                    name, notification_instances_plugin_used_in))
            plugin_type = 'notify' if plugin_type == 'notificationDelivery' else 'rule'
        else:
            get_tracked_plugins = await check_plugin_usage(plugin_type, name)
            if get_tracked_plugins:
                e = "{} cannot be removed. This is being used by {} instances".format(
                    name, get_tracked_plugins[0]['service_list'])
                _logger.error(e)
                raise RuntimeError(e)
            else:
                _logger.info("No entry found for {name} plugin in asset tracker; or "
                             "{name} plugin may have been added in disabled state & never used".format(name=name))
        res, log_path, is_package = purge_plugin(plugin_type, name)
        if res != 0:
            e_msg = "Something went wrong. Please check log {}".format(log_path)
            _logger.error(e_msg)
            raise RuntimeError(e_msg)
        else:
            if is_package:
                storage_client = connect.get_storage_async()
                audit_log = AuditLogger(storage_client)
                audit_detail = {'package_name': "fledge-{}-{}".format(plugin_type, name)}
                await audit_log.information('PKGRM', audit_detail)
    except (ValueError, RuntimeError) as ex:
        raise web.HTTPBadRequest(reason=str(ex))
    except KeyError as ex:
        raise web.HTTPNotFound(reason=str(ex))
    except PackageError as e:
        msg = "Failed to remove package for plugin {}".format(name)
        raise web.HTTPBadRequest(body=json.dumps({"message": msg, "link": str(e)}), reason=msg)
    else:
        _logger.info('{} plugin removed successfully'.format(name))
        return web.json_response({'message': '{} plugin removed successfully'.format(name)}, status=200)
async def _purge_instance():
    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, "__init__"):
        with patch.object(logger, "setup"):
            with patch.object(mockAuditLogger, "__init__", return_value=None):
                p = Purge()
    return p
async def _stats_history_instance():
    mock_storage_client = MagicMock(spec=StorageClientAsync)
    mock_audit_logger = AuditLogger(mock_storage_client)
    with patch.object(FledgeProcess, "__init__"):
        with patch.object(logger, "setup"):
            with patch.object(mock_audit_logger, "__init__", return_value=None):
                stats = StatisticsHistory()
    return stats
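# Note: the AuditLogger tests below await a module-level mock_coro() helper that is not
# shown in this excerpt. A minimal sketch of how such a helper is commonly defined (an
# assumption about the omitted fixture code, not the verbatim original):
async def mock_coro(*args, **kwargs):
    return None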
async def test_singleton(self, event_loop):
    """ Test that two audit loggers share the same state """
    storageMock1 = MagicMock(spec=StorageClientAsync)
    attrs = {'insert_into_tbl.return_value': asyncio.ensure_future(mock_coro(), loop=event_loop)}
    storageMock1.configure_mock(**attrs)
    a1 = AuditLogger(storageMock1)

    storageMock2 = MagicMock(spec=StorageClientAsync)
    attrs = {'insert_into_tbl.return_value': asyncio.ensure_future(mock_coro(), loop=event_loop)}
    storageMock2.configure_mock(**attrs)
    a2 = AuditLogger(storageMock2)

    assert a1._storage == a2._storage
    a1._storage.insert_into_tbl.reset_mock()
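# The singleton test above passes only if AuditLogger shares state across instances.
# A minimal Borg-style sketch that would satisfy the assertion (an assumption for
# illustration; the actual Fledge implementation may differ in detail):
class _SharedStateAuditLogger:
    _shared_state = {}  # one attribute dict shared by every instance

    def __init__(self, storage):
        self.__dict__ = self._shared_state  # every instance aliases the same attributes
        self._storage = storage  # the most recently supplied storage is visible to all instances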
async def test_success(self, event_loop):
    """ Test that audit log results in a database insert """
    storageMock = MagicMock(spec=StorageClientAsync)
    attrs = {'insert_into_tbl.return_value': asyncio.ensure_future(mock_coro(), loop=event_loop)}
    storageMock.configure_mock(**attrs)
    audit = AuditLogger(storageMock)
    await audit.success('AUDTCODE', {'message': 'failure'})
    assert audit._storage.insert_into_tbl.called is True
    audit._storage.insert_into_tbl.reset_mock()
def test_init(self):
    """Test that creating an instance of Purge calls init of FledgeProcess and creates loggers"""
    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, "__init__") as mock_process:
        with patch.object(logger, "setup") as log:
            with patch.object(mockAuditLogger, "__init__", return_value=None):
                p = Purge()
                assert isinstance(p, Purge)
                assert isinstance(p._audit, AuditLogger)
            log.assert_called_once_with("Data Purge")
        mock_process.assert_called_once_with()
def do_update(request):
    _logger.info("{} service update started...".format(request._name))
    name = "fledge-service-{}".format(request._name.lower())
    _platform = platform.platform()
    stdout_file_path = common.create_log_file("update", name)
    pkg_mgt = 'apt'
    cmd = "sudo {} -y update > {} 2>&1".format(pkg_mgt, stdout_file_path)
    if 'centos' in _platform or 'redhat' in _platform:
        pkg_mgt = 'yum'
        cmd = "sudo {} check-update > {} 2>&1".format(pkg_mgt, stdout_file_path)
    ret_code = os.system(cmd)
    # "sudo apt/yum -y install" only happens when the repository update completed without error
    if ret_code == 0:
        cmd = "sudo {} -y install {} >> {} 2>&1".format(pkg_mgt, name, stdout_file_path)
        ret_code = os.system(cmd)

    # relative log file link
    link = "log/" + stdout_file_path.split("/")[-1]
    if ret_code != 0:
        _logger.error("{} service update failed. Logs available at {}".format(request._name, link))
    else:
        _logger.info("{} service update completed. Logs available at {}".format(request._name, link))
        # PKGUP audit log entry
        storage_client = connect.get_storage_async()
        audit = AuditLogger(storage_client)
        audit_detail = {'packageName': name}
        asyncio.ensure_future(audit.information('PKGUP', audit_detail))

    # Restart the service which was disabled before the update
    for s in request._sch_list:
        asyncio.ensure_future(server.Server.scheduler.enable_schedule(uuid.UUID(s)))
async def run(self):
    global _log_performance
    global _LOGGER

    # Sets up the signal handlers, to properly handle the termination
    # a) SIGTERM - 15 : kill or system shutdown
    signal.signal(signal.SIGTERM, SendingProcess._signal_handler)

    # Command line parameter handling
    self._log_performance, self._debug_level = handling_input_parameters()
    _log_performance = self._log_performance

    try:
        self._storage_async = StorageClientAsync(self._core_management_host, self._core_management_port)
        self._readings = ReadingsStorageClientAsync(self._core_management_host, self._core_management_port)
        self._audit = AuditLogger(self._storage_async)
    except Exception as ex:
        SendingProcess._logger.exception(_MESSAGES_LIST["e000023"].format(str(ex)))
        sys.exit(1)
    else:
        SendingProcess._logger.removeHandler(SendingProcess._logger.handle)
        logger_name = _MODULE_NAME + "_" + self._name
        SendingProcess._logger = logger.setup(
            logger_name,
            level=logging.INFO if self._debug_level in [None, 0, 1] else logging.DEBUG)
        _LOGGER = SendingProcess._logger

    try:
        is_started = await self._start()
        if is_started:
            await self.send_data()
        self.stop()
        SendingProcess._logger.info("Execution completed.")
        sys.exit(0)
    except (ValueError, Exception) as ex:
        SendingProcess._logger.exception(_MESSAGES_LIST["e000002"].format(str(ex)))
        sys.exit(1)
async def test_purge_data(self, conf, expected_return, expected_calls):
    """Test that purge_data calls Storage's purge with defined configuration"""
    @asyncio.coroutine
    def mock_audit_info():
        return ""

    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mockAuditLogger, "__init__", return_value=None):
            p = Purge()
            p._logger = logger
            p._logger.info = MagicMock()
            p._logger.error = MagicMock()
            p._storage_async = MagicMock(spec=StorageClientAsync)
            p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
            audit = p._audit
            with patch.object(p._storage_async, "query_tbl_with_payload",
                              return_value=q_result('streams')) as patch_storage:
                with patch.object(p._readings_storage_async, 'purge',
                                  side_effect=self.store_purge) as mock_storage_purge:
                    with patch.object(audit, 'information',
                                      return_value=mock_audit_info()) as audit_info:
                        # Test the positive case when all if conditions in purge_data pass
                        assert expected_return == await p.purge_data(conf)
                        assert audit_info.called
                args, kwargs = mock_storage_purge.call_args
                assert kwargs == expected_calls
            assert patch_storage.called
            assert 1 == patch_storage.call_count
            args, kwargs = patch_storage.call_args
            assert ('streams', '{"aggregate": {"operation": "min", "column": "last_object"}}') == args
async def test_create_audit_entry(self, client, loop):
    request_data = {"source": "LMTR", "severity": "warning",
                    "details": {"message": "Engine oil pressure low"}}
    response = {'details': {'message': 'Engine oil pressure low'}, 'source': 'LMTR',
                'timestamp': '2018-03-05 07:36:52.823', 'severity': 'warning'}

    async def async_mock():
        return response

    storage_mock = MagicMock(spec=StorageClientAsync)
    AuditLogger(storage_mock)
    with patch.object(storage_mock, 'insert_into_tbl',
                      return_value=asyncio.ensure_future(async_mock(), loop=loop)) as insert_tbl_patch:
        resp = await client.post('/fledge/audit', data=json.dumps(request_data))
        assert 200 == resp.status
        result = await resp.text()
        json_response = json.loads(result)
        assert response['details'] == json_response['details']
        assert response['source'] == json_response['source']
        assert response['severity'] == json_response['severity']
        assert 'timestamp' in json_response
async def delete_notification(request):
    """ Delete an existing notification

    :Example:
        curl -X DELETE http://localhost:8081/fledge/notification/<notification_name>
    """
    try:
        notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
        _address, _port = notification_service[0]._address, notification_service[0]._port
    except service_registry_exceptions.DoesNotExist:
        raise web.HTTPNotFound(reason="No Notification service available.")

    try:
        notif = request.match_info.get('notification_name', None)
        if notif is None:
            raise ValueError("Notification name is required for deletion.")

        # Stop & remove the notification
        url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(notif))
        notification = json.loads(await _hit_delete_url(url))

        # Remove the child categories for the rule and delivery plugins,
        # then remove the category for the notification itself
        storage = connect.get_storage_async()
        config_mgr = ConfigurationManager(storage)
        await config_mgr.delete_category_and_children_recursively(notif)

        audit = AuditLogger(storage)
        await audit.information('NTFDL', {"name": notif})
    except ValueError as ex:
        raise web.HTTPBadRequest(reason=str(ex))
    except Exception as ex:
        raise web.HTTPInternalServerError(reason=str(ex))
    else:
        return web.json_response({'result': 'Notification {} deleted successfully.'.format(notif)})
async def test_purge_data_invalid_conf(self, conf, expected_error_key):
    """Test that purge_data raises exception when called with invalid configuration"""
    @asyncio.coroutine
    def mock_audit_info():
        return ""

    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(mockAuditLogger, "__init__", return_value=None):
            p = Purge()
            p._logger = logger
            p._logger.info = MagicMock()
            p._logger.error = MagicMock()
            p._storage_async = MagicMock(spec=StorageClientAsync)
            p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
            audit = p._audit
            with patch.object(p._storage_async, "query_tbl_with_payload",
                              return_value=q_result('streams')) as patch_storage:
                with patch.object(p._readings_storage_async, 'purge', side_effect=self.store_purge):
                    with patch.object(audit, 'information', return_value=mock_audit_info()):
                        # Test the code block when purge failed because of invalid configuration
                        await p.purge_data(conf)
                        p._logger.error.assert_called_with(
                            'Configuration item {} bla should be integer!'.format(expected_error_key))
            assert patch_storage.called
            assert 1 == patch_storage.call_count
async def test_write_statistics(self):
    """Test that write_statistics calls update statistics with defined keys and value increments"""
    @asyncio.coroutine
    def mock_s_update():
        return ""

    mockStorageClientAsync = MagicMock(spec=StorageClientAsync)
    mockAuditLogger = AuditLogger(mockStorageClientAsync)
    with patch.object(FledgeProcess, '__init__'):
        with patch.object(Statistics, '_load_keys', return_value=mock_s_update()):
            with patch.object(Statistics, 'update', return_value=mock_s_update()) as mock_stats_update:
                with patch.object(mockAuditLogger, "__init__", return_value=None):
                    p = Purge()
                    p._storage_async = mockStorageClientAsync
                    await p.write_statistics(1, 2)
                mock_stats_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
async def add_plugin(request: web.Request) -> web.Response:
    """ add plugin

    :Example:
        curl -X POST http://localhost:8081/fledge/plugins
        data:
            format - the format of the file. One of tar or package (deb, rpm) or repository
            name - the plugin package name to pull from the repository
            version - (optional) the plugin version to install from the repository
            url - the url to pull the plugin file from if the format is not a repository
            compressed - (optional) boolean used to indicate the package is a compressed gzip image
            checksum - the checksum of the file, used to verify correct upload

        curl -sX POST http://localhost:8081/fledge/plugins -d '{"format":"repository", "name": "fledge-south-sinusoid"}'
        curl -sX POST http://localhost:8081/fledge/plugins -d '{"format":"repository", "name": "fledge-notify-slack", "version":"1.6.0"}'
    """
    try:
        data = await request.json()
        url = data.get('url', None)
        file_format = data.get('format', None)
        compressed = data.get('compressed', None)
        plugin_type = data.get('type', None)
        checksum = data.get('checksum', None)
        if not file_format:
            raise TypeError('file format param is required')
        if file_format not in ["tar", "deb", "rpm", "repository"]:
            raise ValueError("Invalid format. Must be 'tar' or 'deb' or 'rpm' or 'repository'")
        if file_format == 'repository':
            name = data.get('name', None)
            if name is None:
                raise ValueError('name param is required')
            version = data.get('version', None)
            if version:
                if str(version).count('.') != 2:
                    raise ValueError('Invalid version; it should be empty or a valid semantic version X.Y.Z '
                                     'i.e. major.minor.patch to install as per the configured repository')

            plugins, log_path = await common.fetch_available_packages()
            if name not in plugins:
                raise KeyError('{} plugin is not available for the configured repository'.format(name))

            _platform = platform.platform()
            pkg_mgt = 'yum' if 'centos' in _platform or 'redhat' in _platform else 'apt'
            code, link, msg = await install_package_from_repo(name, pkg_mgt, version)
            if code != 0:
                raise PackageError(link)

            # Audit the package install (PKGIN) or update (PKGUP)
            storage = connect.get_storage_async()
            audit = AuditLogger(storage)
            audit_detail = {'packageName': name}
            log_code = 'PKGUP' if msg == 'updated' else 'PKGIN'
            await audit.information(log_code, audit_detail)
            result_payload = {"message": "{} is successfully {}".format(name, msg), "link": link}
        else:
            if not url or not checksum:
                raise TypeError('URL, checksum params are required')
            if file_format == "tar" and not plugin_type:
                raise ValueError("Plugin type param is required")
            if file_format == "tar" and plugin_type not in ['south', 'north', 'filter',
                                                            'notificationDelivery', 'notificationRule']:
                raise ValueError("Invalid plugin type. Must be 'north' or 'south' or 'filter' "
                                 "or 'notificationDelivery' or 'notificationRule'")
            if compressed:
                if compressed not in ['true', 'false', True, False]:
                    raise ValueError('Only "true", "false", true, false are allowed for value of compressed.')
            is_compressed = ((isinstance(compressed, str) and compressed.lower() in ['true']) or
                             (isinstance(compressed, bool) and compressed is True))

            # All stuff goes into _PATH
            if not os.path.exists(_PATH):
                os.makedirs(_PATH)

            result = await download([url])
            file_name = result[0]

            # Validate the checksum with MD5sum
            if validate_checksum(checksum, file_name) is False:
                raise ValueError("Checksum is failed.")

            _LOGGER.debug("Found {} format with compressed {}".format(file_format, is_compressed))
            if file_format == 'tar':
                files = extract_file(file_name, is_compressed)
                _LOGGER.debug("Files {} {}".format(files, type(files)))
                code, msg = copy_file_install_requirement(files, plugin_type, file_name)
                if code != 0:
                    raise ValueError(msg)
            else:
                pkg_mgt = 'yum' if file_format == 'rpm' else 'apt'
                code, msg = install_package(file_name, pkg_mgt)
                if code != 0:
                    raise ValueError(msg)
            result_payload = {"message": "{} is successfully downloaded and installed".format(file_name)}
    except (FileNotFoundError, KeyError) as ex:
        raise web.HTTPNotFound(reason=str(ex))
    except (TypeError, ValueError) as ex:
        raise web.HTTPBadRequest(reason=str(ex))
    except PackageError as e:
        msg = "Plugin installation request failed"
        raise web.HTTPBadRequest(body=json.dumps({"message": msg, "link": str(e)}), reason=msg)
    except Exception as ex:
        raise web.HTTPInternalServerError(reason=str(ex))
    else:
        return web.json_response(result_payload)
def __init__(self):
    super().__init__()
    self._logger = logger.setup("Data Purge")
    self._audit = AuditLogger(self._storage_async)
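# For context: the purge tests above patch this _audit object's information() method,
# so purge_data is expected to record its outcome through it. A hedged sketch of such
# a call (the payload keys here are illustrative assumptions, not Fledge's schema):
#
#     await self._audit.information('PURGE', {"status": "completed",
#                                             "readingsRemoved": total_purged})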
async def add_service(request):
    """ Create a new service to run a specific plugin

    :Example:
        curl -X POST http://localhost:8081/fledge/service -d '{"name": "DHT 11", "plugin": "dht11", "type": "south", "enabled": true}'
        curl -sX POST http://localhost:8081/fledge/service -d '{"name": "Sine", "plugin": "sinusoid", "type": "south", "enabled": true, "config": {"dataPointsPerSec": {"value": "10"}}}' | jq
        curl -X POST http://localhost:8081/fledge/service -d '{"name": "NotificationServer", "type": "notification", "enabled": true}' | jq
        curl -sX POST http://localhost:8081/fledge/service?action=install -d '{"format":"repository", "name": "fledge-service-notification"}'
        curl -sX POST http://localhost:8081/fledge/service?action=install -d '{"format":"repository", "name": "fledge-service-notification", "version":"1.6.0"}'
    """
    try:
        data = await request.json()
        if not isinstance(data, dict):
            raise ValueError('Data payload must be a valid JSON')

        name = data.get('name', None)
        plugin = data.get('plugin', None)
        service_type = data.get('type', None)
        enabled = data.get('enabled', None)
        config = data.get('config', None)

        if name is None:
            raise web.HTTPBadRequest(reason='Missing name property in payload.')

        if 'action' in request.query and request.query['action'] != '':
            if request.query['action'] == 'install':
                file_format = data.get('format', None)
                if file_format is None:
                    raise ValueError("format param is required")
                if file_format not in ["repository"]:
                    raise ValueError("Invalid format. Must be 'repository'")
                version = data.get('version', None)
                if version:
                    delimiter = '.'
                    if str(version).count(delimiter) != 2:
                        raise ValueError('Service semantic version is incorrect; it should be like X.Y.Z')

                services, log_path = await common.fetch_available_packages("service")
                if name not in services:
                    raise KeyError('{} service is not available for the given repository or already installed'.format(name))

                _platform = platform.platform()
                pkg_mgt = 'yum' if 'centos' in _platform or 'redhat' in _platform else 'apt'
                code, link, msg = await install.install_package_from_repo(name, pkg_mgt, version)
                if code != 0:
                    raise PackageError(link)

                message = "{} is successfully {}".format(name, msg)
                storage = connect.get_storage_async()
                audit = AuditLogger(storage)
                audit_detail = {'packageName': name}
                log_code = 'PKGUP' if msg == 'updated' else 'PKGIN'
                await audit.information(log_code, audit_detail)
                return web.json_response({'message': message, "link": link})
            else:
                raise web.HTTPBadRequest(reason='{} is not a valid action'.format(request.query['action']))

        if utils.check_reserved(name) is False:
            raise web.HTTPBadRequest(reason='Invalid name property in payload.')
        if utils.check_fledge_reserved(name) is False:
            raise web.HTTPBadRequest(
                reason="'{}' is reserved for Fledge and can not be used as service name!".format(name))
        if service_type is None:
            raise web.HTTPBadRequest(reason='Missing type property in payload.')

        service_type = str(service_type).lower()
        if service_type == 'north':
            raise web.HTTPNotAcceptable(reason='north type is not supported for the time being.')
        if service_type not in ['south', 'notification']:
            raise web.HTTPBadRequest(reason='Only south and notification type are supported.')
        if plugin is None and service_type == 'south':
            raise web.HTTPBadRequest(reason='Missing plugin property for type south in payload.')
        if plugin and utils.check_reserved(plugin) is False:
            raise web.HTTPBadRequest(reason='Invalid plugin property in payload.')

        if enabled is not None:
            if enabled not in ['true', 'false', True, False]:
                raise web.HTTPBadRequest(reason='Only "true", "false", true, false'
                                                ' are allowed for value of enabled.')
        is_enabled = True if ((type(enabled) is str and enabled.lower() in ['true']) or
                              (type(enabled) is bool and enabled is True)) else False

        # Check if a valid plugin has been provided
        plugin_module_path, plugin_config, process_name, script = "", {}, "", ""
        if service_type == 'south':
            # "plugin_module_path" is fixed by design. It is MANDATORY to keep the plugin in an
            # identically named folder within the plugin_module_path.
            # If multiple plugins with the same name are found, the python plugin import is tried first.
            plugin_module_path = "{}/python/fledge/plugins/{}/{}".format(_FLEDGE_ROOT, service_type, plugin)
            try:
                plugin_info = common.load_and_fetch_python_plugin_info(plugin_module_path, plugin, service_type)
                plugin_config = plugin_info['config']
                if not plugin_config:
                    _logger.exception("Plugin %s import problem from path %s", plugin, plugin_module_path)
                    raise web.HTTPNotFound(
                        reason='Plugin "{}" import problem from path "{}".'.format(plugin, plugin_module_path))
                process_name = 'south_c'
                script = '["services/south_c"]'
            except FileNotFoundError as ex:
                # Checking for C-type plugins
                plugin_config = load_c_plugin(plugin, service_type)
                if not plugin_config:
                    _logger.exception("Plugin %s import problem from path %s. %s",
                                      plugin, plugin_module_path, str(ex))
                    raise web.HTTPNotFound(
                        reason='Plugin "{}" import problem from path "{}".'.format(plugin, plugin_module_path))
                process_name = 'south_c'
                script = '["services/south_c"]'
            except TypeError as ex:
                _logger.exception(str(ex))
                raise web.HTTPBadRequest(reason=str(ex))
            except Exception as ex:
                _logger.exception("Failed to fetch plugin configuration. %s", str(ex))
                raise web.HTTPInternalServerError(reason='Failed to fetch plugin configuration')
        elif service_type == 'notification':
            process_name = 'notification_c'
            script = '["services/notification_c"]'

        storage = connect.get_storage_async()
        config_mgr = ConfigurationManager(storage)

        # Check whether the category name already exists
        category_info = await config_mgr.get_category_all_items(category_name=name)
        if category_info is not None:
            raise web.HTTPBadRequest(reason="The '{}' category already exists".format(name))

        # Check that the schedule name is not already registered
        count = await check_schedules(storage, name)
        if count != 0:
            raise web.HTTPBadRequest(reason='A service with this name already exists.')

        # Check that the process name is not already registered
        count = await check_scheduled_processes(storage, process_name)
        if count == 0:
            # Now first create the scheduled process entry for the new service
            payload = PayloadBuilder().INSERT(name=process_name, script=script).payload()
            try:
                res = await storage.insert_into_tbl("scheduled_processes", payload)
            except StorageServerError as ex:
                _logger.exception("Failed to create scheduled process. %s", ex.error)
                raise web.HTTPInternalServerError(reason='Failed to create service.')
            except Exception as ex:
                _logger.exception("Failed to create scheduled process. %s", str(ex))
                raise web.HTTPInternalServerError(reason='Failed to create service.')

        # Check that a notification service is not already registered; right now the
        # notification service is LIMITed to 1
        if service_type == 'notification':
            res = await check_notification_schedule(storage)
            for ps in res['rows']:
                if 'notification_c' in ps['process_name']:
                    raise web.HTTPBadRequest(reason='A Notification service schedule already exists.')
        elif service_type == 'south':
            try:
                # Create a configuration category from the configuration defined in the plugin
                category_desc = plugin_config['plugin']['description']
                await config_mgr.create_category(category_name=name,
                                                 category_description=category_desc,
                                                 category_value=plugin_config,
                                                 keep_original_items=True)
                # Create the parent category for all South services
                await config_mgr.create_category("South", {}, "South microservices", True)
                await config_mgr.create_child_category("South", [name])

                # If config is in the POST data, then update the value for each config item
                if config is not None:
                    if not isinstance(config, dict):
                        raise ValueError('Config must be a JSON object')
                    for k, v in config.items():
                        await config_mgr.set_category_item_value_entry(name, k, v['value'])
            except Exception as ex:
                await config_mgr.delete_category_and_children_recursively(name)
                _logger.exception("Failed to create plugin configuration. %s", str(ex))
                raise web.HTTPInternalServerError(
                    reason='Failed to create plugin configuration. {}'.format(ex))

        # If all successful then lastly add a schedule to run the new service at startup
        try:
            schedule = StartUpSchedule()
            schedule.name = name
            schedule.process_name = process_name
            schedule.repeat = datetime.timedelta(0)
            schedule.exclusive = True
            # if "enabled" is supplied, it gets activated in save_schedule() via the is_enabled flag
            schedule.enabled = False

            # Save the schedule
            await server.Server.scheduler.save_schedule(schedule, is_enabled)
            schedule = await server.Server.scheduler.get_schedule_by_name(name)
        except StorageServerError as ex:
            await config_mgr.delete_category_and_children_recursively(name)
            _logger.exception("Failed to create schedule. %s", ex.error)
            raise web.HTTPInternalServerError(reason='Failed to create service.')
        except Exception as ex:
            await config_mgr.delete_category_and_children_recursively(name)
            _logger.exception("Failed to create service. %s", str(ex))
            raise web.HTTPInternalServerError(reason='Failed to create service.')
    except PackageError as e:
        msg = "Service installation request failed"
        raise web.HTTPBadRequest(body=json.dumps({"message": msg, "link": str(e)}), reason=msg)
    except ValueError as e:
        raise web.HTTPBadRequest(reason=str(e))
    except KeyError as ex:
        raise web.HTTPNotFound(reason=str(ex))
    else:
        return web.json_response({'name': name, 'id': str(schedule.schedule_id)})
async def _monitor_loop(self):
    """async Monitor loop to monitor registered services"""
    # check the health of all micro-services every N seconds
    round_cnt = 0
    check_count = {}  # dict to hold the current count of the current status.
    # For the ok and running statuses, the count is always 1.
    # For non-running statuses, the count shows since when this status has been set.
    while True:
        round_cnt += 1
        self._logger.debug("Starting next round#{} of service monitoring, sleep/i:{} ping/t:{} max/a:{}".format(
            round_cnt, self._sleep_interval, self._ping_timeout, self._max_attempts))
        for service_record in ServiceRegistry.all():
            if service_record._id not in check_count:
                check_count.update({service_record._id: 1})

            # Try a ping if the service status is either running or doubtful
            # (i.e. give the service a chance to recover)
            if service_record._status not in [ServiceRecord.Status.Running,
                                              ServiceRecord.Status.Unresponsive,
                                              ServiceRecord.Status.Failed]:
                continue

            self._logger.debug("Service: {} Status: {}".format(service_record._name, service_record._status))

            if service_record._status == ServiceRecord.Status.Failed:
                if self._restart_failed == "auto":
                    if service_record._id not in self.restarted_services:
                        self.restarted_services.append(service_record._id)
                        asyncio.ensure_future(self.restart_service(service_record))
                continue

            try:
                url = "{}://{}:{}/fledge/service/ping".format(
                    service_record._protocol, service_record._address, service_record._management_port)
                async with aiohttp.ClientSession() as session:
                    async with session.get(url, timeout=self._ping_timeout) as resp:
                        text = await resp.text()
                        res = json.loads(text)
                        if res["uptime"] is None:
                            raise ValueError('res.uptime is None')
            except (asyncio.TimeoutError, aiohttp.client_exceptions.ServerTimeoutError) as ex:
                service_record._status = ServiceRecord.Status.Unresponsive
                check_count[service_record._id] += 1
                self._logger.info("ServerTimeoutError: %s, %s", str(ex), service_record.__repr__())
            except aiohttp.client_exceptions.ClientConnectorError as ex:
                service_record._status = ServiceRecord.Status.Unresponsive
                check_count[service_record._id] += 1
                self._logger.info("ClientConnectorError: %s, %s", str(ex), service_record.__repr__())
            except ValueError as ex:
                service_record._status = ServiceRecord.Status.Unresponsive
                check_count[service_record._id] += 1
                self._logger.info("Invalid response: %s, %s", str(ex), service_record.__repr__())
            except Exception as ex:
                service_record._status = ServiceRecord.Status.Unresponsive
                check_count[service_record._id] += 1
                self._logger.info("Exception occurred: %s, %s", str(ex), service_record.__repr__())
            else:
                service_record._status = ServiceRecord.Status.Running
                check_count[service_record._id] = 1

            if check_count[service_record._id] > self._max_attempts:
                ServiceRegistry.mark_as_failed(service_record._id)
                check_count[service_record._id] = 0
                try:
                    audit = AuditLogger(connect.get_storage_async())
                    await audit.failure('SRVFL', {'name': service_record._name})
                except Exception as ex:
                    self._logger.info("Failed to audit service failure %s", str(ex))
        await self._sleep(self._sleep_interval)
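# A tiny, self-contained simulation of the escalation rule implemented above (assumed,
# simplified semantics: the counter starts at 1, each failed ping increments it, a good
# ping resets it to 1, and crossing _max_attempts marks the service as failed):
def rounds_until_failed(ping_results, max_attempts=3):
    """ping_results: iterable of booleans (True = ping ok).
    Returns the 1-based round at which the service would be marked failed, or None."""
    count = 1
    for round_no, ok in enumerate(ping_results, start=1):
        count = 1 if ok else count + 1
        if count > max_attempts:
            return round_no
    return None

assert rounds_until_failed([False, False, False]) == 3           # third consecutive miss crosses the threshold
assert rounds_until_failed([False, False, True, False]) is None  # a good ping resets the count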
async def create_audit_entry(request):
    """ Creates a new Audit entry

    Args:
        request: POST /fledge/audit

        {
          "source"   : "LMTR", # 5 char max
          "severity" : "WARNING",
          "details"  : {
                        "message" : "Engine oil pressure low"
                       }
        }

    :Example:
        curl -X POST -d '{"source":"LMTR","severity":"WARNING","details":{"message":"Engine oil pressure low"}}' http://localhost:8081/fledge/audit

    Returns:
        json object representation of the created audit entry

        {
          "timestamp" : "2017-06-21T09:39:51.8949395",
          "source"    : "LMTR",
          "severity"  : "WARNING",
          "details"   : {
                         "message" : "Engine oil pressure low"
                        }
        }
    """
    return_error = False
    err_msg = "Missing required parameter"

    payload = await request.json()
    severity = payload.get("severity")
    source = payload.get("source")
    details = payload.get("details")

    if severity is None or severity == "":
        err_msg += " severity"
        return_error = True
    if source is None or source == "":
        err_msg += " source"
        return_error = True
    if details is None:
        err_msg += " details"
        return_error = True

    if return_error:
        raise web.HTTPBadRequest(reason=err_msg)
    if not isinstance(details, dict):
        raise web.HTTPBadRequest(reason="Details should be a valid json object")

    try:
        audit = AuditLogger()
        await getattr(audit, str(severity).lower())(source, details)

        # Set the timestamp for the return message
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        message = {'timestamp': str(timestamp),
                   'source': source,
                   'severity': severity,
                   'details': details}
        return web.json_response(message)
    except AttributeError as e:
        # Return an error for an unsupported severity method
        err_msg = "severity type {} is not supported".format(severity)
        _logger.error("Error in create_audit_entry(): %s | %s", err_msg, str(e))
        raise web.HTTPNotFound(reason=err_msg)
    except Exception as ex:
        raise web.HTTPInternalServerError(reason=str(ex))
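# The handler above dispatches on severity via getattr(), so the accepted severities are
# the AuditLogger method names; the ones used in this code are 'information', 'warning',
# 'failure' and 'success' (matched case-insensitively). For example:
#
#     await getattr(audit, "WARNING".lower())("LMTR", {"message": "Engine oil pressure low"})
#     # ...is equivalent to:
#     await audit.warning("LMTR", {"message": "Engine oil pressure low"})
#
# Any other severity raises AttributeError, which the handler maps to HTTP 404.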
async def post_notification(request):
    """ Create a new notification to run a specific plugin

    :Example:
        curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false}'
        curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
    """
    try:
        notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
        _address, _port = notification_service[0]._address, notification_service[0]._port
    except service_registry_exceptions.DoesNotExist:
        raise web.HTTPNotFound(reason="No Notification service available.")

    try:
        data = await request.json()
        if not isinstance(data, dict):
            raise ValueError('Data payload must be a valid JSON')

        name = data.get('name', None)
        description = data.get('description', None)
        rule = data.get('rule', None)
        channel = data.get('channel', None)
        notification_type = data.get('notification_type', None)
        enabled = data.get('enabled', None)
        rule_config = data.get('rule_config', {})
        delivery_config = data.get('delivery_config', {})

        if name is None or name.strip() == "":
            raise ValueError('Missing name property in payload.')
        if description is None:
            raise ValueError('Missing description property in payload.')
        if rule is None:
            raise ValueError('Missing rule property in payload.')
        if channel is None:
            raise ValueError('Missing channel property in payload.')
        if notification_type is None:
            raise ValueError('Missing notification_type property in payload.')

        if utils.check_reserved(name) is False:
            raise ValueError('Invalid name property in payload.')
        if utils.check_reserved(rule) is False:
            raise ValueError('Invalid rule property in payload.')
        if utils.check_reserved(channel) is False:
            raise ValueError('Invalid channel property in payload.')
        if notification_type not in NOTIFICATION_TYPE:
            raise ValueError('Invalid notification_type property in payload.')

        if enabled is not None:
            if enabled not in ['true', 'false', True, False]:
                raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
        is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or
                                (type(enabled) is bool and enabled is True)) else "false"

        storage = connect.get_storage_async()
        config_mgr = ConfigurationManager(storage)

        curr_config = await config_mgr.get_category_all_items(name)
        if curr_config is not None:
            raise ValueError("A Category with name {} already exists.".format(name))

        try:
            # Get the default config for the rule and channel plugins
            url = '{}/plugin'.format(request.url)
            try:
                # When authentication is mandatory we need to pass the token in the request header
                auth_token = request.token
            except AttributeError:
                auth_token = None

            list_plugins = json.loads(await _hit_get_url(url, auth_token))
            r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules']))
            c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery']))
            if len(r) == 0 or len(c) == 0:
                raise KeyError
            rule_plugin_config = r[0]['config']
            delivery_plugin_config = c[0]['config']
        except KeyError:
            raise ValueError("Invalid rule plugin {} and/or delivery plugin {} supplied.".format(rule, channel))

        # Verify that rule_config contains valid keys
        if rule_config != {}:
            for k, v in rule_config.items():
                if k not in rule_plugin_config:
                    raise ValueError("Invalid key {} in rule_config {} supplied for plugin {}.".format(
                        k, rule_config, rule))

        # Verify that delivery_config contains valid keys
        if delivery_config != {}:
            for k, v in delivery_config.items():
                if k not in delivery_plugin_config:
                    raise ValueError("Invalid key {} in delivery_config {} supplied for plugin {}.".format(
                        k, delivery_config, channel))

        # First create the templates for the notification and the rule/channel plugins
        post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name))
        await _hit_post_url(post_url)  # Create the notification template
        post_url = 'http://{}:{}/notification/{}/rule/{}'.format(
            _address, _port, urllib.parse.quote(name), urllib.parse.quote(rule))
        await _hit_post_url(post_url)  # Create the notification rule template
        post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(
            _address, _port, urllib.parse.quote(name), urllib.parse.quote(channel))
        await _hit_post_url(post_url)  # Create the notification delivery template

        # Create the configurations
        notification_config = {
            "description": description,
            "rule": rule,
            "channel": channel,
            "notification_type": notification_type,
            "enable": is_enabled,
        }
        await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config)

        audit = AuditLogger(storage)
        await audit.information('NTFAD', {"name": name})
    except ValueError as ex:
        raise web.HTTPBadRequest(reason=str(ex))
    except Exception as e:
        raise web.HTTPInternalServerError(reason=str(e))
    else:
        return web.json_response({'result': "Notification {} created successfully".format(name)})
async def test_constructor_no_storage(self):
    """ Test that we must construct with a storage client """
    with pytest.raises(TypeError) as excinfo:
        AuditLogger()
    assert 'Must be a valid Storage object' in str(excinfo.value)
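# The test above pins down the constructor contract: building an AuditLogger without a
# storage client must raise TypeError. A minimal sketch of the check it implies (an
# assumption for illustration; the real constructor may validate differently):
class _AuditLoggerSketch:
    def __init__(self, storage=None):
        if not isinstance(storage, StorageClientAsync):
            raise TypeError('Must be a valid Storage object')
        self._storage = storage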
class SendingProcess(FledgeProcess):
    """ SendingProcess """
    _logger = None  # type: logging.Logger

    _stop_execution = False
    """ Set to True when a signal is captured and a termination is needed """

    TASK_FETCH_SLEEP = 0.5
    """ The amount of time the fetch operation will sleep if there is no more data to load or in case of an error """

    TASK_SEND_SLEEP = 0.5
    """ The amount of time the sending operation will sleep in case of an error """

    TASK_SLEEP_MAX_INCREMENTS = 7
    """ Maximum number of increments for the sleep handling, the amount of time is doubled at every sleep """

    TASK_SEND_UPDATE_POSITION_MAX = 10
    """ The position is updated after the specified number of iterations of the sending task """

    _PLUGIN_TYPE = "north"
    """ Defines the type of the plugin managed by the Sending Process """

    _AUDIT_CODE = "STRMN"
    """ Audit code to use """

    _CONFIG_CATEGORY_NAME = 'SEND_PR'
    _CONFIG_CATEGORY_DESCRIPTION = 'Sending Process'
    _CONFIG_DEFAULT = {
        "enable": {
            "description": "Enable execution of the sending process",
            "type": "boolean",
            "default": "true",
            "readonly": "true"
        },
        "duration": {
            "description": "Time in seconds the sending process should run",
            "type": "integer",
            "default": "60",
            "order": "7",
            "displayName": "Duration"
        },
        "blockSize": {
            "description": "Number of readings to send in each transmission",
            "type": "integer",
            "default": "5000",
            "order": "8",
            "displayName": "Readings Block Size"
        },
        "sleepInterval": {
            "description": "Time in seconds to wait between duration checks",
            "type": "integer",
            "default": "1",
            "order": "11",
            "displayName": "Sleep Interval"
        },
        "memory_buffer_size": {
            "description": "Number of elements of blockSize size to be buffered in memory",
            "type": "integer",
            "default": "10",
            "order": "12",
            "displayName": "Memory Buffer Size"
        }
    }

    def __init__(self, loop=None):
        super().__init__()
        if not SendingProcess._logger:
            SendingProcess._logger = _LOGGER
        self._config = {
            'enable': self._CONFIG_DEFAULT['enable']['default'],
            'duration': int(self._CONFIG_DEFAULT['duration']['default']),
            'blockSize': int(self._CONFIG_DEFAULT['blockSize']['default']),
            'sleepInterval': float(self._CONFIG_DEFAULT['sleepInterval']['default']),
            'memory_buffer_size': int(self._CONFIG_DEFAULT['memory_buffer_size']['default']),
        }
        self._config_from_manager = ""
        self._module_template = "fledge.plugins.north." + "empty." + "empty"
        self._plugin = importlib.import_module(self._module_template)
        self._plugin_info = {
            'name': "",
            'version': "",
            'type': "",
            'interface': "",
            'config': ""
        }
        self._plugin_handle = None
        self.statistics_key = None
        self._readings = None
        """ Interface to the Fledge Storage Layer """
        self._audit = None
        """ Used to log operations in the Storage Layer """
        self._log_performance = None
        """ Enable/Disable performance logging, enabled using a command line parameter """
        self._debug_level = None
        """ Defines what and the level of details for logging """
        self._task_fetch_data_run = True
        self._task_send_data_run = True
        """ The specific task will run until the value is True """
        self._task_fetch_data_task_id = None
        self._task_send_data_task_id = None
        """ Used to manage the fetch/send operations """
        self._task_fetch_data_sem = None
        self._task_send_data_sem = None
        """ Semaphores used for the synchronization of the fetch/send operations """
        self._memory_buffer = [None]
        """ In memory buffer where the data is loaded from the storage layer before sending it to the plugin """
        self._memory_buffer_fetch_idx = 0
        self._memory_buffer_send_idx = 0
        """ Used to manage the in memory buffer for the fetch/send operations """
        self._event_loop = asyncio.get_event_loop() if loop is None else loop

    @staticmethod
    def _signal_handler(_signal_num, _stack_frame):
        """ Handles signals to properly terminate the execution """
        SendingProcess._stop_execution = True
        SendingProcess._logger.info(
            "{func} - signal captured |{signal_num}| ".format(func="_signal_handler", signal_num=_signal_num))

    @staticmethod
    def performance_track(message):
        """ Tracks information for performance measurement """
        if _log_performance:
            usage = resource.getrusage(resource.RUSAGE_SELF)
            process_memory = usage.ru_maxrss / 1000

    async def _update_statistics(self, num_sent):
        """ Updates Fledge statistics """
        try:
            key = self.statistics_key
            _stats = await statistics.create_statistics(self._storage_async)
            await _stats.update(key, num_sent)
            await _stats.update(self.master_statistics_key, num_sent)
        except Exception:
            _message = _MESSAGES_LIST["e000010"]
            SendingProcess._logger.error(_message)
            raise

    async def _last_object_id_update(self, new_last_object_id):
        """ Updates the reached position """
        try:
            payload = payload_builder.PayloadBuilder() \
                .SET(last_object=new_last_object_id, ts='now()') \
                .WHERE(['id', '=', self._stream_id]) \
                .payload()
            await self._storage_async.update_tbl("streams", payload)
        except Exception as _ex:
            SendingProcess._logger.error(_MESSAGES_LIST["e000020"].format(_ex))
            raise

    async def _update_position_reached(self, update_last_object_id, tot_num_sent):
        """ Updates last_object_id, statistics and audit """
        await self._last_object_id_update(update_last_object_id)
        await self._update_statistics(tot_num_sent)
        await self._audit.information(self._AUDIT_CODE, {"sentRows": tot_num_sent})
    async def _task_send_data(self):
        """ Sends the data from the in memory structure to the destination using the loaded plugin """
        data_sent = False
        db_update = False
        update_last_object_id = 0
        tot_num_sent = 0
        update_position_idx = 0
        try:
            self._memory_buffer_send_idx = 0
            sleep_time = self.TASK_SEND_SLEEP
            sleep_num_increments = 1
            while self._task_send_data_run:
                slept = False
                if self._memory_buffer_send_idx < self._config['memory_buffer_size']:
                    new_last_object_id = None
                    num_sent = 0
                    if self._memory_buffer[self._memory_buffer_send_idx] is not None:  # if there are data to send
                        try:
                            data_sent, new_last_object_id, num_sent = \
                                await self._plugin.plugin_send(self._plugin_handle,
                                                               self._memory_buffer[self._memory_buffer_send_idx],
                                                               self._stream_id)
                        except Exception as ex:
                            _message = _MESSAGES_LIST["e000021"].format(ex)
                            SendingProcess._logger.error(_message)
                            await self._audit.failure(self._AUDIT_CODE, {"error - on _task_send_data": _message})
                            data_sent = False
                            slept = True
                            await asyncio.sleep(sleep_time)
                        if data_sent:
                            # asset tracker checking
                            for _reads in self._memory_buffer[self._memory_buffer_send_idx]:
                                payload = {"asset": _reads['asset_code'], "event": "Egress",
                                           "service": self._name, "plugin": self._config['plugin']}
                                if payload not in self._tracked_assets:
                                    self._core_microservice_management_client.create_asset_tracker_event(payload)
                                    self._tracked_assets.append(payload)
                            db_update = True
                            update_last_object_id = new_last_object_id
                            tot_num_sent = tot_num_sent + num_sent
                            self._memory_buffer[self._memory_buffer_send_idx] = None
                            self._memory_buffer_send_idx += 1
                            self._task_send_data_sem.release()
                            self.performance_track("task _task_send_data")
                    else:
                        # Updates the position before going to wait for the semaphore
                        if db_update:
                            await self._update_position_reached(update_last_object_id, tot_num_sent)
                            update_position_idx = 0
                            tot_num_sent = 0
                            db_update = False
                        await self._task_fetch_data_sem.acquire()
                    # Updates the Storage layer every 'self.UPDATE_POSITION_MAX' iterations
                    if db_update:
                        if update_position_idx >= self.TASK_SEND_UPDATE_POSITION_MAX:
                            await self._update_position_reached(update_last_object_id, tot_num_sent)
                            update_position_idx = 0
                            tot_num_sent = 0
                            db_update = False
                        else:
                            update_position_idx += 1
                else:
                    self._memory_buffer_send_idx = 0
                # Handles the sleep time, it is doubled every time up to a limit
                if slept:
                    sleep_num_increments += 1
                    sleep_time *= 2
                    if sleep_num_increments > self.TASK_SLEEP_MAX_INCREMENTS:
                        sleep_time = self.TASK_SEND_SLEEP
                        sleep_num_increments = 1
            # Checks if the information on the Storage layer needs to be updated
            if db_update:
                await self._update_position_reached(update_last_object_id, tot_num_sent)
        except Exception as ex:
            # Assigns _message before use; the flattened original referenced a possibly
            # unbound _message here
            _message = _MESSAGES_LIST["e000021"].format(ex)
            SendingProcess._logger.error(_message)
            if db_update:
                await self._update_position_reached(update_last_object_id, tot_num_sent)
            await self._audit.failure(self._AUDIT_CODE, {"error - on _task_send_data": _message})
            raise

    @staticmethod
    def _transform_in_memory_data_statistics(raw_data):
        converted_data = []
        for row in raw_data:
            try:
                timestamp = apply_date_format(row['history_ts'])  # Adds the timezone UTC
                asset_code = row['key'].strip()
                # Skips any row having an undefined asset_code
                if asset_code != "":
                    new_row = {
                        'id': row['id'],
                        'asset_code': asset_code,
                        'read_key': str(uuid.uuid4()),
                        'reading': {'value': row['value']},
                        'user_ts': timestamp,
                    }
                    converted_data.append(new_row)
                else:
                    SendingProcess._logger.warning(_MESSAGES_LIST["e000032"].format(row))
            except Exception as e:
                SendingProcess._logger.warning(_MESSAGES_LIST["e000022"].format(str(e), row))
        return converted_data

    async def _load_data_into_memory_statistics(self, last_object_id):
        """ Extracts statistics data from the DB Layer, converts it into the proper format """
        raw_data = None
        try:
            payload = payload_builder.PayloadBuilder() \
                .SELECT("id", "key", '{"column": "ts", "timezone": "UTC"}', "value", "history_ts") \
                .WHERE(['id', '>', last_object_id]) \
                .LIMIT(self._config['blockSize']) \
                .ORDER_BY(['id', 'ASC']) \
                .payload()
            statistics_history = await self._storage_async.query_tbl_with_payload('statistics_history', payload)
            raw_data = statistics_history['rows']
            converted_data = self._transform_in_memory_data_statistics(raw_data)
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000009"])
            raise
        return converted_data

    @staticmethod
    def _transform_in_memory_data_readings(raw_data):
        """ Applies the transformation/validation required to have a standard data set.

        Note: Python is not able to automatically convert a string containing a number starting
              with 0 to a dictionary (even using eval), for example: '{"value":02}',
              so these rows will generate an exception and will be skipped.
        """
        converted_data = []
        for row in raw_data:
            try:
                asset_code = row['asset_code'].replace(" ", "")
                # Skips any row having an undefined asset_code
                if asset_code != "":
                    # Converts values to the proper types, for example "180.2" to float 180.2
                    payload = row['reading']
                    for key in list(payload.keys()):
                        value = payload[key]
                        payload[key] = plugin_common.convert_to_type(value)
                    timestamp = apply_date_format(row['user_ts'])  # Adds the timezone UTC
                    new_row = {
                        'id': row['id'],
                        'asset_code': asset_code,
                        'read_key': row['read_key'],
                        'reading': payload,
                        'user_ts': timestamp
                    }
                    converted_data.append(new_row)
                else:
                    SendingProcess._logger.warning(_MESSAGES_LIST["e000032"].format(row))
            except Exception as e:
                SendingProcess._logger.warning(_MESSAGES_LIST["e000031"].format(str(e), row))
        return converted_data

    async def _load_data_into_memory_readings(self, last_object_id):
        """ Extracts data related to the readings from the DB Layer, loading it into a memory structure """
        raw_data = None
        converted_data = []
        try:
            # Loads data, +1 as > is needed
            readings = await self._readings.fetch(last_object_id + 1, self._config['blockSize'])
            raw_data = readings['rows']
            converted_data = self._transform_in_memory_data_readings(raw_data)
        except aiohttp.client_exceptions.ClientPayloadError as _ex:
            SendingProcess._logger.warning(_MESSAGES_LIST["e000009"].format(str(_ex)))
        except Exception as _ex:
            SendingProcess._logger.error(_MESSAGES_LIST["e000009"].format(str(_ex)))
            raise
        return converted_data

    async def _load_data_into_memory(self, last_object_id):
        """ Identifies the data source requested and calls the appropriate handler """
        try:
            if self._config['source'] == 'readings':
                data_to_send = await self._load_data_into_memory_readings(last_object_id)
            elif self._config['source'] == 'statistics':
                data_to_send = await self._load_data_into_memory_statistics(last_object_id)
            else:
                SendingProcess._logger.error(_MESSAGES_LIST["e000008"])
                raise UnknownDataSource
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000009"])
            raise
        return data_to_send

    async def _last_object_id_read(self):
        """ Retrieves the starting point for the send operation """
        try:
            where = 'id={0}'.format(self._stream_id)
            streams = await self._storage_async.query_tbl('streams', where)
            rows = streams['rows']
            if len(rows) == 0:
                raise ValueError(_MESSAGES_LIST["e000016"].format(str(self._stream_id)))
            elif len(rows) > 1:
                raise ValueError(_MESSAGES_LIST["e000014"].format(str(self._stream_id)))
            else:
                last_object_id = rows[0]['last_object']
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000019"])
            raise
        return last_object_id
    async def _task_fetch_data(self):
        """ Read data from the Storage Layer into a memory structure """
        try:
            last_object_id = await self._last_object_id_read()
            self._memory_buffer_fetch_idx = 0
            sleep_time = self.TASK_FETCH_SLEEP
            sleep_num_increments = 1
            while self._task_fetch_data_run:
                slept = False
                if self._memory_buffer_fetch_idx < self._config['memory_buffer_size']:
                    # Checks if there is enough space to load a new block of data
                    if self._memory_buffer[self._memory_buffer_fetch_idx] is None:
                        try:
                            data_to_send = await self._load_data_into_memory(last_object_id)
                        except Exception as ex:
                            _message = _MESSAGES_LIST["e000028"].format(ex)
                            SendingProcess._logger.error(_message)
                            await self._audit.failure(self._AUDIT_CODE, {"error - on _task_fetch_data": _message})
                            data_to_send = False
                            slept = True
                            await asyncio.sleep(sleep_time)
                        if data_to_send:
                            # Handles the JQFilter functionality
                            if self._config_from_manager['applyFilter']["value"].upper() == "TRUE":
                                jqfilter = JQFilter()
                                # Steps needed to properly format the data generated by the JQFilter
                                # to the one expected by the SP
                                data_to_send_2 = jqfilter.transform(
                                    data_to_send, self._config_from_manager['filterRule']["value"])
                                data_to_send_3 = json.dumps(data_to_send_2)
                                del data_to_send_2
                                data_to_send_4 = eval(data_to_send_3)
                                del data_to_send_3
                                data_to_send = data_to_send_4[0]
                                del data_to_send_4
                            # Loads the block of data into the in memory buffer
                            self._memory_buffer[self._memory_buffer_fetch_idx] = data_to_send
                            last_position = len(data_to_send) - 1
                            last_object_id = data_to_send[last_position]['id']
                            self._memory_buffer_fetch_idx += 1
                            self._task_fetch_data_sem.release()
                            self.performance_track("task _task_fetch_data")
                        else:
                            # There is no more data to load
                            slept = True
                            await asyncio.sleep(sleep_time)
                    else:
                        # There is no more space in the in memory buffer
                        await self._task_send_data_sem.acquire()
                else:
                    self._memory_buffer_fetch_idx = 0
                # Handles the sleep time, it is doubled every time up to a limit
                if slept:
                    sleep_num_increments += 1
                    sleep_time *= 2
                    if sleep_num_increments > self.TASK_SLEEP_MAX_INCREMENTS:
                        sleep_time = self.TASK_FETCH_SLEEP
                        sleep_num_increments = 1
        except Exception as ex:
            _message = _MESSAGES_LIST["e000028"].format(ex)
            SendingProcess._logger.error(_message)
            await self._audit.failure(self._AUDIT_CODE, {"error - on _task_fetch_data": _message})
            raise

    async def send_data(self):
        """ Handles the sending of the data to the destination using the configured plugin
            for a defined amount of time
        """
        # Prepares the in memory buffer for the fetch/send operations
        self._memory_buffer = [None for _ in range(self._config['memory_buffer_size'])]
        self._task_fetch_data_sem = asyncio.Semaphore(0)
        self._task_send_data_sem = asyncio.Semaphore(0)
        self._task_fetch_data_task_id = asyncio.ensure_future(self._task_fetch_data())
        self._task_send_data_task_id = asyncio.ensure_future(self._task_send_data())
        self._task_fetch_data_run = True
        self._task_send_data_run = True
        try:
            start_time = time.time()
            elapsed_seconds = 0
            while elapsed_seconds < self._config['duration']:
                # Terminates the execution in case a signal has been received
                if SendingProcess._stop_execution:
                    SendingProcess._logger.info(
                        "{func} - signal received, stops the execution".format(func="send_data"))
                    break
                # Context switch to either the fetch or the send operation
                await asyncio.sleep(self._config['sleepInterval'])
                elapsed_seconds = time.time() - start_time
                SendingProcess._logger.debug("{0} - elapsed_seconds {1}".format("send_data", elapsed_seconds))
        except Exception as ex:
            _message = _MESSAGES_LIST["e000021"].format(ex)
            SendingProcess._logger.error(_message)
            await self._audit.failure(self._AUDIT_CODE, {"error - on send_data": _message})

        try:
            # Graceful termination of the tasks
            self._task_fetch_data_run = False
            self._task_send_data_run = False
            # Unblocks the task if it is waiting
            self._task_fetch_data_sem.release()
            self._task_send_data_sem.release()
            await self._task_fetch_data_task_id
            await self._task_send_data_task_id
        except Exception as ex:
            SendingProcess._logger.error(_MESSAGES_LIST["e000029"].format(ex))
    async def _get_stream_id(self, config_stream_id):
        async def get_rows_from_stream_id(stream_id):
            payload = payload_builder.PayloadBuilder() \
                .SELECT("id", "description", "active") \
                .WHERE(['id', '=', stream_id]) \
                .payload()
            streams = await self._storage_async.query_tbl_with_payload("streams", payload)
            return streams['rows']

        async def get_rows_from_name(description):
            payload = payload_builder.PayloadBuilder() \
                .SELECT("id", "description", "active") \
                .WHERE(['description', '=', description]) \
                .payload()
            streams = await self._storage_async.query_tbl_with_payload("streams", payload)
            return streams['rows']

        async def add_stream(config_stream_id, description):
            if config_stream_id:
                payload = payload_builder.PayloadBuilder() \
                    .INSERT(id=config_stream_id, description=description) \
                    .payload()
                await self._storage_async.insert_into_tbl("streams", payload)
                rows = await get_rows_from_stream_id(stream_id=config_stream_id)
            else:
                # A user upgrading Fledge may already have data in the streams table but no
                # stream_id entry in the configuration for this schedule name, so check whether
                # a row already exists for this schedule name in the streams table.
                rows = await get_rows_from_name(description=self._name)
                if len(rows) == 0:
                    payload = payload_builder.PayloadBuilder() \
                        .INSERT(description=description) \
                        .payload()
                    await self._storage_async.insert_into_tbl("streams", payload)
                    rows = await get_rows_from_name(description=self._name)
            return rows[0]['id'], rows[0]['active']

        stream_id = None
        try:
            rows = await get_rows_from_stream_id(config_stream_id)
            if len(rows) == 0:
                stream_id, stream_id_valid = await add_stream(config_stream_id, self._name)
            elif len(rows) > 1:
                raise ValueError(_MESSAGES_LIST["e000013"].format(stream_id))
            else:
                stream_id = rows[0]['id']
                if rows[0]['active'] == 't':
                    stream_id_valid = True
                else:
                    SendingProcess._logger.info(_MESSAGES_LIST["i000004"].format(stream_id))
                    stream_id_valid = False
        except Exception as e:
            SendingProcess._logger.error(_MESSAGES_LIST["e000013"].format(str(e)))
            raise e
        return stream_id, stream_id_valid

    async def _get_statistics_key(self):
        async def get_rows(key):
            payload = payload_builder.PayloadBuilder() \
                .SELECT("key", "description") \
                .WHERE(['key', '=', key]) \
                .LIMIT(1) \
                .payload()
            statistics = await self._storage_async.query_tbl_with_payload("statistics", payload)
            return statistics['rows']

        async def add_statistics(key, description):
            payload = payload_builder.PayloadBuilder() \
                .INSERT(key=key, description=description) \
                .payload()
            await self._storage_async.insert_into_tbl("statistics", payload)
            rows = await get_rows(key=key)
            return rows[0]['key']

        try:
            rows = await get_rows(key=self._name)
            statistics_key = await add_statistics(key=self._name, description=self._name) if len(rows) == 0 else rows[0]['key']
        except Exception as e:
            SendingProcess._logger.error("Unable to fetch statistics key for {} | {}".format(self._name, str(e)))
            raise e
        return statistics_key
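# A minimal sketch (illustrative only, not a Fledge API) of the "select, insert if
# missing" pattern used by _get_statistics_key above: the key is looked up first and
# inserted only when absent, so repeated runs of the process reuse the same row.
# A dict stands in for the statistics table.
_statistics_table = {}


def _sketch_get_or_insert_key(key, description):
    rows = [k for k in _statistics_table if k == key]   # SELECT ... WHERE key = <key> LIMIT 1
    if len(rows) == 0:
        _statistics_table[key] = description            # INSERT (key, description)
    return key                                          # the stored key is returned either way


# _sketch_get_or_insert_key('Readings Sent', 'Readings Sent North')  # -> 'Readings Sent'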
    async def _get_master_statistics_key(self):
        async def get_rows(key):
            payload = payload_builder.PayloadBuilder() \
                .SELECT("key", "description") \
                .WHERE(['key', '=', key]) \
                .LIMIT(1) \
                .payload()
            statistics = await self._storage_async.query_tbl_with_payload("statistics", payload)
            return statistics['rows']

        async def add_statistics(key, description):
            payload = payload_builder.PayloadBuilder() \
                .INSERT(key=key, description=description) \
                .payload()
            await self._storage_async.insert_into_tbl("statistics", payload)
            rows = await get_rows(key=key)
            return rows[0]['key']

        try:
            if self._config['source'] == 'readings':
                key = 'Readings Sent'
                description = 'Readings Sent North'
            elif self._config['source'] == 'statistics':
                key = 'Statistics Sent'
                description = 'Statistics Sent North'
            elif self._config['source'] == 'audit':
                key = 'Audit Sent'
                description = 'Audit Sent North'
            else:
                raise ValueError("Unsupported data source {}".format(self._config['source']))
            rows = await get_rows(key=key)
            master_statistics_key = await add_statistics(key=key, description=description) if len(rows) == 0 else rows[0]['key']
        except Exception as e:
            SendingProcess._logger.error("Unable to fetch master statistics key for {} | {}".format(self._name, str(e)))
            raise e
        return master_statistics_key

    def _is_north_valid(self):
        """ Checks whether the loaded north plugin is suitable for sending the data"""
        north_ok = False
        try:
            if self._plugin_info['type'] == self._PLUGIN_TYPE and \
                    self._plugin_info['name'] != "Empty North Plugin":
                north_ok = True
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000000"])
            raise
        return north_ok

    def _plugin_load(self):
        try:
            plugin_module_path = "{}/python/fledge/plugins/{}/{}".format(_FLEDGE_ROOT,
                                                                         self._PLUGIN_TYPE,
                                                                         self._config['plugin'])
            self._plugin = common.load_python_plugin(plugin_module_path, self._config['plugin'], self._PLUGIN_TYPE)
        except (ImportError, FileNotFoundError):
            SendingProcess._logger.error(_MESSAGES_LIST["e000005"].format(plugin_module_path))
            raise
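# A minimal sketch of dynamic plugin loading comparable to _plugin_load above.
# common.load_python_plugin is Fledge-internal; this illustrative stand-in uses
# importlib directly and assumes the directory layout shown above
# (<root>/python/fledge/plugins/<type>/<name>/<name>.py). Paths and names are assumptions.
import importlib.util


def _sketch_load_plugin(plugin_dir, plugin_name):
    module_file = "{}/{}.py".format(plugin_dir, plugin_name)
    spec = importlib.util.spec_from_file_location(plugin_name, module_file)
    if spec is None:
        raise FileNotFoundError(module_file)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)        # raises ImportError/FileNotFoundError on a broken plugin
    return module                          # expected to expose plugin_info(), plugin_init(), ...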
    def _fetch_configuration(self, cat_name=None, cat_desc=None, cat_config=None, cat_keep_original=False):
        """ Retrieves the configuration from the Configuration Manager"""
        try:
            config_payload = json.dumps({
                "key": cat_name,
                "description": cat_desc,
                "value": cat_config,
                "keep_original_items": cat_keep_original
            })
            self._core_microservice_management_client.create_configuration_category(config_payload)
            _config_from_manager = self._core_microservice_management_client.get_configuration_category(category_name=cat_name)
            # Warn if a filter pipeline is configured: it is not supported on a North task instance
            if 'filter' in _config_from_manager:
                _LOGGER.warning('Filter pipeline is not supported on Python North task instance [%s], plugin [%s]',
                                cat_name, _config_from_manager['plugin']['value'])
            # Create the parent category for all north services
            try:
                parent_payload = json.dumps({"key": "North", "description": "North tasks", "value": {},
                                             "children": [cat_name], "keep_original_items": True})
                self._core_microservice_management_client.create_configuration_category(parent_payload)
            except KeyError:
                _LOGGER.error("Failed to create North parent configuration category for sending process")
                raise
            return _config_from_manager
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000003"])
            raise

    def _retrieve_configuration(self, cat_name=None, cat_desc=None, cat_config=None, cat_keep_original=False):
        """ Retrieves the configuration from the Configuration Manager and applies the related conversions"""
        try:
            _config_from_manager = self._fetch_configuration(cat_name, cat_desc, cat_config, cat_keep_original)
            # Converts the retrieved values to their proper types
            self._config['enable'] = True if _config_from_manager['enable']['value'].upper() == 'TRUE' else False
            self._config['duration'] = int(_config_from_manager['duration']['value'])
            if 'source' in _config_from_manager:
                self._config['source'] = _config_from_manager['source']['value']
            self._config['blockSize'] = int(_config_from_manager['blockSize']['value'])
            self._config['sleepInterval'] = float(_config_from_manager['sleepInterval']['value'])
            if 'plugin' in _config_from_manager:
                self._config['plugin'] = _config_from_manager['plugin']['value']
            self._config['memory_buffer_size'] = int(_config_from_manager['memory_buffer_size']['value'])
            _config_from_manager['_CONFIG_CATEGORY_NAME'] = cat_name
            if 'stream_id' in _config_from_manager:
                self._config["stream_id"] = int(_config_from_manager['stream_id']['value'])
            else:
                # Sets stream_id as not defined
                self._config["stream_id"] = 0
            self._config_from_manager = _config_from_manager
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000003"])
            raise
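# A minimal sketch of the conversions _retrieve_configuration applies above: every
# item arrives from the Configuration Manager as a {'value': <string>} entry and is
# mapped to a native Python type. The input shown below is a hypothetical example.
def _sketch_convert_config(config_from_manager):
    return {
        'enable': config_from_manager['enable']['value'].upper() == 'TRUE',
        'duration': int(config_from_manager['duration']['value']),
        'blockSize': int(config_from_manager['blockSize']['value']),
        'sleepInterval': float(config_from_manager['sleepInterval']['value']),
    }


# _sketch_convert_config({'enable': {'value': 'true'}, 'duration': {'value': '60'},
#                         'blockSize': {'value': '500'}, 'sleepInterval': {'value': '1.1'}})
# -> {'enable': True, 'duration': 60, 'blockSize': 500, 'sleepInterval': 1.1}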
    async def _start(self):
        """ Sets up the correct state for the Sending Process"""
        exec_sending_process = False
        try:
            SendingProcess._logger.debug("{}, for Linux (x86_64) {}".format(_MODULE_NAME, __copyright__))
            SendingProcess._logger.info("Started")
            # Configuration of the sending process
            self._retrieve_configuration(cat_name=self._name,
                                         cat_desc=self._CONFIG_CATEGORY_DESCRIPTION,
                                         cat_config=self._CONFIG_DEFAULT,
                                         cat_keep_original=True)
            # Fetch stream_id
            self._stream_id, is_stream_valid = await self._get_stream_id(self._config["stream_id"])
            if is_stream_valid is False:
                raise ValueError("Error in Stream Id for Sending Process {}".format(self._name))
            self.statistics_key = await self._get_statistics_key()
            self.master_statistics_key = await self._get_master_statistics_key()
            # Updates the configuration with the new stream_id
            stream_id_config = {
                "stream_id": {
                    "description": "Stream ID",
                    "type": "integer",
                    "default": str(self._stream_id),
                    "readonly": "true"
                }
            }
            self._retrieve_configuration(cat_name=self._name,
                                         cat_desc=self._CONFIG_CATEGORY_DESCRIPTION,
                                         cat_config=stream_id_config,
                                         cat_keep_original=True)
            exec_sending_process = self._config['enable']
            if self._config['enable']:
                # Checks if the plugin is defined; if not, ends the execution
                if 'plugin' in self._config:
                    self._plugin_load()
                    self._plugin_info = self._plugin.plugin_info()
                    if self._is_north_valid():
                        try:
                            # Fetch plugin configuration
                            self._retrieve_configuration(cat_name=self._name,
                                                         cat_desc=self._CONFIG_CATEGORY_DESCRIPTION,
                                                         cat_config=self._plugin_info['config'],
                                                         cat_keep_original=True)
                            data = self._config_from_manager
                            # Appends stream_id etc. to the payload to be sent to plugin_init
                            data['stream_id'] = self._stream_id
                            data['debug_level'] = self._debug_level
                            data['log_performance'] = self._log_performance
                            data.update({'sending_process_instance': self})
                            self._plugin_handle = self._plugin.plugin_init(data)
                        except Exception as e:
                            _message = _MESSAGES_LIST["e000018"].format(self._config['plugin'])
                            SendingProcess._logger.error(_message)
                            raise PluginInitialiseFailed(e)
                    else:
                        exec_sending_process = False
                        _message = _MESSAGES_LIST["e000015"].format(self._plugin_info['type'],
                                                                    self._plugin_info['name'])
                        SendingProcess._logger.warning(_message)
                else:
                    SendingProcess._logger.info(_MESSAGES_LIST["i000005"])
                    exec_sending_process = False
            else:
                SendingProcess._logger.info(_MESSAGES_LIST["i000003"])
        except Exception as _ex:
            _message = _MESSAGES_LIST["e000004"].format(str(_ex))
            SendingProcess._logger.error(_message)
            await self._audit.failure(self._AUDIT_CODE, {"error - on start": _message})
            raise
        # The list of unique reading payloads for the asset tracker
        self._tracked_assets = []
        return exec_sending_process

    async def run(self):
        global _log_performance
        global _LOGGER
        # Sets up the signal handlers to properly handle termination
        # a) SIGTERM - 15 : kill or system shutdown
        signal.signal(signal.SIGTERM, SendingProcess._signal_handler)
        # Command line parameter handling
        self._log_performance, self._debug_level = handling_input_parameters()
        _log_performance = self._log_performance
        try:
            self._storage_async = StorageClientAsync(self._core_management_host, self._core_management_port)
            self._readings = ReadingsStorageClientAsync(self._core_management_host, self._core_management_port)
            self._audit = AuditLogger(self._storage_async)
        except Exception as ex:
            SendingProcess._logger.exception(_MESSAGES_LIST["e000023"].format(str(ex)))
            sys.exit(1)
        else:
            SendingProcess._logger.removeHandler(SendingProcess._logger.handle)
            logger_name = _MODULE_NAME + "_" + self._name
            SendingProcess._logger = logger.setup(logger_name,
                                                  level=logging.INFO if self._debug_level in [None, 0, 1] else logging.DEBUG)
            _LOGGER = SendingProcess._logger
        try:
            is_started = await self._start()
            if is_started:
                await self.send_data()
            self.stop()
            SendingProcess._logger.info("Execution completed.")
            sys.exit(0)
        except Exception as ex:
            SendingProcess._logger.exception(_MESSAGES_LIST["e000002"].format(str(ex)))
            sys.exit(1)

    def stop(self):
        """ Terminates the sending process and the related plugin"""
        try:
            self._plugin.plugin_shutdown(self._plugin_handle)
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000007"])
            self._event_loop.run_until_complete(
                self._audit.failure(self._AUDIT_CODE, {"error - on stop": _MESSAGES_LIST["e000007"]}))
            raise
        SendingProcess._logger.info("Stopped")
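# A minimal sketch of the signal-driven termination used by run()/send_data() above:
# the SIGTERM handler only sets a flag, and the main loop polls that flag and exits
# cleanly. Self-contained and illustrative; all names are assumptions.
import signal
import time

_stop_requested = False


def _sketch_signal_handler(signum, frame):
    global _stop_requested
    _stop_requested = True                 # do no real work inside a signal handler


def _sketch_main_loop(duration=10, poll_interval=0.5):
    signal.signal(signal.SIGTERM, _sketch_signal_handler)
    start = time.time()
    while time.time() - start < duration:
        if _stop_requested:
            break                          # graceful exit on SIGTERM
        time.sleep(poll_interval)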
async def add_configuration_item(request):
    """
    Args:
        request: A JSON object that defines the config item and has the keys
                 default, type, description and, optionally, value

    Returns:
        JSON response with a message key

    :Example:
        curl -d '{"default": "true", "description": "Test description", "type": "boolean"}' -X POST https://localhost:1995/fledge/category/{category_name}/{new_config_item} --insecure
        curl -d '{"default": "true", "description": "Test description", "type": "boolean", "value": "false"}' -X POST https://localhost:1995/fledge/category/{category_name}/{new_config_item} --insecure
    """
    category_name = request.match_info.get('category_name', None)
    new_config_item = request.match_info.get('config_item', None)
    category_name = urllib.parse.unquote(category_name) if category_name is not None else None
    new_config_item = urllib.parse.unquote(new_config_item) if new_config_item is not None else None
    try:
        storage_client = connect.get_storage_async()
        cf_mgr = ConfigurationManager(storage_client)
        data = await request.json()
        if not isinstance(data, dict):
            raise ValueError('Data payload must be a dictionary')
        # If the payload has no 'value' key, default it to the 'default' value before validation
        if data.get('value') is None:
            data.update({'value': data.get('default')})
        config_item_dict = {new_config_item: data}
        # Validate the configuration category value
        await cf_mgr._validate_category_val(category_name=category_name,
                                            category_val=config_item_dict,
                                            set_value_val_from_default_val=False)
        # Validate the category
        category = await cf_mgr.get_category_all_items(category_name)
        if category is None:
            raise NameError("No such Category found for {}".format(category_name))
        # Check whether the config item is already in use
        if new_config_item in category.keys():
            raise KeyError("Config item is already in use for {}".format(category_name))
        # Merge the category values with keep_original_items True
        merge_cat_val = await cf_mgr._merge_category_vals(config_item_dict, category, keep_original_items=True)
        # Update the category value in storage
        payload = PayloadBuilder().SET(value=merge_cat_val).WHERE(["key", "=", category_name]).payload()
        result = await storage_client.update_tbl("configuration", payload)
        response = result['response']
        # Log a CONAD audit entry for the new config item
        audit = AuditLogger(storage_client)
        audit_details = {
            'category': category_name,
            'item': new_config_item,
            'value': config_item_dict
        }
        await audit.information('CONAD', audit_details)
    except (KeyError, ValueError, TypeError) as ex:
        raise web.HTTPBadRequest(reason=str(ex))
    except NameError as ex:
        raise web.HTTPNotFound(reason=str(ex))
    except Exception as ex:
        raise web.HTTPInternalServerError(reason=str(ex))
    return web.json_response({
        "message": "{} config item has been saved for {} category".format(new_config_item, category_name)
    })
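# An equivalent of the curl examples in the docstring above, using the requests
# library. The host/port come from the docstring; the category and item names are
# placeholders, not values mandated by the API.
import requests

resp = requests.post(
    "https://localhost:1995/fledge/category/{category_name}/{new_config_item}".format(
        category_name="rest_api", new_config_item="testItem"),
    json={"default": "true", "description": "Test description", "type": "boolean"},
    verify=False)                          # equivalent of --insecure; for local testing only
print(resp.json())                         # {'message': 'testItem config item has been saved for rest_api category'}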