def add_ambiguous_request(db_session, new_request_url: str) -> Optional[AmbiguousRequest]:
    """
    Adds a new ambiguous request entry to the database if it doesn't already exist.

    Args:
        db_session: the database session to use for db actions
        new_request_url: the requested url of the ambiguous request

    Returns:
        the instance of the new ambiguous request or None if it already exists
    """
    new_ambiguous, is_new = db_get_or_create(db_session, AmbiguousRequest, request=new_request_url)

    if is_new:
        db_session.commit()
        Logger() \
            .event(category="database", action="ambiguous request added") \
            .log(original=f"Added new ambiguous request with id: {new_ambiguous.id}") \
            .out(severity=Severity.INFO)
        return new_ambiguous
    else:
        Logger() \
            .event(category="database", action="ambiguous request added") \
            .log(original="Ambiguous request already exists") \
            .out(severity=Severity.INFO)
        return None
def util_new_hs_db_version_callback_test(self, new_version: str):
    """
    The function is the same as util_new_hs_db_version_callback(). The only difference
    between the two is that the test variant only loads the database on the management pod.
    All of the worker pods are left untouched.

    Args:
        new_version: the new version of the just compiled db
    """
    if new_version == self.current_hs_db_version:
        return
    self.current_hs_db_version = new_version

    Logger().event(category="synchronizer",
                   action="synchronizer new hs db version test",
                   dataset=f"new hs db version: {new_version}").out(severity=Severity.INFO)

    # Metrics
    HYPERSCAN_DB_COMPILED_TOTAL.labels(self.configuration.node_type).inc()
    HYPERSCAN_DB_VERSION.labels(self.configuration.node_type).set(new_version)

    # Trigger management update
    management = K8sManager().get_management_pod()
    if management and management.reload_hs_db():
        Logger().event(
            category="synchronizer",
            action="synchronizer management update test",
            dataset=f"triggered management update for new hs db version: {new_version}"
        ).out(severity=Severity.INFO)
def delete_ambiguous_request(db_session, ambiguous_id: int) -> bool:
    """
    Deletes an ambiguous request with a given id from the database.
    It is used when the ambiguous request is fixed.

    Args:
        db_session: the database session to use for db actions
        ambiguous_id: the id of the ambiguous request to delete

    Returns:
        True if the ambiguous request was deleted successfully, False if it was not found
    """
    result: AmbiguousRequest = get_model_by_id(db_session, AmbiguousRequest, ambiguous_id)

    if result:
        db_session.delete(result)
        db_session.commit()
        Logger() \
            .event(category="database", action="ambiguous request deleted") \
            .log(original=f"Deleted ambiguous request with id: {ambiguous_id}") \
            .out(severity=Severity.INFO)
        return True

    Logger() \
        .event(category="database", action="ambiguous request deleted") \
        .log(original=f"No ambiguous request with id: {ambiguous_id}. Nothing was deleted.") \
        .out(severity=Severity.INFO)
    return False
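# A minimal usage sketch for the two helpers above. How the session is obtained and the
# import location of DatabaseManager are assumptions for illustration; only
# add_ambiguous_request() and delete_ambiguous_request() are taken from this module.
def _example_ambiguous_request_flow():
    from redirectory.libs_int.database import DatabaseManager  # assumed import path

    db_session = DatabaseManager().get_session()
    try:
        entry = add_ambiguous_request(db_session, "http://example.com/ambiguous/path")
        if entry is None:
            print("This request was already recorded as ambiguous")
        else:
            print(f"Recorded ambiguous request with id: {entry.id}")
            # Once the conflicting rules have been fixed, the entry can be removed again:
            delete_ambiguous_request(db_session, entry.id)
    finally:
        DatabaseManager().return_session(db_session)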
def _load_hs_db(path: str):
    if not os.path.isfile(path):
        Logger() \
            .error(message=f"File at path: {path} does not exist") \
            .out(severity=Severity.ERROR)
        return None

    with io.open(path, "rb") as bin_file:
        bin_data = bin_file.read()

    Logger() \
        .event(category="hyperscan", action="hyperscan database loaded", dataset=path) \
        .out(severity=Severity.INFO)
    return hs.loads(bytearray(bin_data))
def configuration(monkeypatch, mocker):
    """
    This py.test fixture mocks the Configuration object used by the application
    """
    def pass_mock(*args):
        mock = mocker.MagicMock()

        mock.values.deployment = 'test'
        mock.values.log_level = 'debug'
        mock.values.node_type = 'management'

        mock.values.directories.data = '/home/test/redirectory_data'
        mock.values.directories.ui = '/home/test/redirectory_ui'

        mock.values.service.ip = '0.0.0.0'
        mock.values.service.port = 8001
        mock.values.service.metrics_port = 8002

        mock.values.database.type = 'sqlite'
        mock.values.database.path = 'redirectory_sqlite.db'

        mock.values.hyperscan.domain_db = 'hs_compiled_domain.hsd'
        mock.values.hyperscan.rules_db = 'hs_compiled_rules.hsd'

        mock.values.kubernetes.namespace = 'redirectory'
        mock.values.kubernetes.worker_selector = ''
        mock.values.kubernetes.management_selector = ''

        return mock

    from redirectory.libs_int.config import Configuration
    monkeypatch.setattr(Configuration, '__new__', pass_mock)

    from kubi_ecs_logger import Logger, Severity
    Logger().severity_output_level = Severity[str(Configuration().values.log_level).upper()]
def search_domain(self, domain: str, domain_search_ctx: SearchContext = None) -> Optional[SearchContext]:
    """
    Searches a domain in the hyperscan domain database. Creates a SearchContext object
    and runs a scan for the domain. Also handles a cancellation of the search, which is
    a hyperscan error with error code -3.
    If the search doesn't find any matches, None is returned.
    If there are matches, a SearchContext object is returned.

    Args:
        domain: the domain to search for
        domain_search_ctx: SearchContext to be passed to Hyperscan

    Returns:
        None or a SearchContext object
    """
    if domain_search_ctx is None:
        domain_search_ctx = SearchContext(original=domain)

    try:
        self.database.domain_db.scan(domain, self._match_event_handler, context=domain_search_ctx)
    except hyperscan.error as e:
        if self.get_error_code(e) != -3:
            raise e

    Logger() \
        .event(category="hyperscan", action="hyperscan domain search successful",
               dataset=str(domain_search_ctx.matched_ids)) \
        .out(severity=Severity.DEBUG)
    return domain_search_ctx
def search_rule(self, rule: str, rule_search_ctx: SearchContext = None) -> Optional[SearchContext]:
    """
    Searches a rule in the hyperscan rule database. Really similar to the search_domain() method.
    If the search doesn't find any matches, None is returned.
    If there are matches, a SearchContext object is returned.

    Args:
        rule: the rule to search for. {domain_id}/{path}
        rule_search_ctx: SearchContext to be passed to Hyperscan

    Returns:
        None or SearchContext object
    """
    if rule_search_ctx is None:
        rule_search_ctx = SearchContext(original=rule)

    try:
        self.database.rules_db.scan(rule, self._match_event_handler, context=rule_search_ctx)
    except hyperscan.error as e:
        if self.get_error_code(e) != -3:
            raise e

    Logger() \
        .event(category="hyperscan", action="hyperscan rule search successful",
               dataset=str(rule_search_ctx.matched_ids)) \
        .out(severity=Severity.DEBUG)
    return rule_search_ctx
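# Hedged sketch of how the two search methods above can be chained: the domain search yields
# matched domain ids, and each id is combined with the request path into the "{domain_id}/{path}"
# form mentioned in search_rule()'s docstring. This glue code is illustrative only and is not
# the project's actual search() implementation.
def _example_two_stage_search(hs_manager, host: str, path: str):
    matched_rule_ids = set()

    domain_ctx = hs_manager.search_domain(host)
    if domain_ctx is None:
        return matched_rule_ids

    for domain_id in domain_ctx.matched_ids:
        # Rule patterns are keyed as "{domain_id}/{path}" according to the docstring above.
        rule_ctx = hs_manager.search_rule(f"{domain_id}/{path}")
        if rule_ctx is not None:
            matched_rule_ids.update(rule_ctx.matched_ids)

    return matched_rule_ids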
def run(self):
    HsManager().database.load_database()

    # Add the redirect
    from redirectory.services import WorkerRedirect
    self.api.add_resource(WorkerRedirect, "/", "/<path:content>")

    # Get needed namespaces
    worker_ns = NamespaceManager().get_namespace("worker")
    status_ns = NamespaceManager().get_namespace("status")

    # Add namespaces to api
    self.api.add_namespace(worker_ns)
    self.api.add_namespace(status_ns)

    # Init api with application
    self.api.init_app(self.application)

    # Log
    Logger() \
        .event(category="runnable", action="service configured") \
        .service(name="worker").out(severity=Severity.INFO)

    # Run according to configuration
    if self.config.deployment == "prod":
        self._run_production(is_worker=True)
    elif self.config.deployment == "dev":
        self._run_development()
    elif self.config.deployment == "test":
        self._run_test()
def _after_request(response: Response):
    """
    Logs every request that has been processed by the application.
    Logs Prometheus metrics for every request except for the status updates
    (health and readiness checks).

    Args:
        response: the response to be returned

    Returns:
        the unmodified response
    """
    config = Configuration().values

    Logger() \
        .event(category="requests", action="request received") \
        .url(path=request.path, domain=request.host) \
        .source(ip=request.remote_addr) \
        .http_response(status_code=response.status_code) \
        .out(severity=Severity.INFO)

    if "status" not in request.path:
        REQUESTS_TOTAL.labels(config.node_type, str(response.status_code)).inc()

    return response
def _run_development(self):
    DatabaseManager().create_db_tables()

    Logger() \
        .event(category="runnable", action="run development") \
        .server(ip=self.host, port=self.port) \
        .out(severity=Severity.INFO)

    # CORS only in development
    @self.application.after_request
    def _after_request(response):
        response.headers.add('Access-Control-Allow-Origin', '*')
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
        response.headers.add('Access-Control-Max-Age', 21600)
        return response

    # Start metric server
    start_metrics_server()

    # Run application
    self.application.run(**{
        "host": self.host,
        "port": self.port,
        "debug": True,
        "use_reloader": False
    })
def util_new_hs_db_version_callback(self, new_version: str):
    """
    This function is passed as an event callback function to the CompilerJob.
    When the CompilerJob is done with compiling and saving the Hyperscan databases,
    if specified, it can call this function.
    The function updates some Prometheus metrics and updates both the management pod
    and the worker pods.

    Args:
        new_version: the new version of the just compiled db
    """
    if new_version == self.current_hs_db_version:
        return
    self.current_hs_db_version = new_version

    Logger().event(category="synchronizer",
                   action="synchronizer new hs db version",
                   dataset=f"new hs db version: {new_version}").out(severity=Severity.INFO)

    # Metrics
    HYPERSCAN_DB_COMPILED_TOTAL.labels(self.configuration.node_type).inc()
    HYPERSCAN_DB_VERSION.labels(self.configuration.node_type).set(new_version)

    # Trigger worker updates
    workers = K8sManager().get_worker_pods()
    if workers:
        Logger().event(
            category="synchronizer",
            action="synchronizer worker updates",
            dataset=f"triggered worker updates for new hs db version: {new_version}"
        ).out(severity=Severity.INFO)
        self.management_update_workers(workers)

    # Trigger management update
    management = K8sManager().get_management_pod()
    if management and management.reload_hs_db():
        Logger().event(
            category="synchronizer",
            action="synchronizer management update",
            dataset=f"triggered management update for new hs db version: {new_version}"
        ).out(severity=Severity.INFO)
def import_into_db(self):
    """
    Imports all the rules in the given csv file into the database as RedirectRules.
    If a rule is a duplicate, the import fails.
    If there is an error while parsing the csv, all changes are rolled back and
    the whole import is marked as failed.
    """
    db_session = DatabaseManager().get_session()

    try:
        row_counter = 1
        for row in self.csv_reader:
            row_counter += 1
            assert len(row) == len(self.columns), \
                f"Entry at line: {row_counter} has a different amount of arguments than expected. " \
                f"Expected: {len(self.columns)} instead got: {len(row)}"

            self.data_template["domain"] = row[0]
            self.data_template["domain_is_regex"] = self._get_bool_from_str(row[1])
            self.data_template["path"] = row[2]
            self.data_template["path_is_regex"] = self._get_bool_from_str(row[3])
            self.data_template["destination"] = row[4]
            self.data_template["destination_is_rewrite"] = self._get_bool_from_str(row[5])
            self.data_template["weight"] = int(row[6])

            result = add_redirect_rule(db_session, **self.data_template, commit=False)
            if isinstance(result, int) and result == 2:  # 2 means already exists
                raise AssertionError(f"Entry at line: {row_counter} already exists in the database")
    except AssertionError as e:
        Logger() \
            .event(category="import", action="import failed") \
            .error(message=str(e)) \
            .out(severity=Severity.ERROR)
        db_session.rollback()
    except Exception as e:
        Logger() \
            .event(category="import", action="import failed") \
            .error(message=str(e)) \
            .out(severity=Severity.CRITICAL)
        db_session.rollback()
    else:
        Logger() \
            .event(category="import", action="import successful",
                   dataset=f"Rules added from import: {row_counter - 1}") \
            .out(severity=Severity.INFO)
        db_session.commit()

    DatabaseManager().return_session(db_session)
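# For reference, import_into_db() above reads seven columns per CSV row, in the order
# row[0]..row[6]. The concrete values below are made-up illustrations; only the column
# order and types are taken from the code, and the exact true/false spelling accepted
# by _get_bool_from_str() is an assumption:
#
#   domain, domain_is_regex, path, path_is_regex, destination, destination_is_rewrite, weight
#   example.com, false, /old-page, false, https://example.com/new-page, false, 100
#   example.org, false, /docs/(.*), true, https://docs.example.org/\1, true, 50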
def post(self):
    url = request.get_json()["request_url"]
    host, path = self.parse_url(url)

    # Init managers and sessions
    hs_manager = HsManager()
    db_session = DatabaseManager().get_session()

    # Search
    try:
        start_search_time = time()
        search_data = hs_manager.search(domain=host, path=path, is_test=True)
        end_search_time = time()
    except AssertionError as e:
        Logger() \
            .event(category="hyperscan", action="hyperscan search") \
            .error(message=str(e)) \
            .out(severity=Severity.ERROR)
        DatabaseManager().return_session(db_session)
        return api_error(message="Something went wrong during Hyperscan search!",
                         errors=str(e), status_code=400)

    # Convert ids into models
    domain_rules_data = {}
    redirect_rules_data = {}

    d_r_map = search_data["domain_rule_map"]
    for domain_id in d_r_map.keys():
        domain_rule = get_model_by_id(db_session, DomainRule, domain_id)
        domain_rules_data[domain_id] = db_encode_model(domain_rule)

        for rule_id in d_r_map[domain_id]:
            redirect_rule = get_model_by_id(db_session, RedirectRule, rule_id)
            redirect_rules_data[rule_id] = db_encode_model(redirect_rule, expand=True)

    # Get final result
    final_redirect_rule, is_ambiguous = HsManager.pick_result(db_session, list(redirect_rules_data.keys()))

    # Fill in test data
    search_data["final_result_id"] = final_redirect_rule.id if final_redirect_rule is not None else None
    search_data["is_ambiguous"] = is_ambiguous
    search_data["time"] = str(end_search_time - start_search_time)

    DatabaseManager().return_session(db_session)
    return make_response(jsonify({
        "domain_rules": domain_rules_data,
        "redirect_rules": redirect_rules_data,
        "search_data": search_data
    }), 200)
def worker_sync_files(self):
    """
    This function gets called when the application is in worker mode.
    Stops the readiness checks from succeeding while the databases are downloaded
    and reloaded. After that it updates a couple of Prometheus metrics and logs the result.
    """
    Logger() \
        .event(category="synchronizer", action="synchronizer sync started") \
        .out(severity=Severity.INFO)

    db_manager = DatabaseManager()
    hs_manager = HsManager()

    # Mark hs database as not loaded which causes ready check to fail
    hs_manager.database.is_loaded = False

    # Download sync files and write to disc
    sync_zip_file = self.management_get_sync_files()
    if sync_zip_file is None:
        return
    self.util_save_sync_zip_file(sync_zip_file)

    # Reload sql
    db_manager.reload()

    # Reload hs
    hs_manager.database.reload_database()
    new_hs_db_version = hs_manager.database.db_version

    # Metrics
    HYPERSCAN_DB_RELOADED_TOTAL.labels(self.configuration.node_type).inc()
    HYPERSCAN_DB_VERSION.labels(self.configuration.node_type).set(new_hs_db_version)

    # Log
    Logger() \
        .event(category="synchronizer", action="synchronizer sync complete",
               dataset=f"New hs db version: {new_hs_db_version}") \
        .out(severity=Severity.INFO)
def delete(self, db_session, safe: bool = True):
    db_session.delete(self)
    db_session.commit()

    Logger() \
        .event(category="database", action="redirect rule deleted") \
        .log(original=f"Redirect rule with id: {self.id} has been deleted") \
        .out(severity=Severity.DEBUG)

    self.domain_rule.delete(db_session, safe=safe)
    self.path_rule.delete(db_session, safe=safe)
    self.destination_rule.delete(db_session, safe=safe)
def post(self):
    args = request.get_json()
    name = args["name"]
    ip = args["ip"]
    port = args["port"]

    worker_pod = WorkerPod(name=name, ip=ip, port=port)

    if not worker_pod.get_status_health():
        message = "Unable to update worker pod."
        error = f"Pod with name: {worker_pod.name} and address: {worker_pod.ip}:{worker_pod.port} " \
                "is unreachable"
        Logger() \
            .event(category="request", action="request failed", dataset=message) \
            .error(message=error) \
            .out(severity=Severity.ERROR)
        return api_error(message=message, errors=error, status_code=400)

    sync = Synchronizer()
    did_sync_start = sync.management_update_worker(worker_pod)

    if not did_sync_start:
        message = "Unable to update worker pod"
        error = f"Pod with name: {worker_pod.name} and address: {worker_pod.ip}:{worker_pod.port} " \
                f"is reachable but did not respond correctly to sync worker request"
        Logger() \
            .event(category="synchronizer", action="synchronizer request failed", dataset=message) \
            .error(message=error) \
            .out(severity=Severity.ERROR)
        return api_error(message=message, errors=error, status_code=400)

    return make_response(jsonify({
        "message": f"New hyperscan DB is updated on the specified worker "
                   f"with address: {worker_pod.ip}:{worker_pod.port}",
        "status": "done"
    }), 200)
def delete(self, db_session, safe: bool = True):
    if safe:
        from redirectory.libs_int.database import get_usage_count
        if get_usage_count(db_session, type(self), self.id) > 0:
            return

    db_session.delete(self)
    db_session.commit()

    Logger() \
        .event(category="database", action="domain deleted") \
        .log(original=f"Domain with id: {self.id} has been deleted") \
        .out(severity=Severity.DEBUG)
def run(self):
    # Load hyperscan database because of test in UI
    HsManager().database.load_database()

    # Get needed namespaces
    management_ns = NamespaceManager().get_namespace("management")
    status_ns = NamespaceManager().get_namespace("status")

    # Add the ui
    from redirectory.services import ManagementUI
    self.api.add_resource(ManagementUI, "/", "/<path:path>")

    # Log ui folder
    Logger().event(category="ui", action="ui loaded",
                   dataset=self.config.directories.ui).out(severity=Severity.INFO)

    # Add namespaces to api
    self.api.add_namespace(management_ns)
    self.api.add_namespace(status_ns)

    # Init api with application
    self.api.init_app(self.application)

    # Log
    Logger() \
        .event(category="runnable", action="service configured") \
        .service(name="management").out(severity=Severity.INFO)

    # Run according to configuration
    if self.config.deployment == "prod":
        self._run_production()
    elif self.config.deployment == "dev":
        self._run_development()
    elif self.config.deployment == "test":
        self._run_test()
def __new__(cls):
    if cls.__instance is None:
        cls.__instance = super(Synchronizer, cls).__new__(cls)
        cls.__instance.configuration = Configuration().values

        # Load current db version from SQL
        _, current_version = get_hs_db_version()
        cls.__instance.current_hs_db_version = current_version

        Logger().event(category="synchronizer", action="synchronizer configured",
                       dataset=f"current hs db version: {current_version}").out(severity=Severity.INFO)
    return cls.__instance
def _save_hs_db(path: str, db_to_save: hs.Database):
    """
    Serializes the given Hyperscan database and writes it to a file at the given path.

    Args:
        path: the path of the file to write the serialized database to
        db_to_save: the Hyperscan database to serialize and save
    """
    assert db_to_save is not None, "Hyperscan database must not be none in order to save to file"

    serialized_db = hs.dumps(db_to_save)
    with io.open(path, "wb") as bin_file:
        bin_file.write(serialized_db)

    Logger().event(category="hyperscan", action="hyperscan database saved",
                   dataset=path).out(severity=Severity.INFO)
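# Hedged round-trip sketch for _save_hs_db() and _load_hs_db() (defined earlier): compile a
# tiny Hyperscan database, write it to disk and read it back. The pattern, id and file path
# are illustration values, and the compile() keyword arguments assume the python-hyperscan
# binding's API.
def _example_hs_db_round_trip():
    example_db = hs.Database()
    example_db.compile(expressions=[b"/example/path"], ids=[1], elements=1)

    _save_hs_db("/tmp/hs_example.hsd", example_db)

    reloaded_db = _load_hs_db("/tmp/hs_example.hsd")
    if reloaded_db is None:
        raise RuntimeError("Expected the just-saved Hyperscan database file to exist")
    return reloaded_db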
def __new__(cls):
    if cls.__instance is None:
        cls.__instance = super(DatabaseManager, cls).__new__(cls)

        connection_string = get_connection_string()
        cls.__instance.__engine = create_engine(connection_string, echo=False)
        cls.__instance.__base = declarative_base(bind=cls.__instance.__engine)
        cls.__instance.__session_maker = sessionmaker(expire_on_commit=True, autoflush=True)
        cls.__instance.__sessions = {}
        cls.__initialized = True

        Logger().event(category="database", action="database loaded",
                       dataset=connection_string).out(severity=Severity.INFO)
    return cls.__instance
def get(self):
    sync = Synchronizer()

    try:
        zip_sync_file = sync.util_get_sync_files_as_zip()
    except FileNotFoundError as e:
        Logger() \
            .event(category="sync", action="sync download failed",
                   dataset="File not found while compressing into zip") \
            .error(message=str(e)) \
            .out(severity=Severity.ERROR)
        return api_error(message="Unable to gather needed files",
                         errors="Some file/s needed for syncing is/are not available. "
                                "If the error persists contact the administrator",
                         status_code=400)

    return send_file(zip_sync_file, mimetype='zip',
                     attachment_filename="sync.zip", as_attachment=True)
def start_metrics_server():
    """
    Starts a http server on a port specified in the configuration file and exposes
    Prometheus metrics on it. Also removes the GC_COLLECTOR metrics because they are
    not really needed.
    """
    # Remove garbage collection metrics
    REGISTRY.unregister(GC_COLLECTOR)

    # Gather configurations
    config = Configuration().values
    ip = config.service.ip
    metrics_port = config.service.metrics_port

    # Start server
    start_wsgi_server(metrics_port)

    # Log
    Logger() \
        .event(category="runnable", action="run metrics") \
        .server(ip=ip, port=metrics_port) \
        .out(severity=Severity.INFO)
def _run_production(self, is_worker: bool = False):
    DatabaseManager().create_db_tables()

    if is_worker:
        # Start loading hyperscan database from management if it exists
        sync = Synchronizer()
        sync.worker_sync_files()

    service_options = {
        "bind": f"{self.host}:{self.port}",
        "loglevel": "critical",
        "worker_class": "gthread",
        "threads": 2 if is_worker else 10
    }

    Logger() \
        .event(category="runnable", action="run production") \
        .server(ip=self.host, port=self.port) \
        .out(severity=Severity.INFO)

    # Run application
    GunicornServer(self.application, service_options).run()
def get(self, content=None):
    del content
    host = request.host.split(":")[0]
    path = request.path

    # Stop spamming for favicon pls
    if path == "/favicon.ico":
        self.page_404()

    # Init managers and sessions
    hs_manager = HsManager()
    db_session = DatabaseManager().get_session()

    # Search
    try:
        matching_ids = None
        with HYPERSCAN_REQUESTS_REDIRECTED_DURATION_SECONDS.time():  # Metric
            matching_ids = hs_manager.search(domain=host, path=path)
        is_404 = matching_ids is None
    except AssertionError as e:
        Logger() \
            .event(category="hyperscan", action="hyperscan search failed") \
            .error(message=str(e)) \
            .out(severity=Severity.ERROR)
        DatabaseManager().return_session(db_session)
        is_404 = True

    if is_404:
        REQUESTS_REDIRECTED_TOTAL.labels("worker", "404", "not_found").inc()
        self.page_404()

    # Get final result
    with DB_LOOKUP_REQUESTS_REDIRECTED_DURATION_SECONDS.time():  # Metric
        final_redirect_rule, is_ambiguous = HsManager.pick_result(db_session, list(matching_ids))

    final_destination_url = final_redirect_rule.destination_rule.destination_url
    is_back_ref: bool = final_redirect_rule.destination_rule.is_rewrite

    # Do back reference
    if is_back_ref:
        final_destination_url = self.apply_back_reference(path, final_redirect_rule)

    # Add ambiguous request to db if needed
    if is_ambiguous or final_destination_url is None:
        from redirectory.libs_int.kubernetes import K8sManager, ManagementPod
        try:
            management_pod: ManagementPod = K8sManager().get_management_pod()
            management_pod.add_ambiguous_request(request.url)
        except AssertionError as e:
            Logger() \
                .event(category="sync", action="sync ambiguous request",
                       dataset="Unable to sync ambiguous request with the management pod") \
                .error(message=str(e)) \
                .out(severity=Severity.ERROR)

    if final_destination_url is None:
        self.page_404()

    # Sanitize final redirect url
    final_destination_url = self.sanitize_outgoing_redirect(final_destination_url)

    DatabaseManager().return_session(db_session)

    if is_ambiguous:
        REQUESTS_REDIRECTED_TOTAL.labels("worker", "301", "ambiguous").inc()
    elif is_back_ref:
        REQUESTS_REDIRECTED_TOTAL.labels("worker", "301", "back_ref").inc()
    else:
        REQUESTS_REDIRECTED_TOTAL.labels("worker", "301", "normal").inc()

    return redirect(final_destination_url, 301)
def add_redirect_rule(db_session, domain: str, domain_is_regex: bool, path: str, path_is_regex: bool,
                      destination: str, destination_is_rewrite: bool, weight: int,
                      commit: bool = True) -> Union[RedirectRule, int]:
    """
    Creates a new Redirect Rule from all of the given arguments.
    If a domain, path or destination is already in use it is simply re-used in the new rule.
    Before all that it validates rules which are rewrites to see if they are configured correctly.
    Depending on where the check failed, different integers will be returned.

    Args:
        db_session: the database session to use for the DB actions
        domain: the domain of the new rule
        domain_is_regex: is the domain a regex or not
        path: the path of the new rule
        path_is_regex: is the path a regex or not
        destination: the destination of the new rule
        destination_is_rewrite: is the destination a rewrite or not
        weight: the weight of the new rule
        commit: should the function commit the new rule or just flush for ids

    Returns:
        Redirect Rule - if all went well
        1 (int) - if the check failed for rewrite rule
        2 (int) - if the check for already existing rule failed
    """
    if destination_is_rewrite and not validate_rewrite_rule(path, path_is_regex, destination):
        Logger() \
            .event(category="database", action="rule added") \
            .log(original="Rewrite rule failed validation check") \
            .out(severity=Severity.DEBUG)
        return 1

    domain_instance, is_new_domain = db_get_or_create(db_session, DomainRule,
                                                      rule=domain, is_regex=domain_is_regex)
    path_instance, is_new_path = db_get_or_create(db_session, PathRule,
                                                  rule=path, is_regex=path_is_regex)
    dest_instance, is_new_dest = db_get_or_create(db_session, DestinationRule,
                                                  destination_url=destination,
                                                  is_rewrite=destination_is_rewrite)

    new_redirect_rule = RedirectRule()
    new_redirect_rule.domain_rule_id = domain_instance.id
    new_redirect_rule.path_rule_id = path_instance.id
    new_redirect_rule.destination_rule_id = dest_instance.id
    new_redirect_rule.weight = weight
    db_session.add(new_redirect_rule)

    try:
        if commit:
            db_session.commit()
        else:
            db_session.flush()
        Logger() \
            .event(category="database", action="rule added") \
            .log(original=f"Added new redirect rule with id: {new_redirect_rule.id}") \
            .out(severity=Severity.DEBUG)
        return new_redirect_rule
    except IntegrityError:
        Logger() \
            .event(category="database", action="rule added") \
            .log(original="Rule already exists") \
            .out(severity=Severity.DEBUG)
        db_session.rollback()
        return 2
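# Hedged sketch of handling add_redirect_rule()'s return values. The session handling and
# the example rule values are illustrative assumptions; the integer codes come from the
# docstring above.
def _example_add_redirect_rule(db_session):
    result = add_redirect_rule(
        db_session,
        domain="example.com", domain_is_regex=False,
        path="/old-page", path_is_regex=False,
        destination="https://example.com/new-page", destination_is_rewrite=False,
        weight=100)

    if isinstance(result, int) and result == 1:
        print("Rewrite rule failed the validation check")
    elif isinstance(result, int) and result == 2:
        print("An identical redirect rule already exists")
    else:
        print(f"Created redirect rule with id: {result.id}")
    return result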
def update_redirect_rule(db_session, redirect_rule_id: int, domain: str, domain_is_regex: bool,
                         path: str, path_is_regex: bool, destination: str,
                         destination_is_rewrite: bool, weight: int) -> Union[RedirectRule, int]:
    """
    Updates the rule with the given ID using the given arguments.
    Finds the rule specified by redirect_rule_id and updates its values accordingly.
    If everything goes correctly, the new version of the rule is returned.
    If no rule with that ID is found, an integer is returned.
    If the new rule fails the rewrite validation, an integer is returned.

    Args:
        db_session: the database session to use for db actions
        redirect_rule_id: the ID of the rule to update
        domain: the new domain of the rule
        domain_is_regex: the new status of the domain rule
        path: the new path of the rule
        path_is_regex: the new status of the path rule
        destination: the new destination of the rule
        destination_is_rewrite: the new status of the destination rule
        weight: the new weight of the rule

    Returns:
        Redirect Rule - which is the updated version if all went well
        1 (int) - rule exists but fails validation check for rewrite rule
        2 (int) - rule with this id does not exist
    """
    if destination_is_rewrite and not validate_rewrite_rule(path, path_is_regex, destination):
        Logger() \
            .event(category="database", action="rule update") \
            .log(original="The new rule updates make the rewrite rule incorrect") \
            .out(severity=Severity.DEBUG)
        return 1

    redirect_rule: RedirectRule = get_model_by_id(db_session, RedirectRule, redirect_rule_id)
    if not redirect_rule:
        Logger() \
            .event(category="database", action="rule update") \
            .log(original=f"A rule with ID: {redirect_rule_id} does not exist") \
            .out(severity=Severity.DEBUG)
        return 2

    domain_instance, _ = db_get_or_create(db_session, DomainRule,
                                          rule=domain, is_regex=domain_is_regex)
    path_instance, _ = db_get_or_create(db_session, PathRule,
                                        rule=path, is_regex=path_is_regex)
    dest_instance, _ = db_get_or_create(db_session, DestinationRule,
                                        destination_url=destination,
                                        is_rewrite=destination_is_rewrite)

    redirect_rule.domain_rule_id = domain_instance.id
    redirect_rule.path_rule_id = path_instance.id
    redirect_rule.destination_rule_id = dest_instance.id
    redirect_rule.weight = weight
    db_session.commit()

    Logger() \
        .event(category="database", action="rule update") \
        .log(original=f"A rule with ID: {redirect_rule_id} has been updated") \
        .out(severity=Severity.DEBUG)
    return redirect_rule