def project_initialization(self, instances, app=None):
    """ Custom initialization of your project

    Please define your class Initializer in
    project/YOURPROJECT/backend/initialization/initialization.py
    """

    try:
        # NOTE: this might be a pattern
        # see in meta.py:get_customizer_class
        module_path = "{}.{}.{}".format(
            CUSTOM_PACKAGE,
            'initialization',
            'initialization',
        )
        module = Meta.get_module_from_string(module_path)
        meta = Meta()
        Initializer = meta.get_class_from_string(
            'Initializer', module, skip_error=True
        )
        if Initializer is None:
            log.debug("No custom init available")
        else:
            try:
                Initializer(instances, app=app)
            except BaseException as e:
                log.error("Errors during custom initialization: {}", e)
            else:
                log.info("Vanilla project has been initialized")

    except BaseException:
        log.debug("No custom init available")
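# A minimal sketch of the hook that project_initialization() looks up; the only
# contract visible here is the constructor signature Initializer(instances, app=app).
# "myproject" is a placeholder for YOURPROJECT.
#
# project/myproject/backend/initialization/initialization.py
class Initializer:
    def __init__(self, instances, app=None):
        # 'instances' maps service names to the initialized service objects
        sql = instances.get('sqlalchemy')
        if sql is not None and app is not None:
            # create default roles, seed data, etc.
            pass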
def __init__(self):

    self.authentication_service = None
    self.authentication_name = 'authentication'
    self.task_service_name = 'celery'
    self.services_configuration = []
    self.services = {}
    self.services_classes = {}
    self.extensions_instances = {}
    self.available_services = {}
    self.meta = Meta()

    self.check_configuration()
    self.load_classes()
def extract_endpoints(self, base_dir: Path) -> List[Type[EndpointResource]]:

    endpoints_classes: List[Type[EndpointResource]] = []
    # get the last item of the path
    # normpath is required to strip the final /, if any
    base_module = base_dir.name

    apis_dir = base_dir.joinpath("endpoints")
    apiclass_module = f"{base_module}.endpoints"
    for epfile in apis_dir.glob("*.py"):

        # get module name (e.g. endpoints.filename)
        module_name = f"{apiclass_module}.{epfile.stem}"
        # Convert the module name into a module
        log.debug("Importing {}", module_name)
        module = Meta.get_module_from_string(
            module_name,
            exit_on_fail=True,
        )

        # Extract classes from the module
        # module can't be None because of exit_on_fail=True...
        # but mypy can't understand this
        classes = Meta.get_new_classes_from_module(module)  # type: ignore
        for class_name, epclss in classes.items():
            # Filter out classes without the expected data
            if (
                not hasattr(epclss, "methods")
                or epclss.methods is None
            ):  # pragma: no cover
                continue

            log.debug("Importing {} from {}", class_name, module_name)

            skip, dependency = self.skip_endpoint(epclss.depends_on)

            if skip:
                log.debug(
                    "Skipping '{} {}' due to unmet dependency: {}",
                    module_name,
                    class_name,
                    dependency,
                )
                continue

            endpoints_classes.append(epclss)

    return endpoints_classes
def custom_post_handle_user_input(self, user_node, input_data):
    module_path = "{}.initialization.initialization".format(CUSTOM_PACKAGE)
    module = Meta.get_module_from_string(module_path)

    meta = Meta()
    Customizer = meta.get_class_from_string('Customizer', module, skip_error=True)
    if Customizer is None:
        log.debug("No user properties customizer available")
    else:
        try:
            Customizer().custom_post_handle_user_input(
                self, user_node, input_data)
        except BaseException as e:
            log.error("Unable to customize user properties: {}", e)
def extract_endpoints(self, base_dir):

    endpoints_classes = []
    # get the last item of the path
    # normpath is required to strip the final /, if any
    base_module = os.path.basename(os.path.normpath(base_dir))

    apis_dir = os.path.join(base_dir, "endpoints")
    apiclass_module = f"{base_module}.endpoints"
    for epfiles in glob.glob(f"{apis_dir}/*.py"):

        # get module name (e.g. endpoints.filename)
        module_file = os.path.basename(os.path.splitext(epfiles)[0])
        module_name = f"{apiclass_module}.{module_file}"
        # Convert the module name into a module
        log.debug("Importing {}", module_name)
        module = Meta.get_module_from_string(
            module_name,
            exit_on_fail=True,
        )

        # Extract classes from the module
        # module can't be None because of exit_on_fail=True...
        # but mypy can't understand this
        classes = Meta.get_new_classes_from_module(module)  # type: ignore
        for class_name, epclss in classes.items():
            # Filter out classes without the expected data
            if not hasattr(epclss, "methods") or epclss.methods is None:
                continue

            log.debug("Importing {} from {}.{}", class_name, apis_dir, module_file)

            skip, dependency = self.skip_endpoint(epclss.depends_on)

            if skip:
                log.debug(
                    "Skipping '{} {}' due to unmet dependency: {}",
                    module_name,
                    class_name,
                    dependency,
                )
                continue

            endpoints_classes.append(epclss)

    return endpoints_classes
def custom_user_properties(self, userdata):
    module_path = "{}.initialization.initialization".format(CUSTOM_PACKAGE)
    module = Meta.get_module_from_string(module_path)

    meta = Meta()
    Customizer = meta.get_class_from_string('Customizer', module, skip_error=True)
    if Customizer is None:
        log.debug("No user properties customizer available")
    else:
        try:
            userdata = Customizer().custom_user_properties(userdata)
        except BaseException as e:
            log.error("Unable to customize user properties: {}", e)

    if "email" in userdata:
        userdata["email"] = userdata["email"].lower()

    return userdata
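# A matching sketch of the optional Customizer hook resolved above; the method
# names and signatures follow the call sites in custom_user_properties() and
# custom_post_handle_user_input(). The body is illustrative only.
class Customizer:
    def custom_user_properties(self, userdata):
        # adjust or enrich the incoming properties, then hand them back
        userdata.setdefault("declared_institution", "unknown")
        return userdata

    def custom_post_handle_user_input(self, caller, user_node, input_data):
        # react to the saved user (e.g. link extra nodes);
        # the return value is not used by the caller
        pass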
def get_class(connector_module: Optional[ModuleType]) -> Optional[Type]:
    if not connector_module:  # pragma: no cover
        # return None to match the declared Optional[Type] return type
        return None

    classes = Meta.get_new_classes_from_module(connector_module)
    for connector_class in classes.values():
        if issubclass(connector_class, Connector):
            return connector_class

    return None  # pragma: no cover
def load_models(connectors: List[str]) -> None:

    for connector in connectors:
        connector_path = os.path.join(
            ABS_RESTAPI_PATH, CONNECTORS_FOLDER, connector
        )

        # Models are strictly core-dependent. If you need to enable models starting
        # from a custom connector this function has to be refactored:
        # 1) the existence of models.py is only checked in ABS_RESTAPI_PATH/connector
        # 2) the core model is mandatory
        # 3) the Connector class, used to inject models, is taken from BACKEND_PACKAGE
        if os.path.isfile(os.path.join(connector_path, "models.py")):
            log.debug("Loading models from {}", connector)

            base_models = Meta.import_models(
                connector, BACKEND_PACKAGE, mandatory=True
            )
            if EXTENDED_PACKAGE == EXTENDED_PROJECT_DISABLED:
                extended_models = {}
            else:
                extended_models = Meta.import_models(connector, EXTENDED_PACKAGE)
            custom_models = Meta.import_models(connector, CUSTOM_PACKAGE)

            log.info(
                "Models loaded from {}: core {}, extended {}, custom {}",
                connector,
                len(base_models),
                len(extended_models),
                len(custom_models),
            )

            connector_module = Connector.get_module(connector, BACKEND_PACKAGE)
            connector_class = Connector.get_class(connector_module)
            if connector_class:
                connector_class.set_models(
                    base_models, extended_models, custom_models
                )
            else:  # pragma: no cover
                log.error("Connector class not found for {}", connector)
        else:
            log.debug("No model found for {}", connector)
def wrapper(*args, **kwargs):
    # Recover the auth object
    auth_type, token = self.get_authorization_token(
        allow_access_token_parameter=allow_access_token_parameter)

    # Base header for errors
    headers = {HTTPAUTH_AUTH_HEADER: self.authenticate_header()}

    # Internal API 'self' reference
    decorated_self = Meta.get_self_reference_from_args(*args)

    if auth_type is None or auth_type.lower() != self._scheme.lower():
        # Wrong authentication string
        msg = (
            "Missing credentials in headers, e.g. {}: '{} TOKEN'".format(
                HTTPAUTH_AUTH_FIELD, HTTPAUTH_DEFAULT_SCHEME)
        )
        log.info("Unauthorized request: missing credentials")
        return decorated_self.force_response(
            errors=msg,
            code=hcodes.HTTP_BAD_UNAUTHORIZED,
            headers=headers,
        )

    # Handling OPTIONS forwarded to our application:
    # ignore headers and let go, avoid unwanted interactions with CORS
    if request.method != 'OPTIONS':

        # Check authentication
        token_fn = decorated_self.auth.verify_token
        if not self.authenticate(token_fn, token):
            # Clear the TCP receive buffer of any pending data
            log.verbose(request.data)
            # Mimic the response from a normal endpoint
            # to use the same standards
            log.info("Invalid token received '{}'", token)
            return decorated_self.force_response(
                errors="Invalid token received",
                code=hcodes.HTTP_BAD_UNAUTHORIZED,
                headers=headers)

    # Check roles
    if len(roles) > 0:
        roles_fn = decorated_self.auth.verify_roles
        if not self.authenticate_roles(roles_fn, roles, required_roles):
            log.info("Unauthorized request: missing privileges")
            return decorated_self.force_response(
                errors="You are not authorized: missing privileges",
                code=hcodes.HTTP_BAD_UNAUTHORIZED,
            )

    return func(*args, **kwargs)
def load_class_from_module(self, classname, service=None):

    if service is None:
        flaskext = ''
    else:
        flaskext = '.' + service.get('extension')

    # Try inside our extensions
    module = Meta.get_module_from_string(
        modulestring=BACKEND_PACKAGE + '.flask_ext' + flaskext,
        exit_on_fail=True
    )
    if module is None:
        log.exit("Missing {} for {}", flaskext, service)

    return getattr(module, classname)
def wrapper(*args: Any, **kwargs: Any) -> Any:
    # Recover the auth object
    auth_type, token = HTTPTokenAuth.get_authorization_token(
        allow_access_token_parameter=allow_access_token_parameter)

    # Internal API 'self' reference
    caller = Meta.get_self_reference_from_args(*args)

    if caller is None:  # pragma: no cover
        # An exit here is really really dangerous, but even if
        # get_self_reference_from_args can return None, this case is quite
        # impossible... however with None the server can't continue!
        print_and_exit(
            "Server misconfiguration, self reference can't be None!"
        )

    if (auth_type is not None
            and auth_type == HTTPAUTH_SCHEME
            and request.method != "OPTIONS"):

        # valid, token, jti, user
        valid, token, _, user = caller.auth.verify_token(token)

        # Check authentication. Optional authentication is valid if:
        # 1) the token is missing
        # 2) the token is valid
        # Invalid tokens are rejected
        if not valid:
            # Clear the TCP receive buffer of any pending data
            _ = request.data
            # Mimic the response from a normal endpoint
            # to use the same standards
            # log.info("Invalid token received '{}'", token)
            log.debug("Invalid token received")
            return caller.response(
                "Invalid token received",
                headers=HTTPAUTH_ERR_HEADER,
                code=401,
                allow_html=True,
            )

        caller.authorized_user = user.uuid
        kwargs["user"] = user
        request.environ[TOKEN_VALIDATED_KEY] = True

    else:
        kwargs["user"] = None

    return func(*args, **kwargs)
def load_commands(self):

    Meta.get_module_from_string("restapi.services.bot")
    if EXTENDED_PACKAGE != EXTENDED_PROJECT_DISABLED:
        Meta.get_module_from_string(f"{EXTENDED_PACKAGE}.bot")
    Meta.get_module_from_string(f"{CUSTOM_PACKAGE}.bot")

    # Handle the rest as normal messages
    # NOTE: this has to be the last handler to be attached
    self.updater.dispatcher.add_handler(
        MessageHandler(Filters.text, self.invalid_message))
def custom_connection(self, **kwargs):

    # What service will hold authentication?
    auth_service = self.variables.get('service')
    auth_module = Meta.get_authentication_module(auth_service)
    custom_auth = auth_module.Authentication()

    secret = str(custom_auth.import_secret(self.app.config['SECRET_KEY_FILE']))

    # Install self.app secret for oauth2
    # !?
    self.app.secret_key = secret + '_app'

    custom_auth.TOTP = 'TOTP'

    custom_auth.REGISTER_FAILED_LOGIN = (
        self.variables.get("register_failed_login", False) == 'True'
    )
    custom_auth.FORCE_FIRST_PASSWORD_CHANGE = (
        self.variables.get("force_first_password_change", False) == 'True'
    )
    custom_auth.VERIFY_PASSWORD_STRENGTH = (
        self.variables.get("verify_password_strength", False) == 'True'
    )
    custom_auth.MAX_PASSWORD_VALIDITY = int(
        self.variables.get("max_password_validity", 0)
    )
    custom_auth.DISABLE_UNUSED_CREDENTIALS_AFTER = int(
        self.variables.get("disable_unused_credentials_after", 0)
    )
    custom_auth.MAX_LOGIN_ATTEMPTS = int(
        self.variables.get("max_login_attempts", 0)
    )
    custom_auth.SECOND_FACTOR_AUTHENTICATION = self.variables.get(
        "second_factor_authentication", None
    )

    if custom_auth.SECOND_FACTOR_AUTHENTICATION == "None":
        custom_auth.SECOND_FACTOR_AUTHENTICATION = None

    return custom_auth
def do_schema(self):
    """ Schemas exposing, if requested """

    name = '{}.rest.schema'.format(BACKEND_PACKAGE)
    module = Meta.get_module_from_string(
        name, exit_if_not_found=True, exit_on_fail=True)
    schema_class = getattr(module, 'RecoverSchema')

    self._schema_endpoint = EndpointElements(
        cls=schema_class,
        exists=True,
        custom={
            'methods': {
                'get': ExtraAttributes(auth=None),
                # WHY DOES POST REQUEST AUTHENTICATION
                # 'post': ExtraAttributes(auth=None)
            }
        },
        methods={},
    )
def test_meta(self) -> None:

    # This is a valid package containing other packages... but no task will be found
    tasks = Meta.get_celery_tasks("restapi.utilities")
    assert isinstance(tasks, list)
    assert len(tasks) == 0

    tasks = Meta.get_celery_tasks("this-should-not-exist")
    assert isinstance(tasks, list)
    assert len(tasks) == 0

    mcls = Meta.get_classes_from_module("this-should-not-exist")  # type: ignore
    assert isinstance(mcls, dict)
    assert len(mcls) == 0

    assert Meta.get_class("this-should-not-exist", "this-should-not-exist") is None
    assert Meta.get_class("initialization", "this-should-not-exist") is None
    assert Meta.get_class("initialization", "Initializer") is not None

    assert not Meta.get_module_from_string("this-should-not-exist")

    with pytest.raises(ModuleNotFoundError):
        Meta.get_module_from_string(
            "this-should-not-exist",
            exit_on_fail=True,
        )

    # This method is not very robust... but... let's test the current implementation
    # It basically returns the first arg if it is an instance of some classes
    assert not Meta.get_self_reference_from_args()
    selfref = Meta.get_self_reference_from_args("test")
    assert selfref == "test"

    models = Meta.import_models("this-should", "not-exist", mandatory=False)
    assert isinstance(models, dict)
    assert len(models) == 0

    with pytest.raises(SystemExit):
        Meta.import_models("this-should", "not-exist", mandatory=True)

    # Check the default value of the mandatory flag
    models = Meta.import_models("this-should", "not-exist")
    assert isinstance(models, dict)
    assert len(models) == 0
from typing import Type

from neomodel import (
    DateTimeProperty,
    EmailProperty,
    OneOrMore,
    RelationshipFrom,
    RelationshipTo,
    StringProperty,
    StructuredNode,
    ZeroOrMore,
    ZeroOrOne,
)

from restapi.connectors.neo4j.types import IdentifiedNode
from restapi.utilities.meta import Meta

# mypy: ignore-errors
UserCustomClass: Type[IdentifiedNode] = (
    Meta.get_class("models.neo4j", "UserCustom") or IdentifiedNode
)

# mypy: ignore-errors
GroupCustomClass: Type[IdentifiedNode] = (
    Meta.get_class("models.neo4j", "GroupCustom") or IdentifiedNode
)


class User(UserCustomClass):
    email = EmailProperty(required=True, unique_index=True)
    name = StringProperty(required=True)
    surname = StringProperty(required=True)
    authmethod = StringProperty(required=True)
    password = StringProperty()
    mfa_hash = StringProperty()
    first_login = DateTimeProperty()
    last_login = DateTimeProperty()
    last_password_change = DateTimeProperty()
def get_module(connector: str, module: str) -> Optional[ModuleType]:
    return Meta.get_module_from_string(
        ".".join((module, CONNECTORS_FOLDER, connector))
    )
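# How get_module() and get_class() compose in practice (mirroring the calls in
# load_models above); "sqlalchemy" is just an example connector name, and both
# helpers can return None, so guard the result.
module = Connector.get_module("sqlalchemy", BACKEND_PACKAGE)
connector_class = Connector.get_class(module)
if connector_class is None:
    log.error("No Connector subclass found for sqlalchemy")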
class Detector:

    def __init__(self):

        self.authentication_service = None
        self.authentication_name = 'authentication'
        self.task_service_name = 'celery'
        self.services_configuration = []
        self.services = {}
        self.services_classes = {}
        self.extensions_instances = {}
        self.available_services = {}
        self.meta = Meta()

        self.check_configuration()
        self.load_classes()

    @staticmethod
    def get_global_var(key, default=None):
        return os.environ.get(key, default)

    @staticmethod
    @lru_cache(maxsize=None)  # avoid calling it twice for the same var
    def get_bool_envvar(bool_var):

        if isinstance(bool_var, bool):
            return bool_var

        # if not directly a bool, try an interpretation
        # INTEGERS
        try:
            tmp = int(bool_var)
            return bool(tmp)
        except ValueError:
            pass

        # STRINGS
        if isinstance(bool_var, str):
            # false / False / FALSE
            if bool_var.lower() == 'false':
                return False
            # any non-empty string has to be considered True
            if len(bool_var) > 0:
                return True

        return False

    @staticmethod
    @lru_cache(maxsize=None)  # avoid calling it twice for the same var
    def get_bool_from_os(name):
        bool_var = os.environ.get(name, False)
        return Detector.get_bool_envvar(bool_var)

    @staticmethod
    def prefix_name(service):
        return service.get('name'), service.get('prefix').lower() + '_'

    def check_configuration(self):

        try:
            self.services_configuration = load_yaml_file(
                file='services.yaml', path=ABS_RESTAPI_CONFSPATH)
        except AttributeError as e:
            log.exit(e)

        for service in self.services_configuration:

            name, prefix = self.prefix_name(service)

            # Was this service enabled by the developer?
            enable_var = str(prefix + 'enable').upper()
            self.available_services[name] = self.get_bool_from_os(enable_var)

            if self.available_services[name]:

                # read variables
                variables = self.load_variables(service, enable_var, prefix)
                service['variables'] = variables

                # set auth service
                if name == self.authentication_name:
                    self.authentication_service = variables.get('service')

        if self.authentication_service is None:
            log.info("No service defined for authentication")
        else:
            log.info(
                "Authentication based on '{}' service",
                self.authentication_service
            )

    @staticmethod
    def load_group(label):

        variables = {}
        for var, value in os.environ.items():
            var = var.lower()
            if var.startswith(label):
                key = var[len(label):].strip('_')
                value = value.strip('"').strip("'")
                variables[key] = value
        return variables

    def output_service_variables(self, service_name):
        service_class = self.services_classes.get(service_name, {})
        try:
            return service_class.variables
        except BaseException:
            return {}

    @staticmethod
    def load_variables(service, enable_var=None, prefix=None):

        variables = {}
        host = None

        if prefix is None:
            _, prefix = Detector.prefix_name(service)

        for var, value in os.environ.items():
            if enable_var is not None and var == enable_var:
                continue
            var = var.lower()

            # This is the case when a variable belongs to a service 'prefix'
            if var.startswith(prefix):

                # Fix key and value before saving
                key = var[len(prefix):]
                # One thing that we must avoid is any quote around our value
                value = value.strip('"').strip("'")
                # save
                variables[key] = value

                if key == 'host':
                    host = value

        # Verify if the service is EXTERNAL
        variables['external'] = False
        if isinstance(host, str):  # and host.count('.') > 2:
            if not host.endswith('dockerized.io'):
                variables['external'] = True
                log.verbose("Service {} detected as external: {}", service, host)

        return variables

    def load_class_from_module(self, classname, service=None):

        if service is None:
            flaskext = ''
        else:
            flaskext = '.' + service.get('extension')

        # Try inside our extensions
        module = Meta.get_module_from_string(
            modulestring=BACKEND_PACKAGE + '.flask_ext' + flaskext,
            exit_on_fail=True
        )
        if module is None:
            log.exit("Missing {} for {}", flaskext, service)

        return getattr(module, classname)

    def load_classes(self):

        for service in self.services_configuration:

            name, _ = self.prefix_name(service)

            if not self.available_services.get(name):
                continue
            log.verbose("Looking for class {}", name)

            variables = service.get('variables')
            ext_name = service.get('class')

            # Get the existing class
            try:
                MyClass = self.load_class_from_module(ext_name, service=service)

                # Passing variables
                MyClass.set_variables(variables)

                if service.get('load_models'):

                    base_models = self.meta.import_models(
                        name, BACKEND_PACKAGE, exit_on_fail=True
                    )
                    if EXTENDED_PACKAGE == EXTENDED_PROJECT_DISABLED:
                        extended_models = {}
                    else:
                        extended_models = self.meta.import_models(
                            name, EXTENDED_PACKAGE, exit_on_fail=False
                        )
                    custom_models = self.meta.import_models(
                        name, CUSTOM_PACKAGE, exit_on_fail=False
                    )

                    MyClass.set_models(base_models, extended_models, custom_models)

            except AttributeError as e:
                log.error(str(e))
                log.exit('Invalid Extension class: {}', ext_name)

            # Save
            self.services_classes[name] = MyClass
            log.debug("Got class definition for {}", MyClass)

        if len(self.services_classes) < 1:
            raise KeyError("No classes were recovered!")

        return self.services_classes

    def init_services(
        self, app, worker_mode=False, project_init=False, project_clean=False
    ):

        instances = {}
        auth_backend = None

        for service in self.services_configuration:

            name, _ = self.prefix_name(service)

            if not self.available_services.get(name):
                continue

            if name == self.authentication_name and auth_backend is None:
                if self.authentication_service is None:
                    log.warning("No authentication")
                    continue
                else:
                    log.exit(
                        "Auth service '{}' is unreachable".format(
                            self.authentication_service)
                    )

            args = {}
            if name == self.task_service_name:
                args['worker_mode'] = worker_mode

            # Get the extension class and build the extension object
            ExtClass = self.services_classes.get(name)
            try:
                ext_instance = ExtClass(app, **args)
            except TypeError as e:
                log.exit('Your class {} is not compliant:\n{}', name, e)
            else:
                self.extensions_instances[name] = ext_instance

            if not project_init:
                do_init = False
            elif name == self.authentication_service:
                do_init = True
            elif name == self.authentication_name:
                do_init = True
            else:
                do_init = False

            # Initialize the real service getting the first service object
            log.debug("Initializing {} (pinit={})", name, do_init)
            service_instance = ext_instance.custom_init(
                pinit=do_init, pdestroy=project_clean, abackend=auth_backend
            )
            instances[name] = service_instance

            if name == self.authentication_service:
                auth_backend = service_instance

            # NOTE: commented, looks like a duplicate of the try/except above
            # self.extensions_instances[name] = ext_instance

            # Injecting into the Celery Extension Class
            # all celery tasks found in *vanilla_package/tasks*
            if name == self.task_service_name:
                do_init = True

                task_package = "{}.tasks".format(CUSTOM_PACKAGE)
                submodules = self.meta.import_submodules_from_package(
                    task_package, exit_on_fail=True
                )
                for submodule in submodules:
                    tasks = Meta.get_celery_tasks_from_module(submodule)
                    for func_name, funct in tasks.items():
                        setattr(ExtClass, func_name, funct)

        if len(self.extensions_instances) < 1:
            raise KeyError("No instances available for modules")

        # Only once in a lifetime
        if project_init:
            self.project_initialization(instances, app=app)

        return self.extensions_instances

    def check_availability(self, name):

        if '.' in name:
            # In this case we are receiving a module name
            # e.g. restapi.services.mongodb
            name = name.split('.')[::-1][0]

        return self.available_services.get(name)

    @classmethod
    def project_initialization(self, instances, app=None):
        """ Custom initialization of your project

        Please define your class Initializer in
        project/YOURPROJECT/backend/initialization/initialization.py
        """

        try:
            # NOTE: this might be a pattern
            # see in meta.py:get_customizer_class
            module_path = "{}.{}.{}".format(
                CUSTOM_PACKAGE,
                'initialization',
                'initialization',
            )
            module = Meta.get_module_from_string(module_path)
            meta = Meta()
            Initializer = meta.get_class_from_string(
                'Initializer', module, skip_error=True
            )
            if Initializer is None:
                log.debug("No custom init available")
            else:
                try:
                    Initializer(instances, app=app)
                except BaseException as e:
                    log.error("Errors during custom initialization: {}", e)
                else:
                    log.info("Vanilla project has been initialized")

        except BaseException:
            log.debug("No custom init available")
def connect(self, **kwargs):

    variables = self.variables.copy()
    variables.update(kwargs)
    broker = variables.get("broker")

    if broker is None:  # pragma: no cover
        print_and_exit("Unable to start Celery, missing broker service")

    if broker == "RABBIT":
        service_vars = Env.load_variables_group(prefix="rabbitmq")
        self.celery_app.conf.broker_use_ssl = Env.to_bool(
            service_vars.get("ssl_enabled"))
        self.celery_app.conf.broker_url = self.get_rabbit_url(
            service_vars, protocol="amqp")
    elif broker == "REDIS":
        service_vars = Env.load_variables_group(prefix="redis")
        self.celery_app.conf.broker_use_ssl = False
        self.celery_app.conf.broker_url = self.get_redis_url(
            service_vars, protocol="redis")
    else:  # pragma: no cover
        print_and_exit(
            "Unable to start Celery: unknown broker service: {}", broker)

    log.info(
        "Configured {} as broker {}",
        broker,
        obfuscate_url(self.celery_app.conf.broker_url),
    )
    # From the guide: "Default: Taken from broker_url."
    # But it is not true, the connection fails if not explicitly set
    self.celery_app.conf.broker_read_url = self.celery_app.conf.broker_url
    self.celery_app.conf.broker_write_url = self.celery_app.conf.broker_url

    backend = variables.get("backend", broker)

    if backend == "RABBIT":
        service_vars = Env.load_variables_group(prefix="rabbitmq")
        log.warning(
            "RABBIT backend is quite limited and not fully supported. "
            "Consider enabling Redis or MongoDB as a backend database")
        self.celery_app.conf.result_backend = self.get_rabbit_url(
            service_vars, protocol="rpc")
    elif backend == "REDIS":
        service_vars = Env.load_variables_group(prefix="redis")
        self.celery_app.conf.result_backend = self.get_redis_url(
            service_vars, protocol="redis")
        # set('redis_backend_use_ssl', kwargs.get('redis_backend_use_ssl'))
    elif backend == "MONGODB":
        service_vars = Env.load_variables_group(prefix="mongo")
        self.celery_app.conf.result_backend = self.get_mongodb_url(
            service_vars, protocol="mongodb")
    else:  # pragma: no cover
        print_and_exit(
            "Unable to start Celery: unknown backend service: {}", backend)

    log.info(
        "Configured {} as backend {}",
        backend,
        obfuscate_url(self.celery_app.conf.result_backend),
    )

    # Should it be enabled?
    # Default: Disabled by default (transient messages).
    # If set to True, result messages will be persistent.
    # This means the messages won't be lost after a broker restart.
    # self.celery_app.conf.result_persistent = True

    # Skip initial warnings by avoiding the pickle format (deprecated)
    self.celery_app.conf.accept_content = ["json"]
    self.celery_app.conf.task_serializer = "json"
    self.celery_app.conf.result_serializer = "json"

    # Already enabled by default to use UTC
    # self.celery_app.conf.enable_utc
    # self.celery_app.conf.timezone

    # Not needed, because tasks are dynamically injected
    # self.celery_app.conf.imports
    # self.celery_app.conf.includes

    # Max priority default value for all queues
    # Required to be able to set the priority parameter on task calls
    self.celery_app.conf.task_queue_max_priority = 10

    # Default priority for tasks (if not specified)
    self.celery_app.conf.task_default_priority = 5

    # If you want to apply a more strict priority to items
    # probably prefetching should also be disabled:

    # Late ack means the task messages will be acknowledged after the task
    # has been executed, not just before (the default behavior).
    # self.celery_app.conf.task_acks_late = True

    # How many messages to prefetch at a time multiplied by the number
    # of concurrent processes. The default is 4 (four messages for each process).
    # The default setting is usually a good choice, however, if you have very
    # long running tasks waiting in the queue and you have to start the workers,
    # note that the first worker to start will receive four times the number
    # of messages initially. Thus the tasks may not be fairly distributed to
    # the workers. To disable prefetching, set worker_prefetch_multiplier to 1.
    # Changing that setting to 0 will allow the worker to keep consuming as many
    # messages as it wants.
    self.celery_app.conf.worker_prefetch_multiplier = 1

    if Env.get_bool("CELERYBEAT_ENABLED"):

        CeleryExt.CELERYBEAT_SCHEDULER = backend

        if backend == "MONGODB":
            service_vars = Env.load_variables_group(prefix="mongo")
            url = self.get_mongodb_url(service_vars, protocol="mongodb")
            SCHEDULER_DB = "celery"
            self.celery_app.conf["CELERY_MONGODB_SCHEDULER_DB"] = SCHEDULER_DB
            self.celery_app.conf["CELERY_MONGODB_SCHEDULER_COLLECTION"] = "schedules"
            self.celery_app.conf["CELERY_MONGODB_SCHEDULER_URL"] = url

            import mongoengine

            m = mongoengine.connect(SCHEDULER_DB, host=url)
            log.info("Celery-beat connected to MongoDB: {}", m)
        elif backend == "REDIS":
            service_vars = Env.load_variables_group(prefix="redis")
            url = self.get_redis_url(service_vars, protocol="redis")

            self.celery_app.conf["REDBEAT_REDIS_URL"] = url
            self.celery_app.conf["REDBEAT_KEY_PREFIX"] = REDBEAT_KEY_PREFIX
            log.info("Celery-beat connected to Redis: {}", obfuscate_url(url))
        else:  # pragma: no cover
            log.warning(
                "Cannot configure celery beat scheduler with backend: {}", backend)

    # self.disconnected = False

    conf = self.celery_app.conf
    # Replace the previous App with new settings
    self.celery_app = Celery(
        "RAPyDo", broker=conf["broker_url"], backend=conf["result_backend"])
    self.celery_app.conf = conf

    for funct in Meta.get_celery_tasks(f"{CUSTOM_PACKAGE}.tasks"):
        # Weird errors due to celery-stubs?
        #   "Callable[[], Any]" has no attribute "register"
        # The code is correct... let's ignore it
        self.celery_app.tasks.register(funct)  # type: ignore

    return self
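# Usage note (a hedged sketch, not framework code): because connect() above sets
# task_queue_max_priority=10 and task_default_priority=5, callers may override
# the priority per call through Celery's standard apply_async options.
# "my_task" is a hypothetical registered task name.
def send_with_priority(celery_app, payload):
    task = celery_app.tasks.get("my_task")
    if task is not None:
        # priority ranges 0..10 here; calls without the option default to 5
        task.apply_async(args=[payload], priority=9)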
def init_services(
    self, app, worker_mode=False, project_init=False, project_clean=False
):

    instances = {}
    auth_backend = None

    for service in self.services_configuration:

        name, _ = self.prefix_name(service)

        if not self.available_services.get(name):
            continue

        if name == self.authentication_name and auth_backend is None:
            if self.authentication_service is None:
                log.warning("No authentication")
                continue
            else:
                log.exit(
                    "Auth service '{}' is unreachable".format(
                        self.authentication_service)
                )

        args = {}
        if name == self.task_service_name:
            args['worker_mode'] = worker_mode

        # Get the extension class and build the extension object
        ExtClass = self.services_classes.get(name)
        try:
            ext_instance = ExtClass(app, **args)
        except TypeError as e:
            log.exit('Your class {} is not compliant:\n{}', name, e)
        else:
            self.extensions_instances[name] = ext_instance

        if not project_init:
            do_init = False
        elif name == self.authentication_service:
            do_init = True
        elif name == self.authentication_name:
            do_init = True
        else:
            do_init = False

        # Initialize the real service getting the first service object
        log.debug("Initializing {} (pinit={})", name, do_init)
        service_instance = ext_instance.custom_init(
            pinit=do_init, pdestroy=project_clean, abackend=auth_backend
        )
        instances[name] = service_instance

        if name == self.authentication_service:
            auth_backend = service_instance

        # NOTE: commented, looks like a duplicate of the try/except above
        # self.extensions_instances[name] = ext_instance

        # Injecting into the Celery Extension Class
        # all celery tasks found in *vanilla_package/tasks*
        if name == self.task_service_name:
            do_init = True

            task_package = "{}.tasks".format(CUSTOM_PACKAGE)
            submodules = self.meta.import_submodules_from_package(
                task_package, exit_on_fail=True
            )
            for submodule in submodules:
                tasks = Meta.get_celery_tasks_from_module(submodule)
                for func_name, funct in tasks.items():
                    setattr(ExtClass, func_name, funct)

    if len(self.extensions_instances) < 1:
        raise KeyError("No instances available for modules")

    # Only once in a lifetime
    if project_init:
        self.project_initialization(instances, app=app)

    return self.extensions_instances
# Reload Flask app code also for the worker
# This is necessary to have the app context available
app = create_app(worker_mode=True)

celery_app = app.extensions.get('celery').celery_app
celery_app.app = app


def get_service(service, **kwargs):
    ext = celery_app.app.extensions.get(service)
    if ext is None:
        log.error("{} is not enabled", service)
        return None
    return ext.get_instance(**kwargs)


celery_app.get_service = get_service

################################################
# Import tasks modules to make sure all tasks are available

meta = Meta()
# main_package = "commons.tasks."
# # Base tasks
# submodules = meta.import_submodules_from_package(main_package + "base")
# # Custom tasks
submodules = meta.import_submodules_from_package(
    "{}.tasks".format(CUSTOM_PACKAGE))

log.debug("Celery worker is ready {}", celery_app)
class BaseExtension(metaclass=abc.ABCMeta):

    models = {}  # I get models on a cls level, instead of instances
    meta = Meta()

    def __init__(self, app=None, **kwargs):

        self.objs = {}
        self.set_name()
        self.args = kwargs

        self.app = app
        if app is not None:
            self.init_app(app)

    def set_name(self):
        """ a different name for each extended object """
        self.name = self.__class__.__name__.lower()

    @classmethod
    def set_models(cls, base_models, extended_models, custom_models):

        # Join models as described by issue #16
        cls.models = base_models
        for m in [extended_models, custom_models]:
            for key, model in m.items():

                # Verify if overriding
                if key in base_models.keys():
                    original_model = base_models[key]
                    # Override
                    if issubclass(model, original_model):
                        log.verbose("Overriding model {}", key)
                        cls.models[key] = model
                        continue

                # Otherwise just append
                cls.models[key] = model

        if len(cls.models) > 0:
            log.verbose("Loaded models")

    @classmethod
    def set_variables(cls, envvars):
        cls.variables = envvars

    def init_app(self, app):
        app.teardown_appcontext(self.teardown)

    def pre_object(self, ref, key):
        """ Make sure reference and key are strings """

        if ref is None:
            ref = self.__class__.__name__
        elif isinstance(ref, object):
            ref = ref.__class__.__name__
        elif not isinstance(ref, str):
            ref = str(ref)

        if not isinstance(key, str):
            key = str(key)

        return ref + key

    def set_object(self, obj, key='[]', ref=None):
        """ set object into internal array """

        h = self.pre_object(ref, key)
        self.objs[h] = obj
        return obj

    def get_object(self, key='[]', ref=None):
        """ recover object if any """

        h = self.pre_object(ref, key)
        obj = self.objs.get(h, None)
        return obj

    def connect(self, **kwargs):

        obj = None

        # BEFORE
        ok = self.pre_connection(**kwargs)

        if not ok:
            log.critical("Unable to make preconnection for {}", self.name)
            return obj

        # Try until it's connected
        if len(kwargs) > 0:
            obj = self.custom_connection(**kwargs)
        else:
            obj = self.retry()

        log.verbose("Connected! {}", self.name)

        # AFTER
        self.post_connection(obj, **kwargs)

        obj.connection_time = datetime.now()
        return obj

    def set_models_to_service(self, obj):

        if len(self.models) < 1 and self.__class__.__name__ == 'NeoModel':
            raise Exception()

        for name, model in self.models.items():
            # Save attribute inside the class with the same name
            log.verbose("Injecting model '{}'", name)
            setattr(obj, name, model)
        obj.models = self.models

        return obj

    def set_connection_exception(self):
        return None

    def retry(self, retry_interval=3, max_retries=-1):
        retry_count = 0

        # Get the exception which will signal a missing connection
        exceptions = self.set_connection_exception()
        if exceptions is None:
            exceptions = (BaseException,)

        # A negative max_retries means "retry forever"
        # (the original condition `max_retries != 0 or retry_count < max_retries`
        # never applied the limit)
        while max_retries < 0 or retry_count < max_retries:
            retry_count += 1
            if retry_count > 1:
                log.verbose("testing again in {} secs", retry_interval)

            try:
                obj = self.custom_connection()
            except exceptions as e:
                log.error("Caught: {}({})", e.__class__.__name__, e)
                log.exit("Service '{}' not available", self.name)
            else:
                break

            # Increment the sleep time if doing a lot of retries
            if retry_count % 3 == 0:
                log.debug("Incrementing interval")
                retry_interval += retry_interval

        return obj

    def teardown(self, exception):
        ctx = stack.top
        if self.get_object(ref=ctx) is not None:
            self.close_connection(ctx)

    def get_instance(self, **kwargs):

        # Parameters
        global_instance = kwargs.pop('global_instance', False)
        isauth = kwargs.pop('authenticator', False)
        cache_expiration = kwargs.pop('cache_expiration', None)
        # pinit = kwargs('project_initialization', False)

        # Variables
        obj = None
        ctx = stack.top
        ref = self
        unique_hash = str(sorted(kwargs.items()))

        # When not using the context, this is the first connection
        if ctx is None:
            # First connection, before any request
            obj = self.connect()
            if obj is None:
                return None
            # self.initialization(obj=obj)
            self.set_object(obj=obj, ref=ref)

            log.verbose("First connection for {}", self.name)

        else:
            if not isauth:
                if not global_instance:
                    ref = ctx

                obj = self.get_object(ref=ref, key=unique_hash)

            if obj is not None and cache_expiration is not None:
                now = datetime.now()
                exp = timedelta(seconds=cache_expiration)

                if now < obj.connection_time + exp:
                    log.verbose("Cache is still valid for {}", self)
                else:
                    log.info("Cache expired for {}", self)
                    obj = None

            if obj is None:
                obj = self.connect(**kwargs)
                if obj is None:
                    return None
                self.set_object(obj=obj, ref=ref, key=unique_hash)
            else:
                pass

        obj = self.set_models_to_service(obj)

        return obj

    ############################
    # OPTIONALLY
    # to be executed only at init time?

    def pre_connection(self, **kwargs):
        return True

    def post_connection(self, obj=None, **kwargs):
        return True

    def close_connection(self, ctx):
        """ override this method if you must close
        your connection after each request"""

        # obj = self.get_object(ref=ctx)
        # obj.close()
        self.set_object(obj=None, ref=ctx)  # it could be overridden

    ############################
    # To be overridden
    @abc.abstractmethod
    def custom_connection(self, **kwargs):
        return

    ############################
    # Already has a default
    def custom_init(self, pinit=False, pdestroy=False, abackend=None, **kwargs):
        """
        - A backend is needed for non-standalone services
            e.g. authentication module
        - Project initialization/removal could be used here
            or carried on to low levels;
            they get activated by specific flask cli commands
        """
        return self.get_instance()
def custom_connection(self, **kwargs):

    if len(kwargs) > 0:
        print("TODO: use args for connection?", kwargs)

    uri = '{}://{}:{}@{}:{}/{}'.format(
        self.variables.get('dbtype', 'postgresql'),
        self.variables.get('user'),
        self.variables.get('password'),
        self.variables.get('host'),
        self.variables.get('port'),
        self.variables.get('db'),
    )

    # TODO: in case we need different connection binds
    # (multiple connections with sql) then:
    # SQLALCHEMY_BINDS = {
    #     'users': 'mysqldb://localhost/users',
    #     'appmeta': 'sqlite:////path/to/appmeta.db'
    # }
    self.app.config['SQLALCHEMY_DATABASE_URI'] = uri
    # self.app.config['SQLALCHEMY_POOL_TIMEOUT'] = 3
    self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

    # pool_size = self.variables.get('poolsize')
    # if pool_size is not None:
    #     # sqlalchemy docs: http://j.mp/2xT0GOc
    #     # defaults: overflow=10, pool_size=5
    #     # self.app.config['SQLALCHEMY_MAX_OVERFLOW'] = 0
    #     self.app.config['SQLALCHEMY_POOL_SIZE'] = int(pool_size)
    #     log.debug("Setting SQLALCHEMY_POOL_SIZE = {}", pool_size)

    obj_name = 'db'
    # search for the original sqlalchemy object in the models
    db = Meta.obj_from_models(obj_name, self.name, CUSTOM_PACKAGE)

    try:
        from flask_migrate import Migrate

        # The Alembic package, which handles the migration work, does not recognize
        # type changes in columns by default. If you want that fine level of
        # detection you need to enable the compare_type option
        Migrate(self.app, db, compare_type=True)
    except BaseException as e:
        log.warning("Flask Migrate not enabled")
        log.error(str(e))

    # no 'db' set in CUSTOM_PACKAGE, looking for the EXTENDED PACKAGE, if any
    if db is None and EXTENDED_PACKAGE != EXTENDED_PROJECT_DISABLED:
        db = Meta.obj_from_models(obj_name, self.name, EXTENDED_PACKAGE)

    if db is None:
        log.warning("No sqlalchemy db imported in custom package")
        db = Meta.obj_from_models(obj_name, self.name, BACKEND_PACKAGE)

    if db is None:
        log.exit("Could not get {} within {} models", obj_name, self.name)

    # Overwrite db.session created by flask_alchemy due to errors
    # with transactions when handling concurrent requests...
    from sqlalchemy import create_engine
    from sqlalchemy.orm import scoped_session
    from sqlalchemy.orm import sessionmaker

    db.engine_bis = create_engine(uri)
    db.session = scoped_session(sessionmaker(bind=db.engine_bis))

    return db
def create_app(
    name: str = __name__,
    mode: ServerModes = ServerModes.NORMAL,
    options: Optional[Dict[str, bool]] = None,
) -> Flask:
    """ Create the server instance for the Flask application """

    if PRODUCTION and TESTING and not FORCE_PRODUCTION_TESTS:  # pragma: no cover
        print_and_exit("Unable to execute tests in production")

    # TERM is not caught by Flask
    # https://github.com/docker/compose/issues/4199#issuecomment-426109482
    # signal.signal(signal.SIGTERM, teardown_handler)
    # SIGINT is registered as STOPSIGNAL in Dockerfile
    signal.signal(signal.SIGINT, teardown_handler)

    # Flask app instance
    # template_folder = template dir for output in HTML
    microservice = Flask(
        name, template_folder=os.path.join(ABS_RESTAPI_PATH, "templates")
    )

    # CORS
    if not PRODUCTION:
        cors = CORS(
            allow_headers=[
                "Content-Type",
                "Authorization",
                "X-Requested-With",
                "x-upload-content-length",
                "x-upload-content-type",
                "content-range",
            ],
            supports_credentials=["true"],
            methods=["GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"],
        )

        cors.init_app(microservice)
        log.debug("CORS Injected")

    # Flask configuration from config file
    microservice.config.from_object(config)
    log.debug("Flask app configured")

    if PRODUCTION:
        log.info("Production server mode is ON")

    endpoints_loader = EndpointsLoader()
    mem.configuration = endpoints_loader.load_configuration()

    mem.initializer = Meta.get_class("initialization", "Initializer")
    if not mem.initializer:  # pragma: no cover
        print_and_exit("Invalid Initializer class")

    mem.customizer = Meta.get_instance("customization", "Customizer")
    if not mem.customizer:  # pragma: no cover
        print_and_exit("Invalid Customizer class")

    if not isinstance(mem.customizer, BaseCustomizer):  # pragma: no cover
        print_and_exit("Invalid Customizer class, it should inherit BaseCustomizer")

    Connector.init_app(app=microservice, worker_mode=(mode == ServerModes.WORKER))

    # Initialize reading of all files
    mem.geo_reader = geolite2.reader()
    # when to close??
    # geolite2.close()

    if mode == ServerModes.INIT:
        Connector.project_init(options=options)

    if mode == ServerModes.DESTROY:
        Connector.project_clean()

    # Restful plugin with endpoint mapping (skipped in INIT|DESTROY|WORKER modes)
    if mode == ServerModes.NORMAL:

        logging.getLogger("werkzeug").setLevel(logging.ERROR)

        # ignore warning messages from apispec
        warnings.filterwarnings(
            "ignore", message="Multiple schemas resolved to the name "
        )

        mem.cache = Cache.get_instance(microservice)

        endpoints_loader.load_endpoints()
        mem.authenticated_endpoints = endpoints_loader.authenticated_endpoints
        mem.private_endpoints = endpoints_loader.private_endpoints

        # Triggering automatic mapping of REST endpoints
        rest_api = Api(catch_all_404s=True)

        for endpoint in endpoints_loader.endpoints:
            # Create the restful resource with it;
            # this method is from the RESTful plugin
            rest_api.add_resource(endpoint.cls, *endpoint.uris)

        # HERE all endpoints will be registered by using FlaskRestful
        rest_api.init_app(microservice)

        # APISpec configuration
        api_url = get_backend_url()
        scheme, host = api_url.rstrip("/").split("://")

        spec = APISpec(
            title=get_project_configuration(
                "project.title", default="Your application name"
            ),
            version=get_project_configuration("project.version", default="0.0.1"),
            openapi_version="2.0",
            # OpenApi 3 not working with FlaskApiSpec
            # -> Duplicate parameter with name body and location body
            # https://github.com/jmcarp/flask-apispec/issues/170
            # Find other warnings like this by searching:
            # **FASTAPI**
            # openapi_version="3.0.2",
            plugins=[MarshmallowPlugin()],
            host=host,
            schemes=[scheme],
            tags=endpoints_loader.tags,
        )
        # OpenAPI 3 changed the definition of the security level.
        # Are some changes needed here?
        api_key_scheme = {"type": "apiKey", "in": "header", "name": "Authorization"}
        spec.components.security_scheme("Bearer", api_key_scheme)

        microservice.config.update(
            {
                "APISPEC_SPEC": spec,
                # 'APISPEC_SWAGGER_URL': '/api/swagger',
                "APISPEC_SWAGGER_URL": None,
                # 'APISPEC_SWAGGER_UI_URL': '/api/swagger-ui',
                # Disable Swagger-UI
                "APISPEC_SWAGGER_UI_URL": None,
            }
        )

        mem.docs = FlaskApiSpec(microservice)

        # Clean app routes
        ignore_verbs = {"HEAD", "OPTIONS"}

        for rule in microservice.url_map.iter_rules():

            endpoint = microservice.view_functions[rule.endpoint]
            if not hasattr(endpoint, "view_class"):
                continue

            newmethods = ignore_verbs.copy()
            rulename = str(rule)

            for verb in rule.methods - ignore_verbs:
                method = verb.lower()
                if method in endpoints_loader.uri2methods[rulename]:
                    # remove from flask mapping
                    # to allow 405 response
                    newmethods.add(verb)

            rule.methods = newmethods

        # Register swagger. Note: after method mapping cleaning
        with microservice.app_context():
            for endpoint in endpoints_loader.endpoints:
                try:
                    mem.docs.register(endpoint.cls)
                except TypeError as e:  # pragma: no cover
                    print(e)
                    log.error("Cannot register {}: {}", endpoint.cls.__name__, e)

    # marshmallow errors handler
    microservice.register_error_handler(422, handle_marshmallow_errors)

    # Logging responses
    microservice.after_request(handle_response)

    if SENTRY_URL is not None:  # pragma: no cover

        if PRODUCTION:
            sentry_sdk.init(
                dsn=SENTRY_URL,
                # already caught by handle_marshmallow_errors
                ignore_errors=[werkzeug.exceptions.UnprocessableEntity],
                integrations=[FlaskIntegration()],
            )
            log.info("Enabled Sentry {}", SENTRY_URL)
        else:
            # Could be enabled in print mode
            # sentry_sdk.init(transport=print)
            log.info("Skipping Sentry, only enabled in PRODUCTION mode")

    log.info("Boot completed")

    return microservice
""" import os import copy from restapi.confs import API_URL, BASE_URLS, ABS_RESTAPI_PATH, CONF_PATH from restapi.confs import BACKEND_PACKAGE, CUSTOM_PACKAGE from restapi.services.detect import detector from restapi.attributes import EndpointElements, ExtraAttributes from restapi.swagger import BeSwagger from restapi.utilities.meta import Meta from restapi.utilities.configuration import read_configuration from restapi.utilities.logs import log meta = Meta() CONF_FOLDERS = detector.load_group(label='project_confs') ######################## # Customization on the table ######################## class Customizer: """ Customize your BACKEND: Read all of available configurations and definitions. """ def __init__(self, testing=False, init=False): # Input
def find_endpoints(self):

    ##################
    # Walk folders looking for endpoints

    endpoints_folders = []
    # base swagger dir (rapydo/http-ap)
    endpoints_folders.append({'path': ABS_RESTAPI_PATH, 'iscore': True})

    # swagger dir from the extended project, if any
    if self._extended_project is not None:
        endpoints_folders.append({
            'path': os.path.join(os.curdir, self._extended_project),
            'iscore': False
        })

    # custom swagger dir
    endpoints_folders.append({
        'path': os.path.join(os.curdir, CUSTOM_PACKAGE),
        'iscore': False
    })

    # already_loaded = {}

    for folder in endpoints_folders:

        base_dir = folder.get('path')
        iscore = folder.get('iscore')

        # get the last item of the path
        # normpath is required to strip the final /, if any
        base_module = os.path.basename(os.path.normpath(base_dir))

        if iscore:
            apis_dir = os.path.join(base_dir, 'resources')
            apiclass_module = '{}.resources'.format(base_module)
        else:
            apis_dir = os.path.join(base_dir, 'apis')
            apiclass_module = '{}.apis'.format(base_module)

        # Look for every file in the apis folder
        for epfiles in os.listdir(apis_dir):

            # get module name (e.g. apis.filename)
            module_file = os.path.splitext(epfiles)[0]
            module_name = "{}.{}".format(apiclass_module, module_file)
            # Convert the module name into a module
            log.debug("Importing {}", module_name)
            try:
                module = Meta.get_module_from_string(
                    module_name,
                    exit_on_fail=True,
                    exit_if_not_found=True)
            except BaseException as e:
                log.exit("Cannot import {}\nError: {}", module_name, e)

            # Extract classes from the module
            # classes = meta.get_classes_from_module(module)
            classes = meta.get_new_classes_from_module(module)

            for class_name in classes:
                ep_class = classes.get(class_name)
                # Filter out classes without the required data
                if not hasattr(ep_class, "methods"):
                    continue
                if ep_class.methods is None:
                    continue

                # if class_name in already_loaded:
                #     log.warning(
                #         "Skipping import of {} from {}.{}, already loaded from {}",
                #         class_name,
                #         apis_dir,
                #         module_file,
                #         already_loaded[class_name],
                #     )
                #     continue
                # already_loaded[class_name] = "{}.{}".format(apis_dir, module_file)
                log.debug(
                    "Importing {} from {}.{}", class_name, apis_dir, module_file)

                if not self._testing:
                    skip = False
                    for var in ep_class.depends_on:
                        pieces = var.strip().split(' ')
                        pieces_num = len(pieces)
                        if pieces_num == 1:
                            dependency = pieces.pop()
                            negate = False
                        elif pieces_num == 2:
                            negate, dependency = pieces
                            negate = negate.lower() == 'not'
                        else:
                            log.exit('Wrong parameter: {}', var)

                        check = detector.get_bool_from_os(dependency)
                        if negate:
                            check = not check

                        # Skip if not meeting the requirements of the dependency
                        if not check:
                            skip = True
                            break

                    if skip:
                        log.debug(
                            "Skipping '{} {}' due to unmet dependency: {}",
                            module_name, class_name, dependency)
                        continue

                # Building the endpoint
                endpoint = EndpointElements(custom={})
                endpoint.cls = ep_class
                endpoint.exists = True
                endpoint.iscore = iscore

                # Global tags to be applied to all methods
                endpoint.tags = ep_class.labels

                # base URI
                base = ep_class.baseuri
                if base not in BASE_URLS:
                    log.warning("Invalid base {}", base)
                    base = API_URL
                base = base.strip('/')
                endpoint.base_uri = base

                endpoint.uris = {}  # attrs python lib bug?
                endpoint.custom['schema'] = {
                    'expose': ep_class.expose_schema,
                    'publish': {},
                }

                endpoint.methods = {}

                mapping_lists = []
                for m in ep_class.methods:
                    method_name = "_{}".format(m)
                    if not hasattr(ep_class, method_name):
                        method_name = m
                        if not hasattr(ep_class, method_name):
                            log.warning(
                                "{} configuration not found in {}", m, class_name)
                            continue
                        # Enable this warning to start conversions GET -> _GET
                        # Find other warnings like this by searching:
                        # **FASTAPI**
                        # else:
                        #     log.warning(
                        #         "Obsolete dict {} in {}", m, class_name
                        #     )

                    conf = getattr(ep_class, method_name)
                    kk = conf.keys()
                    mapping_lists.extend(kk)
                    endpoint.methods[m.lower()] = copy.deepcopy(conf)

                if endpoint.custom['schema']['expose']:
                    for uri in mapping_lists:
                        total_uri = '/{}{}'.format(endpoint.base_uri, uri)
                        schema_uri = '{}/schemas{}'.format(API_URL, uri)

                        p = hex(id(endpoint.cls))
                        self._schema_endpoint.uris[uri + p] = schema_uri

                        self._schemas_map[schema_uri] = total_uri

                self._endpoints.append(endpoint)
def create_app( name: str = __name__, mode: ServerModes = ServerModes.NORMAL, options: Optional[Dict[str, bool]] = None, ) -> Flask: """Create the server istance for Flask application""" if PRODUCTION and TESTING and not FORCE_PRODUCTION_TESTS: # pragma: no cover print_and_exit("Unable to execute tests in production") # TERM is not catched by Flask # https://github.com/docker/compose/issues/4199#issuecomment-426109482 # signal.signal(signal.SIGTERM, teardown_handler) # SIGINT is registered as STOPSIGNAL in Dockerfile signal.signal(signal.SIGINT, teardown_handler) # Flask app instance # template_folder = template dir for output in HTML flask_app = Flask(name, template_folder=str(ABS_RESTAPI_PATH.joinpath("templates"))) # CORS if not PRODUCTION: if TESTING: cors_origin = "*" else: # pragma: no cover cors_origin = get_frontend_url() # Beware, this only works because get_frontend_url never append a port cors_origin += ":*" CORS( flask_app, allow_headers=[ "Content-Type", "Authorization", "X-Requested-With", "x-upload-content-length", "x-upload-content-type", "content-range", ], supports_credentials=["true"], methods=["GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"], resources={r"*": {"origins": cors_origin}}, ) log.debug("CORS Enabled") # Flask configuration from config file flask_app.config.from_object(config) flask_app.json_encoder = ExtendedJSONEncoder # Used to force flask to avoid json sorting and ensure that # the output to reflect the order of field in the Marshmallow schema flask_app.config["JSON_SORT_KEYS"] = False log.debug("Flask app configured") if PRODUCTION: log.info("Production server mode is ON") endpoints_loader = EndpointsLoader() if HOST_TYPE == DOCS: # pragma: no cover log.critical("Creating mocked configuration") mem.configuration = {} log.critical("Loading Mocked Initializer and Customizer classes") from restapi.mocks import Customizer, Initializer mem.initializer = Initializer mem.customizer = Customizer() else: mem.configuration = endpoints_loader.load_configuration() mem.initializer = Meta.get_class("initialization", "Initializer") if not mem.initializer: # pragma: no cover print_and_exit("Invalid Initializer class") customizer = Meta.get_class("customization", "Customizer") if not customizer: # pragma: no cover print_and_exit("Invalid Customizer class") mem.customizer = customizer() if not isinstance(mem.customizer, BaseCustomizer): # pragma: no cover print_and_exit("Invalid Customizer class, it should inherit BaseCustomizer") Connector.init_app(app=flask_app, worker_mode=(mode == ServerModes.WORKER)) # Initialize reading of all files mem.geo_reader = geolite2.reader() # when to close?? # geolite2.close() if mode == ServerModes.INIT: Connector.project_init(options=options) if mode == ServerModes.DESTROY: Connector.project_clean() # Restful plugin with endpoint mapping (skipped in INIT|DESTROY|WORKER modes) if mode == ServerModes.NORMAL: logging.getLogger("werkzeug").setLevel(logging.ERROR) # warnings levels: # default # Warn once per call location # error # Convert to exceptions # always # Warn every time # module # Warn once per calling module # once # Warn once per Python process # ignore # Never warn # Types of warnings: # Warning: This is the base class of all warning category classes # UserWarning: The default category for warn(). # DeprecationWarning: Base category for warnings about deprecated features when # those warnings are intended for other Python developers # SyntaxWarning: Base category for warnings about dubious syntactic features. 
# RuntimeWarning: Base category for warnings about dubious runtime features. # FutureWarning: Base category for warnings about deprecated features when those # warnings are intended for end users # PendingDeprecationWarning: Base category for warnings about features that will # be deprecated in the future (ignored by default). # ImportWarning: Base category for warnings triggered during the process of # importing a module # UnicodeWarning: Base category for warnings related to Unicode. # BytesWarning: Base category for warnings related to bytes and bytearray. # ResourceWarning: Base category for warnings related to resource usage if TESTING: warnings.simplefilter("always", Warning) warnings.simplefilter("error", UserWarning) warnings.simplefilter("error", DeprecationWarning) warnings.simplefilter("error", SyntaxWarning) warnings.simplefilter("error", RuntimeWarning) warnings.simplefilter("error", FutureWarning) # warnings about features that will be deprecated in the future warnings.simplefilter("default", PendingDeprecationWarning) warnings.simplefilter("error", ImportWarning) warnings.simplefilter("error", UnicodeWarning) warnings.simplefilter("error", BytesWarning) # Can't set this an error due to false positives with downloads # a lot of issues like: https://github.com/pallets/flask/issues/2468 warnings.simplefilter("always", ResourceWarning) warnings.simplefilter("default", Neo4jExperimentalWarning) # Remove me in a near future, this is due to hypothesis with pytest 7 # https://github.com/HypothesisWorks/hypothesis/issues/3222 warnings.filterwarnings( "ignore", message="A private pytest class or function was used." ) elif PRODUCTION: # pragma: no cover warnings.simplefilter("ignore", Warning) warnings.simplefilter("always", UserWarning) warnings.simplefilter("default", DeprecationWarning) warnings.simplefilter("ignore", SyntaxWarning) warnings.simplefilter("ignore", RuntimeWarning) warnings.simplefilter("ignore", FutureWarning) warnings.simplefilter("ignore", PendingDeprecationWarning) warnings.simplefilter("ignore", ImportWarning) warnings.simplefilter("ignore", UnicodeWarning) warnings.simplefilter("ignore", BytesWarning) warnings.simplefilter("ignore", ResourceWarning) # even if ignore it is raised once # because of the imports executed before setting this to ignore warnings.simplefilter("ignore", Neo4jExperimentalWarning) else: # pragma: no cover warnings.simplefilter("default", Warning) warnings.simplefilter("always", UserWarning) warnings.simplefilter("always", DeprecationWarning) warnings.simplefilter("default", SyntaxWarning) warnings.simplefilter("default", RuntimeWarning) warnings.simplefilter("always", FutureWarning) warnings.simplefilter("default", PendingDeprecationWarning) warnings.simplefilter("default", ImportWarning) warnings.simplefilter("default", UnicodeWarning) warnings.simplefilter("default", BytesWarning) warnings.simplefilter("always", ResourceWarning) # even if ignore it is raised once # because of the imports executed before setting this to ignore warnings.simplefilter("ignore", Neo4jExperimentalWarning) # ignore warning messages from apispec warnings.filterwarnings( "ignore", message="Multiple schemas resolved to the name " ) # ignore warning messages on flask socket after teardown warnings.filterwarnings("ignore", message="unclosed <socket.socket") # from flask_caching 1.10.1 with python 3.10 on core tests... 
# try to remove this once upgraded flask_caching in a near future warnings.filterwarnings( "ignore", message="_SixMetaPathImporter.find_spec", ) # Raised from sentry_sdk 1.5.11 with python 3.10 events warnings.filterwarnings( "ignore", message="SelectableGroups dict interface is deprecated. Use select.", ) mem.cache = Cache.get_instance(flask_app) endpoints_loader.load_endpoints() mem.authenticated_endpoints = endpoints_loader.authenticated_endpoints mem.private_endpoints = endpoints_loader.private_endpoints for endpoint in endpoints_loader.endpoints: ename = endpoint.cls.__name__.lower() endpoint_view = endpoint.cls.as_view(ename) for url in endpoint.uris: flask_app.add_url_rule(url, view_func=endpoint_view) # APISpec configuration api_url = get_backend_url() scheme, host = api_url.rstrip("/").split("://") spec = APISpec( title=get_project_configuration( "project.title", default="Your application name" ), version=get_project_configuration("project.version", default="0.0.1"), openapi_version="2.0", # OpenApi 3 not working with FlaskApiSpec # -> Duplicate parameter with name body and location body # https://github.com/jmcarp/flask-apispec/issues/170 # Find other warning like this by searching: # **FASTAPI** # openapi_version="3.0.2", plugins=[MarshmallowPlugin()], host=host, schemes=[scheme], tags=endpoints_loader.tags, ) # OpenAPI 3 changed the definition of the security level. # Some changes needed here? if Env.get_bool("AUTH_ENABLE"): api_key_scheme = {"type": "apiKey", "in": "header", "name": "Authorization"} spec.components.security_scheme("Bearer", api_key_scheme) flask_app.config.update( { "APISPEC_SPEC": spec, # 'APISPEC_SWAGGER_URL': '/api/swagger', "APISPEC_SWAGGER_URL": None, # 'APISPEC_SWAGGER_UI_URL': '/api/swagger-ui', # Disable Swagger-UI "APISPEC_SWAGGER_UI_URL": None, } ) mem.docs = FlaskApiSpec(flask_app) # Clean app routes ignore_verbs = {"HEAD", "OPTIONS"} for rule in flask_app.url_map.iter_rules(): view_function = flask_app.view_functions[rule.endpoint] if not hasattr(view_function, "view_class"): continue newmethods = ignore_verbs.copy() rulename = str(rule) if rule.methods: for verb in rule.methods - ignore_verbs: method = verb.lower() if method in endpoints_loader.uri2methods[rulename]: # remove from flask mapping # to allow 405 response newmethods.add(verb) rule.methods = newmethods # Register swagger. 
# Register swagger. Note: this happens after the method-mapping cleaning above
with flask_app.app_context():
    for endpoint in endpoints_loader.endpoints:
        try:
            mem.docs.register(endpoint.cls)
        except TypeError as e:  # pragma: no cover
            print(e)
            log.error("Cannot register {}: {}", endpoint.cls.__name__, e)

# Marshmallow errors handler
# Can't get the typing to work with flask 2.1
flask_app.register_error_handler(422, handle_marshmallow_errors)  # type: ignore
flask_app.register_error_handler(400, handle_http_errors)  # type: ignore
flask_app.register_error_handler(404, handle_http_errors)  # type: ignore
flask_app.register_error_handler(405, handle_http_errors)  # type: ignore
flask_app.register_error_handler(500, handle_http_errors)  # type: ignore

# flask_app.before_request(inspect_request)
# Logging responses
# Can't get the typing to work with flask 2.1
flask_app.after_request(handle_response)  # type: ignore

if SENTRY_URL is not None:  # pragma: no cover
    if PRODUCTION:
        sentry_sdk_init(
            dsn=SENTRY_URL,
            # Already caught by handle_marshmallow_errors
            ignore_errors=[werkzeug.exceptions.UnprocessableEntity],
            integrations=[FlaskIntegration()],
        )
        log.info("Enabled Sentry {}", SENTRY_URL)
    else:
        # Could be enabled in print mode
        # sentry_sdk_init(transport=print)
        log.info("Skipping Sentry, only enabled in PRODUCTION mode")

log.info("Boot completed")

if PRODUCTION and not TESTING and name == MAIN_SERVER_NAME:  # pragma: no cover
    save_event_log(
        event=Events.server_startup,
        payload={"server": name},
        user=None,
        target=None,
    )

return flask_app
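
# A minimal sketch (plain Flask, no framework helpers) of the effect of the
# route cleaning above: any verb that is not re-added to rule.methods is
# answered by Flask with 405 Method Not Allowed instead of being dispatched
# to the view. All names below are illustrative.
def _demo_405_on_removed_verb() -> None:
    from flask import Flask

    demo_app = Flask(__name__)

    @demo_app.route("/ping", methods=["GET"])  # POST is not mapped
    def ping() -> str:
        return "pong"

    client = demo_app.test_client()
    print(client.get("/ping").status_code)  # 200
    print(client.post("/ping").status_code)  # 405, as for the cleaned rules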
def wrapper(*args: Any, **kwargs: Any) -> Any:
    # Recover the auth object
    auth_type, token = HTTPTokenAuth.get_authorization_token(
        allow_access_token_parameter=allow_access_token_parameter
    )

    # Internal API 'self' reference
    caller = Meta.get_self_reference_from_args(*args)

    if caller is None:  # pragma: no cover
        # An exit here is dangerous, but even though
        # get_self_reference_from_args can return None, this case should be
        # nearly impossible in practice; and with a None caller the server
        # cannot continue anyway
        print_and_exit("Server misconfiguration, self reference can't be None!")

    if auth_type is None or auth_type != HTTPAUTH_SCHEME:
        # Wrong authentication string
        msg = (
            "Missing credentials in headers"
            f", e.g. {HTTPAUTH_AUTH_FIELD}: '{HTTPAUTH_SCHEME} TOKEN'"
        )
        log.debug("Unauthorized request: missing credentials")
        return caller.response(
            msg, code=401, headers=HTTPAUTH_ERR_HEADER, allow_html=True
        )

    # Handling OPTIONS forwarded to our application:
    # ignore headers and let it go, to avoid unwanted interactions with CORS
    if request.method != "OPTIONS":

        # valid, token, jti, user
        valid, token, _, user = caller.auth.verify_token(token)

        # Check authentication
        if not valid:
            # Clear the TCP receive buffer of any pending data
            _ = request.data
            # Mimic the response from a normal endpoint
            # to keep the same standards
            # log.info("Invalid token received '{}'", token)
            log.debug("Invalid token received")
            return caller.response(
                "Invalid token received",
                headers=HTTPAUTH_ERR_HEADER,
                code=401,
                allow_html=True,
            )
        request.environ[TOKEN_VALIDATED_KEY] = True

    # Check roles
    if not caller.auth.verify_roles(user, roles, required_roles=required_roles):
        log.info("Unauthorized request: missing privileges")
        return caller.response(
            "You are not authorized: missing privileges",
            code=401,
            allow_html=True,
        )

    caller.authorized_user = user.uuid
    kwargs["user"] = user
    return func(*args, **kwargs)
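
# A self-contained sketch of the pattern implemented by the wrapper above:
# a decorator verifies the request, then injects the resolved user into
# kwargs, so the wrapped endpoint receives it as the "user" keyword argument.
# The names below (inject_user, demo_endpoint) are illustrative only.
from functools import wraps
from typing import Any, Callable

def inject_user(func: Callable[..., Any]) -> Callable[..., Any]:
    @wraps(func)
    def demo_wrapper(*args: Any, **kwargs: Any) -> Any:
        # Stand-in for the verify_token / verify_roles checks above
        kwargs["user"] = {"uuid": "0000-demo"}
        return func(*args, **kwargs)

    return demo_wrapper

@inject_user
def demo_endpoint(*, user: dict) -> str:
    return f"authorized as {user['uuid']}"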
def connect(self, **kwargs: str) -> "CeleryExt":

    variables = self.variables.copy()
    variables.update(kwargs)

    broker = variables.get("broker_service")

    if HOST_TYPE == DOCS:  # pragma: no cover
        broker = "RABBIT"

    if broker is None:  # pragma: no cover
        print_and_exit("Unable to start Celery, missing broker service")

    if broker == "RABBIT":
        service_vars = Env.load_variables_group(prefix="rabbitmq")

        if Env.to_bool(service_vars.get("ssl_enabled")):
            # The setting can be a dict with the following keys:
            # ssl_cert_reqs (required): one of the SSLContext.verify_mode values:
            #     ssl.CERT_NONE
            #     ssl.CERT_OPTIONAL
            #     ssl.CERT_REQUIRED
            # ssl_ca_certs (optional): path to the CA certificate
            # ssl_certfile (optional): path to the client certificate
            # ssl_keyfile (optional): path to the client key

            server_hostname = RabbitExt.get_hostname(service_vars.get("host", ""))
            force_self_signed = Env.get_bool("SSL_FORCE_SELF_SIGNED")
            ca_certs = (
                SSL_CERTIFICATE
                if server_hostname == "localhost" or force_self_signed
                else certifi.where()
            )
            self.celery_app.conf.broker_use_ssl = {
                # 'keyfile': '/var/ssl/private/worker-key.pem',
                # 'certfile': '/var/ssl/amqp-server-cert.pem',
                # 'ca_certs': '/var/ssl/myca.pem',
                # 'cert_reqs': ssl.CERT_REQUIRED
                # 'cert_reqs': ssl.CERT_OPTIONAL
                "cert_reqs": ssl.CERT_REQUIRED,
                "server_hostname": server_hostname,
                "ca_certs": ca_certs,
            }

        self.celery_app.conf.broker_url = self.get_rabbit_url(
            service_vars, protocol="pyamqp"
        )

    elif broker == "REDIS":
        service_vars = Env.load_variables_group(prefix="redis")

        self.celery_app.conf.broker_use_ssl = False
        self.celery_app.conf.broker_url = self.get_redis_url(
            service_vars, protocol="redis", db=RedisExt.CELERY_BROKER_DB
        )

    else:  # pragma: no cover
        print_and_exit("Unable to start Celery: unknown broker service: {}", broker)

    log.info(
        "Configured {} as broker {}",
        broker,
        obfuscate_url(self.celery_app.conf.broker_url),
    )

    # From the guide: "Default: Taken from broker_url."
    # But this is not the case: the connection fails if these are not set explicitly
    self.celery_app.conf.broker_read_url = self.celery_app.conf.broker_url
    self.celery_app.conf.broker_write_url = self.celery_app.conf.broker_url

    backend = variables.get("backend_service", broker)

    if backend == "RABBIT":
        service_vars = Env.load_variables_group(prefix="rabbitmq")

        log.warning(
            "RABBIT backend is quite limited and not fully supported. "
            "Consider enabling Redis as a backend database"
        )
        self.celery_app.conf.result_backend = self.get_rabbit_url(
            service_vars, protocol="rpc"
        )

    elif backend == "REDIS":
        service_vars = Env.load_variables_group(prefix="redis")

        self.celery_app.conf.result_backend = self.get_redis_url(
            service_vars, protocol="redis", db=RedisExt.CELERY_BACKEND_DB
        )
        # set('redis_backend_use_ssl', kwargs.get('redis_backend_use_ssl'))

    else:  # pragma: no cover
        print_and_exit("Unable to start Celery: unknown backend service: {}", backend)

    log.info(
        "Configured {} as backend {}",
        backend,
        obfuscate_url(self.celery_app.conf.result_backend),
    )
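
    # For illustration (hypothetical values): with a local self-signed broker,
    # the SSL block above would resolve to something like
    #   broker_use_ssl = {
    #       "cert_reqs": ssl.CERT_REQUIRED,
    #       "server_hostname": "localhost",
    #       "ca_certs": SSL_CERTIFICATE,  # the project certificate
    #   }
    # while any other hostname would fall back to the certifi CA bundle
    # (ca_certs = certifi.where())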
    # Should this be enabled?
    # Default: disabled (transient messages). If set to True, result messages
    # will be persistent, i.e. not lost after a broker restart
    # self.celery_app.conf.result_persistent = True

    # Decides whether publishing task messages will be retried in the case of
    # connection loss or other connection errors
    self.celery_app.conf.task_publish_retry = True

    # Already enabled by default to use UTC
    # self.celery_app.conf.enable_utc
    # self.celery_app.conf.timezone

    # Not needed, because tasks are dynamically injected
    # self.celery_app.conf.imports
    # self.celery_app.conf.includes

    # Note about priority: multiple queues are better than prioritized tasks
    # https://docs.celeryproject.org/en/master/faq.html#does-celery-support-task-priorities

    # Max priority default value for all queues
    # Required to be able to set the priority parameter on task calls
    self.celery_app.conf.task_queue_max_priority = 10

    # Default priority for tasks (if not specified)
    self.celery_app.conf.task_default_priority = 5

    # If you want to apply a stricter priority to items,
    # prefetching should probably also be disabled:
    #
    # Late ack means the task messages will be acknowledged after the task
    # has been executed, not just before (the default behavior).
    # self.celery_app.conf.task_acks_late = True

    # How many messages to prefetch at a time, multiplied by the number of
    # concurrent processes. The default is 4 (four messages per process).
    # The default setting is usually a good choice; however, if you have very
    # long running tasks waiting in the queue and you have to start the workers,
    # note that the first worker to start will initially receive four times the
    # number of messages. Thus the tasks may not be fairly distributed among
    # the workers. To disable prefetching, set worker_prefetch_multiplier to 1.
    # Setting it to 0 will instead allow the worker to keep consuming as many
    # messages as it wants.
    self.celery_app.conf.worker_prefetch_multiplier = 1
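
    # Usage sketch for the priority settings above (task name is illustrative):
    # with task_queue_max_priority=10 and task_default_priority=5, a single
    # call can jump the queue by passing the priority explicitly:
    #   my_task.apply_async(args=(payload,), priority=9)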
    # Introduced in Celery 5.1: on connection loss, cancel all currently
    # executing tasks with late acknowledgement enabled. These tasks cannot be
    # acknowledged because the connection is gone, and they are automatically
    # redelivered back to the queue. In Celery 5.1 this is set to False by
    # default; it will default to True in Celery 6.0
    self.celery_app.conf.worker_cancel_long_running_tasks_on_connection_loss = True

    if not PRODUCTION:
        # Skip initial warnings by avoiding the (deprecated) pickle format.
        # Only set in DEV mode, since in PROD mode the auth serializer is used
        self.celery_app.conf.accept_content = ["json"]
        self.celery_app.conf.task_serializer = "json"
        self.celery_app.conf.result_serializer = "json"

    if Env.get_bool("CELERYBEAT_ENABLED"):

        CeleryExt.CELERYBEAT_SCHEDULER = backend

        if backend == "REDIS":
            service_vars = Env.load_variables_group(prefix="redis")
            url = self.get_redis_url(
                service_vars, protocol="redis", db=RedisExt.CELERY_BEAT_DB
            )
            self.celery_app.conf["REDBEAT_REDIS_URL"] = url
            self.celery_app.conf["REDBEAT_KEY_PREFIX"] = REDBEAT_KEY_PREFIX
            log.info("Celery-beat connected to Redis: {}", obfuscate_url(url))
        else:  # pragma: no cover
            log.warning(
                "Cannot configure celery beat scheduler with backend: {}", backend
            )

    # self.disconnected = False

    conf = self.celery_app.conf
    # Replace the previous app with the new settings
    self.celery_app = Celery(
        "RAPyDo", broker=conf.broker_url, backend=conf.result_backend
    )
    self.celery_app.conf = conf

    if PRODUCTION:
        # https://docs.celeryq.dev/en/stable/userguide/security.html#message-signing
        self.celery_app.conf.update(
            security_key=SSL_SECRET,
            security_certificate=SSL_CERTIFICATE,
            security_cert_store=SSL_CERTIFICATE,
            security_digest="sha256",
            task_serializer="auth",
            result_serializer="auth",
            event_serializer="auth",
            accept_content=["auth"],
        )
        self.celery_app.setup_security()

    for funct in Meta.get_celery_tasks(f"{CUSTOM_PACKAGE}.tasks"):
        # Weird errors due to celery-stubs?
        # "Callable[[], Any]" has no attribute "register"
        # The code is correct... let's ignore it
        self.celery_app.tasks.register(funct)  # type: ignore

    return self
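
# A hedged sketch of how a periodic task could be registered with the RedBeat
# scheduler configured above (REDBEAT_REDIS_URL / REDBEAT_KEY_PREFIX). The
# entry name and task path are illustrative; requires the redbeat package.
def _demo_redbeat_entry(celery_app: "Celery") -> None:
    from celery.schedules import schedule
    from redbeat import RedBeatSchedulerEntry

    entry = RedBeatSchedulerEntry(
        name="demo-cleanup",  # illustrative entry name
        task="mypackage.tasks.cleanup",  # illustrative task path
        schedule=schedule(run_every=3600),  # run every hour
        app=celery_app,
    )
    entry.save()  # persisted in Redis under REDBEAT_KEY_PREFIX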