def read_env(self, env: dict = os.environ):
    """Populate connection settings from environment variables.

    :param env: mapping to read settings from; defaults to ``os.environ``
        (only read, never mutated).
    """
    reader = EnvReader(env, log=logging.getLogger(__name__).debug)

    self.host = reader.read(StrVar(CRATE_HOST_ENV_VAR, self.host))
    self.port = reader.read(IntVar(CRATE_PORT_ENV_VAR, self.port))
    self.db_user = reader.read(StrVar(CRATE_DB_USER_ENV_VAR, self.db_user))
    # mask_value keeps the password out of the debug log.
    self.db_pass = reader.read(
        StrVar(CRATE_DB_PASS_ENV_VAR, self.db_pass, mask_value=True))
    # Scales the wait between consecutive connection retries.
    self.backoff_factor = reader.read(FloatVar('CRATE_BACKOFF_FACTOR', 0.0))
    self.active_shards = reader.read(
        StrVar('CRATE_WAIT_ACTIVE_SHARDS', '1'))
def setup(self):
    """Open (or reuse) the CrateDB connection and validate the server version.

    The connection is cached in the :class:`ConnectionManager` under the
    'crate' key so subsequent calls reuse it instead of reconnecting.

    :raises Exception: if the connection cannot be established, or the
        server's major version is below 4.
    """
    url = "{}:{}".format(self.host, self.port)
    self.ccm = ConnectionManager()
    self.connection = self.ccm.get_connection('crate')

    if self.connection is None:
        try:
            self.connection = client.connect([url], error_trace=True)
            self.ccm.set_connection('crate', self.connection)
        except Exception as e:
            self.logger.warning(str(e), exc_info=True)
            # Bare raise keeps the original exception and traceback intact
            # (idiomatic re-raise instead of `raise e`).
            raise

    self.cursor = self.connection.cursor()
    # TODO this reduce queries to crate,
    # but only within a single API call to QUANTUMLEAP
    # we need to think if we want to cache this information
    # and save few msec for evey API call
    self.db_version = self.get_db_version()

    self.active_shards = EnvReader(log=logging.getLogger(__name__).debug) \
        .read(StrVar('CRATE_WAIT_ACTIVE_SHARDS', '1'))

    major = int(self.db_version.split('.')[0])
    if major < 4:
        # Use the instance logger, consistent with the warning above
        # (was the root logger via logging.error).
        self.logger.error("CRATE 4.x is the minimal version supported")
        raise Exception("Unsupported CrateDB version")
def CrateTranslatorInstance():
    """Yield a ready-to-use CrateTranslator, closing it when done.

    Host and port come from the CRATE_HOST / CRATE_PORT environment
    variables, falling back to 'crate' and 4200.
    """
    env = EnvReader(log=logging.getLogger(__name__).info)
    host = env.read(StrVar('CRATE_HOST', 'crate'))
    port = env.read(IntVar('CRATE_PORT', 4200))

    with CrateTranslator(host, port, "ngsi-tsdb") as trans:
        yield trans
def log():
    """Configure root logging from the LOGLEVEL env var and return a logger.

    :return: the logger named after this module.
    """
    r = EnvReader(log=logging.getLogger(__name__).info)
    level = r.read(StrVar('LOGLEVEL', 'INFO')).upper()
    logging.basicConfig(level=level,
                        format='%(asctime)s.%(msecs)03d '
                               '%(levelname)s:%(name)s:%(message)s '
                               'Thread ID: [%(thread)d] '
                               'Process ID: [%(process)d]',
                        # %H is the 24-hour clock; the previous %I (12-hour)
                        # had no %p marker, making timestamps ambiguous.
                        datefmt='%Y-%m-%d %H:%M:%S')
    return logging.getLogger(__name__)
def default_backend() -> MaybeString:
    """Resolve the default database backend.

    Precedence: the QL_DEFAULT_DB env var, then the 'default-backend'
    entry of the YAML config file, then the built-in CRATE_BACKEND.

    :return: the backend name.
    """
    # Call log() once and share the handle; the original called it twice,
    # repeating the logger setup for no benefit.
    debug = log().debug
    cfg_reader = YamlReader(log=debug)
    env_reader = EnvReader(log=debug)

    config = cfg_reader.from_env_file(QL_CONFIG_ENV_VAR, defaults={})
    config_backend = maybe_string_match(config, 'default-backend')
    env_backend = env_reader.read(StrVar(QL_DEFAULT_DB_ENV_VAR, None))

    return env_backend or config_backend or CRATE_BACKEND
def lookup_backend(fiware_service: str) -> MaybeString:
    """Find which backend serves the given tenant.

    Precedence: tenant-specific 'backend' entry in the config file, then
    the QL_DEFAULT_DB env var, then the config file's 'default-backend',
    then the built-in CRATE_BACKEND.

    :param fiware_service: tenant (FIWARE service) name to look up.
    :return: the backend name.
    """
    # Single log() call shared by both readers, consistent with
    # default_backend (which also logs at debug level).
    debug = log().debug
    cfg_reader = YamlReader(log=debug)
    env_reader = EnvReader(log=debug)

    config = cfg_reader.from_env_file(QL_CONFIG_ENV_VAR, defaults={})
    tenant_backend = maybe_string_match(config, 'tenants', fiware_service,
                                        'backend')
    default_backend = maybe_string_match(config, 'default-backend')
    # Read with no default: the previous CRATE_BACKEND default made
    # env_backend always truthy, so the config file's 'default-backend'
    # could never take effect.
    env_backend = env_reader.read(StrVar(QL_DEFAULT_DB_ENV_VAR, None))

    return tenant_backend or env_backend or default_backend or CRATE_BACKEND
def db_con_factory(t: DbType) -> Any:
    """Build a database connection for the given backend type.

    :param t: which backend to connect to.
    :return: a Crate client connection, an autocommit pg8000 connection,
        or None for an unrecognised type.
    """
    if t is DbType.CRATE:
        reader = EnvReader()
        crate_host = reader.read(StrVar('CRATE_HOST', 'localhost'))
        crate_port = reader.read(IntVar('CRATE_PORT', 4200))
        return client.connect([f"{crate_host}:{crate_port}"],
                              error_trace=True)

    if t is DbType.TIMESCALE:
        cfg = PostgresConnectionData()
        cfg.read_env()
        pg8000.paramstyle = "qmark"
        cx = pg8000.connect(host=cfg.host,
                            port=cfg.port,
                            database=cfg.db_name,
                            user=cfg.db_user,
                            password=cfg.db_pass)
        cx.autocommit = True
        return cx

    return None
def redis_host(self) -> MaybeString:
    """Return the Redis host from the REDIS_HOST env var, or None if unset."""
    var = StrVar('REDIS_HOST', None)
    return self.env.read(var)
def redis_host(self) -> MaybeString:
    """Return the Redis host env var's value, or None if unset."""
    var = StrVar(REDIS_HOST_ENV_VAR, None)
    return self.env.read(var)
def setup():
    """Configure the root logger: level from LOGLEVEL, plus default_handler."""
    reader = EnvReader(log=logging.getLogger().debug)
    level_name = reader.read(StrVar('LOGLEVEL', 'INFO')).upper()

    root = logging.getLogger()
    root.setLevel(level_name)
    root.addHandler(default_handler)
def log():
    """Apply basic logging config at the LOGLEVEL env-var level.

    :return: the logger named after this module.
    """
    env = EnvReader(log=logging.getLogger(__name__).info)
    level_name = env.read(StrVar('LOGLEVEL', 'INFO')).upper()
    logging.basicConfig(level=level_name)
    return logging.getLogger(__name__)
def successful_task_retention_period() -> int: """ How long to keep successfully executed tasks in the system. Past that period, any successful task gets deleted. :return: how long, in seconds, to keep successful tasks. """ return EnvReader().safe_read(SUCCESS_TTL_VAR) # NOTE. Retention periods. # In the future we could have more fine-grained configuration so e.g. each # task type gets different retention periods. LOG_LEVEL_VAR = StrVar('LOGLEVEL', 'INFO') def log_level() -> int: """ Read the log level to use from the ``LOGLEVEL`` environment variable. If the variable isn't set, return the info level ID. If set but its value isn't one of the strings recognised by the ``logging`` lib (case-insensitive comparison), then return the info level ID again. Otherwise return the corresponding log level ID. :return: one of the log level IDs known to the ``logging`` lib. """ r = EnvReader() level_name = r.safe_read(LOG_LEVEL_VAR).upper() try: