Пример #1
0
    def setup(self):
        """
        Initialise the CrateDB connection, cursor and DB metadata.

        Reuses a cached connection from the ConnectionManager when one
        exists; otherwise opens a new one and caches it. Also reads the
        retry backoff factor and the wait-active-shards setting from the
        environment, and checks the server is at least CrateDB 4.

        :raises Exception: if connecting fails or the server major version
            is below 4.
        """
        self.ccm = ConnectionManager()
        self.connection = self.ccm.get_connection('crate')
        # Backoff factor controls the retry interval between consecutive
        # connection attempts.
        backoff_factor = EnvReader(log=logging.getLogger(__name__).debug) \
            .read(FloatVar('CRATE_BACKOFF_FACTOR', 0.0))
        if self.connection is None:
            # Only build the URL when we actually need to connect.
            url = "{}:{}".format(self.host, self.port)
            try:
                self.connection = client.connect([url],
                                                 error_trace=True,
                                                 backoff_factor=backoff_factor)
                self.ccm.set_connection('crate', self.connection)
            except Exception as e:
                self.logger.warning(str(e), exc_info=True)
                raise  # bare raise keeps the original traceback intact

        self.cursor = self.connection.cursor()
        # TODO this reduces queries to crate, but only within a single API
        # call to QUANTUMLEAP; we need to think if we want to cache this
        # information and save a few msec for every API call.
        self.db_version = self.get_db_version()
        self.active_shards = EnvReader(log=logging.getLogger(__name__).debug) \
            .read(StrVar('CRATE_WAIT_ACTIVE_SHARDS', '1'))

        major = int(self.db_version.split('.')[0])
        if major < 4:
            logging.error("CRATE 4.x is the minimal version supported")
            raise Exception("Unsupported CrateDB version")
Пример #2
0
def CrateTranslatorInstance():
    """
    Yield a context-managed CrateTranslator built from the CRATE_HOST and
    CRATE_PORT env vars (defaults: 'crate', 4200) and the fixed DB name.
    """
    reader = EnvReader(log=logging.getLogger(__name__).info)
    db_host = reader.read(StrVar('CRATE_HOST', 'crate'))
    db_port = reader.read(IntVar('CRATE_PORT', 4200))
    db_name = "ngsi-tsdb"

    with CrateTranslator(db_host, db_port, db_name) as trans:
        yield trans
Пример #3
0
def log():
    """
    Configure root logging from the LOGLEVEL env var (default 'INFO')
    and return this module's logger.
    """
    env = EnvReader(log=logging.getLogger(__name__).info)
    log_level = env.read(StrVar('LOGLEVEL', 'INFO')).upper()

    log_format = ('%(asctime)s.%(msecs)03d '
                  '%(levelname)s:%(name)s:%(message)s '
                  'Thread ID: [%(thread)d]  Process ID: [%(process)d]')
    logging.basicConfig(level=log_level,
                        format=log_format,
                        datefmt='%Y-%m-%d %I:%M:%S')
    return logging.getLogger(__name__)
def default_backend() -> MaybeString:
    """
    Work out the backend to use when no tenant-specific one applies.

    The env var takes precedence over the YAML config file's
    'default-backend' entry; the Crate backend is the final fallback.
    """
    yaml_cfg = YamlReader(log=log().debug).from_env_file(
        QL_CONFIG_ENV_VAR, defaults={})
    from_config = maybe_string_match(yaml_cfg, 'default-backend')

    from_env = EnvReader(log=log().debug).read(
        StrVar(QL_DEFAULT_DB_ENV_VAR, None))

    return from_env or from_config or CRATE_BACKEND
Пример #5
0
def lookup_backend(fiware_service: str) -> MaybeString:
    """
    Look up the backend to use for the given tenant.

    Precedence: tenant-specific config entry, then the env var, then the
    config file's 'default-backend' entry, then the built-in Crate
    fallback.

    NOTE(review): the original read the env var with CRATE_BACKEND as its
    default, which made the config file's 'default-backend' entry
    unreachable; the env var is now read with no default (matching the
    sibling ``default_backend``) so each fallback level can take effect.

    :param fiware_service: the tenant name.
    :return: the backend identifier, or None if none could be determined.
    """
    cfg_reader = YamlReader(log=log().debug)
    env_reader = EnvReader(log=log().debug)

    config = cfg_reader.from_env_file(QL_CONFIG_ENV_VAR, defaults={})
    tenant_backend = maybe_string_match(config, 'tenants', fiware_service,
                                        'backend')
    default_backend = maybe_string_match(config, 'default-backend')

    env_backend = env_reader.read(StrVar(QL_DEFAULT_DB_ENV_VAR, None))

    return tenant_backend or env_backend or default_backend or CRATE_BACKEND
Пример #6
0
def configured_insert_max_size_in_bytes() -> Optional[int]:
    """
    Read the insert max size env var and return its value in bytes if
    set to a parsable value or ``None`` otherwise. Notice if a value
    is present but is garbage we still return ``None`` but we also
    log a warning.

    :return: the max size in bytes if available, ``None`` otherwise.
    """
    env_reader = EnvReader(log=_log().debug)
    parsed = env_reader.safe_read(BitSizeVar(INSERT_MAX_SIZE_VAR, None))
    # Compare against None rather than truthiness so a legitimately
    # configured size of zero bytes isn't silently treated as "unset".
    if parsed is not None:
        return int(parsed.to_Byte())
    return None
Пример #7
0
class GeoCodingEnvReader:
    """
    Helper class to encapsulate the reading of geo-coding env vars.
    """

    def __init__(self):
        self.env = EnvReader(log=logging.getLogger(__name__).info)

    def use_geocoding(self) -> bool:
        """Is geo-coding enabled? Defaults to False."""
        use_it = BoolVar('USE_GEOCODING', False)
        return self.env.read(use_it)

    def redis_host(self) -> MaybeString:
        """Redis host name, or None when not configured."""
        host_var = StrVar('REDIS_HOST', None)
        return self.env.read(host_var)

    def redis_port(self) -> int:
        """Redis port number, defaulting to the standard 6379."""
        port_var = IntVar('REDIS_PORT', 6379)
        return self.env.read(port_var)
Пример #8
0
class SQLTranslatorConfig:
    """
    Provide access to SQL Translator config values.
    """

    def __init__(self, env: dict = os.environ):
        self.store = EnvReader(var_store=env,
                               log=logging.getLogger(__name__).debug)

    def default_limit(self) -> int:
        """Row limit to apply when the caller specifies none."""
        limit_var = IntVar(DEFAULT_LIMIT_VAR, default_value=FALLBACK_LIMIT)
        return self.store.safe_read(limit_var)

    def keep_raw_entity(self) -> bool:
        """Keep the raw entity? Defaults to False."""
        keep_var = BoolVar(KEEP_RAW_ENTITY_VAR, False)
        return self.store.safe_read(keep_var)
Пример #9
0
def log_level() -> int:
    """
    Read the log level to use from the ``LOGLEVEL`` environment variable.
    If the variable isn't set, return the info level ID. If set but its
    value isn't one of the strings recognised by the ``logging`` lib
    (case-insensitive comparison), then return the info level ID again.
    Otherwise return the corresponding log level ID.

    :return: one of the log level IDs known to the ``logging`` lib.
    """
    reader = EnvReader()
    level_name = reader.safe_read(LOG_LEVEL_VAR).upper()
    # logging.getLevelName maps a known level name to its numeric ID but
    # returns a "Level %s" string for unknown names — use the public API
    # instead of reaching into the private logging._nameToLevel mapping.
    level_id = logging.getLevelName(level_name)
    return level_id if isinstance(level_id, int) else logging.INFO
Пример #10
0
def successful_task_retention_period() -> int:
    """
    How long to keep successfully executed tasks in the system. Past that
    period, any successful task gets deleted.

    :return: how long, in seconds, to keep successful tasks.
    """
    reader = EnvReader()
    return reader.safe_read(SUCCESS_TTL_VAR)
Пример #11
0
def offload_to_work_queue() -> bool:
    """
    Offload task execution to the work queue?

    :return: `True` to offload tasks to the work queue; `False` to execute
        them synchronously within the calling thread.
    """
    reader = EnvReader()
    return reader.safe_read(OFFLOAD_WORK_VAR)
Пример #12
0
def failed_task_retention_period() -> int:
    """
    How long to keep failed tasks in the system. Past that period, failed
    tasks get deleted. Notice if you configure a task with retries, then
    it gets flagged as "failed" only after all retries attempts have failed.

    :return: how long, in seconds, to keep failed tasks.
    """
    reader = EnvReader()
    return reader.safe_read(FAILURE_TTL_VAR)
Пример #13
0
def lookup_backend(fiware_service: str) -> MaybeString:
    """
    Work out which backend serves the given tenant.

    :param fiware_service: the tenant name.
    :return: the tenant-specific backend from the YAML config if there is
        one, the result of ``default_backend()`` otherwise.
    """
    # NOTE: dropped the unused env_reader local the original created here;
    # any env lookup happens inside default_backend().
    cfg_reader = YamlReader(log=log().debug)

    config = cfg_reader.from_env_file(QL_CONFIG_ENV_VAR, defaults={})
    tenant_backend = maybe_string_match(config, 'tenants', fiware_service,
                                        'backend')

    return tenant_backend or default_backend()
Пример #14
0
class CacheEnvReader:
    """
    Helper class to encapsulate the reading of cache (Redis) env vars.

    (Fixed docstring: the original said "geo-coding env vars", copy-pasted
    from the geo-coding reader.)
    """

    def __init__(self):
        self.env = EnvReader(log=logging.getLogger(__name__).debug)

    def redis_host(self) -> MaybeString:
        """Redis host name, or None when not configured."""
        return self.env.read(StrVar('REDIS_HOST', None))

    def redis_port(self) -> int:
        """Redis port number, defaulting to the standard 6379."""
        return self.env.read(IntVar('REDIS_PORT', 6379))

    def default_ttl(self) -> int:
        """Default cache entry time-to-live in seconds (default 60)."""
        return self.env.read(IntVar('DEFAULT_CACHE_TTL', 60))

    def cache_queries(self) -> bool:
        """Cache query results? Defaults to False."""
        return self.env.read(BoolVar('CACHE_QUERIES', False))
Пример #15
0
 def read_env(self, env: dict = os.environ):
     """
     Populate connection settings from the given environment mapping,
     keeping each attribute's current value as its default.
     """
     reader = EnvReader(env, log=logging.getLogger(__name__).debug)
     self.host = reader.read(StrVar(CRATE_HOST_ENV_VAR, self.host))
     self.port = reader.read(IntVar(CRATE_PORT_ENV_VAR, self.port))
     self.db_user = reader.read(StrVar(CRATE_DB_USER_ENV_VAR, self.db_user))
     # mask_value keeps the password out of the debug log output —
     # presumably; confirm against EnvReader's implementation.
     self.db_pass = reader.read(
         StrVar(CRATE_DB_PASS_ENV_VAR, self.db_pass, mask_value=True))
     # Backoff factor: retry interval between consecutive connection
     # attempts.
     self.backoff_factor = reader.read(FloatVar('CRATE_BACKOFF_FACTOR', 0.0))
     self.active_shards = reader.read(StrVar('CRATE_WAIT_ACTIVE_SHARDS', '1'))
Пример #16
0
def db_con_factory(t: DbType) -> Any:
    """
    Build a DB connection for the given backend type.

    :param t: the backend type to connect to.
    :return: a Crate or Timescale (Postgres) connection, or None for any
        other type.
    """
    if t is DbType.CRATE:
        env = EnvReader()
        crate_host = env.read(StrVar('CRATE_HOST', 'localhost'))
        crate_port = env.read(IntVar('CRATE_PORT', 4200))
        return client.connect([f"{crate_host}:{crate_port}"],
                              error_trace=True)

    if t is DbType.TIMESCALE:
        cfg = PostgresConnectionData()
        cfg.read_env()

        # pg8000 is configured for qmark-style placeholders ('?').
        pg8000.paramstyle = "qmark"
        con = pg8000.connect(host=cfg.host,
                             port=cfg.port,
                             database=cfg.db_name,
                             user=cfg.db_user,
                             password=cfg.db_pass)
        con.autocommit = True
        return con

    return None
Пример #17
0
def recover_from_enqueueing_failure() -> bool:
    """
    Attempt to run tasks synchronously if the queue is temporarily not
    available? When offloading task execution to the work queue, it could
    happen that the queueing of a task fails, e.g. the queue backend is
    down and the task can't be added to the work queue. In that case, if
    this function returns ``True``, then QL tries to recover from the
    error by executing the task synchronously in the calling thread.
    On the other hand, if this function returns ``False``, then QL will
    just raise an error.

    Only take this setting into account if ``offload_to_work_queue`` is
    ``True``. (If ``False``, then tasks already get run synchronously.)

    :return: ``True`` for try synchronous task execution on enqueueing
        failure, ``False`` for raise an error instead.
    """
    reader = EnvReader()
    return reader.safe_read(RECOVER_FROM_ENQUEUEING_FAILURE_VAR)
Пример #18
0
def use_flask() -> bool:
    """Read the ``USE_FLASK`` boolean env var; defaults to ``False``."""
    flask_var = BoolVar('USE_FLASK', False)
    return EnvReader().safe_read(flask_var)
Пример #19
0
def max_retries() -> int:
    """
    :return: how many times a failed task should be retried.
    """
    reader = EnvReader()
    return reader.safe_read(MAX_RETRIES_VAR)
Пример #20
0
def log():
    """
    Configure root logging from the LOGLEVEL env var (default 'INFO')
    and return this module's logger.
    """
    reader = EnvReader(log=logging.getLogger(__name__).info)
    log_level = reader.read(StrVar('LOGLEVEL', 'INFO')).upper()

    logging.basicConfig(level=log_level)
    return logging.getLogger(__name__)
Пример #21
0
 def __init__(self):
     # Env reader shared by this object's accessors; reads are logged at
     # debug level under this module's logger.
     self.env = EnvReader(log=logging.getLogger(__name__).debug)
Пример #22
0
 def __init__(self, env: dict = os.environ):
     # NOTE(review): the default binds the live os.environ mapping at
     # definition time, so reads go against the process env unless a
     # custom mapping is supplied. Reads are logged at info level.
     self.store = EnvReader(var_store=env,
                            log=logging.getLogger(__name__).info)
Пример #23
0
def setup():
    """
    Set the root logger's level from the LOGLEVEL env var (default
    'INFO') and attach the default handler to it.
    """
    level_name = EnvReader(log=logging.getLogger().debug) \
        .read(StrVar('LOGLEVEL', 'INFO')).upper()
    root = logging.getLogger()
    root.setLevel(level_name)
    root.addHandler(default_handler)