Code Example #1
    def __init__(self, config_name):
        self.logger = get_logger(__name__)
        self._config_name = config_name

        self._db_user = os.getenv("INVENTORY_DB_USER", "insights")
        self._db_password = os.getenv("INVENTORY_DB_PASS", "insights")
        self._db_host = os.getenv("INVENTORY_DB_HOST", "localhost")
        self._db_name = os.getenv("INVENTORY_DB_NAME", "insights")

        self.db_uri = f"postgresql://{self._db_user}:{self._db_password}@{self._db_host}/{self._db_name}"
        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))

        self.base_url_path = self._build_base_url_path()
        self.api_url_path_prefix = self._build_api_path()
        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL",
                                                    "")
        self.mgmt_url_path_prefix = os.getenv(
            "INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")

        self.api_urls = [
            self.api_url_path_prefix, self.legacy_api_url_path_prefix
        ]

        self._log_configuration()
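
None of the examples on this page show `get_logger` itself. For reference, a minimal sketch of the kind of wrapper these projects use follows; the body is an assumption (real implementations typically also attach handlers and formatters once during startup):

import logging

def get_logger(name):
    # Minimal sketch: hand out a named stdlib logger; handler/formatter
    # setup is assumed to happen once elsewhere (e.g. configure_logging()).
    return logging.getLogger(name)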
Code Example #2
    def __init__(self, runtime_environment):
        self.logger = get_logger(__name__)
        self._runtime_environment = runtime_environment

        self._db_user = os.getenv("INVENTORY_DB_USER", "insights")
        self._db_password = os.getenv("INVENTORY_DB_PASS", "insights")
        self._db_host = os.getenv("INVENTORY_DB_HOST", "localhost")
        self._db_name = os.getenv("INVENTORY_DB_NAME", "insights")
        self._db_ssl_mode = os.getenv("INVENTORY_DB_SSL_MODE", "")
        self._db_ssl_cert = os.getenv("INVENTORY_DB_SSL_CERT", "")

        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))

        self.db_uri = self._build_db_uri(self._db_ssl_mode)

        self.base_url_path = self._build_base_url_path()
        self.api_url_path_prefix = self._build_api_path()
        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL", "")
        self.mgmt_url_path_prefix = os.getenv("INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")

        self.api_urls = [self.api_url_path_prefix, self.legacy_api_url_path_prefix]

        self.host_ingress_topic = os.environ.get("KAFKA_HOST_INGRESS_TOPIC", "platform.inventory.host-ingress")
        self.host_ingress_consumer_group = os.environ.get("KAFKA_HOST_INGRESS_GROUP", "inventory-mq")
        self.host_egress_topic = os.environ.get("KAFKA_HOST_EGRESS_TOPIC", "platform.inventory.host-egress")
        self.system_profile_topic = os.environ.get("KAFKA_TOPIC", "platform.system-profile")
        self.bootstrap_servers = os.environ.get("KAFKA_BOOTSTRAP_SERVERS", "localhost:29092")
        self.event_topic = os.environ.get("KAFKA_EVENT_TOPIC", "platform.inventory.events")

        self.prometheus_pushgateway = os.environ.get("PROMETHEUS_PUSHGATEWAY", "localhost:9091")
        self.kubernetes_namespace = os.environ.get("NAMESPACE")

        # https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html#kafka.KafkaConsumer
        self.kafka_consumer = {
            "request_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_REQUEST_TIMEOUT_MS", "305000")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_CONSUMER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
            "auto_offset_reset": os.environ.get("KAFKA_CONSUMER_AUTO_OFFSET_RESET", "latest"),
            "auto_commit_interval_ms": int(os.environ.get("KAFKA_CONSUMER_AUTO_COMMIT_INTERVAL_MS", "5000")),
            "max_poll_records": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_RECORDS", "10")),
            "max_poll_interval_ms": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS", "300000")),
            "session_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_SESSION_TIMEOUT_MS", "10000")),
            "heartbeat_interval_ms": int(os.environ.get("KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS", "3000")),
        }

        self.payload_tracker_kafka_topic = os.environ.get("PAYLOAD_TRACKER_KAFKA_TOPIC", "platform.payload-status")
        self.payload_tracker_service_name = os.environ.get("PAYLOAD_TRACKER_SERVICE_NAME", "inventory")
        payload_tracker_enabled = os.environ.get("PAYLOAD_TRACKER_ENABLED", "true")
        self.payload_tracker_enabled = payload_tracker_enabled.lower() == "true"

        self.culling_stale_warning_offset_days = int(os.environ.get("CULLING_STALE_WARNING_OFFSET_DAYS", "7"))
        self.culling_culled_offset_days = int(os.environ.get("CULLING_CULLED_OFFSET_DAYS", "14"))

        self.xjoin_graphql_url = os.environ.get("XJOIN_GRAPHQL_URL", "http://localhost:4000/graphql")
        self.bulk_query_source = getattr(BulkQuerySource, os.environ.get("BULK_QUERY_SOURCE", "db"))
        self.bulk_query_source_beta = getattr(BulkQuerySource, os.environ.get("BULK_QUERY_SOURCE_BETA", "db"))
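
The `kafka_consumer` dict above uses kafka-python's own option names, so it can be unpacked directly into the consumer constructor. A sketch of that usage; the wiring itself is assumed, not shown in the original project:

from kafka import KafkaConsumer

def build_consumer(config):
    # request_timeout_ms, auto_offset_reset, etc. are all valid
    # KafkaConsumer keyword arguments, so the dict unpacks cleanly.
    return KafkaConsumer(
        config.host_ingress_topic,
        group_id=config.host_ingress_consumer_group,
        bootstrap_servers=config.bootstrap_servers,
        **config.kafka_consumer,
    )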
Code Example #3
    def __init__(self):
        self.logger = get_logger(__name__)

        self._db_user = os.getenv("INVENTORY_DB_USER", "insights")
        self._db_password = os.getenv("INVENTORY_DB_PASS", "insights")
        self._db_host = os.getenv("INVENTORY_DB_HOST", "localhost")
        self._db_name = os.getenv("INVENTORY_DB_NAME", "insights")
        self._db_ssl_mode = os.getenv("INVENTORY_DB_SSL_MODE", "")
        self._db_ssl_cert = os.getenv("INVENTORY_DB_SSL_CERT", "")

        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))

        self.db_uri = self._build_db_uri(self._db_ssl_mode)

        self.base_url_path = self._build_base_url_path()
        self.api_url_path_prefix = self._build_api_path()
        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL",
                                                    "")
        self.mgmt_url_path_prefix = os.getenv(
            "INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")

        self.api_urls = [
            self.api_url_path_prefix, self.legacy_api_url_path_prefix
        ]

        self.host_ingress_topic = os.environ.get(
            "KAFKA_HOST_INGRESS_TOPIC", "platform.inventory.host-ingress")
        self.host_ingress_consumer_group = os.environ.get(
            "KAFKA_HOST_INGRESS_GROUP", "inventory-mq")
        self.host_egress_topic = os.environ.get(
            "KAFKA_HOST_EGRESS_TOPIC", "platform.inventory.host-egress")
        self.system_profile_topic = os.environ.get("KAFKA_TOPIC",
                                                   "platform.system-profile")
        self.consumer_group = os.environ.get("KAFKA_GROUP", "inventory")
        self.bootstrap_servers = os.environ.get("KAFKA_BOOTSTRAP_SERVERS",
                                                "localhost:29092")
        self.event_topic = os.environ.get("KAFKA_EVENT_TOPIC",
                                          "platform.inventory.events")
        self.kafka_enabled = all(
            map(os.environ.get,
                ["KAFKA_TOPIC", "KAFKA_GROUP", "KAFKA_BOOTSTRAP_SERVERS"]))

        self.payload_tracker_kafka_topic = os.environ.get(
            "PAYLOAD_TRACKER_KAFKA_TOPIC", "platform.payload-status")
        self.payload_tracker_service_name = os.environ.get(
            "PAYLOAD_TRACKER_SERVICE_NAME", "inventory")
        payload_tracker_enabled = os.environ.get("PAYLOAD_TRACKER_ENABLED",
                                                 "true")
        self.payload_tracker_enabled = payload_tracker_enabled.lower() == "true"

        self.culling_stale_warning_offset_days = int(
            os.environ.get("CULLING_STALE_WARNING_OFFSET_DAYS", "7"))
        self.culling_culled_offset_days = int(
            os.environ.get("CULLING_CULLED_OFFSET_DAYS", "14"))
Code Example #4
File: main.py  Project: stfloyd/pycli-template
def main(argv):
    '''
    Main entry point into the program.
    '''

    # Get arguments and argument parser.
    (args, parser) = app.cli(argv)

    # Initialize logging and set verbosity level.
    logger = logging.get_logger(__name__)

    logger.debug(f'Program arguments: {argv}')
    # Check if any command arguments have been passed.
    if len(argv) <= 1:
        # No arguments passed.
        logger.warning('No command arguments passed')
        #parser.print_help()
        #sys.exit(errno.EAGAIN)

    if args.list_configs:
        configs = settings.list_configs()
        print(configs)
        return os.EX_OK

    # Initialize our app.
    try:
        app.init(args)
    except Exception as e:
        logger.exception(e)
        logger.failure(f'App initialization failed: {e.errno}')
        parser.print_help()
        return os.EX_SOFTWARE

    # Load application configuration.
    try:
        config = app.load_config(args)
    except Exception as e:
        logger.exception(e)
        logger.failure(f'App configuration failed: {e.errno}')
        parser.print_help()
        return os.EX_CONFIG

    # Do something with config before running main app logic.

    # Run main app logic
    try:
        exit_code = app.process(args, config)
    except Exception as e:
        logger.exception(e)
        logger.failure(f'App processing failed: {e.errno}')
        parser.print_help()
        return os.EX_SOFTWARE

    # Handle anything else you need to, we're getting out of here.

    return exit_code
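
A conventional entry point for this `main(argv)` signature would pass `sys.argv` through and propagate the exit code; this wiring is assumed rather than taken from the project:

import sys

if __name__ == '__main__':
    sys.exit(main(sys.argv))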
Code Example #5
def run(config, session):
    logger = get_logger(LOGGER_NAME)

    conditions = Conditions.from_config(config)
    query_filter = stale_timestamp_filter(*conditions.culled())

    query = session.query(Host).filter(query_filter)

    events = delete_hosts(query)
    for host_id, deleted in events:
        if deleted:
            logger.info("Deleted host: %s", host_id)
        else:
            logger.info("Host %s already deleted. Delete event not emitted.",
                        host_id)
Code Example #6
File: models.py  Project: alexdev27/cashbox_fastAPI
    def close_shift(*args, **kwargs):
        info = real_kkt.close_shift(*args)

        arcus_logger = get_logger('arcus_logs.txt', 'arcus_logger')

        arcus_logger.info(f'=== Start of function execution in package {__file__}'
                          f' - {real_kkt.close_shift_pin_pad.__name__}')

        try:
            arcus_info = real_kkt.close_shift_pin_pad(*args)
            if arcus_info['error']:
                raise Exception(arcus_info)
            arcus_logger.info(f'=== Function {real_kkt.close_shift_pin_pad.__name__} completed without errors')
        except Exception as e:
            arcus_logger.error(f'\t====== Error ======\n'
                               f'\tError while executing function {real_kkt.close_shift_pin_pad.__name__}.\n'
                               f'\tError details: {str(e)}')

        return info
Code Example #7
def main(config_name):
    config = _init_config(config_name)
    init_tasks(config)

    registry = CollectorRegistry()
    for metric in COLLECTED_METRICS:
        registry.register(metric)

    Session = _init_db(config)
    session = Session()

    try:
        with session_guard(session):
            run(config, session)
    except Exception as exception:
        logger = get_logger(LOGGER_NAME)
        logger.exception(exception)
    finally:
        flush()

        job = _prometheus_job(config.kubernetes_namespace)
        push_to_gateway(config.prometheus_pushgateway, job, registry)
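
For reference, the prometheus_client flow used above (build a registry, register metrics, push everything under one job) can be exercised standalone. A minimal sketch; the metric and job names are illustrative:

from prometheus_client import CollectorRegistry, Counter, push_to_gateway

registry = CollectorRegistry()
runs = Counter("job_runs_total", "Number of job runs", registry=registry)
runs.inc()
# Push the whole registry to the gateway under a single job label.
push_to_gateway("localhost:9091", job="example-job", registry=registry)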
Code Example #8
    def __init__(self):
        self.logger = get_logger(__name__)

        self._db_user = os.getenv("INVENTORY_DB_USER", "insights")
        self._db_password = os.getenv("INVENTORY_DB_PASS", "insights")
        self._db_host = os.getenv("INVENTORY_DB_HOST", "localhost")
        self._db_name = os.getenv("INVENTORY_DB_NAME", "insights")
        self._db_ssl_mode = os.getenv("INVENTORY_DB_SSL_MODE", "")
        self._db_ssl_cert = os.getenv("INVENTORY_DB_SSL_CERT", "")

        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))

        self.db_uri = self._build_db_uri(self._db_ssl_mode)

        self.base_url_path = self._build_base_url_path()
        self.api_url_path_prefix = self._build_api_path()
        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL",
                                                    "")
        self.mgmt_url_path_prefix = os.getenv(
            "INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")

        self.api_urls = [
            self.api_url_path_prefix, self.legacy_api_url_path_prefix
        ]

        self.system_profile_topic = os.environ.get("KAFKA_TOPIC",
                                                   "platform.system-profile")
        self.consumer_group = os.environ.get("KAFKA_GROUP", "inventory")
        self.bootstrap_servers = os.environ.get("KAFKA_BOOTSTRAP_SERVERS",
                                                "kafka:29092")
        self.event_topic = os.environ.get("KAFKA_EVENT_TOPIC",
                                          "platform.inventory.events")
        self.kafka_enabled = all(
            map(os.environ.get,
                ["KAFKA_TOPIC", "KAFKA_GROUP", "KAFKA_BOOTSTRAP_SERVERS"]))
Code Example #9
        try:
            test_results = validate_sp_schemas(
                consumer,
                {
                    config.kafka_consumer_topic,
                    config.additional_validation_topic
                },
                schemas,
                VALIDATE_DAYS,
                config.sp_validator_max_messages,
            )
            consumer.close()
        except ValueError as ve:
            logger.exception(ve)
            consumer.close()
            sys.exit(1)

        _post_git_results_comment(pr_number, test_results)

    logger.info("The validator has finished. Bye!")
    sys.exit(0)


if __name__ == "__main__":
    configure_logging()

    logger = get_logger(LOGGER_NAME)
    sys.excepthook = partial(_excepthook, logger)

    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    main(logger)
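
`sys.excepthook` is called with `(type, value, traceback)`, so `partial(_excepthook, logger)` implies a handler that takes the logger first. A sketch of what such a hook might look like; the body is an assumption:

def _excepthook(logger, exc_type, value, tb):
    # Log any uncaught exception before the process dies.
    logger.exception("Uncaught exception", exc_info=(exc_type, value, tb))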
Code Example #10
from api import api_operation
from api import build_collection_response
from api import flask_json_response
from api import metrics
from api.host import get_bulk_query_source
from api.host_query_xjoin import build_tag_query_dict_tuple
from app import Permission
from app.config import BulkQuerySource
from app.logging import get_logger
from app.xjoin import check_pagination
from app.xjoin import graphql_query
from app.xjoin import pagination_params
from app.xjoin import staleness_filter
from lib.middleware import rbac

logger = get_logger(__name__)

TAGS_QUERY = """
    query hostTags (
        $hostFilter: HostFilter,
        $filter: TagAggregationFilter,
        $order_by: HOST_TAGS_ORDER_BY,
        $order_how: ORDER_DIR,
        $limit: Int,
        $offset: Int
    ) {
        hostTags (
            hostFilter: $hostFilter,
            filter: $filter,
            order_by: $order_by,
            order_how: $order_how,
Code Example #11
from alembic import context
from flask import current_app
from sqlalchemy import engine_from_config
from sqlalchemy import pool

from app.logging import get_logger

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
logger = get_logger("alembic.env")

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata

config.set_main_option("sqlalchemy.url",
                       current_app.config.get("SQLALCHEMY_DATABASE_URI"))
target_metadata = current_app.extensions["migrate"].db.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
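
The snippet is cut off at `run_migrations_offline`. Alembic's stock env.py template fills that function in roughly as follows, using the `config` and `target_metadata` defined above:

def run_migrations_offline():
    # Emit SQL to stdout instead of connecting to the database.
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url, target_metadata=target_metadata,
                      literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()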
Code Example #12
from signal import Signals

from kafka import KafkaConsumer
from prometheus_client import start_http_server

from app import create_app
from app.environment import RuntimeEnvironment
from app.logging import get_logger
from app.queue.event_producer import EventProducer
from app.queue.queue import event_loop
from app.queue.queue import handle_message

logger = get_logger("mq_service")


class ShutdownHandler:
    def __init__(self):
        self._shutdown = False

    def signal_handler(self, signum, frame):
        signame = Signals(signum).name
        logger.info("Gracefully Shutting Down. Received: %s", signame)
        self._shutdown = True

    def shut_down(self):
        return self._shutdown


def main():
    application = create_app(RuntimeEnvironment.SERVICE)
    start_http_server(9126)
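
The `ShutdownHandler` above still has to be registered with the signal module; a sketch of the remaining wiring (the loop hookup is assumed from the imports, not shown in the snippet):

import signal

def run():
    shutdown_handler = ShutdownHandler()
    signal.signal(signal.SIGTERM, shutdown_handler.signal_handler)
    signal.signal(signal.SIGINT, shutdown_handler.signal_handler)
    # The consumer loop can then poll shutdown_handler.shut_down()
    # on each iteration and exit cleanly once it returns True.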
Code Example #13
from app import create_app
from app import events
from app import UNKNOWN_REQUEST_ID_VALUE
from app.environment import RuntimeEnvironment
from app.events import HostEvent
from app.logging import get_logger
from app.logging import threadctx
from app.models import Host

logger = get_logger("utils")


def test_validations(host):
    schema = HostEvent()
    event = events.delete(host)
    deserialized = schema.loads(event)
    return deserialized.errors


def main():
    flask_app = create_app(RuntimeEnvironment.COMMAND)
    with flask_app.app_context() as ctx:
        threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
        ctx.push()
    query = Host.query
    logger.info("Validating delete event for hosts.")
    logger.info("Total number of hosts: %i", query.count())

    number_of_errors = 0
    for host in query.yield_per(1000):
        host_validation_errors = test_validations(host)
Code Example #14
DLL_PATH = envs.get('SPARK_DLL', r'C:\SPARK115F\services\UDSpark.dll')
GetModule(DLL_PATH)

from comtypes.gen._445B09C3_EF00_47B4_9DB0_68DDD7AA9FF1_0_1_0 import FPSpark, IFPSpark
from app.enums import DocumentTypes, PaymentChoices
from app.kkt_device.models import IKKTDevice
from app.exceptions import CashboxException
from app.helpers import round_half_down
from dateutil import parser

import arcus2

DEFAULT_CASHIER_PASSWORD = '******'
DEFAULT_CASHIER_NAME = 'Mr. Printer'

arcus_logger = get_logger('arcus_logs.txt', 'arcus_logger')


def _handle_kkt_errors(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
            return result
        except Exception as exc:
            msg = f'The fiscal register failed to ' \
                  f'execute function ({func.__name__}) ' \
                  f'Error type: {exc.__class__.__name__} ' \
                  f'Description: {str(exc)}'
            # pp(exc_info()[2])
            # print_tb(exc_info()[2], 20)
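
The except block above is cut off after the commented-out debug lines. A typical completion for this decorator pattern logs `msg` and re-raises as the project's own exception type; the CashboxException constructor signature here is an assumption:

            arcus_logger.error(msg)
            # Re-raise as the project's own error type; the constructor
            # signature here is assumed.
            raise CashboxException(msg)
    return wrapper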
Code Example #15
def test_get_logger():
    logger = get_logger(__name__)
    assert logging.SUCCESS == 25
    assert logging.FAILURE == 35
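
SUCCESS and FAILURE are not standard levels, so this test implies the project registers them on the logging module at import time. A sketch of how such custom levels are commonly added; only the names and numbers come from the test above, the rest is an assumption:

import logging

logging.SUCCESS = 25
logging.FAILURE = 35
logging.addLevelName(logging.SUCCESS, "SUCCESS")
logging.addLevelName(logging.FAILURE, "FAILURE")

def _success(self, message, *args, **kwargs):
    if self.isEnabledFor(logging.SUCCESS):
        self._log(logging.SUCCESS, message, args, **kwargs)

def _failure(self, message, *args, **kwargs):
    if self.isEnabledFor(logging.FAILURE):
        self._log(logging.FAILURE, message, args, **kwargs)

# Attach as methods so get_logger(...).success(...) and .failure(...)
# work, matching the calls seen in the pycli-template examples.
logging.Logger.success = _success
logging.Logger.failure = _failure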
Code Example #16
def logger(name):
    return get_logger(f"migrations.{name}")
Code Example #17
    def __init__(self, runtime_environment):
        self.logger = get_logger(__name__)
        self._runtime_environment = runtime_environment

        if os.getenv("CLOWDER_ENABLED", "").lower() == "true":
            self.clowder_config()
        else:
            self.non_clowder_config()

        self._db_ssl_mode = os.getenv("INVENTORY_DB_SSL_MODE", "")
        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))

        self.db_uri = self._build_db_uri(self._db_ssl_mode)

        self.base_url_path = self._build_base_url_path()
        self.api_url_path_prefix = self._build_api_path()
        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL",
                                                    "")
        self.mgmt_url_path_prefix = os.getenv(
            "INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")

        self.api_urls = [
            self.api_url_path_prefix, self.legacy_api_url_path_prefix
        ]
        self.rest_post_enabled = os.environ.get("REST_POST_ENABLED",
                                                "true").lower() == "true"

        self.rbac_enforced = os.environ.get("RBAC_ENFORCED",
                                            "false").lower() == "true"
        self.rbac_retries = os.environ.get("RBAC_RETRIES", 2)
        self.rbac_timeout = os.environ.get("RBAC_TIMEOUT", 10)

        self.host_ingress_consumer_group = os.environ.get(
            "KAFKA_HOST_INGRESS_GROUP", "inventory-mq")
        self.secondary_topic_enabled = os.environ.get(
            "KAFKA_SECONDARY_TOPIC_ENABLED", "false").lower() == "true"

        self.prometheus_pushgateway = os.environ.get("PROMETHEUS_PUSHGATEWAY",
                                                     "localhost:9091")
        self.kubernetes_namespace = os.environ.get("NAMESPACE")

        # https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html#kafka.KafkaConsumer
        self.kafka_consumer = {
            "request_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_REQUEST_TIMEOUT_MS", "305000")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_CONSUMER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
            "auto_offset_reset": os.environ.get("KAFKA_CONSUMER_AUTO_OFFSET_RESET", "latest"),
            "auto_commit_interval_ms": int(os.environ.get("KAFKA_CONSUMER_AUTO_COMMIT_INTERVAL_MS", "5000")),
            "max_poll_records": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_RECORDS", "10")),
            "max_poll_interval_ms": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS", "300000")),
            "session_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_SESSION_TIMEOUT_MS", "10000")),
            "heartbeat_interval_ms": int(os.environ.get("KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS", "3000")),
        }

        # https://kafka-python.readthedocs.io/en/1.4.7/apidoc/KafkaProducer.html#kafkaproducer
        self.kafka_producer = {
            "acks": self._from_dict(PRODUCER_ACKS, "KAFKA_PRODUCER_ACKS", "1"),
            "retries": int(os.environ.get("KAFKA_PRODUCER_RETRIES", "0")),
            "batch_size": int(os.environ.get("KAFKA_PRODUCER_BATCH_SIZE", "16384")),
            "linger_ms": int(os.environ.get("KAFKA_PRODUCER_LINGER_MS", "0")),
            "retry_backoff_ms": int(os.environ.get("KAFKA_PRODUCER_RETRY_BACKOFF_MS", "100")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_PRODUCER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
        }

        self.payload_tracker_service_name = os.environ.get(
            "PAYLOAD_TRACKER_SERVICE_NAME", "inventory")
        payload_tracker_enabled = os.environ.get("PAYLOAD_TRACKER_ENABLED",
                                                 "true")
        self.payload_tracker_enabled = payload_tracker_enabled.lower() == "true"

        self.culling_stale_warning_offset_delta = timedelta(
            days=int(os.environ.get("CULLING_STALE_WARNING_OFFSET_DAYS", "7")),
            minutes=int(
                os.environ.get("CULLING_STALE_WARNING_OFFSET_MINUTES", "0")),
        )
        self.culling_culled_offset_delta = timedelta(
            days=int(os.environ.get("CULLING_CULLED_OFFSET_DAYS", "14")),
            minutes=int(os.environ.get("CULLING_CULLED_OFFSET_MINUTES", "0")),
        )

        self.xjoin_graphql_url = os.environ.get(
            "XJOIN_GRAPHQL_URL", "http://localhost:4000/graphql")
        self.bulk_query_source = getattr(
            BulkQuerySource, os.environ.get("BULK_QUERY_SOURCE", "db"))
        self.bulk_query_source_beta = getattr(
            BulkQuerySource, os.environ.get("BULK_QUERY_SOURCE_BETA", "db"))

        self.host_delete_chunk_size = int(
            os.getenv("HOST_DELETE_CHUNK_SIZE", "1000"))
        self.script_chunk_size = int(os.getenv("SCRIPT_CHUNK_SIZE", "1000"))
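
`_from_dict(PRODUCER_ACKS, "KAFKA_PRODUCER_ACKS", "1")` suggests a small helper that reads an env var and maps it through a dict of allowed values (kafka-python accepts acks of 0, 1, or "all"). A hypothetical sketch of such a method:

    @staticmethod
    def _from_dict(mapping, name, default):
        # e.g. PRODUCER_ACKS = {"0": 0, "1": 1, "all": "all"}
        value = os.environ.get(name, default)
        try:
            return mapping[value]
        except KeyError:
            raise ValueError(
                f"{name} must be one of {list(mapping)}, got {value!r}")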
Code Example #18
File: __init__.py  Project: stfloyd/pco-hammerhead
import os
from logging import getLogger
from argparse import ArgumentParser
import json
import time

import pypco

from webbot import Browser

from app import settings, logging
from app.cli import cli


logger = logging.get_logger(__name__)


def init(args):
    # If the config directory doesn't exist, create it.
    if not os.path.exists(settings.CONFIG_DIR):
        logger.warning(f'Config directory does not exist at: {settings.CONFIG_DIR}')
        try:
            os.makedirs(settings.CONFIG_DIR)
            logger.success('Created config directory')
        except IOError as ioe:
            logger.error(ioe)
            logger.failure('Unable to create config directory')
            raise ioe

    # If the config file doesn't exist, write the default config to it.
    if not os.path.exists(settings.CONFIG_FILE):
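
The snippet cuts off inside this second branch. A plausible completion that mirrors the directory branch above (settings.DEFAULT_CONFIG is an assumed name):

        logger.warning(f'Config file does not exist at: {settings.CONFIG_FILE}')
        try:
            with open(settings.CONFIG_FILE, 'w') as f:
                json.dump(settings.DEFAULT_CONFIG, f, indent=4)
            logger.success('Created default config file')
        except IOError as ioe:
            logger.error(ioe)
            logger.failure('Unable to create default config file')
            raise ioe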
Code Example #19
    def __init__(self, runtime_environment):
        self.logger = get_logger(__name__)
        self._runtime_environment = runtime_environment

        if os.getenv("CLOWDER_ENABLED", "").lower() == "true":
            self.clowder_config()
        else:
            self.non_clowder_config()

        self._db_ssl_mode = os.getenv("INVENTORY_DB_SSL_MODE", "")
        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))

        self.db_uri = self._build_db_uri(self._db_ssl_mode)

        self.base_url_path = self._build_base_url_path()
        self.api_url_path_prefix = self._build_api_path()
        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL",
                                                    "")
        self.mgmt_url_path_prefix = os.getenv(
            "INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")

        self.api_urls = [
            self.api_url_path_prefix, self.legacy_api_url_path_prefix
        ]

        self.bypass_rbac = os.environ.get("BYPASS_RBAC",
                                          "false").lower() == "true"
        self.rbac_retries = os.environ.get("RBAC_RETRIES", 2)
        self.rbac_timeout = os.environ.get("RBAC_TIMEOUT", 10)

        self.host_ingress_consumer_group = os.environ.get(
            "KAFKA_HOST_INGRESS_GROUP", "inventory-mq")
        self.sp_validator_max_messages = int(
            os.environ.get("KAFKA_SP_VALIDATOR_MAX_MESSAGES", "10000"))

        self.prometheus_pushgateway = os.environ.get("PROMETHEUS_PUSHGATEWAY",
                                                     "localhost:9091")
        self.kubernetes_namespace = os.environ.get("NAMESPACE")

        self.kafka_ssl_configs = {
            "security_protocol": os.environ.get("KAFKA_SECURITY_PROTOCOL", "PLAINTEXT").upper(),
            "ssl_cafile": self.kafka_ssl_cafile,
            "sasl_mechanism": os.environ.get("KAFKA_SASL_MECHANISM", "PLAIN").upper(),
            "sasl_plain_username": self.kafka_sasl_username,
            "sasl_plain_password": self.kafka_sasl_password,
        }

        # https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html#kafka.KafkaConsumer
        self.kafka_consumer = {
            "request_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_REQUEST_TIMEOUT_MS", "305000")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_CONSUMER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
            "auto_offset_reset": os.environ.get("KAFKA_CONSUMER_AUTO_OFFSET_RESET", "latest"),
            "auto_commit_interval_ms": int(os.environ.get("KAFKA_CONSUMER_AUTO_COMMIT_INTERVAL_MS", "5000")),
            "max_poll_records": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_RECORDS", "10")),
            "max_poll_interval_ms": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS", "300000")),
            "session_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_SESSION_TIMEOUT_MS", "10000")),
            "heartbeat_interval_ms": int(os.environ.get("KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS", "3000")),
            **self.kafka_ssl_configs,
        }

        self.validator_kafka_consumer = {
            "group_id": "inventory-sp-validator",
            "request_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_REQUEST_TIMEOUT_MS", "305000")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_CONSUMER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
            "enable_auto_commit": False,
            "max_poll_records": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_RECORDS", "10000")),
            "max_poll_interval_ms": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS", "300000")),
            "session_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_SESSION_TIMEOUT_MS", "10000")),
            "heartbeat_interval_ms": int(os.environ.get("KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS", "3000")),
            **self.kafka_ssl_configs,
        }

        self.events_kafka_consumer = {
            "group_id": "inventory-events-rebuild",
            "request_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_REQUEST_TIMEOUT_MS", "305000")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_CONSUMER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
            "enable_auto_commit": False,
            "max_poll_records": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_RECORDS", "10000")),
            "max_poll_interval_ms": int(os.environ.get("KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS", "300000")),
            "session_timeout_ms": int(os.environ.get("KAFKA_CONSUMER_SESSION_TIMEOUT_MS", "10000")),
            "heartbeat_interval_ms": int(os.environ.get("KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS", "3000")),
            **self.kafka_ssl_configs,
        }

        # https://kafka-python.readthedocs.io/en/1.4.7/apidoc/KafkaProducer.html#kafkaproducer
        self.kafka_producer = {
            "acks": self._from_dict(PRODUCER_ACKS, "KAFKA_PRODUCER_ACKS", "1"),
            "retries": int(os.environ.get("KAFKA_PRODUCER_RETRIES", "0")),
            "batch_size": int(os.environ.get("KAFKA_PRODUCER_BATCH_SIZE", "16384")),
            "linger_ms": int(os.environ.get("KAFKA_PRODUCER_LINGER_MS", "0")),
            "retry_backoff_ms": int(os.environ.get("KAFKA_PRODUCER_RETRY_BACKOFF_MS", "100")),
            "max_in_flight_requests_per_connection": int(
                os.environ.get("KAFKA_PRODUCER_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION", "5")
            ),
            **self.kafka_ssl_configs,
        }

        self.payload_tracker_kafka_producer = {
            "bootstrap_servers": self.bootstrap_servers,
            **self.kafka_ssl_configs
        }

        self.payload_tracker_service_name = os.environ.get(
            "PAYLOAD_TRACKER_SERVICE_NAME", "inventory")
        payload_tracker_enabled = os.environ.get("PAYLOAD_TRACKER_ENABLED",
                                                 "true")
        self.payload_tracker_enabled = payload_tracker_enabled.lower() == "true"

        self.culling_stale_warning_offset_delta = timedelta(
            days=int(os.environ.get("CULLING_STALE_WARNING_OFFSET_DAYS", "7")),
            minutes=int(
                os.environ.get("CULLING_STALE_WARNING_OFFSET_MINUTES", "0")),
        )
        self.culling_culled_offset_delta = timedelta(
            days=int(os.environ.get("CULLING_CULLED_OFFSET_DAYS", "14")),
            minutes=int(os.environ.get("CULLING_CULLED_OFFSET_MINUTES", "0")),
        )

        self.xjoin_graphql_url = os.environ.get(
            "XJOIN_GRAPHQL_URL", "http://localhost:4000/graphql")

        if self._runtime_environment == RuntimeEnvironment.PENDO_JOB:
            self.pendo_sync_active = os.environ.get("PENDO_SYNC_ACTIVE",
                                                    "false").lower() == "true"
            self.pendo_endpoint = os.environ.get(
                "PENDO_ENDPOINT", "https://app.pendo.io/api/v1")
            self.pendo_integration_key = os.environ.get(
                "PENDO_INTEGRATION_KEY", "")
            self.pendo_retries = int(os.environ.get("PENDO_RETRIES", "3"))
            self.pendo_timeout = int(os.environ.get("PENDO_TIMEOUT", "240"))
            self.pendo_request_size = int(
                os.environ.get("PENDO_REQUEST_SIZE", "500"))

        if self._runtime_environment == RuntimeEnvironment.TEST:
            self.bypass_rbac = "true"
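
The culling timedeltas above are consumed by turning a host's staleness timestamp into warning/culled thresholds. An illustrative sketch; the helper name and the direction of the offset are assumptions:

from datetime import datetime, timezone

def staleness_thresholds(stale_timestamp, config):
    # Illustrative: a host is considered "stale warning" / "culled" once
    # these offsets have elapsed past its stale timestamp.
    return {
        "stale_warning": stale_timestamp + config.culling_stale_warning_offset_delta,
        "culled": stale_timestamp + config.culling_culled_offset_delta,
    }

# e.g. staleness_thresholds(datetime.now(timezone.utc), config)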