Example #1
0
def service_jobs():
    """
    Service jobs in an infinite loop.

    Polls for work forever: when a job was processed, polls again
    immediately; when the queue was empty, backs off for one second.
    """
    # This worker is started via multiprocessing, which fork()s and therefore
    # duplicates the parent's file descriptors — including any pooled database
    # connections. Dispose of the engine so this process builds a fresh pool
    # (the SQLAlchemy-recommended approach after a fork).
    get_engine().dispose()

    while True:
        serviced = process_job()
        # nothing to do right now — back off briefly before polling again
        if not serviced:
            sleep(1)
Example #2
0
def service_jobs():
    """
    Service jobs in an infinite loop.

    Exposes job-processing metrics via a prometheus HTTP server on port 8001
    for the lifetime of the loop, and shuts that server down if the loop ever
    exits (e.g. due to an exception).
    """
    # multiprocessing uses fork() which in turn copies file descriptors, so the
    # engine may have connections in its pool that we don't want to reuse. This
    # is the SQLAlchemy-recommended way of clearing the connection pool in this
    # thread
    get_engine().dispose()
    t = create_prometheus_server(job_process_registry, 8001)
    try:
        while True:
            # if no job was found, sleep for a second, otherwise query for
            # another job straight away
            if not process_job():
                sleep(1)
    finally:
        # fixed: was an f-string with nothing to interpolate — a plain string
        # is correct here (and avoids needless formatting work in logging)
        logger.info("Closing prometheus server")
        t.server_close()
Example #3
0
def db_impl(param):
    """
    Connect to a running Postgres database.

    param tells whether the db should be built from alembic migrations
    ("migrations") or using metadata.create_all() (any other value).
    """

    # deliberately run in a non-UTC timezone so timezone-handling bugs surface
    # os.environ["TZ"] = "Etc/UTC"
    os.environ["TZ"] = "America/New_York"

    # wipe the database clean, then reinstall the extensions we depend on
    with session_scope() as session:
        session.execute(
            "DROP SCHEMA public CASCADE; CREATE SCHEMA public; CREATE EXTENSION postgis; CREATE EXTENSION pg_trgm;"
        )

    if param != "migrations":
        # build the schema straight from the current models rather than by
        # replaying migrations; first install the slugify SQL function
        slugify_path = Path(__file__).parent / "slugify.sql"
        with open(slugify_path) as sql_file, session_scope() as session:
            session.execute(sql_file.read())

        Base.metadata.create_all(get_engine())
    else:
        # replay the full alembic migration history
        apply_migrations()
Example #4
0
def create_schema_from_models():
    """
    Create everything from the current models, not incrementally
    through migrations.
    """

    # install the slugify SQL function that lives next to this file
    slugify_path = Path(__file__).parent / "slugify.sql"
    with open(slugify_path) as sql_file, session_scope() as session:
        session.execute(sql_file.read())

    # then create all tables straight from the model metadata
    Base.metadata.create_all(get_engine())
Example #5
0
def run_scheduler():
    """
    Schedules jobs according to schedule in .definitions

    Enters one _run_job_and_schedule event per entry in SCHEDULE, then runs
    the scheduler; presumably each event re-schedules itself at its own
    frequency (TODO confirm against _run_job_and_schedule).
    """
    # multiprocessing uses fork() which in turn copies file descriptors, so the
    # engine may have connections in its pool that we don't want to reuse. This
    # is the SQLAlchemy-recommended way of clearing the connection pool in this
    # thread
    get_engine().dispose()

    sched = scheduler(monotonic, sleep)

    # fixed: the tuple contents (job_type, frequency) were unpacked but never
    # used here — only the index into SCHEDULE is needed, since
    # _run_job_and_schedule receives the schedule_id and looks the entry up
    # itself
    for schedule_id, _ in enumerate(SCHEDULE):
        sched.enter(
            0,  # delay: start immediately
            1,  # priority: all entries equal
            _run_job_and_schedule,
            argument=(
                sched,
                schedule_id,
            ),
        )

    # blocks, servicing the event queue
    sched.run()