def reset_db(force):
    setup_logging()
    settings = Settings()
    logger.info('settings: %s', settings.to_string(pretty=True))
    loop = asyncio.get_event_loop()
    logger.info('running prepare_database, force: %r...', force)
    loop.run_until_complete(_prepare_database(settings, force))
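Every example on this page leans on a project-specific setup_logging helper whose definition is not shown, and whose signature varies from snippet to snippet (no arguments, a Settings object, a verbosity flag, or a log directory). As a rough, standard-library-only sketch of what such a helper typically does (the format string and default level are assumptions, not taken from any of these projects):

import logging


def setup_logging(verbose: bool = False) -> None:
    """Hypothetical helper: configure the root logger once per CLI process."""
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format='%(asctime)s %(name)s %(levelname)s: %(message)s',
    )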
Example #2
def elasticsearch_patch(patch):
    settings = Settings(sender_cls='app.worker.Sender')
    setup_logging(settings)
    loop = asyncio.get_event_loop()
    es = ElasticSearch(settings=settings)
    try:
        patch_func = getattr(es, '_patch_' + patch)
        logger.info('running patch %s...', patch_func.__name__)
        loop.run_until_complete(patch_func())
    finally:
        es.close()
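The getattr(es, '_patch_' + patch) call above turns a command-line argument into a coroutine lookup by name, so adding a new patch only means adding another _patch_<name> coroutine to the ElasticSearch class. A hypothetical stand-in illustrating that convention (the method names and bodies are assumptions, not the project's real patches):

class ElasticSearchSketch:
    """Hypothetical stand-in for the ElasticSearch class used above."""

    async def _patch_update_mappings(self):
        # assumed example: push revised field mappings to the existing index
        print('updating mappings...')

    async def _patch_reindex(self):
        # assumed example: copy documents into a freshly created index
        print('reindexing...')

    def close(self):
        # the real class would release its HTTP client/session here
        pass


# e.g. patch='reindex' resolves to ElasticSearchSketch._patch_reindex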
Example #3
def worker(wait):
    """
    Run the worker
    """
    settings = Settings(sender_cls='app.worker.Sender')
    setup_logging(settings)

    logger.info('waiting for elasticsearch and redis to come up...')
    if wait:
        sleep(4)
    _check_services_ready(settings)
    # redis/the network occasionally hangs and gets itself in a mess if we try to connect too early,
    # even once it's "up", hence 2 second wait
    if wait:
        sleep(2)
    RunWorkerProcess('app/worker.py', 'Worker')
Example #4
def web():
    """
    Serve the application. If the database doesn't already exist it will be created.
    """
    settings = Settings()
    setup_logging()
    logger.info('settings: %s', settings.to_string(pretty=True))

    asyncio.get_event_loop().close()
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.get_event_loop()
    loop.run_until_complete(_prepare_database(settings))
    logger.info('starting server...')
    app = create_app(settings)
    run_app(app, port=8000, print=lambda v: None, access_log=None)
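Functions such as reset_db, worker and web read like subcommands of a command-line interface; the decorators are stripped from these snippets, so the wiring below is an assumption rather than the projects' actual CLI, but it shows one plausible way to expose them with click:

import click


@click.group()
def management():
    """Hypothetical click group tying the commands above together."""


@management.command()
@click.option('--force', is_flag=True)
def reset_db(force):
    ...  # body as in the reset_db example above


@management.command()
@click.option('--wait/--no-wait', default=True)
def worker(wait):
    ...  # body as in the worker example above


if __name__ == '__main__':
    management()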
Example #5
def elasticsearch_snapshot(action, snapshot_name):
    """
    Create an Elasticsearch snapshot.
    """
    settings = Settings(sender_cls='app.worker.Sender')
    setup_logging(settings)
    loop = asyncio.get_event_loop()
    es = ElasticSearch(settings=settings)
    try:
        if action == 'create':
            f = es.create_snapshot()
        elif action == 'list':
            f = es.restore_list()
        else:
            assert snapshot_name, 'snapshot-name may not be None'
            f = es.restore_snapshot(snapshot_name)
        loop.run_until_complete(f)
    finally:
        es.close()
Example #6
def handle_launch(logger_name):
    exec_file = abspath(argv[0])
    exec_dir = dirname(exec_file)
    _, exec_ext = splitext(exec_file)

    as_service = (exec_ext.lower() == '.exe')

    argv[0] = exec_file

    config = Config(exec_dir, as_service)
    setup_logging(config.current['logs']['dir'])

    log = getLogger(logger_name)
    log.info('Start')
    log.info(f'exec_file={exec_file}')
    log.info(f'exec_dir={exec_dir}')
    log.info(f'getcwd()={getcwd()}')

    return as_service, log
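handle_launch works out whether the process was started as a frozen .exe (typically when installed as a Windows service) or as a plain script, configures logging from the discovered config, and returns that flag together with a named logger. A hedged sketch of a caller that branches on the returned flag (the logger name and the branch bodies are assumptions):

def run():
    as_service, log = handle_launch('my_app')  # 'my_app' is an assumed logger name
    if as_service:
        log.info('started from a frozen .exe; handing off to the service framework')
        # project-specific service start-up would go here
    else:
        log.info('started from the console; running interactively')
        # project-specific console entry point would go here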
Example #7
def main(args):
    if args.command == 'clean':
        remove_data(args.data_dir)
        return

    setup_logging(sql_logging=args.sql_logging)

    perf_db_kwargs = {
        'root_dir': args.data_dir,
        'scenario_name': args.scenario_name
    }
    if args.command == 'generate':
        perf_db_kwargs['db_type'] = perf_db.DatabaseType.template
    else:
        perf_db_kwargs['db_type'] = perf_db.DatabaseType.test_run

    if args.command == 'process':
        perf_db_kwargs['copy_from_template'] = True

    show_elapsed_time = True
    if args.command == 'psql':
        show_elapsed_time = False

    with perf_db.PerfTestDatabase(**perf_db_kwargs) as postgresql:
        with make_perf_session(postgresql) as session:
            # start the timer
            # NOTE: we do not include database connection and initialization in our timing measurements
            start_counter = time.perf_counter()

            if args.command == 'generate':
                generate_data(session, args.scenario_name)
            elif args.command == 'process':
                process_data(session, args.config_name, args.profile_mem)
            elif args.command == 'psql':
                review_data(postgresql.url())

            # stop the counter and log the elapsed time
            end_counter = time.perf_counter()
            if show_elapsed_time:
                LOGGER.info('Elapsed time (seconds): %s', '{:.03f}'.format(end_counter - start_counter))
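The attributes read from args above (command, data_dir, scenario_name, sql_logging, config_name, profile_mem) imply a parser roughly like the sketch below; the option names, defaults and help text are inferred assumptions, not the project's actual argparse definition:

import argparse


def parse_args(argv=None):
    parser = argparse.ArgumentParser(description='perf-test database driver (sketch)')
    parser.add_argument('command', choices=['clean', 'generate', 'process', 'psql'])
    parser.add_argument('--data-dir', default='perf_data')
    parser.add_argument('--scenario-name', default='default')
    parser.add_argument('--config-name', default=None)
    parser.add_argument('--sql-logging', action='store_true')
    parser.add_argument('--profile-mem', action='store_true')
    return parser.parse_args(argv)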
Example #8
def web(wait):
    """
    Serve the application
    If the database doesn't already exist it will be created.
    """
    settings = Settings(sender_cls='app.worker.Sender')
    print(settings.to_string(True), flush=True)
    setup_logging(settings)

    logger.info('waiting for elasticsearch and redis to come up...')
    # give es a chance to come up fully; this just prevents a flood of es errors, as create_indices is itself lenient

    # skip the wait as es and redis are generally already up and the delay was causing missed requests
    # wait and sleep(4)
    _check_services_ready(settings)

    _elasticsearch_setup(settings)
    logger.info('starting server...')
    asyncio.get_event_loop().close()
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.get_event_loop()
    app = create_app(loop, settings)
    run_app(app, port=8000, loop=loop, print=lambda v: None, access_log=None)
Example #9
import logging
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_talisman import Talisman
from config import Config
from celery_app import make_celery

app = Flask(__name__)
app.config.from_object(Config)

from app import logs

# Set up logging
logs.setup_logging()

# Set up flask-talisman to prevent xss and other attacks
csp = {
    'default-src': '\'self\'',
    'script-src': ['\'self\'', 'cdnjs.cloudflare.com', 'www.googletagmanager.com'],
    'style-src': ['\'self\'', 'fonts.googleapis.com'],
    'font-src': ['\'self\'', 'fonts.gstatic.com'],
    'img-src': ['\'self\'', 'www.google-analytics.com', 'data:']}
Talisman(app, content_security_policy=csp,
         content_security_policy_nonce_in=['script-src', 'style-src'])

csrf = CSRFProtect(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db, render_as_batch=True)
celery = make_celery(app)
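Here logs.setup_logging() runs at import time, immediately after the Flask app is created and configured. A minimal dictConfig-based sketch of what such a module could contain (the handler, formatter and level are assumptions):

# app/logs.py -- hypothetical sketch
from logging.config import dictConfig


def setup_logging():
    dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'default': {'format': '%(asctime)s %(levelname)s %(name)s: %(message)s'},
        },
        'handlers': {
            'console': {'class': 'logging.StreamHandler', 'formatter': 'default'},
        },
        'root': {'level': 'INFO', 'handlers': ['console']},
    })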
Example #10
def cli(verbose):
    setup_logging(verbose)
Example #11
import os

import uvicorn
from app.logs import setup_logging
from app.main import tc_av_app

if __name__ == '__main__':
    setup_logging()
    port = int(os.getenv('PORT', 8000))
    uvicorn.run(tc_av_app, host='0.0.0.0', port=port)
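For local development the same script is often run with auto-reload; a hedged variant of the __main__ block above (the reload flag and the import-string form of the app are assumptions about local use, not taken from the project):

if __name__ == '__main__':
    setup_logging()
    port = int(os.getenv('PORT', '8000'))
    # passing the app as an import string is required when reload=True
    uvicorn.run('app.main:tc_av_app', host='0.0.0.0', port=port, reload=True)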
Example #12
def cli(verbose):
    """
    Run TutorCruncher socket
    """
    setup_logging(verbose)
Example #13
def elasticsearch_setup(force_create_index, force_create_repo):
    settings = Settings(sender_cls='app.worker.Sender')
    setup_logging(settings)
    _elasticsearch_setup(settings, force_create_index, force_create_repo)