Example #1
    def test_set(self, rpush):
        # rpush is a mock of the Redis rpush call, patched in by the test harness
        metrics.init('listenbrainz.org')
        os.environ["PRIVATE_IP"] = "127.0.0.1"
        metrics.set("my_metric",
                    timestamp=1619629462352960742,
                    test_i=2,
                    test_fl=.3,
                    test_t=True,
                    test_f=False,
                    test_s="gobble")
        rpush.assert_called_with(
            metrics.REDIS_METRICS_KEY,
            'my_metric,dc=hetzner,server=127.0.0.1,project=listenbrainz.org test_i=2i,test_fl=0.300000,test_t=t,test_f=f,test_s="gobble" 1619629462352960742'
        )
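The expected string in the assertion is InfluxDB line protocol: the measurement name and its tags (dc, server, project) come first, then the field set, then the timestamp in nanoseconds. Below is a minimal sketch of the field encoding the assertion implies; encode_value is a hypothetical helper, not part of the brainzutils API:

# A hypothetical re-implementation of the value encoding implied by the
# expected string above: ints get an "i" suffix, floats are rendered with
# six decimal places, booleans become t/f, and strings are quoted.
def encode_value(value):
    if isinstance(value, bool):  # check bool before int: bool subclasses int
        return "t" if value else "f"
    if isinstance(value, int):
        return "%di" % value
    if isinstance(value, float):
        return "%f" % value
    return '"%s"' % value

assert encode_value(2) == "2i"
assert encode_value(.3) == "0.300000"
assert encode_value(True) == "t"
assert encode_value("gobble") == '"gobble"'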
Example #2
    def __init__(self, app):
        threading.Thread.__init__(self)
        self.done = False
        self.app = app
        self.queue = PriorityQueue()
        # bookkeeping for the background pass that loads legacy listens
        self.unmatched_listens_complete_time = 0
        self.legacy_load_thread = None
        self.legacy_next_run = 0
        self.legacy_listens_index_date = 0
        self.num_legacy_listens_loaded = 0
        self.last_processed = 0

        init_cache(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'],
                   namespace=app.config['REDIS_NAMESPACE'])
        metrics.init("listenbrainz")
        self.load_legacy_listens()
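This constructor belongs to a threading.Thread subclass whose name the snippet does not show. Below is a self-contained sketch of the pattern it sets up, with all names illustrative: finish setup in __init__, then drain a PriorityQueue in run() until a done flag is set.

import threading
from queue import PriorityQueue, Empty

# Illustrative stand-in for the snippet's class: a Thread subclass that
# completes its setup in __init__ and processes queued jobs in run().
class JobThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.done = False
        self.queue = PriorityQueue()

    def run(self):
        # keep draining until asked to stop and the queue is empty
        while not self.done or not self.queue.empty():
            try:
                priority, job = self.queue.get(timeout=.1)
            except Empty:
                continue
            print("processing", priority, job)

t = JobThread()
t.queue.put((0, "first job"))
t.start()
t.done = True   # signal shutdown; already-queued jobs still drain
t.join()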
Example #3
def compare_coverart(mb_query, lb_query, mb_caa_index, lb_caa_index, mb_compare_key, lb_compare_key):
    """ The core cover art comparison function. Given two sets of queries, index values, and 
        comparison keys this function can perform a complete sync as well as an incremental update.

        The queries must fetch chunks of data from the MB and LB tables ordered by
        the corresponding compare key. The starting indexes (the current comparison index
        into the data) must be provided and match the type of the comparison keys. """

    with psycopg2.connect(config.MBID_MAPPING_DATABASE_URI) as mb_conn:
        with mb_conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as mb_curs:
            with psycopg2.connect(config.SQLALCHEMY_DATABASE_URI) as lb_conn:
                with lb_conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as lb_curs:

                    mb_count, lb_count = get_cover_art_counts(mb_curs, lb_curs)
                    log("CAA count: %d\n LB count: %d" % (mb_count, lb_count))

                    threads = []
                    mb_row = None
                    lb_row = None

                    mb_rows = []
                    lb_rows = []

                    mb_done = False
                    lb_done = lb_query is None

                    extra = 0
                    missing = 0
                    processed = 0

                    while True:
                        if len(mb_rows) == 0 and not mb_done:
                            mb_curs.execute(
                                mb_query, (mb_caa_index, SYNC_BATCH_SIZE))
                            mb_rows = mb_curs.fetchall()
                            if len(mb_rows) == 0:
                                mb_done = True

                        if len(lb_rows) == 0 and not lb_done:
                            lb_curs.execute(
                                lb_query, (lb_caa_index, SYNC_BATCH_SIZE))
                            lb_rows = lb_curs.fetchall()
                            if len(lb_rows) == 0:
                                lb_done = True

                        if not mb_row and len(mb_rows) > 0:
                            mb_row = mb_rows.pop(0)

                        if not lb_row and len(lb_rows) > 0:
                            lb_row = lb_rows.pop(0)

                        if not lb_row and not mb_row:
                            break

                        processed += 1
                        if processed % 100000 == 0:
                            log("processed %d of %d: missing %d extra %d" %
                                  (processed, mb_count, missing, extra))

                        # If the item is in MB, but not in LB, add to LB
                        if lb_row is None or mb_row[mb_compare_key] < lb_row[lb_compare_key]:
                            process_cover_art(threads, mb_row)
                            missing += 1
                            mb_caa_index = mb_row[mb_compare_key]
                            mb_row = None
                            continue

                        # If the item is in LB, but not in MB, remove from LB
                        if mb_row is None or mb_row[mb_compare_key] > lb_row[lb_compare_key]:
                            extra += 1
                            delete_from_lb(lb_row[lb_compare_key])
                            lb_caa_index = lb_row[lb_compare_key]
                            lb_row = None
                            continue

                        # If the caa_id is present in both, skip both
                        if mb_row[mb_compare_key] == lb_row[lb_compare_key]:
                            mb_caa_index = mb_row[mb_compare_key]
                            lb_caa_index = lb_row[lb_compare_key]
                            lb_row = None
                            mb_row = None
                            continue

                        assert False  # unreachable: one of the three cases above always matches

                    join_threads(threads)
                    log( "Finished! added/skipped %d removed %d from release_color" % (missing, extra))

                    mb_count, lb_count = get_cover_art_counts(mb_curs, lb_curs)
                    log("CAA count: %d\n LB count: %d" % (mb_count, lb_count))

                    metrics.init("listenbrainz")
                    metrics.set("listenbrainz-caa-mapper",
                                caa_front_count=mb_count, lb_caa_count=lb_count)
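The while loop above is a sorted merge join over the two cursors: the side holding the smaller key has a row the other side lacks, equal keys advance both sides, and an exhausted side makes every remaining row on the other side missing or extra. Below is a self-contained sketch of the same control flow over plain lists; the function name and the final assert are illustrative, not from the source:

def merge_compare(mb_keys, lb_keys):
    """Sorted merge of two key streams; returns (missing_in_lb, extra_in_lb)."""
    missing, extra = [], []
    mb, lb = list(mb_keys), list(lb_keys)
    mb_row = lb_row = None
    while True:
        if mb_row is None and mb:
            mb_row = mb.pop(0)
        if lb_row is None and lb:
            lb_row = lb.pop(0)
        if mb_row is None and lb_row is None:
            break
        if lb_row is None or (mb_row is not None and mb_row < lb_row):
            missing.append(mb_row)   # in MB but not in LB -> add to LB
            mb_row = None
        elif mb_row is None or mb_row > lb_row:
            extra.append(lb_row)     # in LB but not in MB -> remove from LB
            lb_row = None
        else:                        # equal keys: present in both, skip
            mb_row = lb_row = None
    return missing, extra

assert merge_compare([1, 2, 4], [2, 3]) == ([1, 4], [3])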
Example #4
def gen_app(debug=None):
    """ Generate a Flask app for LB with all configurations done and connections established.

    In the Flask app returned, blueprints are not registered.
    """
    app = CustomFlask(
        import_name=__name__,
        use_flask_uuid=True,
    )

    load_config(app)
    if debug is not None:
        app.debug = debug

    # initialize Flask-DebugToolbar when in debug mode (it requires SECRET_KEY)
    if app.debug and app.config['SECRET_KEY']:
        app.init_debug_toolbar()

    # Logging
    app.init_loggers(file_config=app.config.get('LOG_FILE'),
                     sentry_config=app.config.get('LOG_SENTRY'))

    # Initialize the BrainzUtils (BU) cache and metrics
    cache.init(host=app.config['REDIS_HOST'],
               port=app.config['REDIS_PORT'],
               namespace=app.config['REDIS_NAMESPACE'])
    metrics.init("listenbrainz")

    # Redis connection
    create_redis(app)

    # Timescale connection
    create_timescale(app)

    # RabbitMQ connection
    try:
        create_rabbitmq(app)
    except ConnectionError:
        app.logger.critical("RabbitMQ service is not up!", exc_info=True)

    # Database connections
    from listenbrainz import db
    from listenbrainz.db import timescale as ts
    from listenbrainz import messybrainz as msb
    db.init_db_connection(app.config['SQLALCHEMY_DATABASE_URI'])
    ts.init_db_connection(app.config['SQLALCHEMY_TIMESCALE_URI'])
    msb.init_db_connection(app.config['MESSYBRAINZ_SQLALCHEMY_DATABASE_URI'])

    if app.config['MB_DATABASE_URI']:
        from brainzutils import musicbrainz_db
        musicbrainz_db.init_db_engine(app.config['MB_DATABASE_URI'])

    # OAuth
    from listenbrainz.webserver.login import login_manager, provider
    login_manager.init_app(app)
    provider.init(app.config['MUSICBRAINZ_CLIENT_ID'],
                  app.config['MUSICBRAINZ_CLIENT_SECRET'])

    # Error handling
    from listenbrainz.webserver.errors import init_error_handlers
    init_error_handlers(app)

    from brainzutils.ratelimit import inject_x_rate_headers

    @app.after_request
    def after_request_callbacks(response):
        return inject_x_rate_headers(response)

    # Template utilities
    app.jinja_env.add_extension('jinja2.ext.do')
    from listenbrainz.webserver import utils
    app.jinja_env.filters['date'] = utils.reformat_date
    app.jinja_env.filters['datetime'] = utils.reformat_datetime

    return app
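Since gen_app deliberately returns the app with no blueprints registered, a caller is expected to register them before serving. Below is a hedged usage sketch; api_bp and its import path are assumed names, not taken from the snippet:

# Hypothetical caller: create the app, register a blueprint, and run the
# development server. api_bp is an assumed blueprint, not from the snippet.
from listenbrainz.webserver.views.api import api_bp

app = gen_app(debug=True)
app.register_blueprint(api_bp, url_prefix='/1')
app.run(host='0.0.0.0', port=8100)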