# Example 1 (score: 0)
def main(db, sub, tables, client_class=make_db.GCSClient, stop=None):
    """Poll a pubsub subscription and stream build results into BigQuery tables.

    Runs until `stop()` returns truthy. Each iteration pulls messages, acks
    the irrelevant ones, extends the ack deadline on the rest while build
    details are fetched, downloads junit artifacts, and inserts new rows.

    Args:
        db: build database with get_builds_from_paths/insert_emitted.
        sub: pubsub subscription (pull/acknowledge/modify_ack_deadline).
        tables: mapping whose values are (table, incremental_table) pairs.
        client_class: GCS client factory; defaults to make_db.GCSClient.
        stop: optional zero-arg callable; loop exits when it returns truthy.
    """
    # pylint: disable=too-many-locals
    gcs_client = client_class('', {})

    if stop is None:
        def stop():
            return False

    messages = [0] * 1000  # pretend the first pull was full so we don't sleep
    while not stop():
        print()
        # A near-empty previous batch means the queue is idle; back off
        # (only against the real client — keeps tests fast).
        if len(messages) < 10 and client_class is make_db.GCSClient:
            time.sleep(5)  # slow down!

        print('====', time.strftime("%F %T %Z"), '=' * 40)

        # Blocking pull for the first batch, then drain whatever else is
        # immediately available for up to ~7 seconds.
        messages = retry(sub.pull, max_messages=1000)
        deadline = time.time() + 7
        while time.time() < deadline:
            extra = sub.pull(max_messages=1000, return_immediately=True)
            if not extra:
                break
            messages += extra

        print('PULLED', len(messages))

        acks, todo = process_changes(messages)

        if acks:
            print('ACK irrelevant', len(acks))
            # Acknowledge in chunks of 1000 per API call.
            for offset in range(0, len(acks), 1000):
                retry(sub.acknowledge, acks[offset:offset + 1000])

        if todo:
            print('EXTEND-ACK ', len(todo))
            # give 3 minutes to grab build details
            retry(sub.modify_ack_deadline, [i for i, _, _ in todo], 60 * 3)

        acks, build_dirs = get_started_finished(gcs_client, db, todo)

        # notify pubsub queue that we've handled the finished.json messages
        if acks:
            print('ACK "finished.json"', len(acks))
            retry(sub.acknowledge, acks)

        # grab junit files for new builds
        make_db.download_junit(db, 16, client_class)

        # stream new rows to tables
        if build_dirs and tables:
            for bq_table, incremental_table in tables.values():
                new_builds = db.get_builds_from_paths(build_dirs,
                                                      incremental_table)
                emitted = insert_data(bq_table,
                                      make_json.make_rows(db, new_builds))
                db.insert_emitted(emitted, incremental_table)
# Example 2 (score: 0)
def main(db, sub, tables, client_class=make_db.GCSClient, stop=None):
    """Poll a pubsub subscription and stream build results into BigQuery tables.

    Runs until `stop()` returns truthy. Each iteration pulls messages, acks
    the irrelevant ones, extends the ack deadline on the rest while build
    details are fetched, downloads junit artifacts, and inserts new rows.

    Args:
        db: build database with get_builds_from_paths/insert_emitted.
        sub: pubsub subscription (pull/acknowledge/modify_ack_deadline).
        tables: mapping whose values are (table, incremental_table) pairs.
        client_class: GCS client factory; defaults to make_db.GCSClient.
        stop: optional zero-arg callable; loop exits when it returns truthy.
    """
    # pylint: disable=too-many-locals
    gcs_client = client_class('', {})

    if stop is None:
        stop = lambda: False

    results = [0] * 1000  # don't sleep on first loop
    while not stop():
        print()
        # A near-empty previous batch means the queue is idle; back off
        # (only against the real client — keeps tests fast).
        if len(results) < 10 and client_class is make_db.GCSClient:
            time.sleep(5)  # slow down!

        print('====', time.strftime("%F %T %Z"), '=' * 40)

        # Blocking pull for the first batch, then drain whatever else is
        # immediately available for up to ~7 seconds.
        results = retry(sub.pull, max_messages=1000)
        start = time.time()
        while time.time() < start + 7:
            results_more = sub.pull(max_messages=1000, return_immediately=True)
            if not results_more:
                break
            results += results_more

        print('PULLED', len(results))

        acks, todo = process_changes(results)

        if acks:
            print('ACK irrelevant', len(acks))
            # Acknowledge in chunks of 1000 per API call.
            # FIX: xrange is Python 2 only (NameError under Python 3);
            # use range, matching the other copy of this function.
            for n in range(0, len(acks), 1000):
                retry(sub.acknowledge, acks[n:n + 1000])

        if todo:
            print('EXTEND-ACK ', len(todo))
            # give 3 minutes to grab build details
            retry(sub.modify_ack_deadline, [i for i, _j, _b in todo], 60 * 3)

        acks, build_dirs = get_started_finished(gcs_client, db, todo)

        # notify pubsub queue that we've handled the finished.json messages
        if acks:
            print('ACK "finished.json"', len(acks))
            retry(sub.acknowledge, acks)

        # grab junit files for new builds
        make_db.download_junit(db, 16, client_class)

        # stream new rows to tables
        if build_dirs and tables:
            # FIX: dict.itervalues() is Python 2 only (AttributeError under
            # Python 3); use .values(), matching the other copy.
            for table, incremental_table in tables.values():
                builds = db.get_builds_from_paths(build_dirs,
                                                  incremental_table)
                emitted = insert_data(table, make_json.make_rows(db, builds))
                db.insert_emitted(emitted, incremental_table)