Example #1
def run():
    if db_needs_setup():
        print("[INIT] Initializing db...")
        setup()

    # TODO: if the initial sync is interrupted, the cache is never rebuilt
    # TODO: do not build a partial feed_cache during the initial sync
    # if this is the initial sync, batch updates until the very end
    is_initial_sync = not query_one("SELECT 1 FROM hive_posts_cache LIMIT 1")

    if is_initial_sync:
        print("[INIT] *** Initial sync. db_last_block: %d ***" %
              db_last_block())
    else:
        # perform cleanup in case process did not exit cleanly
        cache_missing_posts()

    # prefetch id->name memory map
    Accounts.load_ids()

    # fast block sync strategies (a rough sketch of the catch-up loop follows this listing)
    sync_from_checkpoints(is_initial_sync)
    sync_from_steemd(is_initial_sync)

    Accounts.cache_old()
    Accounts.update_ranks()

    if is_initial_sync:
        print("[INIT] *** Initial sync complete. Rebuilding cache. ***")
        cache_missing_posts()
        rebuild_feed_cache()

    # initialization complete. follow head blocks
    listen_steemd()
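
For context, here is a rough, hypothetical sketch of the kind of catch-up loop a fast sync strategy such as sync_from_steemd() performs before run() hands off to listen_steemd(). It is not the project's actual implementation; it reuses the helpers already shown above, and get_blocks_range() is an assumed batch-fetch method on the adapter:

def sync_from_steemd_sketch(batch_size=1000):
    # hypothetical sketch only -- not hivemind's actual sync_from_steemd()
    steemd = get_adapter()
    while True:
        lbound = db_last_block() + 1
        ubound = steemd.head_block()
        if lbound > ubound:
            return  # caught up; listen_steemd() takes over from here
        to = min(lbound + batch_size - 1, ubound)
        query("START TRANSACTION")
        # get_blocks_range() is an assumed batch-fetch helper (inclusive bounds)
        for block in steemd.get_blocks_range(lbound, to):
            process_block(block)
        query("COMMIT")
        print("[SYNC] Processed blocks %d - %d" % (lbound, to))
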
Example #2
import time

def listen_steemd(trail_blocks=2):
    steemd = get_adapter()
    curr_block = db_last_block()
    last_hash = False

    while True:
        curr_block = curr_block + 1

        # if trailing too close to the head, pause until this block is at least
        # trail_blocks behind it (trailing reduces the odds of processing a
        # block that is later forked out)
        while trail_blocks > 0:
            if curr_block <= steemd.head_block() - trail_blocks:
                break
            time.sleep(0.5)

        # get the target block; if it does not exist yet, pause and retry
        block = steemd.get_block(curr_block)
        while not block:
            time.sleep(0.5)
            block = steemd.get_block(curr_block)

        # the first 4 bytes (8 hex chars) of the block id encode the block
        # height big-endian (worked example after this listing)
        num = int(block['block_id'][:8], base=16)
        print("[LIVE] Got block {} at {} with {} txs -- ".format(
            num, block['timestamp'], len(block['transactions'])),
              end='')

        # ensure the block we received links to our last
        if last_hash and last_hash != block['previous']:
            # this condition is rare unless trail_blocks is 0 and a fork is
            # encountered; handling it gracefully would require a pop_block
            # method to undo the orphaned block's effects
            raise Exception("Unlinkable block: have {}, got {} -> {}".format(
                last_hash, block['previous'], block['block_id']))
        last_hash = block['block_id']

        start_time = time.perf_counter()
        query("START TRANSACTION")

        dirty = process_block(block)
        update_posts_batch(Posts.urls_to_tuples(dirty), steemd,
                           block['timestamp'])

        paidout = select_paidout_posts(block['timestamp'])
        update_posts_batch(paidout, steemd, block['timestamp'])

        Accounts.cache_dirty()
        Accounts.cache_dirty_follows()

        print("{} edits, {} payouts".format(len(dirty), len(paidout)))
        query("COMMIT")
        secs = time.perf_counter() - start_time

        if secs > 1:
            print("WARNING: block {} process took {}s".format(num, secs))

        # approx once per hour (1200 blocks at ~3s each), update accounts
        if num % 1200 == 0:
            print("Performing account maintenance...")
            Accounts.cache_old()
            Accounts.update_ranks()
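
About the block number decoding in the listing above: a Steem block id embeds the block height, big-endian, in its first 4 bytes (8 hex characters), which is why int(block['block_id'][:8], base=16) recovers the number. A quick worked check:

# block 5,000,000 = 0x004C4B40, so its block id begins with "004c4b40";
# the trailing zeros stand in for the rest of a purely illustrative id
example_block_id = "004c4b40" + "0" * 32
assert int(example_block_id[:8], base=16) == 5000000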