Example 1
def listen_steemd():
    b = Blockchain()
    h = b.stream_from(
        start_block=db_last_block() + 1,
        full_blocks=True,
    )
    for block in h:
        num = int(block['previous'][:8], base=16) + 1
        print("[LIVE] Got block {} at {} with {} txs".format(
            num, block['timestamp'], len(block['transactions'])))
        process_blocks([block])
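
This example recovers the block number from the first 8 hex characters (4 bytes) of the `previous` block id; example 3 below uses the same trick on `block_id`. A tiny standalone illustration of that arithmetic, with a made-up block id:

# illustration only: block ids are 40 hex chars; the first 8 encode the number
prev_id = '004c4b3f' + 'ee' * 16          # hypothetical id of block 4,999,999
num = int(prev_id[:8], base=16) + 1       # previous block's number, plus one
assert num == 0x004c4b40 == 5000000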
Example 2
def sync_from_checkpoints():
    last_block = db_last_block()

    fn = lambda f: [int(f.split('/')[1].split('.')[0]), f]
    files = map(fn, glob.glob("checkpoints/*.json.lst"))
    files = sorted(files, key=lambda f: f[0])

    last_read = 0
    for (num, path) in files:
        if last_block < num:
            print("[SYNC] Load {} -- last block: {}".format(path, last_block))
            skip_lines = last_block - last_read
            sync_from_file(path, skip_lines)
            last_block = num
        last_read = num
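
sync_from_file is not shown in these examples; skip_lines tells it how many leading lines of the checkpoint (blocks already in the database) to discard. A rough sketch only, assuming each line of a *.json.lst checkpoint is one JSON-encoded block and reusing the process_blocks helper these examples already call:

import json

def sync_from_file(file_path, skip_lines, chunk_size=250):
    # sketch only: the real helper's signature and batching may differ
    buffer = []
    with open(file_path) as f:
        for line_num, line in enumerate(f):
            if line_num < skip_lines:
                continue                      # block already in the database
            buffer.append(json.loads(line))
            if len(buffer) >= chunk_size:
                process_blocks(buffer)        # write a full batch
                buffer = []
    if buffer:
        process_blocks(buffer)                # flush the remainder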
Example 3
def listen_steemd(trail_blocks=2):
    steemd = get_adapter()
    curr_block = db_last_block()
    last_hash = False

    while True:
        curr_block = curr_block + 1

        # if trailing too close, take a pause
        while trail_blocks > 0:
            if curr_block <= steemd.head_block() - trail_blocks:
                break
            time.sleep(0.5)

        # get the target block; if it does not exist yet, pause and retry
        block = steemd.get_block(curr_block)
        while not block:
            time.sleep(0.5)
            block = steemd.get_block(curr_block)

        num = int(block['block_id'][:8], base=16)
        print("[LIVE] Got block {} at {} with {} txs -- ".format(
            num, block['timestamp'], len(block['transactions'])),
              end='')

        # ensure the block we received links to our last
        if last_hash and last_hash != block['previous']:
            # this condition is very rare unless trail_blocks is 0 and a fork is
            # encountered; to handle it gracefully, implement a pop_block method
            # (a sketch follows this example)
            raise Exception("Unlinkable block: have {}, got {} -> {}".format(
                last_hash, block['previous'], block['block_id']))
        last_hash = block['block_id']

        start_time = time.time()
        query("START TRANSACTION")

        dirty = process_block(block)
        update_posts_batch(urls_to_tuples(dirty), steemd, block['timestamp'])

        paidout = select_paidout_posts(block['timestamp'])
        update_posts_batch(paidout, steemd, block['timestamp'])

        print("{} edits, {} payouts".format(len(dirty), len(paidout)))
        query("COMMIT")
        secs = time.time() - start_time

        if secs > 1:
            print("WARNING: block {} process took {}s".format(num, secs))
Example 4
def sync_from_checkpoints(is_initial_sync):
    last_block = db_last_block()

    fn = lambda f: [int(f.split('/')[-1].split('.')[0]), f]
    mydir = os.path.dirname(os.path.realpath(__file__ + "/../.."))
    files = map(fn, glob.glob(mydir + "/checkpoints/*.json.lst"))
    files = sorted(files, key=lambda f: f[0])

    last_read = 0
    for (num, path) in files:
        if last_block < num:
            print("[SYNC] Load {} -- last block: {}".format(path, last_block))
            skip_lines = last_block - last_read
            sync_from_file(path, skip_lines, 250, is_initial_sync)
            last_block = num
        last_read = num
Example 5
def sync_from_steemd():
    s = Steemd()

    lbound = db_last_block() + 1
    ubound = s.last_irreversible_block_num

    start_num = lbound
    start_time = time.time()
    while lbound < ubound:
        to = min(lbound + 1000, ubound)
        blocks = s.get_blocks_range(lbound, to)
        lbound = to
        process_blocks(blocks)

        rate = (lbound - start_num) / (time.time() - start_time)
        print("[SYNC] Got block {} ({}/s) {}m remaining".format(
            to - 1, round(rate, 1), round((ubound - lbound) / rate / 60, 2)))
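
The progress line divides the blocks-per-second rate into the remaining block count to estimate minutes left. Worked through with made-up numbers:

# e.g. a 1,000-block batch fetched and processed in 2.5 seconds:
rate = 1000 / 2.5                        # 400.0 blocks per second
remaining = 120000                       # hypothetical blocks still to sync
print(round(remaining / rate / 60, 2))   # -> 5.0 (minutes remaining)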
Example 6
def sync_from_steemd(is_initial_sync):
    steemd = get_adapter()
    dirty = set()

    lbound = db_last_block() + 1
    ubound = steemd.last_irreversible_block_num()

    print("[SYNC] {} blocks to batch sync".format(ubound - lbound + 1))
    print("[SYNC] start sync from block %d" % lbound)

    while lbound < ubound:
        to = min(lbound + 1000, ubound)

        lap_0 = time.time()
        blocks = steemd.get_blocks_range(lbound, to)
        lap_1 = time.time()
        dirty |= process_blocks(blocks, is_initial_sync)
        lap_2 = time.time()

        rate = (to - lbound) / (lap_2 - lap_0)
        rps = int((to - lbound) / (lap_1 - lap_0))
        wps = int((to - lbound) / (lap_2 - lap_1))
        print(
            "[SYNC] Got block {} ({}/s, {}rps {}wps) -- {}m remaining".format(
                to - 1, round(rate, 1), rps, wps,
                round((ubound - to) / rate / 60, 2)))

        lbound = to

    # batch update post cache after catching up to head block
    if not is_initial_sync:

        print("[PREP] Update {} edited posts".format(len(dirty)))
        update_posts_batch(urls_to_tuples(dirty), steemd)

        date = steemd.head_time()
        paidout = select_paidout_posts(date)
        print("[PREP] Process {} payouts since {}".format(len(paidout), date))
        update_posts_batch(paidout, steemd, date)
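
None of the examples show how these pieces are invoked together. One plausible ordering, using only functions shown above and with the initial-sync test being purely an assumption, is to bulk-load local checkpoints first, then batch-catch-up from steemd, then follow the live head:

def run():
    # hypothetical driver, not taken from the examples themselves
    is_initial_sync = db_last_block() == 0   # assumption: empty db means first run
    sync_from_checkpoints(is_initial_sync)
    sync_from_steemd(is_initial_sync)
    listen_steemd(trail_blocks=2)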
Example 7
def head_state(*args):
    _ = args  # JSONRPC injects 4 arguments here
    steemd_head = get_adapter().head_block()
    hive_head = db_last_block()
    diff = steemd_head - hive_head
    return dict(steemd=steemd_head, hive=hive_head, diff=diff)
Example 8
def head_state(*args):
    _ = args  # JSONRPC injects 4 arguments here
    steemd_head = Steemd().last_irreversible_block_num
    hive_head = db_last_block()
    diff = steemd_head - hive_head
    return dict(steemd=steemd_head, hive=hive_head, diff=diff)
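
Both head_state variants return a plain dict, so a caller can read the lag directly; with a hypothetical steemd head of 20,000,000 and a local hive head of 19,999,985:

state = head_state()
# {'steemd': 20000000, 'hive': 19999985, 'diff': 15}
print("hive is {} blocks behind".format(state['diff']))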