Example #1
    def listen(cls):
        trail_blocks = Conf.get('trail_blocks')
        assert trail_blocks >= 0
        assert trail_blocks < 25

        steemd = SteemClient.instance()
        hive_head = Blocks.head_num()
        for block in steemd.stream_blocks(hive_head + 1, trail_blocks, max_gap=40):
            start_time = time.perf_counter()

            query("START TRANSACTION")
            num = Blocks.process(block)
            follows = Follow.flush(trx=False)
            accts = Accounts.flush(trx=False, period=8)
            CachedPost.dirty_paidouts(block['timestamp'])
            cnt = CachedPost.flush(trx=False)
            query("COMMIT")

            ms = (time.perf_counter() - start_time) * 1000
            print("[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
                  "% 3d payouts,% 3d votes,% 3d accounts,% 3d follows --% 5dms%s"
                  % (num, block['timestamp'], len(block['transactions']),
                     cnt['insert'], cnt['update'], cnt['payout'], cnt['upvote'],
                     accts, follows, int(ms), ' SLOW' if ms > 1000 else ''))

            # once per hour, update accounts
            if num % 1200 == 0:
                Accounts.dirty_oldest(10000)
                Accounts.flush(trx=True)
                #Accounts.update_ranks()

            # once a minute, update chain props
            if num % 20 == 0:
                cls._update_chain_state(steemd)
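
The periodic maintenance in listen() is keyed off the block number: with the roughly 3-second block interval implied by the "once a minute" / "once per hour" comments, num % 20 == 0 fires about every minute and num % 1200 == 0 about every hour. A minimal sketch of that arithmetic, assuming the 3-second interval:

# Minimal sketch: mapping the modulo checks above to wall-clock periods,
# assuming the ~3-second block interval implied by the comments.
BLOCK_INTERVAL_SECONDS = 3

def period_seconds(every_n_blocks):
    """Approximate wall-clock period of a task run every N blocks."""
    return every_n_blocks * BLOCK_INTERVAL_SECONDS

assert period_seconds(20) == 60      # "once a minute"
assert period_seconds(1200) == 3600  # "once per hour"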
Example #2
def sync_from_file(file_path, skip_lines, chunk_size=250):
    with open(file_path) as f:
        # each line in file represents one block
        # we can skip the blocks we already have
        remaining = drop(skip_lines, f)
        for batch in partition_all(chunk_size, remaining):
            Blocks.process_multi(map(json.loads, batch), True)
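
drop and partition_all here come from the toolz library: drop(n, seq) lazily skips the first n items and partition_all(size, seq) yields chunks of at most size items. A minimal sketch of the same skip-then-batch pattern on a toy sequence (the data is made up for illustration):

# Minimal sketch of the skip-then-batch pattern above, assuming the toolz
# library's drop() and partition_all() as used in sync_from_file().
from toolz import drop, partition_all

lines = ['{"block_num": %d}' % n for n in range(1, 11)]  # stand-in for file lines
remaining = drop(3, lines)                   # skip 3 blocks we already have
batches = list(partition_all(4, remaining))  # batch the rest in chunks of 4

assert [len(b) for b in batches] == [4, 3]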
Example #3
def run_sync():
    print("[HIVE] Welcome to hivemind")

    # make sure db schema is up to date, perform checks
    DbState.initialize()

    # prefetch id->name memory map
    Accounts.load_ids()

    if DbState.is_initial_sync():
        # resume initial sync
        Sync.initial()
        DbState.finish_initial_sync()

    else:
        # recover from fork
        Blocks.verify_head()

        # perform cleanup in case process did not exit cleanly
        CachedPost.recover_missing_posts()

    while True:
        # sync up to irreversible block
        Sync.from_steemd()

        # take care of payout backlog
        CachedPost.dirty_paidouts(Blocks.head_date())
        CachedPost.flush(trx=True)

        # start listening
        Sync.listen()
Example #4
    def run(cls):
        """Initialize state; setup/recovery checks; sync and runloop."""

        # ensure db schema up to date, check app status
        DbState.initialize()

        # prefetch id->name memory map
        Accounts.load_ids()

        if DbState.is_initial_sync():
            # resume initial sync
            cls.initial()
            DbState.finish_initial_sync()

        else:
            # recover from fork
            Blocks.verify_head()

            # perform cleanup if process did not exit cleanly
            CachedPost.recover_missing_posts()

        # debug mode: no sync, just stream
        if Conf.get('disable_sync'):
            return cls.listen()

        while True:
            # sync up to irreversible block
            cls.from_steemd()

            # take care of payout backlog
            CachedPost.dirty_paidouts(Blocks.head_date())
            CachedPost.flush(trx=True)

            # listen for new blocks
            cls.listen()
Example #5
    def from_checkpoints(self, chunk_size=1000):
        """Initial sync strategy: read from blocks on disk.

        This method scans for files matching ./checkpoints/*.json.lst
        and uses them for hive's initial sync. Each line must contain
        exactly one block in JSON format.
        """
        # pylint: disable=no-self-use
        last_block = Blocks.head_num()

        tuplize = lambda path: [int(path.split('/')[-1].split('.')[0]), path]
        basedir = os.path.dirname(os.path.realpath(__file__ + "/../.."))
        files = glob.glob(basedir + "/checkpoints/*.json.lst")
        tuples = sorted(map(tuplize, files), key=lambda f: f[0])

        last_read = 0
        for (num, path) in tuples:
            if last_block < num:
                log.info("[SYNC] Load %s. Last block: %d", path, last_block)
                with open(path) as f:
                    # each line in file represents one block
                    # we can skip the blocks we already have
                    skip_lines = last_block - last_read
                    remaining = drop(skip_lines, f)
                    for lines in partition_all(chunk_size, remaining):
                        Blocks.process_multi(map(json.loads, lines), True)
                last_block = num
            last_read = num
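
Each checkpoint file is named after the last block it contains, so last_read is the final block of the previous file and skip_lines = last_block - last_read counts how many lines of the current file are already in the database. A small worked sketch of that arithmetic (file names and block numbers are made up for illustration):

# Hypothetical worked example of the skip_lines arithmetic above; the
# checkpoint file names and block numbers are illustrative only.
files = [(1_000_000, "checkpoints/1000000.json.lst"),
         (2_000_000, "checkpoints/2000000.json.lst")]

last_block = 1_500_000   # head block already in the database
last_read = 0
for num, path in files:
    if last_block < num:
        skip_lines = last_block - last_read
        print("%s: skip %d lines, load blocks %d..%d"
              % (path, skip_lines, last_block + 1, num))
        last_block = num
    last_read = num
# -> checkpoints/2000000.json.lst: skip 500000 lines, load blocks 1500001..2000000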
Example #6
    def __enter__(self):
        assert self._db, "The database must exist"

        log.info("Entering into {} synchronization".format(
            "LIVE" if DbLiveContextHolder.is_live_context() else "MASSIVE"))
        Blocks.setup_own_db_access(self._db)
        log.info("Exiting from {} synchronization".format(
            "LIVE" if DbLiveContextHolder.is_live_context() else "MASSIVE"))

        return self
Example #7
    def run(self):
        import sys
        max_block_limit = sys.maxsize
        do_stale_block_check = True
        if self._conf.get('test_max_block'):
            max_block_limit = self._conf.get('test_max_block')
            do_stale_block_check = False
            # Correct max_block_limit by trail_blocks (a module-level global
            # not defined in this snippet; cf. Example #22)
            max_block_limit = max_block_limit - trail_blocks
            log.info(
                "max_block_limit corrected by specified trail_blocks number: %d is: %d",
                trail_blocks, max_block_limit)

        if self._conf.get('test_disable_sync'):
            # debug mode: no sync, just stream
            result = self.listen(trail_blocks, max_block_limit,
                                 do_stale_block_check)
            restore_handlers()
            return result

        while True:
            # sync up to irreversible block
            self.from_steemd()
            if not can_continue_thread():
                break

            head = Blocks.head_num()
            if head >= max_block_limit:
                self.refresh_sparse_stats()
                log.info(
                    "Exiting [LIVE SYNC] because irreversible block sync reached specified block limit: %d",
                    max_block_limit)
                break

            try:
                # listen for new blocks
                self.listen(trail_blocks, max_block_limit,
                            do_stale_block_check)
            except MicroForkException as e:
                # attempt to recover by restarting stream
                log.error("microfork: %s", repr(e))

            head = Blocks.head_num()
            if head >= max_block_limit:
                self.refresh_sparse_stats()
                log.info(
                    "Exiting [LIVE SYNC] because of specified block limit: %d",
                    max_block_limit)
                break

            if not can_continue_thread():
                break
        restore_handlers()
Example #8
def sync_from_steemd():
    is_initial_sync = DbState.is_initial_sync()
    steemd = get_adapter()

    lbound = Blocks.head_num() + 1
    ubound = steemd.last_irreversible()
    if ubound <= lbound:
        return

    _abort = False
    try:
        print("[SYNC] start block %d, +%d to sync" % (lbound, ubound-lbound+1))
        timer = Timer(ubound - lbound, entity='block', laps=['rps', 'wps'])
        while lbound < ubound:
            to = min(lbound + 1000, ubound)
            timer.batch_start()
            blocks = steemd.get_blocks_range(lbound, to)
            timer.batch_lap()
            Blocks.process_multi(blocks, is_initial_sync)
            timer.batch_finish(len(blocks))
            date = blocks[-1]['timestamp']
            print(timer.batch_status("[SYNC] Got block %d @ %s" % (to-1, date)))
            lbound = to

    except KeyboardInterrupt:
        traceback.print_exc()
        print("\n\n[SYNC] Aborted.. cleaning up..")
        _abort = True

    if not is_initial_sync:
        # Follows flushing may need to be moved closer to core (i.e. moved
        # into main block transactions). Important to keep in sync since
        # we need to prevent expensive recounts. This will fail if we aborted
        # in the middle of a transaction, meaning data loss. Better than
        # forcing it, however, since in-memory cache will be out of sync
        # with db state.
        Follow.flush(trx=True)

        # This flush is low importance; accounts are swept regularly.
        if not _abort:
            Accounts.flush(trx=True)

        # If this flush fails, all that could potentially be lost here is
        # edits and pre-payout votes. If the post has not been paid out yet,
        # then the worst case is it will be synced upon payout. If the post
        # is already paid out, worst case is to lose an edit.
        CachedPost.flush(trx=True)

    if _abort:
        print("[SYNC] Aborted")
        exit()
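
The fetch loop advances a fixed-size window: to = min(lbound + 1000, ubound) clamps the final batch so it never passes the last irreversible block. A minimal sketch of the windowing on its own (the bounds and chunk size are arbitrary illustration values):

# Minimal sketch of the batch-window arithmetic used in the loop above;
# bounds and chunk size are arbitrary illustration values.
def batch_windows(lbound, ubound, chunk_size=1000):
    """Yield (start, end) ranges covering [lbound, ubound)."""
    while lbound < ubound:
        to = min(lbound + chunk_size, ubound)
        yield lbound, to
        lbound = to

assert list(batch_windows(1, 2501)) == [(1, 1001), (1001, 2001), (2001, 2501)]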
Example #9
    def run(self):
        """Initialize state; setup/recovery checks; sync and runloop."""

        # ensure db schema up to date, check app status
        DbState.initialize()

        # prefetch id->name and id->rank memory maps
        Accounts.load_ids()
        Accounts.fetch_ranks()

        Community.recalc_pending_payouts()

        if DbState.is_initial_sync():
            # resume initial sync
            self.initial()
            DbState.finish_initial_sync()

        else:
            # recover from fork
            Blocks.verify_head(self._steem)

            # perform cleanup if process did not exit cleanly
            CachedPost.recover_missing_posts(self._steem)

        #audit_cache_missing(self._db, self._steem)
        #audit_cache_deleted(self._db)

        self._update_chain_state()

        if self._conf.get('test_max_block'):
            # debug mode: partial sync
            return self.from_steemd()
        if self._conf.get('test_disable_sync'):
            # debug mode: no sync, just stream
            return self.listen()

        while True:
            # sync up to irreversible block
            self.from_steemd()

            # take care of payout backlog
            CachedPost.dirty_paidouts(Blocks.head_date())
            CachedPost.flush(self._steem, trx=True)

            try:
                # listen for new blocks
                self.listen()
            except MicroForkException as e:
                # attempt to recover by restarting stream
                log.error("NOTIFYALERT microfork: %s", repr(e))
Example #10
    def from_steemd(cls, is_initial_sync=False, chunk_size=1000):
        """Fast sync strategy: read/process blocks in batches."""
        steemd = SteemClient.instance()
        lbound = Blocks.head_num() + 1
        ubound = steemd.last_irreversible()
        count = ubound - lbound
        if count < 1:
            return

        _abort = False
        try:
            print("[SYNC] start block %d, +%d to sync" % (lbound, count))
            timer = Timer(count, entity='block', laps=['rps', 'wps'])
            while lbound < ubound:
                timer.batch_start()

                # fetch blocks
                to = min(lbound + chunk_size, ubound)
                blocks = steemd.get_blocks_range(lbound, to)
                lbound = to
                timer.batch_lap()

                # process blocks
                Blocks.process_multi(blocks, is_initial_sync)
                timer.batch_finish(len(blocks))
                date = blocks[-1]['timestamp']
                print(
                    timer.batch_status("[SYNC] Got block %d @ %s" %
                                       (to - 1, date)))

        except KeyboardInterrupt:
            traceback.print_exc()
            print("\n\n[SYNC] Aborted.. cleaning up..")
            _abort = True

        if not is_initial_sync:
            # This flush is low importance; accounts are swept regularly.
            if not _abort:
                Accounts.flush(trx=True)

            # If this flush fails, all that could potentially be lost here is
            # edits and pre-payout votes. If the post has not been paid out yet,
            # then the worst case is it will be synced upon payout. If the post
            # is already paid out, worst case is to lose an edit.
            CachedPost.flush(trx=True)

        if _abort:
            print("[SYNC] Aborted")
            exit()
Example #11
    def from_steemd(self, is_initial_sync=False, chunk_size=1000):
        """Fast sync strategy: read/process blocks in batches."""
        steemd = self._steem

        lbound = Blocks.head_num() + 1
        ubound = steemd.last_irreversible()

        if self._conf.get('test_max_block'
                          ) and self._conf.get('test_max_block') < ubound:
            ubound = self._conf.get('test_max_block')

        count = ubound - lbound
        if count < 1:
            return

        massive_blocks_data_provider = None
        databases = None
        if self._conf.get('hived_database_url'):
            databases = MassiveBlocksDataProviderHiveDb.Databases(self._conf)
            massive_blocks_data_provider = MassiveBlocksDataProviderHiveDb(
                databases, self._conf.get('max_batch'), lbound, ubound,
                can_continue_thread, set_exception_thrown)
        else:
            massive_blocks_data_provider = MassiveBlocksDataProviderHiveRpc(
                self._conf, self._steem, self._conf.get('max_workers'),
                self._conf.get('max_workers'), self._conf.get('max_batch'),
                lbound, ubound, can_continue_thread, set_exception_thrown)
        _process_blocks_from_provider(self, massive_blocks_data_provider,
                                      is_initial_sync, lbound, ubound)

        if databases:
            databases.close()
Example #12
def show_info(_db):
    database_head_block = Blocks.head_num()

    sql = "SELECT level, patch_date, patched_to_revision FROM hive_db_patch_level ORDER BY level DESC LIMIT 1"
    patch_level_data = _db.query_row(sql)

    from hive.utils.misc import show_app_version
    show_app_version(log, database_head_block, patch_level_data)
Example #13
    def listen(self):
        """Live (block following) mode."""
        trail_blocks = self._conf.get('trail_blocks')
        assert trail_blocks >= 0
        assert trail_blocks <= 100

        # debug: no max gap if disable_sync in effect
        max_gap = None if self._conf.get('test_disable_sync') else 100

        steemd = self._steem
        hive_head = Blocks.head_num()

        for block in steemd.stream_blocks(hive_head + 1, trail_blocks,
                                          max_gap):
            start_time = perf()

            self._db.query("START TRANSACTION")
            num = Blocks.process(block)
            follows = Follow.flush(trx=False)
            accts = Accounts.flush(steemd, trx=False, spread=8)
            CachedPost.dirty_paidouts(block['timestamp'])
            cnt = CachedPost.flush(steemd, trx=False)
            self._db.query("COMMIT")

            ms = (perf() - start_time) * 1000
            log.info(
                "[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
                "% 3d payouts,% 3d votes,% 3d counts,% 3d accts,% 3d follows"
                " --% 5dms%s", num, block['timestamp'],
                len(block['transactions']), cnt['insert'], cnt['update'],
                cnt['payout'], cnt['upvote'], cnt['recount'], accts, follows,
                ms, ' SLOW' if ms > 1000 else '')

            if num % 1200 == 0:  #1hr
                log.warning("head block %d @ %s", num, block['timestamp'])
                log.info("[LIVE] hourly stats")
                Accounts.fetch_ranks()
                #Community.recalc_pending_payouts()
            if num % 200 == 0:  #10min
                Community.recalc_pending_payouts()
            if num % 100 == 0:  #5min
                log.info("[LIVE] 5-min stats")
                Accounts.dirty_oldest(500)
            if num % 20 == 0:  #1min
                self._update_chain_state()
Example #14
    def listen(cls):
        """Live (block following) mode."""
        trail_blocks = Conf.get('trail_blocks')
        assert trail_blocks >= 0
        assert trail_blocks <= 100

        # debug: no max gap if disable_sync in effect
        max_gap = None if Conf.get('disable_sync') else 100

        steemd = SteemClient.instance()
        hive_head = Blocks.head_num()

        for block in steemd.stream_blocks(hive_head + 1, trail_blocks,
                                          max_gap):
            start_time = perf()

            query("START TRANSACTION")
            num = Blocks.process(block)
            follows = Follow.flush(trx=False)
            accts = Accounts.flush(trx=False, spread=8)
            CachedPost.dirty_paidouts(block['timestamp'])
            cnt = CachedPost.flush(trx=False)
            query("COMMIT")

            ms = (perf() - start_time) * 1000
            log.info(
                "[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
                "% 3d payouts,% 3d votes,% 3d accts,% 3d follows --% 5dms%s",
                num, block['timestamp'], len(block['transactions']),
                cnt['insert'], cnt['update'], cnt['payout'], cnt['upvote'],
                accts, follows, int(ms), ' SLOW' if ms > 1000 else '')

            # once per hour, update accounts
            if num % 1200 == 0:
                Accounts.dirty_oldest(10000)
                Accounts.flush(trx=True)
                #Accounts.update_ranks()

            # once a minute, update chain props
            if num % 20 == 0:
                cls._update_chain_state(steemd)
Example #15
    def _update_chain_state(self):
        """Update basic state props (head block, feed price) in db."""
        state = self._steem.gdgp_extended()
        self._db.query("""UPDATE hive_state SET block_num = :block_num,
                       steem_per_mvest = :spm, usd_per_steem = :ups,
                       sbd_per_steem = :sps, dgpo = :dgpo""",
                       block_num=Blocks.head_num(),
                       spm=state['steem_per_mvest'],
                       ups=state['usd_per_steem'],
                       sps=state['sbd_per_steem'],
                       dgpo=json.dumps(state['dgpo']))
        return state['dgpo']['head_block_number']
Example #16
    def from_checkpoints(cls, chunk_size=1000):
        last_block = Blocks.head_num()

        tuplize = lambda path: [int(path.split('/')[-1].split('.')[0]), path]
        basedir = os.path.dirname(os.path.realpath(__file__ + "/../.."))
        files = glob.glob(basedir + "/checkpoints/*.json.lst")
        tuples = sorted(map(tuplize, files), key=lambda f: f[0])

        last_read = 0
        for (num, path) in tuples:
            if last_block < num:
                print("[SYNC] Load %s -- last block: %d" % (path, last_block))
                with open(path) as f:
                    # each line in file represents one block
                    # we can skip the blocks we already have
                    skip_lines = last_block - last_read
                    remaining = drop(skip_lines, f)
                    for lines in partition_all(chunk_size, remaining):
                        Blocks.process_multi(map(json.loads, lines), True)
                last_block = num
            last_read = num
Example #17
    def from_dpayd(self, is_initial_sync=False, chunk_size=1000):
        """Fast sync strategy: read/process blocks in batches."""
        # pylint: disable=no-self-use
        dpayd = self._dpay
        lbound = Blocks.head_num() + 1
        ubound = self._conf.get('test_max_block') or dpayd.last_irreversible()

        count = ubound - lbound
        if count < 1:
            return

        log.info("[SYNC] start block %d, +%d to sync", lbound, count)
        timer = Timer(count, entity='block', laps=['rps', 'wps'])
        while lbound < ubound:
            timer.batch_start()

            # fetch blocks
            to = min(lbound + chunk_size, ubound)
            blocks = dpayd.get_blocks_range(lbound, to)
            lbound = to
            timer.batch_lap()

            # process blocks
            Blocks.process_multi(blocks, is_initial_sync)
            timer.batch_finish(len(blocks))

            _prefix = ("[SYNC] Got block %d @ %s" %
                       (to - 1, blocks[-1]['timestamp']))
            log.info(timer.batch_status(_prefix))

        if not is_initial_sync:
            # This flush is low importance; accounts are swept regularly.
            Accounts.flush(dpayd, trx=True)

            # If this flush fails, all that could potentially be lost here is
            # edits and pre-payout votes. If the post has not been paid out yet,
            # then the worst case is it will be synced upon payout. If the post
            # is already paid out, worst case is to lose an edit.
            CachedPost.flush(dpayd, trx=True)
Example #18
def run():

    print("[HIVE] Welcome to hivemind")

    # make sure db schema is up to date, perform checks
    DbState.initialize()

    # prefetch id->name memory map
    Accounts.load_ids()

    if DbState.is_initial_sync():
        print("[INIT] *** Initial fast sync ***")
        sync_from_checkpoints()
        sync_from_steemd()

        print("[INIT] *** Initial cache build ***")
        # todo: disable indexes during this process
        cache_missing_posts()
        FeedCache.rebuild()
        DbState.finish_initial_sync()

    else:
        # recover from fork
        Blocks.verify_head()

        # perform cleanup in case process did not exit cleanly
        cache_missing_posts()

    while True:
        # sync up to irreversible block
        sync_from_steemd()

        # take care of payout backlog
        CachedPost.dirty_paidouts(Blocks.head_date())
        CachedPost.flush(trx=True)

        # start listening
        listen_steemd()
Example #19
def sync_from_checkpoints():
    last_block = Blocks.head_num()

    _fn = lambda f: [int(f.split('/')[-1].split('.')[0]), f]
    mydir = os.path.dirname(os.path.realpath(__file__ + "/../.."))
    files = map(_fn, glob.glob(mydir + "/checkpoints/*.json.lst"))
    files = sorted(files, key=lambda f: f[0])

    last_read = 0
    for (num, path) in files:
        if last_block < num:
            print("[SYNC] Load {} -- last block: {}".format(path, last_block))
            skip_lines = last_block - last_read
            sync_from_file(path, skip_lines, 250)
            last_block = num
        last_read = num
Example #20
def _block_consumer(blocks_data_provider, is_initial_sync, lbound, ubound):
    from hive.utils.stats import minmax
    is_debug = log.isEnabledFor(10)
    num = 0
    time_start = OPSM.start()
    rate = {}
    LIMIT_FOR_PROCESSED_BLOCKS = 1000

    rate = minmax(rate, 0, 1.0, 0)
    sync_type_prefix = "[INITIAL SYNC]" if is_initial_sync else "[FAST SYNC]"

    def print_summary():
        stop = OPSM.stop(time_start)
        log.info("=== TOTAL STATS ===")
        wtm = WSM.log_global("Total waiting times")
        ftm = FSM.log_global("Total flush times")
        otm = OPSM.log_global("All operations present in the processed blocks")
        ttm = ftm + otm + wtm
        log.info(
            f"Elapsed time: {stop :.4f}s. Calculated elapsed time: {ttm :.4f}s. Difference: {stop - ttm :.4f}s"
        )
        if rate:
            log.info(
                f"Highest block processing rate: {rate['max'] :.4f} bps. From: {rate['max_from']} To: {rate['max_to']}"
            )
            log.info(
                f"Lowest block processing rate: {rate['min'] :.4f} bps. From: {rate['min_from']} To: {rate['min_to']}"
            )
        log.info("=== TOTAL STATS ===")

    try:
        Blocks.set_end_of_sync_lib(ubound)
        count = ubound - lbound
        timer = Timer(count, entity='block', laps=['rps', 'wps'])

        while lbound < ubound:
            number_of_blocks_to_proceed = min(
                [LIMIT_FOR_PROCESSED_BLOCKS, ubound - lbound])
            time_before_waiting_for_data = perf()

            blocks = blocks_data_provider.get(number_of_blocks_to_proceed)

            if not can_continue_thread():
                break

            assert len(blocks) == number_of_blocks_to_proceed

            to = min(lbound + number_of_blocks_to_proceed, ubound)
            timer.batch_start()

            block_start = perf()
            Blocks.process_multi(blocks, is_initial_sync)
            block_end = perf()

            timer.batch_lap()
            timer.batch_finish(len(blocks))
            time_current = perf()

            prefix = ("%s Got block %d @ %s" %
                      (sync_type_prefix, to - 1, blocks[-1].get_date()))
            log.info(timer.batch_status(prefix))
            log.info("%s Time elapsed: %fs", sync_type_prefix,
                     time_current - time_start)
            log.info("%s Current system time: %s", sync_type_prefix,
                     datetime.now().strftime("%H:%M:%S"))
            log.info(log_memory_usage())
            rate = minmax(rate, len(blocks),
                          time_current - time_before_waiting_for_data, lbound)

            if block_end - block_start > 1.0 or is_debug:
                otm = OPSM.log_current(
                    "Operations present in the processed blocks")
                ftm = FSM.log_current("Flushing times")
                wtm = WSM.log_current("Waiting times")
                log.info(f"Calculated time: {otm+ftm+wtm :.4f} s.")

            OPSM.next_blocks()
            FSM.next_blocks()
            WSM.next_blocks()

            lbound = to
            PC.broadcast(
                BroadcastObject('sync_current_block', lbound, 'blocks'))

            num = num + 1

            if not can_continue_thread():
                break
    except Exception:
        log.exception("Exception caught during processing blocks...")
        set_exception_thrown()
        print_summary()
        raise

    print_summary()
    return num
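
minmax (imported from hive.utils.stats) tracks the fastest and slowest block-processing rates across batches; it is called with the current state, the batch size, the elapsed time, and the first block of the batch. A hedged sketch of a tracker with that call shape follows; the actual hive.utils.stats.minmax may differ, this only illustrates the bookkeeping:

# Hedged sketch of a minmax-style rate tracker with the call shape used in
# _block_consumer(); this is an assumption, not the hive.utils.stats code.
def minmax(state, block_count, elapsed_seconds, first_block):
    if block_count == 0 or elapsed_seconds <= 0:
        return state  # initialization call, e.g. minmax(rate, 0, 1.0, 0)
    rate = block_count / elapsed_seconds  # blocks per second
    last_block = first_block + block_count - 1
    if 'max' not in state or rate > state['max']:
        state.update(max=rate, max_from=first_block, max_to=last_block)
    if 'min' not in state or rate < state['min']:
        state.update(min=rate, min_from=first_block, min_to=last_block)
    return state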
Example #21
    def listen(self, trail_blocks, max_sync_block, do_stale_block_check):
        """Live (block following) mode.
            trail_blocks - how many blocks must be collected before the oldest one is processed (processing lag, in blocks, behind collection)
            max_sync_block - block limit for syncing; the function returns once it is reached
            do_stale_block_check - verify that the last collected block is no older than 60s
        """

        # debug: no max gap if disable_sync in effect
        max_gap = None if self._conf.get('test_disable_sync') else 100

        steemd = self._steem
        hive_head = Blocks.head_num()

        log.info("[LIVE SYNC] Entering listen with HM head: %d", hive_head)

        if hive_head >= max_sync_block:
            self.refresh_sparse_stats()
            log.info(
                "[LIVE SYNC] Exiting due to block limit exceeded: synced block number: %d, max_sync_block: %d",
                hive_head, max_sync_block)
            return

        for block in self._stream_blocks(hive_head + 1, can_continue_thread,
                                         set_exception_thrown, trail_blocks,
                                         max_gap, do_stale_block_check):
            if not can_continue_thread():
                break
            num = block.get_num()
            log.info(
                "[LIVE SYNC] =====> About to process block %d with timestamp %s",
                num, block.get_date())

            start_time = perf()

            Blocks.process_multi([block], False)
            otm = OPSM.log_current(
                "Operations present in the processed blocks")
            ftm = FSM.log_current("Flushing times")

            ms = (perf() - start_time) * 1000
            log.info(
                "[LIVE SYNC] <===== Processed block %d at %s --% 4d txs"
                " --% 5dms%s", num, block.get_date(),
                block.get_number_of_transactions(), ms,
                ' SLOW' if ms > 1000 else '')
            log.info("[LIVE SYNC] Current system time: %s",
                     datetime.now().strftime("%H:%M:%S"))

            if num % 1200 == 0:  #1hour
                log.warning("head block %d @ %s", num, block.get_date())
                log.info("[LIVE SYNC] hourly stats")

                log.info("[LIVE SYNC] filling payout_stats_view executed")
                with ThreadPoolExecutor(max_workers=2) as executor:
                    executor.submit(PayoutStats.generate)
                    executor.submit(Mentions.refresh)
            if num % 200 == 0:  #10min
                update_communities_posts_and_rank(self._db)
            if num % 20 == 0:  #1min
                self._update_chain_state()

            PC.broadcast(BroadcastObject('sync_current_block', num, 'blocks'))
            FSM.next_blocks()
            OPSM.next_blocks()

            if num >= max_sync_block:
                log.info(
                    "Stopping [LIVE SYNC] because of specified block limit: %d",
                    max_sync_block)
                break
Example #22
    def run(self):
        old_sig_int_handler = getsignal(SIGINT)
        old_sig_term_handler = getsignal(SIGTERM)

        set_handlers()

        Community.start_block = self._conf.get("community_start_block")

        # ensure db schema up to date, check app status
        DbState.initialize()
        if self._conf.get("log_explain_queries"):
            is_superuser = self._db.query_one("SELECT is_superuser()")
            assert is_superuser, 'The parameter --log_explain_queries=true can be used only when connect to the database with SUPERUSER privileges'

        _is_consistency = Blocks.is_consistency()
        if not _is_consistency:
            raise RuntimeError(
                "Fatal error related to `hive_blocks` consistency")

        show_info(self._db)

        paths = self._conf.get("mock_block_data_path") or []
        for path in paths:
            self.load_mock_data(path)

        mock_vops_data_path = self._conf.get("mock_vops_data_path")
        if mock_vops_data_path:
            MockVopsProvider.load_block_data(mock_vops_data_path)
            # MockVopsProvider.print_data()

        # prefetch id->name and id->rank memory maps
        Accounts.load_ids()

        # community stats
        update_communities_posts_and_rank(self._db)

        last_imported_block = Blocks.head_num()
        hived_head_block = self._conf.get(
            'test_max_block') or self._steem.last_irreversible()

        log.info("target_head_block : %s", hived_head_block)

        if DbState.is_initial_sync():
            DbState.before_initial_sync(last_imported_block, hived_head_block)
            # resume initial sync
            self.initial()
            if not can_continue_thread():
                restore_handlers()
                return
            current_imported_block = Blocks.head_num()
            # because we cannot interrupt long SQL operations, restore the default
            # CTRL+C behavior for the duration of the post-initial-sync actions
            restore_handlers()
            try:
                DbState.finish_initial_sync(current_imported_block)
            except KeyboardInterrupt:
                log.info("Break finish initial sync")
                set_exception_thrown()
                return
            set_handlers()
        else:
            # recover from fork
            Blocks.verify_head(self._steem)

        self._update_chain_state()

        global trail_blocks
        trail_blocks = self._conf.get('trail_blocks')
        assert trail_blocks >= 0
        assert trail_blocks <= 100
Example #23
    def __exit__(self, exc_type, value, traceback):
        # During massive sync every object has its own copy of the database,
        # so all copies have to be closed.
        # During live sync the original database is used and must not be closed,
        # because it may be used later.
        if not DbLiveContextHolder.is_live_context():
            Blocks.close_own_db_access()
Example #24
def head_state(*args):
    _ = args  # JSONRPC injects 4 arguments here
    steemd_head = get_adapter().head_block()
    hive_head = Blocks.head_num()
    diff = steemd_head - hive_head
    return dict(steemd=steemd_head, hive=hive_head, diff=diff)
Example #25
def listen_steemd(trail_blocks=0, max_gap=50):
    assert trail_blocks >= 0
    assert trail_blocks < 25

    # db state
    db_last = Blocks.last()
    last_block = db_last['num']
    last_hash = db_last['hash']

    # chain state
    steemd = get_adapter()
    head_block = steemd.head_block()
    next_expected = time.time()

    # loop state
    tries = 0
    queue = []

    # TODO: detect missed blocks by looking at block timestamps.
    #       this would be an even more efficient way to track slots.
    while True:
        assert not last_block > head_block

        # fast fwd head block if slots missed
        curr_time = time.time()
        while curr_time >= next_expected:
            head_block += 1
            next_expected += 3

        # if gap too large, abort. if caught up, wait.
        gap = head_block - last_block
        if gap > max_gap:
            print("[LIVE] gap too large: %d -- abort listen mode" % gap)
            return
        elif gap > 0:
            print("[LIVE] %d blocks behind..." % gap)
        elif gap == 0:
            time.sleep(next_expected - curr_time)
            head_block += 1
            next_expected += 3

        # get the target block; if DNE, pause and retry
        block_num = last_block + 1
        block = steemd.get_block(block_num)
        if not block:
            tries += 1
            print("[LIVE] block %d unavailable (try %d). delay 1s. head: %d/%d."
                  % (block_num, tries, head_block, steemd.head_block()))
            #assert tries < 12, "could not fetch block %s" % block_num
            assert tries < 240, "could not fetch block %s" % block_num #74
            time.sleep(1)      # pause for 1s; and,
            next_expected += 1 # delay schedule 1s
            continue
        last_block = block_num
        tries = 0

        # ensure this block links to our last; otherwise, blow up. see #59
        if last_hash != block['previous']:
            if queue:
                print("[FORK] Fork encountered. Emptying queue to retry!")
                return
            raise Exception("Unlinkable block: have %s, got %s -> %s)"
                            % (last_hash, block['previous'], block['block_id']))
        last_hash = block['block_id']

        # buffer until queue full
        queue.append(block)
        if len(queue) <= trail_blocks:
            continue


        # buffer primed; process head of queue
        # ------------------------------------

        block = queue.pop(0)

        start_time = time.perf_counter()

        query("START TRANSACTION")
        num = Blocks.process(block)
        follows = Follow.flush(trx=False)
        accts = Accounts.flush(trx=False, period=8)
        CachedPost.dirty_paidouts(block['timestamp'])
        cnt = CachedPost.flush(trx=False)
        query("COMMIT")

        ms = (time.perf_counter() - start_time) * 1000
        print("[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
              "% 3d payouts,% 3d votes,% 3d accounts,% 3d follows --% 5dms%s"
              % (num, block['timestamp'], len(block['transactions']),
                 cnt['insert'], cnt['update'], cnt['payout'], cnt['upvote'],
                 accts, follows, int(ms), ' SLOW' if ms > 1000 else ''))

        # once per hour, update accounts
        if num % 1200 == 0:
            Accounts.dirty_oldest(10000)
            Accounts.flush(trx=True)
            #Accounts.update_ranks()

        # once a minute, update chain props
        if num % 20 == 0:
            update_chain_state()
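
The listener estimates the chain head from wall-clock time: blocks arrive in fixed 3-second slots, so whenever curr_time passes next_expected the presumed head advances one block and the schedule shifts by another 3 seconds. A minimal sketch of that slot fast-forward in isolation (the starting head block and timestamps are illustrative):

# Minimal sketch of the slot fast-forward arithmetic from the loop above;
# the starting head block and timestamps are illustrative only.
BLOCK_INTERVAL = 3  # seconds per block slot

def fast_forward(head_block, next_expected, curr_time):
    """Advance the presumed head block for every slot that has elapsed."""
    while curr_time >= next_expected:
        head_block += 1
        next_expected += BLOCK_INTERVAL
    return head_block, next_expected

# 7 seconds into the schedule, three slot boundaries (0s, 3s, 6s) have passed.
assert fast_forward(100, 0.0, 7.0) == (103, 9.0)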