def __exec(self, method, params=None):
    """Perform a single steemd call."""
    start = perf()
    result = self._client.exec(method, params)
    items = len(params[0]) if method == 'get_accounts' else 1
    Stats.log_steem(method, perf() - start, items)
    return result

async def fetch_ws(http_request: HTTPRequest,
                   jrpc_request: SingleJrpcRequest) -> SingleJrpcResponse:
    jrpc_request.timings.append((perf(), 'fetch_ws.enter'))
    pools = http_request.app.config.websocket_pools
    pool = pools[jrpc_request.upstream.url]
    upstream_request = jrpc_request.to_upstream_request()
    try:
        conn = await pool.acquire()
        jrpc_request.timings.append((perf(), 'fetch_ws.acquire'))
        await conn.send(upstream_request)
        jrpc_request.timings.append((perf(), 'fetch_ws.send'))
        upstream_response_json = await conn.recv()
        jrpc_request.timings.append((perf(), 'fetch_ws.response'))
        upstream_response = loads(upstream_response_json)
        await pool.release(conn)
        assert int(upstream_response.get('id')) == jrpc_request.upstream_id
        upstream_response['id'] = jrpc_request.id
        jrpc_request.timings.append((perf(), 'fetch_ws.exit'))
        return upstream_response
    except Exception as e:
        try:
            conn.terminate()
        except NameError:
            pass
        # use a distinct name here: rebinding `e` in a nested except clause
        # unbinds it afterwards, so the final `raise e` would fail
        except Exception as terminate_error:
            logger.error('error while closing connection', e=terminate_error)
        raise e

async def fetch_http(http_request: HTTPRequest,
                     jrpc_request: SingleJrpcRequest) -> SingleJrpcResponse:
    jrpc_request.timings.append((perf(), 'fetch_http.enter'))
    session = http_request.app.config.aiohttp['session']
    upstream_request = jrpc_request.to_upstream_request(as_json=False)
    try:
        async with session.post(jrpc_request.upstream.url,
                                json=upstream_request,
                                headers=jrpc_request.upstream_headers,
                                timeout=jrpc_request.upstream.timeout) as resp:
            jrpc_request.timings.append((perf(), 'fetch_http.sent'))
            upstream_response = await resp.json(encoding='utf-8',
                                                content_type=None)
            jrpc_request.timings.append((perf(), 'fetch_http.response'))
    except Exception as e:
        try:
            response = upstream_response
        except NameError:
            response = None
        raise UpstreamResponseError(http_request=http_request,
                                    jrpc_request=jrpc_request,
                                    exception=e,
                                    upstream_request=upstream_request,
                                    upstream_response=response)
    upstream_response['id'] = jrpc_request.id
    jrpc_request.timings.append((perf(), 'fetch_http.exit'))
    return upstream_response

async def fetch_ws(http_request: HTTPRequest,
                   jrpc_request: SingleJrpcRequest) -> SingleJrpcResponse:
    jrpc_request.timings.append((perf(), 'fetch_ws.enter'))
    pools = http_request.app.config.websocket_pools
    pool = pools[jrpc_request.upstream.url]
    upstream_request = jrpc_request.to_upstream_request()
    try:
        conn = await pool.acquire()
        jrpc_request.timings.append((perf(), 'fetch_ws.acquire'))
        await conn.send(upstream_request)
        jrpc_request.timings.append((perf(), 'fetch_ws.send'))
        upstream_response_json = await asyncio.wait_for(
            conn.recv(), jrpc_request.upstream.timeout)
        jrpc_request.timings.append((perf(), 'fetch_ws.response'))
        upstream_response = loads(upstream_response_json)
        await pool.release(conn)
        assert int(upstream_response.get('id')) == jrpc_request.upstream_id
        upstream_response['id'] = jrpc_request.id
        jrpc_request.timings.append((perf(), 'fetch_ws.exit'))
        return upstream_response
    except (concurrent.futures.TimeoutError,
            concurrent.futures.CancelledError,
            asyncio.TimeoutError) as e:
        raise RequestTimeoutError(http_request=http_request,
                                  jrpc_request=jrpc_request,
                                  exception=e,
                                  upstream_request=upstream_request,
                                  tasks_count=len(
                                      asyncio.tasks.Task.all_tasks()))
    except AssertionError as e:
        raise UpstreamResponseError(http_request=http_request,
                                    jrpc_request=jrpc_request,
                                    exception=e,
                                    upstream_request=upstream_request,
                                    upstream_response=upstream_response)
    except ConnectionClosed as e:
        raise UpstreamResponseError(http_request=http_request,
                                    jrpc_request=jrpc_request,
                                    exception=e,
                                    upstream_request=upstream_request)
    except Exception as e:
        try:
            conn.terminate()
        except NameError:
            pass
        try:
            response = upstream_response
        except NameError:
            response = None
        raise UpstreamResponseError(http_request=http_request,
                                    jrpc_request=jrpc_request,
                                    exception=e,
                                    upstream_request=upstream_request,
                                    upstream_response=response,
                                    log_traceback=True)

def exec(self, method, args, is_batch=False):
    """Execute a steemd RPC method, retrying on failure."""
    what = "%s[%d]" % (method, len(args) if is_batch else 1)
    body = self.rpc_body(method, args, is_batch)
    body_data = json.dumps(body, ensure_ascii=False).encode('utf8')

    tries = 0
    allowed_tries = self.max_retries + 1
    while True:
        tries += 1
        secs = -1
        info = None
        try:
            start = perf()
            response = self.request(body=body_data)
            secs = perf() - start
            info = {'jussi-id': response.headers.get('x-jussi-request-id'),
                    'secs': round(secs, 3),
                    'try': tries}

            # strict validation/asserts, error check
            payload = validated_json_payload(response)
            result = validated_result(payload, body)

            if secs > 5:
                log.warning('%s took %.1fs %s', what, secs, info)

            return result

        except (AssertionError, RPCErrorFatal) as e:
            raise e

        except (Exception, socket.timeout) as e:
            if secs < 0:  # request failed
                secs = perf() - start
                info = {'secs': round(secs, 3), 'try': tries}
            log.warning('%s failed in %.1fs. try %d. %s - %s',
                        what, secs, tries, info, repr(e))
            if not can_continue_thread():
                break
            if tries % 2 == 0:
                self.next_node()
            sleep(min(tries / 5, 5))

        allowed_tries -= 1
        if allowed_tries == 0:
            break
        if allowed_tries < 0:
            # case of infinite retries
            allowed_tries = 0

    raise Exception("abort %s after %d tries" % (method, tries))

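# Usage sketch for exec() above -- the client construction and RPC method
# names here are illustrative assumptions, not taken from this codebase:
#
#   client = HttpClient(nodes=['https://api.steemit.com'], max_retries=5)
#   props = client.exec('get_dynamic_global_properties', [])
#   blocks = client.exec('get_block', [[n] for n in range(1, 4)], is_batch=True)
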
def __exec_batch(self, method, params):
    """Perform batch call. Based on config uses either batch or futures."""
    start = perf()

    result = []
    for part in self._client.exec_multi(
            method,
            params,
            max_workers=self._max_workers,
            batch_size=self._max_batch):
        result.extend(part)

    Stats.log_steem(method, perf() - start, len(params))
    return result

async def fetch_http(http_request: HTTPRequest,
                     jrpc_request: SingleJrpcRequest) -> SingleJrpcResponse:
    jrpc_request.timings.append((perf(), 'fetch_http.enter'))
    session = http_request.app.config.aiohttp['session']
    upstream_request = jrpc_request.to_upstream_request(as_json=False)
    async with session.post(jrpc_request.upstream.url,
                            json=upstream_request,
                            headers=jrpc_request.upstream_headers) as resp:
        jrpc_request.timings.append((perf(), 'fetch_http.response'))
        upstream_response = await resp.json(encoding='utf-8',
                                            content_type=None)
    upstream_response['id'] = jrpc_request.id
    jrpc_request.timings.append((perf(), 'fetch_http.exit'))
    return upstream_response

def exec(self, method, args, is_batch=False):
    """Execute a dpayd RPC method, retrying on failure."""
    what = "%s[%d]" % (method, len(args) if is_batch else 1)
    body = self.rpc_body(method, args, is_batch)
    body_data = json.dumps(body, ensure_ascii=False).encode('utf8')

    tries = 0
    while tries < 100:
        tries += 1
        secs = -1
        info = None
        try:
            start = perf()
            response = self.request(body=body_data)
            secs = perf() - start
            info = {'jefferson-id': response.headers.get('x-jefferson-request-id'),
                    'secs': round(secs, 3),
                    'try': tries}

            # strict validation/asserts, error check
            payload = validated_json_payload(response)
            result = validated_result(payload, body)

            if secs > 5:
                log.warning('%s took %.1fs %s', what, secs, info)
            if tries > 2:
                log.warning('%s took %d tries %s', what, tries, info)

            return result
        except (AssertionError, RPCErrorFatal) as e:
            raise e
        except (Exception, socket.timeout) as e:
            if secs < 0:  # request failed
                secs = perf() - start
                info = {'secs': round(secs, 3), 'try': tries}
            log.error('%s failed in %.1fs. try %d. %s - %s',
                      what, secs, tries, info, repr(e))
            if tries % 2 == 0:
                self.next_node()
            sleep(tries / 10)

    raise Exception("abort %s after %d tries" % (method, tries))

async def handle_jsonrpc(http_request: HTTPRequest) -> HTTPResponse:
    # retrieve parsed jsonrpc_requests after request middleware processing
    http_request.timings.append((perf(), 'handle_jsonrpc.enter'))

    # make upstream requests
    if http_request.is_single_jrpc:
        jsonrpc_response = await dispatch_single(http_request,
                                                 http_request.jsonrpc)
    else:
        futures = [dispatch_single(http_request, request)
                   for request in http_request.jsonrpc]
        jsonrpc_response = await asyncio.gather(*futures)

    http_request.timings.append((perf(), 'handle_jsonrpc.exit'))
    return response.json(jsonrpc_response)

def lists(cls, name, rep):
    """Return blacklists the account belongs to."""
    assert name
    inst = cls.instance()

    # update hourly
    if perf() - inst.fetched > 3600:
        inst.load()

    if name not in inst.blist_map:
        out = []
        if name in inst.blist:
            url = 'http://blacklist.usesteem.com/user/' + name
            lists = json.loads(_read_url(url))
            out.extend(lists['blacklisted'])
        if name in inst.accounts:
            if 'irredeemables' not in out:
                out.append('irredeemables')
        if int(rep) < 1:
            out.append('reputation-0')
        elif int(rep) == 1:
            out.append('reputation-1')
        inst.blist_map[name] = out

    return inst.blist_map[name]

def add_ms(cls, ms):
    """Add to total ms elapsed; print if threshold reached."""
    cls._ms += ms
    if cls._ms > cls.PRINT_THRESH_MINS * 60 * 1000:
        cls.report()
        cls._ms = 0
        cls._idle = 0
        cls._start = perf()

def add_secs(cls, secs):
    """Add to total seconds elapsed; print if threshold reached."""
    cls._secs += secs
    if cls._secs > cls.PRINT_THRESH_MINS * 60:
        cls.report()
        cls._secs = 0
        cls._idle = 0
        cls._start = perf()

def listen(self):
    """Live (block following) mode."""
    trail_blocks = self._conf.get('trail_blocks')
    assert trail_blocks >= 0
    assert trail_blocks <= 100

    # debug: no max gap if disable_sync in effect
    max_gap = None if self._conf.get('test_disable_sync') else 100

    steemd = self._steem
    hive_head = Blocks.head_num()

    for block in steemd.stream_blocks(hive_head + 1, trail_blocks, max_gap):
        start_time = perf()

        self._db.query("START TRANSACTION")
        num = Blocks.process(block)
        follows = Follow.flush(trx=False)
        accts = Accounts.flush(steemd, trx=False, spread=8)
        CachedPost.dirty_paidouts(block['timestamp'])
        cnt = CachedPost.flush(steemd, trx=False)
        self._db.query("COMMIT")

        ms = (perf() - start_time) * 1000
        log.info("[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
                 "% 3d payouts,% 3d votes,% 3d counts,% 3d accts,% 3d follows"
                 " --% 5dms%s",
                 num, block['timestamp'], len(block['transactions']),
                 cnt['insert'], cnt['update'], cnt['payout'], cnt['upvote'],
                 cnt['recount'], accts, follows, ms,
                 ' SLOW' if ms > 1000 else '')

        if num % 1200 == 0:  # 1hr
            log.warning("head block %d @ %s", num, block['timestamp'])
            log.info("[LIVE] hourly stats")
            Accounts.fetch_ranks()
            #Community.recalc_pending_payouts()
        if num % 200 == 0:  # 10min
            Community.recalc_pending_payouts()
        if num % 100 == 0:  # 5min
            log.info("[LIVE] 5-min stats")
            Accounts.dirty_oldest(500)
        if num % 20 == 0:  # 1min
            self._update_chain_state()

def load(self):
    """Reload all accounts from irredeemables endpoint and global lists."""
    self.accounts = set(_read_url(self.url).decode('utf8').split())
    #jsn = _read_url('http://blacklist.usesteem.com/blacklists')
    self.blist = dict()  #set(json.loads(jsn))
    self.blist_map = dict()
    log.warning("%d muted, %d blacklisted", len(self.accounts), len(self.blist))
    self.fetched = perf()

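# `_read_url` is used by load() and lists() but is not defined in this
# excerpt. A minimal sketch of what it plausibly does (an assumption, not
# necessarily the project's actual helper):
from urllib.request import urlopen

def _read_url(url):
    """Return the raw response body (bytes) for `url`."""
    return urlopen(url).read()
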
def flush(cls, trx=True):
    """Flushes pending follow count deltas."""
    sqls = []
    for col, deltas in cls._delta.items():
        for name, delta in deltas.items():
            sql = "UPDATE hive_accounts SET %s = %s + :mag WHERE id = :id"
            sqls.append((sql % (col, col), dict(mag=delta, id=name)))
    if not sqls:
        return 0

    start = perf()
    DB.batch_queries(sqls, trx=trx)
    if trx:
        total = (perf() - start)
        log.info("[SYNC] flushed %d follow deltas in %ds", len(sqls), total)

    cls._delta = {FOLLOWERS: {}, FOLLOWING: {}}
    return len(sqls)

async def get_response(request: HTTPRequest) -> Optional[HTTPResponse]:
    # return cached response from cache if all requests were in cache
    if not request.jsonrpc:
        return
    request.timings.append((perf(), 'get_cached_response.enter'))
    cache_group = request.app.config.cache_group
    cache_read_timeout = request.app.config.cache_read_timeout
    try:
        cached_response = None
        async with timeout(cache_read_timeout):
            if request.is_single_jrpc:
                cached_response_future = \
                    cache_group.get_single_jsonrpc_response(request.jsonrpc)
            elif request.is_batch_jrpc:
                cached_response_future = \
                    cache_group.get_batch_jsonrpc_responses(request.jsonrpc)
            else:
                request.timings.append((perf(), 'get_cached_response.exit'))
                return
            cached_response = await cached_response_future
            request.timings.append((perf(), 'get_cached_response.response'))
        if cached_response and \
                cache_group.is_complete_response(request.jsonrpc, cached_response):
            jefferson_cache_key = cache_group.x_jefferson_cache_key(request.jsonrpc)
            request.timings.append((perf(), 'get_cached_response.exit'))
            return response.json(
                cached_response,
                headers={'x-jefferson-cache-hit': jefferson_cache_key})
    except ConnectionRefusedError as e:
        logger.error('error connecting to redis cache', e=e)
    except asyncio.TimeoutError:
        logger.error('cache read timeout',
                     timeout=cache_read_timeout,
                     request_id=request.jefferson_request_id)
    except Exception as e:
        logger.error('error querying cache for response', e=e, exc_info=e)
    request.timings.append((perf(), 'get_cached_response.exit'))

def _query(self, sql, **kwargs):
    """Send a query off to SQLAlchemy."""
    if sql == 'START TRANSACTION':
        assert not self._trx_active
        self._trx_active = True
    elif sql == 'COMMIT':
        assert self._trx_active
        self._trx_active = False

    try:
        start = perf()
        query = self._sql_text(sql)
        result = self._exec(query, **kwargs)
        Stats.log_db(sql, perf() - start)
        return result
    except Exception as e:
        log.info("[SQL-ERR] %s in query %s (%s)",
                 e.__class__.__name__, sql, kwargs)
        raise e

class Stats:
    """Container for steemd and db timing data."""
    PRINT_THRESH_MINS = 1
    COLLECT_DB_STATS = 0
    COLLECT_NODE_STATS = 0

    _db = DbStats()
    _steemd = SteemStats()
    _secs = 0.0
    _idle = 0.0
    _start = perf()

    @classmethod
    def log_db(cls, sql, secs):
        """Log a database query. Incoming SQL is normalized."""
        if cls.COLLECT_DB_STATS:
            cls._db.add(_normalize_sql(sql), secs * 1000)
        cls.add_secs(secs)

    @classmethod
    def log_steem(cls, method, secs, batch_size=1):
        """Log a steemd call."""
        if cls.COLLECT_NODE_STATS:
            cls._steemd.add(method, secs * 1000, batch_size)
        cls.add_secs(secs)

    @classmethod
    def log_idle(cls, secs):
        """Track idle time (e.g. sleeping until next block)."""
        cls._idle += secs

    @classmethod
    def add_secs(cls, secs):
        """Add to total seconds elapsed; print if threshold reached."""
        cls._secs += secs
        if cls._secs > cls.PRINT_THRESH_MINS * 60:
            cls.report()
            cls._secs = 0
            cls._idle = 0
            cls._start = perf()

    @classmethod
    def report(cls):
        """Emit a timing report for tracked services."""
        if not cls._secs:
            return  # nothing to report
        total = perf() - cls._start
        non_idle = total - cls._idle
        log.info("cumtime %ds (%.1f%% of %ds). %.1f%% idle. peak %dmb.",
                 cls._secs, 100 * cls._secs / non_idle, non_idle,
                 100 * cls._idle / total, peak_usage_mb())
        if cls._secs > 1:
            cls._db.report(cls._secs)
            cls._steemd.report(cls._secs)

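# Minimal sketch of feeding the Stats collector above (the `db` handle and
# wrapper name are illustrative assumptions; COLLECT_DB_STATS must be truthy
# for the per-query breakdown to accumulate):
def timed_query(db, sql, **kwargs):
    start = perf()
    result = db.query(sql, **kwargs)
    Stats.log_db(sql, perf() - start)  # seconds; converted to ms internally
    return result
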
def listen(cls):
    """Live (block following) mode."""
    trail_blocks = Conf.get('trail_blocks')
    assert trail_blocks >= 0
    assert trail_blocks <= 100

    # debug: no max gap if disable_sync in effect
    max_gap = None if Conf.get('disable_sync') else 100

    steemd = SteemClient.instance()
    hive_head = Blocks.head_num()

    for block in steemd.stream_blocks(hive_head + 1, trail_blocks, max_gap):
        start_time = perf()

        query("START TRANSACTION")
        num = Blocks.process(block)
        follows = Follow.flush(trx=False)
        accts = Accounts.flush(trx=False, spread=8)
        CachedPost.dirty_paidouts(block['timestamp'])
        cnt = CachedPost.flush(trx=False)
        query("COMMIT")

        ms = (perf() - start_time) * 1000
        log.info("[LIVE] Got block %d at %s --% 4d txs,% 3d posts,% 3d edits,"
                 "% 3d payouts,% 3d votes,% 3d accts,% 3d follows --% 5dms%s",
                 num, block['timestamp'], len(block['transactions']),
                 cnt['insert'], cnt['update'], cnt['payout'], cnt['upvote'],
                 accts, follows, int(ms), ' SLOW' if ms > 1000 else '')

        # once per hour, update accounts
        if num % 1200 == 0:
            Accounts.dirty_oldest(10000)
            Accounts.flush(trx=True)
            #Accounts.update_ranks()

        # once a minute, update chain props
        if num % 20 == 0:
            cls._update_chain_state(steemd)

def report(cls):
    """Emit a timing report for tracked services."""
    if not cls._secs:
        return  # nothing to report
    total = perf() - cls._start
    non_idle = total - cls._idle
    log.info("cumtime %ds (%.1f%% of %ds). %.1f%% idle. peak %dmb.",
             cls._secs, 100 * cls._secs / non_idle, non_idle,
             100 * cls._idle / total, peak_usage_mb())
    if cls._secs > 1:
        cls._db.report(cls._secs)
        cls._steemd.report(cls._secs)

class Stats:
    """Container for dpayd and db timing data."""
    PRINT_THRESH_MINS = 5

    _db = DbStats()
    _dpayd = DPayStats()
    _ms = 0.0
    _idle = 0.0
    _start = perf()

    @classmethod
    def log_db(cls, sql, secs):
        """Log a database query. Incoming SQL is normalized."""
        ms = secs * 1000
        cls._db.add(_normalize_sql(sql), ms)
        cls.add_ms(ms)

    @classmethod
    def log_dpay(cls, method, ms, batch_size=1):
        """Log a dpayd call."""
        cls._dpayd.add(method, ms, batch_size)
        cls.add_ms(ms)

    @classmethod
    def log_idle(cls, ms):
        """Track idle time (e.g. sleeping until next block)."""
        cls._idle += ms

    @classmethod
    def add_ms(cls, ms):
        """Add to total ms elapsed; print if threshold reached."""
        cls._ms += ms
        if cls._ms > cls.PRINT_THRESH_MINS * 60 * 1000:
            cls.report()
            cls._ms = 0
            cls._idle = 0
            cls._start = perf()

    @classmethod
    def report(cls):
        """Emit a timing report for tracked services."""
        if not cls._ms:
            return  # nothing to report
        local = cls._ms / 1000
        idle = cls._idle / 1000
        total = perf() - cls._start
        non_idle = total - idle
        log.info("cumtime %ds (%.1f%% of %ds). %.1f%% idle. peak %dmb.",
                 local, 100 * local / non_idle, non_idle,
                 100 * idle / total, peak_usage_mb())
        if local > 1:
            cls._db.report(cls._ms)
            cls._dpayd.report(cls._ms)

def flush(cls, trx=True):
    """Flushes pending follow count deltas."""
    updated = 0
    sqls = []
    for col, deltas in cls._delta.items():
        for delta, names in _flip_dict(deltas).items():
            updated += len(names)
            sql = "UPDATE hive_accounts SET %s = %s + :mag WHERE id IN :ids"
            sqls.append((sql % (col, col), dict(mag=delta, ids=tuple(names))))
    if not updated:
        return 0

    start = perf()
    DB.batch_queries(sqls, trx=trx)
    if trx:
        log.info("[SYNC] flushed %d follow deltas in %ds",
                 updated, perf() - start)

    cls._delta = {FOLLOWERS: {}, FOLLOWING: {}}
    return updated

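# `_flip_dict` is referenced above but not defined in this excerpt. A sketch
# consistent with its usage -- grouping ids that share the same delta so a
# single "UPDATE ... WHERE id IN :ids" covers all of them (an assumption,
# not necessarily the project's implementation):
def _flip_dict(delta_by_id):
    """Invert {id: delta} into {delta: [ids]}."""
    flipped = {}
    for _id, delta in delta_by_id.items():
        flipped.setdefault(delta, []).append(_id)
    return flipped
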
def report(cls):
    """Emit a timing report for tracked services."""
    if not cls._ms:
        return  # nothing to report
    local = cls._ms / 1000
    idle = cls._idle / 1000
    total = perf() - cls._start
    non_idle = total - idle
    log.info("cumtime %ds (%.1f%% of %ds). %.1f%% idle. peak %dmb.",
             local, 100 * local / non_idle, non_idle,
             100 * idle / total, peak_usage_mb())
    cls._db.report(cls._ms)
    cls._steemd.report(cls._ms)

async def generate(self):
    """Re-generate payout stats temp table."""
    if self._updated and perf() - self._updated < 60 * 60:
        return  # only update if age > 1hr

    sql = """
        SELECT community_id, author, SUM(payout) payout, COUNT(*) posts,
               NULL authors
          FROM hive_posts_cache
         WHERE is_paidout = '0'
      GROUP BY community_id, author

     UNION ALL

        SELECT community_id, NULL author, SUM(payout) payout, COUNT(*) posts,
               COUNT(DISTINCT(author)) authors
          FROM hive_posts_cache
         WHERE is_paidout = '0'
      GROUP BY community_id
    """

    log.warning("Rebuilding payout_stats")

    await self._db.query("""
        BEGIN;
        DROP TABLE IF EXISTS payout_stats;
        CREATE TABLE payout_stats AS %s;
        CREATE INDEX payout_stats_ix1
            ON payout_stats (community_id, author, payout);
        COMMIT;
    """ % sql)

    self._updated = perf()

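# Illustrative read against the rebuilt table (a hypothetical query, shown
# only to make the table's shape concrete): per-community totals live in the
# rows where author IS NULL; per-author rows carry NULL in `authors`.
#
#   SELECT payout, posts, authors
#     FROM payout_stats
#    WHERE community_id = :community_id AND author IS NULL
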
def _query(self, sql, is_prepared, **kwargs):
    """Send a query off to SQLAlchemy."""
    if sql == 'START TRANSACTION':
        assert not self._trx_active
        self._trx_active = True
    elif sql == 'COMMIT':
        assert self._trx_active
        self._trx_active = False

    try:
        start = perf()
        query = self._sql_text(sql, is_prepared)
        if 'log_query' in kwargs and kwargs['log_query']:
            log.info("QUERY: {}".format(query))
        result = self._basic_connection.execution_options(
            autocommit=False).execute(query, **kwargs)
        if 'log_result' in kwargs and kwargs['log_result']:
            log.info("RESULT: {}".format(result))
        Stats.log_db(sql, perf() - start)
        return result
    except Exception as e:
        log.warning("[SQL-ERR] %s in query %s (%s)",
                    e.__class__.__name__, sql, kwargs)
        raise e

async def finalize_jussi_response(request: HTTPRequest,
                                  response: HTTPResponse) -> None:
    # pylint: disable=bare-except
    try:
        response.headers['x-jussi-request-id'] = request.jussi_request_id
        response.headers['x-amzn-trace-id'] = request.amzn_trace_id
        response.headers['x-jussi-response-time'] = str(
            perf() - request.timings[0][0])
        if request.is_single_jrpc:
            response.headers['x-jussi-namespace'] = request.jsonrpc.urn.namespace
            response.headers['x-jussi-api'] = request.jsonrpc.urn.api
            response.headers['x-jussi-method'] = request.jsonrpc.urn.method
            response.headers['x-jussi-params'] = _repr(request.jsonrpc.urn.params)
    except BaseException as e:
        logger.warning('finalize_jussi error', e=e)

def walkeur(matrice, x, y):
    """Mark (x, y) as visited, dispatch to the corner/edge/interior counter,
    then unmark the cell before returning the count."""
    t = perf()
    matrice[x][y] = True
    if x == 0 and y == 0:
        r = count_in_matrice_angle(matrice, x, y,
                                   matrice.shape[0] * matrice.shape[1] - 1)
    elif x == 0:
        r = count_in_matrice_bord_x(matrice, x, y,
                                    matrice.shape[0] * matrice.shape[1] - 1)
    elif y == 0:
        r = count_in_matrice_bord_y(matrice, x, y,
                                    matrice.shape[0] * matrice.shape[1] - 1)
    else:
        r = count_in_matrice(matrice, x, y,
                             matrice.shape[0] * matrice.shape[1] - 1)
    matrice[x][y] = False
    # print(x, y, r)
    # print(perf() - t)
    return r

# Inner wrapper of an async timing decorator; `function` is the wrapped
# coroutine, bound in the enclosing (not shown) decorator's closure.
async def _wrapper(*args, **kwargs):
    start = perf()
    result = await function(*args, **kwargs)
    # args[1] is the SQL string passed to the wrapped method
    Stats.log_db(args[1], perf() - start)
    return result

# Fragment: the tail of stats_from_markov (its signature is implied by the
# call in save_stats below); the sampling loop that computes `weight`, `tas`,
# `this_collapse_weight` and `this_loosed_weight` is missing from this excerpt.
def stats_from_markov(nb_line, nb_column, sample):
    ...

    # configuration
    weight_recurrent[weight] += 1
    if weight < min_weight:
        min_weight = weight
        min_array = np.copy(tas)

    # collapse weight
    if this_collapse_weight > nb_line * nb_column * 18:
        raise IndexError(f"{this_collapse_weight} is one avalanche too many")
    else:
        collapse_weight[this_collapse_weight] += 1

    # loosed weight
    loosed_weight[this_loosed_weight] += 1

    return (nb_line, nb_column, sample, weight_recurrent, min_array,
            collapse_weight, loosed_weight)


def save_stats(nb_line, nb_column, sample):
    """Save the stats."""
    with open(f"Data\\Markov\\markov_{nb_line}_{nb_column}_{sample}", 'wb') as file:
        pic = Pickler(file)
        pic.dump(stats_from_markov(nb_line, nb_column, sample))


if __name__ == '__main__':
    from time import perf_counter as perf

    t = perf()
    save_stats(70, 70, 10_000_000)
    print(perf() - t)

# Inner wrapper of a timing decorator; `fn` is the wrapped function, bound in
# the enclosing (not shown) decorator's closure. Duration is logged in ms.
def _wrap(*args, **kwargs):
    time_start = perf()
    result = fn(*args, **kwargs)
    time_end = perf()
    # args[1] is the SQL string passed to the wrapped method
    Stats.log_db(args[1], (time_end - time_start) * 1000)
    return result

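# A minimal sketch of the enclosing decorator implied by `_wrap` above. The
# decorator name `log_db_stats` and the decorated method are assumptions made
# for illustration:
import functools

def log_db_stats(fn):
    """Wrap a DB method so each call's duration is reported to Stats (ms)."""
    @functools.wraps(fn)
    def _wrap(*args, **kwargs):
        time_start = perf()
        result = fn(*args, **kwargs)
        Stats.log_db(args[1], (perf() - time_start) * 1000)
        return result
    return _wrap

# Usage (hypothetical): the second positional argument of the decorated
# method must be the SQL string, matching args[1] above.
#
#   @log_db_stats
#   def query(self, sql, **kwargs):
#       ...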