def get_data_core(height):
    global INDEX
    global RPCM
    blktime = list()
    blktx = list()
    txin = list()
    txout = list()
    # Reuse this process's cached RPC connection if one exists.
    if RPCM is not None:
        rpcm = RPCM
    else:
        rpcm = RPCManager(rpc_user, rpc_password)
    block_hash = rpcm.call('getblockhash', height)
    block = rpcm.call('getblock', block_hash, 2)  # verbosity 2: full tx objects
    blkid = INDEX.select('SELECT_BLKID', (block['hash'],))
    blktime.append((blkid, block['time']))
    for tx in block['tx']:
        txid = INDEX.select('SELECT_TXID', (tx['txid'],))
        blktx.append((blkid, txid))
        for n, vin in enumerate(tx['vin']):
            if 'coinbase' in vin:
                # Coinbase inputs have no previous output; record (0, 0).
                txin.append((txid, n, 0, 0))
                continue
            ptxid = INDEX.select('SELECT_TXID', (vin['txid'],))
            pn = vin['vout']
            txin.append((txid, n, ptxid, pn))
        for n, vout in enumerate(tx['vout']):
            for addr, btc in addr_btc_from_vout(tx['txid'], vout):
                addrid = INDEX.select('SELECT_ADDRID', (addr,))
                txout.append((txid, n, addrid, btc))
    RPCM = rpcm
    return blktime, blktx, txin, txout
def __init__(self, dht_addr):
    my_addr = dht_addr
    my_id = identifier.RandomId()
    my_node = Node(my_addr, my_id)
    tracker_ = tracker.Tracker()
    token_m = token_manager.TokenManager()
    self.reactor = ThreadedReactor()
    rpc_m = RPCManager(self.reactor, my_addr[1])
    querier_ = Querier(rpc_m, my_id)
    routing_m = RoutingManager(my_node, querier_, bootstrap_nodes)
    responder_ = Responder(my_id, routing_m, tracker_, token_m)
    responder_.set_on_query_received_callback(routing_m.on_query_received)
    querier_.set_on_response_received_callback(routing_m.on_response_received)
    querier_.set_on_error_received_callback(routing_m.on_error_received)
    querier_.set_on_timeout_callback(routing_m.on_timeout)
    querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)
    routing_m.do_bootstrap()
    rpc_m.add_msg_callback(QUERY, responder_.on_query_received)
    self.lookup_m = LookupManager(my_id, querier_, routing_m)
    self._routing_m = routing_m
def main():
    if DEBUG:
        print(f'Parsed arguments {FLAGS}')
        print(f'Unparsed arguments {_}')
    rpcm = RPCManager(rpc_user, rpc_password)
    blk_file = open(FLAGS.blk, 'w')
    tx_file = open(FLAGS.tx, 'w')
    addr_file = open(FLAGS.addr, 'w')
    blk_writer = csv.writer(blk_file, lineterminator=os.linesep)
    tx_writer = csv.writer(tx_file, lineterminator=os.linesep)
    addr_writer = csv.writer(addr_file, lineterminator=os.linesep)
    blk_writer.writerow(('height', 'blkhash'))
    tx_writer.writerow(('txid',))
    addr_writer.writerow(('addr',))
    term = FLAGS.term
    start_height = 0
    best_block_hash = rpcm.call('getbestblockhash')
    best_block = rpcm.call('getblock', best_block_hash)
    # Stay a safe distance behind the tip to avoid reorged blocks.
    end_height = best_block['height'] - FLAGS.untrusted
    if DEBUG:
        print((f'Best Block Height: {best_block["height"]}, '
               f'Time: {get_time(best_block["time"]).isoformat()}'))
    print(f'Start from {start_height} to {end_height}')
    pool_num = min(multiprocessing.cpu_count() // 2, 4)
    stime = time.time()
    try:
        for sheight, eheight in zip(
                range(start_height, end_height, term),
                range(start_height + term, end_height + term, term)):
            if eheight >= end_height:
                eheight = end_height + 1
            with multiprocessing.Pool(pool_num) as p:
                try:
                    results = p.imap(get_data, range(sheight, eheight))
                    for blks, txes, addrs in results:
                        blk_writer.writerows(blks)
                        tx_writer.writerows(txes)
                        addr_writer.writerows(addrs)
                except KeyboardInterrupt:
                    print('KeyboardInterrupt detected. Terminating child processes.')
                    p.terminate()
                    p.join(10)
                    raise
            if DEBUG:
                print(f'Job done from {sheight} to {eheight-1} during {time.time()-stime}')
    except KeyboardInterrupt:
        print('Closing...')
    finally:
        blk_file.close()
        tx_file.close()
        addr_file.close()
    if DEBUG:
        print(f'All jobs completed {start_height} to {end_height} during {time.time()-stime}')
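# Chunking note (illustrative, not from the source): the zipped ranges in
# main() above walk [start_height, end_height) in windows of `term`, widening
# the last window by one so the final height is still processed by
# range(sheight, eheight). A standalone check of the same pattern, using a
# hypothetical helper name:
def height_windows(start, end, term):
    for s, e in zip(range(start, end, term),
                    range(start + term, end + term, term)):
        if e >= end:
            e = end + 1  # last window includes height end - 1 and end
        yield s, e  # heights processed as range(s, e)

assert list(height_windows(0, 10, 4)) == [(0, 4), (4, 8), (8, 11)]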
class Controller:

    def __init__(self, dht_addr):
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()
        self.reactor = ThreadedReactor()
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)
        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)
        self.routing_m.do_bootstrap()
        self.rpc_m.add_msg_callback(QUERY, self.responder.on_query_received)
        self.lookup_m = LookupManager(self.my_id, self.querier, self.routing_m)

    def start(self):
        self.reactor.start()

    def stop(self):
        # TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)
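# Usage sketch (illustrative, not part of the source): assumes this module's
# dependencies (identifier, bootstrap_nodes, etc.) are available, and uses a
# random Id as a stand-in info-hash; the bind address, port, and callback
# name are hypothetical.
if __name__ == '__main__':
    import time

    def print_peers(peers):  # hypothetical lookup callback
        print('peers found:', peers)

    dht = Controller(('127.0.0.1', 7000))
    dht.start()
    dht.get_peers(identifier.RandomId(), print_peers, bt_port=6881)
    time.sleep(30)  # give the lookup time to run
    dht.stop()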
def get_data_index(height):
    global RPCM
    blks = list()
    txes = list()
    addrs = list()
    # Reuse this process's cached RPC connection if one exists.
    if RPCM is not None:
        rpcm = RPCM
    else:
        rpcm = RPCManager(rpc_user, rpc_password)
    block_hash = rpcm.call('getblockhash', height)
    block = rpcm.call('getblock', block_hash, 2)
    blks.append((height, block_hash))
    for tx in block['tx']:
        txes.append((tx['txid'],))
        for n, vout in enumerate(tx['vout']):
            for addr, btc in addr_btc_from_vout(tx['txid'], vout):
                addrs.append((addr,))
    RPCM = rpcm
    return blks, txes, addrs
def get_data(height):
    pid = os.getpid()
    blks = list()
    txes = list()
    addrs = list()
    # Look up this worker's cached RPC connection. Note: RPCM.get(pid,
    # RPCManager(...)) would construct a fresh RPCManager on every call even
    # on a cache hit (dict.get evaluates its default eagerly), so test
    # explicitly instead.
    rpcm = RPCM.get(pid)
    if rpcm is None:
        rpcm = RPCManager(rpc_user, rpc_password)
    block_hash = rpcm.call('getblockhash', height)
    block = rpcm.call('getblock', block_hash, 2)
    blks.append((height, block_hash))
    for tx in block['tx']:
        txes.append((tx['txid'],))
        for addr in vout_addrs_from_tx(tx):
            addrs.append((addr,))
    RPCM[pid] = rpcm
    return blks, txes, addrs
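# Per-process cache demonstration (illustrative, not from the source):
# assuming RPCM is a module-level dict, each multiprocessing worker owns its
# own copy of the module globals, so keying on os.getpid() yields at most one
# expensive handle per worker. A minimal self-contained version of the same
# pattern, with hypothetical names:
import multiprocessing
import os

_CACHE = {}  # hypothetical stand-in for RPCM

def _expensive_handle():
    print(f'creating handle in pid {os.getpid()}')
    return object()

def work(i):
    handle = _CACHE.get(os.getpid())
    if handle is None:  # constructed at most once per worker process
        _CACHE[os.getpid()] = handle = _expensive_handle()
    return i

if __name__ == '__main__':
    with multiprocessing.Pool(2) as p:
        # Prints 28; typically one handle creation per worker process.
        print(sum(p.imap(work, range(8))))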
def setup_rpc(server_name):
    print(f"Starting rpc for {cluster_info_by_name[server_name]}")
    rpc_mgr = RPCManager(address=cluster_info_by_name[server_name],
                         authkey=b"peekaboo")
    return rpc_mgr
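# Illustrative sketch only (not from the source): the address/authkey
# constructor above mirrors the stdlib multiprocessing.managers.BaseManager
# API, so under that assumption a comparable stdlib-only client looks like
# this. The 'ping' typeid and connect() helper are hypothetical, and the
# server side would have to register a matching 'ping' callable.
from multiprocessing.managers import BaseManager

class _RPCClient(BaseManager):  # hypothetical stand-in for RPCManager
    pass

_RPCClient.register('ping')

def connect(address, authkey=b"peekaboo"):
    client = _RPCClient(address=address, authkey=authkey)
    client.connect()  # raises if the server is unreachable or authkey differs
    return client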
def main():
    if DEBUG:
        print(f'Parsed arguments {FLAGS}')
        print(f'Unparsed arguments {_}')
    dbb = DBBuilder(FLAGS.type, FLAGS.output)
    # Trade durability for bulk-load speed; restored to WAL/NORMAL at the end.
    dbb.cur.execute('PRAGMA journal_mode = OFF;')
    dbb.cur.execute('PRAGMA synchronous = OFF;')
    dbb.cur.execute(f'PRAGMA cache_size = {FLAGS.cachesize};')
    if not RESUME:
        # page_size only takes effect after VACUUM on an existing database.
        dbb.cur.execute(f'PRAGMA page_size = {FLAGS.pagesize};')
        dbb.cur.execute('VACUUM;')
    dbb.conn.commit()
    rpcm = RPCManager(rpc_user, rpc_password)
    if FLAGS.type == 'index':
        term = FLAGS.term
        start_height = dbb.select('SELECT_MAX_BLKID')
        if start_height is None:
            start_height = 0
        else:
            start_height = int(start_height)
        best_block_hash = rpcm.call('getbestblockhash')
        best_block = rpcm.call('getblock', best_block_hash)
        # Stay a safe distance behind the tip to avoid reorged blocks.
        end_height = best_block['height'] - FLAGS.untrusted
        if DEBUG:
            print((f'Best Block Height: {best_block["height"]}, '
                   f'Time: {get_time(best_block["time"]).isoformat()}'))
        dbb.begin()
        dbb.insertmany('INSERT_ADDRTYPEID', ['P2PKH', 'P2SH', 'Bech32'])
        dbb.commit()
        print(f'Start from {start_height} to {end_height}')
        pool_num = FLAGS.process
        stime = time.time()
        mtime = time.time()
        for sheight, eheight in zip(
                range(start_height, end_height, term),
                range(start_height + term, end_height + term, term)):
            if eheight >= end_height:
                eheight = end_height + 1
            dbb.begin()
            with multiprocessing.Pool(pool_num) as p:
                try:
                    results = p.imap(get_data_index, range(sheight, eheight))
                    for blks, txes, addrs in results:
                        dbb.insertmany('INSERT_BLKID', blks)
                        dbb.insertmany('INSERT_TXID', txes)
                        dbb.insertmany('INSERT_ADDRID', addrs)
                except KeyboardInterrupt:
                    print('KeyboardInterrupt detected. Terminating child processes.')
                    p.terminate()
                    p.join(60)
                    raise
            dbb.commit()
            if DEBUG:
                print(f'Job done from {sheight} to {eheight-1} during {time.time()-stime}')
        if DEBUG:
            print(f'All jobs completed {start_height} to {end_height} during {time.time()-stime}')
    elif FLAGS.type == 'core':
        if FLAGS.index is None:
            raise Exception('Need index database path. (--index)')
        global INDEX
        INDEX = DBReader(FLAGS.index)
        term = FLAGS.term
        start_height = dbb.getmeta('ProcessedBlockHeight')
        if start_height is None:
            start_height = 0
        else:
            start_height = int(start_height)
        end_height = INDEX.select('SELECT_MAX_BLKID')
        print(f'Start from {start_height} to {end_height}')
        pool_num = FLAGS.process
        stime = time.time()
        mtime = time.time()
        for sheight, eheight in zip(
                range(start_height, end_height, term),
                range(start_height + term, end_height + term, term)):
            if eheight >= end_height:
                eheight = end_height + 1
            dbb.begin()
            with multiprocessing.Pool(pool_num) as p:
                try:
                    results = p.imap(get_data_core, range(sheight, eheight))
                    for blktime, blktx, txin, txout in results:
                        dbb.insertmany('INSERT_BLKTIME', blktime)
                        dbb.insertmany('INSERT_BLKTX', blktx)
                        dbb.insertmany('INSERT_TXIN', txin)
                        dbb.insertmany('INSERT_TXOUT', txout)
                except KeyboardInterrupt:
                    print('KeyboardInterrupt detected. Terminating child processes.')
                    p.terminate()
                    p.join(60)
                    raise
            dbb.putmeta('ProcessedBlockHeight', eheight - 1)
            dbb.commit()
            if DEBUG:
                print(f'Job done from {sheight} to {eheight-1} during {time.time()-stime}')
        if not RESUME:
            dbb.cur.execute('CREATE INDEX idx_BlkTime_2 ON BlkTime(unixtime);')
            dbb.cur.execute('CREATE INDEX idx_BlkTx_2 ON BlkTx(tx);')
            dbb.cur.execute('CREATE INDEX idx_TxIn_3_4 ON TxIn(ptx, pn);')
            dbb.cur.execute('CREATE INDEX idx_TxOut_3 ON TxOut(addr);')
            dbb.conn.commit()
        if DEBUG:
            print(f'All jobs completed {start_height} to {end_height} during {time.time()-stime}')
        INDEX.close()
    # Restore normal operating settings after the bulk load.
    dbb.cur.execute('PRAGMA cache_size = -2000;')
    if not RESUME:
        dbb.cur.execute('PRAGMA page_size = 4096;')
        dbb.cur.execute('VACUUM;')
    dbb.conn.commit()
    dbb.cur.execute('PRAGMA journal_mode = WAL;')
    dbb.cur.execute('PRAGMA synchronous = NORMAL;')
    dbb.conn.commit()
    dbb.close()