def new_block(self, block, verify=False):
    from __main__ import G
    tip = None
    for name, lx in self.blocks.iteritems():
        if block.prev_block == lx.block_name:
            tip = lx
            break
    if tip is None:
        height = block.get_height()
        if height > self.highest:
            # I think this happens when the hoover delivers blocks out of order.
            # the previous block should already be in the database; process it
            # first, then retry this one.
            if G.block_db.has_key(block.prev_block):
                self.new_block(G.block_db[block.prev_block], verify)
                self.new_block(block, verify)
            else:
                G.log('recent', 'nochain', height, str(block.name), str(block.prev_block))
        elif height <= (self.highest - self.horizon):
            G.log('recent', 'stale', height, str(block.name), str(block.prev_block))
        else:
            G.log('recent', 'nochain', height, str(block.name), str(block.prev_block))
    else:
        t0 = timer()
        self.blocks[block.name] = tip.extend(block, tip.height + 1, verify)
        W('[tip.extend %0.2f]' % (t0.end(),))
        if len(self.blocks) > 2:
            # otherwise we are in 'catch up' mode.
            self.trim()
def load(self):
    from __main__ import G
    save_path = os.path.join(G.args.base, self.save_path)
    try:
        self.cache = pickle.load(open(save_path, 'rb'))
        G.log('address-cache', 'load', len(self.cache))
    except IOError:
        # no saved cache on disk yet: bootstrap from the DNS seeds.
        self.seed()
def go(args, global_state):
    global G
    G = global_state
    G.args = args
    G.logger = ASN1_Logger(
        open(os.path.join(G.args.base, 'log.asn1'), 'ab')
    )
    G.log = G.logger.log
    # needed for the sub-imports below...
    import coro
    coro.set_exception_notifier(exception_notifier)
    G.log('starting caesure')
    G.addr_cache = AddressCache()
    G.block_db = block_db.BlockDB(read_only=False)
    G.hoover = BlockHoover()
    G.txn_pool = TransactionPool()
    G.recent_blocks = ledger.catch_up(G)
    G.verbose = args.verbose
    G.connection_map = {}
    # install a real resolver
    coro.dns.cache.install()
    if args.monitor:
        import coro.backdoor
        coro.spawn(coro.backdoor.serve, unix_path='/tmp/caesure.bd')
    users = {}
    if args.user:
        for user in args.user:
            u, p = user.split(':')
            users[u] = p
    if args.webui:
        import coro.http
        import caesure.webadmin
        import zlib
        G.http_server = h = coro.http.server()
        G.webadmin_handler = caesure.webadmin.handler(G)
        if users:
            h.push_handler(
                coro.http.handlers.auth_handler(users, G.webadmin_handler)
            )
            coro.spawn(h.start, ('', 8380))
        else:
            h.push_handler(G.webadmin_handler)
            coro.spawn(h.start, ('127.0.0.1', 8380))
        h.push_handler(coro.http.handlers.coro_status_handler())
        h.push_handler(
            coro.http.handlers.favicon_handler(
                zlib.compress(caesure.webadmin.favicon)
            )
        )
    G.in_conn_sem = coro.semaphore(args.incoming)
    G.out_conn_sem = coro.semaphore(args.outgoing)
    if args.relay:
        Connection.relay = True
    if args.serve:
        for addr in args.serve:
            coro.spawn(serve, addr)
    if args.connect:
        for addr in args.connect:
            coro.spawn(connect, addr)
    coro.spawn(G.addr_cache.purge_thread)
    coro.spawn(new_block_thread)
    coro.spawn(new_connection_thread)
    coro.spawn(G.recent_blocks.save_ledger_thread)
def load(self):
    save_path = os.path.join(G.args.base, self.save_path)
    try:
        self.cache = pickle.load(open(save_path, 'rb'))
        G.log('address-cache', 'load', len(self.cache))
    except IOError:
        pass
    if not self.cache:
        # nothing on disk (or the file was empty): bootstrap from DNS.
        self.seed()
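# For symmetry with load(), a save side presumably pickles the cache back to
# the same path. The actual save method isn't shown in this collection; the
# sketch below is a guess under that assumption.
def save(self):
    save_path = os.path.join(G.args.base, self.save_path)
    with open(save_path, 'wb') as f:
        # protocol 2 is the highest pickle protocol available on python 2.
        pickle.dump(self.cache, f, 2)
    G.log('address-cache', 'save', len(self.cache))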
def new_block_thread(self):
    while 1:
        b = G.block_db.new_block_cv.wait()
        in_pool = 0
        total = len(self.pool)
        for tx in b.transactions:
            # drop any pool entries confirmed by this block.
            try:
                del self.pool[tx.name]
                in_pool += 1
            except KeyError:
                pass
        G.log('pool', 'removed', in_pool, total)
def seed(self):
    # called only when we don't have a cached peer set.
    G.log('dns', 'seeding...')
    timestamp = coro.tsc_time.now_raw_posix_sec()
    r = coro.get_resolver()
    for seed in dns_seeds:
        try:
            for (t, ip) in r.cache.query(seed, 'A'):
                self.add(timestamp, (1, (ip, 8333)))
            for (t, ip) in r.cache.query(seed, 'AAAA'):
                self.add(timestamp, (1, (ip, 8333)))
        except coro.dns.exceptions.DNS_Soft_Error:
            pass
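# seed() iterates over a module-level dns_seeds list that isn't shown in this
# collection. For illustration only, it would look something like the
# well-known bitcoin DNS seed hosts (the actual list may differ):
dns_seeds = [
    'seed.bitcoin.sipa.be',
    'dnsseed.bluematt.me',
    'dnsseed.bitcoin.dashjr.org',
]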
def serve(addr):
    addr0 = parse_addr_arg(addr)
    if ':' in addr0[0]:
        # a colon in the host part means IPv6.
        ipv6_server_addrs.append(addr0)
        s = coro.tcp6_sock()
    else:
        ipv4_server_addrs.append(addr0)
        s = coro.tcp_sock()
    s.bind(addr0)
    s.listen(100)
    W('starting server on %r\n' % (addr0,))
    G.log('server', 'start', addr0)
    while 1:
        conn, addr1 = s.accept()
        # blocks here when we are at the incoming-connection limit.
        G.in_conn_sem.acquire(1)
        Connection(addr0, addr1, sock=conn)
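# serve() relies on parse_addr_arg(), which isn't shown in this collection.
# A minimal sketch, assuming the command-line argument is written as
# 'host:port', with IPv6 hosts containing colons of their own (the real
# parser may differ):
def parse_addr_arg(addr):
    # split on the last colon so 'fe80::1:8333' yields ('fe80::1', 8333).
    host, port = addr.rsplit(':', 1)
    return (host, int(port))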
def new_block_thread():
    while 1:
        block = G.block_db.new_block_cv.wait()
        name = block.name
        G.log('block', str(block.name))
        G.recent_blocks.new_block(block)
        if not G.hoover.running:
            # not catching up: relay the new block to our peers.
            nsent = 0
            for c in G.connection_map.values():
                if c.packet_count:
                    try:
                        c.send_invs([(OBJ_BLOCK, name)])
                        nsent += 1
                    except OSError:
                        # let the gen_packets loop deal with this.
                        pass
def trim(self):
    # this is more complex than I would like, but it solves a difficult problem:
    # we need to trim the set of recent blocks back to our horizon, *except* in
    # the case where the most recent common ancestor is *outside* the horizon.
    from __main__ import G
    db = G.block_db
    # get them sorted by height
    blocks = [(lx.height, lx) for lx in self.blocks.values()]
    blocks.sort()
    self.highest = blocks[-1][0]
    # --- identify leaves within our horizon ---
    # note: we can't use db.next[name] to identify leaves because
    # the db is often past our ledger on startup, and leaves in
    # self.blocks can have children in the db.
    cutoff = self.highest - self.horizon
    names = set(self.blocks.keys())
    prevs = set([db.prev[lx.block_name] for lx in self.blocks.values()])
    leaves = names.difference(prevs)
    leaves = [self.blocks[name] for name in leaves]
    # only those leaves within our horizon...
    leaves = [(lx.height, lx.block_name) for lx in leaves if lx.height >= cutoff]
    leaves.sort()
    lca = self.find_lowest_common_ancestor(leaves, db)
    lca = self.blocks[lca]
    if lca.height < cutoff:
        # if the lca is behind the horizon, we must keep it.
        cutoff = lca.height
        self.root = lca
        G.log('lca cutoff', str(lca.block_name))
    else:
        # lca is inside the horizon: crawl back till we hit the cutoff.
        root = lca
        while root.height > cutoff:
            prev = db.prev[root.block_name]
            if prev in self.blocks:
                root = self.blocks[prev]
            else:
                # we are building the ledger and don't have horizon nodes yet.
                break
        self.root = root
    # perform the trim, identify root and leaves.
    for h, lx in blocks:
        if h < cutoff:
            del self.blocks[lx.block_name]
    self.leaves = set(self.blocks[x[1]] for x in leaves)
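# trim() leans on find_lowest_common_ancestor(), which isn't shown in this
# collection. A minimal sketch of one way to compute it, assuming <leaves> is
# the sorted list of (height, block_name) pairs built above and db.prev maps
# a block name to its parent (the real method may differ):
def find_lowest_common_ancestor(self, leaves, db):
    # repeatedly replace the highest entry with its parent until every
    # branch has converged on a single block name.
    frontier = list(leaves)
    while len(set(name for height, name in frontier)) > 1:
        frontier.sort()
        height, name = frontier.pop()
        frontier.append((height - 1, db.prev[name]))
    return frontier[0][1]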
def resetState():
    # restore globals from the G_tut snapshot (missing keys reset to 0),
    # then trigger the teleport and refresh the player.
    teleport = Find(statemap[G.Tut_Jack])
    ks = G.keys()
    for k in ks:
        if not G_tut.has_key(k):
            G[k] = 0
        else:
            G[k] = G_tut[k]
    teleport.Trigger()
    player = FindPlayer()
    player.Bloodgain(10)
    player.ClearActiveDisciplines()
def new_block(self, block, verify=False):
    from __main__ import G
    tip = None
    for name, lx in self.blocks.iteritems():
        if block.prev_block == lx.block_name:
            tip = lx
            break
    if tip is None:
        # XXX I think we are getting duplicate blocks fed here,
        # because they are (almost) always behind the horizon.
        height = block.get_height()
        if self.highest - 20 <= height <= self.highest:
            # does this fall within our range?
            if G.block_db.has_key(block.prev_block):
                self.new_block(G.block_db[block.prev_block], verify)
                self.new_block(block, verify)
            else:
                G.log('recent', 'nochain', height, str(block.name), block.prev_block)
        else:
            G.log('recent', 'out of range', height, str(block.name), block.prev_block)
    else:
        self.blocks[block.name] = tip.extend(block, tip.height + 1, verify)
        self.remove_old_blocks()
        self.find_tips()
def add(self, tx):
    # NB: everything below the early return is currently disabled.
    W('TransactionPool.add() called and ignored\n')
    return
    if tx.name not in self.pool:
        try:
            i = 0
            for outpoint, oscript, sequence in tx.inputs:
                amt, redeem = G.txmap[outpoint]
                tx.verify0(i, redeem)
                i += 1
            self.pool[tx.name] = tx
        except script.ScriptFailure:
            G.log('pool', 'script failure', str(tx.name))
        except KeyError:
            # one or more inputs refer to txns we haven't seen yet.
            G.log('pool', 'missing inputs', str(tx.name))
            self.missing[tx.name] = tx
    else:
        G.log('pool', 'already', str(tx.name))
def exception_notifier():
    me = coro.current()
    traceback = coro.compact_traceback()
    G.log('exception', me.id, me.name, traceback)
    WY('exception: %r %r %r\n' % (me.id, me.name, traceback))
def saveState():
    # snapshot every global into the G_tut dict (see resetState).
    ks = G.keys()
    for k in ks:
        G_tut[k] = G[k]
def exception_notifier():
    me = coro.current()
    G.log('exception', me.id, me.name, coro.compact_traceback())