def add_key(self, target, value, height):
    """Insert leaf `target` (a KEYLENGTH byte string) into the patricia trie,
    storing (value, height) in db_utxo and rehashing the path to the root.

    If the key is already present (get_path returns True) nothing is done.
    May split an existing edge: when the new target diverges inside an
    existing child's skip string, an intermediate node is created at the
    common prefix.
    """
    assert len(target) == KEYLENGTH
    path = self.get_path(target, new=True)
    if path is True:
        # key already exists; nothing to insert
        return
    #print_log("add key: target", target.encode('hex'), "path", map(lambda x: x.encode('hex'), path))
    parent = path[-1]
    parent_node = self.get_node(parent)
    n = len(parent)
    c = target[n]  # first byte of target below the parent prefix
    if parent_node.has(c):
        # parent already has an edge starting with c: the edge must be split
        h, v = parent_node.get(c)
        skip = self.get_skip(parent + c)
        child = parent + c + skip
        assert not target.startswith(child)
        prefix = self.common_prefix(child, target)
        index = len(prefix)
        if len(child) == KEYLENGTH:
            # if it's a leaf, get hash and value of new_key from parent
            d = Node.from_dict({
                target[index]: (None, 0),
                child[index]: (h, v)
            })
        else:
            # if it is not a leaf, update its hash because skip_string changed
            child_node = self.get_node(child)
            h, v = child_node.get_hash(child, prefix)
            d = Node.from_dict({
                target[index]: (None, 0),
                child[index]: (h, v)
            })
        # record the skip strings of the two branches under the new split node
        self.set_skip(prefix + target[index], target[index + 1:])
        self.set_skip(prefix + child[index], child[index + 1:])
        self.put_node(prefix, d)
        path.append(prefix)
        self.parents[child] = prefix
        # update parent skip
        new_skip = prefix[n + 1:]
        self.set_skip(parent + c, new_skip)
        parent_node.set(c, None, 0)
        self.put_node(parent, parent_node)
    else:
        # add new letter to parent
        skip = target[n + 1:]
        self.set_skip(parent + c, skip)
        parent_node.set(c, None, 0)
        self.put_node(parent, parent_node)
    # write the new leaf
    s = (int_to_hex(value, 8) + int_to_hex(height, 4)).decode('hex')
    self.db_utxo.put(target, s)
    # the hash of a leaf is the txid
    _hash = target[20:52]
    self.update_node_hash(target, path, _hash, value)
def import_abandon(self, txid, nout):
    """Handle an abandoned claim: collect undo info, purge every per-claim
    db row, and renumber the remaining claims for the same name."""
    logger.info("importing abandon txid:{}, nout:{} ".format(txid, nout))
    claim_id = self.get_claim_id_from_outpoint(txid, nout)
    claim_name = self.get_claim_name(claim_id)
    undo_info = self._get_undo_info('abandon', claim_id, claim_name, txid,
                                    nout)
    # drop the outpoint mapping and every row keyed by the claim id
    self.db_outpoint_to_claim.delete(txid + int_to_hex(nout, 4))
    for db in (self.db_claim_outpoint, self.db_claim_values,
               self.db_claim_height, self.db_claim_addrs,
               self.db_claim_names):
        db.delete(claim_id)
    # remove the claim from the per-name ordering and close the gap
    claims_in_db = self.db_claim_order.get(claim_name)
    claims_for_name = msgpack.unpackb(claims_in_db) if claims_in_db else {}
    claim_n = claims_for_name.pop(claim_id)
    claims_for_name = dict(
        (cid, cn - 1 if cn > claim_n else cn)
        for cid, cn in claims_for_name.iteritems())
    self.db_claim_order.delete(claim_name)
    self.db_claim_order.put(claim_name, msgpack.packb(claims_for_name))
    return self.import_signed_abandon(claim_id, undo_info)
def set_spent(self, addr, txi, txid, index, height, undo):
    """Mark utxo `txi` of `addr` as spent by txid:index at `height`,
    recording the deleted (value, in_height) in `undo` and appending the
    spend to the address history."""
    leaf = self.address_to_key(addr) + txi
    serialized = self.delete_key(leaf)
    value = hex_to_int(serialized[0:8])
    in_height = hex_to_int(serialized[8:12])
    # remember what was removed so the spend can be reverted
    undo[leaf] = value, in_height
    # drop the txi -> addr backlink
    self.db_addr.delete(txi)
    # append the spend record to this address's history
    hist = self.db_hist.get(addr)
    if hist is None:
        hist = ''
    spender = (txid + int_to_hex(index, 4) + int_to_hex(height, 4)).decode('hex')
    hist += txi + int_to_hex(in_height, 4).decode('hex') + spender
    self.db_hist.put(addr, hist)
def revert_add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
    """Undo add_to_history: remove the utxo leaf for addr at tx_hash:tx_pos
    and its outpoint -> address backlink."""
    outpoint = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
    # delete the leaf from the trie
    self.delete_key(self.address_to_key(addr) + outpoint)
    # delete the backlink
    self.db_addr.delete(outpoint)
def set_spent(self, addr, txi, txid, index, height, undo):
    """Mark utxo `txi` of `addr` as spent by txid:index at `height`.

    Records the deleted (value, in_height) in `undo`, appends the spend to
    the address history, then prunes the history to its most recent tail
    (80 * pruning_limit bytes — presumably 80 bytes per entry; confirm
    against the serialization used above).
    """
    leaf = self.address_to_key(addr) + txi
    serialized = self.delete_key(leaf)
    value = hex_to_int(serialized[0:8])
    in_height = hex_to_int(serialized[8:12])
    # remember what was removed so the spend can be reverted
    undo[leaf] = value, in_height
    # drop the txi -> addr backlink
    self.db_addr.delete(txi)
    # append the spend record to this address's history
    hist = self.db_hist.get(addr)
    if hist is None:
        hist = ''
    spender = (txid + int_to_hex(index, 4) + int_to_hex(height, 4)).decode('hex')
    hist += txi + int_to_hex(in_height, 4).decode('hex') + spender
    # prune to the configured limit before writing back
    hist = hist[-80 * self.pruning_limit:]
    self.db_hist.put(addr, hist)
def add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
    """Add output tx_hash:tx_pos for `addr` to the trie and record the
    outpoint -> address backlink."""
    outpoint = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
    # write the new history leaf
    self.add_key(self.address_to_key(addr) + outpoint, value, tx_height)
    # backlink so the owner of an outpoint can be found later
    self.db_addr.put(outpoint, addr)
def set(self, c, h, value):
    """Insert or replace child `c` with a 40-byte (hash, value) record.

    A None hash is stored as 32 zero bytes. The record is spliced into
    self.s at the child's sorted position and the bit for `c` is set in
    the bitmask self.k.
    """
    record = (chr(0) * 32 if h is None else h) + int_to_hex(value, 8).decode('hex')
    assert len(record) == 40
    # replace rather than duplicate an existing child
    if self.has(c):
        self.remove(c)
    pos = self.indexof(c)
    self.s = self.s[:pos] + record + self.s[pos:]
    self.k |= 1 << ord(c)
    assert self.k != 0
def _get_undo_info(self, claim_type, claim_id, claim_name, txid, nout):
    """Snapshot the db rows for `claim_id` so a claim operation can be
    reverted. For a fresh 'claim' there is no prior state to capture."""
    undo_info = {
        "claim_id": claim_id,
        "claim_type": claim_type,
        "claim_name": claim_name,
    }
    if claim_type != 'claim':
        # NOTE(review): 'claim_names' stores the name itself while the other
        # entries snapshot db rows — confirm this asymmetry is intentional.
        undo_info.update({
            'claim_outpoint': self.db_claim_outpoint.get(claim_id),
            'claim_names': claim_name,
            'claim_values': self.db_claim_values.get(claim_id),
            'claim_height': self.db_claim_height.get(claim_id),
            'claim_addrs': self.db_claim_addrs.get(claim_id),
            'outpoint_to_claim': txid + int_to_hex(nout, 4),
            'claim_order': self.db_claim_order.get(claim_name),
        })
    return undo_info
def from_dict(klass, d):
    """Build a Node from a {char: (hash_or_None, value)} mapping.

    The serialized form is a 32-byte bitmask (one bit per possible child
    byte) followed by a 40-byte record per present child, in byte order.
    """
    bitmask = 0
    payload = ''
    for i in xrange(256):
        c = chr(i)
        if c not in d:
            continue
        bitmask |= 1 << i
        h, value = d[c]
        if h is None:
            h = chr(0) * 32  # absent hash is stored as zeros
        record = h + int_to_hex(value, 8).decode('hex')
        assert len(record) == 40
        payload += record
    # render the bitmask as exactly 32 bytes
    header = ("0x%0.64X" % bitmask)[2:].decode('hex')
    assert len(header) == 32
    return Node(header + payload)
def revert_transaction(self, txid, tx, block_height, touched_addr, undo):
    """Undo import_transaction for `txid`, consuming the `undo` record and
    adding every affected address to `touched_addr`."""
    # outputs are reverted in reverse order of creation
    for output in reversed(tx.get('outputs')):
        addr = output.get('address')
        if addr is None:
            continue
        self.revert_add_to_history(addr, txid, output.get('index'),
                                   output.get('value'), block_height)
        touched_addr.add(addr)
    # inputs are restored using the previously-recorded source addresses
    prev_addr = undo.pop('prev_addr')
    for i, txin in reversed(list(enumerate(tx.get('inputs')))):
        addr = prev_addr[i]
        if addr is None:
            continue
        outpoint = (txin.get('prevout_hash') +
                    int_to_hex(txin.get('prevout_n'), 4)).decode('hex')
        self.revert_set_spent(addr, outpoint, undo)
        touched_addr.add(addr)
    # every undo entry must have been consumed by revert_set_spent
    assert undo == {}
def import_transaction(self, txid, tx, block_height, touched_addr):
    """Apply transaction `txid` to the utxo/history databases.

    Returns an undo record: per-address pruned items plus 'prev_addr',
    the source address of each input (None where unknown), so the tx can
    be reverted later. Touched addresses are added to `touched_addr`.
    """
    undo = {'prev_addr': []}
    prev_addr = []
    for i, txin in enumerate(tx.get('inputs')):
        outpoint = (txin.get('prevout_hash') +
                    int_to_hex(txin.get('prevout_n'), 4)).decode('hex')
        addr = self.get_address(outpoint)
        if addr is not None:
            self.set_spent(addr, outpoint, txid, i, block_height, undo)
            touched_addr.add(addr)
        prev_addr.append(addr)
    undo['prev_addr'] = prev_addr
    # only outputs are added to history here; maybe inputs should be added
    # too (that's handled in the loop above via set_spent)
    for output in tx.get('outputs'):
        addr = output.get('address')
        if addr is None:
            continue
        self.add_to_history(addr, txid, output.get('index'),
                            output.get('value'), block_height)
        touched_addr.add(addr)
    return undo
def import_update(self, claim, claim_id, claim_address, txid, nout, amount,
                  block_height):
    """Handle a claim update: move the claim from its original outpoint to
    txid:nout and refresh its value, height and address rows.

    Returns undo info so the update can be reverted.
    """
    logger.info(
        "importing update {}, claim id:{}, txid:{}, nout:{} ".format(
            claim.name, claim_id, txid, nout))
    undo_info = self._get_undo_info('update', claim_id, claim.name, txid,
                                    nout)
    # BUG FIX: the tuple unpack previously rebound the ``amount`` parameter
    # to the *original* outpoint's amount, so the updated claim was written
    # back with the stale amount. Unpack into ``old_amount`` instead and
    # store the caller-supplied new amount.
    txid_orig_claim, nout_orig_claim, old_amount = \
        self.get_outpoint_from_claim_id(claim_id)
    self.db_outpoint_to_claim.delete(txid_orig_claim +
                                     int_to_hex(nout_orig_claim, 4))
    self.write_claim_id_from_outpoint(txid, nout, claim_id)
    self.write_outpoint_from_claim_id(claim_id, txid, nout, amount)
    self.db_claim_values.put(claim_id, claim.value)
    self.db_claim_height.put(claim_id, str(block_height))
    self.db_claim_addrs.put(claim_id, claim_address)
    undo_info = self.import_signed_claim_transaction(
        claim, claim_id, undo_info)
    return undo_info
def get_claim_id_from_outpoint(self, txid, nout):
    """Return the claim id stored for outpoint txid:nout, or None."""
    # TODO: consider keeping a txid/nout -> outpoint db if this is too slow
    return self.db_outpoint_to_claim.get(txid + int_to_hex(nout, 4))
def memorypool_update(self):
    """Refresh the in-memory mempool state from lbrycrdd.

    Pulls the current mempool tx set, folds new transactions' outputs and
    inputs into per-address value deltas (mempool_addresses /
    mempool_values), drops entries for txs no longer in the mempool,
    rebuilds mempool_hist under mempool_lock, and invalidates the cache
    for every touched address. Bails out early (to retry on the next call)
    if an input's utxo is not yet in the database.
    """
    t0 = time.time()
    mempool_hashes = set(self.lbrycrdd('getrawmempool'))
    touched_addresses = set()
    # get new transactions
    new_tx = {}
    for tx_hash in mempool_hashes:
        if tx_hash in self.mempool_hashes:
            continue
        tx = self.get_mempool_transaction(tx_hash)
        if not tx:
            continue
        new_tx[tx_hash] = tx
    # remove older entries from mempool_hashes
    self.mempool_hashes = mempool_hashes
    # check all tx outputs
    for tx_hash, tx in new_tx.iteritems():
        mpa = self.mempool_addresses.get(tx_hash, {})
        out_values = []
        for x in tx.get('outputs'):
            addr = x.get('address', '')
            out_values.append((addr, x['value']))
            if not addr:
                continue
            v = mpa.get(addr, 0)
            v += x['value']
            mpa[addr] = v
            touched_addresses.add(addr)
        self.mempool_addresses[tx_hash] = mpa
        self.mempool_values[tx_hash] = out_values
    # check all inputs
    for tx_hash, tx in new_tx.iteritems():
        mpa = self.mempool_addresses.get(tx_hash, {})
        for x in tx.get('inputs'):
            # prefer the in-mempool parent tx's outputs; fall back to storage
            mpv = self.mempool_values.get(x.get('prevout_hash'))
            if mpv:
                addr, value = mpv[x.get('prevout_n')]
            else:
                txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
                try:
                    addr = self.storage.get_address(txi)
                    value = self.storage.get_utxo_value(addr, txi)
                # NOTE(review): bare except looks like a deliberate
                # best-effort bail-out when the utxo isn't indexed yet;
                # consider narrowing to Exception.
                except:
                    print_log("utxo not in database; postponing mempool update")
                    return
            if not addr:
                continue
            v = mpa.get(addr, 0)
            v -= value
            mpa[addr] = v
            touched_addresses.add(addr)
        self.mempool_addresses[tx_hash] = mpa
    # remove deprecated entries from mempool_addresses
    for tx_hash, addresses in self.mempool_addresses.items():
        if tx_hash not in self.mempool_hashes:
            self.mempool_addresses.pop(tx_hash)
            self.mempool_values.pop(tx_hash)
            touched_addresses.update(addresses)
    # remove deprecated entries from mempool_hist
    new_mempool_hist = {}
    for addr in self.mempool_hist.iterkeys():
        h = self.mempool_hist[addr]
        hh = []
        for tx_hash, delta in h:
            if tx_hash in self.mempool_addresses:
                hh.append((tx_hash, delta))
        if hh:
            new_mempool_hist[addr] = hh
    # add new transactions to mempool_hist
    for tx_hash in new_tx.iterkeys():
        addresses = self.mempool_addresses[tx_hash]
        for addr, delta in addresses.iteritems():
            h = new_mempool_hist.get(addr, [])
            if (tx_hash, delta) not in h:
                h.append((tx_hash, delta))
            new_mempool_hist[addr] = h
    # publish the rebuilt history atomically
    with self.mempool_lock:
        self.mempool_hist = new_mempool_hist
    # invalidate cache for touched addresses
    for addr in touched_addresses:
        self.invalidate_cache(addr)
    t1 = time.time()
    # log only slow updates
    if t1 - t0 > 1:
        print_log('mempool_update', t1 - t0, len(self.mempool_hashes), len(self.mempool_hist))
def cmd_utxo_get_address(self, txid, pos):
    """RPC command: return the address owning utxo txid:pos."""
    outpoint = (str(txid) + int_to_hex(int(pos), 4)).decode('hex')
    return self.storage.get_address(outpoint)
def write_claim_id_from_outpoint(self, txid, nout, claim_id):
    """Store the outpoint (txid:nout) -> claim_id mapping."""
    self.db_outpoint_to_claim.put(txid + int_to_hex(nout, 4), claim_id)
def write_outpoint_from_claim_id(self, claim_id, txid, nout, amount):
    """Store claim_id -> (txid, nout, amount) as a hex-concatenated row."""
    row = txid + int_to_hex(nout, 4) + int_to_hex(amount, 8)
    self.db_claim_outpoint.put(claim_id, row)
def memorypool_update(self):
    """Refresh the in-memory mempool state from lbrycrdd.

    Pulls the current mempool tx set, folds new transactions' outputs and
    inputs into per-address value deltas (mempool_addresses /
    mempool_values), drops entries for txs no longer in the mempool,
    rebuilds mempool_hist under mempool_lock, and invalidates the cache
    for every touched address. Bails out early (to retry on the next call)
    if an input's utxo is not yet in the database.
    """
    t0 = time.time()
    mempool_hashes = set(self.lbrycrdd('getrawmempool'))
    touched_addresses = set()
    # get new transactions
    new_tx = {}
    for tx_hash in mempool_hashes:
        if tx_hash in self.mempool_hashes:
            continue
        tx = self.get_mempool_transaction(tx_hash)
        if not tx:
            continue
        new_tx[tx_hash] = tx
    # remove older entries from mempool_hashes
    self.mempool_hashes = mempool_hashes
    # check all tx outputs
    for tx_hash, tx in new_tx.iteritems():
        mpa = self.mempool_addresses.get(tx_hash, {})
        out_values = []
        for x in tx.get('outputs'):
            addr = x.get('address', '')
            out_values.append((addr, x['value']))
            if not addr:
                continue
            v = mpa.get(addr, 0)
            v += x['value']
            mpa[addr] = v
            touched_addresses.add(addr)
        self.mempool_addresses[tx_hash] = mpa
        self.mempool_values[tx_hash] = out_values
    # check all inputs
    for tx_hash, tx in new_tx.iteritems():
        mpa = self.mempool_addresses.get(tx_hash, {})
        for x in tx.get('inputs'):
            # prefer the in-mempool parent tx's outputs; fall back to storage
            mpv = self.mempool_values.get(x.get('prevout_hash'))
            if mpv:
                addr, value = mpv[x.get('prevout_n')]
            else:
                txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
                try:
                    addr = self.storage.get_address(txi)
                    value = self.storage.get_utxo_value(addr, txi)
                # NOTE(review): bare except looks like a deliberate
                # best-effort bail-out when the utxo isn't indexed yet;
                # consider narrowing to Exception.
                except:
                    print_log(
                        "utxo not in database; postponing mempool update")
                    return
            if not addr:
                continue
            v = mpa.get(addr, 0)
            v -= value
            mpa[addr] = v
            touched_addresses.add(addr)
        self.mempool_addresses[tx_hash] = mpa
    # remove deprecated entries from mempool_addresses
    for tx_hash, addresses in self.mempool_addresses.items():
        if tx_hash not in self.mempool_hashes:
            self.mempool_addresses.pop(tx_hash)
            self.mempool_values.pop(tx_hash)
            touched_addresses.update(addresses)
    # remove deprecated entries from mempool_hist
    new_mempool_hist = {}
    for addr in self.mempool_hist.iterkeys():
        h = self.mempool_hist[addr]
        hh = []
        for tx_hash, delta in h:
            if tx_hash in self.mempool_addresses:
                hh.append((tx_hash, delta))
        if hh:
            new_mempool_hist[addr] = hh
    # add new transactions to mempool_hist
    for tx_hash in new_tx.iterkeys():
        addresses = self.mempool_addresses[tx_hash]
        for addr, delta in addresses.iteritems():
            h = new_mempool_hist.get(addr, [])
            if (tx_hash, delta) not in h:
                h.append((tx_hash, delta))
            new_mempool_hist[addr] = h
    # publish the rebuilt history atomically
    with self.mempool_lock:
        self.mempool_hist = new_mempool_hist
    # invalidate cache for touched addresses
    for addr in touched_addresses:
        self.invalidate_cache(addr)
    t1 = time.time()
    # log only slow updates
    if t1 - t0 > 1:
        print_log('mempool_update', t1 - t0, len(self.mempool_hashes), len(self.mempool_hist))