def do_item(item):
    try:
        return (item, Timestamp(db[item.hexsha]))
    except KeyError:
        timestamp = None
        if isinstance(item, git.Blob):
            timestamp = Timestamp(file_hash_op.hash_fd(item.data_stream[3]))

        elif isinstance(item, git.Tree):
            stamper = GitTreeTimestamper(item, db=db,
                                         file_hash_op=file_hash_op,
                                         tree_hash_op=tree_hash_op)
            timestamp = stamper.timestamp

        elif isinstance(item, git.Submodule):
            # A submodule is just a git commit hash.
            #
            # Unfortunately we're not guaranteed to have the repo behind it,
            # so all we can do is timestamp that SHA1 hash.
            #
            # We do run it through the tree_hash_op to make it
            # indistinguishable from other things; consider the degenerate
            # case where the only thing in a git repo was a submodule.
            timestamp = Timestamp(tree_hash_op(item.binsha))

        else:
            raise NotImplementedError("Don't know what to do with %r" % item)

        db[item.hexsha] = timestamp.msg
        return (item, timestamp)
def merge(self, new_timestamp):
    try:
        existing = self[new_timestamp.msg]
    except KeyError:
        existing = Timestamp(new_timestamp.msg)

    existing.merge(new_timestamp)
    self.__save(existing)
def make_timestamp_from_block(digest, block, blockheight, *, max_tx_size=1000):
    """Make a timestamp for a digest from a block

    Returns a timestamp for that digest on success, None on failure
    """
    # Find the smallest transaction containing the root digest
    # FIXME: note how strategy changes once we add SHA256 midstate support
    len_smallest_tx_found = max_tx_size + 1
    commitment_tx = None
    prefix = None
    suffix = None
    for tx in block.vtx:
        serialized_tx = tx.serialize()

        if len(serialized_tx) > len_smallest_tx_found:
            continue

        try:
            i = serialized_tx.index(digest)
        except ValueError:
            continue

        # Found it!
        commitment_tx = tx
        prefix = serialized_tx[0:i]
        suffix = serialized_tx[i + len(digest):]

        len_smallest_tx_found = len(serialized_tx)

    if len_smallest_tx_found > max_tx_size:
        return None

    digest_timestamp = Timestamp(digest)

    # Add the commitment ops necessary to go from the digest to the txid op
    prefix_stamp = digest_timestamp.ops.add(OpPrepend(prefix))
    txid_stamp = cat_sha256d(prefix_stamp, suffix)

    assert commitment_tx.GetHash() == txid_stamp.msg

    # Create the txid list, with our commitment txid op in the appropriate
    # place
    block_txid_stamps = []
    for tx in block.vtx:
        if tx.GetHash() != txid_stamp.msg:
            block_txid_stamps.append(Timestamp(tx.GetHash()))
        else:
            block_txid_stamps.append(txid_stamp)

    # Build the merkle tree
    merkleroot_stamp = __make_btc_block_merkle_tree(block_txid_stamps)

    attestation = BitcoinBlockHeaderAttestation(blockheight)
    merkleroot_stamp.attestations.add(attestation)

    return digest_timestamp
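# The cat_sha256d() helper above is imported from elsewhere in the codebase.
# For context, a minimal sketch of its assumed semantics, following the
# pattern of python-opentimestamps' timestamp module (the upstream
# implementation may differ in detail):
def cat_sha256(left, right):
    # left and right may be Timestamps or raw bytes.
    if not isinstance(left, Timestamp):
        left = Timestamp(left)
    if not isinstance(right, Timestamp):
        right = Timestamp(right)

    # Record the concatenation from both sides: left commits to it by
    # appending right's message, right by prepending left's message.
    left_append_stamp = left.ops.add(OpAppend(right.msg))
    right_prepend_stamp = right.ops.add(OpPrepend(left.msg))

    # Both intermediate stamps carry the same concatenated message, so they
    # can share a single SHA256 result timestamp.
    result = right_prepend_stamp.ops.add(OpSHA256())
    left_append_stamp.ops[OpSHA256()] = result
    return result

def cat_sha256d(left, right):
    # Bitcoin txids and merkle nodes are double-SHA256.
    return cat_sha256(left, right).ops.add(OpSHA256())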
def proof_from_txid_to_block(txid, height, network):
    merkle_path = network.get_merkle_for_transaction(txid, height)
    timestamp = Timestamp(lx(txid))
    pos = merkle_path["pos"]
    t_old = t_new = timestamp
    for c in merkle_path["merkle"]:
        t_new = (cat_sha256d(t_old, Timestamp(lx(c))) if pos % 2 == 0
                 else cat_sha256d(Timestamp(lx(c)), t_old))
        pos //= 2
        t_old = t_new
    t_new.attestations.add(BitcoinBlockHeaderAttestation(height))
    return timestamp
def get_timestamp(self, commitment):
    """Get a timestamp for a given commitment

    Raises KeyError if the calendar doesn't have that commitment
    """
    req = urllib.request.Request(
        self.url + "/timestamp/" + binascii.hexlify(commitment).decode("utf8"),
        headers=self.request_headers)

    try:
        with urllib.request.urlopen(req) as resp:
            if resp.status == 200:
                # FIXME: Not a particularly nice way of handling this, but
                # it'll do the job for now. Read one byte past the limit so
                # the size check below can actually fire.
                resp_bytes = resp.read(10001)
                if len(resp_bytes) > 10000:
                    raise Exception("Calendar response exceeded size limit")

                ctx = BytesDeserializationContext(resp_bytes)
                return Timestamp.deserialize(ctx, commitment)
            else:
                raise Exception("Unknown response from calendar: %d" % resp.status)
    except urllib.error.HTTPError as exp:
        if exp.code == 404:
            raise CommitmentNotFoundError(get_sanitised_resp_msg(exp))
        else:
            raise exp
def get_timestamp(self, commitment, timeout=None):
    """Get a timestamp for a given commitment

    Raises KeyError if the calendar doesn't have that commitment
    """
    req = urllib.request.Request(
        self.url + '/timestamp/' + binascii.hexlify(commitment).decode('utf8'),
        headers=self.request_headers)

    try:
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            if resp.status == 200:
                # FIXME: Not a particularly nice way of handling this, but
                # it'll do the job for now. Read one byte past the limit so
                # the size check below can actually fire.
                resp_bytes = resp.read(10001)
                if len(resp_bytes) > 10000:
                    raise Exception("Calendar response exceeded size limit")

                ctx = BytesDeserializationContext(resp_bytes)
                return Timestamp.deserialize(ctx, commitment)
            else:
                raise Exception("Unknown response from calendar: %d" % resp.status)
    except urllib.error.HTTPError as exp:
        if exp.code == 404:
            raise CommitmentNotFoundError(get_sanitised_resp_msg(exp))
        else:
            raise exp
def deserialize_ascii_armored_timestamp(git_commit, gpg_sig):
    stamp_start = gpg_sig.find(ASCII_ARMOR_HEADER)
    if stamp_start == -1:
        return (None, None, None)

    stamp_end = gpg_sig.find(b'\n' + ASCII_ARMOR_FOOTER)
    if stamp_end == -1:
        return (None, None, None)

    base64_encoded_stamp = gpg_sig[stamp_start + len(ASCII_ARMOR_HEADER):stamp_end]

    initial_msg = hash_signed_commit(git_commit, gpg_sig[0:stamp_start])

    try:
        serialized_stamp = base64.standard_b64decode(base64_encoded_stamp)

        major_version = serialized_stamp[0]
        minor_version = serialized_stamp[1]

        if major_version != 1:
            logging.error("Can't verify timestamp; major version %d not known" % major_version)
            sys.exit(1)

        logging.debug("Git timestamp is version %d.%d" % (major_version, minor_version))

        ctx = BytesDeserializationContext(serialized_stamp[2:])
        timestamp = Timestamp.deserialize(ctx, initial_msg)

        return (major_version, minor_version, timestamp)

    except Exception as err:
        logging.error("Bad timestamp: %r" % err)
        return (None, None, None)
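# deserialize_ascii_armored_timestamp() has a matching writer,
# write_ascii_armored(), used by main() below. A minimal sketch inferred
# from the parser above (two version bytes, then the serialized timestamp,
# base64-encoded between the armor header and footer); the real writer may
# line-wrap the base64 and differ in other details.
def write_ascii_armored(timestamp, fd, minor_version):
    ctx = BytesSerializationContext()
    timestamp.serialize(ctx)

    # Matches serialized_stamp[0], [1] and [2:] in the parser above.
    serialized = bytes([1, minor_version]) + ctx.getbytes()

    fd.write(ASCII_ARMOR_HEADER)
    fd.write(base64.standard_b64encode(serialized))
    fd.write(b'\n' + ASCII_ARMOR_FOOTER)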
def __add_timestamp(self, new_timestamp, batch, batch_cache):
    existing_timestamp = None
    try:
        if new_timestamp.msg in batch_cache:
            existing_timestamp = batch_cache[new_timestamp.msg]
        else:
            existing_timestamp = self.__get_timestamp(new_timestamp.msg)
    except KeyError:
        existing_timestamp = Timestamp(new_timestamp.msg)
    else:
        if existing_timestamp == new_timestamp:
            # Note how because we didn't get the existing timestamp
            # recursively, the only way old and new can be identical is if
            # all the ops are verify operations.
            return

    # Update the existing timestamp's attestations with those from the new
    # timestamp
    existing_timestamp.attestations.update(new_timestamp.attestations)

    for new_op, new_op_stamp in new_timestamp.ops.items():
        # Make sure the existing timestamp has this operation
        existing_timestamp.ops.add(new_op)

        # Add the result's timestamp to the calendar
        self.__add_timestamp(new_op_stamp, batch, batch_cache)

    self.__put_timestamp(existing_timestamp, batch, batch_cache)
def test_discard_attestations(self):
    """Discarding attestations"""
    t = Timestamp(b'')
    t1 = t.ops.add(OpAppend(b'\x01'))
    t2 = t.ops.add(OpAppend(b'\x02'))

    t.attestations = {UnknownAttestation(b'unknown.', b'')}
    t1.attestations = {BitcoinBlockHeaderAttestation(1)}
    t2.attestations = {PendingAttestation("c2"), PendingAttestation("c1")}

    discard_attestations(t, [UnknownAttestation, PendingAttestation("c1")])

    tn = Timestamp(b'')
    tn1 = tn.ops.add(OpAppend(b'\x01'))
    tn2 = tn.ops.add(OpAppend(b'\x02'))

    tn1.attestations = {BitcoinBlockHeaderAttestation(1)}
    tn2.attestations = {PendingAttestation("c2")}

    self.assertEqual(t, tn)
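# The test pins down discard_attestations() semantics: the filter list may
# mix attestation classes (drop every instance of that class) and concrete
# attestations (drop exact matches), applied recursively through the ops
# tree. A sketch consistent with the test, not necessarily the shipped
# implementation:
def discard_attestations(timestamp, to_discard):
    timestamp.attestations = {
        a for a in timestamp.attestations
        if a not in to_discard
        and not any(isinstance(entry, type) and isinstance(a, entry)
                    for entry in to_discard)}

    for op_stamp in timestamp.ops.values():
        discard_attestations(op_stamp, to_discard)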
def make_timestamp_from_block_tx(confirmed_tx, block, blockheight):
    commitment_tx = confirmed_tx.tx
    serialized_tx = commitment_tx.serialize(params={'include_witness': False})
    digest = confirmed_tx.tip_timestamp.msg

    try:
        i = serialized_tx.index(digest)
    except ValueError:
        assert False, "can't build a block_timestamp from my tx, this is not supposed to happen, exiting"

    prefix = serialized_tx[0:i]
    suffix = serialized_tx[i + len(digest):]

    digest_timestamp = Timestamp(digest)

    # Add the commitment ops necessary to go from the digest to the txid op
    prefix_stamp = digest_timestamp.ops.add(OpPrepend(prefix))
    txid_stamp = cat_sha256d(prefix_stamp, suffix)

    assert commitment_tx.GetTxid() == txid_stamp.msg

    # Create the txid list, with our commitment txid op in the appropriate
    # place
    block_txid_stamps = []
    for tx in block.vtx:
        if tx.GetTxid() != txid_stamp.msg:
            block_txid_stamps.append(Timestamp(tx.GetTxid()))
        else:
            block_txid_stamps.append(txid_stamp)

    # Build the merkle tree
    merkleroot_stamp = make_btc_block_merkle_tree(block_txid_stamps)
    assert merkleroot_stamp.msg == block.hashMerkleRoot

    if bitcoin.params.NAME.lower() == "testnet":
        attestation = BitcoinTestnetBlockHeaderAttestation(blockheight)
    else:
        attestation = BitcoinBlockHeaderAttestation(blockheight)
    merkleroot_stamp.attestations.add(attestation)

    return digest_timestamp
def __getitem__(self, commitment):
    if self.path is None:
        raise KeyError
    elif len(commitment) > 64:
        # FIXME: hack to avoid filename-too-long errors
        raise KeyError

    try:
        with open(self.__commitment_to_filename(commitment), 'rb') as stamp_fd:
            ctx = StreamDeserializationContext(stamp_fd)
            stamp = Timestamp.deserialize(ctx, commitment)
            return stamp
    except FileNotFoundError:
        raise KeyError
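# Sketch of the __commitment_to_filename() helper used above, assuming a
# hex fan-out layout that keeps any one cache directory small; the shipped
# layout may differ.
def __commitment_to_filename(self, commitment):
    hex_commitment = binascii.hexlify(commitment).decode('utf8')
    return os.path.join(self.path,
                        hex_commitment[0:2],  # two levels of fan-out
                        hex_commitment[2:4],
                        hex_commitment)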
def create_proof(block, state, tx_hash, j, tx_raw):
    # print("tx_raw: " + tx_raw)
    # print("tx_hash: " + tx_hash)
    my_root = state.root_hash
    block_root = bytes.fromhex(block['transactionsRoot'][2:])
    assert my_root == block_root

    rlp_encode = rlp.encode(j)
    # print("rlp_encode: " + bytes.hex(rlp_encode))
    nibbles = trie.bin_to_nibbles(rlp_encode)
    current_node = state.root_node
    ops_list = []
    nibble_iter = iter(nibbles)

    while True:
        node_type = state._get_node_type(current_node)
        # print("node type: " + str(node_type))
        # print([bytes.hex(cur_el) for cur_el in current_node])
        current_node_rlp = rlp.encode(current_node)
        current_node_encoded = bytes.hex(current_node_rlp)
        # print(current_node_encoded)
        try:
            index = next(nibble_iter)
        except StopIteration:
            # Nibbles exhausted; keep the last index (only branch nodes use
            # it anyway).
            pass

        current_el = current_node[index if node_type == trie.NODE_TYPE_BRANCH else 1]
        current_el_hex = bytes.hex(current_el)
        # print(str(index) + ":" + current_el_hex)
        [prepend, append] = get_append_and_prepend(current_el_hex, current_node_encoded)
        ops_list.append(OpKECCAK256())
        if len(append) > 0:
            ops_list.append(OpAppend(bytes.fromhex(append)))
        if len(prepend) > 0:
            ops_list.append(OpPrepend(bytes.fromhex(prepend)))

        if node_type == trie.NODE_TYPE_LEAF:
            break
        else:
            current_node = state._decode_to_node(current_el)

    orig = Timestamp(bytes.fromhex(tx_raw))
    current = orig
    while len(ops_list) > 0:
        current = current.ops.add(ops_list.pop())

    return orig
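# Sketch of the get_append_and_prepend() helper used above, assuming both
# arguments are hex strings: split the hex encoding of the parent node
# around (the first occurrence of) the child element.
def get_append_and_prepend(element_hex, node_hex):
    i = node_hex.index(element_hex)
    return [node_hex[:i], node_hex[i + len(element_hex):]]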
def __pending_to_merkle_tree(self, n):
    # Update the most recent timestamp transaction with new commitments
    commitment_timestamps = [Timestamp(commitment)
                             for commitment in tuple(self.pending_commitments)[0:n]]

    # Remember that commitment_timestamps contains raw commitments, which
    # are longer than necessary, so we sha256 them before passing them to
    # make_merkle_tree, which concatenates whatever it gets (or for that
    # matter, returns what it gets if there's only one item for the tree!)
    commitment_digest_timestamps = [stamp.ops.add(OpSHA256())
                                    for stamp in commitment_timestamps]

    logging.debug("Making merkle tree")
    tip_timestamp = make_merkle_tree(commitment_digest_timestamps)
    logging.debug("Done making merkle tree")

    return (tip_timestamp, commitment_timestamps)
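# Sketch of make_merkle_tree() from python-opentimestamps, consistent with
# the comment above: adjacent pairs are combined with cat_sha256 (sketched
# earlier), an odd leftover is carried up unchanged, and a single input is
# returned as-is. The upstream implementation may be structured differently.
def make_merkle_tree(timestamps):
    stamps = list(timestamps)
    if not stamps:
        raise ValueError("Need at least one timestamp")

    while len(stamps) > 1:
        next_level = [cat_sha256(left, right)
                      for left, right in zip(stamps[0::2], stamps[1::2])]
        if len(stamps) % 2:
            next_level.append(stamps[-1])  # odd one out rides up a level
        stamps = next_level

    return stamps[0]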
def submit(self, msg):
    """Submit message for aggregation

    Aggregator thread will aggregate the message along with all other
    messages, and return a Timestamp
    """
    timestamp = Timestamp(msg)

    # Add nonce to ensure requestor doesn't learn anything about other
    # messages being committed at the same time, as well as to ensure that
    # anything we store related to this commitment can't be controlled by
    # them.
    done_event = threading.Event()
    self.digest_queue.put((nonce_timestamp(timestamp), done_event))
    done_event.wait()

    return timestamp
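# Sketch of the nonce_timestamp() helper used above, implementing the
# privacy step the comment describes. The nonce length and hash op are
# assumptions; the shipped helper may differ.
def nonce_timestamp(private_timestamp, length=16):
    # Append a random nonce so the aggregate reveals nothing about the
    # submitted message, then hash so nothing the server stores can be
    # controlled by the requestor.
    unnonced = private_timestamp.ops.add(OpAppend(os.urandom(length)))
    return unnonced.ops.add(OpSHA256())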
def __get_timestamp(self, msg):
    """Get a timestamp, non-recursively"""
    serialized_timestamp = self.db.Get(msg)

    ctx = BytesDeserializationContext(serialized_timestamp)

    timestamp = Timestamp(msg)

    for i in range(ctx.read_varuint()):
        attestation = TimeAttestation.deserialize(ctx)
        assert attestation not in timestamp.attestations
        timestamp.attestations.add(attestation)

    for i in range(ctx.read_varuint()):
        op = Op.deserialize(ctx)
        assert op not in timestamp.ops
        timestamp.ops.add(op)

    return timestamp
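# __get_timestamp() implies the storage format: a varuint attestation count
# plus the attestations, then a varuint op count plus the ops. A sketch of
# the matching writer __put_timestamp() under that assumption (batch is
# taken to be a leveldb-style WriteBatch and batch_cache a plain dict, per
# the calling code in __add_timestamp() above):
def __put_timestamp(self, new_timestamp, batch, batch_cache):
    ctx = BytesSerializationContext()

    ctx.write_varuint(len(new_timestamp.attestations))
    for attestation in new_timestamp.attestations:
        attestation.serialize(ctx)

    ctx.write_varuint(len(new_timestamp.ops))
    for op in new_timestamp.ops:
        op.serialize(ctx)

    batch.Put(new_timestamp.msg, ctx.getbytes())
    batch_cache[new_timestamp.msg] = new_timestamp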
def submit(self, digest, timeout=None):
    """Submit a digest to the calendar

    Returns a Timestamp committing to that digest
    """
    req = urllib.request.Request(self.url + '/digest', data=digest,
                                 headers=self.request_headers)
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        if resp.status != 200:
            raise Exception("Unknown response from calendar: %d" % resp.status)

        # FIXME: Not a particularly nice way of handling this, but it'll do
        # the job for now. Read one byte past the limit so the size check
        # below can actually fire.
        resp_bytes = resp.read(10001)
        if len(resp_bytes) > 10000:
            raise Exception("Calendar response exceeded size limit")

        ctx = BytesDeserializationContext(resp_bytes)
        return Timestamp.deserialize(ctx, digest)
def submit(self, digest):
    """Submit a digest to the calendar

    Returns a Timestamp committing to that digest
    """
    req = urllib.request.Request(self.url + "/digest", data=digest,
                                 headers=self.request_headers)
    with urllib.request.urlopen(req) as resp:
        if resp.status != 200:
            raise Exception("Unknown response from calendar: %d" % resp.status)

        # FIXME: Not a particularly nice way of handling this, but it'll do
        # the job for now. Read one byte past the limit so the size check
        # below can actually fire.
        resp_bytes = resp.read(10001)
        if len(resp_bytes) > 10000:
            raise Exception("Calendar response exceeded size limit")

        ctx = BytesDeserializationContext(resp_bytes)
        return Timestamp.deserialize(ctx, digest)
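# Example use of the submit()/get_timestamp() pair above, assuming the
# enclosing class is RemoteCalendar as in generate() below. Note that in
# practice the commitment polled for later is the message carrying the
# PendingAttestation inside the returned timestamp, which is not in general
# the submitted digest itself.
import hashlib

calendar = RemoteCalendar("https://a.pool.opentimestamps.org")
digest = hashlib.sha256(b"hello world").digest()
pending_stamp = calendar.submit(digest)  # returns quickly with a pending proof

try:
    upgraded_stamp = calendar.get_timestamp(digest)
except CommitmentNotFoundError:
    pass  # the calendar hasn't committed this digest to Bitcoin yet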
def upgrade_timestamps_tx(self, tx):
    for category, script, amount in tx.outputs():
        if category == 2:  # agt -> txid
            agt = script[4:]  # drop "6a20" op_return and op_pushdata(32)
            tx_raw = tx.serialize(witness=False)
            if len(x(tx_raw)) <= Op.MAX_MSG_LENGTH:
                i = tx_raw.find(agt)
                prepend = x(tx_raw[:i])
                append = x(tx_raw[i + len(agt):])
                t_agt = Timestamp(x(agt))
                t = t_agt.ops.add(OpPrepend(prepend))
                t = t.ops.add(OpAppend(append))
                t = t.ops.add(OpSHA256())
                t = t.ops.add(OpSHA256())  # txid in little endian
                for f in self.proofs_storage_file.incomplete_proofs:
                    tf = roll_timestamp(f.detached_timestamp.timestamp)
                    if tf.msg == x(agt):
                        tf.merge(t_agt)
                        f.status = "pending"
                        f.txid = t.msg[::-1].hex()
    self.update_storage()
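# Sketch of the roll_timestamp() helper used above, assuming the linear op
# chains this plugin itself produces: follow the first operation at each
# level down to the current tip of the proof.
def roll_timestamp(t):
    while len(t.ops) > 0:
        t = next(iter(t.ops.values()))
    return t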
def generate(msg_bytes):
    '''Generates certificate'''
    hashed_bytes = hashlib.new('sha256', msg_bytes).digest()
    file_timestamp = DetachedTimestampFile(OpSHA256(), Timestamp(hashed_bytes))
    nonce_appended_stamp = file_timestamp.timestamp.ops.add(OpAppend(os.urandom(16)))
    timestamp = nonce_appended_stamp.ops.add(OpSHA256())

    remote_calendar = RemoteCalendar(CALENDAR_URL)
    result = remote_calendar.submit(timestamp.msg, timeout=None)
    try:
        if isinstance(result, Timestamp):
            timestamp.merge(result)
        else:
            logging.debug(str(result))
    except Exception as error:
        logging.debug(str(error))

    return file_timestamp
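# The DetachedTimestampFile returned by generate() is what gets written to
# disk as an .ots certificate, e.g. with python-opentimestamps' stream
# serialization (the filename here is illustrative):
with open('message.ots', 'wb') as fd:
    ctx = StreamSerializationContext(fd)
    file_timestamp.serialize(ctx)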
def __do_ethereum(self):
    if self.pending_commitments and time.time() > self.last_timestamp_tx + self.wait_time_between_txs:
        logging.info("we have commitments and enough time has passed")
        # logging.info(self.pending_commitments)

        # Update the most recent timestamp transaction with new commitments
        commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

        # Remember that commitment_timestamps contains raw commitments,
        # which are longer than necessary, so we sha256 them before passing
        # them to make_merkle_tree, which concatenates whatever it gets (or
        # for that matter, returns what it gets if there's only one item
        # for the tree!)
        commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]
        tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

        eth_tx = {'from': self.account,
                  'to': self.account,
                  'value': 0,
                  'data': '0x' + bytes.hex(tip_timestamp.msg)}
        logging.info(eth_tx)
        tx_hash = self.web3.eth.sendTransaction(eth_tx)
        logging.info("tx_hash " + str(tx_hash))

        self.last_timestamp_tx = time.time()
        self.pending_commitments = set()
        self.unconfirmed_txs.append(TimestampTx(tx_hash, tip_timestamp, commitment_timestamps))
def __do_bitcoin(self):
    """Do Bitcoin-related maintenance"""

    # FIXME: we shouldn't have to create a new proxy each time, but with
    # current python-bitcoinlib and the RPC implementation it seems that
    # the proxy connection can timeout w/o recovering properly.
    proxy = bitcoin.rpc.Proxy()

    new_blocks = self.known_blocks.update_from_proxy(proxy)

    for (block_height, block_hash) in new_blocks:
        logging.info("New block %s at height %d" % (b2lx(block_hash), block_height))

        # Save commitments to disk that have reached min_confirmations
        confirmed_tx = self.txs_waiting_for_confirmation.pop(
            block_height - self.min_confirmations + 1, None)
        if confirmed_tx is not None:
            self.__save_confirmed_timestamp_tx(confirmed_tx)

        # If there already are txs waiting for confirmation at this
        # block_height, there was a reorg and those pending commitments now
        # need to be added back to the pool
        reorged_tx = self.txs_waiting_for_confirmation.pop(block_height, None)
        if reorged_tx is not None:
            # FIXME: the reorged transaction might get mined in another
            # block, so just adding the commitments for it back to the pool
            # isn't ideal, but it is safe
            logging.info('tx %s at height %d removed by reorg, adding %d commitments back to pending' %
                         (b2lx(reorged_tx.tx.GetHash()), block_height, len(reorged_tx.commitment_timestamps)))
            for reorged_commitment_timestamp in reorged_tx.commitment_timestamps:
                self.pending_commitments.add(reorged_commitment_timestamp.msg)

        # Check if this block contains any of the pending transactions
        try:
            block = proxy.getblock(block_hash)
        except KeyError:
            # Must have been a reorg or something, return
            logging.error("Failed to get block")
            return

        # Check all potential pending txs against this block.
        for tx in self.unconfirmed_txs:
            block_timestamp = make_timestamp_from_block(tx.tip_timestamp.msg, block, block_height)

            if block_timestamp is None:
                continue

            # Success!
            tx.tip_timestamp.merge(block_timestamp)

            for commitment_timestamp in tx.commitment_timestamps:
                self.pending_commitments.remove(commitment_timestamp.msg)
                logging.debug("Removed commitment %s from pending" % b2x(commitment_timestamp.msg))

            assert self.min_confirmations > 1
            logging.info("Success! %d commitments timestamped, now waiting for %d more confirmations" %
                         (len(tx.commitment_timestamps), self.min_confirmations - 1))

            # Add pending_tx to the list of timestamp transactions that
            # have been mined, and are waiting for confirmations.
            self.txs_waiting_for_confirmation[block_height] = tx

            # Since all unconfirmed txs conflict with each other, we can
            # clear the entire lot
            self.unconfirmed_txs.clear()

            # And finally, we can reset the last time a timestamp
            # transaction was mined to right now.
            self.last_timestamp_tx = time.time()

    time_to_next_tx = int(self.last_timestamp_tx + self.min_tx_interval - time.time())
    if time_to_next_tx > 0:
        # Minimum interval between transactions hasn't been reached, so do nothing
        logging.debug("Waiting %ds before next tx" % time_to_next_tx)
        return

    prev_tx = None
    if self.pending_commitments and not self.unconfirmed_txs:
        # Find the biggest unspent output that's confirmed
        unspent = find_unspent(proxy)
        if not len(unspent):
            logging.error("Can't timestamp; no spendable outputs")
            return

        # For the change scriptPubKey, we can save a few bytes by using
        # a pay-to-pubkey rather than the usual pay-to-pubkeyhash
        change_addr = proxy.getnewaddress()
        change_pubkey = proxy.validateaddress(change_addr)['pubkey']
        change_scriptPubKey = CScript([change_pubkey, OP_CHECKSIG])

        prev_tx = self.__create_new_timestamp_tx_template(
            unspent[-1]['outpoint'], unspent[-1]['amount'], change_scriptPubKey)

        logging.debug('New timestamp tx, spending output %r, value %s' %
                      (unspent[-1]['outpoint'], str_money_value(unspent[-1]['amount'])))

    elif self.unconfirmed_txs:
        (prev_tx, prev_tip_timestamp, prev_commitment_timestamps) = self.unconfirmed_txs[-1]

    # Send the first transaction even if we don't have a new block
    if prev_tx and (new_blocks or not self.unconfirmed_txs):
        # Update the most recent timestamp transaction with new commitments
        commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

        # Remember that commitment_timestamps contains raw commitments,
        # which are longer than necessary, so we sha256 them before passing
        # them to make_merkle_tree, which concatenates whatever it gets (or
        # for that matter, returns what it gets if there's only one item
        # for the tree!)
        commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

        tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

        sent_tx = None
        relay_feerate = self.relay_feerate
        while sent_tx is None:
            unsigned_tx = self.__update_timestamp_tx(prev_tx, tip_timestamp.msg,
                                                     proxy.getblockcount(), relay_feerate)

            fee = _get_tx_fee(unsigned_tx, proxy)
            if fee is None:
                logging.debug("Can't determine txfee of transaction; skipping")
                return
            if fee > self.max_fee:
                logging.error("Maximum txfee reached!")
                return

            r = proxy.signrawtransaction(unsigned_tx)
            if not r['complete']:
                logging.error("Failed to sign transaction! r = %r" % r)
                return
            signed_tx = r['tx']

            try:
                txid = proxy.sendrawtransaction(signed_tx)
            except bitcoin.rpc.JSONRPCError as err:
                if err.error['code'] == -26:
                    logging.debug("Err: %r" % err.error)
                    # Insufficient priority - basically means we didn't
                    # pay enough, so try again with a higher feerate
                    relay_feerate *= 2
                    continue
                else:
                    raise err  # something else, fail!

            sent_tx = signed_tx

        if self.unconfirmed_txs:
            logging.info("Sent timestamp tx %s, replacing %s; %d total commitments" %
                         (b2lx(sent_tx.GetHash()), b2lx(prev_tx.GetHash()), len(commitment_timestamps)))
        else:
            logging.info("Sent timestamp tx %s; %d total commitments" %
                         (b2lx(sent_tx.GetHash()), len(commitment_timestamps)))

        self.unconfirmed_txs.append(TimestampTx(sent_tx, tip_timestamp, commitment_timestamps))
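# Sketch of the _get_tx_fee() helper used above. The fee is the sum of the
# input values minus the sum of the output values; input values aren't in
# the raw transaction, so they're looked up via the node. Assumes a
# python-bitcoinlib Proxy whose gettxout() raises IndexError for unknown
# or spent outpoints; the shipped helper may differ.
def _get_tx_fee(tx, proxy):
    value_in = 0
    for txin in tx.vin:
        try:
            r = proxy.gettxout(txin.prevout)
        except IndexError:
            return None  # caller treats None as "can't determine fee"
        value_in += r['txout'].nValue

    value_out = sum(txout.nValue for txout in tx.vout)
    return value_in - value_out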
def make_timestamp_from_block(digest, block, blockheight, *, max_tx_size=1000):
    """Make a timestamp for a message in a block

    Every transaction within the block is serialized and checked to see if
    the raw serialized bytes contain the message. If one or more
    transactions do, the smallest such transaction is used to create a
    timestamp proof for that specific message to the block header.

    To limit the maximum size of the proof, transactions larger than
    `max_tx_size` are ignored.

    Returns a timestamp for that message on success, None on failure.
    """
    # Note how the strategy changes if we add SHA256 midstate support
    len_smallest_tx_found = max_tx_size + 1
    commitment_tx = None
    prefix = None
    suffix = None
    for tx in block.vtx:
        serialized_tx = tx.serialize(params={'include_witness': False})

        if len(serialized_tx) > len_smallest_tx_found:
            continue

        try:
            i = serialized_tx.index(digest)
        except ValueError:
            continue

        # Found it!
        commitment_tx = tx
        prefix = serialized_tx[0:i]
        suffix = serialized_tx[i + len(digest):]

        len_smallest_tx_found = len(serialized_tx)

    if len_smallest_tx_found > max_tx_size:
        return None

    digest_timestamp = Timestamp(digest)

    # Add the commitment ops necessary to go from the digest to the txid op
    prefix_stamp = digest_timestamp.ops.add(OpPrepend(prefix))
    txid_stamp = cat_sha256d(prefix_stamp, suffix)

    assert commitment_tx.GetTxid() == txid_stamp.msg

    # Create the txid list, with our commitment txid op in the appropriate
    # place
    block_txid_stamps = []
    for tx in block.vtx:
        if tx.GetTxid() != txid_stamp.msg:
            block_txid_stamps.append(Timestamp(tx.GetTxid()))
        else:
            block_txid_stamps.append(txid_stamp)

    # Build the merkle tree
    merkleroot_stamp = __make_btc_block_merkle_tree(block_txid_stamps)
    assert merkleroot_stamp.msg == block.hashMerkleRoot

    attestation = BitcoinBlockHeaderAttestation(blockheight)
    merkleroot_stamp.attestations.add(attestation)

    return digest_timestamp
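# Sketch of __make_btc_block_merkle_tree(), which must reproduce Bitcoin's
# merkle algorithm exactly (including duplicating the last entry of an odd
# level) or the hashMerkleRoot assertion above would fail. Built on the
# cat_sha256d() helper sketched earlier.
def __make_btc_block_merkle_tree(blk_txid_stamps):
    assert len(blk_txid_stamps) > 0

    digests = blk_txid_stamps
    while len(digests) > 1:
        next_level = []
        for i in range(0, len(digests), 2):
            left = digests[i]
            # Bitcoin's rule: an odd level pairs its last entry with itself.
            right = digests[i + 1] if i + 1 < len(digests) else digests[i]
            next_level.append(cat_sha256d(left, right))
        digests = next_level

    return digests[0]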
def main():
    parser = otsclient.args.make_common_options_arg_parser()

    parser.add_argument("-g", "--gpg-program", action="store",
                        default="/usr/bin/gpg",
                        help="Path to the GnuPG binary (default %(default)s)")
    parser.add_argument('-c', '--calendar', metavar='URL', dest='calendar_urls',
                        action='append', type=str,
                        default=["https://calendar.bitmark.one",
                                 "https://a.pool.opentimestamps.org",
                                 "https://b.pool.opentimestamps.org",
                                 "https://a.pool.eternitywall.com",
                                 "https://ots.btc.catallaxy.com"],
                        help='Create timestamp with the aid of a remote calendar. '
                             'May be specified multiple times. Default: %(default)r')
    parser.add_argument('-b', '--btc-wallet', dest='use_btc_wallet',
                        action='store_true',
                        help='Create timestamp locally with the local Bitcoin wallet.')
    parser.add_argument("gpgargs", nargs=argparse.REMAINDER,
                        help='Arguments passed to GnuPG binary')
    parser.add_argument("--timeout", type=int, default=5,
                        help="Timeout before giving up on a calendar. "
                             "Default: %(default)d")
    parser.add_argument("-m", type=int, default=2,
                        help="Commitments are sent to remote calendars; "
                             "in the event of timeout the timestamp is considered "
                             "done if at least M calendars replied. "
                             "Default: %(default)s")
    parser.add_argument('--rehash-trees', action='store_true',
                        help=argparse.SUPPRESS)

    args = otsclient.args.handle_common_options(parser.parse_args(), parser)

    logging.basicConfig(format='ots: %(message)s')
    args.verbosity = args.verbose - args.quiet
    if args.verbosity == 0:
        logging.root.setLevel(logging.INFO)
    elif args.verbosity > 0:
        logging.root.setLevel(logging.DEBUG)
    elif args.verbosity == -1:
        logging.root.setLevel(logging.WARNING)
    elif args.verbosity < -1:
        logging.root.setLevel(logging.ERROR)

    if len(args.gpgargs) == 0 or args.gpgargs[0] != '--':
        parser.error("You need to have '--' as the last argument; see docs")

    args.gpgargs = args.gpgargs[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("-bsau", action="store")
    parser.add_argument("--verify", action="store")
    gpgargs = parser.parse_known_args(args.gpgargs)[0]

    if gpgargs.bsau:
        with subprocess.Popen([args.gpg_program] + args.gpgargs,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE) as gpg_proc:
            logging.debug("Reading Git commit")
            git_commit = sys.stdin.buffer.read()

            logging.debug("Git commit: %r" % git_commit)

            # FIXME: can this fail to write all bytes?
            n = gpg_proc.stdin.write(git_commit)
            logging.debug("Wrote %d bytes to GnuPG out of %d" % (n, len(git_commit)))
            gpg_proc.stdin.close()

            gpg_sig = gpg_proc.stdout.read()

            # GnuPG produces no output on failure
            if not gpg_sig:
                sys.exit(1)

            logging.debug("PGP sig: %r" % gpg_sig)

            # Timestamp the commit and tag together
            signed_commit_timestamp = Timestamp(hash_signed_commit(git_commit, gpg_sig))
            final_timestamp = signed_commit_timestamp

            # with git tree rehashing
            minor_version = 1

            # CWD will be the git repo, so this should get us the right one
            repo = git.Repo()

            hextree_start = None
            if git_commit.startswith(b'tree '):
                hextree_start = 5
            elif git_commit.startswith(b'object '):
                # I believe this is always a git tag
                hextree_start = 7
            else:
                raise AssertionError("Don't know what to do with %r" % git_commit)

            hextree = git_commit[hextree_start:hextree_start + 20 * 2].decode()
            tree = repo.tree(hextree)
            tree.path = ''

            tree_stamper = GitTreeTimestamper(tree)

            final_timestamp = signed_commit_timestamp.ops.add(
                OpAppend(tree_stamper.timestamp.msg)).ops.add(OpSHA256())

            otsclient.cmds.create_timestamp(final_timestamp, args.calendar_urls, args)

            if args.wait:
                # Interpreted as override by the upgrade command
                # FIXME: need to clean this bad abstraction up!
                args.calendar_urls = []
                otsclient.cmds.upgrade_timestamp(signed_commit_timestamp, args)

            sys.stdout.buffer.write(gpg_sig)
            write_ascii_armored(signed_commit_timestamp, sys.stdout.buffer, minor_version)

    elif gpgargs.verify:
        # Verify
        with open(gpgargs.verify, 'rb') as gpg_sig_fd:
            gpg_sig = gpg_sig_fd.read()
            git_commit = sys.stdin.buffer.read()

            (major_version, minor_version, timestamp) = \
                deserialize_ascii_armored_timestamp(git_commit, gpg_sig)

            if timestamp is None:
                print("OpenTimestamps: No timestamp found", file=sys.stderr)
            else:
                good = otsclient.cmds.verify_timestamp(timestamp, args)
                if good:
                    logging.info("Good timestamp")
                else:
                    logging.warning("Could not verify timestamp!")
            sys.stderr.flush()

            logging.debug("Running GnuPG binary: %r" % ([args.gpg_program] + args.gpgargs))
            with subprocess.Popen([args.gpg_program] + args.gpgargs,
                                  stdin=subprocess.PIPE) as gpg_proc:
                gpg_proc.stdin.write(git_commit)
                gpg_proc.stdin.close()
def make_timestamp_from_block(digest, block, blockheight, *, max_tx_size=1000):
    state = make_trie(block)
    my_root = state.root_hash
    block_root = bytes.fromhex(block['transactionsRoot'][2:])
    assert my_root == block_root

    try:
        j, prepend_tx, append_tx = found_tx(digest, block, max_tx_size)
    except ValueError:
        return None

    tx_raw = prepend_tx + digest + append_tx
    # print("tx_raw: " + tx_raw)
    # print("tx_hash: " + sha3.keccak_256(bytes.fromhex(tx_raw)).hexdigest())
    rlp_encode = rlp.encode(j)
    # print("rlp_encode: " + bytes.hex(rlp_encode))
    nibbles = trie.bin_to_nibbles(rlp_encode)
    current_node = state.root_node
    ops_list = []
    nibble_iter = iter(nibbles)

    while True:
        node_type = state._get_node_type(current_node)
        # print("node type: " + str(node_type))
        # print([bytes.hex(cur_el) for cur_el in current_node])
        current_node_rlp = rlp.encode(current_node)
        current_node_encoded = bytes.hex(current_node_rlp)
        # print(current_node_encoded)
        try:
            index = next(nibble_iter)
        except StopIteration:
            # Nibbles exhausted; keep the last index (only branch nodes use
            # it anyway).
            pass

        current_el = current_node[index if node_type == trie.NODE_TYPE_BRANCH else 1]
        current_el_hex = bytes.hex(current_el)
        # print(str(index) + ":" + current_el_hex)
        [prepend, append] = get_append_and_prepend(current_el_hex, current_node_encoded)
        ops_list.append(OpKECCAK256())
        if len(append) > 0:
            ops_list.append(OpAppend(bytes.fromhex(append)))
        if len(prepend) > 0:
            ops_list.append(OpPrepend(bytes.fromhex(prepend)))

        if node_type == trie.NODE_TYPE_LEAF:
            break
        else:
            current_node = state._decode_to_node(current_el)

    assert tx_raw == prepend_tx + digest + append_tx

    orig = Timestamp(bytes.fromhex(digest))
    current = orig
    if len(prepend_tx) > 0:
        current = current.ops.add(OpPrepend(bytes.fromhex(prepend_tx)))
    if len(append_tx) > 0:
        current = current.ops.add(OpAppend(bytes.fromhex(append_tx)))
    while len(ops_list) > 0:
        current = current.ops.add(ops_list.pop())

    attestation = EthereumBlockHeaderAttestation(blockheight)
    current.attestations.add(attestation)

    return orig
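# Sketch of the found_tx() helper assumed above: scan the block's
# transactions for one whose raw hex contains the digest, returning its
# index plus the hex before and after the digest, and raising ValueError
# when no transaction under max_tx_size contains it. The block/tx dict
# field names here ('transactions', 'raw') are assumptions about the
# web3-style block representation, not confirmed by the source.
def found_tx(digest, block, max_tx_size):
    for j, tx in enumerate(block['transactions']):
        tx_raw = tx['raw'][2:]  # assumed "0x"-prefixed hex string
        if len(tx_raw) // 2 > max_tx_size:
            continue
        i = tx_raw.find(digest)
        if i != -1:
            return j, tx_raw[:i], tx_raw[i + len(digest):]
    raise ValueError("digest not found in block")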