Example #1
    def __loop(self):
        logging.info("Starting aggregator loop")
        while not self.exit_event.wait(self.commitment_interval):
            digests = []
            done_events = []
            last_commitment = time.time()
            while not self.digest_queue.empty():
                # This should never raise the Empty exception, as we should be
                # the only thread taking items off the queue
                (digest, done_event) = self.digest_queue.get_nowait()
                digests.append(digest)
                done_events.append(done_event)

            if not len(digests):
                continue

            digests_commitment = make_merkle_tree(digests)

            logging.info("Aggregated %d digests under commitment %s" %
                         (len(digests), b2x(digests_commitment.msg)))

            self.calendar.submit(digests_commitment)

            # Notify all requestors that the commitment is done
            for done_event in done_events:
                done_event.set()
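
The loop above drains a queue of raw digests and commits to all of them at once. A minimal sketch of the make_merkle_tree call it relies on (assuming the python-opentimestamps package; the input bytes are stand-ins):

import hashlib
from opentimestamps.core.timestamp import Timestamp, make_merkle_tree

# One Timestamp per submitted digest, merged into a single merkle tip.
digest_stamps = [Timestamp(hashlib.sha256(m).digest())
                 for m in (b'foo', b'bar', b'baz')]
tip = make_merkle_tree(digest_stamps)
print(tip.msg.hex())  # the commitment logged as digests_commitment.msg above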
Example #2
def stamp_command(fd, args):
    # Create initial commitment ops for all files
    merkle_roots = []

    try:
        file_timestamp = DetachedTimestampFile.from_fd(OpSHA256(), fd)
    except OSError as exp:
        logging.error("Could not read %r: %s" % (fd.name, exp))
        return

    # Add nonce
    nonce_appended_stamp = file_timestamp.timestamp.ops.add(
        OpAppend(os.urandom(16)))
    merkle_root = nonce_appended_stamp.ops.add(OpSHA256())
    merkle_roots.append(merkle_root)
    merkle_tip = make_merkle_tree(merkle_roots)

    create_timestamp(merkle_tip, CALENDAR_URLS, parse_ots_args(args))

    try:
        with open("%s.ots" % fd.name, "wb") as timestamp_fd:
            ctx = StreamSerializationContext(timestamp_fd)
            file_timestamp.serialize(ctx)
    except IOError as exp:
        logging.error("Failed to create timestamp: %s" % exp)
        return
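
The nonce step above is worth calling out: appending 16 random bytes before the final SHA256 keeps the calendar server from learning the file's actual digest. The same pattern in isolation (a sketch using python-opentimestamps; the 32 zero bytes stand in for a real file hash):

import os
from opentimestamps.core.op import OpAppend, OpSHA256
from opentimestamps.core.timestamp import Timestamp

file_digest = Timestamp(bytes(32))                      # stand-in file hash
nonced = file_digest.ops.add(OpAppend(os.urandom(16)))  # blind the digest
merkle_leaf = nonced.ops.add(OpSHA256())                # what the tree sees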
Example #3
    def __loop(self):
        logging.info("Starting aggregator loop")
        while not self.exit_event.wait(self.commitment_interval):
            digests = []
            done_events = []
            last_commitment = time.time()
            while not self.digest_queue.empty():
                # This should never raise the Empty exception, as we should be
                # the only thread taking items off the queue
                (digest, done_event) = self.digest_queue.get_nowait()
                digests.append(digest)
                done_events.append(done_event)

            if not len(digests):
                continue

            digests_commitment = make_merkle_tree(digests)

            logging.info("Aggregated %d digests under commitment %s" % (len(digests), b2x(digests_commitment.msg)))

            self.calendar.submit(digests_commitment)

            # Notify all requestors that the commitment is done
            for done_event in done_events:
                done_event.set()
Example #4
    def __pending_to_merkle_tree(self, n):
            # Update the most recent timestamp transaction with new commitments
            commitment_timestamps = [Timestamp(commitment) for commitment in tuple(self.pending_commitments)[0:n]]

            # Remember that commitment_timestamps contains raw commitments,
            # which are longer than necessary, so we sha256 them before passing
            # them to make_merkle_tree, which concatenates whatever it gets (or,
            # for that matter, returns what it gets if there's only one item for
            # the tree!)
            commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

            logging.debug("Making merkle tree")
            tip_timestamp = make_merkle_tree(commitment_digest_timestamps)
            logging.debug("Done making merkle tree")

            return (tip_timestamp, commitment_timestamps)
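
The comment above leans on two properties of make_merkle_tree that are easy to check in isolation (a sketch, assuming python-opentimestamps): a single timestamp is returned as-is, and multiple timestamps are combined by concatenate-then-SHA256, which is why the variable-length raw commitments are hashed down to fixed-size digests first.

from opentimestamps.core.timestamp import Timestamp, make_merkle_tree

a = Timestamp(b'\x11' * 32)
b = Timestamp(b'\x22' * 32)

assert make_merkle_tree([a]) is a   # one item: passed through unchanged
tip = make_merkle_tree([a, b])      # two items: concatenated, then hashed
print(len(tip.msg))                 # 32, a single fixed-size commitment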
Example #5
File: ots.py Project: TimeBags/timebags
def ots_stamp(file_list, min_resp=DEF_MIN_RESP, timeout=DEF_TIMEOUT):
    ''' stamp function '''

    merkle_roots = []
    file_timestamps = []

    for file_name in file_list:
        with open(file_name, 'rb') as file_handler:
            try:
                file_timestamp = DetachedTimestampFile.from_fd(OpSHA256(), file_handler)
            except OSError as exp:
                msg = "Could not read %r: %s" % (file_name, exp)
                logging.error(msg)
                raise

        # nonce
        nonce_appended_stamp = file_timestamp.timestamp.ops.add(OpAppend(os.urandom(16)))
        merkle_root = nonce_appended_stamp.ops.add(OpSHA256())

        merkle_roots.append(merkle_root)
        file_timestamps.append(file_timestamp)

    merkle_tip = make_merkle_tree(merkle_roots)

    calendar_urls = []
    calendar_urls.append('https://a.pool.opentimestamps.org')
    calendar_urls.append('https://b.pool.opentimestamps.org')
    calendar_urls.append('https://a.pool.eternitywall.com')
    calendar_urls.append('https://ots.btc.catallaxy.com')

    if not create_timestamp(merkle_tip, calendar_urls, min_resp, timeout):
        return False

    for (file_name, file_timestamp) in zip(file_list, file_timestamps):
        timestamp_file_path = file_name + '.ots'
        try:
            with open(timestamp_file_path, 'xb') as timestamp_fd:
                ctx = StreamSerializationContext(timestamp_fd)
                file_timestamp.serialize(ctx)
        except IOError as exp:
            msg = "Failed to create timestamp %r: %s" % (timestamp_file_path, exp)
            logging.error(msg)
            raise

    return True
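
Hypothetical usage of ots_stamp as defined above: on success each input file gains a sibling .ots proof (still incomplete until a calendar confirms it).

if ots_stamp(['document.pdf', 'invoice.pdf']):
    print("stamps submitted; proofs written to document.pdf.ots and invoice.pdf.ots")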
Example #6
    def aggregate_timestamps(self):
        file_timestamps = []
        for f in self.proofs_storage_file.incomplete_proofs:
            if f.status != "pending":
                try:
                    f.from_file(f.path)
                    f.status = "aggregated"
                    file_timestamps += [nonce_timestamp(f.detached_timestamp.timestamp)]
                except FileNotFoundError:
                    pass
        if not file_timestamps:
            return None
        else:
            t = make_merkle_tree(file_timestamps)
            for f in self.proofs_storage_file.incomplete_proofs:
                if f.status == "aggregated":
                    f.agt = roll_timestamp(f.detached_timestamp.timestamp).msg
            self.update_storage()
        return t.msg
Example #7
    def __do_ethereum(self):
        if self.pending_commitments and time.time() > self.last_timestamp_tx + self.wait_time_between_txs:
            logging.info("we have commitments and enough time has passed")
            # logging.info(self.pending_commitments)
            # Update the most recent timestamp transaction with new commitments
            commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

            # Remember that commitment_timestamps contains raw commitments,
            # which are longer than necessary, so we sha256 them before passing
            # them to make_merkle_tree, which concatenates whatever it gets (or,
            # for that matter, returns what it gets if there's only one item for
            # the tree!)
            commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

            tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

            eth_tx = {'from': self.account, 'to': self.account, 'value': 0, 'data': '0x' + bytes.hex(tip_timestamp.msg)}
            logging.info(eth_tx)
            tx_hash = self.web3.eth.sendTransaction(eth_tx)
            logging.info("tx_hash " + str(tx_hash))
            self.last_timestamp_tx = time.time()
            self.pending_commitments = set()
            self.unconfirmed_txs.append(TimestampTx(tx_hash, tip_timestamp, commitment_timestamps))
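
The only OpenTimestamps-specific part of the transaction above is its payload: a zero-value self-send whose data field carries the hex-encoded 32-byte merkle tip. A sketch (ACCOUNT and the tip bytes are placeholders):

ACCOUNT = '0x0000000000000000000000000000000000000000'  # placeholder address
tip_msg = bytes.fromhex('11' * 32)       # stands in for tip_timestamp.msg
eth_tx = {'from': ACCOUNT, 'to': ACCOUNT, 'value': 0,
          'data': '0x' + tip_msg.hex()}  # what goes on-chain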
Example #8
    def __init__(self,
                 tree,
                 db=None,
                 file_hash_op=OpSHA256(),
                 tree_hash_op=OpSHA256()):
        self.tree = tree

        if db is None:
            os.makedirs(tree.repo.git_dir + '/ots', exist_ok=True)

            # WARNING: change the version number if any of the following is
            # changed; __init__() is consensus-critical!
            db = dbm.open(tree.repo.git_dir + '/ots/tree-hash-cache-v3', 'c')

        self.db = db
        self.file_hash_op = file_hash_op
        self.tree_hash_op = tree_hash_op

        def do_item(item):
            try:
                return (item, Timestamp(db[item.hexsha]))
            except KeyError:
                timestamp = None
                if isinstance(item, git.Blob):
                    timestamp = Timestamp(
                        file_hash_op.hash_fd(item.data_stream[3]))

                elif isinstance(item, git.Tree):
                    stamper = GitTreeTimestamper(item,
                                                 db=db,
                                                 file_hash_op=file_hash_op,
                                                 tree_hash_op=tree_hash_op)
                    timestamp = stamper.timestamp

                elif isinstance(item, git.Submodule):
                    # A submodule is just a git commit hash.
                    #
                    # Unfortunately we're not guaranteed to have the repo
                    # behind it, so all we can do is timestamp that SHA1 hash.
                    #
                    # We do run it through the tree_hash_op to make it
                    # indistinguishable from other things; consider the
                    # degenerate case where the only thing in a git repo was a
                    # submodule.
                    timestamp = Timestamp(tree_hash_op(item.binsha))

                else:
                    raise NotImplementedError("Don't know what to do with %r" %
                                              item)

                db[item.hexsha] = timestamp.msg
                return (item, timestamp)

        self.contents = tuple(do_item(item) for item in self.tree)

        if len(self.contents) > 1:
            # Deterministically nonce contents in an all-or-nothing transform. As
            # mentioned in the class docstring, we want to ensure that the
            # siblings of any leaf in the merkle tree don't give the attacker any
            # information about what else is in the tree, unless the attacker
            # already knows (or can brute-force) the entire contents of the tree.
            #
            # While not perfect - a user-provided persistent key would prevent the
            # attacker from being able to brute-force the contents - this option
            # has the advantage of being possible to calculate deterministically
            # using only the tree itself, removing the need to keep secret keys
            # that can easily be lost.
            #
            # First, calculate a nonce_key that depends on the entire contents of
            # the tree. The 8-byte tag ensures the key calculated is unique for
            # this purpose.
            contents_sum = b''.join(stamp.msg for item, stamp in self.contents
                                    ) + b'\x01\x89\x08\x0c\xfb\xd0\xe8\x08'
            nonce_key = tree_hash_op.hash_fd(io.BytesIO(contents_sum))

            # Second, calculate per-item nonces deterministically from that key,
            # and add those nonces to the timestamps of every item in the tree.
            #
            # While we usually use 128-bit nonces, here we're using full-length
            # nonces. Additionally, we pick append/prepend pseudo-randomly. This
            # helps obscure the directory structure, as a commitment for a git tree
            # is indistinguishable from an inner node in the per-git-tree merkle
            # tree.
            def deterministically_nonce_stamp(private_stamp):
                nonce1 = tree_hash_op(private_stamp.msg + nonce_key)
                nonce2 = tree_hash_op(nonce1)

                side = OpPrepend if nonce1[0] & 0b1 else OpAppend
                nonce_added = private_stamp.ops.add(side(nonce2))
                return nonce_added.ops.add(tree_hash_op)

            nonced_contents = (deterministically_nonce_stamp(stamp)
                               for item, stamp in self.contents)

            # Note how the current algorithm, if asked to timestamp a tree
            # with a single thing in it, will return the hash of that thing
            # directly. From the point of view of just committing to the data that's
            # perfectly fine, and probably (slightly) better as it reveals a little
            # less information about directory structure.
            self.timestamp = make_merkle_tree(
                nonced_stamp for nonced_stamp in nonced_contents)

        elif len(self.contents) == 1:
            # If there's only one item in the tree, the fancy all-or-nothing
            # transform above is just a waste of ops, so use the tree contents
            # directly instead.
            self.timestamp = tuple(self.contents)[0][1]

        else:
            raise AssertionError("Empty git tree")
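
The deterministic-nonce step buried in the constructor above can be read on its own. A sketch with hashlib standing in for the default tree_hash_op (OpSHA256), mirroring the key derivation and the pseudo-random append/prepend choice; the leaf messages are stand-ins:

import hashlib

def sha256(data):
    return hashlib.sha256(data).digest()

msgs = [b'leaf-one', b'leaf-two']             # stand-in stamp.msg values
tag = b'\x01\x89\x08\x0c\xfb\xd0\xe8\x08'     # same 8-byte tag as above
nonce_key = sha256(b''.join(msgs) + tag)      # depends on the whole tree

for msg in msgs:
    nonce1 = sha256(msg + nonce_key)
    nonce2 = sha256(nonce1)
    # nonce1's low bit picks the side, exactly as OpPrepend/OpAppend above
    nonced = (nonce2 + msg) if nonce1[0] & 0b1 else (msg + nonce2)
    print(sha256(nonced).hex())               # the nonced leaf digest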
Example #9
    def __do_bitcoin(self):
        """Do Bitcoin-related maintenance"""

        # FIXME: we shouldn't have to create a new proxy each time, but with
        # current python-bitcoinlib and the RPC implementation it seems that
        # the proxy connection can timeout w/o recovering properly.
        proxy = bitcoin.rpc.Proxy()

        new_blocks = self.known_blocks.update_from_proxy(proxy)

        for (block_height, block_hash) in new_blocks:
            logging.info("New block %s at height %d" % (b2lx(block_hash), block_height))

            # Save commitments to disk that have reached min_confirmations
            confirmed_tx = self.txs_waiting_for_confirmation.pop(block_height - self.min_confirmations + 1, None)
            if confirmed_tx is not None:
                self.__save_confirmed_timestamp_tx(confirmed_tx)

            # If there already are txs waiting for confirmation at this
            # block_height, there was a reorg and those pending commitments now
            # need to be added back to the pool
            reorged_tx = self.txs_waiting_for_confirmation.pop(block_height, None)
            if reorged_tx is not None:
                # FIXME: the reorged transaction might get mined in another
                # block, so just adding the commitments for it back to the pool
                # isn't ideal, but it is safe
                logging.info(
                    "tx %s at height %d removed by reorg, adding %d commitments back to pending"
                    % (b2lx(reorged_tx.tx.GetHash()), block_height, len(reorged_tx.commitment_timestamps))
                )
                for reorged_commitment_timestamp in reorged_tx.commitment_timestamps:
                    self.pending_commitments.add(reorged_commitment_timestamp.msg)

            # Check if this block contains any of the pending transactions

            try:
                block = proxy.getblock(block_hash)
            except KeyError:
                # Must have been a reorg or something, return
                logging.error("Failed to get block")
                return

            # Check all potential pending txs against this block.
            for tx in self.unconfirmed_txs:
                block_timestamp = make_timestamp_from_block(tx.tip_timestamp.msg, block, block_height)

                if block_timestamp is None:
                    continue

                # Success!
                tx.tip_timestamp.merge(block_timestamp)

                for commitment_timestamp in tx.commitment_timestamps:
                    self.pending_commitments.remove(commitment_timestamp.msg)
                    logging.debug("Removed commitment %s from pending" % b2x(commitment_timestamp.msg))

                assert self.min_confirmations > 1
                logging.info(
                    "Success! %d commitments timestamped, now waiting for %d more confirmations"
                    % (len(tx.commitment_timestamps), self.min_confirmations - 1)
                )

                # Add pending_tx to the list of timestamp transactions that
                # have been mined, and are waiting for confirmations.
                self.txs_waiting_for_confirmation[block_height] = tx

                # Since all unconfirmed txs conflict with each other, we can clear the entire lot
                self.unconfirmed_txs.clear()

                # And finally, we can reset the last time a timestamp
                # transaction was mined to right now.
                self.last_timestamp_tx = time.time()

        time_to_next_tx = int(self.last_timestamp_tx + self.min_tx_interval - time.time())
        if time_to_next_tx > 0:
            # Minimum interval between transactions hasn't been reached, so do nothing
            logging.debug("Waiting %ds before next tx" % time_to_next_tx)
            return

        prev_tx = None
        if self.pending_commitments and not self.unconfirmed_txs:
            # Find the biggest unspent output that's confirmed
            unspent = find_unspent(proxy)

            if not len(unspent):
                logging.error("Can't timestamp; no spendable outputs")
                return

            # For the change scriptPubKey, we can save a few bytes by using
            # a pay-to-pubkey rather than the usual pay-to-pubkeyhash
            change_addr = proxy.getnewaddress()
            change_pubkey = proxy.validateaddress(change_addr)["pubkey"]
            change_scriptPubKey = CScript([change_pubkey, OP_CHECKSIG])

            prev_tx = self.__create_new_timestamp_tx_template(
                unspent[-1]["outpoint"], unspent[-1]["amount"], change_scriptPubKey
            )

            logging.debug(
                "New timestamp tx, spending output %r, value %s"
                % (unspent[-1]["outpoint"], str_money_value(unspent[-1]["amount"]))
            )

        elif self.unconfirmed_txs:
            (prev_tx, prev_tip_timestamp, prev_commitment_timestamps) = self.unconfirmed_txs[-1]

        # Send the first transaction even if we don't have a new block
        if prev_tx and (new_blocks or not self.unconfirmed_txs):
            # Update the most recent timestamp transaction with new commitments
            commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

            # Remember that commitment_timestamps contains raw commitments,
            # which are longer than necessary, so we sha256 them before passing
            # them to make_merkle_tree, which concatenates whatever it gets (or,
            # for that matter, returns what it gets if there's only one item for
            # the tree!)
            commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

            tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

            sent_tx = None
            relay_feerate = self.relay_feerate
            while sent_tx is None:
                unsigned_tx = self.__update_timestamp_tx(
                    prev_tx, tip_timestamp.msg, proxy.getblockcount(), relay_feerate
                )

                fee = _get_tx_fee(unsigned_tx, proxy)
                if fee is None:
                    logging.debug("Can't determine txfee of transaction; skipping")
                    return
                if fee > self.max_fee:
                    logging.error("Maximum txfee reached!")
                    return

                r = proxy.signrawtransaction(unsigned_tx)
                if not r["complete"]:
                    logging.error("Failed to sign transaction! r = %r" % r)
                    return
                signed_tx = r["tx"]

                try:
                    txid = proxy.sendrawtransaction(signed_tx)
                except bitcoin.rpc.JSONRPCError as err:
                    if err.error["code"] == -26:
                        logging.debug("Err: %r" % err.error)
                        # Insufficient priority - basically means we didn't
                        # pay enough, so try again with a higher feerate
                        relay_feerate *= 2
                        continue

                    else:
                        raise err  # something else, fail!

                sent_tx = signed_tx

            if self.unconfirmed_txs:
                logging.info(
                    "Sent timestamp tx %s, replacing %s; %d total commitments"
                    % (b2lx(sent_tx.GetHash()), b2lx(prev_tx.GetHash()), len(commitment_timestamps))
                )
            else:
                logging.info(
                    "Sent timestamp tx %s; %d total commitments" % (b2lx(sent_tx.GetHash()), len(commitment_timestamps))
                )

            self.unconfirmed_txs.append(TimestampTx(sent_tx, tip_timestamp, commitment_timestamps))
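
One detail worth noting in the send loop above: RPC error code -26 is treated as "fee too low", and the transaction is rebuilt with a doubled feerate until it is accepted or max_fee is hit. The retry pattern in isolation (a self-contained sketch; FeeTooLowError and build_and_send are hypothetical stand-ins for the JSONRPCError handling and the sign-and-send steps):

class FeeTooLowError(Exception):      # stands in for JSONRPCError code -26
    pass

def build_and_send(feerate):          # hypothetical helper wrapping sign+send
    if feerate < 0.00004:
        raise FeeTooLowError
    print("sent at feerate", feerate)

feerate = 0.00001                     # starting relay feerate, placeholder
while True:
    try:
        build_and_send(feerate)
        break
    except FeeTooLowError:
        feerate *= 2                  # same doubling as the loop above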
Example #10
    def __do_bitcoin(self):
        """Do Bitcoin-related maintenance"""

        # FIXME: we shouldn't have to create a new proxy each time, but with
        # current python-bitcoinlib and the RPC implementation it seems that
        # the proxy connection can timeout w/o recovering properly.
        proxy = bitcoin.rpc.Proxy()

        new_blocks = self.known_blocks.update_from_proxy(proxy)

        for (block_height, block_hash) in new_blocks:
            logging.info("New block %s at height %d" % (b2lx(block_hash), block_height))

            # Save commitments to disk that have reached min_confirmations
            confirmed_tx = self.txs_waiting_for_confirmation.pop(block_height - self.min_confirmations + 1, None)
            if confirmed_tx is not None:
                self.__save_confirmed_timestamp_tx(confirmed_tx)

            # If there already are txs waiting for confirmation at this
            # block_height, there was a reorg and those pending commitments now
            # need to be added back to the pool
            reorged_tx = self.txs_waiting_for_confirmation.pop(block_height, None)
            if reorged_tx is not None:
                # FIXME: the reorged transaction might get mined in another
                # block, so just adding the commitments for it back to the pool
                # isn't ideal, but it is safe
                logging.info('tx %s at height %d removed by reorg, adding %d commitments back to pending' % (b2lx(reorged_tx.tx.GetHash()), block_height, len(reorged_tx.commitment_timestamps)))
                for reorged_commitment_timestamp in reorged_tx.commitment_timestamps:
                    self.pending_commitments.add(reorged_commitment_timestamp.msg)

            # Check if this block contains any of the pending transactions

            try:
                block = proxy.getblock(block_hash)
            except KeyError:
                # Must have been a reorg or something, return
                logging.error("Failed to get block")
                return

            # Check all potential pending txs against this block.
            for tx in self.unconfirmed_txs:
                block_timestamp = make_timestamp_from_block(tx.tip_timestamp.msg, block, block_height)

                if block_timestamp is None:
                    continue

                # Success!
                tx.tip_timestamp.merge(block_timestamp)

                for commitment_timestamp in tx.commitment_timestamps:
                    self.pending_commitments.remove(commitment_timestamp.msg)
                    logging.debug("Removed commitment %s from pending" % b2x(commitment_timestamp.msg))

                assert self.min_confirmations > 1
                logging.info("Success! %d commitments timestamped, now waiting for %d more confirmations" %
                             (len(tx.commitment_timestamps), self.min_confirmations - 1))

                # Add pending_tx to the list of timestamp transactions that
                # have been mined, and are waiting for confirmations.
                self.txs_waiting_for_confirmation[block_height] = tx

                # Since all unconfirmed txs conflict with each other, we can clear the entire lot
                self.unconfirmed_txs.clear()

                # And finally, we can reset the last time a timestamp
                # transaction was mined to right now.
                self.last_timestamp_tx = time.time()


        time_to_next_tx = int(self.last_timestamp_tx + self.min_tx_interval - time.time())
        if time_to_next_tx > 0:
            # Minimum interval between transactions hasn't been reached, so do nothing
            logging.debug("Waiting %ds before next tx" % time_to_next_tx)
            return

        prev_tx = None
        if self.pending_commitments and not self.unconfirmed_txs:
            # Find the biggest unspent output that's confirmed
            unspent = find_unspent(proxy)

            if not len(unspent):
                logging.error("Can't timestamp; no spendable outputs")
                return

            # For the change scriptPubKey, we can save a few bytes by using
            # a pay-to-pubkey rather than the usual pay-to-pubkeyhash
            change_addr = proxy.getnewaddress()
            change_pubkey = proxy.validateaddress(change_addr)['pubkey']
            change_scriptPubKey = CScript([change_pubkey, OP_CHECKSIG])

            prev_tx = self.__create_new_timestamp_tx_template(unspent[-1]['outpoint'], unspent[-1]['amount'], change_scriptPubKey)

            logging.debug('New timestamp tx, spending output %r, value %s' % (unspent[-1]['outpoint'], str_money_value(unspent[-1]['amount'])))

        elif self.unconfirmed_txs:
            (prev_tx, prev_tip_timestamp, prev_commitment_timestamps) = self.unconfirmed_txs[-1]

        # Send the first transaction even if we don't have a new block
        if prev_tx and (new_blocks or not self.unconfirmed_txs):
            # Update the most recent timestamp transaction with new commitments
            commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

            # Remember that commitment_timestamps contains raw commitments,
            # which are longer than necessary, so we sha256 them before passing
            # them to make_merkle_tree, which concatenates whatever it gets (or,
            # for that matter, returns what it gets if there's only one item for
            # the tree!)
            commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

            tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

            sent_tx = None
            relay_feerate = self.relay_feerate
            while sent_tx is None:
                unsigned_tx = self.__update_timestamp_tx(prev_tx, tip_timestamp.msg,
                                                         proxy.getblockcount(), relay_feerate)

                fee = _get_tx_fee(unsigned_tx, proxy)
                if fee is None:
                    logging.debug("Can't determine txfee of transaction; skipping")
                    return
                if fee > self.max_fee:
                    logging.error("Maximum txfee reached!")
                    return

                r = proxy.signrawtransaction(unsigned_tx)
                if not r['complete']:
                    logging.error("Failed to sign transaction! r = %r" % r)
                    return
                signed_tx = r['tx']

                try:
                    txid = proxy.sendrawtransaction(signed_tx)
                except bitcoin.rpc.JSONRPCError as err:
                    if err.error['code'] == -26:
                        logging.debug("Err: %r" % err.error)
                        # Insufficient priority - basically means we didn't
                        # pay enough, so try again with a higher feerate
                        relay_feerate *= 2
                        continue

                    else:
                        raise err  # something else, fail!

                sent_tx = signed_tx

            if self.unconfirmed_txs:
                logging.info("Sent timestamp tx %s, replacing %s; %d total commitments" % (b2lx(sent_tx.GetHash()), b2lx(prev_tx.GetHash()), len(commitment_timestamps)))
            else:
                logging.info("Sent timestamp tx %s; %d total commitments" % (b2lx(sent_tx.GetHash()), len(commitment_timestamps)))

            self.unconfirmed_txs.append(TimestampTx(sent_tx, tip_timestamp, commitment_timestamps))
Example #11
    def __init__(self, tree, db=None, file_hash_op=OpSHA256(), tree_hash_op=OpSHA256()):
        self.tree = tree

        if db is None:
            os.makedirs(tree.repo.git_dir + '/ots', exist_ok=True)

            # WARNING: change the version number if any of the following is
            # changed; __init__() is consensus-critical!
            db = dbm.open(tree.repo.git_dir + '/ots/tree-hash-cache-v3', 'c')

        self.db = db
        self.file_hash_op = file_hash_op
        self.tree_hash_op = tree_hash_op

        def do_item(item):
            try:
                return (item, Timestamp(db[item.hexsha]))
            except KeyError:
                timestamp = None
                if isinstance(item, git.Blob):
                    timestamp = Timestamp(file_hash_op.hash_fd(item.data_stream[3]))

                elif isinstance(item, git.Tree):
                    stamper = GitTreeTimestamper(item, db=db, file_hash_op=file_hash_op, tree_hash_op=tree_hash_op)
                    timestamp = stamper.timestamp

                elif isinstance(item, git.Submodule):
                    # A submodule is just a git commit hash.
                    #
                    # Unfortunately we're not guaranteed to have the repo
                    # behind it, so all we can do is timestamp that SHA1 hash.
                    #
                    # We do run it through the tree_hash_op to make it
                    # indistinguishable from other things; consider the
                    # degenerate case where the only thing in a git repo was a
                    # submodule.
                    timestamp = Timestamp(tree_hash_op(item.binsha))

                else:
                    raise NotImplementedError("Don't know what to do with %r" % item)

                db[item.hexsha] = timestamp.msg
                return (item, timestamp)

        self.contents = tuple(do_item(item) for item in self.tree)

        if len(self.contents) > 1:
            # Deterministically nonce contents in an all-or-nothing transform. As
            # mentioned in the class docstring, we want to ensure that the
            # siblings of any leaf in the merkle tree don't give the attacker any
            # information about what else is in the tree, unless the attacker
            # already knows (or can brute-force) the entire contents of the tree.
            #
            # While not perfect - a user-provided persistent key would prevent the
            # attacker from being able to brute-force the contents - this option
            # has the advantage of being possible to calculate deterministically
            # using only the tree itself, removing the need to keep secret keys
            # that can easily be lost.
            #
            # First, calculate a nonce_key that depends on the entire contents of
            # the tree. The 8-byte tag ensures the key calculated is unique for
            # this purpose.
            contents_sum = b''.join(stamp.msg for item, stamp in self.contents) + b'\x01\x89\x08\x0c\xfb\xd0\xe8\x08'
            nonce_key = tree_hash_op.hash_fd(io.BytesIO(contents_sum))

            # Second, calculate per-item nonces deterministically from that key,
            # and add those nonces to the timestamps of every item in the tree.
            #
            # While we usually use 128-bit nonces, here we're using full-length
            # nonces. Additionally, we pick append/prepend pseudo-randomly. This
            # helps obscure the directory structure, as a commitment for a git tree
            # is indistinguishable from an inner node in the per-git-tree merkle
            # tree.
            def deterministically_nonce_stamp(private_stamp):
                nonce1 = tree_hash_op(private_stamp.msg + nonce_key)
                nonce2 = tree_hash_op(nonce1)

                side = OpPrepend if nonce1[0] & 0b1 else OpAppend
                nonce_added = private_stamp.ops.add(side(nonce2))
                return nonce_added.ops.add(tree_hash_op)

            nonced_contents = (deterministically_nonce_stamp(stamp) for item, stamp in self.contents)

            # Note how the current algorithm, if asked to timestamp a tree
            # with a single thing in it, will return the hash of that thing
            # directly. From the point of view of just committing to the data that's
            # perfectly fine, and probably (slightly) better as it reveals a little
            # less information about directory structure.
            self.timestamp = make_merkle_tree(nonced_stamp for nonced_stamp in nonced_contents)

        elif len(self.contents) == 1:
            # If there's only one item in the tree, the fancy all-or-nothing
            # transform above is just a waste of ops, so use the tree contents
            # directly instead.
            self.timestamp = tuple(self.contents)[0][1]

        else:
            raise AssertionError("Empty git tree")