Example 1
def stamp_command(fd, args):
    # Create initial commitment ops for all files
    merkle_roots = []

    try:
        file_timestamp = DetachedTimestampFile.from_fd(OpSHA256(), fd)
    except OSError as exp:
        logging.error("Could not read %r: %s" % (fd.name, exp))
        return

    # Add nonce
    nonce_appended_stamp = file_timestamp.timestamp.ops.add(
        OpAppend(os.urandom(16)))
    merkle_root = nonce_appended_stamp.ops.add(OpSHA256())
    merkle_roots.append(merkle_root)
    merkle_tip = make_merkle_tree(merkle_roots)

    create_timestamp(merkle_tip, CALENDAR_URLS, parse_ots_args(args))

    try:
        with open("%s.ots" % fd.name, "wb") as timestamp_fd:
            ctx = StreamSerializationContext(timestamp_fd)
            file_timestamp.serialize(ctx)
    except IOError as exp:
        logging.error("Failed to create timestamp: %s" % exp)
        return
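
The proof written by stamp_command can later be read back with the deserialization counterpart of the StreamSerializationContext used above. A minimal sketch, assuming the standard python-opentimestamps classes (the module paths and the file name are assumptions, not part of the example):

from opentimestamps.core.serialize import StreamDeserializationContext  # path assumed
from opentimestamps.core.timestamp import DetachedTimestampFile         # path assumed

# Hypothetical illustration: load the .ots proof produced above so it can be
# inspected or upgraded later.
with open("document.txt.ots", "rb") as proof_fd:   # placeholder file name
    ctx = StreamDeserializationContext(proof_fd)
    detached = DetachedTimestampFile.deserialize(ctx)

print(detached.timestamp.msg.hex())  # digest the proof commits to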
Example 2
    def from_file(self, path):
        self.path = path
        self.status = "tracked"
        self.agt = None
        self.txid = None
        self.block = None
        self.date = None
        with open(self.path, "rb") as fo:
            self.detached_timestamp = DetachedTimestampFile.from_fd(OpSHA256(), fo)
Example 3
    def upgrade_timestamps_tx(self, tx):
        for category, script, amount in tx.outputs():
            if category == 2:  # agt -> txid
                agt = script[4:]  # drop "6a20" op_return and op_pushdata(32)
                tx_raw = tx.serialize(witness=False)
                if len(x(tx_raw)) <= Op.MAX_MSG_LENGTH:
                    i = tx_raw.find(agt)
                    prepend = x(tx_raw[:i])
                    append = x(tx_raw[i + len(agt):])
                    t_agt = Timestamp(x(agt))
                    t = t_agt.ops.add(OpPrepend(prepend))
                    t = t.ops.add(OpAppend(append))
                    t = t.ops.add(OpSHA256())
                    t = t.ops.add(OpSHA256())  # txid in little endian
                    for f in self.proofs_storage_file.incomplete_proofs:
                        tf = roll_timestamp(f.detached_timestamp.timestamp)
                        if tf.msg == x(agt):
                            tf.merge(t_agt)
                            f.status = "pending"
                            f.txid = t.msg[::-1].hex()
        self.update_storage()
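
upgrade_timestamps_tx relies on a roll_timestamp helper that is not shown here. A minimal sketch of what such a helper presumably does, namely follow the chain of ops down to the current tip of a proof so its msg can be compared against the aggregation tip (agt) committed in the transaction:

def roll_timestamp(t):
    # Illustrative sketch, not the plugin's exact code: descend through the
    # first op at each level until a timestamp with no further ops (the
    # current tip of the proof) is reached.
    while len(t.ops) > 0:
        t = sorted(t.ops.items())[0][1]
    return t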
Example 4
def generate(msg_bytes):
    '''Generates certificate'''

    hashed_bytes = hashlib.new('sha256', msg_bytes).digest()
    file_timestamp = DetachedTimestampFile(OpSHA256(), Timestamp(hashed_bytes))

    nonce_appended_stamp = file_timestamp.timestamp.ops.add(OpAppend(os.urandom(16)))
    timestamp = nonce_appended_stamp.ops.add(OpSHA256())

    remote_calendar = RemoteCalendar(CALENDAR_URL)

    result = remote_calendar.submit(timestamp.msg, timeout=None)

    try:
        if isinstance(result, Timestamp):
            timestamp.merge(result)
        else:
            logging.debug(str(result))
    except Exception as error:
        logging.debug(str(error))

    return file_timestamp
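
The DetachedTimestampFile returned by generate still has to be serialized before it can be stored or handed to a client. A minimal sketch using the in-memory serialization context from python-opentimestamps (the module path and the wrapper name are assumptions):

from opentimestamps.core.serialize import BytesSerializationContext  # path assumed

def certificate_bytes(msg_bytes):
    # Hypothetical wrapper around generate(): build the timestamp and return
    # the raw .ots proof bytes, ready to write to disk or send over the wire.
    file_timestamp = generate(msg_bytes)
    ctx = BytesSerializationContext()
    file_timestamp.serialize(ctx)
    return ctx.getbytes()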
Example 5
    def __pending_to_merkle_tree(self, n):
        # Update the most recent timestamp transaction with new commitments
        commitment_timestamps = [Timestamp(commitment) for commitment in tuple(self.pending_commitments)[0:n]]

        # Remember that commitment_timestamps contains raw commitments,
        # which are longer than necessary, so we sha256 them before passing
        # them to make_merkle_tree, which concatenates whatever it gets (or
        # for that matter, returns what it gets if there's only one item for
        # the tree!)
        commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

        logging.debug("Making merkle tree")
        tip_timestamp = make_merkle_tree(commitment_digest_timestamps)
        logging.debug("Done making merkle tree")

        return (tip_timestamp, commitment_timestamps)
Example 6
    def __do_ethereum(self):
        if self.pending_commitments and time.time() > self.last_timestamp_tx + self.wait_time_between_txs:
            logging.info("we have commitments and enough time has passed")
            # logging.info(self.pending_commitments)
            # Update the most recent timestamp transaction with new commitments
            commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

            # Remember that commitment_timestamps contains raw commitments,
            # which are longer than necessary, so we sha256 them before passing
            # them to make_merkle_tree, which concatenates whatever it gets (or
            # for that matter, returns what it gets if there's only one item for
            # the tree!)
            commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

            tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

            eth_tx = {'from': self.account, 'to': self.account, 'value': 0, 'data': '0x' + bytes.hex(tip_timestamp.msg)}
            logging.info(eth_tx)
            tx_hash = self.web3.eth.sendTransaction(eth_tx)
            logging.info("tx_hash " + str(tx_hash))
            self.last_timestamp_tx = time.time()
            self.pending_commitments = set()
            self.unconfirmed_txs.append(TimestampTx(tx_hash, tip_timestamp, commitment_timestamps))
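
The comments above rely on two properties of make_merkle_tree: it hashes together whatever digests it is given, and it returns a single item unchanged. A sketch of that pairing behaviour, meant as an illustration of the idea rather than the library's exact code, built on the cat_sha256d helper shown later in Example 8:

def make_merkle_tree_sketch(timestamps):
    # Illustration only: pair timestamps level by level with cat_sha256d until
    # a single tip remains. A lone input is returned as-is, which is why
    # callers sha256 raw commitments first instead of passing them directly.
    level = list(timestamps)
    assert level, "need at least one timestamp"
    while len(level) > 1:
        next_level = [cat_sha256d(level[i], level[i + 1])
                      for i in range(0, len(level) - 1, 2)]
        if len(level) % 2 == 1:
            next_level.append(level[-1])  # odd one out is carried up unchanged
        level = next_level
    return level[0]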
Example 7
    def __init__(self,
                 tree,
                 db=None,
                 file_hash_op=OpSHA256(),
                 tree_hash_op=OpSHA256()):
        self.tree = tree

        if db is None:
            os.makedirs(tree.repo.git_dir + '/ots', exist_ok=True)

            # WARNING: change the version number if any of the following is
            # changed; __init__() is consensus-critical!
            db = dbm.open(tree.repo.git_dir + '/ots/tree-hash-cache-v3', 'c')

        self.db = db
        self.file_hash_op = file_hash_op
        self.tree_hash_op = tree_hash_op

        def do_item(item):
            try:
                return (item, Timestamp(db[item.hexsha]))
            except KeyError:
                timestamp = None
                if isinstance(item, git.Blob):
                    timestamp = Timestamp(
                        file_hash_op.hash_fd(item.data_stream[3]))

                elif isinstance(item, git.Tree):
                    stamper = GitTreeTimestamper(item,
                                                 db=db,
                                                 file_hash_op=file_hash_op,
                                                 tree_hash_op=tree_hash_op)
                    timestamp = stamper.timestamp

                elif isinstance(item, git.Submodule):
                    # A submodule is just a git commit hash.
                    #
                    # Unfortunately we're not guaranteed to have the repo
                    # behind it, so all we can do is timestamp that SHA1 hash.
                    #
                    # We do run it through the tree_hash_op to make it
                    # indistinguishable from other things; consider the
                    # degenerate case where the only thing in a git repo was a
                    # submodule.
                    timestamp = Timestamp(tree_hash_op(item.binsha))

                else:
                    raise NotImplementedError("Don't know what to do with %r" %
                                              item)

                db[item.hexsha] = timestamp.msg
                return (item, timestamp)

        self.contents = tuple(do_item(item) for item in self.tree)

        if len(self.contents) > 1:
            # Deterministically nonce contents in an all-or-nothing transform. As
            # mentioned in the class docstring, we want to ensure that the
            # siblings of any leaf in the merkle tree don't give the attacker any
            # information about what else is in the tree, unless the attacker
            # already knows (or can brute-force) the entire contents of the tree.
            #
            # While not perfect - a user-provided persistent key would prevent the
            # attacker from being able to brute-force the contents - this option
            # has the advantage of being possible to calculate deterministically
            # using only the tree itself, removing the need to keep secret keys
            # that can easily be lost.
            #
            # First, calculate a nonce_key that depends on the entire contents of
            # the tree. The 8-byte tag ensures the key calculated is unique for
            # this purpose.
            contents_sum = b''.join(stamp.msg for item, stamp in self.contents
                                    ) + b'\x01\x89\x08\x0c\xfb\xd0\xe8\x08'
            nonce_key = tree_hash_op.hash_fd(io.BytesIO(contents_sum))

            # Second, calculate per-item nonces deterministically from that key,
            # and add those nonces to the timestamps of every item in the tree.
            #
            # While we usually use 128-bit nonces, here we're using full-length
            # nonces. Additionally, we pick append/prepend pseudo-randomly. This
            # helps obscure the directory structure, as a commitment for a git tree
            # is indistinguishable from an inner node in the per-git-tree merkle
            # tree.
            def deterministically_nonce_stamp(private_stamp):
                nonce1 = tree_hash_op(private_stamp.msg + nonce_key)
                nonce2 = tree_hash_op(nonce1)

                side = OpPrepend if nonce1[0] & 0b1 else OpAppend
                nonce_added = private_stamp.ops.add(side(nonce2))
                return nonce_added.ops.add(tree_hash_op)

            nonced_contents = (deterministically_nonce_stamp(stamp)
                               for item, stamp in self.contents)

            # Note how the current algorithm, if asked to timestamp a tree
            # with a single thing in it, will return the hash of that thing
            # directly. From the point of view of just committing to the data that's
            # perfectly fine, and probably (slightly) better as it reveals a little
            # less information about directory structure.
            self.timestamp = make_merkle_tree(
                nonced_stamp for nonced_stamp in nonced_contents)

        elif len(self.contents) == 1:
            # If there's only one item in the tree, the fancy all-or-nothing
            # transform above is just a waste of ops, so use the tree contents
            # directly instead.
            self.timestamp = tuple(self.contents)[0][1]

        else:
            raise AssertionError("Empty git tree")
Example 8
def cat_sha256d(left, right):
    sha256_timestamp = cat_sha256(left, right)
    return sha256_timestamp.ops.add(OpSHA256())
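
cat_sha256d builds on a cat_sha256 helper that commits to the concatenation of two timestamps with a single SHA256. A sketch of what that step presumably looks like (an illustration, not a confirmed copy of the library's code): the append and prepend ops let either input independently reach the shared combined digest.

def cat_sha256(left, right):
    # Sketch of the helper cat_sha256d relies on: the combined digest is
    # SHA256(left.msg || right.msg), reachable from the left timestamp via an
    # append op and from the right timestamp via a prepend op, with both op
    # chains pointing at the same result object.
    result = Timestamp(OpSHA256()(left.msg + right.msg))
    left.ops.add(OpAppend(right.msg)).ops[OpSHA256()] = result
    right.ops.add(OpPrepend(left.msg)).ops[OpSHA256()] = result
    return result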
Example 9
    def str_tree(self, indent=0, verbosity=0):
        """Convert to tree (for debugging)"""
        class bcolors:
            HEADER = '\033[95m'
            OKBLUE = '\033[94m'
            OKGREEN = '\033[92m'
            WARNING = '\033[93m'
            FAIL = '\033[91m'
            ENDC = '\033[0m'
            BOLD = '\033[1m'
            UNDERLINE = '\033[4m'

        def str_result(verb, parameter, result):
            rr = ""
            if verb > 0 and result is not None:
                rr += " == "
                result_hex = b2x(result)
                if parameter is not None:
                    parameter_hex = b2x(parameter)
                    try:
                        index = result_hex.index(parameter_hex)
                        parameter_hex_highlight = bcolors.BOLD + parameter_hex + bcolors.ENDC
                        if index == 0:
                            rr += parameter_hex_highlight + result_hex[
                                index + len(parameter_hex):]
                        else:
                            rr += result_hex[0:index] + parameter_hex_highlight
                    except ValueError:
                        rr += result_hex
                else:
                    rr += result_hex

            return rr

        r = ""
        if len(self.attestations) > 0:
            for attestation in sorted(self.attestations):
                r += " " * indent + "verify %s" % str(
                    attestation) + str_result(verbosity, self.msg, None) + "\n"
                if attestation.__class__ == BitcoinBlockHeaderAttestation:
                    r += " " * indent + "# Bitcoin block merkle root " + b2lx(
                        self.msg) + "\n"

        if len(self.ops) > 1:
            for op, timestamp in sorted(self.ops.items()):
                try:
                    CTransaction.deserialize(self.msg)
                    r += " " * indent + "* Bitcoin transaction id " + b2lx(
                        OpSHA256()(OpSHA256()(self.msg))) + "\n"
                except SerializationError:
                    pass
                cur_res = op(self.msg)
                cur_par = op[0]
                r += " " * indent + " -> " + "%s" % str(op) + str_result(
                    verbosity, cur_par, cur_res) + "\n"
                r += timestamp.str_tree(indent + 4, verbosity=verbosity)
        elif len(self.ops) > 0:
            try:
                CTransaction.deserialize(self.msg)
                r += " " * indent + "# Bitcoin transaction id " + \
                     b2lx(OpSHA256()(OpSHA256()(self.msg))) + "\n"
            except SerializationError:
                pass
            op = tuple(self.ops.keys())[0]
            cur_res = op(self.msg)
            cur_par = op[0] if len(op) > 0 else None
            r += " " * indent + "%s" % str(op) + str_result(
                verbosity, cur_par, cur_res) + "\n"
            r += tuple(self.ops.values())[0].str_tree(indent,
                                                      verbosity=verbosity)

        return r
Example 10
def hash_signed_commit(git_commit, gpg_sig):
    return OpSHA256()(OpSHA256()(git_commit) + OpSHA256()(gpg_sig))
Example 11
def main():
    parser = otsclient.args.make_common_options_arg_parser()

    parser.add_argument("-g",
                        "--gpg-program",
                        action="store",
                        default="/usr/bin/gpg",
                        help="Path to the GnuPG binary (default %(default)s)")

    parser.add_argument(
        '-c',
        '--calendar',
        metavar='URL',
        dest='calendar_urls',
        action='append',
        type=str,
        default=[
            "https://calendar.bitmark.one",
            "https://a.pool.opentimestamps.org",
            "https://b.pool.opentimestamps.org",
            "https://a.pool.eternitywall.com", "https://ots.btc.catallaxy.com"
        ],
        help=
        'Create timestamp with the aid of a remote calendar. May be specified multiple times. Default: %(default)r'
    )
    parser.add_argument(
        '-b',
        '--btc-wallet',
        dest='use_btc_wallet',
        action='store_true',
        help='Create timestamp locally with the local Bitcoin wallet.')
    parser.add_argument("gpgargs",
                        nargs=argparse.REMAINDER,
                        help='Arguments passed to GnuPG binary')

    parser.add_argument("--timeout",
                        type=int,
                        default=5,
                        help="Timeout before giving up on a calendar. "
                        "Default: %(default)d")

    parser.add_argument("-m",
                        type=int,
                        default="2",
                        help="Commitments are sent to remote calendars,"
                        "in the event of timeout the timestamp is considered "
                        "done if at least M calendars replied. "
                        "Default: %(default)s")

    parser.add_argument('--rehash-trees',
                        action='store_true',
                        help=argparse.SUPPRESS)

    args = otsclient.args.handle_common_options(parser.parse_args(), parser)

    logging.basicConfig(format='ots: %(message)s')

    args.verbosity = args.verbose - args.quiet
    if args.verbosity == 0:
        logging.root.setLevel(logging.INFO)
    elif args.verbosity > 0:
        logging.root.setLevel(logging.DEBUG)
    elif args.verbosity == -1:
        logging.root.setLevel(logging.WARNING)
    elif args.verbosity < -1:
        logging.root.setLevel(logging.ERROR)

    if len(args.gpgargs) == 0 or args.gpgargs[0] != '--':
        parser.error("You need to have '--' as the last argument; see docs")

    args.gpgargs = args.gpgargs[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("-bsau", action="store")
    parser.add_argument("--verify", action="store")
    gpgargs = parser.parse_known_args(args.gpgargs)[0]

    if gpgargs.bsau:
        with subprocess.Popen([args.gpg_program] + args.gpgargs,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE) as gpg_proc:
            logging.debug("Reading Git commit")
            git_commit = sys.stdin.buffer.read()

            logging.debug("Git commit: %r" % git_commit)

            # FIXME: can this fail to write all bytes?
            n = gpg_proc.stdin.write(git_commit)
            logging.debug("Wrote %d bytes to GnuPG out of %d" %
                          (n, len(git_commit)))
            gpg_proc.stdin.close()

            gpg_sig = gpg_proc.stdout.read()

            # GnuPG produces no output on failure
            if not gpg_sig:
                sys.exit(1)

            logging.debug("PGP sig: %r" % gpg_sig)

            # Timestamp the commit and tag together
            signed_commit_timestamp = Timestamp(
                hash_signed_commit(git_commit, gpg_sig))
            final_timestamp = signed_commit_timestamp

            # with git tree rehashing
            minor_version = 1

            # CWD will be the git repo, so this should get us the right one
            repo = git.Repo()

            hextree_start = None
            if git_commit.startswith(b'tree '):
                hextree_start = 5
            elif git_commit.startswith(b'object '):
                # I believe this is always a git tag
                hextree_start = 7
            else:
                raise AssertionError("Don't know what to do with %r" %
                                     git_commit)

            hextree = git_commit[hextree_start:hextree_start + 20 * 2].decode()
            tree = repo.tree(hextree)
            tree.path = ''

            tree_stamper = GitTreeTimestamper(tree)

            final_timestamp = signed_commit_timestamp.ops.add(
                OpAppend(tree_stamper.timestamp.msg)).ops.add(OpSHA256())

            otsclient.cmds.create_timestamp(final_timestamp,
                                            args.calendar_urls, args)

            if args.wait:
                # Interpreted as override by the upgrade command
                # FIXME: need to clean this bad abstraction up!
                args.calendar_urls = []
                otsclient.cmds.upgrade_timestamp(signed_commit_timestamp, args)

            sys.stdout.buffer.write(gpg_sig)
            write_ascii_armored(signed_commit_timestamp, sys.stdout.buffer,
                                minor_version)

    elif gpgargs.verify:
        # Verify
        with open(gpgargs.verify, 'rb') as gpg_sig_fd:
            gpg_sig = gpg_sig_fd.read()
            git_commit = sys.stdin.buffer.read()

            (major_version, minor_version,
             timestamp) = deserialize_ascii_armored_timestamp(
                 git_commit, gpg_sig)
            if timestamp is None:
                print("OpenTimestamps: No timestamp found", file=sys.stderr)
            else:
                good = otsclient.cmds.verify_timestamp(timestamp, args)

                if good:
                    logging.info("Good timestamp")
                else:
                    logging.warning("Could not verify timestamp!")
            sys.stderr.flush()

            logging.debug("Running GnuPG binary: %r" %
                          ([args.gpg_program] + args.gpgargs))
            with subprocess.Popen([args.gpg_program] + args.gpgargs,
                                  stdin=subprocess.PIPE) as gpg_proc:
                gpg_proc.stdin.write(git_commit)
                gpg_proc.stdin.close()
Example 12
    def __do_bitcoin(self):
        """Do Bitcoin-related maintenance"""

        # FIXME: we shouldn't have to create a new proxy each time, but with
        # current python-bitcoinlib and the RPC implementation it seems that
        # the proxy connection can timeout w/o recovering properly.
        proxy = bitcoin.rpc.Proxy()

        new_blocks = self.known_blocks.update_from_proxy(proxy)

        for (block_height, block_hash) in new_blocks:
            logging.info("New block %s at height %d" % (b2lx(block_hash), block_height))

            # Save commitments to disk that have reached min_confirmations
            confirmed_tx = self.txs_waiting_for_confirmation.pop(block_height - self.min_confirmations + 1, None)
            if confirmed_tx is not None:
                self.__save_confirmed_timestamp_tx(confirmed_tx)

            # If there already are txs waiting for confirmation at this
            # block_height, there was a reorg and those pending commitments now
            # need to be added back to the pool
            reorged_tx = self.txs_waiting_for_confirmation.pop(block_height, None)
            if reorged_tx is not None:
                # FIXME: the reorged transaction might get mined in another
                # block, so just adding the commitments for it back to the pool
                # isn't ideal, but it is safe
                logging.info('tx %s at height %d removed by reorg, adding %d commitments back to pending' % (b2lx(reorged_tx.tx.GetHash()), block_height, len(reorged_tx.commitment_timestamps)))
                for reorged_commitment_timestamp in reorged_tx.commitment_timestamps:
                    self.pending_commitments.add(reorged_commitment_timestamp.msg)

            # Check if this block contains any of the pending transactions

            try:
                block = proxy.getblock(block_hash)
            except KeyError:
                # Must have been a reorg or something, return
                logging.error("Failed to get block")
                return

            # Check all potential pending txs against this block.
            for tx in self.unconfirmed_txs:
                block_timestamp = make_timestamp_from_block(tx.tip_timestamp.msg, block, block_height)

                if block_timestamp is None:
                    continue

                # Success!
                tx.tip_timestamp.merge(block_timestamp)

                for commitment_timestamp in tx.commitment_timestamps:
                    self.pending_commitments.remove(commitment_timestamp.msg)
                    logging.debug("Removed commitment %s from pending" % b2x(commitment_timestamp.msg))

                assert self.min_confirmations > 1
                logging.info("Success! %d commitments timestamped, now waiting for %d more confirmations" %
                             (len(tx.commitment_timestamps), self.min_confirmations - 1))

                # Add pending_tx to the list of timestamp transactions that
                # have been mined, and are waiting for confirmations.
                self.txs_waiting_for_confirmation[block_height] = tx

                # Since all unconfirmed txs conflict with each other, we can clear the entire lot
                self.unconfirmed_txs.clear()

                # And finally, we can reset the last time a timestamp
                # transaction was mined to right now.
                self.last_timestamp_tx = time.time()


        time_to_next_tx = int(self.last_timestamp_tx + self.min_tx_interval - time.time())
        if time_to_next_tx > 0:
            # Minimum interval between transactions hasn't been reached, so do nothing
            logging.debug("Waiting %ds before next tx" % time_to_next_tx)
            return

        prev_tx = None
        if self.pending_commitments and not self.unconfirmed_txs:
            # Find the biggest unspent output that's confirmed
            unspent = find_unspent(proxy)

            if not len(unspent):
                logging.error("Can't timestamp; no spendable outputs")
                return

            # For the change scriptPubKey, we can save a few bytes by using
            # a pay-to-pubkey rather than the usual pay-to-pubkeyhash
            change_addr = proxy.getnewaddress()
            change_pubkey = proxy.validateaddress(change_addr)['pubkey']
            change_scriptPubKey = CScript([change_pubkey, OP_CHECKSIG])

            prev_tx = self.__create_new_timestamp_tx_template(unspent[-1]['outpoint'], unspent[-1]['amount'], change_scriptPubKey)

            logging.debug('New timestamp tx, spending output %r, value %s' % (unspent[-1]['outpoint'], str_money_value(unspent[-1]['amount'])))

        elif self.unconfirmed_txs:
            (prev_tx, prev_tip_timestamp, prev_commitment_timestamps) = self.unconfirmed_txs[-1]

        # Send the first transaction even if we don't have a new block
        if prev_tx and (new_blocks or not self.unconfirmed_txs):
            # Update the most recent timestamp transaction with new commitments
            commitment_timestamps = [Timestamp(commitment) for commitment in self.pending_commitments]

            # Remember that commitment_timestamps contains raw commitments,
            # which are longer than necessary, so we sha256 them before passing
            # them to make_merkle_tree, which concatenates whatever it gets (or
            # for that matter, returns what it gets if there's only one item for
            # the tree!)
            commitment_digest_timestamps = [stamp.ops.add(OpSHA256()) for stamp in commitment_timestamps]

            tip_timestamp = make_merkle_tree(commitment_digest_timestamps)

            sent_tx = None
            relay_feerate = self.relay_feerate
            while sent_tx is None:
                unsigned_tx = self.__update_timestamp_tx(prev_tx, tip_timestamp.msg,
                                                         proxy.getblockcount(), relay_feerate)

                fee = _get_tx_fee(unsigned_tx, proxy)
                if fee is None:
                    logging.debug("Can't determine txfee of transaction; skipping")
                    return
                if fee > self.max_fee:
                    logging.error("Maximum txfee reached!")
                    return

                r = proxy.signrawtransaction(unsigned_tx)
                if not r['complete']:
                    logging.error("Failed to sign transaction! r = %r" % r)
                    return
                signed_tx = r['tx']

                try:
                    txid = proxy.sendrawtransaction(signed_tx)
                except bitcoin.rpc.JSONRPCError as err:
                    if err.error['code'] == -26:
                        logging.debug("Err: %r" % err.error)
                        # Insufficient priority - basically means we didn't
                        # pay enough, so try again with a higher feerate
                        relay_feerate *= 2
                        continue

                    else:
                        raise err  # something else, fail!

                sent_tx = signed_tx

            if self.unconfirmed_txs:
                logging.info("Sent timestamp tx %s, replacing %s; %d total commitments" % (b2lx(sent_tx.GetHash()), b2lx(prev_tx.GetHash()), len(commitment_timestamps)))
            else:
                logging.info("Sent timestamp tx %s; %d total commitments" % (b2lx(sent_tx.GetHash()), len(commitment_timestamps)))

            self.unconfirmed_txs.append(TimestampTx(sent_tx, tip_timestamp, commitment_timestamps))
Example 13
def nonce_timestamp(private_timestamp, crypt_op=OpSHA256(), length=16):
    """Create a nonced version of a timestamp for privacy"""
    stamp2 = private_timestamp.ops.add(OpAppend(os.urandom(length)))
    return stamp2.ops.add(crypt_op)
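
A short usage sketch for nonce_timestamp (the file name and the calendar call are placeholders): the random nonce means the digest that leaves the machine no longer reveals the file's hash, while the returned op chain still links the private timestamp to whatever the calendar eventually attests.

import hashlib

# Hypothetical usage: timestamp a file digest without revealing it to a calendar.
with open("document.txt", "rb") as f:                 # placeholder file name
    private_timestamp = Timestamp(hashlib.sha256(f.read()).digest())

calendar_timestamp = nonce_timestamp(private_timestamp)

# Only the nonced digest is submitted, e.g. via a RemoteCalendar as in Example 4:
# remote_calendar.submit(calendar_timestamp.msg)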