Code Example #1
def run_test(name, pairs):

    logger.debug('testing %s' % name)

    def _dec(x):
        if is_string(x) and x.startswith(b'0x'):
            return decode_hex(x[2:])
        return x

    pairs['in'] = [(_dec(k), _dec(v)) for k, v in pairs['in']]
    deletes = [(k, v) for k, v in pairs['in'] if v is None]

    N_PERMUTATIONS = 1000
    for i, permut in enumerate(itertools.permutations(pairs['in'])):
        if i > N_PERMUTATIONS:
            break
        t = trie.Trie(db.EphemDB())
        for k, v in permut:
            #logger.debug('updating with (%s, %s)' %(k, v))
            if v is not None:
                t.update(to_string(k), to_string(v))
            else:
                t.delete(to_string(k))
        # make sure we have deletes at the end
        for k, v in deletes:
            t.delete(to_string(k))
        if pairs['root'] != '0x' + encode_hex(t.root_hash):
            raise Exception("Mismatch: %r %r %r %r" % (
                name, pairs['root'], '0x' + encode_hex(t.root_hash), (i, list(permut) + deletes)))
Code Example #2
File: refcount_db.py Project: biblecoin/pybiblecoin
 def commit_refcount_changes(self, epoch):
     # Save death row nodes
     timeout_epoch = epoch + self.ttl
     try:
         death_row_nodes = rlp.decode(
             self.db.get(b'deathrow:' + utils.to_string(timeout_epoch)))
     except BaseException:
         death_row_nodes = []
     for nodekey in self.death_row:
         refcount, val = rlp.decode(self.db.get(b'r:' + nodekey))
         if refcount == ZERO_ENCODED:
             new_refcount = utils.encode_int(DEATH_ROW_OFFSET +
                                             timeout_epoch)
             self.db.put(b'r:' + nodekey, rlp.encode([new_refcount, val]))
     if len(self.death_row) > 0:
         sys.stderr.write('%d nodes marked for pruning during block %d\n' %
                          (len(self.death_row), timeout_epoch))
     death_row_nodes.extend(self.death_row)
     self.death_row = []
     self.db.put(b'deathrow:' + utils.to_string(timeout_epoch),
                 rlp.encode(death_row_nodes))
     # Save journal
     try:
         journal = rlp.decode(
             self.db.get(b'journal:' + utils.to_string(epoch)))
     except BaseException:
         journal = []
     journal.extend(self.journal)
     self.journal = []
     self.db.put(b'journal:' + utils.to_string(epoch), rlp.encode(journal))
Code Example #3
 def reorganize_head_to(self, block):
     log.info('Replacing head')
     b = block
     new_chain = {}
     while b.header.number >= int(self.db.get(b'GENESIS_NUMBER')):
         new_chain[b.header.number] = b
         key = b'block:' + to_string(b.header.number)
         orig_at_height = self.db.get(key) if key in self.db else None
         if orig_at_height == b.header.hash:
             break
         if b.prevhash not in self.db or self.db.get(
                 b.prevhash) == b'GENESIS':
             break
         b = self.get_parent(b)
     replace_from = b.header.number
     for i in itertools.count(replace_from):
         log.info('Rewriting height %d' % i)
         key = b'block:' + to_string(i)
         orig_at_height = self.db.get(key) if key in self.db else None
         if orig_at_height:
             self.db.delete(key)
             orig_block_at_height = self.get_block(orig_at_height)
             for tx in orig_block_at_height.transactions:
                 if b'txindex:' + tx.hash in self.db:
                     self.db.delete(b'txindex:' + tx.hash)
         if i in new_chain:
             new_block_at_height = new_chain[i]
             self.db.put(key, new_block_at_height.header.hash)
             for j, tx in enumerate(new_block_at_height.transactions):
                 self.db.put(b'txindex:' + tx.hash,
                             rlp.encode([new_block_at_height.number, j]))
         if i not in new_chain and not orig_at_height:
             break
     self.head_hash = block.header.hash
     self.state = self.mk_poststate_of_blockhash(block.hash)
Code Example #4
File: pruning_trie.py Project: biblecoin/pybiblecoin
    def _iter_branch(self, node):
        """yield (key, value) stored in this and the descendant nodes
        :param node: node in form of list, or BLANK_NODE

        .. note::
            Here key is in full form, rather than key of the individual node
        """
        if node == BLANK_NODE:
            return  # end the generator; raising StopIteration here breaks under PEP 479

        node_type = self._get_node_type(node)

        if is_key_value_type(node_type):
            nibbles = without_terminator(unpack_to_nibbles(node[0]))
            key = b'+'.join([to_string(x) for x in nibbles])
            if node_type == NODE_TYPE_EXTENSION:
                sub_tree = self._iter_branch(self._decode_to_node(node[1]))
            else:
                sub_tree = [(to_string(NIBBLE_TERMINATOR), node[1])]

            # prepend key of this node to the keys of children
            for sub_key, sub_value in sub_tree:
                full_key = (key + b'+' + sub_key).strip(b'+')
                yield (full_key, sub_value)

        elif node_type == NODE_TYPE_BRANCH:
            for i in range(16):
                sub_tree = self._iter_branch(self._decode_to_node(node[i]))
                for sub_key, sub_value in sub_tree:
                    full_key = (str_to_bytes(str(i)) + b'+' +
                                sub_key).strip(b'+')
                    yield (full_key, sub_value)
            if node[16]:
                yield (to_string(NIBBLE_TERMINATOR), node[-1])
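
The docstring above notes that keys are yielded in "full form": each byte of the original key is split into two hex nibbles, and the nibbles are joined with b'+'. A minimal standalone sketch of that mapping (illustrative only, not the project's unpack_to_nibbles/bin_to_nibbles helpers):

def to_nibbles_sketch(key):
    # each byte contributes a high and a low 4-bit nibble
    nibbles = []
    for byte in key:
        nibbles.append(byte // 16)
        nibbles.append(byte % 16)
    return nibbles

# b'do' -> [6, 4, 6, 15]; _iter_branch would report this key as b'6+4+6+15'
print(to_nibbles_sketch(b'do'))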
Code Example #5
def test_basic_pruning():
    db = RefcountDB(EphemDB())
    NODES = 60

    t = pruning_trie.Trie(db)
    db.ttl = 0
    db.logging = True

    for i in range(NODES):
        t.update(to_string(i), to_string(i))
        db.commit_refcount_changes(0)
        db.cleanup(0)
        check_db_tightness([t], db)
    for i in range(NODES):
        t.update(to_string(i), to_string(i**3))
        db.commit_refcount_changes(0)
        db.cleanup(0)
        check_db_tightness([t], db)
    for i in range(NODES):
        t.delete(to_string(i))
        db.commit_refcount_changes(0)
        db.cleanup(0)
        check_db_tightness([t], db)
    assert len(t.to_dict()) == 0
    assert len(db.kv) == 0
Code Example #6
 def run():
     st = time.time()
     x = trie.Trie(db.EphemDB())
     for i in range(10000):
         x.update(to_string(i), to_string(i**3))
     print('elapsed', time.time() - st)
     return x.root_hash
Code Example #7
File: refcount_db.py Project: biblecoin/pybiblecoin
 def cleanup(self, epoch):
     try:
         death_row_node = self.db.get(b'deathrow:' + utils.to_string(epoch))
     except BaseException:
         death_row_node = rlp.encode([])
     death_row_nodes = rlp.decode(death_row_node)
     pruned = 0
     for nodekey in death_row_nodes:
         try:
             refcount, val = rlp.decode(self.db.get(b'r:' + nodekey))
             if utils.decode_int(refcount) == DEATH_ROW_OFFSET + epoch:
                 self.db.delete(b'r:' + nodekey)
                 pruned += 1
         except BaseException:
             pass
     sys.stderr.write('%d nodes successfully pruned\n' % pruned)
     # Delete the deathrow after processing it
     try:
         self.db.delete(b'deathrow:' + utils.to_string(epoch))
     except BaseException:
         pass
     # Delete journals that are too old
     try:
         self.db.delete(b'journal:' + utils.to_string(epoch - self.ttl))
     except BaseException:
         pass
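
Taken together, commit_refcount_changes (Code Example #2) and cleanup above implement delayed pruning: nodes slated for deletion are parked on a "death row" list keyed by epoch + ttl, and are only physically removed when cleanup later runs for that epoch. A toy, self-contained sketch of the pattern (a plain dict standing in for the database, not the project's RefcountDB):

class ToyPruner:
    def __init__(self, ttl):
        self.kv = {}          # pretend key/value store
        self.ttl = ttl
        self.death_row = []   # keys marked dead during the current epoch

    def mark_dead(self, key):
        self.death_row.append(key)

    def commit(self, epoch):
        # park the marked keys under the epoch at which they may be pruned
        row = self.kv.setdefault('deathrow:%d' % (epoch + self.ttl), [])
        row.extend(self.death_row)
        self.death_row = []

    def cleanup(self, epoch):
        # only now are the parked keys actually deleted
        for key in self.kv.pop('deathrow:%d' % epoch, []):
            self.kv.pop(key, None)

# With ttl=2, a key marked at epoch 0 survives cleanup(0) and cleanup(1)
# and is removed by cleanup(2).
p = ToyPruner(ttl=2)
p.kv['node-a'] = b'value'
p.mark_dead('node-a')
p.commit(0)
p.cleanup(0)
p.cleanup(1)
assert 'node-a' in p.kv
p.cleanup(2)
assert 'node-a' not in p.kv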
Code Example #8
def initialize_genesis_keys(state, genesis):
    db = state.db
    db.put(b'GENESIS_NUMBER', to_string(genesis.header.number))
    db.put(b'GENESIS_HASH', to_string(genesis.header.hash))
    db.put(b'GENESIS_STATE', json.dumps(state.to_snapshot()))
    db.put(b'GENESIS_RLP', rlp.encode(genesis))
    db.put(b'block:0', genesis.header.hash)
    db.put(b'score:' + genesis.header.hash, "0")
    db.put(b'state:' + genesis.header.hash, state.trie.root_hash)
    db.put(genesis.header.hash, b'GENESIS')
    db.commit()
Code Example #9
def test_clear():
    db = RefcountDB(EphemDB())
    NODES = 60
    t = pruning_trie.Trie(db)
    db.ttl = 0
    for i in range(NODES):
        t.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
    t.clear_all()
    db.commit_refcount_changes(NODES)
    db.cleanup(NODES)
    assert len(db.kv) == 0
Code Example #10
def run_test(name):

    logger.debug('testing %s' % name)
    t = trie.Trie(EphemDB())
    data = load_tests()[name]

    for k in data['in']:
        logger.debug('updating with (%s, %s)' % (k, k))
        k = to_string(k)
        t.update(k, k)
    for point, prev, nxt in data['tests']:
        assert to_string(nxt) == (t.next(point) or b'')
        assert to_string(prev) == (t.prev(point) or b'')
Code Example #11
def test_two_trees():
    db = RefcountDB(EphemDB())
    NODES = 60
    t1 = pruning_trie.Trie(db)
    t2 = pruning_trie.Trie(db)
    db.ttl = 0
    for i in range(NODES):
        t1.update(to_string(i), to_string(i))
        if i < NODES // 2:
            t2.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
        check_db_tightness([t1, t2], db)
    for i in range(NODES):
        sys.stderr.write('clearing: %d\n' % i)
        t1.delete(to_string(NODES - 1 - i))
        db.commit_refcount_changes(NODES + i)
        db.cleanup(NODES + i)
        check_db_tightness([t1, t2], db)
    assert t2.to_dict() == {
        to_string(i): to_string(i)
        for i in range(NODES // 2)
    }
    for i in range(NODES // 2):
        t2.delete(to_string(i))
        db.commit_refcount_changes(NODES * 2 + i)
        db.cleanup(NODES * 2 + i)
        check_db_tightness([t1, t2], db)
    assert len(db.kv) == 0
Code Example #12
def compile_code(sourcecode,
                 libraries=None,
                 combined='bin,abi',
                 optimize=True,
                 extra_args=None):
    args = solc_arguments(libraries=libraries,
                          combined=combined,
                          optimize=optimize,
                          extra_args=extra_args)
    compiler = get_compiler_path()
    if compiler is None:
        raise SolcMissing("solc not found")
    args.insert(0, compiler)

    process = subprocess.Popen(args,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdoutdata, stderrdata = process.communicate(
        input=utils.to_string(sourcecode))

    if process.returncode != 0:
        raise CompileError(stderrdata)

    return solc_parse_output(stdoutdata)
Code Example #13
File: refcount_db.py Project: biblecoin/pybiblecoin
 def revert_refcount_changes(self, epoch):
     timeout_epoch = epoch + self.ttl
     # Delete death row additions
     try:
         self.db.delete(b'deathrow:' + utils.to_string(timeout_epoch))
     except BaseException:
         pass
     # Revert journal changes
     try:
         journal = rlp.decode(
             self.db.get(b'journal:' + utils.to_string(epoch)))
         for new_refcount, hashkey in journal[::-1]:
             node_object = rlp.decode(self.db.get(b'r:' + hashkey))
             self.db.put(b'r:' + hashkey,
                         rlp.encode([new_refcount, node_object[1]]))
     except BaseException:
         pass
Code Example #14
def test_two_trees_with_clear():
    db = RefcountDB(EphemDB())
    NODES = 60
    t1 = pruning_trie.Trie(db)
    t2 = pruning_trie.Trie(db)
    db.ttl = NODES // 4
    for i in range(NODES):
        t1.update(to_string(i), to_string(i))
        if i < NODES // 2:
            t2.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
    t1.clear_all()
    db.cleanup(NODES)
    assert t2.to_dict() == {
        to_string(i): to_string(i)
        for i in range(NODES // 2)
    }
    for i in range(NODES // 2):
        t2.delete(to_string(i))
        db.commit_refcount_changes(NODES + i)
        db.cleanup(NODES + i)
    for i in range(NODES // 4):
        db.cleanup(NODES + NODES // 2 + i)
    assert len(db.kv) == 0
Code Example #15
 def add_block_to_head(self, block):
     log.info('Adding to head', head=encode_hex(block.header.prevhash))
     apply_block(self.state, block)
     self.db.put(b'block:' + to_string(block.header.number),
                 block.header.hash)
     self.get_pow_difficulty(block)  # side effect: put 'score:' cache in db
     self.head_hash = block.header.hash
     for i, tx in enumerate(block.transactions):
         self.db.put(b'txindex:' + tx.hash, rlp.encode([block.number, i]))
Code Example #16
def test_insert_delete():
    for a in (5, 15, 60):
        db = RefcountDB(EphemDB())
        NODES = a
        t1 = pruning_trie.Trie(db)
        db.ttl = 0
        db.logging = True
        for i in range(NODES):
            t1.update(to_string(i), to_string(i))
            db.commit_refcount_changes(i)
            db.cleanup(i)
            check_db_tightness([t1], db)
        for i in range(NODES):
            t1.delete(to_string(NODES - 1 - i))
            db.commit_refcount_changes(NODES + i)
            db.cleanup(NODES + i)
            check_db_tightness([t1], db)
        assert len(db.kv) == 0
Code Example #17
    def update(self, key, value):
        """
        :param key: a string
        :param value: a string
        """
        if not is_string(key):
            raise Exception("Key must be string")

        # if len(key) > 32:
        #     raise Exception("Max key length is 32")

        if not is_string(value):
            raise Exception("Value must be string")

        # if value == '':
        #     return self.delete(key)
        self.root_node = self._update_and_delete_storage(
            self.root_node, bin_to_nibbles(to_string(key)), to_string(value))
        self._update_root_hash()
Code Example #18
def test_trie_transfer():
    db = RefcountDB(EphemDB())
    NODES = 60
    t1 = pruning_trie.Trie(db)
    db.ttl = NODES * 2
    for i in range(NODES):
        t1.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
    t2 = pruning_trie.Trie(db)
    t2.root_hash = t1.root_hash
    assert t2.to_dict() == {to_string(i): to_string(i) for i in range(NODES)}
    for i in range(NODES):
        t2.delete(to_string(i))
        db.commit_refcount_changes(NODES + i)
        db.cleanup(NODES + i)
    for i in range(NODES * 2):
        db.cleanup(2 * NODES + i)
    assert len(db.kv) == 0
Code Example #19
File: pruning_trie.py Project: biblecoin/pybiblecoin
    def _to_dict(self, node):
        """convert (key, value) stored in this and the descendant nodes
        to dict items.

        :param node: node in form of list, or BLANK_NODE

        .. note::

            Here key is in full form, rather than key of the individual node
        """
        if node == BLANK_NODE:
            return {}

        node_type = self._get_node_type(node)

        if is_key_value_type(node_type):
            nibbles = without_terminator(unpack_to_nibbles(node[0]))
            key = b'+'.join([to_string(x) for x in nibbles])
            if node_type == NODE_TYPE_EXTENSION:
                sub_dict = self._to_dict(self._decode_to_node(node[1]))
            else:
                sub_dict = {to_string(NIBBLE_TERMINATOR): node[1]}

            # prepend key of this node to the keys of children
            res = {}
            for sub_key, sub_value in sub_dict.items():
                full_key = (key + b'+' + sub_key).strip(b'+')
                res[full_key] = sub_value
            return res

        elif node_type == NODE_TYPE_BRANCH:
            res = {}
            for i in range(16):
                sub_dict = self._to_dict(self._decode_to_node(node[i]))

                for sub_key, sub_value in sub_dict.items():
                    full_key = (str_to_bytes(str(i)) + b'+' +
                                sub_key).strip(b'+')
                    res[full_key] = sub_value

            if node[16]:
                res[to_string(NIBBLE_TERMINATOR)] = node[-1]
            return res
Code Example #20
def vm_trace(ext, msg, compustate, opcode, pushcache, tracer=log_vm_op):
    """
    This diverges from normal logging, as we use the logging namespace
    only to decide which features get logged in 'bible.vm.op'
    i.e. tracing can not be activated by activating a sub
    like 'bible.vm.op.stack'
    """

    op, in_args, out_args, fee = opcodes.opcodes[opcode]

    trace_data = {}
    trace_data['stack'] = list(map(to_string, list(compustate.prev_stack)))
    if compustate.prev_prev_op in ('MLOAD', 'MSTORE', 'MSTORE8', 'SHA3', 'CALL',
                                   'CALLCODE', 'CREATE', 'CALLDATACOPY',
                                   'CODECOPY', 'EXTCODECOPY'):
        if len(compustate.prev_memory) < 4096:
            trace_data['memory'] = \
                ''.join([encode_hex(ascii_chr(x)) for x
                          in compustate.prev_memory])
        else:
            trace_data['sha3memory'] = \
                encode_hex(utils.sha3(b''.join([ascii_chr(x) for
                                      x in compustate.prev_memory])))
    if compustate.prev_prev_op in ('SSTORE',) or compustate.steps == 0:
        trace_data['storage'] = ext.log_storage(msg.to)
    trace_data['gas'] = to_string(compustate.prev_gas)
    trace_data['gas_cost'] = to_string(compustate.prev_gas - compustate.gas)
    trace_data['fee'] = fee
    trace_data['inst'] = opcode
    trace_data['pc'] = to_string(compustate.prev_pc)
    if compustate.steps == 0:
        trace_data['depth'] = msg.depth
        trace_data['address'] = msg.to
    trace_data['steps'] = compustate.steps
    trace_data['depth'] = msg.depth
    if op[:4] == 'PUSH':
        trace_data['pushvalue'] = pushcache[compustate.prev_pc]
    tracer.trace('vm', op=op, **trace_data)
    compustate.steps += 1
    compustate.prev_prev_op = op
Code Example #21
def test_revert_deletes():
    db = RefcountDB(EphemDB())
    NODES = 60
    t1 = pruning_trie.Trie(db)
    db.ttl = NODES * 2
    for i in range(NODES):
        t1.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
    x = t1.root_hash
    for i in range(NODES):
        t1.delete(to_string(i))
        db.commit_refcount_changes(NODES + i)
        db.cleanup(NODES + i)
    for i in range(NODES * 2 - 1, NODES - 1, -1):
        db.revert_refcount_changes(i)
    for i in range(NODES * 2):
        db.cleanup(NODES + i)
        db.revert_refcount_changes(i)
    t1.root_hash = x
    assert t1.to_dict() == {to_string(i): to_string(i) for i in range(NODES)}
Code Example #22
    def decode_event(self, log_topics, log_data):
        """ Return a dictionary representation the log.

        Note:
            This function won't work with anonymous events.

        Args:
            log_topics (List[bin]): The log's indexed arguments.
            log_data (bin): The encoded non-indexed arguments.
        """
        # https://github.com/biblecoin/wiki/wiki/Biblecoin-Contract-ABI#function-selector-and-argument-encoding

        # topics[0]: keccak(EVENT_NAME+"("+EVENT_ARGS.map(canonical_type_of).join(",")+")")
        # If the event is declared as anonymous the topics[0] is not generated;
        if not len(log_topics) or log_topics[0] not in self.event_data:
            raise ValueError('Unknown log type')

        event_id_ = log_topics[0]

        event = self.event_data[event_id_]

        # data: abi_serialise(EVENT_NON_INDEXED_ARGS)
        # EVENT_NON_INDEXED_ARGS is the series of EVENT_ARGS that are not
        # indexed, abi_serialise is the ABI serialisation function used for
        # returning a series of typed values from a function.
        unindexed_types = [
            type_
            for type_, indexed in zip(event['types'], event['indexed'])
            if not indexed
        ]
        unindexed_args = decode_abi(unindexed_types, log_data)

        # topics[n]: EVENT_INDEXED_ARGS[n - 1]
        # EVENT_INDEXED_ARGS is the series of EVENT_ARGS that are indexed
        indexed_count = 1  # skip topics[0]

        result = {}
        for name, type_, indexed in zip(
                event['names'], event['types'], event['indexed']):
            if indexed:
                topic_bytes = utils.zpad(
                    utils.encode_int(log_topics[indexed_count]),
                    32,
                )
                indexed_count += 1
                value = decode_single(process_type(type_), topic_bytes)
            else:
                value = unindexed_args.pop(0)

            result[name] = value
        result['_event_type'] = utils.to_string(event['name'])

        return result
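
The comments above describe topics[0] as the keccak-256 hash of the canonical event signature. A hedged sketch of that derivation, using pycryptodome's keccak purely for illustration (the project computes the same hash with its own utils.sha3):

from Crypto.Hash import keccak  # pycryptodome, assumed available here

def event_id_sketch(name, arg_types):
    signature = '%s(%s)' % (name, ','.join(arg_types))
    return keccak.new(digest_bits=256, data=signature.encode()).hexdigest()

# The well-known ERC-20 Transfer event, for example:
print(event_id_sketch('Transfer', ['address', 'address', 'uint256']))
# ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef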
Code Example #23
def test_delayed_pruning():
    NODES = 60
    db = RefcountDB(EphemDB())
    t = pruning_trie.Trie(db)
    db.ttl = NODES // 4
    for i in range(NODES):
        t.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
    for i in range(NODES):
        t.update(to_string(i), to_string(i**3))
        db.commit_refcount_changes(i + NODES)
        db.cleanup(i + NODES)
    for i in range(NODES):
        t.delete(to_string(i))
        db.commit_refcount_changes(i + NODES * 2)
        db.cleanup(i + NODES * 2)
    for i in range(NODES // 4):
        db.cleanup(i + NODES * 3)
    assert len(t.to_dict()) == 0
    assert len(db.kv) == 0
Code Example #24
    def delete(self, key):
        """
        :param key: a string with length of [0, 32]
        """
        if not is_string(key):
            raise Exception("Key must be string")

        if len(key) > 32:
            raise Exception("Max key length is 32")

        self.root_node = self._delete_and_delete_storage(
            self.root_node, bin_to_nibbles(to_string(key)))
        self._update_root_hash()
Code Example #25
def test_revert_adds():
    db = RefcountDB(EphemDB())
    NODES = 60
    t1 = pruning_trie.Trie(db)
    t2 = pruning_trie.Trie(db)
    db.ttl = NODES * 2
    for i in range(NODES):
        t1.update(to_string(i), to_string(i))
        db.commit_refcount_changes(i)
        db.cleanup(i)
    for i in range(NODES):
        t2.update(to_string(i), to_string(i))
        db.commit_refcount_changes(NODES + i)
        db.cleanup(NODES + i)
    for i in range(NODES * 2 - 1, NODES - 1, -1):
        db.revert_refcount_changes(i)
    for i in range(NODES):
        t1.delete(to_string(i))
        db.commit_refcount_changes(NODES + i)
        db.cleanup(NODES + i)
    for i in range(NODES * 2):
        db.cleanup(NODES * 2 + i)
    assert len(db.kv) == 0
Code Example #26
 def to_dict(self):
     """Serialize the header to a readable dictionary."""
     d = {}
     for field in ('prevhash', 'uncles_hash', 'extra_data', 'nonce',
                   'mixhash'):
         d[field] = '0x' + encode_hex(getattr(self, field))
     for field in ('state_root', 'tx_list_root', 'receipts_root',
                   'coinbase'):
         d[field] = encode_hex(getattr(self, field))
     for field in ('number', 'difficulty', 'gas_limit', 'gas_used',
                   'timestamp'):
         d[field] = utils.to_string(getattr(self, field))
     d['bloom'] = encode_hex(int256.serialize(self.bloom))
     assert len(d) == len(BlockHeader.fields)
     return d
Code Example #27
def decint(n, signed=False):  # pylint: disable=invalid-name,too-many-branches
    """ Decode an unsigned/signed integer. """

    if isinstance(n, str):
        n = utils.to_string(n)

    if n is True:
        return 1

    if n is False:
        return 0

    if n is None:
        return 0

    if is_numeric(n):
        if signed:
            if not -TT255 <= n <= TT255 - 1:
                raise EncodingError('Number out of range: %r' % n)
        else:
            if not 0 <= n <= TT256 - 1:
                raise EncodingError('Number out of range: %r' % n)

        return n

    if is_string(n):
        if len(n) == 40:
            # 40 hex characters: a hex-encoded 20-byte address
            int_bigendian = decode_hex(n)
        else:
            int_bigendian = n  # pylint: disable=redefined-variable-type

        if len(int_bigendian) > 32:
            raise EncodingError('String too long: %r' % n)

        result = big_endian_to_int(int_bigendian)
        if signed:
            if result >= TT255:
                result -= TT256

            if not -TT255 <= result <= TT255 - 1:
                raise EncodingError('Number out of range: %r' % n)
        else:
            if not 0 <= result <= TT256 - 1:
                raise EncodingError('Number out of range: %r' % n)

        return result

    raise EncodingError('Cannot decode integer: %r' % n)
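
The string branch above is meant to treat a 40-character value as a hex-encoded 20-byte address (hex-decoded first), and anything else as raw big-endian bytes of at most 32 bytes. A standalone worked illustration of that conversion (not the project's big_endian_to_int):

from binascii import unhexlify

def big_endian_to_int_sketch(value):
    if len(value) == 40:            # hex-encoded address
        value = unhexlify(value)
    assert len(value) <= 32, 'String too long'
    return int.from_bytes(value, 'big')

print(big_endian_to_int_sketch(b'\x01\x00'))   # 256
print(big_endian_to_int_sketch('de0b295669a9fd93d5f28d9ec85e40f4cb697bae'))
# prints the address interpreted as an unsigned big-endian integer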
Code Example #28
def enc(typ, arg):
    base, sub, arrlist = typ
    type_size = get_size(typ)

    if base in ('string', 'bytes') and not sub:
        return encode_single(typ, arg)

    # Encode dynamic-sized lists via the head/tail mechanism described in
    # https://github.com/biblecoin/wiki/wiki/Proposal-for-new-ABI-value-encoding
    if type_size is None:
        assert isinstance(arg, list), \
            "Expecting a list argument"
        subtyp = base, sub, arrlist[:-1]
        subsize = get_size(subtyp)
        myhead, mytail = b'', b''
        if arrlist[-1] == []:
            myhead += enc(INT256, len(arg))
        else:
            assert len(arg) == arrlist[-1][0], \
                "Wrong array size: found %d, expecting %d" % \
                (len(arg), arrlist[-1][0])
        for i in range(len(arg)):
            if subsize is None:
                myhead += enc(INT256, 32 * len(arg) + len(mytail))
                mytail += enc(subtyp, arg[i])
            else:
                myhead += enc(subtyp, arg[i])
        return myhead + mytail
    # Encode static-sized lists via sequential packing
    else:
        if arrlist == []:
            return utils.to_string(encode_single(typ, arg))
        else:
            subtyp = base, sub, arrlist[:-1]
            o = b''
            assert len(arg) == arrlist[-1][0], "Incorrect array size"
            for x in arg:
                o += enc(subtyp, x)
            return o
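
For a dynamic uint256[], the branch above builds the encoding head-first: a 32-byte length word followed by one 32-byte big-endian word per element (the tail is only used when the elements are themselves dynamically sized). A standalone sketch of the resulting byte layout (not the project's encode_single/get_size machinery):

def encode_uint256_array_sketch(values):
    word = lambda n: n.to_bytes(32, 'big')
    return word(len(values)) + b''.join(word(v) for v in values)

encoded = encode_uint256_array_sketch([1, 2, 3])
assert len(encoded) == 32 * 4                 # one length word + 3 elements
assert encoded[:32] == (3).to_bytes(32, 'big')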
Code Example #29
 def get_blockhash_by_number(self, number):
     try:
         return self.db.get(b'block:' + to_string(number))
     except BaseException:
         return None
Code Example #30
    def __init__(self,
                 genesis=None,
                 env=None,
                 coinbase=b'\x00' * 20,
                 new_head_cb=None,
                 reset_genesis=False,
                 localtime=None,
                 **kwargs):
        self.env = env or Env()
        # Initialize the state
        if b'head_hash' in self.db:  # new head tag
            self.state = self.mk_poststate_of_blockhash(
                self.db.get(b'head_hash'))
            print('Initializing chain from saved head, #%d (%s)' %
                  (self.state.prev_headers[0].number,
                   encode_hex(self.state.prev_headers[0].hash)))
        elif genesis is None:
            raise Exception("Need genesis decl!")
        elif isinstance(genesis, State):
            assert env is None
            self.state = genesis
            self.env = self.state.env
            print('Initializing chain from provided state')
        elif "extraData" in genesis:
            self.state = state_from_genesis_declaration(genesis, self.env)
            reset_genesis = True
            print('Initializing chain from provided genesis declaration')
        elif "prev_headers" in genesis:
            self.state = State.from_snapshot(genesis, self.env)
            reset_genesis = True
            print('Initializing chain from provided state snapshot, %d (%s)' %
                  (self.state.block_number,
                   encode_hex(self.state.prev_headers[0].hash[:8])))
        else:
            print('Initializing chain from new state based on alloc')
            self.state = mk_basic_state(
                genesis, {
                    "number":
                    kwargs.get('number', 0),
                    "gas_limit":
                    kwargs.get('gas_limit', 4712388),
                    "gas_used":
                    kwargs.get('gas_used', 0),
                    "timestamp":
                    kwargs.get('timestamp', 1467446877),
                    "difficulty":
                    kwargs.get('difficulty', 2**25),
                    "hash":
                    kwargs.get('prevhash', '00' * 32),
                    "uncles_hash":
                    kwargs.get('uncles_hash',
                               '0x' + encode_hex(BLANK_UNCLES_HASH))
                }, self.env)
            reset_genesis = True

        assert self.env.db == self.state.db

        initialize(self.state)
        self.new_head_cb = new_head_cb

        self.head_hash = self.state.prev_headers[0].hash
        self.checkpoint_head_hash = b'\x00' * 32
        self.db.put(b'cp_subtree_score' + b'\x00' * 32, 2 / 3.)
        self.commit_logs = []
        self.casper_address = self.config['CASPER_ADDRESS']
        self.db.put(b'GENESIS_NUMBER', to_string(self.state.block_number))
        assert self.state.block_number == self.state.prev_headers[0].number
        if reset_genesis:
            self.genesis = Block(self.state.prev_headers[0], [], [])
            initialize_genesis_keys(self.state, self.genesis)
        else:
            self.genesis = self.get_block_by_number(0)
        self.db.put(b'cp_subtree_score' + self.genesis.hash, 2 / 3.)
        self.min_gasprice = kwargs.get('min_gasprice', 5 * 10**9)
        self.coinbase = coinbase
        self.extra_data = 'moo ha ha says the laughing cow.'
        self.time_queue = []
        self.parent_queue = {}
        self.localtime = time.time() if localtime is None else localtime