def create_blocks(self):
    """
    Create a block with valid transactions
    """
    # create a bigchain instance
    b = Bigchain()
    stop = False

    while True:
        # read up to 1000 transactions
        validated_transactions = []
        for i in range(1000):
            try:
                tx = self.q_tx_validated.get(timeout=5)
            except queue.Empty:
                break

            # poison pill
            if tx == 'stop':
                stop = True
                break

            validated_transactions.append(tx)

        # if there are no transactions skip block creation
        if validated_transactions:
            # create block
            block = b.create_block(validated_transactions)
            self.q_block.put(block)

        if stop:
            self.q_block.put('stop')
            return

class Election:

    def __init__(self):
        self.bigchain = Bigchain()

    def check_for_quorum(self, next_vote):
        """
        Checks if block has enough invalid votes to make a decision
        """
        next_block = r.table('bigchain')\
            .get(next_vote['vote']['voting_for_block'])\
            .run(self.bigchain.conn)

        if self.bigchain.block_election_status(next_block) == self.bigchain.BLOCK_INVALID:
            return next_block

    def requeue_transactions(self, invalid_block):
        """
        Liquidates transactions from invalid blocks so they can be processed again
        """
        logger.info('Rewriting %s transactions from invalid block %s',
                    len(invalid_block['block']['transactions']),
                    invalid_block['id'])
        for tx in invalid_block['block']['transactions']:
            self.bigchain.write_transaction(tx)
        return invalid_block

def validate_transactions(self):
    """
    Checks if the incoming transactions are valid
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        self.monitor.gauge('tx_queue_gauge',
                           self.q_tx_to_validate.qsize(),
                           rate=bigchaindb.config['statsd']['rate'])
        tx = self.q_tx_to_validate.get()

        # poison pill
        if tx == 'stop':
            self.q_tx_delete.put('stop')
            self.q_tx_validated.put('stop')
            return

        self.q_tx_delete.put(tx['id'])

        with self.monitor.timer('validate_transaction',
                                rate=bigchaindb.config['statsd']['rate']):
            is_valid_transaction = b.is_valid_transaction(tx)

        if is_valid_transaction:
            self.q_tx_validated.put(tx)

def validate(self):
    """
    Checks if incoming blocks are valid or not
    """
    # create a bigchain instance. All processes should create their own
    # bigchain instance so that they all have their own connection to
    # the database
    b = Bigchain()

    logger.info('voter waiting for new blocks')
    while True:
        new_block = self.q_blocks_to_validate.get()

        # poison pill
        if new_block == 'stop':
            self.q_validated_block.put('stop')
            return

        logger.info('new_block arrived to voter')
        block_number = self.v_previous_block_number.value + 1
        validity = b.is_valid_block(new_block)

        self.q_validated_block.put((new_block,
                                    self.v_previous_block_id.value.decode(),
                                    block_number,
                                    validity))

        self.v_previous_block_id.value = new_block['id'].encode()
        self.v_previous_block_number.value = block_number

class Election:
    """Election class."""

    def __init__(self):
        self.bigchain = Bigchain()

    def check_for_quorum(self, next_vote):
        """
        Checks if block has enough invalid votes to make a decision

        Args:
            next_vote: The next vote.
        """
        next_block = self.bigchain.get_block(
            next_vote['vote']['voting_for_block'])

        block_status = self.bigchain.block_election_status(
            next_block['id'], next_block['block']['voters'])
        if block_status == self.bigchain.BLOCK_INVALID:
            return Block.from_dict(next_block)

    def requeue_transactions(self, invalid_block):
        """
        Liquidates transactions from invalid blocks so they can be processed again
        """
        logger.info('Rewriting %s transactions from invalid block %s',
                    len(invalid_block.transactions),
                    invalid_block.id)
        for tx in invalid_block.transactions:
            self.bigchain.write_transaction(tx)
        return invalid_block

def get_changefeed():
    """Create and return an ordered changefeed of blocks starting from the
    last voted block."""
    b = Bigchain()
    last_block_id = b.get_last_voted_block().id
    feed = backend.query.get_new_blocks_feed(b.connection, last_block_id)
    return Node(feed.__next__, name='changefeed')

class Election:
    """Election class."""

    def __init__(self, events_queue=None):
        self.bigchain = Bigchain()
        self.events_queue = events_queue

    def check_for_quorum(self, next_vote):
        """Checks if block has enough invalid votes to make a decision

        Args:
            next_vote: The next vote.
        """
        try:
            block_id = next_vote['vote']['voting_for_block']
            node = next_vote['node_pubkey']
        except KeyError:
            return

        next_block = self.bigchain.get_block(block_id)

        result = self.bigchain.block_election(next_block)
        self.handle_block_events(result, block_id)
        if result['status'] == self.bigchain.BLOCK_INVALID:
            return Block.from_dict(next_block)

        # Log the result
        if result['status'] != self.bigchain.BLOCK_UNDECIDED:
            msg = 'node:%s block:%s status:%s' % \
                (node, block_id, result['status'])
            # Extra data can be accessed via the log formatter.
            # See logging.dictConfig.
            logger_results.debug(msg, extra={
                'current_vote': next_vote,
                'election_result': result,
            })

    def requeue_transactions(self, invalid_block):
        """Liquidates transactions from invalid blocks so they can be
        processed again
        """
        logger.info('Rewriting %s transactions from invalid block %s',
                    len(invalid_block.transactions),
                    invalid_block.id)
        for tx in invalid_block.transactions:
            self.bigchain.write_transaction(tx)
        return invalid_block

    def handle_block_events(self, result, block_id):
        if self.events_queue:
            if result['status'] == self.bigchain.BLOCK_UNDECIDED:
                return
            elif result['status'] == self.bigchain.BLOCK_INVALID:
                event_type = EventTypes.BLOCK_INVALID
            elif result['status'] == self.bigchain.BLOCK_VALID:
                event_type = EventTypes.BLOCK_VALID

            event = Event(event_type, self.bigchain.get_block(block_id))
            self.events_queue.put(event)

def create_invalid_tx():
    """Create and return an invalid transaction.

    The transaction is invalid because it's missing the signature."""
    b = Bigchain()
    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
    return tx

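# A minimal usage sketch (illustrative, not part of the original source):
# since create_invalid_tx() never signs the transaction, validation is
# expected to reject it, i.e. is_valid_transaction() returns a falsy value.
def demo_invalid_tx_is_rejected():
    b = Bigchain()
    tx = create_invalid_tx()
    assert not b.is_valid_transaction(tx)
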
def create_write_transaction(tx_left):
    b = Bigchain()
    while tx_left > 0:
        # use uuid to prevent duplicate transactions (transactions with the same hash)
        tx = b.create_transaction(b.me, b.me, None, 'CREATE',
                                  payload={'msg': str(uuid.uuid4())})
        tx_signed = b.sign_transaction(tx, b.me_private)
        b.write_transaction(tx_signed)
        tx_left -= 1

def bootstrap(self):
    """
    Before starting to handle the new blocks received by the changefeed
    we need to handle unvoted blocks added to the bigchain while the
    process was down.

    We also need to set the previous_block_id.
    """
    b = Bigchain()
    last_voted = b.get_last_voted_block()
    self.v_previous_block_id.value = last_voted['id'].encode()

def mint_coin(user_pk):
    b = Bigchain()
    coin_id = uuid.uuid4()
    for i in range(10):
        payload = {
            'coin_id': str(coin_id),
            'coin_share': str(i)
        }
        tx = b.create_transaction(b.me, user_pk, None, 'CREATE',
                                  payload=payload)
        tx_signed = b.sign_transaction(tx, b.me_private)
        b.write_transaction(tx_signed)
        print('MINT {} {} {}'.format(tx['id'], coin_id, i))

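# Usage sketch (illustrative; it assumes crypto.generate_key_pair() returns
# a (private, public) pair, as in the ChainQuery helper later in this
# section): mint ten shares of a fresh coin to a newly generated keypair.
user_sk, user_pk = crypto.generate_key_pair()
mint_coin(user_pk)
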
def transfer_coin(user_vk, user_sk, coin_id):
    b = Bigchain()
    coins = get_owned_coins(user_vk)

    if coin_id in coins.keys():
        for tx in coins[coin_id]:
            tx_input = {'txid': tx['id'], 'cid': 0}
            tx_transfer = b.create_transaction(user_vk, b.me, tx_input, 'TRANSFER',
                                               payload=tx['transaction']['data']['payload'])
            tx_transfer_signed = b.sign_transaction(tx_transfer, user_sk)
            b.write_transaction(tx_transfer_signed)
            print('TRANSFER {} {} {}'.format(tx_transfer_signed['id'],
                                             coin_id,
                                             tx['transaction']['data']['payload']['coin_share']))

def pay_royalties(label_vk, artist_vk, tx, label_share=7, artist_share=3):
    b = Bigchain()
    payload = tx['transaction']['data']['payload']
    tx_input = {'txid': tx['id'], 'cid': 0}

    # shares numbered below `artist_share` go to the artist,
    # the remaining shares go to the label
    if int(payload['coin_share']) < artist_share:
        new_owner = artist_vk
    else:
        new_owner = label_vk

    tx_royalties = b.create_transaction(b.me, new_owner, tx_input, 'TRANSFER',
                                        payload=payload)
    tx_royalties_signed = b.sign_transaction(tx_royalties, b.me_private)
    b.write_transaction(tx_royalties_signed)
    print('ROYALTIES {} {} {} {}'.format(tx_royalties['id'],
                                         payload['coin_id'],
                                         payload['coin_share'],
                                         new_owner))

def requeue_transactions(self):
    """
    Liquidates transactions from invalid blocks so they can be processed again
    """
    while True:
        invalid_block = self.q_invalid_blocks.get()

        # poison pill
        if invalid_block == 'stop':
            logger.info('clean exit')
            return

        b = Bigchain()
        for tx in invalid_block['block']['transactions']:
            b.write_transaction(tx)

def write_blocks(self):
    """
    Write blocks to the bigchain
    """
    # create bigchain instance
    b = Bigchain()

    # Write blocks
    while True:
        block = self.q_block.get()

        # poison pill
        if block == 'stop':
            return

        b.write_block(block)

def check_for_quorum(self):
    """
    Checks if block has enough invalid votes to make a decision
    """
    b = Bigchain()

    while True:
        next_block = self.q_block_new_vote.get()

        # poison pill
        if next_block == 'stop':
            self.q_invalid_blocks.put('stop')
            logger.info('clean exit')
            return

        if b.block_election_status(next_block) == 'invalid':
            self.q_invalid_blocks.put(next_block)

def __init__(self):
    """Initialize the Block voter."""
    # Since we cannot share a connection to RethinkDB using multiprocessing,
    # we need to create a temporary instance of BigchainDB that we use
    # only to query RethinkDB
    self.consensus = BaseConsensusRules

    # This is the Bigchain instance that will be "shared" (aka: copied)
    # by all the subprocesses
    self.bigchain = Bigchain()
    self.last_voted_id = Bigchain().get_last_voted_block().id

    self.counters = Counter()
    self.validity = {}

    self.invalid_dummy_tx = Transaction.create([self.bigchain.me],
                                               [([self.bigchain.me], 1)])

def write_blocks(self):
    """
    Write blocks to the bigchain
    """
    # create bigchain instance
    b = Bigchain()

    # Write blocks
    while True:
        block = self.q_block.get()

        # poison pill
        if block == 'stop':
            return

        with self.monitor.timer('write_block'):
            b.write_block(block)

def delete_databases(dbnames=None):
    # avoid a mutable default argument: a shared list would persist
    # across calls
    dbnames = dbnames or []
    b = Bigchain()
    for dbname in dbnames:
        logger.info('Dropping database: {}'.format(dbname))
        try:
            r.db_drop(dbname).run(b.conn)
        except r.ReqlOpFailedError as e:
            logger.info(e.message)

def __init__(self, new_blocks):
    """
    Create a new BlockStream instance.

    Args:
        new_blocks (queue): a queue of new blocks
    """
    b = Bigchain()
    self.new_blocks = new_blocks
    # TODO: there might be duplicate blocks since we *first* get the
    # changefeed and only *then* we query the database to get the old
    # blocks.

    # TODO how about a one liner, something like:
    # self.unvoted_blocks = b.get_unvoted_blocks() if not b.nodes_except_me else []
    self.unvoted_blocks = []
    if not b.nodes_except_me:
        self.unvoted_blocks = b.get_unvoted_blocks()

def get_owned_coin_shares_by_id(user_vk, coin_id):
    b = Bigchain()

    # get the coins
    coins = r.table('bigchain')\
        .concat_map(lambda doc: doc['block']['transactions'])\
        .filter(lambda tx: tx['transaction']['conditions']
                .contains(lambda c: c['new_owners'].contains(user_vk)))\
        .group(r.row['transaction']['data']['payload']['coin_id'])\
        .run(b.conn)

    # make sure the coin was not already spent; iterate over a copy so
    # spent shares can be removed from `coins` while looping. Note: the
    # loop variable must not shadow the `coin_id` argument, and each tx
    # has to be checked against its own id (not the first tx's id).
    tmp_coins = deepcopy(coins)
    for owned_coin_id, txs in tmp_coins.items():
        for tx in txs:
            tx_input = {'txid': tx['id'], 'cid': 0}
            if b.get_spent(tx_input):
                coins[owned_coin_id].remove(tx)

    return coins.get(coin_id, [])

def __init__(self, new_blocks):
    """
    Create a new BlockStream instance.

    Args:
        new_blocks (queue): a queue of new blocks
    """
    b = Bigchain()
    self.new_blocks = new_blocks
    # TODO: there might be duplicate blocks since we *first* get the
    # changefeed and only *then* we query the database to get the old
    # blocks.

    # TODO how about a one liner, something like:
    # self.unvoted_blocks = b.get_unvoted_blocks() if not b.federation_nodes else []
    self.unvoted_blocks = []
    if not b.federation_nodes:
        self.unvoted_blocks = b.get_unvoted_blocks()

def vote(self):
    """
    Votes on the block based on the decision of the validation
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        elem = self.q_validated_block.get()

        # poison pill
        if elem == 'stop':
            self.q_voted_block.put('stop')
            return

        validated_block, previous_block_id, block_number, decision = elem
        vote = b.vote(validated_block, previous_block_id, decision)
        self.q_voted_block.put((validated_block, vote, block_number))

def update_block(self):
    """
    Appends the vote in the bigchain table
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        elem = self.q_voted_block.get()

        # poison pill
        if elem == 'stop':
            logger.info('clean exit')
            return

        block, vote, block_number = elem
        logger.info('updating block %s with number %s and with vote %s',
                    block['id'], block_number, vote)
        b.write_vote(block, vote, block_number)

def initial():
    """Return old transactions from the backlog."""
    bigchain = Bigchain()

    return bigchain.connection.run(
        r.table('backlog')
        .between([bigchain.me, r.minval],
                 [bigchain.me, r.maxval],
                 index='assignee__transaction_timestamp')
        .order_by(index=r.asc('assignee__transaction_timestamp')))

def validate_transactions(self):
    """
    Checks if the incoming transactions are valid
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        tx = self.q_tx_to_validate.get()

        # poison pill
        if tx == 'stop':
            self.q_tx_delete.put('stop')
            self.q_tx_validated.put('stop')
            return

        self.q_tx_delete.put(tx['id'])
        if b.is_valid_transaction(tx):
            self.q_tx_validated.put(tx)

def update_block(self):
    """
    Appends the vote in the bigchain table
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        elem = self.q_voted_block.get()

        # poison pill
        if elem == 'stop':
            logger.info('clean exit')
            return

        block, vote = elem
        pretty_vote = 'valid' if vote['vote']['is_block_valid'] else 'invalid'
        logger.info('voting %s for block %s', pretty_vote, block['id'])
        b.write_vote(block, vote)

def __init__(self, timeout=5, backlog_reassign_delay=None):
    """Initialize StaleTransaction monitor

    Args:
        timeout: how often to check for stale tx (in sec)
        backlog_reassign_delay: How stale a transaction should
            be before reassignment (in sec). If supplied, overrides
            the Bigchain default value.
    """
    self.bigchain = Bigchain(backlog_reassign_delay=backlog_reassign_delay)
    self.timeout = timeout

def validate_transactions(self):
    """
    Checks if the incoming transactions are valid
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        monitor.gauge('tx_queue_gauge',
                      self.q_tx_to_validate.qsize(),
                      rate=bigchaindb.config['statsd']['rate'])
        tx = self.q_tx_to_validate.get()

        # poison pill
        if tx == 'stop':
            self.q_tx_delete.put('stop')
            self.q_tx_validated.put('stop')
            return

        self.q_tx_delete.put(tx['id'])
        if b.is_valid_transaction(tx):
            self.q_tx_validated.put(tx)

def __init__(self):
    # you can switch the db by initializing Bigchain(dbname=your_chosen_dbname)
    self.read_mode = 'majority'
    self.durability = 'soft'
    self.bigchain = Bigchain()
    self.db = bigchaindb.config['database']['name']
    self.tables = ['bigchain', 'votes', 'backlog']
    self.prefix_index = None
    self.index = 'timestamp'
    self.left_bound = 'open'
    self.right_bound = 'closed'
    self.limit = 100

def update_block(self):
    """
    Appends the vote in the bigchain table
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        elem = self.q_voted_block.get()

        # poison pill
        if elem == 'stop':
            logger.info('clean exit')
            return

        block, vote = elem
        pretty_vote = 'valid' if vote['vote']['is_block_valid'] else 'invalid'
        logger.info('voting %s for block %s', pretty_vote, block['id'])
        b.write_vote(block, vote)

def create_app(debug=False):
    """Return an instance of the Flask application.

    Args:
        debug (bool): a flag to activate the debug mode for the app
            (default: False).
    """
    app = Flask(__name__)
    app.debug = debug
    app.config['bigchain'] = Bigchain()
    app.register_blueprint(views.basic_views, url_prefix='/api/v1')
    return app

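# Usage sketch (illustrative; the host and port below are assumptions, not
# taken from the original source): build the app and serve it with Flask's
# built-in development server.
if __name__ == '__main__':
    app = create_app(debug=True)
    app.run(host='0.0.0.0', port=9984)
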
def initial():
    """Return old transactions from the backlog."""
    b = Bigchain()

    rs = r.table('backlog')\
        .between([b.me, r.minval],
                 [b.me, r.maxval],
                 index='assignee__transaction_timestamp')\
        .order_by(index=r.asc('assignee__transaction_timestamp'))\
        .run(b.conn)
    return rs

def get_connector_accounts(db='interledger'):
    b = Bigchain()
    connector_accounts = []
    accounts_db = retrieve_accounts(b, db)

    for name, accounts in groupby(sorted(accounts_db, key=lambda d: d['name']),
                                  key=lambda d: d['name']):
        accounts = list(accounts)
        if len(accounts) == 2:
            connector_accounts.append(tuple(accounts))

    return connector_accounts

class StaleTransactionMonitor:
    """This class encapsulates the logic for re-assigning stale transactions.

    Note:
        Methods of this class will be executed in different processes.
    """

    def __init__(self, timeout=5, backlog_reassign_delay=None):
        """Initialize StaleTransaction monitor

        Args:
            timeout: how often to check for stale tx (in sec)
            backlog_reassign_delay: How stale a transaction should
                be before reassignment (in sec). If supplied, overrides
                the Bigchain default value.
        """
        self.bigchain = Bigchain(backlog_reassign_delay=backlog_reassign_delay)
        self.timeout = timeout

    def check_transactions(self):
        """Poll backlog for stale transactions

        Returns:
            txs (list): txs to be re-assigned
        """
        sleep(self.timeout)
        for tx in self.bigchain.get_stale_transactions():
            yield tx

    def reassign_transactions(self, tx):
        """Put tx back in backlog with new assignee

        Returns:
            transaction
        """
        # NOTE: Maybe this is too verbose?
        logger.info('Reassigning transaction with id %s', tx['id'])
        self.bigchain.reassign_transaction(tx)
        return tx

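# Usage sketch (illustrative, not from the original source; the timeout
# values are arbitrary): poll the backlog in a loop. check_transactions()
# sleeps for `timeout` seconds, then yields any stale transactions, which
# are handed back for reassignment.
monitor = StaleTransactionMonitor(timeout=5, backlog_reassign_delay=30)
while True:
    for tx in monitor.check_transactions():
        monitor.reassign_transactions(tx)
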
def test_get_outputs_filtered(filter_spent, filter_unspent):
    from bigchaindb.common.transaction import TransactionLink
    from bigchaindb.core import Bigchain

    with patch('bigchaindb.fastquery.FastQuery.get_outputs_by_public_key') as get_outputs:
        get_outputs.return_value = [TransactionLink('a', 1),
                                    TransactionLink('b', 2)]
        out = Bigchain().get_outputs_filtered('abc')
    get_outputs.assert_called_once_with('abc')
    filter_spent.assert_not_called()
    filter_unspent.assert_not_called()
    assert out == get_outputs.return_value

def run_reset_bigchaindb(args):
    # delete the databases for ledger args.ledger, or all of them
    b = Bigchain()

    # dbs to delete
    dbnames = []
    if args.ledger:
        dbnames = ['bigchaindb_examples_{}'.format(args.ledger)]
    elif args.all:
        regex_db = re.compile(r'^(bigchaindb_examples_\d*$)')
        for dbname in r.db_list().run(b.conn):
            if regex_db.match(dbname):
                dbnames.append(dbname)

    delete_databases(dbnames)

def validate(self):
    """
    Checks if incoming blocks are valid or not
    """
    # create a bigchain instance. All processes should create their own
    # bigchain instance so that they all have their own connection to
    # the database
    b = Bigchain()

    logger.info('voter waiting for new blocks')
    # signal initialization complete
    self.initialized.set()

    while True:
        new_block = self.q_blocks_to_validate.get()

        # poison pill
        if new_block == 'stop':
            self.q_validated_block.put('stop')
            return

        logger.info('new_block arrived to voter')

        with self.monitor.timer('validate_block'):
            # FIXME: the following check is done also in `is_valid_block`,
            # but validity can be true even if the block has already
            # a vote.
            if b.has_previous_vote(new_block):
                continue
            validity = b.is_valid_block(new_block)

        self.q_validated_block.put((new_block,
                                    self.v_previous_block_id.value.decode(),
                                    validity))

        self.v_previous_block_id.value = new_block['id'].encode()

def test_non_create_input_not_found(self, b, user_pk):
    from cryptoconditions import Ed25519Sha256
    from bigchaindb.common.exceptions import InputDoesNotExist
    from bigchaindb.common.transaction import Input, TransactionLink
    from bigchaindb.models import Transaction
    from bigchaindb import Bigchain

    # Create an input for a non existing transaction
    input = Input(Ed25519Sha256(public_key=b58decode(user_pk)),
                  [user_pk],
                  TransactionLink('somethingsomething', 0))
    tx = Transaction.transfer([input], [([user_pk], 1)],
                              asset_id='mock_asset_link')
    with pytest.raises(InputDoesNotExist):
        tx.validate(Bigchain())

def __init__(self, host=None, port=None, dbname=None, pub_key=None,
             priv_key=None, keyring=None, consensus_plugin=None):
    # avoid a mutable default argument for the keyring
    keyring = keyring if keyring is not None else []
    self.host = host
    self.port = port
    self.dbname = dbname
    self.conn = r.connect(host=host, port=port, db=dbname)
    self.bigchain = Bigchain(host=host,
                             port=port,
                             dbname=dbname,
                             public_key=pub_key,
                             private_key=priv_key,
                             keyring=keyring,
                             consensus_plugin=consensus_plugin)

def __init__(self, table, operation, prefeed=None):
    """Create a new RethinkDB ChangeFeed.

    Args:
        table (str): name of the table to listen to for changes.
        operation (str): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
            ChangeFeed.UPDATE.
        prefeed (iterable): whatever set of data you want to be published
            first.
    """
    super().__init__(name='changefeed')
    self.prefeed = prefeed if prefeed else []
    self.table = table
    self.operation = operation
    self.bigchain = Bigchain()

def map_bigchain(self):
    # listen to changes on the bigchain and redirect the changes
    # to the correct queues

    # create a bigchain instance
    b = Bigchain()

    for change in r.table('bigchain').changes().run(b.conn):
        # insert
        if change['old_val'] is None:
            self.q_new_block.put(change['new_val'])

        # delete
        elif change['new_val'] is None:
            # this should never happen in regular operation
            self.q_revert_delete.put(change['old_val'])

def __init__(self):
    """Initialize the Block voter."""
    # Since we cannot share a connection to RethinkDB using multiprocessing,
    # we need to create a temporary instance of BigchainDB that we use
    # only to query RethinkDB
    last_voted = Bigchain().get_last_voted_block()

    self.consensus = config_utils.load_consensus_plugin()

    # This is the Bigchain instance that will be "shared" (aka: copied)
    # by all the subprocesses
    self.bigchain = Bigchain()
    self.last_voted_id = last_voted['id']

    self.counters = Counter()
    self.validity = {}

    self.invalid_dummy_tx = create_invalid_tx()

def __init__(self, table, operation, prefeed=None):
    """Create a new RethinkDB ChangeFeed.

    Args:
        table (str): name of the table to listen to for changes.
        operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
            ChangeFeed.UPDATE. Combining multiple operations is possible
            using the bitwise ``|`` operator
            (e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
        prefeed (iterable): whatever set of data you want to be published
            first.
    """
    super().__init__(name='changefeed')
    self.prefeed = prefeed if prefeed else []
    self.table = table
    self.operation = operation
    self.bigchain = Bigchain()

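# Usage sketch (illustrative): as the docstring above notes, operations can
# be combined with the bitwise OR operator, e.g. to follow both inserts and
# updates on the backlog table.
feed = ChangeFeed('backlog', ChangeFeed.INSERT | ChangeFeed.UPDATE)
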
def bootstrap(self):
    """
    Get transactions from the backlog that may have been assigned to this
    node while it was offline (i.e. not listening to the changefeed)
    """
    # create bigchain instance
    b = Bigchain()

    # initialize the mongo client
    client = pymongo.MongoClient()
    # point at the backlog collection
    backlog = client.bigchain.backlog

    # create a queue to store initial results
    q_initial = mp.Queue()

    # get initial results
    # initial_results = r.table('backlog')\
    #     .between([b.me, r.minval], [b.me, r.maxval],
    #              index='assignee__transaction_timestamp')\
    #     .order_by(index=r.asc('assignee__transaction_timestamp'))\
    #     .run(b.conn)

    # At bootstrap time, i.e. when bigchaindb starts up, the backlog is
    # scanned for all the unprocessed transactions that were queued up,
    # waiting to be processed, while bigchaindb was offline. The port from
    # rethinkdb can be done with a simple cursor that retrieves all the
    # documents from the backlog, ordered by the index established in
    # 'db/utils.py'.
    initial_results = []
    # select the transactions assigned to this node, mirroring the
    # commented-out rethinkdb query above (the original $lte/$gt bounds
    # contradicted each other and could never match)
    for tx in backlog.find({'assignee': b.me}).sort('assignee', pymongo.ASCENDING):
        initial_results.append(tx)

    # add results to the queue
    for result in initial_results:
        q_initial.put(result)
    q_initial.put('stop')

    return q_initial

def create_inputs(user_public_key, amount=1, b=None):
    # 1. create the genesis block
    b = b or Bigchain()
    try:
        b.create_genesis_block()
    except bigchaindb.core.GenesisBlockAlreadyExistsError:
        pass

    # 2. create block with transactions for `USER` to spend
    transactions = []
    for i in range(amount):
        tx = b.create_transaction(b.me, user_public_key, None, 'CREATE')
        tx_signed = b.sign_transaction(tx, b.me_private)
        transactions.append(tx_signed)
        b.write_transaction(tx_signed)

    block = b.create_block(transactions)
    b.write_block(block, durability='hard')
    return block

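# Usage sketch (illustrative; it assumes crypto.generate_key_pair() returns
# a (private, public) pair): give a test user ten spendable CREATE
# transactions, all bundled into a single block.
user_sk, user_pk = crypto.generate_key_pair()
block = create_inputs(user_pk, amount=10)
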
def filter_by_assignee(self):
    """
    Handle transactions that are assigned to me
    """
    # create a bigchain instance
    b = Bigchain()

    while True:
        tx = self.q_new_transaction.get()

        # poison pill
        if tx == 'stop':
            self.q_tx_to_validate.put('stop')
            return

        if tx['assignee'] == b.me:
            tx.pop('assignee')
            self.q_tx_to_validate.put(tx)

def mint_coin(user_pk):
    b = Bigchain()
    coin_id = uuid.uuid4()
    for i in range(10):
        payload = {'coin_id': str(coin_id), 'coin_share': str(i)}
        tx = b.create_transaction(b.me, user_pk, None, 'CREATE',
                                  payload=payload)
        tx_signed = b.sign_transaction(tx, b.me_private)
        b.write_transaction(tx_signed)
        print('MINT {} {} {}'.format(tx['id'], coin_id, i))

def map_bigchain(self):
    # listen to changes on the bigchain and redirect the changes
    # to the correct queues

    # create a bigchain instance
    b = Bigchain()

    for change in r.table('bigchain').changes().run(b.conn):
        # insert
        if change['old_val'] is None:
            self.q_new_block.put(change['new_val'])

        # delete
        elif change['new_val'] is None:
            pass

        # update (new vote)
        elif change['new_val'] is not None and change['old_val'] is not None:
            self.q_block_new_vote.put(change['new_val'])

def map_backlog(self):
    # listen to changes on the backlog and redirect the changes
    # to the correct queues

    # create a bigchain instance
    b = Bigchain()

    for change in r.table('backlog').changes().run(b.conn):
        # insert
        if change['old_val'] is None:
            self.q_new_transaction.put(change['new_val'])

        # delete
        elif change['new_val'] is None:
            pass

        # update
        elif change['new_val'] is not None and change['old_val'] is not None:
            pass

def listen_payments():
    import rethinkdb as r
    r.set_loop_type('tornado')
    print('Entering listen_payments')
    global play_until
    b = Bigchain()
    conn = yield b.conn
    feed = yield r.table('bigchain').changes().run(conn)

    while (yield feed.fetch_next()):
        block = yield feed.next()
        if not block['old_val']:
            coins = get_coin_from_block(block['new_val'])
            update_shares(block['new_val'])
            if coins:
                print('received coins')
                if time.time() > play_until:
                    play_until = time.time() + 10 * len(coins)
                else:
                    play_until += 10 * len(coins)
        else:
            get_coin_shares_from_block(block['new_val'])

def main():
    for app in APPS:
        app_name = '{}'.format(app['name'])
        if 'num_accounts' in app:
            # the original format string was missing its placeholder;
            # mirror the naming used in the 'accounts' branch below
            ledger_name = 'bigchaindb_examples_{}'.format(app['ledger'])
            bigchain = Bigchain()
            accounts = retrieve_accounts(bigchain, app_name)
            assets = []
            for i in range(app['num_assets']):
                user = accounts[random.randint(0, app['num_accounts'] - 1)]
                asset = create_asset(bigchain=bigchain,
                                     user_pub=user['vk'],
                                     user_priv=user['sk'],
                                     asset=app['payload_func'](i))
                assets.append(asset)
            logging.info('{} assets initialized for app {} on ledger {}'
                         .format(len(assets), app_name, ledger_name))
        elif 'accounts' in app:
            bigchain = bigchaindb.Bigchain()
            accounts_by_name = get_accounts_by_name(
                retrieve_accounts(bigchain, app['name']))
            for account in app['accounts']:
                for ledger in account['ledgers']:
                    ledger_name = 'bigchaindb_examples_{}'.format(ledger['id'])
                    account_name = '{}-{}'.format(account['name'], ledger['id'])
                    bigchain = bigchaindb.Bigchain(dbname=ledger_name)
                    assets = []
                    for i in range(ledger['num_assets']):
                        asset = create_asset(
                            bigchain=bigchain,
                            to=accounts_by_name[account_name]['vk'],
                            payload=app['payload_func'](i))
                        assets.append(asset)
                    logging.info(
                        '{} assets initialized for account {} in app {} on ledger {}'
                        .format(len(assets), account['name'], app_name, ledger_name))

def tamper_block():
    # Cryptographic Identities Generation
    alice, bob = generate_keypair(), generate_keypair()
    print(" ")

    # Digital Asset Definition (e.g. bicycle)
    asset = Asset(data={"bicycle": {"manufacturer": "bkfab",
                                    "serial_number": "abcd1234"}})

    # Metadata Definition
    metadata = {'planet': 'earth'}

    # create transaction TODO: owners_before might be node_pubkey in v0.8.0
    tx = Transaction.create([alice.public_key],
                            [([alice.public_key], 1)],
                            metadata=metadata,
                            asset=asset)

    # sign with private key
    tx = tx.sign([alice.private_key])
    tx_id = tx.to_dict()['id']
    print("tx_id : ", tx_id)

    # create block
    b = Bigchain()
    block = b.create_block([tx])
    print("block timestamp right now : ", block.to_dict()['block']['timestamp'])

    # tamper block
    block.timestamp = '1'
    print("tamper block.timestamp to 1 : ")
    block_id = block.to_dict()['id']
    block_voters = block.to_dict()['block']['voters']
    print("block timestamp right now : ", block.to_dict()['block']['timestamp'])
    print("tamper_block_id : ", block_id)
    print("db response : ", b.write_block(block))
    sleep(delay)
    print("tamper_block status : ", b.block_election_status(block_id, block_voters))
    print("blocks_status_containing_tx : ", b.get_blocks_status_containing_tx(tx_id))
    print(" ")

class ChainQuery(object):

    def __init__(self, host=None, port=None, dbname=None, pub_key=None,
                 priv_key=None, keyring=None, consensus_plugin=None):
        # avoid a mutable default argument for the keyring
        keyring = keyring if keyring is not None else []
        self.host = host
        self.port = port
        self.dbname = dbname
        self.conn = r.connect(host=host, port=port, db=dbname)
        self.bigchain = Bigchain(host=host,
                                 port=port,
                                 dbname=dbname,
                                 public_key=pub_key,
                                 private_key=priv_key,
                                 keyring=keyring,
                                 consensus_plugin=consensus_plugin)

    # test
    def test(self):
        tables = r.db("bigchain").table_list().run(self.conn)
        # print(tables)
        return tables

    # create a key pair for a user
    def generate_key_pair(self):
        return crypto.generate_key_pair()

    # create an asset
    def create_asset(self, public_key, digital_asset_payload):
        tx = self.bigchain.create_transaction(self.bigchain.me, public_key,
                                              None, 'CREATE',
                                              payload=digital_asset_payload)
        tx_signed = self.bigchain.sign_transaction(tx,
                                                   self.bigchain.me_private)
        return self.bigchain.write_transaction(tx_signed)

    # get a transaction by payload_uuid
    def getTxid_by_payload_uuid(self, payload_uuid):
        cursor = r.table('bigchain')\
            .get_all(payload_uuid, index='payload_uuid')\
            .pluck({'block': {'transactions': 'id'}})\
            .run(self.conn)
        transactions = list(cursor)
        return transactions

    # get a transaction by payload
    def getTxid_by_payload(self, payload):
        pass

    # get the current owner of a payload (asset)
    def getOwnerofAssert(self, payload):
        return

    # get one's assets
    def get_owned_asserts(self, pub_key):
        return

    # check if a tx involves someone
    def tx_contains_one(self, tx, one_pub):
        for condition in tx['conditions']:
            if one_pub in condition['new_owners']:
                return True
        for fulfillment in tx['fulfillments']:
            if one_pub in fulfillment['current_owners']:
                return True

    # transfer an asset to another owner; the old owner creates this
    # transaction, so the old owner's pub/priv keys are needed
    def transfer_assert(self, old_owner_pub, old_owner_priv, new_owner_pub,
                        tx_id):
        tx_transfer = self.bigchain.create_transaction(old_owner_pub,
                                                       new_owner_pub,
                                                       tx_id, 'TRANSFER')
        tx_transfer_signed = self.bigchain.sign_transaction(tx_transfer,
                                                            old_owner_priv)
        # check if the transaction is valid
        check = self.bigchain.is_valid_transaction(tx_transfer_signed)
        if check:
            self.bigchain.write_transaction(tx_transfer_signed)
        else:
            logger.info('this transaction is invalid.')

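# Usage sketch (illustrative; the connection parameters are assumptions, and
# generate_key_pair() is assumed to return a (private, public) pair):
# connect, generate a keypair for a user, and create an asset owned by them.
cq = ChainQuery(host='localhost', port=28015, dbname='bigchain')
user_priv, user_pub = cq.generate_key_pair()
response = cq.create_asset(user_pub, {'msg': 'my first asset'})
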
def __init__(self):
    """Initialize the Block creator"""
    self.bigchain = Bigchain()
    self.txs = []