def admin():
    user = session['user']
    if user['id'] not in config['admins']:
        return render_template('gitout.html')
    sort = request.args.get('sort', 'age')
    if sort == 'age_asc':
        keys = r.table('keys').order_by(r.asc('creation_time')).run(get_db())
    elif sort == 'age_desc':
        keys = r.table('keys').order_by(r.desc('creation_time')).run(get_db())
    elif sort == 'usage_asc':
        keys = r.table('keys').order_by(r.asc('total_usage')).run(get_db())
    elif sort == 'usage_desc':
        keys = r.table('keys').order_by(r.desc('total_usage')).run(get_db())
    elif sort == 'accept_asc':
        keys = r.table('keys').order_by(r.asc('acceptance_time')).run(get_db())
    elif sort == 'accept_desc':
        keys = r.table('keys').order_by(r.desc('acceptance_time')).run(get_db())
    else:
        keys = r.table('keys').order_by(r.asc('creation_time')).run(get_db())
    apps = r.table('applications').order_by('time').run(get_db())
    return render_template('admin.html', name=user['username'], apps=apps,
                           keys=keys, sort=sort)
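# A hedged refactor sketch of the sort dispatch above: map each sort value to
# its ordering term once instead of one branch per case. Names are reused from
# the snippet; the SORTS dict itself is new.
SORTS = {
    'age_asc': r.asc('creation_time'), 'age_desc': r.desc('creation_time'),
    'usage_asc': r.asc('total_usage'), 'usage_desc': r.desc('total_usage'),
    'accept_asc': r.asc('acceptance_time'), 'accept_desc': r.desc('acceptance_time'),
}
keys = r.table('keys').order_by(
    SORTS.get(sort, r.asc('creation_time'))).run(get_db())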
def _get_jobs(self, conditions=None):
    jobs = []
    failed_job_ids = []
    if conditions:
        documents = list(
            self.table
            .filter(lambda x: x['next_run_time'] != None)
            .filter(conditions)
            .order_by(r.asc('next_run_time'), 'id')
            .pluck('id', 'job_state')
            .run(self.conn))
    else:
        documents = list(
            self.table
            .order_by(r.asc('next_run_time'), 'id')
            .pluck('id', 'job_state')
            .run(self.conn))
    for document in documents:
        try:
            jobs.append(self._reconstitute_job(document['job_state']))
        except Exception:
            self._logger.exception('Unable to restore job "%s" -- removing it',
                                   document['id'])
            failed_job_ids.append(document['id'])
    # Remove all the jobs we failed to restore
    if failed_job_ids:
        r.expr(failed_job_ids).for_each(
            lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)
    return jobs
def test_sort_multi_1_asc(self, conn):
    expected = [
        {'id': 'glen', 'age': 26, 'score': 15},
        {'id': 'joe', 'age': 26, 'score': 60},
        {'id': 'bill', 'age': 35, 'score': 78},
        {'id': 'todd', 'age': 52, 'score': 15},
        {'id': 'pale', 'age': 52, 'score': 30}
    ]
    result = r.db('y').table('scores').order_by(
        r.asc('age'), r.asc('score')).run(conn)
    assertEqual(expected, list(result))
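# The same multi-key ordering with mixed directions, as a minimal sketch
# against the same hypothetical 'scores' table: youngest first, and within
# each age the highest score first.
rows = r.db('y').table('scores').order_by(
    r.asc('age'), r.desc('score')).run(conn)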
def bootstrap(self):
    """
    Get transactions from the backlog that may have been assigned to this
    node while it was offline (not listening to the changefeed).
    """
    # create bigchain instance
    b = Bigchain()
    # create a queue to store initial results
    q_initial = mp.Queue()
    # get initial results
    initial_results = r.table('backlog') \
        .between([b.me, r.minval], [b.me, r.maxval],
                 index='assignee__transaction_timestamp') \
        .order_by(index=r.asc('assignee__transaction_timestamp')) \
        .run(b.conn)
    # add results to the queue
    for result in initial_results:
        q_initial.put(result)
    for i in range(mp.cpu_count()):
        q_initial.put('stop')
    return q_initial
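# A hedged sketch of the compound secondary index the backlog queries on this
# page rely on. This is standard ReQL index_create syntax; the exact field
# names behind 'assignee__transaction_timestamp' are an assumption inferred
# from the index name. `conn` is an open connection.
r.table('backlog').index_create(
    'assignee__transaction_timestamp',
    [r.row['assignee'], r.row['transaction']['timestamp']]
).run(conn)
r.table('backlog').index_wait('assignee__transaction_timestamp').run(conn)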
def listener():
    """Listener for Sawtooth state changes."""
    try:
        conn = connect_to_db()
        LOGGER.info("Reading queued Sawtooth transactions")
        while True:
            feed = r.table("inbound_queue").order_by(
                index=r.asc("timestamp")).run(conn)
            count = 0
            for rec in feed:
                process(rec, conn)
                count = count + 1
            if count == 0:
                break
            LOGGER.info("Processed %s records in the inbound queue", count)
        LOGGER.info("Listening for incoming Sawtooth transactions")
        feed = r.table("inbound_queue").changes().run(conn)
        for rec in feed:
            if rec["new_val"] and not rec["old_val"]:
                # only insertions
                process(rec["new_val"], conn)
    except Exception as err:  # pylint: disable=broad-except
        LOGGER.exception("Inbound listener %s exception", type(err).__name__)
        LOGGER.exception(err)
    finally:
        try:
            conn.close()
        except UnboundLocalError:
            pass
def test_valid_block_voting(self, b):
    q_new_block = mp.Queue()
    genesis = b.create_genesis_block()
    # create valid block
    block = b.create_block([])
    # assert block is valid
    assert b.is_valid_block(block)
    b.write_block(block, durability='hard')
    # create queue and voter
    voter = Voter(q_new_block)
    # vote
    voter.start()
    # wait for vote to be written
    time.sleep(1)
    voter.kill()
    # retrieve block from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # validate vote
    assert len(blocks[1]['votes']) == 1
    vote = blocks[1]['votes'][0]
    assert vote['vote']['voting_for_block'] == block['id']
    assert vote['vote']['previous_block'] == genesis['id']
    assert vote['vote']['is_block_valid'] is True
    assert vote['vote']['invalid_reason'] is None
    assert vote['node_pubkey'] == b.me
    assert PublicKey(b.me).verify(util.serialize(vote['vote']),
                                  vote['signature']) is True
def get_filter(db_name, table_name, query, limit=None, order_by=None,
               sort_order='desc'):
    items = []
    if limit:
        if order_by:
            if 'asc' in sort_order:
                query = r.db(db_name).table(table_name).filter(query).order_by(
                    r.asc(order_by)).limit(limit).run(conn)
            else:
                query = r.db(db_name).table(table_name).filter(query).order_by(
                    r.desc(order_by)).limit(limit).run(conn)
        else:
            query = r.db(db_name).table(table_name).filter(query).limit(
                limit).run(conn)
    else:
        query = r.db(db_name).table(table_name).filter(query).run(conn)
    for x in query:
        items.append(x)
    return items
def lookup_book(author, album, narrator):
    # .run() with no argument assumes a default connection was registered
    # earlier via r.connect(...).repl()
    result = r.table(args.rethinktable) \
        .filter(~r.row.has_fields('_deleted')) \
        .filter({
            'mp3_author': author,
            'mp3_album': album,
            'mp3_narrator': narrator
        }).order_by(r.asc('_item')).limit(1).run()
    if result:
        connection = client[args.mongodb_db][args.mongodb_collection]
        mongo_book = connection.find_one({
            '_deleted': {"$exists": False},
            'mp3_author': author,
            'mp3_album': album,
            'mp3_narrator': narrator
        })
        # Make sure we have it in mongodb...
        if not mongo_book:
            mongo_book = result[0].copy()
            mongo_book['_rethinkdb_id'] = mongo_book.pop('id', None)
            print()
            mongo_log(mongo_book['mp3_author'], mongo_book['mp3_album'],
                      mongo_book['mp3_narrator'])
            connection.insert(mongo_book)
        return True, result[0]
    return False, {}
def init():
    """Initialize the leveldb database over the given connections."""
    logger.info('leveldb init...')
    conn_bigchain = get_conn('bigchain')
    conn_header = get_conn('header')
    logger.info('leveldb/header init...')
    logger.info('leveldb/header init host...' + str(bigchaindb.config['database']['host']))
    logger.info('leveldb/header init public_key...' + str(bigchaindb.config['keypair']['public']))
    logger.info('leveldb/header init private_key...' + str(bigchaindb.config['keypair']['private']))
    update(conn_header, 'host', bigchaindb.config['database']['host'])
    update(conn_header, 'public_key', bigchaindb.config['keypair']['public'])
    update(conn_header, 'private_key', bigchaindb.config['keypair']['private'])
    block_num = int(get_withdefault(conn_header, 'block_num', 0))
    genesis_block_id = get_withdefault(conn_header, 'genesis_block_id', '0')
    if block_num == 0:
        genesis_block = r.db('bigchain').table('bigchain').order_by(
            r.asc(r.row['block']['timestamp'])).limit(1).run(
            bigchaindb.Bigchain().conn)[0]
        genesis_block_id = genesis_block['id']
        insert(conn_bigchain, genesis_block_id, genesis_block)
        insert(conn_header, 'genesis_block_id', genesis_block_id)
        insert(conn_header, 'block_num', 1)
        insert(conn_header, 'current_block_id', genesis_block_id)
    logger.info('leveldb/header genesis_block_id...' + str(genesis_block_id))
    logger.info('leveldb init done')
def test_valid_block_voting(self, b):
    q_new_block = mp.Queue()
    genesis = b.create_genesis_block()
    # create valid block
    block = b.create_block([])
    # assert block is valid
    assert b.is_valid_block(block)
    b.write_block(block, durability='hard')
    # create queue and voter
    voter = Voter(q_new_block)
    # vote
    voter.start()
    # wait for vote to be written
    time.sleep(1)
    voter.kill()
    # retrieve block from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # validate vote
    assert len(blocks[1]['votes']) == 1
    vote = blocks[1]['votes'][0]
    assert vote['vote']['voting_for_block'] == block['id']
    assert vote['vote']['previous_block'] == genesis['id']
    assert vote['vote']['is_block_valid'] is True
    assert vote['vote']['invalid_reason'] is None
    assert vote['node_pubkey'] == b.me
    assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']),
                                            vote['signature']) is True
def test_voter_chains_blocks_with_the_previous_ones(self, b):
    b.create_genesis_block()
    block_1 = b.create_block([])
    b.write_block(block_1, durability='hard')
    block_2 = b.create_block([])
    b.write_block(block_2, durability='hard')
    q_new_block = mp.Queue()
    voter = Voter(q_new_block)
    voter.start()
    time.sleep(1)
    voter.kill()
    # retrieve blocks from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    assert blocks[0]['block_number'] == 0
    assert blocks[1]['block_number'] == 1
    assert blocks[2]['block_number'] == 2
    # we don't vote on the genesis block right now
    # assert blocks[0]['votes'][0]['vote']['voting_for_block'] == genesis['id']
    assert blocks[1]['votes'][0]['vote']['voting_for_block'] == block_1['id']
    assert blocks[2]['votes'][0]['vote']['voting_for_block'] == block_2['id']
def get_posts_facade(limit=10, skip=0, **params):
    """
    Get posts, and return an array where each post is the correct kind
    based on the `kind` field.
    """
    data = (Post.table.filter(params).order_by(r.asc("created"))
            .skip(skip).limit(limit).run(database.db_conn))
    return [instance(d) for d in data]
def getordered(table, conn, key, index, order, direction=None):
    '''Fetch all results out of the database by secondary key and order
    them by a non-index field'''
    if direction == 'desc':  # `==`, not `is`: `is` compares identity, not equality
        return r.table(table).get_all(key, index=index).order_by(
            r.desc(order)).coerce_to('array').run(conn)
    else:
        return r.table(table).get_all(key, index=index).order_by(
            r.asc(order)).coerce_to('array').run(conn)
def get_posts_facade(db_conn, limit=10, skip=0, **params):
    """
    Get posts, and return an array where each post is the correct kind
    based on the `kind` field.
    """
    data = (Post.table.filter(params).order_by(
        r.asc('created')).skip(skip).limit(limit).run(db_conn))
    return [instance(d) for d in data]
def start(request):
    conn = r.connect("localhost", 28015, "wlps")
    profile = Profile.objects.get(user=request.user)
    queued = r.table("queue").filter({"user_id": int(request.user.id)}) \
        .order_by(r.asc("date_added")).run(conn)
    notifications = (
        r.table("notifications")
        .filter({"user_id": int(request.user.id)})
        .eq_join("episode_id", r.table("episode"))
        .order_by(r.asc("date_added"))
        .run(conn)
    )
    dic = {"queued": queued, "notifications": notifications, "profile": profile}
    return render_to_response("video/1.html", dic,
                              context_instance=RequestContext(request))
def _get_stop_times_by_vehicle(vehicle, stop_start, stop_end, route,
                               direction, time=arrow.now(), window=45,
                               index='route_direction_vehicle_time'):
    # note: time=arrow.now() is evaluated once at import time, not per call
    lower_key = [route, direction, vehicle,
                 r.epoch_time(time.replace(minutes=-window).timestamp)]
    upper_key = [route, direction, vehicle, r.epoch_time(time.timestamp)]
    query = r.table('vehicle_stop_times') \
        .between(lower_key, upper_key, index=index) \
        .order_by(index=r.asc(index)) \
        .filter((r.row['stop_id'] == stop_start) |
                (r.row['stop_id'] == stop_end))
    return list(query.run())
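# A hedged sketch of the compound index the query above assumes. The index
# name comes from the snippet; the component field names are assumptions.
# `conn` is an open connection; run this once at setup time.
r.table('vehicle_stop_times').index_create(
    'route_direction_vehicle_time',
    [r.row['route'], r.row['direction'], r.row['vehicle'], r.row['timestamp']]
).run(conn)
r.table('vehicle_stop_times').index_wait('route_direction_vehicle_time').run(conn)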
def get_next_run_time(self):
    results = list(
        self.table
        .filter(r.row['next_run_time'] != None)
        .order_by(r.asc('next_run_time'))
        .map(lambda x: x['next_run_time'])
        .limit(1)
        .run(self.conn)
    )
    return utc_timestamp_to_datetime(results[0]) if results else None
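# A hedged alternative sketch: ReQL's min() returns the element with the
# smallest value of the given field, and default(None) absorbs the
# non-existence error raised when the filtered sequence is empty. Names are
# reused from the snippet above; treat the exact error-handling behavior of
# default() here as an assumption.
next_ts = (self.table.has_fields('next_run_time')
           .min('next_run_time')['next_run_time']
           .default(None)
           .run(self.conn))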
def initial():
    """Return old transactions from the backlog."""
    bigchain = Bigchain()
    return bigchain.connection.run(
        r.table('backlog')
        .between([bigchain.me, r.minval], [bigchain.me, r.maxval],
                 index='assignee__transaction_timestamp')
        .order_by(index=r.asc('assignee__transaction_timestamp')))
def get_next_run_time(self):
    document = list(
        self.table.filter(lambda x: x['next_run_time'] != None).order_by(
            r.asc('next_run_time')).map(
            lambda x: x['next_run_time']).limit(1).run(self.conn))
    if document:
        document = utc_timestamp_to_datetime(document[0])
    else:
        document = None
    return document
def apply_query_parameter_sorts(cls, q, query_parameters):
    if len(query_parameters.sort) == 0:
        q = q.order_by("id")
    else:
        # Build all sort terms and apply them in a single order_by call:
        # chaining order_by() re-sorts the whole sequence each time, so only
        # the last call would take effect.
        terms = [rethinkdb.desc(sort["field"]) if sort["dir"] == "desc"
                 else rethinkdb.asc(sort["field"])
                 for sort in query_parameters.sort]
        q = q.order_by(*terms)
    return q
def initial():
    """Return old transactions from the backlog."""
    b = Bigchain()
    rs = r.table('backlog') \
        .between([b.me, r.minval], [b.me, r.maxval],
                 index='assignee__transaction_timestamp') \
        .order_by(index=r.asc('assignee__transaction_timestamp')) \
        .run(b.conn)
    return rs
def orderBy(self, field, direct="desc"):
    """
    Order the results by a specific field. The direction can be set by
    passing an additional argument, either "asc" or "desc".
    """
    if direct == "desc":
        self._query = self._query.order_by(r.desc(field))
    else:
        self._query = self._query.order_by(r.asc(field))
    return self
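# A hedged usage sketch of the fluent builder above. The Query class name,
# its constructor, and its run() method are assumptions, not part of the
# snippet; only orderBy() comes from it.
oldest_first = Query('posts').orderBy('created', direct='asc').run(conn)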
def get_unvoted_blocks(connection, node_pubkey):
    unvoted = connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .filter(lambda block: r.table('votes', read_mode=READ_MODE)
                .get_all([block['id'], node_pubkey], index='block_and_voter')
                .is_empty())
        .order_by(r.asc(r.row['block']['timestamp'])))
    # FIXME: I (@vrde) don't like this solution. Filtering should be done at a
    # database level. Solving issue #444 can help untangle the situation.
    unvoted_blocks = filter(lambda block: not utils.is_genesis_block(block),
                            unvoted)
    return unvoted_blocks
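# A hedged sketch of the 'block_and_voter' compound index the queries above
# rely on. The component fields are inferred from the vote documents seen
# elsewhere on this page and are assumptions; `conn` is an open connection.
r.table('votes').index_create(
    'block_and_voter',
    [r.row['vote']['voting_for_block'], r.row['node_pubkey']]
).run(conn)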
def get_unvoted_blocks(self):
    """Return all the blocks that have not been voted on by this node."""
    unvoted = r.table('bigchain') \
        .filter(lambda doc: doc['votes'].contains(
            lambda vote: vote['node_pubkey'] == self.me).not_()) \
        .order_by(r.asc(r.row['block']['timestamp'])) \
        .run(self.conn)
    if unvoted and unvoted[0].get('block_number') == 0:
        unvoted.pop(0)
    return unvoted
def render_asset(asset_id):
    versions = list(r.table('versions').filter({'asset_id': asset_id})
                    .order_by(r.asc('version_w')).run(g.rdb_conn))
    config = []
    for version in versions:
        print(version)
        config.append({"imgwidth": int(version['width'])})
    print(config)
    filename = os.path.join(app.config['UPLOAD_FOLDER'],
                            asset_id + versions[0]['ext'])
    encode(filename, config)
    return render_template('explore.html')
def list_posts(params, db_conn):
    """Get a list of posts in Sagefy."""
    skip = params.get('skip') or 0
    limit = params.get('limit') or 10
    params = omit(params, ('skip', 'limit',))
    query = (r.table(post_schema['tablename'])
             .filter(params)
             .order_by(r.asc('created'))
             .skip(skip)
             .limit(limit))
    return list(query.run(db_conn))
def get_block_votes(self, id, order_by=False):
    """Get all the votes for the specified block.

    Args:
        id: block id
        order_by: if True, order the votes by the 'vote_timestamp' index

    Returns:
        A cursor for the matching votes.
    """
    if order_by:
        # order_by(index=...) must be applied directly to the table, so
        # sort first and filter afterwards (filter preserves the order)
        return self.bigchain.connection.run(
            r.table('votes', read_mode=self.read_mode)
            .order_by(index=r.asc('vote_timestamp'))
            .filter(r.row['vote']['voting_for_block'] == id))
    else:
        return self.bigchain.connection.run(
            r.table('votes', read_mode=self.read_mode)
            .filter(r.row['vote']['voting_for_block'] == id))
def get_unvoted_blocks(self):
    """Return all the blocks that have not been voted on by this node."""
    unvoted = r.table('bigchain', read_mode=self.read_mode) \
        .filter(lambda block: r.table('votes', read_mode=self.read_mode)
                .get_all([block['id'], self.me], index='block_and_voter')
                .is_empty()) \
        .order_by(r.asc(r.row['block']['timestamp'])) \
        .run(self.conn)
    # FIXME: I (@vrde) don't like this solution. Filtering should be done at a
    # database level. Solving issue #444 can help untangle the situation.
    unvoted = filter(lambda block: not util.is_genesis_block(block), unvoted)
    return list(unvoted)
def get_unvoted_blocks(self):
    """Return all the blocks that have not been voted on by this node."""
    unvoted = (
        r.table("bigchain")
        .filter(lambda block: r.table("votes")
                .get_all([block["id"], self.me], index="block_and_voter")
                .is_empty())
        .order_by(r.asc(r.row["block"]["timestamp"]))
        .run(self.conn)
    )
    # FIXME: I (@vrde) don't like this solution. Filtering should be done at a
    # database level. Solving issue #444 can help untangle the situation.
    unvoted = filter(lambda block: not util.is_genesis_block(block), unvoted)
    return list(unvoted)
def fetch_positions(conn, date):
    # Fetch vehicle positions for the date (in local time)
    if date is None:
        date = arrow.now()
    else:
        date = date.replace(days=1)
    date = arrow.now().replace(year=date.year, month=date.month, day=date.day,
                               hour=0, minute=0, second=0,
                               tzinfo='America/Chicago')
    day_before = date.replace(days=-1)
    LOGGER.info('Fetching positions from {} to {}.'.format(
        day_before.isoformat(), date.isoformat()))
    query = rethinkdb.table('vehicle_position') \
        .between(day_before.datetime, date.datetime, index='timestamp') \
        .order_by(index=rethinkdb.asc('timestamp')) \
        .without('id', 'bearing')
    return list(query.run(conn))
def test_valid_block_voting_with_create_transaction(self, b):
    q_new_block = mp.Queue()
    genesis = b.create_genesis_block()
    # create a `CREATE` transaction
    test_user_priv, test_user_pub = crypto.generate_key_pair()
    tx = b.create_transaction(b.me, test_user_pub, None, 'CREATE')
    tx_signed = b.sign_transaction(tx, b.me_private)
    assert b.is_valid_transaction(tx_signed)
    # create valid block
    block = b.create_block([tx_signed])
    # assert block is valid
    assert b.is_valid_block(block)
    b.write_block(block, durability='hard')
    # create queue and voter
    voter = Voter(q_new_block)
    # vote
    voter.start()
    # wait for vote to be written
    time.sleep(1)
    voter.kill()
    # retrieve block from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # validate vote
    assert len(blocks[1]['votes']) == 1
    vote = blocks[1]['votes'][0]
    assert vote['vote']['voting_for_block'] == block['id']
    assert vote['vote']['previous_block'] == genesis['id']
    assert vote['vote']['is_block_valid'] is True
    assert vote['vote']['invalid_reason'] is None
    assert vote['node_pubkey'] == b.me
    assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']),
                                            vote['signature']) is True
def get_unvoted_blocks(self, node_pubkey):
    """Return all the blocks that have not been voted on by the specified node.

    Args:
        node_pubkey (str): base58 encoded public key

    Returns:
        :obj:`list` of :obj:`dict`: a list of unvoted blocks
    """
    unvoted = self.connection.run(
        r.table('bigchain', read_mode=self.read_mode)
        .filter(lambda block: r.table('votes', read_mode=self.read_mode)
                .get_all([block['id'], node_pubkey], index='block_and_voter')
                .is_empty())
        .order_by(r.asc(r.row['block']['timestamp'])))
    # FIXME: I (@vrde) don't like this solution. Filtering should be done at a
    # database level. Solving issue #444 can help untangle the situation.
    unvoted_blocks = filter(lambda block: not util.is_genesis_block(block),
                            unvoted)
    return unvoted_blocks
async def list(self, table, key='get', value='all', index='update_date',
               order='desc', create_index=False, limit=False):
    if create_index:
        await self.index(table, index)
    if order == "desc":
        direction = r.desc(index)
    else:
        direction = r.asc(index)
    # NOTE: `direction` is computed but never used below; presumably
    # self.all / self.filter are meant to receive it
    if key == 'get' and value == 'all':
        items = await self.all(table)
    else:
        items = await self.filter(table, {key: value}, limit=limit)
    return items
def test_invalid_block_voting(self, b):
    # create queue and voter
    q_new_block = mp.Queue()
    voter = Voter(q_new_block)
    # create transaction
    transaction = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
    transaction_signed = b.sign_transaction(transaction, b.me_private)
    genesis = b.create_genesis_block()
    # create invalid block
    block = b.create_block([transaction_signed])
    # change transaction id to make it invalid
    block['block']['transactions'][0]['id'] = 'abc'
    assert not b.is_valid_block(block)
    b.write_block(block, durability='hard')
    # vote
    voter.start()
    time.sleep(1)
    voter.kill()
    # retrieve block from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # validate vote
    assert len(blocks[1]['votes']) == 1
    vote = blocks[1]['votes'][0]
    assert vote['vote']['voting_for_block'] == block['id']
    assert vote['vote']['previous_block'] == genesis['id']
    assert vote['vote']['is_block_valid'] is False
    assert vote['vote']['invalid_reason'] is None
    assert vote['node_pubkey'] == b.me
    assert PublicKey(b.me).verify(b.serialize(vote['vote']),
                                  vote['signature']) is True
def get_txs_count(self, id=None, order_by=False):
    """Count the transactions per block.

    Args:
        id: block id; if given, count only that block's transactions
        order_by: if True, iterate blocks ordered by the 'block_timestamp' index

    Returns:
        block_count: the number of blocks
        txs_count: the total number of transactions
        block_txs_count_list: a list of {block_id: block_txs_count} dicts
    """
    table = 'bigchain'
    block_count = 0
    txs_count = 0
    block_txs_count_list = []
    if id:
        block = self.bigchain.connection.run(r.table(table).get(id))
        block_count = 1
        txs_count = len(block['block']['transactions'])
    else:
        if order_by:
            index = 'block_timestamp'
            blocks = self.bigchain.connection.run(
                r.table(table).order_by(index=r.asc(index)))
        else:
            blocks = self.bigchain.connection.run(r.table(table))
        for block in blocks:
            block_count += 1
            block_txs_count = len(block['block']['transactions'])
            block_txs_count_list.append({block['id']: block_txs_count})
            txs_count += block_txs_count
    return block_count, txs_count, block_txs_count_list
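# A hedged server-side alternative sketch: push the total-count aggregation
# into ReQL instead of iterating blocks client-side. Table and field names
# are reused from the snippet above.
txs_count = self.bigchain.connection.run(
    r.table('bigchain')
    .map(lambda block: block['block']['transactions'].count())
    .sum())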
def apply_query_parameters(self, query_parameters):
    q = self.table
    if query_parameters.filter is not None:
        for filter in query_parameters.filter["filters"]:
            if filter["operator"] == "startswith":
                # prefix search: bump the last character of the prefix by one
                # code point to form an exclusive upper bound for the range
                start = filter["value"].lower()
                end = start[:-1] + chr(ord(start[-1]) + 1)
                q = q.between(start, end, index="lower_name")
            else:
                q = q.between(filter["value"].lower(), filter["value"].lower(),
                              index="lower_name", right_bound="closed")
    for sort in query_parameters.sort:
        if sort["dir"] == "desc":
            q = q.order_by(index=r.desc("lower_name"))
        else:
            q = q.order_by(index=r.asc("lower_name"))
    data = self.uow.run_list(q.skip(query_parameters.skip)
                             .limit(query_parameters.take))
    total = self.uow.run(q.count())
    query_result = QueryResult(data, total)
    return query_result
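# Worked example of the prefix-range trick above. For the prefix 'car' the
# exclusive upper bound becomes 'cas', so the range matches 'car', 'carbon',
# 'cart', ... but not 'cas'. The table name is an assumption; the simple
# character bump also assumes the last character is not the maximum code point.
prefix = 'car'
upper = prefix[:-1] + chr(ord(prefix[-1]) + 1)  # -> 'cas'
matches = r.table('accounts').between(prefix, upper, index='lower_name').run(conn)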
def _build_cursor_obj(self):
    self._cursor_obj = r.table(self._document.Meta.table_name)
    if self._filter:
        self._cursor_obj = self._cursor_obj.filter(self._filter)
    order_by = self._order_by or self._document.Meta.order_by
    if order_by:
        order_by_r = []
        for field in order_by:
            if field.startswith('-'):
                order_by_r.append(r.desc(field[1:]))
            else:
                order_by_r.append(r.asc(field))
        self._cursor_obj = self._cursor_obj.order_by(*order_by_r)
    if self._limit:
        self._cursor_obj = self._cursor_obj.limit(self._limit)
    if self._skip:
        self._cursor_obj = self._cursor_obj.skip(self._skip)
    self._iter_index = 0
    self._cursor_iter = iter(self._cursor_obj.run(get_conn()))
def _build_cursor_obj(self):
    self._cursor_obj = r.table(self._document.Meta.table_name)
    if self._filter:
        self._cursor_obj = self._cursor_obj.filter(self._filter)
    order_by = self._order_by or self._document.Meta.order_by
    if order_by:
        order_by_r = []
        for field in order_by:
            if field.startswith('-'):
                order_by_r.append(r.desc(field[1:]))
            else:
                order_by_r.append(r.asc(field))
        self._cursor_obj = self._cursor_obj.order_by(*order_by_r)
    if self._skip:
        self._cursor_obj = self._cursor_obj.skip(self._skip)
    if self._limit:
        self._cursor_obj = self._cursor_obj.limit(self._limit)
    self._iter_index = 0
    self._cursor_iter = iter(self._cursor_obj.run(get_conn()))
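# A hedged usage sketch of the '-field' convention the two builders above
# implement. The Document subclass and the objects entry point are
# assumptions, not part of the snippets:
posts = Post.objects.order_by('-created', 'title').limit(10)
# Also note the two variants differ in where skip is applied relative to
# limit; for offset pagination, the skip-then-limit order of the second
# variant is usually the intended one.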
def test_voter_considers_unvoted_blocks_when_single_node(self, b):
    # simulate a voter going down in a single node environment
    b.create_genesis_block()
    # insert blocks in the database while the voter process is not listening
    # (these blocks won't appear in the changefeed)
    block_1 = b.create_block([])
    b.write_block(block_1, durability='hard')
    block_2 = b.create_block([])
    b.write_block(block_2, durability='hard')
    # voter is back online, we simulate that by creating a queue and a Voter instance
    q_new_block = mp.Queue()
    voter = Voter(q_new_block)
    # create a new block that will appear in the changefeed
    block_3 = b.create_block([])
    b.write_block(block_3, durability='hard')
    # put the last block in the queue
    q_new_block.put(block_3)
    # vote
    voter.start()
    time.sleep(1)
    voter.kill()
    # retrieve blocks from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # FIXME: remove genesis block, we don't vote on it (might change in the future)
    blocks.pop(0)
    assert all(block['votes'][0]['node_pubkey'] == b.me for block in blocks)
def newsfeed():
    feed_list = []
    connection = r.connect(host='localhost', port=28015)
    feed_db = r.db('taggem').table('post') \
        .filter({'user_id': '*****@*****.**'}) \
        .order_by(r.asc('date')).run(connection)
    for f in feed_db:
        feed_list.append(f)
    return jsonify({'feed': feed_list})
def send_initial_data(client):
    try:
        logging.info("send_initial_data::entering")
        tconn = yield r.connect(DB_HOST, DB_PORT, DB_NAME)
        # take the newest 200 observations, then re-sort them ascending
        feed = yield r.table("observations").order_by(r.desc('timestamp')) \
            .limit(200).order_by(r.asc('timestamp')).run(tconn)
        for document in feed:
            client.write_message(document)
        fan = FAN_Adapter(16, 'f_0001')
        d_fan = fan.readJSON()
        d_observation = {
            'fan': [d_fan],
            'timestamp': str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
            'type': 'actors',
        }
        tconn.close()
        logging.info("send_initial_data::leaving")
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.info("send_initial_data::exception %s %s %s",
                     exc_type, exc_value, exc_traceback)
def get_all_layers_for_asset(asset_id):
    layers = list(r.table('layers').filter({'asset_id': asset_id})
                  .order_by(r.asc('time_stamp')).run(g.rdb_conn))
    return json.dumps(layers)
def get_all_versions_for_asset(asset_id):
    # sort by width, then height, in a single order_by call: chaining two
    # order_by calls would simply re-sort by the second key alone
    versions = list(r.table('versions').filter({'asset_id': asset_id})
                    .order_by(r.asc('width'), r.asc('height'))
                    .run(g.rdb_conn))
    return json.dumps(versions)
def test_valid_block_voting_with_transfer_transactions(self, b):
    q_new_block = mp.Queue()
    b.create_genesis_block()
    # create a `CREATE` transaction
    test_user_priv, test_user_pub = crypto.generate_key_pair()
    tx = b.create_transaction(b.me, test_user_pub, None, 'CREATE')
    tx_signed = b.sign_transaction(tx, b.me_private)
    assert b.is_valid_transaction(tx_signed)
    # create valid block
    block = b.create_block([tx_signed])
    # assert block is valid
    assert b.is_valid_block(block)
    b.write_block(block, durability='hard')
    # create queue and voter
    voter = Voter(q_new_block)
    # vote
    voter.start()
    # wait for vote to be written
    time.sleep(1)
    voter.kill()
    # retrieve block from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # validate vote
    assert len(blocks[1]['votes']) == 1
    # create a `TRANSFER` transaction
    test_user2_priv, test_user2_pub = crypto.generate_key_pair()
    tx2 = b.create_transaction(test_user_pub, test_user2_pub,
                               {'txid': tx['id'], 'cid': 0}, 'TRANSFER')
    tx2_signed = b.sign_transaction(tx2, test_user_priv)
    assert b.is_valid_transaction(tx2_signed)
    # create valid block
    block = b.create_block([tx2_signed])
    # assert block is valid
    assert b.is_valid_block(block)
    b.write_block(block, durability='hard')
    # create queue and voter
    voter = Voter(q_new_block)
    # vote
    voter.start()
    # wait for vote to be written
    time.sleep(1)
    voter.kill()
    # retrieve block from bigchain
    blocks = list(r.table('bigchain')
                  .order_by(r.asc(r.row['block']['timestamp']))
                  .run(b.conn))
    # validate vote
    assert len(blocks[2]['votes']) == 1
    vote = blocks[2]['votes'][0]
    assert vote['vote']['voting_for_block'] == block['id']
    assert vote['vote']['is_block_valid'] is True
    assert vote['vote']['invalid_reason'] is None
    assert vote['node_pubkey'] == b.me
    assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']),
                                            vote['signature']) is True