def test_connect_close_expr(self):
    c = r.connect(port=self.port)
    r.expr(1).run(c)
    c.close()
    self.assertRaisesRegexp(
        r.RqlDriverError, "Connection is closed.",
        r.expr(1).run, c)
def set_unknown_domains_not_in_hyps(hyps):
    # Find domains in status Started, Paused or Unknown whose hypervisor
    # is not in the given list, and mark them Unknown.
    r_conn = new_rethink_connection()
    rtable = r.table('domains')

    status_to_unknown = ['Started', 'Paused', 'Unknown']
    l = list(
        rtable.filter(
            lambda d: r.expr(status_to_unknown).contains(d['status'])
        ).filter(
            lambda d: r.not_(r.expr(hyps).contains(d['hyp_started']))
        ).update({'status': 'Unknown'}).run(r_conn))

    # Domains that never finished starting are marked Stopped instead.
    status_to_stopped = ['Starting', 'CreatingTemplate']
    l = list(
        rtable.filter(
            lambda d: r.expr(status_to_stopped).contains(d['status'])
        ).filter(
            lambda d: r.not_(r.expr(hyps).contains(d['hyp_started']))
        ).update({'status': 'Stopped'}).run(r_conn))

    close_rethink_connection(r_conn)
    return l
def test_connect_close_reconnect(self):
    c = r.connect(port=self.port)
    r.expr(1).run(c)
    c.close()
    c.close()  # closing an already-closed connection is a no-op
    c.reconnect()
    r.expr(1).run(c)
def GET(self):
    self.view.partial("sidebar", "partials/admin/sidebar",
                      {"command": "users"})

    try:
        user = um.User(self.request.id)
    except NotFoundError:
        return NotFound()

    self.view.title = user.username
    disabled = self.request.get_param("q")

    # IDs of this user's phots that have been disabled
    hidden_ids = r.table(pm.Phot.table).filter({
        "user": user.id
    }).filter(r.row["disable"].eq(True)).concat_map(
        lambda doc: [doc["id"]]).coerce_to("array").run()

    if disabled == "enabled":
        query = r.table(pm.Phot.table).filter({
            "user": user.id
        }).filter(lambda doc: ~r.expr(hidden_ids).contains(doc["id"]))
    else:
        query = r.table(pm.Phot.table).filter({
            "user": user.id
        }).filter(lambda doc: r.expr(hidden_ids).contains(doc["id"]))

    res = RethinkCollection(pm.Phot, query=query)
    page = Paginate(res, self.request, "created")

    self.view.data = {"page": page, "user": user, "command": "phots"}

    return self.view
def list_all_events_for_training(self):
    selection = list(
        self.event_table.filter(
            (r.row["start"] != r.expr("")) & (r.row["end"] != r.expr(""))
        ).run(self.connection))
    return selection
def _get_jobs(self, conditions=None):
    jobs = []
    failed_job_ids = []
    if conditions:
        documents = list(
            self.table.filter(
                lambda x: x['next_run_time'] != None  # noqa: E711
            ).filter(conditions).order_by(
                r.asc('next_run_time'), 'id'
            ).pluck('id', 'job_state').run(self.conn))
    else:
        documents = list(
            self.table.order_by(r.asc('next_run_time'), 'id')
            .pluck('id', 'job_state').run(self.conn))

    for document in documents:
        try:
            jobs.append(self._reconstitute_job(document['job_state']))
        except Exception:
            self._logger.exception(
                'Unable to restore job "%s" -- removing it',
                document['id'])
            failed_job_ids.append(document['id'])

    # Remove all the jobs we failed to restore
    if failed_job_ids:
        r.expr(failed_job_ids).for_each(
            lambda job_id: self.table.get_all(job_id).delete()).run(
                self.conn)

    return jobs
def _get_jobs(self, predicate=None):
    jobs = []
    failed_job_ids = []
    query = (
        self.table.filter(
            r.row['next_run_time'] != None).filter(predicate)  # noqa
        if predicate else self.table)
    query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')

    for document in query.run(self.conn):
        try:
            jobs.append(self._reconstitute_job(document['job_state']))
        except Exception:
            self._logger.exception(
                'Unable to restore job "%s" -- removing it',
                document['id'])
            failed_job_ids.append(document['id'])

    # Remove all the jobs we failed to restore
    if failed_job_ids:
        r.expr(failed_job_ids).for_each(
            lambda job_id: self.table.get_all(job_id).delete()).run(
                self.conn)

    return jobs
def test_shutdown(self):
    c = r.connect(port=self.port)
    r.expr(1).run(c)
    self.servers.stop()
    sleep(0.2)
    self.assertRaisesRegexp(
        r.RqlDriverError, "Connection is closed.",
        r.expr(1).run, c)
def GET(self):
    self.view.partial("sidebar", "partials/admin/sidebar",
                      {"command": "phots"})

    what = self.request.get_param("v")
    orig = self.request.get_param("filter", "all")
    filt = dbu.phot_filter(orig)

    hidden_ids = list(
        r.table(pm.Phot.table).filter(
            r.row["disable"].eq(True)
        ).concat_map(lambda doc: [doc["id"]]).run())

    if what == "enabled":
        query = r.table(pm.Phot.table).filter(
            lambda doc: ~r.expr(hidden_ids).contains(doc["id"]))
    else:
        query = r.table(pm.Phot.table).filter(
            lambda doc: r.expr(hidden_ids).contains(doc["id"]))

    query = query.filter(lambda doc: doc["filename"].match(filt))

    result = RethinkCollection(pm.Phot, query=query)
    page = Paginate(result, self.request, "filename")

    self.view.data = {"page": page}
    self.view.scripts = ["transientbug/admin/phot"]

    return self.view
def get_hyps_with_status(list_status, not_=False, empty=False):
    r_conn = new_rethink_connection()
    rtable = r.table('hypervisors')

    if not_:
        l = list(
            rtable.filter({
                'enabled': True
            }).filter(lambda d: r.not_(
                r.expr(list_status).contains(d['status']))).run(r_conn))
    else:
        l = list(
            rtable.filter({
                'enabled': True
            }).filter(lambda d: r.expr(list_status).contains(
                d['status'])).run(r_conn))

    if empty:
        # Also include enabled hypervisors with no status field at all
        nostatus = list(
            rtable.filter({
                'enabled': True
            }).filter(lambda n: ~n.has_fields('status')).run(r_conn))
        l = l + nostatus

    close_rethink_connection(r_conn)
    return l
def mount(self):
    if self.mounted:
        return

    query = self._ensure_table_query(self.db_query, self.files_table_name)

    # Compound index on (status, file name, finished date).
    file_index_func = lambda row: rethinkdb.args([
        row[STATUS_JSON_NAME],
        row[FILE_NAME_JSON_NAME],
        row[FINISHED_DATE_JSON_NAME]
    ])

    # The prefix index is only defined for completed uploads.
    file_prefix_index_func = lambda row: rethinkdb.expr(
        row[STATUS_JSON_NAME] == "completed"
    ).branch(
        rethinkdb.args([
            row[FILE_NAME_JSON_NAME].split("/").slice(1, -1),
            row[FINISHED_DATE_JSON_NAME]
        ]),
        rethinkdb.error("File is still uploading."))

    # Only create the indexes when the table was created by this call.
    query = query.do(lambda result: rethinkdb.expr(
        result["tables_created"] == 1
    ).branch(
        self._create_index(
            self.db_query, self.files_table_name,
            self.file_index, file_index_func
        ).do(lambda _: self._create_index(
            self.db_query, self.files_table_name,
            self.file_prefix_index, file_prefix_index_func)),
        None))

    query = query.do(lambda _: self._ensure_table_query(
        self.db_query, self.chunks_table_name))

    chunk_index_func = lambda row: rethinkdb.args(
        [row[FILE_ID_JSON_NAME], row[NUM_JSON_NAME]])

    query = query.do(lambda result: rethinkdb.expr(
        result["tables_created"] == 1
    ).branch(
        self._create_index(
            self.db_query, self.chunks_table_name,
            self.chunk_index, chunk_index_func),
        None))

    query = query.do(lambda _: self._confirm_mount())
    return query
def get_bar_data(question_data):
    # `question` is expected from the enclosing scope; `question[1]` holds
    # the sequence of responses (as in get_pie_data/get_data below).
    return r.branch(
        (
            r.expr(question_data["response_format"] ==
                   Question().RESPONSE_MULTIPLE_CHOICE)
            | (question_data["response_format"] ==
               Question().RESPONSE_RATING)
        ),
        r.branch(
            (question_data["response_format"] ==
             Question().RESPONSE_MULTIPLE_CHOICE),
            {
                "labels": question[1].distinct(),
                "series": [
                    question[1].distinct().do(
                        lambda val: question[1].filter(
                            lambda foo: foo == val).count())
                ],
            },
            (question_data["response_format"] ==
             Question().RESPONSE_RATING),
            {
                "labels": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                "series": [
                    r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).map(
                        lambda val: question[1].filter(
                            lambda foo: foo == val).count())
                ],
            },
            [],
        ),
        [],
    )
def test_post():
    url = 'httpbin.org/post'
    post_data = {'str': '%in fo+', 'number': 135.5, 'nil': None}
    res = r.http(url, method='POST', data=post_data).run(conn)
    post_data['number'] = str(post_data['number'])
    post_data['nil'] = ''
    expect_eq(res['form'], post_data)

    post_data = {'str': '%in fo+', 'number': 135.5, 'nil': None}
    res = r.http(url,
                 method='POST',
                 data=r.expr(post_data).coerce_to('string'),
                 header={'Content-Type': 'application/json'}).run(conn)
    expect_eq(res['json'], post_data)

    res = r.http(url,
                 method='POST',
                 data=r.expr(post_data).coerce_to('string')).run(conn)
    # Default content type is x-www-form-urlencoded, which changes the
    # '+' to a space
    post_data['str'] = '%in fo '
    expect_eq(res['json'], post_data)

    post_data = 'a=b&b=c'
    res = r.http(url, method='POST', data=post_data).run(conn)
    expect_eq(res['form'], {'a': 'b', 'b': 'c'})

    post_data = '<arbitrary>data</arbitrary>'
    res = r.http(url, method='POST', data=post_data).run(conn)
    expect_eq(res['data'], post_data)
def test_port_conversion(self):
    c = r.connect(port=str(self.port))
    r.expr(1).run(c)
    c.close()
    self.assertRaisesRegexp(
        r.RqlDriverError,
        "Could not convert port abc to an integer.",
        lambda: r.connect(port='abc'))
def generate_stats(rdb_conn):
    issues = r.table(ISSUES_TABLE)
    issues_with_milestone = issues.filter(
        lambda issue: issue['milestone'] != None)
    milestones = issues_with_milestone.map(
        lambda issue: issue['milestone']['title']).distinct()

    # Generate user stats (how many issues assigned to this user have been
    # opened and closed) for a particular set of issues
    def user_stats(issue_set):
        # Remove issues that don't have owners from the issue set
        issue_set = issue_set.filter(lambda issue: issue['assignee'] != None)

        # Get a list of users issues are assigned to
        owners = issue_set.map(lambda issue: issue['assignee']).distinct()

        # Count the issues with a given owner and state (shorthand since we
        # reuse this)
        def count_issues(owner, state):
            return issue_set.filter(lambda issue: (
                issue['assignee']['login'] == owner['login']) & (
                issue['state'] == state)).count()

        # Return a list of documents with stats for each owner
        return owners.map(lambda owner: {
            'owner': owner['login'],
            'owner_avatar_url': owner['avatar_url'],
            'open_issues': count_issues(owner, 'open'),
            'closed_issues': count_issues(owner, 'closed'),
        })

    # Return owner stats for a particular milestone (filter issues to just
    # include a milestone)
    def user_stats_by_milestone(m):
        return user_stats(issues_with_milestone.filter(
            lambda issue: issue['milestone']['title'] == m))

    # Return the number of issues with a particular state (and optionally a
    # particular milestone)
    def num_issues(state, milestone=None):
        if milestone is None:
            issue_set = issues
        else:
            issue_set = issues_with_milestone.filter(
                lambda issue: issue['milestone']['title'] == milestone)
        return issue_set.filter(lambda issue: issue['state'] == state).count()

    # Two key things:
    # - we have to call coerce_to('array') since we get a sequence, and this
    #   will error otherwise
    # - we have to call list() on the stats to make sure we pull down all the
    #   data from a Cursor
    report = r.expr({
        'datetime': r.js('(new Date).toISOString()'),
        'by_milestone': r.expr([{
            'milestone': 'all',
            'open_issues': num_issues('open'),
            'closed_issues': num_issues('closed'),
            'user_stats': user_stats(issues).coerce_to('array')
        }]).union(milestones.map(lambda m: {
            'milestone': m,
            'open_issues': num_issues('open', m),
            'closed_issues': num_issues('closed', m),
            'user_stats': user_stats_by_milestone(m).coerce_to('array')
        }))
    })

    # Add the generated report to the database
    print "Generating and inserting new user stats at %s" % datetime.now().strftime("%Y-%m-%d %H:%M")
    r.table(STATS_TABLE).insert(r.expr([report])).run(rdb_conn)
def post(self, *args, **kwargs):
    conn = self.get_connection()
    update_data = self.get_update_data()
    result = self.get_object_qs().update(
        r.expr(update_data, nesting_depth=40)).run(conn)
    if max(result.values()) == 0:
        # Nothing matched the update, so fall back to an insert
        result = r.table(self.table_name).insert(
            r.expr(self.get_insert_data(update_data),
                   nesting_depth=40)).run(conn)
    self.post_update()
    if self.request.META['CONTENT_TYPE'] == "application/json":
        return HttpResponse(json.dumps({'success': True}),
                            content_type="application/json")
    else:
        return HttpResponseRedirect(self.get_success_url())
def _():
    # *** First, find the list of subjects
    #     directly containing the member ID. ***
    query = (cls.start_accepted_query()
             .filter(r.row['members'].contains(
                 lambda member: member['id'] == unit_id)))
    subjects = query.run(db_conn)

    # *** Second, find all the subjects containing
    #     those subjects... recursively. ***
    found_subjects, all_subjects = subjects, []
    while found_subjects:
        subject_ids = {
            subject['entity_id']
            for subject in found_subjects
        }
        all_subjects += found_subjects
        query = (cls.start_accepted_query()
                 .filter(r.row['members'].contains(
                     lambda member: r.expr(subject_ids)
                     .contains(member['id']))))
        found_subjects = query.run(db_conn)

    return all_subjects
def rql_highest_revs(query, field):
    """
    r.db("psh").table("images").groupedMapReduce(
      function(image) { return image('dockerfile') },
      function(image) { return {rev: image('rev'), id: image('id')} },
      function(left, right) {
        return r.branch(left('rev').gt(right('rev')), left, right)
      }
    ).map(
      function(group) { return group('reduction')("id") }
    )
    """
    ids = query.grouped_map_reduce(
        lambda image: image[field],
        lambda image: {"rev": image["rev"], "id": image["id"]},
        lambda left, right: r.branch(left["rev"] > right["rev"], left, right)
    ).map(lambda group: group["reduction"]["id"]).coerce_to("array").run()

    return query.filter(lambda doc: r.expr(ids).contains(doc["id"]))
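# Usage sketch for rql_highest_revs above: keep only the newest revision
# per dockerfile. The "psh" database and "images" table come from the
# docstring; a repl-bound connection is assumed because the helper calls
# .run() without an explicit connection.
import rethinkdb as r

r.connect(db="psh").repl()
latest_images = rql_highest_revs(r.table("images"), "dockerfile")
for image in latest_images.run():
    print(image["id"], image["rev"])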
def import_from_queue(progress, conn, task_queue, error_queue,
                      replace_conflicts, durability, write_count):
    if progress[0] is not None and not replace_conflicts:
        # We were interrupted and it's not ok to overwrite rows, check that
        # the batch either:
        #  a) does not exist on the server
        #  b) is exactly the same on the server
        task = progress[0]
        pkey = r.db(task[0]).table(task[1]).info().run(conn)["primary_key"]
        for i in reversed(range(len(task[2]))):
            obj = pickle.loads(task[2][i])
            if pkey not in obj:
                raise RuntimeError(
                    "Connection error while importing. Current row has no "
                    "specified primary key, so cannot guarantee absence of "
                    "duplicates")
            row = r.db(task[0]).table(task[1]).get(obj[pkey]).run(conn)
            if row == obj:
                write_count[0] += 1
                del task[2][i]
            else:
                raise RuntimeError("Duplicate primary key `%s`:\n%s\n%s" %
                                   (pkey, str(obj), str(row)))

    task = task_queue.get() if progress[0] is None else progress[0]
    while not isinstance(task, StopIteration):
        try:
            # Unpickle objects (TODO: super inefficient, would be nice if we
            # could pass down json)
            objs = [pickle.loads(obj) for obj in task[2]]
            conflict_action = 'replace' if replace_conflicts else 'error'
            res = r.db(task[0]).table(task[1]).insert(
                r.expr(objs, nesting_depth=max_nesting_depth),
                durability=durability,
                conflict=conflict_action).run(conn)
        except:
            # Record where we were before re-raising, so a retry can resume
            progress[0] = task
            raise

        if res["errors"] > 0:
            raise RuntimeError("Error when importing into table '%s.%s': %s" %
                               (task[0], task[1], res["first_error"]))

        write_count[0] += len(objs)
        task = task_queue.get()
def __init__(self, lambd=(lambda x: r.expr(x))):
    self.lambd = lambd
    # This is to trick ReQL function inspection when it creates
    # the AST. You can't subclass types.FunctionType
    # unfortunately, so emulating them is the best we can do
    self.func_code = lambd.func_code
    self.__doc__ = lambd.__doc__
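# A minimal sketch of the same trick in context, with the class name
# FunctionShim assumed for illustration: exposing the wrapped lambda's
# code object lets the driver's function inspection read the argument
# count as if it had been handed a plain lambda. (Python 3 drivers
# inspect __code__ rather than func_code.)
import rethinkdb as r

class FunctionShim(object):
    def __init__(self, lambd=(lambda x: r.expr(x))):
        self.lambd = lambd
        self.func_code = lambd.func_code  # what ReQL inspection reads
        self.__doc__ = lambd.__doc__

    def __call__(self, *args):
        # Delegate so the shim behaves like the lambda when invoked
        return self.lambd(*args)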
def new_usuario(self, data):
    """
    Creates a user with the given data.
    """
    data['criado_em'] = r.expr(datetime.now(
        r.make_timezone(config('TIMEZONE', default='-03:00'))))
    return self.insert(data)
def get_shared_records_for_report(self, index_values,
                                  consider_demand_type=False):
    if consider_demand_type:
        report_index = "has_peak"
    else:
        report_index = "no_peak"

    data = self.uow.run_list(
        self.compiled_energy_records_table
        .get_all(*index_values, index=report_index)
        .inner_join(
            self.compiled_energy_records_table,
            lambda arow, brow: r.expr(
                arow['comparison_value'] == brow['comparison_value'])
            .and_(arow['comparison_type'] == 'temp')
            .and_(brow['comparison_type'] == 'temp')
            .and_(arow['year'] != brow['year']))
        .zip()
        .group(lambda record: {'year': record['year'],
                               'account_id': record['account_id'],
                               'value': record['comparison_value']})
        .map(lambda record: {'sum_btu': record['sum_btu'],
                             'p_norm': record['sum_price_normalization'],
                             's_norm': record['sum_size_normalization'],
                             'hrs': record['sum_hours_in_record']})
        .reduce(lambda a, b: {'sum_btu': a['sum_btu'] + b['sum_btu'],
                              'p_norm': a['p_norm'] + b['p_norm'],
                              's_norm': a['s_norm'] + b['s_norm'],
                              'hrs': a['hrs'] + b['hrs']})
        .ungroup())
    return data
def UpdateTransactionState(client, ledgerblocks):
    """Update the state of transactions from the transaction collection
    in the exchange database.

    Args:
        client -- sawtooth.client.SawtoothClient for accessing the ledger
        ledgerblocks -- list of block identifiers in the current ledger
    """

    # add the 'failed' block to the list so we don't keep trying to
    # fix a transaction that is marked as unfixable
    blklist = set(ledgerblocks[:])
    blklist.add('failed')

    # ok... now we are looking for any transactions that are not in
    # one of the blocks in the committed list; transactions that are
    # in one of these blocks already have the correct state registered

    # one concern about this approach is that transactions that fail
    # are likely to stick around for a long time because we don't know
    # if they might magically show up in another block; for now I'm
    # just going to assume that a transaction that fails, fails
    # permanently
    logger.debug('update transaction state from blocks')

    # this is the query that we should use, but it isn't working, probably
    # because of missing InBlock fields, but there are no logs to be sure
    txnquery = rethinkdb.table('transactions').filter(
        lambda doc: ~(rethinkdb.expr(blklist).contains(doc['InBlock'])))

    txniter = txnquery.run()
    for txndoc in txniter:
        txnid = txndoc.get('id')
        assert txnid

        if txndoc.get('InBlock') in blklist:
            logger.debug('already processed transaction %s', txnid)
            continue

        try:
            logger.info('update status of transaction %s', txnid)
            txn = client.get_transaction(txnid)
            txndoc['Status'] = txn['Status']
            if txn.get('InBlock'):
                txndoc['InBlock'] = txn['InBlock']
        except Exception:
            # if we cannot retrieve the transaction then assume that it has
            # failed to commit; this might be an invalid assumption if the
            # validator itself has failed, though presumably that would have
            # been discovered much earlier
            logger.info('failed to retrieve transaction %s, marking it failed',
                        txnid)
            txndoc['Status'] = 3
            txndoc['InBlock'] = 'failed'

        rethinkdb.table('transactions').get(txnid).replace(txndoc).run()
def main(argv):
    # Main code here
    print "I'm dequeue worker"
    url_queue_table = parameters['rethinkdb_server']['tables']['url_queue']
    dequeued_ids = []

    while True:
        # Sleep 3 seconds
        time.sleep(3)
        num = num_dequeue_urls()
        if num > 0:
            del dequeued_ids[:]
            cursor = r.table(url_queue_table).order_by(
                index='ts').limit(num).run(rethink)
            for row in cursor:
                dequeued_ids.append(row['id'])
                gm_client.submit_job("crawler", str(row['url']),
                                     background=True)

            if dequeued_ids:
                print "\t - Dequeue ", len(dequeued_ids), " urls"
                # Clean all dequeued urls
                r.table(url_queue_table).filter(lambda row: r.expr(
                    dequeued_ids).contains(row['id'])).delete().run(rethink)
def _():
    # *** First, find the list of sets
    #     directly containing the member ID. ***
    query = (cls.start_accepted_query()
             .filter(r.row['members'].contains(
                 lambda member: member['id'] == unit_id)))
    sets = query.run(database.db_conn)

    # *** Second, find all the sets containing
    #     those sets... recursively. ***
    found_sets, all_sets = sets, []
    while found_sets:
        set_ids = {set_['entity_id'] for set_ in found_sets}
        all_sets += found_sets
        query = (cls.start_accepted_query()
                 .filter(r.row['members'].contains(
                     lambda member: r.expr(set_ids).contains(member['id']))))
        found_sets = query.run(database.db_conn)

    return all_sets
def after_epoch(self, epoch_id: int, epoch_data: AbstractHook.EpochData,
                **kwargs) -> None:
    logging.info('Rethink: after epoch %d', epoch_id)
    with r.connect(**self._credentials) as conn:
        item = {
            'timestamp': r.expr(datetime.now(pytz.utc)),
            'epoch_id': epoch_id,
            'epoch_data': RethinkDBHook._numpy_dict_to_jsonable_dict(
                epoch_data)
        }
        response = r.table(self._table)\
            .get(self._rethink_id)\
            .update({'training': r.row['training'].append(item)})\
            .run(conn)
        if response['errors'] > 0:
            logging.error('Error: %s', response['errors'])
            return
        if response['replaced'] != 1:
            logging.error(
                'Modified unexpected number of documents: %s instead of 1',
                response['replaced'])
            return
    logging.debug('Appended train. progress to: %s', self._rethink_id)
def test_post():
    url = 'httpbin.org/post'
    post_data = {'str': '%in fo+', 'number': 135.5, 'nil': None}
    res = r.http(url, method='POST', data=post_data).run(conn)
    post_data['number'] = str(post_data['number'])
    post_data['nil'] = ''
    expect_eq(res['form'], post_data)

    post_data = {'str': '%in fo+', 'number': 135.5, 'nil': None}
    res = r.http(url,
                 method='POST',
                 data=r.expr(post_data).coerce_to('string'),
                 header={'Content-Type': 'application/json'}).run(conn)
    expect_eq(res['json'], post_data)

    post_data = 'a=b&b=c'
    res = r.http(url, method='POST', data=post_data).run(conn)
    expect_eq(res['form'], {'a': 'b', 'b': 'c'})

    post_data = '<arbitrary>data</arbitrary>'
    res = r.http(url,
                 method='POST',
                 data=post_data,
                 header={'Content-Type': 'application/text/'}).run(conn)
    expect_eq(res['data'], post_data)
def _query_rethinkdb(self, cdx_query):
    start_key = cdx_query.key.decode('utf-8')
    end_key = cdx_query.end_key.decode('utf-8')
    reql = self.r.table(self.table).between(
        [start_key[:150], rethinkdb.minval],
        [end_key[:150] + '!', rethinkdb.maxval],
        index='abbr_canon_surt_timestamp')
    reql = reql.order_by(index='abbr_canon_surt_timestamp')

    # filters have to come after order_by apparently

    # TODO support for POST, etc
    # http_method='WARCPROX_WRITE_RECORD' for screenshots, thumbnails
    reql = reql.filter(
        lambda capture: rethinkdb.expr(
            ['WARCPROX_WRITE_RECORD', 'GET']).contains(
                capture['http_method']))
    reql = reql.filter(
        lambda capture: (capture['canon_surt'] >= start_key)
        & (capture['canon_surt'] < end_key))

    if cdx_query.limit:
        reql = reql.limit(cdx_query.limit)

    logging.debug('rethinkdb query: %s', reql)
    results = reql.run()
    return results
def _update_legacy(conn, block_num, address, resource, data_type):
    """ Update the legacy sync tables (expansion by object type name)
    """
    try:
        data = {
            "id": address,
            "start_block_num": int(block_num),
            "end_block_num": int(sys.maxsize),
            **resource,
        }
        query = (
            r.table(TABLE_NAMES[data_type]).get(address).replace(
                lambda doc: r.branch(
                    # pylint: disable=singleton-comparison
                    (doc == None),  # noqa
                    r.expr(data),
                    doc.merge(resource),
                )))
        result = query.run(conn)
        if result["errors"] > 0:
            LOGGER.warning("error updating legacy state table:\n%s\n%s",
                           result, query)
    except Exception as err:  # pylint: disable=broad-except
        LOGGER.warning("_update_legacy %s error:", type(err))
        LOGGER.warning(err)
def rql_where_not(table, field, value, pre_filter=None, raw=False):
    """
    Generates a query that is equivalent to running the following SQL:

        SELECT * FROM model WHERE id NOT IN (
            SELECT * FROM model WHERE field == value
        )

    This query can then be passed off to collections or used for
    anything else.

    This may or may not get fairly slow once you start getting a lot
    of ids...
    """
    if not pre_filter:
        hidden_ids = r.table(table).filter(r.row[field].eq(value)).concat_map(
            lambda doc: [doc["id"]]).coerce_to("array").run()
    else:
        hidden_ids = r.table(table).filter(pre_filter).filter(
            r.row[field].eq(value)).concat_map(
                lambda doc: [doc["id"]]).coerce_to("array").run()

    filt = lambda doc: ~r.expr(hidden_ids).contains(doc["id"])

    if not raw:
        if not pre_filter:
            query = r.table(table).filter(filt)
        else:
            query = r.table(table).filter(pre_filter).filter(filt)
        return query
    else:
        return filt
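# Usage sketch for rql_where_not above; the "phots" table and "disable"
# field are illustrative, and a repl-bound connection is assumed since
# the helper calls .run() without one.
import rethinkdb as r

r.connect(db="mydb").repl()

# Query form: all documents whose `disable` field is not True
visible = rql_where_not("phots", "disable", True)
print(list(visible.run()))

# Raw form: just the predicate, for composing onto other filters
only_visible = rql_where_not("phots", "disable", True, raw=True)
recent = r.table("phots").filter(only_visible).filter(
    r.row["filename"].match("^img_"))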
def test_empty_run(self):
    # Test the error message when we pass nothing to run and
    # didn't call `repl`
    self.assertRaisesRegexp(
        r.RqlDriverError,
        "RqlQuery.run must be given a connection to run on.",
        r.expr(1).run)
def parse_rules(rules):
    return r.expr({
        'rules': rules.map(lambda rule: (
            rule['value'] == bytes('', 'utf-8')).branch(
                rule.without('value'),
                rule.merge({'value': _value_to_array(rule)})))
    })
def test_repl(self):
    # Calling .repl() should set this connection as global state
    # to be used when `run` is not otherwise passed a connection.
    c = r.connect(port=self.port).repl()
    r.expr(1).run()
    c.repl()  # is idempotent
    r.expr(1).run()
    c.close()
    self.assertRaisesRegexp(r.RqlDriverError, "Connection is closed",
                            r.expr(1).run)
def get_resource(self):
    """
    obtain a connection resource from the queue
    :return: ConnectionResource object
    """
    if self._queue.empty() and self.current_conns < self._queue.maxsize:
        logger.info("create a new connection")
        conn = connect_to_rethinkdb(self._connection_info)
    else:
        logger.info("reuse a connection")
        conn = self._queue.get(True, self.get_timeout)

    try:
        # Cheap round-trip to verify the connection is still alive
        logger.info("tried the connection")
        r.expr(1).run(conn)
    except RqlDriverError:
        print("recreating the connection")
        conn = self._create_connection()
    return ConnectionResource(self._queue, conn)
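# The r.expr(1).run(conn) probe above is a cheap liveness check: the
# constant round-trips through the server, so a dead connection raises
# the driver error instead of returning. A standalone sketch of the same
# idea (the RqlDriverError import path varies across driver versions):
import rethinkdb as r
from rethinkdb import RqlDriverError

def connection_is_alive(conn):
    try:
        r.expr(1).run(conn)
        return True
    except RqlDriverError:
        return False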
def complex_sindex_fn(row, db, table):
    return r.expr([row["value"]]) \
        .concat_map(lambda item: [item, item, item, item]) \
        .concat_map(lambda item: [item, item, item, item]) \
        .concat_map(lambda item: [item, item, item, item]) \
        .concat_map(lambda item: [item, item, item, item]) \
        .concat_map(lambda item: [item, item, item, item]) \
        .concat_map(lambda item: [item, item, item, item]) \
        .reduce(lambda acc, val: acc + val, 0)
def evaluate(querystring):
    # Evaluate the query string with `r` available and almost no builtins,
    # so only ReQL expressions can be constructed.
    return r.expr(eval(querystring, {
        'r': r,
        '__builtins__': {
            'True': True,
            'False': False,
            'None': None,
        }
    }))
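# Usage sketch for evaluate above: the restricted globals expose only
# `r` and three literals, so a query string can build ReQL but cannot
# reach arbitrary builtins. `conn` is an assumed open connection.
query = evaluate("r.expr([1, 2, 3]).map(lambda x: x * 2)")
print(query.run(conn))  # -> [2, 4, 6]

# Names outside the whitelist fail to resolve:
# evaluate("__import__('os')") raises NameError instead of importing.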
def new_record(location):
    album_info = rdio.call('search', {'query': request.form['album'],
                                      'types': 'album'})

    # Defaults, used when the search returns nothing or no artist matches
    album_art = ('http://musicunderfire.com/wp-content/uploads/2012/06/'
                 'No-album-art-itunes-300x300.jpg')
    release_date = ''
    duration = 0
    tracks = 0
    artist_key = ''

    if album_info['result']['number_results'] != 0:
        for x in album_info['result']['results']:
            if x['artist'].upper() == request.form['artist'].upper():
                album_art = x['icon']
                release_date = x['releaseDate']
                duration = x['duration'] / 60
                tracks = x['length']
                artist_key = x['artistKey']

    new_record = m.Records()
    new_record.user = session['user']
    new_record.artist = request.form['artist']
    new_record.album = request.form['album']
    new_record.album_art = album_art
    new_record.release_date = release_date
    new_record.duration = duration
    new_record.tracks = tracks
    new_record.record_condition = ''
    new_record.sleeve_condition = ''
    new_record.color = ''
    new_record.size = ''
    new_record.notes = ''
    new_record.date_added = r.expr(datetime.datetime.now(
        timezone('US/Central')))
    new_record.user_artwork = ''
    new_record.save()

    condition = m.Condition.order_by('order').fetch()
    size = m.Size.order_by('order').fetch()
    record = m.Records.get(id=new_record['id'])

    if location == 'grid':
        return render_template('new_record.html', s=record,
                               condition=condition, size=size)
    else:
        return render_template('add_list.html', s=record,
                               condition=condition, size=size)
def _get_jobs(self, predicate=None):
    jobs = []
    failed_job_ids = []
    query = (self.table.filter(r.row['next_run_time'] != None)  # noqa: E711
             .filter(predicate) if predicate else self.table)
    query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')

    for document in query.run(self.conn):
        try:
            jobs.append(self._reconstitute_job(document['job_state']))
        except Exception:
            logging.exception('Unable to restore job "%s" -- removing it',
                              document['id'])
            failed_job_ids.append(document['id'])

    # Remove all the jobs we failed to restore
    if failed_job_ids:
        r.expr(failed_job_ids).for_each(
            lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)

    return jobs
def sequence_search(searchfield, words):
    # `DB` and `table` are free variables resolved from the enclosing
    # module; `doc[searchfield]` is expected to be an array of strings.
    return r.expr(words).concat_map(
        lambda word: r.db(DB).table(table).filter(
            lambda doc: doc[searchfield].map(
                lambda title: title.do(
                    lambda matcher: matcher.coerce_to('STRING').match(
                        '(?i)' + word))
            ).reduce(lambda left, right: left | right)
        ).coerce_to('array').map(lambda doc: doc['id']))
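# Usage sketch for sequence_search above. The module-level DB/table
# bindings and the "titles" field are assumptions for illustration;
# `conn` is an assumed open connection. The result is a stream of ids,
# one batch per search word (case-insensitive substring match).
DB = "library"
table = "books"
matching_ids = list(sequence_search("titles", ["tolstoy", "war"]).run(conn))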
def filter(self, ids=None, **kwargs):
    if ids:
        try:
            query = self.query.get_all(r.args(ids)).filter(kwargs)
        except AttributeError:
            # self.query already has a get_all applied
            query = (self.query
                     .filter(lambda doc: r.expr(ids).contains(doc['id']))
                     .filter(kwargs))
    else:
        query = self.query.filter(kwargs)
    return ObjectSet(self, query)
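# The try branch above leans on r.args, which splats a server-side array
# into get_all as individual keys. A minimal sketch (table name and keys
# are illustrative, `conn` an assumed open connection):
import rethinkdb as r

keys = ["a1", "b2", "c3"]
docs = r.table("objects").get_all(r.args(keys)).run(conn)
# equivalent to: r.table("objects").get_all("a1", "b2", "c3").run(conn)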
def create_table(db_name, table_name):
    try:
        r.db(db_name).table_create(table_name).run()
        print 'Table created, now adding values '
        r.table(table_name).insert(
            r.expr({'age': 26, 'name': 'Michel'})).run()
        r.table("last_upload_time").insert(
            [{"last_upload_time": int(time.time())}]).run()
        r.table("update_times").insert(
            [{"update_times": time.ctime()}]).run()
    except Exception as ex:
        print 'Caught in exception ', ex
def add_selections_command(self, command, selections=[], **kwargs):
    """ Add selections filter to command """
    if len(selections) > 0:
        for sel in selections:
            field = sel[0]
            values = sel[1]
            print("Only downloading documents with field '" + field +
                  "' equal to one of " + str(values))
            command = command.filter(
                lambda doc: r.expr(values).contains(doc[field]))
    return command
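# Usage sketch for add_selections_command above, with an assumed
# `exporter` instance and illustrative table/field names: each
# (field, values) pair narrows the command with a contains() filter.
command = r.table("documents")
command = exporter.add_selections_command(
    command,
    selections=[("status", ["published", "archived"]),
                ("author", ["alice"])])
docs = list(command.run(conn))  # `conn` is an assumed open connection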
def get(self):
    parser = reqparse.RequestParser()
    parser.add_argument('value', type=float, required=True,
                        help='Value of the killmail')
    parser.add_argument('ids', type=int, required=True,
                        help='IDs from the attackers', action='append')
    args = parser.parse_args(strict=True)

    results = r.table(RDB_TABLE).filter(
        lambda row: row['ids'].default([]).contains(
            lambda id: r.expr(args['ids']).contains(id)
        ).or_(row['value'].le(args['value']))
    ).pluck('url').distinct().run(db.conn)

    return [x['url'] for x in results]
def get_spending_transactions(connection, links):
    query = (
        r.table('bigchain')
        .get_all(*[(l['transaction_id'], l['output_index']) for l in links],
                 index='inputs')
        .concat_map(unwind_block_transactions)
        # filter transactions spending output
        .filter(lambda doc: r.expr(links).set_intersection(
            doc['tx']['inputs'].map(lambda i: i['fulfills']))))
    cursor = connection.run(query)
    return ((b['id'], b['tx']) for b in cursor)
def new_marcador(self, **kwargs):
    """
    Adds a new bookmark for the user identified by the given ID.
    """
    data = {
        'usuario_id': kwargs.pop('usuario_id', None),
        'criado_em': r.expr(datetime.now(r.make_timezone(
            config('TIMEZONE', default='-03:00'))))
    }
    data.update(kwargs)
    return self.insert(data)
async def update_auth_info(conn, email, public_key, update):
    result = await r.table('auth')\
        .get(email)\
        .do(lambda auth_info: r.expr(update.get('email')).branch(
            r.expr(r.table('auth').insert(auth_info.merge(update),
                                          return_changes=True)),
            r.table('auth').get(email).update(update,
                                              return_changes=True)))\
        .do(lambda auth_info: auth_info['errors'].gt(0).branch(
            auth_info,
            auth_info['changes'][0]['new_val'].pluck('email')))\
        .merge(_fetch_account_info(public_key))\
        .run(conn)

    if result.get('errors'):
        if "Duplicate primary key `email`" in result.get('first_error'):
            raise ApiBadRequest(
                "Bad Request: A user with that email already exists")
        else:
            raise ApiBadRequest(
                "Bad Request: {}".format(result.get('first_error')))

    if update.get('email'):
        await remove_auth_entry(conn, email)

    return result
def get_pie_data(question_data):
    return r.branch(
        (
            r.expr(question_data["response_format"] ==
                   Question().RESPONSE_MULTIPLE_CHOICE)
            | (question_data["response_format"] ==
               Question().RESPONSE_TRUE_OR_FALSE)
        ),
        question[1]
        .group(lambda response: response)
        .count()
        .ungroup()
        .map(lambda gr: {"name": gr["group"].coerce_to("string"),
                         "value": gr["reduction"]}),
        [],
    )
def rql_where_not(table, field, value):
    """
    Generates a query that is equivalent to running the following SQL:

        SELECT * FROM model WHERE id NOT IN (
            SELECT * FROM model WHERE field == value
        )

    This query can then be passed off to collections or used for
    anything else.

    This may or may not get fairly slow once you start getting a lot
    of ids...
    """
    hidden_ids = r.table(table).filter(r.row[field].eq(value)).concat_map(
        lambda doc: [doc["id"]]).coerce_to("array").run()
    query = r.table(table).filter(
        lambda doc: ~r.expr(hidden_ids).contains(doc["id"]))
    return query
def get_data(question_data, question):
    return r.branch(
        (r.expr(question_data['response_format'] ==
                Question().RESPONSE_MULTIPLE_CHOICE)
         | (question_data['response_format'] ==
            Question().RESPONSE_RATING)
         | (question_data['response_format'] ==
            Question().RESPONSE_TRUE_OR_FALSE)),
        r.branch(
            (question_data['response_format'] ==
             Question().RESPONSE_MULTIPLE_CHOICE),
            {
                'labels': question_data['options'],
                'series': [r.expr(question[1]).reduce(
                    lambda left, right: left.map(
                        right,
                        lambda leftVal, rightVal: leftVal + rightVal))]
            },
            (question_data['response_format'] ==
             Question().RESPONSE_TRUE_OR_FALSE),
            {
                'labels': question_data['options'],
                'series': r.expr(question[1]).reduce(
                    lambda left, right: left.map(
                        right,
                        lambda leftVal, rightVal: leftVal + rightVal))
            },
            (question_data['response_format'] ==
             Question().RESPONSE_RATING),
            {
                'labels': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                'series': [r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).map(
                    lambda val: question[1].filter(
                        lambda foo: foo == val).count())]
            },
            []
        ),
        []
    )
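# The queries above use r.branch's multi-arm form: alternating
# condition/result pairs followed by a final default, evaluated
# server-side like a cond expression. A tiny sketch with an assumed
# "results" table carrying a numeric `score` field:
grades = r.table("results").map(
    lambda doc: r.branch(doc["score"].ge(90), "A",
                         doc["score"].ge(80), "B",
                         "C"))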
def get(self, id=None):
    # Get the record value and the party name associated
    first = self.get_query().table('datavalues') \
        .concat_map(lambda doc: doc['steps'].concat_map(
            lambda step: step['data'].concat_map(
                lambda data: [{
                    'record': doc['record'],
                    'step': step['step'],
                    'pos': data['position'],
                    'party': data['value'],
                }]))) \
        .filter({'step': 3, 'pos': 1}) \
        .pluck('record', 'party') \
        .group('party')['record']

    records_with_docs = list(
        self.get_query().table('datadocs')['record'].run())

    final = {}
    from operator import itemgetter

    for party, records in first.run().items():
        elements = set(records) - set(records_with_docs)
        if len(elements) > 0:
            # Remove the records containing the images
            ids = list(set(records) - set(records_with_docs))
            cursor = self.get_query().table('datavalues') \
                .filter(lambda doc: r.expr(ids).contains(doc['record'])) \
                .run()

            newrecord = []
            for obj in cursor:
                val = obj['steps'][0]['data'][0]['value']
                tmp = val.split('_')
                index = 0
                if len(tmp) > 1:
                    index = 1
                sort = tmp[index]
                try:
                    sortme = int(sort)
                except ValueError:
                    sortme = -1
                newrecord.append({
                    'sortme': sortme,
                    'value': val,
                    'record': obj['record']
                })

            final[party] = sorted(newrecord, key=itemgetter('sortme'))
            # final[party] = list(cursor)

    return self.response(final)