def main():
    conn = r.connect()
    conn.use('wolfgame')
    # init empty gamestate
    state = GameState(conn)
    inserted = 0
    for line in fileinput.input():
        state.add_line(line)
        if state.game_finished():
            doc = state.get_state()
            replace = state.replace
            state.reset()
            lines = {'id': doc['id'], 'lines': doc['lines']}
            del doc['lines']
            ins = r.table('games').insert(doc, upsert=True).run(conn)
            ins2 = r.table('lines').insert(lines, upsert=True).run(conn)
            if 'errors' in ins and ins['errors'] > 0:
                print(ins['first_error'])
                exit(1)
            elif 'errors' in ins2 and ins2['errors'] > 0:
                print(ins2['first_error'])
                exit(1)
            else:
                inserted += 1
    if state.game_running():
        print("There is still a game running but we reached the end of input")
    print("Inserted %d document(s), skipped %d document(s)" % (inserted, state.get_skipped()))
    exit(0)
def js_imm_tag_annonce(self, pIDH='', pAction='', pParam=''):
    retourObj = ''
    if pIDH != '':
        dbconn = self.myops.rdb_get_lock()
        if dbconn is not None:
            if 'toggleusertag' in pAction:
                curseur = r.table(self.myops.config.get('rdb.table.annon'))  # .max('ts_collected').to_json()
                curseur = curseur.filter(r.row["id_hash"].eq(pIDH)).get_field("user_tags")
                curseur = curseur.run(dbconn)
                tags0 = curseur.next()
                curseur.close()
                if pParam.lower() in tags0:
                    # -- the tag is already on the object: remove it from the list
                    tags1 = list(tags0)
                    tags1.remove(pParam.lower())
                    curseur = r.table(self.myops.config.get('rdb.table.annon'))  # .max('ts_collected').to_json()
                    curseur = curseur.filter(r.row["id_hash"].eq(pIDH))
                    curseur = curseur.update({"user_tags": tags1})
                    curseur.run(dbconn)
                else:
                    # -- add the tag to the list
                    curseur = r.table(self.myops.config.get('rdb.table.annon'))  # .max('ts_collected').to_json()
                    curseur = curseur.filter(r.row["id_hash"].eq(pIDH))
                    curseur = curseur.update({"user_tags": r.row["user_tags"].append(pParam.lower()).distinct()})
                    curseur.run(dbconn)
            self.myops.rdb_release()
    return retourObj
def get_owned_assets(bigchain, vk, query=None, table='bigchain'):
    assets = []
    asset_ids = bigchain.get_owned_ids(vk)
    if table == 'backlog':
        reql_query = \
            r.table(table) \
            .filter(lambda tx: tx['transaction']['conditions']
                    .contains(lambda c: c['new_owners']
                              .contains(vk)))
        response = query_reql_response(reql_query.run(bigchain.conn), query)
        if response:
            assets += response
    elif table == 'bigchain':
        for asset_id in asset_ids:
            txid = asset_id['txid'] if isinstance(asset_id, dict) else asset_id
            reql_query = r.table(table) \
                .concat_map(lambda doc: doc['block']['transactions']) \
                .filter(lambda transaction: transaction['id'] == txid)
            response = query_reql_response(reql_query.run(bigchain.conn), query)
            if response:
                assets += response
    return assets
def check_owner(checkid):
    """claim or unclaim a given check"""
    if request.method == 'GET':
        check = r.table("checks").get(checkid).run(rdb.conn)
        if check:
            check['id'] = str(check['id'])
            return jsonify({'check': check})
        else:
            abort(404)
    elif request.method == 'POST':
        if not request.json:
            abort(400)
        try:
            if request.json.get('owner'):
                q = r.table("checks").get(checkid).update(
                    {"owner": str(request.json["owner"])}).run(rdb.conn)
            else:
                abort(400)
            if q["replaced"] != 0:
                return jsonify({'success': True})
            else:
                abort(404)
        except Exception as err:
            logger.error(err)
            abort(400)
    elif request.method == 'DELETE':
        try:
            q = r.table("checks").get(checkid).update({"owner": ""}).run(rdb.conn)
            if q["replaced"] != 0:
                return jsonify({'success': True})
            else:
                abort(404)
        except Exception as err:
            logger.error(err)
            abort(400)
def signup():
    email = request.form.get('email')
    name = request.form.get("name")
    password = None
    if not is_columbia_email(email):
        flash("Not a valid columbia ID!", "danger")
    else:
        email = email.strip()
        curr = r.table('users').filter(r.row["email"].eq(email)).run(g.rdb_conn)
        if curr.items:
            user = curr.items[0]
            password = user["password"]
        else:
            password = generate_password(email)
            inserted = r.table('users').insert({
                'email': email,
                'name': name,
                'password': password
            }).run(g.rdb_conn)
            if inserted["generated_keys"]:
                password = password
        resp = send_email(email, password)
        if resp.status_code == 200:
            flash("Email sent! Check your inbox for your login details", "success")
        else:
            flash("Error sending email. Please try again or contact admin", "danger")
    return redirect(url_for('login'))
def get_visualization(self, ds_id, v_id):
    dataset = r.table('datasets').get(ds_id).run(db.conn)
    visualization = r.table('visualizations').get(v_id).run(db.conn)
    data, canvas_data = goldflakes(visualization)
    return render_template('datasets/get_visualization.html',
                           dataset=dataset,
                           visualization=visualization,
                           data=data,
                           canvas_data=canvas_data)
def delete_document(tablename, doc_id, db_conn):
    """Remove the document from the database."""
    r.table(tablename).get(doc_id).delete().run(db_conn)
    return None
def sendPartitionCount(index, count):
    # print('index: ' + str(index))
    connection = createNewConnection()  # todo: use-connection-pool
    # print('count' + str(count))
    # r.table(RDB_TABLE).filter(r.row["partition"] == index).update({"count": count}).run(connection)
    r.table(RDB_TABLE).insert({"partition": index, "count": count, "time": time.time()}).run(connection)
    connection.close()
def sendPartition(iter):
    connection = createNewConnection()  # todo: use-connection-pool
    for record in iter:
        # r.table(RDB_TABLE).insert(record).run(connection)
        # print(record[1])
        r.table(RDB_TABLE).insert(json.loads(record[1])).run(connection)
    connection.close()
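# --- Added sketch (an assumption, not part of the snippets above): how sendPartition
# and sendPartitionCount are typically driven from Spark Streaming. The Kafka topic,
# broker address and batch interval below are placeholders.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils

sc = SparkContext(appName="rethinkdb-sink")
ssc = StreamingContext(sc, 10)

# Each Kafka record is a (key, value) tuple, so record[1] in sendPartition is the JSON payload.
stream = KafkaUtils.createDirectStream(ssc, ["events"], {"metadata.broker.list": "localhost:9092"})
stream.foreachRDD(lambda rdd: rdd.foreachPartition(sendPartition))

ssc.start()
ssc.awaitTermination()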
def createReaction(self, rdb):
    ''' This will create a reaction with the supplied information '''
    reactdata = {
        'name': self.name,
        'rtype': self.rtype,
        'uid': self.uid,
        'trigger': self.trigger,
        'frequency': self.frequency,
        'lastrun': 0,
        'data': self.data}
    if self.exists(reactdata['name'], reactdata['uid'], rdb):
        return 'exists'
    else:
        results = r.table('reactions').insert(reactdata).run(rdb)
        if results['inserted'] == 1:
            qdata = {}
            qdata['item'] = reactdata
            qdata['action'] = 'create'
            qdata['type'] = 'reaction'
            qdata['item']['rid'] = results['generated_keys'][0]
            q1 = r.table('dc1queue').insert(qdata).run(rdb)
            q2 = r.table('dc2queue').insert(qdata).run(rdb)
            return results['generated_keys'][0]
        else:
            return False
def save(self):
    self._run_callbacks('before_save')
    fields_dict = self.fields.as_dict()
    try:
        # Attempt update
        id_ = fields_dict['id']
        result = (r.table(self._table).get(id_).replace(
            r.row.without(r.row.keys().difference(list(fields_dict.keys())))
                 .merge(fields_dict),
            return_changes=True).run())
    except KeyError:
        # Resort to insert
        result = (r.table(self._table)
                  .insert(fields_dict, return_changes=True)
                  .run())
    if result['errors'] > 0:
        raise OperationError(result['first_error'])
    # RethinkDB 2.0 doesn't add the 'changes' key in the result if the
    # document hasn't been modified
    # TODO: Follow on the discussion at linkyndy/remodel#23 and change this
    # accordingly
    if 'changes' in result:
        # Force overwrite so that related caches are flushed
        self.fields.__dict__ = result['changes'][0]['new_val']
    self._run_callbacks('after_save')
def _signal(self, qry, locale, profile, country=None):
    page = 1
    start_time = time.time()
    print "Simply Hired"
    html = self._html(qry, page, locale, country)
    listings = self._listings(html)
    # print listings
    if listings.empty:
        return "none found"
    while 'day' not in listings.date.tolist()[-1]:
        page = page + 1
        html = self._html(qry, page, locale, country)
        listings = listings.append(self._listings(html))
        print page
    listings = listings[~listings.date.str.contains('day')]
    listings["keyword"] = qry
    listings = listings.drop_duplicates('company_name')
    listings['source'] = 'Simply Hired'
    listings["profile"] = profile
    # print listings
    companies = listings
    keys = [row.company_name.lower().replace(" ", "") + "_" + profile
            for i, row in companies.iterrows()]
    companies["company_key"] = keys
    companies["createdAt"] = arrow.now().timestamp
    conn = rethink_conn.conn()
    # r.table("hiring_signals").insert(companies.to_dict('r')).run(conn)
    r.table("triggers").insert(companies.to_dict('r')).run(conn)
    bitmapist.mark_event("function:time:simplyhired_job_scrape",
                         int((time.time() - start_time) * 10**6))
    rd.zadd("function:time:simplyhired_job_scrape",
            str((time.time() - start_time) * 10**6),
            arrow.now().timestamp)
def setSubscription(self, rdb):
    ''' This will set a user's subscription to the specified subscription plan '''
    # Get User id
    results = r.table('users').get(self.uid).update(
        {
            'acttype': self.acttype,
            'stripeid': self.stripeid,
            'stripe': self.stripe,
            'subscription': self.subscription,
            'subplans': self.subplans
        }
    ).run(rdb)
    if results:
        loginfo = {}
        loginfo['type'] = "setSubscription"
        loginfo['uid'] = self.uid
        loginfo['acttype'] = self.acttype
        loginfo['subplans'] = self.subplans
        loginfo['subscription'] = self.subscription
        loginfo['time'] = time.time()
        logresult = r.table('subscription_history').insert(loginfo).run(rdb)
        return True
    else:
        return False
def confirm_email(token):
    verify = verifyLogin(app.config["SECRET_KEY"],
                         app.config["COOKIE_TIMEOUT"],
                         request.cookies)
    if verify:
        user = User()
        user.config = app.config
        user.get("uid", verify, g.rdb_conn)
        if user.confirmed:
            flash("Account already confirmed. Thank you.", "success")
            return redirect(url_for("member.dashboard_page"))
        else:
            try:
                email = confirm_token(token)
                if user.email == email[0]:
                    r.table("users").get(verify).update({"confirmed": True}).run(g.rdb_conn)
                    flash("You have confirmed your account. Thanks!", "success")
                    return redirect(url_for("member.dashboard_page"))
                else:
                    flash("The confirmation link is invalid.", "danger")
                    return redirect(url_for("user.login_page"))
            except:
                flash("The confirmation link is invalid or has expired.", "danger")
                return redirect(url_for("user.login_page"))
    else:
        flash("Please Login.", "warning")
        return redirect(url_for("user.login_page"))
def validate_tx(self, tx):
    """Validate a transaction.

    Also checks if the transaction already exists in the blockchain. If it
    does, or it's invalid, it's deleted from the backlog immediately.

    Args:
        tx (dict): the transaction to validate.

    Returns:
        The transaction if valid, ``None`` otherwise.
    """
    if self.bigchain.transaction_exists(tx['id']):
        # if the transaction already exists, we must check whether
        # it's in a valid or undecided block
        tx, status = self.bigchain.get_transaction(tx['id'],
                                                   include_status=True)
        if status == self.bigchain.TX_VALID \
           or status == self.bigchain.TX_UNDECIDED:
            # if the tx is already in a valid or undecided block,
            # then it no longer should be in the backlog, or added
            # to a new block. We can delete and drop it.
            r.table('backlog').get(tx['id']) \
                .delete(durability='hard') \
                .run(self.bigchain.conn)
            return None

    tx_validated = self.bigchain.is_valid_transaction(tx)
    if tx_validated:
        return tx
    else:
        # if the transaction is not valid, remove it from the backlog
        r.table('backlog').get(tx['id']) \
            .delete(durability='hard') \
            .run(self.bigchain.conn)
        return None
def push_data(self, args):
    path = args['path']
    ds_id = args['ds_id']
    filename = os.path.basename(path)
    tmp_dir = str(int(time.time()))

    # Create temporary files
    os.chdir(config.DISCO_FILES)
    os.makedirs(tmp_dir)
    copy2(filename, "%s/%s" % (tmp_dir, filename))
    os.chdir(tmp_dir)
    command = 'split -n %s %s' % (config.DISCO_NODES, path)
    split_process = Popen(command.split(' '), stdout=PIPE)
    split_process.communicate()

    # Push data to cluster
    command = 'ddfs push data:%s ./xa?' % ds_id
    d = DDFS('disco://localhost')
    files = [("%s/%s/%s" % (config.DISCO_FILES, tmp_dir, filename), filename)
             for filename in os.listdir(".")
             if filename.startswith("xa")]
    d.push('data:%s' % ds_id, files)
    r.table('datasets').filter({
        'id': ds_id,
    }).update({
        'state': 'ready_for_crunching'
    }).run(db)
def delete_transactions(self):
    """
    Delete transactions from the backlog
    """
    # create bigchain instance
    b = Bigchain()
    stop = False

    while True:
        # try to delete in batch to reduce io
        tx_to_delete = []
        for i in range(1000):
            try:
                tx = self.q_tx_delete.get(timeout=5)
            except queue.Empty:
                break

            # poison pill
            if tx == 'stop':
                stop = True
                break

            tx_to_delete.append(tx)

        if tx_to_delete:
            r.table('backlog').get_all(*tx_to_delete).delete(durability='soft').run(b.conn)

        if stop:
            return
def __init__(self, database='apscheduler', table='jobs', client=None,
             pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
    super(RethinkDBJobStore, self).__init__()
    self.pickle_protocol = pickle_protocol

    if not database:
        raise ValueError('The "database" parameter must not be empty')
    if not table:
        raise ValueError('The "table" parameter must not be empty')

    if client:
        self.conn = maybe_ref(client)
    else:
        self.conn = r.connect(db=database, **connect_args)

    if database not in r.db_list().run(self.conn):
        r.db_create(database).run(self.conn)

    if table not in r.table_list().run(self.conn):
        r.table_create(table).run(self.conn)

    if 'next_run_time' not in r.table(table).index_list().run(self.conn):
        r.table(table).index_create('next_run_time').run(self.conn)

    self.table = r.db(database).table(table)
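# --- Added sketch (assumption): if this __init__ belongs to an APScheduler job
# store class named RethinkDBJobStore, registering it with a scheduler would look
# roughly like this; the job function and interval are placeholders.
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler()
# Persist scheduled jobs in RethinkDB instead of in memory.
scheduler.add_jobstore(RethinkDBJobStore(database='apscheduler', table='jobs'), alias='default')
scheduler.add_job(tick, 'interval', seconds=30)
scheduler.start()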
def main(port, process_id):
    conn = r.connect("localhost", port, db="samplesdb")
    value = list(
        r.table("processes").get_all(process_id)
        .eq_join("id", r.table("process2sample"), index="process_id")
        .zip()
        .eq_join("attribute_set_id", r.table("sample2attribute_set"),
                 index="attribute_set_id")
        .zip()
        .merge(lambda aset: {
            "attributes": r.table("attribute_set2attribute")
                .get_all(aset["attribute_set_id"], index="attribute_set_id")
                .eq_join("attribute_id", r.table("attributes"))
                .zip()
                .merge(lambda attr: {
                    "best_measure": r.table("measurements")
                        .get(attr["best_measure_id"]),
                    "history": r.table("best_measure_history")
                        .get_all(attr["id"], index="attribute_id")
                        .merge(lambda best: {
                            "measurement": r.table("measurements")
                                .get(best["measurement_id"]).default("")
                        })
                        .coerce_to("array"),
                    "measurements": r.table("attribute2measurement")
                        .get_all(attr["id"], index="attribute_id")
                        .eq_join("measurement_id", r.table("measurements"))
                        .zip()
                        .coerce_to("array")
                })
                .coerce_to("array")
        })
        .run(conn, time_format="raw"))
    print json.dumps(value[0])
def find_missing_formats(self, fmt, limit=None):
    """
    Find ebook versions missing supplied format. Ignores non-fiction ebooks.

    Each ebook should have several formats available for download (defined in
    config['EBOOK_FORMATS']). This method is called nightly by a celery task
    to ensure all defined formats are available.

    Objects are returned where supplied fmt is missing (ie, needs creating)

    Params:
        fmt (str)   Required ebook format that might be missing
        limit (int) Limit number of results returned

    Returns
        version_id: [
            {format, original_format, file_hash, ebook_id, s3_filename, uploaded},
            ...
        ]
    """
    q = (
        r.table("formats")
        .group(index="version_id")
        .filter({"is_fiction": True})
        .filter(
            lambda row: r.table("formats").filter({"format": fmt})["version_id"].contains(row["version_id"]).not_()
        )
        .eq_join("version_id", r.table("versions"), index="version_id")
        .zip()
        .pluck("format", "original_format", "file_hash", "ebook_id", "s3_filename", "uploaded")
    )
    if limit:
        q = q.limit(limit)
    return q.run()
def set_uploaded(self, file_hash, username, filename, isit=True):
    """ Mark an ebook as having been uploaded to S3 """
    r.table("formats").get(file_hash).update(
        {"uploaded": isit, "uploaded_by": username, "s3_filename": filename}
    ).run()
def post(self, *args):
    # post body must be a list
    # jobs = []
    # jobs = tornado.escape.json_decode(self.request.body)
    c = yield self.dbconnection
    jobs = json.loads(self.request.body)
    logger.info("Received %s" % (jobs,))
    ts = time.time()

    # Adding additional info to the json
    for data in jobs:
        data["jobstatus"] = "waiting"
        data["message"] = "waiting to be executed"
        data["created"] = datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        data["started"] = ""
        data["completed"] = ""
        data["returnstatus"] = ""
        data["stdout"] = ""
        data["stderr"] = ""
        json.dumps(data)

    yield r.table('jobs').run(c)
    rows = yield r.table("jobs").insert(jobs).run(c)

    ids = []
    # getting the generated keys from the DB
    for key in rows['generated_keys']:
        ids.append(key)
    self.finish(json.dumps({"id": ids}))
def createMonitor(self, rdb):
    ''' This will create a health check with the supplied domain information '''
    mondata = {
        'name': self.name,
        'ctype': self.ctype,
        'uid': self.uid,
        'url': self.url,
        'failcount': 0,
        'status': self.status,
        'data': self.data}
    if self.exists(mondata['name'], mondata['uid'], rdb):
        return 'exists'
    else:
        mondata['status'] = 'queued'
        results = r.table('monitors').insert(mondata).run(rdb)
        if results['inserted'] == 1:
            qdata = {}
            qdata['item'] = mondata
            qdata['action'] = 'create'
            qdata['type'] = 'monitor'
            qdata['item']['cid'] = results['generated_keys'][0]
            self.cid = results['generated_keys'][0]
            urlchk = self.genURL(self.cid, rdb)
            if urlchk:
                qdata['item']['url'] = self.url
                for dc in ["dc1queue", "dc2queue"]:
                    q1 = r.table(dc).insert(qdata).run(rdb)
                return results['generated_keys'][0]
        else:
            return False
def list_notes(hostname):
    """Retrieve a list of notes associated with a host.
    Or given {'user': '******', 'note': 'some message'} post a note."""
    if request.method == 'GET':
        try:
            # someday i should probably add offset support here and in the statelog
            limit = request.args.get('limit', 50, type=int)
        except ValueError:
            abort(400)
        notes = list(r.table("notes").filter({"hostname": hostname}).order_by(r.desc("ts")).limit(limit).run(rdb.conn))
        if notes:
            return jsonify({'notes': sorted(notes, key=lambda k: k['ts'])})
        else:
            abort(404)
    elif request.method == 'POST':
        if not request.json:
            abort(400)
        if not request.json.get("user") or not request.json.get("note"):
            abort(400)
        if not r.table("hosts").get_all(hostname, index="hostname").run(rdb.conn):
            abort(404)
        alerting = [x["check"] for x in
                    r.table("checks").filter({"hostname": hostname, "status": False}).run(rdb.conn)]
        q = r.table("notes").insert({'hostname': hostname,
                                     'user': request.json.get("user"),
                                     'note': request.json.get("note"),
                                     'ts': time(),
                                     'alerting': alerting}).run(rdb.conn)
        if q["inserted"] == 1:
            return jsonify({'success': True})
        else:
            logger.error(q)
            abort(500)
    else:
        abort(400)
def history(self, method=None, hid=None, time=None, start=None, limit=None, rdb=None):
    ''' This will pull a monitor's history from rethinkDB '''
    retdata = False
    if method == "mon-history":
        retdata = []
        monitors = r.table('history').filter(
            (r.row['cid'] == self.cid) &
            (r.row['starttime'] >= time) &
            (r.row['type'] == "monitor")).order_by(
            r.desc('starttime')).pluck(
            'starttime', 'id', 'cid', 'zone', 'status',
            'failcount', 'method', 'name').skip(start).limit(limit).run(rdb)
        for mon in monitors:
            mon['starttime'] = datetime.datetime.fromtimestamp(
                mon['starttime']).strftime('%Y-%m-%d %H:%M:%S')
            retdata.append(mon)
    elif method == "detail-history":
        retdata = []
        mon = r.table('history').get(hid).pluck(
            'starttime', 'cid', 'zone', 'status',
            'failcount', 'method', 'name').run(rdb)
        mon['reactions'] = []
        reactions = r.table('history').filter(
            (r.row['cid'] == self.cid) &
            (r.row['starttime'] == mon['starttime']) &
            (r.row['zone'] == mon['zone']) &
            (r.row['type'] == "reaction")).pluck(
            'name', 'rstatus', 'time', 'starttime').run(rdb)
        for react in reactions:
            react['starttime'] = datetime.datetime.fromtimestamp(
                react['starttime']).strftime('%Y-%m-%d %H:%M:%S')
            react['time'] = datetime.datetime.fromtimestamp(
                react['time']).strftime('%Y-%m-%d %H:%M:%S')
            mon['reactions'].append(react)
        mon['starttime'] = datetime.datetime.fromtimestamp(
            mon['starttime']).strftime('%Y-%m-%d %H:%M:%S')
        retdata.append(mon)
    elif method == "count":
        retdata = r.table('history').filter(
            (r.row['cid'] == self.cid) &
            (r.row['starttime'] >= time) &
            (r.row['type'] == "monitor")).count().run(rdb)
    return retdata
def check_suspended(checkid):
    """Suspend a given check"""
    if request.method == 'GET':
        check = r.table("checks").get(checkid).run(rdb.conn)
        if check:
            check['id'] = str(check['id'])
            return jsonify({'check': check})
        else:
            abort(404)
    elif request.method == 'POST':
        if not request.json:
            abort(400)
        try:
            if not request.json.get('suspended'):
                abort(400)
            if request.json.get('suspended') is True:
                q = r.table("checks").get(checkid).update({"suspended": True}).run(rdb.conn)
            elif request.json.get('suspended') is False:
                q = r.table("checks").get(checkid).update({"suspended": False}).run(rdb.conn)
            else:
                abort(400)
            if q['replaced'] != 0:
                return jsonify({'success': True})
            else:
                abort(404)
        except Exception as err:
            logger.error(err)
            abort(400)
def check_state(state):
    """List of checks in cluster in a given state [alerting/pending/suspended]"""
    if state == 'alerting':
        q = list(r.table("checks").get_all(False, index="status").run(rdb.conn))
        if q:
            return jsonify({'alerting': q})
        else:
            return jsonify({'alerting': []})
    elif state == 'pending':
        q = list(r.table("checks").get_all(True, index="pending").run(rdb.conn))
        if q:
            return jsonify({'pending': q})
        else:
            return jsonify({'pending': []})
    elif state == 'in_maintenance':
        q = list(r.table("checks").get_all(True, index="in_maintenance").run(rdb.conn))
        if q:
            return jsonify({'in_maintenance': q})
        else:
            return jsonify({'in_maintenance': []})
    elif state == 'suspended':
        q = list(r.table("checks").get_all(True, index="suspended").run(rdb.conn))
        if q:
            return jsonify({'suspended': q})
        else:
            return jsonify({'suspended': []})
    else:
        abort(400)
def put(self, character_id, webhook_id):
    # if request.token['character_id'] != character_id:
    #     abort(403)
    result = r.table(RDB_TABLE).get(webhook_id).run(db.conn)
    if result is None or result['character'] != character_id:
        abort(404)

    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, required=True,
                        help='Name of the new webhook')
    parser.add_argument('url', type=str, required=True,
                        help='URL for the Slack webhook')
    parser.add_argument('value', type=int, help='')
    parser.add_argument('ids', type=int, help='', action='append')
    args = parser.parse_args(strict=True)

    update = {
        'id': webhook_id,
        'character': character_id,
        'name': args['name'],
        'url': args['url'],
    }
    if args['value'] is not None:
        update['value'] = args['value']
    if args['ids'] is not None:
        update['ids'] = args['ids']

    result = r.table(RDB_TABLE).get(webhook_id).replace(update).run(db.conn)
    return update, 200
def check_next(checkid):
    """Reschedule a given check"""
    if request.method == 'GET':
        check = r.table("checks").get(checkid).run(rdb.conn)
        if check:
            check['id'] = str(check['id'])
            return jsonify({'check': check})
        else:
            abort(404)
    elif request.method == 'POST':
        if not request.json:
            abort(400)
        try:
            if not request.json.get('next'):
                abort(400)
            if request.json.get('next') == 'now':
                q = r.table("checks").get(checkid).update({"next": time() - 1}).run(rdb.conn)
            else:
                q = r.table("checks").get(checkid).update({"next": int(request.json["next"])}).run(rdb.conn)
            if q["replaced"] != 0:
                return jsonify({'success': True})
            else:
                abort(404)
        except Exception as err:
            logger.error(err)
            abort(400)
def load_ebook(self, ebook_id):
    # query returns dict with ebook->versions->formats nested document
    # versions are ordered by popularity
    try:
        ebook = (
            r.table("ebooks")
            .get(ebook_id)
            .merge(
                lambda ebook: {
                    "versions": r.table("versions")
                    .get_all(ebook["ebook_id"], index="ebook_id")
                    .order_by(r.desc("ranking"))
                    .coerce_to("array")
                    .merge(
                        lambda version: {
                            "formats": r.table("formats")
                            .get_all(version["version_id"], index="version_id")
                            .coerce_to("array")
                        }
                    )
                }
            )
            .run()
        )
    except RqlRuntimeError as e:
        if "Cannot perform merge on a non-object non-sequence `null`" in str(e):
            return None
        else:
            raise e
    return ebook
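# --- Illustration only (hypothetical ids and fields, not taken from the source):
# the approximate shape of the nested document load_ebook returns.
example_ebook = {
    "ebook_id": "af50...",           # fields from the "ebooks" table
    "title": "Example Title",
    "versions": [                    # ordered by descending "ranking"
        {
            "version_id": "9c1e...",
            "ranking": 42,
            "formats": [             # every row from "formats" for that version
                {"file_hash": "7b2d...", "format": "epub", "uploaded": True},
                {"file_hash": "1f9a...", "format": "mobi", "uploaded": False},
            ],
        },
    ],
}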
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
    return connection.run(
        r.table('votes').get_all([block_id, node_pubkey],
                                 index='block_and_voter').without('id'))


def count_backlog(connection):
    return connection.run(r.table('backlog', read_mode=READ_MODE).count())


def count_blocks(connection):
    return connection.run(r.table('bigchain', read_mode=READ_MODE).count())


def get_assets(connection, asset_ids):
    return connection.run(
        r.table('assets', read_mode=READ_MODE).get_all(*asset_ids))


def write_assets(connection, assets):
    return connection.run(
        r.table('assets').insert(assets, durability=WRITE_DURABILITY))


def get_block(connection, block_id):
    return connection.run(r.table('bigchain').get(block_id))


def write_block(connection, block_dict):
    return connection.run(
        r.table('bigchain').insert(r.json(serialize(block_dict)),
                                   durability=WRITE_DURABILITY))


def _get_asset_create_tx_query(asset_id):
    return r.table('bigchain', read_mode=READ_MODE) \
        .get_all(asset_id, index='transaction_id') \
        .concat_map(lambda block: block['block']['transactions']) \
        .filter(lambda transaction: transaction['id'] == asset_id)
import rethinkdb as r

# Connect, defaults are 'localhost', 28015, and the 'test' db.
conn = r.connect(host='50.116.0.34', port=28015, db='CheeseFlask')

# Create a new table
r.table_create('Cheese').run(conn)

# Insert some records into the table
r.table('Cheese').insert([{
    'flavor': 'Cheddar',
    'status': 'Available'
}, {
    'flavor': 'Swiss',
    'status': 'Gone'
}]).run(conn)

# Get all records back from the table using cursor
cheese_cursor = r.table('Cheese').run(conn)
for cheese in cheese_cursor:
    print(cheese)
# or just r.table('Cheese').run(conn) for display

# Now let's only find the cheeses that are moved
moved_cheese = r.table('Cheese').filter(r.row['status'] == 'Gone').run(conn)
for cheese in moved_cheese:
    print(cheese)
def get_votes_by_block_id(connection, block_id):
    return connection.run(
        r.table('votes', read_mode=READ_MODE).between(
            [block_id, r.minval], [block_id, r.maxval],
            index='block_and_voter').without('id'))
def post_plugin(release_all, target_name_only, target_version_only, upload_plugin):
    target_version_only = int(target_version_only)
    username = '******'
    password = '******'
    authorid = 68
    host = 'http://www.ggzs.me'
    fid = '55'  # on server
    sortid = '1'
    resource_type = 'plugin'
    if IS_TEST:
        host = 'http://192.168.1.45'
    logger.debug(host)
    args = {
        'loginurl': host + '/forum/member.php?mod=logging&action=login',
        'loginsubmiturl': host + '/forum/member.php?mod=logging&action=login&loginsubmit=yes&loginhash=LUPyq&inajax=1',
        'posturl': host + '/forum/forum.php?mod=post&action=newthread&fid=' + fid,
        'postsubmiturl': host + '/forum/forum.php?mod=post&action=newthread&fid=%s&extra=&topicsubmit=yes',
        'referer': host + '/forum/index',
        'newposturl': host + '/forum/forum.php?mod=post&action=newthread&fid=' + fid + '&extra=page%3D1%26filter%3Dsortid%26sortid%3D1&sortid=' + sortid,
        'upload_url': host + '/forum/misc.php?mod=swfupload&action=swfupload&operation=upload&fid=' + fid,
        'post_attach_thread': host + '/forum/forum.php?mod=post&action=newthread&fid=' + fid + '&extra=&topicsubmit=yes',
        'post_attach_load': host + '/forum/forum.php?mod=post&action=newthread&fid=' + fid + '&extra=page%3D1&sortid=' + sortid
    }
    dz = Discuz(username, password, args)

    rethinkdb.connect(host='ga-storage.lbesec.com', port=65306, db='plugins').repl()
    # all_plugin = rethinkdb.table('PluginDef').filter({"storageId": "d2015549-3fb3-4748-b170-c258635df41d"}).run()
    # all_plugin = rethinkdb.table('PluginDef').filter({"pluginName": "com.gameassist.autoplugin.com.com2us.inotia3.normal.freefull.google.global.android.common"}).run()
    # all_plugin = rethinkdb.table('PluginDef').filter({"pluginName": "com.gameassist.autoplugin.com.gameloft.android.ANMP.GloftA8HM"}).run()
    if release_all:
        all_plugin = rethinkdb.table('PluginDef').run()
    else:
        all_plugin = rethinkdb.table('PluginDef').filter({
            "targetName": target_name_only,
            "targetVersion": target_version_only
        }).run()

    mysql_conn = connections('Cursor')
    mysql_cursor = mysql_conn.cursor()
    storageId = ""
    rel_num = 0
    plugin_pkg_name = ''
    plugin_ver_code = ''
    msg = 101
    for def_doc in all_plugin:
        logger.debug(def_doc)
        plugin_pkg_name = def_doc['pluginName']
        plugin_ver_code = def_doc['pluginVersion']
        storageId = def_doc['storageId']
        plugin_name = def_doc['pluginName']
        cursor = rethinkdb.table('PluginStore').filter({"id": storageId}).run()
        target_name = def_doc['targetName']
        for doc in cursor:
            logger.debug('select PluginStore')
            tmp_path = r"/tmp/%s.tmp" % plugin_name
            apk_path = r"/tmp/%s.apk" % plugin_name
            apk_name = "%s.apk" % plugin_name
            open(tmp_path, "wb").write(doc['data'])
            base64.decode(open(tmp_path, "rb"), open(apk_path, "wb"))
            subject = plugin_name + "[" + str(def_doc['pluginVersion']) + "]"
            message = def_doc['label'].encode('UTF-8')
            format_subject = cgi.escape(subject)
            tcnt = mysql_cursor.execute(
                "select * from forum.pre_forum_thread "
                "where subject = \"%s\" and authorid = %d and fid = %d and displayorder >= 0"
                % (format_subject, authorid, int(fid)))
            logger.debug(subject)
            logger.debug(message)
            if tcnt > 0:
                # print 'has thread for this plugin : %s' % subject
                rel_num += 1
                msg = 102
                logger.debug('has thread for this plugin : %s' % subject)
                continue
            aid = dz.post_attach(apk_name, apk_path, authorid)
            dz.login()
            tid = dz.post_thread(aid, subject, message, sortid, resource_type)
            logger.debug("plugin_name[version]: %s aid: %s tid: %s" % (subject, aid, tid))
            msg = 199
            if tid != '0':
                rel_num += 1

    if mysql_conn:
        mysql_conn.close()

    upload_plugin['plugin_pkg_name'] = plugin_pkg_name
    upload_plugin['plugin_ver_code'] = plugin_ver_code
    upload_plugin['update_timestamp'] = int(time.time())
    upload_plugin['msg'] = msg
    upload_plugin['is_finished'] = 1
    logger.debug('POST SUCCESS')
def transaction_exists(self, transaction_id):
    response = r.table('bigchain').get_all(transaction_id, index='transaction_id').run(self.conn)
    return True if len(response.items) > 0 else False
import rethinkdb as r

# Connect, defaults are 'localhost', 28015, and the 'test' db.
conn = r.connect(host='50.116.0.34', port=28015, db='CheeseRealtime')

# Create a new table
r.table_create('Cheese').run(conn)

# Insert some records into the table
r.table('Cheese').insert([{
    'flavor': 'Cheddar',
    'status': 'Available'
}, {
    'flavor': 'Swiss',
    'status': 'Gone'
}]).run(conn)

# Start a changefeed
cheese_cursor = r.table('Cheese').changes().run(conn)
for cheese in cheese_cursor:
    print(cheese)
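# --- Added sketch (assumption: run from a second connection against the same
# table) showing what the changefeed above emits. Every matching write arrives
# as a dict with "old_val" and "new_val" keys.
conn2 = r.connect(host='50.116.0.34', port=28015, db='CheeseRealtime')
r.table('Cheese').filter(r.row['flavor'] == 'Swiss') \
    .update({'status': 'Available'}).run(conn2)
# The changefeed loop would then print something like:
# {'old_val': {'id': '...', 'flavor': 'Swiss', 'status': 'Gone'},
#  'new_val': {'id': '...', 'flavor': 'Swiss', 'status': 'Available'}}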
    }
    msg['zone'] = "Web API"
    jdata = json.dumps(msg)
    zsend.send(jdata)
    return True


# Run
# ------------------------------------------------------------------

# On Startup Synchronize transaction logs
recount = 0
for item in r_server.smembers("history"):
    record = json.loads(item)
    try:
        results = r.table("history").insert(record).run(rdb_server)
        success = True
    except:
        success = False
    if success:
        r_server.srem("history", item)
        recount = recount + 1
logger.info("Imported %d history records from cache to RethinkDB" % recount)

# On Startup Synchronize event logs
recount = 0
for item in r_server.smembers("events"):
    record = json.loads(item)
    try:
        results = r.table("events").insert(record).run(rdb_server)
def get_last_voted_block(self, node_pubkey):
    """Get the last voted block for a specific node.

    Args:
        node_pubkey (str): base58 encoded public key.

    Returns:
        The last block the node has voted on. If the node didn't cast
        any vote then the genesis block is returned.
    """
    try:
        # get the latest value for the vote timestamp (over all votes)
        max_timestamp = self.connection.run(
            r.table('votes', read_mode=self.read_mode).filter(
                r.row['node_pubkey'] == node_pubkey).max(
                r.row['vote']['timestamp']))['vote']['timestamp']

        last_voted = list(self.connection.run(
            r.table('votes', read_mode=self.read_mode).filter(
                r.row['vote']['timestamp'] == max_timestamp).filter(
                r.row['node_pubkey'] == node_pubkey)))
    except r.ReqlNonExistenceError:
        # return last vote if last vote exists else return Genesis block
        return self.get_genesis_block()

    # Now the fun starts. Since the resolution of timestamp is a second,
    # we might have more than one vote per timestamp. If this is the case
    # then we need to rebuild the chain for the blocks that have been
    # retrieved to get the last one.

    # Given a block_id, mapping returns the id of the block pointing at it.
    mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
               for v in last_voted}

    # Since we follow the chain backwards, we can start from a random
    # point of the chain and "move up" from it.
    last_block_id = list(mapping.values())[0]

    # We must be sure to break the infinite loop. This happens when:
    # - the block we are currently iterating is the one we are looking for.
    #   This will trigger a KeyError, breaking the loop
    # - we are visiting again a node we already explored, hence there is
    #   a loop. This might happen if a vote points both `previous_block`
    #   and `voting_for_block` to the same `block_id`
    explored = set()

    while True:
        try:
            if last_block_id in explored:
                raise exceptions.CyclicBlockchainError()
            explored.add(last_block_id)
            last_block_id = mapping[last_block_id]
        except KeyError:
            break

    return self.connection.run(
        r.table('bigchain', read_mode=self.read_mode).get(last_block_id))
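# --- Toy walk-through (hypothetical block ids, not from the source) of the
# chain rebuild above: two votes share one timestamp, mapping links each
# previous_block to the block voting for it, and following the links forward
# until a KeyError leaves the newest block id.
mapping = {"B1": "B2", "B2": "B3"}          # previous_block -> voting_for_block
last_block_id = list(mapping.values())[0]   # start anywhere, e.g. "B2"
explored = set()
while True:
    try:
        if last_block_id in explored:
            raise RuntimeError("cycle detected")
        explored.add(last_block_id)
        last_block_id = mapping[last_block_id]   # "B2" -> "B3", then KeyError
    except KeyError:
        break
assert last_block_id == "B3"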
# { "name": "Laura Roslin", "tv_show": "Battlestar Galactica", # "posts": [ # {"title": "The oath of office", "content": "I, Laura Roslin, ..."}, # {"title": "They look like us", "content": "The Cylons have the ability..."} # ] # }, # { "name": "Jean-Luc Picard", "tv_show": "Star Trek TNG", # "posts": [ # {"title": "Civil rights", "content": "There are some words I've known since..."} # ] # } # ]).run() # logging.debug(pprint.pformat(out)) logging.debug("Retrieve documents") cursor = r.table("authors").run() for document in cursor: logging.debug(pprint.pformat(document)) logging.debug("Filter documents based on a condition") cursor = r.table("authors").filter(r.row["name"] == "William Adama").run() for document in cursor: logging.debug(pprint.pformat(document)) logging.debug("Let's use filter again to retrieve all authors who have more than two posts:") cursor = r.table("authors").filter(r.row["posts"].count() > 2).run() for document in cursor: logging.debug(pprint.pformat(document)) logging.debug("Retrieve documents by primary key") document = r.db('test').table('authors').get('029418c4-6568-4189-9494-46220c61f69c').run()
def delete_todo(todo_id):
    return jsonify(r.table('todos').get(todo_id).delete().run(g.rdb_conn))
async def process_star(self, r, u):
    name = (r.emoji if type(r.emoji) == str else r.emoji.name)  # may as well
    count = r.count
    settings = database.get_settings(
        self.conn, r.message.guild)  # grab me all the settings
    if not settings:
        return  # oops, no settings
    if 'starboard_channel' not in settings.keys():
        return  # oops, no sb channel
    if r.message.author == u:
        return  # no heckin self stars

    min_count = database.check_setting(
        self.conn, r.message.guild, 'starboard_min_count') or 1
    sb_name = database.check_setting(
        self.conn, r.message.guild, 'starboard_emote') or '⭐'
    channel = database.check_setting(
        self.conn, r.message.guild, 'starboard_channel')
    channel = r.message.guild.get_channel(
        int(channel))  # get proper channel
    star_entry = list(
        rethink.table("starboard")
        .filter({"message_id": str(r.message.id)})
        .run(self.conn))

    if channel is None:
        return  # no more starboard channel, don't wanna throw an exception

    if name == sb_name:
        print(count)
        if count >= min_count:
            e = discord.Embed(colour=r.message.author.color)
            e.set_author(
                name=str(r.message.author.display_name),
                icon_url=r.message.author.avatar_url_as(format='png'))
            e.description = r.message.content
            e.timestamp = datetime.datetime.utcnow()
            if r.message.attachments:
                e.set_image(url=r.message.attachments[0].url)
            fallback = f'{self.star_type(count)} **{count}** <#{r.message.channel.id}>'
            if not star_entry:
                star_msg = await channel.send(fallback, embed=e)
                star_entry = {
                    "message_id": str(r.message.id),
                    "starboard_id": str(star_msg.id)
                }
                return rethink \
                    .table("starboard") \
                    .insert(star_entry) \
                    .run(self.conn)
            else:
                try:
                    star_msg = await channel.get_message(
                        star_entry[0]["starboard_id"])
                    return await star_msg.edit(content=fallback, embed=e)
                except discord.errors.NotFound:
                    new_star_msg = await channel.send(fallback, embed=e)
                    return rethink \
                        .table("starboard") \
                        .filter({"message_id": str(r.message.id)}) \
                        .update({"starboard_id": str(new_star_msg.id)}) \
                        .run(self.conn)
        elif star_entry:
            try:
                star_msg = await channel.get_message(
                    star_entry[0]["starboard_id"])
                return await star_msg.delete()
            except discord.errors.NotFound:
                return
def update_todo(todo_id):
    return jsonify(
        r.table('todos').get(todo_id).replace(request.json).run(g.rdb_conn))


def get_todos():
    selection = list(r.table('todos').run(g.rdb_conn))
    return json.dumps(selection)


def new_todo():
    inserted = r.table('todos').insert(request.json).run(g.rdb_conn)
    return jsonify(id=inserted['generated_keys'][0])


def patch_todo(todo_id):
    return jsonify(
        r.table('todos').get(todo_id).update(request.json).run(g.rdb_conn))


def get_all(cls):
    return list(r.table(cls._table).run(conn))


def get_todo(todo_id):
    todo = r.table('todos').get(todo_id).run(g.rdb_conn)
    return json.dumps(todo)


def find(cls, filters):
    for doc in r.table(cls.table_name) \
            .filter(filters) \
            .limit(99) \
            .run(db):
        yield doc
import rethinkdb as r

server_build_dir = argv[1]
if len(argv) >= 3:
    lang = argv[2]
else:
    lang = None

res = 0
with RethinkDBTestServers(4, server_build_dir=server_build_dir) as servers:
    port = servers.driver_port()
    c = r.connect(port=port)
    r.db_create('test').run(c)
    r.db('test').table_create('test').run(c)
    tbl = r.table('test')

    num_rows = randint(1111, 2222)
    print "Inserting %d rows" % num_rows
    documents = [{'id': i, 'nums': range(0, 500)} for i in xrange(0, num_rows)]
    chunks = (documents[i:i + 100] for i in range(0, len(documents), 100))
    for chunk in chunks:
        tbl.insert(chunk).run(c)
        print '.',
        stdout.flush()
    print "Done\n"

    if not lang or lang == 'py':
        print "Running Python"
        res = res | call(["python", "connections/cursor.py", str(port), str(num_rows)])
def get_by_key(cls, key):
    doc = r.table(cls.table_name) \
        .get(key) \
        .run(db)
    return cls(**doc)


def find_passenger_itineraries(cls, filters):
    return r.table('itineraries') \
        .filter(lambda doc: doc['passengers'].contains(
            lambda passenger: passenger['email'].eq(filters))) \
        .run(db)


def update(cls, obj):
    return r.table(cls.table_name) \
        .get(obj[cls.id_field]) \
        .update(obj) \
        .run(db)


def find_one(cls, filters):
    for doc in r.table(cls.table_name) \
            .filter(filters) \
            .limit(1) \
            .run(db):
        return doc