Example #1
    def download(self, output=True, **kwargs):
        '''
        download documents from table
        '''

        import time
        start_time = time.time()
        self.connect_rethink(**kwargs)
        select, present, interval = self.parse_subset_arguments(**kwargs)
        sequence_count = r.table(self.sequences_table).count().run()
        # print(sequence_count, "sequences in table:", self.sequences_table)
        virus_count = r.table(self.viruses_table).count().run()
        # print(virus_count, "viruses in table:", self.viruses_table)
        print(
            "Downloading documents from the sequence table \"{}\" (n={}) & virus table \"{}\" (n={})"
            .format(self.sequences_table, sequence_count, self.viruses_table,
                    virus_count))
        sequences = self.rethinkdb_download(self.sequences_table,
                                            self.viruses_table,
                                            presents=present,
                                            selections=select,
                                            intervals=interval,
                                            **kwargs)
        print("Downloaded " + str(len(sequences)) + " sequences")
        sequences = self.resolve_duplicates(sequences, **kwargs)
        if output:
            self.output(sequences, **kwargs)
        print("--- %s minutes to download ---" %
              ((time.time() - start_time) / 60))
Example #2
 def update_passage_categories(self,
                               database,
                               table,
                               preview,
                               index='accession',
                               **kwargs):
     print("Updating passage_category field via passage field")
     upload = flu_upload(**args.__dict__)
     updated_sequences = []
     cursor = r.table(table).run()
     counter = 0
     total = r.table(table).count().run()
     print("Analyzing " + str(total) + " sequence entries")
     for sequence in cursor:
         updated = upload.format_passage(sequence)
         if updated:
             updated_sequences.append(sequence)
         counter += 1
         if counter % 10000 == 0:
             print(str(counter) + " entries parsed")
     print("Found " + str(len(updated_sequences)) + " sequences to update")
     if not preview:
         print("Updating " + str(len(updated_sequences)) +
               " sequence passage categories in " + database + "." + table)
         del kwargs['overwrite']
         self.upload_to_rethinkdb(database,
                                  table,
                                  updated_sequences,
                                  overwrite=True,
                                  index='accession')
     else:
         print(
             "Preview of updates to be made, remove --preview to make updates to database"
         )
Example #3
 def __init__(self, parent):
     super(RelatedM2MObjectHandler, self).__init__(model_cls)
     # Parent field handler instance
     self.parent = parent
     # Returns all docs from model_cls which are referenced in join_model_cls
     self.query = (r.table(join_model_cls.table_name).get_all(
         self._get_parent_lkey(),
         index=mlkey).eq_join(mrkey,
                              r.table(model_cls.table_name),
                              index=rkey).map(lambda res: res['right']))
Example #4
 def sync_via_timestamp(self, table, documents, key='strain', **kwargs):
     '''
     '''
     print(key)
     for document in documents:
         result = r.table(table).get(document[key]).run()
         if result is None:
             r.table(table).insert(document).run()
         else:
             if document['timestamp'] > result['timestamp']:
                 r.table(table).insert(document, conflict='replace').run()
Example #5
 def upload_documents(self,
                      table,
                      documents,
                      database,
                      replace=False,
                      **kwargs):
     if replace:
         print("Deleting documents in database:" + database + "." + table)
         r.table(table).delete().run()
     print("Inserting ", len(documents), "documents")
     self.upload_to_rethinkdb(database, table, documents, **kwargs)
Example #6
def create_indexes():
    from .registry import model_registry, index_registry

    for model, index_set in index_registry.all().items():
        model_cls = model_registry.get(model)
        created_indexes = r.table(model_cls.table_name).index_list().run()
        for index in index_set:
            if index not in created_indexes:
                result = r.table(
                    model_cls.table_name).index_create(index).run()
                if result['created'] != 1:
                    raise RuntimeError(
                        'Could not create index %s for table %s' %
                        (index, model_cls.table_name))
        r.table(model_cls.table_name).index_wait().run()
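create_indexes() above assumes a model_registry and an index_registry defined elsewhere in the project. A minimal hypothetical sketch of the shapes it relies on (the class and field names here are assumptions, not the project's real code):

class _Registry:
    # Hypothetical registry: a thin dict wrapper exposing the methods used above.
    def __init__(self):
        self._items = {}

    def register(self, name, value):
        self._items[name] = value

    def get(self, name):
        return self._items[name]

    def all(self):
        return self._items

# index_registry.all() would then yield something like {'User': {'email', 'username'}},
# and model_registry.get('User') a model class carrying a table_name attribute.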
Example #7
    def download(self, subtype=None, output=True, **kwargs):
        '''
        download documents from table
        '''
        import time
        start_time = time.time()
        self.connect_rethink(**kwargs)
        self.vdb_download = vdb_download(database=self.database, virus=self.virus)
        select, present, interval, = self.vdb_download.parse_subset_arguments(**kwargs)

        if subtype is not None:
            select.append(('subtype', [subtype]))

        sequence_count = r.table(self.virus).count().run()
        print(sequence_count, "measurements in table:", self.virus)
        print("Downloading titer measurements from the table: " + self.virus)
        measurements = self.rethinkdb_download(self.virus, presents=present, selections=select, intervals=interval, **kwargs)
        print("Downloaded " + str(len(measurements)) + " measurements")
        for m in measurements:
            for k, v in m.items():
                try:
                    m[k] = v.upper()
                except AttributeError:
                    # non-string values are left unchanged
                    continue
        if output:
            self.output(measurements, **kwargs)
        print("--- %s minutes to download ---" % ((time.time() - start_time)/60))
Example #8
    async def get(self):
        """
        Response example
        {
            "createdAt": "2019-02-18T11:06:53.621000+08:00",
            "email": "*****@*****.**",
            "lastLoggedInAt": "2019-02-21T20:31:29.639000+08:00",
            "secretKey": "S:15cc65f5-3eeb-4fec-8131-355ad03653d4",
            "username": "******",
            "admin": false,
            "groups": [
                {
                    "admin": true,
                    "creator": "*****@*****.**",
                    "id": "1",
                    "name": "g4"
                }
            ]
        }
        """
        # get user groups
        gids = self.current_user.get("groups", {})
        groups = await db.run(
            r.expr(gids.keys()).map(lambda id: r.table("groups").get(id).
                                    without("members")))
        for g in groups:
            g['admin'] = (gids[g['id']] == 2)  # 2: admin, 1: user

        user = self.current_user.copy()
        user["groups"] = groups
        self.write_json(user)
Example #9
 def update_timestamp(self, table, document_changes, index, **kwargs):
     '''
     Update the timestamp field in the rethink table if changes have been made to the documents
     '''
     updated_documents = []
     if 'changes' in document_changes:
         for doc in document_changes['changes']:
             if doc['new_val'] is not None:
                 updated_documents.append({
                     index:
                     doc['new_val'][index],
                     'timestamp':
                     self.rethink_io.get_upload_timestamp()
                 })
     if len(updated_documents) > 0:
         r.table(table).insert(updated_documents, conflict='update').run()
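The document_changes argument consumed above is the result dict that RethinkDB returns from insert(..., return_changes=True). A minimal sketch of the shape update_timestamp relies on, with purely illustrative values:

# Illustrative result of insert(..., return_changes=True); only the keys
# used by update_timestamp() are shown, and the strain value is made up.
document_changes = {
    'inserted': 1,
    'replaced': 0,
    'errors': 0,
    'changes': [
        {'old_val': None, 'new_val': {'strain': 'A/example/1/2016'}},
    ],
}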
Example #10
def toolpage(toolname, tool_id):
    tab = ""
    tool = r.table('tools').get(tool_id).run(g.rdb_conn)
    page_title = tool['name']
    related_category = choice(tool['category'])

    related_tools = list(
        r.table('tools')
        .filter(lambda row: row['category'].filter(lambda attr: attr == related_category).count() > 0)
        .filter(lambda row: row['name'] != tool['name'])
        .pluck('name')
        .run(g.rdb_conn))
    if len(related_tools) == 0:
        related_tools = False

    return render_template('tool_page.html',
        tab=tab,
        page_title=page_title,
        tool=tool,
        related_category=related_category,
        related_tools=related_tools,
    )
Example #11
    def download(self, subtype=None, **kwargs):
        '''
        download documents from table
        '''
        start_time = time.time()
        self.connect_rethink(**kwargs)
        self.vdb_download = vdb_download(database=self.database,
                                         virus=self.virus)
        select, present, interval, = self.vdb_download.parse_subset_arguments(
            **kwargs)

        if subtype is not None:
            select.append(('subtype', [subtype]))

        measurement_count = r.table(self.virus).count().run()
        print(measurement_count, "measurements in table:", self.virus)
        print("Downloading titer measurements from the table: " + self.virus)
        measurements = self.rethinkdb_download(self.virus,
                                               presents=present,
                                               selections=select,
                                               intervals=interval,
                                               **kwargs)
        self.rename_strains_with_passage(measurements)
        print("Downloaded " + str(len(measurements)) + " measurements")
        print("--- %s minutes to download ---" %
              ((time.time() - start_time) / 60))
        return measurements
Example #12
 def table(self, name):
     """
     Returns:
         TableHelper
     """
     pkey = self.__tables.get(name, {}).get("primary_key")
     return TableHelper(self, r.table(name), pkey=pkey)
Example #13
 def upload_to_rethinkdb(self,
                         database,
                         table,
                         documents,
                         overwrite=False,
                         optimal_upload=200,
                         **kwargs):
     if len(documents) > optimal_upload:
         list_documents = [
             documents[x:x + optimal_upload]
             for x in range(0, len(documents), optimal_upload)
         ]
     else:
         list_documents = [documents]
     print("Uploading to rethinkdb in " + str(len(list_documents)) +
           " batches of " + str(optimal_upload) + " documents at a time")
     inserted = 0
     replaced = 0
     for list_docs in list_documents:
         try:
             if not overwrite:
                 document_changes = r.table(table).insert(
                     list_docs,
                     conflict=lambda id, old_doc, new_doc:
                     rethinkdb_updater(id, old_doc, new_doc),
                     return_changes=True).run()
             else:
                 document_changes = r.table(table).insert(
                     list_docs,
                     conflict=lambda id, old_doc, new_doc:
                     rethinkdb_updater_overwrite(id, old_doc, new_doc),
                     return_changes=True).run()
         except:
             raise Exception("Couldn't insert new documents into database",
                             database + "." + table)
         else:
             self.update_timestamp(table, document_changes, **kwargs)
             if document_changes['errors'] > 0:
                 print("Errors were made when inserting the documents",
                       document_changes['errors'])
                 print(document_changes['errors'])
             inserted += document_changes['inserted']
             replaced += document_changes['replaced']
     print("Ended up inserting " + str(inserted) + " documents into " +
           database + "." + table)
     print("Ended up updating " + str(replaced) + " documents in " +
           database + "." + table)
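rethinkdb_updater and rethinkdb_updater_overwrite are project helpers not shown here. RethinkDB's insert accepts a conflict-resolution function taking (id, old_doc, new_doc) and returning the document to store, so a minimal hypothetical sketch of the two resolvers could be:

def rethinkdb_updater(id, old_doc, new_doc):
    # Hypothetical resolver, evaluated server-side as ReQL:
    # existing values win, fields only present in the new document are added.
    return new_doc.merge(old_doc)

def rethinkdb_updater_overwrite(id, old_doc, new_doc):
    # Hypothetical resolver: incoming values overwrite the stored ones.
    return old_doc.merge(new_doc)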
Example #14
    def flush_one(self, tablename):
        self._log.info("flush "+tablename)
        try:
            connection = self.rethinkdb_waiting_connecton()
            tables = Rethinkdb.db(self._database).table_list().run(connection)

            if tablename not in tables:
                Rethinkdb.db(self._database).table_create(tablename).run(connection)
                self._log.info("create table "+tablename)
            values = self._buffer[tablename]
            
            Rethinkdb.table(tablename).index_wait().run(connection)
            Rethinkdb.table(tablename).insert(values).run(connection)
        except Exception as e:
            self._log.error("Can't insert data to {}:{}[{}][{}] ({})".format(self._ip, self._port, self._database, tablename, e))

        del self._buffer[tablename]
Example #15
 def report(self, name, metric, value, tags, id_):
     try:
         table_name = self._get_table_name(metric)
         self._create_table(metric)
         payload = self._get_payload(name, value, tags, id_)
         return r.table(table_name).insert(payload).run(self.connection)
     except Exception as e:
         logger = logging.getLogger(__name__)
         logger.exception(e)
Example #16
def delete_tool(tool_id):
    # todo: should be checking for user auth
    deleted = r.table('tools').get(tool_id).delete().run(g.rdb_conn)
    if deleted['deleted'] == 1:
        flash('That shit is gooooooone.', 'success')
    else:
        flash('It wasn\'t deleted! Oh my!', 'error')
    
    return redirect(url_for('homepage'))
Example #17
def get_query(channel, kwargs):
    query = r.table('messages').filter(r.row['channel'] == channel)
    if 'client' in kwargs:
        query = query.filter(r.row['client'] == kwargs['client'])
    if 'sender' in kwargs:
        query = query.filter(r.row['origin'] == kwargs['sender'])
    if 'pluck' in kwargs:
        query = query.pluck(kwargs['pluck'])
    return query
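A possible way to call get_query, assuming a repl()-registered default connection; the channel, sender, and field names are placeholders:

# Hypothetical usage: count and then fetch one sender's messages on a channel.
query = get_query('lobby', {'sender': 'alice', 'pluck': 'content'})
print(query.count().run())
for message in query.run():
    print(message)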
Example #18
def categorypage(category_name):
    tab = category_name
    page_title = category_name
    tools = r.table('tools').filter(
        lambda row: row['category'].filter(lambda attr: attr == category_name).count() > 0
    ).run(g.rdb_conn)
    return render_template('tool_listing.html', 
        tab=tab, 
        page_title=page_title, 
        tools = tools,
    )
Example #19
async def on_message(message):
    conn = r.connect("localhost", 28015, db="rationalBridge")
    messageData={ #Convert our message into a format for json serialization.
        'timestamp':utcify(message.timestamp),
        'orig_timestamp':utcify(message.timestamp),
        'tts':message.tts,
        'discord_type':str(message.type),
        'author_id': "discord:{}".format(message.author.id),
        'author_name': "discord:{}".format(message.author.name),
        'server_id': "discord:{}".format(message.author.server.id),
        'server_name': "discord:{}".format(message.author.server.name),  # This really should be the RethinkDB equivalent of a join, but I am lazy
        'content': message.content,
        'embeds': message.embeds,
        'room_id': "discord:{}".format(message.channel.id),
        'room_name': "discord:{}".format(message.channel.name),
    }
    if message.edited_timestamp:
        messageData['timestamp'] = utcify(message.edited_timestamp)
    if messageData['author_id'] != "discord:" + clientId:
        r.table("messages").insert(messageData).run(conn)
Example #20
 def rq(self, filter=False):
   if not filter:
     filter = self._filter()
   rq = r.table(self._db_table)
   if len(filter)>0:
     rq = rq.filter(filter)
   if self._order_by is not None:
     rq = rq.order_by(*[
         order if order[:1] != '-' else r.desc(order[1:])
         for order in self._order_by
     ])
   if self._limit != 0:
     rq = rq.limit(int(self._limit))
   return rq
Example #21
def pull_categories():

    # pull all categories from all tools
    # todo: gotta fix this, it's a terrible way to approach this
    categories = set()
    all_categories = r.table('tools').filter(lambda row: row['category'].count() > 0).pluck('category').run(g.rdb_conn)
    for categoryset in all_categories:
        for item in categoryset['category']:
            categories.add(item)

    return dict({'categories': sorted(categories)})
Example #22
def homepage():
    tab = ""
    page_title = "Latest Tools"
    page_subtitle = "Check out all the latest tools that have been added to awesometoolbox."
    tools = r.table('tools').order_by('name').run(g.rdb_conn)

    return render_template('tool_listing.html', 
        tab=tab, 
        tools = tools,
        page_title=page_title, 
        page_subtitle=page_subtitle,
    )
Example #23
 def rethinkdb_download(self,
                        sequence_table,
                        virus_table,
                        index='strain',
                        **kwargs):
     '''
     Default command merges documents from the sequence table and virus table
     Chain rethinkdb filter and has_fields commands to the default command
     Return documents from the database that are left after filtering
     '''
     command = r.table(sequence_table).merge(lambda sequence: r.table(
         virus_table).get(sequence[index]))  # via merge
     # command = r.table(sequence_table).eq_join("strain", r.table(virus_table)).zip() # via eq_join, testing suggests that this is slower than the above merge strategy
     command = self.add_present_command(command, **kwargs)
     command = self.add_selections_command(command, **kwargs)
     command = self.add_intervals_command(command, **kwargs)
     command = self.add_public_command(command, **kwargs)
     command = self.add_private_command(command, **kwargs)
     sequences = list(command.run())
     sequences = filter(None, sequences)
     return list(sequences)
Example #24
 def update_locations(self, database, table, preview, **kwargs):
     print("Updating location fields")
     viruses = list(r.table(table).run())
     self.define_countries("source-data/geo_synonyms.tsv")
     self.define_regions("source-data/geo_regions.tsv")
     viruses = self.reassign_new_locations(viruses, self.location_fields, **kwargs)
     if not preview:
         print("Updating " + str(len(viruses)) + " virus locations in " + self.database + "." + self.viruses_table)
         del kwargs['overwrite']
         self.upload_to_rethinkdb(database, table, viruses, overwrite=True, index='strain')
     else:
         print("Preview of updates to be made, remove --preview to make updates to database")
Example #25
def new_tool():
    
    page_title = "Add tool"

    if request.form:
        form = request.form
    else:
        form = ToolForm()

    if request.method == 'POST':
        
        if not request.form['name'] or not request.form['description'] or not request.form['category'] or not request.form['link']:
            flash('This form isn\'t very long. Go ahead and try again.', 'error')

        elif r.table('tools').filter({'name':request.form['name']}).count().run(g.rdb_conn) != 0:
            flash('That tool already seems to exist.', 'error')

        elif not url_exists(request.form['link']):
            flash('That url doesn\'t seem quite right.', 'error')

        else:
            slug = sub('[^A-Za-z0-9]+', '', request.form['name'].lower())

            insertion = r.table('tools').insert({
                       'name': request.form['name'], # todo: need to make this check for uniqueness
                'description': request.form['description'],
                   'category': request.form['category'].replace(", ", ",").split(','), 
                       'link': request.form['link'],
                       'slug': slug,
            }).run(g.rdb_conn)

            if insertion['errors'] == 0:
                flash('Thanks for adding your tool!', 'success')
                return redirect('/tool/' + str(insertion['generated_keys'][0]) + "/" + slug)
                    # return redirect(url_for('toolpage', tool_id = str(insertion['generated_keys'][0])))
                    # should be using url_for as above, but it kept throwing a builderror
            
            else: flash('AH F**K', 'error')

    return render_template('tool_form.html', form=form)
Example #26
  def save(self):
    # Try and be lazy about saving. Only save if our values have actually
    # changed
    if not self._changed:
      return False

    # Validate any defined fields and set any defaults
    doc = self.__dict__
    if len(self._meta) > 0:
      for key in self._meta.keys():
        setattr(self, key, self._meta[key].validate(doc[key]))

    # id being none means we should insert
    if self.id is None:
      if 'id' in doc:
        del doc['id']
      self.changed(False)
      return self.evaluate_insert(r.table(self._db_table).insert(doc).run())

    # id found; update
    self.changed(False)
    return self.evaluate_update(r.table(self._db_table).filter({'id': self.id}).update(self.__dict__).run())
Example #27
 def rethinkdb_download(self, table, **kwargs):
     '''
     Chain rethinkdb filter and has_fields commands onto the base table query
     Return documents from the database that are left after filtering
     '''
     command = r.table(table)
     command = self.vdb_download.add_present_command(command, **kwargs)
     command = self.vdb_download.add_selections_command(command, **kwargs)
     command = self.vdb_download.add_intervals_command(command, **kwargs)
     sequences = list(command.run())
     return list(sequences)
Example #28
    def post(self):
        """
            Authentication
        """

        try:
            post_email = self.get_argument("email")
        except MissingArgumentError as e:
            self.render(self.application.templates + "/login.html",
                        message="email missing")
            return

        try:
            post_password = self.get_argument("password")
        except MissingArgumentError as e:
            self.render(self.application.templates + "/login.html",
                        message="password missing")
            return

        _, email = parseaddr(post_email)
        if not email:
            self.render(self.application.templates + "/login.html",
                        message="wrong email")
            return

        if not post_password:
            self.render(self.application.templates + "/login.html",
                        message="empty password")
            return
        else:
            password = post_password

        feed = yield r.table('users').filter({
            "email": email
        }).run(self.application.dbconnection)
        yield feed.fetch_next()
        user = yield feed.next()

        if not user:
            self.render(self.application.templates + "/login.html",
                        message="user does not exist")
            return

        if not check_password(password, user['password']):
            self.render(self.application.templates + "/login.html",
                        message="password does not match")
            return

        self.set_current_user(user['id'])
        self.redirect("/")
Example #29
    def compile(self):
        q = self.table

        # Separate filter argument
        if self.filter_raw:
            q = self.table.filter(reduce(lambda a, b: a & b, self.filter_raw))

        # Joins cause a merge function to be executed
        if self._join:
            for k, v in self._join.iteritems():
                q = q.merge(lambda z: {
                    k: r.table(k).get(z[v])
                })
        return q
Example #30
    def save(self):
        self._run_callbacks('before_save')

        fields_dict = self.fields.as_dict()
        try:
            # Attempt update
            id_ = fields_dict['id']
            result = (r.table(self.table_name).get(id_).replace(r.row
                        .without(r.row.keys().difference(list(fields_dict.keys())))
                        .merge(fields_dict), return_changes='always').run())

        except KeyError:
            # Resort to insert
            result = (r.table(self.table_name).insert(fields_dict, return_changes=True)
                      .run())

        if result['errors'] > 0:
            raise OperationError(result['first_error'])

        # Force overwrite so that related caches are flushed
        self.fields.__dict__ = result['changes'][0]['new_val']

        self._run_callbacks('after_save')
Example #31
    def delete(self):
        self._run_callbacks('before_delete')

        try:
            id_ = getattr(self.fields, 'id')
            result = r.table(self.table_name).get(id_).delete().run()
        except AttributeError:
            raise OperationError('Cannot delete %r (object not saved or '
                                 'already deleted)' % self)

        if result['errors'] > 0:
            raise OperationError(result['first_error'])

        # Remove any reference to the deleted object
        for field in self.fields.related:
            delattr(self.fields, field)
        delattr(self.fields, 'id')

        self._run_callbacks('after_delete')
Example #32
async def watch_db():
    #    conn = await r.connect("localhost", 28015, db="rationalBridge")
    #    messages = r.table("messages").changes(include_initial=True)['new_val'].run(conn)
    messages = SpQuery(r.table("messages").changes()['new_val'],
                       "localhost",
                       28015,
                       db="rationalBridge").run()
    async for message in messages:
        serviceType, roomName = message['room_name'].split(":")
        if roomName.startswith("rd-") and serviceType == "discord":
            channels = []
            for server in client.servers:
                channels = channels + list(server.channels)
            for channel in channels:
                if channel.name == roomName and "discord:" + channel.id != message[
                        'room_id']:
                    msgStr = """```xml\n <{} {}={}>\n```\n""".format(
                        message['author_name'].split(":")[1],
                        message['server_name'], roomName)
                    msgStr = msgStr + message['content']
                    await client.send_message(channel, msgStr)
                    lastUser[message['room_name']] = message['author_id']
Example #33
def update_todo(todo_id):
    return jsonify(r.table('todos').get(todo_id).replace(request.json).run(g.rdb_conn))
Example #34
 def cq(cls):
     """RethinkDB query prefix for queries on the Document's DB table.
     """
     return r.table(cls._tablename)
Example #35
def foo(x):
    print("x")    # 'x' gets printed once
    return r.table('bar').insert(x)
Example #36
def patch_todo(todo_id):
    return jsonify(r.table('todos').get(todo_id).update(request.json).run(g.rdb_conn))
Example #37
 def process_item(self, item, spider):
     if self.conn:
         r.table(self.table_name).insert(item).run(self.conn)
     print(item)
     return item
Example #38
def print_feed():
    for change in r.table('messages').changes().run():
        print(change['new_val'])
        sys.stdout.flush()
Example #39
def get_todos():
    selection = list(r.table('todos').run(g.rdb_conn))
    return json.dumps(selection)
Example #40
def main(mmCIFPath, logPath):
    # Start time
    start = time.time()
    # Logging
    logging.basicConfig(
        filename=logPath,
        level=logging.DEBUG
    )

    # Connect to DB
    try:
        conn = r.connect()
        logging.info('Connected to DB')
        print('Connected to DB')
    except Exception as e:
        logging.debug(e)
        print(e)

    # Create DB and connect to it
    try:
        r.db_create('pdb_compounds').run()
        conn.use('pdb_compounds')
        logging.info('Created DB and connected to it')
        print('Created DB and connected to it')
    except Exception as e:
        logging.debug(e)
        print(e)

    # Create table
    try:
        r.db('pdb_compounds') \
            .table_create('compounds', primary_key='_chem_comp.id') \
            .run()
        logging.info('Created Table: compounds')
        print('Created Table: compounds')
    except Exception as e:
        logging.debug(e)
        print(e)

    # Iterate through the mmCIF files and write to DB
    for cifFile in glob.iglob(os.path.join(mmCIFPath, '*.cif')):
        try:
            data = MMCIF2Dict(cifFile)
            dataJSON = json.dumps(data)         # Creates JSON string
            dataDecoded = json.loads(dataJSON)  # Needed to create valid JSON
            # Insert the data into the DB
            result = r.table('compounds').insert(dataDecoded).run()
            logging.info(
                'Insertion: ID: {id} | Error: {error} | ' \
                'Inserted: {inserted}'.format(
                    id=data['_chem_comp.id'],
                    error=result['errors'],
                    inserted=result['inserted']
                )
            )
            print('Success: ', cifFile)
        except Exception as e:
            logging.debug(
                'File: {filename} | Error: {error}'.format(
                    filename=cifFile,
                    error=e
                )
            )
            print('Error: ', cifFile)
    # Close DB Connection
    conn.close()
    logging.info('Disconnected from DB')
    print('Disconnected from DB')

    # End time
    end = time.time()
    timeTaken = (end - start) / 60
    logging.info('Time Taken: {time} mins'.format(time=timeTaken))
    print('Time Taken: {time} mins'.format(time=timeTaken))
Example #41
def get_todo(todo_id):
    todo = r.table('todos').get(todo_id).run(g.rdb_conn)
    return json.dumps(todo)
Example #42
def new_todo():
    inserted = r.table('todos').insert(request.json).run(g.rdb_conn)
    return jsonify(id=inserted['generated_keys'][0])
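The Flask handlers above (and the other todo/tool views in this collection) assume that g.rdb_conn is opened for each request. A minimal sketch of that wiring, with the host, port, database name, and app object as assumptions:

from flask import Flask, g
from rethinkdb import r

app = Flask(__name__)

@app.before_request
def open_db_connection():
    # Hypothetical per-request connection; host and db name are placeholders.
    g.rdb_conn = r.connect(host='localhost', port=28015, db='todoapp')

@app.teardown_request
def close_db_connection(exc):
    # Close the per-request connection once the response is done.
    g.rdb_conn.close()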
Example #43
def get_posts_count(user):
    return (r.table('posts')
            .filter(lambda post: post['user_id'] == user.user_id)
            .count())
Example #44
# dependencies
from rethinkdb import r

# connecting to localhost and registering it as the default (repl) connection
r.connect('localhost', 28015).repl()

# creating a connection to the database
connection = r.connect(db='DB_NAME')

# creating a table
r.table_create("table_name").run(connection)

variable = r.table("table_name")

# inserting into the database
variable.insert({
    'id' : ---,
    'name' : ---,

}).run(connection)

# printing every document in the table
for _ in variable.run(connection):
    print(_)
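A more concrete variant of the snippet above, with the table name, field names, and values chosen purely for illustration:

from rethinkdb import r

# one connection, reused for every query below
connection = r.connect('localhost', 28015, db='test')

# insert a document and read the whole table back
# (assumes 'table_name' was already created, as in the snippet above)
r.table('table_name').insert({'id': 1, 'name': 'example'}).run(connection)
for document in r.table('table_name').run(connection):
    print(document)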
Example #45
def get_posts_count(user_id):
    return (r.table('posts')
            .filter({'user_id': user_id})
            .count())
Example #46
def test_query():
    """ test queries on rdb """

    ###################################################
    # Try optimization with secondary index

    """
    q = query.get_table_query(t3in)
    # cursor = q \
    #     .concat_map(lambda obj: obj['images']['transcriptions_split']) \
    #     .limit(2) \
    #     .run()

    # print("TEST", list(cursor))
    # exit(1)

    q = query.get_table_query(t2in)

    cursor = q \
        .concat_map(lambda obj: obj['steps']
            .concat_map(lambda steps: steps['data']
                .filter(lambda row:
                    (row['position'] == 1)
                    & (r.expr(["", None]).contains(row['value']).not_())
                )
                .map(lambda data:
                    {
                        'step': steps['step'],
                        'position': data['position'],
                        'value': data['value'],
                    }))
            .without('position')
            .filter(lambda row: row['value'].match('(?i)^c')) \
        ) \
        .limit(7).run()
    print("TEST", list(cursor))
    exit(1)
    """

    index = "title"
    search = "Limoges_33"
    q = query.get_table_query(t2in)

    # test = q.concat_map(
    #     lambda doc: doc['steps'].concat_map(
    #         lambda step: step['data']['value'])) \
    #     .run()
    # print("TEST", test)
    # exit(1)

    if index in q.index_list().run():
        print("Dropping")
        q.index_drop(index).run()
    print("Creating")
    q.index_create(index,
        lambda doc: doc['steps'].concat_map(
        lambda step: step['data']['value']),
        multi=True
        ) \
        .run()
    print("Waiting")
    q.index_wait(index).run()
    print("Done")

    print("Status", q.index_status().run())
    cursor = q.get_all(search, index=index).run()
    print("Test key:\n", list(cursor))
    exit(1)

    ###################################################
    document = '5fc8d3f4-59ee-43ca-9543-6241bb820882'
    extra = {
        'data': [
            {'value': 'Test', 'name': 'Personnages', 'position': 1, 'hash': '035ca5c7'},
            {'value': 'From paolo', 'name': 'Artistes', 'position': 2, 'hash': '01b1020a'}
        ],
        'latest_db_info':
            {'timestamp': 1458124102.565326, 'user_id': '1a33400a', 'ip': '109.89.122.137'},
        'step': 4
    }

    q = query.get_table_query(t2in)
    cursor = q.get(document).run()
    cursor['steps'].append(extra)
    print(cursor, type(cursor))

    changes = q.get(document).replace(cursor).run()
    #     .update(lambda row: row['steps'].append(extra))
    print(changes)
    # print(cursor.run())
    # print(list(cursor.run()))
    print("DEBUG"); exit(1)

    ##################################

    q = query.get_table_query(t4in)
    cursor = q \
        .filter({'type': 'welcome'}).without('type') \
        .eq_join("id", r.table(t3in), index="record") \
        .zip() \
        .filter({'type': 'welcome'})
    print(list(cursor.run()))
    print("DEBUG"); exit(1)

    cursor = q \
        .concat_map(
            lambda doc: doc['images'].has_fields(
                {'transcriptions': True}).map(
                    lambda image: {
                        'word': image['transcriptions_split'],
                        'record': doc['record'],
                    }
                )).distinct() \
        .filter(lambda mapped: mapped['word'].contains('grati')) \
        .run()

    # print(len(list(cursor)))
    # exit(1)
    for obj in cursor:
        print("TEST", obj)
        exit(1)
    exit(1)

    # # http://stackoverflow.com/a/34647904/2114395
    # cursor = q \
    #     .concat_map(
    #         lambda doc: doc['steps']
    #         .concat_map(lambda step: step['data']
    #                     .concat_map(lambda data:
    #                     [{'record': doc['record'], 'step': data}]))) \
    #     .filter(lambda doc:
    #             doc['step']['value'].match('mog').
    #             and_(doc['step']['name'].match('Numero de page'))) \
    #     .run()

    # for obj in cursor:
    #     print("TEST", obj)
    #     exit(1)

    cursor = q \
        .concat_map(r.row['steps']) \
        .filter(
            lambda row: row['step'] == 3
            ) \
        .concat_map(r.row['data']) \
        .filter(
            lambda row: row['position'] == 1
        ).pluck('value').distinct()['value'].run()
    print(list(cursor))
Example #47
def delete_todo(todo_id):
    return jsonify(r.table('todos').get(todo_id).delete().run(g.rdb_conn))
Example #48
# Copyright 2010-2012 RethinkDB, all rights reserved.

from rethinkdb import r

# Connections
conn = r.connect('newton', 5151)
conn = r.connect('newton')              # Default port
conn = r.connect()                      # Localhost, default port
conn = r.connect([('newton', 5151),
                  ('dr-doom', 5252)])   # A list in case of failure
  # Note: reconnect automatically or force them to call connect again?

# Get the entire table (the following queries are equivalent)
q = r.db('foo').table('bar')
q = r.table('foo.bar')

# Default database
conn = r.connect('newton', db='foo')
q = r.table('bar').run(conn)

conn.run(q)       # Either throws an error, or returns a Python iterator (or an element, see below)
# => iterator or scalar
conn.run([q1, q2, q3]) # Packing queries, asynchronicity of queries?
# => [a1, a2, a3]

# We can also start a query off a connection
r.db('foo').table('bar').run(conn)
q.run(conn)

# Default connection mode
r.set_default_conn(conn)
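Example #48 reads like an early API sketch (note the 2010-2012 copyright), and parts of it differ from the Python driver that eventually shipped; there, the "default connection mode" is spelled repl(), roughly:

# Rough equivalent of the default-connection idea in the released Python driver.
from rethinkdb import r

conn = r.connect('localhost', 28015)
conn.repl()                                 # registers conn as the default for run()
for row in r.db('foo').table('bar').run():  # run() with no argument uses it
    print(row)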
Example #49
def get_posts_count(user):
    return (r.table('posts')
            .filter(r.row['user_id'] == user.user_id)
            .count())
Example #50
 def get_table(cls):
     table = r.table(cls._table)
     return table
Example #51
def get_posts_count(user):
    return (r.table('posts')
            .filter(lambda post: post['user_id'] == user.age)
            .count())
Example #52
 def __init__(self, model_cls, query=None):
     self.model_cls = model_cls
     self.query = query or r.table(model_cls.table_name)
Example #53
def get_posts_count(user):
    return r.table('posts')
            .filter(lambda post: { post.user_id: table.filter(lambda t: iofrijoef) })
            .count()