Example #1
def _infraction_list_filtered(view, params=None, query_filter=None):
    params = params or {}
    query_filter = query_filter or {}
    active = parse_bool(params.get("active"))
    hidden = parse_bool(params.get("hidden"))
    expand = parse_bool(params.get("expand"))
    search = params.get("search")

    if active is not None:
        query_filter["active"] = active

    query = _merged_query(view, expand, query_filter)

    query = query.filter(
        # let all infractions through the filter if we want to
        # view hidden infractions as well as non-hidden ones.
        # otherwise, only accept non-hidden infractions
        # or those with no hidden property (for older infractions)
        lambda infr: rethinkdb.branch(hidden, True,
                                      (~infr["hidden"]).default(True)))

    if search is not None:
        query = query.filter(lambda row: rethinkdb.branch(
            row["reason"].eq(None), False, row["reason"].match(search)))

    query = query.order_by(*INFRACTION_ORDER)
    infractions = view.db.run(query.coerce_to("array"))

    return jsonify(infractions)
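
Example #1 leans on two helpers defined elsewhere in its project, parse_bool and _merged_query. A minimal sketch of what parse_bool plausibly does, mapping query-string values to booleans while letting None mean "parameter absent" (the behavior here is an assumption, not the project's actual code):

def parse_bool(value):
    # Hypothetical helper: None passes through so callers can tell
    # "parameter absent" apart from an explicit False.
    if value is None:
        return None
    return str(value).lower() in ('1', 'true', 'yes')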
Example #2
def go():
    with except_printer():
        r.expr({'err': r.error('bob')}).run(c)
    with except_printer():
        r.expr([1, 2, 3, r.error('bob')]).run(c)
    with except_printer():
        (((r.expr(1) + 1) - 8) * r.error('bob')).run(c)
    with except_printer():
        r.expr([1, 2, 3]).append(r.error('bob')).run(c)
    with except_printer():
        r.expr([1, 2, 3, r.error('bob')])[1:].run(c)
    with except_printer():
        r.expr({'a': r.error('bob')})['a'].run(c)
    with except_printer():
        r.db('test').table('test').filter(
            lambda a: a.contains(r.error('bob'))).run(c)
    with except_printer():
        r.expr(1).do(lambda x: r.error('bob')).run(c)
    with except_printer():
        r.expr(1).do(lambda x: x + r.error('bob')).run(c)
    with except_printer():
        r.branch(
            r.db('test').table('test').get(0)['a'].contains(r.error('bob')),
            r.expr(1), r.expr(2)).run(c)
    with except_printer():
        r.expr([1, 2]).reduce(lambda a, b: a + r.error("bob")).run(c)
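
The except_printer used throughout Example #2 is not shown; from its usage it is presumably a context manager that catches the expected query error and prints it instead of propagating. A plausible sketch (hypothetical, for illustration only):

import contextlib

@contextlib.contextmanager
def except_printer():
    # Run the body and print any exception rather than raising it,
    # so each failing query can be demonstrated in turn.
    try:
        yield
    except Exception as err:
        print(repr(err))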
Example #3
def infer(d):
    if d['type'] == 'Beacon':
        r.table("aps").insert({
            'bssid': d['mac'],
            'essid': d['essid'],
            "lastSeen": r.now()
        }, conflict="update").run()
    elif d['type'] == 'Probe':
        r.table('stations').insert({
            'mac': d['mac'], 'probes': [], 'aps': []}).run()
        if len(d['essid']):
            r.table('stations').get(d['mac']).update({
                'probes': r.branch(~r.row['probes'].contains(d['essid']),
                                   r.row['probes'].append(d['essid']),
                                   r.row['probes']),
                "lastSeen": r.now()
            }).run()
    elif d['type'] == 'Data':
        ap_mac, station_mac = order_data_frame(d)
        r.table('stations').insert({
            'mac': station_mac, 'probes': [], 'aps': []}).run()
        r.table('stations').get(station_mac).update({
            'aps': r.branch(~r.row['aps'].contains(ap_mac),
                            r.row['aps'].append(ap_mac),
                            r.row['aps']),
            "lastSeen": r.now()
        }).run()
Example #4
def read(conn, table, key, default=None):
    record = run_query(table.get(key), conn)
    if record is None:
        return None, default
    while record['intent'] is not None:
        record_xid = record['xid']
        tx = run_query(TX_TBL.get(record_xid), conn)
        if tx is None:
            tx_status = 'aborted'
        else:
            tx_status = tx['status']
        if tx_status == 'pending':
            if abort(conn, record_xid):
                tx_status = 'aborted'
            else:
                continue
        if tx_status == 'aborted':
            result = run_query(
                table.get(key).update(rethinkdb.branch(
                    XID_ROW.eq(record_xid) & INTENT_ROW.ne(None),
                    {'intent': None}, {}),
                                      return_changes='always'), conn)
            record = result['changes'][0]['new_val']
        elif tx['status'] == 'committed':
            result = run_query(
                table.get(key).update(rethinkdb.branch(
                    XID_ROW.eq(record_xid) & INTENT_ROW.ne(None), {
                        'intent': None,
                        'value': INTENT_ROW
                    }, {}),
                                      return_changes='always'), conn)
            record = result['changes'][0]['new_val']
    return record['xid'], record.get('value', default)
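
Example #4 (and Examples #29, #32, and #33 further down) assume module-level ReQL shortcuts that the snippet does not define. A sketch of plausible definitions; the names come from the examples, but the table name 'transactions' is a guess:

import rethinkdb

TX_TBL = rethinkdb.table('transactions')   # hypothetical table name
XID_ROW = rethinkdb.row['xid']             # transaction id stamped on a record
INTENT_ROW = rethinkdb.row['intent']       # uncommitted value, if any
STATUS_ROW = rethinkdb.row['status']       # 'pending' / 'committed' / 'aborted'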
Example #5
def go():
    with except_printer():
        r.connect(host="localhost", port="123abc")
    with except_printer():
        r.expr({'err': r.error('bob')}).run(c)
    with except_printer():
        r.expr([1,2,3, r.error('bob')]).run(c)
    with except_printer():
        (((r.expr(1) + 1) - 8) * r.error('bob')).run(c)
    with except_printer():
        r.expr([1,2,3]).append(r.error('bob')).run(c)
    with except_printer():
        r.expr([1,2,3, r.error('bob')])[1:].run(c)
    with except_printer():
        r.expr({'a':r.error('bob')})['a'].run(c)
    with except_printer():
        r.db('test').table('test').filter(lambda a: a.contains(r.error('bob'))).run(c)
    with except_printer():
        r.expr(1).do(lambda x: r.error('bob')).run(c)
    with except_printer():
        r.expr(1).do(lambda x: x + r.error('bob')).run(c)
    with except_printer():
        r.branch(r.db('test').table('test').get(0)['a'].contains(r.error('bob')), r.expr(1), r.expr(2)).run(c)
    with except_printer():
        r.expr([1,2]).reduce(lambda a,b: a + r.error("bob")).run(c)
Example #6
 def get_bar_data(question_data):
     return r.branch(
         (
             r.expr(question_data["response_format"] == Question().RESPONSE_MULTIPLE_CHOICE)
             | (question_data["response_format"] == Question().RESPONSE_RATING)
         ),
         r.branch(
             (question_data["response_format"] == Question().RESPONSE_MULTIPLE_CHOICE),
             {
                 "labels": question[1].distinct(),
                 "series": [
                     question[1].distinct().do(lambda val: question[1].filter(lambda foo: foo == val).count())
                 ],
             },
             (question_data["response_format"] == Question().RESPONSE_RATING),
             {
                 "labels": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                 "series": [
                     r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).map(
                         lambda val: question[1].filter(lambda foo: foo == val).count()
                     )
                 ],
             },
             [],
         ),
         [],
     )
Example #7
def create_table(progress, conn, db, table, create_args, sindexes):
    # Make sure that the table is ready if it exists, or create it
    r.branch(
        r.db(db).table_list().contains(table),
        r.db(db).table(table).wait(timeout=30),
        r.db(db).table_create(table, **create_args)).run(conn)

    if progress[0] is None:
        progress[0] = 0

    # Recreate secondary indexes - assume that any indexes that already exist are wrong
    # and create them from scratch
    indexes = r.db(db).table(table).index_list().run(conn)
    created_indexes = list()
    try:
        for sindex in sindexes[progress[0]:]:
            if isinstance(sindex, dict) and all(
                    k in sindex for k in ('index', 'function')):
                if sindex['index'] in indexes:
                    r.db(db).table(table).index_drop(sindex['index']).run(conn)
                r.db(db).table(table).index_create(
                    sindex['index'], sindex['function']).run(conn)
                created_indexes.append(sindex['index'])
            progress[0] += 1
        r.db(db).table(table).index_wait(r.args(created_indexes)).run(conn)
    except RuntimeError:
        raise RuntimeError("Sindex warning")
Example #8
    def claim_sites(self, n=1):
        result = (
            self.rr.table('sites').get_all(
                r.args(
                    r.db(self.rr.dbname).table(
                        'sites', read_mode='majority').between(
                            ['ACTIVE', r.minval], ['ACTIVE', r.maxval],
                            index='sites_last_disclaimed').order_by(
                                r.desc('claimed'), 'last_disclaimed').
                    fold({},
                         lambda acc, site: acc.merge(
                             r.branch(
                                 site.has_fields('job_id'),
                                 r.object(
                                     site['job_id'].coerce_to('string'), acc[
                                         site['job_id'].coerce_to('string')].
                                     default(0).add(1)), {})),
                         emit=lambda acc, site, new_acc: r.branch(
                             r.and_(
                                 r.or_(
                                     site['claimed'].not_(), site[
                                         'last_claimed'].lt(r.now().sub(60 * 60
                                                                        ))),
                                 r.or_(
                                     site.has_fields('max_claimed_sites').not_(
                                     ), new_acc[site['job_id'].coerce_to(
                                         'string')].le(site['max_claimed_sites'
                                                            ]))), [site['id']],
                             [])).limit(n))).
            update(
                # try to avoid a race condition resulting in multiple
                # brozzler-workers claiming the same site
                # see https://github.com/rethinkdb/rethinkdb/issues/3235#issuecomment-60283038
                r.branch(
                    r.or_(r.row['claimed'].not_(),
                          r.row['last_claimed'].lt(r.now().sub(60 * 60))), {
                              'claimed': True,
                              'last_claimed': r.now()
                          }, {}),
                return_changes=True)).run()

        self._vet_result(result,
                         replaced=list(range(n + 1)),
                         unchanged=list(range(n + 1)))
        sites = []
        for i in range(result["replaced"]):
            if result["changes"][i]["old_val"]["claimed"]:
                self.logger.warn(
                    "re-claimed site that was still marked 'claimed' "
                    "because it was last claimed a long time ago "
                    "at %s, and presumably some error stopped it from "
                    "being disclaimed",
                    result["changes"][i]["old_val"]["last_claimed"])
            site = brozzler.Site(self.rr, result["changes"][i]["new_val"])
            sites.append(site)
        if sites:
            return sites
        else:
            raise brozzler.NothingToClaim
Example #9
    def claim_sites(self, n=1):
        self.logger.trace('claiming up to %s sites to brozzle', n)
        result = (
            self.rr.table('sites').get_all(r.args(
                r.db(self.rr.dbname).table('sites', read_mode='majority')
                .between(
                    ['ACTIVE', r.minval], ['ACTIVE', r.maxval],
                    index='sites_last_disclaimed')
                .order_by(r.desc('claimed'), 'last_disclaimed')
                .fold(
                    {}, lambda acc, site: acc.merge(
                        r.branch(
                            site.has_fields('job_id'),
                            r.object(
                                site['job_id'].coerce_to('string'),
                                acc[site['job_id'].coerce_to('string')].default(0).add(1)),
                            {})),
                    emit=lambda acc, site, new_acc: r.branch(
                        r.and_(
                            r.or_(
                                site['claimed'].not_(),
                                site['last_claimed'].lt(r.now().sub(60*60))),
                            r.or_(
                                site.has_fields('max_claimed_sites').not_(),
                                new_acc[site['job_id'].coerce_to('string')].le(site['max_claimed_sites']))),
                            [site['id']], []))
                .limit(n)))
            .update(
                # try to avoid a race condition resulting in multiple
                # brozzler-workers claiming the same site
                # see https://github.com/rethinkdb/rethinkdb/issues/3235#issuecomment-60283038
                r.branch(
                    r.or_(
                      r.row['claimed'].not_(),
                      r.row['last_claimed'].lt(r.now().sub(60*60))),
                    {'claimed': True, 'last_claimed': r.now()},
                    {}),
                return_changes=True)).run()

        self._vet_result(
                result, replaced=list(range(n+1)),
                unchanged=list(range(n+1)))
        sites = []
        for i in range(result["replaced"]):
            if result["changes"][i]["old_val"]["claimed"]:
                self.logger.warn(
                        "re-claimed site that was still marked 'claimed' "
                        "because it was last claimed a long time ago "
                        "at %s, and presumably some error stopped it from "
                        "being disclaimed",
                        result["changes"][i]["old_val"]["last_claimed"])
            site = brozzler.Site(self.rr, result["changes"][i]["new_val"])
            sites.append(site)
        self.logger.debug('claimed %s sites', len(sites))
        if sites:
            return sites
        else:
            raise brozzler.NothingToClaim
Example #10
 def update_many(self, table, f, u, limit=None):
     conn = yield self.conn
     result = 0
     if limit:
         result = yield r.table(table).filter(f).limit(limit).update(
             lambda item: r.branch(f(item), u, {})).run(conn)
     else:
         result = yield r.table(table).filter(f).update(
             lambda item: r.branch(f(item), u, {})).run(conn)
     return result['replaced']
Example #11
def rethinkdb_date_greater(greater_date, comparison_date, relaxed_interval):
    return r.branch(
        r.lt(greater_date[0], comparison_date[0]), False,
        r.eq(greater_date[0], comparison_date[0]),
        r.branch(
            r.eq(greater_date[1], 'XX').or_(r.eq(comparison_date[1], 'XX')),
            relaxed_interval, r.lt(greater_date[1], comparison_date[1]), False,
            r.eq(greater_date[1], comparison_date[1]),
            r.branch(
                r.eq(greater_date[2], 'XX').or_(r.eq(comparison_date[2],
                                                     'XX')), relaxed_interval,
                r.lt(greater_date[2], comparison_date[2]), False, True), True),
        True)
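
Example #11 uses the variadic form of r.branch: r.branch(test1, val1, test2, val2, ..., default) behaves like an if/elif/else chain, returning the value paired with the first true test. A standalone illustration of that shape (not from the example's project):

# Equivalent to: 'big' if x > 10 else ('medium' if x > 5 else 'small')
size = r.branch(
    r.row['x'].gt(10), 'big',
    r.row['x'].gt(5), 'medium',
    'small')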
Example #12
 async def resetattempts(self, ctx, user: str = None):
     """Resets the attempts of a user"""
     if not user:
         user = ctx.author
     else:
         user = arg.get_server_member(ctx, user)
         if not user:
             return await ctx.send("I could not find that user :no_entry:")
     data = r.table("antiad").get(str(ctx.guild.id))
     if str(user.id) not in data["users"].map(lambda x: x["id"]).run(
             self.db, durability="soft"):
         return await ctx.send(
             "This user doesn't have any attempts :no_entry:")
     else:
         if data["users"].filter(
                 lambda x: x["id"] == str(user.id))[0]["attempts"].run(
                     self.db, durability="soft") == 0:
             return await ctx.send(
                 "This user doesn't have any attempts :no_entry:")
         else:
             await ctx.send("**{}** attempts have been reset.".format(user))
             data.update({
                 "users":
                 r.row["users"].map(lambda x: r.branch(
                     x["id"] == str(user.id), x.merge({"attempts": 0}), x))
             }).run(self.db, durability="soft")
Example #13
 def _bucket_batch_update_reql(self, bucket, batch):
     return self.rr.table(
         self.table).get(bucket).replace(lambda old: r.branch(
             old.eq(None), batch[bucket],
             old.merge({
                 "total": {
                     "urls":
                     old["total"]["urls"].add(batch[bucket]["total"]["urls"]
                                              ),
                     "wire_bytes":
                     old["total"]["wire_bytes"].add(batch[bucket]["total"][
                         "wire_bytes"]),
                 },
                 "new": {
                     "urls":
                     old["new"]["urls"].add(batch[bucket]["new"]["urls"]),
                     "wire_bytes":
                     old["new"]["wire_bytes"].add(batch[bucket]["new"][
                         "wire_bytes"]),
                 },
                 "revisit": {
                     "urls":
                     old["revisit"]["urls"].add(batch[bucket]["revisit"][
                         "urls"]),
                     "wire_bytes":
                     old["revisit"]["wire_bytes"].add(batch[bucket][
                         "revisit"]["wire_bytes"]),
                 },
             })))
Example #14
 def _grades_overtime(doc, val):
     return {
         'grade_data_averages':
         r.branch(
             ((doc.get_field('grades').count() > 0) &
              ((val['group'] % 10) != 4)), {
                  'percent_a':
                  val['reduction'].get_field('percent_a').avg().default(
                      None),
                  'percent_b':
                  val['reduction'].get_field('percent_b').avg().default(
                      None),
                  'percent_c':
                  val['reduction'].get_field('percent_c').avg().default(
                      None),
                  'percent_d':
                  val['reduction'].get_field('percent_d').avg().default(
                      None),
                  'percent_f':
                  val['reduction'].get_field('percent_f').avg().default(
                      None),
                  'percent_incomplete':
                  val['reduction'].get_field(
                      'percent_incomplete').avg().default(None),
                  'percent_c_minus_or_below':
                  val['reduction'].get_field(
                      'percent_c_minus_or_below').avg().default(None),
                  'average_grade':
                  val['reduction'].get_field('average_grade').avg().default(
                      None),
              }, None)
     }
Example #15
def rethinkdb_updater_overwrite(id, old_doc, new_doc):
    return (new_doc.keys().set_union(old_doc.keys()).map(lambda key:
        r.branch(old_doc.keys().contains(key).and_(new_doc.keys().contains(key).not_()),
            [key, old_doc[key]],
            new_doc.keys().contains(key).and_(old_doc.keys().contains(key).not_()),
            [key, new_doc[key]],
            r.branch(key.eq('sequences'),
                [key, old_doc['sequences'].set_union(new_doc['sequences'])],
                key.eq('number_sequences'),
                [key, old_doc['sequences'].set_union(new_doc['sequences']).count()],
                key.eq('timestamp').or_(key.eq('virus_inclusion_date')).or_(key.eq('sequence_inclusion_date')),
                [key, old_doc[key]],
                [key, new_doc[key]]
            )
        )
    )).coerce_to('object')
Example #16
def rethinkdb_updater_overwrite(id, old_doc, new_doc):
    return (new_doc.keys().set_union(old_doc.keys()).map(lambda key: r.branch(
        old_doc.keys().contains(key).and_(new_doc.keys().contains(key).not_()
                                          ), [key, old_doc[key]],
        new_doc.keys().contains(key).and_(old_doc.keys().contains(key).not_()),
        [key, new_doc[key]],
        r.branch(
            key.eq('sequences'), [
                key, old_doc['sequences'].set_union(new_doc['sequences'])
            ], key.eq('number_sequences'), [
                key, old_doc['sequences'].set_union(new_doc['sequences']).
                count()
            ],
            key.eq('timestamp').or_(key.eq('virus_inclusion_date')).or_(
                key.eq('sequence_inclusion_date')), [key, old_doc[key]],
            [key, new_doc[key]])))).coerce_to('object')
Example #17
 def _bucket_batch_update_reql(self, bucket, new):
     return self.rr.table(
         self.table).get(bucket).replace(lambda old: r.branch(
             old.eq(None), new,
             old.merge({
                 'total': {
                     'urls':
                     old['total']['urls'].add(new['total']['urls']),
                     'wire_bytes':
                     old['total']['wire_bytes'].add(new['total'][
                         'wire_bytes']),
                 },
                 'new': {
                     'urls':
                     old['new']['urls'].add(new['new']['urls']),
                     'wire_bytes':
                     old['new']['wire_bytes'].add(new['new']['wire_bytes']),
                 },
                 'revisit': {
                     'urls':
                     old['revisit']['urls'].add(new['revisit']['urls']),
                     'wire_bytes':
                     old['revisit']['wire_bytes'].add(new['revisit'][
                         'wire_bytes']),
                 },
             })))
Example #18
 def _merge(row):
     return {
         "active":
         rethinkdb.branch(
             _is_timed_infraction(row["type"]),
             rethinkdb.branch(
                 (row["closed"].default(False).eq(True)) |
                 (row["active"].default(True).eq(False)), False,
                 rethinkdb.branch(row["expires_at"].eq(None), True,
                                  row["expires_at"] > rethinkdb.now())),
             False),
         "closed":
         row["closed"].default(False),
         "_timed":
         _is_timed_infraction(row["type"])
     }
Example #19
def _update_legacy(conn, block_num, address, resource, data_type):
    """ Update the legacy sync tables (expansion by object type name)
    """
    try:
        data = {
            "id": address,
            "start_block_num": int(block_num),
            "end_block_num": int(sys.maxsize),
            **resource,
        }

        query = (
            r.table(TABLE_NAMES[data_type]).get(address).replace(
                lambda doc: r.branch(
                    # pylint: disable=singleton-comparison
                    (doc == None),  # noqa
                    r.expr(data),
                    doc.merge(resource),
                )))
        result = query.run(conn)
        if result["errors"] > 0:
            LOGGER.warning("error updating legacy state table:\n%s\n%s",
                           result, query)

    except Exception as err:  # pylint: disable=broad-except
        LOGGER.warning("_update_legacy %s error:", type(err))
        LOGGER.warning(err)
Example #20
 def _expanded_grades_stats(doc):
     return r.branch(((doc.get_field('grades').count() > 0)), {
             'percent_a': doc['grade_data'].get_field('percent_a').avg().default(None),
             'percent_b': doc['grade_data'].get_field('percent_b').avg().default(None),
             'percent_c': doc['grade_data'].get_field('percent_c').avg().default(None),
             'percent_d': doc['grade_data'].get_field('percent_d').avg().default(None),
             'percent_f': doc['grade_data'].get_field('percent_f').avg().default(None),
             'percent_incomplete': doc['grade_data'].get_field('percent_incomplete').avg().default(None),
             'percent_c_minus_or_below': doc['grade_data'].get_field('percent_c_minus_or_below').avg().default(None),
             'average_grade': doc['grade_data'].get_field('average_grade').avg().default(None),
             'GR_percent_a': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_a').avg().default(None),
             'GR_percent_b': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_b').avg().default(None),
             'GR_percent_c': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_c').avg().default(None),
             'GR_percent_d': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_d').avg().default(None),
             'GR_percent_f': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_f').avg().default(None),
             'GR_percent_incomplete': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_incomplete').avg().default(None),
             'GR_percent_c_minus_or_below': doc['grade_data'].filter({'level': 'GR'}).get_field('percent_c_minus_or_below').avg().default(None),
             'GR_average_grade': doc['grade_data'].filter({'level': 'GR'}).get_field('average_grade').avg().default(None),
             'UD_percent_a': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_a').avg().default(None),
             'UD_percent_b': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_b').avg().default(None),
             'UD_percent_c': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_c').avg().default(None),
             'UD_percent_d': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_d').avg().default(None),
             'UD_percent_f': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_f').avg().default(None),
             'UD_percent_incomplete': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_incomplete').avg().default(None),
             'UD_percent_c_minus_or_below': doc['grade_data'].filter({'level': 'UD'}).get_field('percent_c_minus_or_below').avg().default(None),
             'UD_average_grade': doc['grade_data'].filter({'level': 'UD'}).get_field('average_grade').avg().default(None),
             'LD_percent_a': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_a').avg().default(None),
             'LD_percent_b': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_b').avg().default(None),
             'LD_percent_c': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_c').avg().default(None),
             'LD_percent_d': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_d').avg().default(None),
             'LD_percent_f': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_f').avg().default(None),
             'LD_percent_incomplete': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_incomplete').avg().default(None),
             'LD_percent_c_minus_or_below': doc['grade_data'].filter({'level': 'LD'}).get_field('percent_c_minus_or_below').avg().default(None),
             'LD_average_grade': doc['grade_data'].filter({'level': 'LD'}).get_field('average_grade').avg().default(None),
         }, None)
Example #21
 def _general_overtime(doc, val):
     return {
         "total_fcqs": val["reduction"].count(),
         "total_forms_requested": val["reduction"].sum("forms_requested"),
         "total_forms_returned": val["reduction"].sum("forms_returned"),
         "denver_data_averages": r.branch(
             ((doc.get_field("campus").default(None) == "DN") & (val["group"] <= 20144)),
             {
                 "r_fairness": val["reduction"].get_field("denver_data").get_field("r_fairness").avg().default(None),
                 "r_presentation": val["reduction"]
                 .get_field("denver_data")
                 .get_field("r_presentation")
                 .avg()
                 .default(None),
                 "r_workload": val["reduction"].get_field("denver_data").get_field("r_workload").avg().default(None),
                 "r_diversity": val["reduction"]
                 .get_field("denver_data")
                 .get_field("r_diversity")
                 .avg()
                 .default(None),
                 "r_accessibility": val["reduction"]
                 .get_field("denver_data")
                 .get_field("r_accessibility")
                 .avg()
                 .default(None),
                 "r_learning": val["reduction"].get_field("denver_data").get_field("r_learning").avg().default(None),
             },
             None,
         ),
     }
Example #22
 def _general_overtime(doc, val):
     return {
         'total_fcqs':
         val['reduction'].count(),
         'total_forms_requested':
         val['reduction'].sum('forms_requested'),
         'total_forms_returned':
         val['reduction'].sum('forms_returned'),
         'denver_data_averages':
         r.branch(
             ((doc.get_field('campus').default(None) == 'DN') &
              (val['group'] <= 20144)), {
                  'r_fairness':
                  val['reduction'].get_field('denver_data').get_field(
                      'r_fairness').avg().default(None),
                  'r_presentation':
                  val['reduction'].get_field('denver_data').get_field(
                      'r_presentation').avg().default(None),
                  'r_workload':
                  val['reduction'].get_field('denver_data').get_field(
                      'r_workload').avg().default(None),
                  'r_diversity':
                  val['reduction'].get_field('denver_data').get_field(
                      'r_diversity').avg().default(None),
                  'r_accessibility':
                  val['reduction'].get_field('denver_data').get_field(
                      'r_accessibility').avg().default(None),
                  'r_learning':
                  val['reduction'].get_field('denver_data').get_field(
                      'r_learning').avg().default(None),
              }, None)
     }
Example #23
def drop_fork(conn, block_num):
    """Deletes all resources from a particular block_num
    """
    block_results = (
        r.table("blocks")
        .filter(lambda rsc: rsc["block_num"].ge(block_num))
        .delete()
        .run(conn)
    )

    resource_results = (
        r.table_list()
        .for_each(
            lambda table_name: r.branch(
                r.eq(table_name, "blocks"),
                [],
                r.eq(table_name, "auth"),
                [],
                r.table(table_name)
                .filter(lambda rsc: rsc["start_block_num"].ge(block_num))
                .delete(),
            )
        )
        .run(conn)
    )

    return {k: v + resource_results[k] for k, v in block_results.items()}
Example #24
def rql_highest_revs(query, field):
    """
    r.db("psh").table("images").groupedMapReduce(
      function(image) {
        return image('dockerfile')
      },
      function(image) {
        return {rev: image('rev'), id: image('id')}
      },
      function(left, right) {
        return r.branch(left('rev').gt(right('rev')), left, right)
      }
    ).map(
      function(group) {
        return group('reduction')("id")
      }
    )
    """
    ids = query.grouped_map_reduce(
        lambda image: image[field],
        lambda image: {"rev": image["rev"], "id": image["id"]},
        lambda left, right: r.branch(left["rev"]>right["rev"], left, right)
    ).map(lambda group: group["reduction"]["id"]).coerce_to("array").run()

    return query.filter(lambda doc: r.expr(ids).contains(doc["id"]))
Example #25
 def to_branch(self, test, error_msg):
     '''Turns a normal test into an ugly but helpful branch/error test'''
     return lambda v: r.branch(
         test(v),
         True,
         r.error(self.path + ' ' + error_msg),
     )
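
A usage sketch for Example #25's to_branch; the validator object and connection here are hypothetical, since the surrounding class is not shown:

# Wraps a plain test so that a failure raises a descriptive ReQL error.
check = validator.to_branch(
    lambda v: v.type_of().eq('NUMBER'), 'must be a number')
r.expr(5).do(check).run(conn)    # => True
r.expr('x').do(check).run(conn)  # raises an error carrying the path + message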
Example #26
def rql_highest_revs(query, field):
    """
    r.db("psh").table("images").groupedMapReduce(
      function(image) {
        return image('dockerfile')
      },
      function(image) {
        return {rev: image('rev'), id: image('id')}
      },
      function(left, right) {
        return r.branch(left('rev').gt(right('rev')), left, right)
      }
    ).map(
      function(group) {
        return group('reduction')("id")
      }
    )
    """
    ids = query.grouped_map_reduce(
        lambda image: image[field], lambda image: {
            "rev": image["rev"],
            "id": image["id"]
        },
        lambda left, right: r.branch(left["rev"] > right["rev"], left, right)
    ).map(lambda group: group["reduction"]["id"]).coerce_to("array").run()

    return query.filter(lambda doc: r.expr(ids).contains(doc["id"]))
Example #27
 def _expanded_grades_overtime(doc, val):
     return r.branch(((doc.get_field('grades').count() > 0) & ((val['group'] % 10) != 4)), {
                 'percent_a': val['reduction'].get_field('percent_a').avg().default(None),
                 'percent_b': val['reduction'].get_field('percent_b').avg().default(None),
                 'percent_c': val['reduction'].get_field('percent_c').avg().default(None),
                 'percent_d': val['reduction'].get_field('percent_d').avg().default(None),
                 'percent_f': val['reduction'].get_field('percent_f').avg().default(None),
                 'percent_incomplete': val['reduction'].get_field('percent_incomplete').avg().default(None),
                 'percent_c_minus_or_below': val['reduction'].get_field('percent_c_minus_or_below').avg().default(None),
                 'average_grade': val['reduction'].get_field('average_grade').avg().default(None),
                 'GR_percent_a': val['reduction'].filter({'level': 'GR'}).get_field('percent_a').avg().default(None),
                 'GR_percent_b': val['reduction'].filter({'level': 'GR'}).get_field('percent_b').avg().default(None),
                 'GR_percent_c': val['reduction'].filter({'level': 'GR'}).get_field('percent_c').avg().default(None),
                 'GR_percent_d': val['reduction'].filter({'level': 'GR'}).get_field('percent_d').avg().default(None),
                 'GR_percent_f': val['reduction'].filter({'level': 'GR'}).get_field('percent_f').avg().default(None),
                 'GR_percent_incomplete': val['reduction'].filter({'level': 'GR'}).get_field('percent_incomplete').avg().default(None),
                 'GR_percent_c_minus_or_below': val['reduction'].filter({'level': 'GR'}).get_field('percent_c_minus_or_below').avg().default(None),
                 'GR_average_grade': val['reduction'].filter({'level': 'GR'}).get_field('average_grade').avg().default(None),
                 'UD_percent_a': val['reduction'].filter({'level': 'UD'}).get_field('percent_a').avg().default(None),
                 'UD_percent_b': val['reduction'].filter({'level': 'UD'}).get_field('percent_b').avg().default(None),
                 'UD_percent_c': val['reduction'].filter({'level': 'UD'}).get_field('percent_c').avg().default(None),
                 'UD_percent_d': val['reduction'].filter({'level': 'UD'}).get_field('percent_d').avg().default(None),
                 'UD_percent_f': val['reduction'].filter({'level': 'UD'}).get_field('percent_f').avg().default(None),
                 'UD_percent_incomplete': val['reduction'].filter({'level': 'UD'}).get_field('percent_incomplete').avg().default(None),
                 'UD_percent_c_minus_or_below': val['reduction'].filter({'level': 'UD'}).get_field('percent_c_minus_or_below').avg().default(None),
                 'UD_average_grade': val['reduction'].filter({'level': 'UD'}).get_field('average_grade').avg().default(None),
                 'LD_percent_a': val['reduction'].filter({'level': 'LD'}).get_field('percent_a').avg().default(None),
                 'LD_percent_b': val['reduction'].filter({'level': 'LD'}).get_field('percent_b').avg().default(None),
                 'LD_percent_c': val['reduction'].filter({'level': 'LD'}).get_field('percent_c').avg().default(None),
                 'LD_percent_d': val['reduction'].filter({'level': 'LD'}).get_field('percent_d').avg().default(None),
                 'LD_percent_f': val['reduction'].filter({'level': 'LD'}).get_field('percent_f').avg().default(None),
                 'LD_percent_incomplete': val['reduction'].filter({'level': 'LD'}).get_field('percent_incomplete').avg().default(None),
                 'LD_percent_c_minus_or_below': val['reduction'].filter({'level': 'LD'}).get_field('percent_c_minus_or_below').avg().default(None),
                 'LD_average_grade': val['reduction'].filter({'level': 'LD'}).get_field('average_grade').avg().default(None),
             }, None)
Example #28
 def get_body(item):
     return {
         'join': r.branch(
             item.has_fields('body_id'),
             r.table('response_body').get(item['body_id']),
             None
         )
     }
Example #29
def commit(conn, xid, changes):
    result = run_query(
        TX_TBL.get(xid).update(
            rethinkdb.branch(STATUS_ROW.eq('pending'), {
                'status': 'committed',
                'changes': changes
            }, rethinkdb.error('precondition failed'))), conn)
    return result['errors'] == 0
Example #30
 def query(self, attr_names=None, entity_type=None, entity_id=None):
     op = rt.table(self.TABLE_NAME)
     if attr_names:
         op = op.pluck(attr_names)
     if entity_id:
         op = op.filter(lambda x: rt.branch(x['entity_id'] == entity_id, True, False))
     res = op.run(self.conn)
     return self.translate_to_ngsi(res)
Example #31
def do_fix(db, collection=None):

	if collection is None:
		bad_meta, bad_tables = find_spurious_meta_and_tables(r.table('__METADATA__').run(db), r.table_list().run(db))
		
		if len(bad_meta) == 0 and len(bad_tables) == 0:
			return 0, 0

		r.table('__METADATA__').get_all(*bad_meta).delete().run(db)

		for table in bad_tables:
			r.table_drop(table).run(db)

		return len(bad_meta), len(bad_tables)

	#else
	check_collection_name(collection)

	meta = r.table('__METADATA__').get(collection).run(db)

	if meta is None:
		raise BadCollection('collection {} does not exist.'.format(collection))

	doing_init = meta.get('doing_init')
	appending_filenames = meta.get('appending_filenames')

	if collection not in r.table_list().run(db):
		raise BadCollection("this is a spurious collection.")

	if doing_init:
		do_delete(db, collection)
		return 'doing_init'

	if appending_filenames:
		bad_samples = [k for k in meta['samples'] if meta['samples'][k] in appending_filenames]
		result = r.table(collection) \
					.filter(r.row['IDs'].keys().set_intersection(appending_filenames) != [])\
					.replace(lambda x: r.branch(x['IDs'].keys().set_difference(appending_filenames) == [],
						None, # delete record
						x.merge({
							'IDs': r.literal(x['IDs'].without(appending_filenames)),
							'QUALs': r.literal(x['QUALs'].without(appending_filenames)),
							'FILTERs': r.literal(x['FILTERs'].without(appending_filenames)),
							'INFOs': r.literal(x['INFOs'].without(appending_filenames)),
							'samples': r.literal(x['samples'].without(bad_samples)),
							}))).run(db)
		
		r.table('__METADATA__').get(collection)\
			.replace(lambda x: x.merge({
				'vcfs': r.literal(x['vcfs'].without(appending_filenames)),
				'samples': r.literal(x['samples'].without(bad_samples))
				}).without('appending_filenames')).run(db)

		return appending_filenames, bad_samples, result['deleted'], result['replaced']

	return None
Example #32
def abort(conn, xid):
    result = run_query(
        TX_TBL.get(xid).update(rethinkdb.branch(STATUS_ROW.eq('pending'),
                                                {'status': 'aborted'}, {}),
                               return_changes='always'), conn)
    if result['skipped'] == 1:
        return True
    else:
        return result['changes'][0]['new_val']['status'] == 'aborted'
Example #33
def clear(conn, xid, committed, table, keys):
    update = {'intent': None}
    if committed:
        update['value'] = INTENT_ROW
    for key in keys:
        run_query(
            table.get(key).update(
                rethinkdb.branch(
                    XID_ROW.eq(xid) & INTENT_ROW.ne(None), update, {})), conn)
Example #34
 def get_job(schedule):
     return {
         'latest_job': r.branch(
             schedule.has_fields('latest_job_id'),
             r.table('job')
              .get(schedule['latest_job_id'])
              .pluck('run_state', 'started_at', 'completed_at'),
             None
         ),
     }
Example #35
 def prop_check(v):
     props = []
     for prop, prop_schema in arg.items():
         sub_path = self.path + '/' + prop
         props.append(
             r.branch(
                 v.has_fields(prop),
                 r.do(v[prop], validate(prop_schema, sub_path)),
                 True,
             ))
     return r.and_(*props)
Example #36
 def _grades_stats(doc):
     return r.branch(((doc.get_field('grades').count() > 0)), {
             'percent_a': doc['grade_data'].get_field('percent_a').avg().default(None),
             'percent_b': doc['grade_data'].get_field('percent_b').avg().default(None),
             'percent_c': doc['grade_data'].get_field('percent_c').avg().default(None),
             'percent_d': doc['grade_data'].get_field('percent_d').avg().default(None),
             'percent_f': doc['grade_data'].get_field('percent_f').avg().default(None),
             'percent_incomplete': doc['grade_data'].get_field('percent_incomplete').avg().default(None),
             'percent_c_minus_or_below': doc['grade_data'].get_field('percent_c_minus_or_below').avg().default(None),
             'average_grade': doc['grade_data'].get_field('average_grade').avg().default(None),
         }, None)
Example #37
 def _grades_overtime(doc, val):
     return r.branch(((doc.get_field('grades').count() > 0) & ((val['group'] % 10) != 4)), {
             'percent_a': val['reduction'].get_field('percent_a').avg().default(None),
             'percent_b': val['reduction'].get_field('percent_b').avg().default(None),
             'percent_c': val['reduction'].get_field('percent_c').avg().default(None),
             'percent_d': val['reduction'].get_field('percent_d').avg().default(None),
             'percent_f': val['reduction'].get_field('percent_f').avg().default(None),
             'percent_incomplete': val['reduction'].get_field('percent_incomplete').avg().default(None),
             'percent_c_minus_or_below': val['reduction'].get_field('percent_c_minus_or_below').avg().default(None),
             'average_grade': val['reduction'].get_field('average_grade').avg().default(None),
         }, None)
Example #38
 def to_reql(self):
     '''Convert this context to a reql function'''
     # Emit soft checks, bunching together all checks that are
     # conditional on a particular type being asserted
     for soft_type, checks in self.soft_checks.items():
         # bind loop vars as defaults so each lambda keeps its own
         # soft_type/checks rather than the last iteration's values
         self.conjunction.append(lambda v, soft_type=soft_type, checks=checks: r.branch(
             v.type_of() == schema_to_reql_type[soft_type],
             conjunct(checks)(v),
             True  # if type doesn't match, it's ok
         ))
     return conjunct(self.conjunction)
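
Example #38's conjunct helper is not shown; judging from how it is called, it presumably folds a list of per-value checks into one r.and_. A guess at its shape (hypothetical):

def conjunct(checks):
    # AND together the results of applying every check to the value.
    return lambda v: r.and_(*[check(v) for check in checks])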
Example #39
def create_table(progress, conn, db, table, create_args, sindexes):
    # Make sure that the table is ready if it exists, or create it
    r.branch(r.db(db).table_list().contains(table),
        r.db(db).table(table).wait(timeout=30),
        r.db(db).table_create(table, **create_args)).run(conn)

    if progress[0] is None:
        progress[0] = 0

    # Recreate secondary indexes - assume that any indexes that already exist are wrong
    # and create them from scratch
    indexes = r.db(db).table(table).index_list().run(conn)
    created_indexes = list()
    for sindex in sindexes[progress[0]:]:
        if isinstance(sindex, dict) and all(k in sindex for k in ('index', 'function')):
            if sindex['index'] in indexes:
                r.db(db).table(table).index_drop(sindex['index']).run(conn)
            r.db(db).table(table).index_create(sindex['index'], sindex['function']).run(conn)
            created_indexes.append(sindex['index'])
        progress[0] += 1
    r.db(db).table(table).index_wait(r.args(created_indexes)).run(conn)
Example #40
def rethinkdb_date_greater(greater_date, comparison_date, relaxed_interval):
    return r.branch(
        r.lt(greater_date[0], comparison_date[0]),
        False,
        r.eq(greater_date[0], comparison_date[0]),
        r.branch(
            r.eq(greater_date[1], "XX").or_(r.eq(comparison_date[1], "XX")),
            relaxed_interval,
            r.lt(greater_date[1], comparison_date[1]),
            False,
            r.eq(greater_date[1], comparison_date[1]),
            r.branch(
                r.eq(greater_date[2], "XX").or_(r.eq(comparison_date[2], "XX")),
                relaxed_interval,
                r.lt(greater_date[2], comparison_date[2]),
                False,
                True,
            ),
            True,
        ),
        True,
    )
Example #41
 def get_pie_data(question_data):
     return r.branch(
         (
             r.expr(question_data["response_format"] == Question().RESPONSE_MULTIPLE_CHOICE)
             | (question_data["response_format"] == Question().RESPONSE_TRUE_OR_FALSE)
         ),
         question[1]
         .group(lambda r: r)
         .count()
         .ungroup()
         .map(lambda gr: {"name": gr["group"].coerce_to("string"), "value": gr["reduction"]}),
         [],
     )
Example #42
 def get_top_assets_by_s2(self, q, asset_num):
     return q.group('base_asset')\
         .map(r.row['close'])\
         .map(lambda x: {
             'count': 1,
             'sum': x,
             'min': x,
             'max': x,
             'diff': 0,  # M2,n: sum((val-mean)^2)
         })\
         .reduce(lambda a, b: {
             'count': r.add(a['count'], b['count']),
             'sum': r.add(a['sum'], b['sum']),
             'min': r.branch(a['min'].lt(b['min']), a['min'], b['min']),
             'max': r.branch(a['max'].gt(b['max']), a['max'], b['max']),
             'diff': r.add(
                 a['diff'],
                 b['diff'],
                 r.do(
                     r.sub(a['sum'].div(a['count']), b['sum'].div(b['count'])),
                     r.div(a['count'].mul(b['count']), a['count'].add(b['count'])),
                     lambda avgdelta, weight: r.mul(avgdelta, avgdelta, weight))),
         })\
         .ungroup()\
         .map(lambda g: {
             'asset': g['group'],
             'count': g['reduction']['count'],
             'sum': g['reduction']['sum'],
             'min': g['reduction']['min'],
             'max': g['reduction']['max'],
             's2': r.branch(
                 g['reduction']['count'].gt(1),
                 r.div(g['reduction']['diff'], g['reduction']['count'].sub(1)),
                 0),
         })\
         .merge(lambda d: r.do(
             r.div(d['sum'], d['count']),
             lambda avg: {'avg': avg}))\
         .order_by(r.desc('s2'))\
         .limit(asset_num)\
         .pluck('asset')\
         .map(lambda a: a['asset'])
Example #43
 def _general_overtime(doc, val):
     return {
         'total_fcqs': val['reduction'].count(),
         'total_forms_requested': val['reduction'].sum('forms_requested'),
         'total_forms_returned': val['reduction'].sum('forms_returned'),
         'denver_data_averages': r.branch(((doc.get_field('campus').default(None) == 'DN') & (val['group'] <= 20144)), {
             'r_fairness': val['reduction'].get_field('denver_data').get_field('r_fairness').avg().default(None),
             'r_presentation': val['reduction'].get_field('denver_data').get_field('r_presentation').avg().default(None),
             'r_workload': val['reduction'].get_field('denver_data').get_field('r_workload').avg().default(None),
             'r_diversity': val['reduction'].get_field('denver_data').get_field('r_diversity').avg().default(None),
             'r_accessibility': val['reduction'].get_field('denver_data').get_field('r_accessibility').avg().default(None),
             'r_learning': val['reduction'].get_field('denver_data').get_field('r_learning').avg().default(None),
         }, None)
     }
Example #44
 def get_data(question_data, question):
     return r.branch(
         (r.expr(question_data['response_format'] == Question().RESPONSE_MULTIPLE_CHOICE) | (question_data['response_format'] == Question().RESPONSE_RATING) | (question_data['response_format'] == Question().RESPONSE_TRUE_OR_FALSE)),
         r.branch(
             (question_data['response_format'] == Question().RESPONSE_MULTIPLE_CHOICE),
             {
                 'labels': question_data['options'],
                 'series': [r.expr(question[1]).reduce(lambda left, right: left.map(right, lambda leftVal, rightVal: leftVal + rightVal))]
             },
             (question_data['response_format'] == Question().RESPONSE_TRUE_OR_FALSE),
             {
                 'labels': question_data['options'],
                 'series': r.expr(question[1]).reduce(lambda left, right: left.map(right, lambda leftVal, rightVal: leftVal + rightVal))
             },
             (question_data['response_format'] == Question().RESPONSE_RATING),
             {
                 'labels': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                 'series': [r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).map(lambda val: question[1].filter(lambda foo: foo == val).count())]
             },
             []
         ),
         []
     )
Example #45
def get_init_movie():
    id_movie = request.args.get('id_movie', '')
    # See if we have cache this data
    movie = r.table("movie").get(id_movie).do( lambda movie:
        r.branch(
            (movie == None) | (~movie.has_fields("similar_movies_id")), # If we didn't find the movie or didn't find the similar movies
            movie,                                                      # We just return the movie/None
            movie.merge({                                               # Else we add a field similar_movies with the relevant data
                "similar_movies": movie["similar_movies_id"].map(lambda similar_movie_id:
                    r.table("movie").get(similar_movie_id)
                )
            })
        )).run( g.rdb_conn )

    if movie is None:
        # Movie not found
        # Fetch similar movies from Rotten Tomatoes and save it
        movie = fetch_movie(id_movie)
        if "id" in movie: # If id is defined, we have a valid object
            r.table("movie").insert( movie ).run( g.rdb_conn, noreply=True) # Dump it in the database
        else:
            answer = {"error": "Movie not found. API rate limit reached?"}
            return json.dumps(answer)

    if "similar_movies" not in movie:
        # Movie found or fetched but similar movies not available.
        http_answer = fetch_movie(movie["id"])

        if "movies" in http_answer: # We found some similar movies
            # Dump the similar movies data in the database
            r.table("movie").insert( http_answer["movies"] ).run( noreply=True)

            # Update the similar movie of the current movie
            similar_movies_id = list(map(get_id, http_answer["movies"]))
            r.table("movie").get(id_movie).update({"similar_movies_id": similar_movies_id }).run( g.rdb_conn, noreply=True)

            # Update the returned object
            movie["similar_movies"] = http_answer["movies"]
            return json.dumps(movie)
        else:
            # We could get the movie but not similar movies
            answer = {
                "error": "No similar movies returned. API rate limit reached?",
                "movie": movie
            }
            return json.dumps(answer)
    else:
        # Movie and its similar ones found
        return json.dumps(movie)
Example #46
def update_with_date_random(db_name, tbl_name, user_object, id):
    return r.db(db_name).table(tbl_name).get(id).replace(lambda doc: r.branch(
        (doc == None),
        doc.merge(doc).merge({
            'created_at':
            r.time(random.randrange(1995, 2015, 1), random.randrange(1, 12, 1),
                   random.randrange(1, 30, 1), 'Z')
        }),
        doc.merge(doc).merge(
            {
                'created_at':
                r.time(random.randrange(1995, 2015, 1),
                       random.randrange(1, 12, 1), random.randrange(1, 30, 1),
                       'Z')
            }, {'updated_at': r.now()}))).run()
Example #47
    def summary_status_query(self, sort, order_func, filter_func):
        # TODO: Fix me should work before group also to avoid elimination by max
        query = r.db(self.db_name).table(self.table_summary)\
            .filter(r.row["host"]
                    .match("^" + self.answers["host"]) & r.branch(filter_func, filter_func, True))\
            .limit(int(self.arguments.get("--rlimit")))\
            .order_by(order_func(sort))

        if self.arguments.get("--verbose"):
            print "Rl > ", query
        cursor = query.run()

        for document in cursor:
            item = self._reduction_filter(document, False)
            self._format_summary_item(item)
Example #48
def project_collaborators(project_id, only_owner=False):
    q = projects.get(project_id)["users"].coerce_to("array")
    if only_owner:
        q = q.map(lambda u: r.branch(u[1]["group"] == "owner", u, False)
                  ).filter(lambda x: x)
    q = q.map(lambda u: (u[1]["group"], accounts.get(u[0]).pluck(
        "account_id", "first_name", "last_name", "email_address")))
    for group, u in q.run():
        fn, ln = u['first_name'], u['last_name']
        try:
            eml = u["email_address"]
            # print("name:  %s %s" % (fn, ln))
            # print("email: %s" % eml)
            print("%s %s <%s>" % (fn, ln, eml))  # , group, u["account_id"]))
        except:
            print("FIXME no email for %s = %s %s" % (fn, ln, k))
Example #49
    def inventory_status_query(self, sort, order_func, filter_func):
        # TODO: Fix me should work before group also to avoid elimination by max
        query = r.db(self.db_name).table(self.table_summary)\
            .order_by("@timestamp")\
            .filter(r.branch(bool(filter_func), filter_func, True))\
            .group("host")\
            .max("@timestamp").ungroup()\
            .order_by(order_func(r.row['reduction'][sort]))

        if self.arguments.get("--verbose"):
            print "Rl > ", query
        cursor = query.run()

        for document in cursor:
            item = self._reduction_filter(document, True)
            self._format_summary_item(item)
Example #50
 def test_branch_1(self, conn):
     expected = [
         {'id': 'one', 'value': 5, 'over_20': False},
         {'id': 'three', 'value': 22, 'over_20': True},
         {'id': 'two', 'value': 12, 'over_20': False},
         {'id': 'four', 'value': 31, 'over_20': True}
     ]
     result = r.db('x').table('t').map(
         r.branch(
             r.row['value'] > 20,
             r.row.merge({'over_20': True}),
             r.row.merge({'over_20': False})
         )
     ).run(conn)
     result = list(result)
     assertEqUnordered(expected, result)
Example #51
    def _host_failure_query(self, hostname, limit, sort, order_func, filter_func, not_truncate):
        # Query
        query = r.db(self.db_name).table(self.table_failure)\
            .filter(r.row["host"].match("^" + str(hostname)) &
                    r.branch(bool(filter_func), filter_func, True))\
            .limit(int(limit))\
            .order_by(order_func(sort))

        if self.arguments.get("--verbose"):
            print "Rl > ", query

        cursor = query.run()

        # Loop through items and format them
        for document in cursor:
            self._format_failure_item(document, not_truncate)
Example #52
def project_collaborators(project_id, only_owner=False):
    q = projects.get(project_id)["users"].coerce_to("array")
    if only_owner:
        q = q.map(lambda u: r.branch(u[1]["group"] == "owner", u, False)).filter(lambda x: x)
    q = q.map(lambda u: (
        u[1]["group"],
            accounts.get(u[0]).pluck("account_id", "first_name", "last_name", "email_address")))
    for group, u in q.run():
        fn, ln = u['first_name'], u['last_name']
        try:
            eml = u["email_address"]
            # print("name:  %s %s" % (fn, ln))
            # print("email: %s" % eml)
            print("%s %s <%s>" % (fn, ln, eml))  # , group, u["account_id"]))
        except:
            print("FIXME no email for %s = %s %s" % (fn, ln, group))
Example #53
def hold(conn, event):
    snum = str(event.get('snum'))
    unum = str(event.get('unum'))

    smap = r.db(TIX).table(VENU).get(0).get_field(SMAP).run(conn)
    umap = r.db(TIX).table(VENU).get(0).get_field(UMAP).run(conn)
    smap[snum] = 'held'
    umap[snum] = unum
    result = r.db(TIX).table(VENU).get(0).update(lambda VENU:
        r.branch(
            VENU[SMAP][snum] == 'free',
            {SMAP: smap, UMAP: umap, TS: time.time()},
            {}
        )
    ).run(conn)
    if result:
        return result
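
Examples #8, #53, and #54 all use the same trick: putting r.branch inside update() makes the condition re-check and the write a single server-side operation on the document, avoiding the check-then-write race. A minimal generic sketch of the pattern (the table and field names here are illustrative):

# Atomically claim a row only if it is still unclaimed.
r.table('locks').get(lock_id).update(
    r.branch(r.row['claimed'].not_(),
             {'claimed': True, 'claimed_at': r.now()},
             {})).run(conn)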
Exemple #54
0
 def claim_site(self, worker_id):
     # XXX keep track of aggregate priority and prioritize sites accordingly?
     while True:
         result = (
             self.r.table("sites", read_mode="majority")
             .between(["ACTIVE", rethinkdb.minval], ["ACTIVE", rethinkdb.maxval], index="sites_last_disclaimed")
             .order_by(index="sites_last_disclaimed")
             .filter(
                 (rethinkdb.row["claimed"] != True) | (rethinkdb.row["last_claimed"] < rethinkdb.now() - 2 * 60 * 60)
             )
             .limit(1)
             .update(
                 # try to avoid a race condition resulting in multiple
                 # brozzler-workers claiming the same site
                 # see https://github.com/rethinkdb/rethinkdb/issues/3235#issuecomment-60283038
                 rethinkdb.branch(
                     (rethinkdb.row["claimed"] != True)
                     | (rethinkdb.row["last_claimed"] < rethinkdb.now() - 2 * 60 * 60),
                     {"claimed": True, "last_claimed_by": worker_id, "last_claimed": rethinkstuff.utcnow()},
                     {},
                 ),
                 return_changes=True,
             )
         ).run()
         self._vet_result(result, replaced=[0, 1], unchanged=[0, 1])
         if result["replaced"] == 1:
             if result["changes"][0]["old_val"]["claimed"]:
                 self.logger.warn(
                     "re-claimed site that was still marked 'claimed' "
                     "because it was last claimed a long time ago "
                     "at %s, and presumably some error stopped it from "
                     "being disclaimed",
                     result["changes"][0]["old_val"]["last_claimed"],
                 )
             site = brozzler.Site(**result["changes"][0]["new_val"])
         else:
             raise brozzler.NothingToClaim
         # XXX This is the only place we enforce time limit for now. Worker
         # loop should probably check time limit. Maybe frontier needs a
         # housekeeping thread to ensure that time limits get enforced in a
         # timely fashion.
         if not self._enforce_time_limit(site):
             return site
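The filter-then-branch double check above generalizes to any work queue: the filter alone is not atomic, so the predicate is re-tested inside update(), which is. A minimal sketch of the same compare-and-set pattern against a hypothetical jobs table:

import rethinkdb

def claim_one_job(conn, worker_id):
    result = (
        rethinkdb.table("jobs")
        .filter(rethinkdb.row["claimed"] != True)
        .limit(1)
        .update(
            # Re-check inside the atomic update so two workers cannot
            # both claim the row between filter and update.
            rethinkdb.branch(
                rethinkdb.row["claimed"] != True,
                {"claimed": True, "worker": worker_id},
                {},
            ),
            return_changes=True,
        )
        .run(conn)
    )
    if result["replaced"] == 1:
        return result["changes"][0]["new_val"]
    return None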
Exemple #55
0
 def _grades_overtime(doc, val):
     return {
         "grade_data_averages": r.branch(
             ((doc.get_field("grades").count() > 0) & ((val["group"] % 10) != 4)),
             {
                 "percent_a": val["reduction"].get_field("percent_a").avg().default(None),
                 "percent_b": val["reduction"].get_field("percent_b").avg().default(None),
                 "percent_c": val["reduction"].get_field("percent_c").avg().default(None),
                 "percent_d": val["reduction"].get_field("percent_d").avg().default(None),
                 "percent_f": val["reduction"].get_field("percent_f").avg().default(None),
                 "percent_incomplete": val["reduction"].get_field("percent_incomplete").avg().default(None),
                 "percent_c_minus_or_below": val["reduction"]
                 .get_field("percent_c_minus_or_below")
                 .avg()
                 .default(None),
                 "average_grade": val["reduction"].get_field("average_grade").avg().default(None),
             },
             None,
         )
     }
Exemple #56
0
def initialize_sindexes(sindexes, connection, db, table):
    # This assumes sindexes are never deleted; if one is deleted and the
    # table is loaded again, the generated names can collide
    sindex_count = len(r.db(db).table(table).index_list().run(connection))
    for sindex in sindexes:
        # Sindexes are named after their type (below) plus a unique number
        sindex_name = sindex + str(sindex_count)
        sindex_count += 1
        sindex_fn = None
        if sindex == "constant":
            sindex_fn = lambda x: 1
        elif sindex == "simple":
            sindex_fn = lambda x: r.branch(x["value"] % 2 == 0, "even", "odd")
        elif sindex == "complex":
            sindex_fn = lambda x: complex_sindex_fn(x, db, table)
        elif sindex == "long":
            sindex_fn = long_sindex_fn
        else:
            raise RuntimeError("Unknown sindex type")
        print "Adding sindex '%s'..." % sindex_name
        r.db(db).table(table).index_create(sindex_name, sindex_fn).run(connection)
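A hedged usage example for initialize_sindexes, assuming a local server and an existing test table; only the self-contained index types are used here, so the external helper functions are not needed.

conn = r.connect("localhost", 28015)
# Creates indexes such as constant0 and simple1; the numeric suffix
# depends on how many indexes the table already has.
initialize_sindexes(["constant", "simple"], conn, "test", "stress")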
Exemple #57
0
 def _bucket_batch_update_reql(self, bucket, new):
     return self.rr.table(self.table).get(bucket).replace(
         lambda old: r.branch(
             old.eq(None), new, old.merge({
                 'total': {
                     'urls': old['total']['urls'].add(new['total']['urls']),
                     'wire_bytes': old['total']['wire_bytes'].add(
                         new['total']['wire_bytes']),
                     },
                 'new': {
                     'urls': old['new']['urls'].add(new['new']['urls']),
                     'wire_bytes': old['new']['wire_bytes'].add(
                         new['new']['wire_bytes']),
                     },
                 'revisit': {
                     'urls': old['revisit']['urls'].add(
                         new['revisit']['urls']),
                     'wire_bytes': old['revisit']['wire_bytes'].add(
                         new['revisit']['wire_bytes']),
                     },
             })))
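The replace/branch call above is an upsert: when the bucket row does not exist yet, old is null and the branch writes new wholesale; otherwise the counters are summed field by field. A hedged sketch of the same idiom on a hypothetical flat counters table:

import rethinkdb as r

def add_counts(conn, bucket, delta):
    # delta must carry the primary key, e.g.
    # {"id": bucket, "urls": 3, "wire_bytes": 1024}
    return r.table("counters").get(bucket).replace(
        lambda old: r.branch(
            old.eq(None),
            delta,
            old.merge({
                "urls": old["urls"].add(delta["urls"]),
                "wire_bytes": old["wire_bytes"].add(delta["wire_bytes"]),
            }))).run(conn)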
Exemple #58
0
def get_movie():
    id_movie = request.args.get('id_movie', '')

    # Get the similar movies
    result = r.table("movie").get(id_movie).do(lambda movie:
        r.branch( (movie != None) & (movie.has_fields("similar_movies_id")),
            r.expr({"id":id_movie}).merge({"similar_movies": movie["similar_movies_id"].map(lambda similar_movie_id:
                r.table("movie").get(similar_movie_id)
            )}),
            None
        )).run( g.rdb_conn )

    if result is None:
        # Fetch similar movies from Rotten Tomatoes and save it
        debug("Get movie: Fetching data from Rotten tomatoes for %s." % (id_movie))

        url = "http://api.rottentomatoes.com/api/public/v1.0/movies/"+str(id_movie)+"/similar.json?apikey="+API_KEY
        http_answer = do_http_request(url, None)
        if "movies" in http_answer:
            # Rename the field movies
            http_answer["similar_movies"] = http_answer["movies"]
            http_answer.pop("movies", None)

            # Dump data in the database
            r.table("movie").insert( http_answer["similar_movies"] ).run( g.rdb_conn, noreply=True)

            # Update the original movie with its similar ones
            similar_movies_id = list(map(get_id, http_answer["similar_movies"]))
            r.table("movie").get(id_movie).update({"similar_movies_id": similar_movies_id }).run( g.rdb_conn, noreply=True)

            # Add the id of the original movie so we can keep track of it in the js callback
            http_answer['id'] = id_movie
            return json.dumps(http_answer)
        else:
            answer = {"error": "No similar movies returned. API rate limit reached?"}
            return json.dumps(answer)
    else:
        debug("Get movie: found cache for %s." % (id_movie))
        return json.dumps(result)
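The do/branch null check in get_movie is a general "compute from the row if it exists, otherwise signal a miss" idiom. A minimal hedged sketch against the same movie table, with a hypothetical title field:

# Returns the movie's title, or None when the id is not cached yet.
def title_or_none(conn, doc_id):
    return r.table("movie").get(doc_id).do(
        lambda row: r.branch(row != None, row["title"], None)).run(conn)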
Exemple #59
0
 def _bucket_batch_update_reql(self, bucket, batch):
     return self.rr.table(self.table).get(bucket).replace(
         lambda old: r.branch(
             old.eq(None), batch[bucket], old.merge({
                 "total": {
                     "urls": old["total"]["urls"].add(
                         batch[bucket]["total"]["urls"]),
                     "wire_bytes": old["total"]["wire_bytes"].add(
                         batch[bucket]["total"]["wire_bytes"]),
                     },
                 "new": {
                     "urls": old["new"]["urls"].add(
                         batch[bucket]["new"]["urls"]),
                     "wire_bytes": old["new"]["wire_bytes"].add(
                         batch[bucket]["new"]["wire_bytes"]),
                     },
                 "revisit": {
                     "urls": old["revisit"]["urls"].add(
                         batch[bucket]["revisit"]["urls"]),
                     "wire_bytes": old["revisit"]["wire_bytes"].add(
                         batch[bucket]["revisit"]["wire_bytes"]),
                     },
             })))
Exemple #60
0
def update_with_date_random(db_name, tbl_name, user_object, id):
    # Day capped at 28 so r.time() never gets an invalid date (e.g. Feb 30).
    def random_time():
        return r.time(random.randrange(1995, 2015), random.randrange(1, 13),
                      random.randrange(1, 29), 'Z')
    # Missing row: insert user_object (assumed to carry the primary key) with a
    # random created_at; existing row: refresh created_at and stamp updated_at.
    return r.db(db_name).table(tbl_name).get(id).replace(
        lambda doc: r.branch(
            doc.eq(None),
            r.expr(user_object).merge({'created_at': random_time()}),
            doc.merge({'created_at': random_time()}, {'updated_at': r.now()}))).run()
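A hedged usage sketch for update_with_date_random; the bare .run() in the helper relies on a repl() default connection, so one is set first (host and table names are illustrative).

import rethinkdb as r

r.connect("localhost", 28015, db="test").repl()
# Give document 42 a random created_at (and, if it already exists,
# a fresh updated_at).
update_with_date_random("test", "users", {"name": "demo"}, 42)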