Code example #1
import os

from faunadb import query as q
from faunadb.client import FaunaClient


def get_multiple(index, data=None):
    """
    Get multiple records by ID
    """
    try:
        serverClient = FaunaClient(
            secret=os.environ.get("FAUNA_SERVER_SECRET"))
        res_arr = []
        if data is None:
            res = serverClient.query(
                q.map_(q.lambda_("data", q.get(q.var("data"))),
                       q.paginate(q.match(q.index(index)))))
            res_arr.extend(res["data"])
        elif isinstance(data, list):
            for x in data:
                res = serverClient.query(
                    q.map_(q.lambda_("data", q.get(q.var("data"))),
                           q.paginate(q.match(q.index(index), q.casefold(x)))))
                res_arr.extend(res["data"])
        else:
            res = serverClient.query(
                q.map_(q.lambda_("data", q.get(q.var("data"))),
                       q.paginate(q.match(q.index(index), q.casefold(data)))))
            res_arr.extend(res["data"])

        arr = []
        for x in res_arr:
            x["data"]["ref_id"] = x["ref"].id()
            arr.append(x["data"])
        return arr
    except Exception as ex:
        raise ex
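For context, a minimal usage sketch of get_multiple; the index names and search terms below are hypothetical and assume matching indexes already exist in the target database.

# Hypothetical calls; index names and terms are assumptions.
all_products = get_multiple("all_products")                      # no term: every entry in the index
one_match = get_multiple("products_by_name", "widget")           # single term, casefolded before matching
several = get_multiple("products_by_name", ["widget", "gizmo"])  # list of terms, queried one by one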
Code example #2
    def test_lambda_expr(self):
        self.assertJson(query.lambda_("a", query.var("a")),
                        '{"expr":{"var":"a"},"lambda":"a"}')
        self.assertJson(
            query.lambda_(["a", "b"],
                          query.add(query.var("a"), query.var("b"))),
            '{"expr":{"add":[{"var":"a"},{"var":"b"}]},"lambda":["a","b"]}')
Code example #3
def processHeroInformation(match_data):
    radiant_win = match_data['result']['radiant_win']
    players = match_data['result']['players']

    win_heros = []
    heros_in_game = []

    for player in players:
        win_flag = getWinFlag(player, radiant_win)

        heros_in_game.append(player['hero_id'])
        if win_flag:
            win_heros.append(player['hero_id'])

    hero_list = client.query(
        q.map_(
            q.lambda_(
                'hero',
                q.get(q.ref(q.collection('heroes'), q.var('hero')))
            ),
            heros_in_game
        )
    )

    update_hero_list = []
    for hero_info in hero_list:
        ref = hero_info['ref']
        data = hero_info['data']

        if data['id'] in win_heros:
            data['wins'] += 1
            data['games'] += 1
        else:
            data['games'] += 1

        for player in players:
            if player['hero_id'] == data['id']:
                getItemsData(player, data)

        update_info = {}
        update_info['ref'] = ref
        update_info['data'] = data

        update_hero_list.append(update_info)

    client.query(
        q.map_(
            q.lambda_(
                'hero',
                q.update(
                    q.select(['ref'], q.var('hero')),
                    { 'data': q.select(['data'], q.var('hero')) }
                )
            ),
            update_hero_list
        )
    )
Code example #4
def updatePairInformationForTeam(hero_ids, team_win):
    key_list = []
    for k in range(0, len(hero_ids)):
        for j in range(k + 1, len(hero_ids)):
            if hero_ids[k] < hero_ids[j]:
                key = format(hero_ids[k], '03d') + format(hero_ids[j], '03d')
            else:
                key = format(hero_ids[j], '03d') + format(hero_ids[k], '03d')
            key_list.append(key)

    hero_data_list = []  # ensure this is defined even if the query below fails
    try:
        hero_data_list = client.query(
            q.map_(
                q.lambda_(
                    'hero_pair',
                    q.get(q.ref(q.collection('hero_pairs'), q.var('hero_pair')))
                ),
                key_list
            )
        )
    except Exception as e:
        logging.info(e)
        logging.info(key_list)

    hero_team_list = []
    for hero_data in hero_data_list:
        hero_team_dictionary = {}
        hero_pair_ref = hero_data['ref']
        hero_pair_data = hero_data['data']
        hero_pair_data['games'] += 1
        if team_win:
            hero_pair_data['wins'] += 1
        hero_team_dictionary['ref'] = hero_pair_ref
        hero_team_dictionary['data'] = hero_pair_data
        hero_team_list.append(hero_team_dictionary)

    client.query(
        q.map_(
            q.lambda_(
                'hero_pair',
                q.update(
                    q.select(['ref'], q.var('hero_pair')),
                    {'data': q.select(['data'], q.var('hero_pair'))}
                )
            ),
            hero_team_list
        )
    )
Code example #5
def processTemporalHeroInformation(match_data):
    radiant_win = match_data['result']['radiant_win']

    players = match_data['result']['players']
    temporal_hero_list = []
    for player in players:
        win_flag = False
        if player['player_slot'] <= 4 and radiant_win:
            win_flag = True
        elif player['player_slot'] > 4 and not radiant_win:
            win_flag = True

        temporal_hero = {}
        temporal_hero['id'] = player['hero_id']
        temporal_hero['win'] = win_flag
        temporal_hero['match_start_time'] = pytz.utc.localize(datetime.utcfromtimestamp(match_data['result']['start_time']))
        temporal_hero_list.append(temporal_hero)

    client.query(
        q.map_(
            q.lambda_(
                'temporal_hero',
                q.create(q.collection('heroes_temporal'), { "data": q.var('temporal_hero') })
            ),
            temporal_hero_list
        )
    )
Code example #6
File: subscribe.py Project: Awesome-RJ/anime_alarm
def unsubscribe(update: Update, context: CallbackContext):
    user = User(update.effective_chat.id)
    try:
        animes_watched = client.query(
            q.let({'bot_user': q.ref(q.collection(users), user.chat_id)},
                  q.if_(
                      q.exists(q.var('bot_user')),
                      q.map_(
                          q.lambda_('doc_ref', q.get(q.var('doc_ref'))),
                          q.select(['data', 'animes_watching'],
                                   q.get(q.var('bot_user')))), [])))

        for anime in animes_watched:
            markup = [[
                InlineKeyboardButton('Unsubscribe',
                                     callback_data='unsubscribe=' +
                                     anime['ref'].id())
            ]]
            context.bot.send_message(chat_id=user.chat_id,
                                     text=anime['data']['title'],
                                     reply_markup=InlineKeyboardMarkup(markup))

        # update last command
        user.last_command = ''

        if not animes_watched:
            context.bot.send_message(
                chat_id=user.chat_id,
                text='You are currently not subscribed to any anime')
    except Exception as err:
        log_error(err)
Code example #7
def _create_documents(let_params, records, build_document):
    results = []
    idx = 0

    while True:
        if idx >= len(records):  # >= avoids issuing one final empty batch query
            break

        end_idx = idx + BATCH_LIMIT
        batch = _execute_with_retries(
            q.let(
                let_params,
                q.map_(
                    q.lambda_(
                        "document",
                        q.create(
                            q.var("collection"),
                            {"data": build_document(q.var("document"))},
                        ),
                    ),
                    records[idx:end_idx],
                ),
            )
        )
        results.extend(batch)
        idx = end_idx

    return results
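A minimal sketch of how _create_documents might be invoked directly (compare code example #9 below); the collection name and record shape are assumptions, and BATCH_LIMIT plus _execute_with_retries come from the surrounding module.

# Hypothetical invocation; assumes a "users" collection exists.
let_params = {"collection": q.collection("users")}
records = [{"name": "Ada"}, {"name": "Grace"}]
# build_document receives the FQL variable bound to each record and returns
# the document data to store; here the record is passed through unchanged.
created = _create_documents(let_params, records, lambda record: record)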
Code example #8
    def test_count_mean_sum(self):
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        self._q(query.create_collection({"name": "countmeansum_test"}))
        self._q(
            query.create_index({
                "name": "countmeansum_idx",
                "source": query.collection("countmeansum_test"),
                "active": True,
                "values": [{
                    "field": ["data", "value"]
                }]
            }))
        self._q(
            query.foreach(
                query.lambda_(
                    "x",
                    query.create(
                        query.collection("countmeansum_test"),
                        {"data": {
                            "value": query.add(query.var("x"), 2)
                        }})), data))

        m = query.match(query.index("countmeansum_idx"))
        expected = [9, 5.0, 45, 9, 7.0, 63]

        self.assertEqual(
            self._q([
                query.count(data),
                query.mean(data),
                query.sum(data),
                query.count(m),
                query.mean(m),
                query.sum(m)
            ]), expected)
Code example #9
def _load_matches(data):
    matches = data["matches"]
    records = list(matches.values())
    build_document = lambda match: q.to_object(
        q.map_(
            q.lambda_(
                ["key", "value"],
                [
                    q.var("key"),
                    q.if_(
                        q.equals(q.var("key"), "winner_id"),
                        _assign_ref(q.var("teams"), q.var("team_map"), q.var("value")),
                        q.var("value"),
                    ),
                ],
            ),
            q.to_array(match),
        )
    )
    let_params = {
        "collection": q.collection("matches"),
        "teams": q.collection("teams"),
        "team_map": data["teams"],
    }

    documents = _create_documents(let_params, records, build_document)

    for record, document in zip(records, documents):
        record["id"] = document["ref"].id()
Code example #10
    def test_documents(self):
        aCollection = "col_test_documents"
        anIndex = "idx_test_documents"

        self._q(query.create_collection({"name": aCollection}))
        self._q(
            query.create_index({
                "name": anIndex,
                "source": query.collection(aCollection),
                "active": True
            }))

        count = 56
        data = [{} for x in range(count)]
        self._q(
            query.foreach(
                query.lambda_(
                    "x",
                    query.create(query.collection(aCollection),
                                 {"data": query.var("x")})), data))

        self.assertEqual(
            self._q(
                query.select([0],
                             query.count(
                                 query.paginate(
                                     query.documents(
                                         query.collection(aCollection)))))),
            count)
        self.assertEqual(
            self._q(query.count(query.documents(
                query.collection(aCollection)))), count)
Code example #11
File: select.py Project: tipresias/tipresias
def _sort_document_set(document_set: QueryExpression,
                       order_by: typing.Optional[sql.OrderBy]):
    if order_by is None:
        return q.paginate(document_set, size=common.MAX_PAGE_SIZE)

    if len(order_by.columns) > 1:
        raise exceptions.NotSupportedError(
            "Ordering by multiple columns is not yet supported.")

    ordered_column = order_by.columns[0]
    assert ordered_column.table_name is not None

    ordered_document_set = q.join(
        document_set,
        q.index(
            common.index_name(
                ordered_column.table_name,
                column_name=ordered_column.name,
                index_type=common.IndexType.SORT,
            )),
    )
    if order_by.direction == sql.OrderDirection.DESC:
        ordered_document_set = q.reverse(ordered_document_set)

    return q.map_(
        q.lambda_(["_", "ref"], q.var("ref")),
        q.paginate(ordered_document_set, size=common.MAX_PAGE_SIZE),
    )
Code example #12
def _delete_data():
    resources = [q.functions(), q.indexes(), q.collections()]
    delete = lambda res: q.foreach(
        q.lambda_("res", q.delete(q.var("res"))), q.paginate(res)
    )
    delete_queries = [delete(res) for res in resources]

    _execute_with_retries(q.do(*delete_queries))
Code example #13
    def get_all_students(self):
        result = self.clientf.query(
            query.map_(
                query.lambda_("x", query.get(query.var('x'))),
                query.paginate(query.match(query.index('all_students')),
                               size=1000)))
        students = result['data']
        students = [student['data'] for student in students]
        return students
Code example #14
    def test_create_accprov_with_roles(self):
        providerName = "provider_with_roles"
        issuerName = "issuer_%s" % (self._randStr())
        fullUri = "https: //$%s.auth0.com" % (self._randStr(4))
        roleOneName = "role_one_%s" % (self._randStr(4))
        roleTwoName = "role_two_%s" % (self._randStr(4))

        self.admin_client.query(
            query.create_role({
                "name":
                roleOneName,
                "privileges": [
                    {
                        "resource": query.databases(),
                        "actions": {
                            "read": True
                        },
                    },
                ],
            }))

        self.admin_client.query(
            query.create_role({
                "name":
                roleTwoName,
                "privileges": [
                    {
                        "resource": query.databases(),
                        "actions": {
                            "read": True
                        },
                    },
                ],
            }))

        provider = self.admin_client.query(
            query.create_access_provider({
                "name":
                providerName,
                "issuer":
                issuerName,
                "jwks_uri":
                fullUri,
                "roles": [
                    query.role(roleOneName),
                    {
                        "role": query.role(roleTwoName),
                        "predicate": query.query(query.lambda_("x", True)),
                    },
                ],
            }))

        self.assertEqual(provider["name"], providerName)
        self.assertEqual(provider["issuer"], issuerName)
        self.assertEqual(provider["jwks_uri"], fullUri)
        self.assertTrue(isinstance(provider["roles"], list))
Code example #15
File: __init__.py Project: Awesome-RJ/anime_alarm
def get_subscribed_users_for_anime(anime_doc_id):
    """
    This function gets all the user subscribed to a particular anime
    """
    subscribed_users = client.query(
        q.map_(
            q.lambda_('doc_ref', q.get(q.var('doc_ref'))),
            q.paginate(q.match(q.index(all_users_by_anime),
                               q.ref(q.collection(animes), str(anime_doc_id))),
                       size=100000)))
    subscribed_users = subscribed_users['data']
    return subscribed_users
Code example #16
def _assign_ids_to_teams(teams):
    result = _execute_with_retries(
        q.map_(
            q.lambda_("team", q.get(q.var("team"))),
            q.paginate(q.match(q.index("teams_all"))),
        )
    )
    team_documents = result["data"]

    for team in teams.values():
        team_document = next(
            doc for doc in team_documents if doc["data"]["name"] == team["name"]
        )
        team["id"] = team_document["ref"].id()
Code example #17
    def unsubscribe_from_anime(self, anime_doc_id: str):
        try:
            anime = client.query(
                q.get(q.ref(q.collection(animes), anime_doc_id)))
            client.query(
                q.let(
                    {
                        'anime_ref':
                        q.ref(q.collection(animes), anime_doc_id),
                        'bot_user':
                        q.ref(q.collection(users), self.chat_id),
                        'followers':
                        q.select(['data', 'followers'],
                                 q.get(q.var('anime_ref'))),
                    },
                    q.do(
                        q.update(
                            q.var('anime_ref'), {
                                'data': {
                                    'followers': q.subtract(
                                        q.var('followers'), 1)
                                }
                            }),
                        q.update(
                            q.var('bot_user'), {
                                'data': {
                                    'animes_watching':
                                    q.filter_(
                                        q.lambda_(
                                            'watched_anime_ref',
                                            q.not_(
                                                q.equals(
                                                    q.var('watched_anime_ref'),
                                                    q.var('anime_ref')))),
                                        q.select(['data', 'animes_watching'],
                                                 q.get(q.var('bot_user'))))
                                }
                            }),
                        q.if_(q.equals(q.var('followers'), 1),
                              q.delete(q.var('anime_ref')), 'successful!'))))

            updater.bot.send_message(chat_id=self.chat_id,
                                     text='You have stopped following ' +
                                     anime['data']['title'])
        except errors.NotFound:
            logger.info(
                'Somehow, a user {0} almost unsubscribed from an anime that did not exist'
                .format(self.chat_id))
        except Exception as err:
            log_error(err)
Code example #18
def getTopHeroPairs(hero_id, n):
    logging.info(f'Query: Fetching top {n} partners for hero: {hero_id}')
    key_list = []
    id_list = []
    for k in range(1, hero_id):
        key = format(k, '03d') + format(hero_id, '03d')
        key_list.append(key)
        id_list.append(k)
    for k in range(hero_id + 1, 130):
        key = format(hero_id, '03d') + format(k, '03d')
        key_list.append(key)
        id_list.append(k)
    try:
        hero_data_list = client.query(
            q.map_(
                q.lambda_(
                    'hero_pair',
                    q.get(q.ref(q.collection('hero_pairs'),
                                q.var('hero_pair')))), key_list))
        logging.debug(
            'Finished querying the hero_pair collection successfully')

        hero_team_list = []
        for i in range(0, len(hero_data_list)):
            hero_team_dictionary = {'partner_id': id_list[i]}
            if hero_data_list[i]['data']['games'] > 0:
                hero_team_dictionary['win_rate'] = format(
                    hero_data_list[i]['data']['wins'] /
                    hero_data_list[i]['data']['games'], '.4f')
            else:
                hero_team_dictionary['win_rate'] = '0.0000'
            hero_team_list.append(hero_team_dictionary)

        hero_team_list = sorted(hero_team_list,
                                key=lambda i: i['win_rate'],
                                reverse=True)

        logging.info('Returning from the function getTopHeroPairs')
        return hero_team_list[0:n]

    except Exception as e:
        logging.error(e)
        logging.error('Could not fetch from hero_pairs collection')
Code example #19
File: common.py Project: tipresias/tipresias
def convert_to_ref_set(collection_name: str,
                       index_match: QueryExpression) -> QueryExpression:
    """Convert value-based match set to set of refs.

    Params:
    -------
    collection_name: Name of the source collection for the index.
    index_match: Match set of the index. Index must have values attribute of the form
        [{"field": ["data", <field>]}, {"field": ["ref"}]}]
    """
    return q.join(
        index_match,
        q.lambda_(
            ["value", "ref"],
            q.match(
                q.index(index_name(collection_name, index_type=IndexType.REF)),
                q.var("ref"),
            ),
        ),
    )
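A minimal sketch of an index whose values match the shape described in the docstring (a data field followed by the ref); the client, collection name, and field name here are assumptions.

# Hypothetical index definition compatible with convert_to_ref_set.
client.query(
    q.create_index({
        "name": index_name("users", column_name="age", index_type=IndexType.VALUE),
        "source": q.collection("users"),
        "values": [
            {"field": ["data", "age"]},   # value used for matching/ranging
            {"field": ["ref"]},           # ref extracted by the join's lambda
        ],
    })
)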
Code example #20
    def get_converted_documents(self) -> Iterator[Document]:
        results = self.client.query(
            q.filter_(
                q.lambda_(
                    ['original_name', 'converted', 'ref'],
                    q.var('converted')
                ),
                q.paginate(q.match(q.index('documents')))
            )
        )
        documents = []
        for data in results['data']:
            documents.append(
                Document(
                    original_name=data[0],
                    converted=data[1],
                    ref=data[2]
                )
            )
        return documents
Code example #21
File: delete.py Project: tipresias/tipresias
def translate_delete(sql_query: sql.SQLQuery) -> QueryExpression:
    """Translate a DELETE SQL query into an equivalent FQL query.

    Params:
    -------
    sql_query: An SQLQuery instance.

    Returns:
    --------
    An FQL query expression.
    """
    tables = sql_query.tables

    if len(tables) > 1:
        document_set = common.join_collections(sql_query)
    else:
        document_set = common.build_document_set_union(tables[0],
                                                       sql_query.filter_groups)

    return q.map_(q.lambda_("ref", q.delete(q.var("ref"))),
                  q.paginate(document_set))
Code example #22
def things():

    userSecret = request.headers.get('fauna-user-secret')
    client = FaunaClient(secret=userSecret)

    try:
        result = client.query(
            q.map_(q.lambda_("ref", q.get(q.var("ref"))),
                   q.paginate(q.documents(q.collection("Things")))))

        things = map(
            lambda doc: {
                "id": doc["ref"].id(),
                "name": doc["data"]["name"],
                "color": doc["data"]["color"]
            }, result["data"])

        return {"things": list(things)}

    except faunadb.errors.Unauthorized as exception:
        error = exception.errors[0]
        return {"code": error.code, "description": error.description}, 401
Code example #23
def _translate_drop_default(table_name: str,
                            column_name: str) -> QueryExpression:
    drop_default = q.map_(
        q.lambda_(
            "column_info_ref",
            q.update(q.var("column_info_ref"), {"data": {
                "default_": None
            }}),
        ),
        q.paginate(_fetch_column_info_refs(table_name, column_name)),
    )

    return q.let(
        {
            "altered_docs": drop_default,
            # Should only be one document that matches the unique combination
            # of collection and field name, so we just select the first.
            "altered_ref": q.select([0, "ref"], q.var("altered_docs")),
        },
        {"data": [{
            "id": q.var("altered_ref")
        }]},
    )
Code example #24
def extractMatchesFeatureMatrix(prediction_ref_ids):
    try:
        logging.info(f'[START] Extracting Matches Feature Matrix')
        predictionData = client.query(
            q.map_(
                q.lambda_(
                    'pred_ref_id',
                    q.get(
                        q.ref(q.collection('match_prediction'),
                              q.var('pred_ref_id')))), prediction_ref_ids))

        featuresLists = []
        for pred in predictionData:
            featuresLists.append(pred['data']['vector'])

        dataFrame = pd.DataFrame(featuresLists)
        dataFrame = dataFrame.iloc[:, :-2]
        logging.info(f'[FINISHED] Extracting Matches Feature Matrix')

        trainModel(dataFrame)

    except Exception as e:
        logging.error(f'Error occurred {str(e)}')
Code example #25
File: test_pickle.py Project: raiprabh/dota-pipeline
def testPrediction():
    matchPreds = client.query(
        q.paginate(q.match(q.index('all_match_prediction')), size=100))

    matchPredsData = matchPreds['data']
    pred_ids = []

    for pred in matchPredsData:
        pred_id = pred.value['id']
        pred_ids.append(pred_id)

    predictionData = client.query(
        q.map_(
            q.lambda_(
                'matchPred',
                q.get(
                    q.ref(q.collection('match_prediction'),
                          q.var('matchPred')))), pred_ids))

    featuresLists = []

    for pred in predictionData:
        featuresLists.append(pred['data']['vector'])

    dataFrame = pd.DataFrame(featuresLists)
    dataFrame = dataFrame.iloc[:, :-2]

    X_test = dataFrame.iloc[:, :-1]
    y_test = dataFrame.iloc[:, -1]
    clf = pickle.load(open('../data/model_file.p', 'rb'))
    print("Trained Model parameters:")
    print("Kernel: ", clf.kernel)
    print("Win Labels:", clf.classes_)
    print("Gamma:", clf.gamma)
    y_pred = clf.predict(X_test)
    acc = clf.score(X_test, y_test)
    print("Model Accuracy = ", acc * 100, "%")
Code example #26
def recommend(update: Update, context: CallbackContext):
    chat_id = update.effective_chat.id
    results = client.query(
        q.map_(q.lambda_(['followers', 'doc_ref'], q.get(q.var('doc_ref'))),
               q.paginate(q.match(q.index(sort_anime_by_followers)), size=5)))

    context.bot.send_message(
        chat_id=chat_id,
        text='Here are the top animes people using Anime Alarm are watching')

    for anime in results['data']:
        if anime['data']['link'].startswith('https://tinyurl.com/') or anime[
                'data']['link'].startswith('https://bit.ly/'):
            link = anime['data']['link']
        else:
            link = shorten(anime['data']['link'])
        markup = [[
            InlineKeyboardButton('Subscribe',
                                 callback_data='subscribe=' + link)
        ]]
        context.bot.send_message(chat_id=chat_id,
                                 reply_markup=InlineKeyboardMarkup(markup),
                                 text=str(results['data'].index(anime) + 1) +
                                 '. ' + anime['data']['title'])
Code example #27
def translate_drop(
        statement: token_groups.Statement) -> typing.List[QueryExpression]:
    """Translate a DROP SQL query into an equivalent FQL query.

    Params:
    -------
    statement: An SQL statement returned by sqlparse.

    Returns:
    --------
    An FQL query expression.
    """
    idx, _ = statement.token_next_by(m=(token_types.Keyword, "TABLE"))
    _, table_identifier = statement.token_next_by(i=token_groups.Identifier,
                                                  idx=idx)
    table_name = table_identifier.value

    deleted_collection = q.select("ref", q.delete(q.collection(table_name)))
    return [
        q.do(
            q.map_(
                q.lambda_("ref", q.delete(q.var("ref"))),
                q.paginate(
                    q.union(
                        q.match(
                            q.index(
                                fql.index_name(
                                    "information_schema_tables_",
                                    column_name="name_",
                                    index_type=fql.IndexType.TERM,
                                )),
                            table_name,
                        ),
                        fql.convert_to_ref_set(
                            "information_schema_columns_",
                            q.range(
                                q.match(
                                    q.index(
                                        fql.index_name(
                                            "information_schema_columns_",
                                            column_name="table_name_",
                                            index_type=fql.IndexType.VALUE,
                                        ))),
                                [table_name],
                                [table_name],
                            ),
                        ),
                        fql.convert_to_ref_set(
                            "information_schema_indexes_",
                            q.range(
                                q.match(
                                    q.index(
                                        fql.index_name(
                                            "information_schema_indexes_",
                                            column_name="table_name_",
                                            index_type=fql.IndexType.VALUE,
                                        ))),
                                [table_name],
                                [table_name],
                            ),
                        ),
                    ), ),
            ),
            q.let(
                {"collection": deleted_collection},
                {"data": [{
                    "id": q.var("collection")
                }]},
            ),
        )
    ]
Code example #28
File: select.py Project: tipresias/tipresias
def translate_select(sql_query: sql.SQLQuery) -> QueryExpression:
    """Translate a SELECT SQL query into an equivalent FQL query.

    Params:
    -------
    sql_query: An SQLQuery instance.

    Returns:
    --------
    An FQL query expression based on the SQL query.
    """
    document_pages = _define_document_pages(sql_query)
    selected_table = next(table for table in sql_query.tables
                          if table.has_columns)

    get_field_value = lambda function_value, raw_value: q.if_(
        q.equals(function_value, common.NULL),
        q.if_(q.equals(raw_value, common.NULL), None, raw_value),
        q.select([common.DATA, 0], function_value),
    )

    calculate_function_value = lambda document_set, function_name: q.if_(
        q.is_null(function_name),
        common.NULL,
        q.if_(
            q.equals(function_name, sql.Function.COUNT.value),
            q.count(document_set),
            common.NULL,
        ),
    )

    # With aggregation functions, standard behaviour is to include the first value
    # if any column selections are part of the query, at least until we add support
    # for GROUP BY
    get_first_document = lambda documents: q.if_(q.is_empty(documents), [{}],
                                                 q.take(1, documents))

    translate_document_fields = lambda maybe_documents: q.let(
        {
            # We map over selected_fields to build document object
            # to maintain the order of fields as queried. Otherwise,
            # SQLAlchemy gets confused and assigns values to the incorrect keys.
            "selected_column_info":
            [[col.table_name, col.name, col.function_name]
             for col in sql_query.columns],
            "has_functions":
            any(col.function_name for col in sql_query.columns),
            "maybe_document_set":
            q.if_(
                q.var("has_functions"),
                get_first_document(maybe_documents),
                maybe_documents,
            ),
            "field_alias_map":
            sql_query.alias_map,
        },
        q.map_(
            q.lambda_(
                "maybe_document",
                q.let(
                    {
                        "document":
                        q.if_(
                            q.is_ref(q.var("maybe_document")),
                            {
                                # We use the selected table name here instead of deriving
                                # the collection name from the document ref in order to
                                # save a 'get' call from inside of a map, which could get
                                # expensive.
                                selected_table.name:
                                q.merge(
                                    q.select(
                                        common.DATA,
                                        q.get(q.var("maybe_document")),
                                    ),
                                    {"ref": q.var("maybe_document")},
                                ),
                            },
                            q.var("maybe_document"),
                        ),
                    },
                    q.to_object(
                        q.map_(
                            q.lambda_(
                                [
                                    "collection_name", "field_name",
                                    "function_name"
                                ],
                                q.let(
                                    {
                                        "function_value":
                                        calculate_function_value(
                                            maybe_documents,
                                            q.var("function_name")),
                                        "raw_value":
                                        q.select(
                                            [
                                                q.var("collection_name"),
                                                q.var("field_name"),
                                            ],
                                            q.var("document"),
                                            default=common.NULL,
                                        ),
                                    },
                                    [
                                        q.select(
                                            [
                                                q.var("collection_name"),
                                                q.var("field_name"),
                                            ],
                                            q.var("field_alias_map"),
                                        ),
                                        get_field_value(
                                            q.var("function_value"),
                                            q.var("raw_value")),
                                    ],
                                ),
                            ),
                            q.var("selected_column_info"),
                        )),
                ),
            ),
            q.var("maybe_document_set"),
        ),
    )

    return q.let(
        {
            "maybe_documents":
            document_pages,
            "translated_documents":
            translate_document_fields(q.var("maybe_documents")),
            "result":
            q.distinct(q.var("translated_documents"))
            if sql_query.distinct else q.var("translated_documents"),
        },
        # Paginated sets hold an array of results in a 'data' field, so we try to flatten it
        # in case we're dealing with pages instead of an array of results which doesn't
        # have such nesting
        {common.DATA: q.select(common.DATA, q.var("result"), q.var("result"))},
    )
Code example #29
File: common.py Project: tipresias/tipresias
def _define_match_set(query_filter: sql.Filter) -> QueryExpression:
    field_name = query_filter.column.name
    comparison_value = query_filter.value
    index_name_for_collection = functools.partial(index_name,
                                                  query_filter.table_name)
    convert_to_collection_ref_set = functools.partial(convert_to_ref_set,
                                                      query_filter.table_name)

    get_info_indexes_with_references = lambda collection_name, field_name: q.map_(
        q.lambda_("info_index_ref", q.get(q.var("info_index_ref"))),
        q.paginate(
            q.match(
                q.index(
                    index_name(
                        "information_schema_indexes_",
                        column_name="name_",
                        index_type=IndexType.TERM,
                    )),
                index_name(
                    collection_name,
                    column_name=field_name,
                    index_type=IndexType.REF,
                ),
            ), ),
    )

    index_name_for_field = functools.partial(index_name_for_collection,
                                             field_name)
    equality_range = q.range(
        q.match(q.index(index_name_for_field(IndexType.VALUE))),
        [comparison_value],
        [comparison_value],
    )

    if query_filter.checks_whether_equal:
        if field_name == "ref":
            assert isinstance(comparison_value, str)
            return q.singleton(
                q.ref(q.collection(query_filter.table_name), comparison_value))

        return q.let(
            {
                "ref_index":
                q.index(index_name_for_field(IndexType.REF)),
                "term_index":
                q.index(index_name_for_field(IndexType.TERM)),
                "info_indexes":
                get_info_indexes_with_references(query_filter.table_name,
                                                 field_name),
                "comparison_value":
                comparison_value,
            },
            q.if_(
                q.exists(q.var("ref_index")),
                q.match(
                    q.var("ref_index"),
                    get_foreign_key_ref(
                        q.var("comparison_value"),
                        # Assumes that there is only one reference per foreign key
                        # and that it refers to the associated collection's ID field
                        # (e.g. {'associated_table': 'id'}).
                        # This is enforced via NotSupported errors when creating collections.
                        q.select([0, DATA, "referred_table_"],
                                 q.var("info_indexes")),
                    ),
                ),
                q.if_(
                    q.exists(q.var("term_index")),
                    q.match(
                        q.var("term_index"),
                        q.var("comparison_value"),
                    ),
                    convert_to_collection_ref_set(equality_range),
                ),
            ),
        )

    # In the building of Filter objects from SQL tokens, we enforce the convention
    # of <column name> <operator> <value> for WHERE clauses, so we build the FQL queries
    # assuming that '>' means 'column value greater than literal value'. I can't think
    # of a good way to centralize the knowledge of this convention across
    # all query translation, so I'm leaving this note as a warning.
    if query_filter.checks_whether_greater_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))

    if query_filter.checks_whether_greater_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [comparison_value],
            [],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)

    if query_filter.checks_whether_less_than:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(
            q.difference(inclusive_comparison_range, equality_range))

    if query_filter.checks_whether_less_than_or_equal:
        inclusive_comparison_range = q.range(
            q.match(q.index(index_name_for_field(IndexType.VALUE))),
            [],
            [comparison_value],
        )
        return convert_to_collection_ref_set(inclusive_comparison_range)

    raise exceptions.NotSupportedError(
        f"Unsupported comparison {query_filter.comparison} was received.")
Code example #30
  def test_typecheckfns(self):
    coll = query.collection("typecheck_coll")
    db = query.database("typecheck_db")
    fn = query.function("typecheck_fn")
    index = query.index("typecheck_index")
    self.admin_client.query(query.create_collection({"name": "typecheck_coll"}))
    self.admin_client.query(query.create_index(
        {"name": "typecheck_index", "source": coll, "active": True}))
    doc = self.admin_client.query(query.create(
        coll, {"data": {}, "credentials": {"password": "******"}}))
    self.admin_client.query(query.create_database({"name": "typecheck_db"}))
    function = self._q(query.create_function(
        {"name": "typecheck_fn", "body": query.query(query.lambda_("x", query.now()))}))

    key = self.admin_client.query(
        query.create_key({"database": db, "role": "admin"}))
    token = self._q(query.login(doc["ref"], {"password": "******"}))
    credentials = self._q(query.select(['data', 0], query.paginate(query.credentials())))
    role = self.admin_client.query(query.create_role(
        {"name": "typecheck_role", "membership": [], "privileges": []}))

    values = [
        None,
        bytearray([12,3,4,5]),
        credentials,
        90,
        3.14,
        True,
        query.to_date(query.now()),
        query.date("1970-01-01"),
        query.now(),
        query.epoch(1, "second"),
        query.time("1970-01-01T00:00:00Z"),
        {"x": 10},
        query.get(doc["ref"]),
        query.paginate(query.collections()),
        [1, 2, 3],
        "a string",
        coll,
        query.collections(),
        query.match(index),
        query.union(query.match(index)),
        doc["ref"],
        query.get(doc["ref"]),
        index,
        db,
        coll,
        token["ref"],
        role["ref"],
        key["ref"],
        function["ref"],
        query.get(function["ref"]),
        query.query(query.lambda_("x", query.var("x"))),
    ]
    pairs = [
      ["array", query.is_array],
      ["object", query.is_object],
      ["string", query.is_string],
      ["null", query.is_null],
      ["number", query.is_number],
      ["bytes", query.is_bytes],
      ["date", query.is_date],
      ["timestamp", query.is_timestamp],
      ["set", query.is_set],
      ["ref", query.is_ref],
      ["boolean", query.is_boolean],
      ["double", query.is_double],
      ["integer", query.is_integer],
      ["database", query.is_database],
      ["index", query.is_index],
      ["collection", query.is_collection],
      ["token", query.is_token],
      ["function", query.is_function],
      ["collection", query.is_collection],
      ["role", query.is_role],
      ["credentials", query.is_credentials],
      ["key", query.is_key],
    ]
    expected = {
      "array":       1,
      "boolean":     1,
      "bytes":       1,
      "collection":  3,
      "credentials": 1,
      "database":    1,
      "date":        2,
      "double":      1,
      "function":    2,
      "integer":     1,
      "index":       1,
      "key":         1,
      "null":        1,
      "number":      2,
      "object":      5,
      "ref":         11,
      "role":        1,
      "set":         3,
      "string":      1,
      "timestamp":   3,
      "token":       1,
    }

    q = []
    for p in pairs:
      d = dict()
      d[p[0]] = query.count(query.filter_(query.lambda_("v", p[1](query.var("v"))), query.var("vals")))
      q.append(d)

    actual = self._q(query.let({"vals": values}, query.merge({}, q)))
    self.assertEqual(actual, expected)