Example #1
def render_graphs(query, minimal=False):
    try:
        # clear any stale parse error from a previous attempt
        if "query_parse" in errors:
            del errors["query_parse"]
        query = parse_query(query)
    except Exception as e:
        errors["query_parse"] = ("Couldn't parse query", e)
Example #2
def items(query=None, user=LOGGED_IN_USER, username=USER):
    """
    Given an LQL query and a library identity, return the items that match.
    """
    library = Library.objects.get()
    items = Item.objects.all()
    query_json = '[]'

    if query:
        from query import execute_query, parse_query
        parsed = parse_query(query)
        items = execute_query(items, parsed)
        # The JSON representation of the query that was just used to filter
        # the items is passed back to the template as a JSON string so the
        # front end can render the query widget. A raw string is not passed
        # because there is no way to parse LQL in JavaScript (yet).
        query_json = jsonify(parsed)

    items = items.all()
    all_keys = []
    return {
        'parsed_query_json': query_json,
        'library_items': items,
        'items_count': len(items),
        'keys': all_keys,
    }
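The jsonify helper used above is not shown. Assuming parse_query returns plain lists and dicts, a minimal stand-in could be:

import json

def jsonify(parsed):
    # Hypothetical helper: serialize the parsed LQL query for the template;
    # falls back to str() for anything json can't encode directly.
    return json.dumps(parsed, default=str)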
Example #3
    def query(self, **kwargs):
        """This method is used to first say which documents should be
        affected and later what to do with these documents. They can be
        removed or updated after they have been selected.

        c = Collection('test')
        c.query(name='jack').delete()
        c.query(name='jack').update(set__name='john')
        """

        return Query(self, parse_query(kwargs))
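parse_query here presumably maps the keyword arguments onto a MongoDB filter document. A minimal sketch under that assumption (the operator-suffix handling is illustrative, not the library's confirmed behavior):

def parse_query(kwargs):
    # Hypothetical sketch: plain keywords map directly to fields, and a
    # double-underscore suffix becomes a Mongo operator, e.g.
    # age__gt=5 -> {'age': {'$gt': 5}}.
    spec = {}
    for key, value in kwargs.items():
        if '__' in key:
            field, op = key.rsplit('__', 1)
            spec[field] = {'$' + op: value}
        else:
            spec[key] = value
    return spec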
Example #4
def search_subseries(patches, query_str):
    sub_series = []

    # tokenize the raw query string, then parse the tokens into an
    # evaluable query object
    tokens = query.tokenize_query(query_str)
    q, _ = query.parse_query(tokens)

    for series in patches:
        if not query.eval_query(series, q):
            continue

        sub_series.append(series)

    return sub_series
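A usage sketch; the query syntax shown is an assumption, since the example does not document what query.tokenize_query accepts:

# Hypothetical call -- "status:accepted" stands in for whatever syntax
# the query module actually expects, and all_series for the patch data.
matching = search_subseries(all_series, "status:accepted")
for series in matching:
    print(series)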
Example #5
    def find_one(self, **kwargs):
        """Find one single document. Mainly this is used to retrieve
        documents by unique key.
        """

        if '_id' in kwargs:
            args = pymongo.objectid.ObjectId(str(kwargs['_id']))
        else:
            args = parse_query(kwargs)

        doc = self.collection.find_one(args)

        if doc is None:
            return None

        return self.model_class(doc)
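A usage sketch; the wrapper instance and field names are assumptions:

# Hypothetical usage -- 'users' is an instance of the wrapper class above.
user = users.find_one(_id='507f1f77bcf86cd799439011')  # by ObjectId string
user = users.find_one(email='jack@example.com')        # by any unique field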
Example #6
        cards = conn.execute(
            "SELECT *, 0 FROM cards_v1 WHERE chara_id = ? ORDER BY rarity DESC, sort_key DESC",
            (restrict_char_id, ))

    cards = list(starmap(filterable_card_data_t, cards.fetchall()))

    conn.close()

    # keep only cards that satisfy every filter predicate from the query
    cards = list(filter(lambda x: all(f(x) for f in query.filters), cards))

    if query.ordinal:
        try:
            # select the query.ordinal-th card, counting from the highest
            # rarity (ties broken by sort_key)
            return [
                sorted(cards,
                       key=lambda x: (20 - x.rarity, x.sort_key),
                       reverse=True)[-query.ordinal]
            ]
        except IndexError:
            raise InvalidQueryError(
                "There aren't that many cards of her, nya.")

    return cards


if __name__ == '__main__':
    #build_ark()
    from query import parse_query
    print(exec_query(parse_query("token kaede")))
    print(exec_query(parse_query("groove kaede")))
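The parsed query object is not defined in this excerpt; from its usage it carries a list of filter callables and an optional 1-based ordinal. A stand-in with those inferred fields:

from collections import namedtuple

# Field names are inferred from the usage above, not confirmed by the source.
parsed_query_t = namedtuple('parsed_query_t', ['filters', 'ordinal'])
q = parsed_query_t(filters=[lambda card: card.rarity >= 7], ordinal=1)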
Example #7
    parser = argparse.ArgumentParser()
    parser.add_argument('--source',
                        default='source.csv',
                        help='input source data file name')
    parser.add_argument('--query',
                        default='query.txt',
                        help='query file name')
    parser.add_argument('--output',
                        default='output.txt',
                        help='output file name')
    args = parser.parse_args()
    
    titles = load_titles(args.source)
    querys = load_querys(args.query)
     
    index = {}
    # build the index by n-gram and extract English words with a regular
    # expression. Word segmentation is skipped because some query terms,
    # e.g. 蔡英文, cannot be segmented by jieba, and segmentation also
    # costs time; this implements the 100%-accuracy baseline model first.
    build_index(titles, index)

    print('output result')

    # output results
    with open(args.output, 'w') as f:
        for query in querys:
            res = parse_query(query, index)
            f.write(','.join(str(n) for n in res) + '\n')
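Assuming the script is saved as search.py (the file name is an assumption), a typical invocation would be:

# Usage sketch -- the script name is hypothetical:
#   python search.py --source source.csv --query query.txt --output output.txt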