Example 1
    def test_wildcard_search_cleaned_up(self):
        from adhocracy.lib.search.query import add_wildcard_query
        search = SolrSearch(interface)
        query = add_wildcard_query(search, 'text', 'one** two*')
        self.assertEqual(
            query.params(),
            [('q', '(text:one OR text:one*) AND (text:two OR text:two*)')])
Example 2
    def test_wildcard_search_ignore_none(self):
        from adhocracy.lib.search.query import add_wildcard_query
        search = SolrSearch(interface)

        query = add_wildcard_query(search, 'text', None)
        self.assertEqual(
            query.params(),
            [('q', '*:*')])
Example 3
    def test_wildcard_search_added_to_search(self):
        from adhocracy.lib.search.query import add_wildcard_query
        search = SolrSearch(interface).query(text='passedin')

        query = add_wildcard_query(search, 'text', 'wild')
        self.assertEqual(
            query.params(),
            [('q', 'text:passedin AND (text:wild OR text:wild*)')])
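Taken together, these three tests pin down what add_wildcard_query has to do: split the phrase on whitespace, strip any trailing '*' the user typed, expand each term to (field:term OR field:term*), AND the terms together, and leave the search untouched when the phrase is None. The standalone sketch below (the name build_wildcard_query_string is invented here; the real helper presumably composes sunburnt Q objects rather than strings) reproduces just the query strings the assertions expect:

def build_wildcard_query_string(field, phrase):
    # Sketch only: builds the raw Solr query string the tests above expect.
    if phrase is None:
        return '*:*'  # no phrase -> match-all query, as in Example 2
    parts = []
    for term in phrase.split():
        term = term.rstrip('*')  # drop any wildcards the user already typed
        parts.append('(%s:%s OR %s:%s*)' % (field, term, field, term))
    return ' AND '.join(parts)

assert build_wildcard_query_string('text', 'one** two*') == \
    '(text:one OR text:one*) AND (text:two OR text:two*)'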
Example 4
def check_bad_query_data(kwargs):
    solr_search = SolrSearch(interface)
    try:
        solr_search.query(**kwargs).params()
    except SolrError:
        pass
    else:
        assert False, "expected SolrError for kwargs %r" % (kwargs,)
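check_bad_query_data asserts that building a query from the given keyword arguments raises SolrError. It would be driven from a table of invalid inputs in the same loop style as test_complex_boolean_queries below; the kwargs here are hypothetical, since what actually fails depends on the schema behind interface:

def test_bad_query_data():
    # Hypothetical invalid inputs: an unknown field and a value that cannot
    # be coerced to the field's declared type.
    for kwargs in ({"undeclared_field": "anything"},
                   {"int_field": "not-an-integer"}):
        check_bad_query_data(kwargs)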
Example 5
def check_multiple_call_data(arg_kw_list, query_output, filter_output):
    solr_search = SolrSearch(interface)
    q = solr_search.query()
    f = solr_search.query()
    for args, kwargs in arg_kw_list:
        q = q.query(*args, **kwargs)
        f = f.filter(*args, **kwargs)
    qp = q.params()
    fp = f.params()
    check_equal_with_debug(qp, query_output)
    check_equal_with_debug(fp, filter_output)
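check_multiple_call_data exercises chained calls: the same (args, kwargs) pairs are applied once through query() and once through filter(), and each resulting parameter list is compared against its expectation (filters are sent as fq parameters, with q falling back to *:* when no plain query is present, as in Example 2). A driver in the same loop style as test_complex_boolean_queries below; the rows of multiple_call_data are not spelled out here because their exact tuples depend on the schema fields:

def test_multiple_call_data():
    # multiple_call_data rows: (list of (args, kwargs) pairs,
    #                           expected query() params, expected filter() params)
    for arg_kw_list, query_output, filter_output in multiple_call_data:
        check_multiple_call_data(arg_kw_list, query_output, filter_output)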
Example 6
def test_complex_boolean_queries():
    solr_search = SolrSearch(interface)
    for query, output in complex_boolean_queries:
        check_complex_boolean_query(solr_search, query, output)
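Each entry of complex_boolean_queries pairs a callable that builds a nested query out of chained query()/filter() calls and Q objects with the parameter list it should produce; check_complex_boolean_query presumably applies the callable to solr_search and compares params() against the expectation. A purely illustrative entry (the field name and the expected tuple are invented, not taken from the real fixture):

complex_boolean_queries = (
    # (query builder, expected params) -- illustrative values only
    (lambda q: q.query(q.Q(text_field="hello") | q.Q(text_field="world")),
     [("q", "text_field:hello OR text_field:world")]),
)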
Example 7
def check_query_data(method, args, kwargs, output):
    solr_search = SolrSearch(interface)
    p = getattr(solr_search, method)(*args, **kwargs).params()
    check_equal_with_debug(p, output)
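check_query_data generalizes this to any SolrSearch method: it looks the method up by name, calls it with the given args and kwargs, and compares the resulting params() against the expected list. A driver in the same loop style, with a single illustrative row (good_query_data and its contents are assumptions here, not the real fixture):

good_query_data = {
    # method name -> rows of (args, kwargs, expected params); illustrative only
    "query": (
        (["hello"], {}, [("q", "hello")]),
    ),
}

def test_query_data():
    for method, rows in good_query_data.items():
        for args, kwargs, output in rows:
            check_query_data(method, args, kwargs, output)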
Example 8
    def get(rows, start, **kwargs):
        """Query solr for speeches and return one page of results.

        Input
          id
          start_date
          end_date
          phrase
          rows - the number of records to get from solr
          start - where to start getting records in solr (offset)
          frame
          order
          states - list of 2 letter state abbreviations

        Output
          dict with 'count', 'start', 'speeches', 'term_vectors' and
          'highlighting' keys
        """

        solr_query = Speech.build_sunburnt_query(**kwargs).paginate(
            rows=rows, start=start)

        if kwargs.get('order') and kwargs.get('order') not in [
                "frame", "tfidf", "idf", "termFreq"
        ]:
            solr_query = solr_query.sort_by(kwargs.get('order'))

        # solr_query = solr_query.terms('speaking').terms(tf=True)
        # Flatten the sunburnt parameters into a dict so extra function
        # queries (norm, tf, idf, tfidf, termfreq) can be attached by hand.
        params = solr_query.params()
        dict_params = dict(params)

        dict_params['norm'] = 'norm(speaking)'
        dict_params['tf'] = 'tf(speaking, %s)' % kwargs.get('phrase')
        dict_params['idf'] = 'idf(speaking, %s)' % kwargs.get('phrase')
        dict_params['tfidf'] = 'mul($tf, $idf)'
        dict_params['termFreq'] = 'termfreq(speaking, %s)' % kwargs.get(
            'phrase')
        dict_params['fl'] = "*, score, $norm, $termFreq, $tf, $idf, $tfidf"
        # {!frange l=8} keeps only documents whose $tfidf value is at least 8
        dict_params['q'] += " AND {!frange l=8}$tfidf"
        if kwargs.get('order') is None or kwargs.get('order') == "tfidf":
            dict_params["sort"] = "$tfidf desc"

        if kwargs.get('frame') and kwargs.get(
                'order') == "frame" and kwargs.get('analysis_id'):

            # Analysis is only needed by the commented-out weighting code below
            from app.models.analysis import Analysis

            frame_words = Frame.get(Frame.id == kwargs['frame']).word_string
            # analysis_obj = Analysis.get(Analysis.id == kwargs['analysis_id'])
            # key = "%s - %s" % (kwargs.get('start_date'), kwargs.get('end_date'))
            # vocabulary_proba = json.loads(analysis_obj.speech_windows)[key]
            # frame_vocabulary_proba =  { word: (abs(exp(vocabulary_proba.get(word)[0]) - exp(vocabulary_proba.get(word)[1]))) if vocabulary_proba.get(word) != None else 0 for word in frame_words.split() }
            # dict_params['frameFreq'] = "mul(sum(" + ", ".join(map(lambda word: "mul(termfreq(speaking,\"%s\"), %f)" % (word, frame_vocabulary_proba[word]), frame_words.split())) + "), $norm)"

            # Per-word weights are currently hard-coded to 1.0; the
            # commented-out block above derived them from the analysis
            # vocabulary instead.
            dict_params['frameFreq'] = "mul(sum(" + ", ".join(
                map(
                    lambda word: "mul(termfreq(speaking,\"%s\"), %f)" %
                    (word, 1), frame_words.split())) + "), $norm)"

            if dict_params.get('fl'):
                dict_params['fl'] += ", $frameFreq"
            else:
                dict_params['fl'] = '$frameFreq'

            dict_params["sort"] = "$frameFreq desc"

        params = list(dict_params.items())

        # print params

        # `si` is assumed to be the module-level sunburnt SolrInterface; the
        # raw conn.select call is used so the hand-built parameters above are
        # sent to Solr unmodified, and the raw response is then parsed back
        # into sunburnt result objects.
        result = si.schema.parse_response(si.conn.select(params))
        q = SolrSearch(si)
        response = q.transform_result(result, q.result_constructor)

        speeches = response.result.docs
        highlighting = response.highlighting
        term_vectors = response.term_vectors

        current_count = response.result.numFound
        current_start = response.result.start

        # TODO: improve this
        if kwargs.get('frame') and kwargs.get('highlight'):
            frame = Frame.get(Frame.id == kwargs['frame'])
            # pdb.set_trace()
            for speech in speeches:
                # highlight_speech is assumed to annotate the speech object in
                # place; the loop does not use its return value
                speech = Speech.highlight_speech(speech, frame)

        speeches_dict = {
            'count': current_count,
            'start': current_start,
            'speeches': speeches,
            'term_vectors': term_vectors,
            'highlighting': highlighting
        }

        return speeches_dict
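A hypothetical call to get() (the enclosing class is not shown in this example, so speech_search stands in for whatever object exposes it); the keyword names follow the docstring, and the returned dict carries the keys assembled at the end of the method:

# Hypothetical invocation; the receiver name and argument values are
# assumptions for illustration only.
page = speech_search.get(
    rows=20, start=0,
    phrase="health care",
    start_date="2013-01-01", end_date="2013-12-31",
    states=["NY", "CA"],
    order="tfidf",
)
print page["count"], page["start"], len(page["speeches"])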