Ejemplo n.º 1
0
    def test_add_document(self, session, index, job):
        """A job added to the index becomes searchable by its title."""
        session.add(job)
        session.commit()

        search_index = Index()
        search_index.add_document(job.to_document())

        results = search_index.search(job.title)
        assert len(results) == 1
        assert int(results[0]['id']) == job.id
Ejemplo n.º 2
0
    def test_add_document(self, session, index, job):
        """A job added to the index becomes searchable by its title."""
        session.add(job)
        session.commit()

        # NOTE(review): the `index` fixture parameter is shadowed here by a
        # freshly constructed Index instance.
        index = Index()
        index.add_document(job.to_document())

        hits = index.search(job.title)
        assert len(hits) == 1
        assert int(hits[0]['id']) == job.id
Ejemplo n.º 3
0
def main(query, session):
    """Search the index for *query* and print each matching Job.

    Python 2 code (`unicode`, `print` statement).

    :param query: search query, byte string or unicode.
    :param session: database session used to resolve hit ids to Job rows.
    """
    name = settings.SEARCH_INDEX_NAME
    directory = settings.SEARCH_INDEX_DIRECTORY

    # Abort early if the index has never been built.
    if not IndexManager.exists(name, directory):
        die('Search index does not exist!')

    # Decode byte strings so the search backend always receives unicode.
    if isinstance(query, str):
        query = unicode(query, 'utf-8')

    index = Index()
    for result in index.search(query):
        job = session.query(Job).get(result['id'])
        print job
Ejemplo n.º 4
0
def index(app):
    """Create the search index and return a handle to it.

    Creating the index also clears any existing contents, so tests that
    need a clean index should depend on this fixture.
    """
    index_name = app.config['SEARCH_INDEX_NAME']
    index_directory = app.config['SEARCH_INDEX_DIRECTORY']
    IndexManager.create(Schema, index_name, index_directory)
    return Index(index_name, index_directory)
Ejemplo n.º 5
0
    def search_jobs(self, query, sort=None, limit=None):
        """Search the index for *query* and return published Job objects.

        :param query: search query string.
        :param sort: optional sort spec forwarded to ``Index.search``.
        :param limit: optional maximum number of hits to request.
        """
        search_index = Index()

        options = {}
        if sort:
            options['sort'] = sort
        if limit:
            options['limit'] = limit

        matches = []
        for result in search_index.search(query, **options):
            job = db.session.query(Job).get(result['id'])
            # Guard against stale index entries: skip hits whose job row is
            # missing or no longer published.
            if job and job.published:
                matches.append(job)

        return matches
Ejemplo n.º 6
0
    def search_jobs(self, query, sort=None, limit=None):
        """Search the index for *query* and return published Job objects.

        :param query: search query string.
        :param sort: optional sort spec forwarded to ``Index.search``.
        :param limit: optional maximum number of hits to request.
        """
        index = Index()
        hits = []

        kwargs = dict()
        if sort:
            kwargs['sort'] = sort
        if limit:
            kwargs['limit'] = limit

        for hit in index.search(query, **kwargs):
            job = db.session.query(Job).get(hit['id'])
            # Make sure that we don't accidentally return an unpublished job
            # that happened to be in the search index.
            if job and job.published:
                hits.append(job)

        return hits
Ejemplo n.º 7
0
    def test_search_limit(self, session, index, job):
        """Sorting by 'created' orders hits by their timestamps.

        Python 2 code: relies on `unicode` and on `range()` returning a
        list for the equality comparisons below.
        """
        doc = job.to_document()
        timestamp = doc['created']

        # Build 15 copies of the document, each one day older than the last,
        # with ids 0..14.
        bulk = []
        for i in range(15):
            doc = copy.deepcopy(doc)
            doc['id'] = unicode(i)
            doc['created'] = timestamp - timedelta(days=i)
            bulk.append(doc)

        index = Index()
        index.add_document_bulk(bulk)

        # Search with ascending sort, should return the ids in reverse order.
        hits = index.search(job.title, sort=('created', 'asc'))
        assert [int(hit['id']) for hit in hits] == range(15)[::-1]

        # Search with descending sort.
        hits = index.search(job.title, sort=('created', 'desc'))
        assert [int(hit['id']) for hit in hits] == range(15)
Ejemplo n.º 8
0
def main(should_create, index_all, session):
    name = settings.SEARCH_INDEX_NAME
    directory = settings.SEARCH_INDEX_DIRECTORY

    if should_create:
        print blue("You've asked to (re)create index '{}'.".format(name))
        IndexManager.create(Schema, name, directory)

    if not IndexManager.exists(name, directory):
        die('Search index does not exist!')

    index = Index()

    start = time.time()

    kwargs = {} if index_all else {'published': True}
    jobs = session.query(Job).filter_by(**kwargs).all()

    index.add_document_bulk([job.to_document() for job in jobs])
    duration = time.time() - start

    print green("{0} documents added okay in {1:.2f} ms.".format(len(jobs), duration))
Ejemplo n.º 9
0
    def test_delete_document(self, session, index, job):
        """Deleting a document removes it from search results."""
        session.add(job)
        session.commit()

        document = job.to_document()

        search_index = Index()
        search_index.add_document(document)

        # Findable before deletion...
        results = search_index.search(job.title)
        assert len(results) == 1

        search_index.delete_document(document['id'])

        # ...and gone afterwards.
        results = search_index.search(job.title)
        assert len(results) == 0
Ejemplo n.º 10
0
def main(should_create, index_all, session):
    name = settings.SEARCH_INDEX_NAME
    directory = settings.SEARCH_INDEX_DIRECTORY

    if should_create:
        print blue("You've asked to (re)create index '{}'.".format(name))
        IndexManager.create(Schema, name, directory)

    if not IndexManager.exists(name, directory):
        die('Search index does not exist!')

    index = Index()

    start = time.time()

    kwargs = {} if index_all else {'published': True}
    jobs = session.query(Job).filter_by(**kwargs).all()

    index.add_document_bulk([job.to_document() for job in jobs])
    duration = time.time() - start

    print green("{0} documents added okay in {1:.2f} ms.".format(
        len(jobs), duration))
Ejemplo n.º 11
0
    def test_update_document(self, session, index, job):
        """Updating a document changes its indexed fields in place."""
        session.add(job)
        session.commit()

        document = job.to_document()

        search_index = Index()
        search_index.add_document(document)

        document['job_type'] = u'updated'
        search_index.update_document(document)

        results = search_index.search(u'updated')
        assert len(results) == 1
        assert int(results[0]['id']) == job.id
Ejemplo n.º 12
0
    def test_delete_document(self, session, index, job):
        """Deleting a document removes it from search results."""
        session.add(job)
        session.commit()

        doc = job.to_document()

        index = Index()
        index.add_document(doc)

        # Findable before deletion.
        hits = index.search(job.title)
        assert len(hits) == 1

        index.delete_document(doc['id'])

        # Gone afterwards.
        hits = index.search(job.title)
        assert len(hits) == 0
Ejemplo n.º 13
0
    def test_update_document(self, session, index, job):
        """Updating a document changes its indexed fields in place."""
        session.add(job)
        session.commit()

        doc = job.to_document()

        index = Index()
        index.add_document(doc)

        # Change a field and push the update into the index.
        doc['job_type'] = u'updated'
        index.update_document(doc)

        hits = index.search(u'updated')
        assert len(hits) == 1
        assert int(hits[0]['id']) == job.id
Ejemplo n.º 14
0
    def test_search_limit(self, session, index, job):
        """Sorting by 'created' orders hits by their timestamps.

        Python 2 code: relies on `unicode` and on `range()` returning a
        list for the equality comparisons below.
        """
        doc = job.to_document()
        timestamp = doc['created']

        # Build 15 copies of the document, each one day older than the last,
        # with ids 0..14.
        bulk = []
        for i in range(15):
            doc = copy.deepcopy(doc)
            doc['id'] = unicode(i)
            doc['created'] = timestamp - timedelta(days=i)
            bulk.append(doc)

        index = Index()
        index.add_document_bulk(bulk)

        # Search with ascending sort, should return the ids in reverse order.
        hits = index.search(job.title, sort=('created', 'asc'))
        assert [int(hit['id']) for hit in hits] == range(15)[::-1]

        # Search with descending sort.
        hits = index.search(job.title, sort=('created', 'desc'))
        assert [int(hit['id']) for hit in hits] == range(15)
Ejemplo n.º 15
0
def index(job):
    """Add *job*'s document to the search index and return the result."""
    search_index = Index()
    doc = job.to_document()
    result = search_index.add_document(doc)
    logger.info(u"Job ({}) added to index.".format(job.id))
    return result
Ejemplo n.º 16
0
def deindex(job):
    """Remove *job*'s document from the search index and return the result."""
    search_index = Index()
    doc = job.to_document()
    result = search_index.delete_document(doc['id'])
    logger.info(u"Job ({}) deleted from index.".format(job.id))
    return result
Ejemplo n.º 17
0
def index(job):
    """Add *job*'s document to the search index and return the result."""
    index = Index()
    document = job.to_document()
    rv = index.add_document(document)
    logger.info(u"Job ({}) added to index.".format(job.id))
    return rv
Ejemplo n.º 18
0
def deindex(job):
    """Remove *job*'s document from the search index and return the result."""
    index = Index()
    document = job.to_document()
    rv = index.delete_document(document['id'])
    logger.info(u"Job ({}) deleted from index.".format(job.id))
    return rv
Ejemplo n.º 19
0
 def search(self, query):
     """Run *query* against a fresh search index and return the hits."""
     search_index = Index()
     return search_index.search(query)
Ejemplo n.º 20
0
 def search(self, query):
     """Run *query* against a fresh search index and return the hits."""
     index = Index()
     return index.search(query)