Code example #1
    def audit(self, page=1, format=None):
        "Audit log"
        total_found = 0
        search_time = 0
        num_items = session.get('auditlog_num_items', 50)
        q = request.GET.get('q', None)
        kwds = {}
        if q:
            conn = SphinxClient()
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
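            # SetLimits(offset, limit, max_matches): ask Sphinx for just the
            # current page of hits, keeping at most 500 matches server-side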
            if page == 1:
                conn.SetLimits(0, num_items, 500)
            else:
                page = int(page)
                offset = (page - 1) * num_items
                conn.SetLimits(offset, num_items, 500)
            q = clean_sphinx_q(q)
            results = conn.Query(q, 'auditlog, auditlog_rt')
            q = restore_sphinx_q(q)
            if results and results['matches']:
                ids = [hit['id'] for hit in results['matches']]
                query = Session.query(AuditLog)\
                        .filter(AuditLog.id.in_(ids))\
                        .order_by(desc('timestamp'))\
                        .all()
                total_found = results['total_found']
                search_time = results['time']
                logcount = total_found
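                # the query above returns only this page's rows, so tell
                # paginate.Page the list is already sliced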
                kwds['presliced_list'] = True
            else:
                query = []
                lcount = 0
                logcount = 0
        else:
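            # no search term: page through the full audit log table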
            query = Session.query(AuditLog)\
                    .order_by(desc('timestamp'))
            lcount = Session.query(AuditLog)\
                    .order_by(desc('timestamp'))
        if 'logcount' not in locals():
            logcount = lcount.count()
        items = paginate.Page(query,
                              page=int(page),
                              items_per_page=num_items,
                              item_count=logcount,
                              **kwds)
        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            jdict = convert_settings_to_json(items)
            if q:
                encoded = json.loads(jdict)
                encoded['q'] = q
                jdict = json.dumps(encoded)
            return jdict

        c.page = items
        c.q = q
        c.total_found = total_found
        c.search_time = search_time
        return render('/status/audit.html')
Code example #2
File: status.py  Project: baruwaproject/baruwa2
    def audit(self, page=1, format=None):
        "Audit log"
        total_found = 0
        search_time = 0
        num_items = session.get('auditlog_num_items', 50)
        qry = request.GET.get('q', None)
        kwds = {}
        if qry:
            conn = SphinxClient()
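            # point the client at the searchd host parsed from config['sphinx.url']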
            sphinxopts = extract_sphinx_opts(config['sphinx.url'])
            conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            if page == 1:
                conn.SetLimits(0, num_items, 500)
            else:
                page = int(page)
                offset = (page - 1) * num_items
                conn.SetLimits(offset, num_items, 500)
            qry = clean_sphinx_q(qry)
            results = conn.Query(qry, 'auditlog, auditlog_rt')
            qry = restore_sphinx_q(qry)
            if results and results['matches']:
                ids = [hit['id'] for hit in results['matches']]
                query = Session.query(AuditLog)\
                        .filter(AuditLog.id.in_(ids))\
                        .order_by(desc('timestamp'))\
                        .all()
                total_found = results['total_found']
                search_time = results['time']
                logcount = total_found
                kwds['presliced_list'] = True
            else:
                query = []
                lcount = 0
                logcount = 0
        else:
            query = Session.query(AuditLog)\
                    .order_by(desc('timestamp'))
            lcount = Session.query(AuditLog)\
                    .order_by(desc('timestamp'))
        if 'logcount' not in locals():
            logcount = lcount.count()
        items = paginate.Page(query, page=int(page),
                            items_per_page=num_items,
                            item_count=logcount, **kwds)
        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            jdict = convert_settings_to_json(items)
            if qry:
                encoded = json.loads(jdict)
                encoded['q'] = qry
                jdict = json.dumps(encoded)
            return jdict

        c.page = items
        c.q = qry
        c.total_found = total_found
        c.search_time = search_time
        return self.render('/status/audit.html')
Code example #3
File: accounts.py  Project: aureg/baruwa2
 def search(self, format=None):
     "Search for accounts"
     total_found = 0
     search_time = 0
     num_items = session.get('accounts_num_items', 10)
     q = request.GET.get('q', '')
     d = request.GET.get('d', None)
     kwds = {'presliced_list': True}
     page = int(request.GET.get('p', 1))
     conn = SphinxClient()
     conn.SetMatchMode(SPH_MATCH_EXTENDED2)
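     # weight username and email matches above first/last name matches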
     conn.SetFieldWeights(dict(username=50, email=30,
                             firstname=10, lastname=10))
     if page == 1:
         conn.SetLimits(0, num_items, 500)
     else:
         page = int(page)
         offset = (page - 1) * num_items
         conn.SetLimits(offset, num_items, 500)
     if d:
         conn.SetFilter('domains', [int(d),])
     if c.user.is_domain_admin:
         #crcs = get_dom_crcs(Session, c.user)
         domains = Session.query(Domain.id).join(dom_owns,
                     (oas, dom_owns.c.organization_id ==
                     oas.c.organization_id))\
                     .filter(oas.c.user_id == c.user.id)
         conn.SetFilter('domains', [domain[0] for domain in domains])
     q = clean_sphinx_q(q)
     results = conn.Query(q, 'accounts, accounts_rt')
     q = restore_sphinx_q(q)
     if results and results['matches']:
         ids = [hit['id'] for hit in results['matches']]
         total_found = results['total_found']
         search_time = results['time']
         users = Session.query(User.id,
                                 User.username,
                                 User.firstname,
                                 User.lastname,
                                 User.email,
                                 User.active,
                                 User.local,
                                 User.account_type)\
                             .filter(User.id.in_(ids))\
                             .order_by(User.id)\
                             .all()
         usercount = total_found
     else:
         users = []
         usercount = 0
     c.q = q
     c.d = d
     c.total_found = total_found
     c.search_time = search_time
     c.page = paginate.Page(users, page=int(page),
                             items_per_page=num_items,
                             item_count=usercount, **kwds)
     return render('/accounts/searchresults.html')
Code example #4
    def index(self, page=1, format=None):
        "index page"
        total_found = 0
        search_time = 0
        num_items = session.get('organizations_num_items', 10)
        qry = request.GET.get('q', None)
        kwds = {}
        if qry:
            kwds['presliced_list'] = True
            conn = SphinxClient()
            sphinxopts = extract_sphinx_opts(config['sphinx.url'])
            conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            if page == 1:
                conn.SetLimits(0, num_items, 500)
            else:
                page = int(page)
                offset = (page - 1) * num_items
                conn.SetLimits(offset, num_items, 500)
            qry = clean_sphinx_q(qry)
            try:
                results = conn.Query(qry, 'organizations, organizations_rt')
            except (socket.timeout, struct.error):
                redirect(request.path_qs)
            qry = restore_sphinx_q(qry)
            if results and results['matches']:
                ids = [hit['id'] for hit in results['matches']]
                orgs = Session.query(Group)\
                        .filter(Group.id.in_(ids))\
                        .all()
                total_found = results['total_found']
                search_time = results['time']
                orgcount = total_found
            else:
                orgs = []
                ocount = 0
                orgcount = 0
        else:
            orgs = Session.query(Group)
            ocount = Session.query(Group.id)
        if 'orgcount' not in locals():
            orgcount = ocount.count()
        items = paginate.Page(orgs,
                              page=int(page),
                              items_per_page=num_items,
                              item_count=orgcount,
                              **kwds)
        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = convert_org_to_json(items)
            return data

        c.page = items
        c.q = qry
        c.total_found = total_found
        c.search_time = search_time
        return self.render('/organizations/index.html')
Code example #5
    def index(self, page=1, format=None):
        "index page"
        total_found = 0
        search_time = 0
        num_items = session.get('organizations_num_items', 10)
        qry = request.GET.get('q', None)
        kwds = {}
        if qry:
            kwds['presliced_list'] = True
            conn = SphinxClient()
            sphinxopts = extract_sphinx_opts(config['sphinx.url'])
            conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            if page == 1:
                conn.SetLimits(0, num_items, 500)
            else:
                page = int(page)
                offset = (page - 1) * num_items
                conn.SetLimits(offset, num_items, 500)
            qry = clean_sphinx_q(qry)
            try:
                results = conn.Query(qry, 'organizations, organizations_rt')
            except (socket.timeout, struct.error):
                redirect(request.path_qs)
            qry = restore_sphinx_q(qry)
            if results and results['matches']:
                ids = [hit['id'] for hit in results['matches']]
                orgs = Session.query(Group)\
                        .filter(Group.id.in_(ids))\
                        .all()
                total_found = results['total_found']
                search_time = results['time']
                orgcount = total_found
            else:
                orgs = []
                ocount = 0
                orgcount = 0
        else:
            orgs = Session.query(Group)
            ocount = Session.query(Group.id)
        if 'orgcount' not in locals():
            orgcount = ocount.count()
        items = paginate.Page(orgs, page=int(page),
                            items_per_page=num_items,
                            item_count=orgcount,
                            **kwds)
        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = convert_org_to_json(items)
            return data

        c.page = items
        c.q = qry
        c.total_found = total_found
        c.search_time = search_time
        return self.render('/organizations/index.html')
Code example #6
File: status.py  Project: baruwaproject/baruwa2
def export_auditlog(format, query):
    "Export the audit log"
    logger = export_auditlog.get_logger()
    filename = 'auditlog-%s.%s' % (export_auditlog.request.id, format)
    content_type = 'text/csv' if format == 'csv' else 'application/pdf'
    results = dict(id=export_auditlog.request.id,
                    f=None,
                    content_type=content_type,
                    filename=filename,
                    errormsg='')
    try:
        dbquery = Session.query(AuditLog)
        if query:
            conn = SphinxClient()
            sphinxopts = extract_sphinx_opts(config['sphinx.url'])
            conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
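            # when a search query is given, only the first 500 matches are exported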
            conn.SetLimits(0, 500, 500)
            query = clean_sphinx_q(query)
            qresults = conn.Query(query, 'auditlog, auditlog_rt')
            if qresults and qresults['matches']:
                ids = [hit['id'] for hit in qresults['matches']]
                dbquery = dbquery.filter(AuditLog.id.in_(ids))

        dbquery = dbquery.order_by(desc('timestamp')).all()
        if format == 'pdf':
            PS = ParagraphStyle('auditlogp',
                                    fontName='Helvetica',
                                    fontSize=8,
                                    borderPadding=(2, 2, 2, 2))
            rows = [(Paragraph(item.timestamp.strftime('%Y-%m-%d %H:%M'), PS),
                    Paragraph(wrap_string(item.username, 27), PS),
                    Paragraph(wrap_string(item.info, 33), PS),
                    Paragraph(wrap_string(item.hostname, 27), PS),
                    Paragraph(wrap_string(item.remoteip, 15), PS),
                    Paragraph(CATEGORY_MAP[item.category], PS))
                    for item in dbquery]
            pdf = build_pdf(rows)
            results['f'] = pdf
        elif format == 'csv':
            rows = [item.tojson() for item in dbquery]
            keys = ('timestamp',
                    'username',
                    'info',
                    'hostname',
                    'remoteip',
                    'category')
            results['f'] = build_csv(rows, keys)
        logger.info("Audit Log export complete: %s" % results['filename'])
        return results
    except DatabaseError, err:
        results['errormsg'] = str(err)
        logger.info("Audit Log export FAILURE: %s" % str(err))
        return results
Code example #7
File: domains.py  Project: l3dlp-sandbox/baruwa2
    def search(self, format=None):
        "Search for domains"
        total_found = 0
        search_time = 0
        num_items = session.get('domains_num_items', 10)
        qry = request.GET.get('q', '')
        org = request.GET.get('o', None)
        page = int(request.GET.get('p', 1))
        # if q:
        kwds = {'presliced_list': True}
        conn = SphinxClient()
        sphinxopts = extract_sphinx_opts(config['sphinx.url'])
        conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
        conn.SetMatchMode(SPH_MATCH_EXTENDED2)
        if page == 1:
            conn.SetLimits(0, num_items, 500)
        else:
            offset = (page - 1) * num_items
            conn.SetLimits(offset, num_items, 500)
        if org:
            conn.SetFilter('orgs', [int(org)])
        if c.user.is_domain_admin:
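            # restrict domain admins to their own domains, filtered by domain name CRCs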
            crcs = get_dom_crcs(Session, c.user)
            conn.SetFilter('domain_name', crcs)
        qry = clean_sphinx_q(qry)
        try:
            results = conn.Query(qry, 'domains, domains_rt')
        except (socket.timeout, struct.error):
            redirect(request.path_qs)
        qry = restore_sphinx_q(qry)
        if results and results['matches']:
            ids = [hit['id'] for hit in results['matches']]
            domains = Session.query(Domain)\
                    .options(joinedload('organizations'))\
                    .filter(Domain.id.in_(ids))\
                    .all()
            total_found = results['total_found']
            search_time = results['time']
            domaincount = total_found
        else:
            domains = []
            domaincount = 0

        c.page = paginate.Page(domains,
                               page=page,
                               items_per_page=num_items,
                               item_count=domaincount,
                               **kwds)
        c.q = qry
        c.org = org
        c.total_found = total_found
        c.search_time = search_time
        return self.render('/domains/searchresults.html')
Code example #8
    def index(self, page=1, format=None):
        "index page"
        total_found = 0
        search_time = 0
        num_items = session.get('organizations_num_items', 10)
        q = request.GET.get('q', None)
        kwds = {}
        if q:
            kwds['presliced_list'] = True
            conn = SphinxClient()
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            if page == 1:
                conn.SetLimits(0, num_items, 500)
            else:
                page = int(page)
                offset = (page - 1) * num_items
                conn.SetLimits(offset, num_items, 500)
            q = clean_sphinx_q(q)
            results = conn.Query(q, 'organizations, organizations_rt')
            q = restore_sphinx_q(q)
            if results and results['matches']:
                ids = [hit['id'] for hit in results['matches']]
                orgs = Session.query(Group)\
                        .filter(Group.id.in_(ids))\
                        .all()
                total_found = results['total_found']
                search_time = results['time']
                orgcount = total_found
            else:
                orgs = []
                ocount = 0
                orgcount = 0
        else:
            orgs = Session.query(Group)
            ocount = Session.query(Group.id)
        if 'orgcount' not in locals():
            orgcount = ocount.count()
        items = paginate.Page(orgs,
                              page=int(page),
                              items_per_page=num_items,
                              item_count=orgcount,
                              **kwds)
        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = convert_org_to_json(items)
            return data

        c.page = items
        c.q = q
        c.total_found = total_found
        c.search_time = search_time
        return render('/organizations/index.html')
Code example #9
File: domains.py  Project: baruwaproject/baruwa2
    def search(self, format=None):
        "Search for domains"
        total_found = 0
        search_time = 0
        num_items = session.get('domains_num_items', 10)
        qry = request.GET.get('q', '')
        org = request.GET.get('o', None)
        page = int(request.GET.get('p', 1))
        # if q:
        kwds = {'presliced_list': True}
        conn = SphinxClient()
        sphinxopts = extract_sphinx_opts(config['sphinx.url'])
        conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
        conn.SetMatchMode(SPH_MATCH_EXTENDED2)
        if page == 1:
            conn.SetLimits(0, num_items, 500)
        else:
            offset = (page - 1) * num_items
            conn.SetLimits(offset, num_items, 500)
        if org:
            conn.SetFilter('orgs', [int(org)])
        if c.user.is_domain_admin:
            crcs = get_dom_crcs(Session, c.user)
            conn.SetFilter('domain_name', crcs)
        qry = clean_sphinx_q(qry)
        try:
            results = conn.Query(qry, 'domains, domains_rt')
        except (socket.timeout, struct.error):
            redirect(request.path_qs)
        qry = restore_sphinx_q(qry)
        if results and results['matches']:
            ids = [hit['id'] for hit in results['matches']]
            domains = Session.query(Domain)\
                    .options(joinedload('organizations'))\
                    .filter(Domain.id.in_(ids))\
                    .all()
            total_found = results['total_found']
            search_time = results['time']
            domaincount = total_found
        else:
            domains = []
            domaincount = 0

        c.page = paginate.Page(domains, page=page,
                                items_per_page=num_items,
                                item_count=domaincount,
                                **kwds)
        c.q = qry
        c.org = org
        c.total_found = total_found
        c.search_time = search_time
        return self.render('/domains/searchresults.html')
Code example #10
File: organizations.py  Project: aureg/baruwa2
    def index(self, page=1, format=None):
        "index page"
        total_found = 0
        search_time = 0
        num_items = session.get('organizations_num_items', 10)
        q = request.GET.get('q', None)
        kwds = {}
        if q:
            kwds['presliced_list'] = True
            conn = SphinxClient()
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            if page == 1:
                conn.SetLimits(0, num_items, 500)
            else:
                page = int(page)
                offset = (page - 1) * num_items
                conn.SetLimits(offset, num_items, 500)
            q = clean_sphinx_q(q)
            results = conn.Query(q, 'organizations, organizations_rt')
            q = restore_sphinx_q(q)
            if results and results['matches']:
                ids = [hit['id'] for hit in results['matches']]
                orgs = Session.query(Group)\
                        .filter(Group.id.in_(ids))\
                        .all()
                total_found = results['total_found']
                search_time = results['time']
                orgcount = total_found
            else:
                orgs = []
                ocount = 0
                orgcount = 0
        else:
            orgs = Session.query(Group)
            ocount = Session.query(Group.id)
        if 'orgcount' not in locals():
            orgcount = ocount.count()
        items = paginate.Page(orgs, page=int(page),
                            items_per_page=num_items,
                            item_count=orgcount,
                            **kwds)
        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = convert_org_to_json(items)
            return data

        c.page = items
        c.q = q
        c.total_found = total_found
        c.search_time = search_time
        return render('/organizations/index.html')
Code example #11
File: status.py  Project: l3dlp-sandbox/baruwa2
def export_auditlog(format, query):
    "Export the audit log"
    logger = export_auditlog.get_logger()
    filename = 'auditlog-%s.%s' % (export_auditlog.request.id, format)
    content_type = 'text/csv' if format == 'csv' else 'application/pdf'
    results = dict(id=export_auditlog.request.id,
                   f=None,
                   content_type=content_type,
                   filename=filename,
                   errormsg='')
    try:
        dbquery = Session.query(AuditLog)
        if query:
            conn = SphinxClient()
            sphinxopts = extract_sphinx_opts(config['sphinx.url'])
            conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            conn.SetLimits(0, 500, 500)
            query = clean_sphinx_q(query)
            qresults = conn.Query(query, 'auditlog, auditlog_rt')
            if qresults and qresults['matches']:
                ids = [hit['id'] for hit in qresults['matches']]
                dbquery = dbquery.filter(AuditLog.id.in_(ids))

        dbquery = dbquery.order_by(desc('timestamp')).all()
        if format == 'pdf':
            PS = ParagraphStyle('auditlogp',
                                fontName='Helvetica',
                                fontSize=8,
                                borderPadding=(2, 2, 2, 2))
            rows = [(Paragraph(item.timestamp.strftime('%Y-%m-%d %H:%M'), PS),
                     Paragraph(wrap_string(item.username, 27),
                               PS), Paragraph(wrap_string(item.info, 33), PS),
                     Paragraph(wrap_string(item.hostname, 27), PS),
                     Paragraph(wrap_string(item.remoteip, 15),
                               PS), Paragraph(CATEGORY_MAP[item.category], PS))
                    for item in dbquery]
            pdf = build_pdf(rows)
            results['f'] = pdf
        elif format == 'csv':
            rows = [item.tojson() for item in dbquery]
            keys = ('timestamp', 'username', 'info', 'hostname', 'remoteip',
                    'category')
            results['f'] = build_csv(rows, keys)
        logger.info("Audit Log export complete: %s" % results['filename'])
        return results
    except DatabaseError, err:
        results['errormsg'] = str(err)
        logger.info("Audit Log export FAILURE: %s" % str(err))
        return results
Code example #12
File: domains.py  Project: aureg/baruwa2
    def search(self, format=None):
        "Search for domains"
        total_found = 0
        search_time = 0
        num_items = session.get("domains_num_items", 10)
        q = request.GET.get("q", "")
        org = request.GET.get("o", None)
        page = int(request.GET.get("p", 1))
        # if q:
        kwds = {"presliced_list": True}
        conn = SphinxClient()
        conn.SetMatchMode(SPH_MATCH_EXTENDED2)
        if page == 1:
            conn.SetLimits(0, num_items, 500)
        else:
            offset = (page - 1) * num_items
            conn.SetLimits(offset, num_items, 500)
        if org:
            conn.SetFilter("orgs", [int(org)])
        if c.user.is_domain_admin:
            crcs = get_dom_crcs(Session, c.user)
            conn.SetFilter("domain_name", crcs)
        q = clean_sphinx_q(q)
        results = conn.Query(q, "domains, domains_rt")
        q = restore_sphinx_q(q)
        if results and results["matches"]:
            ids = [hit["id"] for hit in results["matches"]]
            domains = Session.query(Domain)\
                    .options(joinedload("organizations"))\
                    .filter(Domain.id.in_(ids))\
                    .all()
            total_found = results["total_found"]
            search_time = results["time"]
            domaincount = total_found
        else:
            domains = []
            domaincount = 0

        c.page = paginate.Page(domains, page=page, items_per_page=num_items, item_count=domaincount, **kwds)
        c.q = q
        c.org = org
        c.total_found = total_found
        c.search_time = search_time
        return render("/domains/searchresults.html")
Code example #13
 def search(self, format=None):
     "Search for accounts"
     total_found = 0
     search_time = 0
     num_items = session.get('accounts_num_items', 10)
     qry = request.GET.get('q', '')
     doms = request.GET.get('d', None)
     kwds = {'presliced_list': True}
     page = int(request.GET.get('p', 1))
     conn = SphinxClient()
     sphinxopts = extract_sphinx_opts(config['sphinx.url'])
     conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
     conn.SetMatchMode(SPH_MATCH_EXTENDED2)
     conn.SetFieldWeights(
         dict(username=50, email=30, firstname=10, lastname=10))
     if page == 1:
         conn.SetLimits(0, num_items, 500)
     else:
         page = int(page)
         offset = (page - 1) * num_items
         conn.SetLimits(offset, num_items, 500)
     if doms:
         conn.SetFilter('domains', [int(doms)])
     if c.user.is_domain_admin:
         # crcs = get_dom_crcs(Session, c.user)
         domains = Session.query(Domain.id).join(dom_owns,
                     (oas, dom_owns.c.organization_id ==
                     oas.c.organization_id))\
                     .filter(oas.c.user_id == c.user.id)
         conn.SetFilter('domains', [domain[0] for domain in domains])
     qry = clean_sphinx_q(qry)
     try:
         results = conn.Query(qry, 'accounts, accounts_rt')
     except (socket.timeout, struct.error):
         redirect(request.path_qs)
     qry = restore_sphinx_q(qry)
     if results and results['matches']:
         ids = [hit['id'] for hit in results['matches']]
         total_found = results['total_found']
         search_time = results['time']
         users = Session.query(User.id,
                                 User.username,
                                 User.firstname,
                                 User.lastname,
                                 User.email,
                                 User.active,
                                 User.local,
                                 User.account_type)\
                             .filter(User.id.in_(ids))\
                             .order_by(User.id)\
                             .all()
         usercount = total_found
     else:
         users = []
         usercount = 0
     c.q = qry
     c.d = doms
     c.total_found = total_found
     c.search_time = search_time
     c.page = paginate.Page(users,
                            page=int(page),
                            items_per_page=num_items,
                            item_count=usercount,
                            **kwds)
     return self.render('/accounts/searchresults.html')
Code example #14
File: messages.py  Project: TetraAsh/baruwa2
    def search(self, format=None):
        "Search for messages"
        q = request.GET.get('q', None)
        if q is None:
            redirect(url(controller='messages', action='listing'))
        index = 'messages, messagesdelta, messages_rt'
        action = request.GET.get('a', 'listing')
        if action not in ['listing', 'quarantine', 'archive']:
            action = 'listing'
        if action == 'archive':
            index = 'archive archivedelta'
        try:
            page = int(request.GET.get('page', 1))
        except ValueError:
            page = 1
        num_items = session.get('msgs_search_num_results', 50)
        conn = SphinxClient()
        conn.SetMatchMode(SPH_MATCH_EXTENDED2)
        #conn.SetSortMode(SPH_SORT_EXTENDED, "timestamp DESC")
        if action == 'quarantine':
            conn.SetFilter('isquarantined', [True,])
        if page == 1:
            conn.SetLimits(0, num_items, 500)
        else:
            offset = (page - 1) * num_items
            conn.SetLimits(offset, num_items, 500)
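        # non-superadmin searches are restricted to messages the user may access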
        if not c.user.is_superadmin:
            filter_sphinx(Session, c.user, conn)
        else:
            conn.SetSelect('timestamp')
        q = clean_sphinx_q(q)
        results = conn.Query(q, index)
        q = restore_sphinx_q(q)
        if results and results['matches']:
            #import pprint
            #pprint.pprint(results)
            ids = [hit['id'] for hit in results['matches']]
            filters = session.get('filter_by', None)
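            # re-apply any listing filters saved in the session to the search results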
            if action == 'archive':
                messages = self._get_archived().filter(
                            Archive.id.in_(ids))
                query = UserFilter(Session, c.user, messages, True)
                messages = query.filter()
                if filters:
                    dynq = DynaQuery(Message, messages, filters)
                    messages = dynq.generate()
            else:
                messages = self._get_messages().filter(
                            Message.id.in_(ids))
                query = UserFilter(Session, c.user, messages)
                messages = query.filter()
                if filters:
                    dynq = DynaQuery(Message, messages, filters)
                    messages = dynq.generate()
            total_found = results['total']
            search_time = results['time']
            messages = messages.order_by(desc('timestamp'))
        else:
            print '=' * 100
            print conn.GetLastError()
            messages = []
            results = dict(matches=[], total=0)
            total_found = 0
            search_time = 0

        pages = paginator(dict(page=page, results_per_page=num_items,
                                total=results['total'],
                                items=len(results['matches']),
                                q=q))

        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = dict(action=action,
                        total_found=total_found,
                        search_time=search_time,
                        paginator=pages,
                        items=[jsonify_msg_list(msg) for msg in messages])
            return json.dumps(data)

        c.messages = messages
        c.action = action
        c.total_found = total_found
        c.search_time = search_time
        c.page = pages
        return render('/messages/searchresults.html')
Code example #15
    def search(self, format=None):
        "Search for messages"
        q = request.GET.get('q', None)
        if q is None:
            redirect(url(controller='messages', action='listing'))
        index = 'messages, messages_rt'
        action = request.GET.get('a', 'listing')
        if action not in ['listing', 'quarantine', 'archive']:
            action = 'listing'
        if action == 'archive':
            index = 'archive'
        try:
            page = int(request.GET.get('page', 1))
        except ValueError:
            page = 1
        num_items = session.get('msgs_search_num_results', 50)
        conn = SphinxClient()
        conn.SetMatchMode(SPH_MATCH_EXTENDED2)
        #conn.SetSortMode(SPH_SORT_EXTENDED, "timestamp DESC")
        if action == 'quarantine':
            conn.SetFilter('isquarantined', [True,])
        if page == 1:
            conn.SetLimits(0, num_items, 500)
        else:
            offset = (page - 1) * num_items
            conn.SetLimits(offset, num_items, 500)
        if not c.user.is_superadmin:
            filter_sphinx(Session, c.user, conn)
        else:
            conn.SetSelect('timestamp')
        q = clean_sphinx_q(q)
        results = conn.Query(q, index)
        q = restore_sphinx_q(q)
        if results and results['matches']:
            #import pprint
            #pprint.pprint(results)
            ids = [hit['id'] for hit in results['matches']]
            filters = session.get('filter_by', None)
            if index == 'archive':
                messages = self._get_archived().filter(
                            Archive.id.in_(ids))
                query = UserFilter(Session, c.user, messages, True)
                messages = query.filter()
                if filters:
                    dynq = DynaQuery(Message, messages, filters)
                    messages = dynq.generate()
            else:
                messages = self._get_messages().filter(
                            Message.id.in_(ids))
                query = UserFilter(Session, c.user, messages)
                messages = query.filter()
                if filters:
                    dynq = DynaQuery(Message, messages, filters)
                    messages = dynq.generate()
            total_found = results['total']
            search_time = results['time']
            messages = messages.order_by(desc('timestamp'))
        else:
            print '=' * 100
            print conn.GetLastError()
            messages = []
            results = dict(matches=[], total=0)
            total_found = 0
            search_time = 0

        pages = paginator(dict(page=page, results_per_page=num_items,
                                total=results['total'],
                                items=len(results['matches']),
                                q=q))

        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = dict(action=action,
                        total_found=total_found,
                        search_time=search_time,
                        paginator=pages,
                        items=[jsonify_msg_list(msg) for msg in messages])
            return json.dumps(data)

        c.messages = messages
        c.action = action
        c.total_found = total_found
        c.search_time = search_time
        c.page = pages
        return render('/messages/searchresults.html')
Code example #16
    def search(self, format=None):
        "Search for messages"
        qry = request.GET.get('q', None)
        if qry is None:
            redirect(url(controller='messages', action='listing'))
        index = 'messages, messagesdelta, messages_rt'
        action = request.GET.get('a', 'listing')
        if action not in ['listing', 'quarantine', 'archive']:
            action = 'listing'
        if action == 'archive':
            index = 'archive archivedelta'
        try:
            page = int(request.GET.get('page', 1))
        except ValueError:
            page = 1
        num_items = session.get('msgs_search_num_results', 50)
        conn = SphinxClient()
        sphinxopts = extract_sphinx_opts(config['sphinx.url'])
        conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
        conn.SetMatchMode(SPH_MATCH_EXTENDED2)
        if action == 'quarantine':
            conn.SetFilter('isquarantined', [True])
        if page == 1:
            conn.SetLimits(0, num_items, 500)
        else:
            offset = (page - 1) * num_items
            conn.SetLimits(offset, num_items, 500)
        if not c.user.is_superadmin:
            filter_sphinx(Session, c.user, conn)
        else:
            conn.SetSelect('timestamp')
        qry = clean_sphinx_q(qry)
        try:
            results = conn.Query(qry, index)
        except (socket.timeout, struct.error):
            redirect(request.path_qs)
        qry = restore_sphinx_q(qry)
        if results and results['matches']:
            ids = [hit['id'] for hit in results['matches']]
            filters = session.get('filter_by', None)
            if action == 'archive':
                messages = get_archived().filter(Archive.id.in_(ids))
                query = UserFilter(Session, c.user, messages, True)
                messages = query.filter()
                if filters:
                    dynq = DynaQuery(Message, messages, filters)
                    messages = dynq.generate()
            else:
                messages = get_messages().filter(Message.id.in_(ids))
                query = UserFilter(Session, c.user, messages)
                messages = query.filter()
                if filters:
                    dynq = DynaQuery(Message, messages, filters)
                    messages = dynq.generate()
            total_found = results['total']
            search_time = results['time']
            messages = messages.order_by(desc('timestamp'))
        else:
            messages = []
            results = dict(matches=[], total=0)
            total_found = 0
            search_time = 0

        pages = paginator(
            dict(page=page,
                 results_per_page=num_items,
                 total=results['total'],
                 items=len(results['matches']),
                 q=qry))

        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = dict(action=action,
                        total_found=total_found,
                        search_time=search_time,
                        paginator=pages,
                        items=[jsonify_msg_list(msg) for msg in messages])
            return json.dumps(data)

        c.messages = messages
        c.action = action
        c.total_found = total_found
        c.search_time = search_time
        c.page = pages
        return self.render('/messages/searchresults.html')