def get_search_results(self, req, terms, filters):
    """Search discussion topics and messages for the given terms.

    Yields standard ISearchSource 5-tuples:
    (href, title, datetime, author, excerpt).
    """
    if 'discussion' not in filters:
        return
    # Create context.
    context = Context.from_request(req)
    context.realm = 'discussion-core'
    # Get database access.
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    # Search in topics.
    query, args = search_to_sql(db, ['author', 'subject', 'body'], terms)
    columns = ('id', 'forum', 'time', 'subject', 'body', 'author')
    sql = ("SELECT id, forum, time, subject, body, author "
           "FROM topic "
           " WHERE %s" % (query,))
    self.log.debug(sql)
    cursor.execute(sql, args)
    for row in cursor:
        row = dict(zip(columns, row))
        row['time'] = to_datetime(row['time'], utc)
        # BUG FIX: excerpts must highlight the user's search *terms*;
        # previously the SQL WHERE fragment was passed, so nothing matched.
        yield (req.href.discussion('topic', row['id']) + '#-1',
               "Topic #%d: %s" % (row['id'],
                                  shorten_line(row['subject'])),
               row['time'], row['author'],
               shorten_result(row['body'], terms))
    # Search in messages
    query, args = search_to_sql(db, ['m.author', 'm.body', 't.subject'],
                                terms)
    columns = ('id', 'forum', 'topic', 'time', 'author', 'body', 'subject')
    sql = ("SELECT m.id, m.forum, m.topic, m.time, m.author, m.body, "
           "t.subject "
           "FROM message m "
           "LEFT JOIN "
           "(SELECT subject, id "
           "FROM topic) t "
           "ON t.id = m.topic "
           "WHERE %s" % (query))
    self.log.debug(sql)
    cursor.execute(sql, args)
    for row in cursor:
        row = dict(zip(columns, row))
        row['time'] = to_datetime(row['time'], utc)
        # Same fix as above: highlight the search terms.
        yield (req.href.discussion('message', row['id']) + '#%s' % (
                   row['id']),
               "Message #%d: %s" % (row['id'],
                                    shorten_line(row['subject'])),
               row['time'], row['author'],
               shorten_result(row['body'], terms))
def changeset_added(self, repos, changeset):
    """Called after a changeset has been added to a repository."""
    # Index the commit message
    so = FullTextSearchObject(
        self.project, changeset.resource,
        title=u'[%s]: %s' % (changeset.rev,
                             shorten_line(changeset.message)),
        oneline=shorten_result(changeset.message),
        body=changeset.message,
        author=changeset.author,
        created=changeset.date,
        changed=changeset.date,
    )
    self.backend.create(so, quiet=True)
    self._update_changeset(changeset)
    # Index the file contents of this revision, a changeset can involve
    # thousands of files - so submit in batches to avoid exceeding the
    # available file handles.
    # FIX: dropped the redundant identity generator expression
    # `(so for so in ...)` around the _changes() generator.
    sos = self._changes(repos, changeset)
    for chunk in grouper(sos, 25):
        try:
            self.backend.add(chunk, quiet=True)
            self.log.debug("Indexed %i repository changes at revision %i",
                           len(chunk), changeset.rev)
        finally:
            # Always release any open file handles, even if indexing failed.
            # NOTE(review): assumes grouper() yields lists without a pad
            # fillvalue -- confirm against its definition.
            for so in chunk:
                if hasattr(so.body, 'close'):
                    so.body.close()
def get_search_results(self, req, query, filters):
    """Search repository files via the indexer backend.

    Yields ISearchSource result tuples; directory hits get a plain
    'Directory' excerpt, file hits link to the first matching line.
    """
    if 'repo' not in filters:
        return
    for filename, reponame in self.search_backend.find_words(query):
        repo = self.env.get_repository(reponame=reponame,
                                       authname=req.authname)
        node = repo.get_node(filename)
        # BUG FIX: fetch the changeset *before* branching -- previously
        # `change` was referenced before assignment in the directory
        # branch, raising NameError.
        change = repo.get_changeset(node.rev)
        if node.kind == Node.DIRECTORY:
            yield (self.env.href.browser(reponame, filename),
                   "%s (in %s)" % (filename, reponame),
                   change.date, change.author, 'Directory')
        else:
            found = 0
            mimeview = Mimeview(self.env)
            content = mimeview.to_unicode(node.get_content().read(),
                                          node.get_content_type())
            # Locate the first line containing any query term (1-based).
            for n, line in enumerate(content.splitlines()):
                line = line.lower()
                for q in query:
                    idx = line.find(q)
                    if idx != -1:
                        found = n + 1
                        break
                if found:
                    break
            yield (self.env.href.browser(reponame, filename
                       ) + (found and '#L%i' % found or ''),
                   "%s (in %s)" % (filename, reponame),
                   change.date, change.author,
                   shorten_result(content, query))
def ticket_created(self, ticket):
    """Index a newly created ticket, resolving customerrequest id to name."""
    ticketsystem = TicketSystem(self.env)
    resource_name = get_resource_shortname(self.env, ticket.resource)
    resource_desc = ticketsystem.get_resource_description(ticket.resource,
                                                          format='summary')
    # make sure we will index customerrequest name not id
    cr_id = ticket['customerrequest']
    if cr_id:
        db = self.env.get_read_db()
        cursor = db.cursor()
        # SECURITY FIX: use a parameterized query instead of string
        # interpolation -- cr_id came from a ticket field and was
        # previously injectable.
        cursor.execute("SELECT name FROM public.customer_requests "
                       " WHERE id=%s", (cr_id,))
        row = cursor.fetchone()
        if row:
            ticket.values['customerrequest'] = row[0]
    so = FullTextSearchObject(
        self.project, ticket.resource,
        title=u"%(title)s: %(message)s" % {'title': resource_name,
                                           'message': resource_desc},
        author=ticket.values.get('reporter'),
        changed=ticket.values.get('changetime'),
        created=ticket.values.get('time'),
        tags=ticket.values.get('keywords'),
        involved=re.split(r'[;,\s]+', ticket.values.get('cc', ''))
                 or ticket.values.get('reporter'),
        popularity=0,  # FIXME
        oneline=shorten_result(ticket.values.get('description', '')),
        body=u'%r' % (ticket.values,),
        status=ticket.values.get('status'),
        comments=[t[4] for t in ticket.get_changelog()],
    )
    self.backend.create(so, quiet=True)
    self._update_ticket(ticket)
    self.log.debug("Ticket added for indexing: %s", ticket)
def get_search_results(self, req, terms, filters):
    """Yield the latest version of each wiki page matching *terms*."""
    if 'wiki' not in filters:
        return
    db = self.env.get_db_cnx()
    sql_query, args = search_to_sql(db,
                                    ['w1.name', 'w1.author', 'w1.text'],
                                    terms)
    cursor = db.cursor()
    # Join each page row against its own highest version number so only
    # current page contents are searched.
    cursor.execute("SELECT w1.name,w1.time,w1.author,w1.text "
                   "FROM wiki w1,"
                   "(SELECT name,max(version) AS ver "
                   "FROM wiki GROUP BY name) w2 "
                   "WHERE w1.version = w2.ver AND w1.name = w2.name "
                   "AND " + sql_query, args)
    wiki_realm = Resource('wiki')
    for name, ts, author, text in cursor:
        page = wiki_realm(id=name)
        if 'WIKI_VIEW' not in req.perm(page):
            continue
        yield (get_resource_url(self.env, page, req.href),
               '%s: %s' % (name, shorten_line(text)),
               datetime.fromtimestamp(ts, utc), author,
               shorten_result(text, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, wiki_realm, terms):
        yield result
def get_search_results(self, req, terms, filters):
    """Yield current wiki pages whose name, author or text match *terms*."""
    if 'wiki' not in filters:
        return
    with self.env.db_query as db:
        sql_query, args = search_to_sql(
            db, ['w1.name', 'w1.author', 'w1.text'], terms)
        wiki_realm = Resource(self.realm)
        # Restrict the search to the newest version of every page.
        rows = db("""
            SELECT w1.name, w1.time, w1.author, w1.text
            FROM wiki w1,(SELECT name, max(version) AS ver
                          FROM wiki GROUP BY name) w2
            WHERE w1.version = w2.ver AND w1.name = w2.name
            AND """ + sql_query, args)
        for name, ts, author, text in rows:
            page = wiki_realm(id=name)
            if 'WIKI_VIEW' not in req.perm(page):
                continue
            yield (get_resource_url(self.env, page, req.href),
                   '%s: %s' % (name, shorten_line(text)),
                   from_utimestamp(ts), author,
                   shorten_result(text, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, wiki_realm, terms):
        yield result
def get_search_results(self, req, terms, filters):
    """Search the newest version of every wiki page for *terms*."""
    if 'wiki' not in filters:
        return
    db = self.env.get_db_cnx()
    sql_query, args = search_to_sql(db,
                                    ['w1.name', 'w1.author', 'w1.text'],
                                    terms)
    cursor = db.cursor()
    cursor.execute(
        "SELECT w1.name,w1.time,w1.author,w1.text "
        "FROM wiki w1,"
        "(SELECT name,max(version) AS ver "
        "FROM wiki GROUP BY name) w2 "
        "WHERE w1.version = w2.ver AND w1.name = w2.name "
        "AND " + sql_query, args)
    wiki_realm = Resource('wiki')
    for name, ts, author, text in cursor:
        page = wiki_realm(id=name)
        # Only expose pages the requesting user may view.
        if 'WIKI_VIEW' not in req.perm(page):
            continue
        yield (get_resource_url(self.env, page, req.href),
               '%s: %s' % (name, shorten_line(text)),
               from_utimestamp(ts), author,
               shorten_result(text, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, wiki_realm, terms):
        yield result
def get_list_pages(request, dbp, obj, resource):
    """Render the wiki pages that reference the requested artifact."""
    require_permission(request.req, resource, dbp.env)
    artifact_id = request.req.args.get('artifact', None)
    if artifact_id is None:
        raise Exception("No artifact was specified.")
    dbp.load_artifact(artifact_id)
    artifact = dbp.pool.get_item(artifact_id)
    results = []
    for pagename, page_version_id, ref_count in \
            dbp.get_wiki_page_ref_counts(artifact):
        page = WikiPage(dbp.env, pagename)
        results.append({
            'href': get_resource_url(dbp.env, page.resource,
                                     request.req.href),
            'title': pagename,
            'date': user_time(request.req, format_datetime, page.time),
            'author': page.author,
            'excerpt': shorten_result(page.text),
        })
    data = {
        'context': Context.from_request(request.req, resource),
        'artifact': artifact,
        'results': results,
    }
    return 'list_pages.html', data, None
def _build_summary(self, text, query):
    """Build a summary which highlights the search terms."""
    if not query:
        # Nothing to highlight -- just truncate the raw text.
        return text[:500]
    return shorten_result(text, query.split(), maxlen=500) if text else ''
def get_search_results(self, req, query, filters):
    """Full-text search over .txt/.rst files under the configured docs root.

    FIXES: regex patterns are now raw strings (non-raw '\\s' is a
    deprecated escape in modern Python), and ``!= None`` was replaced
    with ``is not None``.
    """
    if 'docs' not in filters:
        return
    if not req.perm.has_permission('WIKI_VIEW'):
        return
    root = self.config.get('docs', 'root', '')
    repos = self.env.get_repository(req.authname)
    node = repos.get_node(root, None)
    # Split a plain query string into quoted phrases and bare words.
    if not isinstance(query, list):
        results = []
        for term in re.split(r'(".*?")|(\'.*?\')|(\s+)', query):
            if term is not None and term.strip() != '':
                if term[0] == term[-1] == "'" or term[0] == term[-1] == '"':
                    term = term[1:-1]
                results.append(term)
        query = results
    query = [q.lower() for q in query]
    # Compile each term into a whitespace-tolerant pattern.
    # NOTE(review): terms are used as regexes without re.escape(), so
    # metacharacters in a query could raise -- confirm intended.
    patterns = []
    for q in query:
        q = re.sub(r'\s+', r'\\s+', q)
        p = re.compile(q, re.IGNORECASE | re.MULTILINE)
        patterns.append(p)
    to_unicode = Mimeview(self.env).to_unicode

    def walk(node):
        # Depth-first yield of every .txt/.rst file under *node*.
        if node.path.endswith('.txt') or node.path.endswith('.rst'):
            yield node
        if node.kind == Node.DIRECTORY:
            for subnode in node.get_entries():
                for result in walk(subnode):
                    yield result

    for node in walk(node):
        matched = 1
        content_length = node.get_content_length()
        # Skip files larger than 1 MiB.
        if content_length > (1024 * 1024):
            continue
        content = node.get_content()
        if not content:
            continue
        content = to_unicode(content.read(), node.get_content_type())
        # Every pattern must match somewhere in the file (AND semantics).
        for p in patterns:
            if p.search(content) is None:
                matched = 0
                break
        if matched:
            change = repos.get_changeset(node.rev)
            path = node.path[len(root) + 1:]
            yield (self.env.href.docs(path), path, change.date,
                   change.author,
                   shorten_result(content.replace('\n', ' '), query))
def _index_milestone(self, milestone):
    """Submit one milestone to the full-text search backend."""
    description = milestone.description
    # Fall back from completion time to due date to "now".
    timestamp = milestone.completed or milestone.due or datetime.now(utc)
    so = FullTextSearchObject(
        self.project, milestone.resource,
        title=u"%s: %s" % (milestone.name, shorten_line(description)),
        changed=timestamp,
        involved=(),
        popularity=0,  # FIXME
        oneline=shorten_result(description),
        body=description,
    )
    self.backend.create(so, quiet=True)
def milestone_created(self, milestone):
    """Index a newly created milestone."""
    # Prefer completion time, then due date, then the current time.
    changed = milestone.completed or milestone.due or datetime.now(utc)
    so = FullTextSearchObject(
        self.project, milestone.resource,
        title=milestone.name,
        changed=changed,
        involved=(),
        popularity=0,  # FIXME
        oneline=shorten_result(milestone.description),
        body=milestone.description,
    )
    self.backend.create(so, quiet=True)
    self.log.debug("Milestone created for indexing: %s", milestone)
def _fill_so(self, changeset, node):
    """Build a FullTextSearchObject describing *node* at *changeset*."""
    summary = u'[%s]: %s' % (changeset.rev,
                             shorten_result(changeset.message))
    return FullTextSearchObject(
        self.project, node.resource,
        title=node.path,
        oneline=summary,
        comments=[changeset.message],
        changed=node.get_last_modified(),
        action='CREATE',
        author=changeset.author,
        created=changeset.date,
        body=node.get_content(),
    )
def get_search_results(self, req, terms, filters):
    """Search approved projects (stat=1) by owner, name and description."""
    db = self.env.get_db_cnx()
    sql_query, args = search_to_sql(
        db, ['owner', 'proj_name', 'proj_full_name', 'description'], terms)
    cursor = db.cursor()
    cursor.execute('select owner, proj_name,proj_full_name,description,'
                   'exam_time from project where stat=1 and' + sql_query,
                   args)
    base_url = self.env.config.get('projectsmanager', 'base_url')
    for owner, proj_name, proj_full_name, description, exam_time in cursor:
        # exam_time is stored in seconds; from_utimestamp expects usecs.
        yield (base_url + '/%s' % proj_name,
               proj_full_name,
               from_utimestamp(exam_time * 1000000),
               owner,
               shorten_result("Description: " + description, terms,
                              maxlen=100))
def post_list_search_relatedpages_json(request, dbp, obj, resource):
    """Return, as JSON, the wiki pages referencing each requested artifact."""
    require_permission(request.req, resource, dbp.env)
    unparsed_spec = request.req.args.get('spec', '')
    spec_name = json.loads(unparsed_spec) if unparsed_spec else ''
    attributes = json.loads(request.req.args.get('attributes', '[]'))
    if attributes is None:
        raise Exception("No artifacts specified.")
    artifacts_array = []
    for artifact in attributes:
        try:
            dbp.load_artifact(artifact)
            full_artifact = dbp.pool.get_item(artifact)
        except ValueError:
            # Unknown artifact id -- skip it.
            continue
        results = []
        for pagename, page_version_id, ref_count in \
                dbp.get_wiki_page_ref_counts(full_artifact):
            page = WikiPage(dbp.env, pagename)
            results.append({
                'href': get_resource_url(dbp.env, page.resource,
                                         request.req.href),
                'title': pagename,
                'date': user_time(request.req, format_datetime, page.time),
                'author': page.author,
                'excerpt': shorten_result(page.text),
            })
        artifacts_array.append({
            'id': full_artifact.get_id(),
            'href': request.req.href.customartifacts(
                'artifact', full_artifact.get_id(), action='view'),
            'title': unicode(full_artifact),
            'results': results,
        })
    _return_as_json(request, artifacts_array)
    return
def get_search_results(self, req, terms, filters):
    """Search custom-artifact attribute values for the given terms."""
    if 'asa-filter' not in filters:
        return
    for a_id, attr_name, attr_value, vid, time, author in \
            Searcher.search(self.env, terms):
        dbp = DBPool(self.env, InstancePool())
        dbp.load_artifact(a_id)
        artifact = dbp.pool.get_item(a_id)
        res = Resource('asa', a_id, vid)
        link = get_resource_url(self.env, res, req.href)
        title = unicode(artifact)
        # Describe the artifact type and the matching attribute.
        text = u"Custom Artifact of the type {0}.".format(
            artifact.__class__.get_name())
        text += u" {0}: {1}".format(attr_name,
                                    shorten_result(attr_value, terms))
        yield (link, title, time, author, text)
def milestone_created(self, milestone):
    """Index a newly created milestone (title includes its description)."""
    description = milestone.description
    so = FullTextSearchObject(
        self.project, milestone.resource,
        title=u'%s: %s' % (milestone.name, shorten_line(description)),
        changed=milestone.completed or milestone.due or datetime.now(utc),
        involved=(),
        popularity=0,  # FIXME
        oneline=shorten_result(description),
        body=description,
    )
    self.backend.create(so, quiet=True)
    self.log.debug("Milestone created for indexing: %s", milestone)
def get_search_results(self, req, terms, filters):
    """Overriding search results for Tickets"""
    if "ticket" not in filters:
        return
    ticket_realm = Resource("ticket")
    with self.env.db_query as db:
        sql, args = search_to_sql(
            db, ["summary", "keywords", "description", "reporter", "cc",
                 db.cast("id", "text")], terms
        )
        sql2, args2 = search_to_sql(db, ["newvalue"], terms)
        sql3, args3 = search_to_sql(db, ["value"], terms)
        ticketsystem = TicketSystem(self.env)
        # SECURITY FIX: the product filter is now a bound parameter
        # instead of being interpolated into the SQL string, which was
        # injectable via the 'product' request argument.
        product = req.args.get("product")
        if product:
            productsql = "product=%s AND"
            productargs = (product,)
        else:
            productsql = ""
            productargs = ()
        for summary, desc, author, type, tid, ts, status, resolution in db(
            """SELECT summary, description, reporter, type, id, time,
                      status, resolution
               FROM ticket
               WHERE (%s id IN (
                   SELECT id FROM ticket WHERE %s
                   UNION
                   SELECT ticket FROM ticket_change
                   WHERE field='comment' AND %s
                   UNION
                   SELECT ticket FROM ticket_custom WHERE %s
               ))
            """
            % (productsql, sql, sql2, sql3),
            productargs + args + args2 + args3,
        ):
            t = ticket_realm(id=tid)
            if "TICKET_VIEW" in req.perm(t):
                yield (
                    req.href.ticket(tid),
                    tag_(
                        "%(title)s: %(message)s",
                        title=tag.span(get_resource_shortname(self.env, t),
                                       class_=status),
                        message=ticketsystem.format_summary(summary, status,
                                                            resolution,
                                                            type),
                    ),
                    from_utimestamp(ts),
                    author,
                    shorten_result(desc, terms),
                )
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, ticket_realm, terms):
        yield result
def _fill_so(self, changeset, node):
    """Create a search object for *node*; attach content only if it is
    within the configured size limit."""
    so = FullTextSearchObject(
        self.project, node.resource,
        title=node.path,
        oneline=u'[%s]: %s' % (changeset.rev,
                               shorten_result(changeset.message)),
        comments=[changeset.message],
        changed=node.get_last_modified(),
        action='CREATE',
        author=changeset.author,
        created=changeset.date)
    # Large files are indexed by metadata only.
    if node.content_length <= self.max_size:
        so.body = node.get_content()
    return so
def changeset_added(self, repos, changeset):
    """Called after a changeset has been added to a repository."""
    # Index the commit message only; file contents are handled elsewhere.
    message = changeset.message
    so = FullTextSearchObject(
        self.project, changeset.resource,
        title=u'[%s]: %s' % (changeset.rev, shorten_line(message)),
        oneline=shorten_result(message),
        body=message,
        author=changeset.author,
        created=changeset.date,
        changed=changeset.date,
    )
    self.backend.create(so, quiet=True)
    self._update_changeset(changeset)
def get_search_results(self, req, terms, filters):
    """Overriding search results for Tickets"""
    if 'ticket' not in filters:
        return
    ticket_realm = Resource('ticket')
    with self.env.db_query as db:
        sql, args = search_to_sql(db, [
            'summary', 'keywords', 'description', 'reporter', 'cc',
            db.cast('id', 'text')
        ], terms)
        sql2, args2 = search_to_sql(db, ['newvalue'], terms)
        sql3, args3 = search_to_sql(db, ['value'], terms)
        ticketsystem = TicketSystem(self.env)
        # SECURITY FIX: bind the product value as a query parameter;
        # interpolating req.args into the SQL string was injectable.
        product = req.args.get('product')
        if product:
            productsql = "product=%s AND"
            productargs = (product,)
        else:
            productsql = ""
            productargs = ()
        for summary, desc, author, type, tid, ts, status, resolution in \
                db("""SELECT summary, description, reporter, type, id,
                             time, status, resolution
                      FROM ticket
                      WHERE (%s id IN (
                          SELECT id FROM ticket WHERE %s
                          UNION
                          SELECT ticket FROM ticket_change
                          WHERE field='comment' AND %s
                          UNION
                          SELECT ticket FROM ticket_custom WHERE %s
                      ))
                   """ % (productsql, sql, sql2, sql3),
                   productargs + args + args2 + args3):
            t = ticket_realm(id=tid)
            if 'TICKET_VIEW' in req.perm(t):
                yield (req.href.ticket(tid),
                       tag_("%(title)s: %(message)s",
                            title=tag.span(get_resource_shortname(
                                self.env, t), class_=status),
                            message=ticketsystem.format_summary(
                                summary, status, resolution, type)),
                       from_utimestamp(ts), author,
                       shorten_result(desc, terms))
    # Attachments
    for result in AttachmentModule(self.env) \
            .get_search_results(req, ticket_realm, terms):
        yield result
def add_bulk_changesets(self, changesets):
    """Index many changesets' commit messages in one backend call."""
    sos = []
    for changeset in changesets:
        sos.append(FullTextSearchObject(
            self.project, changeset.resource,
            title=u'[%s]: %s' % (changeset.rev,
                                 shorten_line(changeset.message)),
            oneline=shorten_result(changeset.message),
            body=changeset.message,
            author=changeset.author,
            created=changeset.date,
            changed=changeset.date,
        ))
        self._update_changeset(changeset)
    # Submit everything in a single batch.
    self.backend.add(sos, quiet=True)
def get_search_results(self, req, terms, filters):
    """Search changeset revisions, log messages and authors."""
    if 'changeset' not in filters:
        return
    repos = self.env.get_repository(req.authname)
    db = self.env.get_db_cnx()
    sql, args = search_to_sql(db, ['rev', 'message', 'author'], terms)
    cursor = db.cursor()
    cursor.execute("SELECT rev,time,author,message "
                   "FROM revision WHERE " + sql, args)
    for rev, ts, author, log in cursor:
        # Respect fine-grained changeset permissions.
        if not repos.authz.has_permission_for_changeset(rev):
            continue
        yield (req.href.changeset(rev),
               '[%s]: %s' % (rev, shorten_line(log)),
               datetime.fromtimestamp(ts, utc),
               author,
               shorten_result(log, terms))
def _index_wiki_page(self, page):
    """Index a wiki page, with metadata drawn from its full history."""
    versions = list(page.get_history())
    authors = list(set(v[2] for v in versions))
    comments = [v[3] for v in versions]
    so = FullTextSearchObject(
        self.project, page.resource,
        title=u"%s: %s" % (page.name, shorten_line(page.text)),
        author=page.author,
        changed=page.time,
        created=versions[-1][1],  # .time of oldest version
        tags=self._page_tags(page.resource.realm, page.name),
        involved=authors,
        popularity=0,  # FIXME
        oneline=shorten_result(page.text),
        body=page.text,
        comments=comments,
    )
    self.backend.create(so, quiet=True)
    self.log.debug("WikiPage created for indexing: %s", page.name)
def _index_changeset(self, repos, changeset):
    """Index a changeset's commit message and, when enabled, the content
    of every node it touched."""
    # Index the commit message
    so = FullTextSearchObject(
        self.project, changeset.resource,
        title=u"[%s]: %s" % (changeset.rev,
                             shorten_line(changeset.message)),
        oneline=shorten_result(changeset.message),
        body=changeset.message,
        author=changeset.author,
        created=changeset.date,
        changed=changeset.date,
    )
    # FIX: removed the unused `success` local -- the return value of
    # create() was never inspected.
    self.backend.create(so, quiet=True)
    if not self.fulltext_index_svn_nodes:
        return

    def _changes(repos, changeset):
        # Moved/deleted paths first drop their stale index entries.
        for path, kind, change, base_path, base_rev in \
                changeset.get_changes():
            if change == Changeset.MOVE:
                yield FullTextSearchObject(self.project, "source",
                                           base_path, repos.resource,
                                           action="DELETE")
            elif change == Changeset.DELETE:
                yield FullTextSearchObject(self.project, "source", path,
                                           repos.resource, action="DELETE")
            if change in (Changeset.ADD, Changeset.EDIT, Changeset.COPY,
                          Changeset.MOVE):
                node = repos.get_node(path, changeset.rev)
                so = FullTextSearchObject(
                    self.project, node.resource,
                    title=node.path,
                    oneline=u"[%s]: %s" % (
                        changeset.rev, shorten_result(changeset.message)),
                    comments=[changeset.message],
                    changed=node.get_last_modified(),
                    author=changeset.author,
                    created=changeset.date,
                )
                # Only embed content below the configured size limit.
                if node.content_length <= self.max_size:
                    stream = node.get_content()
                    if stream:
                        so.body = stream.read()
                        so.extract = True
                yield so

    for so in _changes(repos, changeset):
        self.backend.create(so, quiet=True)
def wiki_page_added(self, page):
    """Index a newly added wiki page."""
    versions = list(page.get_history())
    so = FullTextSearchObject(
        self.project, page.resource,
        title=u'%s: %s' % (page.name, shorten_line(page.text)),
        author=page.author,
        changed=page.time,
        created=versions[-1][1],  # .time of oldest version
        tags=self._page_tags(page.resource.realm, page.name),
        involved=list(set(v[2] for v in versions)),
        popularity=0,  # FIXME
        oneline=shorten_result(page.text),
        body=page.text,
        comments=[v[3] for v in versions],
    )
    self.backend.create(so, quiet=True)
    self._update_wiki(page)
    self.log.debug("WikiPage created for indexing: %s", page.name)
def _index_ticket(self, ticket):
    """Build and submit a search object for one ticket."""
    ticketsystem = TicketSystem(self.env)
    resource_name = get_resource_shortname(self.env, ticket.resource)
    resource_desc = ticketsystem.get_resource_description(
        ticket.resource, format="summary")
    values = ticket.values
    # CC list falls back to the reporter when empty.
    involved = (re.split(r"[;,\s]+", values.get("cc", ""))
                or values.get("reporter"))
    so = FullTextSearchObject(
        self.project, ticket.resource,
        title=u"%(title)s: %(message)s" % {"title": resource_name,
                                           "message": resource_desc},
        author=values.get("reporter"),
        changed=values.get("changetime"),
        created=values.get("time"),
        tags=values.get("keywords"),
        involved=involved,
        popularity=0,  # FIXME
        oneline=shorten_result(values.get("description", "")),
        body=u"%r" % (values,),
        comments=[t[4] for t in ticket.get_changelog()],
    )
    self.backend.create(so, quiet=True)
    self.log.debug("Ticket added for indexing: %s", ticket)
def _process_doc(self, doc):
    """Turn a raw search hit into a template-ready dict."""
    ui_doc = dict(doc)
    # Product-scoped docs link through the product environment's href.
    if doc['product']:
        product_href = ProductEnvironment(self.env, doc['product']).href
        # pylint: disable=too-many-function-args
        ui_doc["href"] = product_href(doc['type'], doc['id'])
    else:
        ui_doc["href"] = self.req.href(doc['type'], doc['id'])
    if doc['content']:
        ui_doc['content'] = shorten_result(doc['content'])
    if doc['time']:
        ui_doc['date'] = user_time(self.req, format_datetime, doc['time'])
    if self.view is None:
        # Free-text view: delegate title rendering to the participant.
        participant = self.allowed_participants[doc['type']]
        ui_doc['title'] = participant.format_search_results(doc)
    return ui_doc
def get_search_results(self, req, resource_realm, terms):
    """Return a search result generator suitable for ISearchSource.

    Search results are attachments on resources of the given
    `resource_realm.realm` whose filename, description or author match
    the given terms.
    """
    with self.env.db_query as db:
        sql_query, args = search_to_sql(
            db, ['filename', 'description', 'author'], terms)
        rows = db("""
            SELECT id, time, filename, description, author
            FROM attachment WHERE type = %s AND """ + sql_query,
            (resource_realm.realm,) + args)
        for id, time, filename, desc, author in rows:
            attachment = resource_realm(id=id).child(self.realm, filename)
            # Only yield attachments the user may view.
            if 'ATTACHMENT_VIEW' in req.perm(attachment):
                yield (get_resource_url(self.env, attachment, req.href),
                       get_resource_shortname(self.env, attachment),
                       from_utimestamp(time), author,
                       shorten_result(desc, terms))
def get_search_results(self, req, terms, filters):
    """Search mailing-list messages, restricted to lists the user may view."""
    if 'mailinglist' not in filters:
        return
    mailinglist_realm = Resource('mailinglist')
    # Collect the lists this user is allowed to see.
    lists = {}
    for mailinglist in Mailinglist.select(self.env):
        if "MAILINGLIST_VIEW" in req.perm(mailinglist.resource):
            lists[mailinglist.id] = mailinglist
    if not lists:
        self.log.debug("This user can't view any lists, so not searching.")
        return
    db = self.env.get_read_db()
    sql, args = search_to_sql(
        db, ['subject', 'body', 'from_email', 'from_name'], terms)
    cursor = db.cursor()
    # The IN (...) clause is built from integer list ids taken from the
    # database, not from user input.
    query = """
    SELECT id, subject, body, from_name, from_email, date, list, conversation
    FROM mailinglistmessages WHERE list IN (%s) AND %s
    """ % (",".join(map(str, lists.keys())), sql,)
    self.log.debug("Search query: %s", query)
    cursor.execute(query, args)
    for (mid, subject, body, from_name, from_email, date, mlist,
         conversation) in cursor:
        # build resource ourself to speed things up
        m = mailinglist_realm(id="%s/%d/%d" % (lists[mlist].emailaddress,
                                               conversation, mid))
        if 'MAILINGLIST_VIEW' in req.perm(m):
            yield (req.href.mailinglist(m.id),
                   tag("%s: %s" % (lists[mlist].name, subject)),
                   datetime.fromtimestamp(date, utc),
                   "%s <%s>" % (from_name, from_email),
                   shorten_result(body, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, mailinglist_realm, terms):
        yield result
def get_search_results(self, req, terms, filters):
    """Yield milestones whose name or description match the search terms."""
    if 'milestone' not in filters:
        return
    term_regexps = search_to_regexps(terms)
    milestone_realm = Resource('milestone')
    for name, due, completed, description \
            in MilestoneCache(self.env).milestones.itervalues():
        # BUG FIX: Trac search semantics AND the terms together -- every
        # term must match name or description. Previously `any` was used,
        # returning milestones that matched only a single term.
        if all(r.search(description) or r.search(name)
               for r in term_regexps):
            milestone = milestone_realm(id=name)
            if 'MILESTONE_VIEW' in req.perm(milestone):
                # Prefer completion time, then due date, then "now".
                dt = (completed if completed else
                      due if due else datetime.now(utc))
                yield (get_resource_url(self.env, milestone, req.href),
                       get_resource_name(self.env, milestone),
                       dt, '', shorten_result(description, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, milestone_realm, terms):
        yield result
def get_search_results(self, req, terms, filters):
    """Search cached milestones; every term must match (AND semantics)."""
    if 'milestone' not in filters:
        return
    term_regexps = search_to_regexps(terms)
    milestone_realm = Resource(self.realm)
    for name, due, completed, description \
            in MilestoneCache(self.env).milestones.itervalues():
        matches = all(r.search(description) or r.search(name)
                      for r in term_regexps)
        if not matches:
            continue
        milestone = milestone_realm(id=name)
        if 'MILESTONE_VIEW' in req.perm(milestone):
            # Completion time, else due date, else the current time.
            dt = (completed if completed else
                  due if due else datetime_now(utc))
            yield (get_resource_url(self.env, milestone, req.href),
                   get_resource_name(self.env, milestone),
                   dt, '', shorten_result(description, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, milestone_realm, terms):
        yield result
def get_search_results(self, req, terms, filters):
    """Search ticket fields and comments; closed tickets are rendered
    struck-through."""
    if 'ticket' not in filters:
        return
    db = self.env.get_db_cnx()
    sql, args = search_to_sql(db, ['b.newvalue'], terms)
    sql2, args2 = search_to_sql(db, ['summary', 'keywords', 'description',
                                     'reporter', 'cc'], terms)
    cursor = db.cursor()
    # Match either a ticket field or a comment change.
    cursor.execute("SELECT DISTINCT a.summary,a.description,a.reporter, "
                   "a.keywords,a.id,a.time,a.status FROM ticket a "
                   "LEFT JOIN ticket_change b ON a.id = b.ticket "
                   "WHERE (b.field='comment' AND %s ) OR %s" % (sql, sql2),
                   args + args2)
    for summary, desc, author, keywords, tid, date, status in cursor:
        if status == 'closed':
            ticket = Markup('<span style="text-decoration: line-through">'
                            '#%s</span>: ', tid)
        else:
            ticket = '#%d: ' % tid
        self.log.debug("get_search_results - %s" % summary)
        yield (req.href.ticket(tid),
               ticket + shorten_line(summary),
               datetime.fromtimestamp(date, utc),
               author,
               shorten_result(desc, terms),
               summary)
def get_search_results(self, req, terms, filters):
    """Search milestone names and descriptions via SQL."""
    if 'milestone' not in filters:
        return
    with self.env.db_query as db:
        sql_query, args = search_to_sql(db, ['name', 'description'], terms)
        milestone_realm = Resource('milestone')
        rows = db("""
            SELECT name, due, completed, description FROM milestone
            WHERE """ + sql_query, args)
        for name, due, completed, description in rows:
            milestone = milestone_realm(id=name)
            if 'MILESTONE_VIEW' not in req.perm(milestone):
                continue
            # Completion time, else due date, else "now".
            dt = (from_utimestamp(completed) if completed else
                  from_utimestamp(due) if due else datetime.now(utc))
            yield (get_resource_url(self.env, milestone, req.href),
                   get_resource_name(self.env, milestone), dt, '',
                   shorten_result(description, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, milestone_realm, terms):
        yield result
def post_list_search_relatedpages_json(request, dbp, obj, resource):
    """Emit a JSON list of wiki pages that reference each given artifact."""
    require_permission(request.req, resource, dbp.env)
    unparsed_spec = request.req.args.get('spec', '')
    spec_name = json.loads(unparsed_spec) if unparsed_spec else ''
    attributes = json.loads(request.req.args.get('attributes', '[]'))
    if attributes is None:
        raise Exception("No artifacts specified.")
    artifacts_array = []
    for artifact in attributes:
        try:
            dbp.load_artifact(artifact)
            full_artifact = dbp.pool.get_item(artifact)
        except ValueError:
            # Skip ids that cannot be resolved to artifacts.
            continue
        results = []
        for pagename, page_version_id, ref_count in \
                dbp.get_wiki_page_ref_counts(full_artifact):
            page = WikiPage(dbp.env, pagename)
            results.append(
                {'href': get_resource_url(dbp.env, page.resource,
                                          request.req.href),
                 'title': pagename,
                 'date': user_time(request.req, format_datetime,
                                   page.time),
                 'author': page.author,
                 'excerpt': shorten_result(page.text)})
        artifacts_array.append(
            {'id': full_artifact.get_id(),
             'href': request.req.href.customartifacts(
                 'artifact', full_artifact.get_id(), action='view'),
             'title': unicode(full_artifact),
             'results': results})
    _return_as_json(request, artifacts_array)
    return
def ticket_created(self, ticket):
    """Index a newly created ticket."""
    ticketsystem = TicketSystem(self.env)
    resource_name = get_resource_shortname(self.env, ticket.resource)
    resource_desc = ticketsystem.get_resource_description(
        ticket.resource, format='summary')
    values = ticket.values
    # CC list split on separators; reporter is the fallback when empty.
    involved = (re.split(r'[;,\s]+', values.get('cc', ''))
                or values.get('reporter'))
    so = FullTextSearchObject(
        self.project, ticket.resource,
        title=u"%(title)s: %(message)s" % {'title': resource_name,
                                           'message': resource_desc},
        author=values.get('reporter'),
        changed=values.get('changetime'),
        created=values.get('time'),
        tags=values.get('keywords'),
        involved=involved,
        popularity=0,  # FIXME
        oneline=shorten_result(values.get('description', '')),
        body=u'%r' % (values,),
        comments=[t[4] for t in ticket.get_changelog()],
    )
    self.backend.create(so, quiet=True)
    self._update_ticket(ticket)
    self.log.debug("Ticket added for indexing: %s", ticket)
def get_search_results(self, req, resource_realm, terms):
    """Return a search result generator suitable for ISearchSource.

    Search results are attachments on resources of the given
    `resource_realm.realm` whose filename, description or author match
    the given terms.
    """
    db = self.env.get_db_cnx()
    sql_query, args = search_to_sql(
        db, ['filename', 'description', 'author'], terms)
    cursor = db.cursor()
    cursor.execute("SELECT id,time,filename,description,author "
                   "FROM attachment "
                   "WHERE type = %s "
                   "AND " + sql_query,
                   (resource_realm.realm, ) + args)
    for id, time, filename, desc, author in cursor:
        attachment = resource_realm(id=id).child('attachment', filename)
        # Filter out attachments the user is not allowed to view.
        if 'ATTACHMENT_VIEW' not in req.perm(attachment):
            continue
        yield (get_resource_url(self.env, attachment, req.href),
               get_resource_shortname(self.env, attachment),
               datetime.fromtimestamp(time, utc), author,
               shorten_result(desc, terms))
def get_search_results(self, req, terms, filters):
    """Search archived mail messages for the given terms."""
    if 'mailarchive' not in filters:
        return
    db = self.env.get_db_cnx()
    sql_query, args = search_to_sql(
        db, ['m1.messageid', 'm1.subject', 'm1.fromname', 'm1.fromaddr',
             'm1.text'], terms)
    cursor = db.cursor()
    cursor.execute("SELECT m1.id, m1.subject, m1.fromname, m1.fromaddr, "
                   "m1.text, m1.utcdate as localdate "
                   "FROM mailarc m1 "
                   "WHERE "
                   "" + sql_query, args)
    mailarchive_realm = Resource('mailarchive')
    for id, subject, fromname, fromaddr, text, localdate in cursor:
        resource = mailarchive_realm(id=id, version=None)
        # Skip messages the user is not allowed to view.
        if 'MAILARCHIVE_VIEW' not in req.perm(resource):
            continue
        yield (req.href.mailarchive(id),
               subject,
               datetime.fromtimestamp(localdate, utc),
               get_author(fromname, fromaddr),
               shorten_result(text, terms))
def get_search_results(self, req, terms, filters):
    """Search the latest version of every wiki page for *terms*."""
    if 'wiki' not in filters:
        return
    with self.env.db_query as db:
        sql_query, args = search_to_sql(
            db, ['w1.name', 'w1.author', 'w1.text'], terms)
        wiki_realm = Resource('wiki')
        rows = db("""
            SELECT w1.name, w1.time, w1.author, w1.text
            FROM wiki w1,(SELECT name, max(version) AS ver
                          FROM wiki GROUP BY name) w2
            WHERE w1.version = w2.ver AND w1.name = w2.name
            AND """ + sql_query, args)
        for name, ts, author, text in rows:
            page = wiki_realm(id=name)
            if 'WIKI_VIEW' not in req.perm(page):
                continue
            yield (get_resource_url(self.env, page, req.href),
                   '%s: %s' % (name, shorten_line(text)),
                   from_utimestamp(ts), author,
                   shorten_result(text, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, wiki_realm, terms):
        yield result
def get_search_results(self, req, terms, filters):
    """Search milestone names and descriptions."""
    if 'milestone' not in filters:
        return
    db = self.env.get_db_cnx()
    sql_query, args = search_to_sql(db, ['name', 'description'], terms)
    cursor = db.cursor()
    cursor.execute(
        "SELECT name,due,completed,description "
        "FROM milestone "
        "WHERE " + sql_query, args)
    milestone_realm = Resource('milestone')
    for name, due, completed, description in cursor:
        milestone = milestone_realm(id=name)
        if 'MILESTONE_VIEW' in req.perm(milestone):
            # BUG FIX: a milestone with neither a completion time nor a
            # due date previously crashed datetime.fromtimestamp(None);
            # fall back to the current time, as sibling implementations do.
            ts = completed or due
            dt = (datetime.fromtimestamp(ts, utc) if ts
                  else datetime.now(utc))
            yield (get_resource_url(self.env, milestone, req.href),
                   get_resource_name(self.env, milestone),
                   dt, '',
                   shorten_result(description, terms))
    # Attachments
    for result in AttachmentModule(self.env).get_search_results(
            req, milestone_realm, terms):
        yield result
class TracRepoSearchPlugin(Component):
    """ Search the source repository.

    Implements Trac's ISearchSource so repository file contents show up
    in the global search, and IPermissionRequestor to declare the
    REPO_SEARCH permission that gates the feature.
    """
    implements(ISearchSource, IPermissionRequestor)

    def _get_filters(self):
        # Read the include/exclude glob lists from the [repo-search]
        # section of trac.ini; globs are separated by os.pathsep and
        # empty entries are dropped.
        includes = [glob for glob in
                    self.env.config.get('repo-search', 'include',
                                        '').split(os.path.pathsep)
                    if glob]
        excludes = [glob for glob in
                    self.env.config.get('repo-search', 'exclude',
                                        '').split(os.path.pathsep)
                    if glob]
        return (includes, excludes)

    def walk_repo(self, repo):
        """ Walk all nodes in the repo that match the filters. """
        includes, excludes = self._get_filters()

        def searchable(path):
            # Exclude paths
            for exclude in excludes:
                if fnmatch(path, exclude):
                    return 0
            # Include paths
            for include in includes:
                if fnmatch(path, include):
                    return 1
            # No include globs configured means everything not excluded
            # is searchable.
            return not includes

        def do_walk(path):
            # Depth-first traversal starting at `path`, yielding every
            # node (files and directories) that passes `searchable`.
            node = repo.get_node(path)
            # NOTE(review): `basename` is computed but never used.
            basename = posixpath.basename(path)
            if searchable(node.path):
                yield node
            if node.kind == Node.DIRECTORY:
                for subnode in node.get_entries():
                    for result in do_walk(subnode.path):
                        yield result

        for node in do_walk('/'):
            yield node

    # IPermissionRequestor methods
    def get_permission_actions(self):
        yield 'REPO_SEARCH'

    # ISearchSource methods
    def get_search_filters(self, req):
        if req.perm.has_permission('REPO_SEARCH'):
            yield ('repo', 'Source Repository', 0)

    def get_search_results(self, req, query, filters):
        """Yield (href, title, date, author, excerpt) search hits for
        repository nodes whose content contains every query term."""
        if 'repo' not in filters:
            return
        repo = self.env.get_repository(authname=req.authname)
        # Normalise the query to a list of lower-cased terms.
        if not isinstance(query, list):
            query = query.split()
        query = [q.lower() for q in query]
        # NOTE(review): `db` and the unpacked `include`/`excludes` are
        # never used below; likely leftovers.
        db = self.env.get_db_cnx()
        include, excludes = self._get_filters()
        to_unicode = Mimeview(self.env).to_unicode

        # Use indexer if possible, otherwise fall back on brute force search.
        try:
            from tracreposearch.indexer import Indexer
            self.indexer = Indexer(self.env)
            self.indexer.reindex()
            # Indexer path: look up matching filenames directly.
            walker = lambda repo, query: [
                repo.get_node(filename)
                for filename in self.indexer.find_words(query)
            ]
        except TracError, e:  # Python 2 except syntax
            self.env.log.warning(e)
            self.env.log.warning('Falling back on full repository walk')

            def full_walker(repo, query):
                # Brute force: decode each node's content and require
                # every term to appear in it.
                for node in self.walk_repo(repo):
                    # Search content
                    matched = 1
                    content = node.get_content()
                    if not content:
                        continue
                    content = to_unicode(content.read().lower(),
                                         node.get_content_type())
                    for term in query:
                        if term not in content:
                            matched = 0
                            break
                    if matched:
                        yield node

            walker = full_walker

        if not req.perm.has_permission('REPO_SEARCH'):
            return

        # NOTE(review): `match_name` is defined but never called.
        def match_name(name):
            for term in query:
                if term not in name:
                    return 0
            return 1

        for node in walker(repo, query):
            change = repo.get_changeset(node.rev)
            if node.kind == Node.DIRECTORY:
                yield (self.env.href.browser(node.path), node.path,
                       change.date, change.author, 'Directory')
            else:
                # Find the first line containing any term so the result
                # link can jump straight to it (1-based line number).
                found = 0
                content = to_unicode(node.get_content().read(),
                                     node.get_content_type())
                for n, line in enumerate(content.splitlines()):
                    line = line.lower()
                    for q in query:
                        idx = line.find(q)
                        if idx != -1:
                            found = n + 1
                            break
                    if found:
                        break
                yield (self.env.href.browser(node.path) +
                       (found and '#L%i' % found or ''),
                       node.path, change.date, change.author,
                       shorten_result(content, query))
def process_request(self, req):
    """Handle the repository-search request page backed by SupoSE.

    Reads the query parameters, optionally (re)indexes the repository
    with the external `supose` tool, runs a SupoSE search via a shell
    pipe, parses its textual output with a regex and renders the hits
    through reposearch.html.
    """
    add_stylesheet(req, 'common/css/trac.css')
    add_stylesheet(req, 'common/css/search.css')
    add_script(req, 'common/js/jquery.js')
    add_script(req, 'common/js/folding.js')
    add_stylesheet(req, 'supose/css/supose.css')
    # raise Exception( path )
    # Request parameters: q=free query, p=path, f=filename,
    # c=contents-only query, o=raw extra query terms, r=revision(s).
    query = req.args.get('q', '')
    path = req.args.get('p', '')
    file = req.args.get('f', '')
    contents = req.args.get('c', '')
    others = req.args.get('o', '')
    revisions = req.args.get('r', '')
    if not path:
        path = "/"
    data = self._prepare_data(req, query, "", "")
    data['path'] = path
    data['file'] = file
    data['others'] = others
    data['revs'] = revisions
    data['content'] = contents
    to_unicode = Mimeview(self.env).to_unicode
    results = []
    if query or contents or others:
        # Quickjump uses whichever query field was supplied, in order
        # of precedence: q, then c, then o.
        if query:
            data['quickjump'] = self._check_quickjump(req, query, path)
        elif contents:
            data['quickjump'] = self._check_quickjump(req, contents, path)
        elif others:
            data['quickjump'] = self._check_quickjump(req, others, path)
        if not req.perm.has_permission('REPO_SEARCH'):
            return
        # Both the supose binary and its index directory must be
        # configured in the [supose] section of trac.ini.
        supose = self.env.config.get('supose', 'supose' )
        if not supose:
            raise Exception( "supose have to be specified in the section supose of trac.ini" )
        index = self.env.config.get('supose', 'index' )
        if not index:
            raise Exception( "index folder have to be specified in the section supose of trac.ini" )
        repo = self.env.get_repository(authname=req.authname)
        autoindex = self.config.getbool('supose', 'autoindex')
        if autoindex:
            # Incremental indexing: 'indexedrev' in trac.ini remembers
            # the youngest revision already indexed.
            indexedrev = self.env.config.get('supose', 'indexedrev' )
            # Index with SupoSE
            youngest = int( repo.youngest_rev )
            #raise Exception( int( indexedrev ) < youngest )
            # Strip the svn: prefix from the repository base and turn
            # the local path into a file:// URL for supose.
            base = re.search("(svn:.*:)(.*:.*)", repo.get_base())
            if not base:
                base = re.search("(svn:.*:)(.*)", repo.get_base())
            base = base.group(2)
            if base.startswith('/'):
                base = "file://" + base
            else:
                base = "file:///" + base
            if not indexedrev:
                # First Scan
                # NOTE(review): command string built by concatenation and
                # run through a shell via os.popen — config values with
                # shell metacharacters would be interpreted by the shell.
                first_scan_cmd = supose + " scan --url "
                first_scan_cmd += base
                first_scan_cmd += " --create --index "
                first_scan_cmd += index
                scan_res = os.popen( first_scan_cmd ).read()
                indexedrev = youngest
                self.env.config.set('supose', 'indexedrev', youngest)
                self.env.config.save()
            indexedrev = int( indexedrev )
            if int( indexedrev ) < youngest:
                # Catch-up scan from the last indexed revision.
                new_index_cmd = supose + " scan --url "
                new_index_cmd += base
                new_index_cmd += " --fromrev " + str( indexedrev )
                new_index_cmd += " --index " + index
                scan_res = os.popen( new_index_cmd ).read()
                # raise Exception( new_index_cmd )
                self.env.config.set('supose', 'indexedrev', youngest)
                self.env.config.save()
        # SupoSE search
        supose_cmd = str( supose )
        supose_cmd += " search --fields revision "
        supose_cmd += "filename path contents --index "
        supose_cmd += index + " --query "
        # Assemble the Lucene-style query string for supose.
        supose_query = "\"" + query + "\""
        if contents:
            supose_query = "+contents:\"" + contents + "\""
        if path:
            if path != "/":
                # Normalise the path clause to /dir/.../* form.
                if path[0] != "/":
                    supose_query += " +path:/"
                else:
                    supose_query += " +path:"
                supose_query += path
                if path[len(path)-1] !="/":
                    supose_query += "/"
                supose_query += "*"
        if file:
            supose_query += " +filename:"+file
        if revisions:
            # "N" searches one revision; "N-M" searches a range.
            revqrange = re.split( "-", revisions )
            if len(revqrange) == 1:
                supose_query += " +revision:" + revisions
            if len(revqrange) == 2:
                supose_query += " +revision:[" + revqrange[0] + " TO " + revqrange[1] + "]"
        if others:
            supose_query += " " + others
        # SECURITY NOTE(review): user-supplied query text is passed
        # unescaped to a shell command line — shell injection risk;
        # should use subprocess.run([...], shell=False) with a list.
        repo_res = os.popen( supose_cmd + supose_query ).read()
        data['querystring'] = supose_query
        # Split the flat supose output into 8-field hit groups:
        # (prefix, revision, label, filename, label, path, label) +
        # trailing contents text.
        repo_reg = "(.*[\d]+:[ ]+REVISION:)([\d]+)"
        repo_reg += "( +FILENAME:)(.+)"
        repo_reg += "( +PATH:)(.+)"
        repo_reg += "( +CONTENTS:)"
        hits = re.split( repo_reg, repo_res )
        # raise Exception( hits )
        spit_len = 8
        rng = range( 1, len(hits), spit_len )
        # NOTE(review): `search_res` is built but never used.
        search_res = tag.div( "testing", id = "reposearch")
        exist_file = []
        for r in rng:
            rev = hits[r+1]
            filename = hits[r+3]
            path = hits[r+5]
            full_path = path+filename+rev
            # De-duplicate hits for the same file@revision.
            if not full_path in exist_file:
                exist_file.append( full_path )
                contents = to_unicode( hits[r+7] )
                change = repo.get_changeset(rev)
                href = self.env.href.browser( path + filename ) + "?rev=" + rev
                title = path + filename + "@" + rev
                date = change.date
                author = change.author
                excerpt = shorten_result( contents, query )
                results.extend( self.generate_result( href, title, date, author, excerpt ) )
    # NOTE(review): original indentation of this tail was ambiguous in
    # the collapsed source; `results` is pre-initialised above so the
    # check is safe even when no search was run — confirm against the
    # upstream plugin.
    if results:
        data.update(self._prepare_results(req, "", results))
    return 'reposearch.html', data, None
def get_view_artifact(request, dbp, obj, resource):
    """Render the "view artifact" page for *obj*.

    Collects wiki pages and other artifacts that reference (or are
    referenced by) the artifact, builds a yUML class diagram of those
    relations, records the access, and returns the Trac template tuple
    ``(template_name, data, content_type)``.
    """
    require_permission(request.req, resource, dbp.env)
    artifact_url = request.req.href.customartifacts(
        'artifact/{0}'.format(obj.get_id()))
    spec_name, spec_url, values = _get_artifact_details(obj, request.req)

    # Getting wiki pages that refer the artifact
    related_pages = []
    from trac.wiki.formatter import OutlineFormatter
    from trac.web.chrome import web_context

    class NullOut(object):
        # Write-sink passed to the formatter; only the outline side
        # effect of format() is wanted, not its output.
        def write(self, *args):
            pass

    for pagename, page_version_id, ref_count in \
            dbp.get_wiki_page_ref_counts(obj):
        page = WikiPage(dbp.env, pagename)
        fmt = OutlineFormatter(dbp.env, web_context(request.req))
        fmt.format(page.text, NullOut())
        title = ''
        text = page.text
        if fmt.outline:
            # Use the first heading as the title and strip that heading
            # line from the excerpt text.
            title = fmt.outline[0][2]
            text = re.sub('[=]+[ ]+' + title + '[ ]+[=]+\s?', '', text)
        related_pages.append({
            'href': get_resource_url(dbp.env, page.resource,
                                     request.req.href),
            'title': title if title else pagename,
            'date': user_time(request.req, format_datetime, page.time),
            'author': page.author,
            'excerpt': shorten_result(text)})

    # Getting artifacts that this artifact refers to
    referred_artifacts = []
    from AdaptiveArtifacts import get_artifact_id_names_from_text
    for attribute_name, value in obj.get_values():
        for related_artifact_id, related_artifact_text in \
                get_artifact_id_names_from_text(unicode(value)):
            # Lazily load referenced artifacts into the pool.
            if dbp.pool.get_item(related_artifact_id) is None:
                dbp.load_artifact(related_artifact_id)
            referred_artifacts.append(
                (dbp.pool.get_item(related_artifact_id),
                 "%s (%s)" % (related_artifact_text, attribute_name)))

    # Getting artifacts whose attribute values refer this artifact
    referring_artifacts = []
    for related_artifact_id, related_artifact_version_id, ref_count in \
            dbp.get_related_artifact_ref_counts(obj):
        if dbp.pool.get_item(related_artifact_id) is None:
            dbp.load_artifact(related_artifact_id)
        artifact = dbp.pool.get_item(related_artifact_id)
        url = request.req.href.customartifacts(
            'artifact/%d' % (artifact.get_id(), ), action='view')
        # Plain instances (class Instance) carry no spec name.
        rel_spec_name = artifact.__class__.get_name(
            ) if not artifact.__class__ is Instance else None
        # NOTE(review): trailing comma makes rel_spec_url a 1-tuple,
        # not a string — looks unintentional; confirm how the template
        # consumes 'spec_url'.
        rel_spec_url = request.req.href.customartifacts(
            'spec', artifact.__class__.get_id(), action='view'),
        id_version, time, author, ipnr, comment, readonly = \
            dbp.get_latest_version_details(artifact.get_id())
        referring_artifacts.append({
            'href': url,
            'spec_name': rel_spec_name,
            'spec_url': rel_spec_url,
            'author': author,
            'date': user_time(request.req, format_datetime, time),
            'artifact': artifact})

    # Build yuml url
    class YUMLDiagram(object):
        # Minimal builder for the yuml.me class-diagram DSL.
        def __init__(self):
            self.classes = []
            self.base_url = "http://yuml.me/diagram/plain/class/"
            self._diagram = ""
            self.is_incomplete = False

        def add_class(self, header, body, associations):
            self.classes.append({
                'header': header,
                'body': body,
                'associations': associations})

        def serialize(self):
            # Emit "[Header|field;field]," fragments plus one
            # "[A]-label>[B]," fragment per association.
            for yuml_class in self.classes:
                yuml_fragment = "[" + yuml_class['header']
                if yuml_class['body']:
                    yuml_fragment += "|" + ";".join(yuml_class['body'])
                yuml_fragment += "],"
                self._diagram += yuml_fragment
                if yuml_class['associations']:
                    for association_target, association_label, in \
                            yuml_class['associations']:
                        yuml_fragment = "[%s]-%s>[%s]," % (
                            yuml_class['header'], association_label,
                            association_target)
                        self._diagram += yuml_fragment

        def get_dsl_text(self):
            # NOTE(review): both replace() arguments render as a plain
            # space in this copy — the second was presumably a
            # non-breaking space; confirm against upstream.
            return self._diagram.encode('utf8').replace(" ", " ")

        def get_url(self):
            #Could be used for GET requests, as long as it doesn't exceed the maximum URL size
            #return self.base_url + quote(self.get_dsl_text(), "[],;:->=")
            from urllib2 import Request, urlopen
            from urllib import urlencode
            # NOTE(review): references the enclosing `yuml` variable
            # instead of `self` — works only because a single instance
            # is created below; also HTTPError must be imported at file
            # level for the except clause to resolve.
            try:
                image_filename = urlopen(
                    Request(yuml.base_url,
                            data=urlencode({'dsl_text':
                                            yuml.get_dsl_text()}))).read()
            except HTTPError:
                return ""
            return self.base_url + image_filename

    yuml = YUMLDiagram()

    def artifact_to_yuml_class(rel_artifact, include_values=True):
        # Convert an artifact into the dict shape add_class() expects.
        def sanitize(value):
            # Replace characters that are significant in the yuml DSL
            # and truncate over-long values.
            if type(value) == list:
                value = ",".join(value)
            for i, j in {
                    "[": "(",
                    "]": ")",
                    ",": ".",
                    ";": ".",
                    "->": "-",
                    "|": "\\",
                    }.iteritems():
                value = value.replace(i, j)
            return value if len(value) < 128 else "..."

        rel_artifact_title = unicode(rel_artifact)
        rel_spec_name = (u" : " + rel_artifact.__class__.get_name()
            ) if not rel_artifact.__class__ is Instance else u""
        header = rel_artifact_title + rel_spec_name
        body = []
        if include_values:
            for attribute_name, value in rel_artifact.get_values():
                body.append("%s = %s" % (sanitize(attribute_name),
                                         sanitize(value)))
        return {'header': sanitize(header), 'body': body,
                'associations': []}

    yuml_class = artifact_to_yuml_class(obj)
    yuml_class['body'].append(
        '{bg:orange}')  # color the main artifact differently
    yuml_class['associations'] = [
        (artifact_to_yuml_class(rel_artifact, False)['header'],
         rel_artifact_text)
        for rel_artifact, rel_artifact_text in referred_artifacts
    ]
    yuml.add_class(**yuml_class)
    for rel_artifact in referring_artifacts:
        rel_yuml_class = artifact_to_yuml_class(rel_artifact['artifact'])
        rel_yuml_class['associations'] = [
            (artifact_to_yuml_class(obj, False)['header'], "")
        ]
        yuml.add_class(**rel_yuml_class)
    yuml.serialize()

    # track access
    dbp.track_it("artifact", obj.get_id(), "view", request.req.authname,
                 str(datetime.now()))

    data = {
        'context': Context.from_request(request.req, resource),
        'spec_name': spec_name,
        'spec_url': spec_url,
        'artifact': obj,
        'artifact_url': artifact_url,
        'artifacts_values': values,
        'related_pages': related_pages,
        'related_artifacts': referring_artifacts,
        'show_diagram': dbp.env.config.getbool('asa', 'show_diagram',
                                               default=True),
        'yuml_url': yuml.get_url(),
    }
    return 'view_artifact_%s.html' % (request.get_format(), ), data, None