def index(self):
    """Render the personal journal page, or only its data fragment for
    partial (XHR) requests."""
    c.user = User.get(request.authuser.user_id)
    page_no = safe_int(request.GET.get('page'), 1)

    # everything the current user follows, with followed repos eagerly loaded
    c.following = (UserFollowing.query()
                   .filter(UserFollowing.user_id == request.authuser.user_id)
                   .options(joinedload(UserFollowing.follows_repository))
                   .all())
    journal = self._get_journal_data(c.following)

    def _page_url(**kw):
        # keep the current search filter when paging
        return url.current(filter=c.search_term, **kw)

    c.journal_pager = Page(journal, page=page_no, items_per_page=20,
                           url=_page_url)
    c.journal_day_aggregate = self._get_daily_aggregate(c.journal_pager)

    if request.environ.get('HTTP_X_PARTIAL_XHR'):
        return render('journal/journal_data.html')

    # data used to render the repository grid
    repos_list = (Repository.query(sorted=True)
                  .filter_by(owner_id=request.authuser.user_id)
                  .all())
    c.data = RepoModel().get_repos_as_dict(repos_list=repos_list, admin=True)

    return render('journal/journal.html')
def index(self):
    """Render the admin action log, filtered by the 'filter' query
    parameter and paginated."""
    users_log = (UserLog.query()
                 .options(joinedload(UserLog.user))
                 .options(joinedload(UserLog.repository)))

    # FILTERING by the user-supplied search term
    c.search_term = request.GET.get('filter')
    users_log = _journal_filter(users_log, c.search_term)
    users_log = users_log.order_by(UserLog.action_date.desc())

    page_no = safe_int(request.GET.get('page'), 1)

    def _page_url(**kw):
        # keep the current filter when paging
        return url.current(filter=c.search_term, **kw)

    c.users_log = Page(users_log, page=page_no, items_per_page=10,
                       url=_page_url)

    if request.environ.get('HTTP_X_PARTIAL_XHR'):
        return render('admin/admin_log.html')
    return render('admin/admin.html')
def index(self):
    """List gists: all public gists by default, or the current user's
    own private and/or public gists when the corresponding query flags
    are set (and the user is not the default/anonymous user)."""
    not_default_user = not request.authuser.is_default_user
    c.show_private = request.GET.get('private') and not_default_user
    c.show_public = request.GET.get('public') and not_default_user

    gists = (Gist().query()
             .filter_by(is_expired=False)
             .order_by(Gist.created_on.desc()))

    if c.show_private and not c.show_public:
        # MY private gists only
        gists = (gists.filter(Gist.gist_type == Gist.GIST_PRIVATE)
                      .filter(Gist.owner_id == request.authuser.user_id))
    elif c.show_public and not c.show_private:
        # MY public gists only
        gists = (gists.filter(Gist.gist_type == Gist.GIST_PUBLIC)
                      .filter(Gist.owner_id == request.authuser.user_id))
    elif c.show_private and c.show_public:
        # MY gists, both public and private
        gists = (gists.filter(or_(Gist.gist_type == Gist.GIST_PUBLIC,
                                  Gist.gist_type == Gist.GIST_PRIVATE))
                      .filter(Gist.owner_id == request.authuser.user_id))

    # default: show ALL public gists from everybody
    if not c.show_public and not c.show_private:
        gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC)

    c.gists = gists
    page_no = safe_int(request.GET.get('page'), 1)
    c.gists_pager = Page(c.gists, page=page_no, items_per_page=10)
    return render('admin/gists/index.html')
def index(self, repo_name):
    """Render the repository summary page: latest changesets (with their
    comments and statuses), clone URLs, and — when the repo has
    statistics enabled — the trending-languages breakdown.

    Fixes: idiomatic boolean assignment for ``c.show_stats`` (was an
    if/else setting True/False) and removal of a redundant ``list()``
    wrapper around the pagination iterable.
    """
    page_no = safe_int(request.GET.get('page'), 1)
    size = safe_int(request.GET.get('size'), 10)
    try:
        collection = c.db_repo_scm_instance.get_changesets(reverse=True)
    except EmptyRepositoryError as e:
        # an empty repo has no changesets — show a warning, not an error
        h.flash(e, category='warning')
        collection = []
    c.cs_pagination = Page(collection, page=page_no, items_per_page=size)

    page_revisions = [x.raw_id for x in c.cs_pagination]
    c.cs_comments = c.db_repo.get_comments(page_revisions)
    c.cs_statuses = c.db_repo.statuses(page_revisions)

    # clone URLs; the SSH URL is only built for logged-in users when SSH
    # support is enabled
    c.ssh_repo_url = None
    if request.authuser.is_default_user:
        username = None
    else:
        username = request.authuser.username
        if c.ssh_enabled:
            c.ssh_repo_url = c.db_repo.clone_url(
                clone_uri_tmpl=c.clone_ssh_tmpl)

    c.clone_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl,
                                           with_id=False, username=username)
    c.clone_repo_url_id = c.db_repo.clone_url(
        clone_uri_tmpl=c.clone_uri_tmpl, with_id=True, username=username)

    c.show_stats = bool(c.db_repo.enable_statistics)

    stats = Statistics.query() \
        .filter(Statistics.repository == c.db_repo) \
        .scalar()
    c.stats_percentage = 0
    if stats and stats.languages:
        lang_stats_d = ext_json.loads(stats.languages)
        lang_stats = [(x, {"count": y,
                           "desc": LANGUAGES_EXTENSIONS_MAP.get(x, '?')})
                      for x, y in lang_stats_d.items()]
        # most-used languages first, ties broken alphabetically
        lang_stats.sort(key=lambda k: (-k[1]['count'], k[0]))
        c.trending_languages = lang_stats[:10]
    else:
        c.trending_languages = []

    c.enable_downloads = c.db_repo.enable_downloads
    c.readme_data, c.readme_file = \
        self.__get_readme_data(c.db_repo)
    return render('summary/summary.html')
def followers(self, repo_name):
    """Render the list of users following this repository, or only the
    data fragment for partial (XHR) requests."""
    page_no = safe_int(request.GET.get('page'), 1)
    followers = (UserFollowing.get_repo_followers(c.db_repo.repo_id)
                 .order_by(UserFollowing.follows_from))
    c.followers_pager = Page(followers, page=page_no, items_per_page=20)

    if request.environ.get('HTTP_X_PARTIAL_XHR'):
        return render('/followers/followers_data.html')
    return render('/followers/followers.html')
def mark_all_read(self):
    """Mark all of the current user's notifications (matching the 'type'
    filter) as read, then return the refreshed notification-list
    fragment. Only acts on partial (XHR) requests; otherwise returns
    None, as before."""
    if not request.environ.get('HTTP_X_PARTIAL_XHR'):
        return None

    model = NotificationModel()
    type_filter = request.GET.getall('type')
    # mark all read
    model.mark_all_read_for_user(request.authuser.user_id,
                                 filter_=type_filter)
    Session().commit()

    c.user = request.authuser
    notifications = model.query_for_user(request.authuser.user_id,
                                         filter_=type_filter)
    c.notifications = Page(notifications, page=1, items_per_page=10)
    return render('admin/notifications/notifications_data.html')
def forks(self, repo_name):
    """Render the forks of this repository that the current user has at
    least read permission on, or only the data fragment for partial
    (XHR) requests.

    Fix: the manual filter-and-append loop is replaced by a list
    comprehension (same result, clearer and lint-clean).
    """
    page_no = safe_int(request.GET.get('page'), 1)
    repo_id = c.db_repo.repo_id
    # keep only forks the user may read
    readable_forks = [
        r for r in Repository.get_repo_forks(repo_id)
        if HasRepoPermissionLevel('read')(r.repo_name, 'get forks check')
    ]
    c.forks_pager = Page(readable_forks, page=page_no, items_per_page=20)

    if request.environ.get('HTTP_X_PARTIAL_XHR'):
        return render('/forks/forks_data.html')
    return render('/forks/forks.html')
def show_all(self, repo_name):
    """Render all pull requests involving this repository; 'from_'
    selects outgoing (originating here) vs. incoming (targeting here),
    and 'closed' includes closed pull requests."""
    c.from_ = request.GET.get('from_') or ''
    c.closed = request.GET.get('closed') or ''
    page_no = safe_int(request.GET.get('page'), 1)

    query = PullRequest.query(include_closed=c.closed, sorted=True)
    if c.from_:
        query = query.filter_by(org_repo=c.db_repo)
    else:
        query = query.filter_by(other_repo=c.db_repo)
    c.pull_requests = query.all()

    c.pullrequests_pager = Page(c.pull_requests, page=page_no,
                                items_per_page=100)
    return render('/pullrequests/pullrequest_show_all.html')
def public_journal(self):
    """Render the public journal page, or only its data fragment for
    partial (XHR) requests."""
    page_no = safe_int(request.GET.get('page'), 1)

    c.following = (UserFollowing.query()
                   .filter(UserFollowing.user_id == request.authuser.user_id)
                   .options(joinedload(UserFollowing.follows_repository))
                   .all())
    journal = self._get_journal_data(c.following)

    c.journal_pager = Page(journal, page=page_no, items_per_page=20)
    c.journal_day_aggregate = self._get_daily_aggregate(c.journal_pager)

    if request.environ.get('HTTP_X_PARTIAL_XHR'):
        return render('journal/journal_data.html')
    return render('journal/public_journal.html')
def index(self, format='html'):
    """Render the current user's notification inbox, paginated and
    optionally filtered by notification type.

    Fix: ``request.GET.getall('type')`` was fetched twice (once for the
    query filter, once for the active-tab decision); it is now read once.
    """
    c.user = request.authuser
    # the 'type' filter drives both the query and the active filter tab
    current_filter = request.GET.getall('type')
    notif = NotificationModel().query_for_user(
        request.authuser.user_id, filter_=current_filter)

    page_no = safe_int(request.GET.get('page'), 1)
    c.notifications = Page(notif, page=page_no, items_per_page=10)

    c.pull_request_type = Notification.TYPE_PULL_REQUEST
    c.comment_type = [Notification.TYPE_CHANGESET_COMMENT,
                      Notification.TYPE_PULL_REQUEST_COMMENT]

    c.current_filter = 'all'
    if current_filter == [c.pull_request_type]:
        c.current_filter = 'pull_request'
    elif current_filter == c.comment_type:
        c.current_filter = 'comment'

    return render('admin/notifications/notifications.html')
def index(self, repo_name=None):
    """Run a Whoosh full-text search (content/commit/path/repository)
    and render the results page.

    When ``repo_name`` is given, the query is restricted to that
    repository via the case-sensitive ``repository_rawname`` field.
    Populates ``c.formated_results`` (a Page of WhooshResultWrapper)
    and ``c.runtime`` (result count / error message).
    """
    c.repo_name = repo_name
    c.formated_results = []
    c.runtime = ''
    c.cur_query = request.GET.get('q', None)
    c.cur_type = request.GET.get('type', 'content')
    # map the UI search type onto the Whoosh field to query
    c.cur_search = search_type = {'content': 'content',
                                  'commit': 'message',
                                  'path': 'path',
                                  'repository': 'repository'
                                  }.get(c.cur_type, 'content')

    # commit searches use the changeset index; everything else the file index
    index_name = {
        'content': IDX_NAME,
        'commit': CHGSET_IDX_NAME,
        'path': IDX_NAME
    }.get(c.cur_type, IDX_NAME)

    schema_defn = {
        'content': SCHEMA,
        'commit': CHGSETS_SCHEMA,
        'path': SCHEMA
    }.get(c.cur_type, SCHEMA)

    log.debug('IDX: %s', index_name)
    log.debug('SCHEMA: %s', schema_defn)

    if c.cur_query:
        # Whoosh matching here is done on the lower-cased query
        cur_query = c.cur_query.lower()
        log.debug(cur_query)

    if c.cur_query:
        p = safe_int(request.GET.get('page'), 1)
        highlight_items = set()
        try:
            idx = open_dir(config['app_conf']['index_dir'],
                           indexname=index_name)
            searcher = idx.searcher()

            qp = QueryParser(search_type, schema=schema_defn)
            if c.repo_name:
                # use "repository_rawname:" instead of "repository:"
                # for case-sensitive matching
                cur_query = u'repository_rawname:%s %s' % (c.repo_name,
                                                           cur_query)
            try:
                query = qp.parse(unicode(cur_query))
                # extract words for highlight
                if isinstance(query, Phrase):
                    highlight_items.update(query.words)
                elif isinstance(query, Prefix):
                    highlight_items.add(query.text)
                else:
                    for i in query.all_terms():
                        if i[0] in ['content', 'message']:
                            highlight_items.add(i[1])

                matcher = query.matcher(searcher)

                log.debug('query: %s', query)
                log.debug('hl terms: %s', highlight_items)
                results = searcher.search(query)
                res_ln = len(results)
                c.runtime = '%s results (%.3f seconds)' % (
                    res_ln, results.runtime
                )

                def url_generator(**kw):
                    # carry the query and type through pagination links
                    q = urllib.quote(safe_str(c.cur_query))
                    return update_params("?q=%s&type=%s" \
                        % (q, safe_str(c.cur_type)), **kw)

                repo_location = RepoModel().repos_path
                c.formated_results = Page(
                    WhooshResultWrapper(search_type, searcher, matcher,
                                        highlight_items, repo_location),
                    page=p, item_count=res_ln,
                    items_per_page=10,
                    url=url_generator
                )
            except QueryParserError:
                c.runtime = _('Invalid search query. Try quoting it.')
            # NOTE(review): searcher.close() is skipped if searcher.search()
            # or Page() raises something other than QueryParserError —
            # possible resource leak; confirm whether a finally is wanted
            searcher.close()
        except (EmptyIndexError, IOError):
            log.error(traceback.format_exc())
            log.error('Empty Index data')
            c.runtime = _('There is no index to search in. '
                          'Please run whoosh indexer')
        except Exception:
            log.error(traceback.format_exc())
            c.runtime = _('An error occurred during search operation.')

    # Return a rendered template
    return render('/search/search.html')
def index(self, repo_name, revision=None, f_path=None):
    """Render the changelog for a repository, optionally restricted to a
    single file's history (``f_path``) and/or a branch/revision.

    The page size comes from the 'size' query parameter (clamped to
    [1, 2000]) and is remembered in the session; errors redirect to the
    summary or changelog page with a flash message.
    """
    limit = 2000   # hard upper bound on page size
    default = 100  # session default when no size was ever chosen
    if request.GET.get('size'):
        c.size = max(min(safe_int(request.GET.get('size')), limit), 1)
        session['changelog_size'] = c.size
        session.save()
    else:
        c.size = int(session.get('changelog_size', default))
    # min size must be 1
    c.size = max(c.size, 1)
    p = safe_int(request.GET.get('page'), 1)
    branch_name = request.GET.get('branch', None)
    # an unknown "branch" with no explicit revision is treated as a
    # revision reference for the file changelog and redirected there
    if (branch_name and
            branch_name not in c.db_repo_scm_instance.branches and
            branch_name not in c.db_repo_scm_instance.closed_branches and
            not revision):
        raise HTTPFound(location=url('changelog_file_home',
                                     repo_name=c.repo_name,
                                     revision=branch_name,
                                     f_path=f_path or ''))

    if revision == 'tip':
        revision = None

    c.changelog_for_path = f_path
    try:
        if f_path:
            log.debug('generating changelog for path %s', f_path)
            # get the history for the file !
            tip_cs = c.db_repo_scm_instance.get_changeset()
            try:
                collection = tip_cs.get_file_history(f_path)
            except (NodeDoesNotExistError, ChangesetError):
                # this node is not present at tip !
                # fall back to the requested revision's view of the file
                try:
                    cs = self.__get_cs(revision, repo_name)
                    collection = cs.get_file_history(f_path)
                except RepositoryError as e:
                    h.flash(e, category='warning')
                    raise HTTPFound(location=h.url('changelog_home',
                                                   repo_name=repo_name))
        else:
            # whole-repo changelog, newest first, optionally per branch
            collection = c.db_repo_scm_instance.get_changesets(
                start=0, end=revision,
                branch_name=branch_name, reverse=True)
        c.total_cs = len(collection)

        c.cs_pagination = Page(collection, page=p, item_count=c.total_cs,
                               items_per_page=c.size, branch=branch_name)

        # comments and statuses only for the changesets on this page
        page_revisions = [x.raw_id for x in c.cs_pagination]
        c.cs_comments = c.db_repo.get_comments(page_revisions)
        c.cs_statuses = c.db_repo.statuses(page_revisions)
    except EmptyRepositoryError as e:
        h.flash(e, category='warning')
        raise HTTPFound(location=url('summary_home',
                                     repo_name=c.repo_name))
    except (RepositoryError, ChangesetDoesNotExistError, Exception) as e:
        log.error(traceback.format_exc())
        h.flash(e, category='error')
        raise HTTPFound(location=url('changelog_home',
                                     repo_name=c.repo_name))

    c.branch_name = branch_name
    # branch filter dropdown: open branches, then closed ones prefixed
    c.branch_filters = [('', _('None'))] + \
        [(k, k) for k in c.db_repo_scm_instance.branches]
    if c.db_repo_scm_instance.closed_branches:
        prefix = _('(closed)') + ' '
        c.branch_filters += [('-', '-')] + \
            [(k, prefix + k)
             for k in c.db_repo_scm_instance.closed_branches]
    revs = []
    if not f_path:
        # graph data only for the full changelog, not file history
        revs = [x.revision for x in c.cs_pagination]
    c.jsdata = graph_data(c.db_repo_scm_instance, revs)

    c.revision = revision  # requested revision ref
    c.first_revision = c.cs_pagination[0]  # pagination is never empty here!
    return render('changelog/changelog.html')
def index(self, repo_name=None):
    """Run a Whoosh full-text search (content/commit/path/repository)
    and render the results page.

    When ``repo_name`` is given, the query is restricted to that
    repository via the case-sensitive ``repository_rawname`` field.
    Populates ``c.formated_results`` (a Page of WhooshResultWrapper)
    and ``c.runtime`` (result count / error message).
    """
    c.repo_name = repo_name
    c.formated_results = []
    c.runtime = ''
    c.cur_query = request.GET.get('q', None)
    c.cur_type = request.GET.get('type', 'content')
    # map the UI search type onto the Whoosh field to query
    c.cur_search = search_type = {'content': 'content',
                                  'commit': 'message',
                                  'path': 'path',
                                  'repository': 'repository'
                                  }.get(c.cur_type, 'content')

    # commit searches use the changeset index; everything else the file index
    index_name = {
        'content': IDX_NAME,
        'commit': CHGSET_IDX_NAME,
        'path': IDX_NAME
    }.get(c.cur_type, IDX_NAME)

    schema_defn = {
        'content': SCHEMA,
        'commit': CHGSETS_SCHEMA,
        'path': SCHEMA
    }.get(c.cur_type, SCHEMA)

    log.debug('IDX: %s', index_name)
    log.debug('SCHEMA: %s', schema_defn)

    if c.cur_query:
        # Whoosh matching here is done on the lower-cased query
        cur_query = c.cur_query.lower()
        log.debug(cur_query)

    if c.cur_query:
        p = safe_int(request.GET.get('page'), 1)
        highlight_items = set()
        index_dir = config['index_dir']
        try:
            # a missing index is reported the same way as an empty one
            if not exists_in(index_dir, index_name):
                raise EmptyIndexError
            idx = open_dir(index_dir, indexname=index_name)
            searcher = idx.searcher()

            qp = QueryParser(search_type, schema=schema_defn)
            if c.repo_name:
                # use "repository_rawname:" instead of "repository:"
                # for case-sensitive matching
                cur_query = 'repository_rawname:%s %s' % (c.repo_name,
                                                          cur_query)
            try:
                query = qp.parse(cur_query)
                # extract words for highlight
                if isinstance(query, Phrase):
                    highlight_items.update(query.words)
                elif isinstance(query, Prefix):
                    highlight_items.add(query.text)
                else:
                    for i in query.all_terms():
                        if i[0] in ['content', 'message']:
                            highlight_items.add(i[1])

                matcher = query.matcher(searcher)

                log.debug('query: %s', query)
                log.debug('hl terms: %s', highlight_items)
                results = searcher.search(query)
                res_ln = len(results)
                c.runtime = '%s results (%.3f seconds)' % (
                    res_ln, results.runtime
                )

                repo_location = RepoModel().repos_path
                c.formated_results = Page(
                    WhooshResultWrapper(search_type, searcher, matcher,
                                        highlight_items, repo_location),
                    page=p,
                    item_count=res_ln,
                    items_per_page=10,
                    type=c.cur_type,
                    q=c.cur_query,
                )
            except QueryParserError:
                c.runtime = _('Invalid search query. Try quoting it.')
            # NOTE(review): searcher.close() is skipped if searcher.search()
            # or Page() raises something other than QueryParserError —
            # possible resource leak; confirm whether a finally is wanted
            searcher.close()
        except EmptyIndexError:
            log.error("Empty search index - run 'kallithea-cli index-create' regularly")
            c.runtime = _('The server has no search index.')
        except Exception:
            log.error(traceback.format_exc())
            c.runtime = _('An error occurred during search operation.')

    # Return a rendered template
    return render('/search/search.html')