def index(self):
    def _branchtags(localrepo):
        bt_closed = {}
        for bn, heads in localrepo.branchmap().iteritems():
            tip = heads[-1]
            if 'close' in localrepo.changelog.read(tip)[5]:
                bt_closed[bn] = tip
        return bt_closed

    cs_g = c.db_repo_scm_instance.get_changeset

    c.repo_closed_branches = {}
    if c.db_repo.repo_type == 'hg':
        bt_closed = _branchtags(c.db_repo_scm_instance._repo)
        _closed_branches = [(safe_unicode(n), cs_g(binascii.hexlify(h)))
                            for n, h in bt_closed.items()]
        c.repo_closed_branches = OrderedDict(sorted(_closed_branches,
                                                    key=lambda ctx: ctx[0],
                                                    reverse=False))

    _branches = [(safe_unicode(n), cs_g(h))
                 for n, h in c.db_repo_scm_instance.branches.items()]
    c.repo_branches = OrderedDict(sorted(_branches,
                                         key=lambda ctx: ctx[0],
                                         reverse=False))

    return render('branches/branches.html')
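
# Minimal standalone sketch (not part of the controller) of the node-id
# conversion used above; the 20-byte value is a made-up stand-in for what
# Mercurial's branchmap() returns. branchmap() yields raw binary node ids,
# while get_changeset() expects the 40-character hex form, hence hexlify.
import binascii

binary_node = b'\x12\x34' * 10            # fake 20-byte Mercurial node id
hex_node = binascii.hexlify(binary_node)  # -> '12341234...' (40 hex chars)
assert len(hex_node) == 40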
def index(self):
    c.repo_tags = OrderedDict()

    tags = [(name, c.db_repo_scm_instance.get_changeset(hash_))
            for name, hash_ in c.db_repo_scm_instance.tags.items()]
    ordered_tags = sorted(tags, key=lambda x: x[1].date, reverse=True)
    for name, cs_tag in ordered_tags:
        c.repo_tags[name] = cs_tag

    return render('tags/tags.html')
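
# Standalone sketch of the newest-first ordering used above; the Tag
# namedtuple and the sample dates are hypothetical stand-ins for real
# changeset objects exposing a .date attribute.
from collections import OrderedDict, namedtuple
from datetime import datetime

Tag = namedtuple('Tag', 'date')
tags = [('v1.0', Tag(datetime(2012, 1, 1))),
        ('v1.1', Tag(datetime(2013, 6, 1)))]
ordered = OrderedDict(sorted(tags, key=lambda x: x[1].date, reverse=True))
print(list(ordered))  # ['v1.1', 'v1.0'] - newest tag first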
def index(self):
    if c.db_repo_scm_instance.alias != 'hg':
        raise HTTPNotFound()

    c.repo_bookmarks = OrderedDict()

    bookmarks = [(name, c.db_repo_scm_instance.get_changeset(hash_))
                 for name, hash_ in
                 c.db_repo_scm_instance._repo._bookmarks.items()]
    ordered_bookmarks = sorted(bookmarks, key=lambda x: x[1].date,
                               reverse=True)
    for name, cs_book in ordered_bookmarks:
        c.repo_bookmarks[name] = cs_book

    return render('bookmarks/bookmarks.html')
class ChangesetController(BaseRepoController):

    def __before__(self):
        super(ChangesetController, self).__before__()
        c.affected_files_cut_off = 60

    def __load_data(self):
        repo_model = RepoModel()
        c.users_array = repo_model.get_users_js()
        c.user_groups_array = repo_model.get_user_groups_js()

    def _index(self, revision, method):
        c.anchor_url = anchor_url
        c.ignorews_url = _ignorews_url
        c.context_url = _context_url
        c.fulldiff = fulldiff = request.GET.get('fulldiff')
        # get ranges of revisions if present
        rev_range = revision.split('...')[:2]
        enable_comments = True
        c.cs_repo = c.db_repo
        try:
            if len(rev_range) == 2:
                enable_comments = False
                rev_start = rev_range[0]
                rev_end = rev_range[1]
                rev_ranges = c.db_repo_scm_instance.get_changesets(
                    start=rev_start, end=rev_end)
            else:
                rev_ranges = [c.db_repo_scm_instance.get_changeset(revision)]

            c.cs_ranges = list(rev_ranges)
            if not c.cs_ranges:
                raise RepositoryError('Changeset range returned empty result')
        except (ChangesetDoesNotExistError,), e:
            log.error(traceback.format_exc())
            msg = _('Such revision does not exist for this repository')
            h.flash(msg, category='error')
            raise HTTPNotFound()

        c.changes = OrderedDict()

        c.lines_added = 0  # count of lines added
        c.lines_deleted = 0  # count of lines removed

        c.changeset_statuses = ChangesetStatus.STATUSES
        comments = dict()
        c.statuses = []
        c.inline_comments = []
        c.inline_cnt = 0

        # Iterate over ranges (default changeset view is always one changeset)
        for changeset in c.cs_ranges:
            inlines = []
            if method == 'show':
                c.statuses.extend([ChangesetStatusModel().get_status(
                    c.db_repo.repo_id, changeset.raw_id)])

                # Changeset comments
                comments.update((com.comment_id, com)
                                for com in ChangesetCommentsModel()
                                .get_comments(c.db_repo.repo_id,
                                              revision=changeset.raw_id))

                # Status change comments - mostly from pull requests
                comments.update((st.changeset_comment_id, st.comment)
                                for st in ChangesetStatusModel()
                                .get_statuses(c.db_repo.repo_id,
                                              changeset.raw_id,
                                              with_revisions=True))

                inlines = ChangesetCommentsModel() \
                    .get_inline_comments(c.db_repo.repo_id,
                                         revision=changeset.raw_id)
                c.inline_comments.extend(inlines)

            c.changes[changeset.raw_id] = []

            cs2 = changeset.raw_id
            cs1 = changeset.parents[0].raw_id \
                if changeset.parents else EmptyChangeset().raw_id

            context_lcl = get_line_ctx('', request.GET)
            ign_whitespace_lcl = get_ignore_ws('', request.GET)

            _diff = c.db_repo_scm_instance.get_diff(
                cs1, cs2,
                ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
            diff_limit = self.cut_off_limit if not fulldiff else None
            diff_processor = diffs.DiffProcessor(
                _diff, vcs=c.db_repo_scm_instance.alias, format='gitdiff',
                diff_limit=diff_limit)
            cs_changes = OrderedDict()
            if method == 'show':
                _parsed = diff_processor.prepare()
                c.limited_diff = False
                if isinstance(_parsed, LimitedDiffContainer):
                    c.limited_diff = True
                for f in _parsed:
                    st = f['stats']
                    c.lines_added += st['added']
                    c.lines_deleted += st['deleted']
                    fid = h.FID(changeset.raw_id, f['filename'])
                    diff = diff_processor.as_html(
                        enable_comments=enable_comments, parsed_lines=[f])
                    cs_changes[fid] = [cs1, cs2, f['operation'],
                                       f['filename'], diff, st]
            else:
                # downloads/raw we only need RAW diff nothing else
                diff = diff_processor.as_raw()
                cs_changes[''] = [None, None, None, None, diff, None]
            c.changes[changeset.raw_id] = cs_changes

        # sort comments in creation order
        c.comments = [com for com_id, com in sorted(comments.items())]

        # count inline comments
        for __, lines in c.inline_comments:
            for comments in lines.values():
                c.inline_cnt += len(comments)

        if len(c.cs_ranges) == 1:
            c.changeset = c.cs_ranges[0]
            c.parent_tmpl = ''.join(['# Parent %s\n' % x.raw_id
                                     for x in c.changeset.parents])

        if method == 'download':
            response.content_type = 'text/plain'
            response.content_disposition = 'attachment; filename=%s.diff' \
                % revision[:12]
            return diff
        elif method == 'patch':
            response.content_type = 'text/plain'
            c.diff = safe_unicode(diff)
            return render('changeset/patch_changeset.html')
        elif method == 'raw':
            response.content_type = 'text/plain'
            return diff
        elif method == 'show':
            self.__load_data()
            if len(c.cs_ranges) == 1:
                return render('changeset/changeset.html')
            else:
                c.cs_ranges_org = None
                c.cs_comments = {}
                revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
                c.jsdata = json.dumps(graph_data(c.db_repo_scm_instance,
                                                 revs))
                return render('changeset/changeset_range.html')
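
# Quick standalone check of the '...' range parsing done at the top of
# _index(); parse_rev_range and the sample revision strings are
# hypothetical, illustration only.
def parse_rev_range(revision):
    rev_range = revision.split('...')[:2]
    if len(rev_range) == 2:
        return rev_range[0], rev_range[1]  # start and end of a range
    return revision, None                  # a single changeset

print(parse_rev_range('abc123...def456'))  # ('abc123', 'def456')
print(parse_rev_range('abc123'))           # ('abc123', None)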
def get_commits_stats(repo_name, ts_min_y, ts_max_y, recurse_limit=100):
    log = get_logger(get_commits_stats)
    DBS = get_session()
    lockkey = __get_lockkey('get_commits_stats', repo_name, ts_min_y,
                            ts_max_y)
    lockkey_path = config['app_conf']['cache_dir']

    log.info('running task with lockkey %s', lockkey)

    try:
        lock = DaemonLock(file_=jn(lockkey_path, lockkey))

        # for js data compatibility, strip double quotes from the person key
        akc = lambda k: person(k).replace('"', "")

        co_day_auth_aggr = {}
        commits_by_day_aggregate = {}
        repo = Repository.get_by_repo_name(repo_name)
        if repo is None:
            return True

        repo = repo.scm_instance
        repo_size = repo.count()
        # return if repo has no revisions
        if repo_size < 1:
            lock.release()
            return True

        skip_date_limit = True
        parse_limit = int(config['app_conf'].get('commit_parse_limit'))
        last_rev = None
        last_cs = None
        timegetter = itemgetter('time')

        dbrepo = DBS.query(Repository) \
            .filter(Repository.repo_name == repo_name).scalar()
        cur_stats = DBS.query(Statistics) \
            .filter(Statistics.repository == dbrepo).scalar()

        if cur_stats is not None:
            last_rev = cur_stats.stat_on_revision

        if last_rev == repo.get_changeset().revision and repo_size > 1:
            # pass silently without any work if we're not on the first
            # revision, or the parsing state (db marker) is already at the
            # last revision
            lock.release()
            return True

        if cur_stats:
            commits_by_day_aggregate = OrderedDict(json.loads(
                cur_stats.commit_activity_combined))
            co_day_auth_aggr = json.loads(cur_stats.commit_activity)

        log.debug('starting parsing %s', parse_limit)
        lmktime = mktime

        last_rev = last_rev + 1 if last_rev >= 0 else 0
        log.debug('Getting revisions from %s to %s',
                  last_rev, last_rev + parse_limit)
        for cs in repo[last_rev:last_rev + parse_limit]:
            log.debug('parsing %s', cs)
            last_cs = cs  # remember last parsed changeset
            # bucket key: the commit's date truncated to midnight
            k = lmktime([cs.date.timetuple()[0], cs.date.timetuple()[1],
                         cs.date.timetuple()[2], 0, 0, 0, 0, 0, 0])

            if akc(cs.author) in co_day_auth_aggr:
                try:
                    times = [timegetter(x) for x in
                             co_day_auth_aggr[akc(cs.author)]['data']]
                    time_pos = times.index(k)
                except ValueError:
                    time_pos = None

                if time_pos is not None and time_pos >= 0:
                    datadict = \
                        co_day_auth_aggr[akc(cs.author)]['data'][time_pos]

                    datadict["commits"] += 1
                    datadict["added"] += len(cs.added)
                    datadict["changed"] += len(cs.changed)
                    datadict["removed"] += len(cs.removed)
                else:
                    if ts_min_y <= k <= ts_max_y or skip_date_limit:
                        datadict = {"time": k,
                                    "commits": 1,
                                    "added": len(cs.added),
                                    "changed": len(cs.changed),
                                    "removed": len(cs.removed),
                                    }
                        co_day_auth_aggr[akc(cs.author)]['data'] \
                            .append(datadict)
            else:
                if ts_min_y <= k <= ts_max_y or skip_date_limit:
                    co_day_auth_aggr[akc(cs.author)] = {
                        "label": akc(cs.author),
                        "data": [{"time": k,
                                  "commits": 1,
                                  "added": len(cs.added),
                                  "changed": len(cs.changed),
                                  "removed": len(cs.removed),
                                  }],
                        "schema": ["commits"],
                    }

            # gather all data by day
            if k in commits_by_day_aggregate:
                commits_by_day_aggregate[k] += 1
            else:
                commits_by_day_aggregate[k] = 1

        overview_data = sorted(commits_by_day_aggregate.items(),
                               key=itemgetter(0))

        if not co_day_auth_aggr:
            co_day_auth_aggr[akc(repo.contact)] = {
                "label": akc(repo.contact),
                "data": [0, 1],
                "schema": ["commits"],
            }

        stats = cur_stats if cur_stats else Statistics()
        stats.commit_activity = json.dumps(co_day_auth_aggr)
        stats.commit_activity_combined = json.dumps(overview_data)

        log.debug('last revision %s', last_rev)
        leftovers = len(repo.revisions[last_rev:])
        log.debug('revisions to parse %s', leftovers)

        if last_rev == 0 or leftovers < parse_limit:
            log.debug('getting code trending stats')
            stats.languages = json.dumps(__get_codes_stats(repo_name))

        try:
            stats.repository = dbrepo
            stats.stat_on_revision = last_cs.revision if last_cs else 0
            DBS.add(stats)
            DBS.commit()
        except Exception:
            log.error(traceback.format_exc())
            DBS.rollback()
            lock.release()
            return False

        # final release
        lock.release()

        # execute another task if celery is enabled
        if len(repo.revisions) > 1 and CELERY_ON and recurse_limit > 0:
            recurse_limit -= 1
            run_task(get_commits_stats, repo_name, ts_min_y, ts_max_y,
                     recurse_limit)
        if recurse_limit <= 0:
            log.debug('Breaking recursive mode: recurse limit reached')
        return True
    except LockHeld:
        log.info('LockHeld')
        return 'Task with key %s already running' % lockkey
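
# Standalone sketch of the day-bucket key computed in the loop above: the
# commit timestamp is truncated to midnight so all commits from one day
# share a single key. The sample date is made up for illustration.
from time import mktime
from datetime import datetime

dt = datetime(2013, 5, 17, 14, 30)  # a commit at 14:30 local time
tt = dt.timetuple()
day_key = mktime((tt[0], tt[1], tt[2], 0, 0, 0, 0, 0, 0))
print(day_key)  # seconds since epoch at 2013-05-17 00:00:00 local time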
def diff(self, repo_name, f_path):
    ignore_whitespace = request.GET.get('ignorews') == '1'
    line_context = safe_int(request.GET.get('context'), 3)
    diff2 = request.GET.get('diff2', '')
    diff1 = request.GET.get('diff1', '') or diff2
    c.action = request.GET.get('diff')
    c.no_changes = diff1 == diff2
    c.f_path = f_path
    c.big_diff = False
    c.anchor_url = anchor_url
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.changes = OrderedDict()
    c.changes[diff2] = []

    # special case if we want to show a rev only; it's implemented here
    # to reduce JS and callbacks
    if request.GET.get('show_rev'):
        if str2bool(request.GET.get('annotate', 'False')):
            _url = url('files_annotate_home', repo_name=c.repo_name,
                       revision=diff1, f_path=c.f_path)
        else:
            _url = url('files_home', repo_name=c.repo_name,
                       revision=diff1, f_path=c.f_path)

        raise HTTPFound(location=_url)

    try:
        if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
            c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1)
            try:
                node1 = c.changeset_1.get_node(f_path)
                if node1.is_dir():
                    raise NodeError('%s path is a %s not a file'
                                    % (node1, type(node1)))
            except NodeDoesNotExistError:
                c.changeset_1 = EmptyChangeset(
                    cs=diff1, revision=c.changeset_1.revision,
                    repo=c.db_repo_scm_instance)
                node1 = FileNode(f_path, '', changeset=c.changeset_1)
        else:
            c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance)
            node1 = FileNode(f_path, '', changeset=c.changeset_1)

        if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
            c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2)
            try:
                node2 = c.changeset_2.get_node(f_path)
                if node2.is_dir():
                    raise NodeError('%s path is a %s not a file'
                                    % (node2, type(node2)))
            except NodeDoesNotExistError:
                c.changeset_2 = EmptyChangeset(
                    cs=diff2, revision=c.changeset_2.revision,
                    repo=c.db_repo_scm_instance)
                node2 = FileNode(f_path, '', changeset=c.changeset_2)
        else:
            c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance)
            node2 = FileNode(f_path, '', changeset=c.changeset_2)
    except (RepositoryError, NodeError):
        log.error(traceback.format_exc())
        raise HTTPFound(location=url('files_home', repo_name=c.repo_name,
                                     f_path=f_path))

    if c.action == 'download':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')

        diff_name = '%s_vs_%s.diff' % (diff1, diff2)
        response.content_type = 'text/plain'
        response.content_disposition = (
            'attachment; filename=%s' % diff_name
        )
        return diff.as_raw()

    elif c.action == 'raw':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')
        response.content_type = 'text/plain'
        return diff.as_raw()

    else:
        fid = h.FID(diff2, node2.path)
        line_context_lcl = get_line_ctx(fid, request.GET)
        ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

        lim = request.GET.get('fulldiff') or self.cut_off_limit
        c.a_rev, c.cs_rev, a_path, diff, st, op = \
            diffs.wrapped_diff(filenode_old=node1,
                               filenode_new=node2,
                               cut_off_limit=lim,
                               ignore_whitespace=ign_whitespace_lcl,
                               line_context=line_context_lcl,
                               enable_comments=False)
        c.file_diff_data = [(fid, fid, op, a_path, node2.path, diff, st)]

        return render('files/file_diff.html')
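
# Standalone illustration of the "empty revision" sentinel check used
# twice in diff() above: the short and full all-zero hashes, the empty
# string, None, and the literal string 'None' all mean "no revision given".
EMPTY_REVS = ['', None, 'None', '0' * 12, '0' * 40]

print('0' * 12 in EMPTY_REVS)  # True - null short hash
print('a' * 40 in EMPTY_REVS)  # False - looks like a real hash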