def index(self):
    def _branchtags(localrepo):
        bt_closed = {}
        for bn, heads in localrepo.branchmap().iteritems():
            tip = heads[-1]
            if 'close' in localrepo.changelog.read(tip)[5]:
                bt_closed[bn] = tip
        return bt_closed

    cs_g = c.rhodecode_repo.get_changeset

    c.repo_closed_branches = {}
    if c.rhodecode_db_repo.repo_type == 'hg':
        bt_closed = _branchtags(c.rhodecode_repo._repo)
        _closed_branches = [(safe_unicode(n), cs_g(binascii.hexlify(h)),)
                            for n, h in bt_closed.items()]

        c.repo_closed_branches = OrderedDict(sorted(_closed_branches,
                                                    key=lambda ctx: ctx[0],
                                                    reverse=False))

    _branches = [(safe_unicode(n), cs_g(h))
                 for n, h in c.rhodecode_repo.branches.items()]
    c.repo_branches = OrderedDict(sorted(_branches,
                                         key=lambda ctx: ctx[0],
                                         reverse=False))

    return render('branches/branches.html')
def _calculate_file_changes(self, old_diff_data, new_diff_data):
    old_files = OrderedDict()
    for diff_data in old_diff_data.parsed_diff:
        old_files[diff_data['filename']] = md5_safe(diff_data['raw_diff'])

    added_files = []
    modified_files = []
    removed_files = []
    for diff_data in new_diff_data.parsed_diff:
        new_filename = diff_data['filename']
        new_hash = md5_safe(diff_data['raw_diff'])
        old_hash = old_files.get(new_filename)
        if not old_hash:
            # file is not present in the old diff, which means it was added
            added_files.append(new_filename)
        else:
            if new_hash != old_hash:
                modified_files.append(new_filename)
            # now remove the file from old, since we have seen it already
            del old_files[new_filename]

    # removed files are those present in OLD but not in NEW; since we delete
    # every old entry that also appears in the new diff, whatever is left
    # over must be the removed files
    removed_files.extend(old_files.keys())

    return FileChangeTuple(added_files, modified_files, removed_files)
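# A minimal, self-contained sketch of the same added/modified/removed
# classification as above, using plain dicts of filename -> raw diff text.
# FileChangeTuple and md5_safe here are stand-ins (an assumed namedtuple and
# md5 helper), not the real model classes.
import hashlib
from collections import namedtuple, OrderedDict

FileChangeTuple = namedtuple('FileChangeTuple', 'added modified removed')

def md5_safe(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()

def classify(old, new):
    # old/new map filename -> raw diff text
    old_hashes = OrderedDict((f, md5_safe(d)) for f, d in old.items())
    added, modified = [], []
    for f, d in new.items():
        h = md5_safe(d)
        if f not in old_hashes:
            added.append(f)          # only in the new diff
        else:
            if h != old_hashes[f]:
                modified.append(f)   # present in both, content changed
            del old_hashes[f]        # seen; whatever remains was removed
    return FileChangeTuple(added, modified, list(old_hashes))

# classify({'a': 'x', 'b': 'y'}, {'b': 'y2', 'c': 'z'})
# -> FileChangeTuple(added=['c'], modified=['b'], removed=['a'])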
def example_refs(self):
    section_1_refs = OrderedDict((('a', 'a_id'), ('b', 'b_id')))
    example_refs = [
        ('section_1', section_1_refs, 't1'),
        ('section_2', {'c': 'c_id'}, 't2'),
    ]
    return example_refs
def index(self):
    c.repo_tags = OrderedDict()
    tags = [(name, c.rhodecode_repo.get_changeset(hash_))
            for name, hash_ in c.rhodecode_repo.tags.items()]
    ordered_tags = sorted(tags, key=lambda x: x[1].date, reverse=True)
    for name, cs_tag in ordered_tags:
        c.repo_tags[name] = cs_tag

    return render('tags/tags.html')
def _get_bookmarks(self):
    if self.is_empty():
        return {}

    def get_name(ctx):
        return ctx[0]

    _bookmarks = [(safe_unicode(n), hexlify(h)) for n, h in
                  self._remote.bookmarks().items()]

    return OrderedDict(sorted(_bookmarks, key=get_name))
def _get_refs_entry(self, value, reverse):
    if self.is_empty():
        return {}

    def get_name(ctx):
        return ctx[0]

    _branches = [
        (safe_unicode(x[0]), x[1][0])
        for x in self._parsed_refs.iteritems() if x[1][1] == value]
    return OrderedDict(sorted(_branches, key=get_name, reverse=reverse))
def _get_tags(self):
    if self.is_empty():
        return {}

    def get_name(ctx):
        return ctx[0]

    _tags = [(safe_unicode(n), hexlify(h),) for n, h in
             self._remote.tags().items()]

    return OrderedDict(sorted(_tags, key=get_name, reverse=True))
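# The tag/bookmark/branch getters above all share one shape: turn a raw
# name -> binary-id mapping into an OrderedDict sorted by ref name. A
# standalone sketch of that pattern, with stdlib stand-ins for hexlify and
# safe_unicode (the sample hashes are made up):
from binascii import hexlify
from collections import OrderedDict

def sorted_refs(raw_refs, reverse=False):
    # raw_refs: dict of ref name -> 20-byte commit hash
    pairs = [(name, hexlify(sha)) for name, sha in raw_refs.items()]
    return OrderedDict(sorted(pairs, key=lambda ctx: ctx[0],
                              reverse=reverse))

# sorted_refs({'v2.0': b'\x01' * 20, 'v1.0': b'\x02' * 20})
# -> OrderedDict([('v1.0', b'0202...'), ('v2.0', b'0101...')])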
def index(self):
    if c.rhodecode_repo.alias != 'hg':
        raise HTTPNotFound()

    c.repo_bookmarks = OrderedDict()
    bookmarks = [(name, c.rhodecode_repo.get_changeset(hash_))
                 for name, hash_ in c.rhodecode_repo._repo._bookmarks.items()]
    ordered_tags = sorted(bookmarks, key=lambda x: x[1].date, reverse=True)
    for name, cs_book in ordered_tags:
        c.repo_bookmarks[name] = cs_book

    return render('bookmarks/bookmarks.html')
def settings_supervisor(self):
    c.rhodecode_ini = rhodecode.CONFIG
    c.active = 'supervisor'

    c.supervisor_procs = OrderedDict([
        (SUPERVISOR_MASTER, {}),
    ])

    c.log_size = 10240
    supervisor = SupervisorModel()

    _connection = supervisor.get_connection(
        c.rhodecode_ini.get('supervisor.uri'))
    c.connection_error = None
    try:
        _connection.supervisor.getAllProcessInfo()
    except Exception as e:
        c.connection_error = str(e)
        log.exception("Exception reading supervisor data")
        return render('admin/settings/settings.html')

    groupid = c.rhodecode_ini.get('supervisor.group_id')

    # feed our group processes to the main
    for proc in supervisor.get_group_processes(_connection, groupid):
        c.supervisor_procs[proc['name']] = {}

    for k in c.supervisor_procs.keys():
        try:
            # master process info
            if k == SUPERVISOR_MASTER:
                _data = supervisor.get_master_state(_connection)
                _data['name'] = 'supervisor master'
                _data['description'] = 'pid %s, id: %s, ver: %s' % (
                    _data['pid'], _data['id'], _data['ver'])
                c.supervisor_procs[k] = _data
            else:
                procid = groupid + ":" + k
                c.supervisor_procs[k] = supervisor.get_process_info(
                    _connection, procid)
        except Exception as e:
            log.exception("Exception reading supervisor data")
            c.supervisor_procs[k] = {'_rhodecode_error': str(e)}

    return render('admin/settings/settings.html')
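# For context: supervisor exposes its control API over XML-RPC, which is what
# SupervisorModel.get_connection wraps. A hedged sketch of talking to it
# directly with the stdlib; the URI is an assumption -- substitute the
# supervisor.uri value from your .ini:
try:
    from xmlrpclib import ServerProxy          # Python 2
except ImportError:
    from xmlrpc.client import ServerProxy      # Python 3

server = ServerProxy('http://localhost:9001/RPC2')

# supervisor.getAllProcessInfo() returns a list of dicts with keys such as
# 'name', 'group', 'pid' and 'statename'
for proc in server.supervisor.getAllProcessInfo():
    print('%s:%s -> %s' % (proc['group'], proc['name'], proc['statename']))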
def _get_branches(self, active=True, closed=False):
    """
    Gets branches for this repository.
    By default returns only active branches that are not closed.

    :param active: return also active branches
    :param closed: return also closed branches
    """
    if self.is_empty():
        return {}

    def get_name(ctx):
        return ctx[0]

    _branches = [(safe_unicode(n), hexlify(h),) for n, h in
                 self._remote.branches(active, closed).items()]

    return OrderedDict(sorted(_branches, key=get_name, reverse=False))
def _tags_or_branches(self, config_section):
    found_items = {}
    if self.is_empty():
        return {}

    for pattern in self._patterns_from_section(config_section):
        pattern = vcspath.sanitize(pattern)
        tip = self.get_commit()
        try:
            if pattern.endswith('*'):
                basedir = tip.get_node(vcspath.dirname(pattern))
                directories = basedir.dirs
            else:
                directories = (tip.get_node(pattern), )
        except NodeDoesNotExistError:
            continue
        found_items.update(
            (safe_unicode(n.path), self.commit_ids[-1])
            for n in directories)

    def get_name(item):
        return item[0]

    return OrderedDict(sorted(found_items.items(), key=get_name))
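# A standalone sketch of the wildcard handling above: a pattern ending in
# '*' expands to every direct subdirectory of its parent, anything else is
# looked up verbatim. The toy "tree" dict stands in for the commit node API
# (assumed data, not the real repository layout):
import posixpath

tree = {
    'tags': ['tags/1.0', 'tags/1.1'],
    'branches': ['branches/stable'],
}

def expand(pattern):
    if pattern.endswith('*'):
        basedir = posixpath.dirname(pattern)   # 'tags/*' -> 'tags'
        return tree.get(basedir, [])
    return [pattern] if any(pattern in v for v in tree.values()) else []

assert expand('tags/*') == ['tags/1.0', 'tags/1.1']
assert expand('branches/stable') == ['branches/stable']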
def diff(self, repo_name, f_path):
    ignore_whitespace = request.GET.get('ignorews') == '1'
    line_context = request.GET.get('context', 3)
    diff1 = request.GET.get('diff1', '')
    diff2 = request.GET.get('diff2', '')
    c.action = request.GET.get('diff')
    c.no_changes = diff1 == diff2
    c.f_path = f_path
    c.big_diff = False
    c.anchor_url = anchor_url
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.changes = OrderedDict()
    c.changes[diff2] = []

    # special case if we want to show a single rev only; it's implemented
    # here to reduce JS and callbacks
    if request.GET.get('show_rev'):
        if str2bool(request.GET.get('annotate', 'False')):
            _url = url('files_annotate_home', repo_name=c.repo_name,
                       revision=diff1, f_path=c.f_path)
        else:
            _url = url('files_home', repo_name=c.repo_name,
                       revision=diff1, f_path=c.f_path)
        return redirect(_url)

    try:
        if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
            c.changeset_1 = c.rhodecode_repo.get_changeset(diff1)
            try:
                node1 = c.changeset_1.get_node(f_path)
                if node1.is_dir():
                    raise NodeError('%s path is a %s not a file'
                                    % (node1, type(node1)))
            except NodeDoesNotExistError:
                c.changeset_1 = EmptyChangeset(
                    cs=diff1, revision=c.changeset_1.revision,
                    repo=c.rhodecode_repo)
                node1 = FileNode(f_path, '', changeset=c.changeset_1)
        else:
            c.changeset_1 = EmptyChangeset(repo=c.rhodecode_repo)
            node1 = FileNode(f_path, '', changeset=c.changeset_1)

        if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
            c.changeset_2 = c.rhodecode_repo.get_changeset(diff2)
            try:
                node2 = c.changeset_2.get_node(f_path)
                if node2.is_dir():
                    raise NodeError('%s path is a %s not a file'
                                    % (node2, type(node2)))
            except NodeDoesNotExistError:
                c.changeset_2 = EmptyChangeset(
                    cs=diff2, revision=c.changeset_2.revision,
                    repo=c.rhodecode_repo)
                node2 = FileNode(f_path, '', changeset=c.changeset_2)
        else:
            c.changeset_2 = EmptyChangeset(repo=c.rhodecode_repo)
            node2 = FileNode(f_path, '', changeset=c.changeset_2)
    except (RepositoryError, NodeError):
        log.error(traceback.format_exc())
        return redirect(url('files_home', repo_name=c.repo_name,
                            f_path=f_path))

    if c.action == 'download':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')

        diff_name = '%s_vs_%s.diff' % (diff1, diff2)
        response.content_type = 'text/plain'
        response.content_disposition = (
            'attachment; filename=%s' % diff_name
        )
        return diff.as_raw()

    elif c.action == 'raw':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')
        response.content_type = 'text/plain'
        return diff.as_raw()

    else:
        fid = h.FID(diff2, node2.path)
        line_context_lcl = get_line_ctx(fid, request.GET)
        ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

        lim = request.GET.get('fulldiff') or self.cut_off_limit
        _, cs1, cs2, diff, st = diffs.wrapped_diff(
            filenode_old=node1,
            filenode_new=node2,
            cut_off_limit=lim,
            ignore_whitespace=ign_whitespace_lcl,
            line_context=line_context_lcl,
            enable_comments=False)

        op = ''
        filename = node1.path
        cs_changes = {
            'fid': [cs1, cs2, op, filename, diff, st]
        }
        c.changes = cs_changes

    return render('files/file_diff.html')
class ChangesetController(BaseRepoController):

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def __before__(self):
        super(ChangesetController, self).__before__()
        c.affected_files_cut_off = 60

    def index(self, revision):
        c.anchor_url = anchor_url
        c.ignorews_url = _ignorews_url
        c.context_url = _context_url
        limit_off = request.GET.get('fulldiff')
        # get ranges of revisions if preset
        rev_range = revision.split('...')[:2]
        enable_comments = True
        try:
            if len(rev_range) == 2:
                enable_comments = False
                rev_start = rev_range[0]
                rev_end = rev_range[1]
                rev_ranges = c.rhodecode_repo.get_changesets(start=rev_start,
                                                             end=rev_end)
            else:
                rev_ranges = [c.rhodecode_repo.get_changeset(revision)]

            c.cs_ranges = list(rev_ranges)
            if not c.cs_ranges:
                raise RepositoryError('Changeset range returned empty result')
        except (RepositoryError, ChangesetDoesNotExistError, Exception), e:
            log.error(traceback.format_exc())
            h.flash(str(e), category='warning')
            return redirect(url('home'))

        c.changes = OrderedDict()

        c.lines_added = 0    # count of lines added
        c.lines_deleted = 0  # count of lines removed

        cumulative_diff = 0
        c.cut_off = False  # defines if cut off limit is reached
        c.comments = []
        c.inline_comments = []
        c.inline_cnt = 0

        # Iterate over ranges (default changeset view is always one changeset)
        for changeset in c.cs_ranges:
            c.comments.extend(ChangesetCommentsModel()
                              .get_comments(c.rhodecode_db_repo.repo_id,
                                            changeset.raw_id))
            inlines = ChangesetCommentsModel()\
                .get_inline_comments(c.rhodecode_db_repo.repo_id,
                                     changeset.raw_id)
            c.inline_comments.extend(inlines)
            c.changes[changeset.raw_id] = []
            try:
                changeset_parent = changeset.parents[0]
            except IndexError:
                changeset_parent = None

            #==================================================================
            # ADDED FILES
            #==================================================================
            for node in changeset.added:
                fid = h.FID(revision, node.path)
                line_context_lcl = get_line_ctx(fid, request.GET)
                ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

                lim = self.cut_off_limit
                if cumulative_diff > self.cut_off_limit:
                    lim = -1 if limit_off is None else None

                size, cs1, cs2, diff, st = wrapped_diff(
                    filenode_old=None,
                    filenode_new=node,
                    cut_off_limit=lim,
                    ignore_whitespace=ign_whitespace_lcl,
                    line_context=line_context_lcl,
                    enable_comments=enable_comments)
                cumulative_diff += size
                c.lines_added += st[0]
                c.lines_deleted += st[1]
                c.changes[changeset.raw_id].append(
                    ('added', node, diff, cs1, cs2, st))

            #==================================================================
            # CHANGED FILES
            #==================================================================
            for node in changeset.changed:
                try:
                    filenode_old = changeset_parent.get_node(node.path)
                except ChangesetError:
                    log.warning('Unable to fetch parent node for diff')
                    filenode_old = FileNode(node.path, '', EmptyChangeset())

                fid = h.FID(revision, node.path)
                line_context_lcl = get_line_ctx(fid, request.GET)
                ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

                lim = self.cut_off_limit
                if cumulative_diff > self.cut_off_limit:
                    lim = -1 if limit_off is None else None

                size, cs1, cs2, diff, st = wrapped_diff(
                    filenode_old=filenode_old,
                    filenode_new=node,
                    cut_off_limit=lim,
                    ignore_whitespace=ign_whitespace_lcl,
                    line_context=line_context_lcl,
                    enable_comments=enable_comments)
                cumulative_diff += size
                c.lines_added += st[0]
                c.lines_deleted += st[1]
                c.changes[changeset.raw_id].append(
                    ('changed', node, diff, cs1, cs2, st))

            #==================================================================
            # REMOVED FILES
            #==================================================================
            for node in changeset.removed:
                c.changes[changeset.raw_id].append(
                    ('removed', node, None, None, None, (0, 0)))

        # count inline comments
        for path, lines in c.inline_comments:
            for comments in lines.values():
                c.inline_cnt += len(comments)

        if len(c.cs_ranges) == 1:
            c.changeset = c.cs_ranges[0]
            c.changes = c.changes[c.changeset.raw_id]

            return render('changeset/changeset.html')
        else:
            return render('changeset/changeset_range.html')
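# The inline-comment counter above walks a nested structure of
# (path, {line_no: [comments]}) pairs. A tiny standalone sketch of that
# traversal; the sample data is made up for illustration:
inline_comments = [
    ('setup.py', {'n10': ['too slow?'], 'n42': ['typo', 'fixed']}),
    ('README',   {'n1': ['please expand']}),
]

inline_cnt = 0
for path, lines in inline_comments:
    for comments in lines.values():
        inline_cnt += len(comments)

assert inline_cnt == 4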
def get_commits_stats(repo_name, ts_min_y, ts_max_y, recurse_limit=100):
    log = get_logger(get_commits_stats)
    DBS = get_session()
    lockkey = __get_lockkey('get_commits_stats', repo_name, ts_min_y,
                            ts_max_y)
    lockkey_path = config['app_conf']['cache_dir']

    log.info('running task with lockkey %s' % lockkey)

    try:
        lock = l = DaemonLock(file_=jn(lockkey_path, lockkey))

        # for js data compatibility, strip double quotes from the person key
        akc = lambda k: person(k).replace('"', "")

        co_day_auth_aggr = {}
        commits_by_day_aggregate = {}
        repo = Repository.get_by_repo_name(repo_name)
        if repo is None:
            return True

        repo = repo.scm_instance
        repo_size = repo.count()
        # return if repo has no revisions
        if repo_size < 1:
            lock.release()
            return True

        skip_date_limit = True
        parse_limit = int(config['app_conf'].get('commit_parse_limit'))
        last_rev = None
        last_cs = None
        timegetter = itemgetter('time')

        dbrepo = DBS.query(Repository)\
            .filter(Repository.repo_name == repo_name).scalar()
        cur_stats = DBS.query(Statistics)\
            .filter(Statistics.repository == dbrepo).scalar()

        if cur_stats is not None:
            last_rev = cur_stats.stat_on_revision

        if last_rev == repo.get_changeset().revision and repo_size > 1:
            # pass silently without any work if we're not on the first
            # revision, or the current state of parsing revision (from the
            # db marker) is already the last revision
            lock.release()
            return True

        if cur_stats:
            commits_by_day_aggregate = OrderedDict(
                json.loads(cur_stats.commit_activity_combined))
            co_day_auth_aggr = json.loads(cur_stats.commit_activity)

        log.debug('starting parsing %s' % parse_limit)
        lmktime = mktime

        last_rev = last_rev + 1 if last_rev >= 0 else 0
        log.debug('Getting revisions from %s to %s' %
                  (last_rev, last_rev + parse_limit))
        for cs in repo[last_rev:last_rev + parse_limit]:
            log.debug('parsing %s' % cs)
            last_cs = cs  # remember last parsed changeset
            k = lmktime([
                cs.date.timetuple()[0], cs.date.timetuple()[1],
                cs.date.timetuple()[2], 0, 0, 0, 0, 0, 0
            ])

            if akc(cs.author) in co_day_auth_aggr:
                try:
                    l = [
                        timegetter(x)
                        for x in co_day_auth_aggr[akc(cs.author)]['data']
                    ]
                    time_pos = l.index(k)
                except ValueError:
                    time_pos = None

                if time_pos is not None and time_pos >= 0:
                    datadict = \
                        co_day_auth_aggr[akc(cs.author)]['data'][time_pos]

                    datadict["commits"] += 1
                    datadict["added"] += len(cs.added)
                    datadict["changed"] += len(cs.changed)
                    datadict["removed"] += len(cs.removed)
                else:
                    if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
                        datadict = {
                            "time": k,
                            "commits": 1,
                            "added": len(cs.added),
                            "changed": len(cs.changed),
                            "removed": len(cs.removed),
                        }
                        co_day_auth_aggr[akc(cs.author)]['data']\
                            .append(datadict)
            else:
                if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
                    co_day_auth_aggr[akc(cs.author)] = {
                        "label": akc(cs.author),
                        "data": [{
                            "time": k,
                            "commits": 1,
                            "added": len(cs.added),
                            "changed": len(cs.changed),
                            "removed": len(cs.removed),
                        }],
                        "schema": ["commits"],
                    }

            # gather all data by day
            if k in commits_by_day_aggregate:
                commits_by_day_aggregate[k] += 1
            else:
                commits_by_day_aggregate[k] = 1

        overview_data = sorted(commits_by_day_aggregate.items(),
                               key=itemgetter(0))

        if not co_day_auth_aggr:
            co_day_auth_aggr[akc(repo.contact)] = {
                "label": akc(repo.contact),
                "data": [0, 1],
                "schema": ["commits"],
            }

        stats = cur_stats if cur_stats else Statistics()
        stats.commit_activity = json.dumps(co_day_auth_aggr)
        stats.commit_activity_combined = json.dumps(overview_data)

        log.debug('last revision %s' % last_rev)
        leftovers = len(repo.revisions[last_rev:])
        log.debug('revisions to parse %s' % leftovers)

        if last_rev == 0 or leftovers < parse_limit:
            log.debug('getting code trending stats')
            stats.languages = json.dumps(__get_codes_stats(repo_name))

        try:
            stats.repository = dbrepo
            stats.stat_on_revision = last_cs.revision if last_cs else 0
            DBS.add(stats)
            DBS.commit()
        except Exception:
            log.error(traceback.format_exc())
            DBS.rollback()
            lock.release()
            return False

        # final release
        lock.release()

        # execute another task if celery is enabled
        if len(repo.revisions) > 1 and CELERY_ON and recurse_limit > 0:
            recurse_limit -= 1
            run_task(get_commits_stats, repo_name, ts_min_y, ts_max_y,
                     recurse_limit)
        if recurse_limit <= 0:
            log.debug('Breaking recursive mode due to reaching the '
                      'recurse limit')
        return True
    except LockHeld:
        log.info('LockHeld')
        return 'Task with key %s already running' % lockkey
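# A minimal sketch of the aggregation structure the stats task builds:
# commits are bucketed per author and per day (the day key is the midnight
# timestamp), and each bucket tracks commit and file counts. The sample
# commits below are made up:
from time import mktime

commits = [  # (author, date tuple, files added/changed/removed)
    ('alice', (2012, 5, 1), 2, 1, 0),
    ('alice', (2012, 5, 1), 0, 3, 1),
    ('bob',   (2012, 5, 2), 1, 0, 0),
]

per_author = {}
per_day = {}
for author, (y, m, d), added, changed, removed in commits:
    day = mktime((y, m, d, 0, 0, 0, 0, 0, 0))  # normalize to midnight
    buckets = per_author.setdefault(author, {'label': author, 'data': []})
    for entry in buckets['data']:
        if entry['time'] == day:
            break
    else:  # no bucket for this day yet, start one
        entry = {'time': day, 'commits': 0,
                 'added': 0, 'changed': 0, 'removed': 0}
        buckets['data'].append(entry)
    entry['commits'] += 1
    entry['added'] += added
    entry['changed'] += changed
    entry['removed'] += removed
    per_day[day] = per_day.get(day, 0) + 1

assert per_author['alice']['data'][0]['commits'] == 2
assert sum(per_day.values()) == 3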
class ChangesetController(BaseRepoController):

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def __before__(self):
        super(ChangesetController, self).__before__()
        c.affected_files_cut_off = 60
        repo_model = RepoModel()
        c.users_array = repo_model.get_users_js()
        c.users_groups_array = repo_model.get_users_groups_js()

    def index(self, revision, method='show'):
        c.anchor_url = anchor_url
        c.ignorews_url = _ignorews_url
        c.context_url = _context_url
        c.fulldiff = fulldiff = request.GET.get('fulldiff')
        # get ranges of revisions if preset
        rev_range = revision.split('...')[:2]
        enable_comments = True
        try:
            if len(rev_range) == 2:
                enable_comments = False
                rev_start = rev_range[0]
                rev_end = rev_range[1]
                rev_ranges = c.rhodecode_repo.get_changesets(start=rev_start,
                                                             end=rev_end)
            else:
                rev_ranges = [c.rhodecode_repo.get_changeset(revision)]

            c.cs_ranges = list(rev_ranges)
            if not c.cs_ranges:
                raise RepositoryError('Changeset range returned empty result')
        except (RepositoryError, ChangesetDoesNotExistError, Exception), e:
            log.error(traceback.format_exc())
            h.flash(str(e), category='error')
            raise HTTPNotFound()

        c.changes = OrderedDict()

        c.lines_added = 0    # count of lines added
        c.lines_deleted = 0  # count of lines removed

        c.changeset_statuses = ChangesetStatus.STATUSES
        c.comments = []
        c.statuses = []
        c.inline_comments = []
        c.inline_cnt = 0

        # Iterate over ranges (default changeset view is always one changeset)
        for changeset in c.cs_ranges:
            inlines = []
            if method == 'show':
                c.statuses.extend([ChangesetStatusModel().get_status(
                    c.rhodecode_db_repo.repo_id, changeset.raw_id)])

                c.comments.extend(ChangesetCommentsModel()
                                  .get_comments(c.rhodecode_db_repo.repo_id,
                                                revision=changeset.raw_id))

                # comments from PR
                st = ChangesetStatusModel().get_statuses(
                    c.rhodecode_db_repo.repo_id, changeset.raw_id,
                    with_revisions=True)
                # from associated statuses, check the pull requests, and
                # show comments from them
                prs = set([x.pull_request for x in
                           filter(lambda x: x.pull_request is not None, st)])
                for pr in prs:
                    c.comments.extend(pr.comments)

                inlines = ChangesetCommentsModel()\
                    .get_inline_comments(c.rhodecode_db_repo.repo_id,
                                         revision=changeset.raw_id)
                c.inline_comments.extend(inlines)

            c.changes[changeset.raw_id] = []

            cs2 = changeset.raw_id
            cs1 = changeset.parents[0].raw_id if changeset.parents \
                else EmptyChangeset()
            context_lcl = get_line_ctx('', request.GET)
            ign_whitespace_lcl = get_ignore_ws('', request.GET)

            _diff = c.rhodecode_repo.get_diff(
                cs1, cs2,
                ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
            diff_limit = self.cut_off_limit if not fulldiff else None
            diff_processor = diffs.DiffProcessor(
                _diff, vcs=c.rhodecode_repo.alias, format='gitdiff',
                diff_limit=diff_limit)
            cs_changes = OrderedDict()
            if method == 'show':
                _parsed = diff_processor.prepare()
                c.limited_diff = False
                if isinstance(_parsed, LimitedDiffContainer):
                    c.limited_diff = True
                for f in _parsed:
                    st = f['stats']
                    if st[0] != 'b':
                        c.lines_added += st[0]
                        c.lines_deleted += st[1]
                    fid = h.FID(changeset.raw_id, f['filename'])
                    diff = diff_processor.as_html(
                        enable_comments=enable_comments, parsed_lines=[f])
                    cs_changes[fid] = [cs1, cs2, f['operation'],
                                       f['filename'], diff, st]
            else:
                # downloads/raw we only need RAW diff nothing else
                diff = diff_processor.as_raw()
                cs_changes[''] = [None, None, None, None, diff, None]
            c.changes[changeset.raw_id] = cs_changes

        # sort comments by how they were generated
        c.comments = sorted(c.comments, key=lambda x: x.comment_id)

        # count inline comments
        for __, lines in c.inline_comments:
            for comments in lines.values():
                c.inline_cnt += len(comments)

        if len(c.cs_ranges) == 1:
            c.changeset = c.cs_ranges[0]
            c.parent_tmpl = ''.join(['# Parent %s\n' % x.raw_id
                                     for x in c.changeset.parents])
        if method == 'download':
            response.content_type = 'text/plain'
            response.content_disposition = 'attachment; filename=%s.diff' \
                % revision[:12]
            return diff
        elif method == 'patch':
            response.content_type = 'text/plain'
            c.diff = safe_unicode(diff)
            return render('changeset/patch_changeset.html')
        elif method == 'raw':
            response.content_type = 'text/plain'
            return diff
        elif method == 'show':
            if len(c.cs_ranges) == 1:
                return render('changeset/changeset.html')
            else:
                return render('changeset/changeset_range.html')
def index(self, repo_name):
    c.dbrepo = dbrepo = c.rhodecode_db_repo
    c.following = self.scm_model.is_following_repo(
        repo_name, self.rhodecode_user.user_id)

    def url_generator(**kw):
        return url('shortlog_home', repo_name=repo_name, size=10, **kw)

    c.repo_changesets = RepoPage(c.rhodecode_repo, page=1,
                                 items_per_page=10, url=url_generator)

    if self.rhodecode_user.username == 'default':
        # for default(anonymous) user we don't need to pass credentials
        username = ''
        password = ''
    else:
        username = str(self.rhodecode_user.username)
        password = '******'

    parsed_url = urlparse(url.current(qualified=True))

    default_clone_uri = '{scheme}://{user}{pass}{netloc}{path}'

    uri_tmpl = config.get('clone_uri', default_clone_uri)
    uri_tmpl = uri_tmpl.replace('{', '%(').replace('}', ')s')
    decoded_path = safe_unicode(urllib.unquote(parsed_url.path))
    uri_dict = {
        'user': username,
        'pass': password,
        'scheme': parsed_url.scheme,
        'netloc': parsed_url.netloc,
        'path': decoded_path
    }

    uri = uri_tmpl % uri_dict
    # generate another clone url by id
    uri_dict.update({
        'path': decoded_path.replace(repo_name, '_%s' % c.dbrepo.repo_id)
    })
    uri_id = uri_tmpl % uri_dict

    c.clone_repo_url = uri
    c.clone_repo_url_id = uri_id

    c.repo_tags = OrderedDict()
    for name, hash_ in c.rhodecode_repo.tags.items()[:10]:
        try:
            c.repo_tags[name] = c.rhodecode_repo.get_changeset(hash_)
        except ChangesetError:
            c.repo_tags[name] = EmptyChangeset(hash_)

    c.repo_branches = OrderedDict()
    for name, hash_ in c.rhodecode_repo.branches.items()[:10]:
        try:
            c.repo_branches[name] = c.rhodecode_repo.get_changeset(hash_)
        except ChangesetError:
            c.repo_branches[name] = EmptyChangeset(hash_)

    td = date.today() + timedelta(days=1)
    td_1m = td - timedelta(days=calendar.mdays[td.month])
    td_1y = td - timedelta(days=365)

    ts_min_m = mktime(td_1m.timetuple())
    ts_min_y = mktime(td_1y.timetuple())
    ts_max_y = mktime(td.timetuple())

    if dbrepo.enable_statistics:
        c.show_stats = True
        c.no_data_msg = _('No data loaded yet')
        run_task(get_commits_stats, c.dbrepo.repo_name, ts_min_y, ts_max_y)
    else:
        c.show_stats = False
        c.no_data_msg = _('Statistics are disabled for this repository')
    c.ts_min = ts_min_m
    c.ts_max = ts_max_y

    stats = self.sa.query(Statistics)\
        .filter(Statistics.repository == dbrepo)\
        .scalar()

    c.stats_percentage = 0

    if stats and stats.languages:
        c.no_data = not dbrepo.enable_statistics
        lang_stats_d = json.loads(stats.languages)
        c.commit_data = stats.commit_activity
        c.overview_data = stats.commit_activity_combined

        lang_stats = ((x, {"count": y,
                           "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
                      for x, y in lang_stats_d.items())

        c.trending_languages = json.dumps(
            sorted(lang_stats, reverse=True, key=lambda k: k[1])[:10])
        last_rev = stats.stat_on_revision + 1
        c.repo_last_rev = c.rhodecode_repo.count()\
            if c.rhodecode_repo.revisions else 0
        if last_rev == 0 or c.repo_last_rev == 0:
            pass
        else:
            c.stats_percentage = '%.2f' % ((float(last_rev) /
                                            c.repo_last_rev) * 100)
    else:
        c.commit_data = json.dumps({})
        c.overview_data = json.dumps([[ts_min_y, 0], [ts_max_y, 10]])
        c.trending_languages = json.dumps({})
        c.no_data = True

    c.enable_downloads = dbrepo.enable_downloads
    if c.enable_downloads:
        c.download_options = self._get_download_links(c.rhodecode_repo)

    c.readme_data, c.readme_file = self.__get_readme_data(
        c.rhodecode_db_repo.repo_name, c.rhodecode_repo)
    return render('summary/summary.html')
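# The clone-URL building above converts a '{name}'-style template into
# old-style %(name)s interpolation. A standalone sketch of that conversion;
# the URL and credential values are made-up samples (the '@' separator is
# part of the sample 'pass' value here):
try:
    from urlparse import urlparse      # Python 2
except ImportError:
    from urllib.parse import urlparse  # Python 3

uri_tmpl = '{scheme}://{user}{pass}{netloc}{path}'
uri_tmpl = uri_tmpl.replace('{', '%(').replace('}', ')s')
# -> '%(scheme)s://%(user)s%(pass)s%(netloc)s%(path)s'

parsed = urlparse('https://code.example.com/repos/myrepo')
uri = uri_tmpl % {
    'user': 'john',          # would be '' for the anonymous user
    'pass': ':secret@',
    'scheme': parsed.scheme,
    'netloc': parsed.netloc,
    'path': parsed.path,
}
assert uri == 'https://john:secret@code.example.com/repos/myrepo'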
def diff(self, repo_name, f_path):
    ignore_whitespace = request.GET.get('ignorews') == '1'
    line_context = request.GET.get('context', 3)
    diff1 = request.GET.get('diff1', '')
    diff2 = request.GET.get('diff2', '')
    c.action = request.GET.get('diff')
    c.no_changes = diff1 == diff2
    c.f_path = f_path
    c.big_diff = False
    c.anchor_url = anchor_url
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.changes = OrderedDict()
    c.changes[diff2] = []
    try:
        if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
            c.changeset_1 = c.rhodecode_repo.get_changeset(diff1)
            node1 = c.changeset_1.get_node(f_path)
        else:
            c.changeset_1 = EmptyChangeset(repo=c.rhodecode_repo)
            node1 = FileNode('.', '', changeset=c.changeset_1)

        if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
            c.changeset_2 = c.rhodecode_repo.get_changeset(diff2)
            node2 = c.changeset_2.get_node(f_path)
        else:
            c.changeset_2 = EmptyChangeset(repo=c.rhodecode_repo)
            node2 = FileNode('.', '', changeset=c.changeset_2)
    except RepositoryError:
        return redirect(url('files_home', repo_name=c.repo_name,
                            f_path=f_path))

    if c.action == 'download':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')

        diff_name = '%s_vs_%s.diff' % (diff1, diff2)
        response.content_type = 'text/plain'
        response.content_disposition = ('attachment; filename=%s'
                                        % diff_name)
        return diff.raw_diff()

    elif c.action == 'raw':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')
        response.content_type = 'text/plain'
        return diff.raw_diff()

    else:
        fid = h.FID(diff2, node2.path)
        line_context_lcl = get_line_ctx(fid, request.GET)
        ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

        lim = request.GET.get('fulldiff') or self.cut_off_limit
        _, cs1, cs2, diff, st = diffs.wrapped_diff(
            filenode_old=node1,
            filenode_new=node2,
            cut_off_limit=lim,
            ignore_whitespace=ign_whitespace_lcl,
            line_context=line_context_lcl,
            enable_comments=False)

        c.changes = [('', node2, diff, cs1, cs2, st,)]

    return render('files/file_diff.html')
def get_commits_stats(repo_name, ts_min_y, ts_max_y):
    log = get_logger(get_commits_stats)
    DBS = get_session()
    lockkey = __get_lockkey('get_commits_stats', repo_name, ts_min_y,
                            ts_max_y)
    lockkey_path = config['here']

    log.info('running task with lockkey %s' % lockkey)

    try:
        lock = l = DaemonLock(file_=jn(lockkey_path, lockkey))

        # for js data compatibility, strip double quotes from the person key
        akc = lambda k: person(k).replace('"', "")

        co_day_auth_aggr = {}
        commits_by_day_aggregate = {}
        repo = Repository.get_by_repo_name(repo_name)
        if repo is None:
            return True

        repo = repo.scm_instance
        repo_size = repo.count()
        # return if repo has no revisions
        if repo_size < 1:
            lock.release()
            return True

        skip_date_limit = True
        parse_limit = int(config['app_conf'].get('commit_parse_limit'))
        last_rev = None
        last_cs = None
        timegetter = itemgetter('time')

        dbrepo = DBS.query(Repository)\
            .filter(Repository.repo_name == repo_name).scalar()
        cur_stats = DBS.query(Statistics)\
            .filter(Statistics.repository == dbrepo).scalar()

        if cur_stats is not None:
            last_rev = cur_stats.stat_on_revision

        if last_rev == repo.get_changeset().revision and repo_size > 1:
            # pass silently without any work if we're not on the first
            # revision, or the current state of parsing revision (from the
            # db marker) is already the last revision
            lock.release()
            return True

        if cur_stats:
            commits_by_day_aggregate = OrderedDict(json.loads(
                cur_stats.commit_activity_combined))
            co_day_auth_aggr = json.loads(cur_stats.commit_activity)

        log.debug('starting parsing %s' % parse_limit)
        lmktime = mktime

        last_rev = last_rev + 1 if last_rev >= 0 else 0
        log.debug('Getting revisions from %s to %s' % (
            last_rev, last_rev + parse_limit)
        )
        for cs in repo[last_rev:last_rev + parse_limit]:
            log.debug('parsing %s' % cs)
            last_cs = cs  # remember last parsed changeset
            k = lmktime([cs.date.timetuple()[0], cs.date.timetuple()[1],
                         cs.date.timetuple()[2], 0, 0, 0, 0, 0, 0])

            if akc(cs.author) in co_day_auth_aggr:
                try:
                    l = [timegetter(x) for x in
                         co_day_auth_aggr[akc(cs.author)]['data']]
                    time_pos = l.index(k)
                except ValueError:
                    time_pos = False

                if time_pos is not False and time_pos >= 0:
                    datadict = \
                        co_day_auth_aggr[akc(cs.author)]['data'][time_pos]

                    datadict["commits"] += 1
                    datadict["added"] += len(cs.added)
                    datadict["changed"] += len(cs.changed)
                    datadict["removed"] += len(cs.removed)
                else:
                    if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
                        datadict = {"time": k,
                                    "commits": 1,
                                    "added": len(cs.added),
                                    "changed": len(cs.changed),
                                    "removed": len(cs.removed),
                                    }
                        co_day_auth_aggr[akc(cs.author)]['data']\
                            .append(datadict)
            else:
                if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
                    co_day_auth_aggr[akc(cs.author)] = {
                        "label": akc(cs.author),
                        "data": [{"time": k,
                                  "commits": 1,
                                  "added": len(cs.added),
                                  "changed": len(cs.changed),
                                  "removed": len(cs.removed),
                                  }],
                        "schema": ["commits"],
                    }

            # gather all data by day
            if k in commits_by_day_aggregate:
                commits_by_day_aggregate[k] += 1
            else:
                commits_by_day_aggregate[k] = 1

        overview_data = sorted(commits_by_day_aggregate.items(),
                               key=itemgetter(0))

        if not co_day_auth_aggr:
            co_day_auth_aggr[akc(repo.contact)] = {
                "label": akc(repo.contact),
                "data": [0, 1],
                "schema": ["commits"],
            }

        stats = cur_stats if cur_stats else Statistics()
        stats.commit_activity = json.dumps(co_day_auth_aggr)
        stats.commit_activity_combined = json.dumps(overview_data)

        log.debug('last revision %s' % last_rev)
        leftovers = len(repo.revisions[last_rev:])
        log.debug('revisions to parse %s' % leftovers)

        if last_rev == 0 or leftovers < parse_limit:
            log.debug('getting code trending stats')
            stats.languages = json.dumps(__get_codes_stats(repo_name))

        try:
            stats.repository = dbrepo
            stats.stat_on_revision = last_cs.revision if last_cs else 0
            DBS.add(stats)
            DBS.commit()
        except Exception:
            log.error(traceback.format_exc())
            DBS.rollback()
            lock.release()
            return False

        # final release
        lock.release()

        # execute another task if celery is enabled
        if len(repo.revisions) > 1 and CELERY_ON:
            run_task(get_commits_stats, repo_name, ts_min_y, ts_max_y)

        return True
    except LockHeld:
        log.info('LockHeld')
        return 'Task with key %s already running' % lockkey
def diff(self, repo_name, f_path):
    ignore_whitespace = request.GET.get('ignorews') == '1'
    line_context = request.GET.get('context', 3)
    diff1 = request.GET.get('diff1', '')

    path1, diff1 = parse_path_ref(diff1, default_path=f_path)

    diff2 = request.GET.get('diff2', '')
    c.action = request.GET.get('diff')
    c.no_changes = diff1 == diff2
    c.f_path = f_path
    c.big_diff = False
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.changes = OrderedDict()
    c.changes[diff2] = []

    if not any((diff1, diff2)):
        h.flash(
            'Need query parameter "diff1" or "diff2" to generate a diff.',
            category='error')
        raise HTTPBadRequest()

    # special case if we want to show a single commit_id only; implemented
    # here to reduce JS and callbacks
    if request.GET.get('show_rev') and diff1:
        if str2bool(request.GET.get('annotate', 'False')):
            _url = url('files_annotate_home', repo_name=c.repo_name,
                       revision=diff1, f_path=path1)
        else:
            _url = url('files_home', repo_name=c.repo_name,
                       revision=diff1, f_path=path1)

        return redirect(_url)

    try:
        node1 = self._get_file_node(diff1, path1)
        node2 = self._get_file_node(diff2, f_path)
    except (RepositoryError, NodeError):
        log.exception("Exception while trying to get node from repository")
        return redirect(url('files_home', repo_name=c.repo_name,
                            f_path=f_path))

    if all(isinstance(node.commit, EmptyCommit)
           for node in (node1, node2)):
        raise HTTPNotFound()

    c.commit_1 = node1.commit
    c.commit_2 = node2.commit

    if c.action == 'download':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')

        diff_name = '%s_vs_%s.diff' % (diff1, diff2)
        response.content_type = 'text/plain'
        response.content_disposition = ('attachment; filename=%s'
                                        % (diff_name, ))
        charset = self._get_default_encoding()
        if charset:
            response.charset = charset
        return diff.as_raw()

    elif c.action == 'raw':
        _diff = diffs.get_gitdiff(node1, node2,
                                  ignore_whitespace=ignore_whitespace,
                                  context=line_context)
        diff = diffs.DiffProcessor(_diff, format='gitdiff')
        response.content_type = 'text/plain'
        charset = self._get_default_encoding()
        if charset:
            response.charset = charset
        return diff.as_raw()

    else:
        fid = h.FID(diff2, node2.path)
        line_context_lcl = get_line_ctx(fid, request.GET)
        ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

        __, commit1, commit2, diff, st, data = diffs.wrapped_diff(
            filenode_old=node1,
            filenode_new=node2,
            diff_limit=self.cut_off_limit_diff,
            file_limit=self.cut_off_limit_file,
            show_full_diff=request.GET.get('fulldiff'),
            ignore_whitespace=ign_whitespace_lcl,
            line_context=line_context_lcl,)

        c.lines_added = data['stats']['added'] if data else 0
        c.lines_deleted = data['stats']['deleted'] if data else 0
        c.files = [data]
        c.commit_ranges = [c.commit_1, c.commit_2]
        c.ancestor = None
        c.statuses = []
        c.target_repo = c.rhodecode_db_repo
        c.filename1 = node1.path
        c.filename = node2.path
        c.binary_file = node1.is_binary or node2.is_binary
        operation = data['operation'] if data else ''

        commit_changes = {
            # TODO: it's passing the old file to the diff to keep the
            # standard but this is not being used for this template,
            # but might need both files in the future or a more standard
            # way to work with that
            'fid': [commit1, commit2, operation, c.filename, diff, st, data]
        }

        c.changes = commit_changes

    return render('files/file_diff.html')
def _index(self, commit_id_range, method):
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.fulldiff = fulldiff = request.GET.get('fulldiff')

    # get ranges of commit ids if preset
    commit_range = commit_id_range.split('...')[:2]
    enable_comments = True
    try:
        pre_load = ['affected_files', 'author', 'branch', 'date', 'message',
                    'parents']

        if len(commit_range) == 2:
            enable_comments = False
            commits = c.rhodecode_repo.get_commits(
                start_id=commit_range[0], end_id=commit_range[1],
                pre_load=pre_load)
            commits = list(commits)
        else:
            commits = [c.rhodecode_repo.get_commit(
                commit_id=commit_id_range, pre_load=pre_load)]

        c.commit_ranges = commits
        if not c.commit_ranges:
            raise RepositoryError(
                'The commit range returned an empty result')
    except CommitDoesNotExistError:
        msg = _('No such commit exists for this repository')
        h.flash(msg, category='error')
        raise HTTPNotFound()
    except Exception:
        log.exception("General failure")
        raise HTTPNotFound()

    c.changes = OrderedDict()
    c.lines_added = 0
    c.lines_deleted = 0

    c.commit_statuses = ChangesetStatus.STATUSES
    c.comments = []
    c.statuses = []
    c.inline_comments = []
    c.inline_cnt = 0
    c.files = []

    # Iterate over ranges (default commit view is always one commit)
    for commit in c.commit_ranges:
        if method == 'show':
            c.statuses.extend([ChangesetStatusModel().get_status(
                c.rhodecode_db_repo.repo_id, commit.raw_id)])

            c.comments.extend(ChangesetCommentsModel().get_comments(
                c.rhodecode_db_repo.repo_id,
                revision=commit.raw_id))

            # comments from PR
            st = ChangesetStatusModel().get_statuses(
                c.rhodecode_db_repo.repo_id, commit.raw_id,
                with_revisions=True)

            # from associated statuses, check the pull requests, and
            # show comments from them
            prs = set(x.pull_request for x in
                      filter(lambda x: x.pull_request is not None, st))
            for pr in prs:
                c.comments.extend(pr.comments)

            inlines = ChangesetCommentsModel().get_inline_comments(
                c.rhodecode_db_repo.repo_id,
                revision=commit.raw_id)
            c.inline_comments.extend(inlines.iteritems())

        c.changes[commit.raw_id] = []

        commit2 = commit
        commit1 = commit.parents[0] if commit.parents else EmptyCommit()

        # fetch global flags of ignore ws or context lines
        context_lcl = get_line_ctx('', request.GET)
        ign_whitespace_lcl = get_ignore_ws('', request.GET)

        _diff = c.rhodecode_repo.get_diff(
            commit1, commit2,
            ignore_whitespace=ign_whitespace_lcl, context=context_lcl)

        # diff_limit will cut off the whole diff if the limit is applied
        # otherwise it will just hide the big files from the front-end
        diff_limit = self.cut_off_limit_diff
        file_limit = self.cut_off_limit_file

        diff_processor = diffs.DiffProcessor(
            _diff, format='gitdiff', diff_limit=diff_limit,
            file_limit=file_limit, show_full_diff=fulldiff)
        commit_changes = OrderedDict()
        if method == 'show':
            _parsed = diff_processor.prepare()
            c.limited_diff = isinstance(_parsed, diffs.LimitedDiffContainer)
            for f in _parsed:
                c.files.append(f)
                st = f['stats']
                c.lines_added += st['added']
                c.lines_deleted += st['deleted']
                fid = h.FID(commit.raw_id, f['filename'])
                diff = diff_processor.as_html(
                    enable_comments=enable_comments, parsed_lines=[f])
                commit_changes[fid] = [
                    commit1.raw_id, commit2.raw_id,
                    f['operation'], f['filename'], diff, st, f]
        else:
            # downloads/raw we only need RAW diff nothing else
            diff = diff_processor.as_raw()
            commit_changes[''] = [None, None, None, None, diff, None, None]
        c.changes[commit.raw_id] = commit_changes

    # sort comments by how they were generated
    c.comments = sorted(c.comments, key=lambda x: x.comment_id)

    # count inline comments
    for __, lines in c.inline_comments:
        for comments in lines.values():
            c.inline_cnt += len(comments)

    if len(c.commit_ranges) == 1:
        c.commit = c.commit_ranges[0]
        c.parent_tmpl = ''.join(
            '# Parent %s\n' % x.raw_id for x in c.commit.parents)
    if method == 'download':
        response.content_type = 'text/plain'
        response.content_disposition = ('attachment; filename=%s.diff'
                                        % commit_id_range[:12])
        return diff
    elif method == 'patch':
        response.content_type = 'text/plain'
        c.diff = safe_unicode(diff)
        return render('changeset/patch_changeset.html')
    elif method == 'raw':
        response.content_type = 'text/plain'
        return diff
    elif method == 'show':
        if len(c.commit_ranges) == 1:
            return render('changeset/changeset.html')
        else:
            c.ancestor = None
            c.target_repo = c.rhodecode_db_repo
            return render('changeset/changeset_range.html')
def _repo(self, branches=None):
    repo = mock.Mock()
    repo.branches = OrderedDict((name, '0') for name in branches or [])
    repo.tags = {}
    return repo
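# A hedged usage sketch of the test helper above: it fakes just enough of a
# repo object (ordered branches, empty tags) for code under test that only
# reads those attributes. Uses the `mock` library (stdlib unittest.mock on
# Python 3):
try:
    import mock                    # Python 2 backport package
except ImportError:
    from unittest import mock      # Python 3 stdlib
from collections import OrderedDict

def _repo(branches=None):
    repo = mock.Mock()
    repo.branches = OrderedDict((name, '0') for name in branches or [])
    repo.tags = {}
    return repo

repo = _repo(branches=['default', 'stable'])
assert list(repo.branches) == ['default', 'stable']
assert repo.tags == {}
# any other attribute access is absorbed by the Mock:
repo.count.return_value = 0
assert repo.count() == 0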