def make_template_context(self, repo, namespace, rev, path):
    repo, rev, path, commit = _get_repo_and_rev(repo, namespace, rev, path)
    try:
        submodule_rev = tree_lookup_path(
            repo.__getitem__, commit.tree, encode_for_git(path)
        )[1]
    except KeyError:
        raise NotFound("Parent path for submodule missing")
    try:
        (submodule_url, submodule_path) = _get_submodule(
            repo, commit, encode_for_git(path)
        )
    except KeyError:
        submodule_url = None
        submodule_path = None
    # TODO(jelmer): Rather than printing an information page,
    # redirect to the page in klaus for the repository at
    # submodule_path, revision submodule_rev.
    self.context = {
        "view": self.view_name,
        "repo": repo,
        "rev": rev,
        "commit": commit,
        "branches": repo.get_branch_names(exclude=rev),
        "tags": repo.get_tag_names(),
        "path": path,
        "subpaths": list(subpaths(path)) if path else None,
        "submodule_url": force_unicode(submodule_url),
        "submodule_path": force_unicode(submodule_path),
        "submodule_rev": force_unicode(submodule_rev),
        "base_href": None,
    }
def make_template_context(self, repo, rev, path):
    repo, rev, path, commit = _get_repo_and_rev(repo, rev, path)
    try:
        submodule_rev = tree_lookup_path(
            repo.__getitem__, commit.tree, encode_for_git(path))[1]
    except KeyError:
        raise NotFound("Parent path for submodule missing")
    try:
        (submodule_url, submodule_path) = _get_submodule(
            repo, commit, encode_for_git(path))
    except KeyError:
        submodule_url = None
        submodule_path = None
    # TODO(jelmer): Rather than printing an information page,
    # redirect to the page in klaus for the repository at
    # submodule_path, revision submodule_rev.
    self.context = {
        'view': self.view_name,
        'repo': repo,
        'rev': rev,
        'commit': commit,
        'branches': repo.get_branch_names(exclude=rev),
        'tags': repo.get_tag_names(),
        'path': path,
        'subpaths': list(subpaths(path)) if path else None,
        'submodule_url': force_unicode(submodule_url),
        'submodule_path': force_unicode(submodule_path),
        'submodule_rev': force_unicode(submodule_rev),
        'base_href': None,
    }
def make_template_context(self, repo, rev, path):
    repo, rev, path, commit = _get_repo_and_rev(repo, rev, path)
    try:
        submodule_rev = tree_lookup_path(
            repo.__getitem__, commit.tree, encode_for_git(path))[1]
    except KeyError:
        raise NotFound("Parent path for submodule missing")
    try:
        (submodule_url, submodule_path) = _get_submodule(
            repo, commit, encode_for_git(path))
    except KeyError:
        submodule_url = None
        submodule_path = None
    # TODO(jelmer): Rather than printing an information page,
    # redirect to the page in klaus for the repository at
    # submodule_path, revision submodule_rev.
    self.context = {
        'view': self.view_name,
        'repo': repo,
        'rev': rev,
        'commit': commit,
        'branches': repo.get_branch_names(exclude=rev),
        'tags': repo.get_tag_names(),
        'path': path,
        'subpaths': list(subpaths(path)) if path else None,
        'submodule_url': force_unicode(submodule_url),
        'submodule_path': force_unicode(submodule_path),
        'submodule_rev': force_unicode(submodule_rev),
        'base_href': None,
    }
def get_description(self): """Like Dulwich's `get_description`, but returns None if the file contains Git's default text "Unnamed repository[...]". """ description = super(FancyRepo, self).get_description() if description: description = force_unicode(description) if not description.startswith("Unnamed repository;"): return force_unicode(description)
def cloneurl(self):
    """Retrieve the gitweb notion of the public clone URL of this repo."""
    f = self.get_named_file("cloneurl")
    if f is not None:
        return force_unicode(f.read())
    c = self.get_config()
    try:
        return force_unicode(c.get(b"gitweb", b"url"))
    except KeyError:
        return None
def make_template_context(self, *args):
    super(IndexView, self).make_template_context(*args)

    self.context['base_href'] = url_for(
        'blob',
        repo=self.context['repo'].name,
        rev=self.context['rev'],
        path='')

    self.context['page'] = 0
    history_length = 10
    history = self.context['repo'].history(
        self.context['commit'],
        self.context['path'],
        history_length + 1,
        skip=0,
    )
    if len(history) == history_length + 1:
        # At least one more commit for next page left
        more_commits = True
        # We don't want to show the additional commit on this page
        history.pop()
    else:
        more_commits = False

    self.context.update({
        'history': history,
        'more_commits': more_commits,
    })

    try:
        (readme_filename, readme_data) = self._get_readme()
    except KeyError:
        self.context.update({
            'is_markup': None,
            'rendered_code': None,
        })
    else:
        readme_base_url = url_for(
            'raw',
            repo=self.context['repo'].name,
            rev=self.context['rev'],
            path=os.path.dirname(self.context['path']),
        )
        readme_filename = force_unicode(readme_filename)
        readme_data = force_unicode(readme_data)
        self.context.update({
            'is_markup': markup.can_render(readme_filename),
            'rendered_code': highlight_or_render(
                readme_data, readme_base_url, readme_filename),
        })
def make_template_context(self, *args):
    super(IndexView, self).make_template_context(*args)

    self.context["base_href"] = url_for(
        "blob",
        repo=self.context["repo"].namespaced_name,
        rev=self.context["rev"],
        path="",
    )

    self.context["page"] = 0
    history_length = 10
    history = self.context["repo"].history(
        self.context["commit"],
        self.context["path"],
        history_length + 1,
        skip=0,
    )
    if len(history) == history_length + 1:
        # At least one more commit for next page left
        more_commits = True
        # We don't want to show the additional commit on this page
        history.pop()
    else:
        more_commits = False

    self.context.update(
        {
            "history": history,
            "more_commits": more_commits,
        }
    )

    try:
        (readme_filename, readme_data) = self._get_readme()
    except KeyError:
        self.context.update(
            {
                "is_markup": None,
                "rendered_code": None,
            }
        )
    else:
        readme_filename = force_unicode(readme_filename)
        readme_data = force_unicode(readme_data)
        self.context.update(
            {
                "is_markup": markup.can_render(readme_filename),
                "rendered_code": highlight_or_render(readme_data, readme_filename),
            }
        )
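# Aside: the IndexView variants above page commit history with a common
# "fetch one extra row" trick -- ask for history_length + 1 entries and treat an
# overflow as proof that another page exists. A minimal, hypothetical sketch of
# that pattern in isolation (paginate, fetch_history and the page size are
# illustrative names, not klaus API):
def paginate(fetch_history, page, page_size=10):
    """Return (items, has_next) for the given zero-based page."""
    items = fetch_history(limit=page_size + 1, skip=page * page_size)
    has_next = len(items) == page_size + 1
    if has_next:
        # Drop the sentinel entry; it belongs to the next page.
        items.pop()
    return items, has_next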
def make_context(self, *args):
    super(BlobView, self).make_context(*args)

    if guess_is_binary(self.context['blob_or_tree']):
        self.context.update({
            'is_markup': False,
            'is_binary': True,
            'is_image': False,
        })
        if guess_is_image(self.context['filename']):
            self.context.update({
                'is_image': True,
            })
    else:
        render_markup = 'markup' not in request.args
        rendered_code = pygmentize(
            force_unicode(self.context['blob_or_tree'].data),
            self.context['filename'],
            render_markup
        )
        self.context.update({
            'too_large': sum(map(len, self.context['blob_or_tree'].chunked)) > 100 * 1024,
            'is_markup': markup.can_render(self.context['filename']),
            'render_markup': render_markup,
            'rendered_code': rendered_code,
            'is_binary': False,
        })
def render_code(self, render_markup):
    should_use_ctags = current_app.should_use_ctags(
        self.context['repo'], self.context['commit'])
    if should_use_ctags:
        if ctags is None:
            raise ImportError(
                "Ctags enabled but python-ctags not installed")
        ctags_base_url = url_for(
            self.view_name,
            repo=self.context['repo'].name,
            rev=self.context['rev'],
            path='')
        ctags_tagsfile = CTAGS_CACHE.get_tagsfile(
            self.context['repo'].path, self.context['commit'].id)
        ctags_args = {
            'ctags': ctags.CTags(ctags_tagsfile.encode(
                sys.getfilesystemencoding())),
            'ctags_baseurl': ctags_base_url,
        }
    else:
        ctags_args = {}

    return highlight_or_render(
        force_unicode(self.context['blob_or_tree'].data),
        self.context['filename'],
        render_markup,
        **ctags_args)
def render_code(self, render_markup):
    should_use_ctags = current_app.should_use_ctags(self.context['repo'], self.context['commit'])
    if should_use_ctags:
        if ctags is None:
            raise ImportError("Ctags enabled but python-ctags not installed")
        ctags_base_url = url_for(
            self.view_name,
            repo=self.context['repo'].name,
            rev=self.context['rev'],
            path=''
        )
        ctags_tagsfile = CTAGS_CACHE.get_tagsfile(
            self.context['repo'].path, self.context['commit'].id
        )
        ctags_args = {
            'ctags': ctags.CTags(ctags_tagsfile.encode(sys.getfilesystemencoding())),
            'ctags_baseurl': ctags_base_url,
        }
    else:
        ctags_args = {}

    return highlight_or_render(
        force_unicode(self.context['blob_or_tree'].data),
        self.context['filename'],
        render_markup,
        **ctags_args
    )
def commit_diff(self, commit):
    from klaus.utils import guess_is_binary, force_unicode

    if commit.parents:
        parent_tree = self[commit.parents[0]].tree
    else:
        parent_tree = None

    changes = self.object_store.tree_changes(parent_tree, commit.tree)
    for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in changes:
        try:
            if newsha and guess_is_binary(self[newsha]) or oldsha and guess_is_binary(self[oldsha]):
                yield {
                    "is_binary": True,
                    "old_filename": oldpath or "/dev/null",
                    "new_filename": newpath or "/dev/null",
                    "chunks": None,
                }
                continue
        except KeyError:
            # newsha/oldsha are probably related to submodules.
            # Dulwich will handle that.
            pass

        stringio = cStringIO.StringIO()
        dulwich.patch.write_object_diff(
            stringio,
            self.object_store,
            (oldpath, oldmode, oldsha),
            (newpath, newmode, newsha),
        )
        files = prepare_udiff(force_unicode(stringio.getvalue()), want_header=False)
        if not files:
            # the diff module doesn't handle deletions/additions
            # of empty files correctly.
            yield {
                "old_filename": oldpath or "/dev/null",
                "new_filename": newpath or "/dev/null",
                "chunks": [],
            }
        else:
            yield files[0]
def make_template_context(self, *args):
    super(IndexView, self).make_template_context(*args)

    self.context['base_href'] = url_for(
        'blob',
        repo=self.context['repo'].name,
        rev=self.context['rev'],
        path=''
    )

    self.context['page'] = 0
    history_length = 10
    history = self.context['repo'].history(
        self.context['commit'],
        self.context['path'],
        history_length + 1,
        skip=0,
    )
    if len(history) == history_length + 1:
        # At least one more commit for next page left
        more_commits = True
        # We don't want to show the additional commit on this page
        history.pop()
    else:
        more_commits = False

    self.context.update({
        'history': history,
        'more_commits': more_commits,
    })

    try:
        (readme_filename, readme_data) = self._get_readme()
    except KeyError:
        self.context.update({
            'is_markup': None,
            'rendered_code': None,
        })
    else:
        self.context.update({
            'is_markup': markup.can_render(readme_filename),
            'rendered_code': highlight_or_render(
                force_unicode(readme_data),
                force_unicode(readme_filename),
            ),
        })
def commit_diff(self, commit):
    from klaus.utils import guess_is_binary, force_unicode

    if commit.parents:
        parent_tree = self[commit.parents[0]].tree
    else:
        parent_tree = None

    summary = {'nfiles': 0, 'nadditions': 0, 'ndeletions': 0}
    file_changes = []  # the changes in detail

    dulwich_changes = self.object_store.tree_changes(
        parent_tree, commit.tree)
    for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in dulwich_changes:
        summary['nfiles'] += 1
        try:
            # Check for binary files -- can't show diffs for these
            if newsha and guess_is_binary(self[newsha]) or \
                    oldsha and guess_is_binary(self[oldsha]):
                file_changes.append({
                    'is_binary': True,
                    'old_filename': oldpath or '/dev/null',
                    'new_filename': newpath or '/dev/null',
                    'chunks': None
                })
                continue
        except KeyError:
            # newsha/oldsha are probably related to submodules.
            # Dulwich will handle that.
            pass

        stringio = cStringIO.StringIO()
        dulwich.patch.write_object_diff(stringio, self.object_store,
                                        (oldpath, oldmode, oldsha),
                                        (newpath, newmode, newsha))
        files = prepare_udiff(force_unicode(stringio.getvalue()),
                              want_header=False)
        if not files:
            # the diff module doesn't handle deletions/additions
            # of empty files correctly.
            file_changes.append({
                'old_filename': oldpath or '/dev/null',
                'new_filename': newpath or '/dev/null',
                'chunks': [],
                'additions': 0,
                'deletions': 0,
            })
        else:
            change = files[0]
            summary['nadditions'] += change['additions']
            summary['ndeletions'] += change['deletions']
            file_changes.append(change)

    return summary, file_changes
def cloneurl(self):
    """Retrieve the gitweb notion of the public clone URL of this repo."""
    f = self.get_named_file('cloneurl')
    if f is not None:
        return f.read()
    c = self.get_config()
    try:
        return force_unicode(c.get(b'gitweb', b'url'))
    except KeyError:
        return None
def commit_diff(self, commit):
    from klaus.utils import guess_is_binary, force_unicode

    if commit.parents:
        parent_tree = self[commit.parents[0]].tree
    else:
        parent_tree = None

    summary = {'nfiles': 0, 'nadditions': 0, 'ndeletions': 0}
    file_changes = []  # the changes in detail

    dulwich_changes = self.object_store.tree_changes(parent_tree, commit.tree)
    for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in dulwich_changes:
        summary['nfiles'] += 1
        try:
            # Check for binary files -- can't show diffs for these
            if newsha and guess_is_binary(self[newsha]) or \
                    oldsha and guess_is_binary(self[oldsha]):
                file_changes.append({
                    'is_binary': True,
                    'old_filename': oldpath or '/dev/null',
                    'new_filename': newpath or '/dev/null',
                    'chunks': None
                })
                continue
        except KeyError:
            # newsha/oldsha are probably related to submodules.
            # Dulwich will handle that.
            pass

        stringio = cStringIO.StringIO()
        dulwich.patch.write_object_diff(stringio, self.object_store,
                                        (oldpath, oldmode, oldsha),
                                        (newpath, newmode, newsha))
        files = prepare_udiff(force_unicode(stringio.getvalue()),
                              want_header=False)
        if not files:
            # the diff module doesn't handle deletions/additions
            # of empty files correctly.
            file_changes.append({
                'old_filename': oldpath or '/dev/null',
                'new_filename': newpath or '/dev/null',
                'chunks': [],
                'additions': 0,
                'deletions': 0,
            })
        else:
            change = files[0]
            summary['nadditions'] += change['additions']
            summary['ndeletions'] += change['deletions']
            file_changes.append(change)

    return summary, file_changes
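# Aside: a hedged sketch of how the (summary, file_changes) pair returned by the
# commit_diff variants above might be consumed. The print format below is
# illustrative only; klaus itself feeds these values into its templates.
def print_commit_diff(repo, commit):
    summary, file_changes = repo.commit_diff(commit)
    print("%(nfiles)d files changed, +%(nadditions)d/-%(ndeletions)d" % summary)
    for change in file_changes:
        if change.get('is_binary'):
            print("binary: %s -> %s" % (change['old_filename'],
                                        change['new_filename']))
        else:
            print("%s -> %s (%d chunks)" % (change['old_filename'],
                                            change['new_filename'],
                                            len(change['chunks'])))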
def render_code(self, render_markup):
    should_use_ctags = current_app.should_use_ctags(
        self.context["repo"], self.context["commit"])
    if should_use_ctags:
        ctags_base_url = url_for(
            self.view_name,
            repo=self.context["repo"].name,
            rev=self.context["rev"],
            path="")
        ctags_tagsfile = CTAGS_CACHE.get_tagsfile(
            self.context["repo"].path, self.context["commit"].id)
        ctags_args = {
            "ctags": ctags.CTags(ctags_tagsfile),
            "ctags_baseurl": ctags_base_url,
        }
    else:
        ctags_args = {}

    return pygmentize(
        force_unicode(self.context["blob_or_tree"].data),
        self.context["filename"],
        render_markup,
        **ctags_args
    )
def make_template_context(self, *args):
    super(FileView, self).make_template_context(*args)

    if self.context['can_render']:
        render_markup = 'markup' not in request.args
        rendered_code = pygmentize(
            force_unicode(self.context['blob_or_tree'].data),
            self.context['filename'],
            render_markup
        )
        self.context.update({
            'is_markup': markup.can_render(self.context['filename']),
            'render_markup': render_markup,
            'rendered_code': rendered_code,
        })
def make_template_context(self, *args):
    super(BlameView, self).make_template_context(*args)

    if self.context['can_render']:
        rendered_code = pygmentize(
            force_unicode(self.context['blob_or_tree'].data),
            self.context['filename'],
            render_markup=False,
        )
        line_commits = self.context['repo'].blame(self.context['commit'],
                                                  self.context['path'])
        replace_dupes(line_commits, None)
        self.context.update({
            'rendered_code': rendered_code,
            'line_commits': line_commits,
        })
def get_readme(self):
    rev = self.get_default_branch()
    commit = self.get_commit(rev)
    tree = self.get_blob_or_tree(commit, "/")
    for item in tree.items():
        if item.path.startswith("README."):
            content = self[item.sha].data
            if can_render(item.path):
                return {"rendered": True, "content": render(item.path, content)}
            else:
                return {"rendered": False, "content": force_unicode(content)}
    return None
def make_template_context(self, *args):
    super(BlobView, self).make_template_context(*args)

    if not isinstance(self.context['blob_or_tree'], Blob):
        raise NotFound("Not a blob")

    binary = guess_is_binary(self.context['blob_or_tree'])
    too_large = sum(map(len, self.context['blob_or_tree'].chunked)) > 100 * 1024

    if binary:
        self.context.update({
            'is_markup': False,
            'is_binary': True,
            'is_image': False,
        })
        if guess_is_image(self.context['filename']):
            self.context.update({
                'is_image': True,
            })
    elif too_large:
        self.context.update({
            'too_large': True,
            'is_markup': False,
            'is_binary': False,
        })
    else:
        render_markup = 'markup' not in request.args
        rendered_code = pygmentize(
            force_unicode(self.context['blob_or_tree'].data),
            self.context['filename'],
            render_markup)
        self.context.update({
            'too_large': False,
            'is_markup': markup.can_render(self.context['filename']),
            'render_markup': render_markup,
            'rendered_code': rendered_code,
            'is_binary': False,
        })
def make_context(self, *args):
    super(BlobView, self).make_context(*args)

    render_markup = 'markup' not in request.args
    rendered_code = pygmentize(force_unicode(self.context['blob'].data),
                               self.context['filename'], render_markup)
    self.context.update({
        'too_large': sum(map(len, self.context['blob'].chunked)) > 100 * 1024,
        'is_markup': markup.can_render(self.context['filename']),
        'render_markup': render_markup,
        'rendered_code': rendered_code,
        'is_binary': guess_is_binary(self.context['blob']),
        'is_image': guess_is_image(self.context['filename']),
    })
def commit_diff(self, commit):
    from klaus.utils import guess_is_binary, force_unicode

    if commit.parents:
        parent_tree = self[commit.parents[0]].tree
    else:
        parent_tree = None

    changes = self.object_store.tree_changes(parent_tree, commit.tree)
    for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in changes:
        try:
            if newsha and guess_is_binary(self[newsha]) or \
                    oldsha and guess_is_binary(self[oldsha]):
                yield {
                    'is_binary': True,
                    'old_filename': oldpath or '/dev/null',
                    'new_filename': newpath or '/dev/null',
                    'chunks': None
                }
                continue
        except KeyError:
            # newsha/oldsha are probably related to submodules.
            # Dulwich will handle that.
            pass

        stringio = StringIO.StringIO()
        dulwich.patch.write_object_diff(stringio, self.object_store,
                                        (oldpath, oldmode, oldsha),
                                        (newpath, newmode, newsha))
        files = prepare_udiff(force_unicode(stringio.getvalue()),
                              want_header=False)
        if not files:
            # the diff module doesn't handle deletions/additions
            # of empty files correctly.
            yield {
                'old_filename': oldpath or '/dev/null',
                'new_filename': newpath or '/dev/null',
                'chunks': []
            }
        else:
            yield files[0]
def render_code(self, render_markup):
    should_use_ctags = current_app.should_use_ctags(
        self.context['repo'], self.context['commit'])
    if should_use_ctags:
        ctags_base_url = url_for(self.view_name,
                                 repo=self.context['repo'].name,
                                 rev=self.context['rev'],
                                 path='')
        ctags_tagsfile = CTAGS_CACHE.get_tagsfile(
            self.context['repo'].path, self.context['commit'].id)
        ctags_args = {
            'ctags': ctags.CTags(ctags_tagsfile),
            'ctags_baseurl': ctags_base_url,
        }
    else:
        ctags_args = {}

    return pygmentize(force_unicode(self.context['blob_or_tree'].data),
                      self.context['filename'], render_markup,
                      **ctags_args)
def make_context(self, *args):
    super(BlobView, self).make_context(*args)

    if self.context['blob'] is None or not hasattr(self.context['blob'], 'chunked'):
        raise RedirectException(url_for('history',
                                        repo=self.context['repo'].name,
                                        commit_id='master',
                                        path=self.context['path']))

    render_markup = 'markup' not in request.args
    rendered_code = pygmentize(
        force_unicode(self.context['blob'].data),
        self.context['filename'],
        render_markup
    )
    self.context.update({
        'too_large': sum(map(len, self.context['blob'].chunked)) > 100 * 1024,
        'is_markup': markup.can_render(self.context['filename']),
        'render_markup': render_markup,
        'rendered_code': rendered_code,
        'is_binary': guess_is_binary(self.context['blob']),
        'is_image': guess_is_image(self.context['filename']),
    })
def get_context_data(self, **ctx):
    context = super(BlobView, self).get_context_data(**ctx)

    if not isinstance(context['blob_or_tree'], Blob):
        raise RepoException("Not a blob")

    binary = guess_is_binary(context['blob_or_tree'])
    too_large = sum(map(len, context['blob_or_tree'].chunked)) > 100 * 1024

    if binary:
        context.update({
            'is_markup': False,
            'is_binary': True,
            'is_image': False,
        })
        if guess_is_image(context['filename']):
            context.update({
                'is_image': True,
            })
    elif too_large:
        context.update({
            'too_large': True,
            'is_markup': False,
            'is_binary': False,
        })
    else:
        render_markup = 'markup' not in self.request.GET
        rendered_code = pygmentize(
            force_unicode(context['blob_or_tree'].data),
            context['filename'],
            render_markup
        )
        context.update({
            'too_large': False,
            'is_markup': markup.can_render(context['filename']),
            'render_markup': render_markup,
            'rendered_code': rendered_code,
            'is_binary': False,
        })

    return context
def make_template_context(self, *args):
    super(BlobView, self).make_template_context(*args)

    if not isinstance(self.context['blob_or_tree'], Blob):
        raise NotFound("Not a blob")

    binary = guess_is_binary(self.context['blob_or_tree'])
    too_large = sum(map(len, self.context['blob_or_tree'].chunked)) > 100 * 1024

    if binary:
        self.context.update({
            'is_markup': False,
            'is_binary': True,
            'is_image': False,
        })
        if guess_is_image(self.context['filename']):
            self.context.update({
                'is_image': True,
            })
    elif too_large:
        self.context.update({
            'too_large': True,
            'is_markup': False,
            'is_binary': False,
        })
    else:
        render_markup = 'markup' not in request.args
        rendered_code = pygmentize(
            force_unicode(self.context['blob_or_tree'].data),
            self.context['filename'],
            render_markup
        )
        self.context.update({
            'too_large': False,
            'is_markup': markup.can_render(self.context['filename']),
            'render_markup': render_markup,
            'rendered_code': rendered_code,
            'is_binary': False,
        })
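# Aside: the blob and blame views above lean on two helpers from klaus.utils.
# A rough, hypothetical sketch of what they might look like -- not klaus's
# actual implementation -- to make the control flow above easier to follow:
import mimetypes

def guess_is_binary(blob):
    # Treat a blob as binary if any chunk contains a NUL byte.
    return any(b'\0' in chunk for chunk in blob.chunked)

def guess_is_image(filename):
    mime, _ = mimetypes.guess_type(filename)
    return mime is not None and mime.startswith('image/')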
def test_covers_all_cli_options():
    manpage = force_unicode(check_output(["man", "./klaus.1"]))

    def assert_in_manpage(s):
        clean = lambda x: re.sub(r'(.\x08)|\s', '', x)
        assert clean(s) in clean(manpage), "%r not found in manpage" % s

    mock_parser = mock.Mock()
    with mock.patch('argparse.ArgumentParser') as mock_cls:
        mock_cls.return_value = mock_parser
        klaus_cli.make_parser()

    for args, kwargs in mock_parser.add_argument.call_args_list:
        if kwargs.get('metavar') == 'DIR':
            continue
        for string in args:
            assert_in_manpage(string)
        if 'help' in kwargs:
            assert_in_manpage(kwargs['help'])
        if 'choices' in kwargs:
            for choice in kwargs['choices']:
                assert_in_manpage(choice)
def get_context_data(self, **ctx):
    context = super(BlobView, self).get_context_data(**ctx)

    if not isinstance(context['blob_or_tree'], Blob):
        raise RepoException("Not a blob")

    binary = guess_is_binary(context['blob_or_tree'])
    too_large = sum(map(len, context['blob_or_tree'].chunked)) > 100 * 1024

    if binary:
        context.update({
            'is_markup': False,
            'is_binary': True,
            'is_image': False,
        })
        if guess_is_image(context['filename']):
            context.update({
                'is_image': True,
            })
    elif too_large:
        context.update({
            'too_large': True,
            'is_markup': False,
            'is_binary': False,
        })
    else:
        render_markup = 'markup' not in self.request.GET
        rendered_code = pygmentize(
            force_unicode(context['blob_or_tree'].data),
            context['filename'],
            render_markup)
        context.update({
            'too_large': False,
            'is_markup': markup.can_render(context['filename']),
            'render_markup': render_markup,
            'rendered_code': rendered_code,
            'is_binary': False,
        })

    return context
def test_covers_all_cli_options():
    manpage = force_unicode(subprocess.check_output(["man", "./klaus.1"]))

    def assert_in_manpage(s):
        clean = lambda x: re.sub('(.\\x08)|\\s', '', x)
        assert clean(s) in clean(manpage), "%r not found in manpage" % s

    mock_parser = mock.Mock()
    with mock.patch('argparse.ArgumentParser') as mock_cls:
        mock_cls.return_value = mock_parser
        klaus_cli.make_parser()

    for args, kwargs in mock_parser.add_argument.call_args_list:
        if kwargs.get('metavar') == 'DIR':
            continue
        for string in args:
            assert_in_manpage(string)
        if 'help' in kwargs:
            assert_in_manpage(kwargs['help'])
        if 'choices' in kwargs:
            for choice in kwargs['choices']:
                assert_in_manpage(choice)
def make_template_context(self, *args):
    super(BlameView, self).make_template_context(*args)

    if not isinstance(self.context['blob_or_tree'], Blob):
        raise NotFound("Not a blob")

    binary = guess_is_binary(self.context['blob_or_tree'])
    too_large = sum(map(len, self.context['blob_or_tree'].chunked)) > 100 * 1024

    if binary:
        self.context.update({
            'is_markup': False,
            'is_binary': True,
            'is_image': False,
        })
        if guess_is_image(self.context['filename']):
            self.context.update({
                'is_image': True,
            })
    elif too_large:
        self.context.update({
            'too_large': True,
            'is_markup': False,
            'is_binary': False,
        })
    else:
        self.context.update({
            'too_large': False,
            'is_markup': markup.can_render(self.context['filename']),
            'is_binary': False,
            'rendered_code': pygmentize(
                force_unicode(self.context['blob_or_tree'].data),
                self.context['filename'],
                render_markup=False,
                linenos=False),
            'authors': list(self.context["repo"].blame(self.context["commit"],
                                                       self.context["path"])),
        })
def _get_description(self):
    description = super(FancyRepo, self).get_description()
    if description:
        description = force_unicode(description)
        if not description.startswith("Unnamed repository;"):
            return force_unicode(description)
def test_ascii(self):
    self.assertEqual(u'foo', utils.force_unicode(b'foo'))
def test_utf8(self):
    self.assertEqual(u'f\xce', utils.force_unicode(b'f\xc3\x8e'))
def get_description(self):
    description_file = self.get_named_file("description")
    if description_file:
        description = force_unicode(description_file.read())
        if not description.startswith("Unnamed repository;"):
            return description
def test_ascii(self):
    self.assertEqual(u"foo", utils.force_unicode(b"foo"))
def get_description(self):
    description_file = self.get_named_file('description')
    if description_file:
        description = force_unicode(description_file.read())
        if not description.startswith("Unnamed repository;"):
            return description
def test_utf8(self):
    self.assertEqual(u"f\xce", utils.force_unicode(b"f\xc3\x8e"))
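# Aside: force_unicode itself is not defined anywhere in this section. A minimal
# Python 2-style sketch that is consistent with the two tests above (UTF-8 first,
# permissive fallback); the real helper in klaus.utils may differ:
def force_unicode(obj):
    """Decode bytes to text, leaving text untouched."""
    if isinstance(obj, unicode):  # on Python 3 this would be `str`
        return obj
    try:
        return obj.decode('utf-8')
    except UnicodeDecodeError:
        # Fall back to latin-1, which can decode any byte sequence.
        return obj.decode('latin-1', 'replace')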