def create_root(cls, site=None, title="Root", request=None, **kwargs):
    """Return the root URLPath for *site*, creating it on first use.

    When no root node exists for the site, a new Article with an initial
    ArticleRevision (built from *title* and **kwargs) is created and
    linked to a fresh root URLPath.
    """
    site = site or Site.objects.get_current()
    existing_roots = cls.objects.root_nodes().filter(site=site)
    if existing_roots:
        return existing_roots[0]
    # (get_or_create does not work for MPTT models??)
    article = Article()
    first_revision = ArticleRevision(title=title, **kwargs)
    if request:
        first_revision.set_from_request(request)
    article.add_revision(first_revision, save=True)
    article.save()
    root = cls.objects.create(site=site, article=article)
    article.add_object_relation(root)
    return root
def get_context_data(self, **kwargs):
    """Add the workgroup's description wiki article to the context.

    Redirects to the workgroup detail page when the home article is
    missing; lazily creates the description article (with an empty
    first revision) when it does not exist yet.
    """
    context = super(GroupDescriptionDetailView, self).get_context_data(**kwargs)
    workgroup = context['workgroup']
    # First be sure that the home Wiki article already exists
    try:
        home_article = Article.get_for_object(workgroup)
    except ArticleForObject.DoesNotExist:
        return redirect('workgroup-detail', slug=workgroup.slug)
    # now check that the description article exists
    try:
        desc_article = Article.get_for_object(home_article)
    except ArticleForObject.DoesNotExist:
        desc_article = Article.objects.create()
        desc_article.add_object_relation(home_article)
        first_rev = ArticleRevision(
            title="description of %s" % workgroup.name, content='')
        desc_article.add_revision(first_rev)
    context['wiki_article'] = desc_article
    return context
def get_context_data(self, **kwargs):
    """ Adds the member of the associated ML if there's one """
    context = super(GroupDetailView, self).get_context_data(**kwargs)
    workgroup = context['workgroup']
    # Mailing-list membership for the group
    context.update(lookup_ml_membership(workgroup))
    # Wiki article for the group, created lazily on first access
    try:
        wiki_article = Article.get_for_object(workgroup)
    except ArticleForObject.DoesNotExist:
        wiki_article = Article.objects.create()
        wiki_article.add_object_relation(workgroup)
        wiki_article.add_revision(
            ArticleRevision(title=workgroup.name, content=''))
    context['wiki_article'] = wiki_article
    # Random ordering is intentional (order_by('?'))
    context['group_projects'] = (
        workgroup.projects.prefetch_related('locations').all().order_by('?'))
    return context
def create_urlpath(
        cls, parent, slug, site=None, title="Root", article_kwargs=None,
        request=None, article_w_permissions=None, **revision_kwargs):
    """
    Utility function:
    Creates a new urlpath with an article and a new revision for the article

    :param article_kwargs: optional dict of keyword arguments for the
        Article constructor. Defaults to None instead of a shared mutable
        ``{}`` so state cannot leak between calls.
    :returns: A new URLPath instance
    """
    # NOTE(review): `request` and `article_w_permissions` are accepted for
    # interface compatibility but are not used by this implementation.
    if article_kwargs is None:
        article_kwargs = {}
    if not site:
        site = Site.objects.get_current()
    article = Article(**article_kwargs)
    article.add_revision(ArticleRevision(title=title, **revision_kwargs),
                         save=True)
    article.save()
    newpath = cls.objects.create(
        site=site, parent=parent, slug=slug, article=article)
    article.add_object_relation(newpath)
    return newpath
def get_lost_and_found():
    """Return (and memoize on *ns*) the lost-and-found URLPath for the
    current site, creating its group-restricted article on first use."""
    if ns.lost_and_found:
        return ns.lost_and_found
    try:
        ns.lost_and_found = URLPath.objects.get(
            slug=settings.LOST_AND_FOUND_SLUG,
            parent=URLPath.root(),
            site=site)
        return ns.lost_and_found
    except URLPath.DoesNotExist:
        pass
    # Only the owning group may read; nobody else sees orphaned articles.
    article = Article(group_read=True, group_write=False,
                      other_read=False, other_write=False)
    article.add_revision(
        ArticleRevision(
            content=_(
                'Articles who lost their parents\n'
                '===============================\n\n'
                'The children of this article have had their parents deleted. You should probably find a new home for them.'),
            title=_("Lost and found")))
    ns.lost_and_found = URLPath.objects.create(
        slug=settings.LOST_AND_FOUND_SLUG,
        parent=URLPath.root(),
        site=site,
        article=article)
    article.add_object_relation(ns.lost_and_found)
    return ns.lost_and_found
def on_article_delete(instance, *args, **kwargs):
    """Signal handler for article deletion.

    Re-parents all children of the article's URL paths under a
    lost-and-found node (created on demand), then deletes the paths.
    """
    site = get_current_site(get_current_request())
    # Get the Lost-and-found path or create a new one
    try:
        lost_and_found = URLPath.objects.get(
            slug=settings.LOST_AND_FOUND_SLUG,
            parent=URLPath.root(),
            site=site)
    except URLPath.DoesNotExist:
        orphan_banner = _(u'Articles who lost their parents\n'
                          '===============================\n\n'
                          'The children of this article have had their parents deleted. You should probably find a new home for them.')
        article = Article(group_read=True, group_write=False,
                          other_read=False, other_write=False)
        article.add_revision(
            ArticleRevision(content=orphan_banner, title=_(u"Lost and found")))
        lost_and_found = URLPath.objects.create(
            slug=settings.LOST_AND_FOUND_SLUG,
            parent=URLPath.root(),
            site=site,
            article=article)
        article.add_object_relation(lost_and_found)
    for urlpath in URLPath.objects.filter(articles__article=instance, site=site):
        # Move every child under the lost-and-found node...
        for child in urlpath.get_children():
            child.move_to(lost_and_found)
        # ...and finally delete the path itself
        # TODO: This should be unnecessary because of URLPath.article(...ondelete=models.CASCADE)
        urlpath.delete()
def initialize_wiki():
    """Ensure the wiki root and the standard list pages exist.

    Creates the root page and the investigation/protocol/pipeline/sample
    list pages when missing, refreshes the root article content from the
    bundled markdown file, and finally initializes the documentation
    pages. Unused local bindings from the existence checks were removed.
    """
    try:
        root = URLPath.root()
    except NoRootURL:
        print("Root URL not found, creating...")
        root = URLPath.create_root(
            title="QUOREM Wiki",
            content=get_content_from_file(
                "quorem/static/markdown/docs/root.md"))
    # Refresh the root article content from the file.
    article_revision = ArticleRevision(
        title=root.article.current_revision.title,
        content=get_content_from_file("quorem/static/markdown/docs/root.md"))
    root.article.add_revision(article_revision)
    try:
        URLPath.get_by_path("investigation")
    except URLPath.DoesNotExist:
        print("Investigation page not found, creating...")
        URLPath.create_urlpath(
            root, slug="investigation", title="List of Investigations",
            content="This page lists the investigations that are present in your QUOREM database. You may edit anything on this page, except the Automated Report section.\r\n\r\n")
    try:
        URLPath.get_by_path("protocol")
    except URLPath.DoesNotExist:
        print("Protocol page not found, creating...")
        URLPath.create_urlpath(
            root, slug="protocol", title="List of Protocols",
            content="This page lists the protocols that are present in your QUOREM database. You may edit anything on this page, except the Automated Report section.\r\n\r\n")
    try:
        URLPath.get_by_path("pipeline")
    except URLPath.DoesNotExist:
        print("Pipeline page not found, creating...")
        URLPath.create_urlpath(
            root, slug="pipeline", title="List of Pipelines",
            content="This page lists the pipelines that are present in your QUOREM database. You may edit anything on this page, except the Automated Report section.\r\n\r\n")
    try:
        URLPath.get_by_path("sample")
    except URLPath.DoesNotExist:
        print("Sample page not found, creating...")
        URLPath.create_urlpath(
            root, slug="sample", title="List of Samples",
            content="This page lists the samples that are present in your QUOREM database. You may edit anything on this page, except the Automated Report section.\r\n\r\n")
    initialize_documentation(root)
def refresh_automated_report(slug, pk=None):
    """Regenerate the '# Automated Report' section of a wiki article.

    Looks up the article for *slug* (or *slug/pk*), creating it when an
    object with that pk exists but no wiki page does, then rewrites the
    content replacing everything under the '# Automated Report' heading
    (up to the next h1) with a freshly generated report, and saves the
    result as a new revision.

    :raises ValueError: when pk does not match any object, or the slug's
        root page is missing (wiki not initialized).

    Fixes over the previous version: boolean `and` instead of bitwise
    `&`, narrowed the bare `except:`, removed unused model lookups, and
    repaired the page-content string ("automatically generated" was
    missing its space).
    """
    Investigation = apps.get_model('db.Investigation')
    Sample = apps.get_model('db.Sample')
    slug_to_model = {'investigation': Investigation, 'sample': Sample}
    if pk is None:
        article = URLPath.get_by_path(slug).article
    else:
        try:
            article = URLPath.get_by_path("%s/%d" % (slug, pk)).article
        except URLPath.DoesNotExist:
            try:
                obj = slug_to_model[slug].objects.get(pk=pk)
            except slug_to_model[slug].DoesNotExist:
                raise ValueError("No such pk found, no wiki entry to update")
            try:
                root = URLPath.get_by_path(slug)
            except URLPath.DoesNotExist:
                raise ValueError(
                    "No such slug found, is your wiki initialized?")
            print("Creating new article")
            try:
                # Prefer the object's name as the title when the model
                # declares a "name" field; otherwise fall back to slug+pk.
                slug_to_model[slug]._meta.get_field("name")
                title = obj.name
            except Exception:
                title = "%s %d" % (slug, pk)
            article = URLPath.create_urlpath(
                root, slug="%d" % (pk,), title=title,
                content="This page has been automatically "
                        "generated. You may edit at will").article
    current_content = article.current_revision.content
    md = markdown.Markdown()
    # convert() is called for its side effect: it populates md.lines,
    # which is iterated below. The HTML output itself is unused.
    md.convert(current_content)
    new_content = ""
    skip_until_h1 = False
    added = False
    for line in md.lines:
        # While skipping the old report, drop lines until the next h1.
        if skip_until_h1 and not line.startswith("# "):
            continue
        elif skip_until_h1 and line.startswith("# "):
            skip_until_h1 = False
        if line == '# Automated Report':
            new_content += get_wiki_report(slug, pk=pk)
            skip_until_h1 = True
            added = True
        else:
            new_content += line + "\r\n"
    if not added:
        # No existing report section: append one at the end.
        new_content += get_wiki_report(slug, pk=pk)
    article_revision = ArticleRevision(
        title=article.current_revision.title, content=new_content)
    article.add_revision(article_revision)
def create_article(cls, parent, slug, site=None, title="Root",
                   article_kwargs=None, **kwargs):
    """Utility function: Create a new urlpath with an article and a new
    revision for the article.

    :param article_kwargs: optional dict of keyword arguments for the
        Article constructor. Defaults to None instead of a shared mutable
        ``{}`` so state cannot leak between calls.
    :returns: the new URLPath instance.
    """
    if article_kwargs is None:
        article_kwargs = {}
    if not site:
        site = get_current_site(get_current_request())
    article = Article(**article_kwargs)
    article.add_revision(ArticleRevision(title=title, **kwargs), save=True)
    article.save()
    newpath = cls.objects.create(site=site, parent=parent, slug=slug,
                                 article=article)
    article.add_object_relation(newpath)
    return newpath
def update_wiki(self):
    """Push self.content to the wiki page at `prefix/slug`, adding a new
    revision when the page exists or creating the page otherwise."""
    if self.slug == "root":
        return  # Already made in get_root() if not made
    try:
        page = URLPath.get_by_path(self.prefix + "/" + self.slug)
        # Create a new revision and update with the template content
        target = page.article
        target.add_revision(ArticleRevision(
            title=target.current_revision.title,
            content=self.content))
    except URLPath.DoesNotExist:
        print("Creating wiki page for slug %s prefix %s" % (self.slug, self.prefix))
        URLPath.create_urlpath(self.root, slug=self.slug,
                               title=self.title, content=self.content)
def create_article(bustopic, title="Root", article_kwargs=None, content="",
                   user_message="", request=None):
    """Utility function: Create a new urlpath with an article and a new
    revision for the article.

    Attaches the new article to *bustopic* and returns the created
    ArticleRevision.

    :param article_kwargs: optional dict of keyword arguments for the
        Article constructor. Defaults to None instead of a shared mutable
        ``{}`` so state cannot leak between calls.
    """
    if article_kwargs is None:
        article_kwargs = {}
    article = Article(**article_kwargs)
    revision = ArticleRevision()
    revision.content = content
    revision.user_message = user_message
    revision.deleted = False
    if request:
        revision.set_from_request(request)
    else:
        # No request available: attribute the revision to the default user.
        revision.ip_address = None
        revision.user = get_default_user()
    article.add_revision(revision, save=True)
    article.save()
    bustopic.article = article
    bustopic.save()
    return revision
def update_wiki(self):
    """Push self.content to the wiki page at self.slug, adding a new
    revision if it exists or creating the page under self.root."""
    try:
        print("Retrieving slug %s" % (self.slug,))
        target = URLPath.get_by_path(self.slug).article
        # Create a new revision and update with the template content
        target.add_revision(ArticleRevision(
            title=target.current_revision.title,
            content=self.content))
    except URLPath.DoesNotExist:
        print("Creating a new article")
        if self.root is None:
            self._refresh_root()
        print("Using root %s" % (self.root,))
        print("Pushing to slug %s" % (self.slug,))
        # Only the last path component becomes the new page's slug.
        base_slug = self.slug.split("/")[-1]
        URLPath.create_urlpath(self.root, slug=base_slug,
                               title=self.title, content=self.content)
def import_page(self, api, site, page, current_site, url_root,
                user_matching, replace_existing):
    """Import one MediaWiki *page* into the wiki as an Article + URLPath.

    Converts the page content from MediaWiki markup to markdown with
    pypandoc, imports up to the last two history revisions, and records
    old-link/new-link mappings on self. Skips pages that already exist
    unless *replace_existing* is true.
    """
    import pypandoc

    # Filter titles, to avoid stranges charaters.
    title = only_printable(page.title)
    urltitle = slugify(only_printable(urllib.unquote(page.urltitle))[:50])
    # Disambiguate colliding slugs by appending an increasing counter.
    added = 1
    while urltitle in self.articles_worked_on:
        title = only_printable(page.title) + " " + str(added)
        urltitle = only_printable(
            slugify((urllib.unquote(page.urltitle))[:47] + " " + str(added)))
        added += 1
    self.articles_worked_on.append(urltitle)
    print("Working on %s (%s)" % (title, urltitle))
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[
            page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print("\tAlready existing, skipping...")
            return
        print("\tDestorying old version of the article")
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Import the last two history entries, oldest first.
    for history_page in page.getHistory()[-2:][::-1]:
        try:
            if history_page['user'] in user_matching:
                user = get_user_model().objects.get(
            pk=user_matching[history_page['user']])
            else:
                user = get_user_model().objects.get(
            username=history_page['user'])
        except get_user_model().DoesNotExist:
            print(
            "\tCannot found user with username=%s. Use --user-matching \"%s:<user_pk>\" to manualy set it" % (
            history_page['user'],
            history_page['user'],
            ))
            user = None
        article_revision = ArticleRevision()
        article_revision.content = pypandoc.convert(
            history_page['*'], 'md', 'mediawiki')
        article_revision.title = title
        article_revision.user = user
        article_revision.owner = user
        article.add_revision(article_revision, save=True)
        # add_revision stamps "now"; overwrite with the original
        # MediaWiki timestamp and re-save.
        article_revision.created = history_page['timestamp']
        article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.content = pypandoc.convert(
        striptags(page.getWikiText(True, True).decode('utf-8')).replace(
            '__NOEDITSECTION__', '').replace('__NOTOC__', ''),
        'md', 'mediawiki')
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(site=current_site, parent=url_root,
                                   slug=urltitle, article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[
        page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))
def create_revision(bustopic, title, content, summary, request):
    """Create, save and return a new ArticleRevision for *bustopic*'s
    article, inheriting from its current revision. Falls back to the
    current revision's title when *title* is falsy."""
    revision = ArticleRevision()
    revision.inherit_predecessor(bustopic.article)
    revision.title = title if title else bustopic.article.current_revision.title
    revision.content = content
    revision.user_message = summary
    revision.deleted = False
    revision.set_from_request(request)
    revision.save()
    return revision
def import_page(self, api, site, page, current_site, url_root, user_matching, replace_existing):
    """Import one MediaWiki *page* into the wiki as an Article + URLPath.

    Python 2 variant: converts the page content from MediaWiki markup to
    markdown with pypandoc, imports up to the last two history revisions,
    and records old-link/new-link mappings on self. Skips pages that
    already exist unless *replace_existing* is true.
    """
    import pypandoc

    # Filter titles, to avoid stranges charaters.
    title = only_printable(page.title)
    urltitle = slugify(only_printable(urllib.unquote(page.urltitle))[:50])
    # Disambiguate colliding slugs by appending an increasing counter.
    added = 1
    while urltitle in self.articles_worked_on:
        title = only_printable(page.title) + " " + str(added)
        urltitle = only_printable(slugify((urllib.unquote(page.urltitle))[:47] + " " + str(added)))
        added += 1
    self.articles_worked_on.append(urltitle)
    print "Working on %s (%s)" % (title, urltitle)
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print "\tAlready existing, skipping..."
            return
        print "\tDestorying old version of the article"
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Import the last two history entries, oldest first.
    for history_page in page.getHistory()[-2:][::-1]:
        try:
            if history_page['user'] in user_matching:
                user = get_user_model().objects.get(pk=user_matching[history_page['user']])
            else:
                user = get_user_model().objects.get(username=history_page['user'])
        except get_user_model().DoesNotExist:
            print "\tCannot found user with username=%s. Use --user-matching \"%s:<user_pk>\" to manualy set it" % (history_page['user'], history_page['user'], )
            user = None
        article_revision = ArticleRevision()
        article_revision.content = pypandoc.convert(history_page['*'], 'md', 'mediawiki')
        article_revision.title = title
        article_revision.user = user
        article_revision.owner = user
        article.add_revision(article_revision, save=True)
        # add_revision stamps "now"; overwrite with the original
        # MediaWiki timestamp and re-save.
        article_revision.created = history_page['timestamp']
        article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.content = pypandoc.convert(striptags(page.getWikiText(True, True).decode('utf-8')).replace('__NOEDITSECTION__', '').replace('__NOTOC__', ''), 'md', 'mediawiki')
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(site=current_site, parent=url_root, slug=urltitle, article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))
def import_page(
    self,
    api,
    site,
    page,
    current_site,
    url_root,
    user_matching,
    replace_existing,
):
    """Import one MediaWiki *page* into the wiki as an Article + URLPath.

    Transliterates Scandinavian characters in the slug, converts the page
    text with refactor(), strips category links, and imports only the
    most recent history entry. Skips existing pages unless
    *replace_existing* is true.
    """
    from wikitools.pagelist import listFromQuery

    # Filter titles, to avoid stranges charaters.
    title = page.title
    urltitle = title
    # Transliterate Scandinavian characters before slugifying.
    urltitle = urltitle.replace("ø", "o")
    urltitle = urltitle.replace("æ", "ae")
    urltitle = urltitle.replace("å", "a")
    urltitle = urltitle.replace("Ø", "O")
    urltitle = urltitle.replace("Æ", "AE")
    urltitle = urltitle.replace("Å", "A")
    urltitle = only_printable(urltitle)
    urltitle = slugify(only_printable(urllib.parse.unquote(urltitle))[:50])
    # Disambiguate colliding slugs by appending an increasing counter.
    added = 1
    while urltitle in self.articles_worked_on:
        title = only_printable("{} {}".format(page.title, added))
        urltitle = slugify(
            "{} {}".format(only_printable(urllib.parse.unquote(page.urltitle))[:47], added)
        )
        added += 1
    self.articles_worked_on.append(urltitle)
    print("Working on {} ({})".format(title, urltitle))
    print(url_root)
    print(urltitle)
    print()
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[
            page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print("\tAlready existing, skipping...")
            return
        print("\tDestorying old version of the article")
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Only the most recent history entry is imported.
    history_page = page.getHistory()[0]
    try:
        if history_page['user'] in user_matching:
            user = get_user_model().objects.get(
                pk=user_matching[
                    history_page['user']])
        else:
            user = get_user_model().objects.get(
                username=history_page['user'])
    except get_user_model().DoesNotExist:
        user = None
    except Exception:
        # NOTE(review): the message was split across lines in the
        # mangled source; reconstructed as a single string.
        print("Couldn't find user. Something is weird.")
    article_revision = ArticleRevision()
    article_revision.content = refactor(page.getWikiText())
    article_revision.title = title
    article_revision.user = user
    article_revision.owner = user
    # Strip MediaWiki category links (English and Norwegian).
    article_revision.content = re.sub("\[\[.*(Category|Kategori).*\]\]\n",
                                      "",
                                      article_revision.content)
    article.add_revision(article_revision, save=True)
    # add_revision stamps "now"; overwrite with the original timestamp.
    article_revision.created = history_page['timestamp']
    article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(
        site=current_site,
        parent=url_root,
        slug=urltitle,
        article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[
        page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))
def import_page(
    self,
    api,
    site,
    page,
    current_site,
    url_root,
    user_matching,
    replace_existing,
):
    """Import one MediaWiki *page* into the wiki as an Article + URLPath.

    Near-duplicate of the variant above: transliterates Scandinavian
    characters in the slug, converts the page text with refactor(),
    strips category links, and imports only the most recent history
    entry. Skips existing pages unless *replace_existing* is true.
    """
    from wikitools.pagelist import listFromQuery

    # Filter titles, to avoid stranges charaters.
    title = page.title
    urltitle = title
    # Transliterate Scandinavian characters before slugifying.
    urltitle = urltitle.replace("ø", "o")
    urltitle = urltitle.replace("æ", "ae")
    urltitle = urltitle.replace("å", "a")
    urltitle = urltitle.replace("Ø", "O")
    urltitle = urltitle.replace("Æ", "AE")
    urltitle = urltitle.replace("Å", "A")
    urltitle = only_printable(urltitle)
    urltitle = slugify(only_printable(urllib.parse.unquote(urltitle))[:50])
    # Disambiguate colliding slugs by appending an increasing counter.
    added = 1
    while urltitle in self.articles_worked_on:
        title = only_printable("{} {}".format(page.title, added))
        urltitle = slugify("{} {}".format(
            only_printable(urllib.parse.unquote(page.urltitle))[:47], added))
        added += 1
    self.articles_worked_on.append(urltitle)
    print("Working on {} ({})".format(title, urltitle))
    print(url_root)
    print(urltitle)
    print()
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[
            page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print("\tAlready existing, skipping...")
            return
        print("\tDestorying old version of the article")
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Only the most recent history entry is imported.
    history_page = page.getHistory()[0]
    try:
        if history_page['user'] in user_matching:
            user = get_user_model().objects.get(
                pk=user_matching[history_page['user']])
        else:
            user = get_user_model().objects.get(
                username=history_page['user'])
    except get_user_model().DoesNotExist:
        user = None
    except Exception:
        # NOTE(review): the message was split across lines in the
        # mangled source; reconstructed as a single string.
        print("Couldn't find user. Something is weird.")
    article_revision = ArticleRevision()
    article_revision.content = refactor(page.getWikiText())
    article_revision.title = title
    article_revision.user = user
    article_revision.owner = user
    # Strip MediaWiki category links (English and Norwegian).
    article_revision.content = re.sub("\[\[.*(Category|Kategori).*\]\]\n",
                                      "",
                                      article_revision.content)
    article.add_revision(article_revision, save=True)
    # add_revision stamps "now"; overwrite with the original timestamp.
    article_revision.created = history_page['timestamp']
    article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(site=current_site,
                                   parent=url_root,
                                   slug=urltitle,
                                   article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[
        page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))