def on_article_delete(instance, *args, **kwargs):
    """Signal handler run when an Article is deleted.

    Deletes every URLPath attached to the deleted article, after first
    moving each path's children under a "lost-and-found" node so they are
    not orphaned in the URL tree.
    """
    # If an article is deleted, then throw out its URLPaths
    # But move all descendants to a lost-and-found node.
    site = Site.objects.get_current()
    # Get the Lost-and-found path or create a new one
    try:
        lost_and_found = URLPath.objects.get(slug=settings.LOST_AND_FOUND_SLUG,
                                             parent=URLPath.root(),
                                             site=site)
    except URLPath.DoesNotExist:
        # First deletion on this site: create the lost-and-found article
        # (group-readable only) and its URLPath under the wiki root.
        article = Article(group_read=True,
                          group_write=False,
                          other_read=False,
                          other_write=False)
        article.add_revision(
            ArticleRevision(
                content=_(
                    u"Articles who lost their parents\n"
                    "===============================\n\n"
                    "The children of this article have had their parents deleted. You should probably find a new home for them."
                ),
                title=_(u"Lost and found"),
            )
        )
        lost_and_found = URLPath.objects.create(
            slug=settings.LOST_AND_FOUND_SLUG,
            parent=URLPath.root(),
            site=site,
            article=article
        )
        article.add_object_relation(lost_and_found)
    for urlpath in URLPath.objects.filter(articles__article=instance, site=site):
        # Delete the children
        for child in urlpath.get_children():
            child.move_to(lost_and_found)
        # ...and finally delete the path itself
        # TODO: This should be unnecessary because of URLPath.article(...ondelete=models.CASCADE)
        urlpath.delete()
def get_context_data(self, **kwargs):
    """Add the workgroup's description wiki article to the template context.

    Redirects to the workgroup detail page when the group's home wiki
    article does not exist yet; lazily creates an empty description
    article (attached to the home article) on first access.
    """
    context = super(GroupDescriptionDetailView, self).get_context_data(**kwargs)
    workgroup = context['workgroup']
    # First be sure that the home Wiki article already exists
    try:
        home_article = Article.get_for_object(workgroup)
    except ArticleForObject.DoesNotExist:
        # No home article yet: send the user back to the detail view.
        return redirect('workgroup-detail', slug=workgroup.slug)
    # now check that the description article exists
    try:
        desc_article = Article.get_for_object(home_article)
    except ArticleForObject.DoesNotExist:
        # Create an empty description article related to the home article.
        desc_article = Article.objects.create()
        desc_article.add_object_relation(home_article)
        revision = ArticleRevision(title="description of %s" % workgroup.name,
                                   content='')
        desc_article.add_revision(revision)
    context.update({
        'wiki_article': desc_article,
    })
    return context
def get_lost_and_found():
    """Return the lost-and-found URLPath, creating and caching it on first use.

    NOTE(review): reads ``ns`` and ``site`` from the enclosing scope —
    presumably a handler-local namespace and the current site; confirm
    against the surrounding function.
    """
    # Cached from a previous call within the same operation.
    if ns.lost_and_found:
        return ns.lost_and_found
    try:
        ns.lost_and_found = URLPath.objects.get(
            slug=settings.LOST_AND_FOUND_SLUG,
            parent=URLPath.root(),
            site=site)
    except URLPath.DoesNotExist:
        # Create the lost-and-found article (group-readable only) plus its
        # URLPath under the wiki root.
        article = Article(group_read=True, group_write=False,
                          other_read=False, other_write=False)
        article.add_revision(
            ArticleRevision(
                content=_(
                    'Articles who lost their parents\n'
                    '===============================\n\n'
                    'The children of this article have had their parents deleted. You should probably find a new home for them.'),
                title=_("Lost and found")))
        ns.lost_and_found = URLPath.objects.create(
            slug=settings.LOST_AND_FOUND_SLUG,
            parent=URLPath.root(),
            site=site,
            article=article)
        article.add_object_relation(ns.lost_and_found)
    return ns.lost_and_found
def dispatch(self, request, workgroup_slug, *args, **kwargs):
    """Resolve the workgroup's description article, then delegate to the
    parent wiki edit dispatch with that article.
    """
    self.workgroup = get_object_or_404(WorkGroup, slug=workgroup_slug)
    # Two-level indirection: the workgroup owns a home article, which in
    # turn owns the description article that is actually edited here.
    home_article = Article.get_for_object(self.workgroup)
    description_article = Article.get_for_object(home_article)
    self.sidebar_plugins = plugin_registry.get_sidebar()
    self.sidebar = []
    return super(WikiEdit, self).dispatch(
        request, description_article, *args, **kwargs)
def dispatch(self, request, workgroup_slug, *args, **kwargs):
    """Look up the workgroup's description article and hand it to the
    parent wiki edit view's dispatch.
    """
    self.workgroup = get_object_or_404(WorkGroup, slug=workgroup_slug)
    # Double lookup: workgroup -> home article -> description article.
    article = Article.get_for_object(Article.get_for_object(
        self.workgroup))
    self.sidebar_plugins = plugin_registry.get_sidebar()
    self.sidebar = []
    return super(WikiEdit, self).dispatch(request, article, *args, **kwargs)
def create_article(cls, parent, slug, is_dir, site=None, title="Root",
                   article_kwargs=None, request=None,
                   article_w_permissions=None, **revision_kwargs):
    """
    Utility function:
    Creates a new urlpath with an article and a new revision for the article

    :param parent: parent URLPath of the new path
    :param slug: slug for the new path
    :param is_dir: stored on the article as ``article.is_dir``
    :param site: target Site; defaults to the current site
    :param title: title of the initial ArticleRevision
    :param article_kwargs: extra kwargs forwarded to the Article constructor
    :param revision_kwargs: extra kwargs forwarded to the ArticleRevision
    :returns: A new URLPath instance
    """
    # Fix: `article_kwargs={}` was a shared mutable default; use a None
    # sentinel so mutations cannot leak between calls.
    if article_kwargs is None:
        article_kwargs = {}
    if not site:
        site = Site.objects.get_current()
    article = Article(**article_kwargs)
    article.add_revision(ArticleRevision(title=title, **revision_kwargs),
                         save=True)
    article.is_dir = is_dir
    article.save()
    newpath = cls.objects.create(site=site, parent=parent, slug=slug,
                                 article=article)
    article.add_object_relation(newpath)
    return newpath
def get_context_data(self, **kwargs):
    """ Adds the member of the associated ML if there's one """
    context = super(GroupDetailView, self).get_context_data(**kwargs)
    workgroup = context['workgroup']
    # Look up mailing list members
    context.update(lookup_ml_membership(workgroup))
    # Wiki
    try:
        article = Article.get_for_object(workgroup)
    except ArticleForObject.DoesNotExist:
        # Lazily create an empty wiki article for the group on first view.
        article = Article.objects.create()
        article.add_object_relation(workgroup)
        revision = ArticleRevision(title=workgroup.name, content='')
        article.add_revision(revision)
    context['wiki_article'] = article
    context['group_projects'] = workgroup.projects.all()
    return context
def get_context_data(self, **kwargs):
    """ Adds the member of the associated ML if there's one """
    context = super(GroupDetailView, self).get_context_data(**kwargs)
    workgroup = context['workgroup']
    # Look up mailing list members
    context.update(lookup_ml_membership(workgroup))
    # Wiki
    try:
        article = Article.get_for_object(workgroup)
    except ArticleForObject.DoesNotExist:
        # Lazily create an empty wiki article for the group on first view.
        article = Article.objects.create()
        article.add_object_relation(workgroup)
        revision = ArticleRevision(title=workgroup.name, content='')
        article.add_revision(revision)
    context['wiki_article'] = article
    # Random ordering ('?') — presumably to vary the projects shown on
    # each page load; prefetch locations to avoid N+1 queries.
    context['group_projects'] = workgroup.projects.prefetch_related(
        'locations').all().order_by('?')
    return context
def full_dehydrate(self, bundle, for_list=False):
    """Extend the default dehydrate with thumbnail, tags and wiki article.

    List views get only the small "thumb"; detail views (for_list=False)
    additionally get tags, a larger "image" and the rendered wiki article.
    """
    bundle = ModelResource.full_dehydrate(self, bundle, for_list)
    if bundle.obj.picture:
        thumbnailer = get_thumbnailer(bundle.obj.picture)
        thumbnail_options = {
            'size': (ResizeThumbApi.width, ResizeThumbApi.height)
        }
        bundle.data["thumb"] = thumbnailer.get_thumbnail(
            thumbnail_options).url
    else:
        bundle.data["thumb"] = None
    if for_list is False:
        bundle.data["tags"] = [
            tag.name for tag in Tag.objects.get_for_object(bundle.obj)
        ]
        # `thumbnailer` is defined above whenever a picture exists, so this
        # guard keeps the reference safe.
        # NOTE(review): width is used for both dimensions — presumably a
        # square display image; confirm this is intentional.
        if (bundle.obj.picture):
            thumbnail_options = {
                'size': (ResizeDisplay.width, ResizeDisplay.width)
            }
            bundle.data["image"] = thumbnailer.get_thumbnail(
                thumbnail_options).url
        else:
            bundle.data["image"] = None
        try:
            bundle.data["article"] = Article.get_for_object(
                bundle.obj).render()
        except ArticleForObject.DoesNotExist:
            bundle.data["article"] = None
    return bundle
def create_urlpath(
        cls, parent, slug, site=None, title="Root", article_kwargs=None,
        request=None, article_w_permissions=None, **revision_kwargs):
    """
    Utility function:
    Creates a new urlpath with an article and a new revision for the article

    :param parent: parent URLPath of the new path
    :param slug: slug for the new path
    :param site: target Site; defaults to the current site
    :param title: title of the initial ArticleRevision
    :param article_kwargs: extra kwargs forwarded to the Article constructor
    :param revision_kwargs: extra kwargs forwarded to the ArticleRevision
    :returns: A new URLPath instance
    """
    # Fix: `article_kwargs={}` was a shared mutable default; use a None
    # sentinel so mutations cannot leak between calls.
    if article_kwargs is None:
        article_kwargs = {}
    if not site:
        site = Site.objects.get_current()
    article = Article(**article_kwargs)
    article.add_revision(ArticleRevision(title=title, **revision_kwargs),
                         save=True)
    article.save()
    newpath = cls.objects.create(
        site=site, parent=parent, slug=slug, article=article)
    article.add_object_relation(newpath)
    return newpath
def create_article(cls, parent, slug, site=None, title="Root",
                   article_kwargs=None, **kwargs):
    """Utility function: Create a new urlpath with an article and a new
    revision for the article.

    :param parent: parent URLPath of the new path
    :param slug: slug for the new path
    :param site: target site; defaults to the site of the current request
    :param title: title of the initial ArticleRevision
    :param article_kwargs: extra kwargs forwarded to the Article constructor
    :param kwargs: extra kwargs forwarded to the ArticleRevision
    :returns: the new URLPath instance
    """
    # Fix: `article_kwargs={}` was a shared mutable default; use a None
    # sentinel so mutations cannot leak between calls.
    if article_kwargs is None:
        article_kwargs = {}
    if not site:
        site = get_current_site(get_current_request())
    article = Article(**article_kwargs)
    article.add_revision(ArticleRevision(title=title, **kwargs), save=True)
    article.save()
    newpath = cls.objects.create(site=site, parent=parent, slug=slug,
                                 article=article)
    article.add_object_relation(newpath)
    return newpath
def create_article(bustopic, title="Root", article_kwargs=None, content="",
                   user_message="", request=None):
    """Utility function: Create a new article with one revision and attach
    it to *bustopic* (saved via ``bustopic.article``).

    :param bustopic: object that receives the new article on its
        ``article`` attribute
    :param title: unused here beyond the signature kept for compatibility
        with sibling helpers — NOTE(review): the title is never applied to
        the revision; confirm whether that is intentional
    :param article_kwargs: extra kwargs forwarded to the Article constructor
    :param content: body of the initial revision
    :param user_message: commit message of the initial revision
    :param request: when given, revision author/IP are taken from it;
        otherwise the default user is used
    :returns: the created ArticleRevision
    """
    # Fix: `article_kwargs={}` was a shared mutable default; use a None
    # sentinel so mutations cannot leak between calls.
    if article_kwargs is None:
        article_kwargs = {}
    article = Article(**article_kwargs)
    ar = ArticleRevision()
    ar.content = content
    ar.user_message = user_message
    ar.deleted = False
    if request:
        ar.set_from_request(request)
    else:
        ar.ip_address = None
        ar.user = get_default_user()
    article.add_revision(ar, save=True)
    article.save()
    bustopic.article = article
    bustopic.save()
    return ar
def on_article_delete(instance, *args, **kwargs):
    """Signal handler run when an Article is deleted.

    Deletes every URLPath attached to the deleted article, after first
    moving each path's children under a "lost-and-found" node so they are
    not orphaned in the URL tree.
    """
    # If an article is deleted, then throw out its URLPaths
    # But move all descendants to a lost-and-found node.
    site = get_current_site(get_current_request())
    # Get the Lost-and-found path or create a new one
    try:
        lost_and_found = URLPath.objects.get(slug=settings.LOST_AND_FOUND_SLUG,
                                             parent=URLPath.root(),
                                             site=site)
    except URLPath.DoesNotExist:
        # First deletion on this site: create the lost-and-found article
        # (group-readable only) and its URLPath under the wiki root.
        article = Article(group_read = True,
                          group_write = False,
                          other_read = False,
                          other_write = False)
        article.add_revision(ArticleRevision(
            content=_(u'Articles who lost their parents\n'
                      '===============================\n\n'
                      'The children of this article have had their parents deleted. You should probably find a new home for them.'),
            title=_(u"Lost and found")))
        lost_and_found = URLPath.objects.create(slug=settings.LOST_AND_FOUND_SLUG,
                                                parent=URLPath.root(),
                                                site=site,
                                                article=article)
        article.add_object_relation(lost_and_found)
    for urlpath in URLPath.objects.filter(articles__article=instance, site=site):
        # Delete the children
        for child in urlpath.get_children():
            child.move_to(lost_and_found)
        # ...and finally delete the path itself
        # TODO: This should be unnecessary because of URLPath.article(...ondelete=models.CASCADE)
        urlpath.delete()
def create_root(cls, site=None, title="Root", **kwargs):
    """Return the root URLPath for *site*, creating it (with a fresh
    Article and initial revision) if no root node exists yet.

    :param site: target Site; defaults to the current site
    :param title: title of the initial ArticleRevision
    :param kwargs: extra kwargs forwarded to the ArticleRevision
    """
    if not site:
        site = Site.objects.get_current()
    root_nodes = cls.objects.root_nodes().filter(site=site)
    if not root_nodes:
        # (get_or_create does not work for MPTT models??)
        article = Article()
        article.add_revision(ArticleRevision(title=title, **kwargs), save=True)
        article.save()
        root = cls.objects.create(site=site, article=article)
        article.add_object_relation(root)
    else:
        root = root_nodes[0]
    return root
def create_article(cls, parent, slug, site=None, title="Root",
                   article_kwargs=None, **kwargs):
    """Utility function: Create a new urlpath with an article and a new
    revision for the article.

    :param parent: parent URLPath of the new path
    :param slug: slug for the new path
    :param site: target Site; defaults to the current site
    :param title: title of the initial ArticleRevision
    :param article_kwargs: extra kwargs forwarded to the Article constructor
    :param kwargs: extra kwargs forwarded to the ArticleRevision
    :returns: the new URLPath instance
    """
    # Fix: `article_kwargs={}` was a shared mutable default; use a None
    # sentinel so mutations cannot leak between calls.
    if article_kwargs is None:
        article_kwargs = {}
    if not site:
        site = Site.objects.get_current()
    article = Article(**article_kwargs)
    article.add_revision(ArticleRevision(title=title, **kwargs), save=True)
    article.save()
    newpath = cls.objects.create(site=site, parent=parent, slug=slug,
                                 article=article)
    article.add_object_relation(newpath)
    return newpath
def create_root(cls, site=None, title="Root", request=None, **kwargs):
    """Return the root URLPath for *site*, creating it (with a fresh
    Article and initial revision) if no root node exists yet.

    :param site: target Site; defaults to the current site
    :param title: title of the initial ArticleRevision
    :param request: when given, the revision's author/IP metadata are
        filled in from the request
    :param kwargs: extra kwargs forwarded to the ArticleRevision
    """
    if not site:
        site = Site.objects.get_current()
    root_nodes = cls.objects.root_nodes().filter(site=site)
    if not root_nodes:
        article = Article()
        revision = ArticleRevision(title=title, **kwargs)
        if request:
            revision.set_from_request(request)
        article.add_revision(revision, save=True)
        article.save()
        root = cls.objects.create(site=site, article=article)
        article.add_object_relation(root)
    else:
        root = root_nodes[0]
    return root
def get_context_data(self, **kwargs):
    """Add the workgroup's description wiki article to the template context.

    Redirects to the workgroup detail page when the group's home wiki
    article does not exist yet; lazily creates an empty description
    article (attached to the home article) on first access.
    """
    context = super(GroupDescriptionDetailView, self).get_context_data(**kwargs)
    workgroup = context['workgroup']
    # First be sure that the home Wiki article already exists
    try:
        home_article = Article.get_for_object(workgroup)
    except ArticleForObject.DoesNotExist:
        # No home article yet: send the user back to the detail view.
        return redirect('workgroup-detail', slug=workgroup.slug)
    # now check that the description article exists
    try:
        desc_article = Article.get_for_object(home_article)
    except ArticleForObject.DoesNotExist:
        # Create an empty description article related to the home article.
        desc_article = Article.objects.create()
        desc_article.add_object_relation(home_article)
        revision = ArticleRevision(title="description of %s" %workgroup.name,
                                   content='')
        desc_article.add_revision(revision)
    context.update({
        'wiki_article' : desc_article,
    })
    return context
def full_dehydrate(self, bundle, for_list=False):
    """Extend the default dehydrate with thumbnail, tags and wiki article.

    List views get only the small "thumb"; detail views (for_list=False)
    additionally get tags, a larger "image" and the rendered wiki article.
    """
    bundle = ModelResource.full_dehydrate(self, bundle, for_list)
    if bundle.obj.picture:
        thumbnailer = get_thumbnailer(bundle.obj.picture)
        thumbnail_options = {'size': (ResizeThumbApi.width, ResizeThumbApi.height)}
        bundle.data["thumb"] = thumbnailer.get_thumbnail(thumbnail_options).url
    else:
        bundle.data["thumb"] = None
    if for_list is False:
        bundle.data["tags"] = [tag.name for tag in Tag.objects.get_for_object(bundle.obj)]
        # `thumbnailer` is defined above whenever a picture exists, so this
        # guard keeps the reference safe.
        # NOTE(review): width is used for both dimensions — presumably a
        # square display image; confirm this is intentional.
        if(bundle.obj.picture):
            thumbnail_options = {'size': (ResizeDisplay.width, ResizeDisplay.width)}
            bundle.data["image"] = thumbnailer.get_thumbnail(thumbnail_options).url
        else:
            bundle.data["image"] = None
        try:
            bundle.data["article"] = Article.get_for_object(bundle.obj).render()
        except ArticleForObject.DoesNotExist:
            bundle.data["article"] = None
    return bundle
def import_page(self, api, site, page, current_site, url_root,
                user_matching, replace_existing):
    """Import one MediaWiki page as a wiki Article under *url_root*.

    Converts up to the last two history revisions from mediawiki markup to
    markdown via pypandoc, mapping MediaWiki usernames to local users
    through *user_matching*. Existing pages are skipped unless
    *replace_existing* is set, in which case the old article is destroyed
    first. Records old-link -> new-link mappings on self.
    """
    import pypandoc
    # Filter titles, to avoid stranges charaters.
    title = only_printable(page.title)
    urltitle = slugify(only_printable(urllib.unquote(page.urltitle))[:50])
    added = 1
    # Disambiguate slugs already handled in this run by appending a counter.
    while urltitle in self.articles_worked_on:
        title = only_printable(page.title) + " " + str(added)
        urltitle = only_printable(
            slugify((urllib.unquote(page.urltitle))[:47] + " " + str(added)))
        added += 1
    self.articles_worked_on.append(urltitle)
    print("Working on %s (%s)" % (title, urltitle))
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[
            page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print("\tAlready existing, skipping...")
            return
        print("\tDestorying old version of the article")
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Replay the last two history entries, oldest first.
    for history_page in page.getHistory()[-2:][::-1]:
        try:
            if history_page['user'] in user_matching:
                user = get_user_model().objects.get(
                    pk=user_matching[history_page['user']])
            else:
                user = get_user_model().objects.get(
                    username=history_page['user'])
        except get_user_model().DoesNotExist:
            print(
                "\tCannot found user with username=%s. Use --user-matching \"%s:<user_pk>\" to manualy set it" % (
                    history_page['user'],
                    history_page['user'],
                ))
            user = None
        article_revision = ArticleRevision()
        article_revision.content = pypandoc.convert(
            history_page['*'], 'md', 'mediawiki')
        article_revision.title = title
        article_revision.user = user
        article_revision.owner = user
        article.add_revision(article_revision, save=True)
        # Restore the original timestamp after add_revision() saved it.
        article_revision.created = history_page['timestamp']
        article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.content = pypandoc.convert(
        striptags(page.getWikiText(True, True).decode('utf-8')).replace(
            '__NOEDITSECTION__', '').replace('__NOTOC__', ''),
        'md', 'mediawiki')
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(site=current_site, parent=url_root,
                                   slug=urltitle, article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[
        page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))
def import_page(self, api, site, page, current_site, url_root, user_matching, replace_existing):
    """Import one MediaWiki page as a wiki Article under *url_root*.

    Python 2 variant (print statements). Converts up to the last two
    history revisions from mediawiki markup to markdown via pypandoc,
    mapping MediaWiki usernames to local users through *user_matching*.
    Existing pages are skipped unless *replace_existing* is set, in which
    case the old article is destroyed first.
    """
    import pypandoc
    # Filter titles, to avoid stranges charaters.
    title = only_printable(page.title)
    urltitle = slugify(only_printable(urllib.unquote(page.urltitle))[:50])
    added = 1
    # Disambiguate slugs already handled in this run by appending a counter.
    while urltitle in self.articles_worked_on:
        title = only_printable(page.title) + " " + str(added)
        urltitle = only_printable(slugify((urllib.unquote(page.urltitle))[:47] + " " + str(added)))
        added += 1
    self.articles_worked_on.append(urltitle)
    print "Working on %s (%s)" % (title, urltitle)
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print "\tAlready existing, skipping..."
            return
        print "\tDestorying old version of the article"
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Replay the last two history entries, oldest first.
    for history_page in page.getHistory()[-2:][::-1]:
        try:
            if history_page['user'] in user_matching:
                user = get_user_model().objects.get(pk=user_matching[history_page['user']])
            else:
                user = get_user_model().objects.get(username=history_page['user'])
        except get_user_model().DoesNotExist:
            print "\tCannot found user with username=%s. Use --user-matching \"%s:<user_pk>\" to manualy set it" % (history_page['user'], history_page['user'], )
            user = None
        article_revision = ArticleRevision()
        article_revision.content = pypandoc.convert(history_page['*'], 'md', 'mediawiki')
        article_revision.title = title
        article_revision.user = user
        article_revision.owner = user
        article.add_revision(article_revision, save=True)
        # Restore the original timestamp after add_revision() saved it.
        article_revision.created = history_page['timestamp']
        article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.content = pypandoc.convert(striptags(page.getWikiText(True, True).decode('utf-8')).replace('__NOEDITSECTION__', '').replace('__NOTOC__', ''), 'md', 'mediawiki')
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(site=current_site, parent=url_root, slug=urltitle, article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))
def import_page(
    self,
    api,
    site,
    page,
    current_site,
    url_root,
    user_matching,
    replace_existing,
):
    """Import one MediaWiki page (wikitools variant) as a wiki Article.

    Transliterates Norwegian characters in the slug, converts the latest
    history revision via refactor(), strips Category/Kategori links, and
    creates the URLPath under *url_root*. Existing pages are skipped
    unless *replace_existing* is set.
    """
    from wikitools.pagelist import listFromQuery
    # Filter titles, to avoid stranges charaters.
    title = page.title
    urltitle = title
    # Transliterate Norwegian letters so slugify() does not drop them.
    urltitle = urltitle.replace("ø", "o")
    urltitle = urltitle.replace("æ", "ae")
    urltitle = urltitle.replace("å", "a")
    urltitle = urltitle.replace("Ø", "O")
    urltitle = urltitle.replace("Æ", "AE")
    urltitle = urltitle.replace("Å", "A")
    urltitle = only_printable(urltitle)
    urltitle = slugify(only_printable(urllib.parse.unquote(urltitle))[:50])
    added = 1
    # Disambiguate slugs already handled in this run by appending a counter.
    while urltitle in self.articles_worked_on:
        title = only_printable("{} {}".format(page.title, added))
        urltitle = slugify(
            "{} {}".format(only_printable(urllib.parse.unquote(page.urltitle))[:47], added)
        )
        added += 1
    self.articles_worked_on.append(urltitle)
    print("Working on {} ({})".format(title, urltitle))
    print(url_root)
    print(urltitle)
    print()
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[
            page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print("\tAlready existing, skipping...")
            return
        print("\tDestorying old version of the article")
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Only the most recent history entry is imported here.
    history_page = page.getHistory()[0]
    try:
        if history_page['user'] in user_matching:
            user = get_user_model().objects.get(
                pk=user_matching[
                    history_page['user']])
        else:
            user = get_user_model().objects.get(
                username=history_page['user'])
    except get_user_model().DoesNotExist:
        user = None
    except Exception:
        # NOTE(review): broad except leaves `user` unbound if it fires —
        # the assignment below would then raise NameError; confirm intent.
        # NOTE(review): string literal reconstructed from a garbled source
        # line; verify the exact wording against the original file.
        print("Couldn't find user. Something is weird.")
    article_revision = ArticleRevision()
    article_revision.content = refactor(page.getWikiText())
    article_revision.title = title
    article_revision.user = user
    article_revision.owner = user
    # Strip MediaWiki category links (English and Norwegian).
    article_revision.content = re.sub("\[\[.*(Category|Kategori).*\]\]\n",
                                      "",
                                      article_revision.content)
    article.add_revision(article_revision, save=True)
    # Restore the original timestamp after add_revision() saved it.
    article_revision.created = history_page['timestamp']
    article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(
        site=current_site,
        parent=url_root,
        slug=urltitle,
        article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[
        page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))
def import_page(
    self,
    api,
    site,
    page,
    current_site,
    url_root,
    user_matching,
    replace_existing,
):
    """Import one MediaWiki page (wikitools variant) as a wiki Article.

    Transliterates Norwegian characters in the slug, converts the latest
    history revision via refactor(), strips Category/Kategori links, and
    creates the URLPath under *url_root*. Existing pages are skipped
    unless *replace_existing* is set (then the old article is destroyed).

    Fixes vs. original:
    - the category regex is now a raw string (the unescaped ``\\[`` forms
      were invalid escape sequences, a SyntaxWarning on modern Python);
    - ``user`` is set to None in the broad except branch, which previously
      left it unbound and caused a NameError a few lines later.
    """
    from wikitools.pagelist import listFromQuery
    # Filter titles, to avoid stranges charaters.
    title = page.title
    urltitle = title
    # Transliterate Norwegian letters so slugify() does not drop them.
    urltitle = urltitle.replace("ø", "o")
    urltitle = urltitle.replace("æ", "ae")
    urltitle = urltitle.replace("å", "a")
    urltitle = urltitle.replace("Ø", "O")
    urltitle = urltitle.replace("Æ", "AE")
    urltitle = urltitle.replace("Å", "A")
    urltitle = only_printable(urltitle)
    urltitle = slugify(only_printable(urllib.parse.unquote(urltitle))[:50])
    added = 1
    # Disambiguate slugs already handled in this run by appending a counter.
    while urltitle in self.articles_worked_on:
        title = only_printable("{} {}".format(page.title, added))
        urltitle = slugify("{} {}".format(
            only_printable(urllib.parse.unquote(page.urltitle))[:47], added))
        added += 1
    self.articles_worked_on.append(urltitle)
    print("Working on {} ({})".format(title, urltitle))
    print(url_root)
    print(urltitle)
    print()
    # Check if the URL path already exists
    try:
        urlp = URLPath.objects.get(slug=urltitle)
        self.matching_old_link_new_link[
            page.title] = urlp.article.get_absolute_url()
        if not replace_existing:
            print("\tAlready existing, skipping...")
            return
        print("\tDestorying old version of the article")
        urlp.article.delete()
    except URLPath.DoesNotExist:
        pass
    # Create article
    article = Article()
    # Only the most recent history entry is imported here.
    history_page = page.getHistory()[0]
    try:
        if history_page['user'] in user_matching:
            user = get_user_model().objects.get(
                pk=user_matching[history_page['user']])
        else:
            user = get_user_model().objects.get(
                username=history_page['user'])
    except get_user_model().DoesNotExist:
        user = None
    except Exception:
        print("Couldn't find user. Something is weird.")
        # Fix: previously `user` stayed unbound here, raising NameError
        # when assigned to the revision below.
        user = None
    article_revision = ArticleRevision()
    article_revision.content = refactor(page.getWikiText())
    article_revision.title = title
    article_revision.user = user
    article_revision.owner = user
    # Strip MediaWiki category links (English and Norwegian).
    # Fix: raw string — the original non-raw pattern relied on invalid
    # escape sequences ("\[").
    article_revision.content = re.sub(r"\[\[.*(Category|Kategori).*\]\]\n",
                                      "",
                                      article_revision.content)
    article.add_revision(article_revision, save=True)
    # Restore the original timestamp after add_revision() saved it.
    article_revision.created = history_page['timestamp']
    article_revision.save()
    # Updated lastest content WITH expended templates
    # TODO ? Do that for history as well ?
    article_revision.save()
    article.save()
    upath = URLPath.objects.create(site=current_site,
                                   parent=url_root,
                                   slug=urltitle,
                                   article=article)
    article.add_object_relation(upath)
    self.matching_old_link_new_link[
        page.title] = upath.article.get_absolute_url()
    self.articles_imported.append((article, article_revision))