def test_multiple_matches_returns_max_3_top_matches(self):
    """Suggestions for a misspelled slug are capped at three pages."""
    home_page = Page.objects.get(slug='home')
    # Four live pages that all resemble the queried slug; only three
    # may come back.
    for suffix in ('', '1', '2', '3'):
        candidate = Page(
            title='Bridget Masinga%s' % suffix,
            slug='bridgetmasinga%s' % suffix,
            live=True,
            first_published_at=datetime.now(),
        )
        home_page.add_child(instance=candidate)

    result = suggest_page_from_misspelled_slug('/bridget', home_page)

    assert len(result) == 3
def setUp(self):
    """Build two sites, each with its own news index and a single item."""
    tree_root = Page.objects.get(pk=1)

    home_a = Page(title='Home A', slug='home-a')
    tree_root.add_child(instance=home_a)
    home_b = Page(title='Home B', slug='home-b')
    tree_root.add_child(instance=home_b)

    # One news index per site root.
    self.index_a = NewsIndex(title='News A', slug='news-a')
    home_a.add_child(instance=self.index_a)
    self.index_b = NewsIndex(title='News B', slug='news-b')
    home_b.add_child(instance=self.index_b)

    self.site_a = Site.objects.create(
        hostname='site-a.com', root_page=home_a)
    self.site_b = Site.objects.create(
        hostname='site-b.org', root_page=home_b)

    # One dated item per index.
    self.item_a = NewsItem.objects.create(
        newsindex=self.index_a, title='Post A', date=dt(2015, 8, 1))
    self.item_b = NewsItem.objects.create(
        newsindex=self.index_b, title='Post B', date=dt(2015, 8, 2))
def test_page(self):
    """Persist and return a minimal page at a fixed tree position."""
    attrs = {
        'title': 'testpage',
        'slug': 'test',
        'path': '000100019999',
        'depth': 3,
    }
    created = Page(**attrs)
    created.save()
    return created
def node(name, show_in_menus=True, live=True):
    """Build (without saving) a Page whose title and slug are both *name*."""
    return Page(slug=name, title=name, show_in_menus=show_in_menus, live=live)
def test_slug_scopes_to_site(self):
    """A slug under one index must not match when scoped to another."""
    home_page = Page.objects.get(slug='home')

    index_one = Page(title='First Index', slug='firstindex')
    index_two = Page(title='Second Index', slug='secondindex')
    home_page.add_child(instance=index_one)
    home_page.add_child(instance=index_two)

    first_article = Page(title='Article One', slug='articleone')
    second_article = Page(title='Article Two', slug='articletwo')
    index_one.add_child(instance=first_article)
    index_two.add_child(instance=second_article)

    # 'articleone' lives under index_one, so a lookup scoped to
    # index_two must find nothing.
    result = slug_matches_one_page_exactly('/shows/articleone/', index_two)
    assert result is None
def test_multiple_matches_returns_best_matching_page(self):
    """The closest slug match is included ahead of weaker candidates."""
    home_page = Page.objects.get(slug='home')

    close_match = Page(
        title='Workzone with Bridget Masinga',
        slug='workzonewithbridgetmasinga',
        live=True,
        first_published_at=datetime.now(),
    )
    distant_match = Page(
        title='Bridget Masinga',
        slug='bridgetmasinga',
        live=True,
        first_published_at=datetime.now(),
    )
    home_page.add_child(instance=close_match)
    home_page.add_child(instance=distant_match)

    result = suggest_page_from_misspelled_slug(
        '/workzonbridgetmasing/', home_page)

    assert close_match in result
def test_search_scopes_to_site_root_page(self):
    """Only pages under the given site root should come back from search."""
    home_page = Page.objects.get(slug='home')

    in_site = Page(title='Justin Bieber', slug='justin-bieber')
    home_page.add_child(instance=in_site)

    # A page saved outside the home tree must not be returned.
    out_of_site = Page(
        depth=0,
        path='0002',
        title='Justin Bieber Again',
        slug='justin-bieber-again',
    )
    out_of_site.save()

    result = pg_full_text_search('Justin Bieber', home_page)
    assert list(result) == [in_site]
def test_can_create_at(self):
    """Exercise Page.can_create_at across the test page-type hierarchy."""
    # The generic Page model is not `is_creatable` at all.
    self.assertFalse(Page.can_create_at(Page()))

    # A SimplePage may sit beneath another SimplePage.
    self.assertTrue(SimplePage.can_create_at(SimplePage()))

    # StandardIndex is restricted to plain Page parents.
    self.assertTrue(StandardIndex.can_create_at(Page()))
    self.assertFalse(StandardIndex.can_create_at(SimplePage()))

    # Business pages enforce a strict parent/child structure.
    self.assertTrue(BusinessSubIndex.can_create_at(BusinessIndex()))
    self.assertTrue(BusinessChild.can_create_at(BusinessIndex()))
    self.assertTrue(BusinessChild.can_create_at(BusinessSubIndex()))
    self.assertFalse(BusinessChild.can_create_at(SimplePage()))
    self.assertFalse(BusinessSubIndex.can_create_at(SimplePage()))
def test_no_matching_slug(self):
    """A path whose slug matches no page yields None."""
    home_page = Page.objects.get(slug='home')
    show_page = Page(
        title='Workzone with Bridget Masinga',
        slug='workzonewithbridgetmasinga',
    )
    home_page.add_child(instance=show_page)

    assert slug_matches_one_page_exactly('/post/noresult/', home_page) is None
def test_fallback_wagtail_serve(self):
    """When the flag falls through, Wagtail serves the published page."""
    publish_page(Page(title='wagtail title', slug='title'))

    view = TestView.as_view(
        flag_name=self.flag_name,
        fallback_view=wagtail_fail_through,
    )
    response = view(self.request(path='/title/'))

    self.assertContains(response, '<title>wagtail title</title>')
def test_multiple_articles_with_same_slug_returns_none(self):
    """Two pages sharing one slug make an exact match ambiguous -> None."""
    home_page = Page.objects.get(slug='home')

    first_index = Page(title='First Index', slug='firstindex')
    second_index = Page(title='Second Index', slug='secondindex')
    home_page.add_child(instance=first_index)
    home_page.add_child(instance=second_index)

    # The same slug appears under BOTH indexes.
    shared_slug = 'workzonewithbridgetmasinga'
    first_index.add_child(instance=Page(
        title='Workzone with Bridget Masinga', slug=shared_slug))
    second_index.add_child(instance=Page(
        title='Workzone with Bridget Masinga', slug=shared_slug))

    result = slug_matches_one_page_exactly(
        '/shows/workzonewithbridgetmasinga/', home_page)
    assert result is None
def test_search_results_found(self):
    """Full-text search finds a page whose title matches the query."""
    home_page = Page.objects.get(slug='home')
    article = Page(title='Justin Bieber', slug='justin-bieber')
    home_page.add_child(instance=article)

    assert list(pg_full_text_search('Justin Bieber', home_page)) == [article]
def test_multiple_slug_depth(self):
    """A deeply nested request path still resolves on its final slug."""
    home_page = Page.objects.get(slug='home')
    show_page = Page(
        title='Workzone with Bridget Masinga',
        slug='workzonewithbridgetmasinga',
        live=True,
        first_published_at=datetime.now(),
    )
    home_page.add_child(instance=show_page)

    result = slug_matches_one_page_exactly(
        '/test/post/page/shows/workzonewithbridgetmasinga/', home_page)
    assert result == show_page
def setUp(self):
    """Create a root page and attach it to a default localhost site."""
    # NOTE(review): depth=0 is unusual for Wagtail trees (roots are
    # normally depth 1) -- preserved as-is; confirm intent.
    root = Page(title='Root Page', slug='root-page', depth=0)
    root.save()

    site = Site(
        root_page=root,
        is_default_site=True,
        hostname='localhost',
        port=80,
    )
    site.save()
def test_endless_recursion(self):
    """create_tree/grow_tree must terminate on a self-referencing tree."""
    root = get_default_site().root_page

    # Deliberately self-referencing structure: the list contains itself.
    looped = [Page(slug='recursive')]
    looped.append(looped)

    treeutils.create_tree(root, looped)

    # Without loop protection this call would never return.
    result = treeutils.grow_tree(root.get_descendants(), slug_getter)
    self.assertEqual(result, ['recursive'])
def test_can_move_to(self):
    """Exercise can_move_to across the test page-type hierarchy."""
    self.assertTrue(SimplePage().can_move_to(SimplePage()))

    # StandardIndex may only live under a plain Page.
    self.assertTrue(StandardIndex().can_move_to(Page()))
    self.assertFalse(StandardIndex().can_move_to(SimplePage()))

    # Business pages enforce a strict parent/child structure.
    self.assertTrue(BusinessSubIndex().can_move_to(BusinessIndex()))
    self.assertTrue(BusinessChild().can_move_to(BusinessIndex()))
    self.assertTrue(BusinessChild().can_move_to(BusinessSubIndex()))
    self.assertFalse(BusinessChild().can_move_to(SimplePage()))
    self.assertFalse(BusinessSubIndex().can_move_to(SimplePage()))
def test_auto_recache(root_page, example_svg_upload):
    """Saving a linked page re-renders any image map referencing it."""
    linked = Page(title="nnep", slug="nnep")
    linked.set_url_path(root_page)
    root_page.add_child(instance=linked)
    linked.save()
    assert linked.url

    image_map = ImageMap.objects.create(svg=example_svg_upload)
    image_map.regions.create(element_id='blue', link_page=linked)
    image_map.recache_svg(save=True)
    assert 'nnep' in image_map.rendered_svg

    # Renaming the slug fires post_save, which should refresh the cache.
    linked.slug = 'ffflop'
    linked.save()
    assert 'ffflop' in ImageMap.objects.get(pk=image_map.pk).rendered_svg
def test_search_does_not_return_copied_pages(self):
    """Copies of a matching page are excluded from search results."""
    home_page = Page.objects.get(slug='home')
    original = Page(title='Justin Bieber', slug='justin-bieber')
    home_page.add_child(instance=original)

    # The copy matches the query too, but must not be returned.
    original.copy(update_attrs={'slug': 'justin-bieber-copy'})

    result = pg_full_text_search('Justin Bieber', home_page)
    assert list(result) == [original]
def tree_of_pages(self):
    """Tree fixture with nested lists."""
    # Built innermost-first; nesting depth encodes tree depth.
    deepest = [Page(slug='p2211')]
    branch_p22x = [Page(slug='p221'), deepest]
    branch_p2x = [Page(slug='p21'), Page(slug='p22'), branch_p22x]
    return [Page(slug='p1'), Page(slug='p2'), branch_p2x, Page(slug='p3')]
def test_get_root_page__more_than_one(self):
    """`get_root_page` raises when more than one depth-1 page exists."""
    # A second depth-1 page makes the root ambiguous.
    Page.add_root(instance=Page(title="Route 2"))

    with self.assertRaises(Site.MultipleObjectsReturned) as cm:
        get_root_page()

    expected = ("Foliage can't auto-determine the root page. "
                "More than one Page exists with depth 1 in the database!", )
    self.assertEqual(cm.exception.args, expected)
def test_valid_post_redirects__to_the_forms_post_redirect_page(self):
    """A valid submission redirects to the form's configured page."""
    destination = self.page.add_child(
        instance=Page(title="another", slug="another"))

    form = self.test_form()
    form.post_redirect_page = destination
    form.save()

    fake_request = self.rf.post('/fake/', {
        'name': 'Bill',
        'form_id': form.pk,
        'form_reference': 'some-ref',
    })
    fake_request.user = AnonymousUser()

    response = process_form(self.page, fake_request)
    response.client = Client()
    self.assertRedirects(response, destination.get_url(fake_request))
def test_get_site__more_than_one(self):
    """`get_site` raises MultipleObjectsReturned when two sites exist."""
    # Add a second site rooted at a brand-new page.
    second_root = Page.add_root(instance=Page(title="Second Sight"))
    Site.objects.create(hostname='secondsight.com', port=80,
                        root_page=second_root)

    with self.assertRaises(Site.MultipleObjectsReturned) as cm:
        get_site()

    expected = ("Foliage can't auto-determine the Wagtail Site. "
                "More than one Site exists in the database!", )
    self.assertEqual(cm.exception.args, expected)
def setUp(self):
    """Create one TestSetting for the default site and one for another."""
    tree_root = Page.objects.first()
    other_home = Page(title='Other Root')
    tree_root.add_child(instance=other_home)

    self.default_site = Site.objects.get(is_default_site=True)
    self.other_site = Site.objects.create(
        hostname='other', root_page=other_home)

    self.test_setting = TestSetting.objects.create(
        title='Site title',
        email='*****@*****.**',
        site=self.default_site)
    self.other_setting = TestSetting.objects.create(
        title='Other title',
        email='*****@*****.**',
        site=self.other_site)
def test_rendering(root_page, example_svg_upload, dummy_wagtail_doc):
    """Rendered SVG carries every region's link target and attributes."""
    linked = Page(title="nnep", slug="nnep")
    linked.set_url_path(root_page)
    root_page.add_child(instance=linked)
    linked.save()
    assert linked.url

    image_map = ImageMap.objects.create(svg=example_svg_upload)
    image_map.regions.create(element_id='green', link_external='/foobar',
                             target='_blank')
    image_map.regions.create(element_id='blue', link_page=linked,
                             target='_top')
    image_map.regions.create(element_id='red',
                             link_document=dummy_wagtail_doc)

    svg = image_map.rendered_svg
    # External URL, targets, page slug, and document path must all appear.
    for fragment in ('/foobar', '_blank', 'nnep', '_top',
                     'documents/%s' % dummy_wagtail_doc.pk):
        assert fragment in svg
def setUp(self):
    """Add a 'naujienos' section plus one plain user and one superuser."""
    super().setUp()
    site_root = get_default_site().root_page
    self.root = site_root.add_child(instance=Page(slug='naujienos'))
    User.objects.create_user('user')
    User.objects.create_superuser('admin', '*****@*****.**', 'secret')
from content.models import ContentPage from kehmet.models import KehmetContentPage, KehmetFrontPage from django.contrib.contenttypes.models import ContentType from django.db import transaction, models from wagtail.wagtailcore.models import Page cp_type = ContentType.objects.get_for_model(ContentPage) k_root = Page.objects.get(url_path='/digietu/kehmet/') pages = k_root.get_descendants().type(ContentPage) dummy_page = Page(title="dummy", path="1234", slug="dummy-slug", depth=1) from pprint import pprint def convert_page(page, target_model): try: page.kehmetcontentpage return except: pass kcp_type = ContentType.objects.get_for_model(target_model) cp_page = page.specific kcp_page = target_model(body=cp_page.body, page_ptr=page) for f in kcp_page._meta.fields: setattr(kcp_page, f.name, getattr(cp_page, f.name)) super(Page, kcp_page).save() for f in dummy_page._meta.fields:
def test_page_not_routable_on_site_returns_none(self):
    """URL parts are None for a page outside the queried site's tree."""
    foreign_root = Page(title='test', slug='foo')
    save_new_page(foreign_root, root=self.root)
    other_site = Site.objects.create(hostname='test', root_page=foreign_root)

    self.assertIsNone(get_url_parts_for_site(self.root, other_site))
def test_get_page_from_content(self):
    """A page saved under the content site resolves to its URL parts."""
    page = Page(title='test', slug='foo')
    save_new_page(page, root=self.root)

    expected = (self.content.id, self.content.root_url, '/foo/')
    self.assertEqual(get_url_parts_for_site(page, self.content), expected)
def setUp(self):
    """
    Build a small page tree for the document-detail tests:
    site root -> Policy & Compliance -> Enforcement -> Actions,
    with three DocumentDetailPages (full data, empty, and wrong-branch).
    """
    self.site_root = Site.objects.get(is_default_site=True).root_page

    self.policy_compliance_page = Page(
        title='Policy & Compliance',
        slug='policy-compliance'
    )
    save_new_page(self.policy_compliance_page, root=self.site_root)

    self.enforcement_page = Page(title='Enforcement', slug='enforcement')
    save_new_page(self.enforcement_page, root=self.policy_compliance_page)

    self.actions_page = Page(title='Actions', slug='actions')
    save_new_page(self.actions_page, root=self.enforcement_page)

    # Page with both sidefoot metadata and body content populated.
    self.test_all_data_page = DocumentDetailPage(
        title="Great Test Page",
        live=True,
        preview_description='This is a great test page.'
    )
    save_new_page(self.test_all_data_page, root=self.actions_page)
    # Sidefoot stream: one related_metadata block holding two text
    # entries and one date entry.
    set_stream_data(
        self.test_all_data_page,
        'sidefoot',
        [
            {
                'type': 'related_metadata',
                'value': {
                    'content': [
                        {
                            'type': 'text',
                            'value': {
                                'heading': 'Status',
                                'blob': '<p>Inactive or resolved</p>'
                            },
                        },
                        {
                            'type': 'text',
                            'value': {
                                'heading': 'File number',
                                'blob': '<p>2012-CFPB-0001</p>'
                            },
                        },
                        {
                            'type': 'date',
                            'value': {
                                'heading': 'Date filed',
                                'date': datetime.date(2012, 7, 18)
                            },
                        }
                    ],
                },
            },
        ]
    )
    # Body stream: a single full_width_text block.
    set_stream_data(
        self.test_all_data_page,
        'content',
        [
            {
                'type': 'full_width_text',
                'value': [
                    {
                        'type': 'content',
                        'value': 'CONTENT'
                    }
                ]
            },
        ]
    )

    # Draft page with no stream data at all.
    self.test_no_data_page = DocumentDetailPage(
        title="Terrible Test Page",
        live=False,
        preview_description='This is a terrible test page.'
    )
    save_new_page(self.test_no_data_page, root=self.actions_page)

    # Live page placed under Enforcement (not Actions) on purpose.
    self.test_wrong_page = DocumentDetailPage(
        title="Wrong Test Page",
        live=True,
        preview_description='This is the wrong test page.'
    )
    save_new_page(self.test_wrong_page, root=self.enforcement_page)
class Command(LabelCommand):
    """
    Command object for importing a WordPress blog
    into WagtailPress via a WordPress eXtended RSS (WXR) file.
    """
    help = 'Import a Wordpress blog into WagtailPress.'
    label = 'WXR file'
    args = 'wordpress.xml'

    option_list = LabelCommand.option_list + (
        make_option('--noautoexcerpt', action='store_false',
                    dest='auto_excerpt', default=True,
                    help='Do NOT generate an excerpt if not present.'),
        make_option('--author', dest='author', default='',
                    help='All imported entries belong to specified author'))

    # NOTE(review): this block runs at class-definition (import) time,
    # creating and saving a new Page every time the module is imported --
    # confirm this is intentional rather than logic that belongs inside
    # handle_label().
    p = Page()
    p.title = 'WagtailPress Import ' + str(datetime.now())
    p.slug = p.title.lower().replace(' ', '-')
    p.depth = 0
    p.save()
    ROOT_PAGE = p.pk  # pk of the page imported entries get attached to

    # Map WordPress post statuses onto the local status constants.
    REVERSE_STATUS = {'pending': DRAFT,
                      'draft': DRAFT,
                      'auto-draft': DRAFT,
                      'inherit': DRAFT,
                      'publish': PUBLISHED,
                      'future': PUBLISHED,
                      'trash': HIDDEN,
                      'private': PUBLISHED}

    def __init__(self):
        """
        Init the Command and add custom styles.
        """
        super(Command, self).__init__()
        self.style.TITLE = self.style.SQL_FIELD
        self.style.STEP = self.style.SQL_COLTYPE
        self.style.ITEM = self.style.HTTP_INFO
        # Importing must not fire the usual entry/discussion signals.
        disconnect_entry_signals()
        disconnect_discussion_signals()

    def write_out(self, message, verbosity_level=1):
        """
        Convenient method for outputting, honouring --verbosity.
        """
        if self.verbosity and self.verbosity >= verbosity_level:
            sys.stdout.write(smart_str(message))
            sys.stdout.flush()

    def handle_label(self, wxr_file, **options):
        # Entry point: parse the WXR file and run each import phase in order
        # (authors, categories, tags, then entries).
        global WP_NS
        self.verbosity = int(options.get('verbosity', 1))
        self.auto_excerpt = options.get('auto_excerpt', True)
        self.default_author = options.get('author')
        if self.default_author:
            try:
                self.default_author = Author.objects.get(
                    **{Author.USERNAME_FIELD: self.default_author})
            except Author.DoesNotExist:
                raise CommandError('Invalid username for default author')

        self.write_out(self.style.TITLE(
            'Starting migration from Wordpress to Zinnia %s:\n' % __version__))

        tree = ET.parse(wxr_file)
        # WP_NS is a version template; fill it in before first use.
        WP_NS = WP_NS % self.guess_wxr_version(tree)
        self.authors = self.import_authors(tree)
        self.categories = self.import_categories(
            tree.findall('channel/{%s}category' % WP_NS))
        self.import_tags(tree.findall('channel/{%s}tag' % WP_NS))
        self.import_entries(tree.findall('channel/item'))

    def guess_wxr_version(self, tree):
        """
        We will try to guess the wxr version used
        to complete the wordpress xml namespace name.
        """
        for v in ('1.2', '1.1', '1.0'):
            try:
                # Touch .text so a missing node raises AttributeError.
                tree.find('channel/{%s}wxr_version' % (WP_NS % v)).text
                return v
            except AttributeError:
                pass
        raise CommandError('Cannot resolve the wordpress namespace')

    def import_authors(self, tree):
        """
        Retrieve all the authors used in posts
        and convert it to new or existing author and
        return the conversion.
        """
        self.write_out(self.style.STEP('- Importing authors\n'))
        post_authors = set()
        for item in tree.findall('channel/item'):
            post_type = item.find('{%s}post_type' % WP_NS).text
            if post_type == 'post':
                post_authors.add(item.find(
                    '{http://purl.org/dc/elements/1.1/}creator').text)

        self.write_out('> %i authors found.\n' % len(post_authors))

        authors = {}
        for post_author in post_authors:
            if self.default_author:
                authors[post_author] = self.default_author
            else:
                authors[post_author] = self.migrate_author(
                    post_author.replace(' ', '-'))
        return authors

    def migrate_author(self, author_name):
        """
        Handle actions for migrating the authors (interactive prompt).
        """
        action_text = "The author '%s' needs to be migrated to an user:\n"\
                      "1. Use an existing user ?\n"\
                      "2. Create a new user ?\n"\
                      "Please select a choice: " % self.style.ITEM(author_name)
        while 42:  # loop until a valid menu choice is typed
            selection = input(smart_str(action_text))
            if selection and selection in '12':
                break
        if selection == '1':
            users = Author.objects.all()
            if users.count() == 1:
                # Only one candidate user: preselect it.
                username = users[0].get_username()
                preselected_user = username
                usernames = [username]
                usernames_display = ['[%s]' % username]
            else:
                usernames = []
                usernames_display = []
                preselected_user = None
                for user in users:
                    username = user.get_username()
                    if username == author_name:
                        # Bracket the exact-name match and preselect it.
                        usernames_display.append('[%s]' % username)
                        preselected_user = username
                    else:
                        usernames_display.append(username)
                    usernames.append(username)
            while 42:
                user_text = "1. Select your user, by typing " \
                            "one of theses usernames:\n" \
                            "%s or 'back'\n" \
                            "Please select a choice: " % \
                            ', '.join(usernames_display)
                user_selected = input(user_text)
                if user_selected in usernames:
                    break
                if user_selected == '' and preselected_user:
                    user_selected = preselected_user
                    break
                if user_selected.strip() == 'back':
                    # Restart the whole prompt for this author.
                    return self.migrate_author(author_name)
            return users.get(**{users[0].USERNAME_FIELD: user_selected})
        else:
            create_text = "2. Please type the email of " \
                          "the '%s' user or 'back': " % author_name
            author_mail = input(create_text)
            if author_mail.strip() == 'back':
                return self.migrate_author(author_name)
            try:
                return Author.objects.create_user(author_name, author_mail)
            except IntegrityError:
                # Username already exists; reuse the existing author.
                return Author.objects.get(
                    **{Author.USERNAME_FIELD: author_name})

    def import_categories(self, category_nodes):
        """
        Import all the categories from 'wp:category' nodes,
        because categories in 'item' nodes are not necessarily
        all the categories and returning it in a dict for
        database optimizations.
        """
        self.write_out(self.style.STEP('- Importing categories\n'))

        categories = {}
        for category_node in category_nodes:
            title = category_node.find('{%s}cat_name' % WP_NS).text[:255]
            slug = category_node.find(
                '{%s}category_nicename' % WP_NS).text[:255]
            try:
                parent = category_node.find(
                    '{%s}category_parent' % WP_NS).text[:255]
            except TypeError:
                # No parent text: treat as a top-level category.
                parent = None
            self.write_out('> %s... ' % title)
            category, created = Category.objects.get_or_create(
                slug=slug, defaults={'title': title,
                                     'parent': categories.get(parent)})
            categories[title] = category
            self.write_out(self.style.ITEM('OK\n'))
        return categories

    def import_tags(self, tag_nodes):
        """
        Import all the tags form 'wp:tag' nodes,
        because tags in 'item' nodes are not necessarily
        all the tags, then use only the nicename, because it's like
        a slug and the true tag name may be not valid for url usage.
        """
        self.write_out(self.style.STEP('- Importing tags\n'))
        for tag_node in tag_nodes:
            tag_name = tag_node.find(
                '{%s}tag_slug' % WP_NS).text[:50]
            self.write_out('> %s... ' % tag_name)
            Tag.objects.get_or_create(name=tag_name)
            self.write_out(self.style.ITEM('OK\n'))

    def get_entry_tags(self, categories):
        """
        Return a list of entry's tags,
        by using the nicename for url compatibility.
        """
        tags = []
        for category in categories:
            domain = category.attrib.get('domain', 'category')
            if 'tag' in domain and category.attrib.get('nicename'):
                tags.append(category.attrib.get('nicename'))
        return tags

    def get_entry_categories(self, category_nodes):
        """
        Return a list of entry's categories
        based on imported categories.
        """
        categories = []
        for category_node in category_nodes:
            domain = category_node.attrib.get('domain')
            if domain == 'category':
                categories.append(self.categories[category_node.text])
        return categories

    def import_entry(self, title, content, item_node):
        """
        Importing an entry but some data are missing like
        related entries, start_publication and end_publication.
        start_publication and creation_date will use the same value,
        wich is always in Wordpress $post->post_date.
        """
        creation_date = datetime.strptime(
            item_node.find('{%s}post_date' % WP_NS).text,
            '%Y-%m-%d %H:%M:%S')
        if settings.USE_TZ:
            creation_date = timezone.make_aware(
                creation_date, pytz.timezone('GMT'))

        excerpt = strip_tags(item_node.find(
            '{%sexcerpt/}encoded' % WP_NS).text or '')
        if not excerpt:
            if self.auto_excerpt:
                # Derive an excerpt from the first 50 words of the content.
                excerpt = Truncator(strip_tags(content)).words(50)
            else:
                excerpt = ''

        # Prefer use this function than
        # item_node.find('{%s}post_name' % WP_NS).text
        # Because slug can be not well formated
        slug = slugify(title)[:255] or 'post-%s' % item_node.find(
            '{%s}post_id' % WP_NS).text

        entry_dict = {
            'title': title,
            'content': content,
            'excerpt': excerpt,
            'tags': ', '.join(self.get_entry_tags(
                item_node.findall('category'))),
            'status': self.REVERSE_STATUS[
                item_node.find('{%s}status' % WP_NS).text],
            'comment_enabled': item_node.find(
                '{%s}comment_status' % WP_NS).text == 'open',
            'pingback_enabled': item_node.find(
                '{%s}ping_status' % WP_NS).text == 'open',
            'featured': item_node.find(
                '{%s}is_sticky' % WP_NS).text == '1',
            'password': item_node.find(
                '{%s}post_password' % WP_NS).text or '',
            'login_required': item_node.find(
                '{%s}status' % WP_NS).text == 'private',
            'last_update': timezone.now()}
        entry_dict['trackback_enabled'] = entry_dict['pingback_enabled']

        entry, created = Entry.objects.get_or_create(
            slug=slug, creation_date=creation_date,
            defaults=entry_dict)
        if created:
            entry.categories.add(*self.get_entry_categories(
                item_node.findall('category')))
            entry.authors.add(self.authors[item_node.find(
                '{http://purl.org/dc/elements/1.1/}creator').text])
            # NOTE(review): ROOT_PAGE is a Page pk; passing it to
            # entry.sites.add() assumes the pk maps onto a valid related
            # object -- confirm.
            entry.sites.add(self.ROOT_PAGE)

        return entry, created

    def find_image_id(self, metadatas):
        # Return the attachment id stored in the '_thumbnail_id' post meta,
        # or None when the post has no featured image.
        for meta in metadatas:
            if meta.find('{%s}meta_key' % WP_NS).text == '_thumbnail_id':
                return meta.find('{%s}meta_value' % WP_NS).text

    def import_entries(self, items):
        """
        Loops over items and find entry to import,
        an entry need to have 'post_type' set to 'post' and
        have content.
        """
        self.write_out(self.style.STEP('- Importing entries\n'))

        for item_node in items:
            title = (item_node.find('title').text or '')[:255]
            post_type = item_node.find('{%s}post_type' % WP_NS).text
            content = item_node.find(
                '{http://purl.org/rss/1.0/modules/content/}encoded').text

            if post_type == 'post' and content and title:
                self.write_out('> %s... ' % title)
                entry, created = self.import_entry(title, content, item_node)
                if created:
                    self.write_out(self.style.ITEM('OK\n'))
                    image_id = self.find_image_id(
                        item_node.findall('{%s}postmeta' % WP_NS))
                    if image_id:
                        self.import_image(entry, items, image_id)
                    self.import_comments(entry, item_node.findall(
                        '{%s}comment' % WP_NS))
                else:
                    self.write_out(self.style.NOTICE(
                        'SKIPPED (already imported)\n'))
            else:
                # Non-post items are only reported at verbosity >= 2.
                self.write_out('> %s... ' % title, 2)
                self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2)

    def import_image(self, entry, items, image_id):
        # Download the attachment matching image_id and store it as the
        # entry's image.
        for item in items:
            post_type = item.find('{%s}post_type' % WP_NS).text
            if (post_type == 'attachment' and
                    item.find('{%s}post_id' % WP_NS).text == image_id):
                title = 'Attachment %s' % item.find('title').text
                self.write_out(' > %s... ' % title)
                image_url = item.find('{%s}attachment_url' % WP_NS).text
                img_tmp = NamedTemporaryFile(delete=True)
                img_tmp.write(urlopen(image_url).read())
                img_tmp.flush()
                entry.image.save(os.path.basename(image_url),
                                 File(img_tmp))
                self.write_out(self.style.ITEM('OK\n'))

    def import_comments(self, entry, comment_nodes):
        """
        Loops over comments nodes and import then
        in django_comments.
        """
        for comment_node in comment_nodes:
            is_pingback = comment_node.find(
                '{%s}comment_type' % WP_NS).text == PINGBACK
            is_trackback = comment_node.find(
                '{%s}comment_type' % WP_NS).text == TRACKBACK

            title = 'Comment #%s' % (comment_node.find(
                '{%s}comment_id' % WP_NS).text)
            self.write_out(' > %s... ' % title)

            content = comment_node.find(
                '{%s}comment_content' % WP_NS).text
            if not content:
                # NOTE(review): 'return' aborts ALL remaining comments for
                # this entry (and skips the count update below);
                # 'continue' may have been intended -- confirm.
                self.write_out(self.style.NOTICE('SKIPPED (unfilled)\n'))
                return

            submit_date = datetime.strptime(
                comment_node.find('{%s}comment_date_gmt' % WP_NS).text,
                '%Y-%m-%d %H:%M:%S')
            if settings.USE_TZ:
                submit_date = timezone.make_aware(submit_date,
                                                  pytz.timezone('GMT'))

            approvation = comment_node.find(
                '{%s}comment_approved' % WP_NS).text
            is_public = True
            is_removed = False
            if approvation != '1':
                is_removed = True
            if approvation == 'spam':
                is_public = False

            comment_dict = {
                'content_object': entry,
                'site': self.ROOT_PAGE,
                'user_name': comment_node.find(
                    '{%s}comment_author' % WP_NS).text[:50],
                'user_email': comment_node.find(
                    '{%s}comment_author_email' % WP_NS).text or '',
                'user_url': comment_node.find(
                    '{%s}comment_author_url' % WP_NS).text or '',
                'comment': content,
                'submit_date': submit_date,
                'ip_address': comment_node.find(
                    '{%s}comment_author_IP' % WP_NS).text or None,
                'is_public': is_public,
                'is_removed': is_removed,
            }
            print(comment_dict)
            # Actual comment creation is currently disabled; the code is
            # kept below as an inert string literal.
            """
            comment = comments.get_model()(**comment_dict)
            comment.save()
            if is_pingback:
                comment.flags.create(
                    user=get_user_flagger(), flag=PINGBACK)
            if is_trackback:
                comment.flags.create(
                    user=get_user_flagger(), flag=TRACKBACK)
            """
            self.write_out(self.style.ITEM('OK\n'))

        # Refresh the denormalised discussion counters once per entry.
        entry.comment_count = entry.comments.count()
        entry.pingback_count = entry.pingbacks.count()
        entry.trackback_count = entry.trackbacks.count()
        entry.save(force_update=True)