def import_posts(self):
    """
    Import every post of the configured Blogger blog as an Entry.

    Posts already migrated (matched on creation date + slug) are
    skipped. Comments are imported best-effort per post; a progress
    line is written out for each post, skipped or migrated.
    """
    category = self.get_category()
    self.write_out(self.style.STEP("- Importing entries\n"))
    for post in self.blogger_manager.get_posts(self.blogger_blog_id):
        creation_date = convert_blogger_timestamp(post.published.text)
        status = DRAFT if is_draft(post) else PUBLISHED
        title = post.title.text or ""
        content = post.content.text or ""
        # Fall back to the Blogger post id when the title is empty,
        # so the slug is never blank; slug field is capped at 255.
        slug = slugify(post.title.text or get_post_id(post))[:255]
        try:
            entry = Entry.objects.get(creation_date=creation_date,
                                      slug=slug)
            output = self.style.NOTICE(
                "> Skipped %s (already migrated)\n" % entry)
        except Entry.DoesNotExist:
            entry = Entry(status=status, title=title, content=content,
                          creation_date=creation_date, slug=slug)
            entry.tags = ",".join([slugify(cat.term)
                                   for cat in post.category])
            entry.last_update = convert_blogger_timestamp(
                post.updated.text)
            entry.save()
            entry.sites.add(self.SITE)
            entry.categories.add(category)
            # Only attach an author when one was configured: calling
            # authors.add(None) on the M2M relation would raise.
            # (The old unguarded add + dead ``entry.author`` assignment
            # are replaced by this single guarded add.)
            if self.default_author:
                entry.authors.add(self.default_author)
            try:
                self.import_comments(entry, post)
            except gdata_service.RequestError:
                # comments not available for this post
                pass
            output = self.style.ITEM(
                "> Migrated %s + %s comments\n"
                % (entry.title, len(Comment.objects.for_model(entry))))
        self.write_out(output)
def import_entries(self, feed_entries):
    """
    Import parsed feed entries as published Entry objects.

    Entries already imported (same calendar day + slug) are skipped.
    The excerpt falls back to a truncated, tag-stripped description
    when auto-excerpt is enabled; authorship comes from the configured
    default author, else from the feed's author detail (creating the
    User on first sight).
    """
    for feed_entry in feed_entries:
        self.write_out('> %s... ' % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        # Use .exists() for the duplicate check: truth-testing the
        # queryset would fetch every matching row just to test
        # emptiness.
        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug).exists():
            self.write_out(
                self.style.NOTICE('SKIPPED (already imported)\n'))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {
            'title': feed_entry.title[:255],
            'content': feed_entry.description,
            'excerpt': feed_entry.get('summary'),
            'status': PUBLISHED,
            'creation_date': creation_date,
            'start_publication': creation_date,
            'last_update': datetime.now(),
            'slug': slug}
        if not entry_dict['excerpt'] and self.auto_excerpt:
            entry_dict['excerpt'] = truncate_words(
                strip_tags(feed_entry.description), 50)
        if self.category_tag:
            entry_dict['tags'] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get('author_detail'):
            # Create the feed author on first sight; on a duplicate
            # username, reuse the existing User instead.
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get('name')),
                    feed_entry.author_detail.get('email', ''))
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get('name')))
            entry.authors.add(user)
        self.write_out(self.style.ITEM('OK\n'))
def import_entries(self, feed_entries):
    """
    Import parsed feed entries as published Entry objects.

    Skips entries already imported on the same calendar day with the
    same slug. Builds an excerpt from the stripped description when
    auto-excerpt is on, and assigns either the configured default
    author or a User derived from the feed's author detail.
    """
    for feed_entry in feed_entries:
        self.write_out('> %s... ' % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        # .exists() avoids materializing matching rows for a pure
        # emptiness check (truth-testing the queryset fetches them).
        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug).exists():
            self.write_out(self.style.NOTICE(
                'SKIPPED (already imported)\n'))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {'title': feed_entry.title[:255],
                      'content': feed_entry.description,
                      'excerpt': feed_entry.get('summary'),
                      'status': PUBLISHED,
                      'creation_date': creation_date,
                      'start_publication': creation_date,
                      'last_update': datetime.now(),
                      'slug': slug}
        if not entry_dict['excerpt'] and self.auto_excerpt:
            entry_dict['excerpt'] = truncate_words(
                strip_tags(feed_entry.description), 50)
        if self.category_tag:
            entry_dict['tags'] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get('author_detail'):
            # First occurrence creates the User; a duplicate username
            # raises IntegrityError and we reuse the existing account.
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get('name')),
                    feed_entry.author_detail.get('email', ''))
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get('name')))
            entry.authors.add(user)
        self.write_out(self.style.ITEM('OK\n'))
def import_posts(self):
    """
    Import every post of the configured Blogger blog as an Entry.

    Already-migrated posts (matched on creation date + slug) are
    skipped; comments are imported best-effort, and a progress line
    is written for each post.
    """
    category = self.get_category()
    self.write_out(self.style.STEP('- Importing entries\n'))
    for post in self.blogger_manager.get_posts(self.blogger_blog_id):
        creation_date = convert_blogger_timestamp(post.published.text)
        status = DRAFT if is_draft(post) else PUBLISHED
        title = post.title.text or ''
        content = post.content.text or ''
        # Untitled posts fall back to the Blogger post id so the slug
        # is never empty; slug field is capped at 255 characters.
        slug = slugify(post.title.text or get_post_id(post))[:255]
        try:
            entry = Entry.objects.get(creation_date=creation_date,
                                      slug=slug)
            output = self.style.NOTICE(
                '> Skipped %s (already migrated)\n' % entry)
        except Entry.DoesNotExist:
            entry = Entry(status=status, title=title, content=content,
                          creation_date=creation_date, slug=slug)
            entry.tags = ','.join([slugify(cat.term)
                                   for cat in post.category])
            entry.last_update = convert_blogger_timestamp(
                post.updated.text)
            entry.save()
            entry.sites.add(self.SITE)
            entry.categories.add(category)
            # Guarded author attachment: authors.add(None) would raise
            # when no default author is configured. (Replaces the old
            # unguarded add and the dead ``entry.author`` assignment.)
            if self.default_author:
                entry.authors.add(self.default_author)
            try:
                self.import_comments(entry, post)
            except gdata_service.RequestError:
                # comments not available for this post
                pass
            output = self.style.ITEM(
                '> Migrated %s + %s comments\n'
                % (entry.title, len(Comment.objects.for_model(entry))))
        self.write_out(output)