def import_posts(self):
    category = self.get_category()
    self.write_out(self.style.STEP('- Importing entries\n'))
    for post in self.blogger_manager.get_posts(self.blogger_blog_id):
        creation_date = convert_blogger_timestamp(post.published.text)
        status = DRAFT if is_draft(post) else PUBLISHED
        title = post.title.text or ''
        content = post.content.text or ''
        slug = slugify(post.title.text or get_post_id(post))[:255]
        try:
            entry = Entry.objects.get(creation_date=creation_date,
                                      slug=slug)
            output = self.style.NOTICE(
                '> Skipped %s (already migrated)\n' % entry)
        except Entry.DoesNotExist:
            entry = Entry(status=status, title=title, content=content,
                          creation_date=creation_date, slug=slug)
            if self.default_author:
                entry.author = self.default_author
            entry.tags = ','.join(
                [slugify(cat.term) for cat in post.category])
            entry.last_update = convert_blogger_timestamp(post.updated.text)
            entry.save()
            entry.sites.add(self.SITE)
            entry.categories.add(category)
            entry.authors.add(self.default_author)
            try:
                self.import_comments(entry, post)
            except gdata_service.RequestError:
                # comments not available for this post
                pass
            output = self.style.ITEM(
                '> Migrated %s + %s comments\n'
                % (entry.title, len(Comment.objects.for_model(entry))))
        self.write_out(output)
def import_entries(self, feed_entries):
    """Import entries"""
    for feed_entry in feed_entries:
        self.write_out("> %s... " % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        if Entry.objects.filter(
            creation_date__year=creation_date.year,
            creation_date__month=creation_date.month,
            creation_date__day=creation_date.day,
            slug=slug,
        ):
            self.write_out(self.style.NOTICE("SKIPPED (already imported)\n"))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {
            "title": feed_entry.title[:255],
            "content": feed_entry.description,
            "excerpt": feed_entry.get("summary"),
            "status": PUBLISHED,
            "creation_date": creation_date,
            "start_publication": creation_date,
            "last_update": datetime.now(),
            "slug": slug,
        }
        if not entry_dict["excerpt"] and self.auto_excerpt:
            # Truncator takes the text to shorten, then truncates to 50 words
            entry_dict["excerpt"] = Truncator(
                strip_tags(feed_entry.description)).words(50)
        if self.tags:
            entry_dict["tags"] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.image_enclosure:
            for enclosure in feed_entry.enclosures:
                if "image" in enclosure.get("type") and enclosure.get("href"):
                    img_tmp = NamedTemporaryFile(delete=True)
                    img_tmp.write(urlopen(enclosure["href"]).read())
                    img_tmp.flush()
                    entry.image.save(slug, File(img_tmp))
                    break
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get("author_detail"):
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get("name")),
                    feed_entry.author_detail.get("email", ""),
                )
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get("name")))
            entry.authors.add(user)
        self.write_out(self.style.ITEM("OK\n"))
def import_entries(self, feed_entries):
    """Import entries"""
    for feed_entry in feed_entries:
        self.write_out('> %s... ' % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug):
            self.write_out(self.style.NOTICE(
                'SKIPPED (already imported)\n'))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {'title': feed_entry.title[:255],
                      'content': feed_entry.description,
                      'excerpt': feed_entry.get('summary'),
                      'status': PUBLISHED,
                      'creation_date': creation_date,
                      'start_publication': creation_date,
                      'last_update': datetime.now(),
                      'slug': slug}
        if not entry_dict['excerpt'] and self.auto_excerpt:
            # Truncator takes the text to shorten, then truncates to 50 words
            entry_dict['excerpt'] = Truncator(
                strip_tags(feed_entry.description)).words(50)
        if self.tags:
            entry_dict['tags'] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.image_enclosure:
            for enclosure in feed_entry.enclosures:
                if 'image' in enclosure.get('type') \
                        and enclosure.get('href'):
                    img_tmp = NamedTemporaryFile(delete=True)
                    img_tmp.write(urlopen(enclosure['href']).read())
                    img_tmp.flush()
                    entry.image.save(slug, File(img_tmp))
                    break
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get('author_detail'):
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get('name')),
                    feed_entry.author_detail.get('email', ''))
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get('name')))
            entry.authors.add(user)
        self.write_out(self.style.ITEM('OK\n'))
def import_entries(self, feed_entries):
    """Import entries"""
    for feed_entry in feed_entries:
        self.write_out('> %s... ' % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug):
            self.write_out(self.style.NOTICE(
                'SKIPPED (already imported)\n'))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {'title': feed_entry.title[:255],
                      'content': feed_entry.description,
                      'excerpt': feed_entry.get('summary'),
                      'status': PUBLISHED,
                      'creation_date': creation_date,
                      'start_publication': creation_date,
                      'last_update': datetime.now(),
                      'slug': slug}
        if not entry_dict['excerpt'] and self.auto_excerpt:
            entry_dict['excerpt'] = truncate_words(
                strip_tags(feed_entry.description), 50)
        if self.tags:
            entry_dict['tags'] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.image_enclosure:
            for enclosure in feed_entry.enclosures:
                if 'image' in enclosure.get('type') \
                        and enclosure.get('href'):
                    img_tmp = NamedTemporaryFile(delete=True)
                    img_tmp.write(urlopen(enclosure['href']).read())
                    img_tmp.flush()
                    entry.image.save(slug, File(img_tmp))
                    break
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get('author_detail'):
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get('name')),
                    feed_entry.author_detail.get('email', ''))
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get('name')))
            entry.authors.add(user)
        self.write_out(self.style.ITEM('OK\n'))
def import_entries(self, feed_entries):
    """Import entries"""
    for feed_entry in feed_entries:
        self.write_out('> %s... ' % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug):
            self.write_out(
                self.style.NOTICE('SKIPPED (already imported)\n'))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {
            'title': feed_entry.title[:255],
            'content': feed_entry.description,
            'excerpt': feed_entry.get('summary'),
            'status': PUBLISHED,
            'creation_date': creation_date,
            'start_publication': creation_date,
            'last_update': datetime.now(),
            'slug': slug
        }
        if not entry_dict['excerpt'] and self.auto_excerpt:
            entry_dict['excerpt'] = truncate_words(
                strip_tags(feed_entry.description), 50)
        if self.category_tag:
            entry_dict['tags'] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get('author_detail'):
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get('name')),
                    feed_entry.author_detail.get('email', ''))
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get('name')))
            entry.authors.add(user)
        self.write_out(self.style.ITEM('OK\n'))
def load_blog(self, item):
    """
    Import content as blog entry

    :param item: entry in json format
    """
    slug = item['slug']
    if Entry.objects.filter(slug=slug).exists():
        return

    status_dict = {
        'draft': DRAFT,
        'publish': PUBLISHED,
    }
    content = self.linebreaks(self.process_images(item['content']))
    entry = Entry(
        title=item['title'],
        slug=slug,
        status=status_dict[item['post_status']],
        creation_date=self.localtime(item['date']),
        content=content,
    )
    if item['meta'].get('author'):
        entry.display_author = item['meta']['author'].split(',')[0]
    if item['tags'] and item['tags'].strip():
        entry.tags = item['tags']
    if item['thumbnail']:
        resp = requests.get(item['thumbnail'])
        entry.image.save(item['thumbnail'].split('/')[-1],
                         ImageFile(StringIO(resp.content)),
                         save=False)
    entry.save()
    entry.sites.add(Site.objects.get_current())
    for category_title in item['categories'].split(', '):
        if category_title and category_title.strip():
            category_slug = slugify(category_title)
            category, _ = Category.objects.get_or_create(
                slug=category_slug, defaults={'title': category_title})
            entry.categories.add(category)
def import_posts(self):
    category = self.get_category()
    for post in self.blogger_manager.get_posts(self.blogger_blog_id):
        creation_date = convert_blogger_timestamp(post.published.text)
        status = DRAFT if is_draft(post) else PUBLISHED
        title = post.title.text or ''
        content = post.content.text or ''
        slug = slugify(post.title.text or get_post_id(post))[:255]
        try:
            entry = Entry.objects.get(sites=self.SITE,
                                      authors=self.default_author,
                                      categories=category,
                                      status=status, title=title,
                                      content=content,
                                      creation_date=creation_date,
                                      slug=slug)
            output = self.style.TITLE(
                'Skipped %s (already migrated)\n' % entry)
            self.write_out(output)
            continue
        except Entry.DoesNotExist:
            entry = Entry(status=status, title=title, content=content,
                          creation_date=creation_date, slug=slug)
            if self.default_author:
                entry.author = self.default_author
            entry.tags = ','.join(
                [slugify(cat.term) for cat in post.category])
            entry.last_update = convert_blogger_timestamp(
                post.updated.text)
            entry.save()
            entry.sites.add(self.SITE)
            entry.categories.add(category)
            entry.authors.add(self.default_author)
            try:
                self.import_comments(entry, post)
            except gdata_service.RequestError:
                # comments not available for this post
                pass
            output = self.style.TITLE(
                'Migrated %s + %s comments\n'
                % (entry, len(Comment.objects.for_model(entry))))
        self.write_out(output)
def setUp(self):
    super(NewsTestCase, self).setUp()
    current_site = Site.objects.get_current()

    entry = Entry(
        title='Test News',
        slug='test-news',
        content='Test content of this test news item.',
        excerpt='Test content...',
        tags='Redmap,',
        status=PUBLISHED,
        creation_date=datetime(2012, 10, 30, 12, 30),
    )
    entry.save()
    news_category, created = Category.objects.get_or_create(slug='news')
    entry.categories.add(news_category)
    entry.sites.add(current_site)
    entry.save()

    article = Entry(
        title='Test Article',
        slug='test-article',
        content='Test content of this test article.',
        excerpt='Test content...',
        tags='Redmap,',
        status=PUBLISHED,
        creation_date=datetime(2012, 10, 30, 12, 30),
    )
    article.save()
    articles_category, created = Category.objects.get_or_create(
        slug='articles')
    article.categories.add(articles_category)
    article.sites.add(current_site)
    article.save()