class TestWidthHeightOperation(ImageOperationTestCase):
    """Tests for the ``width-NNN`` / ``height-NNN`` resize filter specs."""
    operation_class = image_operations.WidthHeightOperation

    # (filter spec, expected parsed kwargs)
    filter_spec_tests = [
        ('width-800', dict(method='width', size=800)),
        ('height-600', dict(method='height', size=600)),
    ]
    # Specs that must fail to parse
    filter_spec_error_tests = [
        'width',
        'width-800x600',
        'width-abc',
        'width-800-c100',
    ]
    # (filter spec, source image, expected recorded Willow operations)
    run_tests = [
        # Basic usage of width
        ('width-400', Image(width=1000, height=500), [
            ('resize', ((400, 200), ), {}),
        ]),
        # Basic usage of height
        ('height-400', Image(width=1000, height=500), [
            ('resize', ((800, 400), ), {}),
        ]),
    ]
def create_images_from_urls_in_content(self, body):
    """Create Image objects and transfer image files to media root.

    Downloads every image referenced by an <img> tag in *body*, stores it
    as a Wagtail ``Image`` and rewrites the HTML so each ``src`` points at
    the locally saved copy.  Images that cannot be fetched are skipped.

    Returns the (possibly rewritten) body string.
    """
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll('img'):
        old_url = img['src']
        # BUG FIX: the original tested ``'width' in img`` which searches a
        # tag's *children*, not its attributes, and could leave ``width``
        # unbound when only a height attribute was present.  ``Tag.get``
        # reads the attribute with a default, which is what was intended.
        width = img.get('width', 100)
        height = img.get('height', 100)
        path, file_ = os.path.split(img['src'])
        if not img['src']:
            continue  # Blank image
        try:
            remote_image = urllib.request.urlretrieve(img['src'])
        except (urllib.error.HTTPError, urllib.error.URLError,
                UnicodeEncodeError):
            print("Unable to import " + img['src'])
            continue
        image = Image(title=file_, width=width, height=height)
        try:
            image.file.save(file_, File(open(remote_image[0], 'rb')))
            image.save()
            new_url = image.file.url
            body = body.replace(old_url, new_url)
            body = self.convert_html_entities(body)
        except TypeError:
            print("Unable to import image {}".format(remote_image[0]))
    return body
class TestMinMaxOperation(ImageOperationTestCase):
    """Tests for the ``min-WxH`` / ``max-WxH`` resize filter specs."""
    operation_class = image_operations.MinMaxOperation

    # (filter spec, expected parsed kwargs)
    filter_spec_tests = [
        ('min-800x600', dict(method='min', width=800, height=600)),
        ('max-800x600', dict(method='max', width=800, height=600)),
    ]
    # Specs that must fail to parse
    filter_spec_error_tests = [
        'min',
        'min-800',
        'min-abc',
        'min-800xabc',
        'min-800x600-',
        'min-800x600-c100',
        'min-800x600x10',
    ]
    # (filter spec, source image, expected recorded Willow operations)
    run_tests = [
        # Basic usage of min
        ('min-800x600', Image(width=1000, height=1000), [
            ('resize', ((800, 800), ), {}),
        ]),
        # Basic usage of max
        ('max-800x600', Image(width=1000, height=1000), [
            ('resize', ((600, 600), ), {}),
        ]),
    ]
def process_content_image(self, content):
    """Replace WordPress-hosted <img> tags in *content* with Wagtail embeds.

    Only images whose src contains 'wp-content' or 'files' are imported.
    Returns the rewritten markup (as produced by ``ET.tostring``), or the
    original falsy *content* unchanged.
    """
    self.stdout.write('\tGenerate and replace entry content images....')
    if content:
        root = lxml.html.fromstring(content)
        for img_node in root.iter('img'):
            parent_node = img_node.getparent()
            if 'wp-content' in img_node.attrib[
                    'src'] or 'files' in img_node.attrib['src']:
                # _import_image may return a falsy value on failure,
                # in which case the tag is left untouched.
                image = self._import_image(img_node.attrib['src'])
                if image:
                    # NOTE(review): title may be None when the tag has
                    # neither a title nor an alt attribute — confirm
                    # WagtailImage accepts that.
                    title = img_node.attrib.get(
                        'title') or img_node.attrib.get('alt')
                    new_image = WagtailImage(file=File(file=image),
                                             title=title)
                    new_image.save()
                    # If the image is wrapped in a link, replace the whole
                    # anchor with the embed; otherwise append the embed and
                    # drop just the bare <img> tag.
                    if parent_node.tag == 'a':
                        parent_node.addnext(
                            ET.XML(self._image_to_embed(new_image)))
                        parent_node.drop_tree()
                    else:
                        parent_node.append(
                            ET.XML(self._image_to_embed(new_image)))
                        img_node.drop_tag()
        content = ET.tostring(root)
    return content
def create_images_from_urls_in_content(self, body):
    """Create Image objects and transfer image files to media root.

    Downloads every image referenced by an <img> tag in *body*, stores it
    as a Wagtail ``Image`` and rewrites each ``src`` to the local copy.
    HTTP and URL errors are reported and the image is skipped.

    Returns the (possibly rewritten) body string.
    """
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll('img'):
        old_url = img['src']
        # BUG FIX: the original tested ``'width' in img`` (which inspects
        # the tag's children, not its attributes) and could leave ``width``
        # unbound when only a height attribute was present.
        width = img.get('width', 100)
        height = img.get('height', 100)
        path, file_ = os.path.split(img['src'])
        if not img['src']:
            continue  # Blank image
        try:
            remote_image = urllib.request.urlretrieve(img['src'])
        except urllib.error.HTTPError:
            print("Unable to import " + img['src'])
            continue
        except urllib.error.URLError:
            print("URL error - try again " + img['src'])
            continue
        image = Image(title=file_, width=width, height=height)
        image.file.save(file_, File(open(remote_image[0], 'rb')))
        image.save()
        new_url = image.file.url
        body = body.replace(old_url, new_url)
        body = self.convert_html_entities(body)
    return body
def create_images_from_urls_in_content(self, body):
    """Create Image objects and transfer image files to media root.

    Downloads each <img> referenced in *body* over HTTP, stores it as a
    Wagtail ``Image`` and rewrites the src to the local copy.  Blank,
    ``data:``-embedded and unfetchable images are skipped.

    Returns the (possibly rewritten) body string.
    """
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll('img'):
        # BUG FIX: the original tested ``'width' in img`` (which searches
        # the tag's children, not its attributes) and could leave the
        # variables unbound; ``Tag.get`` reads the attribute with a default.
        width = img.get('width', 100)
        height = img.get('height', 100)
        try:
            path, file_ = os.path.split(img['src'])
            if not img['src']:
                continue  # Blank image
            if img['src'].startswith('data:'):
                continue  # Embedded image
            old_url = img['src']
            headers = {
                'Content-Type': 'application/json',
                'Accept': 'application/json',
                'User-Agent': "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0"
            }
            req = requests.get(self.prepare_url(img['src']),
                               headers=headers, timeout=10)
            if req.status_code == 200:
                remote_image = tempfile.NamedTemporaryFile()
                remote_image.write(req.content)
                # BUG FIX: flush so the getsize()/open() below see the
                # bytes — NamedTemporaryFile writes are buffered.
                remote_image.flush()
            else:
                remote_image = None
        except (urllib.error.HTTPError, urllib.error.URLError,
                UnicodeEncodeError, requests.exceptions.SSLError,
                KeyError, requests.exceptions.ConnectionError,
                requests.exceptions.MissingSchema,
                requests.exceptions.InvalidSchema,
                requests.exceptions.InvalidURL):
            logging.warning("Unable to import image: " + img['src'])
            continue
        # Image titles are capped at 255 characters.
        if len(file_) > 255:
            file_ = file_[:255]
        image = Image(title=file_, width=width, height=height)
        try:
            if remote_image and os.path.getsize(remote_image.name) > 0:
                #TODO: Log error of files that don't import for manual fix
                imageFile = File(open(remote_image.name, 'rb'))
                image.file.save(file_, imageFile)
                image.save()
                remote_image.close()
                new_url = image.file.url
                body = body.replace(old_url, new_url)
                body = self.convert_html_entities(body)
        except TypeError:
            logging.warning("Unable to import image: " + img['src'])
    return body
def create_blog_pages(self, posts, blog_index, *args, **options):
    """create Blog post entries from wordpress data

    For each post dict: decode HTML entities, import inline content
    images, resolve the author to a user, then create the BlogPage under
    *blog_index* (or update it if a page with the same slug exists).
    A featured image, categories/tags and comments are attached when
    present.
    """
    for post in posts:
        post_id = post.get('ID')
        title = post.get('title')
        if title:
            new_title = self.convert_html_entities(title)
            title = new_title
        slug = post.get('slug')
        description = post.get('description')
        if description:
            description = self.convert_html_entities(description)
        body = post.get('content')
        # get image info from content and create image objects
        body = self.create_images_from_urls_in_content(body)
        # author/user data
        author = post.get('author')
        user = self.create_user(author)
        categories = post.get('terms')
        # format the date
        date = post.get('date')[:10]
        # Update an existing page in place, otherwise create a new child
        # of the blog index.
        try:
            new_entry = BlogPage.objects.get(slug=slug)
            new_entry.title = title
            new_entry.body = body
            new_entry.owner = user
            new_entry.save()
        except BlogPage.DoesNotExist:
            new_entry = blog_index.add_child(instance=BlogPage(
                title=title, slug=slug, search_description="description",
                date=date, body=body, owner=user))
        featured_image = post.get('featured_image')
        if featured_image is not None:
            title = post['featured_image']['title']
            source = post['featured_image']['source']
            path, file_ = os.path.split(source)
            # NOTE(review): site-specific hostname rewrite — confirm it is
            # still wanted for this import.
            source = source.replace('stage.swoon', 'swoon')
            try:
                remote_image = urllib.request.urlretrieve(
                    self.prepare_url(source))
                # Header images are stored at a fixed 640x290 size.
                width = 640
                height = 290
                header_image = Image(title=title, width=width,
                                     height=height)
                header_image.file.save(
                    file_, File(open(remote_image[0], 'rb')))
                header_image.save()
            except UnicodeEncodeError:
                header_image = None
                print('unable to set header image {}'.format(source))
        else:
            header_image = None
        new_entry.header_image = header_image
        new_entry.save()
        if categories:
            self.create_categories_and_tags(new_entry, categories)
        if self.should_import_comments:
            self.import_comments(post_id, slug)
def add_to_collection(item, collection):
    """Create a Movie/EBook child of *collection* for the media file *item*.

    Sidecar files next to the media file are honoured when present:
    ``<item>.description.txt`` (short description) and
    ``<item>.thumbnail.jpeg`` (thumbnail image).  Files whose extension is
    in neither MOVIE_EXTENSIONS nor EBOOK_EXTENSIONS are ignored.
    """
    description_file = item + ".description.txt"
    if os.path.isfile(description_file):
        # Context manager closes the handle (the original leaked it).
        with open(description_file, 'r') as df:
            description = df.read()
    else:
        description = ""
    file_name = os.path.basename(item)
    title = ".".join(file_name.split(".")[:-1])
    title = title.replace("_", " ")
    thumbnail_file = item + ".thumbnail.jpeg"
    if os.path.isfile(thumbnail_file):
        # BUG FIX: JPEG data must be opened in binary mode ('rb'); text
        # mode raises UnicodeDecodeError on Python 3.
        with open(thumbnail_file, 'rb') as f:
            thumbnail = Image()
            thumbnail.file.save(os.path.basename(item) + ".jpeg", File(f))
            thumbnail.title = "Thumbnail for " + title
            thumbnail.save()
    else:
        thumbnail = None
    extension = file_name.split(".")[-1]
    if extension in MOVIE_EXTENSIONS:
        cls = models.Movie
    elif extension in EBOOK_EXTENSIONS:
        cls = models.EBook
    else:
        return
    slug = slugify(title)
    path = collection.path + "{pos:s}".format(
        pos=str(collection.numchild + 1).zfill(4))
    try:
        obj = collection.get_children().filter(slug=slug)[0]
        # Ensure that other objects with same slug and path are deleted
        others = collection.get_children().filter(slug=slug).exclude(
            id=obj.id)
        if others.exists():
            # ``warning``: ``logger.warn`` is a deprecated alias.
            logger.warning(
                "Other objects with same path existed and were deleted. File: "
                + item)
            others.delete()
    except IndexError:
        obj = cls(path=path)
    obj.numchild = 0
    obj.depth = collection.depth + 1
    obj.show_in_menus = False
    obj.resource_link = item
    obj.url_path = os.path.join(collection.url_path, slug) + "/"
    obj.slug = slug
    obj.title = title
    obj.live = True
    obj.short_description = description
    # NOTE(review): ``options`` is not a parameter of this function — it
    # relies on an enclosing/module scope name; confirm it is in scope.
    obj.author = options["author"]
    obj.duration = ""
    obj.thumbnail = thumbnail
    obj.save()
    collection.numchild += 1
    collection.save()
def save_images_to_cms(self):
    '''Save images to the database with:
    - title: the file name
    - tags: the directory containing the image'''
    # self.imgs yields (full path, containing directory, file name).
    for img_path, dirname, img_name in self.imgs:
        # NOTE(review): the docstring says tags should be the directory,
        # yet ``img_name`` is passed — confirm whether ``dirname`` was
        # intended here.
        image = Image(title=img_name,
                      file=ImageFile(open(img_path, "rb"),
                                     name=os.path.basename(img_path)),
                      tags=img_name)  # is this correct??
        image.save()
def import_header_image(self, entry, items, image_id):
    """Attach the WordPress attachment matching *image_id* to *entry*
    as its header image.

    *items* are WXR <item> elements; only those whose post_type is
    'attachment' and whose post_id equals *image_id* are imported.
    """
    self.stdout.write('\tImport header images....')
    for item in items:
        post_type = item.find(u'{{{0:s}}}post_type'.format(WP_NS)).text
        if post_type == 'attachment' and item.find(u'{{{0:s}}}post_id'.format(WP_NS)).text == image_id:
            title = item.find('title').text
            image_url = item.find(u'{{{0:s}}}attachment_url'.format(WP_NS)).text
            # _import_image downloads the remote file; wrap it for Wagtail.
            img = self._import_image(image_url)
            new_image = WagtailImage(file=File(file=img, name=title), title=title)
            new_image.save()
            entry.header_image = new_image
            entry.save()
def import_header_image(self, entry, items, image_id):
    """Attach the WordPress attachment matching *image_id* to *entry*
    as its header image.

    *items* are WXR <item> elements; only those whose post_type is
    'attachment' and whose post_id equals *image_id* are imported.
    """
    self.stdout.write('\tImport header images....')
    for item in items:
        post_type = item.find(u'{{{0:s}}}post_type'.format(WP_NS)).text
        if post_type == 'attachment' and item.find(
                u'{{{0:s}}}post_id'.format(WP_NS)).text == image_id:
            title = item.find('title').text
            image_url = item.find(
                u'{{{0:s}}}attachment_url'.format(WP_NS)).text
            # _import_image downloads the remote file; wrap it for Wagtail.
            img = self._import_image(image_url)
            new_image = WagtailImage(file=File(file=img, name=title),
                                     title=title)
            new_image.save()
            entry.header_image = new_image
            entry.save()
def generate_photo(request, page):
    """Generate an identicon photo for *page* if it has none.

    Renders a 64px Vizhash identicon from the page title, wraps the PNG
    bytes in an uploaded-file object and stores it as a Wagtail Image,
    then links it as ``page.photo``.
    """
    if isinstance(page, PostPage) and not page.photo:
        im = Vizhash(page.title, 64).identicon()
        buffer = BytesIO()
        im.save(fp=buffer, format="PNG")
        data = buffer.getvalue()
        content_file = ContentFile(data)
        # BUG FIX: the original passed the bound method ``content_file.tell``
        # (not its result) as the file *size*, shifting the remaining
        # positional arguments.  Signature:
        # InMemoryUploadedFile(file, field_name, name, content_type, size, charset)
        image_file = InMemoryUploadedFile(content_file, None, page.title,
                                          "image/png", len(data), None)
        image = Image(title=page.title, file=image_file,
                      width=im.width, height=im.height,
                      created_at=page.created, file_size=len(data))
        image.save()
        page.photo = image
        page.save()
def process_content_image(self, content):
    """Replace WordPress-hosted images in *content* with Wagtail embeds.

    Only <img> tags whose src contains 'wp-content' or 'files' are
    imported.  Returns the rewritten markup (as produced by
    ``ET.tostring``), or the original falsy *content* unchanged.
    """
    self.stdout.write('\tGenerate and replace entry content images....')
    if content:
        root = lxml.html.fromstring(content)
        for img_node in root.iter('img'):
            parent_node = img_node.getparent()
            if 'wp-content' in img_node.attrib['src'] or 'files' in img_node.attrib['src']:
                img = self._import_image(img_node.attrib['src'])
                # NOTE(review): title may be None when the tag has neither
                # title nor alt — confirm downstream handles that.
                title = img_node.attrib.get('title') or img_node.attrib.get('alt')
                new_image = WagtailImage(file=File(file=img, name=title), title=title)
                new_image.save()
                # Replace a wrapping <a> wholesale; otherwise append the
                # embed and drop just the bare <img> tag.
                if parent_node.tag == 'a':
                    parent_node.addnext(ET.XML(self._image_to_embed(new_image)))
                    parent_node.drop_tree()
                else:
                    parent_node.append(ET.XML(self._image_to_embed(new_image)))
                    img_node.drop_tag()
        content = ET.tostring(root)
    return content
def process_content_image(self, content):
    """Replace Blogger-hosted images in *content* with Wagtail embeds.

    Only <img> tags whose src points at bp.blogspot.com are imported.
    Returns the rewritten markup (as produced by ``ET.tostring``), or the
    original falsy *content* unchanged.
    """
    self.stdout.write('\tGenerate and replace entry content images....')
    if content:
        root = lxml.html.fromstring(content)
        for img_node in root.iter('img'):
            parent_node = img_node.getparent()
            if 'bp.blogspot.com' in img_node.attrib['src']:
                self.stdout.write('\t\t{}'.format(img_node.attrib['src']))
                image = self._import_image(img_node.attrib['src'])
                # Use the last URL path segment as the image title.
                title = img_node.attrib['src'].rsplit('/', 1)[1]
                new_image = WagtailImage(file=File(file=image, name=title), title=title)
                new_image.save()
                # Replace a wrapping <a> wholesale; otherwise append the
                # embed and drop just the bare <img> tag.
                if parent_node.tag == 'a':
                    parent_node.addnext(ET.XML(self._image_to_embed(new_image)))
                    parent_node.drop_tree()
                else:
                    parent_node.append(ET.XML(self._image_to_embed(new_image)))
                    img_node.drop_tag()
        content = ET.tostring(root)
    return content
def test_cache_key_fill_filter_with_focal_point(self):
    """Fill filters mix focal point data into the cache key (pinned hash)."""
    image = Image(
        width=1000,
        height=1000,
        focal_point_width=100,
        focal_point_height=100,
        focal_point_x=500,
        focal_point_y=500,
    )
    fil = Filter(spec='fill-100x100')
    cache_key = fil.get_cache_key(image)
    self.assertEqual(cache_key, '0bbe3b2f')
def import_entries(self):
    """Import Zinnia entries as EntryPage children of self.blogpage.

    Copies header images over, re-imports local content images into
    Wagtail (rewriting their src), and carries categories and tags across.
    Existing pages (matched by slug) are reused rather than recreated.
    """
    self.stdout.write("Importing entries...")
    entries = ZinniaEntry.objects.all()
    for entry in entries:
        self.stdout.write(entry.title)
        # Header images
        if entry.image:
            header_image = WagtailImage(
                file=entry.image,
                title=os.path.basename(entry.image.url))
            self.stdout.write(
                '\tImported header image: {}'.format(entry.image))
            header_image.save()
        else:
            header_image = None
        self.stdout.write('\tGenerate and replace entry content images....')
        if entry.content:
            root = lxml.html.fromstring(entry.content)
            for el in root.iter('img'):
                if el.attrib['src'].startswith(settings.MEDIA_URL):
                    old_image = el.attrib['src'].replace(
                        settings.MEDIA_URL, '')
                    # BUG FIX: image files must be opened in binary mode;
                    # 'r' raises UnicodeDecodeError on Python 3.
                    with open('{}/{}'.format(settings.MEDIA_ROOT, old_image),
                              'rb') as image_file:
                        new_image = WagtailImage(
                            file=File(file=image_file,
                                      name=os.path.basename(old_image)),
                            title=os.path.basename(old_image))
                        new_image.save()
                        el.attrib['src'] = new_image.file.url
                        self.stdout.write(
                            '\t\t{}'.format(new_image.file.url))
            # New content with images replaced
            content = lxml.html.tostring(root, pretty_print=True)
        else:
            content = entry.content
        # Create page
        try:
            page = EntryPage.objects.get(slug=entry.slug)
        except EntryPage.DoesNotExist:
            page = EntryPage(
                title=entry.title,
                body=content,
                slug=entry.slug,
                go_live_at=entry.start_publication,
                expire_at=entry.end_publication,
                first_published_at=entry.creation_date,
                date=entry.creation_date,
                owner=entry.authors.first(),
                seo_title=entry.title,
                search_description=entry.excerpt,
                live=entry.is_visible,
                header_image=header_image
            )
            self.blogpage.add_child(instance=page)
            # NOTE(review): the revision is saved on the *blog index*,
            # not the newly created page — confirm this is intended.
            revision = self.blogpage.save_revision()
            revision.publish()
        self.import_entry_categories(entry, page)
        self.import_entry_tags(entry, page)
        page.save()
        page.save_revision(changed=False)
        self.entries[entry.pk] = page
def fetch_hut_images():
    """Fetch a representative image for every HutPage with a link_url.

    Prefers the first fancybox-gallery <img> in the linked page's body;
    falls back to the page's metadata image.  The downloaded image is
    stored as a Wagtail Image and set as the page's meta_image.
    """
    for hpage in HutPage.objects.all():
        if not hpage.link_url:
            continue
        try:
            r = requests.get(hpage.link_url, timeout=settings.API_TIMEOUT)
        except requests.exceptions.RequestException as e:
            logger.exception(str(e))
            continue
        soup = BeautifulSoup(r.content, 'html5lib')
        # BUG FIX: default so ``img_url`` is always bound — the original
        # hit a NameError when the gallery anchor had no <img> child.
        img_url = None
        a_tag = soup.find_all("a", {"class": "fancybox-gallery"})
        if a_tag:
            img_tag = a_tag[0].find_all("img")
            if img_tag:
                img_url = 'http://www.doc.govt.nz/%s' % img_tag[0].get(
                    'src')
                logger.debug("Hut %s using img %s from HTML body.",
                             str(hpage.pk), img_url)
        else:
            page = metadata_parser.MetadataParser(url=hpage.link_url)
            img_url = page.get_metadata_link('image')
            logger.debug("Hut %s using img %s from HTML meta",
                         str(hpage.pk), img_url)
        if img_url:
            try:
                response = requests.get(img_url,
                                        timeout=settings.API_TIMEOUT)
            except requests.exceptions.RequestException as e:
                logger.exception(str(e))
            else:
                # BUG FIX: only build the Image when the download
                # succeeded — the original referenced ``response`` even
                # after the request raised.
                image = Image(title=hpage.title,
                              file=ImageFile(BytesIO(response.content),
                                             name=img_url.split('/')[-1]))
                image.save()
                hpage.meta_image = image
                hpage.save()
        else:
            logger.debug("No img found for hut %s", str(hpage.pk))
def handle(self, *args, **options):
    """Capture a screenshot of each project's first public 'main' link
    and store it as the project's image (management-command entry point)."""
    browser = Browser('phantomjs')
    # Fixed browser window / crop size (width, height).
    dims = (1600, 1000)
    browser.driver.set_window_size(dims[0], dims[1])
    for project in ProjectPage.objects.all():
        links = project.links.filter(public=True, type='main')
        if not links:
            continue
        # Use only the first link for now
        link = links[0]
        print("Visiting %s (%s)" % (link.url, link))
        browser.visit(link.url)
        assert browser.status_code.is_success()
        # Give the page time to render before the screenshot.
        time.sleep(5)
        with tempfile.NamedTemporaryFile(suffix='.png',
                                         prefix='project') as tmpf:
            browser.driver.save_screenshot(tmpf.name)
            pil_image = PILImage.open(tmpf)
            # Crop to the configured window size, then overwrite the
            # temp file with the cropped PNG.
            pil_image = pil_image.crop((0, 0, dims[0], dims[1]))
            tmpf.seek(0)
            tmpf.truncate(0)
            pil_image.save(tmpf, format='PNG')
            title = '%s screenshot' % project.title
            # Reuse an existing Image with the same title if present.
            try:
                image = Image.objects.get(title=title)
            except Image.DoesNotExist:
                image = Image(title=title)
            image.file = ImageFile(tmpf)
            image.save()
            project.image = image
            project.save(update_fields=['image'])
    browser.quit()
def create_images_from_urls_in_content(self, body):
    """Create Image objects and transfer image files to media root.

    Downloads every image referenced by an <img> tag in *body*, stores it
    as a Wagtail ``Image`` and rewrites each ``src`` to the local copy.
    Blank, ``data:``-embedded and unfetchable images are skipped.

    Returns the (possibly rewritten) body string.
    """
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll('img'):
        old_url = img['src']
        # BUG FIX: the original tested ``'width' in img`` (which inspects
        # the tag's children, not its attributes) and could leave ``width``
        # unbound when only a height attribute was present.
        width = img.get('width', 100)
        height = img.get('height', 100)
        path, file_ = os.path.split(img['src'])
        if not img['src']:
            continue  # Blank image
        if img['src'].startswith('data:'):
            continue  # Embedded image
        try:
            remote_image = urllib.request.urlretrieve(
                self.prepare_url(img['src']))
        except (urllib.error.HTTPError, urllib.error.URLError,
                UnicodeEncodeError, ValueError):
            print("Unable to import " + img['src'])
            continue
        image = Image(title=file_, width=width, height=height)
        try:
            image.file.save(file_, File(open(remote_image[0], 'rb')))
            image.save()
            new_url = image.file.url
            body = body.replace(old_url, new_url)
            body = self.convert_html_entities(body)
        except TypeError:
            print("Unable to import image {}".format(remote_image[0]))
    return body
def test_run(self): image = Image(**image_kwargs) # Make operation operation = self.operation_class(*filter_spec.split('-')) # Make operation recorder operation_recorder = WillowOperationRecorder( (image.width, image.height)) # Run operation.run(operation_recorder, image, {}) # Check self.assertEqual(operation_recorder.ran_operations, expected_output)
class TestDoNothingOperation(ImageOperationTestCase):
    """Tests for the pass-through operation ('original' and similar specs)."""
    operation_class = image_operations.DoNothingOperation

    # Any single-word spec parses to empty kwargs
    filter_spec_tests = [
        ('original', dict()),
        ('blahblahblah', dict()),
        ('123456', dict()),
    ]
    # The operation takes no parameters
    filter_spec_error_tests = [
        'cannot-take-multiple-parameters',
    ]
    # Running it records no Willow operations
    run_tests = [
        ('original', Image(width=1000, height=1000), []),
    ]
# Snippet: download a remote image and attach it to a model instance
# as a Wagtail Image.
from io import BytesIO

import requests
from django.core.files.images import ImageFile
from wagtail.wagtailimages.models import Image

# event is a model object, substitute your model
# filename and title are up to you
# in my model, event.event_image is a ForeignKey to wagtailimages.Image
# NOTE(review): ``url``, ``title``, ``filename`` and ``event`` must be
# defined before this snippet runs.
response = requests.get(url)
image = Image(title=title,
              file=ImageFile(BytesIO(response.content), name=filename))
image.save()
event.event_image = image
event.save()
def test_cache_key_fill_filter(self):
    """Fill filters without a focal point produce a pinned cache key."""
    image = Image(width=1000, height=1000)
    fil = Filter(spec='fill-100x100')
    cache_key = fil.get_cache_key(image)
    self.assertEqual(cache_key, '2e16d0ba')
def test_cache_key(self):
    """Non-fill filters contribute no extra cache key data (empty string)."""
    image = Image(width=1000, height=1000)
    fil = Filter(spec='max-100x100')
    cache_key = fil.get_cache_key(image)
    self.assertEqual(cache_key, '')
def add_to_collection(item, collection):
    """Create a Movie/EBook child of *collection* for the media file *item*.

    Sidecar files next to the media file are honoured when present:
    ``<item>.description.txt`` (short description) and
    ``<item>.thumbnail.jpeg`` (thumbnail image).  Files whose extension is
    in neither MOVIE_EXTENSIONS nor EBOOK_EXTENSIONS are ignored.
    """
    description_file = item + ".description.txt"
    if os.path.isfile(description_file):
        # Context manager closes the handle (the original leaked it).
        with open(description_file, 'r') as df:
            description = df.read()
    else:
        description = ""
    file_name = os.path.basename(item)
    title = ".".join(file_name.split(".")[:-1])
    title = title.replace("_", " ")
    thumbnail_file = item + ".thumbnail.jpeg"
    if os.path.isfile(thumbnail_file):
        # BUG FIX: JPEG data must be opened in binary mode ('rb'); text
        # mode raises UnicodeDecodeError on Python 3.
        with open(thumbnail_file, 'rb') as f:
            thumbnail = Image()
            thumbnail.file.save(os.path.basename(item) + ".jpeg", File(f))
            thumbnail.title = "Thumbnail for " + title
            thumbnail.save()
    else:
        thumbnail = None
    extension = file_name.split(".")[-1]
    if extension in MOVIE_EXTENSIONS:
        cls = models.Movie
    elif extension in EBOOK_EXTENSIONS:
        cls = models.EBook
    else:
        return
    slug = slugify(title)
    path = collection.path + "{pos:s}".format(
        pos=str(collection.numchild + 1).zfill(4))
    try:
        obj = collection.get_children().filter(slug=slug)[0]
        # Ensure that other objects with same slug and path are deleted
        others = collection.get_children().filter(slug=slug).exclude(
            id=obj.id)
        if others.exists():
            # ``warning``: ``logger.warn`` is a deprecated alias.
            logger.warning(
                "Other objects with same path existed and were deleted. File: "
                + item)
            others.delete()
    except IndexError:
        obj = cls(path=path)
    obj.numchild = 0
    obj.depth = collection.depth + 1
    obj.show_in_menus = False
    obj.resource_link = item
    obj.url_path = os.path.join(collection.url_path, slug) + "/"
    obj.slug = slug
    obj.title = title
    obj.live = True
    obj.short_description = description
    # NOTE(review): ``options`` is not a parameter of this function — it
    # relies on an enclosing/module scope name; confirm it is in scope.
    obj.author = options["author"]
    obj.duration = ""
    obj.thumbnail = thumbnail
    obj.save()
    collection.numchild += 1
    collection.save()
class TestFillOperation(ImageOperationTestCase):
    """Tests for the ``fill-WxH[-cN]`` crop-and-resize filter specs."""
    operation_class = image_operations.FillOperation

    # (filter spec, expected parsed kwargs); crop_closeness is cN / 100,
    # capped at 1.
    filter_spec_tests = [
        ('fill-800x600', dict(width=800, height=600, crop_closeness=0)),
        ('hello-800x600', dict(width=800, height=600, crop_closeness=0)),
        ('fill-800x600-c0', dict(width=800, height=600, crop_closeness=0)),
        ('fill-800x600-c100', dict(width=800, height=600, crop_closeness=1)),
        ('fill-800x600-c50', dict(width=800, height=600, crop_closeness=0.5)),
        ('fill-800x600-c1000', dict(width=800, height=600, crop_closeness=1)),
        ('fill-800000x100', dict(width=800000, height=100, crop_closeness=0)),
    ]
    # Specs that must fail to parse
    filter_spec_error_tests = [
        'fill',
        'fill-800',
        'fill-abc',
        'fill-800xabc',
        'fill-800x600-',
        'fill-800x600x10',
        'fill-800x600-d100',
    ]
    # (filter spec, source image, expected recorded Willow operations)
    run_tests = [
        # Basic usage
        ('fill-800x600', Image(width=1000, height=1000), [
            ('crop', ((0, 125, 1000, 875), ), {}),
            ('resize', ((800, 600), ), {}),
        ]),
        # Basic usage with an oddly-sized original image
        # This checks for a rounding precision issue (#968)
        ('fill-200x200', Image(width=539, height=720), [
            ('crop', ((0, 90, 539, 629), ), {}),
            ('resize', ((200, 200), ), {}),
        ]),
        # Closeness shouldn't have any effect when used without a focal point
        ('fill-800x600-c100', Image(width=1000, height=1000), [
            ('crop', ((0, 125, 1000, 875), ), {}),
            ('resize', ((800, 600), ), {}),
        ]),
        # Should always crop towards focal point. Even if no closeness is set
        ('fill-80x60', Image(
            width=1000,
            height=1000,
            focal_point_x=1000,
            focal_point_y=500,
            focal_point_width=0,
            focal_point_height=0,
        ), [
            # Crop the largest possible crop box towards the focal point
            ('crop', ((0, 125, 1000, 875), ), {}),
            # Resize it down to final size
            ('resize', ((80, 60), ), {}),
        ]),
        # Should crop as close as possible without upscaling
        ('fill-80x60-c100', Image(
            width=1000,
            height=1000,
            focal_point_x=1000,
            focal_point_y=500,
            focal_point_width=0,
            focal_point_height=0,
        ), [
            # Crop as close as possible to the focal point
            ('crop', ((920, 470, 1000, 530), ), {}),
            # No need to resize, crop should've created an 80x60 image
        ]),
        # Ditto with a wide image
        # Using a different filter so method name doesn't clash
        ('fill-100x60-c100', Image(
            width=2000,
            height=1000,
            focal_point_x=2000,
            focal_point_y=500,
            focal_point_width=0,
            focal_point_height=0,
        ), [
            # Crop to the right hand side
            ('crop', ((1900, 470, 2000, 530), ), {}),
        ]),
        # Make sure that the crop box never enters the focal point
        ('fill-50x50-c100', Image(
            width=2000,
            height=1000,
            focal_point_x=1000,
            focal_point_y=500,
            focal_point_width=100,
            focal_point_height=20,
        ), [
            # Crop a 100x100 box around the entire focal point
            ('crop', ((950, 450, 1050, 550), ), {}),
            # Resize it down to 50x50
            ('resize', ((50, 50), ), {}),
        ]),
        # Test that the image is never upscaled
        ('fill-1000x800', Image(width=100, height=100), [
            ('crop', ((0, 10, 100, 90), ), {}),
        ]),
        # Test that the crop closeness gets capped to prevent upscaling
        ('fill-1000x800-c100', Image(
            width=1500,
            height=1000,
            focal_point_x=750,
            focal_point_y=500,
            focal_point_width=0,
            focal_point_height=0,
        ), [
            # Crop a 1000x800 square out of the image as close to the
            # focal point as possible. Will not zoom too far in to
            # prevent upscaling
            ('crop', ((250, 100, 1250, 900), ), {}),
        ]),
        # Test for an issue where a ZeroDivisionError would occur when the
        # focal point size, image size and filter size match
        # See: #797
        ('fill-1500x1500-c100', Image(
            width=1500,
            height=1500,
            focal_point_x=750,
            focal_point_y=750,
            focal_point_width=1500,
            focal_point_height=1500,
        ), [
            # This operation could probably be optimised out
            ('crop', ((0, 0, 1500, 1500), ), {}),
        ])
    ]
def create_blog_pages(self, posts, blog_index, *args, **options):
    """create Blog post entries from wordpress data

    For each post dict: decode HTML entities, import inline content
    images, format code blocks and rewrite URLs, then create the
    BlogPage under *blog_index* (or update it if a page with the same
    slug exists).  A featured image, categories/tags and comments are
    attached when present.
    """
    for post in posts:
        # BUG FIX: ``post_id`` was never assigned in this version, yet it
        # is used by import_comments() below (NameError at runtime).
        post_id = post.get('ID')
        title = post.get('title')
        print(title)
        if title:
            title = self.convert_html_entities(title)
        # TODO: Fix hardcoded replacement
        slug = post.get('slug') + "-html"
        description = post.get('description')
        if description:
            description = self.convert_html_entities(description)
        body = post.get('content')
        # get image info from content and create image objects
        body = self.create_images_from_urls_in_content(body)
        body = self.format_code_in_content(body)
        body = self.replace_twilioinc_urls(body)
        # author/user data
        author = post.get('author')
        user = self.create_user(author)
        categories = post.get('terms')
        # format the date
        date = post.get('date')[:10]
        # Update an existing page in place, otherwise create a new child
        # of the blog index.
        try:
            new_entry = BlogPage.objects.get(slug=slug)
            new_entry.title = title
            new_entry.body = body
            new_entry.owner = user
            new_entry.author = user
            new_entry.save()
        except BlogPage.DoesNotExist:
            new_entry = blog_index.add_child(instance=BlogPage(
                title=title, slug=slug, search_description="description",
                date=date, body=body, owner=user, author=user))
        print("Owner:")
        print(new_entry.owner)
        featured_image = post.get('featured_image')
        header_image = None
        if featured_image is not None and "source" in post['featured_image']:
            if 'title' in post['featured_image']:
                title = post['featured_image']['title']
            else:
                title = "Featured Image"
            source = post['featured_image']['source']
            path, file_ = os.path.split(source)
            source = source.replace('stage.swoon', 'swoon')
            try:
                headers = {
                    'Content-Type': 'application/json',
                    'Accept': 'application/json',
                    'User-Agent': "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0"
                }
                req = requests.get(self.prepare_url(source),
                                   headers=headers, timeout=10)
                remote_image = tempfile.NamedTemporaryFile()
                remote_image.write(req.content)
                # BUG FIX: flush so getsize()/open() below see the bytes —
                # NamedTemporaryFile writes are buffered.
                remote_image.flush()
                # Header images are stored at a fixed 640x290 size.
                width = 640
                height = 290
                if os.path.getsize(remote_image.name):
                    #TODO: Capture error for manual download
                    header_image = Image(title=title, width=width,
                                         height=height)
                    header_image.file.save(
                        file_, File(open(remote_image.name, 'rb')))
                    header_image.save()
            except UnicodeEncodeError:
                header_image = None
                print('unable to set header image {}'.format(source))
        else:
            header_image = None
        new_entry.header_image = header_image
        new_entry.save()
        if categories:
            self.create_categories_and_tags(new_entry, categories)
        if self.should_import_comments:
            self.import_comments(post_id, slug)