def set_featured_media(self, page: BlogPage, post):
    """Attach the WordPress post's featured media to ``page.header_image``.

    ``post`` is a WordPress REST API post dict fetched with ``_embed``;
    the media file is downloaded and stored as a wagtail Image.
    Returns silently when no featured media is referenced or embedded.

    NOTE(review): ``page`` is not saved here — the caller is expected to
    persist the ``header_image`` assignment.
    """
    featured_media_id: int = post.get("featured_media")
    if not featured_media_id:
        return
    featured_medias: list = post["_embedded"].get("wp:featuredmedia")
    if featured_medias is None:
        return
    # Scan the embedded media list for the entry matching the post's id.
    for feature_media in featured_medias:
        if feature_media.get("id") == featured_media_id:
            source_url = feature_media["source_url"]
            try:
                # Wordpress 5.3 API nests title in "rendered"
                title = feature_media["title"]["rendered"]
            except TypeError:
                # Fallback for older (or newer?) wordpress
                title = feature_media["title"]
            details = feature_media["media_details"]
            resp = requests.get(source_url, stream=True)
            if resp.status_code != requests.codes.ok:
                # Best-effort import: log and move on to the next entry.
                print("Unable to import " + source_url)
                continue
            # Buffer the download so wagtail can copy it into storage.
            fp = BytesIO()
            fp.write(resp.content)
            image = Image(title=title,
                          width=details["width"],
                          height=details["height"])
            image.file.save(details["file"], File(fp))
            image.save()
            page.header_image = image
def handle(self, *args, **options):
    """Import legacy blog posts from a Django fixture JSON file.

    Reads ``options['file']``, and for every ``blog.post`` fixture entry
    whose slug does not already exist creates a PostPage under the live
    NewsPage, attaches its categories, and downloads its header image
    from the legacy media server when available.
    """
    news_page = NewsPage.objects.live().first()
    blog_file = options['file']
    with open(blog_file) as json_blogs:
        data = json.load(json_blogs)
        posts = [p for p in data if p['model'] == 'blog.post']
        for p in posts:
            fields = p['fields']
            # Skip posts that were already imported (matched by slug).
            if not PostPage.objects.filter(slug=fields['slug']).exists():
                # Append a citation link to the body when present.
                # NOTE(review): if only one of link/citation is set, the
                # anchor renders with the missing half empty — confirm.
                if fields['link'] or fields['citation']:
                    body = "{}<a href='{}'>{}</a>".format(fields['body'], fields['link'], fields['citation'])
                else:
                    body = fields['body']
                info = {
                    'title': fields['title'],
                    'slug': fields['slug'],
                    'body': body,
                    # strip paragraph tags and truncate to the field limit
                    'subtitle': fields['tease'].replace("<p>", "").replace("</p>", "")[:510],
                    # 'publish' carries quotes; keep the YYYY-MM-DD part
                    'date': fields['publish'].replace("'", "")[:10],
                    'highlight': fields['highlight']
                }
                post = PostPage(**info)
                news_page.add_child(instance=post)
                for c in fields['categories']:
                    post.categories.add(PostCategory.objects.get(pk=c))
                # Fetch the header image from the legacy media server.
                r = requests.get('https://cmcf.lightsource.ca/media/{}'.format(fields['image']))
                if r.status_code == requests.codes.ok:
                    image = Image(title=fields['title'], file=ImageFile(BytesIO(r.content), name=fields['image']))
                    image.save()
                    post.image = image
                post.save()
def get_body(message):
    """Build a StreamField block value for an imported message.

    For image attachments, reuse an existing wagtail Image with the same
    title (or download and create one) and return a message+image struct
    value; otherwise return a text-only struct value.
    """
    if message["attachment_media_type"] == "image":
        title = message["attachment_media_object"]["filename"]
        try:
            im = Image.objects.get(title=title).id
        # BUG FIX: the original caught bare Exception, silently
        # re-downloading on *any* failure (e.g. MultipleObjectsReturned,
        # DB errors). Only "not imported yet" should trigger a download.
        except Image.DoesNotExist:
            http_res = requests.get(message["attachment_uri"])
            image_file = ImageFile(BytesIO(http_res.content), name=title)
            image = Image(title=title, file=image_file)
            image.save()
            im = image.id
        block = blocks.StructBlock([("message", blocks.TextBlock()),
                                    ("image", ImageChooserBlock())])
        return block.to_python({"message": message["answer"], "image": im})
    else:
        block = blocks.StructBlock([("message", blocks.TextBlock())])
        return block.to_python({"message": message["answer"]})
def handle(self, *args, **options):
    """Bulk-import every ``*-90x90.png`` file in ``from_dir`` as a
    wagtail Image tagged 'illustration'.

    The title is the basename minus the '-90x90.png' suffix.
    """
    self.stdout.write(self.style.SUCCESS("Importing images"))
    if not options["from_dir"].endswith("/"):
        options["from_dir"] = options["from_dir"] + "/"
    for filepath in sorted(glob(options["from_dir"] + "*-90x90.png")):
        with open(filepath, "rb") as image_file:
            # basename minus the 10-character '-90x90.png' suffix
            name = filepath.split("/")[-1][:-10]
            image = Image(title=name)
            image.file = ImageFile(file=image_file, name=name + ".png")
            image.file_size = image.file.size
            image.file.seek(0)
            # NOTE(review): _set_file_hash is a private wagtail API —
            # computes the dedup hash from the raw bytes.
            image._set_file_hash(image.file.read())
            image.file.seek(0)
            # Reindex the image to make sure all tags are indexed
            search_index.insert_or_update_object(image)
            image.save()
            image.tags.add("illustration")
            self.stdout.write(
                self.style.SUCCESS(f"{image.pk},{image.title}"))
    self.stdout.write(self.style.SUCCESS("Importing images finished"))
def handle(self, *args, **options):
    """Import magazine issues (with cover images) from a CSV file.

    CSV columns used: title, publication_date, issue_number,
    cover_image_url, cover_image_file_name.
    """
    # Get the only instance of Magazine Index Page
    magazine_index_page = MagazineIndexPage.objects.get()
    with open(options["file"]) as import_file:
        issues = csv.DictReader(import_file)
        for issue in issues:
            # NOTE(review): the response status is not checked — a
            # failed download would store the error body as the image.
            response = requests.get(issue["cover_image_url"])
            image_file = BytesIO(response.content)
            image = Image(
                title=issue["title"] + " cover image",
                file=ImageFile(image_file, name=issue["cover_image_file_name"]),
            )
            image.save()
            import_issue = MagazineIssue(
                title=issue["title"],
                publication_date=issue["publication_date"],
                first_published_at=issue["publication_date"],
                issue_number=issue["issue_number"],
                cover_image=image,
            )
            # Add issue to site page hierarchy
            magazine_index_page.add_child(instance=import_issue)
            magazine_index_page.save()
    self.stdout.write("All done!")
def create_images_from_urls_in_content(self, body):
    """create Image objects and transfer image files to media root

    Downloads every <img> referenced in ``body``, stores it as a wagtail
    Image and rewrites the tag's src to the local rendition URL.
    Returns the rewritten <body> soup element.
    """
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll("img"):
        # BUG FIX: the original used `"width" in img`, which on a
        # BeautifulSoup Tag tests *contents*, not attributes, and its
        # if/else shape could leave `width` unbound (NameError when only
        # height was declared) or silently reset a declared width to
        # 100. Check attributes explicitly, defaulting each dimension
        # independently.
        width = img["width"] if img.has_attr("width") else 100
        height = img["height"] if img.has_attr("height") else 100
        if not img["src"]:
            continue  # Blank image
        if img["src"].startswith("data:"):
            continue  # Embedded image
        _, file_name = os.path.split(img["src"])
        resp = requests.get(self.prepare_url(img["src"]), stream=True)
        if resp.status_code != requests.codes.ok:
            # Best-effort: report and keep processing remaining images.
            print("Unable to import " + img["src"])
            continue
        fp = BytesIO()
        fp.write(resp.content)
        image = Image(title=file_name, width=width, height=height)
        image.file.save(file_name, File(fp))
        image.save()
        # Drop any srcset so the rewritten src is authoritative.
        if img.has_attr("srcset"):
            img["srcset"] = ""
        new_url = image.get_rendition("original").url
        img["src"] = new_url
    soup.body.hidden = True
    return soup.body
def create_images_from_urls_in_content(self, body):
    """create Image objects and transfer image files to media root

    urllib-based variant: downloads each <img>, saves it as a wagtail
    Image and replaces the remote URL inside the raw ``body`` string.
    Returns the rewritten body string.
    """
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll('img'):
        old_url = img['src']
        # BUG FIX: `'width' in img` on a Tag tests child contents, not
        # attributes, and the original if/else could leave `width`
        # unbound or overwrite a declared width with 100. Check the
        # attributes explicitly and default each dimension on its own.
        width = img['width'] if img.has_attr('width') else 100
        height = img['height'] if img.has_attr('height') else 100
        if not img['src']:
            continue  # Blank image
        if img['src'].startswith('data:'):
            continue  # Embedded image
        path, file_ = os.path.split(img['src'])
        try:
            remote_image = urllib.request.urlretrieve(
                self.prepare_url(img['src']))
        except (urllib.error.HTTPError, urllib.error.URLError,
                UnicodeEncodeError, ValueError):
            print("Unable to import " + img['src'])
            continue
        image = Image(title=file_, width=width, height=height)
        try:
            # BUG FIX: close the downloaded temp file once wagtail has
            # copied it instead of leaking the handle.
            with open(remote_image[0], 'rb') as f:
                image.file.save(file_, File(f))
            image.save()
            new_url = image.file.url
            body = body.replace(old_url, new_url)
            body = self.convert_html_entities(body)
        except TypeError:
            print("Unable to import image {}".format(remote_image[0]))
    return body
def get_image(base_url, image_id):
    """Fetch a remote image's metadata and file, persist it as a
    wagtail Image, and return the saved instance."""
    # TODO: guard against non-existent images
    attrs = get_image_attributes(base_url, image_id)
    download = requests.get(attrs["file"])
    wrapped = ImageFile(BytesIO(download.content), name=attrs["title"])
    image = Image(title=attrs["title"], file=wrapped)
    image.save()
    return image
def test_norun(self):
    """The operation must refuse to run against this image."""
    img = Image(**image_kwargs)

    # Build the operation under test from its hyphenated spec.
    op = self.operation_class(*filter_spec.split('-'))

    # Transform context sized to the image under test.
    ctx = DummyImageTransform((img.width, img.height))

    # Running is expected to fail with ValueError.
    with self.assertRaises(ValueError):
        op.run(ctx, img)
def create_image(self, image_file, title, filename, tag):
    """Persist ``image_file`` as a tagged wagtail Image and return it."""
    wrapped = ImageFile(file=image_file, name=filename)
    img = Image(title=title)
    img.file = wrapped
    img.file_size = wrapped.size
    img.file.seek(0)
    img._set_file_hash(img.file.read())
    img.file.seek(0)
    # Reindex the image to make sure all tags are indexed
    search_index.insert_or_update_object(img)
    img.save()
    img.tags.add(tag)
    return img
def test_cache_key_fill_filter_with_focal_point(self):
    """A fill filter folds the focal point into its cache key."""
    props = dict(
        width=1000,
        height=1000,
        focal_point_width=100,
        focal_point_height=100,
        focal_point_x=500,
        focal_point_y=500,
    )
    key = Filter(spec='fill-100x100').get_cache_key(Image(**props))
    self.assertEqual(key, '0bbe3b2f')
def test_norun(self):
    """Running the operation against this image must raise ValueError."""
    img = Image(**image_kwargs)

    # Operation under test, built from its hyphenated spec.
    op = self.operation_class(*filter_spec.split('-'))

    # Recorder sized to the image.
    recorder = WillowOperationRecorder((img.width, img.height))

    with self.assertRaises(ValueError):
        op.run(recorder, img, {})
def parse_media_blocks(media_urls):
    """Convert a comma-separated string of media URLs into StreamField
    block tuples.

    Vimeo/YouTube URLs become ("embed", ...); PDFs become
    ("document", ...); JPEG/PNG files become ("image", ...). Anything
    else is printed for manual follow-up and skipped.
    """
    media_blocks = []
    for url in media_urls.split(", "):
        domain = urlparse(url).netloc
        if domain in ["vimeo.com", "www.youtube.com"]:
            embed = get_embed(url)
            embed_tuple = ("embed", embed)
            media_blocks.append(embed_tuple)
        else:
            # The default should be to fetch a PDF or image file (i.e. from westernfriend.org)
            response = requests.get(url)
            # BUG FIX: Content-Type may carry parameters, e.g.
            # "application/pdf; charset=...", which broke the exact
            # equality checks below — compare the bare media type only.
            content_type = response.headers["content-type"].split(";")[0].strip().lower()
            file_name = url.split("/")[-1]
            file_bytes = BytesIO(response.content)
            if content_type == "application/pdf":
                # Create file
                document_file = File(file_bytes, name=file_name)
                document = Document(
                    title=file_name,
                    file=document_file,
                )
                document.save()
                document_link_block = ("document", document)
                media_blocks.append(document_link_block)
            elif content_type in ["image/jpeg", "image/png"]:
                # create image
                image_file = ImageFile(file_bytes, name=file_name)
                image = Image(
                    title=file_name,
                    file=image_file,
                )
                image.save()
                image_block = ("image", image)
                media_blocks.append(image_block)
            else:
                # Unhandled media type: log it for manual follow-up.
                print(url)
                print(content_type)
                print("-----")
    return media_blocks
def parse_results(self):
    """Persist the fetched media results as Documents/Images, then
    follow pagination recursively.

    Returns a ``(total stored media count, 0)`` tuple once the last
    page has been processed.
    """
    media_files = self.results
    for r in media_files:
        sub_site = r.get('source')
        # Map the sub-site to its wagtail Collection.
        collection_name = SOURCES[sub_site]
        collection = Collection.objects.get(name=collection_name)
        source_url = r.get('source_url')
        media_type = r.get('media_type')
        media_name = source_url.split('/')[-1]
        response = requests.get(source_url)
        title = r.get('title')
        # if the title is blank it causes an error
        if not title:
            title = 'No title was available'
        if response:
            if media_type == 'file':
                # save to documents
                media_file = File(BytesIO(response.content), name=media_name)
                file = Document(title=title, file=media_file, collection=collection)
                file.save()
                # Overwrite the auto-set creation date with the source's
                # date, requiring a second save.
                file.created_at = r.get('date')
                file.save()
            elif media_type == 'image':
                # save to images
                image_file = ImageFile(BytesIO(response.content), name=media_name)
                image = Image(title=title, file=image_file, collection=collection)
                image.save()
                image.created_at = r.get('date')
                image.save()
        else:
            sys.stdout.write(
                '⚠️ Got no response. Error has been logged importer/log/import_media_files.txt\n'
            )
            with open('importer/log/import_media_files.txt', 'a') as the_file:
                the_file.write('{}\n'.format(r))
    # Follow pagination: fetch_url refreshes self.results / self.next,
    # then recurse. NOTE(review): recursion depth grows with page count.
    if self.next:
        time.sleep(self.sleep_between_fetches)
        self.fetch_url(self.next)
        self.parse_results()
    return Document.objects.count() + Image.objects.count(), 0
def test_run(self):
    """Recorded operations must match the expected output."""
    img = Image(**image_kwargs)

    # Operation built from its hyphenated spec.
    op = self.operation_class(*filter_spec.split('-'))

    # Recorder sized to the image.
    recorder = WillowOperationRecorder((img.width, img.height))

    op.run(recorder, img, {})

    self.assertEqual(recorder.ran_operations, expected_output)
def create_image(self, instance):
    """Create a wagtail Image for ``instance`` from its generated
    snippet image, applying any tags listed in the image description.
    """
    data = self.collect_data(instance)
    image_description = self.get_image_description(instance)
    # BUG FIX: the original only popped 'tags' when the value was
    # truthy, so a falsy value (e.g. an empty list) stayed in the dict
    # and crashed Image(**image_description) with an unexpected keyword
    # argument. Always remove the key.
    tags = image_description.pop('tags', None)
    image = Image(**image_description)
    snippet_image = self.create_snippet_image(data)
    file_name = self.get_file_name()
    # save=False defers the DB write until the file is attached.
    image.file.save(file_name, snippet_image, save=False)
    image.save()
    if tags:
        image.tags.set(*tags, clear=True)
        image.save()
    return image
def test_run(self):
    """The transform context must record the expected operations."""
    img = Image(**image_kwargs)

    # Operation built from its hyphenated spec.
    op = self.operation_class(*filter_spec.split('-'))

    # Context sized to the image; run() returns the updated context.
    ctx = DummyImageTransform((img.width, img.height))
    ctx = op.run(ctx, img)

    self.assertEqual(ctx.operations, expected_output)
def _set_image(self, obj, attr_name, folder_path, img_path):
    """helper to set images for objects

    Loads ``img_path`` (relative to ``folder_path``) into a wagtail
    Image — reusing an existing Image with the same title — then
    assigns it to ``obj.<attr_name>`` and saves ``obj``.
    """
    img_path = folder_path.joinpath(img_path)
    # create and set the file if it does not yet exist
    qs = Image.objects.filter(title=img_path.name)
    if not qs.exists():
        with open(img_path, "rb") as f:
            # setting name= is important. otherwise it uses the entire file path as
            # name, which leaks server filesystem structure to the outside.
            # NOTE(review): .stem drops the file extension from the
            # stored name — confirm that is intended.
            image_file = File(f, name=img_path.stem)
            image = Image(title=img_path.name, file=image_file.open())
            image.save()
    else:
        image = qs[0]
    setattr(obj, attr_name, image)
    obj.save()
def mock(title):
    """
    Mock a ImageBlock
    :param title: String
    :return: Stream content
    """
    url = constants.URL_IMAGE_MOCK_1.strip()
    filename = "%s.png" % title
    try:
        existing = Image.objects.get(title=title)
    except Image.DoesNotExist:
        # First use of this title: download the mock file and store it.
        response = requests.get(url)
        wrapped = ImageFile(BytesIO(response.content), name=filename)
        existing = Image(title=title, file=wrapped)
        existing.save()
    return {'type': 'image', 'value': existing.id}
def handle(self, *args, **options):
    """Import speaker photos listed in a WordPress-export CSV.

    Each row supplies the photo filename and the speaker's WordPress
    ID; the file is stored as a wagtail Image tagged 'speaker' and
    attached to the matching Speaker record.
    """
    self.stdout.write(self.style.SUCCESS("Importing images"))
    with open(options["speakers_csv"], "r") as file:
        for row in DictReader(file):
            # Rows without a photo are reported and skipped.
            if not row["filename"]:
                self.stdout.write(
                    self.style.WARNING(
                        f"====> skipping {row['post_name']}, {row['post_title']}"
                    ))
                continue
            image_path = os.path.join(options["from_dir"], row["filename"])
            with open(image_path, "rb") as image_file:
                image = Image(title=row["post_title"])
                # Normalize the stored filename to "First Last.<ext>".
                if row["filename"].lower().endswith(".jpg") or row[
                        "filename"].lower().endswith(".jpeg"):
                    image_filename = f"{row['first_name']} {row['last_name']}.jpg"
                elif row["filename"].lower().endswith(".png"):
                    image_filename = f"{row['first_name']} {row['last_name']}.png"
                else:
                    raise ValueError("Unknown file format")
                image.file = ImageFile(file=image_file, name=image_filename)
                image.file_size = image.file.size
                image.file.seek(0)
                # NOTE(review): _set_file_hash is a private wagtail API;
                # computes the dedup hash from the raw bytes.
                image._set_file_hash(image.file.read())
                image.file.seek(0)
                # Reindex the image to make sure all tags are indexed
                search_index.insert_or_update_object(image)
                image.save()
                image.tags.add("speaker")
                speaker = Speaker.objects.get(wordpress_id=row["ID"])
                speaker.photo = image
                speaker.save()
                self.stdout.write(
                    self.style.SUCCESS(f"{image.pk},{image.title}"))
    self.stdout.write(self.style.SUCCESS("Importing images finished"))
def update_thumbnail(self):
    """Create a thumbnail Image for this video and attach it.

    Uses the video's own thumbnail file when present; otherwise grabs
    the first frame with OpenCV into a temporary jpeg. Returns the
    saved wagtail Image.
    """
    generated_file = False
    if self.video.thumbnail:
        source_path = self.video.thumbnail.path
    else:
        # Extract the first frame of the video as the thumbnail.
        clip = cv2.VideoCapture(self.video.file.path)
        try:
            ret, frame = clip.read()
        finally:
            # BUG FIX: release the capture handle (was never released).
            clip.release()
        # NOTE(review): `ret` is not checked — a read failure would pass
        # an empty frame to imwrite; confirm inputs are always readable.
        generated_file = 'thumbnail.jpeg'
        cv2.imwrite(generated_file, frame)
        source_path = generated_file
    # BUG FIX: the opened file handle was previously never closed; use
    # a context manager that stays open while wagtail copies the file.
    with open(source_path, 'rb') as fh:
        thumbnail = Image(
            title=text_processing.html_to_str(self.english_title),
            file=File(fh))
        thumbnail.save()
    self.video_thumbnail = thumbnail
    self.save()
    if generated_file:
        os.remove(generated_file)
    return thumbnail
def get_or_create_image(self, row):
    """Return a wagtail Image for ``row['filename']``, deduplicated by
    content hash.

    Creates and saves the Image on first sight; returns None when no
    image path/filename is configured or the file does not exist.
    """
    image = None
    if self.image_path and row['filename']:
        path = os.path.join(self.image_path, row['filename'])
        if os.path.exists(path):
            # Slug-like title: non-word chars (except dots) become '-'.
            image_title = re.sub(r'[^\w\.]', '-', row['filename'].strip()).lower()
            hash = get_hash_from_file(path)
            # Dedup lookup: the hash is embedded in the stored filename.
            image = Image.objects.filter(file__contains=hash).first()
            if not image:
                new_name = re.sub(r'.*\.', hash + '.', row['filename'])
                # BUG FIX: the file handle from open() was never closed;
                # keep it open only while the Image is saved.
                with open(path, 'rb') as fh:
                    image = Image(file=ImageFile(File(fh), name=new_name),
                                  title=image_title)
                    image.save()
                _print_operation(image, True, 'title')
    return image
def create_wagtail_image_from_remote(image_url=None, images_folder='original_images', collection=None):
    """Download ``image_url`` into MEDIA_ROOT/<images_folder>/ and wrap
    it as a wagtail Image in ``collection``.

    Returns the newly created Image on success, the existing Image if
    one already points at the same stored file, or None when the
    download fails.
    """
    basename = os.path.basename(image_url)
    # Path as stored in the Image.file field (always forward slashes).
    db_file_field = os.path.join(images_folder, basename).replace('\\', '/')
    destination_image = os.path.join(
        settings.MEDIA_ROOT, images_folder, os.path.basename(image_url)
    )
    if collection is None:
        collection = get_behance_collection()
    # NOTE(review): the download is performed even when the Image
    # already exists — confirm this is acceptable.
    r = requests.get(image_url)
    if Image.objects.filter(file=db_file_field).count() == 0:
        if r.status_code == 200:
            # Write the bytes to disk, then read the dimensions back
            # with Pillow so the Image row can be created directly
            # without wagtail re-processing the file.
            with open(destination_image, 'wb') as f:
                f.write(r.content)
            local_image = PILImage.open(destination_image)
            width, height = local_image.size
            img = Image()
            img.file = db_file_field
            img.title = basename
            img.width = width
            img.height = height
            img.collection = collection
            img.save()
            return img
    else:
        # An Image already references this stored file — reuse it.
        return Image.objects.get(file=db_file_field)
    # Download failed; nothing stored.
    return None
def form_valid(self, form):
    """Persist the uploaded photo with its photographer details and
    stash the reference number in the session."""
    uploaded = form.cleaned_data['image_file']

    photographer = Photographer(
        age_range=form.cleaned_data['age_range'],
        gender_category=form.cleaned_data['gender'],
        gender_other=form.cleaned_data['gender_other'],
    )
    photographer.save()

    wagtail_image = Image(title=uploaded.name, file=uploaded)
    wagtail_image.save()

    photo = form.save()
    photo.image = wagtail_image
    photo.photographer = photographer
    photo.save()

    self.request.session['reference_number'] = photo.reference_number
    return super().form_valid(form)
def fetch_and_create_image(self, url, image_title):
    '''
    fetches, creates image object
    returns tuple with Image object and context dictionary containing
    request URL
    '''
    context = {
        "file_url": url,
        "foreign_title": image_title,
    }
    try:
        download = requests.get(url)
        wrapped = ImageFile(BytesIO(download.content), name=image_title)
        local_image = Image(title=image_title, file=wrapped)
        local_image.save()
    except Exception as e:
        # Deliberately broad: any failure surfaces as ImageCreationFailed
        # carrying the request context for the caller to log.
        context.update({
            "exception": e,
        })
        raise ImageCreationFailed(context, None)
    return (local_image, context)
def handle(self, *args, **options):
    """Import magazine issues (with cover images) from a CSV file,
    showing a tqdm progress bar and storing timezone-aware dates."""
    # Get the only instance of Magazine Index Page
    magazine_index_page = MagazineIndexPage.objects.get()
    with open(options["file"]) as import_file:
        issues = csv.DictReader(import_file)
        # Materialize the reader so tqdm can display a total.
        issues_list = list(issues)
        for issue in tqdm(issues_list, desc="Issues", unit="row"):
            # NOTE(review): the response status is not checked — a
            # failed download would store the error body as the image.
            response = requests.get(issue["cover_image_url"])
            image_file = BytesIO(response.content)
            image = Image(
                title=issue["title"] + " cover image",
                file=ImageFile(image_file, name=issue["cover_image_file_name"]),
            )
            image.save()
            # CSV dates are naive "YYYY-MM-DD" strings; make them
            # timezone-aware for Django's datetime fields.
            publication_date_tz_aware = make_aware(
                datetime.strptime(issue["publication_date"], "%Y-%m-%d"))
            import_issue = MagazineIssue(
                title=issue["title"],
                publication_date=publication_date_tz_aware,
                first_published_at=publication_date_tz_aware,
                issue_number=issue["issue_number"],
                cover_image=image,
            )
            # Add issue to site page hierarchy
            magazine_index_page.add_child(instance=import_issue)
            magazine_index_page.save()
    self.stdout.write("All done!")
def test_cache_key_fill_filter(self):
    """A fill filter yields a non-empty, deterministic cache key."""
    key = Filter(spec='fill-100x100').get_cache_key(
        Image(width=1000, height=1000))
    self.assertEqual(key, '2e16d0ba')
def test_cache_key(self):
    """A max filter does not contribute anything to the cache key."""
    key = Filter(spec='max-100x100').get_cache_key(
        Image(width=1000, height=1000))
    self.assertEqual(key, '')
def create_blog_pages(self, posts, blog_index, *args, **options):
    """create Blog post entries from wordpress data

    Creates or updates a BlogPage per wordpress post dict, imports
    inline and featured images, and wires up author, categories, tags
    and (optionally) comments.
    """
    for post in posts:
        post_id = post.get('ID')
        title = post.get('title')
        if title:
            title = self.convert_html_entities(title)
        slug = post.get('slug')
        description = post.get('description')
        if description:
            description = self.convert_html_entities(description)
        body = post.get('content')
        # Wrap bare text in paragraph tags.
        if "<p>" not in body:
            body = linebreaks(body)
        # get image info from content and create image objects
        body = self.create_images_from_urls_in_content(body)
        # author/user data
        author = post.get('author')
        user = self.create_user(author)
        categories = post.get('terms')
        # format the date (keep the YYYY-MM-DD part)
        date = post.get('date')[:10]
        try:
            # Update in place when the page already exists.
            new_entry = BlogPage.objects.get(slug=slug)
            new_entry.title = title
            new_entry.body = body
            new_entry.owner = user
            new_entry.author = user
            new_entry.latest_revision_created_at = date
            new_entry.save()
        except BlogPage.DoesNotExist:
            # BUG FIX: search_description was the literal string
            # "description" instead of the converted description value.
            new_entry = blog_index.add_child(
                instance=BlogPage(title=title,
                                  slug=slug,
                                  search_description=description or "",
                                  date=date,
                                  latest_revision_created_at=date,
                                  body=body,
                                  owner=user,
                                  author=user))
        featured_image = post.get('featured_image')
        if featured_image is not None:
            title = post['featured_image']['title']
            source = post['featured_image']['source']
            path, file_ = os.path.split(source)
            source = source.replace('stage.swoon', 'swoon')
            try:
                remote_image = urllib.request.urlretrieve(
                    self.prepare_url(source))
                width = 640
                height = 290
                header_image = Image(title=title, width=width, height=height)
                # BUG FIX: close the downloaded temp file after wagtail
                # copies it instead of leaking the handle.
                with open(remote_image[0], 'rb') as f:
                    header_image.file.save(file_, File(f))
                header_image.save()
            except UnicodeEncodeError:
                header_image = None
                print('unable to set header image {}'.format(source))
        else:
            header_image = None
        new_entry.header_image = header_image
        new_entry.save()
        if categories:
            self.create_categories_and_tags(new_entry, categories)
        if self.should_import_comments:
            self.import_comments(post_id, slug)
def handle(self, *args, **options):
    """One-off migration: build InstallationPage/GalleryItem trees from
    'organized.json' plus local image files, then seed the homepage with
    five random gallery images and publish every gallery item.
    """
    # Hand-curated mapping from installation name to its medium.
    assigned_mediums = {
        '7 deadly sins': 'Collage',
        'attempt to mimic water': 'Sculpture',
        'bound': 'Collage',
        'collage 2019': 'Collage',
        'collage 2020': 'Collage',
        'commissions': 'Mixed Media',
        'containment': 'Mixed Media',
        'dissimulation': 'Sculpture',
        'dusk': 'Mixed Media',
        'emily owens as a rock': 'Sculpture',
        'endless nightmare': 'Painting',
        'ephemeral embedding': 'Sculpture',
        'exit landscape': 'Painting',
        'fertile ground': 'Mixed Media',
        'growth': 'Painting',
        'house plant with artificial shadow': 'Mixed Media',
        'i dreamt of you': 'Painting',
        'imagined landscapes and watersources': 'Painting',
        'imitation of a chair': 'Video',
        'inconspicuous growth': 'Painting',
        'moss': 'Painting',
        'new mexico': 'Painting',
        'no place (utopic traces)': 'Painting',
        'object in the environment': 'Photography',
        'paintings 2013': 'Painting',
        'paintings 2014': 'Painting',
        'paintings in flux': 'Painting',
        'process': 'Photography',
        'push (traces)': 'Video',
        'representing abstraction': 'Mixed Media',
        'return to soil': 'Mixed Media',
        'shore': 'Mixed Media',
        'tarot series': 'Collage',
        'the zodiac': 'Collage',
        'twin peaks': 'Collage',
        'utopia': 'Painting',
        'voyage: an expedition into materiality': 'Mixed Media'
    }
    self.stdout.write("Trying to add mediums...")
    mediums = [
        'Collage', 'Painting', 'Photography', 'Mixed Media', 'Sculpture',
        'Video'
    ]
    # Backdate the first page by a year; each page gets the next day.
    post_date = datetime.today() - timedelta(days=365)
    # Ensure every medium exists exactly once.
    for m in mediums:
        existing_mediums = [
            i.name for i in InstallationMedium.objects.all()
        ]
        if m in existing_mediums:
            self.stdout.write(f"{m} already exists as a medium.")
        else:
            medium = InstallationMedium(name=m)
            medium.save()
    wd = os.path.abspath(os.getcwd())
    print(wd)
    with open(os.path.join(wd, 'organized.json'), 'r') as f:
        data = json.load(f)  # ['data']
    # Oldest-first: reversed order pairs with the incrementing date.
    for page in reversed(data):
        name = page['name']
        body = page.get('body', '')
        items = page['items']
        gallery = Gallery.objects.first()
        installation = InstallationPage(
            title=name,
            slug=slugify(name),
            date=post_date,
            # StreamField body is a JSON-encoded paragraph block, or
            # None when the source page had no body text.
            body=json.dumps([{
                'type': 'paragraph',
                'value': body
            }]) if len(body) else None,
            mediums=[
                InstallationMedium.objects.get(
                    name=assigned_mediums[name])
            ])
        self.stdout.write(f"Initialized page {name}")
        gallery.add_child(instance=installation)
        # installation.save_revision().publish()
        # saved_items = []
        image_counter = 0
        for item_data in items:
            # One GalleryItem child per source item; the counter keeps
            # slugs unique within the installation.
            gallery_item = GalleryItem(
                title=name,
                slug=slugify(name) + str(image_counter),
                description=item_data.get('description', ''))
            gallery_images = []
            for img_data in item_data['images']:
                filename = img_data['filename']
                path = os.path.join(
                    os.path.join(wd, 'migration_images'), filename)
                with open(path, "rb") as imagefile:
                    # Title is "<page name>-<filename without extension>".
                    image = Image(
                        file=ImageFile(BytesIO(imagefile.read()),
                                       name=filename),
                        title=name + '-' + filename.rsplit('.', 1)[0])
                    image.save()
                    gallery_image = GalleryImage(
                        image=image, caption=img_data['caption'])
                    gallery_images.append(gallery_image)
                    gallery_item.gallery_images.add(gallery_image)
                    self.stdout.write(
                        f" Saved image (unknown) to database")
            installation.add_child(instance=gallery_item)
            image_counter += 1
        # installation.gallery_images=saved_items
        installation.save_revision().publish()
        self.stdout.write(f" Attached images to {name}.")
        self.stdout.write(
            f"Published page {name} with {str(len(items))} images.")
        post_date = post_date + timedelta(days=1)
    self.stdout.write(
        'Finalizing homepage and publishing all page objects...')
    # Feature five random gallery images on the homepage.
    all_images = list(GalleryImage.objects.all())
    random_images = random.sample(all_images, 5)
    for img_obj in random_images:
        img = img_obj.image
        featured_img = HomePageImage(home_page_image=img)
        featured_img.save()
    for item in GalleryItem.objects.all():
        item.save_revision().publish()
    self.stdout.write('Done.')