def fetch_and_create_image(self, url, image_title):
    """Fetch an image from *url* and create a Wagtail ``Image`` for it.

    Returns a ``(Image, context)`` tuple, where *context* carries the
    request URL and foreign title for diagnostics.

    Raises:
        ImageCreationFailed: wrapping any download or creation error,
            with the triggering exception added to *context*.
    """
    context = {
        "file_url": url,
        "foreign_title": image_title,
    }
    try:
        response = requests.get(url)
        # BUGFIX: requests does not raise on 4xx/5xx, so an error page
        # would previously have been saved as the image file. Surface it
        # as ImageCreationFailed instead.
        response.raise_for_status()
        local_image = Image(
            title=image_title,
            file=ImageFile(
                BytesIO(response.content),
                name=image_title,
            ),
        )
        local_image.save()
        return (local_image, context)
    except Exception as e:
        context.update({
            "exception": e,
        })
        raise ImageCreationFailed(context, None)
def get_body(message):
    """Build a StreamField block value for an exported chat *message*.

    Image attachments produce a (message, image) struct block, re-using an
    existing ``Image`` with the same title when possible; every other
    message produces a message-only struct block.
    """
    if message["attachment_media_type"] == "image":
        title = message["attachment_media_object"]["filename"]
        try:
            im = Image.objects.get(title=title).id
        except Image.DoesNotExist:
            # BUGFIX: only a missing image should trigger a fresh download;
            # the previous bare ``except Exception`` also swallowed network
            # and database errors and re-imported the image on each one.
            http_res = requests.get(message["attachment_uri"])
            image_file = ImageFile(BytesIO(http_res.content), name=title)
            image = Image(title=title, file=image_file)
            image.save()
            im = image.id
        block = blocks.StructBlock([
            ("message", blocks.TextBlock()),
            ("image", ImageChooserBlock()),
        ])
        return block.to_python({
            "message": message["answer"],
            "image": im,
        })
    else:
        block = blocks.StructBlock([
            ("message", blocks.TextBlock()),
        ])
        return block.to_python({"message": message["answer"]})
def handle(self, *args, **options):
    """Import every ``*-90x90.png`` under ``from_dir`` as a tagged Wagtail image."""
    self.stdout.write(self.style.SUCCESS("Importing images"))
    if not options["from_dir"].endswith("/"):
        options["from_dir"] = options["from_dir"] + "/"
    for filepath in sorted(glob(options["from_dir"] + "*-90x90.png")):
        with open(filepath, "rb") as image_file:
            # Strip the directory and the "-90x90.png" suffix (10 chars).
            name = filepath.split("/")[-1][:-10]
            image = Image(title=name)
            image.file = ImageFile(file=image_file, name=name + ".png")
            image.file_size = image.file.size
            image.file.seek(0)
            image._set_file_hash(image.file.read())
            image.file.seek(0)
            # Reindex the image to make sure all tags are indexed
            search_index.insert_or_update_object(image)
            image.save()
            image.tags.add("illustration")
            self.stdout.write(self.style.SUCCESS(f"{image.pk},{image.title}"))
    self.stdout.write(self.style.SUCCESS("Importing images finished"))
def handle(self, *args, **options):
    """Import magazine issues (and their cover images) from a CSV file."""
    # Get the only instance of Magazine Index Page
    magazine_index_page = MagazineIndexPage.objects.get()
    with open(options["file"]) as import_file:
        for issue in csv.DictReader(import_file):
            cover_response = requests.get(issue["cover_image_url"])
            cover_image = Image(
                title=issue["title"] + " cover image",
                file=ImageFile(
                    BytesIO(cover_response.content),
                    name=issue["cover_image_file_name"],
                ),
            )
            cover_image.save()
            import_issue = MagazineIssue(
                title=issue["title"],
                publication_date=issue["publication_date"],
                first_published_at=issue["publication_date"],
                issue_number=issue["issue_number"],
                cover_image=cover_image,
            )
            # Add issue to site page hiererchy
            magazine_index_page.add_child(instance=import_issue)
            magazine_index_page.save()
    self.stdout.write("All done!")
def set_featured_media(self, page: BlogPage, post):
    """Download the WordPress featured media of *post* and attach it to *page*."""
    featured_media_id: int = post.get("featured_media")
    if not featured_media_id:
        return
    featured_medias: list = post["_embedded"].get("wp:featuredmedia")
    if featured_medias is None:
        return
    for media in featured_medias:
        # Only the entry whose id matches the post's featured_media is used.
        if media.get("id") != featured_media_id:
            continue
        source_url = media["source_url"]
        try:
            # Wordpress 5.3 API nests title in "rendered"
            title = media["title"]["rendered"]
        except TypeError:
            # Fallback for older (or newer?) wordpress
            title = media["title"]
        details = media["media_details"]
        resp = requests.get(source_url, stream=True)
        if resp.status_code != requests.codes.ok:
            print("Unable to import " + source_url)
            continue
        fp = BytesIO()
        fp.write(resp.content)
        image = Image(
            title=title,
            width=details["width"],
            height=details["height"],
        )
        image.file.save(details["file"], File(fp))
        image.save()
        page.header_image = image
def create_images_from_urls_in_content(self, body):
    """create Image objects and transfer image files to media root"""
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll("img"):
        # BUGFIX: ``"width" in img`` tests a tag's *contents*, not its
        # attributes, so explicit dimensions were never picked up (and
        # ``height`` could be left unbound). Use has_attr() like the
        # srcset check below already does.
        if img.has_attr("width") and img.has_attr("height"):
            width = img["width"]
            height = img["height"]
        else:
            width = 100
            height = 100
        # BUGFIX: check the src attribute before using it — a blank or
        # missing src previously raised before the guard was reached.
        if not img.has_attr("src") or not img["src"]:
            continue  # Blank image
        if img["src"].startswith("data:"):
            continue  # Embedded image
        _, file_name = os.path.split(img["src"])
        resp = requests.get(self.prepare_url(img["src"]), stream=True)
        if resp.status_code != requests.codes.ok:
            print("Unable to import " + img["src"])
            continue
        fp = BytesIO()
        fp.write(resp.content)
        image = Image(title=file_name, width=width, height=height)
        image.file.save(file_name, File(fp))
        image.save()
        if img.has_attr("srcset"):
            img["srcset"] = ""
        new_url = image.get_rendition("original").url
        img["src"] = new_url
    soup.body.hidden = True
    return soup.body
def process_content_image(self, content):
    """Replace WordPress-hosted <img> tags in *content* with embed markup.

    Imported images become Wagtail images and the original nodes are
    replaced by the markup produced by ``self._image_to_embed``. Returns
    the (possibly re-serialised) content.
    """
    self.stdout.write('\tGenerate and replace entry content images....')
    if content:
        root = lxml.html.fromstring(content)
        for img_node in root.iter('img'):
            parent_node = img_node.getparent()
            # Only media hosted under wp-content/ or files/ is imported.
            if 'wp-content' in img_node.attrib[
                    'src'] or 'files' in img_node.attrib['src']:
                image = self._import_image(img_node.attrib['src'])
                if image:
                    # Prefer title, fall back to alt text.
                    title = img_node.attrib.get(
                        'title') or img_node.attrib.get('alt')
                    new_image = WagtailImage(file=File(file=image),
                                             title=title)
                    new_image.save()
                    if parent_node.tag == 'a':
                        # Image was wrapped in a link: insert the embed
                        # after the link and drop the whole link subtree.
                        parent_node.addnext(
                            ET.XML(self._image_to_embed(new_image)))
                        parent_node.drop_tree()
                    else:
                        # Append the embed, then strip the original <img>
                        # tag while keeping its tail text.
                        parent_node.append(
                            ET.XML(self._image_to_embed(new_image)))
                        img_node.drop_tag()
        # Re-serialise the mutated tree back into the returned content.
        content = ET.tostring(root)
    return content
def create_images_from_urls_in_content(self, body):
    """create Image objects and transfer image files to media root"""
    soup = BeautifulSoup(body, "html5lib")
    for img in soup.findAll('img'):
        old_url = img['src']
        # BUGFIX: ``'width' in img`` inspects a tag's children, not its
        # attributes, so dimensions were never read — and ``height`` could
        # be unbound when only width was present. Use has_attr() and only
        # trust explicit dimensions when both exist.
        if img.has_attr('width') and img.has_attr('height'):
            width = img['width']
            height = img['height']
        else:
            width = 100
            height = 100
        path, file_ = os.path.split(img['src'])
        if not img['src']:
            continue  # Blank image
        if img['src'].startswith('data:'):
            continue  # Embedded image
        try:
            remote_image = urllib.request.urlretrieve(
                self.prepare_url(img['src']))
        except (urllib.error.HTTPError, urllib.error.URLError,
                UnicodeEncodeError, ValueError):
            print("Unable to import " + img['src'])
            continue
        image = Image(title=file_, width=width, height=height)
        try:
            # BUGFIX: close the downloaded temp file instead of leaking
            # the file handle for the lifetime of the process.
            with open(remote_image[0], 'rb') as downloaded:
                image.file.save(file_, File(downloaded))
            image.save()
            new_url = image.file.url
            body = body.replace(old_url, new_url)
            body = self.convert_html_entities(body)
        except TypeError:
            print("Unable to import image {}".format(remote_image[0]))
    return body
def handle(self, *args, **options):
    """Import ``blog.post`` fixtures from a Django JSON dump as PostPages.

    Each post not already present (matched by slug) is added under the
    live NewsPage, assigned its categories, and given a cover image
    downloaded from the legacy media host.
    """
    news_page = NewsPage.objects.live().first()
    blog_file = options['file']
    with open(blog_file) as json_blogs:
        data = json.load(json_blogs)
        posts = [p for p in data if p['model'] == 'blog.post']
        for p in posts:
            fields = p['fields']
            # Skip posts that were already imported (matched on slug).
            if not PostPage.objects.filter(slug=fields['slug']).exists():
                # Append a citation link to the body when one is present.
                if fields['link'] or fields['citation']:
                    body = "{}<a href='{}'>{}</a>".format(
                        fields['body'], fields['link'], fields['citation'])
                else:
                    body = fields['body']
                info = {
                    'title': fields['title'],
                    'slug': fields['slug'],
                    'body': body,
                    # Strip paragraph tags and clamp to the field length.
                    'subtitle': fields['tease'].replace("<p>", "").replace(
                        "</p>", "")[:510],
                    # Publish timestamp trimmed to YYYY-MM-DD.
                    'date': fields['publish'].replace("'", "")[:10],
                    'highlight': fields['highlight']
                }
                post = PostPage(**info)
                news_page.add_child(instance=post)
                for c in fields['categories']:
                    post.categories.add(PostCategory.objects.get(pk=c))
                r = requests.get('https://cmcf.lightsource.ca/media/{}'.format(
                    fields['image']))
                if r.status_code == requests.codes.ok:
                    image = Image(title=fields['title'],
                                  file=ImageFile(BytesIO(r.content),
                                                 name=fields['image']))
                    image.save()
                    post.image = image
                    # NOTE(review): post.save() runs only when the image
                    # download succeeds — confirm posts without images are
                    # meant to rely on add_child() alone.
                    post.save()
def get_image(base_url, image_id):
    """Fetch image *image_id* from the remote API and save it as a Wagtail Image.

    Raises ``requests.HTTPError`` when the remote file cannot be fetched.
    """
    image_attributes = get_image_attributes(base_url, image_id)
    response = requests.get(image_attributes["file"])
    # Guard against non-existent images (resolves the old TODO): fail
    # loudly on 4xx/5xx instead of saving an error page as the file.
    response.raise_for_status()
    image = Image(title=image_attributes["title"],
                  file=ImageFile(BytesIO(response.content),
                                 name=image_attributes["title"]))
    image.save()
    return image
def get_image(base_url, image_id):
    """Fetch image *image_id* from the remote API and save it as a Wagtail Image.

    Raises ``requests.HTTPError`` when the remote file cannot be fetched.
    """
    image_attributes = get_image_attributes(base_url, image_id)
    response = requests.get(image_attributes["file"])
    # Guard against non-existent images (resolves the old TODO): fail
    # loudly on 4xx/5xx instead of saving an error page as the file.
    response.raise_for_status()
    image = Image(
        title=image_attributes["title"],
        file=ImageFile(
            BytesIO(response.content),
            name=image_attributes["title"],
        ),
    )
    image.save()
    return image
def create_image(self, image_file, title, filename, tag):
    """Create, hash, index, save and tag a Wagtail Image from *image_file*."""
    created = Image(title=title)
    created.file = ImageFile(file=image_file, name=filename)
    created.file_size = created.file.size
    created.file.seek(0)
    created._set_file_hash(created.file.read())
    created.file.seek(0)
    # Reindex the image to make sure all tags are indexed
    search_index.insert_or_update_object(created)
    created.save()
    created.tags.add(tag)
    return created
def create_image(self, text):
    # create a Wagtail image with a random coloured background
    # and a text overlay
    background = tuple(random.sample(range(255), 3))
    canvas = Image.new("RGB", (600, 400), color=background)
    ImageDraw.Draw(canvas).text((10, 10), text, fill=(255, 255, 255))
    buffer = BytesIO()
    canvas.save(buffer, format="png")
    filename = text.replace(" ", "-").lower() + "-%s-%s-%s.fake" % background
    wagtail_image = WagtailImage(
        title=text,
        file=ImageFile(buffer, name=filename),
    )
    wagtail_image.save()
    return wagtail_image
def parse_media_blocks(media_urls):
    """Convert a comma-separated string of media URLs into StreamField blocks.

    Vimeo/YouTube URLs become ("embed", ...) tuples; other URLs are fetched
    and stored as ("document", ...) for PDFs or ("image", ...) for JPEG/PNG
    files. Unrecognised content types are logged and skipped.
    """
    media_blocks = []
    for url in media_urls.split(", "):
        domain = urlparse(url).netloc
        if domain in ["vimeo.com", "www.youtube.com"]:
            embed = get_embed(url)
            media_blocks.append(("embed", embed))
            continue
        # The default should be to fetch a PDF or image file (i.e. from westernfriend.org)
        response = requests.get(url)
        # BUGFIX: tolerate a missing Content-Type header and strip any
        # parameters ("application/pdf; charset=...") so the comparisons
        # below actually match.
        content_type = response.headers.get("content-type", "")
        content_type = content_type.split(";")[0].strip().lower()
        file_name = url.split("/")[-1]
        file_bytes = BytesIO(response.content)
        if content_type == "application/pdf":
            # Create file
            document_file = File(file_bytes, name=file_name)
            document = Document(
                title=file_name,
                file=document_file,
            )
            document.save()
            media_blocks.append(("document", document))
        elif content_type in ["image/jpeg", "image/png"]:
            # create image
            image_file = ImageFile(file_bytes, name=file_name)
            image = Image(
                title=file_name,
                file=image_file,
            )
            image.save()
            media_blocks.append(("image", image))
        else:
            print(url)
            print(content_type)
            print("-----")
    return media_blocks
def parse_results(self):
    """Download each media result and store it as a Document or an Image.

    Walks ``self.results``, saving files into the Wagtail collection
    mapped from the result's source sub-site. Failures are appended to
    a log file. Recurses through paginated results via ``self.next``
    and finally returns a ``(total_media_count, 0)`` tuple.
    """
    media_files = self.results
    for r in media_files:
        sub_site = r.get('source')
        collection_name = SOURCES[sub_site]
        collection = Collection.objects.get(name=collection_name)
        source_url = r.get('source_url')
        media_type = r.get('media_type')
        # Use the last URL segment as the stored file name.
        media_name = source_url.split('/')[-1]
        response = requests.get(source_url)
        title = r.get('title')
        # if the title is blank it causes an error
        if not title:
            title = 'No title was available'
        if response:
            if media_type == 'file':
                # save to documents
                media_file = File(BytesIO(response.content), name=media_name)
                file = Document(title=title, file=media_file,
                                collection=collection)
                file.save()
                # created_at is auto-set on first save; overwrite it with
                # the source date afterwards and save again.
                file.created_at = r.get('date')
                file.save()
            elif media_type == 'image':
                # save to images
                image_file = ImageFile(BytesIO(response.content),
                                       name=media_name)
                image = Image(title=title, file=image_file,
                              collection=collection)
                image.save()
                image.created_at = r.get('date')
                image.save()
        else:
            sys.stdout.write(
                '⚠️ Got no response. Error has been logged importer/log/import_media_files.txt\n'
            )
            with open('importer/log/import_media_files.txt', 'a') as the_file:
                the_file.write('{}\n'.format(r))
    # Follow pagination: fetch the next page and recurse until exhausted.
    if self.next:
        time.sleep(self.sleep_between_fetches)
        self.fetch_url(self.next)
        self.parse_results()
    return Document.objects.count() + Image.objects.count(), 0
def create_image(self, instance):
    """Render a snippet image for *instance* and persist it as a Wagtail Image."""
    data = self.collect_data(instance)
    description = self.get_image_description(instance)
    # Pop tags (if any) out of the description before it is used as kwargs.
    tags = description.get('tags') and description.pop('tags')
    image = Image(**description)
    rendered = self.create_snippet_image(data)
    image.file.save(self.get_file_name(), rendered, save=False)
    image.save()
    if tags:
        image.tags.set(*tags, clear=True)
        image.save()
    return image
def _set_image(self, obj, attr_name, folder_path, img_path):
    """helper to set images for objects

    Looks up an Image titled after the file name, importing the file
    from disk if no such Image exists yet, then assigns it to
    ``obj.<attr_name>`` and saves *obj*.
    """
    img_path = folder_path.joinpath(img_path)
    # create and set the file if it does not yet exist
    qs = Image.objects.filter(title=img_path.name)
    if not qs.exists():
        with open(img_path, "rb") as f:
            # setting name= is important. otherwise it uses the entire file path as
            # name, which leaks server filesystem structure to the outside.
            # NOTE(review): ``img_path.stem`` drops the file extension from
            # the stored name — confirm that is intentional.
            image_file = File(f, name=img_path.stem)
            image = Image(title=img_path.name, file=image_file.open())
            image.save()
    else:
        image = qs[0]
    setattr(obj, attr_name, image)
    obj.save()
def import_header_image(self, entry, items, image_id):
    """Find the WordPress attachment matching *image_id* and use it as header."""
    self.stdout.write('\tImport header images....')
    for item in items:
        if item.find(u'{{{0:s}}}post_type'.format(self.WP_NS)).text != 'attachment':
            continue
        if item.find(u'{{{0:s}}}post_id'.format(self.WP_NS)).text != image_id:
            continue
        title = item.find('title').text
        image_url = item.find(u'{{{0:s}}}attachment_url'.format(self.WP_NS)).text
        image = self._import_image(image_url)
        if image:
            new_image = WagtailImage(file=File(file=image), title=title)
            new_image.save()
            entry.header_image = new_image
            entry.save()
def mock(title):
    """
    Mock a ImageBlock
    :param title: String
    :return: Stream content
    """
    url = str.strip(constants.URL_IMAGE_MOCK_1)
    filename = "%s.png" % title
    try:
        image = Image.objects.get(title=title)
    except Image.DoesNotExist:
        # No cached image with this title yet: download and store one.
        response = requests.get(url)
        image = Image(
            title=title,
            file=ImageFile(BytesIO(response.content), name=filename),
        )
        image.save()
    return {'type': 'image', 'value': image.id}
def handle(self, *args, **options):
    """Import speaker photos listed in a CSV and attach them to Speaker records.

    Rows without a filename are skipped with a warning; each imported
    photo is hashed, indexed, tagged "speaker", and linked to the Speaker
    matched by its WordPress id.
    """
    self.stdout.write(self.style.SUCCESS("Importing images"))
    with open(options["speakers_csv"], "r") as file:
        for row in DictReader(file):
            if not row["filename"]:
                self.stdout.write(
                    self.style.WARNING(
                        f"====> skipping {row['post_name']}, {row['post_title']}"
                    ))
                continue
            image_path = os.path.join(options["from_dir"], row["filename"])
            with open(image_path, "rb") as image_file:
                image = Image(title=row["post_title"])
                # Normalise the stored filename to "<first> <last>.<ext>".
                if row["filename"].lower().endswith(".jpg") or row[
                        "filename"].lower().endswith(".jpeg"):
                    image_filename = f"{row['first_name']} {row['last_name']}.jpg"
                elif row["filename"].lower().endswith(".png"):
                    image_filename = f"{row['first_name']} {row['last_name']}.png"
                else:
                    raise ValueError("Unknown file format")
                image.file = ImageFile(file=image_file, name=image_filename)
                image.file_size = image.file.size
                image.file.seek(0)
                image._set_file_hash(image.file.read())
                image.file.seek(0)
                # Reindex the image to make sure all tags are indexed
                search_index.insert_or_update_object(image)
                image.save()
                image.tags.add("speaker")
                speaker = Speaker.objects.get(wordpress_id=row["ID"])
                speaker.photo = image
                speaker.save()
                self.stdout.write(
                    self.style.SUCCESS(f"{image.pk},{image.title}"))
    self.stdout.write(self.style.SUCCESS("Importing images finished"))
def update_thumbnail(self):
    """Create/refresh the Wagtail Image used as this video's thumbnail.

    Uses the uploaded thumbnail file when present; otherwise grabs the
    first frame of the video with OpenCV. Returns the saved Image.
    """
    generated_file = False
    if self.video.thumbnail:
        source = open(self.video.thumbnail.path, 'rb')
    else:
        clip = cv2.VideoCapture(self.video.file.path)
        ret, frame = clip.read()
        # BUGFIX: release the capture handle once the frame is read.
        clip.release()
        generated_file = 'thumbnail.jpeg'
        cv2.imwrite(generated_file, frame)
        source = open(generated_file, 'rb')
    try:
        thumbnail = Image(title=text_processing.html_to_str(
            self.english_title), file=File(source))
        thumbnail.save()
    finally:
        # BUGFIX: the source file handle was previously never closed.
        source.close()
    self.video_thumbnail = thumbnail
    self.save()
    if generated_file:
        os.remove(generated_file)
    return thumbnail
def get_or_create_image(self, row):
    """Return an existing Image matching the file's hash, or import the file.

    Returns None when no image path/filename is configured or the file
    does not exist on disk.
    """
    image = None
    if self.image_path and row['filename']:
        path = os.path.join(self.image_path, row['filename'])
        if os.path.exists(path):
            image_title = re.sub(r'[^\w\.]', '-',
                                 row['filename'].strip()).lower()
            # Renamed from ``hash`` to avoid shadowing the builtin.
            file_hash = get_hash_from_file(path)
            # Deduplicate on the content hash embedded in the stored name.
            image = Image.objects.filter(file__contains=file_hash).first()
            if not image:
                new_name = re.sub(r'.*\.', file_hash + '.', row['filename'])
                # BUGFIX: close the source file after Django copies it,
                # instead of leaking the descriptor.
                with open(path, 'rb') as source:
                    image = Image(
                        file=ImageFile(File(source), name=new_name),
                        title=image_title)
                    image.save()
                _print_operation(image, True, 'title')
    return image
def create_wagtail_image_from_remote(image_url=None, images_folder='original_images', collection=None):
    """Download *image_url* into MEDIA_ROOT and register it as a Wagtail Image.

    Returns the existing Image when one with the same stored file path is
    already registered, the newly created Image on success, or None when
    the download fails.
    """
    basename = os.path.basename(image_url)
    db_file_field = os.path.join(images_folder, basename).replace('\\', '/')
    destination_image = os.path.join(
        settings.MEDIA_ROOT, images_folder, os.path.basename(image_url)
    )
    if collection is None:
        collection = get_behance_collection()
    # PERF/BUGFIX: check for an existing record *before* downloading —
    # previously the HTTP request was issued even when the image already
    # existed, and its response was discarded.
    existing = Image.objects.filter(file=db_file_field).first()
    if existing is not None:
        return existing
    r = requests.get(image_url)
    if r.status_code == 200:
        with open(destination_image, 'wb') as f:
            f.write(r.content)
        local_image = PILImage.open(destination_image)
        width, height = local_image.size
        img = Image()
        img.file = db_file_field
        img.title = basename
        img.width = width
        img.height = height
        img.collection = collection
        img.save()
        return img
    return None
def form_valid(self, form):
    """Persist the uploaded photo, its Image, and an anonymous Photographer."""
    image_file = form.cleaned_data['image_file']
    photographer = Photographer(
        age_range=form.cleaned_data['age_range'],
        gender_category=form.cleaned_data['gender'],
        gender_other=form.cleaned_data['gender_other'],
    )
    photographer.save()
    uploaded_image = Image(title=image_file.name, file=image_file)
    uploaded_image.save()
    photo = form.save()
    photo.image = uploaded_image
    photo.photographer = photographer
    photo.save()
    # The reference number lets the submitter look their photo up later.
    self.request.session['reference_number'] = photo.reference_number
    return super().form_valid(form)
def fetch_and_create_image(self, url, image_title):
    """Fetch an image from *url* and create a Wagtail ``Image`` for it.

    Returns a ``(Image, context)`` tuple, where *context* carries the
    request URL and foreign title for diagnostics.

    Raises:
        ImageCreationFailed: wrapping any download or creation error,
            with the triggering exception added to *context*.
    """
    context = {
        "file_url": url,
        "foreign_title": image_title,
    }
    try:
        response = requests.get(url)
        # BUGFIX: requests does not raise on 4xx/5xx, so an error page
        # would previously have been saved as the image file. Surface it
        # as ImageCreationFailed instead.
        response.raise_for_status()
        local_image = Image(title=image_title,
                            file=ImageFile(BytesIO(response.content),
                                           name=image_title))
        local_image.save()
        return (local_image, context)
    except Exception as e:
        context.update({
            "exception": e,
        })
        raise ImageCreationFailed(context, None)
def handle(self, *args, **options):
    """Import magazine issues from CSV with a tqdm progress bar."""
    # Get the only instance of Magazine Index Page
    magazine_index_page = MagazineIndexPage.objects.get()
    with open(options["file"]) as import_file:
        rows = list(csv.DictReader(import_file))
        for issue in tqdm(rows, desc="Issues", unit="row"):
            response = requests.get(issue["cover_image_url"])
            cover = Image(
                title=issue["title"] + " cover image",
                file=ImageFile(BytesIO(response.content),
                               name=issue["cover_image_file_name"]),
            )
            cover.save()
            # Parse the CSV date and make it timezone-aware for Django.
            published = make_aware(
                datetime.strptime(issue["publication_date"], "%Y-%m-%d"))
            import_issue = MagazineIssue(
                title=issue["title"],
                publication_date=published,
                first_published_at=published,
                issue_number=issue["issue_number"],
                cover_image=cover,
            )
            # Add issue to site page hiererchy
            magazine_index_page.add_child(instance=import_issue)
            magazine_index_page.save()
    self.stdout.write("All done!")
def handle(self, *args, **options):
    """Seed the home page: ensure a hero image exists and set body/hero fields.

    Idempotent for the collection and the hero image (both are looked up
    before creation); the home page body is overwritten on every run.
    """
    collection_root = Collection.get_first_root_node()
    # Reuse the 'Temporary Images' collection if present, else create it.
    try:
        collection = Collection.objects.get(name='Temporary Images')
    except Collection.DoesNotExist:
        collection = collection_root.add_child(name='Temporary Images')
    # Reuse the hero image if a previous run already imported it.
    try:
        image = Image.objects.get(title='Hero Image')
    except Image.DoesNotExist:
        path = 'importer/bin/homepage-hero-image.jpg'
        load_image = open(path, "rb").read()
        image_file = ImageFile(BytesIO(load_image),
                               name='homepage-hero-image.jpg')
        image = Image(title='Hero Image', file=image_file,
                      collection=collection)
        image.save()
    home_page = HomePage.objects.filter(title='Home')[0]
    """ home page body is a streamfield. make a dict for the promo_group block
    if a decision is made to alter the home page this will need to be updated"""
    home_page_stream_field = [
        {
            'type': 'promo_group',
            'value': {
                'column': 'one-half',
                'size': '',
                'heading_level': '3',
                'promos': [
                    {
                        'url': 'https://staging.nhsei.rkh.co.uk/publication/nhs-england-improvement/',
                        'heading': 'Latest publications',
                        'description': 'See our most recent publications and search for documents in our publications library',
                        'content_image': None,
                        'alt_text': ''
                    },
                    {
                        'url': 'https://staging.nhsei.rkh.co.uk/news/nhs-england-improvement/',
                        'heading': 'News',
                        'description': 'Our headline announcements',
                        'content_image': None,
                        'alt_text': ''
                    },
                    {
                        'url': 'https://staging.nhsei.rkh.co.uk/gp/',
                        'heading': 'General Practice',
                        'description': 'Supporting GPs and GP-led services across our local communities ',
                        'content_image': None,
                        'alt_text': ''
                    },
                    {
                        'url': 'https://staging.nhsei.rkh.co.uk/diabetes/',
                        'heading': 'Diabetes',
                        'description': 'Improving outcomes for people with diabetes',
                        'content_image': None,
                        'alt_text': ''
                    },
                ]
            }
        },
        {
            'type': 'warning_callout',
            'value': {
                'title': 'Are you looking for health advice?',
                'heading_level': '3',
                'body': '<p><a href="https://www.nhs.uk/">Find advice on health conditions, symptoms, healthy living, medicines and how to get help.</a></p>',
            }
        },
    ]
    # StreamField content is stored as a JSON string.
    home_page.body = json.dumps(home_page_stream_field)
    home_page.hero_heading = "Supporting the NHS"
    home_page.hero_text = "to improve people’s care"
    home_page.hero_image = image
    # Save a revision first, then the page, then publish the revision.
    rev = home_page.save_revision()
    home_page.save()
    rev.publish()
def create_blog_pages(self, posts, blog_index, *args, **options):
    """create Blog post entries from wordpress data

    Existing pages (matched on slug) are updated in place; new ones are
    added as children of *blog_index*. Featured images, categories/tags,
    and (optionally) comments are imported per post.
    """
    for post in posts:
        post_id = post.get('ID')
        title = post.get('title')
        if title:
            new_title = self.convert_html_entities(title)
            title = new_title
        slug = post.get('slug')
        description = post.get('description')
        if description:
            description = self.convert_html_entities(description)
        body = post.get('content')
        # Wrap bare text in paragraph tags if the export is not HTML yet.
        if not "<p>" in body:
            body = linebreaks(body)
        # get image info from content and create image objects
        body = self.create_images_from_urls_in_content(body)
        # author/user data
        author = post.get('author')
        user = self.create_user(author)
        categories = post.get('terms')
        # format the date
        date = post.get('date')[:10]
        # Update the page in place when it already exists, else create it.
        try:
            new_entry = BlogPage.objects.get(slug=slug)
            new_entry.title = title
            new_entry.body = body
            new_entry.owner = user
            new_entry.author = user
            new_entry.latest_revision_created_at = date
            new_entry.save()
        except BlogPage.DoesNotExist:
            new_entry = blog_index.add_child(
                instance=BlogPage(title=title,
                                  slug=slug,
                                  search_description="description",
                                  date=date,
                                  latest_revision_created_at=date,
                                  body=body,
                                  owner=user,
                                  author=user))
        featured_image = post.get('featured_image')
        if featured_image is not None:
            title = post['featured_image']['title']
            source = post['featured_image']['source']
            path, file_ = os.path.split(source)
            # NOTE(review): host rewrite maps a staging domain to
            # production — confirm this is still wanted.
            source = source.replace('stage.swoon', 'swoon')
            try:
                remote_image = urllib.request.urlretrieve(
                    self.prepare_url(source))
                # Header images are stored with a fixed 640x290 size.
                width = 640
                height = 290
                header_image = Image(title=title, width=width, height=height)
                header_image.file.save(file_,
                                       File(open(remote_image[0], 'rb')))
                header_image.save()
            except UnicodeEncodeError:
                header_image = None
                print('unable to set header image {}'.format(source))
        else:
            header_image = None
        new_entry.header_image = header_image
        new_entry.save()
        if categories:
            self.create_categories_and_tags(new_entry, categories)
        if self.should_import_comments:
            self.import_comments(post_id, slug)
class SnippetImageTestCase(TestCase):
    """Tests for the generated ``snippet_image_field`` on ``HomePage``.

    Each test compares the generated image against a reference fixture
    via ``compare_image`` and expects a zero RMS difference.
    """

    # Title text rendered onto the snippet image in every test.
    text = 'What time is it?'

    def setUp(self):
        # Every test gets the existing root page and a freshly saved
        # background Wagtail image.
        self.root_page = HomePage.objects.first()
        self.background = WagtailImage(title='Snippet image')
        with open(BACKGROUND_FILE, 'rb') as background_file:
            self.background.file.save('background.jpg', background_file)
        self.background.save()

    def test_version(self):
        self.assertEqual(__version__, '0.1.4')

    def test_should_be_created(self):
        # A draft page gets no snippet image generated.
        instance = HomePage(
            title=self.text,
            status=Statuses.DRAFT,
        )
        self.root_page.add_child(instance=instance)
        self.assertIsNone(instance.snippet_image_field)

    def test_with_background(self):
        instance = HomePage(
            title=self.text,
            background=self.background,
            status=Statuses.PUBLISH,
        )
        self.root_page.add_child(instance=instance)
        instance.save()
        # Zero RMS means pixel-identical to the reference fixture.
        rms = compare_image(instance.snippet_image_field.file.path,
                            PURPOSE_IMAGE_WITH_BACKGROUND)
        self.assertEqual(rms, 0)

    @override_settings(
        SNIPPET_IMAGE_DEFAULT_OVERLAY=None,
        SNIPPET_IMAGE_DEFAULT_SIZE=(1200, 630),
    )
    def test_with_size(self):
        instance = HomePage(
            title=self.text,
            status=Statuses.PUBLISH,
        )
        self.root_page.add_child(instance=instance)
        rms = compare_image(instance.snippet_image_field.file.path,
                            PURPOSE_IMAGE_WITH_SIZE)
        self.assertEqual(rms, 0)

    @override_settings(
        SNIPPET_IMAGE_DEFAULT_OVERLAY=None,
        SNIPPET_IMAGE_DEFAULT_SIZE=(1200, 630),
    )
    def test_with_size_and_background(self):
        instance = HomePage(
            title=self.text,
            background=self.background,
            status=Statuses.PUBLISH,
        )
        self.root_page.add_child(instance=instance)
        rms = compare_image(instance.snippet_image_field.file.path,
                            PURPOSE_IMAGE_WITH_SIZE_AND_BACKGROUND)
        self.assertEqual(rms, 0)

    def test_without_background(self):
        instance = HomePage(
            title=self.text,
            status=Statuses.PUBLISH,
        )
        self.root_page.add_child(instance=instance)
        rms = compare_image(instance.snippet_image_field.file.path,
                            PURPOSE_IMAGE_WITHOUT_BACKGROUND)
        self.assertEqual(rms, 0)
def handle(self, *args, **options):
    """Migrate installation pages, gallery items and images from organized.json.

    Creates any missing InstallationMedium records, builds one
    InstallationPage per entry (backdated one day apart), attaches
    GalleryItems with their images, and finally picks five random images
    for the home page.
    """
    # Hand-curated mapping from installation name to its medium.
    assigned_mediums = {
        '7 deadly sins': 'Collage',
        'attempt to mimic water': 'Sculpture',
        'bound': 'Collage',
        'collage 2019': 'Collage',
        'collage 2020': 'Collage',
        'commissions': 'Mixed Media',
        'containment': 'Mixed Media',
        'dissimulation': 'Sculpture',
        'dusk': 'Mixed Media',
        'emily owens as a rock': 'Sculpture',
        'endless nightmare': 'Painting',
        'ephemeral embedding': 'Sculpture',
        'exit landscape': 'Painting',
        'fertile ground': 'Mixed Media',
        'growth': 'Painting',
        'house plant with artificial shadow': 'Mixed Media',
        'i dreamt of you': 'Painting',
        'imagined landscapes and watersources': 'Painting',
        'imitation of a chair': 'Video',
        'inconspicuous growth': 'Painting',
        'moss': 'Painting',
        'new mexico': 'Painting',
        'no place (utopic traces)': 'Painting',
        'object in the environment': 'Photography',
        'paintings 2013': 'Painting',
        'paintings 2014': 'Painting',
        'paintings in flux': 'Painting',
        'process': 'Photography',
        'push (traces)': 'Video',
        'representing abstraction': 'Mixed Media',
        'return to soil': 'Mixed Media',
        'shore': 'Mixed Media',
        'tarot series': 'Collage',
        'the zodiac': 'Collage',
        'twin peaks': 'Collage',
        'utopia': 'Painting',
        'voyage: an expedition into materiality': 'Mixed Media'
    }
    self.stdout.write("Trying to add mediums...")
    mediums = [
        'Collage', 'Painting', 'Photography', 'Mixed Media', 'Sculpture',
        'Video'
    ]
    # Pages are backdated starting a year ago, one day apart per page.
    post_date = datetime.today() - timedelta(days=365)
    for m in mediums:
        existing_mediums = [
            i.name for i in InstallationMedium.objects.all()
        ]
        if m in existing_mediums:
            self.stdout.write(f"{m} already exists as a medium.")
        else:
            medium = InstallationMedium(name=m)
            medium.save()
    wd = os.path.abspath(os.getcwd())
    print(wd)
    with open(os.path.join(wd, 'organized.json'), 'r') as f:
        data = json.load(f)  # ['data']
    for page in reversed(data):
        name = page['name']
        body = page.get('body', '')
        items = page['items']
        gallery = Gallery.objects.first()
        installation = InstallationPage(
            title=name,
            slug=slugify(name),
            date=post_date,
            # An empty body string becomes a null StreamField.
            body=json.dumps([{
                'type': 'paragraph',
                'value': body
            }]) if len(body) else None,
            mediums=[
                InstallationMedium.objects.get(
                    name=assigned_mediums[name])
            ])
        self.stdout.write(f"Initialized page {name}")
        gallery.add_child(instance=installation)
        # installation.save_revision().publish()
        # saved_items = []
        image_counter = 0
        for item_data in items:
            # Slug gets a counter suffix to stay unique per item.
            gallery_item = GalleryItem(
                title=name,
                slug=slugify(name) + str(image_counter),
                description=item_data.get('description', ''))
            gallery_images = []
            for img_data in item_data['images']:
                filename = img_data['filename']
                path = os.path.join(
                    os.path.join(wd, 'migration_images'), filename)
                with open(path, "rb") as imagefile:
                    image = Image(
                        file=ImageFile(BytesIO(imagefile.read()),
                                       name=filename),
                        title=name + '-' + filename.rsplit('.', 1)[0])
                    image.save()
                    gallery_image = GalleryImage(
                        image=image, caption=img_data['caption'])
                    gallery_images.append(gallery_image)
                    gallery_item.gallery_images.add(gallery_image)
                    self.stdout.write(
                        f" Saved image (unknown) to database")
            installation.add_child(instance=gallery_item)
            image_counter += 1
        # installation.gallery_images=saved_items
        installation.save_revision().publish()
        self.stdout.write(f" Attached images to {name}.")
        self.stdout.write(
            f"Published page {name} with {str(len(items))} images.")
        post_date = post_date + timedelta(days=1)
    self.stdout.write(
        'Finalizing homepage and publishing all page objects...')
    # Feature five random gallery images on the home page.
    all_images = list(GalleryImage.objects.all())
    random_images = random.sample(all_images, 5)
    for img_obj in random_images:
        img = img_obj.image
        featured_img = HomePageImage(home_page_image=img)
        featured_img.save()
    for item in GalleryItem.objects.all():
        item.save_revision().publish()
    self.stdout.write('Done.')
def wagtail_image(db) -> Image:
    """Pytest fixture: build and save a Wagtail Image from the fixture file."""
    with open(image_fixture["file"], 'rb') as _file:
        file_data = File(_file)
        image = Image(title="test_home")
        image.file.save(name="test_home", content=file_data)
        image.save()
    # BUGFIX: the fixture previously returned None, so tests requesting
    # it never received the Image instance.
    return image
def import_entries(self):
    """Migrate all Zinnia blog entries into puput EntryPages.

    For each entry: import its header image, re-host any content images
    served from MEDIA_URL, wrap the body into a single-block StreamField
    JSON payload, and create the EntryPage if one with the same slug does
    not already exist.
    """
    self.stdout.write("Importing entries...")
    entries = ZinniaEntry.objects.all()
    for entry in entries:
        self.stdout.write(entry.title)
        # Header images
        if entry.image:
            header_image = WagtailImage(file=entry.image,
                                        title=os.path.basename(
                                            entry.image.url))
            self.stdout.write('\tImported header image: {}'.format(
                entry.image))
            header_image.save()
        else:
            header_image = None
        self.stdout.write(
            '\tGenerate and replace entry content images....')
        if entry.content:
            root = lxml.html.fromstring(entry.content)
            for el in root.iter('img'):
                if el.attrib['src'].startswith(settings.MEDIA_URL):
                    # fix media chunks path naming e.g. /media/chinks/media/stuff.jpg will fail
                    img_path = el.attrib['src']
                    old_image = img_path[len(settings.MEDIA_URL):]
                    try:
                        with open(
                                '{}/{}'.format(settings.MEDIA_ROOT,
                                               old_image),
                                'r') as image_file:
                            new_image = WagtailImage(
                                file=File(
                                    file=image_file,
                                    name=os.path.basename(old_image)),
                                title=os.path.basename(old_image))
                            new_image.save()
                            el.attrib['src'] = new_image.file.url
                            self.stdout.write('\t\t{}'.format(
                                new_image.file.url))
                    except Exception as e:
                        # handle image encoding errors like non utf-8 chars
                        print(e)
                        # NOTE(review): this literal was split across a line
                        # break in the original source — confirm wording.
                        print("error handling image, move on... entry:" +
                              str(entry.id))
            # New content with images replaced
            content = lxml.html.tostring(root, pretty_print=True)
        else:
            content = entry.content
        # decode, somehow the content is a byte array
        if len(content) != 0:
            content = content.decode()
        # First, convert the html to json, with the appropriate block type
        # we converted the body from a RichTextField to a StreamField
        import json
        content = json.dumps([{'type': 'html', 'value': content}])
        # fix empty author entrys (puput will not render the page if no author is set)
        author = entry.authors.first()
        if author == None:
            from zinnia.models.author import Author
            author = Author.objects.first()
        # Create page
        try:
            page = EntryPage.objects.get(slug=entry.slug)
        except EntryPage.DoesNotExist:
            page = EntryPage(
                title=entry.title,
                body=content,
                # fix missing excerpt transfer
                excerpt=entry.excerpt,
                slug=entry.slug,
                go_live_at=entry.start_publication,
                expire_at=entry.end_publication,
                first_published_at=entry.creation_date,
                date=entry.creation_date,
                owner=author,
                seo_title=entry.title,
                search_description=entry.excerpt,
                live=entry.is_visible,
                header_image=header_image)
            self.blogpage.add_child(instance=page)
            revision = self.blogpage.save_revision()
            revision.publish()
        self.import_entry_categories(entry, page)
        self.import_entry_tags(entry, page)
        page.save()
        page.save_revision(changed=False)
        self.entries[entry.pk] = page