def test_transplant_m(self):
    """'transplant' accepts an in-memory output target (BytesIO)."""
    buffer = io.BytesIO()
    piexif.transplant(I1, I2, buffer)
    # EXIF in the in-memory result must match the donor file's EXIF.
    self.assertEqual(piexif.load(I1), piexif.load(buffer.getvalue()))
    # The buffer must still be a valid, openable JPEG.
    Image.open(buffer).close()
def makeImageG(ejeYMakeImage, rutaImagen, lat, lon, alt):
    """Plot the 230..889 band window of a spectrum, save it as a JPEG
    and geotag the saved file.

    Parameters
    ----------
    ejeYMakeImage : sequence of float
        Intensity values (y axis); must cover indices 230..889.
    rutaImagen : str
        Output path the plot is saved to.
    lat, lon, alt : float
        GPS coordinates written into the saved file's EXIF.

    Returns
    -------
    None
        ``plt.savefig`` returns ``None``; that value is returned for
        backward compatibility with existing callers.
    """
    # `wavelenghtsLista` is a module-level list of wavelengths (x axis);
    # it is only read here, so no `global` statement is needed.
    ejeX = np.array(wavelenghtsLista, dtype=np.float32)
    ejeY = np.array(ejeYMakeImage, dtype=np.float32)
    # Keep only the usable band window [230, 890); slicing replaces the
    # original element-by-element copy loops.
    espectrocor = ejeY[230:890]
    wavecor = ejeX[230:890]
    plt.figure(1)
    ax = plt.subplot(111)
    plt.plot(wavecor, espectrocor)  # , label='Negro')
    ax.set_ylim(min(espectrocor), max(espectrocor))
    plt.legend()
    resultadoMakeImage = plt.savefig(rutaImagen, format="jpg")
    # Reset matplotlib state so repeated calls don't accumulate plots.
    plt.cla()
    plt.clf()
    plt.close()
    # Copy EXIF from the project's base image, then write the GPS tags.
    rel_path = '/testy/base.jpg'
    filePath = FileManagement.to_relative(rel_path)
    piexif.transplant(filePath, rutaImagen)
    set_gps_location(rutaImagen, lat, lon, alt)
    return resultadoMakeImage
def generate_thumbnails(filename, thumbs_folder):
    """Copy *filename* into *thumbs_folder* under a randomized name and
    generate one thumbnail per entry in THUMBNAILS.

    Each output gets its own random suffix so one size's URL cannot be
    guessed from another. Thumbnails listed in KEEP_EXIF keep the
    original EXIF (and are therefore not rotated here); all others are
    rotated according to the source's orientation.

    Returns a dict mapping "original"/thumbnail names to output paths.
    """
    base = basename(filename)
    name, ext = splitext(base)
    secret = random_string()
    # Also add a random suffix to the copy of the original.
    new_original = join(thumbs_folder, "%s-%s%s" % (name, secret, ext))
    shutil.copyfile(filename, new_original)
    generated = {"original": new_original}
    for thumb_name, dim in THUMBNAILS.items():
        secret = random_string()
        # I want each thumbnail have a different random string so you cannot
        # guess the other size from the URL
        out_name = join(thumbs_folder,
                        "%s--%s-%s%s" % (name, thumb_name, secret, ext))
        # Context manager closes the file handle even if save() fails;
        # the original leaked one open image per thumbnail.
        with Image.open(new_original) as orig:
            rotation = read_rotation(orig)
            orig.thumbnail((dim, dim))
            if thumb_name not in KEEP_EXIF:
                # Only rotate those that don't have exif copied
                orig = orig.rotate(rotation, expand=True)
            orig.save(out_name, format="JPEG", quality=THUMB_QUALITY,
                      progressive=True)
        generated[thumb_name] = out_name
        if thumb_name in KEEP_EXIF:
            try:
                piexif.transplant(new_original, out_name)
            except ValueError:
                # Original did not have EXIF to transplant
                pass
    return generated
def generate_thumbnails(jpg_data, export_dir, long_edge=1024):
    """Resize images to thumbnails and copy their EXIF metadata.

    (The previous docstring — "Formats a timedelta object as a string."
    — was copied from an unrelated function.)

    Parameters
    ----------
    jpg_data : list of dictionaries
        Requires that "filepath" and "filename" is in dictionary keys,
        which is easily provided by find_imgs() prior to this function.
    export_dir : str
        Directory path that is used for exporting thumbnails.
    long_edge : int, optional
        By default, images will be resized to 1024 pixels on the long
        edge of the image. Smaller sizes speed up performance, but
        decrease acuity.
    """
    if not os.path.exists(export_dir):
        os.makedirs(export_dir)
    # Progress is reported roughly every 10% of the input. Clamp the
    # interval to at least 1: with fewer than 10 images the original
    # `len(jpg_data) // 10` was 0 and `i % timer[0]` raised
    # ZeroDivisionError.
    timer = (max(len(jpg_data) // 10, 1), time.time())
    for i, jpg in enumerate(jpg_data):
        if not i % timer[0] and i:
            stop_watch(i, timer)
        from_path = jpg["filepath"]
        to_path = os.path.join(export_dir, jpg["filename"])
        im = cv2.imread(from_path)
        resized = resize_long_edge(im, long_edge)
        cv2.imwrite(to_path, resized)
        # Preserve the original's EXIF metadata in the thumbnail.
        piexif.transplant(from_path, to_path)
def save(image: Image, base: pathlib.Path, stem: str, kind: str,
         first_path: Optional[pathlib.Path] = None) -> None:
    """Auto-contrast *image* and write it as ``<stem>_<kind>.JPG`` under
    *base*; when *first_path* is given, copy its EXIF into the output."""
    target = base.joinpath(stem + '_' + kind + '.JPG')
    adjusted = ImageOps.autocontrast(image, cutoff=0.1)
    adjusted.save(target)
    if first_path is not None:
        # use EXIF from first_path image
        piexif.transplant(str(first_path), str(target))
def multi_crop_img_lst(ROIs, out_paths, in_path, pic_lst):
    """
    Crop all ROIs out of the entire list of pictures in the in_path
    folder and save them to the out_paths list.

    A preview window opens per ROI; pressing Esc or 'q' stops both the
    ROI loop and the picture loop.
    """
    for full_pic_path in tq(pic_lst):
        ## Create new file name for the cropped img
        pic_name = full_pic_path.rsplit("\\", 1)[-1]
        date = in_path.split("\\")[-3].split("_")[0]
        orient = in_path.split("\\")[-2].split("_")[-1]
        # str.strip() removes *character sets*, not substrings, so the
        # original `strip(".JPG").strip("DSC_")` could corrupt names
        # whose stems begin/end with those characters; remove the exact
        # suffix and prefix instead.
        stem = pic_name
        if stem.endswith(".JPG"):
            stem = stem[:-len(".JPG")]
        if stem.startswith("DSC_"):
            stem = stem[len("DSC_"):]
        new_pic_name = stem + "_CROPED.jpg"
        ## Load img
        img = cv2.imread(full_pic_path)
        # Last key pressed; initialized so the check after the loop is
        # safe even when ROIs is empty (previously a NameError).
        k = 0
        ## Loop over selected ROIs
        for j, ROI in enumerate(ROIs):
            ## Crop the img
            x, y, w, h = ROI[0], ROI[1], ROI[2], ROI[3]
            croped_img = img[y:y + h, x:x + w]
            ## create window for every ROI
            cv2.namedWindow("croping_" + str(ROI), cv2.WINDOW_NORMAL)
            cv2.imshow("croping_" + str(ROI), croped_img)
            ## Press Esc OR q key to stop
            k = cv2.waitKey(1) & 0xff
            if k == 27 or k == ord('q'):
                break
            ## Save the img to file (keeping the source EXIF)
            out_path = out_paths[j] + "\\" + out_paths[j][-1] + "_" + new_pic_name
            cv2.imwrite(out_path, croped_img)
            piexif.transplant(full_pic_path, out_path)
        ## If we broke off we should stop this loop as well
        if k == 27 or k == ord('q'):
            print("\n\n!!! You Stoped !!!")
            break
def reduce_filesize(image_path, image_quality=50):
    """Recompress *image_path* in place at JPEG quality *image_quality*,
    first downscaling to at most 16000 px wide (aspect ratio kept).

    The original EXIF is transplanted into the recompressed copy before
    the copy replaces the original.
    """
    im = Image.open(image_path)
    width, height = im.size
    if width > 16000:
        n_w = 16000
        n_h = int(n_w / width * height)
        im = im.resize((n_w, n_h), Image.ANTIALIAS)
    resized_pano = "/tmp/resized.jpg"
    im.save(resized_pano, quality=image_quality, optimize=True)
    transplant(image_path, resized_pano)
    # os.replace overwrites atomically; the previous remove()+rename()
    # pair left a window where the image did not exist at all.
    os.replace(resized_pano, image_path)
def test_transplant(self):
    """Three-arg transplant writes a new file; two-arg form edits in place."""
    out = "transplant.jpg"
    piexif.transplant(INPUT_FILE1, INPUT_FILE_PEN, out)
    with Image.open(out):
        pass  # result must be a readable JPEG
    exif_src = piexif.load(INPUT_FILE1)
    img_src = piexif.load(INPUT_FILE_PEN)
    generated = piexif.load(out)
    self.assertEqual(exif_src, generated)
    self.assertNotEqual(img_src, generated)
    # Two-argument form overwrites the target file's EXIF in place.
    piexif.transplant(INPUT_FILE1, out)
    self.assertEqual(piexif.load(INPUT_FILE1), piexif.load(out))
    os.remove(out)
def _recv_rect_image(self, image):
    """Write an incoming rectified ROS image next to its source name and
    transplant the source file's EXIF into the saved JPEG.

    Pops the (filename, _) pair matching this message from self.images.
    """
    cv_image = self.cv_bridge.imgmsg_to_cv2(image, desired_encoding="bgr8")
    filename, _ = self.images.pop(0)
    out_file = os.path.join(self.out_dir, os.path.basename(filename))
    rospy.loginfo("Processing %s", filename)
    try:
        os.makedirs(self.out_dir)
    except OSError as e:
        # Usually "directory already exists"; log and continue. The
        # original also had an `except e:` clause, which referenced an
        # undefined name and would itself raise NameError if reached.
        rospy.logerr(e)
    params = [cv2.IMWRITE_JPEG_QUALITY, 100]
    cv2.imwrite(out_file, cv_image, params)
    piexif.transplant(filename, out_file)
    rospy.loginfo("Saved %s", out_file)
def keep_existing(self, existing_image: ImageInfo, incoming_image: ImageInfo):
    """Keep *existing_image*, recycle *incoming_image*; first preserve the
    incoming file's EXIF when it is a plausibly-older JPEG timestamp."""
    self.logger.info(f"Keeping existing: {existing_image}")
    self.logger.info(f"  Deleting incoming: {incoming_image}")
    is_jpg = existing_image.path.name.lower().endswith(".jpg")
    # Incoming timestamp must be newer than the sentinel but older than
    # the kept image's timestamp for its EXIF to be worth preserving.
    incoming_ts_plausible = OLD_TS < incoming_image.ts < existing_image.ts
    if is_jpg and incoming_ts_plausible:
        self.logger.info(
            f"  But preserving incoming's exif: {incoming_image}")
        try:
            piexif.transplant(incoming_image.path.as_posix(),
                              existing_image.path.as_posix())
        except ValueError as e:
            self.logger.warning(f"Failed to transplant exif: {e}")
        self.reload(existing_image.path)
    self.recycle(incoming_image)
def copy_exif(origin=None, target=None):
    """Copy EXIF from *origin* to *target*.

    Both may be files, or both directories; for directories every file
    under *target* receives EXIF from the same-relative-path file under
    *origin* when that file exists. Missing arguments are prompted for.
    """
    origin = origin if origin else input("Origin file: ")
    target = target if target else input("Target file: ")
    if origin == target:
        return
    both_files = os.path.isfile(origin) and os.path.isfile(target)
    both_dirs = os.path.isdir(origin) and os.path.isdir(target)
    if both_files:
        piexif.transplant(origin, target)
        print_info(origin, target)
    elif both_dirs:
        for root, _dirs, files in os.walk(target):
            for entry in files:
                target_file_path = os.path.join(root, entry)
                # Map the target file back to its counterpart in origin.
                origin_file_path = target_file_path.replace(target, origin)
                if os.path.isfile(origin_file_path):
                    piexif.transplant(origin_file_path, target_file_path)
                    print_info(origin_file_path, target_file_path)
    else:
        return
def img_resize(step, rk):
    """Downscale IMG_1568.jpg .. IMG_1750.jpg (stride *step*) by linear
    factor *rk* from ./raw_images into ./images, keeping EXIF.
    """
    I_PATH = "./raw_images"
    O_PATH = "./images"
    # ignore_errors avoids the crash the old code had when ./images did
    # not exist yet (rmtree ran before the existence check).
    shutil.rmtree(O_PATH, ignore_errors=True)
    if not os.path.exists(O_PATH):
        os.makedirs(O_PATH)
    for num in range(1568, 1751, step):
        img_name = "IMG_{0}.jpg".format(num)
        i_file = os.path.join(I_PATH, img_name)
        o_file = os.path.join(O_PATH, img_name)
        img = cv2.imread(i_file,
                         cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)
        width = int(img.shape[1] / rk)
        height = int(img.shape[0] / rk)
        dim = (width, height)
        resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
        cv2.imwrite(o_file, resized)
        piexif.transplant(i_file, o_file)
        # print() call form: the original Python 2 `print img_name`
        # statement is a SyntaxError under Python 3.
        print(img_name)
def img_optimizer(image, s3bucket):
    """
    Optimizes JPEG file size
    :param image: image S3 object key
    :param s3bucket: S3 bucket
    :return: tuple of status and optimized file name in Lambda instance
    """
    try:
        s3client = boto3.client('s3', region_name=AWS_REGION)
        temp_ifile = '/tmp/tmp_ifile.jpg'
        with open(temp_ifile, 'wb') as data:
            s3client.download_fileobj(s3bucket, image, data)
        file_src = temp_ifile
        stem, ext = file_src.split('.')[0], file_src.split('.')[1]
        file_dst = stem + '_.' + ext
        img = Image.open(file_src)
        # Applying the downsampling antialias algorithm without an actual
        # resize gives 30-40% lower file size. Downsizing with a 0.97
        # aspect ratio gives up to a 50% decrease.
        aspect = 0.97
        new_size = tuple(int(float(side) * float(aspect))
                         for side in img.size)
        new_img = img.resize(new_size, Image.ANTIALIAS)
        new_img.save(file_dst)
        # Carry the source EXIF into the optimized copy.
        piexif.transplant(file_src, file_dst)
        return 'SUCCESS', file_dst
    except Exception as err:
        print('Image optimization failed:', err)
        return 'FAILED', file_src
def image_blur(self, img, target_path):
    """Gaussian-blur detected vehicles/persons in *img* and save the
    result to *target_path*, keeping the source EXIF when possible."""
    img_name = os.path.basename(img)
    try:
        detections = self.detector.detectObjectsFromImage(
            input_image=img,
            output_image_path='./temp/d-{}'.format(img_name))
    finally:
        pass
    imageObject = Image.open(img)
    image_draw = ImageDraw.Draw(imageObject)
    for detection in detections:
        label = detection['name']
        if label in ('car', 'bus', 'truck', 'motorcycle', 'person'):
            print(label)
            box = detection["box_points"]
            x0 = box[0]
            y0 = box[1]
            # Blur the detected region and paste it back in place.
            region = imageObject.crop(box)
            blurred = region.filter(ImageFilter.GaussianBlur(radius=7))
            imageObject.paste(blurred, box)
        else:
            print(label + ' is not vehicle')
    des_img = target_path
    imageObject.save(des_img)
    try:
        piexif.transplant(img, des_img)
    except ValueError:
        # Source had no EXIF block to copy.
        pass
def image_blur(img):
    """Gaussian-blur cars/buses/trucks/persons detected in *img* and save
    the result under X:/Loehne/dkblock/, transplanting the source EXIF."""
    img_dir = os.path.dirname(img)
    img_name = os.path.basename(img)
    img_num = os.path.splitext(img_name)
    print(img_name)
    # Build and load the RetinaNet detector for this image.
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        r"C:\Users\Streckenkontrolle\Documents\liscensedetect/resnet50_coco_best_v2.0.1.h5"
    )
    detector.loadModel()
    detections = detector.detectObjectsFromImage(
        input_image=img, output_image_path='./temp/d-{}'.format(img_name))
    photo = Image.open(img)
    image_draw = ImageDraw.Draw(photo)
    for detection in detections:
        label = detection['name']
        if label in ('car', 'bus', 'truck', 'person'):
            print(label)
            box = detection["box_points"]
            x0 = box[0]
            y0 = box[1]
            blurred = photo.crop(box).filter(
                ImageFilter.GaussianBlur(radius=5))
            photo.paste(blurred, box)
        else:
            print(label + ' is not vehicle')
    des_img = r'X:/Loehne/dkblock/{}'.format(img_name)
    photo.save(des_img)
    piexif.transplant(img, des_img)
def test_03_transfer(self):
    """EXIF can be transplanted between two sample files and reloaded."""
    source = "../samples/Nikon_COOLPIX_P1.jpg"
    target = "../samples/Canon_40D.jpg"
    piexif.transplant(source, target)
    self.assertIsNotNone(piexif.load(target))
import argparse
import io
import os

import piexif


def parse_args():
    """Parse CLI arguments: input image (with EXIF) and output path."""
    parser = argparse.ArgumentParser(
        description="Generate a test image by transplanting exif")
    parser.add_argument("input_image",
                        help="path to imput image containing exif")
    parser.add_argument("output_image",
                        help="path to generated output image")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    with open(args.input_image, "rb") as fin:
        input_string = fin.read()
    empty_image = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                               "data/empty_exif.jpg")
    with open(empty_image, "rb") as f:
        image_string = f.read()
    output_bytes = io.BytesIO()
    piexif.transplant(input_string, image_string, output_bytes)
    # Write binary JPEG data: the file must be opened in "wb" (the old
    # "w" text mode raised TypeError on bytes), and getvalue() returns
    # the whole buffer regardless of stream position (read() right after
    # the write would have returned b"").
    with open(args.output_image, "wb") as fout:
        fout.write(output_bytes.getvalue())
def optimize_jpeg(
    src: Union[pathlib.Path, io.BytesIO],
    dst: Optional[pathlib.Path] = None,
    quality: Optional[int] = 85,
    fast_mode: Optional[bool] = True,
    keep_exif: Optional[bool] = True,
    **options,
) -> Union[pathlib.Path, io.BytesIO]:
    """method to optimize JPEG files using a pure python external optimizer

    quality: JPEG quality (integer between 1 and 100)
        values: 50 | 55 | 35 | 100 | XX
    keep_exif: Whether to keep EXIF data in JPEG (boolean)
        values: True | False
    fast_mode: Use the supplied quality value. If turned off, optimizer will
        get dynamic quality value to ensure better compression
        values: True | False

    Returns dst (a fresh io.BytesIO when dst was None)."""
    # Refuse non-JPEG input up front.
    ensure_matches(src, "JPEG")
    img = Image.open(src)
    # Original size: on-disk size for paths, buffer size for BytesIO.
    orig_size = (os.path.getsize(src)
                 if isinstance(src, pathlib.Path) else src.getbuffer().nbytes)
    # Detect whether the source carries an Exif IFD worth preserving.
    had_exif = False
    if (isinstance(src, io.BytesIO) and piexif.load(src.getvalue())["Exif"]) or (isinstance(
            src, pathlib.Path) and piexif.load(str(src))["Exif"]):
        had_exif = True
    # only use progressive if file size is bigger
    use_progressive_jpg = orig_size > 10240  # 10KiB
    if fast_mode:
        quality_setting = quality
    else:
        # Probe for a dynamic quality/size trade-off instead of the fixed value.
        quality_setting, _ = jpeg_dynamic_quality(img)
    # Default destination is an in-memory buffer.
    if dst is None:
        dst = io.BytesIO()
    img.save(
        dst,
        quality=quality_setting,
        optimize=True,
        progressive=use_progressive_jpg,
        format="JPEG",
    )
    if isinstance(dst, io.BytesIO):
        dst.seek(0)
    if keep_exif and had_exif:
        # NOTE(review): for an in-memory dst, `new_file=dst` makes piexif
        # write the transplanted image back into the same buffer whose old
        # content is also passed via `image=dst.getvalue()` — confirm piexif
        # supports this, and whether callers need dst.seek(0) afterwards.
        piexif.transplant(
            exif_src=str(src.resolve()) if isinstance(src, pathlib.Path) else
            src.getvalue(),
            image=str(dst.resolve()) if isinstance(dst, pathlib.Path) else
            dst.getvalue(),
            new_file=dst,
        )
    return dst
def transplant_sample():
    """Copy EXIF from tests/images/01.jpg into tests/images/02.jpg,
    writing the combined result to transplant_sample.jpg."""
    src = os.path.join("tests", "images", "01.jpg")
    dst = os.path.join("tests", "images", "02.jpg")
    piexif.transplant(src, dst, "transplant_sample.jpg")
def preprocess(self, img):
    """
    Pre-processes an image by cropping its edges, adding a red scale,
    masking existing scales and converting to jpg.
    @param img: path to the image or folder of images to process
    @return pandas DataFrame with 'filename' and 'Pre-process Result'
        columns per file (for a directory, the concatenation over all
        files; None when the directory equals the output directory)
    """
    if os.path.isfile(os.path.abspath(os.path.expanduser(img))):
        #if not self.output_dir:
        #    output_dir = f'{os.path.split(os.path.isfile(os.path.abspath(os.path.expanduser(img))))[0]}/preprocessed'
        #    os.makedirs(output_dir)
        # Refuse to overwrite the source in place.
        if os.path.split(os.path.abspath(
                os.path.expanduser(img)))[0] == self.output_dir:
            #raise ValueError(
            #    'You have provided identical paths for the source and destination images.' +
            #    'This would cause your file to be overwritten. Execution has been halted.')
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Pre-process Result':
                    'Error: You have provided identical paths for the source and destination image.'
                })
        # read the image
        try:
            scan = cv2.imread(os.path.abspath(os.path.expanduser(img)))
        except:
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Pre-process Result':
                    'Error: Unable to open source file.'
                })
        # cv2.imread returns None (rather than raising) on failure.
        if scan is None:
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Pre-process Result':
                    'Error: Unable to open source file.'
                })
        dims = scan.shape  # (height, width, channels)
        # crop the edges symmetrically by self.crop pixels
        if self.crop:
            if self.crop < 0:
                return pd.DataFrame(
                    data={
                        'filename': [img],
                        'Pre-process Result':
                        'Error: You have attempted to crop a negative number of pixels.'
                    })
            if self.crop > dims[0] or self.crop > dims[1]:
                return pd.DataFrame(
                    data={
                        'filename': [img],
                        'Pre-process Result':
                        'Error: You have attempted to crop away more pixels than are available in the image.'
                    })
            scan = scan[self.crop:dims[0] - self.crop,
                        self.crop:dims[1] - self.crop]
        # mask an existing scale bar with a white square
        if self.mask_pixels:
            if self.mask_offset_y < 0 or self.mask_offset_x < 0 or self.mask_pixels < 0:
                return pd.DataFrame(
                    data={
                        'filename': [img],
                        'Pre-process Result':
                        'Error: You have attempted to mask a negative number of pixels.'
                    })
            if self.mask_offset_y + self.mask_pixels > dims[
                    0] or self.mask_offset_x + self.mask_pixels > dims[1]:
                return pd.DataFrame(
                    data={
                        'filename': [img],
                        'Pre-process Result':
                        'Error: You have attempted to mask more pixels than are available in the image.'
                    })
            # Paint the masked square white in all three BGR channels.
            scan[self.mask_offset_y:self.mask_offset_y + self.mask_pixels,
                 self.mask_offset_x:self.mask_offset_x + self.mask_pixels,
                 0] = 255  # b channel
            scan[self.mask_offset_y:self.mask_offset_y + self.mask_pixels,
                 self.mask_offset_x:self.mask_offset_x + self.mask_pixels,
                 1] = 255  # g channel
            scan[self.mask_offset_y:self.mask_offset_y + self.mask_pixels,
                 self.mask_offset_x:self.mask_offset_x + self.mask_pixels,
                 2] = 255  # r channel
        # add a red scale square in the top-left corner
        if self.red_scale:
            if self.red_scale_pixels > dims[
                    0] or self.red_scale_pixels > dims[1]:
                return pd.DataFrame(
                    data={
                        'filename': [img],
                        'Pre-process Result':
                        'Error: You have attempted to place a scale bar beyond the margins of the image.'
                    })
            scan[0:self.red_scale_pixels, 0:self.red_scale_pixels,
                 0] = 0  # b channel
            scan[0:self.red_scale_pixels, 0:self.red_scale_pixels,
                 1] = 0  # g channel
            scan[0:self.red_scale_pixels, 0:self.red_scale_pixels,
                 2] = 255  # red channel
        # build the destination file name (always .jpg, in output_dir)
        file_name = os.path.basename(os.path.abspath(
            os.path.expanduser(img)))
        file_name = f'{os.path.splitext(file_name)[0]}.jpg'
        file_name = os.path.join(
            os.path.abspath(os.path.expanduser(self.output_dir)), file_name)
        # save as jpg; EXIF is read first so we can report on it
        try:
            metadata = ef.Image(os.path.abspath(os.path.expanduser(img)))
        except:
            # EXIF unreadable: still write the processed image, but report.
            cv2.imwrite(file_name, scan)
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Pre-process Result':
                    'Error: EXIF data could not be loaded from source image.'
                })
        # Create the processed image (even if EXIF data isn't viable)
        cv2.imwrite(file_name, scan)
        if (not metadata.has_exif
            ) or not (hasattr(metadata, 'x_resolution')
                      or hasattr(metadata, 'Xresolution')):
            # EXIF has no resolution data present
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Pre-process Result':
                    'Error: EXIF Resolution Data Not transferred to Pre-processed image.'
                })
        else:
            try:
                # Carry the full EXIF block over to the new jpg.
                piexif.transplant(
                    os.path.abspath(os.path.expanduser(img)), file_name)
            except:
                return pd.DataFrame(
                    data={
                        'filename': [img],
                        'Pre-process Result':
                        'Error: Unable to copy EXIF data to processed image.'
                    })
        # Return a data frame to the caller to indicate success
        return pd.DataFrame(data={
            'filename': [img],
            'Pre-process Result': 'No Error'
        })
    elif os.path.isdir(os.path.abspath(os.path.expanduser(img))):
        # Directory mode: fan the per-file work out over a process pool.
        if os.path.abspath(os.path.expanduser(img)) == self.output_dir:
            return None
        images = os.listdir(os.path.abspath(os.path.expanduser(img)))
        images = [
            os.path.join(os.path.abspath(os.path.expanduser(img)), i)
            for i in images
        ]
        # create a workers pool and start processing
        pool = multiprocessing.Pool(self.workers)
        results = pool.map(self.preprocess, images)
        pool.close()
        pool.join()
        return pd.concat(results)
    else:
        #os.rmdir(output_dir)
        return pd.DataFrame(
            data={
                'filename': [img],
                'Pre-process Result': 'Unable to open file or directory.'
            })
def estimate(self, img: str) -> DataFrame:
    """
    Estimate leaf area for a given image or directory of images.
    TO DO: filter images only in the folder - ask the user for extension?
    @param img: path to the scan or images folder. respects tilde expansion
    @return pandas DF with the file name of the input and the estimated area(s)
    """
    if os.path.isfile(os.path.abspath(os.path.expanduser(img))):
        # read the image resolution from EXIF unless the caller set self.res
        if not self.res:
            with open(os.path.expanduser(img), 'rb') as image_meta:
                try:
                    metadata = ef.Image(image_meta)
                except:
                    return pd.DataFrame(
                        data={
                            'filename': [img],
                            'Area': None,
                            'Resolution': None,
                            'Error':
                            'Unable to access EXIF Data for image.'
                        })
                if (not metadata.has_exif
                    ) or not (hasattr(metadata, 'x_resolution')
                              or hasattr(metadata, 'Xresolution')):
                    #raise ValueError("Image of unknown resolution. Please specify the res argument in dpi.")
                    return pd.DataFrame(
                        data={
                            'filename': [img],
                            'Area': None,
                            'Resolution': None,
                            'Error':
                            'Image of unknown resolution. Please specify the res argument in dpi.'
                        })
                # The EXIF library exposes resolution under different
                # attribute spellings; check the three known variants.
                if hasattr(metadata, 'x_resolution'):
                    if not metadata.x_resolution == metadata.y_resolution:
                        #raise ValueError( "X and Y resolutions differ in Image. This is unusual, and may indicate a problem.")
                        return pd.DataFrame(
                            data={
                                'filename': [img],
                                'Area': None,
                                'Resolution': None,
                                'Error':
                                'X and Y resolutions differ in Image. This is unusual, and may indicate a problem.'
                            })
                    else:
                        self.res = metadata.x_resolution
                elif hasattr(metadata, 'Xresolution'):
                    if not metadata.Xresolution == metadata.Yresolution:
                        #raise ValueError( "X and Y resolutions differ in Image. This is unusual, and may indicate a problem.")
                        return pd.DataFrame(
                            data={
                                'filename': [img],
                                'Area': None,
                                'Resolution': None,
                                'Error':
                                'X and Y resolutions differ in Image. This is unusual, and may indicate a problem.'
                            })
                    else:
                        self.res = metadata.Xresolution
                else:
                    if not metadata.XResolution == metadata.YResolution:
                        #raise ValueError( "X and Y resolutions differ in Image. This is unusual, and may indicate a problem.")
                        return pd.DataFrame(
                            data={
                                'filename': [img],
                                'Area': None,
                                'Resolution': None,
                                'Error':
                                'X and Y resolutions differ in Image. This is unusual, and may indicate a problem.'
                            })
                    else:
                        self.res = metadata.XResolution
        # read the scan
        try:
            scan = cv2.imread(os.path.expanduser(img))
        except:
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Area': None,
                    'Resolution': None,
                    'Error':
                    'Unable to open image for processing. Check the file format.'
                })
        # cv2.imread returns None (rather than raising) on failure.
        if scan is None:
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Area': None,
                    'Resolution': None,
                    'Error':
                    'Unable to open image for processing. Check the file format.'
                })
        # transfer to grayscale
        scan = cv2.cvtColor(scan, cv2.COLOR_BGR2GRAY)
        # classify leaf and background via binary inverse threshold
        if self.threshold < 0 or self.threshold > 255:
            #raise ValueError("Threshold must be an integer between 0 and 255.")
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Area': None,
                    'Resolution': None,
                    'Error':
                    'Error: Threshold must be an integer between 0 and 255.'
                })
        scan = cv2.threshold(scan, self.threshold, 255,
                             cv2.THRESH_BINARY_INV)[1]
        # label leaflets (connected components)
        leaflets = measure.label(scan, background=0)
        # count number of pixels in each label
        leaflets = np.unique(leaflets, return_counts=True)
        # create mask to remove dirt and background
        mask = np.ones(len(leaflets[1]), dtype=bool)
        # remove small patches below the cut_off pixel count
        if self.cut_off < 0:
            #raise ValueError("cutoff for small specks must not be negative.")
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Area': None,
                    'Resolution': None,
                    'Error':
                    'cutoff for small specks must not be negative.'
                })
        mask[leaflets[1] < self.cut_off] = False
        # remove background pixels
        mask[leaflets[0] == 0] = False  # background is labeled as 0
        # apply mask
        areas = leaflets[1][mask]
        # convert from pixels to cm2
        res = self.res / 2.54  # 2.54 cm in an inch
        res = res * res  # pixels per cm^2
        areas = areas / res
        # save the thresholded image, keeping EXIF when it came from the file
        if self.output_dir:
            if os.path.isdir(self.output_dir):
                write_to = os.path.join(
                    os.path.expanduser(self.output_dir),
                    os.path.basename(img))
                cv2.imwrite(write_to, scan)
                if not self.res:
                    # If we are supplying the resolution, we don't touch
                    # the exif data.
                    piexif.transplant(
                        os.path.abspath(os.path.expanduser(img)), write_to)
        if self.combine:
            # One row per file: total leaf area.
            return pd.DataFrame(
                data={
                    'filename': [img],
                    'Area': [areas.sum()],
                    'Resolution': self.res,
                    'Error': 'No Error'
                })
        else:
            # One row per leaflet.
            return pd.DataFrame(
                data={
                    'filename': [img] * areas.shape[0],
                    'Area': areas,
                    'Resolution': self.res,
                    'Error': 'No Error'
                })
    elif os.path.isdir(os.path.abspath(os.path.expanduser(img))):
        # Directory mode: fan out over a process pool.
        if os.path.abspath(os.path.expanduser(img)) == self.output_dir:
            return None
        # obtain a list of images
        images = os.listdir(os.path.abspath(os.path.expanduser(img)))
        images = [os.path.join(img, i) for i in images]
        # print(self.workers)
        # create a workers pool and start processing
        pool = multiprocessing.Pool(self.workers)
        results = pool.map(self.estimate, images)
        pool.close()
        pool.join()
        # unify the results into a single dataframe
        return pd.concat(results)
    else:
        #raise ValueError('Your input {img} needs to be a path to an image or a directory.')
        return pd.DataFrame(
            data={
                'filename': [img],
                'Area': None,
                'Resolution': None,
                'Error':
                'Your input {img} needs to be a path to an image or a directory.'
            })
import argparse
import io
import os

import piexif


def parse_args():
    """Parse CLI arguments: input image (with EXIF) and output path."""
    parser = argparse.ArgumentParser(
        description="Generate a test image by transplanting exif")
    parser.add_argument("input_image",
                        help="path to imput image containing exif")
    parser.add_argument("output_image",
                        help="path to generated output image")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    with open(args.input_image, "rb") as fin:
        input_string = fin.read()
    empty_image = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "data/empty_exif.jpg")
    with open(empty_image, "rb") as f:
        image_string = f.read()
    output_bytes = io.BytesIO()
    piexif.transplant(input_string, image_string, output_bytes)
    # Write binary JPEG data: "wb" is required (the old "w" text mode
    # raised TypeError on bytes), and getvalue() returns the whole buffer
    # regardless of stream position (read() right after the write would
    # have returned b"").
    with open(args.output_image, "wb") as fout:
        fout.write(output_bytes.getvalue())
def optimize_jpg(self):
    """Try to reduce the file size of self.src_path (a JPG).

    Saves a recompressed copy to a temporary file and replaces the
    original only when the result is smaller (or when size comparison is
    disabled). EXIF is transplanted into the copy when requested and
    present. Returns self.src_path; unexpected errors are printed.
    """
    try:
        img = Image.open(self.src_path)
        orig_format = img.format
        orig_mode = img.mode
        folder, filename = os.path.split(self.src_path)
        if folder == '':
            folder = os.getcwd()
        temp_file_path = os.path.join(folder + "/~temp~" + filename)
        orig_size = os.path.getsize(self.src_path)
        orig_colors, final_colors = 0, 0
        result_format = "JPEG"
        try:
            had_exif = True if piexif.load(
                self.src_path)['Exif'] else False
        except piexif.InvalidImageDataError:  # Not a supported format
            had_exif = False
        except ValueError:  # No exif info
            had_exif = False
        # TODO: Check if we can provide a more specific treatment of piexif exceptions.
        except Exception:
            had_exif = False
        if self.max_w or self.max_h:
            img, was_downsized = downsize_img(img, self.max_w, self.max_h)
        else:
            was_downsized = False
        if self.grayscale:
            img = make_grayscale(img)
        # only use progressive if file size is bigger
        use_progressive_jpg = orig_size > 10000
        if self.fast_mode:
            quality = self.quality
        else:
            quality, jpgdiff = self.jpeg_dynamic_quality(img)
        try:
            img.save(temp_file_path,
                     quality=quality,
                     optimize=True,
                     progressive=use_progressive_jpg,
                     format=result_format)
        except IOError:
            # Pillow needs a bigger buffer for some progressive saves.
            ImageFile.MAXBLOCK = img.size[0] * img.size[1]
            img.save(temp_file_path,
                     quality=quality,
                     optimize=True,
                     progressive=use_progressive_jpg,
                     format=result_format)
        if self.keep_exif and had_exif:
            try:
                piexif.transplant(os.path.expanduser(self.src_path),
                                  temp_file_path)
                has_exif = True
            except ValueError:
                has_exif = False
            # TODO: Check if we can provide a more specific treatment of piexif exceptions.
            except Exception:
                # BUGFIX: the original assigned `had_exif = False` here,
                # which left `has_exif` unbound after an unexpected
                # transplant error.
                has_exif = False
        else:
            has_exif = False
        # Only replace the original file if compression did save any space
        final_size = os.path.getsize(temp_file_path)
        if self.no_size_comparison or (orig_size - final_size > 0):
            shutil.move(temp_file_path, os.path.expanduser(self.src_path))
            was_optimized = True
        else:
            final_size = orig_size
            was_optimized = False
            try:
                os.remove(temp_file_path)
            except OSError as e:
                details = 'Error while removing temporary file.'
                show_img_exception(e, self.src_path, details)
        return self.src_path
    except Exception as e:
        print(e)
# check that repair file does not already exist. if not isfile(repdir + imgfile.replace('.', 'r.')): startTime = datetime.now() img = imread(orgdir + imgfile) for pix in pixel_map: img[pix[0][0], pix[0][1]] = img[pix[1][0], pix[1][1]] # save file with an 'r' at the end of the filename to indicate # it was repaired. quality=98 appears to create a file of # approx the same size as the original. imsave(repdir + imgfile.replace('.', 'r.'), img, quality=98) # copy the EXIF data from the original file to the repaired file. transplant(orgdir + imgfile, repdir + imgfile.replace('.', 'r.')) elapsed_time = datetime.now() - startTime s = 'orig file = {}, mask file = {}, ' s += 'elapsed time = {} num of pixel remappings = {}, ' print(s.format(imgfile, maskfilename, len(pixel_map), elapsed_time)) else: s = 'Error: File {} already exists' print(s.format(repdir + imgfile)) else: s = 'Error: File {} does not end with .jpg or jpeg' print(s.format(repdir + imgfile)) else: # repair all files in the 'originals' folder.
def optimize_jpg(t: Task) -> TaskResult:
    """
    Try to reduce file size of a JPG image.

    Expects a Task object containing all the parameters for the image
    processing.

    If file reduction is successful, this function will replace the
    original file with the optimized version and return some report data
    (file path, image format, image color mode, original file size,
    resulting file size, and resulting status of the optimization.

    :param t: A Task object containing all the parameters for the image
        processing.
    :return: A TaskResult object containing information for single file
        report.
    """
    img = Image.open(t.src_path)
    orig_format = img.format
    orig_mode = img.mode
    folder, filename = os.path.split(t.src_path)
    if folder == '':
        folder = os.getcwd()
    temp_file_path = os.path.join(folder + "/~temp~" + filename)
    orig_size = os.path.getsize(t.src_path)
    orig_colors, final_colors = 0, 0
    result_format = "JPEG"
    try:
        had_exif = True if piexif.load(t.src_path)['Exif'] else False
    except piexif.InvalidImageDataError:  # Not a supported format
        had_exif = False
    except ValueError:  # No exif info
        had_exif = False
    # TODO: Check if we can provide a more specific treatment of piexif exceptions.
    except Exception:
        had_exif = False
    if t.max_w or t.max_h:
        img, was_downsized = downsize_img(img, t.max_w, t.max_h)
    else:
        was_downsized = False
    if t.grayscale:
        img = make_grayscale(img)
    # only use progressive if file size is bigger
    use_progressive_jpg = orig_size > 10000
    if t.fast_mode:
        quality = t.quality
    else:
        quality, jpgdiff = jpeg_dynamic_quality(img)
    try:
        img.save(temp_file_path,
                 quality=quality,
                 optimize=True,
                 progressive=use_progressive_jpg,
                 format=result_format)
    except IOError:
        # Pillow needs a bigger buffer for some progressive saves.
        ImageFile.MAXBLOCK = img.size[0] * img.size[1]
        img.save(temp_file_path,
                 quality=quality,
                 optimize=True,
                 progressive=use_progressive_jpg,
                 format=result_format)
    if t.keep_exif and had_exif:
        try:
            piexif.transplant(os.path.expanduser(t.src_path),
                              temp_file_path)
            has_exif = True
        except ValueError:
            has_exif = False
        # TODO: Check if we can provide a more specific treatment of piexif exceptions.
        except Exception:
            # BUGFIX: the original assigned `had_exif = False` here,
            # leaving `has_exif` unbound and raising NameError at the
            # TaskResult construction below whenever transplant failed
            # with anything other than ValueError.
            has_exif = False
    else:
        has_exif = False
    # Only replace the original file if compression did save any space
    final_size = os.path.getsize(temp_file_path)
    if t.no_size_comparison or (orig_size - final_size > 0):
        shutil.move(temp_file_path, os.path.expanduser(t.src_path))
        was_optimized = True
    else:
        final_size = orig_size
        was_optimized = False
        try:
            os.remove(temp_file_path)
        except OSError as e:
            details = 'Error while removing temporary file.'
            show_img_exception(e, t.src_path, details)
    return TaskResult(t.src_path, orig_format, result_format, orig_mode,
                      img.mode, orig_colors, final_colors, orig_size,
                      final_size, was_optimized, was_downsized, had_exif,
                      has_exif)
def test_transplant_fail1(self):
    """An invalid output target (False) must raise ValueError."""
    with self.assertRaises(ValueError):
        piexif.transplant(I1, I2, False)
def test_transplant_fail2(self):
    """Transplanting from a source without EXIF must raise ValueError."""
    with self.assertRaises(ValueError):
        piexif.transplant(NOEXIF_FILE, I2, "foo.jpg")
def transplant_meta(src, dst):
    """Transplant unmodified EXIF metadata from *src* into *dst*."""
    piexif.transplant(src, dst)
def cli(source, target):
    """Transfer EXIF metadata from one file to another"""
    piexif.transplant(source, target)
    # NOTE(review): "transfered" is a typo in this user-facing message;
    # kept byte-identical here since output may be matched by callers.
    print("EXIF metadata has been successfully transfered.")
PATH_TO_IMG = Path("/data/Gabon_trainingData")
# PATH_TO_MAIN = Path("/home/jupyter/")
# PATH_TO_TRAIN_DF = PATH_TO_MAIN / "inspect_data_split_validation"
PATH_TO_IMG_RESC = Path("/data_rescaled/")

filename_sizes = "sizes.npy"
# Each entry is (x, y, image filename).
sizes_from_file = np.load(filename_sizes).tolist()

exceptions = []
# The original shadowed the list with its own loop variable
# (`for x_y_image in tqdm(x_y_image)`); iterate the list directly.
for x_y_image in tqdm(sizes_from_file):
    try:
        filename = x_y_image[2]
        new_filename = "resc_" + filename
        # Skip images already rescaled in a previous run.
        if os.path.isfile(PATH_TO_IMG_RESC / new_filename):
            continue
        img = Image.open(PATH_TO_IMG / filename)
        # Fixed height of 384 px; width scaled to keep the aspect ratio.
        y = 384
        scale = 384 / int(x_y_image[1])
        x = round(scale * int(x_y_image[0]))
        img = img.resize((x, y), Image.ANTIALIAS)
        img.save(PATH_TO_IMG_RESC / new_filename)
        # piexif.transplant expects plain string paths, not pathlib.Path
        # objects, so convert explicitly.
        piexif.transplant(str(PATH_TO_IMG / filename),
                          str(PATH_TO_IMG_RESC / new_filename))
    except Exception as e:
        # Record the failure and carry on with the remaining images.
        exceptions.append([x_y_image, e])

np.save("exceptions.npy", exceptions)
def execute(self, files, out_dir, resize=True, max_dim=None,
            resize_units=None, compress=False, api_key=None):
    """Resize and/or compress *files* into *out_dir*, restoring EXIF on
    JPEG results.

    Returns [True, summary message] on success, [False, error message]
    when a resize/compress step reports failure.
    """
    savings_KB = 0
    total_orig_size = 0
    for idx, file in enumerate(files):
        total_orig_size = total_orig_size + os.stat(file).st_size / 1024.
        log = None
        if (resize and max_dim is not None and str(max_dim).isdigit()):
            print('Resizing "' + os.path.basename(file) + '" (' + str(idx + 1) + ' of ' \
                  + str(len(files)) + ')... ', end='')
            if resize_units is None:
                resize_units = 'px'
            log = image_compressor.resize(file, out_dir=out_dir,
                                          suffix='_small', max_dim=max_dim,
                                          max_dim_units=resize_units)
            if not log['success']:
                return [False, log['message']]
            else:
                print(log['message'])
                savings_KB = savings_KB + log['saved']
        if (compress and api_key is not None):
            print('Compressing "' + os.path.basename(file) + '" (' + str(idx + 1) + ' of ' \
                  + str(len(files)) + ')... ', end='')
            in_file = log['result'] if log is not None else file
            log = image_compressor.compress(api_key=api_key, file=in_file,
                                            out_dir=out_dir, suffix='')
            if not log['success']:
                return [False, log['message']]
            else:
                print(log['message'])
                savings_KB = savings_KB + log['saved']
        # restore EXIF tags from original file to final result (they are stripped by PIL and TinyPNG)
        # PNGs don't have EXIF data, so skip this for them
        # Also, sometimes you get EXIF warnings from the piexif library when it tries to copy Unicode. Ignore those.
        # The `log is not None` guard skips files for which neither step
        # ran; previously log['result'] raised TypeError here (masked by
        # the broad except and logged as a spurious error).
        if file[-4:].lower() in ['.jpg', 'jpeg'] and log is not None:
            try:
                piexif.transplant(file, log['result'])
            except Exception as e:
                # log the error, but not an issue (source might not have EXIF data)
                self.logger.error(e, exc_info=True)
    # Guard the percentage against an empty input list (division by zero).
    pct = round(savings_KB * 100 / total_orig_size, 1) if total_orig_size else 0.0
    msg = 'Saved a total of ' + str(round(savings_KB, 0)) + ' KB = ' \
          + str(pct) + '%'
    return [True, msg]
torch.cuda.synchronize()
for i in range(args.batch_size):
    ud = usefuldims[i]
    # pytorch represents images as [channels, height, width]
    # TODO test leaving on GPU longer
    tensimg = xbatch[i][:, ud[1]:ud[3], ud[0]:ud[2]].cpu().detach()
    absx0, absy0 = tuple(usefulstarts[i].tolist())
    tensimg = make_seamless_edges(tensimg, absx0, absy0)
    if args.debug:
        os.makedirs('dbg', exist_ok=True)
        torchvision.utils.save_image(
            xbatch[i], 'dbg/crop' + str(n_count) + '_' + str(i) + '_1.jpg')
        torchvision.utils.save_image(
            tensimg, 'dbg/crop' + str(n_count) + '_' + str(i) + '_2.jpg')
        print(tensimg.shape)
        print((absx0, absy0, ud))
    # Accumulate the crop into the output canvas at its absolute offset.
    newimg[:, absy0:absy0 + tensimg.shape[1],
           absx0:absx0 + tensimg.shape[2]] = newimg[:, absy0:absy0 +
                                                    tensimg.shape[1],
                                                    absx0:absx0 +
                                                    tensimg.shape[2]].add(tensimg)

torchvision.utils.save_image(newimg, args.output)
# BUGFIX: the extension check used args.output[:-4] (everything *except*
# the last four characters), so the piexif branch could never match a
# '.jpg' suffix; compare the trailing four characters instead.
if args.output[-4:] == '.jpg' and args.exif_method == 'piexif':
    piexif.transplant(args.input, args.output)
elif args.exif_method != 'noexif':
    cmd = [
        'exiftool', '-TagsFromFile', args.input, args.output,
        '-overwrite_original'
    ]
    subprocess.run(cmd)
print('Elapsed time: ' + str(time.time() - start_time) + ' seconds')