def _metadata_exif(self, metadata_file_path):
    """Read exif metadata from a jpg or tiff file using exifread.

    Writes one "Key: <tag>\\tValue: <value>" line per tag to
    ``metadata_file_path``, records 'exif' as the metadata kind, and
    returns True on success or False when no metadata could be read.
    """
    # TODO: can we shorten this method somehow?
    with open(self.src_path, 'rb') as img:
        tags = None
        try:
            tags = exifread.process_file(img, debug=True)
        except Exception as e:
            self.add_error(e, "Error while trying to grab full metadata for file {}; retrying for partial data.".format(self.src_path))
        if tags is None:
            # Rewind before retrying: the failed parse may have left the
            # file position anywhere in the stream.
            img.seek(0)
            try:
                tags = exifread.process_file(img, debug=True)
            except Exception as e:
                self.add_error(e, "Failed to get any metadata for file {}.".format(self.src_path))
                return False
        # BUG FIX: open the metadata file ONCE.  Opening with 'w+' inside
        # the loop truncated the file on every iteration, so only the last
        # tag ever survived.
        with open(metadata_file_path, 'w+') as metadata_file:
            for tag in sorted(tags.keys()):
                # These tags are long and obnoxious/binary so we don't add them
                if tag not in ('JPEGThumbnail', 'TIFFThumbnail'):
                    tag_string = str(tags[tag])
                    # Exifreader truncates data.
                    if len(tag_string) > 25 and tag_string.endswith(", ... ]"):
                        tag_string = str(tags[tag].values)
                    metadata_file.write("Key: {}\tValue: {}\n".format(tag, tag_string))
    # TODO: how do we want to log metadata?
    self.set_property('metadata', 'exif')
    return True
def _metadata_exif(self, metadataFile):
    """Read exif metadata from the current file and write it to metadataFile.

    Returns True when tags were written, False when no metadata could be
    read at all.
    """
    # BUG FIX: use a context manager so the image handle is closed on
    # every exit path — the original leaked it whenever an exception
    # escaped the tag loop.
    with open(self.cur_file.src_path, 'rb') as img:
        tags = None
        try:
            tags = exifread.process_file(img, debug=True)
        except Exception as e:
            print("Error while trying to grab full metadata for file {}; retrying for partial data.".format(self.cur_file.src_path))
            print(e)
        if tags is None:
            # Rewind before retrying; the failed parse moved the position.
            img.seek(0)
            try:
                tags = exifread.process_file(img, debug=True)
            except Exception as e:
                print("Failed to get any metadata for file {}.".format(self.cur_file.src_path))
                print(e)
                return False
        for tag in sorted(tags.keys()):
            # These are long and obnoxious/binary
            if tag not in ('JPEGThumbnail', 'TIFFThumbnail'):
                printable = str(tags[tag])
                # Exifreader truncates data.
                if len(printable) > 25 and printable.endswith(", ... ]"):
                    value = tags[tag].values
                    # NOTE(review): `basestring` implies this block targets
                    # Python 2 — confirm before porting to Python 3.
                    if isinstance(value, basestring):
                        printable = value
                    else:
                        printable = str(value)
                metadataFile.write("Key: {}\tValue: {}\n".format(tag, printable))
    self.cur_file.add_log_details('metadata', 'exif')
    return True
def analyze_pictures(src_folder, dest_folder, pictures):
    """Map each picture filename to its destination sub-folder.

    Pictures are grouped by date (via get_folder/get_date), and runs of
    consecutive shots that satisfy fulfill_panorama_criterias() get an
    extra "Panorama <n>/" path segment.  CR2 raws inherit the folder of
    their same-named JPG.  Returns {filename: destination_folder}.

    NOTE(review): assumes src_folder ends with a path separator, since
    paths are built by plain string concatenation — confirm with callers.
    """
    prevpic = None
    curpath = None
    curtags = None
    filedic = {}          # filename -> destination folder
    cr2s = []             # raw files, resolved after the main pass
    curpanorama = []      # filenames of the panorama currently being built
    count = 0             # per-day panorama counter
    date_of_previous_panorama = None
    for pic in pictures:
        print(pic)
        if os.path.splitext(pic)[1].lower() == ".cr2":
            cr2s.append(pic)
        if os.path.splitext(pic)[1].lower() in ['.jpg', '.jpeg', '.png', '.mov']:
            # Get File and ExifTags
            curpath = src_folder + pic
            with open(curpath, 'rb') as cf:
                curtags = exifread.process_file(cf, details=False)
            # Calculate Subfolder
            filedic[pic] = get_folder(dest_folder, get_date(curpath, curtags).isoformat())
            if prevpic is not None:
                # Re-read the previous picture's tags to compare with the
                # current one (panorama detection needs both).
                prevpath = src_folder + prevpic
                with open(prevpath, 'rb') as pf:
                    prevtags = exifread.process_file(pf, details=False)
                if fulfill_panorama_criterias(prevpath, curpath, prevtags, curtags):
                    # Extend (or start) the current panorama run.
                    if len(curpanorama) == 0:
                        curpanorama.append(prevpic)
                    curpanorama.append(pic)
                elif len(curpanorama) > 0:
                    # Just finished a panorama
                    # Restart numbering when the day changes.
                    if date_of_previous_panorama and not date_of_previous_panorama == get_date(prevpath, prevtags):
                        count = 0
                    count += 1
                    for x in curpanorama:
                        filedic[x] = filedic[x] + "Panorama " + str(count) + "/"
                    curpanorama = []
                    date_of_previous_panorama = get_date(prevpath, prevtags)
            prevpic = pic
    # If last picture was part of panorama
    if len(curpanorama) > 0:
        if not date_of_previous_panorama == get_date(curpath, curtags):
            count = 0
        count += 1
        for x in curpanorama:
            filedic[x] = filedic[x] + "Panorama " + str(count) + "/"
    # There has to be a JPG for each CR2 (-> RAW+L ; only RAW not supported yet)
    for x in cr2s:
        tmp = x.rsplit('.', 1)[0] + '.JPG'
        filedic[x] = filedic[tmp]
    return filedic
def test_NotfulfillPanoramaCriterias1(self):
    """Two unrelated pictures must NOT be detected as a panorama."""
    path1 = 'test_files/P1010003.JPG'
    path2 = 'test_files/P1010004.JPG'
    # Close the image handles deterministically (the original leaked them).
    with open(path1, 'rb') as pic1, open(path2, 'rb') as pic2:
        pic1_tags = exifread.process_file(pic1, details=False)
        pic2_tags = exifread.process_file(pic2, details=False)
    case = sorter.fulfill_panorama_criterias(path1, path2, pic1_tags, pic2_tags)
    # BUG FIX: the original computed `case` but never asserted on it, so
    # this test could never fail.  The name (and the positive sibling
    # test) says these pictures must NOT qualify.
    self.assertFalse(case)
def test_fulfillPanoramaCriterias1(self):
    """Two consecutive shots of the same scene ARE detected as a panorama."""
    path1 = 'test_files/IMG_5627.JPG'
    path2 = 'test_files/IMG_5628.JPG'
    # Close the image handles deterministically (the original leaked them).
    with open(path1, 'rb') as pic1, open(path2, 'rb') as pic2:
        pic1_tags = exifread.process_file(pic1, details=False)
        pic2_tags = exifread.process_file(pic2, details=False)
    case = sorter.fulfill_panorama_criterias(path1, path2, pic1_tags, pic2_tags)
    self.assertTrue(case)
def EXIF(self, file=None):
    """Return the EXIF tags of this image as a dict.

    If ``file`` is given, tags are read from that open file object;
    otherwise the image is opened from its storage backend.  Returns {}
    when the tags cannot be read.
    """
    try:
        if file:
            tags = exifread.process_file(file)
        else:
            # Renamed the inner handle so it no longer shadows the
            # `file` parameter.
            with self.image.storage.open(self.image.name, 'rb') as fh:
                tags = exifread.process_file(fh, details=False)
        return tags
    except Exception:
        # Narrowed from a bare except: still best-effort ({} on failure),
        # but no longer swallows KeyboardInterrupt/SystemExit.
        return {}
def __init__(self, filename, details=False):
    """Initialize EXIF object with FILE as filename or fileobj."""
    self.filename = filename
    # isinstance instead of `type(...) == str`: also accepts str subclasses.
    if isinstance(filename, str):
        # A path was given: open, parse, and close it ourselves.
        with open(filename, 'rb') as fileobj:
            self.tags = exifread.process_file(fileobj, details=details)
    else:
        # An already-open binary file object was given by the caller.
        self.tags = exifread.process_file(filename, details=details)
def compare(*args): """"define your compare method here""" f1 = open(path_name1, 'rb') tags1 = exifread.process_file(f1) #stores EXIF data of first image file f2 = open(path_name2, 'rb') tags2 = exifread.process_file(f2) #stores EXIF data of second image file if tags1 == tags2: print "Stop!,%s and %s are similar"%(y[j],y[k]) else: print(" No similar Images found ")
def read_exif(filename, upload_date, is_image):
    """Extract shooting date, camera and dimensions from a media file.

    Falls back to *upload_date* for year/month/day when the file has no
    EXIF DateTimeOriginal.  Returns a dict of display-ready metadata.
    """
    # Close the handle deterministically (the original leaked it).
    with open(filename, "rb") as fh:
        exif = exifread.process_file(fh)
    timestamp = None
    year, month, day = upload_date.year, upload_date.month, upload_date.day
    exif_read = bool(exif)
    if "EXIF DateTimeOriginal" in exif:
        timestamp = str(exif["EXIF DateTimeOriginal"])
        # fmt='2015:12:04 00:50:53'
        year, month, day = timestamp.split(" ")[0].split(":")
        year, month, day = int(year), int(month), int(day)
    dims = None, None
    if is_image:
        dims = Image.open(filename).size
    brand = str(exif.get("Image Make", "Unknown camera"))
    model = str(exif.get("Image Model", ""))
    return {
        "year": year,
        "month": month,
        "day": day,
        "timestamp": timestamp,
        "camera": "%s %s" % (brand, model),
        "orientation": str(exif.get("Image Orientation", "Horizontal (normal)")),
        "width": dims[0],
        "height": dims[1],
        "size": os.stat(filename).st_size,
        "exif_read": exif_read,
    }
def verify_exif(filename):
    """Check that an image carries every EXIF field Mapillary requires.

    Files that are missing a required tag — or that already contain
    Mapillary's own tags — would be ignored server side, so reject them
    here and return False; otherwise return True.
    """
    # Required tags, in IFD name convention.
    required_exif = (
        "GPS GPSLongitude",
        "GPS GPSLatitude",
        "EXIF DateTimeOriginal",
        "Image Orientation",
        "GPS GPSImgDirection",
    )
    description_tag = "Image ImageDescription"
    with open(filename, 'rb') as f:
        tags = exifread.process_file(f)
    # Already-processed files embed a Mapillary sequence id in the description.
    if description_tag in tags and "MAPSequenceUUID" in tags[description_tag].values:
        print("File contains Mapillary EXIF tags, use upload.py instead.")
        return False
    # Every required tag must be present.
    for rexif in required_exif:
        if rexif not in tags:
            print("Missing required EXIF tag: {0}".format(rexif))
            return False
    return True
def apply(filename, site): apply.printExif += 1 if apply.printExif==1: print ' EXIF for %s :' % filename # Read the EXIF data : f = open(filename, 'rb') tags = exifread.process_file(f) data = {} for tag in tags.keys(): if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename'): if (tags[tag].field_type!=7 or not all(v==0 for v in tags[tag].values)): c = tag.split() current = data for k in range(0, len(c)-1): if current.get(c[k])==None: current[c[k]] = {} current = current[c[k]] current[c[-1]] = str(tags[tag]) if apply.printExif==1: print u' TAG <%s> VALUE <%s>' % (tag, tags[tag]) else: if apply.printExif==1: print u' OMITTED TAG <%s> VALUE <%s>' % (tag, tags[tag]) f.close() return (data, '')
def get_new_image_name_translations(indir, outdir):
    """
    Renames input images using timestamp of EXIF's DateTimeOriginal tag.

    Returns {origpath: {"newpath": <outdir>/<epoch>.jpg,
                        "orientation": <str or None>}}.
    Falls back to the current time when the EXIF date is missing.
    """
    origpaths = glob.glob("%s/*.[jJ][pP][gG]" % indir)
    d = {}
    for p in origpaths:
        # BUG FIX: open in binary mode (exifread parses bytes) and use a
        # context manager — the old `finally: f.close()` raised NameError
        # whenever open() itself failed.
        with open(p, 'rb') as f:
            exif = exifread.process_file(f)
        try:
            orientation = str(exif["Image Orientation"])
        except KeyError:
            orientation = None
        try:
            origtime = str(exif["EXIF DateTimeOriginal"])
            time_extracted = True
        except KeyError:
            print("No exif data!")
            time_extracted = False
        if time_extracted:
            ts = int(time.mktime(datetime.datetime.strptime(origtime, "%Y:%m:%d %H:%M:%S").timetuple()))
        else:
            ts = int(time.time())
        newpath = os.path.join(outdir, str(ts) + ".jpg")
        d[p] = {"newpath": newpath, "orientation": orientation}
    return d
def image_to_meta(context, use_exif=True, use_iptc=True, use_xmp=True):
    """Collect IPTC, EXIF and XMP metadata from the context's image.

    Returns a dict with the keys 'iptc', 'exif' and 'xmp'; any disabled
    section (or a context without an image) yields an empty dict.
    """
    if not (hasattr(context, "image") and context.image):
        return {"iptc": {}, "exif": {}, "xmp": {}}
    image = context.image
    io = StringIO(image.data)
    io.seek(0)
    meta = {"iptc": {}, "exif": {}, "xmp": {}}
    if use_iptc:
        meta["iptc"] = IPTCInfo(io, force=True)
        io.seek(0)  # rewind so the next parser reads from the start
    if use_exif:
        meta["exif"] = exifread.process_file(io)
        io.seek(0)
    if use_xmp:
        meta["xmp"] = xmp.parse(image.data)
    io.close()
    return meta
def process_local_dir(fn, prefix, preflong, pictype):
    """Walk *fn* accumulating size and count of matching JPGs per month.

    Updates the module-level sum_sz/sum_cnt dicts keyed by
    '<year><month>' (month NOT zero-padded, so '20157' is July 2015)
    and returns (total_size, total_count).

    NOTE(review): `pictype` is accepted but never used — confirm intent.
    """
    size = 0
    sz = 0
    count = 0
    for root, subFolders, files in os.walk(fn):
        for ele in files:
            fname = os.path.join(root, ele)  #full path name
            if ele.endswith(".JPG") and (preflong in fname):
                sz = os.path.getsize(fname)
                size += sz
                count += 1
                with open(fname, 'rb') as fjpeg:
                    tags = exifread.process_file(fjpeg)
                stop_tag = 'Image DateTime'
                # Printable tag string, e.g. '2014:08:01 19:06:50'.
                dt_tag = vars(tags[stop_tag])['printable']
                #dt_tag: 2014:08:01 19:06:50
                dt = datetime.strptime(dt_tag.split()[0], "%Y:%m:%d")
                # Month key without zero padding — matches '20157' below.
                pref = '{0}{1}'.format(dt.year, dt.month)
                if pref == '20157':
                    # July 2015 only: print the rename mapping preview.
                    d = (dt_tag.split()[0]).replace(':', '-')
                    t = dt_tag.split()[1]
                    newfname = '{0}_{1}_{2}_XXXX.jpg'.format(prefix, d, t)
                    print '{0},{1}'.format(fname, newfname)
                # Accumulate into the module-level per-month totals.
                if pref in sum_sz:
                    sum_sz[pref] += sz
                    sum_cnt[pref] += 1
                else:
                    sum_sz[pref] = sz
                    sum_cnt[pref] = 1
    return size, count
def print_image_tags(file): from exifread import process_file tags = process_file(open(file), strict=True) for tag in tags.keys(): print "Key: %s, value %s" % (tag, tags[tag])
def date_taken_info(filename):
    """Return [day, month, year, 'DDMMYYYY-HHMMSS'] from the EXIF date taken.

    Returns None when the file has no parsable 'Image DateTime' tag.
    """
    # Read file; the context manager closes it (the original leaked it).
    with open(filename, 'rb') as open_file:
        # Stop parsing at the date tag — nothing else is needed.
        tags = exifread.process_file(open_file, stop_tag='Image DateTime')
    try:
        # Grab date taken
        datetaken_string = tags['Image DateTime']
        datetaken_object = datetime.datetime.strptime(datetaken_string.values, '%Y:%m:%d %H:%M:%S')
        # Date
        day = str(datetaken_object.day).zfill(2)
        month = str(datetaken_object.month).zfill(2)
        year = str(datetaken_object.year)
        # Time
        second = str(datetaken_object.second).zfill(2)
        minute = str(datetaken_object.minute).zfill(2)
        hour = str(datetaken_object.hour).zfill(2)
        # New Filename
        return [day, month, year, day + month + year + '-' + hour + minute + second]
    except (KeyError, ValueError, TypeError):
        # Narrowed from a bare except: missing tag, or a malformed /
        # oddly-typed date string.
        return None
def testEXIF(image): if not image.lower().endswith('.jpg'): return False try: f = open(image, 'r') except IOError: print "IOError", image return False global lat, lon, latC, lonC lat = '' lon = '' lonC = '' latC = '' tags = exifread.process_file(f) for tag in tags: if "Lon" in tag: if "Ref" not in tag: lon = tags[tag] else: lonC = str(tags[tag]) if "Lat" in tag: if "Ref" not in tag: lat = tags[tag] else: latC = str(tags[tag]) if lat != '': return True return False
def import_images(source_path):
    """
    Find attached storage and run importing all existing images.

    Copies JPGs into DATABASE_PHOTOS_PATH/<month>/<day>/ based on their
    EXIF 'Image DateTime'; files without a readable date are skipped.
    """
    copied, total = 0, 0
    for r, d, files in os.walk(source_path):
        images = filter(lambda x: x.upper().endswith('JPG'), files)
        for image in images:
            total += 1
            with open(os.path.join(source_path, r, image), 'rb') as f:
                try:
                    created = str(exifread.process_file(f)['Image DateTime'])
                except Exception:
                    # Narrowed from a bare except; deliberately best-effort:
                    # unreadable EXIF / missing date means skip the file.
                    continue
            d = datetime.strptime(created.split()[0], '%Y:%m:%d')
            targetpath = os.path.join(
                DATABASE_PHOTOS_PATH,
                '{:02}'.format(d.month), '{:02}'.format(d.day))
            target = os.path.join(targetpath, image)
            if not os.path.exists(target):
                try:
                    os.makedirs(targetpath)
                except OSError:
                    # Directory may already exist (or was created concurrently).
                    pass
                shutil.copyfile(os.path.join(source_path, r, image), target)
                copied += 1
                sys.stdout.write('.')
def getEXIFTime(fileName):
    """Return a filename-safe timestamp for *fileName*.

    Uses EXIF DateTimeOriginal when present (with ':' replaced by '-'),
    otherwise falls back to the file's modification time via getModTime().
    """
    newName = ""
    # BUG FIX: initialize tags — it was unbound when process_file raised,
    # which triggered a NameError that the old bare except then masked.
    tags = {}
    with open(fileName, 'rb') as f:
        try:
            # Stop at the tag we need; skip the expensive detail parsing.
            tags = exifread.process_file(f, stop_tag='EXIF DateTimeOriginal', details=False)
        except Exception:
            # Unparsable EXIF: fall through to the mod-time path below.
            pass
    try:
        # get the exif version date time
        EXIFDateTime = str(tags['EXIF DateTimeOriginal'])
        # ':' is not filename-safe on every filesystem.
        newName = EXIFDateTime.replace(':', '-')
    except KeyError:
        # else use the file modified date (Creation gets changed on copy)
        print ("Couldn't read EXIF date on " + fileName + "\nUsing mod time")
        newName = getModTime(fileName)
    return newName
def resolution_calc(camera, image):
    """Fill in camera.resolutions[0] with the source image size and derive
    any resize targets whose height is unspecified (None).

    Tries PIL first; for files PIL cannot open (raws), falls back to the
    EXIF ImageWidth/ImageLength tags, swapping width/height for rotated
    cameras.  Returns the mutated *camera*.
    """
    x = 0  # index into camera.resolutions for the derive loop below
    try:
        camera.resolutions[0] = Image.open(image).size
    except IOError:
        # PIL can't open this file (e.g. a raw) — read size from EXIF.
        with open(image, "rb") as fh:
            exif_tags = exifread.process_file(
                fh, details=False)
        try:
            width = exif_tags["Image ImageWidth"].values[0]
            height = exif_tags["Image ImageLength"].values[0]
            if (camera.orientation in ("90", "270")):
                temp = height
                height = width
                width = temp
            camera.resolutions[0] = (width, height)
        except KeyError:
            camera.resolutions[0] = (0, 0)
    # NOTE(review): this swap runs for BOTH paths, so the EXIF path above
    # (which already swapped) ends up swapped twice — confirm intended
    # indentation against the upstream source.
    if (camera.orientation in ("90", "270")):
        camera.resolutions[0] = (camera.resolutions[0][1], camera.resolutions[0][0])
    for resize_resolution in camera.resolutions:
        if resize_resolution[1] is None:
            # Height unspecified: scale it from the full-res aspect ratio.
            try:
                img = camera.resolutions[0]
                if camera.orientation in ("90", "270"):
                    new_res = (img[0] * resize_resolution[0] / img[1], resize_resolution[0])
                else:
                    new_res = (resize_resolution[0], img[1] * resize_resolution[0] / img[0])
                log.debug("One resolution arguments, '{0:d}'".format(new_res[0]))
                camera.resolutions[x] = new_res
            except Exception as e:
                log.debug("Wouldn't calculate resolution arguments" + str(e))
        x = x + 1
    return camera
def get_resolution(image, camera):
    """Return various resolution numbers for an image."""
    try:
        ext = os.path.splitext(image)[-1].lower().strip(".")
        if "raw" in camera.image_types and ext in RAW_FORMATS:
            # Raw file: PIL can't open it, so pull size from the EXIF tags.
            with open(image, "rb") as fh:
                exif_tags = exifread.process_file(fh, details=False)
            try:
                width = exif_tags["Image ImageWidth"].values[0]
                height = exif_tags["Image ImageLength"].values[0]
                if "rotated 90" in str(exif_tags["Image Orientation"]).lower():
                    width, height = height, width
                image_resolution = (width, height)
            except KeyError:
                image_resolution = (0, 0)
        else:
            try:
                image_resolution = Image.open(image).size
            except ValueError:
                print("Value Error?")
                image_resolution = (0, 0)
    except IOError:
        image_resolution = (0, 0)
    folder, res = "originals", 'fullres'
    return res, image_resolution, folder
def extract_and_attach_metadata(mediaitem, filepath):
    """Attach origin date and file size metadata to *mediaitem*.

    For photos (media_type_cd == 100) the origin date comes from EXIF
    DateTimeOriginal / DateTimeDigitized, falling back to the filesystem
    birth time (macOS/BSD) and then ctime; defaults to now() if all fail.
    """
    if mediaitem.media_type_cd == 100:
        try:
            # Close the file handle deterministically (the original leaked it).
            with open(filepath, 'rb') as media_file:
                tags = exifread.process_file(media_file, details=False)
            org_date = datetime.now()
            org_date_tag = (tags.get('EXIF DateTimeOriginal')
                            or tags.get('EXIF DateTimeDigitized'))
            if org_date_tag:
                org_date = datetime.strptime(str(org_date_tag), '%Y:%m:%d %H:%M:%S')
            else:
                # BUG FIX: st_birthtime only exists on macOS/BSD; accessing
                # it directly raised AttributeError on Linux, which the old
                # bare except turned into a full extraction failure.
                stat = os.stat(filepath)
                org_date_tag = getattr(stat, 'st_birthtime', None) or stat.st_ctime
                if org_date_tag:
                    org_date = datetime.fromtimestamp(org_date_tag)
            mediaitem.origin_date = org_date
        except Exception:
            logging.error('failed to extract metadata for: ' + str(mediaitem))
    file_size = os.stat(filepath).st_size
    mediaitem.file_size = file_size
    logging.log(logging.DEBUG, str(mediaitem) + ' - set file size = ' + str(file_size))
def get_lat_long(f): tags = exifread.process_file(f) for tag in tags.keys(): if "GPS GPSLongitude" == tag: gps_long = tags[tag] if "GPS GPSLatitude" == tag: gps_latt = tags[tag] if "GPS GPSLongitudeRef" == tag: gps_long_ref = tags[tag] #print "long ref", gps_long_ref if "GPS GPSLatitudeRef" == tag: gps_latt_ref = tags[tag] print "latt ref", gps_latt_ref if gps_long and gps_latt and gps_long_ref and gps_latt_ref: lat = convert_to_degrees(gps_latt) if gps_latt_ref != "N": lat = 0 - lat lon = convert_to_degrees(gps_long) if gps_long_ref != "E": lon = 0 - lon return lat, lon return None, None
def get_exif(picture):
    """
    :param picture: Is the image file user selects. Extract EXIF data.
    :return:
    """
    # Close the image handle once parsed (the original leaked it).
    with open(picture, 'rb') as image:
        tags = exifread.process_file(image)
    gps_out = []
    what_i_need = ('GPS GPSLatitude', 'GPS GPSLatitudeRef',
                   'GPS GPSLongitude', 'GPS GPSLongitudeRef')
    for tag in what_i_need:
        try:
            # "%s" % always yields a str, so the old isinstance(val, list)
            # branch was dead code and has been removed.
            gps_out.append("%s" % tags[tag])
        except KeyError:
            print('Key %s does not exists' % tag)
    # Had a list of strings and had to do following code so they wouldn't
    # break when I calculated them.
    list1 = gps_out[0]
    list2 = gps_out[2]
    latitude = list1.strip("[]").split(",")
    longitude = list2.strip("[]").split(",")
    last_number_lat = latitude[2].split("/")
    last_number_long = longitude[2].split("/")
    latitude[2] = int(last_number_lat[0]) / int(last_number_lat[1])
    longitude[2] = int(last_number_long[0]) / int(last_number_long[1])
    latlong_to_degress(latitude, longitude)
def create_thumbnail(self, cache_dir, file_name, subdir, size):
    """Create and cache a square thumbnail for one picture.

    Does nothing when the cached file already exists.  Otherwise the
    image is rotated per its EXIF orientation, centre-cropped to a
    square, scaled down to *size* and saved into *cache_dir*.
    """
    cache_file = os.path.join(cache_dir, file_name)
    if os.path.exists(cache_file):
        return
    file_path = os.path.join(subdir, file_name)
    # Determine orientation angle from the EXIF tags.
    angle = 0
    with open(file_path, 'rb') as picture:
        try:
            tags = exifread.process_file(picture)
            angle = ROTATION[tags['Image Orientation'].printable]
            self.log.info('Rotating by angle: ' + str(angle))
        except UnicodeDecodeError:
            self.log.info('Error while querying orientation of: ' + file_path)
        except KeyError:
            self.log.info('No EXIF tags found for: ' + file_path)
    try:
        rot = Image.open(file_path).rotate(angle)
        # Centre-crop to the largest square that fits.
        w, h = rot.size
        offset = min(rot.size) / 2
        box = [int(w / 2 - offset), int(h / 2 - offset),
               int(w / 2 + offset), int(h / 2 + offset)]
        crop = rot.crop(box)
        crop.thumbnail(size)
        crop.save(cache_file, rot.format)
        self.log.info('Caching: ' + file_name)
    except IOError:
        self.log.info('Error while opening: ' + file_path)
    except UnicodeDecodeError:
        self.log.info('Error while processing: ' + file_path)
def extract_metadata(file):
    """Extract name, GPS coordinates and altitude from an uploaded image.

    *file* is an open upload object exposing .seek/.read and .filename.
    Returns {"name", "latitude", "longitude", "altitude"}.
    """
    # load the EXIF data
    file.seek(0)
    tags = exifread.process_file(file, details=False)
    # (leftover debug `print tags` removed)
    # title: prefer the embedded description, else a safe filename
    if "Image ImageDescription" in tags:
        name = tags['Image ImageDescription'].values
    else:
        name = secure_filename(file.filename)
    # GPS coordinates
    longitude = get_coordinate('GPS GPSLongitude', 'GPS GPSLongitudeRef', tags)
    latitude = get_coordinate('GPS GPSLatitude', 'GPS GPSLatitudeRef', tags)
    altitude = compute_single_ratio(tags['GPS GPSAltitude'].values[0]) \
        if 'GPS GPSAltitude' in tags else None
    metadata = {"name": name,
                "latitude": latitude,
                "longitude": longitude,
                "altitude": altitude}
    return metadata
def populate_exif(event):
    """
    Populate EXIF data and location(if not present).
    Not all EXIF data is stored. GPS coordinates are converted to floats.
    Test image with GPS data: http://bit.ly/1VTFA5W
    """
    url = event.fields['url'].new_value
    response = requests.get(url)
    image_file = StringIO(response.content)
    raw_exif = exifread.process_file(image_file, details=False)
    # Keep printable EXIF values, skipping bulky/irrelevant tag groups.
    exclude = ('Thumbnail', 'Interoperability', 'MakerNote', 'GPS')
    exif_data = {}
    for key, val in raw_exif.items():
        if key.split()[0] not in exclude:
            exif_data[key] = val.printable
    event.set_field_value('exif', exif_data)
    # Derive location from the GPS tags only when it is not already set.
    loc_field = event.fields.get('location')
    if not loc_field or not loc_field.new_value:
        gps_data = {}
        for key, val in raw_exif.items():
            if key.startswith('GPS'):
                gps_data[key.split()[-1]] = val.values
        lat, lon = get_lat_lon(gps_data)
        event.set_field_value('location', {'lat': lat, 'lon': lon})
def render_thumb(width, imgfile):
    """Serve a cached thumbnail of *imgfile* at *width* px.

    Regenerates the thumbnail (applying EXIF orientation) when it is
    missing or older than the original; 404s when the original is absent.
    """
    try:
        imgpath = pkg_resources.resource_filename('vishwin_http.views', 'img/' + imgfile)
        thumbfile = width + 'px-' + imgfile
        thumbpath = pkg_resources.resource_filename('vishwin_http.views', 'generated/thumb/' + thumbfile)
        # get file modified time for original; will throw exception if not found
        mtime_orig = os.path.getmtime(imgpath)
        if not (os.path.isfile(thumbpath)) or (os.path.getmtime(thumbpath) < mtime_orig):
            with open(imgpath, 'rb') as raw:
                tags = exifread.process_file(raw, details=False, stop_tag='Image_Orientation')
            # reopen using PIL
            img = Image.open(imgpath)
            # upon transpose, format attribute in object is cleared
            format = img.format
            if 'Image Orientation' in tags:
                orient = tags['Image Orientation'].values[0]
                # rotations in PIL(low) are anti-clockwise, would be easier if clockwise was default
                if orient >= 5:
                    img = img.transpose(Image.ROTATE_270)
                # BUG FIX: `x == (3 or 4)` evaluates `(3 or 4)` to 3 and
                # `(2 or 4 or 5 or 7)` to 2, so orientations 4, 5 and 7
                # were never handled — use membership tests instead.
                if orient in (3, 4) or orient >= 7:
                    img = img.transpose(Image.ROTATE_180)
                # flipped images
                if orient in (2, 4, 5, 7):
                    img = img.transpose(Image.FLIP_LEFT_RIGHT)
            img.thumbnail((int(width), int(width) / (img.size[0] / img.size[1])))
            img.save(thumbpath, format)
            img.close()
        return send_file(thumbpath, mimetype=guess_type(imgpath)[0])
    except OSError:
        abort(404)
def tags(self):
    """Lazily read and cache the EXIF tags for this file.

    Prefers tags already parsed by the enclosing file container;
    otherwise parses them from ``self.handle`` with exifread
    (``details=False`` for speed).  Cached in ``self._tags``.
    """
    # `is (not) None` instead of `== None` / `!= None` (PEP 8).
    if self._tags is None:
        if self.file_container is not None:
            # Reuse the container's already-parsed tags.
            self._tags = self.file_container.tags
        else:
            self._tags = exifread.process_file(self.handle, details=False)
    return self._tags
def do_post(path):
    """Post the text-view contents (plus optional weather and image) to
    the second blog of the authenticated Tumblr account.

    *path* is a GTK entry widget whose text is either an image path or
    the literal "No image".  Prints the API response on success.

    NOTE(review): the Tumblpy credentials are blank placeholders here —
    they must be filled in for the post to succeed.
    """
    print "Posting..."
    t = Tumblpy("","", "","")
    tbuff = textview.get_buffer()
    article_text = ""
    if isWeather.get_active():
        # Prepend a dated weather summary when the checkbox is active.
        article_text = get_date_desc() + weatherProvider.get_weather()
    article_text = article_text + tbuff.get_text(tbuff.get_start_iter(), tbuff.get_end_iter())
    # Resolve the URL of the account's second blog.
    blog_url = t.post('user/info')
    blog_url = blog_url['user']['blogs'][1]['url']
    if path.get_text() !="No image":
        # Two handles: `photo` stays open for Tumblpy to stream the upload;
        # `ephoto` is only used to read the EXIF camera model for tagging.
        photo = open(path.get_text(), 'rb')
        ephoto = open(path.get_text(), 'rb')
        tags = "catumblr , "+ platform.node()
        etags = exifread.process_file(ephoto)
        if etags.has_key('Image Model'):
            tags = "catumblr , "+ platform.node() + ", " + str(etags['Image Model'])
        p_params = {'type':'photo', 'caption': article_text, 'data': photo, 'tags':tags}
        ephoto.close()
    else:
        # Text-only post, captioned with the current UTC time.
        tags = "catumblr , "+ platform.node()
        time_caption = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        p_params = {'type':'text', 'body': article_text, 'caption': time_caption, 'tags':tags}
    post = t.post('post', blog_url=blog_url, params=p_params)
    print post # returns id if posted successfully
import os, sys
import exifread
import datetime

# Compare the EXIF dates embedded in an image with the filesystem dates.
print('Input file')
f = sys.argv[1]
# Close the handle after parsing (the original leaked it).
with open(f, 'rb') as handle:
    tags = exifread.process_file(handle)
DateTime = tags["Image DateTime"]
DateTimeOriginal = tags["EXIF DateTimeOriginal"]
DateTimeDigitized = tags["EXIF DateTimeDigitized"]
print('EXIF DATA')
print('DateTime')
print(DateTime)
print('DateTimeOriginal')
print(DateTimeOriginal)
print('DateTimeDigitized')
print(DateTimeDigitized)
ctime = datetime.datetime.fromtimestamp(os.path.getctime(f))  #create time
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(f))  #modify time
atime = datetime.datetime.fromtimestamp(os.path.getatime(f))  #access time
atimea = atime.strftime('%Y:%m:%d %H:%M:%S')
mtimea = mtime.strftime('%Y:%m:%d %H:%M:%S')
ctimea = ctime.strftime('%Y:%m:%d %H:%M:%S')
print('\n')
print('SYSTEM DATA')
inDirPath = sys.argv[1] outDirPath = sys.argv[2] filesList = glob.glob(inDirPath + '/**/*', recursive=True) for filePath in filesList: filename = os.path.basename(filePath) if filename.endswith(".nef") or filename.endswith( ".jpg") or filename.endswith(".jpeg") or filename.endswith( ".png") or filename.endswith(".mp4"): print("### " + filename + " ####################") nefFile = open(filePath, 'rb') try: exifData = exifread.process_file(nefFile) except Exception as exc: print("Cant parse exif for " + filename + "; skipping") continue print(exifData) #cameraID = str(exifData.get("Image Model")) date = str(exifData.get("EXIF DateTimeOriginal", "Unknown_Date")) #try get a date from file if date == "Unknown_Date": os.makedirs(str(outDirPath + "/" + date), exist_ok=True) nefFolderName = date nefOutputPath = outDirPath + "/" + date + "/" + filename else:
def extract_exif_data(jpeg_file_path):
    """Extract JPEG image EXIF data"""
    # Close the file deterministically (the original leaked the handle).
    with open(jpeg_file_path, 'rb') as jpeg_file:
        return exifread.process_file(jpeg_file)
def iso_from_exif(img_path: Path):
    """Return the ISO speed recorded in the image's EXIF data.

    Raises KeyError when the file carries no ISOSpeedRatings tag.
    """
    # Close the handle deterministically (the original leaked it).
    with img_path.open("rb") as fh:
        tags = exifread.process_file(fh, details=False)
    return tags["EXIF ISOSpeedRatings"].values[0]
def read_roi_file(fpath):
    """Parse an ImageJ ROI blob embedded in an image file's EXIF data.

    Reads private EXIF tag 0xC697, strips its 12-byte prefix, and decodes
    the big-endian ImageJ .roi binary format into a dict describing the
    ROI (type, geometry, coordinates, name, position).  Returns None when
    *fpath* is not a str path.
    """
    if isinstance(fpath, str):
        fp = open(fpath, 'rb')
        exif_data = exif.process_file(fp)
        fp.close()
        # The ROI payload lives in a private tag; skip the 12-byte prefix.
        tag = exif_data['Image Tag 0xC697']
        data = bytes(tag.values)[12:]
        # a = bs.ConstBitStream(bytes=s)
        name = os.path.splitext(os.path.basename(fpath))[0]
    else:
        # raise an error
        return None
    size = len(data)
    code = '>'  # ImageJ's ROI format is big-endian
    roi = {}
    # Header starts with the 'Iout' magic bytes.
    magic = get_byte(data, list(range(4)))
    magic = "".join([chr(c) for c in magic])
    # TODO: raise error if magic != 'Iout'
    version = get_short(data, OFFSET['VERSION_OFFSET'])
    type = get_byte(data, OFFSET['TYPE'])
    subtype = get_short(data, OFFSET['SUBTYPE'])
    top = get_short(data, OFFSET['TOP'])
    left = get_short(data, OFFSET['LEFT'])
    # Large values are negative shorts that were read as unsigned.
    if top > 6000:
        top -= 2**16
    if left > 6000:
        left -= 2**16
    bottom = get_short(data, OFFSET['BOTTOM'])
    right = get_short(data, OFFSET['RIGHT'])
    width = right - left
    height = bottom - top
    n = get_short(data, OFFSET['N_COORDINATES'])
    options = get_short(data, OFFSET['OPTIONS'])
    position = get_int(data, OFFSET['POSITION'])
    hdr2Offset = get_int(data, OFFSET['HEADER2_OFFSET'])
    sub_pixel_resolution = (
        options == OPTIONS['SUB_PIXEL_RESOLUTION']) and version >= 222
    draw_offset = sub_pixel_resolution and (options == OPTIONS['DRAW_OFFSET'])
    sub_pixel_rect = version >= 223 and sub_pixel_resolution and (
        type == ROI_TYPE['rect'] or type == ROI_TYPE['oval'])
    # Untested
    if sub_pixel_rect:
        # NOTE(review): fp was closed above, so this read would raise on a
        # closed file — confirm against the upstream implementation.
        packed_data = fp.read(16)
        s = struct.Struct(code + '4f')
        xd, yd, widthd, heightd = s.unpack(packed_data)
    # Untested
    if hdr2Offset > 0 and hdr2Offset + HEADER_OFFSET['IMAGE_SIZE'] + 4 <= size:
        # Optional second header: stack position and overlay styling.
        channel = get_int(data, hdr2Offset + HEADER_OFFSET['C_POSITION'])
        slice = get_int(data, hdr2Offset + HEADER_OFFSET['Z_POSITION'])
        frame = get_int(data, hdr2Offset + HEADER_OFFSET['T_POSITION'])
        overlayLabelColor = get_int(
            data, hdr2Offset + HEADER_OFFSET['OVERLAY_LABEL_COLOR'])
        overlayFontSize = get_short(
            data, hdr2Offset + HEADER_OFFSET['OVERLAY_FONT_SIZE'])
        imageOpacity = get_byte(data, hdr2Offset + HEADER_OFFSET['IMAGE_OPACITY'])
        imageSize = get_int(data, hdr2Offset + HEADER_OFFSET['IMAGE_SIZE'])
    is_composite = get_int(data, OFFSET['SHAPE_ROI_SIZE']) > 0
    # Not implemented
    # if is_composite:
    #     if version >= 218:
    #         # Not implemented
    #         pass
    #     if channel > 0 or slice > 0 or frame > 0:
    #         pass
    # Decode the geometry according to the ROI type byte.
    if type == ROI_TYPE['rect']:
        roi = {'type': 'rectangle'}
        if sub_pixel_rect:
            roi.update(dict(left=xd, top=yd, width=widthd, height=heightd))
        else:
            roi.update(dict(left=left, top=top, width=width, height=height))
        roi['arc_size'] = get_short(data, OFFSET['ROUNDED_RECT_ARC_SIZE'])
    elif type == ROI_TYPE['oval']:
        roi = {'type': 'oval'}
        if sub_pixel_rect:
            roi.update(dict(left=xd, top=yd, width=widthd, height=heightd))
        else:
            roi.update(dict(left=left, top=top, width=width, height=height))
    elif type == ROI_TYPE['line']:
        roi = {'type': 'line'}
        x1 = get_float(data, OFFSET['X1'])
        y1 = get_float(data, OFFSET['Y1'])
        x2 = get_float(data, OFFSET['X2'])
        y2 = get_float(data, OFFSET['Y2'])
        if subtype == SUBTYPES['ARROW']:
            # Not implemented
            pass
        else:
            roi.update(dict(x1=x1, x2=x2, y1=y1, y2=y2))
            roi['draw_offset'] = draw_offset
    elif type in [
        ROI_TYPE[t] for t in [
            "polygon", "freehand", "traced", "polyline", "freeline",
            "angle", "point"
        ]
    ]:
        # Coordinate lists: integer shorts relative to (left, top),
        # optionally followed by sub-pixel float coordinates.
        x = []
        y = []
        base1 = OFFSET['COORDINATES']
        base2 = base1 + 2 * n
        for i in range(n):
            xtmp = get_short(data, base1 + i * 2)
            if xtmp < 0:
                xtmp = 0
            ytmp = get_short(data, base2 + i * 2)
            if ytmp < 0:
                ytmp = 0
            x.append(left + xtmp)
            y.append(top + ytmp)
        if sub_pixel_resolution:
            xf = []
            yf = []
            base1 = OFFSET['COORDINATES'] + 4 * n
            base2 = base1 + 4 * n
            for i in range(n):
                xf.append(get_float(data, base1 + i * 4))
                yf.append(get_float(data, base2 + i * 4))
        if type == ROI_TYPE['point']:
            roi = {'type': 'point'}
            if sub_pixel_resolution:
                roi.update(dict(x=xf, y=yf, n=n))
            else:
                roi.update(dict(x=x, y=y, n=n))
        if type == ROI_TYPE['polygon']:
            roi = {'type': 'polygon'}
        elif type == ROI_TYPE['freehand']:
            roi = {'type': 'freehand'}
            if subtype == SUBTYPES['ELLIPSE']:
                # Ellipse freehand: endpoints plus aspect ratio.
                ex1 = get_float(data, OFFSET['X1'])
                ey1 = get_float(data, OFFSET['Y1'])
                ex2 = get_float(data, OFFSET['X2'])
                ey2 = get_float(data, OFFSET['Y2'])
                roi['aspect_ratio'] = get_float(data, OFFSET['ELLIPSE_ASPECT_RATIO'])
                roi.update(dict(ex1=ex1, ey1=ey1, ex2=ex2, ey2=ey2))
        elif type == ROI_TYPE['traced']:
            roi = {'type': 'traced'}
        elif type == ROI_TYPE['polyline']:
            roi = {'type': 'polyline'}
        elif type == ROI_TYPE['freeline']:
            roi = {'type': 'freeline'}
        elif type == ROI_TYPE['angle']:
            roi = {'type': 'angle'}
        else:
            roi = {'type': 'freeroi'}
        if sub_pixel_resolution:
            roi.update(dict(x=xf, y=yf, n=n))
        else:
            roi.update(dict(x=x, y=y, n=n))
    else:
        # TODO: raise an error for 'Unrecognized ROI type'
        pass
    roi['name'] = name
    if version >= 218:
        # Not implemented
        # Read stroke width, stroke color and fill color
        pass
    if version >= 218 and subtype == SUBTYPES['TEXT']:
        # Not implemented
        # Read test ROI
        pass
    if version >= 218 and subtype == SUBTYPES['IMAGE']:
        # Not implemented
        # Get image ROI
        pass
    roi['position'] = position
    # if channel > 0 or slice > 0 or frame > 0:
    #     roi['position'] = dict(channel=channel, slice=slice, frame=frame)
    return roi
import exifread '''Uses exifread package (install using "sudo pip install exifread")''' f = open("../rawimages/HDRset_1/_DSC1718.ARW.tiff", 'rb') tags = exifread.process_file(f, details=False) f.close() #Only output exif tags for tag in tags.keys(): if 'EXIF' in tag: print "%s: %s" % (tag, tags[tag]) #Output specific values fnumber = tags["EXIF FNumber"] exposureTime = tags["EXIF ExposureTime"] focalLength = tags["EXIF FocalLength"] ISOspeed = tags["EXIF ISOSpeedRatings"] print fnumber, exposureTime, focalLength, ISOspeed ''' Documentation for @ https://pypi.python.org/pypi/ExifRead Example: processing tags -Code for tag in tags.keys(): #omit some tags that tend to be too long or boring if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'): print "Key: %s, value %s" % (tag, tags[tag])
import exifread

f = open(r'C:\Users\jacob\Desktop\Python\exif\img_1771.jpg', 'rb')
tags = exifread.process_file(f)
f.close()  # close the handle once parsing is done
print("\nBelow is some information hidden in your image:\n")
for tag in tags.keys():
    # BUG FIX: exifread spells these keys 'Filename' and 'EXIF MakerNote';
    # the old 'FileName'/'EXIF Makernote' never matched, so the bulky
    # maker-note data was printed anyway.
    if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):
        print("\t%s: %s" % (tag, tags[tag]))
def _get_all_exif(self, pathname):
    """Return every EXIF tag exifread can parse from the file at *pathname*."""
    # Close the image handle deterministically (the original leaked it).
    with open(pathname, 'rb') as img:
        return exifread.process_file(img)
def uploadImage(self, image):
    """ Upload a single image. Returns the photoid, or None on failure. """
    # NOTE(review): Python 2 code (has_key, print statements) — kept as-is.
    # Tag derived from the path relative to the configured image root.
    folderTag = image[len(IMAGE_DIR):]
    if self.uploaded.has_key(folderTag):
        stats = os.stat(image)
        logging.debug('The file %s already exists: mtime=%d, size=%d',
                      image, stats.st_mtime, stats.st_size)
        data = self.uploaded[folderTag]
        if not isinstance(data, tuple):
            # Legacy cache entry (plain photo id) — upgrade it to the
            # (id, mtime, size) tuple format and skip this round.
            logging.error(
                'Should not have non-tuple data but continuing in any case'
            )
            self.uploaded[folderTag] = (data, stats.st_mtime, stats.st_size)
            return None
        else:
            photo_id = data[0]
            mtime = data[1]
            filesize = data[2]
            if mtime != stats.st_mtime or filesize != stats.st_size:
                # Local file changed: delete the remote copy so the upload
                # below effectively replaces it.
                logging.info('File has changed since previous time')
                logging.info('Removing %s from Flickr before updating', data[0])
                photo = flickr.Photo(data[0])
                try:
                    photo.delete()
                    del self.uploaded[folderTag]
                    del self.uploaded[photo_id]
                except flickr.FlickrError:
                    logging.info('File does not exist, adding')
            else:
                # Unchanged since last sync — nothing to do.
                return None
    try:
        logging.debug("Getting EXIF for %s", image)
        f = open(image, 'rb')
        try:
            # Best-effort EXIF read; fall back to no tags on MemoryError.
            exiftags = exifread.process_file(f)
        except MemoryError:
            exiftags = {}
        f.close()
        #print exiftags[XPKEYWORDS]
        #print folderTag
        # make one tag equal to original file path with spaces replaced by #
        # and start it with # (for easier recognition) since space is
        # used as TAG separator by flickr
        # this is needed for later syncing flickr with folders
        # look for / \ _ . and replace them with SPACE to make real Tags
        realTags = re.sub(r'[/\\_.]', ' ', os.path.dirname(folderTag)).strip()
        if configdict.get('full_folder_tags', 'false').startswith('true'):
            # Alternative mode: each folder component becomes a quoted tag.
            realTags = os.path.dirname(folderTag).split(os.sep)
            realTags = (' '.join('"' + item + '"' for item in realTags))
        picTags = '"#' + folderTag + '" ' + realTags
        #check if we need to override photo dates
        if configdict.get('override_dates', '0') == '1':
            dateTaken = datePosted = ''
            dateTakenGranularity = configdict.get('date_taken_granularity', '0')
            #fixed take date
            # NOTE(review): this branch assigns datePosted, not dateTaken —
            # looks like a copy/paste slip; confirm intended behavior.
            if configdict.get('date_taken_type', '0') == '2':
                datePosted = configdict.get('date_posted_fixed', '')
            #fixed post date
            if configdict.get('date_posted_type', '0') == '2':
                datePosted = configdict.get('date_posted_fixed', '')
                #Use year and month from config ini, then calculate end of month
                #(note: Flickr does not accept future dates. You'll get current
                #date maximum)
                if configdict.get('date_posted_granularity', '0') == '4':
                    datePostedY = int(
                        datetime.fromtimestamp(datePosted).strftime("%Y"))
                    datePostedM = int(
                        datetime.fromtimestamp(datePosted).strftime("%m"))
                    datePostedD = calendar.monthrange(
                        datePostedY, datePostedM)[1]
                    datePosted = int(
                        (datetime(datePostedY, datePostedM, datePostedD,
                                  23, 59, 59) -
                         datetime(1970, 1, 1)).total_seconds())
                #Use year from config ini, then calculate end of year
                #(note: Flickr does not accept future dates. You'll get current
                #date maximum)
                if configdict.get('date_posted_granularity', '0') == '6':
                    datePostedY = int(
                        datetime.fromtimestamp(datePosted).strftime("%Y"))
                    datePosted = int(
                        (datetime(datePostedY, 12, 31, 23, 59, 59) -
                         datetime(1970, 1, 1)).total_seconds())
                #Convert timestamp to GMT zone
                dateZone = configdict.get('date_posted_utc', '0')
                if dateZone != '0':
                    datePosted = datePosted - int(dateZone) * 3600
        if exiftags == {}:
            logging.debug('NO_EXIF_HEADER for %s', image)
        else:
            if configdict.get('override_dates', '0') == '1':
                if 'EXIF DateTimeDigitized' in exiftags:
                    # EXIF format 'YYYY:MM:DD hh:mm:ss' -> 'YYYY-MM-DD hh:mm:ss'
                    dateExif = str(exiftags['EXIF DateTimeDigitized'])
                    dateExif = dateExif[0:10].replace(':', '-') + dateExif[10:]
                    dateUnix = int((datetime(
                        int(dateExif[0:4]), int(dateExif[5:7]),
                        int(dateExif[8:10]), int(dateExif[11:13]),
                        int(dateExif[14:16]), int(dateExif[17:19])) -
                        datetime(1970, 1, 1)).total_seconds())
                    if configdict.get('date_taken_type', '0') == '1':
                        dateTaken = dateExif
                    if configdict.get('date_posted_type', '0') == '1':
                        datePosted = dateUnix
                        #Use year and month from dateExif, then calculate end of
                        #month (note: Flickr does not accept future dates.
                        #You'll get current date maximum)
                        if configdict.get('date_posted_granularity', '0') == '4':
                            datePostedY = int(
                                datetime.fromtimestamp(
                                    datePosted).strftime("%Y"))
                            datePostedM = int(
                                datetime.fromtimestamp(
                                    datePosted).strftime("%m"))
                            datePostedD = calendar.monthrange(
                                datePostedY, datePostedM)[1]
                            datePosted = int(
                                (datetime(datePostedY, datePostedM,
                                          datePostedD, 23, 59, 59) -
                                 datetime(1970, 1, 1)).total_seconds())
                        #Use year from dateExif, then calculate end of year
                        #(note: Flickr does not accept future dates. You'll get
                        #current date maximum)
                        if configdict.get('date_posted_granularity', '0') == '6':
                            datePostedY = int(
                                datetime.fromtimestamp(
                                    datePosted).strftime("%Y"))
                            datePosted = int((
                                datetime(datePostedY, 12, 31, 23, 59, 59) -
                                datetime(1970, 1, 1)).total_seconds())
                        #Convert timestamp to GMT zone
                        dateZone = configdict.get('date_posted_utc', '0')
                        if dateZone != '0':
                            datePosted = datePosted - int(dateZone) * 3600
            # look for additional tags in EXIF to tag picture with
            if XPKEYWORDS in exiftags:
                printable = exiftags[XPKEYWORDS].printable
                if len(printable) > 4:
                    # NOTE(review): eval() on data read from the image file —
                    # a crafted EXIF payload could execute code; consider
                    # ast.literal_eval instead.
                    exifstring = exifread.make_string(eval(printable))
                    picTags += exifstring.replace(';', ' ')
        picTags = picTags.strip()
        logging.info("Uploading image %s with tags %s", image, picTags)
        # NOTE(review): this second open() is never explicitly closed.
        photo = ('photo', image, open(image, 'rb').read())
        d = {
            api.token: str(self.token),
            api.perms: str(self.perms),
            "tags": str(picTags),
            "hidden": str(FLICKR["hidden"]),
            "is_public": str(FLICKR["is_public"]),
            "is_friend": str(FLICKR["is_friend"]),
            "is_family": str(FLICKR["is_family"])
        }
        sig = signCall(d)
        d[api.sig] = sig
        d[api.key] = FLICKR[api.key]
        url = buildRequest(api.upload, d, (photo, ))
        res = getResponse(url)
        if isGood(res):
            logging.debug("successful.")
            photoid = str(res.photoid.text)
            self.logUpload(photoid, folderTag, image)
            if configdict.get('override_dates', '0') == '1':
                self.overrideDates(image, photoid, datePosted, dateTaken,
                                   dateTakenGranularity)
            return photoid
        else:
            print "problem.."
            reportError(res)
    except KeyboardInterrupt:
        logging.debug("Keyboard interrupt seen, abandon uploads")
        print "Stopping uploads..."
        self.abandonUploads = True
        return None
    except:
        # NOTE(review): bare except — swallows everything (incl. SystemExit);
        # narrowing it would make failures easier to diagnose.
        logging.exception("Upload failed")
        return None
# Changing working directory to unprocessed folder os.chdir('/srv/ObjectDB/unprocessed') cursor = db.cursor() # Create a cursor for MySQL commands # Loop forever while (True): files = os.listdir('.') # Populate list of files if (files != ''): # Only run when folder is not empty for x in files: print('Processing file: ' + x) f = open(x, 'rb') # Open the file filename = uuid.uuid4().hex odapi_output = pickle.dumps(odapi_adapter.get_objects( f.name)) # Pickle odapi_output exif = pickle.dumps(exifread.process_file(f)) # Pickle EXIF os.system('mv ' + x + ' ../processed/' + filename) # Move file to processed folder os.chdir('/srv/ObjectDB/EXIF') with open(filename + "-exif.json", 'wb') as output: # Dump EXIF into /srv/ObjectDB/EXIF output.write(exif) os.chdir('/srv/ObjectDB/odapi_output') with open( filename + "-odapi_output.json", 'wb' ) as output: # Dump ODAPI output into /srv/ObjectDB/odapi_output output.write(odapi_output) os.chdir('/srv/ObjectDB/unprocessed') data_image = ('/srv/ObjectDB/processed/' + filename, '/srv/ObjectDB/EXIF/' + filename + '-exif.json', '/srv/ObjectDB/odapi_output/' + filename +
import exifread # Read in image from disk in binary format o = open("file1.jpg",'r+b') tags = exifread.process_file(o) for tag in tags.keys(): if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'EXIF MakerNote'): print("%s @ %s" % (tag, tags[tag])) print("-----------------------BREAK---------------------") o = open("file2.jpg",'r+b') tags = exifread.process_file(o, stop_tag='UNDEF') print("EXIF ExifImageWidth @ %s" % ( tags['EXIF ExifImageWidth'])) print("EXIF ExifImageLenght @ %s" % ( tags['EXIF ExifImageLength'])) if tags['Image Make'] : # print("Image Make @ %s" % ( tags['Image Make'])) print("TRUE") else : print("FALSE") # print("Image Make @ NULL") # for tag in tags.keys(): # if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'EXIF MakerNote'): # print("%s @ %s" % (tag, tags[tag])) # print("EXIF ExifImageWidth @ %s" % ( tags['EXIF ExifImageWidth'])) print("-----------------------BREAK---------------------") o = open("file3.jpg",'r+b') tags = exifread.process_file(o) for tag in tags.keys():
def parse_exif_values(self, _path_file):
    """Populate this photo's attributes from the EXIF and XMP metadata of
    the image at *_path_file* (camera identity, GPS, exposure, band info,
    timestamps, image dimensions).
    """
    # Disable exifread log
    logging.getLogger('exifread').setLevel(logging.CRITICAL)

    with open(_path_file, 'rb') as f:
        tags = exifread.process_file(f, details=False)
        try:
            # --- Basic tags: camera identity and GPS position ---
            if 'Image Make' in tags:
                try:
                    self.camera_make = tags['Image Make'].values
                except UnicodeDecodeError:
                    log.ODM_WARNING("EXIF Image Make might be corrupted")
                    self.camera_make = "unknown"
            if 'Image Model' in tags:
                try:
                    self.camera_model = tags['Image Model'].values
                except UnicodeDecodeError:
                    log.ODM_WARNING("EXIF Image Model might be corrupted")
                    self.camera_model = "unknown"
            if 'GPS GPSAltitude' in tags:
                self.altitude = self.float_value(tags['GPS GPSAltitude'])
                # AltitudeRef > 0 means below sea level.
                if 'GPS GPSAltitudeRef' in tags and self.int_value(
                        tags['GPS GPSAltitudeRef']) > 0:
                    self.altitude *= -1
            if 'GPS GPSLatitude' in tags and 'GPS GPSLatitudeRef' in tags:
                self.latitude = self.dms_to_decimal(
                    tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef'])
            if 'GPS GPSLongitude' in tags and 'GPS GPSLongitudeRef' in tags:
                self.longitude = self.dms_to_decimal(
                    tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef'])
        except (IndexError, ValueError) as e:
            log.ODM_WARNING("Cannot read basic EXIF tags for %s: %s" %
                            (_path_file, str(e)))

        try:
            # --- Extended tags: radiometry, exposure, capture time ---
            if 'Image Tag 0xC61A' in tags:
                self.black_level = self.list_values(
                    tags['Image Tag 0xC61A'])
            elif 'BlackLevel' in tags:
                self.black_level = self.list_values(tags['BlackLevel'])

            if 'EXIF ExposureTime' in tags:
                self.exposure_time = self.float_value(
                    tags['EXIF ExposureTime'])

            if 'EXIF FNumber' in tags:
                self.fnumber = self.float_value(tags['EXIF FNumber'])

            # ISO may live under several tag names depending on camera.
            if 'EXIF ISOSpeed' in tags:
                self.iso_speed = self.int_value(tags['EXIF ISOSpeed'])
            elif 'EXIF PhotographicSensitivity' in tags:
                self.iso_speed = self.int_value(
                    tags['EXIF PhotographicSensitivity'])
            elif 'EXIF ISOSpeedRatings' in tags:
                self.iso_speed = self.int_value(
                    tags['EXIF ISOSpeedRatings'])

            if 'Image BitsPerSample' in tags:
                self.bits_per_sample = self.int_value(
                    tags['Image BitsPerSample'])
            if 'EXIF DateTimeOriginal' in tags:
                str_time = tags['EXIF DateTimeOriginal'].values
                utc_time = datetime.strptime(str_time, "%Y:%m:%d %H:%M:%S")
                subsec = 0
                if 'EXIF SubSecTime' in tags:
                    subsec = self.int_value(tags['EXIF SubSecTime'])
                # Fold the sub-second fraction into the timestamp, keeping
                # its sign.
                negative = 1.0
                if subsec < 0:
                    negative = -1.0
                    subsec *= -1.0
                subsec = float('0.{}'.format(int(subsec)))
                subsec *= negative
                ms = subsec * 1e3
                utc_time += timedelta(milliseconds=ms)
                timezone = pytz.timezone('UTC')
                epoch = timezone.localize(datetime.utcfromtimestamp(0))
                # Stored as milliseconds since the Unix epoch.
                self.utc_time = (timezone.localize(utc_time) -
                                 epoch).total_seconds() * 1000.0
        except Exception as e:
            log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" %
                            (_path_file, str(e)))

        # Extract XMP tags
        # NOTE: rewinds the same file handle and rebinds `tags` to each
        # XMP dictionary (the EXIF `tags` dict is no longer needed here).
        f.seek(0)
        xmp = self.get_xmp(f)

        for tags in xmp:
            try:
                band_name = self.get_xmp_tag(
                    tags, ['Camera:BandName', '@Camera:BandName'])
                if band_name is not None:
                    self.band_name = band_name.replace(" ", "")
                self.set_attr_from_xmp_tag(
                    'band_index', tags, [
                        'DLS:SensorId',  # Micasense RedEdge
                        '@Camera:RigCameraIndex',  # Parrot Sequoia, Sentera 21244-00_3.2MP-GS-0001
                        'Camera:RigCameraIndex',  # MicaSense Altum
                    ])
                self.set_attr_from_xmp_tag(
                    'radiometric_calibration', tags, [
                        'MicaSense:RadiometricCalibration',
                    ])
                self.set_attr_from_xmp_tag('vignetting_center', tags, [
                    'Camera:VignettingCenter',
                    'Sentera:VignettingCenter',
                ])
                self.set_attr_from_xmp_tag('vignetting_polynomial', tags, [
                    'Camera:VignettingPolynomial',
                    'Sentera:VignettingPolynomial',
                ])
                self.set_attr_from_xmp_tag('horizontal_irradiance', tags,
                                           ['Camera:HorizontalIrradiance'],
                                           float)
                self.set_attr_from_xmp_tag(
                    'irradiance_scale_to_si', tags,
                    ['Camera:IrradianceScaleToSIUnits'], float)
                self.set_attr_from_xmp_tag('sun_sensor', tags, [
                    'Camera:SunSensor',
                ], float)
                self.set_attr_from_xmp_tag('spectral_irradiance', tags, [
                    'Camera:SpectralIrradiance',
                    'Camera:Irradiance',
                ], float)
                # Phantom 4 RTK
                if '@drone-dji:RtkStdLon' in tags:
                    y = float(
                        self.get_xmp_tag(tags, '@drone-dji:RtkStdLon'))
                    x = float(
                        self.get_xmp_tag(tags, '@drone-dji:RtkStdLat'))
                    self.gps_xy_stddev = max(x, y)
                    if '@drone-dji:RtkStdHgt' in tags:
                        self.gps_z_stddev = float(
                            self.get_xmp_tag(tags, '@drone-dji:RtkStdHgt'))
                else:
                    self.set_attr_from_xmp_tag(
                        'gps_xy_stddev', tags,
                        ['@Camera:GPSXYAccuracy', 'GPSXYAccuracy'], float)
                    self.set_attr_from_xmp_tag(
                        'gps_z_stddev', tags,
                        ['@Camera:GPSZAccuracy', 'GPSZAccuracy'], float)
                if 'DLS:Yaw' in tags:
                    self.set_attr_from_xmp_tag('dls_yaw', tags,
                                               ['DLS:Yaw'], float)
                    self.set_attr_from_xmp_tag('dls_pitch', tags,
                                               ['DLS:Pitch'], float)
                    self.set_attr_from_xmp_tag('dls_roll', tags,
                                               ['DLS:Roll'], float)
            except Exception as e:
                log.ODM_WARNING("Cannot read XMP tags for %s: %s" %
                                (_path_file, str(e)))

        # self.set_attr_from_xmp_tag('center_wavelength', tags, [
        #     'Camera:CentralWavelength'
        # ], float)
        # self.set_attr_from_xmp_tag('bandwidth', tags, [
        #     'Camera:WavelengthFWHM'
        # ], float)

    self.width, self.height = get_image_size.get_image_size(_path_file)
    # Sanitize band name since we use it in folder paths
    self.band_name = re.sub('[^A-Za-z0-9]+', '', self.band_name)
def getMetadata(image):
    """Return every EXIF tag of *image* as a {tag_name: string_value} dict.

    Args:
        image: path to a JPEG/TIFF file readable by exifread.

    Returns:
        dict of tag name -> stringified tag value (empty dict if the file
        has no EXIF data).
    """
    # Context manager closes the handle; the original leaked it.
    with open(image, 'rb') as fh:
        tags = exifread.process_file(fh)
    # Iterate items() directly instead of re-looking-up each key.
    return {name: str(value) for name, value in tags.items()}
def extractTag(self):
    """Populate self.tags with one space-joined string of stemmed terms for
    the query image, trying in order: a known-tags index file, a sidecar
    .txt file, and finally the image's own EXIF XPComment/XPKeywords.
    """
    # NOTE(review): Python 2 code (unicode/print statements) — kept as-is.
    stemmer = LancasterStemmer()
    # Sidecar tag file lives next to the image with a .txt extension.
    querytagfile = self.querypath.replace("jpg","txt");
    imgname = self.querypath[self.querypath.rfind(os.sep)+1:]
    # known_query_tags maps image name -> byte offset into the index file.
    query_known = self.known_query_tags.get(imgname, None)
    if (query_known != None):
        fs = open("test"+os.sep+"test_text_tags.txt","r")
        fs.seek(query_known)
        # First token on the line is the image name; the rest are tags.
        line = fs.readline().split()[1:]
        string = []
        for term in line:
            try:
                tag = stemmer.stem(term)
                if (isinstance(tag, unicode) == False):
                    tag = unicode(tag, "utf-8")
                string.append(tag)
            except UnicodeDecodeError:
                # Skip terms that cannot be decoded.
                continue
        string = " ".join(string)
        self.tags = [string,]
        fs.close()
    elif (os.path.isfile(querytagfile)):
        # NOTE(review): this handle is never closed (unlike the branch above).
        fs = open(querytagfile,"r")
        line = fs.readline().split()
        string = []
        for term in line:
            try:
                tag = stemmer.stem(term)
                if (isinstance(tag, unicode) == False):
                    tag = unicode(tag, "utf-8")
                string.append(tag)
            except UnicodeDecodeError:
                continue
        string = " ".join(string)
        self.tags = [string,]
    else:
        # Fall back to EXIF: Windows XP comment/keyword tags are stored as
        # UTF-16-ish byte lists; every other byte is taken to rebuild text.
        img = open(self.querypath, 'rb')
        exif = exifread.process_file(img)
        keyword = ""
        try:
            # NOTE(review): eval() here is redundant (a plain subscript would
            # do) and risky on untrusted metadata.
            comment = ast.literal_eval(str(eval("exif['Image XPComment']")))
            comment = comment[0:-2:2]
            comment = ''.join(chr(i) for i in comment)
            comment = comment.replace(";","")
        except KeyError:
            comment = ""
        except SyntaxError:
            # NOTE(review): falls through with `comment` unbound if this
            # branch is hit before assignment — potential NameError below.
            print 'comment',exif['Image XPComment']
        try:
            keyword = ast.literal_eval(str(eval("exif['Image XPKeywords']")))
            keyword = keyword[0:-2:2]
            keyword = ''.join(chr(i) for i in keyword)
            keyword = keyword.replace(";","")
        except KeyError:
            keyword = ""
        except SyntaxError:
            print 'keyword', exif['Image XPKeywords']
        # NOTE: iterates character-by-character, so each single character is
        # stemmed individually — presumably intentional; verify.
        line = keyword + comment;
        string = []
        for term in line:
            try:
                tag = stemmer.stem(term)
                if (isinstance(tag, unicode) == False):
                    tag = unicode(tag, "utf-8")
                string.append(tag)
            except UnicodeDecodeError:
                continue
        string = " ".join(string)
        self.tags = [string,]
        print "here are the tags"
        print self.tags
        img.close()
def exif_rename(top_dir, dry_run=True):
    """Walk *top_dir* and rename every .jpg/.tiff to its EXIF capture
    date-time; duplicates (same target name, same size) are deleted.

    Args:
        top_dir: root directory to walk recursively.
        dry_run: when True (default) only print what would happen.

    Side effects: writes a per-file status code to log.txt
    (N=no tags, I=no date tag, D=bad date, T=empty date).
    """
    top = path.normcase(top_dir)
    log = codecs.open('log.txt', 'w', 'utf-8')
    count = 0
    for root, dirs, files in os.walk(top):
        for name in files:
            base, ext = path.splitext(name)
            # Only JPEG/TIFF files carry the EXIF we care about.
            if not ext or ext.lower() not in ['.jpg', '.tiff']:
                continue
            rel_path = path.join(root, name)
            src_path = path.abspath(rel_path)
            # print("Processing: %s" % src_path)
            f = open(path.join(root, name), 'rb')
            tags = exifread.process_file(f)
            f.close()
            if not tags:
                print('No exif tags found: %s\n' % name)
                log.write('N:' + src_path + '\n')
                continue
            # Prefer Digitized, then Original, then the plain Image date.
            exif_d1 = tags.get('EXIF DateTimeDigitized')
            exif_d2 = tags.get('EXIF DateTimeOriginal')
            exif_d3 = tags.get('Image DateTime')
            exif_date_time_obj = exif_d1 or exif_d2 or exif_d3
            if not exif_date_time_obj:
                print("Exif date not found, skip %s" % name)
                log.write('I:' + src_path + '\n')
                continue
            # print("Exif date: %s" % exif_date_time_obj)
            try:
                exif_date_time_str = str(exif_date_time_obj)
                exif_date_time = datetime.strptime(exif_date_time_str,
                                                   EXIF_DATE_TIME)
            except:
                # NOTE(review): bare except — ValueError would be the
                # precise exception for a malformed date string.
                print("Invalid exif date: %s" % name)
                log.write('D:' + src_path + '\n')
                continue
            # NOTE(review): dead branch — strptime either returns a
            # datetime or raises; it never returns a falsy value.
            if not exif_date_time:
                print("Exif date not found, skip %s" % name)
                log.write('T:' + src_path + '\n')
                continue
            name_str = datetime.strftime(exif_date_time, NAME_DATE_TIME)
            # Already named by its capture date — nothing to do.
            if name_str == base:
                # print('Skip exists %s' % dst_path)
                # log.write('E:'+src_path_u+'\n')
                continue
            ### remove duplicate file start ###
            dst_path = path.join(root, name_str + ext.lower())
            if path.exists(dst_path):
                count += 1
                if not dry_run:
                    # Same size is taken as "same photo"; delete the copy.
                    if os.path.getsize(dst_path) == os.path.getsize(src_path):
                        os.remove(src_path)
                        print('[%s] Delete duplicate %s' % (count, src_path))
                else:
                    print('[DRY RUN %s] Delete duplicate %s' % (count, src_path))
                continue
            ### remove duplicate file end ###
            # Disambiguate same-second shots by appending 'x' characters.
            while path.exists(path.join(root, name_str + ext.lower())):
                name_str += 'x'
            dst_path = path.join(root, name_str + ext.lower())
            # NOTE(review): unreachable — the loop above only exits when
            # the candidate path does NOT exist.
            if path.exists(dst_path):
                # print('Skip exists %s' % src_path)
                # log.write('E:'+src_path_u+'\n')
                continue
            count += 1
            if not dry_run:
                os.rename(src_path, dst_path)
                print('[%s] Renamed to %s' % (count, dst_path))
            else:
                print('[DRY RUN %s] Renamed to %s' % (count, dst_path))
    log.flush()
    log.close()
def get_exif(self):
    """Parse and return the EXIF tag dictionary of this object's file."""
    # The parsed tags do not depend on the open handle, so the file can
    # be released as soon as process_file() returns.
    with open(self.filepath, 'rb') as handle:
        return exifread.process_file(handle)
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref: lat = _convert_to_degress(gps_latitude) if gps_latitude_ref.values[0] != 'N': lat = 0 - lat lon = _convert_to_degress(gps_longitude) if gps_longitude_ref.values[0] != 'E': lon = 0 - lon return lat, lon # Iterates through photos in photos_dir, reads metadata, writes coordinates (if any) and metadata to our geojson object for photo_name in os.listdir(os.path.abspath(photos_dir)): with open(os.path.join(os.path.abspath(photos_dir), photo_name)) as photo: tags = exifread.process_file(photo) if 'GPS GPSLongitude' in tags: datetime_obj = datetime.datetime.strptime( str(tags['Image DateTime']), '%Y:%m:%d %H:%M:%S') lat, lon = get_exif_location(tags) alt_meters = str(tags['GPS GPSAltitude']) date = datetime_obj.strftime('%Y-%m-%d') time = datetime_obj.strftime('%-I:%M:%S %p') name = photo_name output_geojson['features'].append({ "type": "Feature", "properties": { "date": date, "time": time,
def process_directory(dir_path, dir_depth, options):
    """Rename image/video files under *dir_path* so their names carry the
    original/creation date-time (EXIF for JPEGs, container metadata for
    videos), recursing up to options.max_depth.

    Args:
        dir_path: pathlib.Path of the directory to scan.
        dir_depth: current recursion depth (starts at the caller's choice).
        options: parsed CLI options (max_depth, skip_image, skip_video,
            fast, force, erase, dry_run).

    Side effects: updates the module-level counters and may rename files
    (unless options.dry_run).
    """
    global files_count, processed_count, renamed_count, skipped_count, failed_count
    if options.max_depth > 0:
        print('Processing directory path "%s" recursively at depth %d ... ' % (dir_path.resolve(), dir_depth))
    else:
        print('Processing directory path "%s" non-recursively ... ' % dir_path.resolve())
    for tmp_path in dir_path.iterdir():
        # print(' DEBUG: tmp_path=%r' % tmp_path)
        if tmp_path.is_dir():
            if options.max_depth > dir_depth:
                process_directory(tmp_path, dir_depth +1, options)
                continue
            else:
                print(' WARNING: Not processing sub-directory path "%s", because of reached maximum depth of %d ... ' % (tmp_path, options.max_depth))
        elif not tmp_path.is_file():
            print(' INFO: Path "%s" is not a file => ignoring ... ' % tmp_path)
            continue
        file_path = tmp_path
        files_count += 1
        # print('Processing file %d: "%s" ... ' % (files_count, file_path))
        file_name = file_path.name
        # Guess by extension only (via the file URI); no content sniffing yet.
        guessed_mime_type = mimetypes.MimeTypes().guess_type(file_path.as_uri())[0]
        # print(' DEBUG: guessed_mime_type="%r" ' % (guessed_mime_type,))
        if guessed_mime_type is None:
            print(' WARNING: File path "%s" cannot be quessed its mime-type => skipping ... ' % file_path)
            continue
        # Pre-computed match for an existing date-time string in the name.
        date_time_search = date_time_search_re.match(file_name)
        date_time_str = None
        if guessed_mime_type.startswith('image'):
            if options.skip_image is True:
                print (' INFO: File name "%s" guessed mime-type is image, which is not to be processed => skipping ...' % file_name)
                skipped_count += 1
                continue
            # Content check: only JPEGs are supported for EXIF extraction.
            img_type = imghdr.what(file_path)
            # print(' DEBUG: img_type=%s' % (img_type,))
            if img_type != 'jpeg':
                print(' WARNING: File path "%s" (image) does not contain JPEG image header => skipping ... ' % file_path)
                failed_count += 1
                continue
            processed_count += 1
            ## optimization(?) for fast mode - skip file already containing some data/time string
            if options.fast is True:
                if date_time_search is not None:
                    current_date_time_prefix = date_time_search.groups()[0]
                    current_date_time_str = date_time_search.groups()[1]
                    if current_date_time_prefix == '':
                        print(' WARNING: File name "%s" (image) apparently starts with some date-time string "%s"' % (file_name, current_date_time_str), end='')
                    else:
                        print(' WARNING: File name "%s" (image) apparently contains some date-time string "%s"' % (file_name, current_date_time_str), end='')
                    print(' => fast mode - skipping ... ')
                    skipped_count += 1
                    continue
            with open(file_path, 'rb') as img_file:
                # Only the header up to DateTimeOriginal is parsed, for speed.
                exif_tags = exifread.process_file(img_file, details=False, stop_tag='DateTimeOriginal')
                # print ' DEBUG: exif_tags=(%d)' % (len(exif_tags),)
                if 'EXIF DateTimeOriginal' in exif_tags:
                    # 'YYYY:MM:DD hh:mm:ss' -> 'YYYYMMDD_hhmmss'
                    date_time_str = str(exif_tags['EXIF DateTimeOriginal']).replace(':', '').replace(' ', '_')
                else:
                    print(' WARNING: File path "%s" (image) is missing an EXIF tag for original/creation date-time => skipping ... ' % file_path)
                    failed_count += 1
                    continue
        elif guessed_mime_type.startswith('video'):
            if options.skip_video is True:
                print (' INFO: File name "%s" guessed mime-type is video, which is not ot be processed => skipping ...' % file_name)
                skipped_count += 1
                continue
            processed_count += 1
            ## optimization(?) for fast mode - skip file already containing some data/time string
            if options.fast is True:
                if date_time_search is not None:
                    current_date_time_prefix = date_time_search.groups()[0]
                    current_date_time_str = date_time_search.groups()[1]
                    if current_date_time_prefix == '':
                        print(' WARNING: File name "%s" (video) apparently starts with some date-time string "%s"' % (file_name, current_date_time_str), end='')
                    else:
                        print(' WARNING: File name "%s" (video) apparently contains some date-time string "%s"' % (file_name, current_date_time_str), end='')
                    print(' => fast mode - skipping ... ')
                    skipped_count += 1
                    continue
            try:
                (date_time, _) = get_mov_timestamps(file_path)
            except RuntimeError:
                print(' ERROR! File path "%s" (video) cannot be extracted original/creation date-time => skipping ... ' % file_path)
                failed_count += 1
                continue
            if date_time is None:
                print(' WARNING: File path "%s" (video) is missing original/creation date-time => skipping ... ' % file_path)
                failed_count += 1
                continue
            date_time_str = date_time.strftime("%Y%m%d_%H%M%S")
            # NOTE: a large commented-out legacy fallback that parsed the
            # MOV/MP4 'moov/mvhd/creation_date' atom with hachoir used to
            # live here; it was superseded by get_mov_timestamps() above.
        else:
            print(' INFO: File name "%s" guessed mime-type is neither image nor video => skipping ... ' % file_name)
            skipped_count += 1
            continue
        # print(' DEBUG: date_time_str [%s] ' % (date_time_str,))
        if date_time_str is None:
            print(' ERROR! Failed to determine original/creation date-time for image or video => skipping ... ')
            failed_count += 1
            continue
        ## verify pattern of the date-time string
        if date_time_verify_re.match(date_time_str) is None:
            print(' ERROR! Invalid/unexpected format of determined date-time string "%s" => skipping ... ' % (date_time_str,))
            failed_count += 1
            continue
        ## verify current file-name containts either the same or any other date-time string
        if date_time_search is not None:
            current_date_time_prefix = date_time_search.groups()[0]
            current_date_time_str = date_time_search.groups()[1]
            if current_date_time_prefix == '':
                if current_date_time_str == date_time_str:
                    print(' INFO: File name "%s" already starts with original/creation date-time string "%s"' % (file_name, date_time_str), end='')
                else:
                    print(' WARNING: File name "%s" apparently starts with date-time string "%s" other than determined "%s"' % (file_name, current_date_time_str, date_time_str), end='')
            else:
                if current_date_time_str == date_time_str:
                    print(' INFO: File name "%s" apparently contains original/creation date-time string "%s"' % (file_name, date_time_str), end='')
                else:
                    print(' WARNING: File name "%s" apparently contains date-time string "%s" other than determined "%s"' % (file_name, current_date_time_str, date_time_str), end='')
            if options.force is True:
                print(' => forcing rename ... ')
            else:
                print(' => skipping ... ')
                skipped_count += 1
                continue
        # Build the new name: replace entirely (--erase), keep if already
        # prefixed, or prepend the date-time string.
        new_file_name = ''
        if options.erase is True:
            new_file_name = date_time_str + file_path.suffix
        elif file_name.startswith(date_time_str):
            new_file_name = file_name
        else:
            new_file_name = date_time_str + '_' + file_name
        if new_file_name == file_name:
            print(' INFO: Keeping original file name "%s" => skipping ... ' % (file_name))
            continue
        renamed_count += 1
        # new_file_path = dir_path.joinpath(new_file_name)
        new_file_path = file_path.with_name(new_file_name)
        # print('new_file_path=%s' % (new_file_path,))
        if new_file_path.exists():
            ## TODO: consider using the '--force' option to erase duplicate?
            if read_sha1_hexhash(file_path) == read_sha1_hexhash(new_file_path):
                print(' WARNING: New file name "%s" already exists and is identical file to the original file name "%s"; consider removing duplicate => skipping ... ' % (new_file_name, file_name))
            else:
                print(' ERROR: New file name "%s" already exists and is different file than the original file name "%s"; consider manual renaming => skipping ... ' % (new_file_name, file_name))
            failed_count += 1
            continue
        if options.dry_run is not True:
            print(' INFO: Renaming file name "%s" to "%s" ... ' % (file_name, new_file_name))
            # try:
            file_path.rename(new_file_path)
            # except
        else:
            print(' INFO: Dry-run - would be renaming file name "%s" to "%s" ... ' % (file_name, new_file_name))
def dng_blacklevel(file):
    """Return the black level of a DNG image (numerator of the first
    BlackLevel rational, DNG tag 0xC61A).

    Args:
        file: path to a DNG file.

    Raises:
        KeyError: if the file carries no BlackLevel tag.
    """
    # NOTE: the parameter name shadows the (py2) builtin 'file'; kept
    # unchanged so keyword callers are not broken.
    # Context manager closes the handle; the original leaked it.
    with open(file, 'rb') as fh:
        tags = exifread.process_file(fh)
    return tags['Image Tag 0xC61A'].values[0].num
def is_rotated(fp):
    """Cheap rotation check: parse only the EXIF header up to the
    Orientation tag instead of the full read_exif(fp) pass."""
    header_tags = exifread.process_file(fp, details=False,
                                        stop_tag="Orientation")
    info = EXIFInfo(header_tags)
    return info.is_rotated
def get_exif(test_img):
    """Return the EXIF tags of the original (uncropped) counterpart of
    *test_img*.

    The original's path is derived by replacing 'crop' with 'images' in
    the given path.

    Args:
        test_img: path of a cropped test image.

    Returns:
        dict of exifread tags (empty if the file has no EXIF data).
    """
    origin_test_img = test_img.replace('crop', 'images')
    # Context manager releases the handle; the original never closed it.
    with open(origin_test_img, 'rb') as origin_fh:
        return exifread.process_file(origin_fh)
def GeraModeloFotoDef(self, context):
    """Generate a photogrammetry model from a folder of photos.

    Reads the camera model from the first photo's exif data, ensures the
    model exists in openMVG's sensor-width database (appending a default
    entry when missing), runs the openMVG/openMVS pipeline for the current
    platform, then imports and post-processes the textured mesh in Blender.
    On pipeline failure, shows a popup instead of raising.
    """
    scn = context.scene
    tmpdir = tempfile.gettempdir()
    homeall = expanduser("~")

    # --- Probe the camera model from the first photo (orig: TESTA CAMERA) ---
    mypath = scn.my_tool.path  # Must end with a / (orig: Tem que ter o / no final)
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    FotoTeste = onlyfiles[0]
    with open(mypath + FotoTeste, 'rb') as f_jpg:
        tags = exifread.process_file(f_jpg, details=True)
        print(tags['Image Model'])
        CamModel = str(tags['Image Model'])
    # print("CamModel:", CamModel)

    # --- Look the model up in the sensor database (orig: TESTA MODELO CAMERA) ---
    if platform.system() == "Linux":
        camDatabase = homeall + "/Programs/OrtogOnBlender/openMVG/sensor_width_camera_database.txt"
    if platform.system() == "Darwin":
        camDatabase = "/OrtogOnBlender/openMVGMACelcap/sensor_width_camera_database.txt"
    if platform.system() == "Windows":
        camDatabase = "C:/OrtogOnBlender/openMVGWIN/sensor_width_camera_database.txt"
        print("EH WINDOWS")

    infile = open(camDatabase, "r")
    numlines = 0
    found = 0
    # Count every occurrence of the camera model string in the database.
    for line in infile:
        numlines += 1
        while 1:
            str_found_at = line.find(CamModel)
            if str_found_at == -1:
                # string not found in line ...
                # go to next (ie break out of the while loop)
                break
            else:
                # string found in line
                found += 1
                # more than once in this line?
                # lets strip string and anything prior from line and
                # then go through the testing loop again
                line = line[str_found_at + len(CamModel):]
    infile.close()

    print(CamModel, "was found", found, "times in", numlines, "lines")

    if found == 0:
        print("Nao apareceu!")
        # Unknown camera: append it with a default 3.80 sensor width.
        with open(camDatabase, 'a') as file:
            inputCam = CamModel, "; 3.80"
            print(inputCam)
            # if platform.system() == "Darwin" or platform.system() == "Windows":
            #     file.write("\n")
            file.write("\n")
            # Write the camera model to the file
            # (orig: Escreve o modelo de camera no arquivo)
            file.writelines(inputCam)

    # --- Run the photogrammetry pipeline (orig: GERA FOTOGRAMETRIA) ---
    try:
        OpenMVGtmpDir = tmpdir + '/OpenMVG'
        tmpOBJface = tmpdir + '/MVS/scene_dense_mesh_texture2.obj'
        if platform.system() == "Linux":
            OpenMVGPath = homeall + '/Programs/OrtogOnBlender/openMVG/software/SfM/SfM_SequentialPipeline.py'
            OpenMVSPath = homeall + '/Programs/OrtogOnBlender/openMVS/OpenMVS'
        if platform.system() == "Windows":
            OpenMVGPath = 'C:/OrtogOnBlender/openMVGWin/software/SfM/SfM_SequentialPipeline.py'
            OpenMVSPath = 'C:/OrtogOnBlender/openMVSWin/OpenMVS.bat'
        if platform.system() == "Darwin":
            # if platform.release() == '15.6.0':
            #     OpenMVGPath = '/OrtogOnBlender/openMVGMACelcap/SfM_SequentialPipeline.py'
            #     OpenMVSPath = '/OrtogOnBlender/openMVSMACelcap/openMVSMAC.sh'
            # if platform.release() == '17.5.0':
            #     OpenMVGPath = '/OrtogOnBlender/openMVGMACelcap/SfM_SequentialPipeline.py'
            #     OpenMVSPath = '/OrtogOnBlender/openMVSMACelcap/openMVSMAC.sh'
            # else:
            #     OpenMVGPath = '/OrtogOnBlender/openMVGMAC/SfM_SequentialPipeline.py'
            #     OpenMVSPath = '/OrtogOnBlender/openMVSMAC/openMVSMAC.sh'
            OpenMVGPath = '/OrtogOnBlender/openMVGMACelcap/SfM_SequentialPipeline.py'
            OpenMVSPath = '/OrtogOnBlender/openMVSMACelcap/openMVSMAC.sh'

        # Clear output from any previous run.
        shutil.rmtree(tmpdir + '/OpenMVG', ignore_errors=True)
        shutil.rmtree(tmpdir + '/MVS', ignore_errors=True)
        # if os.name=='posix':
        #     shutil.rmtree(tmpdir+'/OpenMVG')
        #     shutil.rmtree(tmpdir+'/MVS')
        # if os.name=='nt':
        #     subprocess.call(['rmdir', '/Q', '/S', tmpdir+'/OpenMVG'])
        #     subprocess.call(['rmdir', '/Q', '/S', tmpdir+'/MVS'])

        # openMVG structure-from-motion, then openMVS densification/texturing.
        if platform.system() == "Linux":
            subprocess.call(
                ['python', OpenMVGPath, scn.my_tool.path, OpenMVGtmpDir])
        if platform.system() == "Windows":
            subprocess.call([
                'C:/OrtogOnBlender/Python27/python', OpenMVGPath,
                scn.my_tool.path, OpenMVGtmpDir
            ])
        if platform.system() == "Darwin":
            subprocess.call(
                ['python', OpenMVGPath, scn.my_tool.path, OpenMVGtmpDir])

        subprocess.call(OpenMVSPath, shell=True)

        # subprocess.call([ 'meshlabserver', '-i', tmpdir+'scene_dense_mesh_texture.ply', '-o', tmpdir+'scene_dense_mesh_texture2.obj', '-om', 'vn', 'wt' ])

        # Import the textured mesh and make it the active, selected object.
        bpy.ops.import_scene.obj(filepath=tmpOBJface, filter_glob="*.obj;*.mtl")
        scene_dense_mesh_texture2 = bpy.data.objects[
            'scene_dense_mesh_texture2']
        bpy.ops.object.select_all(action='DESELECT')
        bpy.context.scene.objects.active = scene_dense_mesh_texture2
        bpy.data.objects['scene_dense_mesh_texture2'].select = True

        # Shading / material tweaks on the imported mesh.
        bpy.context.object.data.use_auto_smooth = False
        bpy.context.object.active_material.specular_hardness = 60
        bpy.context.object.active_material.diffuse_intensity = 0.6
        bpy.context.object.active_material.specular_intensity = 0.3
        bpy.context.object.active_material.specular_color = (0.233015,
                                                             0.233015,
                                                             0.233015)

        # bpy.ops.object.modifier_add(type='SMOOTH')
        # bpy.context.object.modifiers["Smooth"].factor = 2
        # bpy.context.object.modifiers["Smooth"].iterations = 3
        # bpy.ops.object.convert(target='MESH')
        # bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Smooth")

        bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
        bpy.ops.view3d.view_all(center=False)
        bpy.ops.file.pack_all()

        # Smooth modifier.
        bpy.ops.object.modifier_add(type='SMOOTH')
        bpy.context.object.modifiers["Smooth"].factor = 2
        bpy.context.object.modifiers["Smooth"].iterations = 3
        #bpy.ops.object.convert(target='MESH')

        # Multires (orig: MutRes)
        bpy.ops.object.modifier_add(type='MULTIRES')
        bpy.context.object.modifiers["Multires"].show_viewport = False
        bpy.ops.object.multires_subdivide(modifier="Multires")

        # Displacement modifier driven by the baked diffuse texture.
        context = bpy.context
        obj = context.active_object
        heightTex = bpy.data.textures.new('Texture name',
                                          type='IMAGE')
        heightTex.image = bpy.data.images[
            'scene_dense_mesh_texture2_material_0_map_Kd.jpg']
        dispMod = obj.modifiers.new("Displace", type='DISPLACE')
        dispMod.texture = heightTex
        bpy.context.object.modifiers["Displace"].texture_coords = 'UV'
        bpy.context.object.modifiers["Displace"].strength = 0.035
        bpy.context.object.modifiers["Displace"].mid_level = 0.5

        # Collapse the modifier panels in the UI (orig: Comprime modificadores)
        bpy.context.object.modifiers["Smooth"].show_expanded = False
        bpy.context.object.modifiers["Multires"].show_expanded = False
        bpy.context.object.modifiers["Displace"].show_expanded = False

        bpy.ops.object.shade_smooth()
    except RuntimeError:
        # Pipeline or import failed: show a warning popup instead of raising.
        bpy.context.window_manager.popup_menu(ERROruntimeFotosDef,
                                              title="Atenção!",
                                              icon='INFO')
#!/usr/bin/python
import sys, os, exifread, datetime, subprocess
from PIL import Image

# For each AVI in the current directory: copy the DateTimeOriginal exif
# timestamp from the JPG shot just before it (DSCFnnnn.JPG, nnnn one less
# than the AVI's number), add one second so no two files share a timestamp,
# then rewrite the AVI named after that timestamp with matching ICRD
# metadata and delete the original.
for filename in os.listdir('.'):
    if 'AVI' in filename:
        # The number of the file; decrement by 1 to get the file before it.
        filenum = ''.join(i for i in filename if i.isdigit())
        copyfrom = int(filenum) - 1  # file number we copy the exif time from
        copyfromfilename = "DSCF" + str(copyfrom).zfill(4) + ".JPG"
        # Context manager closes the JPG (the original leaked the handle).
        with open(copyfromfilename, 'rb') as f:
            tags = exifread.process_file(f, details=False,
                                         stop_tag='DateTimeOriginal')
        date = tags.get('EXIF DateTimeOriginal')  # the exact tag we need
        # Convert the tag to a python datetime.
        date = datetime.datetime.strptime(str(date), '%Y:%m:%d %H:%M:%S')
        # Add 1 second, so we don't have two files with the same timestamp.
        date = date + datetime.timedelta(0, 1)
        icrd = date.strftime('%Y-%m-%d %H:%M:%S')  # value for ICRD metadata
        date = date.strftime('%Y%m%d_%H%M%S')      # new file name stem
        print("Working on AVI #" + str(filenum).zfill(4))
        # Fixes two defects of the original command: ICRD was hardcoded to a
        # sample date instead of the derived timestamp, and the command was a
        # shell string concatenated from directory names (injection-prone);
        # an argument list avoids the shell entirely.
        subprocess.call(["ffmpeg", "-loglevel", "quiet", "-i", filename,
                         "-codec", "copy", "-metadata", "ICRD=" + icrd,
                         date + ".AVI"])  # Create proper file
        os.remove(filename)  # Remove the original

# ---- Samples ----
# DSCF0070.AVI
#'EXIF DateTimeOriginal': (0x9003) ASCII=2018:09:21 16:35:35 @ 748
# Date/Time Original : 2018:09:23 13:06:10
# Create Date : 2018:09:23 13:06:10
# exiftool -ext JPG '-FileName<CreateDate' -d %Y%m%d_%H%M%S%%-c.%%e .
# ffmpeg -loglevel quiet -i DSCF0070.AVI -codec copy -metadata ICRD="2018-09-23 13:06:11" out.avi

print("Finished AVIs, fixing JPGs")
# exiftool can't write to AVIs, and apparently you have to create a new file for writing to AVIs
def read_exif(fp):
    """Parse all exif tags from the open image file *fp* and wrap them in an EXIFInfo."""
    return EXIFInfo(exifread.process_file(fp))
def get_exif_datetimeoriginal_as_str(filename):
    """Return the 'EXIF DateTimeOriginal' tag of *filename* as a string."""
    with open(filename, "rb") as fh:
        # Cheap parse: skip detailed tags and stop at the one tag we need.
        exif_tags = exifread.process_file(fh,
                                          stop_tag="DateTimeOriginal",
                                          details=False)
    return str(exif_tags["EXIF DateTimeOriginal"])
def main(args):
    """Pretty-print every exif tag found in args.raw_file."""
    raw = args.raw_file.open('rb')
    try:
        exif_tags = exifread.process_file(raw)
    finally:
        raw.close()
    pprint(exif_tags)
import exifread
import glob
import os

# Rename every *.jpg in the current directory to its exif DateTime
# (':' replaced with '_' to keep the name filesystem-safe).
files = glob.glob('*.jpg')
print(
    "Renaming the {} file(s) in this folder with a .jpg ending to a DateTime serial."
    .format(len(files)))

for name in files:
    # The original rebound the loop variable `f` to the open file object and
    # closed it by hand; keep the filename and handle separate and let the
    # context manager close the file before the rename.
    with open(name, 'rb') as fh:
        tags = exifread.process_file(fh)  # Get the metadata
    # Get the DateTime, reformat.
    new_name = str(tags['Image DateTime']).replace(':', '_') + '.jpg'
    print('Renaming {} to {}'.format(name, new_name))
    os.rename(name, new_name)  # rename (using os)
def __init__(self, fileobj):
    """Extract exif tags and XMP metadata from an open image file object."""
    # details=False: skip the expensive detailed/maker-note tag processing.
    self.tags = exifread.process_file(fileobj, details=False)
    # process_file consumes the stream; rewind so get_xmp reads from the start.
    fileobj.seek(0)
    self.xmp = get_xmp(fileobj)
# plate scale for the sensor and optical system pix = 1.18 # open a file to write results to g = open("2021-04-25-test3.txt", "w") # directory containing images dir = '/Users/stingay/Pictures/2021_04_25/' imglist = sorted(os.listdir(dir)) # open first image and extract jpg thumhail to extract timing information rawA = rawpy.imread(dir + imglist[0]) # Canon RP thumb = rawA.extract_thumb() f = open('thumb.jpg', 'wb') f.write(thumb.data) exif = exifread.process_file(open('thumb.jpg', 'rb')) for tag in exif.keys(): if tag == 'Image DateTime': datetag = exif[tag] date_time = str(datetag).split(' ') date = date_time[0].split(':') t = date_time[1].split(':') dateob = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), int(t[0]), int(t[1]), int(t[2])) print(dateob) g.write("{}\n".format(dateob)) # loop over images in directory, detect star in images, and record the coordinate information in the output file n = 0 while len(imglist) > 0: for img in imglist:
def ev_from_exif(img_path: Path):
    """Compute an exposure value from an image's exif data.

    Returns 2*log2(FNumber) + log2(den/num of ShutterSpeedValue), i.e.
    log2(N^2 / t) with the ShutterSpeedValue rational treated as the
    shutter time.
    """
    # Context manager closes the file (the original leaked the handle opened
    # inline in the process_file call).
    with img_path.open("rb") as fh:
        tags = exifread.process_file(fh, details=False)
    # NOTE(review): only .num of the FNumber rational is used; if the camera
    # stores e.g. 28/10 for f/2.8 this is wrong — confirm against sample
    # files. Likewise ShutterSpeedValue is an APEX value in the exif spec;
    # verify den/num really is the shutter time here.
    fnumber = tags["EXIF FNumber"].values[0].num
    shutter_speed_ratio = tags["EXIF ShutterSpeedValue"].values[0]
    return (2 * np.log2(fnumber)) + np.log2(
        shutter_speed_ratio.den / shutter_speed_ratio.num)