def write_infos():
    """Use Google Maps to get GPS infos of places.

    Uses ``tags_places.csv`` as input and fills it with GPS coordinates and
    location names.

    Output: ``tags_places_gmaps.csv``
    """
    csv.register_dialect('semicolon', delimiter=';', lineterminator='\n')
    filename = get_info_dir("tags_places.csv")
    outname = get_info_dir("tags_places_gmaps.csv")
    # FIX: use context managers so both files are closed even when
    # get_info() or the csv parsing raises (the old code leaked outfile).
    with open(outname, "w") as outfile:
        writer = csv.DictWriter(
            outfile,
            fieldnames=["directory", "name_part", "Location", "gps"],
            dialect="semicolon")
        writer.writeheader()
        with open(filename, "r") as infile:
            reader = csv.DictReader(infile, dialect="semicolon")
            for row in reader:
                # query string: "<directory> <name_part>", trimmed
                search = row["directory"] + " " + row["name_part"]
                result = get_info(search.strip(" "))
                # one output row per place candidate returned by the API
                for candidate in result["candidates"]:
                    outdir = OrderedDict()
                    outdir["directory"] = row["directory"]
                    outdir["name_part"] = row["name_part"]
                    loc = candidate["geometry"]["location"]
                    outdir["gps"] = "%f, %f" % (loc["lat"], loc["lng"])
                    outdir["Location"] = candidate["name"]
                    writer.writerow(outdir)
def create_tags_csv(location: str = ""):
    """Extract tags from the file names and write a csv file with those tags.

    :param location: optional content of directory column

    The resulting csv can be modified to be used with
    :func:`write_exif_using_csv` or :func:`placeinfo.write_infos`.
    If you want to modify it with EXCEL or Calc take care to import all
    columns of the csv as text.
    """
    inpath = os.getcwd()
    tag_set = OrderedSet()
    tag_set_names = OrderedSet()
    out_filename = get_info_dir("tags_places.csv")
    tags_places_file, writer = fileop.create_csv_writer(
        out_filename, ["directory", "name_part"])
    # collect every tag occurring in any image file name below inpath
    accessors = [
        FilenameAccessor(name)
        for name in get_plain_filenames_of_type(image_types, inpath)
    ]
    for accessor in accessors:
        for tag in accessor.tags():
            tag_set.add(tag)
    # human readable overview of all tags
    writeToFile(get_info_dir("tags.txt"),
                location + "\n\t" + "\n\t".join(tag_set) + "\n")
    for tag in tag_set:
        tag_set_names.add((location, tag))
    writer.writerows(tag_set_names)
    tags_places_file.close()
def create_counters_csv_per_dir():
    """Extract counters from the file names and write a csv file with those
    counters for each directory.

    This csv can be modified to be used with :func:`write_exif_using_csv`.
    If you want to modify it with EXCEL or Calc take care to import all
    columns of the csv as text.
    """
    # BUG FIX: previously logged create_tags_csv_per_dir.__name__,
    # i.e. the name of a different function.
    log_function_call(create_counters_csv_per_dir.__name__)
    inpath = os.getcwd()
    tag_set_names = OrderedSet()
    out_filename = get_info_dir("tags_counters.csv")
    csvfile, writer = fileop.create_csv_writer(out_filename, [
        "directory", "name_main", "name_part", "first", "last", "tags3",
        "description"
    ])
    for (dirpath, dirnames, filenames) in os.walk(inpath):
        # only direct subdirectories of the working directory are processed
        if not inpath == dirpath:
            continue
        for dirname in dirnames:
            filenameAccessors = [
                FilenameAccessor(filename)
                for filename in get_plain_filenames_of_type(
                    image_types, dirpath, dirname)
            ]
            if len(filenameAccessors) == 0:
                continue
            _add_counter_csv_entries(dirname, filenameAccessors,
                                     tag_set_names)
    writer.writerows(tag_set_names)
    csvfile.close()
def create_tags_csv_per_dir():
    """Extract tags from the file names and write a csv file with those tags,
    grouped by toplevel directory.

    This csv can be modified to be used with :func:`write_exif_using_csv`
    or :func:`placeinfo.write_infos`.
    If you want to modify it with EXCEL or Calc take care to import all
    columns of the csv as text.
    """
    log_function_call(create_tags_csv_per_dir.__name__)
    inpath = os.getcwd()
    tag_set_names = OrderedSet()
    out_filename = get_info_dir("tags_per_dir.csv")
    tags_places_file, writer = fileop.create_csv_writer(
        out_filename, ["directory", "name_part"])
    for dirpath, dirnames, _ in os.walk(inpath):
        # only direct subdirectories of the working directory are processed
        if not inpath == dirpath:
            continue
        for dirname in dirnames:
            collected = OrderedSet()
            accessors = [
                FilenameAccessor(name)
                for name in get_plain_filenames_of_type(
                    image_types, dirpath, dirname)
            ]
            if not accessors:
                continue
            for accessor in accessors:
                for tag in accessor.tags():
                    collected.add(tag)
            # human readable overview of the tags of this directory
            writeToFile(get_info_dir("tags.txt"),
                        dirname + "\n\t" + "\n\t".join(collected) + "\n")
            # drop purely numeric name parts (e.g. dates/counters)
            non_numeric = [
                part for part in dirname.split("_") if not part.isnumeric()
            ]
            dirname = "_".join(non_numeric)
            for tag in collected:
                tag_set_names.add((dirname, tag))
    writer.writerows(tag_set_names)
    tags_places_file.close()
def _read_timetable_new(filename: str = None):
    """Read a timetable file into an ordered mapping.

    Each line has the form ``dir_name; start; end`` (semicolon separated);
    lines with an empty start or end are skipped.

    :param filename: timetable file; defaults to ``timetable.txt`` in the
        info directory
    :return: OrderedDict mapping directory name -> (start, end) datetimes,
        parsed with ``_read_timetable.timeformat``
    """
    if not filename:
        filename = get_info_dir("timetable.txt")
    dirNameDict = OrderedDict()
    # FIX: context manager guarantees the file is closed even if a line
    # fails to parse (the old code only closed on the happy path).
    with open(filename, 'r') as file:
        for line in file:
            dir_name, start, end = [
                entry.strip(' ').strip('\r\n') for entry in line.split(';')
            ]
            if not start or not end:
                continue
            start = dt.datetime.strptime(start, _read_timetable.timeformat)
            end = dt.datetime.strptime(end, _read_timetable.timeformat)
            dirNameDict[dir_name] = (start, end)
    return dirNameDict
def better_gpx_via_timetable(gpxfilename: str):
    """
    crossmatch gpx file with timetable and take only entries for which
    photos exist

    :param gpxfilename: input gpx file

    output: _new1.gpx containing only usefull locations
        _new2.gpx containing only not usefull locations

    does not uses exif infos
    """

    # NOTE: closure — reads dirName and line from the enclosing loop scope;
    # dirName_last and gpxfile_out are passed explicitly.
    def write(dirName_last, gpxfile_out):
        # when the directory changes, close the previous track segment
        # (unless this is the very first one) and open a new named track
        if dirName != dirName_last:
            if not dirName_last == "":
                gpxfile_out.write("</trkseg></trk>\r\n")
            gpxfile_out.write("<trk><name>" + dirName + "</name><trkseg>\r\n")
        gpxfile_out.write(line)

    timefile = get_info_dir("timetable.txt")
    gpxfilename = get_gps_dir(gpxfilename)
    dirNameDict = _read_timetable_new(timefile)
    # captures the timestamp between <time> and </time> in group 2
    timeregex = re.compile("(.*<time>)([^<]*)(</time>.*)")
    gpxfilename_out, ext = gpxfilename.rsplit('.', 1)
    dirName_last1 = ""
    dirName_last2 = ""
    gpxfile_out1 = open(gpxfilename_out + "_new1." + ext, "w")
    gpxfile_out2 = open(gpxfilename_out + "_new2." + ext, "w")
    with open(gpxfilename, "r") as gpxfile:
        for line in gpxfile:
            match = timeregex.match(line)
            if not match:
                # non-trackpoint lines (header/footer) go to both outputs;
                # before </gpx> the still-open track segments are closed
                if "</gpx>" in line:
                    gpxfile_out1.write("</trkseg></trk>\r\n")
                    gpxfile_out2.write("</trkseg></trk>\r\n")
                gpxfile_out1.write(line)
                gpxfile_out2.write(line)
                continue
            # convert waypoints to trackpoints
            line = line.replace("wpt", "trkpt")
            time = match.group(2)
            time = dt.datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ")
            # match the point's time against the timetable (1 h tolerance)
            dirName = find_dir_with_closest_time_new(dirNameDict, time, 3600)
            if "unrelated" in dirName:
                # no matching photo directory -> second ("not usefull") file
                write(dirName_last2, gpxfile_out2)
                dirName_last2 = dirName
            else:
                write(dirName_last1, gpxfile_out1)
                dirName_last1 = dirName
    gpxfile_out1.close()
    gpxfile_out2.close()
def print_timetable():
    """Print the time of the first and last picture in each directory to
    ``timetable.txt`` (appending to any existing content)."""
    inpath = os.getcwd()
    # FIX: context manager ensures the file is closed even if _get_time or
    # the directory scan raises (the old code only closed on success).
    with open(get_info_dir("timetable.txt"), 'a') as ofile:
        for (dirpath, dirnames, filenames) in os.walk(inpath):
            # only direct subdirectories of the working directory
            if not inpath == dirpath:
                continue
            for dirname in dirnames:
                # skip hidden directories
                if dirname.startswith('.'):
                    continue
                log().info("Folder: %s", dirname)
                fotos = get_filename_sorted_dirfiletuples(
                    settings.image_types, inpath, dirname)
                if not fotos:
                    continue
                first = _get_time(fotos[0])
                last = _get_time(fotos[-1])
                ofile.write("%-55s; %12s; %12s\n" % (dirname, first, last))
def create_names_csv_per_dir(start_after_dir=''):
    """Extract names from the file path and write a csv file with those
    names for each directory.

    This csv can be modified to be used with :func:`write_exif_using_csv`.
    If you want to modify it with EXCEL or Calc take care to import all
    columns of the csv as text.
    """
    log_function_call(create_names_csv_per_dir.__name__)
    inpath = os.getcwd()
    tag_set_names = OrderedSet()
    out_filename = get_info_dir("tags_names.csv")
    csvfile, writer = fileop.create_csv_writer(
        out_filename, ["directory", "name_main", "tags"])

    def _row(tags, main_name):
        # (directory, name_main, tags) with main_name merged into the tags
        return (", ".join(tags), main_name,
                ', '.join(OrderedSet(tags + [main_name])))

    for dirpath, _, filenames in os.walk(inpath):
        if is_invalid_path(dirpath):
            continue
        accessors = [
            FilenameAccessor(name)
            for name in filterFiles(filenames, image_types)
        ]
        if not accessors:
            continue
        # collect path components below start_after_dir as tags
        tags = []
        below_start = False
        for part in dirpath.split(os.sep):
            if below_start:
                tags += part.split(', ')
            else:
                below_start = part == start_after_dir
        # emit one row per run of files sharing the same main name
        previous = accessors[0]
        tag_set_names.add(_row(tags, previous.pre))
        for accessor in accessors[1:]:
            if accessor.pre != previous.pre:
                tag_set_names.add(_row(tags, accessor.pre))
                previous = accessor
    writer.writerows(tag_set_names)
    csvfile.close()
def order_with_timetable(timefile: str = None):
    """Use the timetable to create a folder structure.

    :param timefile: timetable file; defaults to ``timetable.txt`` in the
        info directory
    :return:
    """
    if not timefile:
        timefile = get_info_dir("timetable.txt")
    log_function_call(order_with_timetable.__name__, timefile)
    dirNameDict_firsttime, dirNameDict_lasttime = _read_timetable(timefile)
    tag_dict = read_exiftags()
    number_of_files = len(list(tag_dict.values())[0])
    log().info('Number of jpg: %d', number_of_files)
    for index in range(number_of_files):
        model = create_model(tag_dict, index)
        timestamp = giveDatetime(model.get_date())
        target_dir = find_dir_with_closest_time(dirNameDict_firsttime,
                                                dirNameDict_lasttime,
                                                timestamp)
        if target_dir:
            move(model.filename, model.dir,
                 os.path.join(os.getcwd(), target_dir))
def find_bad_exif(do_move=True,
                  check_date_additional=False,
                  folder: str = r""):
    """
    find files with missing exif data

    :param do_move: move offending files into ``bad_exif_*`` subdirectories
    :param check_date_additional: also report files that carry additional
        date tags (Date Created, Create Date, Modify Date, ...)
    :param folder: regex passed to is_invalid_path to restrict the scan
    """
    log_function_call(find_bad_exif.__name__, do_move)
    clock = Clock()
    inpath = os.getcwd()
    # collected (directory, filename-without-counter) rows per report
    lines_no_tags = OrderedSet()
    lines_bad_date_additional = OrderedSet()
    lines_date_missing = OrderedSet()
    out_filename_no_tags = get_info_dir("no_tags.csv")
    file_no_tags, writer_no_tags = fileop.create_csv_writer(
        out_filename_no_tags, ["directory", "name_part"])
    out_filename_bad_date_additional = get_info_dir("bad_date_additional.csv")
    file_bad_date_additional, writer_bad_date_additional = fileop.create_csv_writer(
        out_filename_bad_date_additional, ["directory", "name_part"])
    out_filename_date_missing = get_info_dir("date_missing.csv")
    file_date_missing, writer_date_missing = fileop.create_csv_writer(
        out_filename_date_missing, ["directory", "name_part"])
    for (dirpath, dirnames, filenames) in os.walk(inpath):
        if is_invalid_path(dirpath, regex=folder):
            continue
        if fileop.count_files(filenames, settings.image_types) == 0:
            continue
        Tagdict = read_exiftags(dirpath, settings.image_types, ask=False)
        if len(list(Tagdict.values())) == 0:
            continue
        # number of files read for this directory
        leng = len(list(Tagdict.values())[0])
        for i in range(leng):
            # 1) any of the keyword/description tags empty or absent
            if (not "Keywords" in Tagdict or not Tagdict["Keywords"][i]) or \
                    (not "Subject" in Tagdict or not Tagdict["Subject"][i]) or \
                    (not "Description" in Tagdict or not Tagdict["Description"][i]) or \
                    (not "User Comment" in Tagdict or not Tagdict["User Comment"][i]):
                lines_no_tags.add((os.path.basename(dirpath),
                                   _remove_counter(Tagdict["File Name"][i])))
                # "bad_exif" check avoids re-moving already moved files
                if do_move and not "bad_exif" in dirpath:
                    move(
                        Tagdict["File Name"][i], dirpath,
                        dirpath.replace(
                            inpath, os.path.join(inpath,
                                                 "bad_exif_keywords")))
            # 2) original date/time tag empty or absent
            if not "Date/Time Original" in Tagdict or not Tagdict[
                    "Date/Time Original"][i]:
                lines_date_missing.add(
                    (os.path.basename(dirpath),
                     _remove_counter(Tagdict["File Name"][i])))
                if do_move and not "bad_exif" in dirpath:
                    move(
                        Tagdict["File Name"][i], dirpath,
                        dirpath.replace(
                            inpath,
                            os.path.join(inpath, "bad_exif_date_missing")))
            # 3) optionally: any additional creation/modification date tag set
            if check_date_additional and \
                    (("Date Created" in Tagdict and Tagdict["Date Created"][i]) or
                     ("Time Created" in Tagdict and Tagdict["Time Created"][i]) or
                     ("Create Date" in Tagdict and Tagdict["Create Date"][i]) or
                     ("Modify Date" in Tagdict and Tagdict["Modify Date"][i]) or
                     ("Digital Creation Date" in Tagdict and Tagdict["Digital Creation Date"][i])):
                lines_bad_date_additional.add(
                    (os.path.basename(dirpath),
                     _remove_counter(Tagdict["File Name"][i])))
                if do_move and not "bad_exif" in dirpath:
                    move(
                        Tagdict["File Name"][i], dirpath,
                        dirpath.replace(
                            inpath,
                            os.path.join(inpath,
                                         "bad_exif_date_additional")))
    writer_no_tags.writerows(lines_no_tags)
    writer_bad_date_additional.writerows(lines_bad_date_additional)
    writer_date_missing.writerows(lines_date_missing)
    file_no_tags.close()
    file_bad_date_additional.close()
    file_date_missing.close()
    clock.finish()