def image_time_stamper(args):
    """Write capture time stamps into the EXIF metadata of a folder of photos.

    Depending on ``args.mode`` (one of ``manual``, ``offset``, ``inherit``,
    ``reverse``) every image found in ``args.input_path`` gets an updated
    ``DateTimeOriginal`` tag, or -- in ``reverse`` mode -- updated
    ``GPSTimeStamp``/``GPSDateStamp`` tags, written via exiftool.

    Interacts with the user through the console and terminates the
    interpreter (``quit()``) when done or on invalid input.

    Args:
        args: parsed command-line namespace providing ``input_path``,
            ``output_directory``, ``mode``, ``executable_path`` and the
            mode-specific options ``start_time``/``interval``/``offset``.
    """
    script_path = Path(__file__)
    input_photo_directory = os.path.abspath(args.input_path)
    output_photo_directory = os.path.abspath(args.output_directory)
    mode = args.mode.lower()

    is_win_shell = True

    # Validate input paths.  When the folder is not found as given, retry the
    # *raw* argument relative to the script's own directory.  (Joining the
    # already-abspath'd value, as this code previously did, is a no-op:
    # os.path.join discards its left operand when the right one is absolute,
    # so the script-relative fallback could never succeed.)
    if not os.path.isdir(input_photo_directory):
        script_dir = script_path.parent.resolve()
        script_relative_input = os.path.join(script_dir, args.input_path)
        if os.path.isdir(script_relative_input):
            input_photo_directory = script_relative_input
            if not os.path.isdir(output_photo_directory):
                output_photo_directory = os.path.join(script_dir, args.output_directory)
        else:
            # print() the diagnostic instead of input() so the user is not
            # forced to press Enter twice before seeing the real prompt.
            print('No valid input folder is given!\nInput folder {0} or {1} does not exist!'.format(
                input_photo_directory, script_relative_input))
            input('Press any key to continue')
            quit()

    print('The following input folder will be used:\n{0}'.format(input_photo_directory))
    print('The following output folder will be used:\n{0}'.format(output_photo_directory))

    # Often the exiftool.exe will not be in Windows's PATH, so fall back to a
    # copy shipped next to this script when no explicit path was supplied.
    if args.executable_path == 'No path specified':
        # NOTE: the 'darwin' exclusion matters because 'win' in 'darwin' is
        # True; the combined test keeps cygwin on the Windows path.
        if 'win' in sys.platform and not 'darwin' in sys.platform:
            bundled_exiftool = os.path.join(script_path.parent.resolve(), 'exiftool.exe')
            if os.path.isfile(bundled_exiftool):
                exiftool.executable = bundled_exiftool
            else:
                input("""Executing this script on Windows requires either the "-e" option
                        or store the exiftool.exe file in the working directory.\n\nPress any key to quit...""")
                quit()
        else:
            # exiftool is expected to be on PATH for mac/linux.
            is_win_shell = False
    else:
        exiftool.executable = args.executable_path

    # Validate input mode
    if mode not in ('manual', 'offset', 'inherit', 'reverse'):
        input("""Mode should be one of "manual", "offset", "inherit", "reverse".\n\nPress any key to quit...""")
        quit()

    # Create destination directory (single level; parent must already exist).
    if not os.path.isdir(output_photo_directory):
        os.mkdir(output_photo_directory)

    # Get files in directory
    list_of_files = get_files(input_photo_directory)
    print('{0} file(s) have been found in input directory'.format(len(list_of_files)))

    # Fetch the EXIF metadata of every file in a single exiftool session.
    print('Fetching metadata from all images....\n')
    with exiftool.ExifTool(win_shell=is_win_shell) as et:
        list_of_metadata = [{'IMAGE_NAME': image, 'METADATA': et.get_metadata(image)} for image in list_of_files]

    # One row per image: image name plus its raw metadata dict.
    df_images = pd.DataFrame(list_of_metadata)

    # Dispatch to the per-mode metadata updater; each returns the (possibly
    # filtered) dataframe with the datetime column(s) it needs filled in.
    if mode == 'manual':
        start_time = args.start_time
        interval = int(args.interval)
        df_images = update_metadata_manual(df_images, start_time, interval)
    elif mode == 'offset':
        offset = int(args.offset)
        df_images = update_metadata_offset(df_images, offset)
    elif mode == 'inherit':
        df_images = update_metadata_inherit(df_images)
    else:
        df_images = update_metadata_reverse(df_images)

    # For each image, write the computed date/time back into the EXIF tags.
    print('Writing metadata to EXIF of {} qualified images...\n'.format(len(df_images.index)))

    with exiftool.ExifTool(win_shell=is_win_shell) as et:
        if mode != 'reverse':
            for _, row in df_images.iterrows():
                et.execute(
                    bytes('-DateTimeOriginal={0}'.format(row['ORIGINAL_DATETIME'].strftime("%Y:%m:%d %H:%M:%S")),
                          'utf-8'),
                    bytes('{0}'.format(row['IMAGE_NAME']), 'utf-8'))
        else:
            # 'reverse' mode rewrites the GPS date/time tags instead.
            for _, row in df_images.iterrows():
                et.execute(
                    bytes('-GPSTimeStamp={}'.format(row['GPS_DATETIME'].strftime("%H:%M:%S")), 'utf-8'),
                    bytes('-GPSDateStamp={}'.format(row['GPS_DATETIME'].strftime("%Y:%m:%d")), 'utf-8'),
                    bytes("{}".format(row['IMAGE_NAME']), 'utf-8'))

    clean_up_new_files(output_photo_directory, list(df_images['IMAGE_NAME'].values))

    input('\nMetadata successfully added to images.\n\nPress any key to quit')
    quit()
# Example #2 (scraped site marker, originally "Beispiel #2" / "0"; commented out so the file parses)
def make_sequence(args):
    '''
    Build a connected "timelapse sequence" from a folder of geotagged photos.

    You define the timelapse series of photos, desired photo spacing (by distance or capture time), and how they should be connected
    IF distance selected, the script calculates the distance between photos
    The script orders the photos in specified order (either capture time or distance)
    The script discards images that don't match the specified spacing condition
    The script calculates the distance, elevation change, time difference, and heading between remaining photos
    The script writes a JSON object into the remaining photos -Exif:ImageDescription tag with this information

    Interacts with the user via the console and terminates the interpreter
    (quit()) when finished or on error.  A per-sequence report JSON file named
    "<sequence_uuid>.json" is also written to the current working directory.

    NOTE(review): a second function with this same name is defined later in
    this file and will shadow this definition at import time -- confirm which
    one is intended to be used.
    '''

    # Process import parameters
    print('\nInitializing input parameters...\n')

    #  'GPS_DATETIME' for sorting on 'time' or 'IMAGE_NAME' for sorting on 'filename'
    connection_type = args.connection_type.lower()
    CONNECTION_TYPE = 'GPS_DATETIME' if connection_type in [
        'timegps', 'timecapture'
    ] else 'IMAGE_NAME'
    # NOTE(review): equivalent to bool(args.discard == True); kept as-is.
    DISCARD = True if args.discard == True else False

    # Maximum frames per second and the matching minimum seconds-per-frame.
    MAX_FRAME_RATE, MIN_TIME_INTERVAL = handle_frame_rate(args.frame_rate)

    MIN_DISTANCE_INTERVAL = float(args.spatial_distance_min)
    MIN_ALTITUDE_INTERVAL = float(args.alt_diff_min)

    # A frame rate of 1000000 or more is treated as "no time filtering".
    TIME_FILTERING = True if MAX_FRAME_RATE < 1000000 else False
    # NOTE(review): "FITLERING" is a typo for "FILTERING" (local names only,
    # used consistently below, so behavior is unaffected).
    DISTANCE_FITLERING = True if MIN_DISTANCE_INTERVAL > 0 else False
    ALTITUDE_FITLERING = True if MIN_ALTITUDE_INTERVAL > 0 else False

    PATH = Path(__file__)
    INPUT_PHOTO_DIRECTORY = os.path.abspath(args.input_directory)
    OUTPUT_PHOTO_DIRECTORY = os.path.abspath(args.output_directory)

    # Fall back to a script-relative folder when the given one is missing.
    # NOTE(review): INPUT_PHOTO_DIRECTORY is already absolute here, and
    # os.path.join discards its left operand when the right one is absolute,
    # so this join is a no-op -- the fallback can never resolve a genuinely
    # script-relative path.  Joining args.input_directory (the raw argument)
    # would; confirm intent.
    if not os.path.isdir(os.path.abspath(INPUT_PHOTO_DIRECTORY)):
        if os.path.isdir(
                os.path.join(PATH.parent.resolve(), INPUT_PHOTO_DIRECTORY)):
            INPUT_PHOTO_DIRECTORY = os.path.join(PATH.parent.resolve(),
                                                 INPUT_PHOTO_DIRECTORY)
            if not os.path.isdir(os.path.abspath(OUTPUT_PHOTO_DIRECTORY)):
                OUTPUT_PHOTO_DIRECTORY = os.path.join(PATH.parent.resolve(),
                                                      OUTPUT_PHOTO_DIRECTORY)
        else:
            input('No valid input folder is given!\nInput folder {0} or {1} does not exist!'.format(
                os.path.abspath(INPUT_PHOTO_DIRECTORY), \
                os.path.abspath(os.path.join(PATH.parent.resolve(), INPUT_PHOTO_DIRECTORY))))
            input('Press any key to continue')
            quit()

    print('The following input folder will be used:\n{0}'.format(
        INPUT_PHOTO_DIRECTORY))
    print('The following output folder will be used:\n{0}'.format(
        OUTPUT_PHOTO_DIRECTORY))

    # Often the exiftool.exe will not be in Windows's PATH; fall back to a
    # copy next to this script when no explicit path was supplied.
    if args.executable_path == 'No path specified':
        if 'win' in sys.platform and not 'darwin' in sys.platform:
            if os.path.isfile(
                    os.path.join(PATH.parent.resolve(), 'exiftool.exe')):
                exiftool.executable = os.path.join(PATH.parent.resolve(),
                                                   'exiftool.exe')
            else:
                input(
                    """Executing this script on Windows requires either the "-e" option
                    or store the exiftool.exe file in the working directory.\n\nPress any key to quit..."""
                )
                quit()
        else:
            pass  # exiftool.executable  = 'exiftool', which if in OS PATH will be OK for mac and linux

    else:
        exiftool.executable = args.executable_path

    # Get files in directory
    list_of_files = get_files(INPUT_PHOTO_DIRECTORY, False)
    print('{0} file(s) have been found in input directory'.format(
        len(list_of_files)))

    # Fetch the EXIF metadata of every file in a single exiftool session.
    print('Fetching metadata from all images....\n')
    with exiftool.ExifTool() as et:
        list_of_metadata = [{
            'IMAGE_NAME': image,
            'METADATA': et.get_metadata(image)
        } for image in list_of_files]

    # Create dataframe from list_of_metadata with image name in column and metadata in other column
    df_images = pd.DataFrame(list_of_metadata)

    # Process images or files without metadata based on discard setting.
    print('Checking metadata tags of all images...')
    len_before_disc = len(df_images)
    # keys = ['Composite:GPSDateTime', 'Composite:GPSLatitude', 'Composite:GPSLongitude', 'Composite:GPSAltitude']
    keys = [
        'Composite:GPSLatitude', 'Composite:GPSLongitude',
        'Composite:GPSAltitude'
    ]
    values = ['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'GPS_DATETIME']

    # Pick the datetime tag to parse; the appended key fills GPS_DATETIME.
    # NOTE(review): 'filename' is checked here, but CONNECTION_TYPE above
    # only recognizes 'timegps'/'timecapture' -- confirm which tag should be
    # used for 'timecapture' vs 'filename'.
    if connection_type in ['timegps', 'filename']:
        keys.append('Composite:GPSDateTime')
    else:
        keys.append('EXIF:DateTimeOriginal')

    # parse_metadata returns one value per key; rows missing a tag come back
    # with NaNs (behavior depends on DISCARD) and are dropped just below.
    df_images[values] = df_images.apply(
        lambda x: parse_metadata(x, keys, DISCARD),
        axis=1,
        result_type='expand')

    # remove discarded images.
    df_images.dropna(axis=0, how='any', inplace=True)
    # Reset index in case an image is dropped due to DISCARD
    df_images.reset_index(inplace=True, drop=True)
    print('{0} images dropped. "DISCARD" is {1}.\n'.format(
        len_before_disc - len(df_images), DISCARD))

    # Need at least two images to form any connection.
    if len(df_images) == 0:
        print(
            'All images were discarded. No images left to process. Exiting program.'
        )
        input('Press any key to quit')
        quit()
    elif len(df_images) == 1:
        print('Only one image to process. No possible links. Exiting program.')
        input('Press any key to quit')
        quit()

    # Convert datetime from string to datetime format; GPSDateTime carries a
    # trailing 'Z' while DateTimeOriginal does not, hence the two formats.
    df_images['GPS_DATETIME'] = df_images.apply(
        lambda x: datetime.datetime.strptime(x['GPS_DATETIME'],
                                             '%Y:%m:%d %H:%M:%SZ')
        if 'Z' in x['GPS_DATETIME'] else datetime.datetime.strptime(
            x['GPS_DATETIME'], '%Y:%m:%d %H:%M:%S'),
        axis=1)

    # Sort images
    df_images.sort_values(CONNECTION_TYPE,
                          axis=0,
                          ascending=True,
                          inplace=True)

    #########################
    # Work with the resulting image dataframe to filter & find the right sequence

    # Calculate the time difference, distance and altitude difference with the NEXT image
    print(
        'Calculating differences of time, distance and altitude between images...'
    )
    for conn_type in ['DELTA_TIME', 'DISTANCE', 'DELTA_ALT']:
        df_images = calculate_to_next(df_images, conn_type)

    # Filter images, drop rows where needed and
    # re-calculate the distance and altitude differences if rows are dropped
    print('Filtering images according to input parameters...')
    len_time = len(df_images)
    df_images = generic_connection(df_images, 'DELTA_TIME', MIN_TIME_INTERVAL)\
        if TIME_FILTERING else df_images
    len_dist = len(df_images)
    print(
        '{0} images discarded due to time spacing intervals'.format(len_time -
                                                                    len_dist))
    # Distances must be recomputed after time filtering dropped rows.
    df_images = calculate_to_next(df_images,
                                  'DISTANCE') if TIME_FILTERING else df_images
    df_images = generic_connection(
        df_images, 'DISTANCE',
        MIN_DISTANCE_INTERVAL) if DISTANCE_FITLERING else df_images
    len_alt = len(df_images)
    print('{0} images discarded due to distance spacing intervals'.format(
        len_dist - len_alt))
    df_images = calculate_to_next(
        df_images, 'DELTA_ALT') if DISTANCE_FITLERING else df_images
    df_images = generic_connection(
        df_images, 'DELTA_ALT',
        MIN_ALTITUDE_INTERVAL) if ALTITUDE_FITLERING else df_images
    len_final = len(df_images)
    print('{0} images discarded due to altitude spacing intervals\n'.format(
        len_alt - len_final))

    print('\nFinal amount of images to process: {0}\n\n'.format(
        len(df_images)))
    if len(df_images) == 0:
        print(
            'All images were filtered out. No images left to process. Exiting program.'
        )
        input('Press any key to quit')
        quit()
    elif len(df_images) == 1:
        print(
            'Only one image left to process. No possible links. Exiting program.'
        )
        input('Press any key to quit')
        quit()

    # Finally, calculate all differences again to their NEXT image
    print(
        'Calculating final differences of time, distance and altitude between qualified images...'
    )
    for conn_type in ['DELTA_TIME', 'DISTANCE', 'DELTA_ALT']:
        df_images = calculate_to_next(df_images, conn_type)

    # Calculate Azimuth (heading) and Pitch
    print('Calculating heading between qualified images....')
    df_images['AZIMUTH'] = df_images.apply(
        lambda x: calculate_initial_compass_bearing(
            (x['LATITUDE'], x['LONGITUDE']),
            (x['LATITUDE_NEXT'], x['LONGITUDE_NEXT'])),
        axis=1)
    # The last image has no NEXT; reuse the second-to-last image's heading.
    df_images.iat[
        -1,
        df_images.columns.get_loc('AZIMUTH')] = df_images['AZIMUTH'].iloc[-2]
    df_images['PITCH'] = (df_images['ALTITUDE_NEXT'] -
                          df_images['ALTITUDE']) / df_images['DISTANCE']

    # Add additional required data for output json.
    # All related to PREVIOUS image: shift(1) pulls the prior row's NEXT
    # deltas down, negated so they point backwards.
    print('Setting related data of connected qualified images...')
    df_images['DISTANCE_TO_PREV'] = -1 * df_images['DISTANCE'].shift(
        1, fill_value=0)
    df_images['DELTA_TIME_TO_PREV'] = -1 * df_images['DELTA_TIME'].shift(1)
    df_images['DELTA_ALT_TO_PREV'] = -1 * df_images['DELTA_ALT'].shift(1)
    df_images['PITCH_TO_PREV'] = -1 * df_images['PITCH'].shift(1)
    df_images['AZIMUTH_TO_PREV'] = (df_images['AZIMUTH'].shift(1) + 180) % 360

    # The first image has no PREVIOUS; zero out its backward-looking fields.
    df_images.iat[0, df_images.columns.get_loc('DELTA_ALT_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('DISTANCE_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('DELTA_TIME_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('AZIMUTH_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('PITCH_TO_PREV')] = 0

    # Add names of the NEXT and PREVIOUS image for quicker reference
    df_images['IMAGE_NAME_NEXT'] = df_images['IMAGE_NAME'].shift(-1)
    df_images['IMAGE_NAME_PREV'] = df_images['IMAGE_NAME'].shift(1)

    # Assign UUID (uuid1 is time-based; shifted columns give the neighbours'
    # ids, with NaN at the ends -- those are stripped later).
    df_images['UUID'] = df_images.apply(lambda x: str(uuid.uuid1()), axis=1)
    df_images['UUID_NEXT'] = df_images['UUID'].shift(-1)
    df_images['UUID_PREV'] = df_images['UUID'].shift(1)

    # Create the global JSON structure
    # Main keys will be the image to which the subkeys will be added to

    print('\nGenerating JSON object...')
    sequence_uuid = uuid.uuid1()

    duration_sec = (df_images['GPS_DATETIME'].iloc[-1] -
                    df_images['GPS_DATETIME'].iloc[0]).total_seconds()
    total_distance = df_images['DISTANCE'].sum() / 1000
    report_json = {
        "sequence": {
            "id":
            str(sequence_uuid),
            "distance_km":
            total_distance,
            "earliest_time":
            df_images['GPS_DATETIME'].iloc[0].strftime('%Y:%m:%d %H:%M:%SZ'),
            "latest_time":
            df_images['GPS_DATETIME'].iloc[-1].strftime('%Y:%m:%d %H:%M:%SZ'),
            "duration_sec":
            duration_sec,
            "average_speed_kmh":
            total_distance * 3600 / duration_sec if duration_sec != 0 else 0,
            "uploader_sequence_name":
            None,  # not currently used
            "uploader_sequence_description":
            None,  # not currently used
            "uploader_transport_type":
            None  # not currently used
        },
        "photo": {}
    }

    def get_origin_value(df_row, available_keys):
        # Return the first truthy metadata value among available_keys, or "".
        for key in available_keys:
            if df_row['METADATA'].get(key):
                return df_row['METADATA'].get(key)
        return ""

    # Build one description dict per image, keyed by its UUID.
    descriptions = {}
    for index, k in df_images.iterrows():
        # NOTE(review): the "orignal_*" keys below look like typos for
        # "original_*", and "XMP:PosePoseRollDegrees" for
        # "XMP:PoseRollDegrees" -- but they are runtime strings consumed by
        # downstream tooling, so they must not be changed without checking
        # every consumer of this JSON.
        photo_dict = {
            "id":
            k['UUID'],
            "original_GPSDateTime":
            k['METADATA'].get('Composite:GPSDateTime'),
            "original_originalDateTime":
            k['METADATA'].get('EXIF:DateTimeOriginal'),
            "cli_connection_method":
            connection_type,
            "cli_frame_rate_set":
            MAX_FRAME_RATE,
            "cli_altitude_min_set":
            MIN_ALTITUDE_INTERVAL,
            "cli_distance_min_set":
            MIN_DISTANCE_INTERVAL,
            "original_filename":
            k['IMAGE_NAME'],
            "original_altitude":
            k['METADATA'].get('Composite:GPSAltitude'),
            "original_latitude":
            k['METADATA'].get('Composite:GPSLatitude'),
            "original_longitude":
            k['METADATA'].get('Composite:GPSLongitude'),
            "orignal_gps_direction_ref":
            k['METADATA'].get('EXIF:GPSImgDirectionRef', ""),
            "orignal_gps_speed":
            k['METADATA'].get('EXIF:GPSSpeed', ""),
            "original_heading":
            get_origin_value(
                k, ["XMP:PoseHeadingDegrees", "EXIF:GPSImgDirection"]),
            "original_pitch":
            get_origin_value(k, ["XMP:PosePitchDegrees", "EXIF:GPSPitch"]),
            "original_roll":
            get_origin_value(k, ["XMP:PosePoseRollDegrees", "EXIF:GPSRoll"]),
            "original_camera_make":
            k['METADATA'].get('EXIF:Make'),
            "original_camera_model":
            k['METADATA'].get('EXIF:Model'),
            "original_projection":
            k['METADATA'].get('XMP:ProjectionType'),
            "software_version":
            1.0,  # shows version of sequence maker used from version txt,
            "uploader_photo_from_video":
            None,  # not currently used,
            "uploader_nadir_added":
            None,  # not currently used,
            "uploader_blur_added":
            None,  # not currently used,
            "uploader_gps_track_added":
            None,  # not currently used,
            "uploader_gps_modified":
            None,  # not currently used,
            "uploader_tags":
            None,  # not currently used
            'connections': {
                k['UUID_NEXT']: {
                    'distance_mtrs':
                    k['DISTANCE'],
                    'elevation_mtrs':
                    k['DELTA_ALT'],
                    'heading_deg':
                    k['AZIMUTH'],
                    'pitch_deg':
                    k['PITCH'],
                    'time_sec':
                    k['DELTA_TIME'],
                    'speed_kmh': (k['DISTANCE'] * 3600) /
                    (k['DELTA_TIME'] * 1000) if k['DELTA_TIME'] != 0 else 0
                },
                k['UUID_PREV']: {
                    'distance_mtrs':
                    k['DISTANCE_TO_PREV'],
                    'elevation_mtrs':
                    k['DELTA_ALT_TO_PREV'],
                    'heading_deg':
                    k['AZIMUTH_TO_PREV'],
                    'adj_heading_deg':
                    abs(k['AZIMUTH'] - k['AZIMUTH_TO_PREV']),
                    'pitch_deg':
                    k['PITCH_TO_PREV'],
                    'time_sec':
                    k['DELTA_TIME_TO_PREV'],
                    'speed_kmh': (k['DISTANCE_TO_PREV'] * 3600) /
                    (k['DELTA_TIME_TO_PREV'] * 1000)
                    if k['DELTA_TIME_TO_PREV'] != 0 else 0
                }
            }
        }
        descriptions.update({
            k['UUID']: {
                "photo": photo_dict,
                "sequence": report_json["sequence"].copy(),
            }
        })
        report_json["photo"].update({index + 1: photo_dict.copy()})

    # Map each UUID back to its image path for the exiftool write below.
    img_id_link = {
        k['UUID']: k['IMAGE_NAME']
        for index, k in df_images.iterrows()
    }

    # Remove the 'nan' links of the first image to its PREVIOUS, and
    # the NEXT image of the last image (shift() produced float NaN keys for
    # the missing neighbours, hence the type-is-float test).
    to_del = []
    for image in descriptions.keys():
        for connection in descriptions[image]['photo']['connections'].keys():
            if type(connection) == float:
                to_del.append([image, connection])

    for z, y in to_del:
        del descriptions[z]['photo']['connections'][y]

    # For each image, write the JSON into EXIF::ImageDescription
    print(
        'Writing metadata to EXIF::ImageDescription of qualified images...\n')
    with exiftool.ExifTool() as et:
        for image_uuid in descriptions.keys():
            et.execute(
                bytes(
                    '-ImageDescription={0}'.format(
                        json.dumps(descriptions[image_uuid])), 'utf-8'),
                bytes("{0}".format(img_id_link[image_uuid]), 'utf-8'))

    clean_up_new_files(OUTPUT_PHOTO_DIRECTORY,
                       [image for image in img_id_link.values()])

    # Write the per-sequence report next to the current working directory.
    print('Writing report json')
    with open("{}.json".format(sequence_uuid), "w") as outfile:
        json.dump(report_json, outfile)

    input('\nMetadata successfully added to images.\n\nPress any key to quit')
    quit()
def make_sequence(args):
    '''
    Build a connected "timelapse sequence" from a folder of geotagged photos.

    You define the timelapse series of photos, desired photo spacing (by distance or capture time), and how they should be connected
    IF distance selected, the script calculates the distance between photos
    The script orders the photos in specified order (either capture time or distance)
    The script discards images that don't match the specified spacing condition
    The script calculates the distance, elevation change, time difference, and heading between remaining photos
    The script writes a JSON object into the remaining photos -Exif:ImageDescription tag with this information

    NOTE(review): this REDEFINES the make_sequence declared earlier in this
    file and shadows it at import time.  This variant differs from the first:
    it reads args.join_mode (not args.connection_type), always parses
    Composite:GPSDateTime, writes a leaner per-image JSON, and emits no
    report file.  Confirm which definition is meant to survive.
    '''

    # Process import parameters
    print('\nInitializing input parameters...\n')

    # 'GPS_DATETIME' for sorting on 'time' or 'IMAGE_NAME' for sorting on 'filename'
    CONNECTION_TYPE = 'GPS_DATETIME' if args.join_mode in [
        'time', 'Time', 't', 'T'
    ] else 'IMAGE_NAME'
    # NOTE(review): equivalent to bool(args.discard == True); kept as-is.
    DISCARD = True if args.discard == True else False

    # Maximum frames per second and the matching minimum seconds-per-frame.
    MAX_FRAME_RATE, MIN_TIME_INTERVAL = handle_frame_rate(args.frame_rate)

    MIN_DISTANCE_INTERVAL = float(args.spatial_distance_min)
    MIN_ALTITUDE_INTERVAL = float(args.alt_diff_min)

    # A frame rate of 1000000 or more is treated as "no time filtering".
    TIME_FILTERING = True if MAX_FRAME_RATE < 1000000 else False
    # NOTE(review): "FITLERING" is a typo for "FILTERING" (local names only).
    DISTANCE_FITLERING = True if MIN_DISTANCE_INTERVAL > 0 else False
    ALTITUDE_FITLERING = True if MIN_ALTITUDE_INTERVAL > 0 else False

    PATH = Path(__file__)
    INPUT_PHOTO_DIRECTORY = os.path.abspath(args.input_directory)
    OUTPUT_PHOTO_DIRECTORY = os.path.abspath(args.output_directory)

    # Fall back to a script-relative folder when the given one is missing.
    # NOTE(review): INPUT_PHOTO_DIRECTORY is already absolute, and
    # os.path.join discards its left operand when the right one is absolute,
    # so this join is a no-op; the fallback can never resolve a genuinely
    # script-relative path.  Joining args.input_directory would -- verify.
    if not os.path.isdir(os.path.abspath(INPUT_PHOTO_DIRECTORY)):
        if os.path.isdir(
                os.path.join(PATH.parent.resolve(), INPUT_PHOTO_DIRECTORY)):
            INPUT_PHOTO_DIRECTORY = os.path.join(PATH.parent.resolve(),
                                                 INPUT_PHOTO_DIRECTORY)
            if not os.path.isdir(os.path.abspath(OUTPUT_PHOTO_DIRECTORY)):
                OUTPUT_PHOTO_DIRECTORY = os.path.join(PATH.parent.resolve(),
                                                      OUTPUT_PHOTO_DIRECTORY)
        else:
            input('No valid input folder is given!\nInput folder {0} or {1} does not exist!'.format(os.path.abspath(INPUT_PHOTO_DIRECTORY), \
                os.path.abspath(os.path.join(PATH.parent.resolve(), INPUT_PHOTO_DIRECTORY))))
            input('Press any key to continue')
            quit()

    print('The following input folder will be used:\n{0}'.format(
        INPUT_PHOTO_DIRECTORY))
    print('The following output folder will be used:\n{0}'.format(
        OUTPUT_PHOTO_DIRECTORY))

    # Often the exiftool.exe will not be in Windows's PATH; fall back to a
    # copy next to this script when no explicit path was supplied.
    if args.executable_path == 'No path specified':
        if 'win' in sys.platform and not 'darwin' in sys.platform:
            if os.path.isfile(
                    os.path.join(PATH.parent.resolve(), 'exiftool.exe')):
                exiftool.executable = os.path.join(PATH.parent.resolve(),
                                                   'exiftool.exe')
            else:
                input(
                    """Executing this script on Windows requires either the "-e" option
                    or store the exiftool.exe file in the working directory.\n\nPress any key to quit..."""
                )
                quit()
        else:
            pass  # exiftool.executable  = 'exiftool', which if in OS PATH will be OK for mac and linux

    else:
        exiftool.executable = args.executable_path

    # Get files in directory
    list_of_files = get_files(INPUT_PHOTO_DIRECTORY, False)
    print('{0} file(s) have been found in input directory'.format(
        len(list_of_files)))

    # Fetch the EXIF metadata of every file in a single exiftool session.
    print('Fetching metadata from all images....\n')
    with exiftool.ExifTool() as et:
        list_of_metadata = [{
            'IMAGE_NAME': image,
            'METADATA': et.get_metadata(image)
        } for image in list_of_files]

    # Create dataframe from list_of_metadata with image name in column and metadata in other column
    df_images = pd.DataFrame(list_of_metadata)

    # Process images or files without metadata based on discard setting.
    print('Checking metadata tags of all images...')
    len_before_disc = len(df_images)
    keys = [
        'Composite:GPSDateTime', 'Composite:GPSLatitude',
        'Composite:GPSLongitude', 'Composite:GPSAltitude'
    ]
    # parse_metadata yields one value per key; rows missing any tag come
    # back with NaNs (behavior depends on DISCARD) and are dropped below.
    df_images[['GPS_DATETIME', 'LATITUDE', 'LONGITUDE', 'ALTITUDE'
               ]] = df_images.apply(lambda x: parse_metadata(x, keys, DISCARD),
                                    axis=1,
                                    result_type='expand')

    # remove discarded images.
    df_images.dropna(axis=0, how='any', inplace=True)
    # Reset index in case an image is dropped due to DISCARD
    df_images.reset_index(inplace=True, drop=True)
    print('{0} images dropped. "DISCARD" is {1}.\n'.format(
        len_before_disc - len(df_images), DISCARD))

    # Need at least two images to form any connection.
    if len(df_images) == 0:
        print(
            'All images were discarded. No images left to process. Exiting program.'
        )
        input('Press any key to quit')
        quit()
    elif len(df_images) == 1:
        print('Only one image to process. No possible links. Exiting program.')
        input('Press any key to quit')
        quit()

    # Convert datetime from string to datetime format.
    # NOTE(review): unlike the earlier definition, this one requires the
    # trailing 'Z' ('%Y:%m:%d %H:%M:%SZ'); a GPSDateTime without it raises
    # ValueError -- confirm all inputs carry the suffix.
    df_images['GPS_DATETIME'] = df_images.apply(
        lambda x: datetime.datetime.strptime(x['GPS_DATETIME'],
                                             '%Y:%m:%d %H:%M:%SZ'),
        axis=1)

    # Sort images
    df_images.sort_values(CONNECTION_TYPE,
                          axis=0,
                          ascending=True,
                          inplace=True)

    #########################
    # Work with the resulting image dataframe to filter & find the right sequence

    # Calculate the time difference, distance and altitude difference with the NEXT image
    print(
        'Calculating differences of time, distance and altitude between images...'
    )
    for conn_type in ['DELTA_TIME', 'DISTANCE', 'DELTA_ALT']:
        df_images = calculate_to_next(df_images, conn_type)

    # Filter images, drop rows where needed and
    # re-calculate the distance and altitude differences if rows are dropped
    print('Filtering images according to input parameters...')
    len_time = len(df_images)
    df_images = generic_connection(
        df_images, 'DELTA_TIME',
        MIN_TIME_INTERVAL) if TIME_FILTERING else df_images
    len_dist = len(df_images)
    print(
        '{0} images discarded due to time spacing intervals'.format(len_time -
                                                                    len_dist))
    # Distances must be recomputed after time filtering dropped rows.
    df_images = calculate_to_next(df_images,
                                  'DISTANCE') if TIME_FILTERING else df_images
    df_images = generic_connection(
        df_images, 'DISTANCE',
        MIN_DISTANCE_INTERVAL) if DISTANCE_FITLERING else df_images
    len_alt = len(df_images)
    print('{0} images discarded due to distance spacing intervals'.format(
        len_dist - len_alt))
    df_images = calculate_to_next(
        df_images, 'DELTA_ALT') if DISTANCE_FITLERING else df_images
    df_images = generic_connection(
        df_images, 'DELTA_ALT',
        MIN_ALTITUDE_INTERVAL) if ALTITUDE_FITLERING else df_images
    len_final = len(df_images)
    print('{0} images discarded due to altitude spacing intervals\n'.format(
        len_alt - len_final))

    print('\nFinal amount of images to process: {0}\n\n'.format(
        len(df_images)))
    if len(df_images) == 0:
        print(
            'All images were filtered out. No images left to process. Exiting program.'
        )
        input('Press any key to quit')
        quit()
    elif len(df_images) == 1:
        print(
            'Only one image left to process. No possible links. Exiting program.'
        )
        input('Press any key to quit')
        quit()

    # Finally, calculate all differences again to their NEXT image
    print(
        'Calculating final differences of time, distance and altitude between qualified images...'
    )
    for conn_type in ['DELTA_TIME', 'DISTANCE', 'DELTA_ALT']:
        df_images = calculate_to_next(df_images, conn_type)

    # Calculate Azimuth (heading) and Pitch
    print('Calculating heading between qualified images....')
    df_images['AZIMUTH'] = df_images.apply(
        lambda x: calculate_initial_compass_bearing(
            (x['LATITUDE'], x['LONGITUDE']),
            (x['LATITUDE_NEXT'], x['LONGITUDE_NEXT'])),
        axis=1)
    # The last image has no NEXT; reuse the second-to-last image's heading.
    df_images.iat[
        -1,
        df_images.columns.get_loc('AZIMUTH')] = df_images['AZIMUTH'].iloc[-2]
    df_images['PITCH'] = (df_images['ALTITUDE_NEXT'] -
                          df_images['ALTITUDE']) / df_images['DISTANCE']

    # Add additional required data for output json.
    # All related to PREVIOUS image: shift(1) pulls the prior row's NEXT
    # deltas down, negated so they point backwards.  The NaNs produced for
    # the first row are zeroed out just below via iat.
    print('Setting related data of connected qualified images...')
    df_images['DISTANCE_TO_PREV'] = -1 * df_images['DISTANCE'].shift(1)
    df_images['DELTA_TIME_TO_PREV'] = -1 * df_images['DELTA_TIME'].shift(1)
    df_images['DELTA_ALT_TO_PREV'] = -1 * df_images['DELTA_ALT'].shift(1)
    df_images['PITCH_TO_PREV'] = -1 * df_images['PITCH'].shift(1)
    df_images['AZIMUTH_TO_PREV'] = (df_images['AZIMUTH'].shift(1) + 180) % 360

    df_images.iat[0, df_images.columns.get_loc('DELTA_ALT_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('DISTANCE_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('DELTA_TIME_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('AZIMUTH_TO_PREV')] = 0
    df_images.iat[0, df_images.columns.get_loc('PITCH_TO_PREV')] = 0

    # Add names of the NEXT and PREVIOUS image for quicker reference
    df_images['IMAGE_NAME_NEXT'] = df_images['IMAGE_NAME'].shift(-1)
    df_images['IMAGE_NAME_PREV'] = df_images['IMAGE_NAME'].shift(1)

    # Assign UUID (uuid1 is time-based; shifted columns give the neighbours'
    # ids, with NaN at the ends -- those are stripped later).
    df_images['UUID'] = df_images.apply(lambda x: str(uuid.uuid1()), axis=1)
    df_images['UUID_NEXT'] = df_images['UUID'].shift(-1)
    df_images['UUID_PREV'] = df_images['UUID'].shift(1)

    # Create the global JSON structure
    # Main keys will be the image to which the subkeys will be added to
    print('\nGenerating JSON object...')
    descriptions = {
        k['UUID']: {
            'connections': {
                k['UUID_NEXT']: {
                    'distance_mtrs': k['DISTANCE'],
                    'elevation_mtrs': k['DELTA_ALT'],
                    'heading_deg': k['AZIMUTH'],
                    'pitch': k['PITCH'],
                    'time_sec': k['DELTA_TIME']
                },
                k['UUID_PREV']: {
                    'distance_mtrs': k['DISTANCE_TO_PREV'],
                    'elevation_mtrs': k['DELTA_ALT_TO_PREV'],
                    'heading_deg': k['AZIMUTH_TO_PREV'],
                    'pitch': k['PITCH_TO_PREV'],
                    'time_sec': k['DELTA_TIME_TO_PREV']
                }
            },
            'id':
            k['UUID'],
            'create_date':
            datetime.datetime.strftime(datetime.datetime.now(),
                                       '%Y-%m-%d:%H:%M:%S'),
            'software':
            'sequence-maker'
        }
        for index, k in df_images.iterrows()
    }

    # Map each UUID back to its image path for the exiftool write below.
    img_id_link = {
        k['UUID']: k['IMAGE_NAME']
        for index, k in df_images.iterrows()
    }

    # Remove the 'nan' links of the first image to its PREVIOUS, and
    # the NEXT image of the last image (shift() produced float NaN keys for
    # the missing neighbours, hence the type-is-float test).
    to_del = []
    for image in descriptions.keys():
        for connection in descriptions[image]['connections'].keys():
            if type(connection) == float:
                to_del.append([image, connection])

    for z, y in to_del:
        del descriptions[z]['connections'][y]

    # For each image, write the JSON into EXIF::ImageDescription
    print(
        'Writing metadata to EXIF::ImageDescription of qualified images...\n')
    with exiftool.ExifTool() as et:
        for image_uuid in descriptions.keys():
            et.execute(
                bytes(
                    '-ImageDescription={0}'.format(
                        json.dumps(descriptions[image_uuid])), 'utf-8'),
                bytes("{0}".format(img_id_link[image_uuid]), 'utf-8'))

    clean_up_new_files(OUTPUT_PHOTO_DIRECTORY,
                       [image for image in img_id_link.values()])

    input('\nMetadata successfully added to images.\n\nPress any key to quit')
    quit()
# Example #4 (scraped site marker, originally "Beispiel #4" / "0"; commented out so the file parses)
def add_azimuth_pitch(args):
    '''
    Main function.

    Uses Exiftool to add a calculated azimuth and pitch to each image,
    computed in relation to the next image in the sorted sequence, writing
    the values into the GPSPitch, PoseHeadingDegrees, GPSImgDirection,
    CameraElevationAngle and PosePitchDegrees tags.

    args -- parsed command-line namespace; reads input_directory,
            output_directory, connection_type, connection_order, discard
            and executable_path.  Interacts with the user via input() and
            terminates the interpreter with quit().
    '''
    # Process import parameters
    print('\nInitializing input parameters...\n')

    # Sort key: capture time ('time'/'t') or filename (anything else).
    CONNECTION_TYPE = 'GPS_DATETIME' if args.connection_type in [
        'time', 'Time', 't', 'T'
    ] else 'IMAGE_NAME'
    # True for sorting 'ascending', False for sorting 'descending'.
    CONNECTION_ORDER = args.connection_order in [
        'ascending', 'Ascending', 'a', 'A'
    ]
    # Whether images lacking required GPS tags are dropped.
    # Assumes args.discard is an argparse boolean flag -- TODO confirm.
    DISCARD = bool(args.discard)

    PATH = Path(__file__)
    INPUT_PHOTO_DIRECTORY = os.path.abspath(args.input_directory)
    OUTPUT_PHOTO_DIRECTORY = os.path.abspath(args.output_directory)

    # If the given input folder does not exist, retry it relative to the
    # script's own directory before giving up.
    if not os.path.isdir(INPUT_PHOTO_DIRECTORY):
        if os.path.isdir(
                os.path.join(PATH.parent.resolve(), INPUT_PHOTO_DIRECTORY)):
            INPUT_PHOTO_DIRECTORY = os.path.join(PATH.parent.resolve(),
                                                 INPUT_PHOTO_DIRECTORY)
            if not os.path.isdir(OUTPUT_PHOTO_DIRECTORY):
                OUTPUT_PHOTO_DIRECTORY = os.path.join(PATH.parent.resolve(),
                                                      OUTPUT_PHOTO_DIRECTORY)
        else:
            input('No valid input folder is given!\nInput folder {0} or {1} does not exist!'.format(
                os.path.abspath(INPUT_PHOTO_DIRECTORY),
                os.path.abspath(os.path.join(PATH.parent.resolve(), INPUT_PHOTO_DIRECTORY))))
            input('Press any key to continue')
            quit()

    print('The following input folder will be used:\n{0}'.format(
        INPUT_PHOTO_DIRECTORY))
    print('The following output folder will be used:\n{0}'.format(
        OUTPUT_PHOTO_DIRECTORY))

    # Often the exiftool.exe will not be in Windows's PATH, so fall back to
    # a copy stored next to this script.
    if args.executable_path == 'No path specified':
        if 'win' in sys.platform and 'darwin' not in sys.platform:
            if os.path.isfile(
                    os.path.join(PATH.parent.resolve(), 'exiftool.exe')):
                exiftool.executable = os.path.join(PATH.parent.resolve(),
                                                   'exiftool.exe')
            else:
                input(
                    """Executing this script on Windows requires either the "-e" option
                    or store the exiftool.exe file in the working directory.\n\nPress any key to quit..."""
                )
                quit()
        else:
            pass  # exiftool.executable = 'exiftool', which if in OS PATH will be OK for mac and linux

    else:
        exiftool.executable = args.executable_path

    # Get files in directory
    list_of_files = get_files(INPUT_PHOTO_DIRECTORY, False)
    print('{0} file(s) have been found in input directory'.format(
        len(list_of_files)))

    # Fetch the full metadata of every file in one exiftool session.
    print('Fetching metadata from all images....\n')
    with exiftool.ExifTool() as et:
        list_of_metadata = [{
            'IMAGE_NAME': image,
            'METADATA': et.get_metadata(image)
        } for image in list_of_files]

    # One row per image: image name in one column, raw metadata in the other.
    df_images = pd.DataFrame(list_of_metadata)

    # Extract the required GPS tags; parse_metadata is expected to yield
    # missing values (dropped below) depending on the DISCARD setting.
    print('Checking metadata tags of all images...')
    len_before_disc = len(df_images)
    keys = [
        'Composite:GPSDateTime', 'Composite:GPSLatitude',
        'Composite:GPSLongitude', 'Composite:GPSAltitude'
    ]
    df_images[['GPS_DATETIME', 'LATITUDE', 'LONGITUDE', 'ALTITUDE'
               ]] = df_images.apply(lambda x: parse_metadata(x, keys, DISCARD),
                                    axis=1,
                                    result_type='expand')

    # Remove discarded images (rows where any extracted tag is missing).
    df_images.dropna(axis=0, how='any', inplace=True)
    print('{0} images dropped. "DISCARD" is {1}.\n'.format(
        len_before_disc - len(df_images), DISCARD))

    # Azimuth/pitch need at least one pair of images.
    if len(df_images) == 0:
        print(
            'All images were discarded. No images left to process. Exiting program.'
        )
        input('Press any key to continue')
        quit()
    elif len(df_images) == 1:
        print('Only one image to process. No possible links. Exiting program.')
        input('Press any key to quit')
        quit()

    # Sort images into the sequence order requested by the user.
    df_images.sort_values(CONNECTION_TYPE,
                          axis=0,
                          ascending=CONNECTION_ORDER,
                          inplace=True)

    # Pair every image with the next one in the sequence.
    print(
        'Calculating differences of time, distance, azimuth and pitch between images...'
    )
    for col in ['GPS_DATETIME', 'LATITUDE', 'LONGITUDE', 'ALTITUDE']:
        df_images[col + '_NEXT'] = df_images[col].shift(-1)

    # Azimuth: compass bearing from this image towards the next.
    df_images['AZIMUTH'] = df_images.apply(
        lambda x: calculate_initial_compass_bearing(
            (x['LATITUDE'], x['LONGITUDE']),
            (x['LATITUDE_NEXT'], x['LONGITUDE_NEXT'])),
        axis=1)
    # Distance: great-circle distance to the next image.
    df_images['DISTANCE'] = df_images.apply(
        lambda x: haversine(x['LONGITUDE'], x['LATITUDE'], x['LONGITUDE_NEXT'],
                            x['LATITUDE_NEXT']),
        axis=1)
    # Pitch: altitude change per unit distance towards the next image.
    # NOTE(review): a zero DISTANCE yields inf/NaN here -- confirm two images
    # never share identical coordinates.
    df_images['PITCH'] = (df_images['ALTITUDE_NEXT'] -
                          df_images['ALTITUDE']) / df_images['DISTANCE']

    # The last picture has no successor; reuse the previous picture's values
    # for Azimuth, Distance & Pitch.
    for col in ['AZIMUTH', 'DISTANCE', 'PITCH']:
        df_images.iat[-1,
                      df_images.columns.get_loc(col)] = df_images[col].iloc[-2]

    # Edit the metadata of the images.
    print('Writing metadata to EXIF & XMP tags of qualified images...\n')
    with exiftool.ExifTool() as et:
        for index, row in df_images.iterrows():
            image_arg = bytes('{0}'.format(row['IMAGE_NAME']), 'utf-8')
            # Tag order preserved from the original implementation.
            for tag, value in [('GPSPitch', row['PITCH']),
                               ('PoseHeadingDegrees', row['AZIMUTH']),
                               ('GPSImgDirection', row['AZIMUTH']),
                               ('CameraElevationAngle', row['PITCH']),
                               ('PosePitchDegrees', row['PITCH'])]:
                et.execute(bytes('-{0}={1}'.format(tag, value), 'utf-8'),
                           image_arg)

    # NOTE(review): clean_up_new_files presumably relocates exiftool's output
    # into OUTPUT_PHOTO_DIRECTORY -- behavior defined elsewhere in the file.
    clean_up_new_files(OUTPUT_PHOTO_DIRECTORY, list(df_images['IMAGE_NAME']))

    input('\nMetadata successfully added to images.\n\nPress any key to quit')
    quit()
def geo_tagger(args):
    '''
    Geotag images from a GPS track log or from geo data already present.

    Reads every file in the input directory, optionally keeps only images
    with missing GPS tags (mode 'missing'), resolves coordinates via the
    track log when one is given, then writes GPS date/time, latitude,
    longitude and altitude tags with Exiftool.

    args -- parsed command-line namespace; reads input_path, track_log,
            output_directory, mode, discard, normalise and executable_path.
            Interacts with the user via input() and terminates with quit().
    '''
    path = Path(__file__)
    input_photo_directory = os.path.abspath(args.input_path)
    # Track log is optional; without it the images' own geo values are used.
    log_path = os.path.abspath(args.track_log) if args.track_log else None
    output_photo_directory = os.path.abspath(args.output_directory)
    mode = args.mode.lower()
    discard = int(args.discard)
    normalise = int(args.normalise)

    # Passed to ExifTool(); switched off for non-Windows platforms below.
    is_win_shell = True

    # Validate input paths, retrying relative to the script's own directory.
    if not os.path.isdir(input_photo_directory):
        if os.path.isdir(os.path.join(path.parent.resolve(), input_photo_directory)):
            input_photo_directory = os.path.join(path.parent.resolve(), input_photo_directory)
            if not os.path.isdir(output_photo_directory):
                output_photo_directory = os.path.join(path.parent.resolve(), output_photo_directory)
        else:
            input('No valid input folder is given!\nInput folder {0} or {1} does not exist!'.format(
                os.path.abspath(input_photo_directory),
                os.path.abspath(os.path.join(path.parent.resolve(), input_photo_directory))))
            input('Press any key to continue')
            quit()

    print('The following input folder will be used:\n{0}'.format(input_photo_directory))
    print('The following output folder will be used:\n{0}'.format(output_photo_directory))

    # Often the exiftool.exe will not be in Windows's PATH, so fall back to
    # a copy stored next to this script.
    if args.executable_path == 'No path specified':
        if 'win' in sys.platform and 'darwin' not in sys.platform:
            if os.path.isfile(os.path.join(path.parent.resolve(), 'exiftool.exe')):
                exiftool.executable = os.path.join(path.parent.resolve(), 'exiftool.exe')
            else:
                input("""Executing this script on Windows requires either the "-e" option
                        or store the exiftool.exe file in the working directory.\n\nPress any key to quit...""")
                quit()
        else:
            is_win_shell = False

    else:
        exiftool.executable = args.executable_path

    # Get files in directory
    list_of_files = get_files(input_photo_directory)
    print('{0} file(s) have been found in input directory'.format(len(list_of_files)))

    # Fetch the full metadata of every file in one exiftool session.
    print('Fetching metadata from all images....\n')
    with exiftool.ExifTool(win_shell=is_win_shell) as et:
        list_of_metadata = [{'IMAGE_NAME': image, 'METADATA': et.get_metadata(image)} for image in list_of_files]

    # In 'missing' mode keep only images that lack one of these GPS tags.
    if mode == 'missing':
        keys = ['Composite:GPSDateTime', 'Composite:GPSLatitude', 'Composite:GPSLongitude', 'Composite:GPSAltitude',
                'EXIF:GPSDateStamp', 'EXIF:GPSTimeStamp']
        list_of_metadata = [metadata for metadata in list_of_metadata if filter_metadata(metadata, keys)]

        if len(list_of_metadata) == 0:
            input("""There isn't any missing tag file for geotagging.\n\nPress any key to quit...""")
            quit()

    # One row per image: image name in one column, raw metadata in the other.
    df_images = pd.DataFrame(list_of_metadata)
    keys = ['EXIF:DateTimeOriginal']
    df_images[['ORIGINAL_DATETIME']] = df_images.apply(
        lambda x: parse_metadata(x, keys), axis=1, result_type='expand')

    # Sort images chronologically by original capture time.
    df_images.sort_values('ORIGINAL_DATETIME', axis=0, ascending=True, inplace=True)
    df_images = df_images.reset_index(drop=True)

    track_logs = {}
    if log_path:
        # Work with the resulting image dataframe to filter by time discard or normalise
        track_logs = load_gps_track_log(log_path)

    if not track_logs:
        print("""Track Logs are empty. So using geo values from image.""")

    df_images[['GPS_DATETIME', 'LATITUDE', 'LONGITUDE', 'ALTITUDE']] = \
        df_images.apply(lambda x: get_geo_data_from_log(x, track_logs), axis=1, result_type='expand')

    # Keep only rows that have at least one of latitude/longitude.
    df_images = df_images.query('LATITUDE.notnull() or LONGITUDE.notnull()', engine='python')

    if not track_logs and len(df_images.index) == 0:
        input("""Latitude and longitude of all images are empty.\n\nPress any key to quit...""")
        quit()

    if discard > 0:
        df_images = discard_track_logs(df_images, discard)
        if len(df_images) == 0:
            input("""All images has been discarded.\n\nPress any key to quit...""")
            quit()

    elif normalise > 0:
        df_images = normalise_track_logs(df_images, normalise)

    # For each image, write the GEO TAGS into EXIF
    print('Writing metadata to EXIF of qualified images...\n')
    with exiftool.ExifTool(win_shell=is_win_shell) as et:
        for _, row in df_images.iterrows():
            image_arg = bytes('{0}'.format(row['IMAGE_NAME']), 'utf-8')

            if row['GPS_DATETIME']:
                et.execute(bytes('-GPSTimeStamp={0}'.format(row['GPS_DATETIME'].strftime("%H:%M:%S")), 'utf-8'),
                           image_arg)
                et.execute(bytes('-GPSDateStamp={0}'.format(row['GPS_DATETIME'].strftime("%Y:%m:%d")), 'utf-8'),
                           image_arg)

            et.execute(bytes('-GPSLatitude={0}'.format(row['LATITUDE']), 'utf-8'), image_arg)

            # NOTE(review): a coordinate of exactly 0 gets the 'S'/'W' ref --
            # confirm that is acceptable for equator/prime-meridian points.
            latitude_ref = 'N' if row['LATITUDE'] > 0 else 'S'
            et.execute(bytes('-GPSLatitudeRef={0}'.format(latitude_ref), 'utf-8'), image_arg)

            et.execute(bytes('-GPSLongitude={0}'.format(row['LONGITUDE']), 'utf-8'), image_arg)

            longitude_ref = 'E' if row['LONGITUDE'] > 0 else 'W'
            et.execute(bytes('-GPSLongitudeRef={0}'.format(longitude_ref), 'utf-8'), image_arg)

            # NOTE(review): truthiness skips a 0.0 (sea-level) altitude, while
            # NaN is truthy and would be written -- confirm intended.
            if row['ALTITUDE']:
                altitude_ref = '0' if row['ALTITUDE'] > 0 else '1'
                et.execute(bytes('-GPSAltitude={0}'.format(row['ALTITUDE']), 'utf-8'), image_arg)
                et.execute(bytes('-GPSAltitudeRef={0}'.format(altitude_ref), 'utf-8'), image_arg)

    # NOTE(review): clean_up_new_files presumably relocates exiftool's output
    # into output_photo_directory -- behavior defined elsewhere in the file.
    clean_up_new_files(output_photo_directory, list(df_images['IMAGE_NAME'].values))

    input('\nMetadata successfully added to images.\n\nPress any key to quit')
    quit()