def sample_video(video_file, import_path, video_sample_interval=2.0,
                 video_start_time=None, video_duration_ratio=1.0,
                 verbose=False):
    """Sample a video file (or every video in a directory) into frames.

    Frames are extracted under a sampling path derived from the video
    location (or under import_path when given), and the video process is
    logged via processing.create_and_log_video_process.

    :param video_file: path to a video file or a directory of videos
    :param import_path: optional root under which sampled frames are placed
    :param video_sample_interval: seconds between sampled frames
    :param video_start_time: optional start time passed to extract_frames
    :param video_duration_ratio: fraction of the video duration to sample
    :param verbose: enable extra output in the called helpers
    """
    if import_path and not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # command specific checks
    # BUGFIX: keep the original path for the error message below — the
    # original code set video_file to None first and then concatenated it
    # into the message, raising TypeError instead of printing the error.
    original_video_path = video_file
    video_file = os.path.abspath(video_file) if (
        os.path.isfile(video_file) or os.path.isdir(video_file)) else None
    if not video_file:
        print("Error, video path " + str(original_video_path) +
              " does not exist, exiting...")
        sys.exit(1)

    # set sampling path
    video_sampling_path = processing.sampled_video_frames_rootpath(video_file)
    import_path = os.path.join(
        os.path.abspath(import_path),
        video_sampling_path) if import_path else os.path.join(
        os.path.dirname(video_file), video_sampling_path)

    print("Video sampling path set to {}".format(import_path))

    # check video logs; a truthy result means the video was already handled
    video_upload = processing.video_upload(video_file, import_path, verbose)
    if video_upload:
        return

    if os.path.isdir(video_file):
        # sample every video found in the directory
        video_list = uploader.get_video_file_list(video_file)
        for video in video_list:
            extract_frames(video, import_path, video_sample_interval,
                           video_start_time, video_duration_ratio, verbose)
    else:
        # single video file
        extract_frames(video_file, import_path, video_sample_interval,
                       video_start_time, video_duration_ratio, verbose)
    processing.create_and_log_video_process(video_file, import_path)
def process_upload_params(import_path, user_name, master_upload=False,
                          verbose=False, rerun=False, skip_subfolders=False,
                          video_file=None):
    """Prepare and log per-image upload parameters for the given import path.

    Authenticates the user (unless master_upload), then writes an
    "upload_params_process" log entry plus a manual-upload flag for every
    image that is not flagged as a duplicate.

    :param import_path: directory of images to process
    :param user_name: Mapillary user name used for authentication
    :param master_upload: skip per-user upload params when True
    :param video_file: optional video path; adjusts import_path to the
        video sampling directory
    """
    # sanity check if video file is passed
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # in case of video processing, adjust the import path
    if video_file:
        # set sampling path
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        import_path = os.path.join(
            os.path.abspath(import_path),
            video_sampling_path) if import_path else os.path.join(
            os.path.dirname(video_file), video_sampling_path)

    # basic check for all
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # get list of file to process
    process_file_list = processing.get_process_file_list(
        import_path, "upload_params_process", rerun, verbose, skip_subfolders)
    if not len(process_file_list):
        print("No images to run upload params process")
        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")

    # sanity checks
    if not user_name:
        print("Error, must provide a valid user name, exiting...")
        # BUGFIX: the original passed "upload_params_process" "failed"
        # (implicit string concatenation — a missing comma), which merged
        # process name and status into one argument and shifted `verbose`
        # into the status slot. Same fix applied to all three call sites.
        processing.create_and_log_process_in_list(
            process_file_list, "upload_params_process", "failed", verbose)
        return

    if not master_upload:
        try:
            credentials = uploader.authenticate_user(user_name)
        except Exception:
            print("Error, user authentication failed for user " + user_name)
            processing.create_and_log_process_in_list(
                process_file_list, "upload_params_process", "failed", verbose)
            return
        if credentials is None or "user_upload_token" not in credentials or "user_permission_hash" not in credentials or "user_signature_hash" not in credentials:
            print("Error, user authentication failed for user " + user_name)
            processing.create_and_log_process_in_list(
                process_file_list, "upload_params_process", "failed", verbose)
            return
        user_upload_token = credentials["user_upload_token"]
        user_permission_hash = credentials["user_permission_hash"]
        user_signature_hash = credentials["user_signature_hash"]
        user_key = credentials["MAPSettingsUserKey"]

    for image in process_file_list:
        # check the status of the sequence processing
        log_root = uploader.log_rootpath(image)
        duplicate_flag_path = os.path.join(log_root, "duplicate")
        upload_params_path = os.path.join(
            log_root, "upload_params_process.json")

        # stale params from a previous run are always removed
        if os.path.isfile(upload_params_path):
            os.remove(upload_params_path)

        # duplicates are skipped; master uploads need no per-user params
        if os.path.isfile(duplicate_flag_path) or master_upload:
            continue

        upload_params_properties = processing.get_upload_param_properties(
            log_root, image, user_name, user_upload_token,
            user_permission_hash, user_signature_hash, user_key, verbose)
        processing.create_and_log_process(image,
                                          "upload_params_process",
                                          "success",
                                          upload_params_properties,
                                          verbose=verbose)
        # flag manual upload
        log_manual_upload = os.path.join(log_root, "manual_upload")
        open(log_manual_upload, 'a').close()
def process_import_meta_properties(import_path, orientation=None,
                                   device_make=None, device_model=None,
                                   GPS_accuracy=None, add_file_name=False,
                                   add_import_date=False, verbose=False,
                                   rerun=False, skip_subfolders=False,
                                   video_file=None, custom_meta_data=None,
                                   camera_uuid=None):
    """Read import meta data from image EXIF and finalize the
    import-meta-data process step for every image under import_path.

    When video_file is given, import_path is redirected to the video
    sampling directory first.
    """
    # validate the video path, if one was provided
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # for video frames, the import path becomes the sampling directory
    if video_file:
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        if import_path:
            import_path = os.path.join(
                os.path.abspath(import_path), video_sampling_path)
        else:
            import_path = os.path.join(
                os.path.dirname(video_file), video_sampling_path)

    # the import directory itself must exist
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # collect the images still needing this process step
    process_file_list = processing.get_process_file_list(
        import_path, "import_meta_data_process", rerun, verbose,
        skip_subfolders)
    if not process_file_list:
        print("No images to run import meta data process")
        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")

    # map orientation from degrees to tags
    if orientation:
        orientation = processing.format_orientation(orientation)

    # read import meta from image EXIF and finalize the import
    # properties process
    for progress_count, image in enumerate(process_file_list, 1):
        # lightweight progress indicator on verbose runs
        if verbose:
            if progress_count % 50 == 0:
                sys.stdout.write(".")
            if progress_count % 5000 == 0:
                print("")
        import_meta_data_properties = get_import_meta_properties_exif(
            image, verbose)
        finalize_import_properties_process(
            image, import_path, orientation, device_make, device_model,
            GPS_accuracy, add_file_name, add_import_date, verbose,
            import_meta_data_properties, custom_meta_data, camera_uuid)
    print("Sub process finished")
def insert_MAPJson(import_path, master_upload=False, verbose=False,
                   rerun=False, skip_subfolders=False, skip_EXIF_insert=False,
                   keep_original=False, video_file=None):
    """Finalize the mapillary image description for every non-duplicate
    image under import_path and log the result.

    When video_file is given, import_path is redirected to the video
    sampling directory first.
    """
    # validate the video path, if one was provided
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # for video frames, the import path becomes the sampling directory
    if video_file:
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        if import_path:
            import_path = os.path.join(
                os.path.abspath(import_path), video_sampling_path)
        else:
            import_path = os.path.join(
                os.path.dirname(video_file), video_sampling_path)

    # the import directory itself must exist
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # collect the images still needing this process step
    process_file_list = processing.get_process_file_list(
        import_path, "mapillary_image_description", rerun, verbose,
        skip_subfolders)
    if not process_file_list:
        print("No images to run process finalization")
        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")

    for counter, image in enumerate(process_file_list, 1):
        # lightweight progress indicator on verbose runs
        if verbose:
            if counter % 50 == 0:
                sys.stdout.write(".")
            if counter % 5000 == 0:
                print("")

        # images flagged as duplicates are skipped entirely
        log_root = uploader.log_rootpath(image)
        if os.path.isfile(os.path.join(log_root, "duplicate")):
            continue

        final_mapillary_image_description = processing.get_final_mapillary_image_description(
            log_root, image, master_upload, verbose, skip_EXIF_insert,
            keep_original)
        processing.create_and_log_process(image,
                                          "mapillary_image_description",
                                          "success",
                                          final_mapillary_image_description,
                                          verbose=verbose)
    print("Sub process finished")
def process_sequence_properties(import_path, cutoff_distance=600.0,
                                cutoff_time=60.0,
                                interpolate_directions=False,
                                flag_duplicates=False, duplicate_distance=0.1,
                                duplicate_angle=5, offset_angle=0.0,
                                verbose=False, rerun=False,
                                skip_subfolders=False, video_file=None):
    """Split images into sequences, compute/interpolate directions, flag
    duplicates and finalize sequence processing per sequence chunk.

    :param cutoff_distance: max distance (same unit as gps_distance) between
        consecutive images within one sequence
    :param cutoff_time: max time gap between consecutive images
    :param interpolate_directions: force use of interpolated bearings
    :param flag_duplicates: mark near-identical consecutive images
    :param duplicate_distance: distance threshold for duplicate flagging
    :param duplicate_angle: bearing-difference threshold for duplicates
    :param offset_angle: added to interpolated bearings (mod 360)
    """
    # sanity check if video file is passed
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # in case of video processing, adjust the import path
    if video_file:
        # set sampling path
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        import_path = os.path.join(
            os.path.abspath(import_path),
            video_sampling_path) if import_path else os.path.join(
            os.path.dirname(video_file), video_sampling_path)

    # basic check for all
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    sequences = []
    if skip_subfolders:
        # sequence limited to the root import path only
        process_file_list = processing.get_process_file_list(
            import_path, "sequence_process", rerun, verbose, True,
            import_path)
        if not len(process_file_list):
            if verbose:
                print("No images to run sequence process in root " +
                      import_path)
                print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")
        else:
            # LOAD TIME AND GPS POINTS ------------------------------------
            file_list, capture_times, lats, lons, directions = processing.load_geotag_points(
                process_file_list, verbose)
            # ---------------------------------------

            # SPLIT SEQUENCES --------------------------------------
            if len(capture_times) and len(lats) and len(lons):
                sequences.extend(
                    processing.split_sequences(capture_times, lats, lons,
                                               file_list, directions,
                                               cutoff_time, cutoff_distance,
                                               verbose))
            # ---------------------------------------
    else:
        # sequence limited to the root of the files
        for root, dirs, files in os.walk(import_path):
            # never treat the mapillary log tree as image folders
            if os.path.join(".mapillary", "logs") in root:
                continue
            if len(files):
                process_file_list = processing.get_process_file_list(
                    import_path, "sequence_process", rerun, verbose, True,
                    root)
                if not len(process_file_list):
                    if verbose:
                        print("No images to run sequence process in root " +
                              root)
                        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")
                    continue

                # LOAD TIME AND GPS POINTS ------------------------------------
                file_list, capture_times, lats, lons, directions = processing.load_geotag_points(
                    process_file_list, verbose)
                # ---------------------------------------

                # SPLIT SEQUENCES --------------------------------------
                if len(capture_times) and len(lats) and len(lons):
                    sequences.extend(
                        processing.split_sequences(capture_times, lats, lons,
                                                   file_list, directions,
                                                   cutoff_time,
                                                   cutoff_distance, verbose))
                # ---------------------------------------

    # process for each sequence
    for sequence in sequences:
        file_list = sequence["file_list"]
        directions = sequence["directions"]
        latlons = sequence["latlons"]
        capture_times = sequence["capture_times"]

        # COMPUTE DIRECTIONS --------------------------------------
        # bearing between each pair of consecutive points; the last image
        # reuses the previous bearing (or the recorded one for a
        # single-image sequence)
        interpolated_directions = [
            compute_bearing(ll1[0], ll1[1], ll2[0], ll2[1])
            for ll1, ll2 in zip(latlons[:-1], latlons[1:])
        ]
        if len(interpolated_directions):
            interpolated_directions.append(interpolated_directions[-1])
        else:
            interpolated_directions.append(directions[-1])
        # use interpolated directions if direction not available or if flag for
        # interpolate_directions
        for i, d in enumerate(directions):
            directions[i] = d if (
                d is not None and not interpolate_directions
            ) else (interpolated_directions[i] + offset_angle) % 360.0
        # ---------------------------------------

        # INTERPOLATE TIMESTAMPS, in case of identical timestamps
        capture_times = processing.interpolate_timestamp(capture_times)

        final_file_list = file_list[:]
        final_directions = directions[:]
        final_capture_times = capture_times[:]

        # FLAG DUPLICATES --------------------------------------
        if flag_duplicates:
            if verbose:
                # BUGFIX: the template had only one {} placeholder, so the
                # angle threshold passed to .format() was silently dropped.
                print(
                    "Flagging images as duplicates if consecutive distance difference less than {} and angle difference less than {}"
                    .format(duplicate_distance, duplicate_angle))
            final_file_list = [file_list[0]]
            final_directions = [directions[0]]
            final_capture_times = [capture_times[0]]
            prev_latlon = latlons[0]
            prev_direction = directions[0]
            for i, filename in enumerate(file_list[1:]):
                log_root = uploader.log_rootpath(filename)
                duplicate_flag_path = os.path.join(log_root, "duplicate")
                sequence_process_success_path = os.path.join(
                    log_root, "sequence_process_success")
                k = i + 1
                distance = gps_distance(latlons[k], prev_latlon)
                if directions[k] is not None and prev_direction is not None:
                    direction_diff = diff_bearing(directions[k],
                                                  prev_direction)
                else:
                    # dont use bearing difference if no bearings are
                    # available
                    direction_diff = 360
                if distance < duplicate_distance and direction_diff < duplicate_angle:
                    # duplicate: flag it and mark sequence processing done
                    open(duplicate_flag_path, "w").close()
                    open(sequence_process_success_path, "w").close()
                    open(
                        sequence_process_success_path + "_" +
                        str(time.strftime("%Y_%m_%d_%H_%M_%S",
                                          time.gmtime())), "w").close()
                else:
                    # keeper: it becomes the new comparison anchor
                    prev_latlon = latlons[k]
                    prev_direction = directions[k]
                    final_file_list.append(filename)
                    final_directions.append(directions[k])
                    final_capture_times.append(capture_times[k])
        # ---------------------------------------

        # FINALIZE ------------------------------------
        # process in chunks of at most MAX_SEQUENCE_LENGTH images, each
        # chunk under a fresh sequence uuid
        for i in range(0, len(final_file_list), MAX_SEQUENCE_LENGTH):
            finalize_sequence_processing(
                str(uuid.uuid4()),
                final_file_list[i:i + MAX_SEQUENCE_LENGTH],
                final_directions[i:i + MAX_SEQUENCE_LENGTH],
                final_capture_times[i:i + MAX_SEQUENCE_LENGTH],
                import_path, verbose)
    print("Sub process finished")
def process_geotag_properties(import_path, geotag_source="exif",
                              geotag_source_path=None, offset_time=0.0,
                              offset_angle=0.0, local_time=False,
                              sub_second_interval=0.0,
                              use_gps_start_time=False, verbose=False,
                              rerun=False, skip_subfolders=False,
                              video_file=None):
    """Run the geotag process for every image under import_path, reading
    gps/time either from image EXIF or from an external log source.

    :param geotag_source: one of "exif", "gpx", "nmea", "csv",
        "gopro_video", "blackvue_videos", "json"
    :param geotag_source_path: path to the external log; required for any
        source other than "exif"
    """
    # sanity check if video file is passed
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # in case of video processing, adjust the import path
    if video_file:
        # set sampling path
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        import_path = os.path.join(
            os.path.abspath(import_path),
            video_sampling_path) if import_path else os.path.join(
            os.path.dirname(video_file), video_sampling_path)

    # basic check for all
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # get list of file to process
    process_file_list = processing.get_process_file_list(
        import_path, "geotag_process", rerun, verbose, skip_subfolders)
    if not len(process_file_list):
        print("No images to run geotag process")
        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")

    # sanity checks
    if geotag_source_path is None and geotag_source != "exif":
        # if geotagging from external log file, path to the external log file
        # needs to be provided, if not, exit
        print("Error, if geotagging from external log, rather than image EXIF, you need to provide full path to the log file.")
        # BUGFIX: the original passed "geotag_process" "failed" (implicit
        # string concatenation — a missing comma), which merged process name
        # and status into one argument and shifted `verbose` into the
        # status slot. Same fix applied below.
        processing.create_and_log_process_in_list(
            process_file_list, "geotag_process", "failed", verbose)
        sys.exit(1)
    elif geotag_source != "exif" and not os.path.isfile(
            geotag_source_path) and not os.path.isdir(geotag_source_path):
        print("Error, " + geotag_source_path + " file source of gps/time properties does not exist. If geotagging from external log, rather than image EXIF, you need to provide full path to the log file.")
        processing.create_and_log_process_in_list(
            process_file_list, "geotag_process", "failed", verbose)
        sys.exit(1)

    # function calls
    if geotag_source == "exif":
        geotag_properties = processing.geotag_from_exif(
            process_file_list, import_path, offset_angle, verbose)
    elif geotag_source == "gpx" or geotag_source == "nmea":
        geotag_properties = processing.geotag_from_gps_trace(
            process_file_list, import_path, geotag_source,
            geotag_source_path, offset_time, offset_angle, local_time,
            sub_second_interval, use_gps_start_time, verbose)
    elif geotag_source == "csv":
        geotag_properties = processing.geotag_from_csv(
            process_file_list, import_path, geotag_source_path, offset_time,
            offset_angle, verbose)
    elif geotag_source == "gopro_video":
        geotag_properties = processing.geotag_from_gopro_video(
            process_file_list, import_path, geotag_source_path, offset_time,
            offset_angle, local_time, sub_second_interval,
            use_gps_start_time, verbose)
    elif geotag_source == "blackvue_videos":
        geotag_properties = processing.geotag_from_blackvue_video(
            process_file_list, import_path, geotag_source_path, offset_time,
            offset_angle, local_time, sub_second_interval,
            use_gps_start_time, verbose)
    elif geotag_source == "json":
        geotag_properties = processing.geotag_from_json(
            process_file_list, import_path, geotag_source_path, offset_time,
            offset_angle, verbose)
    print("Sub process finished")
def process_user_properties(import_path, user_name,
                            organization_username=None,
                            organization_key=None, private=False,
                            master_upload=False, verbose=False, rerun=False,
                            skip_subfolders=False, video_file=None):
    """Resolve user properties (regular or master upload) for every image
    under import_path and log the "user_process" result.

    Private imports require an organization user name or key.
    """
    # validate the video path, if one was provided
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # for video frames, the import path becomes the sampling directory
    if video_file:
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        if import_path:
            import_path = os.path.join(
                os.path.abspath(import_path), video_sampling_path)
        else:
            import_path = os.path.join(
                os.path.dirname(video_file), video_sampling_path)

    # the import directory itself must exist
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # collect the images still needing this process step
    process_file_list = processing.get_process_file_list(
        import_path, "user_process", rerun, verbose, skip_subfolders)
    if not process_file_list:
        print("No images to run user process")
        print("If the images have already been processed and not yet uploaded, they can be processed again, by passing the argument --rerun")

    # sanity checks
    if not user_name:
        print("Error, must provide a valid user name, exiting...")
        processing.create_and_log_process_in_list(process_file_list,
                                                  "user_process",
                                                  "failed",
                                                  verbose)
        sys.exit(1)
    if private and not (organization_username or organization_key):
        print("Error, if the import belongs to a private repository, you need to provide a valid organization user name or key to which the private repository belongs to, exiting...")
        processing.create_and_log_process_in_list(process_file_list,
                                                  "user_process",
                                                  "failed",
                                                  verbose)
        sys.exit(1)

    # resolve properties, either for a master or a regular upload
    if master_upload:
        user_properties = processing.user_properties_master(
            user_name, import_path, process_file_list, organization_key,
            private, verbose)
    else:
        user_properties = processing.user_properties(
            user_name, import_path, process_file_list, organization_username,
            organization_key, private, verbose)

    # write data and logs
    processing.create_and_log_process_in_list(process_file_list,
                                              "user_process",
                                              "success",
                                              verbose,
                                              user_properties)
    print("Sub process ended")
def post_process(import_path, split_import_path=None, video_file=None,
                 summarize=False, move_images=False, move_duplicates=False,
                 move_uploaded=False, save_as_json=False,
                 list_file_status=False, push_images=False,
                 skip_subfolders=False, verbose=False,
                 save_local_mapping=False):
    """Post-process an import path: summarize status, list file status,
    push uploaded images, and/or split images into subdirectories by their
    import status (failed / duplicates / uploaded).

    All status information is read from the mapillary log files next to
    the images.
    """
    # return if nothing specified
    if not summarize and not move_images and not list_file_status and not push_images and not move_duplicates and not move_uploaded and not save_local_mapping:
        print("No post processing action specified.")
        return

    # sanity check if video file is passed
    if video_file and not (os.path.isdir(video_file)
                           or os.path.isfile(video_file)):
        print("Error, video path " + video_file +
              " does not exist, exiting...")
        sys.exit(1)

    # in case of video processing, adjust the import path
    if video_file:
        # set sampling path
        video_sampling_path = processing.sampled_video_frames_rootpath(
            video_file)
        import_path = os.path.join(
            os.path.abspath(import_path),
            video_sampling_path) if import_path else os.path.join(
            os.path.dirname(video_file), video_sampling_path)

    # basic check for all
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    if save_local_mapping:
        # NOTE(review): the boolean flag `save_local_mapping` shadows what
        # appears to be a module-level helper of the same name; calling the
        # flag directly (as the original did) raises TypeError. Fetch the
        # helper from module globals to bypass the shadowing — confirm the
        # helper exists at module level.
        local_mapping = globals()["save_local_mapping"](import_path)
        # BUGFIX: `local_mapping_filepath` was referenced but never defined
        # (NameError at runtime); derive a csv path under the import path.
        # TODO confirm the intended file name/location against callers.
        local_mapping_filepath = os.path.join(
            import_path, "local_mapping.csv")
        with open(local_mapping_filepath, "w") as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=",")
            for row in local_mapping:
                csvwriter.writerow(row)
    else:
        print("Reading import logs for import path {}...".format(import_path))

        # collect logs
        summary_dict = {}
        status_list_dict = {}

        total_files = uploader.get_total_file_list(import_path)
        total_files_count = len(total_files)

        # upload logs
        uploaded_files = uploader.get_success_upload_file_list(
            import_path, skip_subfolders)
        uploaded_files_count = len(uploaded_files)
        failed_upload_files = uploader.get_failed_upload_file_list(
            import_path, skip_subfolders)
        failed_upload_files_count = len(failed_upload_files)
        to_be_finalized_files = uploader.get_finalize_file_list(import_path)
        to_be_finalized_files_count = len(to_be_finalized_files)

        summary_dict["total images"] = total_files_count
        summary_dict["upload summary"] = {
            "successfully uploaded": uploaded_files_count,
            "failed uploads": failed_upload_files_count,
            "uploaded to be finalized": to_be_finalized_files_count
        }
        status_list_dict["successfully uploaded"] = uploaded_files
        status_list_dict["failed uploads"] = failed_upload_files
        status_list_dict["uploaded to be finalized"] = to_be_finalized_files

        # process logs
        summary_dict["process summary"] = {}
        process_steps = ["user_process", "import_meta_process",
                         "geotag_process", "sequence_process",
                         "upload_params_process",
                         "mapillary_image_description"]
        # (removed unused local `process_status`; the two statuses are
        # queried explicitly below)
        for step in process_steps:
            process_success = len(processing.get_process_status_file_list(
                import_path, step, "success", skip_subfolders))
            process_failed = len(processing.get_process_status_file_list(
                import_path, step, "failed", skip_subfolders))
            summary_dict["process summary"][step] = {
                "failed": process_failed,
                "success": process_success
            }

        duplicates_file_list = processing.get_duplicate_file_list(
            import_path, skip_subfolders)
        duplicates_file_list_count = len(duplicates_file_list)
        summary_dict["process summary"]["duplicates"] = duplicates_file_list_count
        status_list_dict["duplicates"] = duplicates_file_list

        # processed for upload
        to_be_uploaded_files = uploader.get_upload_file_list(
            import_path, skip_subfolders)
        to_be_uploaded_files_count = len(to_be_uploaded_files)
        summary_dict["process summary"]["processed_not_yet_uploaded"] = to_be_uploaded_files_count
        status_list_dict["processed_not_yet_uploaded"] = to_be_uploaded_files

        # summary
        if summarize:
            print("")
            print("Import summary for import path {} :".format(import_path))
            print(json.dumps(summary_dict, indent=4))
            if save_as_json:
                try:
                    processing.save_json(summary_dict, os.path.join(
                        import_path, "mapillary_import_summary.json"))
                except Exception as e:
                    print("Could not save summary into json at {}, due to {}".format(
                        os.path.join(import_path,
                                     "mapillary_import_summary.json"), e))

        # list file status
        if list_file_status:
            print("")
            print("List of file status for import path {} :".format(
                import_path))
            print(json.dumps(status_list_dict, indent=4))
            if save_as_json:
                try:
                    processing.save_json(status_list_dict, os.path.join(
                        import_path,
                        "mapillary_import_image_status_list.json"))
                except Exception as e:
                    print("Could not save image status list into json at {}, due to {}".format(
                        os.path.join(
                            import_path,
                            "mapillary_import_image_status_list.json"), e))

        # push images that were uploaded successfully
        # collect upload params
        if push_images:
            to_be_pushed_files = uploader.get_success_only_manual_upload_file_list(
                import_path, skip_subfolders)
            params = {}
            for image in tqdm(to_be_pushed_files, desc="Pushing images"):
                log_root = uploader.log_rootpath(image)
                upload_params_path = os.path.join(
                    log_root, "upload_params_process.json")
                if os.path.isfile(upload_params_path):
                    with open(upload_params_path, "rb") as jf:
                        params[image] = json.load(
                            jf, object_hook=uploader.ascii_encode_dict)
            # get the s3 locations of the sequences
            finalize_params = uploader.process_upload_finalization(
                to_be_pushed_files, params)
            uploader.finalize_upload(finalize_params)
            # flag finalization for each file
            uploader.flag_finalization(to_be_pushed_files)

        if move_images or move_duplicates or move_uploaded:
            print("")
            print("Note that images will be moved along with their mapillary logs in order to preserve the import status")
            # (typo fix: was `defualt_split_import_path`, local only)
            default_split_import_path = os.path.join(
                import_path, "mapillary_import_split_images")
            if not split_import_path:
                final_split_path = default_split_import_path
                print("")
                print("Split import path not provided and will therefore be set to default path {}".format(
                    default_split_import_path))
            if split_import_path:
                # BUGFIX: the original tested os.path.isfile on the split
                # path, so an existing split *directory* was always
                # rejected; test for a directory instead.
                if not os.path.isdir(split_import_path):
                    final_split_path = default_split_import_path
                    print("Split import path does not exist, split import path will be set to default path {}".format(
                        default_split_import_path))
                else:
                    final_split_path = split_import_path
            print("")
            print("Splitting import path {} into {} based on image import status...".format(
                import_path, final_split_path))

        if move_images:
            move_duplicates = True
            move_uploaded = True
            # move failed uploads
            if not len(failed_upload_files):
                print("")
                print("There are no failed upload images in the specified import path.")
            else:
                failed_upload_path = os.path.join(
                    final_split_path, "upload_failed")
                if not os.path.isdir(failed_upload_path):
                    os.makedirs(failed_upload_path)
                for failed in failed_upload_files:
                    failed_upload_image_path = os.path.join(
                        failed_upload_path, os.path.basename(failed))
                    # BUGFIX: the original renamed onto the directory path
                    # (`failed_upload_path`) instead of the computed
                    # per-image destination, as the duplicates branch does.
                    os.rename(failed, failed_upload_image_path)
                    failed_upload_log_path = os.path.dirname(
                        uploader.log_rootpath(failed_upload_image_path))
                    if not os.path.isdir(failed_upload_log_path):
                        os.makedirs(failed_upload_log_path)
                    shutil.move(uploader.log_rootpath(failed),
                                failed_upload_log_path)
                print("")
                print("Done moving failed upload images to {}".format(
                    failed_upload_path))

        if move_duplicates:
            if not len(duplicates_file_list):
                print("")
                print("There were no duplicates flagged in the specified import path. If you are processing the images with mapillary_tools and would like to flag duplicates, you must specify --advanced --flag_duplicates")
            else:
                duplicate_path = os.path.join(final_split_path, "duplicates")
                if not os.path.isdir(duplicate_path):
                    os.makedirs(duplicate_path)
                for duplicate in duplicates_file_list:
                    duplicate_image_path = os.path.join(
                        duplicate_path, os.path.basename(duplicate))
                    os.rename(duplicate, duplicate_image_path)
                    duplicate_log_path = os.path.dirname(
                        uploader.log_rootpath(duplicate_image_path))
                    if not os.path.isdir(duplicate_log_path):
                        os.makedirs(duplicate_log_path)
                    shutil.move(uploader.log_rootpath(duplicate),
                                duplicate_log_path)
                print("")
                print("Done moving duplicate images to {}".format(
                    duplicate_path))

        if move_uploaded:
            if not len(uploaded_files):
                print("")
                print("There are no successfuly uploaded images in the specified import path.")
            else:
                upload_success_path = os.path.join(
                    final_split_path, "upload_success")
                if not os.path.isdir(upload_success_path):
                    os.makedirs(upload_success_path)
                for uploaded in uploaded_files:
                    uploaded_image_path = os.path.join(
                        upload_success_path, os.path.basename(uploaded))
                    # BUGFIX: same directory-vs-file destination bug as in
                    # the failed-uploads branch above.
                    os.rename(uploaded, uploaded_image_path)
                    uploaded_log_path = os.path.dirname(
                        uploader.log_rootpath(uploaded_image_path))
                    if not os.path.isdir(uploaded_log_path):
                        os.makedirs(uploaded_log_path)
                    shutil.move(uploader.log_rootpath(uploaded),
                                uploaded_log_path)
                print("")
                print("Done moving successfully uploaded images to {}".format(
                    upload_success_path))