def get_frames_not_processed(all_frames_path, output_path):
    all_images = file_helper.get_frames_from_folder(all_frames_path)
    processed = file_helper.get_frames_from_folder(output_path)
    # an input frame counts as processed once its .png counterpart exists
    missing_frames = [
        frame for frame in all_images
        if frame.replace('jpg', 'png') not in processed
    ]
    return missing_frames


def apply_depth_inference_on_folder(folder_images_path, output_path):
    """
    Applies depth inference on every frame of the given folder that has not
    been processed yet. The module-level CHECKPOINT_PATH is used to load the
    monodepth model.

    :param folder_images_path: where our images are located
    :param output_path: where the results of our depth inference will be saved
    :return: the list of frames present in the output folder after inference
    """
    params = generate_default_monodepth_parameters()
    output_path = file_helper.guarantee_path_preconditions(output_path)
    left, model, sess = init_tensorflow(CHECKPOINT_PATH, params)
    missing_frames = get_not_processed_frames(folder_images_path, output_path)
    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        fs = [
            executor.submit(apply_inference, folder_images_path, image, left,
                            model, output_path, sess)
            for image in missing_frames
        ]
        concurrent.futures.wait(fs)
    K.clear_session()
    return file_helper.get_frames_from_folder(output_path)


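# A minimal usage sketch (the paths below are hypothetical; it assumes the
# module-level CHECKPOINT_PATH already points at a valid monodepth checkpoint):
#
#   depth_maps = apply_depth_inference_on_folder('/data/video_frames',
#                                                '/data/depth_maps')
#   print('%d depth maps available' % len(depth_maps))

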
def move_files(item, dataset_path, path_to_look, type, output_path):
    frames_path = os.path.join(dataset_path, type, item, path_to_look)
    item_frames = file_helper.get_frames_from_folder(frames_path)
    threads = []
    for item_frame in item_frames:
        thread = Thread(target=move_item,
                        args=(frames_path, item, item_frame, output_path, type))
        threads.append(thread)
        thread.start()
    for thread in threads:  # iterates over the threads
        thread.join()  # waits until the thread has finished work


def extract_new_gray_world_maps(vole_path, converted_images_path,
                                segmented_images_path, output_gray_world_path,
                                sigma, n, p):
    # collect the names of the gray-world maps that were already generated
    frames_done = file_helper.get_frames_from_folder(output_gray_world_path)
    files = [os.path.basename(i) for i in frames_done]
    with ProcessPoolExecutor() as executor:
        for i in os.listdir(str(segmented_images_path) + "/"):
            executor.submit(generate_grayworld, converted_images_path, files,
                            i, n, output_gray_world_path, p,
                            segmented_images_path, sigma, vole_path)


def extract_rbd_saliency_folder(folder_path, output_root):
    frames = file_helper.get_frames_from_folder(folder_path)
    threads = []
    for frame in frames:
        path_frame = os.path.join(folder_path, frame)
        output_path = os.path.join(output_root, frame)
        thread_item = Thread(target=extract_rbd_saliency,
                             args=(path_frame, output_path))
        threads.append(thread_item)
        thread_item.start()
    for thread in threads:
        thread.join()


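# Example usage (hypothetical paths), writing one saliency map per frame:
#
#   extract_rbd_saliency_folder('/data/video_frames', '/data/saliency_maps')

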
def process_item(output_path, dataset_alias, pai, dataset_type, dataset_root,
                 data_type, item, detector, fa):
    # path like: /static/results/cbsr/train/fake/
    result_path = os.path.join(output_path, dataset_alias, pai, dataset_type,
                               data_type)
    # path where the new data will be stored
    raw_frames_path = os.path.join(result_path, "raw")

    # first of all, back up all the frames into the results folder, so we work
    # on the copies and never directly on the dataset
    original_items_path = os.path.join(dataset_root, item)
    make_copy_frames(original_items_path, raw_frames_path)

    with concurrent.futures.ProcessPoolExecutor() as executor:
        # iterate over all of our feature maps
        feature_configs = get_feature_configs(result_path)
        for config in feature_configs:
            if config.feature_alias:
                l.log('Extracting feature: ' + config.feature_alias)
                file_helper.guarantee_path_preconditions(
                    config.results_unaligned_path)
                file_helper.guarantee_path_preconditions(
                    config.results_aligned_path)
                executor.submit(perform_extraction, config.origin_path,
                                config.results_unaligned_path,
                                config.extractor)

    frames_original = file_helper.get_frames_from_folder(raw_frames_path)

    # and now align all the frames
    try:
        with ProcessPoolExecutor() as executor:
            for current_frame_name in frames_original:
                if is_aligned_for_all_extractors(current_frame_name,
                                                 feature_configs):
                    continue
                executor.submit(align_single_frame, raw_frames_path,
                                current_frame_name, detector, fa,
                                feature_configs)
        gc.collect()
    except Exception as exception:
        l.logE(exception)


def make_copy_frames(origin_frames_path, copy_frames_path):
    all_original_frames = file_helper.get_frames_from_folder(origin_frames_path)
    file_helper.guarantee_path_preconditions(copy_frames_path)
    if not file_helper.count_files_is_same(origin_frames_path, copy_frames_path):
        for original_frame in all_original_frames:
            original_frame_path = os.path.join(origin_frames_path,
                                               original_frame)
            original_img = cv2.imread(original_frame_path)
            output_path = os.path.join(copy_frames_path, original_frame)
            cv2.imwrite(output_path, original_img)
        l.log("Images copied successfully")
    else:
        l.log("Images were already copied")


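# Note: round-tripping through cv2.imread/cv2.imwrite re-encodes each frame,
# which is lossy for JPEG. If a byte-for-byte copy is acceptable, a sketch of
# an alternative (not the original approach) using only the standard library:
import shutil


def make_copy_frames_exact(origin_frames_path, copy_frames_path):
    all_original_frames = file_helper.get_frames_from_folder(origin_frames_path)
    file_helper.guarantee_path_preconditions(copy_frames_path)
    for original_frame in all_original_frames:
        # copy2 preserves the original bytes and the file metadata
        shutil.copy2(os.path.join(origin_frames_path, original_frame),
                     os.path.join(copy_frames_path, original_frame))

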
def extract_illumination_maps(vole_path, config_path, converted_images_path,
                              segmented_images_path, output_illuminated_path):
    # collect the names of the illumination maps that were already generated
    frames_done = file_helper.get_frames_from_folder(output_illuminated_path)
    files = [os.path.basename(i) for i in frames_done]
    with ProcessPoolExecutor() as executor:
        for i in os.listdir(str(segmented_images_path) + "/"):
            executor.submit(generate_illuminant, config_path,
                            converted_images_path, files, i,
                            output_illuminated_path, segmented_images_path,
                            vole_path)


def segment_all_images(vole_path, images_path, converted_path,
                       output_path_segmented, sigma, k, min_size,
                       max_intensity, min_intensity):
    already_existent_files = file_helper.get_frames_from_folder(
        output_path_segmented)
    with ProcessPoolExecutor() as executor:
        for current_image in os.listdir(images_path):
            # the segmenter writes .png maps for .jpg inputs, so compare accordingly
            if current_image.replace('jpg', 'png') not in already_existent_files:
                executor.submit(generate_segments, converted_path,
                                current_image, images_path, k, max_intensity,
                                min_intensity, min_size, output_path_segmented,
                                sigma, vole_path)
            else:
                print('Segment already exists!')


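# A hypothetical call; the vole binary location, folders, and parameter values
# below are illustrative only:
#
#   segment_all_images(vole_path='/opt/vole/bin/vole',
#                      images_path='/data/frames',
#                      converted_path='/data/frames_converted',
#                      output_path_segmented='/data/segmented',
#                      sigma=0.5, k=300, min_size=15,
#                      max_intensity=0.98, min_intensity=0.02)

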
def is_processing_done(output_path, total_frames):
    frames_done = file_helper.get_frames_from_folder(output_path)
    return len(frames_done) == total_frames


def get_not_processed_frames(all_frames_path, output_path):
    all_images = file_helper.get_frames_from_folder(all_frames_path)
    processed = file_helper.get_frames_from_folder(output_path)
    # unlike get_frames_not_processed, the output here keeps the same file
    # name and extension as the input, so names are compared directly
    missing_frames = [frame for frame in all_images if frame not in processed]
    return missing_frames


def move_files_to_classify(frames_path_origin, type):
    # currently this only lists the frames in the origin folder; nothing is
    # moved, the result is discarded, and the type argument is unused
    file_helper.get_frames_from_folder(frames_path_origin)