def temporal_filter_callback(msg):
    """Runs temporal filtering according to parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
    """
    print("Running temporal filtering...")

    # If such frames do not exist, S3 simply does not download them
    filter_frames = get_frame_range(msg["filter_first"], msg["filter_last"])
    types_to_fetch = [("color", msg["level"]), ("disparity", msg["level"])]
    if msg["use_foreground_masks"]:
        types_to_fetch.append(("foreground_masks", msg["level"]))

    did_download = download_rig(msg)
    did_download |= download_image_types(msg, types_to_fetch, filter_frames)

    local_msg = copy(msg)
    # disparity_level is automatically populated by app
    local_msg["disparity"] = ""
    _run_bin(local_msg)

    output_frames = get_frame_range(msg["first"], msg["last"])
    did_upload = upload_image_type(
        msg, "disparity_time_filtered", output_frames, level=msg["level"])
    _clean_worker(did_download, did_upload)
def generate_foreground_masks_callback(msg):
    """Runs foreground mask generation according to parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
    """
    print("Running foreground mask generation...")

    did_download = download_rig(msg)
    did_download |= download_image_types(msg, [("color", msg["level"])])
    did_download |= download_image_type(msg, "background_color",
                                        [msg["background_frame"]],
                                        msg["level"])

    # Point the binary at the locally downloaded inputs and the mask output dir
    local_msg = copy(msg)
    for image_type, level in (
        ("color", msg["level"]),
        ("background_color", msg["level"]),
        ("foreground_masks", msg["dst_level"]),
    ):
        local_msg[image_type] = local_image_type_path(msg, image_type, level)
    _run_bin(local_msg)

    did_upload = upload_image_type(msg, "foreground_masks",
                                   level=msg["dst_level"])
    _clean_worker(did_download, did_upload)
def _run_upsample(msg, run_upload=True):
    """Runs disparity upsampling according to parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
        run_upload (bool, optional): Whether or not to upload the results
            after the binary runs.

    Returns:
        tuple(bool, bool): Respectively whether or not a download and upload
            were performed.
    """
    image_types_to_level = [(msg["image_type"], msg["level"])]
    msg_cp = copy(msg)
    if msg["image_type"] == "disparity":
        color_image_type = "color"
        # Foreground upsampling needs masks at both the source and dest levels
        image_types_to_level += [
            ("foreground_masks", msg["level"]),
            ("foreground_masks", msg["dst_level"]),
        ]
        msg_cp["foreground_masks_in"] = local_image_type_path(
            msg, "foreground_masks", msg["level"])
        msg_cp["foreground_masks_out"] = local_image_type_path(
            msg, "foreground_masks", msg["dst_level"])
        # NOTE(fix): the original assigned msg_cp["background_disp"] twice
        # with the identical value; a single assignment suffices.
        msg_cp["background_disp"] = local_image_type_path(
            msg, "background_disp", msg["dst_level"])
        download_image_type(msg, "background_disp", [msg["background_frame"]],
                            msg["dst_level"])
    elif msg["image_type"] == "background_disp":
        color_image_type = "background_color"
        # Background upsampling doesn't use masks
        msg_cp["foreground_masks_in"] = ""
        msg_cp["foreground_masks_out"] = ""
    image_types_to_level.append((color_image_type, msg["dst_level"]))

    ran_download = download_image_types(msg, image_types_to_level)
    ran_download |= download_rig(msg)

    msg_cp["disparity"] = local_image_type_path(msg, msg["image_type"],
                                                msg["level"])
    msg_cp["output"] = local_image_type_path(
        msg, config.type_to_upsample_type[msg["image_type"]])
    msg_cp["color"] = local_image_type_path(msg, color_image_type,
                                            msg["dst_level"])
    _run_bin(msg_cp)

    # BUG FIX: ran_upload was previously unbound when run_upload=False, which
    # raised UnboundLocalError at the return statement below.
    ran_upload = False
    if run_upload:
        ran_upload = upload_image_type(
            msg, config.type_to_upsample_type[msg["image_type"]])
    return ran_download, ran_upload
def simple_mesh_renderer_callback(msg):
    """Runs the simple mesh renderer to generate exports according to
    parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
    """
    print("Generating exports...")
    msg_cp = copy(msg)
    frames = get_frame_range(msg_cp["first"], msg_cp["last"])
    ran_download = download_rig(msg)
    # BUG FIX: this line used plain `=`, discarding the rig-download flag;
    # every sibling callback accumulates download results with `|=`.
    ran_download |= download_image_type(msg, msg_cp["color_type"], frames)
    ran_download |= download_image_type(msg, msg_cp["disparity_type"], frames)
    msg_cp["color"] = local_image_type_path(msg, msg_cp["color_type"])
    msg_cp["disparity"] = local_image_type_path(msg, msg_cp["disparity_type"])
    msg_cp["output"] = local_image_type_path(msg, msg_cp["dst_image_type"])
    # Fixed camera pose passed through to the renderer binary
    msg_cp["position"] = '"0.0 0.0 0.0"'
    msg_cp["forward"] = '"-1.0 0.0 0.0"'
    msg_cp["up"] = '"0.0 0.0 1.0"'
    _run_bin(msg_cp)
    ran_upload = upload_image_type(msg, msg_cp["dst_image_type"], frames)
    _clean_worker(ran_download, ran_upload)
def depth_estimation_callback(msg):
    """Runs depth estimation according to parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
    """
    print("Running depth estimation...")
    did_download = False
    local_msg = copy(msg)
    start_level = msg["level_start"]
    # Levels above num_levels - 1 have no coarser level to seed from
    has_coarser_level = start_level < msg["num_levels"] - 1

    if msg["image_type"] == "disparity":
        types_to_fetch = [("color", start_level)]
        if msg["use_foreground_masks"]:
            did_download |= download_image_type(msg, "background_disp",
                                                [msg["background_frame"]],
                                                start_level)
            types_to_fetch.append(("foreground_masks", start_level))
        if has_coarser_level:
            types_to_fetch.append(("disparity", start_level + 1))
            if msg["use_foreground_masks"]:
                types_to_fetch.append(("foreground_masks", start_level + 1))
    else:
        types_to_fetch = [("background_color", start_level)]
        if has_coarser_level:
            types_to_fetch.append(("background_disp", start_level + 1))
        local_msg["color"] = local_image_type_path(msg,
                                                   "background_color_levels")
        local_msg["output_root"] = os.path.join(msg["input_root"],
                                                "background")

    did_download |= download_rig(msg)
    did_download |= download_image_types(msg, types_to_fetch)
    _run_bin(local_msg)
    did_upload = upload_image_type(msg, msg["image_type"],
                                   level=msg["level_end"])
    _clean_worker(did_download, did_upload)
def resize_images_callback(msg):
    """Runs image resizing according to parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
    """
    print("Running image resizing...")
    image_types_to_level = [(msg["image_type"], None)]
    ran_download = download_rig(msg)
    ran_download |= download_image_types(msg, image_types_to_level)
    with open(local_rig_path(msg), "r") as f:
        rig = json.load(f)
    local_src_dir = local_image_type_path(msg, msg["image_type"])
    local_dst_dir = local_image_type_path(
        msg, config.type_to_levels_type[msg["image_type"]])
    resize_frames(local_src_dir, local_dst_dir, rig, msg["first"],
                  msg["last"], msg["threshold"])

    # BUG FIX: ran_upload was unbound if msg["dst_level"] was empty, and each
    # iteration overwrote it with `=`, so only the last level's result was
    # reported. Accumulate with `|=` from False instead.
    ran_upload = False
    for level in msg["dst_level"]:
        ran_upload |= upload_image_type(msg, msg["image_type"], level=level)

    # Clean up workspace to prevent using too much disk space on workers
    _clean_worker(ran_download, ran_upload)
def convert_to_binary_callback(msg):
    """Runs binary conversion according to parameters read from the message.

    Args:
        msg (dict[str, str]): Message received from RabbitMQ publisher.
    """
    print("Converting to binary...")
    msg_cp = copy(msg)
    ran_download = download_rig(msg)

    # Derive the fused rig filename, e.g. "rig.json" -> "rig_fused.json"
    rig_json = os.path.basename(msg["rig"])
    ext_index = rig_json.index(".")
    fused_json = f"{rig_json[:ext_index]}_fused{rig_json[ext_index:]}"

    if msg["run_conversion"]:
        image_types_to_level = [
            (msg["color_type"], None),
            (msg["disparity_type"], msg["level"]),
        ]
        msg_cp["disparity"] = local_image_type_path(msg, msg["disparity_type"],
                                                    msg["level"])
        msg_cp["color"] = local_image_type_path(msg, msg["color_type"])
        msg_cp["fused"] = ""  # fusion is done independently from conversion
        ran_download |= download_image_types(msg, image_types_to_level)

        # If we only have color levels uploaded to S3, we fall back to level_0
        if len(os.listdir(msg_cp["color"])) == 0:
            # BUG FIX: this used plain `=`, discarding earlier download flags
            ran_download |= download_image_types(msg, [(msg["color_type"], 0)])
            msg_cp["color"] = local_image_type_path(msg, msg["color_type"], 0)
    else:
        image_types_to_level = [("bin", None)]
        local_fused_dir = local_image_type_path(msg, "fused")
        # Paths are explicitly emptied to avoid path verifications
        msg_cp["color"] = ""
        msg_cp["disparity"] = ""
        msg_cp["foreground_masks"] = ""
        msg_cp["fused"] = local_fused_dir
        ran_download |= download_image_types(msg, image_types_to_level)
        ran_download |= download(
            src=os.path.join(remote_image_type_path(msg, "bin"), fused_json),
            dst=os.path.join(local_image_type_path(msg, "bin"), fused_json),
        )

    msg_cp["bin"] = local_image_type_path(msg, "bin")
    # BUG FIX: original called os.makedirs(msg["bin"], ...), but the local bin
    # path was just stored in msg_cp["bin"]; msg carries no "bin" path.
    os.makedirs(msg_cp["bin"], exist_ok=True)
    _run_bin(msg_cp)

    if msg["run_conversion"]:
        ran_upload = upload_image_type(msg, "bin")
        ran_upload |= upload(
            src=os.path.join(local_image_type_path(msg, "bin"), fused_json),
            dst=os.path.join(remote_image_type_path(msg, "bin"), fused_json),
        )
    else:
        # We use a raw upload since upload_image_type only handles frames but
        # we want to also upload the fused json here
        ran_upload = upload(
            src=local_image_type_path(msg, "fused"),
            dst=remote_image_type_path(msg, "fused"),
            filters=["*"],
        )
    _clean_worker(ran_download, ran_upload)