def generate_rollout_amt_visualizations():
    """Render top-down rollout GIFs plus instruction files for AMT workers.

    Reads the dataset named in Setup (``viz_dataset_name``, falling back to
    the eval tmp dataset derived from model/run_name), renders one animated
    GIF per segment and writes the segment's instruction text next to it.
    """
    setup = P.get_current_parameters()["Setup"]
    dataset_name = setup.get("viz_dataset_name") or get_eval_tmp_dataset_name(
        setup["model"], setup["run_name"])
    print(f"Generating AMT animations for dataset: {dataset_name}")

    # Draw against the simulator backdrop even though the data is real-drone.
    pic_domain = "sim"
    data_domain = "real"

    # Some quick params. TODO: Bring this into json
    viz_params = {
        "draw_drone": True,
        "draw_trajectory": True,
        "draw_fov": True,
        "include_vdist": False,
        "include_layer": None,
        "include_instr": False,
    }

    print("Loading data")
    _, _, test_envs = get_restricted_env_id_lists()
    # TODO: Grab the correct env list
    env_list = test_envs

    viz = RolloutVisualizer(resolution=400)
    out_dir = os.path.join(get_rollout_viz_dir(), f"{dataset_name}-{data_domain}")
    os.makedirs(out_dir, exist_ok=True)

    for env_id in env_list:
        try:
            rollout = load_single_env_from_dataset(dataset_name, env_id, "supervised")
        except FileNotFoundError:
            print(f"Skipping env: {env_id}")
            continue
        if not rollout:
            print(f"Skipping env: {env_id}. Rollout exists but is EMPTY!")
            continue

        for seg in split_into_segs(rollout):
            seg_idx = seg[0]["seg_idx"]
            seg_name = f"{env_id}:0:{seg_idx}-{data_domain}"
            # Generate and save gif
            frames = viz.top_down_visualization(env_id, seg_idx, seg, pic_domain, viz_params)
            print("Saving GIF")
            viz.presenter.save_gif(frames, os.path.join(out_dir, f"{seg_name}-roll.gif"), fps=5.0)
            # Save instruction
            with open(os.path.join(out_dir, f"{seg_name}-instr.txt"), "w") as fp:
                fp.write(seg[0]["instruction"])
    print("ding")
def evaluate_saved_rollouts():
    """Score previously collected rollouts for the configured run.

    Loads the eval tmp dataset for Setup's model/run_name, warns about any
    eval environments that have no rollout data, then evaluates the rollouts
    with three DataEvalNL configurations (aug_len=1, aug_len=2, and combined
    with image saving) and logs all results to the run's results directory.
    """
    params = P.get_current_parameters()
    setup = params["Setup"]
    model_name = setup["model"]
    run_name = setup["run_name"]
    eval_dname = get_eval_tmp_dataset_name(model_name, run_name)

    # Fixed: was set(list(sorted(...))) — sorting is pointless when building a set.
    eval_envs = set(get_correct_eval_env_id_list())
    rollouts = load_multiple_env_data(eval_dname)
    # Envs that actually have at least one recorded sample.
    present_envs = {rollout[0]["env_id"] for rollout in rollouts if len(rollout) > 0}
    missing_envs = eval_envs - present_envs

    logdir = get_results_dir(run_name)
    if missing_envs:
        print(f"Warning! {len(missing_envs)} envs missing: {missing_envs}")
        #sys.exit(1)

    log("", logdir)
    log("--------------------------------------------------------------------------------------------", logdir)
    log(f"Evaluating rollouts for run {run_name}", logdir)
    log(f" using dataset {eval_dname}", logdir)
    log(f" missing envs {missing_envs}", logdir)
    log("--------------------------------------------------------------------------------------------", logdir)

    # Evaluate single-instruction segments (aug_len=1).
    evaler1 = DataEvalNL(setup["run_name"] + "1-1", save_images=False, entire_trajectory=False, aug_len=1)
    evaler1.evaluate_dataset(rollouts)
    results1 = evaler1.get_results()

    # Evaluate two-instruction segments (aug_len=2).
    evaler2 = DataEvalNL(setup["run_name"] + "2-2", save_images=False, entire_trajectory=False, aug_len=2)
    evaler2.evaluate_dataset(rollouts)
    results2 = evaler2.get_results()

    # Combined evaluation; this pass also saves visualization images.
    evalerf = DataEvalNL(setup["run_name"] + "1-2", save_images=True, entire_trajectory=False)
    evalerf.evaluate_dataset(rollouts)
    resultsf = evalerf.get_results()

    log(f"Results 1-1:{results1}", logdir)
    log(f"Results 2-2:{results2}", logdir)
    log(f"Results 1-2:{resultsf}", logdir)
    log(f" -- END EVALUATION FOR {run_name}-- ", logdir)
    log("--------------------------------------------------------------------------------------------", logdir)
def check_and_prompt_if_data_exists(system_namespaces):
    """Prompt before extending evaluation rollout datasets that already exist.

    Scans every parameter namespace for an on-disk eval tmp dataset. If any
    are found, asks the user whether to continue (and extend them); answering
    'n' exits the process, and unrecognized input re-prompts.
    """
    existing_datasets = []
    for namespace in system_namespaces:
        P.switch_to_namespace(namespace)
        setup = P.get_current_parameters()["Setup"]
        dname = get_eval_tmp_dataset_name(setup["model"], setup["run_name"])
        if os.path.exists(get_dataset_dir(dname)):
            existing_datasets.append(dname)

    if not existing_datasets:
        return

    print("The following evaluation rollout datasets already exist:")
    print(existing_datasets)
    print("Do you want to continue evaluation and extend these datasets?")
    while True:
        char = input("(y/n)>>>>")
        if char in ("Y", "y"):
            return
        if char in ("N", "n"):
            print("You may delete/move the existing datasets and run again!")
            sys.exit(0)
        print(f"Unrecognized input: {char}")
def generate_multiple_rollout_visualizations():
    """Assemble an AMT human-evaluation batch from pre-rendered rollout GIFs.

    For every system namespace, copies each segment's GIF into the batch
    directory under an anonymized numeric ID, builds the AMT CSV rows
    (image URL, landmark HTML, instruction), and writes the batch tables
    plus an id->agent/segment reverse-mapping table.
    """
    params, system_namespaces = setup_parameter_namespaces()
    csv_rows = []
    id_map_rows = []

    # Have a random ID for each example, so that AMT workers can't figure out
    # which IDs correspond to which agent.
    unique_ids = list(range(10000))
    random.shuffle(unique_ids)
    example_ordinal = 0

    for system_namespace in system_namespaces:
        P.switch_to_namespace(system_namespace)
        setup = P.get_current_parameters()["Setup"]
        dataset_name = get_eval_tmp_dataset_name(setup["model"], setup["run_name"])
        gif_src_dir = os.path.join(get_rollout_viz_dir(), f"{dataset_name}-{DOMAIN}")

        batch_out_dir = get_amt_batch_dir()
        images_out_dir = os.path.join(batch_out_dir, "rollout_animations")
        landmarks_out_dir = os.path.join(batch_out_dir, "landmarks")
        os.makedirs(images_out_dir, exist_ok=True)

        # Copy landmark images to the batch dir (best-effort; the destination
        # may already exist from a previous namespace).
        try:
            shutil.copytree(get_landmark_images_dir(), landmarks_out_dir)
        except Exception:
            print("Failed to copy landmark images. Already did?")

        dir_contents = os.listdir(gif_src_dir)
        gif_files = [f for f in dir_contents if f.endswith("-roll.gif")]
        instr_files = [f for f in dir_contents if f.endswith("-instr.txt")]
        assert len(gif_files) == len(instr_files)

        for gif_file in gif_files:
            example_id = unique_ids[example_ordinal]
            example_ordinal += 1

            # Collect required info about this example. Filenames look like
            # "<env>:<set>:<seg>-<domain>-roll.gif".
            seg_string, domain, _suffix = gif_file.split("-")
            env_id, set_idx, seg_idx = (int(p) for p in seg_string.split(":"))

            instr_file = f"{env_id}:{set_idx}:{seg_idx}-{domain}-instr.txt"
            instruction = read_text_file(os.path.join(gif_src_dir, instr_file))
            landmarks_html = build_landmark_html(landmarks_in_env(env_id))

            # Copy the image to its anonymized destination.
            new_gif_filename = f"{example_id}.gif"
            shutil.copy(os.path.join(gif_src_dir, gif_file),
                        os.path.join(images_out_dir, new_gif_filename))
            gif_url = BASE_URL + new_gif_filename

            # Create a row for AMT batch table
            csv_rows.append({
                "id": example_id,
                "image_url": gif_url,
                "landmarks_html": landmarks_html,
                "instruction": instruction
            })
            id_map_rows.append({
                "id": example_id,
                "agent": setup["run_name"],
                "env_id": env_id,
                "set_idx": set_idx,
                "seg_idx": seg_idx
            })

    random.shuffle(csv_rows)

    def _write_table(path, fieldnames, rows):
        # One CSV per call: header followed by the given rows.
        with open(path, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for row in rows:
                writer.writerow(row)

    # Save the tables: full batch, sandbox subset, tiny trial subset.
    _write_table(os.path.join(batch_out_dir, "amt_human_eval_batch.csv"),
                 CSV_KEYS, csv_rows)
    _write_table(os.path.join(batch_out_dir, "amt_human_eval_batch_sandbox.csv"),
                 CSV_KEYS, csv_rows[100:599])
    _write_table(os.path.join(batch_out_dir, "amt_human_eval_batch_tinytrial.csv"),
                 CSV_KEYS, csv_rows[0:20])
    # Reverse mapping table
    _write_table(os.path.join(batch_out_dir, "id_map.csv"),
                 ID_MAP_KEYS, id_map_rows)
def multiple_eval_rollout():
    """Collect evaluation rollouts for every configured system/policy.

    Loads one policy per parameter namespace, then for each evaluation
    environment rolls out every policy over the env's available segments,
    saving each policy's rollouts into its own eval tmp dataset. Envs whose
    data was already collected for a given policy are skipped. When
    ``MultipleEval.one_at_a_time`` is set, stops after one environment so
    the script can be re-run per env (useful on the real drone).
    """
    params, system_namespaces = setup_parameter_namespaces()
    setup_overlay = params["MultipleEval"]["SetupOverlay"]
    domain = "real" if setup_overlay["real_drone"] else "sim"
    one_at_a_time = params["MultipleEval"]["one_at_a_time"]

    check_and_prompt_if_data_exists(system_namespaces)

    # Load the systems
    # TODO: Check how many can fit in GPU memory. If not too many, perhaps we
    # can move them off-GPU between rounds
    policies = []
    for system_namespace in system_namespaces:
        P.switch_to_namespace(system_namespace)
        setup = P.get_current_parameters()["Setup"]
        policy, _ = load_model(setup["model"], setup["model_file"], domain)
        policies.append(policy)

    # ----------------------------------------------------------------------------------------
    # Initialize Roller - one roller shared by all policies; the policy is swapped in per round.
    # ----------------------------------------------------------------------------------------
    policy_roller = SimplePolicyRoller(instance_id=7,
                                       real_drone=setup_overlay["real_drone"],
                                       policy=None,
                                       oracle=None,
                                       no_reward=True)

    # ----------------------------------------------------------------------------------------
    # Collect rollouts
    # ----------------------------------------------------------------------------------------
    # Fixed: sorted() already returns a list, the extra list() was redundant.
    eval_envs = sorted(get_correct_eval_env_id_list())
    count = 0

    # Loop over environments
    for env_id in eval_envs:
        seg_ids = get_segs_available_for_env(env_id, 0)
        env_ids = [env_id] * len(seg_ids)
        # Fixed: was a plain string missing the f-prefix, printing literal "{env_id}".
        print(f"Beginning rollouts for env: {env_id}")
        if len(seg_ids) == 0:
            print(" NO SEGMENTS! Next...")
            continue

        # Loop over systems and save data
        for i, (policy, system_namespace) in enumerate(zip(policies, system_namespaces)):
            print(f"Rolling policy in namespace {system_namespace} for env: {env_id}")
            P.switch_to_namespace(system_namespace)
            setup = P.get_current_parameters()["Setup"]
            if env_data_already_collected(env_id, setup["model"], setup["run_name"]):
                print(f"Skipping env_id: {env_id}, policy: {setup['model']}")
                continue
            eval_dataset_name = get_eval_tmp_dataset_name(setup["model"], setup["run_name"])
            policy_roller.set_policy(policy)
            # when the last policy is done, we should land the drone
            policy_roller.rollout_segments(env_ids, seg_ids, None, False, 0,
                                           save_dataset_name=eval_dataset_name,
                                           rl_rollout=False,
                                           land_afterwards=(i == len(policies) - 1))
        count += 1
        if one_at_a_time and count > 0:
            print("Stopping. Run again to roll-out on the next environment!")
            break
    print("Done")
def env_data_already_collected(env_id, model_name, run_name):
    """Return True if the supervised data file for ``env_id`` already exists
    in the eval tmp dataset belonging to ``model_name``/``run_name``."""
    dataset_path = get_dataset_dir(get_eval_tmp_dataset_name(model_name, run_name))
    return os.path.isfile(os.path.join(dataset_path, get_supervised_data_filename(env_id)))
def generate_rollout_debug_visualizations():
    """Render detailed per-segment debug visualizations for saved rollouts.

    For each selected segment: top-down animations (with/without drone and
    trajectory), action plots, egocentric visitation/map/mask frames, all
    permutations of overlaid map layers, FPV feature/image animations, and
    finally clips the externally recorded ceiling/corner videos to the
    rollout's duration. Output goes under one directory per segment in
    the rollout debug viz dir.
    """
    setup = P.get_current_parameters()["Setup"]
    dataset_name = setup.get("viz_dataset_name") or get_eval_tmp_dataset_name(
        setup["model"], setup["run_name"])
    domain = setup.get("viz_domain") or ("real" if setup.get("real_drone") else "sim")
    run_name = setup.get("original_run_name") or setup.get("run_name")
    # NOTE: this config value is immediately overridden by the hand-picked
    # list below; kept for when the hard-coded examples are commented out.
    specific_envs = setup.get("only_specific_envs")

    # For collecting information for visualization examples
    specific_segments = [
        # running example
        (6827, 0, 4),
        # successful examples
        (6169, 0, 9),
        (6825, 0, 8),
        (6857, 0, 9),
        # failure examples
        (6169, 0, 2),
        (6299, 0, 9),
        (6634, 0, 8),
        (6856, 0, 9),
        (6857, 0, 8),
    ]
    specific_segments += [
        # good sim, lousy real
        (6419, 0, 5),
        (6569, 0, 6),
        (6634, 0, 6),
        (6917, 0, 7),
    ]
    specific_envs = [s[0] for s in specific_segments]
    # Generate all
    #specific_envs = list(range(6000, 7000, 1))
    #specific_segments = None

    # Some quick params. TODO: Bring this into json
    viz_params = {
        "ego_vdist": False,
        "draw_landmarks": False,
        "draw_topdown": True,
        "draw_drone": True,
        "draw_trajectory": True,
        "draw_fov": False,
        "include_vdist": False,
        "include_layer": None,
        "include_instr": False
    }

    print("Loading data")
    train_envs, dev_envs, test_envs = get_restricted_env_id_lists()
    # TODO: Grab the correct env list
    env_list = dev_envs

    viz = RolloutVisualizer(resolution=576)
    base_dir = os.path.join(get_rollout_debug_viz_dir(), f"{dataset_name}-{domain}")
    os.makedirs(base_dir, exist_ok=True)

    for env_id in env_list:
        if specific_envs and env_id not in specific_envs:
            print("Skipping", env_id)
            continue
        try:
            env_data = load_single_env_from_dataset(dataset_name, env_id, "supervised")
        except FileNotFoundError:
            print(f"Skipping env: {env_id}")
            continue
        if len(env_data) == 0:
            print(f"Skipping env: {env_id}. Rollout exists but is EMPTY!")
            continue

        segs = split_into_segs(env_data)
        for seg in segs:
            # Lead-in / lead-out padding (seconds) applied to every animation.
            lag_start = 1.5
            end_lag = 1.5
            seg_idx = seg[0]["seg_idx"]
            if specific_segments and (env_id, 0, seg_idx) not in specific_segments:
                continue
            seg_name = f"{env_id}:0:{seg_idx}-{domain}"
            gif_filename = f"{seg_name}-roll"
            instr_filename = f"{seg_name}-instr.txt"
            this_dir = os.path.join(base_dir, gif_filename)
            os.makedirs(this_dir, exist_ok=True)
            base_path = os.path.join(this_dir, gif_filename)
            # The instruction file is written early on, so its presence means
            # this segment was already (at least partially) processed.
            if os.path.exists(os.path.join(this_dir, instr_filename)):
                continue

            # Animation with just the drone
            frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, viz_params)
            save_frames(viz, frames, f"{base_path}-exec", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag, formats=Y_FMT)

            # Save instruction
            with open(os.path.join(this_dir, instr_filename), "w") as fp:
                fp.write(seg[0]["instruction"])

            # Animation of action
            frames = viz.action_visualization(env_id, seg_idx, seg, domain, "action")
            save_frames(viz, frames, f"{base_path}-action", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)
            # Animation of actions
            #action_frames = viz.grab_frames(env_id, seg_idx, seg, domain, "action", scale=4)
            #save_frames(viz, action_frames, f"{base_path}-action", fps=5.0, start_lag=lag_start, end_lag=end_lag)

            # Generate and save gif
            # Bare top-down view
            mod_params = deepcopy(viz_params)
            mod_params["draw_drone"] = False
            mod_params["draw_trajectory"] = False
            frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, mod_params)
            save_frames(viz, frames, f"{base_path}-top-down", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            # Top-down view with the drone drawn, but no trajectory.
            mod_params["draw_drone"] = True
            mod_params["draw_trajectory"] = False
            frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, mod_params)
            save_frames(viz, frames, f"{base_path}-top-down-drn", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            # Egocentric visitation distributions
            vdist_r_frames = viz.grab_frames(env_id, seg_idx, seg, domain, "v_dist_r_inner")
            save_frames(viz, vdist_r_frames, f"{base_path}-ego-vdist", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            # Map struct
            map_struct_frames = viz.grab_frames(env_id, seg_idx, seg, domain, "map_struct")
            save_frames(viz, map_struct_frames, f"{base_path}-ego-map-struct", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            # Egocentric observation mask
            ego_obs_mask_frames = viz.grab_frames(env_id, seg_idx, seg, domain, "ego_obs_mask")
            save_frames(viz, ego_obs_mask_frames, f"{base_path}-ego-obs-mask", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            def save_map_permutations(file_prefix, incl_layer):
                # Renders one map layer ("vdist" or a named layer) in four
                # variants: bare, overlaid on top-down, each with/without the
                # drone+trajectory. Closes over viz/env_id/seg/domain/lags.
                mod_params = deepcopy(viz_params)
                if incl_layer == "vdist":
                    mod_params["include_vdist"] = True
                else:
                    mod_params["include_layer"] = incl_layer

                print(f"GENERATING: {file_prefix}")
                # Non-overlaid, without trajectory
                mod_params["draw_drone"] = False
                mod_params["draw_topdown"] = False
                mod_params["draw_trajectory"] = False
                frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, mod_params)
                save_frames(viz, frames, f"{file_prefix}", fps=5.0,
                            start_lag=lag_start, end_lag=end_lag, formats=Y_FMT)

                print(f"GENERATING: {file_prefix}-ov")
                # Overlaid, without trajectory
                mod_params["draw_topdown"] = True
                frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, mod_params)
                save_frames(viz, frames, f"{file_prefix}-ov", fps=5.0,
                            start_lag=lag_start, end_lag=end_lag, formats=D_FMT)

                print(f"GENERATING: {file_prefix}-ov-path")
                # Overlaid, with trajectory
                mod_params["draw_drone"] = True
                mod_params["draw_trajectory"] = True
                frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, mod_params)
                save_frames(viz, frames, f"{file_prefix}-ov-path", fps=5.0,
                            start_lag=lag_start, end_lag=end_lag, formats=Y_FMT)

                print(f"GENERATING: {file_prefix}-path")
                # Non-overlaid, with trajectory
                mod_params["draw_topdown"] = False
                mod_params["draw_drone"] = True
                mod_params["draw_trajectory"] = True
                frames = viz.top_down_visualization(env_id, seg_idx, seg, domain, mod_params)
                save_frames(viz, frames, f"{file_prefix}-path", fps=5.0,
                            start_lag=lag_start, end_lag=end_lag, formats=D_FMT)

            save_map_permutations(f"{base_path}-vdist", "vdist")
            save_map_permutations(f"{base_path}-semantic-map", "S_W")
            save_map_permutations(f"{base_path}-semantic-map-gray", "S_W_Gray")
            save_map_permutations(f"{base_path}-proj-features", "F_W")
            save_map_permutations(f"{base_path}-grounding-map", "R_W")
            save_map_permutations(f"{base_path}-grounding-map-gray", "R_W_Gray")
            save_map_permutations(f"{base_path}-mask", "M_W")
            save_map_permutations(f"{base_path}-accum-mask", "M_W_accum")
            save_map_permutations(f"{base_path}-accum-mask-inv", "M_W_accum_inv")

            # Animation of FPV features
            fpv_feature_frames = viz.grab_frames(env_id, seg_idx, seg, domain, "F_C")
            save_frames(viz, fpv_feature_frames, f"{base_path}-features-fpv", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            # Animation of FPV images
            fpv_image_frames = viz.grab_frames(env_id, seg_idx, seg, domain, "image", scale=4)
            save_frames(viz, fpv_image_frames, f"{base_path}-image", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)

            frames = viz.overlay_frames(fpv_image_frames, fpv_feature_frames)
            save_frames(viz, frames, f"{base_path}-features-fpv-ov", fps=5.0,
                        start_lag=lag_start, end_lag=end_lag)
            num_frames = len(frames)

            # Clip rollout videos to correct rollout duration and re-save
            rollout_dir = get_rollout_video_dir(run_name=run_name)
            if os.path.isdir(rollout_dir):
                print("Processing rollout videos")
                actual_rollout_duration = num_frames / 5.0  # animations are 5 fps
                ceiling_clip = viz.load_video_clip(env_id, seg_idx, seg, domain, "ceiling", rollout_dir)
                duration_with_lag = lag_start + actual_rollout_duration + end_lag
                try:
                    if ceiling_clip is not None:
                        if ceiling_clip.duration > duration_with_lag:
                            # Trim excess footage from the start of the clip.
                            start = ceiling_clip.duration - end_lag - duration_with_lag
                            ceiling_clip = ceiling_clip.cutout(0, start)
                        #ceiling_clip = ceiling_clip.cutout(duration_with_lag, ceiling_clip.duration)
                        # Fixed: output name typo "ceiing_cam" -> "ceiling_cam"
                        # (now consistent with the fallback copy below).
                        save_frames(viz, ceiling_clip, f"{base_path}-ceiling_cam-clipped",
                                    fps=ceiling_clip.fps)

                    corner_clip = viz.load_video_clip(env_id, seg_idx, seg, domain, "corner", rollout_dir)
                    if corner_clip is not None:
                        # Fixed: condition previously omitted lag_start
                        # (actual_rollout_duration + end_lag), which could make
                        # the computed cutout start negative. Use the same
                        # threshold as the ceiling clip.
                        if corner_clip.duration > duration_with_lag:
                            start = corner_clip.duration - end_lag - duration_with_lag
                            corner_clip = corner_clip.cutout(0, start)
                        #corner_clip = corner_clip.cutout(duration_with_lag, corner_clip.duration)
                        save_frames(viz, corner_clip, f"{base_path}-corner_cam-clipped",
                                    fps=corner_clip.fps)
                except Exception as e:
                    # Re-encoding failed: fall back to copying the raw videos.
                    print("Video encoding error! Copying manually")
                    print(e)
                    try:
                        in_ceil_file = os.path.join(rollout_dir, f"rollout_ceiling_{env_id}-0-{seg_idx}.mkv")
                        in_corn_file = os.path.join(rollout_dir, f"rollout_corner_{env_id}-0-{seg_idx}.mkv")
                        out_ceil_file = f"{base_path}-ceiling_cam-full.mkv"
                        out_corn_file = f"{base_path}-corner_cam-full.mkv"
                        shutil.copy(in_ceil_file, out_ceil_file)
                        shutil.copy(in_corn_file, out_corn_file)
                    except Exception:
                        # Fixed typo in message: "SKipping" -> "Skipping".
                        print("Failed copying videos! Skipping")
    print("ding")