def main(params: Parameters) -> None:
    """Render the requested curriculum to HTML under ``output_directory``.

    Output goes to ``<output_directory>/<language>/<curriculum>/``. When
    ``sort_by_utterance`` is true, instances are dumped sorted by utterance
    length (shuffled under ``random_seed`` first); otherwise they are dumped
    in curriculum order.
    """
    root_output_directory = params.creatable_directory("output_directory")
    curriculum_string = params.string(
        "curriculum", valid_options=STR_TO_CURRICULUM.keys(), default="phase1"
    )
    language_mode = params.enum(
        "language_mode", LanguageMode, default=LanguageMode.ENGLISH
    )
    # Enum member name, e.g. LanguageMode.ENGLISH -> "english".
    # (Equivalent to the fragile str(...).split(".")[-1].lower() idiom.)
    language_string = language_mode.name.lower()
    num_samples = params.optional_positive_integer("num_samples")
    num_noise_objects = params.optional_positive_integer("num_noise_objects")

    phase1_curriculum_dir = root_output_directory / language_string / curriculum_string
    phase1_curriculum_dir.mkdir(parents=True, exist_ok=True)

    # We lazily instantiate the curriculum so we don't need to worry
    # about any of them we don't actually use.
    curriculum_to_render = STR_TO_CURRICULUM[curriculum_string](
        num_samples, num_noise_objects, phase2_language_generator(language_mode)
    )

    sort_by_utterance_length_flag = params.boolean("sort_by_utterance", default=False)
    if sort_by_utterance_length_flag:
        random_seed = params.integer("random_seed", default=1)
        CurriculumToHtmlDumper().dump_to_html_as_sorted_by_utterance_length(
            curriculum_to_render,
            output_directory=phase1_curriculum_dir,
            title="GAILA Phase 1 Curriculum Sorted by Utterance Length",
            curriculum_string=curriculum_string,
            random_seed=random_seed,
        )
    else:
        CurriculumToHtmlDumper().dump_to_html(
            curriculum_to_render,
            output_directory=phase1_curriculum_dir,
            title="GAILA Phase 1 Curriculum",
        )
def from_parameters(params: Parameters) -> ResourceRequest:
    """Construct a ``SlurmResourceRequest`` from a parameter namespace.

    ``partition`` is required; CPU/GPU counts, memory, and job time are
    optional and default to ``None`` when absent.
    """
    # Memory is only parsed when the key is present; otherwise left unset.
    if "memory" in params:
        memory = MemoryAmount.parse(params.string("memory"))
    else:
        memory = None
    return SlurmResourceRequest(
        partition=params.string("partition"),
        num_cpus=params.optional_positive_integer("num_cpus"),
        num_gpus=params.optional_integer("num_gpus"),
        memory=memory,
        job_time_in_minutes=params.optional_integer("job_time_in_minutes"),
    )
def _split_into_even_slices(
    input_source: KeyValueSource[str, bytes], params: Parameters
) -> None:
    """Distribute the entries of *input_source* round-robin into ``num_slices`` zip files.

    Writes one ``<i>.zip`` per slice under ``output_dir`` and a ``_slices.txt``
    manifest listing the slice paths.  Keys are sorted first so iteration order
    is deterministic; if ``random_seed`` is given, the sorted keys are then
    shuffled reproducibly before distribution.
    """
    output_directory = params.creatable_directory("output_dir")
    slices = params.positive_integer("num_slices")
    random_seed = params.optional_positive_integer("random_seed")

    slice_paths = [output_directory / f"{i}.zip" for i in range(slices)]
    CharSink.to_file(output_directory / "_slices.txt").write(
        "\n".join(str(x) for x in slice_paths)
    )
    output_sinks = [KeyValueSink.zip_bytes_sink(slice_path) for slice_path in slice_paths]
    # this is the magic incantation for handling variable-length lists of context managers
    with ExitStack() as exit_stack:
        for output_sink in output_sinks:
            exit_stack.enter_context(output_sink)
        # sort to guarantee deterministic iteration order
        input_keys = sorted(input_source.keys())  # type: ignore
        if random_seed is not None:
            random.seed(random_seed)
            random.shuffle(input_keys)
        for i, key in enumerate(input_keys):
            # round-robin assignment keeps the slices within one entry of even
            output_sinks[i % slices].put(key, input_source[key])
def main(
    params: Parameters,
    scenes_iterable_input: Optional[Iterable[Phase1InstanceGroup]] = None,
    output_directory: Optional[Path] = None,
    visualizer: Optional[SituationVisualizer] = None,
) -> None:
    """Render curriculum scenes in the 3-D situation visualizer.

    For each scene: instantiates the object models, iteratively solves object
    positions (visualizing intermediate steps every ``steps_before_vis``
    iterations), optionally draws debug bounding boxes and gaze arrows, and
    takes a screenshot.  In interactive mode the user may skip scenes; with
    ``automatically_save_renderings`` scenes are rendered and saved unattended.
    """
    language_mode = params.enum(
        "language_mode", LanguageMode, default=LanguageMode.ENGLISH
    )
    if scenes_iterable_input is None:
        scenes_iterable: Iterable[Phase1InstanceGroup] = [
            make_curriculum(None, None, phase2_language_generator(language_mode))
        ]
    else:
        scenes_iterable = scenes_iterable_input

    num_iterations = params.positive_integer("iterations")
    steps_before_vis = params.positive_integer("steps_before_vis")
    specific_scene = params.optional_positive_integer("scene")

    automatically_save_renderings = params.boolean(
        "automatically_save_renderings", default=False
    )

    if "experiment_group_dir" in params:
        rendering_filename_generator = from_experiment_filename_generator
    else:
        rendering_filename_generator = default_filename_generator

    screenshot_dir = output_directory

    # seed both stdlib and numpy RNGs so renders are reproducible
    random.seed(params.integer("seed"))
    np.random.seed(params.integer("seed"))

    debug_bounding_boxes = params.string("debug_bounding_boxes", default="off") == "on"
    gaze_arrows = params.string("gaze_arrows", default="off") == "on"

    # go through curriculum scenes and output geometry types
    if visualizer is None:
        viz = SituationVisualizer()
    else:
        viz = visualizer
        viz.clear_scene()

    # apply per-object-type scale multipliers on top of the model scales
    model_scales = viz.get_model_scales()
    for object_type, multiplier in OBJECT_SCALE_MULTIPLIER_MAP.items():
        if object_type in model_scales:
            v3 = model_scales[object_type]
            new_v3 = (v3[0] * multiplier, v3[1] * multiplier, v3[2] * multiplier)
            model_scales[object_type] = new_v3
        else:
            model_scales[object_type] = (multiplier, multiplier, multiplier)

    for model_name, scale in model_scales.items():
        # %s stringifies the scale tuple; no need for an explicit __str__() call
        logging.info("SCALE: %s -> %s", model_name, scale)

    # used to start a frame from where the previous one left off
    previous_model_positions: Optional[PositionsMap] = None

    for scene_number, scene_elements in enumerate(
        SceneCreator.create_scenes(scenes_iterable)
    ):
        # If a scene number is provided in the params file, only render that scene
        if specific_scene and scene_number < specific_scene:
            continue
        if specific_scene and scene_number > specific_scene:
            break

        scene_filename = rendering_filename_generator(scene_number, scene_elements)
        if scene_filename in _FILENAMES_USED:
            continue
        _FILENAMES_USED.add(scene_filename)

        print(f"SCENE {scene_number}")
        viz.set_title(
            " ".join(token for token in scene_elements.tokens)
            + " ("
            + str(scene_elements.current_frame + 1)
            + "/"
            + str(scene_elements.total_frames)
            + ")"
        )

        # if this is a new scene, forget the positions from the last scene
        if scene_elements.current_frame == 0:
            previous_model_positions = None

        if automatically_save_renderings:
            # if in auto mode and scene contains an excluded vocab word, skip it
            if any(token in EXCLUDED_VOCAB for token in scene_elements.tokens):
                continue

        # for debugging purposes:
        # SceneCreator.graph_for_each(scene_elements.object_graph, print_obj_names)

        # bind visualizer and properties to top level rendering function:
        bound_render_obj = partial(
            render_obj, viz, scene_elements.property_map, previous_model_positions
        )
        # bind visualizer and properties to nested obj rendering function
        bound_render_nested_obj = partial(
            render_obj_nested, viz, scene_elements.property_map, previous_model_positions
        )

        # render each object in graph
        SceneCreator.graph_for_each_top_level(
            scene_elements.object_graph, bound_render_obj, bound_render_nested_obj
        )

        # apply scale to top level nodes in scene
        for node in scene_elements.object_graph:
            if (
                node.name not in OBJECT_NAMES_TO_EXCLUDE
                and node.name.split("_")[0] in OBJECT_SCALE_MULTIPLIER_MAP
            ):
                viz.multiply_scale(
                    node.name, OBJECT_SCALE_MULTIPLIER_MAP[node.name.split("_")[0]]
                )

        # find the Region relations that refer to separate objects:
        # (e.g. the cookie is in the region of the hand (of the person),
        # not the leg-segment in the region of the torso).
        inter_object_in_region_map: DefaultDict[
            ObjectPerception, List[Region[ObjectPerception]]
        ] = defaultdict(list)
        for top_level_node in scene_elements.object_graph:
            if top_level_node.perceived_obj in scene_elements.in_region_map:
                inter_object_in_region_map[
                    top_level_node.perceived_obj
                ] = scene_elements.in_region_map[top_level_node.perceived_obj]

        # print(inter_object_in_region_map)

        # we want to assemble a lookup of the offsets (position) of each object's subobjects.
        sub_object_offsets = {}
        for node_name, node in viz.geo_nodes.items():
            child_node_to_offset = {}
            # breadth-first walk of the node's scene-graph children
            recurse_list: List[NodePath] = node.children
            while recurse_list:
                next_batch: List[NodePath] = []
                for child in recurse_list:
                    next_batch += child.children
                    # make sure this is a sub-object
                    if child.hasMat() and child.parent.name != node_name:
                        # child has non-identity transformation matrix applied to it
                        # (transform differs from parent)
                        # TODO: we could re-export all of the models in such a way
                        # to eliminate this extra layer in the scene graph
                        child_node_to_offset[child.parent.name] = child.get_pos()
                recurse_list = next_batch
            sub_object_offsets[node_name] = child_node_to_offset

        # handle skipping scene
        if not automatically_save_renderings:
            viz.run_for_seconds(1)
            skip_command = input("type 's' and hit ENTER to skip this scene")
            if skip_command == "s":
                viz.clear_scene()
                viz.run_for_seconds(0.25)
                continue

        handle_to_in_region_map = {
            object_perception.debug_handle: region_list
            for object_perception, region_list in inter_object_in_region_map.items()
        }

        frozen_objects = objects_to_freeze(
            handle_to_in_region_map,
            scene_elements.situation,
            scene_elements.situation_object_to_handle,
        )

        if scene_elements.interpolated_scene_moving_items:
            # freeze everything not included in the interpolated scene
            frozen_objects = (
                immutableset(
                    [key.debug_handle for key in scene_elements.in_region_map.keys()]
                )
                - scene_elements.interpolated_scene_moving_items
            )

        # now that every object has been instantiated into the scene,
        # they need to be re-positioned.
        repositioned_map = None
        for repositioned_map in _solve_top_level_positions(
            top_level_objects=immutableset(
                [
                    node.perceived_obj
                    for node in scene_elements.object_graph
                    if node.name not in OBJECT_NAMES_TO_EXCLUDE
                ]
            ),
            sub_object_offsets=sub_object_offsets,
            in_region_map=inter_object_in_region_map,
            model_scales=model_scales,
            frozen_objects=frozen_objects,
            iterations=num_iterations,
            yield_steps=steps_before_vis,
            previous_positions=previous_model_positions,
        ):
            viz.clear_debug_nodes()
            viz.clear_gaze_arrows()
            if not automatically_save_renderings:
                viz.run_for_seconds(0.25)

            viz.set_positions(repositioned_map)

            if debug_bounding_boxes:
                for name in repositioned_map.name_to_position:
                    viz.add_debug_bounding_box(
                        name,
                        repositioned_map.name_to_position[name],
                        repositioned_map.name_to_scale[name],
                    )

            if gaze_arrows:
                for handle, props in scene_elements.property_map.items():
                    for prop in props:
                        if isinstance(prop, OntologyNode) and prop.handle == "gazed-at":
                            viz.add_gaze_arrow(
                                handle,
                                repositioned_map.name_to_position[handle],
                                repositioned_map.name_to_scale[handle],
                            )

            # the visualizer seems to need about a second to render an update
            if not automatically_save_renderings:
                viz.run_for_seconds(1)
            # viz.print_scene_graph()

        # only store previous positions when continuing to next frame / scene
        # (the original reset to None here was a dead store, immediately overwritten)
        previous_model_positions = repositioned_map

        viz.run_for_seconds(1)
        screenshot(
            automatically_save_renderings=automatically_save_renderings,
            filename=scene_filename,
            screenshot_dir=screenshot_dir,
            viz=viz,
        )

        viz.clear_scene()
        viz.run_for_seconds(0.25)
def curriculum_from_params(
    params: Parameters, language_mode: LanguageMode = LanguageMode.ENGLISH
):
    """Build the (train, test) instance groups named by the ``curriculum`` parameter.

    Returns a pair ``(training_instance_groups, test_instance_groups)`` where
    the test component is ``[]`` for curricula that have no test builder.
    Several curricula take extra arguments (pursuit parameters, path-vs-goal
    flag, per-split parameter namespaces); those are dispatched below.
    """
    str_to_train_test_curriculum: Mapping[
        str, Tuple[CURRICULUM_BUILDER, Optional[CURRICULUM_BUILDER]]
    ] = {
        "m6-deniz": (make_m6_curriculum, None),
        "each-object-by-itself": (
            build_each_object_by_itself_curriculum_train,
            build_each_object_by_itself_curriculum_test,
        ),
        "pursuit": (
            build_pursuit_curriculum,
            build_each_object_by_itself_curriculum_test,
        ),
        "m6-preposition": (build_m6_prepositions_curriculum, None),
        "m9-objects": (build_gaila_phase1_object_curriculum, None),
        "m9-attributes": (build_gaila_phase1_attribute_curriculum, None),
        "chinese-classifiers": (build_classifier_curriculum, None),
        "m9-relations": (build_gaila_phase1_relation_curriculum, None),
        "m9-events": (build_gaila_phase1_verb_curriculum, None),
        "m9-debug": (build_debug_curriculum_train, build_debug_curriculum_test),
        "m9-complete": (build_gaila_phase_1_curriculum, None),
        "m13-imprecise-size": (make_imprecise_size_curriculum, None),
        "m13-imprecise-temporal": (make_imprecise_temporal_descriptions, None),
        "m13-subtle-verb-distinction": (make_subtle_verb_distinctions_curriculum, None),
        "m13-object-restrictions": (build_functionally_defined_objects_curriculum, None),
        "m13-functionally-defined-objects": (
            build_functionally_defined_objects_train_curriculum,
            build_functionally_defined_objects_curriculum,
        ),
        "m13-generics": (build_generics_curriculum, None),
        "m13-complete": (build_gaila_m13_curriculum, None),
        "m13-verbs-with-dynamic-prepositions": (
            make_verb_with_dynamic_prepositions_curriculum,
            None,
        ),
        "m13-shuffled": (build_m13_shuffled_curriculum, build_gaila_m13_curriculum),
        "m13-relations": (make_prepositions_curriculum, None),
        "actions-and-generics-curriculum": (build_actions_and_generics_curriculum, None),
        "m15-object-noise-experiments": (
            build_object_learner_experiment_curriculum_train,
            build_each_object_by_itself_curriculum_test,
        ),
        "m18-integrated-learners-experiment": (
            integrated_pursuit_learner_experiment_curriculum,
            integrated_pursuit_learner_experiment_test,
        ),
    }

    curriculum_name = params.string("curriculum", str_to_train_test_curriculum.keys())

    # The m18 experiment has its own language generator; everything else
    # uses the phase-2 generator for the requested language.
    if curriculum_name == "m18-integrated-learners-experiment":
        language_generator = integrated_experiment_language_generator(language_mode)
    else:
        language_generator = phase2_language_generator(language_mode)

    if params.has_namespace("pursuit-curriculum-params"):
        pursuit_curriculum_params = params.namespace("pursuit-curriculum-params")
    else:
        pursuit_curriculum_params = Parameters.empty()

    use_path_instead_of_goal = params.boolean("use-path-instead-of-goal", default=False)

    (training_instance_groups, test_instance_groups) = str_to_train_test_curriculum[
        curriculum_name
    ]

    num_samples = params.optional_positive_integer("num_samples")
    # We need to be able to accept 0 as the number of noise objects but optional_integer
    # doesn't currently support specifying a range of acceptable values:
    # https://github.com/isi-vista/vistautils/issues/142
    num_noise_objects = params.optional_integer("num_noise_objects")

    if curriculum_name == "pursuit":
        train = training_instance_groups(
            num_samples,
            num_noise_objects,
            language_generator,
            pursuit_curriculum_params=pursuit_curriculum_params,
        )
        test = (
            test_instance_groups(num_samples, num_noise_objects, language_generator)
            if test_instance_groups
            else []
        )
        return (train, test)
    # optional argument to use path instead of goal
    elif use_path_instead_of_goal and curriculum_name in [
        "m13-complete",
        "m13-shuffled",
        "m13-verbs-with-dynamic-prepositions",
    ]:
        train = training_instance_groups(
            num_samples, num_noise_objects, language_generator, use_path_instead_of_goal
        )
        test = (
            test_instance_groups(num_samples, num_noise_objects, language_generator)
            if test_instance_groups
            else []
        )
        return (train, test)
    elif curriculum_name in (
        "m15-object-noise-experiments",
        "m18-integrated-learners-experiment",
    ):
        # These experiments take their own per-split parameter namespaces;
        # the test split uses a fixed size of 5 samples with no noise objects.
        train = training_instance_groups(
            num_samples,
            num_noise_objects,
            language_generator,
            params=params.namespace_or_empty("train_curriculum"),
        )
        test = (
            test_instance_groups(
                5,
                0,
                language_generator,
                params=params.namespace_or_empty("test_curriculum"),
            )
            if test_instance_groups
            else []
        )
        return (train, test)

    return (
        training_instance_groups(num_samples, num_noise_objects, language_generator),
        test_instance_groups(num_samples, num_noise_objects, language_generator)
        if test_instance_groups
        else [],
    )
def curriculum_from_params(
    params: Parameters, language_mode: LanguageMode = LanguageMode.ENGLISH
):
    """Look up and build the (train, test) curriculum named by ``curriculum``.

    Returns a pair ``(training_instance_groups, test_instance_groups)``;
    the test component is ``[]`` when the curriculum has no test builder.
    Only the "pursuit" curriculum takes extra construction parameters.
    """
    str_to_train_test_curriculum: Mapping[
        str, Tuple[CURRICULUM_BUILDER, Optional[CURRICULUM_BUILDER]]
    ] = {
        "m6-deniz": (make_m6_curriculum, None),
        "each-object-by-itself": (
            build_each_object_by_itself_curriculum_train,
            build_each_object_by_itself_curriculum_test,
        ),
        "pursuit": (
            build_pursuit_curriculum,
            build_each_object_by_itself_curriculum_test,
        ),
        "m6-preposition": (build_m6_prepositions_curriculum, None),
        "m9-objects": (build_gaila_phase1_object_curriculum, None),
        "m9-attributes": (build_gaila_phase1_attribute_curriculum, None),
        "m9-relations": (build_gaila_phase1_relation_curriculum, None),
        "m9-events": (build_gaila_phase1_verb_curriculum, None),
        "m9-debug": (build_debug_curriculum_train, build_debug_curriculum_test),
        "m9-complete": (build_gaila_phase_1_curriculum, None),
        "m13-imprecise-size": (make_imprecise_size_curriculum, None),
        "m13-imprecise-temporal": (make_imprecise_temporal_descriptions, None),
        "m13-subtle-verb-distinction": (make_subtle_verb_distinctions_curriculum, None),
        "m13-object-restrictions": (build_functionally_defined_objects_curriculum, None),
        "m13-functionally-defined-objects": (
            build_functionally_defined_objects_train_curriculum,
            build_functionally_defined_objects_curriculum,
        ),
        "m13-generics": (build_generics_curriculum, None),
        "m13-complete": (build_gaila_m13_curriculum, None),
        "m13-verbs-with-dynamic-prepositions": (
            make_verb_with_dynamic_prepositions_curriculum,
            None,
        ),
        "m13-shuffled": (build_m13_shuffled_curriculum, build_gaila_m13_curriculum),
        "m13-relations": (make_prepositions_curriculum, None),
    }

    curriculum_name = params.string("curriculum", str_to_train_test_curriculum.keys())
    language_generator = phase2_language_generator(language_mode)

    if params.has_namespace("pursuit-curriculum-params"):
        pursuit_curriculum_params = params.namespace("pursuit-curriculum-params")
    else:
        pursuit_curriculum_params = Parameters.empty()

    (training_instance_groups, test_instance_groups) = str_to_train_test_curriculum[
        curriculum_name
    ]

    num_samples = params.optional_positive_integer("num_samples")
    num_noise_objects = params.optional_positive_integer("num_noise_objects")

    # Only the pursuit curriculum needs its extra parameter namespace.
    if curriculum_name != "pursuit":
        train = training_instance_groups(
            num_samples, num_noise_objects, language_generator
        )
    else:
        train = training_instance_groups(
            num_samples,
            num_noise_objects,
            language_generator,
            pursuit_curriculum_params=pursuit_curriculum_params,
        )
    test = (
        test_instance_groups(num_samples, num_noise_objects, language_generator)
        if test_instance_groups
        else []
    )
    return (train, test)