def main(params: Parameters):
    """Build the configured train/test curricula, fully evaluate them, and
    store the result in the curriculum repository.

    The repository-path and language-mode parameters are passed to
    ``write_experiment_curriculum`` as ignored parameters (alongside
    ``IGNORED_PARAMETERS``) so they are excluded when the curriculum is keyed
    by its parameters.
    """
    repository_path = params.creatable_directory(CURRICULUM_REPOSITORY_PATH_PARAMETER)
    language_mode = params.enum(
        LANGUAGE_MODE_PARAMETER, LanguageMode, default=LanguageMode.ENGLISH
    )
    train_curriculum, test_curriculum = curriculum_from_params(
        params, language_mode=language_mode
    )
    # Evaluate both curricula eagerly so the stored curriculum is fully realized.
    strict_curriculum = ExperimentCurriculum(
        evaluate_curriculum(train_curriculum), evaluate_curriculum(test_curriculum)
    )
    parameters_to_ignore = IGNORED_PARAMETERS.union(
        {CURRICULUM_REPOSITORY_PATH_PARAMETER, LANGUAGE_MODE_PARAMETER}
    )
    write_experiment_curriculum(
        repository_path,
        params,
        language_mode,
        strict_curriculum,
        ignored_parameters=immutableset(parameters_to_ignore),
    )
def from_parameters(
    params: Parameters, *, graph_logger: Optional[HypothesisLogger] = None
) -> "ObjectPursuitLearner":  # type: ignore
    """Construct an ``ObjectPursuitLearner`` from a parameter namespace.

    Reads the pursuit hyper-parameters (learning factor, confirmation and
    lexicon thresholds, smoothing) from *params*; optionally logs learned
    word hypotheses to ``log_word_hypotheses_dir`` when that parameter is set.

    Args:
        params: parameter namespace holding the learner configuration.
        graph_logger: optional hypothesis logger for debug graph output.
    """
    hypotheses_dir = params.optional_creatable_directory("log_word_hypotheses_dir")
    if hypotheses_dir:
        logging.info("Hypotheses will be logged to %s", hypotheses_dir)
    # Seed defaults to 0 so runs are reproducible unless a seed is supplied.
    seeded_rng = Random()
    seeded_rng.seed(params.optional_integer("random_seed", default=0))
    return ObjectPursuitLearner(
        learning_factor=params.floating_point("learning_factor"),
        graph_match_confirmation_threshold=params.floating_point(
            "graph_match_confirmation_threshold"
        ),
        lexicon_entry_threshold=params.floating_point("lexicon_entry_threshold"),
        smoothing_parameter=params.floating_point("smoothing_parameter"),
        hypothesis_logger=graph_logger,
        log_learned_item_hypotheses_to=hypotheses_dir,
        rng=seeded_rng,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        language_mode=params.enum(
            "language_mode", LanguageMode, default=LanguageMode.ENGLISH
        ),
    )
def main(params: Parameters) -> None:
    """Render the selected curriculum to HTML under the output directory.

    The output location is ``output_directory / <language> / <curriculum>``.
    When ``sort_by_utterance`` is true, scenes are dumped sorted by utterance
    length (using ``random_seed`` for tie-breaking order); otherwise the
    curriculum is dumped in its natural order.
    """
    output_root = params.creatable_directory("output_directory")
    curriculum_string = params.string(
        "curriculum", valid_options=STR_TO_CURRICULUM.keys(), default="phase1"
    )
    language_mode = params.enum(
        "language_mode", LanguageMode, default=LanguageMode.ENGLISH
    )
    # e.g. "LanguageMode.ENGLISH" -> "english"
    language_string = str(language_mode).split(".")[-1].lower()
    num_samples = params.optional_positive_integer("num_samples")
    num_noise_objects = params.optional_positive_integer("num_noise_objects")
    curriculum_dir = output_root / language_string / curriculum_string
    curriculum_dir.mkdir(parents=True, exist_ok=True)
    # We lazily instantiate the curriculum so we don't need to worry
    # about any of them we don't actually use.
    curriculum_to_render = STR_TO_CURRICULUM[curriculum_string](
        num_samples, num_noise_objects, phase2_language_generator(language_mode)
    )
    if params.boolean("sort_by_utterance", default=False):
        CurriculumToHtmlDumper().dump_to_html_as_sorted_by_utterance_length(
            curriculum_to_render,
            output_directory=curriculum_dir,
            title="GAILA Phase 1 Curriculum Sorted by Utterance Length",
            curriculum_string=curriculum_string,
            random_seed=params.integer("random_seed", default=1),
        )
    else:
        CurriculumToHtmlDumper().dump_to_html(
            curriculum_to_render,
            output_directory=curriculum_dir,
            title="GAILA Phase 1 Curriculum",
        )
def log_experiment_entry_point(params: Parameters) -> None:
    """Configure and run a learning experiment described by *params*.

    Builds the curriculum and learner from parameters, wires up HTML
    progress logging (plus a candidate-accuracy observer before each
    training example), and hands everything to ``execute_experiment``.
    Debug hypothesis graphs are rendered when ``debug_log_directory`` is set.
    """
    experiment_name = params.string("experiment")
    debug_log_dir = params.optional_creatable_directory("debug_log_directory")
    graph_logger: Optional[HypothesisLogger] = None
    if debug_log_dir:
        logging.info("Debug graphs will be written to %s", debug_log_dir)
        graph_logger = HypothesisLogger(debug_log_dir, enable_graph_rendering=True)
    logger = LearningProgressHtmlLogger.create_logger(params)
    language_mode = params.enum(
        "language_mode", LanguageMode, default=LanguageMode.ENGLISH
    )
    training_instance_groups, test_instance_groups = curriculum_from_params(
        params, language_mode
    )
    experiment = Experiment(
        name=experiment_name,
        training_stages=training_instance_groups,
        learner_factory=learner_factory_from_params(
            params, graph_logger, language_mode
        ),
        pre_example_training_observers=[
            logger.pre_observer(),
            CandidateAccuracyObserver("pre-acc-observer"),
        ],
        post_example_training_observers=[logger.post_observer()],
        test_instance_groups=test_instance_groups,
        test_observers=[logger.test_observer()],
        sequence_chooser=RandomChooser.for_seed(0),
    )
    execute_experiment(
        experiment,
        log_path=params.optional_creatable_directory("hypothesis_log_dir"),
        log_hypotheses_every_n_examples=params.integer(
            "log_hypothesis_every_n_steps", default=250
        ),
        log_learner_state=params.boolean("log_learner_state", default=True),
        learner_logging_path=params.optional_creatable_directory(
            "experiment_group_dir"
        ),
        starting_point=params.integer("starting_point", default=-1),
        point_to_log=params.integer("point_to_log", default=0),
        load_learner_state=params.optional_existing_file("learner_state_path"),
    )
def from_params(
    params: Parameters,
    *,
    ontology: Optional[Ontology] = None,
    graph_logger: Optional[GraphLogger] = None,
) -> "ProposeButVerifyObjectLearner":
    """Construct a ``ProposeButVerifyObjectLearner`` from a parameter namespace.

    Args:
        params: parameter namespace; reads ``random_seed`` (default 0) and
            ``graph_match_confirmation_threshold`` (default 0.8) and the
            ``language_mode`` enum (default English).
        ontology: ontology to use; falls back to ``GAILA_PHASE_1_ONTOLOGY``.
        graph_logger: optional logger for debug graph output.
    """
    seed = params.optional_integer("random_seed", default=0)
    chooser = RandomChooser.for_seed(seed)
    return ProposeButVerifyObjectLearner(
        graph_match_confirmation_threshold=params.floating_point(
            "graph_match_confirmation_threshold", default=0.8
        ),
        graph_logger=graph_logger,
        rng=chooser,
        ontology=ontology or GAILA_PHASE_1_ONTOLOGY,
        language_mode=params.enum(
            "language_mode", LanguageMode, default=LanguageMode.ENGLISH
        ),
    )
def from_params(
    params: Parameters,
    *,
    ontology: Optional[Ontology] = None,
    graph_logger: Optional[GraphLogger] = None,
) -> "CrossSituationalObjectLearner":
    """Construct a ``CrossSituationalObjectLearner`` from a parameter namespace.

    Args:
        params: parameter namespace holding the cross-situational
            hyper-parameters (confirmation/lexicon thresholds, smoothing,
            expected number of meanings) and the ``language_mode`` enum.
        ontology: ontology to use; falls back to ``GAILA_PHASE_1_ONTOLOGY``.
        graph_logger: optional logger for debug graph output.
    """
    confirmation_threshold = params.floating_point("graph_match_confirmation_threshold")
    lexicon_threshold = params.floating_point("lexicon_entry_threshold")
    smoothing = params.floating_point("smoothing_parameter")
    expected_meanings = params.floating_point("expected_number_of_meanings")
    return CrossSituationalObjectLearner(
        graph_match_confirmation_threshold=confirmation_threshold,
        lexicon_entry_threshold=lexicon_threshold,
        smoothing_parameter=smoothing,
        expected_number_of_meanings=expected_meanings,
        graph_logger=graph_logger,
        ontology=ontology or GAILA_PHASE_1_ONTOLOGY,
        language_mode=params.enum(
            "language_mode", LanguageMode, default=LanguageMode.ENGLISH
        ),
    )
def main(
    params: Parameters,
    scenes_iterable_input: Optional[Iterable[Phase1InstanceGroup]] = None,
    output_directory: Optional[Path] = None,
    visualizer: Optional[SituationVisualizer] = None,
) -> None:
    """Render curriculum scenes with the 3D situation visualizer.

    For each scene, objects from the scene graph are instantiated in the
    visualizer, iteratively re-positioned via ``_solve_top_level_positions``,
    and finally screenshotted. Depending on the
    ``automatically_save_renderings`` parameter, rendering is fully automatic
    or interactive (the user may skip scenes from the console).

    Args:
        params: parameters controlling iteration counts, seeds, and
            rendering flags (``debug_bounding_boxes``, ``gaze_arrows``, ...).
        scenes_iterable_input: optional pre-built scene groups; when ``None``
            a default curriculum is generated.
        output_directory: directory screenshots are written to.
        visualizer: an existing visualizer to reuse; when ``None`` a fresh
            ``SituationVisualizer`` is created.
    """
    language_mode = params.enum(
        "language_mode", LanguageMode, default=LanguageMode.ENGLISH
    )
    if scenes_iterable_input is None:
        scenes_iterable: Iterable[Phase1InstanceGroup] = [
            make_curriculum(None, None, phase2_language_generator(language_mode))
        ]
    else:
        scenes_iterable = scenes_iterable_input
    num_iterations = params.positive_integer("iterations")
    steps_before_vis = params.positive_integer("steps_before_vis")
    specific_scene = params.optional_positive_integer("scene")
    automatically_save_renderings = params.boolean(
        "automatically_save_renderings", default=False
    )
    # Experiment runs get experiment-derived screenshot filenames;
    # otherwise use the default naming scheme.
    if "experiment_group_dir" in params:
        rendering_filename_generator = from_experiment_filename_generator
    else:
        rendering_filename_generator = default_filename_generator
    screenshot_dir = output_directory
    # Seed both stdlib and NumPy RNGs so scene layout is reproducible.
    random.seed(params.integer("seed"))
    np.random.seed(params.integer("seed"))
    if params.string("debug_bounding_boxes", default="off") == "on":
        debug_bounding_boxes = True
    else:
        debug_bounding_boxes = False
    if params.string("gaze_arrows", default="off") == "on":
        gaze_arrows = True
    else:
        gaze_arrows = False
    # go through curriculum scenes and output geometry types
    if visualizer is None:
        viz = SituationVisualizer()
    else:
        # NOTE(review): indentation reconstructed from flattened source --
        # clearing the reused visualizer appears to belong to this branch;
        # confirm against version control.
        viz = visualizer
        viz.clear_scene()
    model_scales = viz.get_model_scales()
    # Fold the per-object-type scale multipliers into the model scale table.
    for object_type, multiplier in OBJECT_SCALE_MULTIPLIER_MAP.items():
        if object_type in model_scales:
            v3 = model_scales[object_type]
            new_v3 = (v3[0] * multiplier, v3[1] * multiplier, v3[2] * multiplier)
            model_scales[object_type] = new_v3
        else:
            model_scales[object_type] = (multiplier, multiplier, multiplier)
    for model_name, scale in model_scales.items():
        logging.info("SCALE: %s -> %s", model_name, scale.__str__())
    # used to start a frame from where the previous one left off
    previous_model_positions: Optional[PositionsMap] = None
    for scene_number, scene_elements in enumerate(
        SceneCreator.create_scenes(scenes_iterable)
    ):
        # If a scene number is provided in the params file, only render that scene
        if specific_scene and scene_number < specific_scene:
            continue
        if specific_scene and scene_number > specific_scene:
            break
        scene_filename = rendering_filename_generator(scene_number, scene_elements)
        # Skip scenes whose target filename was already rendered this run.
        if scene_filename in _FILENAMES_USED:
            continue
        _FILENAMES_USED.add(scene_filename)
        print(f"SCENE {scene_number}")
        # Title shows the utterance plus a "frame x/total" indicator.
        viz.set_title(
            " ".join(token for token in scene_elements.tokens)
            + " ("
            + str(scene_elements.current_frame + 1)
            + "/"
            + str(scene_elements.total_frames)
            + ")"
        )
        # if this is a new scene, forget the positions from the last scene
        if scene_elements.current_frame == 0:
            previous_model_positions = None
        if automatically_save_renderings:
            # if in auto mode and scene contains an excluded vocab word, skip it
            skip_scene = False
            for token in scene_elements.tokens:
                if token in EXCLUDED_VOCAB:
                    skip_scene = True
            if skip_scene:
                continue
        # for debugging purposes:
        # SceneCreator.graph_for_each(scene_elements.object_graph, print_obj_names)
        # bind visualizer and properties to top level rendering function:
        bound_render_obj = partial(
            render_obj, viz, scene_elements.property_map, previous_model_positions
        )
        # bind visualizer and properties to nested obj rendering function
        bound_render_nested_obj = partial(
            render_obj_nested, viz, scene_elements.property_map, previous_model_positions
        )
        # render each object in graph
        SceneCreator.graph_for_each_top_level(
            scene_elements.object_graph, bound_render_obj, bound_render_nested_obj
        )
        # apply scale to top level nodes in scene
        for node in scene_elements.object_graph:
            if (
                node.name not in OBJECT_NAMES_TO_EXCLUDE
                and node.name.split("_")[0] in OBJECT_SCALE_MULTIPLIER_MAP
            ):
                viz.multiply_scale(
                    node.name, OBJECT_SCALE_MULTIPLIER_MAP[node.name.split("_")[0]]
                )
        # find the Region relations that refer to separate objects:
        # (e.g. the cookie is in the region of the hand (of the person),
        # not the leg-segment in in the region of the torso).
        inter_object_in_region_map: DefaultDict[
            ObjectPerception, List[Region[ObjectPerception]]
        ] = defaultdict(list)
        for top_level_node in scene_elements.object_graph:
            if top_level_node.perceived_obj in scene_elements.in_region_map:
                inter_object_in_region_map[
                    top_level_node.perceived_obj
                ] = scene_elements.in_region_map[top_level_node.perceived_obj]
        # print(inter_object_in_region_map)
        # we want to assemble a lookup of the offsets (position) of each object's subobjects.
        sub_object_offsets = {}
        for node_name, node in viz.geo_nodes.items():
            child_node_to_offset = {}
            # Breadth-first walk over the node's scene-graph descendants.
            recurse_list: List[NodePath] = node.children
            while recurse_list:
                next_batch: List[NodePath] = []
                for child in recurse_list:
                    next_batch += child.children
                    # make sure this is a sub-object
                    if child.hasMat() and child.parent.name != node_name:
                        # child has non-identity transformation matrix applied to it
                        # (transform differs from parent)
                        # TODO: we could re-export all of the models in such a way to
                        # eliminate this extra layer in the scene graph
                        child_node_to_offset[child.parent.name] = child.get_pos()
                recurse_list = next_batch
            sub_object_offsets[node_name] = child_node_to_offset
        # handle skipping scene
        if not automatically_save_renderings:
            viz.run_for_seconds(1)
            skip_command = input("type 's' and hit ENTER to skip this scene")
            if skip_command == "s":
                viz.clear_scene()
                viz.run_for_seconds(0.25)
                continue
        # Re-key the region map by debug handle for the freezing helper.
        handle_to_in_region_map = {
            object_perception.debug_handle: region_list
            for object_perception, region_list in inter_object_in_region_map.items()
        }
        frozen_objects = objects_to_freeze(
            handle_to_in_region_map,
            scene_elements.situation,
            scene_elements.situation_object_to_handle,
        )
        if scene_elements.interpolated_scene_moving_items:
            # freeze everything not included in the interpolated scene
            frozen_objects = (
                immutableset(
                    [key.debug_handle for key in scene_elements.in_region_map.keys()]
                )
                - scene_elements.interpolated_scene_moving_items
            )
        # now that every object has been instantiated into the scene,
        # they need to be re-positioned.
        repositioned_map = None
        # The solver yields intermediate position maps every `yield_steps`
        # iterations so the scene can be updated progressively.
        for repositioned_map in _solve_top_level_positions(
            top_level_objects=immutableset(
                [
                    node.perceived_obj
                    for node in scene_elements.object_graph
                    if node.name not in OBJECT_NAMES_TO_EXCLUDE
                ]
            ),
            sub_object_offsets=sub_object_offsets,
            in_region_map=inter_object_in_region_map,
            model_scales=model_scales,
            frozen_objects=frozen_objects,
            iterations=num_iterations,
            yield_steps=steps_before_vis,
            previous_positions=previous_model_positions,
        ):
            viz.clear_debug_nodes()
            viz.clear_gaze_arrows()
            if not automatically_save_renderings:
                viz.run_for_seconds(0.25)
            viz.set_positions(repositioned_map)
            if debug_bounding_boxes:
                for name in repositioned_map.name_to_position:
                    viz.add_debug_bounding_box(
                        name,
                        repositioned_map.name_to_position[name],
                        repositioned_map.name_to_scale[name],
                    )
            if gaze_arrows:
                # Mark objects carrying the "gazed-at" ontology property.
                for handle, props in scene_elements.property_map.items():
                    for prop in props:
                        if isinstance(prop, OntologyNode) and prop.handle == "gazed-at":
                            viz.add_gaze_arrow(
                                handle,
                                repositioned_map.name_to_position[handle],
                                repositioned_map.name_to_scale[handle],
                            )
            # the visualizer seems to need about a second to render an update
            if not automatically_save_renderings:
                viz.run_for_seconds(1)
            # viz.print_scene_graph()
            previous_model_positions = None
        # only store previous positions when continuing to next frame / scene
        previous_model_positions = repositioned_map
        viz.run_for_seconds(1)
        screenshot(
            automatically_save_renderings=automatically_save_renderings,
            filename=scene_filename,
            screenshot_dir=screenshot_dir,
            viz=viz,
        )
        viz.clear_scene()
        viz.run_for_seconds(0.25)
def log_experiment_entry_point(params: Parameters) -> None:
    """Configure and run a (resumable) learning experiment from *params*.

    Supports three observer configurations, in priority order:

    1. ``resume_from_latest_logged_state``: reload pickled observer state
       from the experiment group directory (falling back to fresh default
       observers if none can be loaded);
    2. an explicit ``observers_state_path`` pickle file;
    3. fresh default pre/post/test observers built from the HTML logger.

    The first two are mutually exclusive; supplying both raises
    ``RuntimeError``. The curriculum may likewise either be read from a
    curriculum repository (``load_from_curriculum_repository``) or built
    directly from parameters.
    """
    experiment_name = params.string("experiment")
    debug_log_dir = params.optional_creatable_directory("debug_log_directory")
    graph_logger: Optional[HypothesisLogger]
    if debug_log_dir:
        logging.info("Debug graphs will be written to %s", debug_log_dir)
        graph_logger = HypothesisLogger(debug_log_dir, enable_graph_rendering=True)
    else:
        graph_logger = None
    logger = LearningProgressHtmlLogger.create_logger(params)
    language_mode = params.enum(
        "language_mode", LanguageMode, default=LanguageMode.ENGLISH
    )
    curriculum_repository_path = params.optional_existing_directory(
        "load_from_curriculum_repository"
    )
    if curriculum_repository_path:
        # Use a previously stored (already-evaluated) curriculum.
        curriculum = read_experiment_curriculum(
            curriculum_repository_path, params, language_mode
        )
        (training_instance_groups, test_instance_groups) = (
            curriculum.train_curriculum,
            curriculum.test_curriculum,
        )
    else:
        (training_instance_groups, test_instance_groups) = curriculum_from_params(
            params, language_mode
        )
    experiment_group_dir = params.optional_creatable_directory("experiment_group_dir")
    resume_from_last_logged_state = params.boolean(
        "resume_from_latest_logged_state", default=False
    )
    # Check if we have explicit observer states to load
    observers_state = params.optional_existing_file("observers_state_path")
    test_observer = []  # type: ignore
    pre_observer = []  # type: ignore
    post_observer = []  # type: ignore
    if resume_from_last_logged_state and observers_state:
        raise RuntimeError(
            f"Can not resume from last logged state and provide explicit observer state paths"
        )
    if resume_from_last_logged_state:
        if not experiment_group_dir:
            raise RuntimeError(
                "experiment_group_dir must be specified when resume_from_last_logged_state is true."
            )
        # Try to Load Observers
        # NOTE(review): every readable state file yielded is loaded, each load
        # overwriting the previous one -- confirm that
        # observer_states_by_most_recent yields in the intended order (a
        # `break` after the first successful load may have been intended).
        for _, observers_state_path in observer_states_by_most_recent(
            cast(Path, experiment_group_dir) / "observer_state", "observers_state_at_"
        ):
            try:
                with observers_state_path.open("rb") as f:
                    observers_holder = pickle.load(f)
                    pre_observer = observers_holder.pre_observers
                    post_observer = observers_holder.post_observers
                    test_observer = observers_holder.test_observers
            except OSError:
                logging.warning(
                    "Unable to open observer state at %s; skipping.",
                    str(observers_state_path),
                )
            except pickle.UnpicklingError:
                logging.warning(
                    "Couldn't unpickle observer state at %s; skipping.",
                    str(observers_state_path),
                )
        if not pre_observer and not post_observer and not test_observer:
            # No usable saved state: fall back to fresh default observers.
            logging.warning("Reverting to default observers.")
            pre_observer = [
                logger.pre_observer(  # type: ignore
                    params=params.namespace_or_empty("pre_observer"),
                    experiment_group_dir=experiment_group_dir,
                )
            ]
            post_observer = [
                logger.post_observer(  # type: ignore
                    params=params.namespace_or_empty("post_observer"),
                    experiment_group_dir=experiment_group_dir,
                )
            ]
            test_observer = [
                logger.test_observer(  # type: ignore
                    params=params.namespace_or_empty("test_observer"),
                    experiment_group_dir=experiment_group_dir,
                )
            ]
    elif observers_state:
        # Best-effort load of an explicitly supplied observer-state pickle;
        # on failure the observer lists remain empty.
        try:
            with observers_state.open("rb") as f:
                observers_holder = pickle.load(f)
                pre_observer = observers_holder.pre_observers
                post_observer = observers_holder.post_observers
                test_observer = observers_holder.test_observers
        except OSError:
            logging.warning(
                "Unable to open observer state at %s; skipping.", str(observers_state)
            )
        except pickle.UnpicklingError:
            logging.warning(
                "Couldn't unpickle observer state at %s; skipping.",
                str(observers_state),
            )
    else:
        # Default observers built from the HTML progress logger.
        pre_observer = [
            logger.pre_observer(  # type: ignore
                params=params.namespace_or_empty("pre_observer"),
                experiment_group_dir=experiment_group_dir,
            )
        ]
        post_observer = [
            logger.post_observer(  # type: ignore
                params=params.namespace_or_empty("post_observer"),
                experiment_group_dir=experiment_group_dir,
            )
        ]
        test_observer = [
            logger.test_observer(  # type: ignore
                params=params.namespace_or_empty("test_observer"),
                experiment_group_dir=experiment_group_dir,
            )
        ]
    execute_experiment(
        Experiment(
            name=experiment_name,
            training_stages=training_instance_groups,
            learner_factory=learner_factory_from_params(
                params, graph_logger, language_mode
            ),
            pre_example_training_observers=pre_observer,
            post_example_training_observers=post_observer,
            test_instance_groups=test_instance_groups,
            test_observers=test_observer,
            sequence_chooser=RandomChooser.for_seed(0),
        ),
        log_path=params.optional_creatable_directory("hypothesis_log_dir"),
        log_hypotheses_every_n_examples=params.integer(
            "log_hypothesis_every_n_steps", default=250
        ),
        log_learner_state=params.boolean("log_learner_state", default=True),
        learner_logging_path=experiment_group_dir,
        starting_point=params.integer("starting_point", default=0),
        point_to_log=params.integer("point_to_log", default=0),
        load_learner_state=params.optional_existing_file("learner_state_path"),
        resume_from_latest_logged_state=resume_from_last_logged_state,
        debug_learner_pickling=params.boolean("debug_learner_pickling", default=False),
    )