build_widget_ui(obj_attr_mgr, prim_attr_mgr)
# %%
def sample_object_state(
    sim, object_id, from_navmesh=True, maintain_object_up=True, max_tries=100, bb=None
):
    # check that the object is not STATIC
    if sim.get_object_motion_type(object_id) is habitat_sim.physics.MotionType.STATIC:
        print("sample_object_state : Object is STATIC, aborting.")
        return False
    if from_navmesh:
        if not sim.pathfinder.is_loaded:
            print("sample_object_state : No pathfinder, aborting.")
            return False
    elif not bb:
        print(
            "sample_object_state : from_navmesh not specified and no bounding box provided, aborting."
        )
        return False
    tries = 0
    valid_placement = False
    # Note: the following assumes the sim was not reconfigured without close
    scene_collision_margin = stage_attr_mgr.get_template_by_ID(0).margin
    while not valid_placement and tries < max_tries:
        tries += 1
        # initialize the sample location to a random point in the scene bounding box
        sample_location = np.array([0, 0, 0])
        if from_navmesh:
            # query a random navigable point
            sample_location = sim.pathfinder.get_random_navigable_point()
        else:
            sample_location = np.random.uniform(bb.min, bb.max)
        # set the test state
        sim.set_translation(sample_location, object_id)
        if maintain_object_up:
            # random rotation only on the Y axis
            y_rotation = mn.Quaternion.rotation(
                mn.Rad(random.random() * 2 * math.pi), mn.Vector3(0, 1.0, 0)
            )
            sim.set_rotation(y_rotation * sim.get_rotation(object_id), object_id)
        else:
            # unconstrained random rotation
            sim.set_rotation(ut.random_quaternion(), object_id)
        # raise the object such that the lowest bounding box corner is above the navmesh sample point
        if from_navmesh:
            obj_node = sim.get_object_scene_node(object_id)
            xform_bb = habitat_sim.geo.get_transformed_bb(
                obj_node.cumulative_bb, obj_node.transformation
            )
            # also account for the collision margin of the scene
            y_translation = mn.Vector3(
                0, xform_bb.size_y() / 2.0 + scene_collision_margin, 0
            )
            sim.set_translation(
                y_translation + sim.get_translation(object_id), object_id
            )
        # test for penetration with the environment
        if not sim.contact_test(object_id):
            valid_placement = True
    if not valid_placement:
        return False
    return True
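# %%
# @markdown A minimal usage sketch for `sample_object_state` (not part of the
# @markdown original cell, variable names are illustrative): it assumes a configured
# @markdown `sim` with a loaded navmesh and an object template handle such as
# @markdown `sel_file_obj_handle` chosen in the widget UI above, then
# @markdown rejection-samples a collision-free, upright placement for a new object.
remove_all_objects(sim)
sample_obj_id = sim.add_object_by_handle(sel_file_obj_handle)
# try up to 100 random navmesh points for a non-penetrating placement
placed_ok = sample_object_state(
    sim, sample_obj_id, from_navmesh=True, maintain_object_up=True, max_tries=100
)
if not placed_ok:
    print("Could not find a valid placement for the sampled object.")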
# %%
# @title Scripted vs. Dynamic Motion { display-mode: "form" }
# @markdown A quick script to generate video data for AI classification of dynamically dropping vs. kinematically moving objects.
remove_all_objects(sim)

# @markdown Set the scene as dynamic or kinematic:
scenario_is_kinematic = True  # @param {type:"boolean"}

# add the selected object
obj_id_1 = sim.add_object_by_handle(sel_file_obj_handle)

# place the object in front of and above the agent
set_object_state_from_agent(
    sim, obj_id_1, offset=np.array([0, 2.0, -1.0]), orientation=ut.random_quaternion()
)

if scenario_is_kinematic:
    # use the velocity control struct to set up a constant-rate kinematic motion
    sim.set_object_motion_type(habitat_sim.physics.MotionType.KINEMATIC, obj_id_1)
    vel_control = sim.get_object_velocity_control(obj_id_1)
    vel_control.controlling_lin_vel = True
    vel_control.linear_velocity = np.array([0, -1.0, 0])

# simulate and collect observations
example_type = "kinematic vs dynamic"
observations = simulate(sim, dt=2.0)

if make_video:
    vut.make_video(
        observations,
        "color_sensor_1st_person",
        "color",
        output_path + example_type,
    )
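# %%
# @markdown A follow-up sketch (an assumption, not part of the original cell):
# @markdown rerun the same scenario with the object left in its default DYNAMIC
# @markdown motion type so it free-falls under gravity, producing the companion
# @markdown clip for the "dynamic" class.
remove_all_objects(sim)
obj_id_2 = sim.add_object_by_handle(sel_file_obj_handle)
set_object_state_from_agent(
    sim, obj_id_2, offset=np.array([0, 2.0, -1.0]), orientation=ut.random_quaternion()
)
# no velocity control is set up; the newly added object keeps its DYNAMIC motion type
example_type = "dynamic drop"
observations = simulate(sim, dt=2.0)
if make_video:
    vut.make_video(
        observations,
        "color_sensor_1st_person",
        "color",
        output_path + example_type,
    )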