def _init_walls(self, image):
    """
    Initializes the wall entities for the arena.

    One NVISII box entity is created per MuJoCo geom that uses the
    ``walls_mat`` material, positioned/oriented from the simulation
    state and textured with the given image.

    Args:
        image (str): Filename (relative to the textures directory) of the
            image to use as the wall texture.
    """
    texture_image = xml_path_completion("textures/" + image)
    texture = nvisii.texture.create_from_file(name='wall_texture',
                                              path=texture_image)

    # One geom per wall; all walls share the 'walls_mat' material in the XML.
    for wall in self.env.model.mujoco_arena.worldbody.findall(
            "./geom[@material='walls_mat']"):

        name = wall.get('name')
        # split() (no argument) tolerates repeated/odd whitespace in the
        # XML 'size' attribute, unlike split(' ').
        size = [float(x) for x in wall.get('size').split()]

        pos, quat = self._get_orientation_geom(name)

        wall_entity = nvisii.entity.create(
            name=name,
            mesh=nvisii.mesh.create_box(name=name,
                                        size=nvisii.vec3(
                                            size[0], size[1], size[2])),
            transform=nvisii.transform.create(name),
            material=nvisii.material.create(name))

        wall_entity.get_transform().set_position(
            nvisii.vec3(pos[0], pos[1], pos[2]))
        # quat is wxyz, matching nvisii.quat's constructor order.
        wall_entity.get_transform().set_rotation(
            nvisii.quat(quat[0], quat[1], quat[2], quat[3]))
        wall_entity.get_material().set_base_color_texture(texture)
def _init_camera(self):
    """
    Creates the NVISII camera entity, registers it as the renderer's
    primary camera, and applies the default view and dome-light settings.
    """
    aspect_ratio = float(self.width) / float(self.height)

    # Entity that owns the camera; its transform drives the viewpoint.
    self.camera = nvisii.entity.create(
        name="camera",
        transform=nvisii.transform.create("camera_transform"),
    )
    self.camera.set_camera(
        nvisii.camera.create_from_fov(name="camera_camera",
                                      field_of_view=1,
                                      aspect=aspect_ratio))

    # Make this entity the one NVISII renders from.
    nvisii.set_camera_entity(self.camera)

    # Default viewpoint: slightly above and in front of the workspace.
    self._camera_configuration(at_vec=nvisii.vec3(0, 0, 1.06),
                               up_vec=nvisii.vec3(0, 0, 1),
                               eye_vec=nvisii.vec3(1.24, 0.0, 1.35),
                               quat=nvisii.quat(-1, 0, 0, 0))

    # Environment-wide render settings.
    self._dome_light_intensity = 1
    nvisii.set_dome_light_intensity(self._dome_light_intensity)
    nvisii.set_max_bounce_depth(4)
def _init_floor(self, image):
    """
    Creates a textured floor plane centered at the origin.

    Args:
        image (str): Filename (relative to the textures directory) of the
            image to use as the floor texture.
    """
    # 3x3 plane at the origin with unit scale.
    floor_entity = nvisii.entity.create(
        name="floor",
        mesh=nvisii.mesh.create_plane(name="plane", size=nvisii.vec2(3, 3)),
        material=nvisii.material.create("plane"),
        transform=nvisii.transform.create("plane"))
    floor_entity.get_transform().set_scale(nvisii.vec3(1))
    floor_entity.get_transform().set_position(nvisii.vec3(0, 0, 0))

    # Texture lookup is relative to the packaged texture assets.
    floor_texture = nvisii.texture.create_from_file(
        name='floor_texture',
        path=xml_path_completion("textures/" + image))

    material = floor_entity.get_material()
    material.set_base_color_texture(floor_texture)
    material.set_roughness(0.4)
    material.set_specular(0)
def _init_lighting(self):
    """Creates a single small sphere light placed above the workspace."""
    light = nvisii.entity.create(
        name="light",
        mesh=nvisii.mesh.create_sphere("light"),
        transform=nvisii.transform.create("light"),
    )
    light.set_light(nvisii.light.create("light"))

    # Bright source, shrunk to a small sphere and raised above the scene.
    light.get_light().set_intensity(150)
    light.get_transform().set_scale(nvisii.vec3(0.3))
    light.get_transform().set_position(nvisii.vec3(3, 3, 4))

    self.light_1 = light
def interact():
    """
    Per-frame input handler for the interactive viewer.

    WASD/QE translate the camera in its local frame, holding the left
    mouse button rotates it (cursor hidden while dragging), and the
    UP/DOWN keys halve/double the movement speed.
    """
    global speed_camera
    global cursor
    global rot

    # Camera's local-to-world matrix maps local moves into world space.
    cam_to_world = camera.get_transform().get_local_to_world_matrix()

    # Translation: each held key writes one component of the local offset.
    offset = nvisii.vec4(0, 0, 0, 0)
    key_bindings = (
        ("W", 2, -1), ("S", 2, 1),
        ("A", 0, -1), ("D", 0, 1),
        ("Q", 1, -1), ("E", 1, 1),
    )
    for key, axis, sign in key_bindings:
        if nvisii.is_button_held(key):
            offset[axis] = sign * speed_camera

    if nvisii.length(offset) > 0.0:
        world_offset = cam_to_world * offset
        camera.get_transform().add_position(nvisii.vec3(world_offset))

    # Remember last cursor position, then read the current one.
    cursor[2] = cursor[0]
    cursor[3] = cursor[1]
    cursor[0] = nvisii.get_cursor_pos().x
    cursor[1] = nvisii.get_cursor_pos().y

    # Hide the cursor while the left button is held (drag-to-rotate).
    dragging = nvisii.is_button_held("MOUSE_LEFT")
    if dragging:
        nvisii.set_cursor_mode("DISABLED")
    else:
        nvisii.set_cursor_mode("NORMAL")

    if dragging:
        rot.x -= (cursor[0] - cursor[2]) * 0.001
        rot.y -= (cursor[1] - cursor[3]) * 0.001
        init_rot = nvisii.angleAxis(nvisii.pi() * .5, (1, 0, 0))
        yaw = nvisii.angleAxis(rot.x, (0, 1, 0))
        pitch = nvisii.angleAxis(rot.y, (1, 0, 0))
        camera.get_transform().set_rotation(init_rot * yaw * pitch)

    # Speed control: UP halves, DOWN doubles (dividing by 0.5).
    if nvisii.is_button_pressed("UP"):
        speed_camera *= 0.5
        print('decrease speed camera', speed_camera)
    if nvisii.is_button_pressed("DOWN"):
        speed_camera /= 0.5
        print('increase speed camera', speed_camera)
opt.out = '15_camera_motion_car_blur.png'
opt.control = True

# # # # # # # # # # # # # # # # # # # # # # # # #

nvisii.initialize()
nvisii.set_dome_light_intensity(.8)
nvisii.resize_window(int(opt.width), int(opt.height))

# # # # # # # # # # # # # # # # # # # # # # # # #

# load the textures
dome = nvisii.texture.create_from_file("dome", "content/teatro_massimo_2k.hdr")

# we can add HDR images to act as dome
nvisii.set_dome_light_texture(dome)
nvisii.set_dome_light_rotation(
    nvisii.angleAxis(nvisii.pi() * .5, nvisii.vec3(0, 0, 1)))

# Car motion parameters; y component moves at twice the x speed, opposite sign.
car_speed = 0
car_speed_x = car_speed
car_speed_y = -2 * car_speed
camera_height = 80

# # # # # # # # # # # # # # # # # # # # # # # # #

# Denoise unless noise was explicitly requested via the options.
if not opt.noise is True:
    nvisii.enable_denoiser()

# NOTE(review): this entity.create(...) call is truncated in this chunk;
# its remaining arguments continue beyond this view.
camera = nvisii.entity.create(name="camera",
                              transform=nvisii.transform.create("camera"),
                              camera=nvisii.camera.create(
                                  name="camera",
# NOTE(review): continuation of an nvisii.entity.create(...) call whose
# opening (presumably `sun = nvisii.entity.create(`) is above this chunk.
    name = "sun",
    mesh = nvisii.mesh.create_sphere("sphere"),
    transform = nvisii.transform.create("sun"),
    light = nvisii.light.create("sun")
)
# Sun: emissive sphere, daylight color temperature (5780 K).
sun.get_transform().set_position((10,10,10))
sun.get_light().set_temperature(5780)
sun.get_light().set_intensity(1000)

floor = nvisii.entity.create(
    name="floor",
    mesh = nvisii.mesh.create_plane("floor"),
    transform = nvisii.transform.create("floor"),
    material = nvisii.material.create("floor")
)
floor.get_transform().set_position(nvisii.vec3(0,0,0))
floor.get_transform().set_scale(nvisii.vec3(10))
floor.get_material().set_roughness(0.1)
floor.get_material().set_base_color(nvisii.vec3(0.5,0.5,0.5))

# Set the collision with the floor mesh
# first lets get the vertices
vertices = floor.get_mesh().get_vertices()

# get the position of the object
pos = floor.get_transform().get_position()
pos = [pos[0],pos[1],pos[2]]
scale = floor.get_transform().get_scale()
scale = [scale[0],scale[1],scale[2]]
rot = floor.get_transform().get_rotation()
rot = [rot[0],rot[1],rot[2],rot[3]]
# Headless render: no window is opened; verbose prints setup information.
nvisii.initialize(headless = True, verbose = True)

# Denoise unless noise was explicitly requested via the options.
if not opt.noise is True:
    nvisii.enable_denoiser()

camera = nvisii.entity.create(
    name = "camera",
    transform = nvisii.transform.create("camera"),
    camera = nvisii.camera.create(
        name = "camera",
        aspect = float(opt.width)/float(opt.height)
    )
)
camera.get_transform().look_at(
    nvisii.vec3(0,0,0), # look at (world coordinate)
    nvisii.vec3(0,0,1), # up vector
    nvisii.vec3(-2,0,1), # camera_origin
)
nvisii.set_camera_entity(camera)

# # # # # # # # # # # # # # # # # # # # # # # # #

# lets turn off the ambiant lights
nvisii.set_dome_light_intensity(0)
nvisii.disable_dome_light_sampling()

tex = nvisii.texture.create_from_file("tex", "content/gradient.png")

# NOTE(review): this entity.create(...) call is truncated in this chunk;
# its arguments continue beyond this view.
obj_entity = nvisii.entity.create(
def adding_mesh_object(name, obj_to_load, texture_to_load, scale=1):
    """
    Load a mesh (or a full .obj scene) into visii, give it a random pose,
    create its pybullet physics body, apply a random impulse, and record
    it in the export bookkeeping lists.

    Args:
        name (str): Name to use for the created visii entity/mesh.
        obj_to_load (str): Path to the .obj file to load.
        texture_to_load (str or None): Path to an RGB texture; when None the
            whole obj scene is imported with its own materials instead.
        scale (float): Uniform scale applied to the object.
    """
    global mesh_loaded, visii_pybullet, names_to_export

    # obj_to_load = toy_to_load + "/meshes/model.obj"
    # texture_to_load = toy_to_load + "/materials/textures/texture.png"

    print("loading:", obj_to_load)

    if texture_to_load is None:
        # No standalone texture: import the whole obj scene (materials included).
        toys = load_obj_scene(obj_to_load)

        # All imported entities hang off a shared parent transform.
        toy_parent_transform = visii.entity.get(
            toys[0]).get_transform().get_parent()

        toy_parent_transform.set_scale(visii.vec3(scale))
        toy_parent_transform.set_position(
            visii.vec3(
                random.uniform(0.1, 2),
                random.uniform(-1, 1),
                random.uniform(-1, 1),
            ))
        # NOTE(review): quaternion components drawn uniformly and not
        # normalized here — presumably normalized downstream; confirm.
        toy_parent_transform.set_rotation(
            visii.quat(
                random.uniform(0, 1),
                random.uniform(0, 1),
                random.uniform(0, 1),
                random.uniform(0, 1),
            ))
        name = toys[0]

        id_pybullet = create_physics(name, mass=np.random.rand() * 5)

        visii_pybullet.append({
            'visii_id': name,
            'bullet_id': id_pybullet,
            'base_rot': None,
        })
        gemPos, gemOrn = p.getBasePositionAndOrientation(id_pybullet)

        # Kick the object with a random force at a random point near its base.
        force_rand = 10
        object_position = 0.01
        p.applyExternalForce(
            id_pybullet, -1, [
                random.uniform(-force_rand, force_rand),
                random.uniform(-force_rand, force_rand),
                random.uniform(-force_rand, force_rand)
            ], [
                random.uniform(-object_position, object_position),
                random.uniform(-object_position, object_position),
                random.uniform(-object_position, object_position)
            ],
            flags=p.WORLD_FRAME)

        # Register every imported entity plus its cuboid keypoints for export.
        for entity_name in toys:
            names_to_export.append(entity_name)
            cuboid = add_cuboid(entity_name, debug=False)
        names_to_export.append(toy_parent_transform.get_name())
    else:
        # Textured single mesh: cache meshes so repeated objs load once.
        if obj_to_load in mesh_loaded:
            toy_mesh = mesh_loaded[obj_to_load]
        else:
            toy_mesh = visii.mesh.create_from_file(name, obj_to_load)
            mesh_loaded[obj_to_load] = toy_mesh

        toy = visii.entity.create(name=name,
                                  transform=visii.transform.create(name),
                                  mesh=toy_mesh,
                                  material=visii.material.create(name))

        toy_rgb_tex = visii.texture.create_from_file(name, texture_to_load)
        toy.get_material().set_base_color_texture(toy_rgb_tex)
        toy.get_material().set_roughness(random.uniform(0.1, 0.5))

        toy.get_transform().set_scale(visii.vec3(scale))
        toy.get_transform().set_position(
            visii.vec3(
                random.uniform(0.1, 2),
                random.uniform(-1, 1),
                random.uniform(-1, 1),
            ))
        toy.get_transform().set_rotation(
            visii.quat(
                random.uniform(0, 1),
                random.uniform(0, 1),
                random.uniform(0, 1),
                random.uniform(0, 1),
            ))

        id_pybullet = create_physics(name, mass=np.random.rand() * 5)

        visii_pybullet.append({
            'visii_id': name,
            'bullet_id': id_pybullet,
            'base_rot': None,
        })
        gemPos, gemOrn = p.getBasePositionAndOrientation(id_pybullet)

        # Same random impulse as the scene-import branch.
        force_rand = 10
        object_position = 0.01
        p.applyExternalForce(
            id_pybullet, -1, [
                random.uniform(-force_rand, force_rand),
                random.uniform(-force_rand, force_rand),
                random.uniform(-force_rand, force_rand)
            ], [
                random.uniform(-object_position, object_position),
                random.uniform(-object_position, object_position),
                random.uniform(-object_position, object_position)
            ],
            flags=p.WORLD_FRAME)

        names_to_export.append(name)
        cuboid = add_cuboid(name, debug=False)
visii.sample_pixel_area(x_sample_interval=(.5, .5), y_sample_interval=(.5, .5)) # visii.set_max_bounce_depth(1) visii.enable_denoiser() camera = visii.entity.create(name="camera", transform=visii.transform.create("camera"), camera=visii.camera.create_perspective_from_fov( name="camera", field_of_view=0.785398, aspect=float(opt.width) / float(opt.height))) # data structure random_camera_movement = { 'at': visii.vec3(1, 0, 0), 'up': visii.vec3(0, 0, 1), 'eye': visii.vec3(0, 0, 0) } camera.get_transform().look_at( at=random_camera_movement['at'], # look at (world coordinate) up=random_camera_movement['up'], # up vector eye=random_camera_movement['eye'], ) visii.set_camera_entity(camera) # # # # # # # # # # # # # # # # # # # # # # # # # # lets turn off the ambiant lights
# set up for pybullet - here we will use indices for # objects with holes vertices = mesh.get_vertices() indices = mesh.get_triangle_indices() ids_pybullet_and_nvisii_names = [] for i in range(opt.nb_objects): name = f"mesh_{i}" obj = nvisii.entity.create(name=name, transform=nvisii.transform.create(name), material=nvisii.material.create(name)) obj.set_mesh(mesh) # transforms pos = nvisii.vec3(random.uniform(-4, 4), random.uniform(-4, 4), random.uniform(2, 5)) rot = nvisii.normalize( nvisii.quat( random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1), )) s = random.uniform(0.2, 0.5) scale = (s, s, s) obj.get_transform().set_position(pos) obj.get_transform().set_rotation(rot) obj.get_transform().set_scale(scale) # pybullet setup
def _update_orientation(self, name, component):
    """
    Update position for an object or a robot in renderer.

    Args:
        name (string): name of component
        component (nvisii entity or scene): Object in renderer and other
            info for object.
    """
    obj = component.obj
    parent_body_name = component.parent_body_name
    geom_pos = component.geom_pos
    geom_quat = component.geom_quat
    dynamic = component.dynamic

    # Static components never move; nothing to update.
    if not dynamic:
        return

    # Components whose names contain one of these tags track their parent
    # body's pose rather than their own geom's pose.
    self.body_tags = ['robot', 'pedestal', 'gripper', 'peg']

    if parent_body_name != 'worldbody':
        if self.tag_in_name(name):
            pos = self.env.sim.data.get_body_xpos(parent_body_name)
        else:
            pos = self.env.sim.data.get_geom_xpos(name)

        # Parent body rotation matrix from the MuJoCo simulation state.
        B = self.env.sim.data.body_xmat[self.env.sim.model.body_name2id(
            parent_body_name)].reshape((3, 3))

        quat_xyzw_body = mat2quat(B)
        # Reorder xyzw -> wxyz for nvisii.quat's constructor.
        quat_wxyz_body = np.array([
            quat_xyzw_body[3], quat_xyzw_body[0], quat_xyzw_body[1],
            quat_xyzw_body[2]
        ])  # wxyz
        # Compose the body's world rotation with the geom's local rotation.
        nvisii_quat = nvisii.quat(*quat_wxyz_body) * nvisii.quat(
            *geom_quat)

        if self.tag_in_name(name):
            # Add position offset if there are position offset defined in the geom tag
            homo_mat = T.pose2mat((np.zeros(
                (1, 3), dtype=np.float32), quat_xyzw_body))
            pos_offset = homo_mat @ np.array(
                [geom_pos[0], geom_pos[1], geom_pos[2], 1.]).transpose()
            pos = pos + pos_offset[:3]
    else:
        # Direct children of the worldbody sit at the origin with identity
        # rotation.
        pos = [0, 0, 0]
        nvisii_quat = nvisii.quat(1, 0, 0, 0)  # wxyz

    if isinstance(obj, nvisii.scene):
        # temp fix -- look into XML file for correct quat
        if 's_visual' in name:
            # single robot
            if len(self.env.robots) == 1:
                nvisii_quat = nvisii.quat(0, 0.5, 0, 0)
            # two robots - 0
            elif len(self.env.robots) == 2 and 'robot_0' in name:
                nvisii_quat = nvisii.quat(-0, 0.5, 0.5, 0)
            # two robots - 1
            else:
                nvisii_quat = nvisii.quat(-0, 0.5, -0.5, 0)
        # Imported scenes are posed via their root transform.
        obj.transforms[0].set_position(nvisii.vec3(pos[0], pos[1], pos[2]))
        obj.transforms[0].set_rotation(nvisii_quat)
    else:
        obj.get_transform().set_position(
            nvisii.vec3(pos[0], pos[1], pos[2]))
        obj.get_transform().set_rotation(nvisii_quat)
# # # # # # # # # # # # # # # # # # # # # # # # # # Lets set some objects in the scene # Create a box that'll act like a room for the objects room = nvisii.entity.create( name="room", mesh=nvisii.mesh.create_box('room'), transform=nvisii.transform.create("room"), material=nvisii.material.create("room"), ) room.get_transform().set_scale((2, 2, 2)) room.get_transform().set_position((0, 0, 2)) mat = nvisii.material.get("room") mat.set_base_color(nvisii.vec3(0.19, 0.16, 0.19)) mat.set_roughness(1) sphere = nvisii.entity.create(name="sphere", mesh=nvisii.mesh.create_sphere("sphere"), transform=nvisii.transform.create("sphere"), material=nvisii.material.create("sphere")) sphere.get_transform().set_position(nvisii.vec3(0.4, 0, 0.2)) sphere.get_transform().set_scale(nvisii.vec3(0.2)) sphere.get_material().set_base_color(nvisii.vec3(0.1, 0.96, 0.4)) sphere.get_material().set_roughness(0.7) sphere.get_material().set_specular(1) sphere2 = nvisii.entity.create(name="sphere2", mesh=nvisii.mesh.create_sphere("sphere2"), transform=nvisii.transform.create("sphere2"),
def add_cuboid(name, debug=False):
    """
    Attach the 3D bounding-cuboid keypoints of an entity to its transform
    tree so they can be exported later.

    :param name: string name of the nvisii entity to annotate.
    :param debug: bool - when True, also create small spheres at each
        keypoint so their placement can be inspected visually.
    :return: list of cuboid keypoints (as [x, y, z] lists) in the object's
        canonical space.
    """
    entity = nvisii.entity.get(name)
    lo = entity.get_mesh().get_min_aabb_corner()
    hi = entity.get_mesh().get_max_aabb_corner()
    center = entity.get_mesh().get_aabb_center()

    # Eight AABB corners followed by the centroid.
    corners = [
        nvisii.vec3(hi[0], hi[1], hi[2]),
        nvisii.vec3(lo[0], hi[1], hi[2]),
        nvisii.vec3(hi[0], lo[1], hi[2]),
        nvisii.vec3(hi[0], hi[1], lo[2]),
        nvisii.vec3(lo[0], lo[1], hi[2]),
        nvisii.vec3(hi[0], lo[1], lo[2]),
        nvisii.vec3(lo[0], hi[1], lo[2]),
        nvisii.vec3(lo[0], lo[1], lo[2]),
        nvisii.vec3(center[0], center[1], center[2]),
    ]

    # Reorder into the ndds / DOPE keypoint convention, then append the
    # centroid once more as the final (10th) point.
    ndds_order = [2, 0, 3, 5, 4, 1, 6, 7, -1]
    keypoints = [corners[i] for i in ndds_order]
    keypoints.append(nvisii.vec3(center[0], center[1], center[2]))

    # Parent one child transform per keypoint under the entity's transform.
    for idx, point in enumerate(keypoints):
        child = nvisii.transform.create(f"{name}_cuboid_{idx}")
        child.set_position(point)
        child.set_scale(nvisii.vec3(0.3))
        child.set_parent(entity.get_transform())
        if debug:
            nvisii.entity.create(
                name=f"{name}_cuboid_{idx}",
                mesh=nvisii.mesh.create_sphere(f"{name}_cuboid_{idx}"),
                transform=child,
                material=nvisii.material.create(f"{name}_cuboid_{idx}"))

    # Return plain lists so the result serializes cleanly.
    return [[v[0], v[1], v[2]] for v in keypoints]
def setColorValue(value):
    """Slider callback: update the HSV value channel and recolor the teapot."""
    # Slider reports 0-100; HSV expects 0-1.
    color[2] = value / 100.0
    r, g, b = colorsys.hsv_to_rgb(color[0], color[1], color[2])
    teapotahedron.get_material().set_base_color(nvisii.vec3(r, g, b))
def interact():
    """
    Per-frame input handler: WASD/QE translate the camera, left-drag
    rotates it, UP/DOWN halve/double the movement speed, and SPACE
    renders the current view to a numbered PNG. Also refits the camera
    FOV whenever the window is resized.
    """
    global prev_window_size
    global speed_camera
    global cursor
    global init_rot
    global rot
    global i

    # Refresh the projection when the window size changes.
    window_size = nv.vec2(nv.get_window_size().x, nv.get_window_size().y)
    if (nv.length(window_size - prev_window_size) > 0):
        camera.get_camera().set_fov(.8, window_size.x / float(window_size.y))
    prev_window_size = window_size

    # Camera's local-to-world matrix maps local moves into world space.
    cam_to_world = camera.get_transform().get_local_to_world_matrix()

    # Translation: each held key writes one component of the local offset.
    offset = nv.vec4(0, 0, 0, 0)
    for key, axis, sign in (("W", 2, -1), ("S", 2, 1),
                            ("A", 0, -1), ("D", 0, 1),
                            ("Q", 1, -1), ("E", 1, 1)):
        if nv.is_button_held(key):
            offset[axis] = sign * speed_camera

    if nv.length(offset) > 0.0:
        world_offset = cam_to_world * offset
        camera.get_transform().add_position(nv.vec3(world_offset))

    # Remember last cursor position, then read the current one.
    cursor[2] = cursor[0]
    cursor[3] = cursor[1]
    cursor[0] = nv.get_cursor_pos().x
    cursor[1] = nv.get_cursor_pos().y

    # Rotate only while the left mouse button is held.
    if nv.is_button_held("MOUSE_LEFT"):
        rot.x -= (cursor[0] - cursor[2]) * 0.001
        rot.y -= (cursor[1] - cursor[3]) * 0.001
        # init_rot = nv.angleAxis(nv.pi() * .5, (1,0,0))
        yaw = nv.angleAxis(rot.x, (0, 1, 0))
        pitch = nv.angleAxis(rot.y, (1, 0, 0))
        camera.get_transform().set_rotation(init_rot * yaw * pitch)

    # Speed control: UP halves, DOWN doubles (dividing by 0.5).
    if nv.is_button_pressed("UP"):
        speed_camera *= 0.5
        print('decrease speed camera', speed_camera)
    if nv.is_button_pressed("DOWN"):
        speed_camera /= 0.5
        print('increase speed camera', speed_camera)

    # Render out an image
    if nv.is_button_pressed("SPACE"):
        i = i + 1
        nv.render_to_file(nv.get_window_size().x,
                          nv.get_window_size().y, 256,
                          str(i) + ".png")
def update_visual_objects(object_ids, pkg_path, nv_objects=None):
    # object ids are in pybullet engine
    # pkg_path is for loading the object geometries
    # nv_objects refers to the already entities loaded, otherwise it is going
    # to load the geometries and create entities.
    if nv_objects is None:
        nv_objects = {}
    for object_id in object_ids:
        # One visual shape entry per link of the multibody.
        for idx, visual in enumerate(p.getVisualShapeData(object_id)):
            # Extract visual data from pybullet
            objectUniqueId = visual[0]
            linkIndex = visual[1]
            visualGeometryType = visual[2]
            dimensions = visual[3]
            meshAssetFileName = visual[4]
            local_visual_frame_position = visual[5]
            local_visual_frame_orientation = visual[6]
            rgbaColor = visual[7]

            world_link_frame_position = (0, 0, 0)
            world_link_frame_orientation = (0, 0, 0, 1)
            if linkIndex == -1:
                # Base link: pybullet reports the base pose in the inertial
                # frame, so convert it back to the link (visual) frame.
                dynamics_info = p.getDynamicsInfo(object_id, -1)
                inertial_frame_position = dynamics_info[3]
                inertial_frame_orientation = dynamics_info[4]
                base_state = p.getBasePositionAndOrientation(objectUniqueId)
                world_link_frame_position = base_state[0]
                world_link_frame_orientation = base_state[1]
                # m1: world -> inertial offset; pybullet quats are xyzw,
                # nv.quat takes wxyz, hence the component reordering.
                m1 = nv.translate(
                    nv.mat4(1),
                    nv.vec3(inertial_frame_position[0],
                            inertial_frame_position[1],
                            inertial_frame_position[2]))
                m1 = m1 * nv.mat4_cast(
                    nv.quat(inertial_frame_orientation[3],
                            inertial_frame_orientation[0],
                            inertial_frame_orientation[1],
                            inertial_frame_orientation[2]))
                # m2: world pose of the base (inertial frame).
                m2 = nv.translate(
                    nv.mat4(1),
                    nv.vec3(world_link_frame_position[0],
                            world_link_frame_position[1],
                            world_link_frame_position[2]))
                m2 = m2 * nv.mat4_cast(
                    nv.quat(world_link_frame_orientation[3],
                            world_link_frame_orientation[0],
                            world_link_frame_orientation[1],
                            world_link_frame_orientation[2]))
                # Link frame = inverse(inertial offset) * base pose.
                m = nv.inverse(m1) * m2
                q = nv.quat_cast(m)
                world_link_frame_position = m[3]
                world_link_frame_orientation = q
            else:
                # Non-base links: pybullet provides the world link frame
                # directly (indices 4/5 of getLinkState).
                linkState = p.getLinkState(objectUniqueId, linkIndex)
                world_link_frame_position = linkState[4]
                world_link_frame_orientation = linkState[5]

            # Name to use for components
            object_name = str(objectUniqueId) + "_" + str(linkIndex)

            meshAssetFileName = meshAssetFileName.decode('UTF-8')
            if object_name not in nv_objects:
                # Create mesh component if not yet made
                if visualGeometryType == p.GEOM_MESH:
                    try:
                        nv_objects[object_name] = nv.import_scene(
                            pkg_path + "/" + meshAssetFileName)
                    except Exception as e:
                        # Best-effort: skip assets that fail to import.
                        print(e)
                        pass

            # Only mesh visuals (type 5, i.e. GEOM_MESH) are handled below.
            if visualGeometryType != 5:
                continue

            if object_name not in nv_objects:
                continue

            # Link transform
            m1 = nv.translate(
                nv.mat4(1),
                nv.vec3(world_link_frame_position[0],
                        world_link_frame_position[1],
                        world_link_frame_position[2]))
            m1 = m1 * nv.mat4_cast(
                nv.quat(world_link_frame_orientation[3],
                        world_link_frame_orientation[0],
                        world_link_frame_orientation[1],
                        world_link_frame_orientation[2]))

            # Visual frame transform
            m2 = nv.translate(
                nv.mat4(1),
                nv.vec3(local_visual_frame_position[0],
                        local_visual_frame_position[1],
                        local_visual_frame_position[2]))
            m2 = m2 * nv.mat4_cast(
                nv.quat(local_visual_frame_orientation[3],
                        local_visual_frame_orientation[0],
                        local_visual_frame_orientation[1],
                        local_visual_frame_orientation[2]))

            # Set root transform of visual objects collection to above transform
            nv_objects[object_name].transforms[0].set_transform(m1 * m2)
            nv_objects[object_name].transforms[0].set_scale(dimensions)

            for m in nv_objects[object_name].materials:
                # Approximate sRGB -> linear conversion via gamma 2.2.
                m.set_base_color(
                    (rgbaColor[0]**2.2, rgbaColor[1]**2.2, rgbaColor[2]**2.2))

            # todo... add support for spheres, cylinders, etc
            # print(visualGeometryType)
    return nv_objects
c = colorsys.hsv_to_rgb(angle/360,1,1) # for i_c in range(3): image[i,j,0:3] = c return image nvisii.render_to_file( width=int(opt.width), height=int(opt.height), samples_per_pixel=int(opt.spp), file_path=opt.outf + "20_frame1.png" ) obj1.get_transform().set_position(obj1.get_transform().get_position(),previous=True) obj1.get_transform().add_position(nvisii.vec3(0,0.5,0)) obj2.get_transform().set_position(obj2.get_transform().get_position(),previous=True) obj2.get_transform().add_position(nvisii.vec3(0,0,0.5)) obj3.get_transform().set_rotation(obj3.get_transform().get_rotation(),previous=True) obj3.get_transform().add_rotation(nvisii.quat(0,-1,0,0)) motion_vectors_array = nvisii.render_data( width=int(opt.width), height=int(opt.height), start_frame=0, frame_count=1, bounce=int(0), options="diffuse_motion_vectors" )
def load_object(geom, geom_name, geom_type, geom_quat, geom_pos, geom_size,
                geom_scale, geom_rgba, geom_tex_name, geom_tex_file, class_id,
                meshes):
    """
    Function that initializes the meshes in the memory.

    Args:
        geom (XML element): Object in XML file to load
        geom_name (str): Name for the object.
        geom_type (str): Type of the object. Types include "box",
            "cylinder", or "mesh".
        geom_quat (array): Quaternion (wxyz) of the object.
        geom_pos (array): Position of the object.
        geom_size (array): Size of the object.
        geom_scale (array): Scale of the object.
        geom_rgba (array): Color of the object. This is only used if the
            geom type is not a mesh and there is no specified material.
        geom_tex_name (str): Name of the texture for the object
        geom_tex_file (str): File of the texture for the object
        class_id (int): Class id for the component
        meshes (dict): Meshes for the object

    Returns:
        tuple: (created nvisii entity or scene, list of its entity ids)
    """
    primitive_types = ['box', 'cylinder']
    component = None

    if geom_type == 'box':
        component = nvisii.entity.create(
            name=geom_name,
            mesh=nvisii.mesh.create_box(name=geom_name,
                                        size=nvisii.vec3(
                                            geom_size[0], geom_size[1],
                                            geom_size[2])),
            transform=nvisii.transform.create(geom_name),
            material=nvisii.material.create(geom_name))
    elif geom_type == 'cylinder':
        component = nvisii.entity.create(
            name=geom_name,
            mesh=nvisii.mesh.create_capped_cylinder(name=geom_name,
                                                    radius=geom_size[0],
                                                    size=geom_size[1]),
            transform=nvisii.transform.create(geom_name),
            material=nvisii.material.create(geom_name))
    elif geom_type == 'mesh':
        # Meshes are imported as whole scenes from the .obj next to the
        # asset referenced by the XML (extension swapped to .obj).
        filename = meshes[geom.attrib['mesh']]['file']
        filename = os.path.splitext(filename)[0] + '.obj'
        component = nvisii.import_scene(
            file_path=filename,
            position=nvisii.vec3(geom_pos[0], geom_pos[1], geom_pos[2]),
            scale=(geom_scale[0], geom_scale[1], geom_scale[2]),
            rotation=nvisii.quat(geom_quat[0], geom_quat[1], geom_quat[2],
                                 geom_quat[3]))

    # Collect the ids of every entity this component owns.
    entity_ids = []
    if isinstance(component, nvisii.scene):
        for i in range(len(component.entities)):
            entity_ids.append(component.entities[i].get_id())
    else:
        entity_ids.append(component.get_id())

    # Primitives are positioned here; imported scenes were already posed
    # via import_scene above.
    if geom_type in primitive_types:
        component.get_transform().set_position(
            nvisii.vec3(float(geom_pos[0]), float(geom_pos[1]),
                        float(geom_pos[2])))

    if geom_tex_file is not None and geom_tex_name is not None and geom_type != 'mesh':
        # Reuse an already-created texture of the same name when possible.
        texture = nvisii.texture.get(geom_tex_name)
        if texture is None:
            texture = nvisii.texture.create_from_file(name=geom_tex_name,
                                                      path=geom_tex_file)
        component.get_material().set_base_color_texture(texture)
    else:
        if 'gripper' in geom_name:
            if geom_rgba is not None:
                if isinstance(component, nvisii.scene):
                    for entity in component.entities:
                        entity.get_material().set_base_color(
                            nvisii.vec3(geom_rgba[0], geom_rgba[1],
                                        geom_rgba[2]))
                else:
                    component.get_material().set_base_color(
                        nvisii.vec3(geom_rgba[0], geom_rgba[1],
                                    geom_rgba[2]))
        elif 'hand_visual' in geom_name:
            # Hard-coded dark gray for the hand visual meshes.
            for entity in component.entities:
                entity.get_material().set_base_color(
                    nvisii.vec3(0.05, 0.05, 0.05))

    return component, entity_ids
import numpy as np
import PIL
from PIL import Image
import math

# Simple attribute bag standing in for parsed command-line options.
opt = lambda: None
opt.spp = 1024
opt.width = 500
opt.height = 500
opt.noise = False

# # # # # # # # # # # # # # # # # # # # # # # # #

nvisii.initialize(headless=False, verbose=True)
nvisii.set_dome_light_intensity(1)
nvisii.set_dome_light_color(nvisii.vec3(1,1,1))

# Denoise unless noise was explicitly requested via the options.
if not opt.noise is True:
    nvisii.enable_denoiser()

camera = nvisii.entity.create(
    name = "camera",
    transform = nvisii.transform.create("camera"),
    camera = nvisii.camera.create(
        name = "camera",
        aspect = float(opt.width)/float(opt.height)
    )
)

# NOTE(review): this look_at(...) call is truncated in this chunk; its
# remaining arguments continue beyond this view.
camera.get_transform().look_at(
    at = (0,0,0),