obj2.get_transform().set_position(obj2.get_transform().get_position(), previous=True)
obj2.get_transform().add_position(visii.vec3(0, 0, 0.5))

obj3.get_transform().set_rotation(obj3.get_transform().get_rotation(), previous=True)
obj3.get_transform().add_rotation(visii.quat(0, -1, 0, 0))

motion_vectors_array = visii.render_data(
    width=int(opt.width),
    height=int(opt.height),
    start_frame=0,
    frame_count=1,
    bounce=int(0),
    options="diffuse_motion_vectors")

motion_vectors_array = np.array(motion_vectors_array).reshape(
    opt.height, opt.width, 4) * -1
motion_vectors_array = np.flipud(motion_vectors_array)

image = generate_image_from_motion_vector(motion_vectors_array)
cv2.imwrite("20_motion_from_1_to_2.png", image * 255)

# The frame now has to be set to 1 to render the current image, i.e., the transformed one
visii.sample_time_interval((1, 1))

visii.render_to_file(
    width=int(opt.width),
    height=int(opt.height),
    samples_per_pixel=int(opt.spp),
    file_path="20_frame2.png")

# Let's clean up the GPU
visii.deinitialize()
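# In case a reference is useful: generate_image_from_motion_vector() above is a
# helper defined elsewhere. The sketch below is only one possible flow-to-color
# visualization, assuming the per-pixel screen-space motion sits in the first two
# channels of the 4-channel buffer; the name visualize_motion_vectors is made up
# for illustration and is not part of the original code.
def visualize_motion_vectors(motion_vectors):
    fx = np.ascontiguousarray(motion_vectors[..., 0], dtype=np.float32)
    fy = np.ascontiguousarray(motion_vectors[..., 1], dtype=np.float32)
    magnitude, angle = cv2.cartToPolar(fx, fy)

    hsv = np.zeros((*motion_vectors.shape[:2], 3), dtype=np.uint8)
    hsv[..., 0] = (angle * 180 / np.pi / 2).astype(np.uint8)  # hue encodes direction
    hsv[..., 1] = 255                                         # full saturation
    hsv[..., 2] = cv2.normalize(magnitude, None, 0, 255,      # value encodes magnitude
                                cv2.NORM_MINMAX).astype(np.uint8)

    # Return floats in [0, 1] so the result can be written with the same
    # `image * 255` convention used above
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) / 255.0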
def f(frame_ids):
    # headless - no window
    # verbose - output the number of frames rendered, etc.
    visii.initialize(headless=True, verbose=False)

    # Use a neural network to denoise the ray-traced image
    visii.enable_denoiser()

    # Set up the dome background
    negatives = list(glob.glob("negatives/*.jpg"))
    visii.set_dome_light_intensity(1)

    # Create an entity that will serve as our camera.
    camera = visii.entity.create(name="camera")

    # To place the camera into our scene, we'll add a "transform" component.
    # (All visii objects have a "name" that can be used for easy lookup later.)
    camera.set_transform(visii.transform.create(name="camera_transform"))

    # To make our camera entity act like a "camera", we'll add a camera component
    camera.set_camera(
        visii.camera.create_from_fov(
            name="camera_camera",
            field_of_view=1.4,  # note, this is in radians
            aspect=opt.width / float(opt.height)))

    # Finally, we'll select this entity to be the current camera entity.
    # (visii can only use one camera at a time)
    visii.set_camera_entity(camera)

    # Store the camera look-at information so we can export it
    camera_struct_look_at = {
        'at': [0, 0, 0],
        'up': [0, 0, 1],
        'eye': [-1, 0, 0]
    }

    # Set the camera to look at an object.
    # We'll do this by editing the transform component.
    camera.get_transform().look_at(at=camera_struct_look_at['at'],
                                   up=camera_struct_look_at['up'],
                                   eye=camera_struct_look_at['eye'])

    # This function loads a mesh, ignoring the .mtl file
    mesh = visii.mesh.create_from_file(opt.entity, opt.model)

    # Create a visii entity using the loaded mesh
    obj_entity = visii.entity.create(
        name=opt.entity + "_entity",
        mesh=mesh,
        transform=visii.transform.create(opt.entity + "_entity"),
        material=visii.material.create(opt.entity + "_entity"),
    )

    # obj_entity.get_light().set_intensity(0.05)
    # you can also set the light color manually
    # obj_entity.get_light().set_color((1,0,0))

    # Add a texture to the material
    material = visii.material.get(opt.entity + "_entity")
    texture = visii.texture.create_from_file(opt.entity, "./models/Cutie.PNG")
    material.set_base_color_texture(texture)

    # Add the cuboid to the object we want to export
    add_cuboid(opt.entity + "_entity", opt.debug)

    # Keep track of the entities we want to export
    entities_to_export = [opt.entity + "_entity"]

    # Loop where we change and render each frame
    for i in tqdm(frame_ids):

        # Load a random negative onto the dome
        negative = cv2.imread(random.choice(negatives))

        # Skip backgrounds that are too dark (mean intensity below 20/255)
        if np.mean(negative) < 20:
            continue

        # Fix the lighting of the background and make it small within the FOV
        background = make_background(negative)
        cv2.imwrite("test" + str(i) + ".png", background)
        dome = visii.texture.create_from_file("dome", "test" + str(i) + ".png")
        visii.set_dome_light_texture(dome)
        visii.set_dome_light_rotation(
            visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

        stretch_factor = 2
        scale = [
            random.uniform(1, stretch_factor),  # width
            random.uniform(1, stretch_factor),  # length
            random.uniform(1, stretch_factor)   # height
        ]
        obj_entity.get_transform().set_scale(scale)

        # Create a random rotation while making sure the entity faces forward in each frame
        rot = [
            random.uniform(-10, 10),  # Roll
            random.uniform(-15, 15),  # Pitch
            random.uniform(-45, 45)   # Yaw
        ]
        q = Quaternion.from_euler(rot[0], rot[1], rot[2], degrees=True)

        position = [
            random.uniform(0, 4),   # X (depth)
            random.uniform(-1, 1),  # Y
            random.uniform(-1, 1)   # Z
        ]
        # Scale the position based on the depth into the image
        # to make sure the object remains in frame
        position[1] *= position[0]
        position[2] *= position[0]
        obj_entity.get_transform().set_position(tuple(position))
        obj_entity.get_transform().set_rotation((q.x, q.y, q.z, q.w))

        # Randomly send opt.test_percent % of the frames to the test folder
        # and the rest to the training folder
        if random.randint(0, 100) < opt.test_percent:
            folder = opt.entity + '_test/'
        else:
            folder = opt.entity + '_training/'

        # Render the scene
        visii.render_to_file(
            width=opt.width,
            height=opt.height,
            samples_per_pixel=opt.spp,
            file_path=opt.out + folder + opt.entity + str(i) + '.png')

        # Export the annotations as JSON (NDDS format)
        export_to_ndds_file(
            filename=opt.out + folder + opt.entity + str(i) + '.json',
            obj_names=entities_to_export,
            width=opt.width,
            height=opt.height,
            camera_struct=camera_struct_look_at)

        # Remove the current negative from the dome
        visii.clear_dome_light_texture()
        visii.texture.remove("dome")
        os.remove("test" + str(i) + ".png")

    visii.deinitialize()
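# Hypothetical driver (not part of the original script): because f() creates and
# tears down its own visii context, the requested frame ids can be split across
# worker processes. opt.nb_frames and opt.nb_workers are assumed options here;
# adjust them to whatever your argument parser actually defines.
import multiprocessing as mp

if __name__ == "__main__":
    frame_ids = list(range(opt.nb_frames))
    chunks = [frame_ids[w::opt.nb_workers] for w in range(opt.nb_workers)]
    with mp.Pool(opt.nb_workers) as pool:
        pool.map(f, chunks)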