import colorsys
from math import acos, pi, sqrt

import cv2
import numpy as np
import visii  # was missing from this script's imports, though visii is used below

# Lightweight attribute namespace for script options (a lambda object accepts
# arbitrary attributes; this mirrors the sibling example scripts).
opt = lambda: None
opt.spp = 100          # samples per pixel
opt.width = 1280
opt.height = 720
opt.noise = False      # True -> keep the raw ray-traced noise (skip denoiser)
opt.path_obj = 'content/dragon/dragon.obj'

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.initialize(headless=True, verbose=True)

# Unless raw noise was explicitly requested, clean up the render with the
# built-in denoiser. (`is not True` is the idiomatic form of `not ... is True`.)
if opt.noise is not True:
    visii.enable_denoiser()

# A visii entity is assembled from components: a transform (placement)
# plus a camera (projection).
camera = visii.entity.create(
    name="camera",
    transform=visii.transform.create("camera"),
    camera=visii.camera.create(
        name="camera",
        aspect=float(opt.width) / float(opt.height)))

camera.get_transform().look_at(
    at=(0, 0.1, 0.1),   # target point in world coordinates
    up=(0, 0, 1),       # z-up convention
    eye=(0, 3.0, 0.2),  # camera position
)
import random

import noise
import numpy as np
import visii

# Script options live on a throwaway callable object, matching the other
# examples in this collection.
opt = lambda: None
opt.spp = 400
opt.width = 1920
opt.height = 1080
opt.noise = False
opt.out = '15_camera_motion_car_blur.png'
opt.control = True

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.initialize()
visii.set_dome_light_intensity(.8)
visii.resize_window(int(opt.width), int(opt.height))

# # # # # # # # # # # # # # # # # # # # # # # # #

# Load an HDR texture and use it as the dome (environment) light, rotated a
# quarter turn around the z axis.
dome = visii.texture.create_from_file("dome", "content/teatro_massimo_2k.hdr")
visii.set_dome_light_texture(dome)
visii.set_dome_light_rotation(
    visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

# Car motion: both components derive from one scalar speed (0 = stationary).
car_speed = 0
car_speed_x = car_speed
car_speed_y = -2 * car_speed
import os  # was never imported in the visible source, though os.path/os.mkdir are used below

import visii

# Script options on a throwaway callable object (matches the sibling examples).
opt = lambda: None
opt.spp = 512
opt.width = 500
opt.height = 500
opt.outf = "09_metadata"  # output folder for renders / metadata

# # # # # # # # # # # # # # # # # # # # # # # # #

# Make sure the output folder exists before rendering into it.
if os.path.isdir(opt.outf):
    print(f'folder {opt.outf}/ exists')
else:
    os.mkdir(opt.outf)
    print(f'created folder {opt.outf}/')

# # # # # # # # # # # # # # # # # # # # # # # # #

visii.initialize(headless=False, verbose=True, lazy_updates=True)
visii.enable_denoiser()

# Camera entity: transform component for placement, camera component for
# projection; then selected as the active camera.
camera = visii.entity.create(
    name="camera",
    transform=visii.transform.create("camera"),
    camera=visii.camera.create(
        name="camera",
        aspect=float(opt.width) / float(opt.height)))
camera.get_transform().look_at(at=(0, 0, 0), up=(0, 0, 1), eye=(0, 1, 1))
visii.set_camera_entity(camera)

# # # # # # # # # # # # # # # # # # # # # # # # #
# Create a scene to use for exporting segmentations
# Scene size: one entity per random object, plus one extra for the camera.
opt.nb_objs = 10000
opt.spp = 16
opt.width = 1920
opt.height = 1080
opt.out = '02_random_scene.png'

# visii uses sets of components to represent a scene, and the per-component
# pools can be sized up front. Here we need 16 meshes, a material and a
# transform for every object, and one additional transform for the camera.
visii.initialize(
    headless=True,
    verbose=True,
    lazy_updates=True,
    max_entities=opt.nb_objs + 1,
    max_transforms=opt.nb_objs + 1,
    max_materials=opt.nb_objs,
    max_meshes=16
    # these are also available
    # max_lights, max_textures, & max_cameras
)

# Turn on the denoiser
visii.enable_denoiser()

# Create a camera entity from a transform plus a camera component.
camera = visii.entity.create(
    name="camera",
    transform=visii.transform.create("camera"),
    camera=visii.camera.create(
        name="camera",
        aspect=float(opt.width) / float(opt.height)))
def f(frame_ids):
    """Render one batch of synthetic training frames.

    For each id in *frame_ids*: pick a random background image, composite it
    onto the dome light, place the target object at a random pose/scale, render
    the frame to disk, and export NDDS-style JSON metadata alongside it.

    Relies on module-level configuration and helpers that are defined elsewhere
    in this file: ``opt`` (entity/model paths, width/height/spp, out folder,
    test_percent, debug), ``add_cuboid``, ``make_background``,
    ``export_to_ndds_file``, ``Quaternion`` and ``tqdm``.

    Parameters
    ----------
    frame_ids : iterable of int
        Frame indices to render; each becomes part of the output file names.
    """
    # headless - no window
    # verbose - output number of frames rendered, etc..
    visii.initialize(headless=True, verbose=False)

    # Use a neural network to denoise the ray-traced output.
    visii.enable_denoiser()

    # Candidate background ("negative") images for the dome.
    negatives = list(glob.glob("negatives/*.jpg"))
    visii.set_dome_light_intensity(1)

    # Create an entity that will serve as our camera.
    # (All visii objects have a "name" that can be used for easy lookup later.)
    camera = visii.entity.create(name="camera")
    camera.set_transform(visii.transform.create(name="camera_transform"))
    camera.set_camera(
        visii.camera.create_from_fov(
            name="camera_camera",
            field_of_view=1.4,  # note, this is in radians
            aspect=opt.width / float(opt.height)))
    # visii can only use one camera at a time.
    visii.set_camera_entity(camera)

    # Store the camera look-at information so we can export it with each frame.
    camera_struct_look_at = {
        'at': [0, 0, 0],
        'up': [0, 0, 1],
        'eye': [-1, 0, 0]
    }
    camera.get_transform().look_at(at=camera_struct_look_at['at'],
                                   up=camera_struct_look_at['up'],
                                   eye=camera_struct_look_at['eye'])

    # This function loads a mesh ignoring .mtl
    mesh = visii.mesh.create_from_file(opt.entity, opt.model)

    # Create a visii entity using the loaded mesh.
    obj_entity = visii.entity.create(
        name=opt.entity + "_entity",
        mesh=mesh,
        transform=visii.transform.create(opt.entity + "_entity"),
        material=visii.material.create(opt.entity + "_entity"),
    )
    # obj_entity.get_light().set_intensity(0.05)
    # you can also set the light color manually
    # obj_entity.get_light().set_color((1,0,0))

    # Add a texture to the object's material.
    material = visii.material.get(opt.entity + "_entity")
    texture = visii.texture.create_from_file(opt.entity, "./models/Cutie.PNG")
    material.set_base_color_texture(texture)

    # Attach the cuboid keypoints used by the exporter.
    add_cuboid(opt.entity + "_entity", opt.debug)

    # Keep track of the entities we want to export.
    entities_to_export = [opt.entity + "_entity"]

    # Loop where we change and render each frame.
    for i in tqdm(frame_ids):
        # Load a random negative onto the dome.
        negative = cv2.imread(random.choice(negatives))

        # Skip dark backgrounds (mean below 20/255).
        if np.mean(negative) < 20:
            continue

        # Fix lighting of the background and make it small within the FOV,
        # then route it through a temp file onto the dome light.
        background = make_background(negative)
        cv2.imwrite("test" + str(i) + ".png", background)
        dome = visii.texture.create_from_file("dome", "test" + str(i) + ".png")
        visii.set_dome_light_texture(dome)
        visii.set_dome_light_rotation(
            visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

        # Random anisotropic stretch of the object.
        stretch_factor = 2
        scale = [
            random.uniform(1, stretch_factor),  # width
            random.uniform(1, stretch_factor),  # length
            random.uniform(1, stretch_factor)   # height
        ]
        obj_entity.get_transform().set_scale(scale)

        # Random rotation while making sure the entity faces forward.
        rot = [
            random.uniform(-10, 10),  # Roll
            random.uniform(-15, 15),  # Pitch
            random.uniform(-45, 45)   # Yaw
        ]
        q = Quaternion.from_euler(rot[0], rot[1], rot[2], degrees=True)

        position = [
            random.uniform(0, 4),   # X Depth
            random.uniform(-1, 1),  # Y
            random.uniform(-1, 1)   # Z
        ]
        # Scale lateral offsets by depth so the object stays in frame.
        position[1] *= position[0]
        position[2] *= position[0]
        obj_entity.get_transform().set_position(tuple(position))
        obj_entity.get_transform().set_rotation((q.x, q.y, q.z, q.w))

        # Train/test split: opt.test_percent % of frames go to the test folder.
        # FIX: randint(0, 100) is inclusive of BOTH endpoints (101 outcomes),
        # so `randint(0, 100) < p` yielded p/101, not p %. random.random()
        # gives the exact requested percentage.
        if random.random() * 100 < opt.test_percent:
            folder = opt.entity + '_test/'
        else:
            folder = opt.entity + '_training/'

        # Render the scene.
        visii.render_to_file(width=opt.width,
                             height=opt.height,
                             samples_per_pixel=opt.spp,
                             file_path=opt.out + folder + opt.entity +
                             str(i) + '.png')

        # Export the matching JSON metadata.
        export_to_ndds_file(filename=opt.out + folder + opt.entity +
                            str(i) + '.json',
                            obj_names=entities_to_export,
                            width=opt.width,
                            height=opt.height,
                            camera_struct=camera_struct_look_at)

        # Remove the current negative from the dome and delete the temp file.
        # NOTE(review): reconstructed as per-iteration cleanup — the temp-file
        # name uses the loop index and the "dome" texture name must be freed
        # before the next create_from_file; confirm against the original layout.
        visii.clear_dome_light_texture()
        visii.texture.remove("dome")
        os.remove("test" + str(i) + ".png")

    visii.deinitialize()