Example #1
	def write_images(self, image_batches, file_masks, base_path=None, frame_id=None, use_batch_id=False, format='EXR', y_flip=True, gamma=1.0):
		formats = {'EXR':'.exr', 'PNG':'.png'}
		os.makedirs(base_path, exist_ok=True)
		for image_batch, file_mask in zip(image_batches, file_masks):
			i=0
			for image in image_batch:
				image_shape = shape_list(image)
				if y_flip: # flip the image vertically (reverse the height axis)
					image = tf.reverse(image, axis=[-3])
				if image_shape[-1]==2: # pad 2-channel data to 3 channels for image output
					image = tf.pad(image, [[0,0] for _ in range(len(image_shape)-1)] + [[0,1]])
					image_shape = shape_list(image)
				if frame_id is not None and use_batch_id:
					file_name = file_mask.format(i, frame_id)
				elif frame_id is not None and not use_batch_id:
					file_name = file_mask.format(frame_id)
				elif frame_id is None and use_batch_id:
					file_name = file_mask.format(i)
				else:
					file_name = file_mask
				file_name += formats[format]
				path = os.path.join(base_path, file_name) if base_path is not None else file_name
				if gamma!=1.0:
					image = gammaCorrection(image, gamma)
				if format=='EXR':
					try:
						imageio.imwrite(path, image, 'EXR-FI')
					except KeyboardInterrupt:
						raise
					except Exception:
						self.log.exception("Failed to write exr image with shape %s to '%s':", image.get_shape().as_list(), path)
						return
				elif format=='PNG':
					image = (np.clip(image, 0.0, 1.0)*255.0).astype(np.uint8)
					try:
						imageio.imwrite(path, image)
					except KeyboardInterrupt:
						raise
					except Exception:
						self.log.exception("Failed to write png image with shape %s to '%s':", list(image.shape), path)
						return
				else:
					raise ValueError('format not supported')
				i+=1
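A minimal usage sketch (not part of the original code): the renderer instance, the image tensor, and the file mask below are assumptions chosen to match the format-string logic above.

# hypothetical usage: write a batch of 4 RGB float images as gamma-corrected PNGs
images = tf.random.uniform([4, 64, 64, 3])  # assumed NHWC image batch
renderer.write_images([images], ["img_b{}_f{}"], base_path="out",
	frame_id=10, use_batch_id=True, format='PNG', gamma=2.2)
# -> out/img_b0_f10.png ... out/img_b3_f10.png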
Example #2
    def _render_shadow_map(self, density_transform, renderer):
        if self.cast_shadows:
            #print(density_transform.data.get_shape(), tf.reduce_mean(density_transform.data))
            shadow_density = renderer.sample_camera(density_transform.data,
                                                    density_transform,
                                                    self.shadow_cam,
                                                    inverse=False,
                                                    use_step_channel=[0])
            #print(shadow_density.get_shape(), tf.reduce_mean(shadow_density))
            shadow_density = renderer.blending.reduce_grid_blend(
                shadow_density, renderer.blend_mode, keep_dims=True)
            #shadow_density = renderer._blend_grid(shadow_density, renderer.blend_mode, keep_dims=True)
            #shift by one cell into depth to avoid cell-self-shadowing (?)
            if renderer.boundary_mode == 'BORDER':
                shadow_shape = shape_list(shadow_density)
                shadow_shape[-4] = 1
                pad = tf.zeros(shadow_shape, dtype=tf.float32)
            elif renderer.boundary_mode == 'CLAMP':
                pad = shadow_density[..., :1, :, :, :]
            elif renderer.boundary_mode == 'WRAP':
                pad = shadow_density[..., -1:, :, :, :]
            else:
                raise ValueError("Unknow boundary_mode %s" %
                                 renderer.boundary_mode)
            shadow_density = tf.concat(
                [pad, shadow_density[..., :-1, :, :, :]], axis=-4)

            if renderer.blend_mode == 'BEER_LAMBERT':
                #shadow_density = tf.math.cumsum(shadow_density, axis=-4, exclusive=True) #+ remove cell shift
                transmission = tf.exp(-shadow_density)
            elif renderer.blend_mode == 'ALPHA':
                #shadow_density = tf.math.cumprod(shadow_density, axis=-4, exclusive=True) #? TODO
                transmission = (1 - tf.clip_by_value(shadow_density, 0, 1))
            else:
                raise ValueError('Unknown blend_mode \'{}\''.format(
                    renderer.blend_mode))
        else:
            transmission = tf.ones([1] + list(self.shadow_resolution) + [1],
                                   dtype=tf.float32)
        if self.cone_mask:
            transmission *= self._get_shadow_mask()

        transmission = tf.squeeze(
            renderer.sample_camera(transmission,
                                   density_transform,
                                   self.shadow_cam,
                                   inverse=True,
                                   use_step_channel=None), 0)
        #print(transmission.get_shape(), tf.reduce_mean(transmission))
        return transmission
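A sketch of how the returned transmission grid might be used; the light object owning this method and the per-cell light intensity are hypothetical names, not the original implementation.

# hypothetical: attenuate a light's per-cell contribution by the shadow transmission (DHWC grid)
transmission = light._render_shadow_map(density_transform, renderer)
shadowed_light = light_intensity * transmission  # per-cell shadowed lighting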
Example #3
	def render_density(self, density_transform, light_list, camera_list, cut_alpha=True, background=None, monochrome=False, split_cameras=False, custom_ops=None, tonemapping="NONE"):
		
		with self.profiler.sample('Render'):
			## apply lighting to grid
			if custom_ops is not None and "DENSITY" in custom_ops:
				t_density = density_transform.data
				density_transform.set_data(self._apply_custom_ops(t_density, custom_ops, "DENSITY"))
			light_data = self._build_light_grid(density_transform, light_list, monochrome)
			self.log.debug('light shape: %s', tf.shape(light_data))
			data = tf.concat([light_data, density_transform.data], axis=-1)
			del light_data
			## resample to frustum grid
			cam_images = [None]*len(camera_list)
			self.log.debug('render cameras: %d', len(camera_list))
			with self.profiler.sample('Render Cameras'):
				with self.profiler.sample('Sort'):
					non_static_cams, static_cams = self._sort_cameras(camera_list)
				cameras = non_static_cams + static_cams
				for cam_size, cams in cameras:
					with self.profiler.sample('Size {} x{}'.format(cam_size, len(cams))):
						if split_cameras:
							images = []
							for cam in cams:
								images.append(self._render_cameras(data, density_transform, [cam], cam_size, custom_ops=custom_ops))
							images = tf.concat(images, axis=0)
						else:
							images = self._render_cameras(data, density_transform, cams, cam_size, custom_ops=custom_ops)
							
						t = tf.exp(-images[...,-1:])
						if cut_alpha:
							images = images[...,:-1]
						img_shape = shape_list(images)
						if background is not None:
							cam_batch_bkgs = tf.stack([tf.broadcast_to(background[camera_list.index(cam)], img_shape[1:]) for cam in cams])
							images += cam_batch_bkgs * t
						with self.profiler.sample('Tonemapping (%s)'%tonemapping):
							images = self._tonemap(images, mode=tonemapping)
						#reorder rendered images to match order of input cameras
						images = tf.split(images, len(cams))
						for img, cam in zip(images, cams):
							if cam.scissor_pad is not None:
								img = tf.pad(img, [(0,0)] + list(cam.scissor_pad))
							cam_images[camera_list.index(cam)] = img
						del images
			if custom_ops is not None and "DENSITY" in custom_ops:
				density_transform.set_data(t_density)
			return cam_images
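A hedged usage sketch; the renderer instance and the light and camera lists are assumed to be set up elsewhere.

# hypothetical call: render the density grid from several cameras without a background
images = renderer.render_density(density_transform, lights, cameras,
	cut_alpha=True, tonemapping="NONE")
# returns one image tensor per camera, in the order of the input camera list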
Example #4
	def resample_grid3D_offset(self, data, offsets, target_shape):
		if not isinstance(offsets, tf.Tensor):
			offsets = tf.constant(offsets, dtype=tf.float32)
		offsets_shape = shape_list(offsets)
		if len(offsets_shape)!=2 or offsets_shape[1]!=3:
			raise ValueError("Shape of offsets must be (N,3), is %s"%offsets_shape)
		if len(target_shape)!=3:
			raise ValueError("target_shape must be (3,), is %s"%target_shape)
		data_shape = GridShape.from_tensor(data)
		
		offsets = tf.pad(offsets, ((0,0),(0,1))) #pad to (N,4)
		offsets = tf.reshape(offsets, [offsets_shape[0],1,1,1,4])
		offsets = tf.broadcast_to(offsets, [offsets_shape[0]]+list(target_shape)+[4])
		
		return self._sample_LuT(data, offsets, relative=True)
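A small usage sketch, assuming data is an NDHWC grid and the offsets are relative coordinates, as the relative=True call above suggests; grid_data and the target resolution are illustrative.

# hypothetical: sample the grid at two relative offsets into a 32^3 target grid
offsets = [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]]  # shape (N,3), here N=2
resampled = renderer.resample_grid3D_offset(grid_data, offsets, [32, 32, 32])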
Example #5
	def check_LoD(self, grid_transform, camera, check_inverse=True, name=None):
		_LuT_LoD = self.get_camera_LuT(grid_transform, camera)
		axes = [_ for _ in range(len(shape_list(_LuT_LoD))-1)]
		LoD_min = tf.reduce_min(_LuT_LoD, axis=axes)[-1].numpy()
		LoD_max = tf.reduce_max(_LuT_LoD, axis=axes)[-1].numpy()
		del _LuT_LoD
		if check_inverse:
			_LuT_LoD = self.get_camera_LuT(grid_transform, camera, inverse=True)
			axes = [_ for _ in range(len(shape_list(_LuT_LoD))-1)]
			LoD_grad_min = tf.reduce_min(_LuT_LoD, axis=axes)[-1].numpy()
			LoD_grad_max = tf.reduce_max(_LuT_LoD, axis=axes)[-1].numpy()
			del _LuT_LoD
			if name is not None:
				self.log.info("%s stats: shape: %s (%.2f Mi), step: %f, LoD: %f - %f (grad: %f - %f)", name, camera.transform.grid_size, np.prod(camera.transform.grid_size)/(1024*1024), camera.depth_step, LoD_min, LoD_max, LoD_grad_min, LoD_grad_max)
			stats = {"shape":camera.transform.grid_size, "step":camera.depth_step, "LoD_min":LoD_min, "LoD_max":LoD_max, "LoD_grad_min":LoD_grad_min, "LoD_grad_max":LoD_grad_max}
		else:
			if name is not None:
				self.log.info("%s stats: shape: %s (%.2f Mi), step: %f, LoD: %f - %f", name, camera.transform.grid_size, np.prod(camera.transform.grid_size)/(1024*1024), camera.depth_step, LoD_min, LoD_max)
			stats = {"shape":camera.transform.grid_size, "step":camera.depth_step, "LoD_min":LoD_min, "LoD_max":LoD_max}
		return stats
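A usage sketch; grid_transform and camera are assumed to be valid arguments of get_camera_LuT.

# hypothetical: log LoD statistics for one camera and inspect the returned dict
stats = renderer.check_LoD(grid_transform, camera, check_inverse=True, name="main_cam")
print(stats["LoD_min"], stats["LoD_max"])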
Example #6
	def sample_camera(self, data, transformations, cameras, inverse=False, allow_static=True, force_static=False, use_step_channel=None):
		#check data
		data_shape = tf.shape(data).numpy()
		if not len(data_shape)==5: raise ValueError('data must be 5D (NDHWC)')
		if not isinstance(transformations, Iterable):
			transformations =[transformations]
			if data_shape[0]!=1: raise ValueError('transformation and data batch size mismatch.')
			no_batch = True
		else:
			if len(transformations)!=data_shape[0]: raise ValueError('transformation and data batch size mismatch.')
			no_batch = False
		
		# check cameras
		if not isinstance(cameras, Iterable): cameras = [cameras] #compat
		if len(cameras)>1 and not all((cam.transform.grid_size==cameras[0].transform.grid_size for cam in cameras[1:])):
			raise ValueError('all cameras must have the same resolution (DHW). (use Renderer._sort_cameras() for batching.)')
		cam_size = cameras[0].transform.grid_size
		#check static rendering (precomputed LuT)
		sample_lut = False
		if allow_static and any((cam.static is not None for cam in cameras)):
			if not all((cam.static==transformations[0] for cam in cameras)):
				if force_static: raise ValueError('Camera static setup does not match transformation')
				else: self.log.warning('Incorrect static camera setup, falling back to transform rendering for static cameras.')
			else:
				if not no_batch:
					if force_static: raise ValueError('Static cameras only work without data batch.')
					else: self.log.warning('Static camera rendering does not support batched data, falling back to transform rendering for static cameras.')
				else: sample_lut=True
		
		apply_step_channel = False
		if use_step_channel is not None and use_step_channel!=[]:
			if np.isscalar(use_step_channel):
				data = data * use_step_channel
			else:
				step_channel = [_%data_shape[-1] for _ in use_step_channel if ((-data_shape[-1]) <= _ and _ < data_shape[-1])]
				step_channel = sorted(step_channel)
				depth_steps = [cam.depth_step for cam in cameras]
				# if the step size is the same for every camera it can be multiplied in before sampling; this is fine with linear interpolation, and the grid before sampling is usually smaller
				if np.all([step==depth_steps[0] for step in depth_steps]):
				#	if: every channel is included -> premultiply with scalar
					if step_channel==list(range(data_shape[-1])):
						data = data * depth_steps[0]
				#	else -> premultiply with channel vector
					else:
						data = data * tf.constant([(depth_steps[0] if _ in step_channel else 1) for _ in range(data_shape[-1])], dtype=tf.float32)
				#else -> multiply after sampling
				else:
					apply_step_channel = True
		
		if sample_lut:
			sampled = self._sample_camera_LuT(data, transformations[0], cameras, inverse)
		else:
			sampled = self._sample_camera_transform(data, transformations, cameras, inverse) #NVDHWC
		
		
		# depth-step size correction
		if apply_step_channel:
			shape = shape_list(sampled)
			step = tf.constant([[(depth_step if _ in step_channel else 1.0) for _ in range(shape[-1])] for depth_step in depth_steps], dtype=tf.float32) #VC; channels not in use_step_channel keep a factor of 1
			step = tf.reshape(step, (1,shape[-5],1,1,1,shape[-1])) #NVDHWC
			shape[-1]=1
			shape[-5]=1
			step = tf.tile(step, shape)
			sampled = sampled * tf.stop_gradient(step)
		
		
		return tf.squeeze(sampled, 0) if no_batch else sampled
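A usage sketch for the forward direction; the grid tensor, the transformation, and the camera list are assumptions for illustration.

# hypothetical: resample a world-space NDHWC grid into the frustum grids of two same-resolution cameras
frustum_grid = renderer.sample_camera(grid_data, grid_transform, [cam0, cam1],
	inverse=False, use_step_channel=[0])
# with a single (non-list) transformation the data batch must be 1 and the leading batch dimension is squeezed away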