def __renderInputs(self, materials, renderingScene, jitterLightPos, jitterViewPos, mixMaterials):
    fullSizeMixedMaterial = materials
    if mixMaterials:
        #Mix consecutive pairs of materials with a random alpha.
        alpha = tf.random_uniform([1], minval=0.1, maxval=0.9, dtype=tf.float32, name="mixAlpha")
        materials1 = materials[::2]
        materials2 = materials[1::2]
        fullSizeMixedMaterial = helpers.mixMaterials(materials1, materials2, alpha)
    if self.inputImageSize >= self.tileSize:
        #Do the random crop; if the crop is fixed, crop in the middle.
        if self.fixCrop:
            xyCropping = (self.inputImageSize - self.tileSize) // 2
            xyCropping = [xyCropping, xyCropping]
        else:
            xyCropping = tf.random_uniform([2], 0, self.inputImageSize - self.tileSize, dtype=tf.int32)
        cropped_mixedMaterial = fullSizeMixedMaterial[:, :, xyCropping[0]:xyCropping[0] + self.tileSize, xyCropping[1]:xyCropping[1] + self.tileSize, :]
    else:
        raise Exception("The input size is smaller than the rendering size, please provide higher resolution maps")
    cropped_mixedMaterial.set_shape([None, self.nbTargetsToRead, self.tileSize, self.tileSize, 3])
    mixedMaterial = helpers.adaptRougness(cropped_mixedMaterial)
    targetstoRender = helpers.target_reshape(mixedMaterial) #Reshape to [?, size, size, 12] to be compatible with the rendering algorithm.
    nbRenderings = 1
    rendererInstance = renderer.GGXRenderer(includeDiffuse=True)
    ## Do renderings of the mixedMaterial
    targetstoRender = helpers.preprocess(targetstoRender) #Move the targets to [-1; 1].
    surfaceArray = helpers.generateSurfaceArray(self.tileSize)
    inputs = helpers.generateInputRenderings(rendererInstance, targetstoRender, self.batchSize, nbRenderings, surfaceArray, renderingScene, jitterLightPos, jitterViewPos, self.useAmbientLight, useAugmentationInRenderings=self.useAugmentationInRenderings)
    self.gammaCorrectedInputsBatch = tf.squeeze(inputs, [1])
    inputs = tf.pow(inputs, 2.2) #Undo the gamma correction (back to linear).
    if self.logInput:
        inputs = helpers.logTensor(inputs)
    inputs = helpers.preprocess(inputs) #Move the inputs to [-1; 1].
    targets = helpers.target_deshape(targetstoRender, self.nbTargetsToRead)
    return targets, inputs
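# Value-range flow of the rendered inputs above (a descriptive sketch, assuming
# helpers.generateInputRenderings returns gamma-corrected renderings in [0, 1]):
#
#   rendering in [0, 1]  --tf.pow(x, 2.2)-->                          linear radiance
#                        --helpers.logTensor (only if self.logInput)--> log encoding
#                        --helpers.preprocess-->                       network input in [-1, 1]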
def populateFeedGraph(self, shuffle=False):
    with tf.name_scope("load_images"):
        #Create a tensor out of the list of paths.
        filenamesTensor = tf.constant(self.pathList)
        #Read one slice of the tensor at a time; for example, if the tensor has shape [100, 2], each slice has shape [2].
        dataset = tf.data.Dataset.from_tensor_slices(filenamesTensor)
        #For each slice, apply the __readImages function.
        dataset = dataset.map(self.__readImages, num_parallel_calls=int(multiprocessing.cpu_count() / 4))
        #Allow the dataset to repeat once an epoch is over.
        dataset = dataset.repeat()
        if shuffle:
            dataset = dataset.shuffle(buffer_size=256, reshuffle_each_iteration=True)
        #Set the batch size.
        batched_dataset = dataset.batch(self.batchSize)
        batched_dataset = batched_dataset.prefetch(buffer_size=4)
        #Create an iterator to be initialized.
        iterator = batched_dataset.make_initializable_iterator()
        #Create the node that retrieves the next batch.
        paths_batch, inputs_batch, targets_batch, gammadInputBatch = iterator.get_next()
        self.gammaCorrectedInputsBatch = gammadInputBatch
        reshaped_targets = helpers.target_reshape(targets_batch)
        randomTopLeftCrop = tf.zeros([self.batchSize, self.inputNumbers, 2], dtype=tf.int32)
        inputRealSize = self.inputImageSize

        targets_batch = helpers.target_deshape(reshaped_targets, self.nbTargetsToRead)
        #Do the random crop; if the crop is fixed, crop in the middle.
        if inputRealSize > self.cropSize:
            if self.fixCrop:
                xyCropping = (inputRealSize - self.cropSize) // 2
                xyCropping = [xyCropping, xyCropping]
            else:
                xyCropping = tf.random_uniform([1], 0, inputRealSize - self.cropSize, dtype=tf.int32)
            inputs_batch = inputs_batch[:, :, xyCropping[0]:xyCropping[0] + self.cropSize, xyCropping[0]:xyCropping[0] + self.cropSize, :]
            targets_batch = targets_batch[:, :, xyCropping[0]:xyCropping[0] + self.cropSize, xyCropping[0]:xyCropping[0] + self.cropSize, :]
        #Figure out how many inputs should be read, and whether it should be a random amount.
        if self.fixImageNb and self.maxInputToRead > 0:
            nbInputToUse = [self.maxInputToRead]
        else:
            nbInputToUse = tf.random_uniform([1], minval=1, maxval=(self.maxInputToRead + 1), dtype=tf.int32)
        inputs_batch = inputs_batch[:, :nbInputToUse[0]]
        #Set the shapes.
        inputs_batch.set_shape([None, None, self.cropSize, self.cropSize, 3])
        targets_batch.set_shape([None, self.nbTargetsToRead, self.cropSize, self.cropSize, 3])
        #Populate the object.
        self.stepsPerEpoch = int(math.floor(len(self.pathList) / self.batchSize))
        self.inputBatch = inputs_batch
        self.targetBatch = targets_batch
        self.iterator = iterator
        self.pathBatch = paths_batch
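# A minimal usage sketch (hypothetical driver code, not part of this class; assumes
# an instance `data` of this reader with a valid pathList, run with a TF1 session):
#
#   data.populateFeedGraph(shuffle=True)
#   with tf.Session() as sess:
#       sess.run(data.iterator.initializer)  # initializable iterators need explicit init
#       for _ in range(data.stepsPerEpoch):
#           inputs, targets = sess.run([data.inputBatch, data.targetBatch])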
def __renderInputs(self, materials, renderingScene, jitterLightPos, jitterViewPos, mixMaterials, isTest, renderSize):
    mixedMaterial = materials
    if mixMaterials:
        #Mix consecutive pairs of materials with a random alpha.
        alpha = tf.random_uniform([1], minval=0.1, maxval=0.9, dtype=tf.float32, name="mixAlpha")
        materials1 = materials[::2]
        materials2 = materials[1::2]
        mixedMaterial = helpers.mixMaterials(materials1, materials2, alpha)
    mixedMaterial.set_shape([None, self.nbTargetsToRead, renderSize, renderSize, 3])
    mixedMaterial = helpers.adaptRougness(mixedMaterial)
    #The two lines below try to scale the albedos (for more variety) and to randomly flatten the normals (to disambiguate normals and albedos). We did not see a strong effect from them.
    #if not isTest and self.useAugmentationInRenderings:
    #    mixedMaterial = helpers.adaptAlbedos(mixedMaterial, self.batchSize)
    #    mixedMaterial = helpers.adaptNormals(mixedMaterial, self.batchSize)
    reshaped_targets_batch = helpers.target_reshape(mixedMaterial) #Reshape to [?, size, size, 12] to be compatible with the rendering algorithm.
    nbRenderings = self.maxInputToRead
    if not self.fixImageNb:
        #If we don't want a constant number of input images, randomly pick a number of input images between 1 and the maximum number defined by the user.
        nbRenderings = tf.random_uniform([1], 1, self.maxInputToRead + 1, dtype=tf.int32)[0]
    rendererInstance = renderer.GGXRenderer(includeDiffuse=True)
    ## Do renderings of the mixedMaterial
    targetstoRender = reshaped_targets_batch
    pixelsToAdd = 0
    targetstoRender = helpers.preprocess(targetstoRender) #Move the targets to [-1; 1].
    #Generate a Y,X grid in [-1; 1] to act as the pixel support of the rendering (used to compute the direction vector between each pixel and the light/view).
    surfaceArray = helpers.generateSurfaceArray(renderSize, pixelsToAdd)
    #Do the renderings.
    inputs = helpers.generateInputRenderings(rendererInstance, targetstoRender, self.batchSize, nbRenderings, surfaceArray, renderingScene, jitterLightPos, jitterViewPos, self.useAmbientLight, useAugmentationInRenderings=self.useAugmentationInRenderings)

    randomTopLeftCrop = tf.zeros([self.batchSize, nbRenderings, 2], dtype=tf.int32)
    averageCrop = 0.0
    #If we want to jitter the renderings around (to account for small misalignments), the material crop must be handled a bit differently.
    #We did not manage to get satisfying results with the jittering of renderings, but the code could be useful if this is of interest to Ansys.
    if self.jitterRenderings:
        randomTopLeftCrop = tf.random_normal([self.batchSize, nbRenderings, 2], 0.0, 1.0)
        #Scale the jitter by a random per-example magnitude, shaped [batchSize, 1, 1] so it broadcasts over renderings and XY.
        randomTopLeftCrop = randomTopLeftCrop * tf.exp(tf.random_normal([self.batchSize, 1, 1], 0.0, 1.0))
        #Center the offsets to be zero-mean across the renderings of each material.
        randomTopLeftCrop = randomTopLeftCrop - tf.reduce_mean(randomTopLeftCrop, axis=1, keep_dims=True)
        randomTopLeftCrop = tf.round(randomTopLeftCrop)
        randomTopLeftCrop = tf.cast(randomTopLeftCrop, dtype=tf.int32)
        averageCrop = tf.cast(self.maxJitteringPixels * 0.5, dtype=tf.int32)
        randomTopLeftCrop = randomTopLeftCrop + averageCrop
        randomTopLeftCrop = tf.clip_by_value(randomTopLeftCrop, 0, self.maxJitteringPixels)
    totalCropSize = self.cropSize

    inputs, targets = helpers.cutSidesOut(inputs, targetstoRender, randomTopLeftCrop, totalCropSize, self.firstAsGuide, averageCrop)
    print("inputs shape after: " + str(inputs.get_shape()))

    self.gammaCorrectedInputsBatch = inputs
    tf.summary.image("GammadInputs", helpers.convert(inputs[0, :]), max_outputs=5)
    inputs = tf.pow(inputs, 2.2) #Undo the gamma correction (back to linear).
    if self.logInput:
        inputs = helpers.logTensor(inputs)
    inputs = helpers.preprocess(inputs) #Move the inputs to [-1; 1].
    targets = helpers.target_deshape(targets, self.nbTargetsToRead)
    return targets, inputs
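# Illustration of the jitter-offset construction above (a minimal NumPy sketch, not
# part of the pipeline; `B`, `N` and `maxJitter` are hypothetical stand-ins for the
# batch size, the number of renderings and self.maxJitteringPixels):
#
#   import numpy as np
#   B, N, maxJitter = 4, 3, 6
#   crop = np.random.normal(0.0, 1.0, (B, N, 2))
#   crop *= np.exp(np.random.normal(0.0, 1.0, (B, 1, 1)))    # random per-example magnitude
#   crop -= crop.mean(axis=1, keepdims=True)                 # zero-mean across renderings
#   crop = np.round(crop).astype(np.int32) + maxJitter // 2  # center inside the jitter margin
#   crop = np.clip(crop, 0, maxJitter)                       # offsets stay within the margin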
def populateInNetworkFeedGraphSpatialMix(self, renderingScene, shuffle=True, imageSize=512, useSpatialMix=True):
    with tf.name_scope("load_images"):
        #Create a tensor out of the list of paths.
        filenamesTensor = tf.constant(self.pathList)
        #Read one slice of the tensor at a time; for example, if the tensor has shape [100, 2], each slice has shape [2].
        dataset = tf.data.Dataset.from_tensor_slices(filenamesTensor)
        #For each slice, apply the __readImagesGT function.
        dataset = dataset.map(self.__readImagesGT, num_parallel_calls=int(multiprocessing.cpu_count() / 4))
        if shuffle:
            dataset = dataset.shuffle(buffer_size=16, reshuffle_each_iteration=True)
        #Allow the dataset to repeat once an epoch is over, and set the batch size.
        dataset = dataset.repeat()
        toPull = self.batchSize
        if useSpatialMix:
            #The spatial mix blends pairs of materials, so pull twice as many.
            toPull = self.batchSize * 2
        batched_dataset = dataset.batch(toPull)
        batched_dataset = batched_dataset.prefetch(buffer_size=4)
        #Create an iterator to be initialized.
        iterator = batched_dataset.make_initializable_iterator()
        #Create the node that retrieves the next batch.
        paths_batch, targets_batch = iterator.get_next()
        inputRealSize = imageSize #Should be the input image size, but changed temporarily.

        if useSpatialMix:
            #Threshold a Perlin noise mask and blend two consecutive materials through it.
            threshold = 0.5
            perlinNoise = tf.expand_dims(tf.expand_dims(helpers.generate_perlin_noise_2d((inputRealSize, inputRealSize), (1, 1)), axis=-1), axis=0)
            perlinNoise = (perlinNoise + 1.0) * 0.5 #Map the noise from [-1; 1] to [0; 1].
            perlinNoise = perlinNoise >= threshold
            perlinNoise = tf.cast(perlinNoise, tf.float32)
            inverted = 1.0 - perlinNoise
            materialsMixed1 = targets_batch[::2] * perlinNoise
            materialsMixed2 = targets_batch[1::2] * inverted
            fullSizeMixedMaterial = materialsMixed1 + materialsMixed2
            targets_batch = fullSizeMixedMaterial
            paths_batch = paths_batch[::2]

        targetstoRender = helpers.target_reshape(targets_batch) #Reshape to [?, size, size, 12] to be compatible with the rendering algorithm.
        nbRenderings = 1
        rendererInstance = renderer.GGXRenderer(includeDiffuse=True)
        ## Do renderings of the mixedMaterial
        targetstoRender = helpers.adaptRougness(targetstoRender) #Adapt the roughness and carry the result forward.
        targetstoRender = helpers.preprocess(targetstoRender) #Move the targets to [-1; 1].
        surfaceArray = helpers.generateSurfaceArray(inputRealSize)
        inputs_batch = helpers.generateInputRenderings(rendererInstance, targetstoRender, self.batchSize, nbRenderings, surfaceArray, renderingScene, False, False, self.useAmbientLight, useAugmentationInRenderings=self.useAugmentationInRenderings)
        targets_batch = helpers.target_deshape(targetstoRender, self.nbTargetsToRead)
        self.gammaCorrectedInputsBatch = tf.squeeze(inputs_batch, [1])
        inputs_batch = tf.pow(inputs_batch, 2.2) #Undo the gamma correction (back to linear).
        if self.logInput:
            inputs_batch = helpers.logTensor(inputs_batch)
        #Do the random crop; if the crop is fixed, crop in the middle.
        if inputRealSize > self.tileSize:
            if self.fixCrop:
                xyCropping = (inputRealSize - self.tileSize) // 2
                xyCropping = [xyCropping, xyCropping]
            else:
                xyCropping = tf.random_uniform([1], 0, inputRealSize - self.tileSize, dtype=tf.int32)
            inputs_batch = inputs_batch[:, :, xyCropping[0]:xyCropping[0] + self.tileSize, xyCropping[0]:xyCropping[0] + self.tileSize, :]
            targets_batch = targets_batch[:, :, xyCropping[0]:xyCropping[0] + self.tileSize, xyCropping[0]:xyCropping[0] + self.tileSize, :]
        #Set shapes. Before this the inputs have a useless dimension of size 1, as we have only one rendering.
        inputs_batch = tf.squeeze(inputs_batch, [1])
        inputs_batch.set_shape([None, self.tileSize, self.tileSize, 3])
        targets_batch.set_shape([None, self.nbTargetsToRead, self.tileSize, self.tileSize, 3])
        #Populate the object.
        self.stepsPerEpoch = int(math.floor(len(self.pathList) / self.batchSize))
        self.inputBatch = inputs_batch
        self.targetBatch = targets_batch
        self.iterator = iterator
        self.pathBatch = paths_batch
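# A minimal usage sketch for the spatial-mix feed graph (hypothetical driver code;
# assumes an instance `data` of this reader and a `renderingScene` value accepted
# by helpers.generateInputRenderings):
#
#   data.populateInNetworkFeedGraphSpatialMix(renderingScene, shuffle=True, imageSize=512)
#   with tf.Session() as sess:
#       sess.run(data.iterator.initializer)
#       inputs, targets = sess.run([data.inputBatch, data.targetBatch])
#       # inputs:  [batchSize, tileSize, tileSize, 3]
#       # targets: [batchSize, nbTargetsToRead, tileSize, tileSize, 3]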