def setArray(self, dimension, values, floatEncoding='string'):
    """Set this node's LUT Array child from a grid-size list and flat values.

    dimension : list of grid sizes; a channel count of 3 is appended to
        form the final Array dimensions.
    values : flat list of LUT entry values.
    floatEncoding : encoding used by Array when serializing float values.
    """
    # Copy the caller's list before appending — the original code appended
    # directly to `dimension`, mutating the caller's argument in place.
    dimensions = list(dimension)
    dimensions.append(3)

    # Integer-valued arrays are flagged from the declared output bit depth.
    integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))

    self._array = Array(dimensions, values, integers,
                        floatEncoding=floatEncoding)
    self.addElement(self._array)
def setArray(self, dimension, values, floatEncoding='string'):
    """Set this node's LUT Array child from a channel count and flat values.

    dimension : number of channels per LUT entry.
    values : flat list of LUT entry values; its length must be a multiple
        of `dimension`.
    floatEncoding : encoding used by Array when serializing float values.
    """
    # Use integer division: '/' yields a float row count on Python 3
    # (this file supports py2/py3 via six), breaking the Array dimensions.
    dimensions = [len(values) // dimension, dimension]

    integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
    # rawHalfs is truthy unless the attribute is absent or explicitly False.
    rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])

    self._array = Array(dimensions, values,
                        rawHalfs=rawHalfs, integers=integers,
                        floatEncoding=floatEncoding)
    self.addElement(self._array)
def setMatrix(self, dimensions, values, floatEncoding='string'):
    """Create this node's matrix Array child from dimensions and flat values."""
    # Integer-valued matrices are flagged from the declared output bit depth.
    integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))

    matrixArray = Array(dimensions, values, integers,
                        floatEncoding=floatEncoding)
    self._array = matrixArray
    self.addElement(matrixArray)
def readChild(self, element):
    """Parse one child XML element (Array or IndexMap).

    Returns the parsed child object, or None for unrecognized tags.
    """
    tag = element.tag
    child = None

    if tag == 'Array':
        child = Array()
        child.read(element)
        # Mark integer-valued arrays based on the declared output bit depth.
        child.setValuesAreIntegers(
            bitDepthIsInteger(self.getAttribute('outBitDepth')))
        self._array = child
    elif tag == 'IndexMap':
        child = IndexMap()
        child.read(element)
        self._indexMaps.append(child)

    return child
def _createCachedProcess(self):
    """Build the cached lookup Array by evaluating _processRaw at every
    half-float value (one entry per 16-bit integer bit pattern)."""
    channels = 1
    resolution = 65536

    cache = [0.0] * (resolution * channels)
    for index in range(resolution):
        # The half-float value whose bit pattern is this 16-bit integer.
        sample = uint16ToHalf(index)

        # Evaluate the mapping for this sample.
        # NOTE(review): should take the channel as input, or process an
        # RGB triple — currently the same value is stored per channel.
        mapped = self._processRaw(sample)

        for channel in range(channels):
            cache[index * channels + channel] = mapped

    self._processCached = Array([len(cache), channels], cache)
def readChild(self, element):
    """Parse one child XML element; only the 'Array' tag is recognized here.

    Returns the parsed Array, or None for any other tag.
    """
    if element.tag != 'Array':
        return None

    child = Array()
    child.read(element)
    # Mark integer-valued arrays based on the declared output bit depth.
    child.setValuesAreIntegers(
        bitDepthIsInteger(self.getAttribute('outBitDepth')))
    self._array = child
    return child
class LUT3D(ProcessNode):
    "A Common LUT Format LUT 3D ProcessNode element"

    def __init__(self, inBitDepth=bitDepths["FLOAT16"], outBitDepth=bitDepths["FLOAT16"],
                 id="", name="", interpolation='trilinear'):
        """Initialize the standard class variables for a LUT3D node."""
        ProcessNode.__init__(self, 'LUT3D', inBitDepth, outBitDepth, id, name)
        if interpolation != '':
            self._attributes['interpolation'] = interpolation

        self._array = None
        self._indexMaps = []
    # __init__

    def setIndexMaps(self, valuesR, valuesG=None, valuesB=None):
        """Attach one IndexMap (R applied to all channels) or three (R, G, B)."""
        indexMapR = IndexMap(len(valuesR[0]), valuesR)
        self._indexMaps.append(indexMapR)
        self.addElement(indexMapR)

        # Either one or three indexMaps
        if valuesG is not None and valuesB is not None:
            indexMapG = IndexMap(len(valuesG[0]), valuesG)
            self._indexMaps.append(indexMapG)
            self.addElement(indexMapG)

            indexMapB = IndexMap(len(valuesB[0]), valuesB)
            self._indexMaps.append(indexMapB)
            self.addElement(indexMapB)
    # setIndexMaps

    def setArray(self, dimension, values, floatEncoding='string'):
        """Set the LUT's Array child; a channel count of 3 is appended to
        the supplied grid-size list to form the Array dimensions."""
        # Copy before appending so the caller's list is not mutated.
        dimensions = list(dimension)
        dimensions.append(3)

        integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))

        self._array = Array(dimensions, values, integers,
                            floatEncoding=floatEncoding)
        self.addElement(self._array)
    # setArray

    def getLUTDimensions(self):
        return self._array.getDimensions()

    def getLUTValues(self):
        return self._array.getValues()

    def getIndexMapDimensions(self, channel):
        return self._indexMaps[channel].getDimensions()

    def getIndexMapValues(self, channel):
        return self._indexMaps[channel].getValues()

    def readChild(self, element):
        """Parse an Array or IndexMap child element; return it, or None."""
        child = None
        if element.tag == 'Array':
            child = Array()
            child.read(element)
            integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
            child.setValuesAreIntegers(integers)
            self._array = child
        elif element.tag == 'IndexMap':
            child = IndexMap()
            child.read(element)
            self._indexMaps.append(child)
        return child
    # readChild

    def process(self, values, stride=0, verbose=False):
        """Apply the 3D LUT to `values`, `stride` floats at a time.

        values : flat sequence of input samples.
        stride : number of floats per pixel; 0 means process `values`
            as a single pixel.
        Returns a float32 numpy array the same length as `values`.
        """
        # Base attributes
        inBitDepth = self._attributes['inBitDepth']
        outBitDepth = self._attributes['outBitDepth']

        # Node attributes
        interpolation = ''
        if 'interpolation' in self._attributes:
            interpolation = self._attributes['interpolation']

        # Get LUT dimensions
        dimensions = self.getLUTDimensions()

        # Handle processing of single values
        if stride == 0:
            stride = len(values)

        # Initialize the output value
        outValues = np.zeros(len(values), dtype=np.float32)

        for p in range(len(values) // stride):
            value = values[p * stride:(p + 1) * stride]
            # NOTE(review): for ndarray input this slice aliases `value`;
            # for list input it is an independent copy — behavior kept as-is.
            outValue = values[p * stride:(p + 1) * stride]

            # Run each channel through the index map, or base normalization
            for i in range(min(3, len(value))):
                # Three IndexMaps: per-channel map, then normalize
                if len(self._indexMaps) > 1:
                    outValue[i] = self._indexMaps[i].process(outValue[i])
                    outValue[i] /= float(dimensions[i] - 1)
                # Single IndexMap applied to every channel, then normalize
                elif len(self._indexMaps) > 0:
                    outValue[i] = self._indexMaps[0].process(outValue[i])
                    outValue[i] /= float(dimensions[i] - 1)
                # Normalize from bit-depth
                else:
                    # Convert input bit depth
                    outValue[i] = bitDepthToNormalized(outValue[i], inBitDepth)

            # Run color through LUT
            # trilinear interpolation
            if interpolation == 'trilinear':
                outValue[0:3] = self._array.lookup3DTrilinear(outValue)
            # tetrahedral interpolation
            elif interpolation == 'tetrahedral':
                outValue[0:3] = self._array.lookup3DTetrahedral(outValue)

            # Bit depth conversion for output is ignored for LUTs,
            # as LUT values are assumed to target a specific bit depth.

            # Copy the extra channels
            for i in range(min(3, stride), stride):
                outValue[i] = value[i]

            # Copy to the output array
            outValues[p * stride:(p + 1) * stride] = outValue

        return outValues
    # process
# LUT3D
class LUT1D(ProcessNode):
    "A Common LUT Format LUT 1D ProcessNode element"

    def __init__(self, inBitDepth=bitDepths["FLOAT16"], outBitDepth=bitDepths["FLOAT16"],
                 id="", name="", interpolation='linear', rawHalfs='', halfDomain=''):
        """Initialize the standard class variables for a LUT1D node."""
        ProcessNode.__init__(self, 'LUT1D', inBitDepth, outBitDepth, id, name)
        if interpolation != '':
            self._attributes['interpolation'] = interpolation
        if rawHalfs != '':
            self._attributes['rawHalfs'] = rawHalfs
        if halfDomain != '':
            self._attributes['halfDomain'] = halfDomain

        self._array = None
        self._indexMaps = []
    # __init__

    def setIndexMaps(self, valuesR, valuesG=None, valuesB=None):
        """Attach one IndexMap (R applied to all channels) or three (R, G, B)."""
        indexMapR = IndexMap(len(valuesR[0]), valuesR)
        self._indexMaps.append(indexMapR)
        self.addElement(indexMapR)

        # Either one or three indexMaps
        if valuesG is not None and valuesB is not None:
            indexMapG = IndexMap(len(valuesG[0]), valuesG)
            self._indexMaps.append(indexMapG)
            self.addElement(indexMapG)

            indexMapB = IndexMap(len(valuesB[0]), valuesB)
            self._indexMaps.append(indexMapB)
            self.addElement(indexMapB)
    # setIndexMaps

    def setArray(self, dimension, values, floatEncoding='string'):
        """Set the LUT's Array child.

        dimension : number of channels per LUT entry.
        values : flat list of LUT entry values; length must be a multiple
            of `dimension`.
        """
        # Use integer division: '/' yields a float row count on Python 3,
        # breaking the Array dimensions.
        dimensions = [len(values) // dimension, dimension]

        integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
        # rawHalfs is truthy unless the attribute is absent or explicitly False.
        rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])

        self._array = Array(dimensions, values,
                            rawHalfs=rawHalfs, integers=integers,
                            floatEncoding=floatEncoding)
        self.addElement(self._array)
    # setArray

    def getLUTDimensions(self):
        return self._array.getDimensions()

    def getLUTValues(self):
        return self._array.getValues()

    def getIndexMapDimensions(self, channel):
        return self._indexMaps[channel].getDimensions()

    def getIndexMapValues(self, channel):
        return self._indexMaps[channel].getValues()

    def readChild(self, element):
        """Parse an Array or IndexMap child element; return it, or None."""
        child = None
        if element.tag == 'Array':
            rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])
            child = Array(rawHalfs=rawHalfs)
            child.read(element)
            integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
            child.setValuesAreIntegers(integers)
            self._array = child
        elif element.tag == 'IndexMap':
            child = IndexMap()
            child.read(element)
            self._indexMaps.append(child)
        return child
    # readChild

    def process(self, values, stride=0, verbose=False):
        """Apply the 1D LUT per channel, `stride` floats at a time.

        values : flat sequence of input samples.
        stride : number of floats per pixel; 0 means process `values`
            as a single pixel.
        Returns a float32 numpy array the same length as `values`.
        """
        # Base attributes
        inBitDepth = self._attributes['inBitDepth']
        outBitDepth = self._attributes['outBitDepth']

        # Node attributes
        interpolation = ''
        if 'interpolation' in self._attributes:
            interpolation = self._attributes['interpolation']

        rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])
        halfDomain = not (self.getAttribute('halfDomain') in [None, False])

        # Get LUT dimensions
        dimensions = self.getLUTDimensions()

        # Handle processing of single values
        if stride == 0:
            stride = len(values)

        # Initialize the output value
        outValues = np.zeros(len(values), dtype=np.float32)

        for p in range(len(values) // stride):
            value = values[p * stride:(p + 1) * stride]
            # NOTE(review): for ndarray input this slice aliases `value`;
            # for list input it is an independent copy — behavior kept as-is.
            outValue = values[p * stride:(p + 1) * stride]

            for i in range(min(3, stride)):
                # Three IndexMaps: per-channel map, then normalize
                if len(self._indexMaps) > 1:
                    outValue[i] = self._indexMaps[i].process(outValue[i])
                    outValue[i] /= float(dimensions[0] - 1)
                # Single IndexMap applied to every channel, then normalize
                elif len(self._indexMaps) > 0:
                    outValue[i] = self._indexMaps[0].process(outValue[i])
                    outValue[i] /= float(dimensions[0] - 1)
                # Normalize from bit-depth
                else:
                    # Convert input bit depth
                    outValue[i] = bitDepthToNormalized(outValue[i], inBitDepth)

                # Run through LUT
                # Use Cubic interpolation
                if interpolation == 'cubic':
                    outValue[i] = self._array.lookup1DCubic(outValue[i], i)
                # Use halfDomain lookup and interpolation
                elif halfDomain:
                    outValue[i] = self._array.lookup1DHalfDomain(
                        outValue[i], i, interpolate=True)
                # Linear interpolation is the default
                else:
                    outValue[i] = self._array.lookup1DLinear(outValue[i], i)

            # Bit depth conversion for output is ignored for LUTs,
            # as LUT values are assumed to target a specific bit depth.

            # Copy the extra channels
            for i in range(min(3, stride), stride):
                outValue[i] = value[i]

            # Copy to the output array
            outValues[p * stride:(p + 1) * stride] = outValue

        return outValues
    # process
# LUT1D
class IndexMap:
    "A Common LUT Format IndexMap element"

    def __init__(self, dimension=None, values=None, elementType='IndexMap',
                 useCachedProcess=True):
        """Initialize the standard class variables for an IndexMap.

        dimension : declared map dimension (defaults to []).
        values : two parallel lists [inputValues, outputValues] (defaults to []).
        elementType : XML tag name used on write.
        useCachedProcess : when True, process() uses a precomputed
            half-domain lookup Array instead of evaluating directly.
        """
        # None sentinels instead of mutable [] defaults, which would be
        # shared across all instances.
        self._dimension = [] if dimension is None else dimension
        self._values = [] if values is None else values
        self._elementType = elementType

        self._useCachedProcess = useCachedProcess
        self._processCached = None
        if self._useCachedProcess and self._values != [] and self._dimension != []:
            self._createCachedProcess()
    # __init__

    def setDimension(self, dimension):
        self._dimension = dimension

    def getDimension(self):
        return self._dimension

    def setValues(self, values):
        self._values = values
        if self._useCachedProcess and self._values != [] and self._dimension != []:
            # Bug fix: previously called the non-existent _createCachedEval(),
            # which raised AttributeError whenever the cache had to refresh.
            self._createCachedProcess()

    def getValues(self):
        return self._values

    def setuseCachedProcess(self, useCachedProcess):
        self._useCachedProcess = useCachedProcess

    # NOTE(review): trailing "ue" typo in the name is kept — renaming would
    # break external callers.
    def getUseCachedProcessue(self):
        return self._useCachedProcess

    # evaluation and caching
    def _processRaw(self, value, verbose=False):
        """Map `value` through the index map by piecewise-linear interpolation.

        NaN/Inf pass through unchanged; values outside the input range clamp
        to the first/last output value.
        """
        inputValues = self._values[0]
        outputValues = self._values[1]

        # NaNs and Infs pass through unchanged
        if np.isnan(value) or np.isinf(value):
            result = value
        # Below the input range
        elif value <= inputValues[0]:
            result = outputValues[0]
        # Above the input range
        elif value >= inputValues[-1]:
            result = outputValues[-1]
        # Within the input range
        else:
            # value lies strictly inside the range, so a bracketing segment
            # exists; iterate to len-1 so i+1 can never index out of range.
            for i in range(len(inputValues) - 1):
                if value <= inputValues[i + 1]:
                    inputLow = inputValues[i]
                    inputHigh = inputValues[i + 1]
                    interp = (value - inputLow) / (inputHigh - inputLow)
                    outputLow = outputValues[i]
                    outputHigh = outputValues[i + 1]
                    result = interp * (outputHigh - outputLow) + outputLow
                    break
        return result
    # _processRaw

    def _createCachedProcess(self):
        """Build the cached lookup Array by evaluating _processRaw at every
        half-float value (one entry per 16-bit integer bit pattern)."""
        channels = 1
        resolution = 65536
        cacheValues = [0.0] * resolution
        for i in range(resolution):
            # The half value corresponding to this 16-bit integer pattern
            sample = uint16ToHalf(i)

            # Apply the function to the sample value
            # Should take the channel as input, or process an RGB triple
            fvalue = self._processRaw(sample)

            # Store the values
            for c in range(channels):
                cacheIndex = i * channels + c
                cacheValues[cacheIndex] = fvalue

        dimensions = [len(cacheValues), channels]
        self._processCached = Array(dimensions, cacheValues)
    # _createCachedProcess

    # Read / Write
    def write(self, tree):
        """Serialize this IndexMap as a child of `tree`; return the element."""
        element = etree.SubElement(tree, self._elementType)
        element.set('dim', str(self._dimension))
        # XXX
        # Make this pretty at some point
        element.text = " ".join(
            list(
                map(lambda a, b: "%s@%s" % (float(a), int(b)),
                    self._values[0], self._values[1])))
        return element
    # write

    def read(self, element):
        """Populate this IndexMap from a parsed XML element."""
        # Store attributes
        for key, value in six.iteritems(element.attrib):
            if key == 'dim':
                self._dimension = int(value)

        # Entries are "input@output" pairs
        self._values = []
        self._values.append(
            list(map(lambda p: float(p.split('@')[0]), element.text.split())))
        self._values.append(
            list(map(lambda p: float(p.split('@')[1]), element.text.split())))
    # read

    # Process values
    def process(self, value, verbose=False):
        """Map a single value through the IndexMap, using the cache if enabled."""
        # Pull results from cache
        if self._useCachedProcess:
            if self._processCached is None:
                self._createCachedProcess()
            result = self._processCached.lookup1DHalfDomainInterpolated(
                value, 0)
        # Evaluate with base IndexMap values
        else:
            result = self._processRaw(value, verbose)
        return result
    # process

    def printInfo(self):
        """Print a human-readable summary of this IndexMap."""
        print("%20s" % "IndexMap")
        length = len(self._values[0])
        print("%20s : %s" % ("Length", length))
        print("%20s" % "Values")
        if length < 15:
            print("\t\tmap : %s" % " ".join(
                map(lambda a, b: "%s,%s" % (a, b),
                    self._values[0], self._values[1])))
        else:
            # Long maps: show the first and last three pairs only
            pairs = list(
                map(lambda a, b: "%6.9f, %6.9f" % (a, b),
                    self._values[0], self._values[1]))
            for n in (range(3)):
                print(" " * 30 + pairs[n])
            print(" " * 30 + " ... ")
            for n in (range(length - 3, length)):
                print(" " * 30 + pairs[n])
    # printInfo
# IndexMap