class LUT3D(ProcessNode):
    "A Common LUT Format LUT 3D ProcessNode element"

    def __init__(self,
                 inBitDepth=bitDepths["FLOAT16"],
                 outBitDepth=bitDepths["FLOAT16"],
                 id="",
                 name="",
                 interpolation='trilinear'):
        "%s - Initialize the standard class variables" % 'LUT3D'
        ProcessNode.__init__(self, 'LUT3D', inBitDepth, outBitDepth, id, name)
        if interpolation != '':
            self._attributes['interpolation'] = interpolation

        self._array = None
        self._indexMaps = []
    # __init__

    def setIndexMaps(self, valuesR, valuesG=None, valuesB=None):
        """Attach either one (shared) or three (per-channel) IndexMap children.

        Each values* argument is a pair of sequences; the length of the first
        sequence is used as the IndexMap dimension.
        """
        indexMapR = IndexMap(len(valuesR[0]), valuesR)
        self._indexMaps.append(indexMapR)
        self.addElement(indexMapR)

        # Either one or three indexMaps
        if valuesG is not None and valuesB is not None:
            indexMapG = IndexMap(len(valuesG[0]), valuesG)
            self._indexMaps.append(indexMapG)
            self.addElement(indexMapG)

            indexMapB = IndexMap(len(valuesB[0]), valuesB)
            self._indexMaps.append(indexMapB)
            self.addElement(indexMapB)
    # setIndexMaps

    def setArray(self, dimension, values, floatEncoding='string'):
        """Create the Array child holding the 3D LUT entries.

        dimension : [sizeR, sizeG, sizeB] grid resolution; the channel
        count (3) is appended internally.
        """
        # Copy before appending -- the original code appended to the
        # argument itself, mutating the caller's list.
        dimensions = list(dimension)
        dimensions.append(3)

        integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))

        self._array = Array(dimensions, values, integers,
                            floatEncoding=floatEncoding)
        self.addElement(self._array)
    # setArray

    def getLUTDimensions(self):
        return self._array.getDimensions()

    def getLUTValues(self):
        return self._array.getValues()

    def getIndexMapDimensions(self, channel):
        return self._indexMaps[channel].getDimensions()

    def getIndexMapValues(self, channel):
        return self._indexMaps[channel].getValues()

    def readChild(self, element):
        """Parse an 'Array' or 'IndexMap' XML child element; returns the
        new child, or None for unrecognized tags."""
        child = None
        if element.tag == 'Array':
            child = Array()
            child.read(element)

            integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
            child.setValuesAreIntegers(integers)

            self._array = child
        elif element.tag == 'IndexMap':
            child = IndexMap()
            child.read(element)
            self._indexMaps.append(child)
        return child
    # readChild

    def process(self, values, stride=0, verbose=False):
        """Apply the 3D LUT to a flat sequence of pixel components.

        values : flat sequence of components, 'stride' components per pixel.
        stride : components per pixel; 0 means len(values) (a single pixel).
        Returns a float32 numpy array the same length as 'values'.
        """
        # Base attributes
        inBitDepth = self._attributes['inBitDepth']
        outBitDepth = self._attributes['outBitDepth']

        # Node attributes
        interpolation = ''
        if 'interpolation' in self._attributes:
            interpolation = self._attributes['interpolation']

        '''
        print( "interpolation : %s" % interpolation )
        '''

        # Get LUT dimensions
        dimensions = self.getLUTDimensions()

        # Handle processing of single values
        if stride == 0:
            stride = len(values)

        # Initialize the output value
        outValues = np.zeros(len(values), dtype=np.float32)

        for p in range(int(len(values) / stride)):
            value = values[p * stride:(p + 1) * stride]
            # Work on a copy: slicing a numpy input yields a *view*, so
            # writing into the slice would corrupt the caller's data (and
            # would also clobber 'value' before the extra-channel copy).
            outValue = list(value)

            # Run each channel through the index map, or base normalization
            for i in range(min(3, len(value))):
                # Run through per-channel Index Maps then normalize
                if len(self._indexMaps) > 1:
                    outValue[i] = self._indexMaps[i].process(outValue[i])
                    outValue[i] /= float(dimensions[i] - 1)

                # Run through the single shared Index Map then normalize
                elif len(self._indexMaps) > 0:
                    outValue[i] = self._indexMaps[0].process(outValue[i])
                    outValue[i] /= float(dimensions[i] - 1)

                # Normalize from bit-depth
                else:
                    # Convert input bit depth
                    outValue[i] = bitDepthToNormalized(outValue[i], inBitDepth)

            # Run color through LUT
            # trilinear interpolation
            if interpolation == 'trilinear':
                outValue[0:3] = self._array.lookup3DTrilinear(outValue)

            # tetrahedral interpolation
            elif interpolation == 'tetrahedral':
                outValue[0:3] = self._array.lookup3DTetrahedral(outValue)

            # Bit Depth conversion for output is ignored for LUTs
            # as LUT values are assumed to target a specific bit depth
            #for i in range(min(3, len(value))):
            #    outValue[i] = normalizedToBitDepth(outValue[i], outBitDepth)

            # Copy the extra channels
            for i in range(min(3, stride), stride):
                outValue[i] = value[i]

            # Copy to the output array
            outValues[p * stride:(p + 1) * stride] = outValue

        return outValues
    # process
# LUT3D
class LUT1D(ProcessNode):
    "A Common LUT Format LUT 1D ProcessNode element"

    def __init__(self,
                 inBitDepth=bitDepths["FLOAT16"],
                 outBitDepth=bitDepths["FLOAT16"],
                 id="",
                 name="",
                 interpolation='linear',
                 rawHalfs='',
                 halfDomain=''):
        "%s - Initialize the standard class variables" % 'LUT1D'
        ProcessNode.__init__(self, 'LUT1D', inBitDepth, outBitDepth, id, name)
        if interpolation != '':
            self._attributes['interpolation'] = interpolation
        if rawHalfs != '':
            self._attributes['rawHalfs'] = rawHalfs
        if halfDomain != '':
            self._attributes['halfDomain'] = halfDomain

        self._array = None
        self._indexMaps = []
    # __init__

    def setIndexMaps(self, valuesR, valuesG=None, valuesB=None):
        """Attach either one (shared) or three (per-channel) IndexMap children.

        Each values* argument is a pair of sequences; the length of the first
        sequence is used as the IndexMap dimension.
        """
        indexMapR = IndexMap(len(valuesR[0]), valuesR)
        self._indexMaps.append(indexMapR)
        self.addElement(indexMapR)

        # Either one or three indexMaps
        if valuesG is not None and valuesB is not None:
            indexMapG = IndexMap(len(valuesG[0]), valuesG)
            self._indexMaps.append(indexMapG)
            self.addElement(indexMapG)

            indexMapB = IndexMap(len(valuesB[0]), valuesB)
            self._indexMaps.append(indexMapB)
            self.addElement(indexMapB)
    # setIndexMaps

    def setArray(self, dimension, values, floatEncoding='string'):
        """Create the Array child holding the 1D LUT entries.

        dimension : number of channels per entry; the entry count is
        derived from len(values).
        """
        # Integer division: under Python 3, '/' yields a float entry
        # count, which is not a valid Array dimension.
        dimensions = [len(values) // dimension, dimension]

        integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
        rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])

        self._array = Array(dimensions, values, rawHalfs=rawHalfs,
                            integers=integers, floatEncoding=floatEncoding)
        self.addElement(self._array)
    # setArray

    def getLUTDimensions(self):
        return self._array.getDimensions()

    def getLUTValues(self):
        return self._array.getValues()

    def getIndexMapDimensions(self, channel):
        return self._indexMaps[channel].getDimensions()

    def getIndexMapValues(self, channel):
        return self._indexMaps[channel].getValues()

    def readChild(self, element):
        """Parse an 'Array' or 'IndexMap' XML child element; returns the
        new child, or None for unrecognized tags."""
        child = None
        if element.tag == 'Array':
            rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])

            child = Array(rawHalfs=rawHalfs)
            child.read(element)

            integers = bitDepthIsInteger(self.getAttribute('outBitDepth'))
            child.setValuesAreIntegers(integers)

            self._array = child
        elif element.tag == 'IndexMap':
            child = IndexMap()
            child.read(element)
            self._indexMaps.append(child)
        return child
    # readChild

    def process(self, values, stride=0, verbose=False):
        """Apply the 1D LUT per-channel to a flat sequence of components.

        values : flat sequence of components, 'stride' components per pixel.
        stride : components per pixel; 0 means len(values) (a single pixel).
        Returns a float32 numpy array the same length as 'values'.
        """
        # Base attributes
        inBitDepth = self._attributes['inBitDepth']
        outBitDepth = self._attributes['outBitDepth']

        # Node attributes
        interpolation = ''
        if 'interpolation' in self._attributes:
            interpolation = self._attributes['interpolation']

        rawHalfs = not (self.getAttribute('rawHalfs') in [None, False])
        halfDomain = not (self.getAttribute('halfDomain') in [None, False])

        '''
        print( "interpolation : %s" % interpolation )
        print( "raw halfs : %s" % rawHalfs )
        print( "halfs domain : %s" % halfDomain )
        '''

        # Get LUT dimensions
        dimensions = self.getLUTDimensions()

        # Handle processing of single values
        if stride == 0:
            stride = len(values)

        # Initialize the output value
        outValues = np.zeros(len(values), dtype=np.float32)

        for p in range(int(len(values) / stride)):
            value = values[p * stride:(p + 1) * stride]
            # Work on a copy: slicing a numpy input yields a *view*, so
            # writing into the slice would corrupt the caller's data (and
            # would also clobber 'value' before the extra-channel copy).
            outValue = list(value)

            for i in range(min(3, stride)):
                # Run through per-channel Index Maps then normalize
                if len(self._indexMaps) > 1:
                    outValue[i] = self._indexMaps[i].process(outValue[i])
                    outValue[i] /= float(dimensions[0] - 1)

                # Run through the single shared Index Map then normalize
                elif len(self._indexMaps) > 0:
                    outValue[i] = self._indexMaps[0].process(outValue[i])
                    outValue[i] /= float(dimensions[0] - 1)

                # Normalize from bit-depth
                else:
                    # Convert input bit depth
                    outValue[i] = bitDepthToNormalized(outValue[i], inBitDepth)

                # Run through LUT
                # Use Cubic interpolation
                if interpolation == 'cubic':
                    outValue[i] = self._array.lookup1DCubic(outValue[i], i)

                # Use halfDomain lookup and interpolation
                elif halfDomain:
                    outValue[i] = self._array.lookup1DHalfDomain(
                        outValue[i], i, interpolate=True)

                # Linear interpolation is the default
                #elif interpolation == 'linear':
                else:
                    outValue[i] = self._array.lookup1DLinear(outValue[i], i)

                # Bit Depth conversion for output is ignored for LUTs
                # as LUT values are assumed to target a specific bit depth
                #outValue[i] = normalizedToBitDepth(outValue[i], outBitDepth)

            # Copy the extra channels
            for i in range(min(3, stride), stride):
                outValue[i] = value[i]

            # Copy to the output array
            outValues[p * stride:(p + 1) * stride] = outValue

        return outValues
    # process
# LUT1D