Example 1
def generateLUT1DInverseIndexMap(resolution, samples, minInputValue,
                                 maxInputValue):
    lutpns = []

    # Invert happens in 3 stages
    # 1. Create the index map that goes from LUT output values to index values
    # 2. Create a LUT that maps from index values to [0,1]
    # 3. Create a Range node to remap from [0,1] to [minInput,maxInput]

    # Get the resolution of the prelut
    inputResolution = resolution[0]
    channels = resolution[1]

    #print( inputResolution, minInputValue, maxInputValue )

    # Index Maps for the inverse LUT
    indexMaps = []
    for c in range(channels):
        indexMapInput = [0.0] * inputResolution
        for i in range(inputResolution):
            indexMapInput[i] = samples[i * channels + c]

        # Materialize the list explicitly (range() is lazy in Python 3)
        indexMapOutput = list(range(inputResolution))

        indexMaps.append([indexMapInput, indexMapOutput])

    # Sample values for the LUT - output is [0,1]
    inverseSamples = [0.0] * inputResolution * channels

    for i in range(inputResolution):
        v = float(i) / (inputResolution - 1)
        for c in range(channels):
            inverseSamples[i * channels + c] = v

    # Create a 1D LUT with generated index map and sample values
    lutpn = clf.LUT1D(clf.bitDepths["FLOAT16"], clf.bitDepths["FLOAT16"],
                      "inverse_1d_lut", "inverse_1d_lut")

    if channels == 3:
        lutpn.setIndexMaps(indexMaps[0], indexMaps[1], indexMaps[2])
    else:
        lutpn.setIndexMaps(indexMaps[0])

    lutpn.setArray(channels, inverseSamples)

    lutpns.append(lutpn)

    # Create a Range node to expand from [0,1] to [minIn, maxIn]
    if minInputValue != 0.0 or maxInputValue != 1.0:
        rangepn2 = clf.Range(clf.bitDepths["FLOAT16"],
                             clf.bitDepths["FLOAT16"], "inverse_1d_range_1",
                             "inverse_1d_range_1")
        rangepn2.setMinInValue(0.0)
        rangepn2.setMaxInValue(1.0)
        rangepn2.setMinOutValue(minInputValue)
        rangepn2.setMaxOutValue(maxInputValue)

        lutpns.append(rangepn2)

    return lutpns
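
A minimal usage sketch for the function above (the toy LUT values are hypothetical; it assumes the clf module from this codebase is importable and the forward LUT samples are monotonically increasing):

import clf

# A toy 5-entry, single-channel forward LUT covering [0, 1]
resolution = [5, 1]
samples = [0.0, 0.1, 0.3, 0.6, 1.0]

# Returns an IndexMap-based LUT1D, plus a trailing Range node whenever
# the original input domain was not [0, 1]
nodes = generateLUT1DInverseIndexMap(resolution, samples, 0.0, 1.0)

pl = clf.ProcessList()
for node in nodes:
    pl.addProcess(node)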
Example 2
def generateLUT1DInverseResampled(resolution, samples, minInputValue,
                                  maxInputValue):
    lutpns = []

    # Invert happens in 3 stages
    # 1. Range values down from the min and max output values to 0-1
    # 2. Generate the inverse LUT for these newly reranged values
    # 3. Range the value up to the range defined by minInputValue and maxInputValue
    # This is similar to how .CSP preluts are turned into ProcessNodes

    # Get the resolution of the prelut
    inputResolution = resolution[0]
    channels = resolution[1]

    # XXX
    # Given that we're resampling, we should probably increase the
    # resolution of the resampled lut relative to the source
    outputResolution = inputResolution
    outputResolution *= 2

    # Find the minimum and maximum input
    # XXX
    # We take the min and max of all three preluts because the Range node
    # only takes single value, not RGB triples. If the prelut ranges are
    # very different, this could introduce some artifacting
    minOutputValue = samples[0]
    for c in range(channels):
        minOutputValue = min(minOutputValue, samples[c])

    maxOutputValue = samples[-channels]
    for c in range(channels):
        maxOutputValue = max(maxOutputValue, samples[-channels + c])

    #print( inputResolution, minInputValue, maxInputValue, minOutputValue, maxOutputValue )

    # Create a Range node to normalize data from the range [minOut, maxOut]
    rangepn1 = clf.Range(clf.bitDepths["FLOAT16"], clf.bitDepths["FLOAT16"],
                         "inverse_1d_range_1", "inverse_1d_range_1")
    rangepn1.setMinInValue(minOutputValue)
    rangepn1.setMaxInValue(maxOutputValue)
    rangepn1.setMinOutValue(0.0)
    rangepn1.setMaxOutValue(1.0)

    lutpns.append(rangepn1)

    # Generate inverse 1d LUT by running values through the
    # - inverse normalization [0,1] back to [minOut, maxOut]
    # - the inverse of the original LUT
    inverseSamples = [0.0] * outputResolution * channels

    for inverseLutIndex in range(outputResolution):

        # Normalized LUT input
        inputValue = float(inverseLutIndex) / (outputResolution - 1)

        # Invert the normalization
        rangedValue = inputValue * (maxOutputValue -
                                    minOutputValue) + minOutputValue

        inverseSample = [0.0] * channels

        # For each channel
        for channel in range(channels):
            # Find the location of the de-normalized value in the lut
            for lutIndex in range(inputResolution):
                sampleIndex = lutIndex * channels + channel
                if samples[sampleIndex] > rangedValue:
                    break

            # Get the interpolation value
            lutIndexLow = max(0, lutIndex - 1)
            lutIndexHigh = min(inputResolution - 1, lutIndex)
            sampleIndexLow = lutIndexLow * channels + channel
            sampleIndexHigh = lutIndexHigh * channels + channel

            if lutIndexLow == lutIndexHigh:
                lutInterp = 0.0
            else:
                lutInterp = (rangedValue - samples[sampleIndexLow]) / (
                    samples[sampleIndexHigh] - samples[sampleIndexLow])

            # Find the output value
            outputInterpolated = (lutInterp + lutIndexLow) / (inputResolution -
                                                              1)

            inverseSample[channel] = outputInterpolated

        inverseSamples[inverseLutIndex * channels:(inverseLutIndex + 1) *
                       channels] = inverseSample

    # Create a 1D LUT with generated sample values
    lutpn = clf.LUT1D(clf.bitDepths["FLOAT16"], clf.bitDepths["FLOAT16"],
                      "inverse_1d_lut", "inverse_1d_lut")
    lutpn.setArray(channels, inverseSamples)

    lutpns.append(lutpn)

    # Create a Range node to expand from [0,1] to [minIn, maxIn]
    rangepn2 = clf.Range(clf.bitDepths["FLOAT16"], clf.bitDepths["FLOAT16"],
                         "inverse_1d_range_2", "inverse_1d_range_2")
    rangepn2.setMinInValue(0.0)
    rangepn2.setMaxInValue(1.0)
    rangepn2.setMinOutValue(minInputValue)
    rangepn2.setMaxOutValue(maxInputValue)

    lutpns.append(rangepn2)

    return lutpns
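
The per-channel lookup above scans the forward LUT linearly for every output sample. A sketch of the same monotonic inversion using the standard-library bisect module (a possible drop-in for the inner loop, not part of the original code):

import bisect

def invertMonotonicLUT(samples1ch, value):
    # samples1ch : increasing forward-LUT outputs for a single channel
    # Returns the normalized input position whose forward output is 'value'
    n = len(samples1ch)
    # First index whose sample is strictly greater than 'value', matching
    # the 'samples[sampleIndex] > rangedValue' break in the loop above
    idx = bisect.bisect_right(samples1ch, value)
    low = max(0, idx - 1)
    high = min(n - 1, idx)
    if low == high:
        t = 0.0
    else:
        t = (value - samples1ch[low]) / (samples1ch[high] - samples1ch[low])
    return (t + low) / (n - 1)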
Example 3
def createShaper(shaperType, shaperMin, shaperMax):
    #
    # Create the forward and inverse input shaper ProcessLists
    #
    shaperPL = clf.ProcessList()
    shaperPLInverse = clf.ProcessList()

    # Log shaper
    if shaperType == 'log2':
        #print( "log shaper - %f, %f" % (shaperMin, shaperMax))

        # Forward ProcessNodes
        logPn = clf.Log(style='log2')
        shaperPL.addProcess(logPn)

        rangePn = clf.Range()
        rangePn.setMinInValue(shaperMin)
        rangePn.setMaxInValue(shaperMax)
        rangePn.setMinOutValue(0.0)
        rangePn.setMaxOutValue(1.0)
        shaperPL.addProcess(rangePn)

        # Input min and max
        inputMin = pow(2, shaperMin)
        inputMax = pow(2, shaperMax)

        # Inverse ProcessNodes
        rangePn2 = clf.Range()
        rangePn2.setMinInValue(0.0)
        rangePn2.setMaxInValue(1.0)
        rangePn2.setMinOutValue(shaperMin)
        rangePn2.setMaxOutValue(shaperMax)
        shaperPLInverse.addProcess(rangePn2)

        logPn2 = clf.Log(style='antiLog2')
        shaperPLInverse.addProcess(logPn2)

    # Linear shaper
    elif shaperType == 'linear':
        #print( "linear shaper - %f, %f" % (shaperMin, shaperMax))

        # Forward ProcessNodes
        rangePn = clf.Range()
        rangePn.setMinInValue(shaperMin)
        rangePn.setMaxInValue(shaperMax)
        rangePn.setMinOutValue(0.0)
        rangePn.setMaxOutValue(1.0)
        shaperPL.addProcess(rangePn)

        # Input min and max
        inputMin = shaperMin
        inputMax = shaperMax

        # Inverse ProcessNodes
        rangePn2 = clf.Range()
        rangePn2.setMinInValue(0.0)
        rangePn2.setMaxInValue(1.0)
        rangePn2.setMinOutValue(shaperMin)
        rangePn2.setMaxOutValue(shaperMax)
        shaperPLInverse.addProcess(rangePn2)

    # No shaper
    else:
        inputMin = 0.0
        inputMax = 1.0

    return (shaperPL, shaperPLInverse, inputMin, inputMax)
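
For example, a log2 shaper spanning -6 to +6 stops normalizes scene-linear values in [2**-6, 2**6] to [0, 1], and the returned inverse ProcessList undoes that mapping (a sketch, assuming the clf module from this codebase):

shaperPL, shaperPLInverse, inputMin, inputMax = createShaper('log2', -6.0, 6.0)
# inputMin == 2**-6 == 0.015625, inputMax == 2**6 == 64.0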
Example 4
def filterImageWithCLF(inputPath,
                       outputPath,
                       processList,
                       verbose=False,
                       outBitDepth=None,
                       multithreaded=cpu_count(),
                       compression=None,
                       compressionQuality=0):

    #
    # Get the input image pixel array
    #
    t0 = timeit.default_timer()

    pixels, inBitDepth, width, height, channels, metadata, channelnames = readPixelArray(
        inputPath)
    if pixels is None:
        print("\nImage %s could not be opened. Filtering aborted.\n" %
              inputPath)
        return
    #print( len(pixels), bitDepth, width, height, channels )

    t1 = timeit.default_timer()
    elapsed = t1 - t0
    print("Reading took %s seconds" % elapsed)

    # Determine outBitDepth
    if not outBitDepth or not (outBitDepth in clf.bitDepths.values()):
        outBitDepth = inBitDepth

    #
    # Create two Range ProcessNodes to convert data
    # 1: from the bitdepth of the input image to the bit depth of the CLF start
    # 2: from the bitdepth of the CLF output to the bit depth of the output image
    #
    processListInBitDepth = processList.getInBitDepth()
    processListOutBitDepth = processList.getOutBitDepth()

    InRange = None
    if processListInBitDepth != inBitDepth:
        InRange = clf.Range(inBitDepth, processListInBitDepth)

    OutRange = None
    if processListOutBitDepth != outBitDepth:
        OutRange = clf.Range(processListOutBitDepth, outBitDepth)

    #
    # Filter image
    #

    # Float buffer for the processed pixels
    # Values will be converted to other bit depths when writing to disk
    processedPixels = np.zeros(width * height * channels, dtype=np.float32)

    # Process
    t0 = timeit.default_timer()

    # Multi-threaded execution
    if multithreaded > 1:
        print("Filtering image - multithreaded (%d threads)" % multithreaded)
        try:
            pool = Pool(processes=multithreaded)

            # Each process filters a single row and returns the results
            # Feels a little clunky, but it gets us the speed of multithreading
            # and we're probably not worried about the memory hit since we're
            # only processing one image at a time.

            #print ( "Creating map_async pool ")
            result = pool.map_async(
                filterRow_parallel_splitargs,
                [(x, width, height, channels,
                  pixels[x * width * channels:x * width * channels +
                         width * channels], InRange, processList, OutRange,
                  processedPixels, verbose) for x in range(height)],
                chunksize=1)

            try:
                parallelProcessedPixels = result.get(0xFFFF)
            except KeyboardInterrupt:
                print("\nProcess received Ctrl-C. Exiting.\n")
                return
            except:
                print("\nCaught exception. Exiting.")
                print('-' * 60)
                traceback.print_exc()
                print('-' * 60)
                return

            # The filtered rows have to be copied back to the 'processedPixels' block
            # when everything finishes up
            for i in range(height):
                for j in range(width * channels):
                    processedPixels[i * width * channels +
                                    j] = parallelProcessedPixels[i][j]
        except:
            print("Error in multithreaded processing. Exiting.")
            print('-' * 60)
            traceback.print_exc()
            print('-' * 60)
            return

    # Single-threaded execution
    else:
        print("Filtering image - single threaded")

        # Using filterRow_stride instead of filterRow_pixel
        # Processing a full row is ~10% faster than processing individual pixels
        for j in range(height):
            filterRow_stride(j, width, height, channels, pixels, InRange,
                             processList, OutRange, processedPixels, verbose)

    t1 = timeit.default_timer()
    elapsed = t1 - t0
    print("Filtering took %s seconds" % elapsed)

    #
    # Write the processed pixel array to the output
    #
    t0 = timeit.default_timer()

    writePixelArray(outputPath, processedPixels, outBitDepth, width, height,
                    channels, metadata, channelnames, compression,
                    compressionQuality)

    t1 = timeit.default_timer()
    elapsed = t1 - t0
    print("Writing took %s seconds" % elapsed)
Example 5
def generateCLFPrelut(cspPreluts):
    prelutpns = []

    # Get the individual preluts
    (prelutR, prelutG, prelutB) = cspPreluts

    # Get the resolution of the prelut
    inputResolution = max(len(prelutR[0]), len(prelutG[0]), len(prelutB[0]))

    # XXX
    # Given that we're resampling, we should probably increase the
    # resolution of the resampled lut relative to the source
    outputResolution = inputResolution

    # If the prelut only affects the range, skip this step
    if inputResolution > 2:
        outputResolution *= 2

    # Find the minimum and maximum input
    # XXX
    # We take the min and max of all three preluts because the Range node
    # only takes single value, not RGB triples. If the prelut ranges are
    # very different, this could introduce some artifacting
    minInputValue = min(prelutR[0][0], prelutG[0][0], prelutB[0][0])
    maxInputValue = max(prelutR[0][-1], prelutG[0][-1], prelutB[0][-1])

    #print( inputResolution, minInputValue, maxInputValue )

    # Create a Range node to normalize data from the range [min, max]
    rangepn = clf.Range(clf.bitDepths["FLOAT16"], clf.bitDepths["FLOAT16"],
                        "prelut_range", "prelut_range")
    rangepn.setMinInValue(minInputValue)
    rangepn.setMaxInValue(maxInputValue)
    rangepn.setMinOutValue(0.0)
    rangepn.setMaxOutValue(1.0)

    prelutpns.append(rangepn)

    # If the prelut only affects the range, skip generating a lut to represent it
    if inputResolution > 2:
        # Generate 1d LUT by running values through the
        # - inverse normalization
        # - the cspprelut
        samples = [0.0] * outputResolution * 3

        for i in range(outputResolution):
            # Normalized LUT input
            inputValue = float(i) / (outputResolution - 1)

            # Invert the normalization
            rangedValue = inputValue * (maxInputValue -
                                        minInputValue) + minInputValue

            sample = [0.0, 0.0, 0.0]

            # For each channel
            for channel in range(len(cspPreluts)):
                # Find the location of the de-normalized value in the prelut
                for prelutIndex in range(inputResolution):
                    if cspPreluts[channel][0][prelutIndex] > rangedValue:
                        break

                # Get the interpolation value
                prelutIndexLow = max(0, prelutIndex - 1)
                prelutIndexHigh = min(inputResolution - 1, prelutIndex)

                # Guard against a zero-length interval, as in
                # generateLUT1DInverseResampled, to avoid dividing by zero
                if prelutIndexLow == prelutIndexHigh:
                    prelutInterp = 0.0
                else:
                    prelutInterp = (rangedValue -
                                    cspPreluts[channel][0][prelutIndexLow]) / (
                                        cspPreluts[channel][0][prelutIndexHigh] -
                                        cspPreluts[channel][0][prelutIndexLow])

                # Find the output value
                outputInterpolationRange = (
                    cspPreluts[channel][1][prelutIndexHigh] -
                    cspPreluts[channel][1][prelutIndexLow])
                outputInterpolated = prelutInterp * outputInterpolationRange + cspPreluts[
                    channel][1][prelutIndexLow]

                sample[channel] = outputInterpolated

            samples[i * 3:(i + 1) * 3] = sample

        # Create a 1D LUT with generated sample values
        lutpn = clf.LUT1D(clf.bitDepths["FLOAT16"], clf.bitDepths["FLOAT16"],
                          "prelut_lut1d", "prelut_lut1d")
        lutpn.setArray(len(cspPreluts), samples)

        prelutpns.append(lutpn)

    return prelutpns
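
The expected cspPreluts layout is three (inputValues, outputValues) pairs, one per channel. A toy illustration (the identity ramp below is hypothetical):

# Each channel's prelut: ([input breakpoints], [output values])
identity = ([0.0, 0.5, 1.0], [0.0, 0.5, 1.0])
prelutNodes = generateCLFPrelut((identity, identity, identity))
# Yields the prelut_range Range node plus, since the resolution here
# exceeds 2, a resampled prelut_lut1d LUT1D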
Example 6
def readSPI1D(lutPath,
              direction='forward',
              interpolation='linear',
              inversesUseIndexMaps=True,
              inversesUseHalfDomain=True):
    with open(lutPath) as f:
        lines = f.read().splitlines()

    #
    # Read LUT data
    #
    dataFormat = LUTFORMAT_1D
    resolution = [0, 0]
    samples = []
    indexMap = []
    minInputValue = 0.0
    maxInputValue = 1.0

    for line in lines:
        #print( "line : %s" % line )
        tokens = line.split()

        # Skip blank lines, which would otherwise raise an IndexError below
        if not tokens:
            continue

        if tokens[0] == "Version":
            version = int(tokens[1])
            if version != 1:
                break
        elif tokens[0] == "From":
            minInputValue = float(tokens[1])
            maxInputValue = float(tokens[2])
        elif tokens[0] == "Length":
            resolution[0] = int(tokens[1])
        elif tokens[0] == "Components":
            resolution[1] = int(tokens[1])
        elif tokens[0] in ["{", "}"]:
            continue
        else:
            samples.extend(map(float, tokens))

    #
    # Create ProcessNodes
    #
    lutpns = []

    # Forward transform, pretty straightforward
    if direction == 'forward':
        # Remap input range
        if minInputValue != 0.0 or maxInputValue != 1.0:
            rangepn = clf.Range(clf.bitDepths["FLOAT16"],
                                clf.bitDepths["FLOAT16"], "range", "range")
            rangepn.setMinInValue(minInputValue)
            rangepn.setMaxInValue(maxInputValue)
            rangepn.setMinOutValue(0.0)
            rangepn.setMaxOutValue(1.0)

            lutpns.append(rangepn)

        # LUT node
        lutpn = clf.LUT1D(clf.bitDepths["FLOAT16"],
                          clf.bitDepths["FLOAT16"],
                          "lut1d",
                          "lut1d",
                          interpolation=interpolation)
        lutpn.setArray(resolution[1], samples)

        lutpns.append(lutpn)

    # Inverse transform, LUT has to be resampled
    else:
        if inversesUseIndexMaps:
            print("Generating inverse of 1D LUT using Index Maps")
            lutpnInverses = generateLUT1DInverseIndexMap(
                resolution, samples, minInputValue, maxInputValue)
        elif inversesUseHalfDomain:
            print("Generating full half-domain inverse of 1D LUT")
            lutpnInverses = generateLUT1DInverseHalfDomain(resolution,
                                                           samples,
                                                           minInputValue,
                                                           maxInputValue,
                                                           rawHalfs=True)
        else:
            print("Generating resampled inverse of 1D LUT")
            lutpnInverses = generateLUT1DInverseResampled(
                resolution, samples, minInputValue, maxInputValue)
        lutpns.extend(lutpnInverses)

    #print (dataFormat, resolution, samples, indexMap, minInputValue, maxInputValue)
    return lutpns
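
For reference, the header fields consumed by the parser above come from the .spi1d text format; a minimal hypothetical file looks like this (one sample per line between the braces):

Version 1
From 0.000000 1.000000
Length 4
Components 1
{
 0.000000
 0.250000
 0.750000
 1.000000
}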
Example 7
def filterImageWithCLF(inputPath,
                       outputPath,
                       processList,
                       verbose=False,
                       outBitDepth=None):

    #
    # Get the input image pixel array
    #
    pixels, inBitDepth, width, height, channels, metadata = readPixelArray(
        inputPath)
    #print( len(pixels), bitDepth, width, height, channels )

    # Determine outBitDepth
    if not outBitDepth or not (outBitDepth in clf.bitDepths.values()):
        outBitDepth = inBitDepth

    #
    # Create Range ProcessNodes to convert data to the correct bit depth
    # for input to the CLF and then for output to file
    #
    processListInBitDepth = processList.getInBitDepth()
    processListOutBitDepth = processList.getOutBitDepth()

    InRange = None
    if processListInBitDepth != inBitDepth:
        InRange = clf.Range(inBitDepth, processListInBitDepth)

    OutRange = None
    if processListOutBitDepth != outBitDepth:
        OutRange = clf.Range(processListOutBitDepth, outBitDepth)

    #
    # Filter image
    #

    # Float buffer for the processed pixels
    # Values will be converted to other bit depths when writing to disk
    processedPixels = array.array("f", [0.0] * (width * height * channels))

    # Process
    print("Filtering image")
    for i in range(width):
        for j in range(height):
            index = (j * width + i) * channels
            #ovalue = list(pixels[index:index+3])
            ovalue = pixels[index:index + channels]

            pvalue = list(ovalue)
            # Reset values if input image and CLF input bit depths don't match
            if InRange:
                pvalue = InRange.process(pvalue)

            # Process values
            #print( "Processing %04d, %04d : %s" % (i, j, ovalue))
            pvalue = processList.process(pvalue)

            # Reset values if output image and CLF output bit depths don't match
            if OutRange:
                pvalue = OutRange.process(pvalue)

            if verbose:
                print("Processed %04d, %04d : %s -> %s" %
                      (i, j, list(ovalue), pvalue))

            for c in range(channels):
                processedPixels[index + c] = pvalue[c]

    #
    # Write the processed pixel array to the output
    #
    writePixelArray(outputPath, processedPixels, outBitDepth, width, height,
                    channels, metadata)
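
This earlier variant processes one pixel at a time rather than whole rows, so the addressing math matters: pixels are stored interleaved, scanline by scanline. A standalone illustration of that indexing (a sketch, not from the original source):

def pixelIndex(i, j, width, channels):
    # Interleaved scanline layout: row j, column i, 'channels' samples per pixel
    return (j * width + i) * channels

assert pixelIndex(0, 0, 640, 3) == 0        # first pixel, first channel
assert pixelIndex(1, 0, 640, 3) == 3        # next pixel along the row
assert pixelIndex(0, 1, 640, 3) == 640 * 3  # first pixel of the second row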