Code Example #1
File: mkhdr.py  Project: hpd/general
def ImageBufMakeConstant(xres, 
    yres, 
    chans=3, 
    format=oiio.UINT8, 
    value=(0,0,0),
    xoffset=0, 
    yoffset=0,
    orientation=1,
    inputSpec=None) :
    '''
    Create a new Image Buffer
    '''
    
    # Copy an existing input spec
    # Mostly to ensure that metadata makes it through
    if inputSpec:
        spec = inputSpec
        spec.width = xres
        spec.height = yres
        spec.nchannels = chans
        spec.set_format( format )

    # Or create a new ImageSpec
    else:
        spec = ImageSpec (xres,yres,chans,format)

    spec.x = xoffset
    spec.y = yoffset
    b = ImageBuf (spec)
    b.orientation = orientation
    oiio.ImageBufAlgo.fill(b, value)

    return b
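A minimal usage sketch for the helper above. The import lines are assumptions; the snippet itself expects `oiio`, `ImageSpec`, and `ImageBuf` to already be in scope, and the file name is a placeholder.

import OpenImageIO as oiio
from OpenImageIO import ImageBuf, ImageSpec

# Hypothetical call: a 1920x1080, 3-channel, half-float buffer filled with mid-grey
grey = ImageBufMakeConstant(1920, 1080, chans=3, format=oiio.HALF,
                            value=(0.18, 0.18, 0.18))
grey.write("grey_constant.exr")  # placeholder output path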
Code Example #2
    def __init__(self, parent=None, mw=None, seq=None, refimage=None):
        super(SequenceGrabber, self).__init__(parent)
        self.mw = mw
        self.seq = seq
        self.frame = None
        self.active_time = None

        try:
            ibuf = ImageBuf(self.seq[0].path)
        except Exception as ex:
            print(ex)
            return
        spec = ibuf.spec()

        im = Image.new("RGB", (spec.width, spec.height), (0, 0, 0))
        draw = ImageDraw.Draw(im)
        font = ImageFont.truetype('/Library/Fonts/Arial Bold.ttf', 48)
        draw.text((spec.width / 3, spec.height / 2), "No Image", font=font)
        self.blank_qimage = ImageQt(im)

        wksavepath = "/tmp"
        wksavepath = wksavepath + "/sequencemissing.jpg".format(self.mw.APPID)
        self.blank_qimage.save(wksavepath)

        wkqim = QImage(wksavepath)
        self.blank_qimage = wkqim
Code Example #3
    def _open(self, source, size=3):
        erode = ImageBuf(source.spec())
        ImageBufAlgo.erode(erode, source, size, size)
        dilate = ImageBuf(source.spec())
        ImageBufAlgo.dilate(dilate, erode, size, size)

        return dilate
Code Example #4
File: mkhdr.py  Project: nbn1985/general
def ImageBufMakeConstant(xres,
                         yres,
                         chans=3,
                         format=oiio.UINT8,
                         value=(0, 0, 0),
                         xoffset=0,
                         yoffset=0,
                         orientation=1,
                         inputSpec=None):
    '''
    Create a new Image Buffer
    '''

    # Copy an existing input spec
    # Mostly to ensure that metadata makes it through
    if inputSpec:
        spec = inputSpec
        spec.width = xres
        spec.height = yres
        spec.nchannels = chans
        spec.set_format(format)

    # Or create a new ImageSpec
    else:
        spec = ImageSpec(xres, yres, chans, format)

    spec.x = xoffset
    spec.y = yoffset
    b = ImageBuf(spec)
    b.orientation = orientation
    oiio.ImageBufAlgo.fill(b, value)

    return b
Code Example #5
def blurImage(srcBuffer):
    K = ImageBuf()
    ImageBufAlgo.make_kernel(K, blurFilter, blurAmountX, blurAmountY)
    Blurred = ImageBuf()
    ImageBufAlgo.convolve(Blurred, srcBuffer, K)

    threadResult.put(Blurred)
    return Blurred
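The function above depends on module-level globals (`blurFilter`, `blurAmountX`, `blurAmountY`, `threadResult`) that the snippet does not show. A self-contained sketch of the same make_kernel/convolve pattern, with assumed file names and kernel size:

from OpenImageIO import ImageBuf, ImageBufAlgo

src = ImageBuf("input.exr")                              # hypothetical input image
kernel = ImageBuf()
ImageBufAlgo.make_kernel(kernel, "gaussian", 5.0, 5.0)   # Gaussian kernel, 5.0 x 5.0
blurred = ImageBuf()
ImageBufAlgo.convolve(blurred, src, kernel)              # convolve source with the kernel
blurred.write("blurred.exr")                             # placeholder output path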
Code Example #6
 def load_images(self, file_name):
     allowed_exts = {".exr", ".tif", ".png", ".jpg"}
     result_file = os.path.join(self.result_dir, file_name)
     correct_result_file = os.path.join(self.correct_result_dir, file_name)
     if os.path.splitext(result_file)[1] not in allowed_exts:
         return None, None
     result_image = ImageBuf(result_file)
     correct_result_image = ImageBuf(correct_result_file)
     return result_image, correct_result_image
Code Example #7
    def __init__(self, logger, img_file: Path, alpha_over_compositing=False):
        global LOGGER
        LOGGER = logger
        if logger is None:
            logging.basicConfig(level=logging.DEBUG)
            LOGGER = logging.getLogger(__name__)

        self.alpha_over_compositing = alpha_over_compositing

        self.img_file = img_file
        self.img = ImageBuf(img_file.as_posix())
        self.metadata_cache = {}
        self.manifest_cache = {}
Code Example #8
    def read_img_metadata(cls, img_file: Path) -> dict:
        img_buf = ImageBuf(img_file.as_posix())
        img_dict = dict()

        if not img_buf:
            LOGGER.error(oiio.geterror())
            return img_dict

        for param in img_buf.spec().extra_attribs:
            img_dict[param.name] = param.value

        cls.close_img_buf(img_buf, img_file)

        return img_dict
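The classmethod above walks the `extra_attribs` of an image's spec. A standalone sketch of the same pattern (the file name is a placeholder):

import OpenImageIO as oiio
from OpenImageIO import ImageBuf

buf = ImageBuf("render.exr")          # hypothetical file path
spec = buf.spec()                     # forces the header to be read
if buf.has_error:
    print(buf.geterror())
else:
    for param in spec.extra_attribs:
        print(param.name, "=", param.value)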
Code Example #9
File: utils.py  Project: tappi287/pfadaeffchen
    def np_to_imagebuf(cls, img_pixels: np.array):
        """ Load a numpy array 8/32bit to oiio ImageBuf """
        if len(img_pixels.shape) < 3:
            LOGGER.error(
                'Cannot create image with pixel data in this shape. Expecting 4 channels (RGBA).'
            )
            return

        h, w, c = img_pixels.shape
        img_spec = ImageSpec(w, h, c,
                             cls.get_numpy_oiio_img_format(img_pixels))

        img_buf = ImageBuf(img_spec)
        img_buf.set_pixels(img_spec.roi_full, img_pixels)

        return img_buf
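The method above relies on a project helper (`get_numpy_oiio_img_format`) that is not shown. A self-contained sketch of the same numpy-to-ImageBuf transfer for a float32 RGBA array:

import numpy as np
import OpenImageIO as oiio
from OpenImageIO import ImageBuf, ImageSpec

pixels = np.zeros((64, 64, 4), dtype=np.float32)   # hypothetical RGBA pixel data
h, w, c = pixels.shape
spec = ImageSpec(w, h, c, oiio.FLOAT)
buf = ImageBuf(spec)
buf.set_pixels(spec.roi_full, pixels)              # copy the numpy array into the buffer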
Code Example #10
File: mkhdr.py  Project: nbn1985/general
def findAverageWeightFromPath(inputPath, width, height, channels):
    '''
    Find the average weight of an image, specified by its file path
    '''
    weight = ImageBufMakeConstant(width, height, channels, oiio.HALF)
    temp = ImageBufMakeConstant(1, 1, channels, oiio.HALF)

    try:
        print("\tReading image : %s" % inputPath)
        inputBufferRaw = ImageBuf(inputPath)

        # Cast to half by adding with a const half buffer.
        inputBufferHalf = ImageBufMakeConstant(width, height, channels,
                                               oiio.HALF)
        ImageBufAlgo.add(inputBufferHalf, inputBufferHalf, inputBufferRaw)

        print("\tComputing Weight")
        ImageBufWeight(weight, inputBufferHalf)
        # Compute the average weight by resizing to 1x1
        print("\tResizing")
        # Not using multithreading here, as this function will be called within
        # Python's multithreading framework
        ImageBufAlgo.resize(temp, weight, filtername='box')
        # Get the average weight value
        weight = temp.getpixel(0, 0)
        #print( "\tWeight : %s" % str(weight) )

        averageWeight = sum(map(float, weight)) / channels
    except Exception as e:
        print("Exception in findAverageWeightFromPath")
        print(repr(e))
Code Example #11
    def toQImage(self, filepath):

        ibuf = ImageBuf(filepath)
        try:
            bufok = ibuf.read(subimage=0,
                              miplevel=0,
                              force=True,
                              convert=oiio.UINT8)
        except Exception as ex:
            print(ex)
            return None
        if not bufok:
            return None
        spec = ibuf.spec()

        width = spec.width
        height = spec.height

        # Expect the channels to be RGB from the start.
        # This may not work for formats like ARGB.
        # qimage = QtGui.QImage(width, height, QtGui.QImage.Format_RGB888)
        roi = oiio.ROI(0, width, 0, height, 0, 1, 0, 3)
        try:
            orgimg = Image.fromarray(ibuf.get_pixels(oiio.UINT8, roi))
            # for ImageQt source format error
            if orgimg.mode in self.mw.shot.NO_SUPPORT_IMAGEQT:
                orgimg = orgimg.convert('RGB')
            if self.mw.thumbnail_bright != self.mw.THUMB_DEFALUT_BRIGHT:
                eim = ImageEnhance.Brightness(orgimg)
                orgimg = eim.enhance(self.mw.thumbnail_bright)

            qimage = ImageQt(orgimg)
            # workaround https://bugreports.qt.io/browse/PYSIDE-884
            # output the QImage to a file and re-read it
            wksavepath = QStandardPaths.writableLocation(
                QStandardPaths.TempLocation)
            wksavepath = wksavepath + "/{0}/sequencegrab.jpg".format(
                self.mw.APPID)
            qimage.save(wksavepath, "jpg")
            wkqim = QImage(wksavepath)
            qimage = wkqim
            os.remove(wksavepath)

        except Exception as ex:
            print(ex)
            return None
        return (qimage)
Code Example #12
File: test_imagebufalgo.py  Project: itscool/oiio
def make_constimage (xres, yres, chans=3, format=oiio.UINT8, value=(0,0,0),
                xoffset=0, yoffset=0) :
    spec = ImageSpec (xres,yres,chans,format)
    spec.x = xoffset
    spec.y = yoffset
    b = ImageBuf (spec)
    oiio.ImageBufAlgo.fill (b, value)
    return b
Code Example #13
def resizeHDR(scrBuffer, width, height):
    srcSpec = scrBuffer.spec()

    resizedBuffer = ImageBuf(
        ImageSpec(width, height, srcSpec.nchannels, srcSpec.format))
    ImageBufAlgo.resize(resizedBuffer, scrBuffer, filtername=resizeFilter)
    threadResult.put(resizedBuffer)
    return resizedBuffer
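`resizeFilter` and `threadResult` are module globals that the snippet does not define. A self-contained sketch of the same resize pattern with an assumed filter name and placeholder file paths:

from OpenImageIO import ImageBuf, ImageSpec, ImageBufAlgo

src = ImageBuf("hdr_in.exr")                          # hypothetical source image
srcSpec = src.spec()
dst = ImageBuf(ImageSpec(1024, 512, srcSpec.nchannels, srcSpec.format))
ImageBufAlgo.resize(dst, src, filtername="lanczos3")  # output size comes from dst's spec
dst.write("hdr_small.exr")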
Code Example #14
 def _dilate(self, source):
     dilate = ImageBuf(source.spec())
     ImageBufAlgo.dilate(
         dilate,
         source,
         4,
         4,
     )
     return dilate
Code Example #15
 def _median(self, source, size=5):
     size = int(size)
     median = ImageBuf(source.spec())
     ImageBufAlgo.median_filter(
         median,
         source,
         size,
         size
     )
     return median
Code Example #16
def OIIOImageBufferFromOpenCVImageBuffer(opencvImageBuffer):
    (height, width, channels) = opencvImageBuffer.shape
    npChanneltype = opencvImageBuffer.dtype

    #print( "OIIOImageBufferFromOpenCVImageBuffer", width, height, channels, npChanneltype )

    npToArrayBitDepth = {
        np.dtype('uint8')   : 'B',
        np.dtype('uint16')  : 'H',
        np.dtype('uint32')  : 'I',
        np.dtype('float32') : 'f',
        np.dtype('float64') : 'd',
    }

    npToOIIOBitDepth = {
        np.dtype('uint8')   : oiio.BASETYPE.UINT8,
        np.dtype('uint16')  : oiio.BASETYPE.UINT16,
        np.dtype('uint32')  : oiio.BASETYPE.UINT32,
        np.dtype('float32') : oiio.BASETYPE.FLOAT,
        np.dtype('float64') : oiio.BASETYPE.DOUBLE,
    }

    # Support this when oiio more directly integrates with numpy
    #    np.dtype('float16') : oiio.BASETYPE.HALF,

    if (npChanneltype in npToArrayBitDepth and 
        npChanneltype in npToOIIOBitDepth):
        arrayChannelType = npToArrayBitDepth[npChanneltype]
        oiioChanneltype = npToOIIOBitDepth[npChanneltype]
    else:
        print( "opencv to oiio - Using fallback bit depth" )
        arrayChannelType = 'f'
        oiioChanneltype = oiio.BASETYPE.FLOAT

    spec = ImageSpec(width, height, channels, oiioChanneltype)
    oiioImageBuffer = ImageBuf(spec)
    roi = oiio.ROI(0, width, 0, height, 0, 1, 0, channels)
    conversion = oiioImageBuffer.set_pixels( roi, array.array(arrayChannelType, opencvImageBuffer.flatten()) )
    if not conversion:
        print( "opencv to oiio - Error converting the OpenCV buffer to an OpenImageIO buffer" )
        oiioImageBuffer = None

    return oiioImageBuffer
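A hedged usage sketch for the converter above, assuming OpenCV (`cv2`) is available and that the imports the function itself relies on (numpy, array, OpenImageIO) are already in place. Note that the function copies channels as-is, so an OpenCV BGR buffer stays in BGR order inside the resulting ImageBuf.

import cv2  # assumed available alongside numpy/OpenImageIO

bgr = cv2.imread("photo.jpg")                            # hypothetical file; uint8 HxWx3 BGR array
oiioImageBuffer = OIIOImageBufferFromOpenCVImageBuffer(bgr)
if oiioImageBuffer is not None:
    oiioImageBuffer.write("photo_copy.tif")              # placeholder output path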
Code Example #17
    def create_diff_buffer(self):
        """Create a difference image buffer from image_a and image_b

        Returns:
            ImageBuf: new difference image buffer
        """
        diff_buffer = ImageBuf(self.image_a_buffer.spec())
        ImageBufAlgo.sub(diff_buffer, self.image_a_buffer, self.image_b_buffer)
        ImageBufAlgo.abs(diff_buffer, diff_buffer)

        return diff_buffer
Code Example #18
    def _blur(self, source, size=1.0):
        """Apply gaussian blur to given image

        Args:
            source (ImageBuf): Image buffer which to blur
            size (float): Blur size

        Return:
            ImageBuf: Blurred image
        """
        source = self._open(source)
        kernel = ImageBuf(source.spec())
        ImageBufAlgo.make_kernel(
            kernel,
            "gaussian",
            size, size
        )
        blurred = ImageBuf(source.spec())
        ImageBufAlgo.convolve(blurred, source, kernel)

        return blurred
Code Example #19
    def __init__(self, image_a, image_b):

        self.debug = False
        self.fail_threshold = 0.1
        self.warn_threshold = 0.01

        self.image_a_buffer = ImageBuf()
        self.image_b_buffer = ImageBuf()

        # remove alpha channel from input images
        ImageBufAlgo.channels(
            self.image_a_buffer,
            ImageBuf(image_a),
            ('R', 'G', 'B')
        )
        ImageBufAlgo.channels(
            self.image_b_buffer,
            ImageBuf(image_b),
            ('R', 'G', 'B'),
        )

        # protected
        self._image_a_location = image_a
        self._image_b_location = image_b
        self._file_ext = os.path.splitext(image_a)[-1]
        self._compare_results = CompareResults()
Code Example #20
def main():
    options, args = parseOptions()

    tile_x = options.tile_x
    tile_y = options.tile_y
    frame = options.frame
    output = options.output
    filemask = options.filemask

    tile_files = []
    tiles_lost = []
    for i in xrange(0, (tile_x * tile_y)):
        filepath = filemask % (i, frame)
        if not os.path.exists(filepath):
            tiles_lost += [filepath]
            continue
        tile_files += [filepath]

    if len(tile_files) != (tile_x * tile_y):
        raise Exception("Tile not found: %s" % tiles_lost)

    #TODO: merge metadata from tiles

    spec = ImageBuf(str(tile_files[0])).spec()
    spec_e = ImageSpec(spec.full_width, spec.full_height, spec.nchannels,
                       spec.format)

    extended = ImageBuf(spec_e)
    for filename in tile_files:
        img = ImageBuf(filename)
        ImageBufAlgo.paste(extended,
                           img.xbegin,
                           img.ybegin,
                           img.zbegin,
                           0,
                           img,
                           nthreads=4)
    extended.write(output)
Code Example #21
class ImageCompare(object):
    """Image comparison using OpenImageIO. It creates a difference image.

    Args:
        image_a (str): File path to image
        image_b (str): File path to image to compare against. The baseline

    Attributes:
        debug (bool): Debug mode to output image processing when
            comparing images
        fail_threshold (float): Threshold value for failures
        warn_threshold (float): Threshold value for warnings
        image_a_buffer (ImageBuf): Image buffer
        image_b_buffer (ImageBuf): Image buffer
    """

    def __init__(self, image_a, image_b):

        self.debug = False
        self.fail_threshold = 0.1
        self.warn_threshold = 0.01

        self.image_a_buffer = ImageBuf()
        self.image_b_buffer = ImageBuf()

        # remove alpha channel from input images
        ImageBufAlgo.channels(
            self.image_a_buffer,
            ImageBuf(image_a),
            ('R', 'G', 'B')
        )
        ImageBufAlgo.channels(
            self.image_b_buffer,
            ImageBuf(image_b),
            ('R', 'G', 'B'),
        )

        # protected
        self._image_a_location = image_a
        self._image_b_location = image_b
        self._file_ext = os.path.splitext(image_a)[-1]
        self._compare_results = CompareResults()

    def compare(self, diff_image_location=None, blur=10, raise_exception=True):
        """Compare the two given images

        Args:
            diff_image_location (str): file path for difference image.
                Written only if there are failures
            blur (float): image blur to apply before comparing
        """

        if not diff_image_location:
            diff_image_location = os.path.dirname(self._image_a_location)

        self.blur_images(blur)
        ImageBufAlgo.compare(
            self.image_a_buffer,
            self.image_b_buffer,
            self.fail_threshold,
            self.warn_threshold,
            self._compare_results,
        )
        diff_buffer = self.create_diff_buffer()

        if self.debug:
            self.image_a_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    os.path.basename(self._image_a_location),
                    self._file_ext,
                )
            )
            self.image_b_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    os.path.basename(self._image_b_location),
                    self._file_ext,
                )
            )

        if self._compare_results.nfail > 0:
            ImageBufAlgo.color_map(diff_buffer, diff_buffer, -1, 'inferno')
            remap_buffer = ImageBuf()
            multiplier = 5
            ImageBufAlgo.mul(
                remap_buffer,
                diff_buffer,
                (multiplier, multiplier, multiplier, 1.0),
            )
            ImageBufAlgo.add(remap_buffer, self.image_a_buffer, remap_buffer)
            msg = report_msg.format(
                failures=self._compare_results.nfail,
                warn=self._compare_results.nwarn,
                meanerror=self._compare_results.meanerror,
                rmserror=self._compare_results.rms_error,
                psnr=self._compare_results.PSNR
            )

            remap_buffer.write(
                '{}/{}-{}_diff{}'.format(
                    diff_image_location,
                    os.path.basename(self._image_a_location),
                    os.path.basename(self._image_b_location),
                    self._file_ext,
                )
            )
            self.image_a_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    '1_a',
                    self._file_ext,
                )
            )
            self.image_b_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    '1_b',
                    self._file_ext,
                )
            )
            if raise_exception:
                raise ImageDifferenceError(msg)
            else:
                print(msg)

    def create_diff_buffer(self):
        """Create a difference image buffer from image_a and image_b

        Returns:
            ImageBuf: new difference image buffer
        """
        diff_buffer = ImageBuf(self.image_a_buffer.spec())
        ImageBufAlgo.sub(diff_buffer, self.image_a_buffer, self.image_b_buffer)
        ImageBufAlgo.abs(diff_buffer, diff_buffer)

        return diff_buffer

    def _blur(self, source, size=1.0):
        """Apply gaussian blur to given image

        Args:
            source (ImageBuf): Image buffer which to blur
            size (float): Blur size

        Return:
            ImageBuf: Blurred image
        """
        source = self._open(source)
        kernel = ImageBuf(source.spec())
        ImageBufAlgo.make_kernel(
            kernel,
            "gaussian",
            size, size
        )
        blurred = ImageBuf(source.spec())
        ImageBufAlgo.convolve(blurred, source, kernel)

        return blurred

    def _dilate(self, source):
        dilate = ImageBuf(source.spec())
        ImageBufAlgo.dilate(
            dilate,
            source,
            4,
            4,
        )
        return dilate

    def _open(self, source, size=3):
        erode = ImageBuf(source.spec())
        ImageBufAlgo.erode(erode, source, size, size)
        dilate = ImageBuf(source.spec())
        ImageBufAlgo.dilate(dilate, erode, size, size)

        return dilate

    def _median(self, source, size=5):
        size = int(size)
        median = ImageBuf(source.spec())
        ImageBufAlgo.median_filter(
            median,
            source,
            size,
            size
        )
        return median

    def blur_images(self, size):
        """Blur test images with given size

        Args:
            size (float): Blur size
        """
        self.image_a_buffer = self._blur(self.image_a_buffer, size)
        self.image_b_buffer = self._blur(self.image_b_buffer, size)
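A hedged usage sketch for the class above; the file paths are placeholders, and `ImageDifferenceError`, `CompareResults`, and `report_msg` are assumed to be defined in the same module as in the snippet.

comparison = ImageCompare("render_new.exr", "render_baseline.exr")
comparison.fail_threshold = 0.05            # tighten the per-pixel failure threshold
try:
    # blurs both images, runs ImageBufAlgo.compare, and writes a diff image on failure
    comparison.compare(diff_image_location="/tmp", blur=10)
except ImageDifferenceError as err:
    print("Images differ:", err)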
Code Example #22
def processHDRs():
    """Main entry point of the app."""

    if not Path(hdrPrevFolder).exists():
        Path(hdrPrevFolder).mkdir(parents=True)
    if not Path(hdrBlurFolder).exists():
        Path(hdrBlurFolder).mkdir(parents=True)

    hdrFiles = folder.getFiles(hdrFolder, "", hdrExts, all=False)
    previewFiles = folder.getFiles(hdrPrevFolder, "", hdrPrevExts, all=False)

    hdrFilesTiling = []
    hdrFilesBlurring = []
    hdrFilesPreview = []

    hdrFilesPreviewNames = []

    for previewFile in previewFiles:

        directory = Path(previewFile).parent
        filename = Path(previewFile).stem
        extension = Path(previewFile).suffix

        hdrFilesPreviewNames.append(filename)

    for hdrFile in hdrFiles:

        directory = Path(hdrFile).parent
        filename = Path(hdrFile).stem
        extension = Path(hdrFile).suffix

        IMG_CACHE = oiio.ImageCache.create(True)

        frameBufferOrig = ImageBuf(str(hdrFile))
        spec = frameBufferOrig.spec()

        if blurredPrefix not in filename:
            if Path(hdrBlurFolder,
                    filename + blurredPrefix + hdrExtension).exists():
                pass
            else:
                hdrFilesBlurring.append(hdrFile)
        if (spec.tile_width == spec.width or spec.tile_width == 0
                or frameBufferOrig.nmiplevels == 1):
            hdrFilesTiling.append(hdrFile)
        if filename not in hdrFilesPreviewNames:
            hdrFilesPreview.append(hdrFile)

        IMG_CACHE.invalidate(str(hdrFile))

    if (len(hdrFilesBlurring) == 0 and len(hdrFilesTiling) == 0
            and len(hdrFilesPreview) == 0):

        showUI("", "Nothing to do...")

    if len(hdrFilesTiling) != 0:

        showUI(
            "Searching for Files",
            "Found " + str(len(hdrFilesTiling)) +
            " files in scanline/No MipMap or not .exr format. We will make some :)",
        )
        time.sleep(4)

        for hdrFileTiling in tqdm(
                hdrFilesTiling,
                desc="Complete",
                ncols=width,
                position=2,
                unit="file",
                ascii=True,
                bar_format=barFormat,
        ):

            directory = Path(hdrFileTiling).parent
            filename = Path(hdrFileTiling).stem
            extension = Path(hdrFileTiling).suffix

            showUI("Make tiled / MipMapped .exr",
                   "Current File: " + filename + extension)

            IMG_CACHE = oiio.ImageCache.create(True)

            frameBufferOrig2 = ImageBuf(str(hdrFileTiling))
            spec2 = frameBufferOrig2.spec()

            outPutFile = str(
                Path(directory, filename + tiledPrefix + hdrExtension))

            if spec2.width > hdrWidth:

                newHeight = calculateResizeHeight(spec2.width, spec2.height,
                                                  hdrWidth)
                resizedFramebuffer = threadAndStatus(
                    resizeHDR,
                    [frameBufferOrig2, hdrWidth, newHeight],
                    "Resizing",
                    1,
                    True,
                )

                writeFramebuffer = threadAndStatus(
                    writeEXR, [resizedFramebuffer, outPutFile], "Saving", 0,
                    False)

            else:
                writeFramebuffer = threadAndStatus(
                    writeEXR, [frameBufferOrig2, outPutFile], "Saving", 0,
                    False)

            IMG_CACHE.invalidate(str(hdrFileTiling))

            # if Path(hdrFileTiling).exists():
            #     Path(hdrFileTiling).unlink()
            #     Path(hdrFileTiling).with_suffix(hdrExtension)
            #     Path(outPutFile).rename(hdrFileTiling)

            if errorFlag == 0:
                # If file exists, delete it
                if Path(hdrFileTiling).exists():
                    newFile = Path(hdrFileTiling).with_suffix(hdrExtension)
                    Path(hdrFileTiling).unlink()
                    Path(outPutFile).resolve().rename(newFile)

                    if hdrFileTiling in hdrFilesPreview:
                        hdrFilesPreview.remove(hdrFileTiling)
                        hdrFilesPreview.append(newFile)
                    if hdrFileTiling in hdrFilesBlurring:
                        hdrFilesBlurring.remove(hdrFileTiling)
                        hdrFilesBlurring.append(newFile)
                    tqdm.write(prefix + Fore.GREEN +
                               "Successfully replaced the original file.")

                else:
                    tqdm.write(
                        prefix + Fore.RED +
                        "Error: %s not found. Could not delete the File." %
                        hdrFileTiling)

            else:
                tqdm.write(
                    prefix + Fore.RED +
                    "Something went wrong on conversion. File not deleted.")

        showUI("", Fore.GREEN + "All HDRs converted...")

    if len(hdrFilesBlurring) != 0:

        showUI(
            __title__,
            "Found " + str(len(hdrFilesBlurring)) +
            " files with no blurred partners. We will make some :)",
        )
        time.sleep(4)

        for hdrFileBlurring in tqdm(
                hdrFilesBlurring,
                desc="Complete",
                ncols=width,
                position=3,
                unit="file",
                ascii=True,
                bar_format=barFormat,
        ):

            directory = Path(hdrFileBlurring).parent
            filename = Path(hdrFileBlurring).stem
            extension = Path(hdrFileBlurring).suffix

            showUI("Blurring HDRs", "Current File: " + filename + extension)

            IMG_CACHE = oiio.ImageCache.create(True)

            frameBufferOrig2 = ImageBuf(str(hdrFileBlurring))
            spec2 = frameBufferOrig2.spec()

            outPutFile = str(
                Path(hdrBlurFolder, filename + blurredPrefix + hdrExtension))

            newHeight = calculateResizeHeight(spec2.width, spec2.height,
                                              hdrBlurWidth)

            resizedFramebuffer = threadAndStatus(
                resizeHDR,
                [frameBufferOrig2, hdrBlurWidth, newHeight],
                "Resizing",
                2,
                True,
            )
            blurredFramebuffer = threadAndStatus(blurImage,
                                                 [resizedFramebuffer],
                                                 "Blurring", 1, True)
            writeFramebuffer = threadAndStatus(
                writeEXR, [blurredFramebuffer, outPutFile], "Saving", 0, False)

            IMG_CACHE.invalidate(str(hdrFileBlurring))

            # hdrFilesPreview.append(outPutFile)

        showUI("", Fore.GREEN + "All HDRs blurred...")

    if len(hdrFilesPreview) != 0:

        showUI(
            "Searching for Files",
            "Found " + str(len(hdrFilesPreview)) +
            " files with no Preview-JPGs. We will make some :)",
        )
        time.sleep(4)

        for hdrFilePreview in tqdm(
                hdrFilesPreview,
                desc="Complete",
                ncols=width,
                position=3,
                unit="file",
                ascii=True,
                bar_format=barFormat,
        ):

            directory = Path(hdrFilePreview).parent
            filename = Path(hdrFilePreview).stem
            extension = Path(hdrFilePreview).suffix

            showUI("Thumbnail creation",
                   "Current File: " + filename + extension)

            IMG_CACHE = oiio.ImageCache.create(True)

            frameBufferOrig2 = ImageBuf(str(hdrFilePreview))
            spec2 = frameBufferOrig2.spec()

            outPutFile = str(Path(hdrPrevFolder,
                                  filename + thumbnailExtension))

            sRGBBuffer = threadAndStatus(convertColor,
                                         [frameBufferOrig2, "linear", "sRGB"],
                                         "Lin2sRGB", 2, True)

            newHeight = calculateResizeHeight(spec2.width, spec2.height,
                                              thumbnailWidth)

            resizedFramebuffer = threadAndStatus(
                resizeHDR, [sRGBBuffer, thumbnailWidth, newHeight], "Resizing",
                1, True)
            writeFramebuffer = threadAndStatus(
                writeJPG, [resizedFramebuffer, outPutFile], "Saving", 0, False)

            IMG_CACHE.invalidate(str(hdrFilePreview))

        showUI("", Fore.GREEN + "All previews generated...")

    tqdm.write(prefix + "Press Enter to exit or close the Terminal")
    wait_key()
Code Example #23
def convertColor(srcBuffer, fromColor="linear", toColor="sRGB"):
    Dst = ImageBuf()
    ImageBufAlgo.colorconvert(Dst, srcBuffer, fromColor, toColor)
    threadResult.put(Dst)
    return Dst
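`threadResult` is a module-level queue the snippet does not show; the core call is ImageBufAlgo.colorconvert. A standalone sketch with placeholder file names:

from OpenImageIO import ImageBuf, ImageBufAlgo

src = ImageBuf("linear_render.exr")                  # hypothetical scene-linear input
dst = ImageBuf()
if ImageBufAlgo.colorconvert(dst, src, "linear", "sRGB"):
    dst.write("preview_srgb.jpg")                    # placeholder output path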
Code Example #24
File: mkhdr.py  Project: hpd/general
def loadImageBuffer( imagePath, outputGamut=None, rawSaturationPoint=-1.0,
    dcrawVariant=None ):
    '''
    Load an image buffer. Manage raw formats if OIIO can't load them directly
    '''
    global temp_dirs

    # Raw camera files require special handling
    imageExtension = os.path.splitext( imagePath )[-1][1:].lower()
    if imageExtension in rawExtensions:

        # Either OIIO can read the data directly
        if oiioSupportsRaw():
            print( "\tUsing OIIO ImageInput to read raw file" )

            # Convert gamut number to text
            gamuts = { 
                0 : "raw", 
                1 : "sRGB",
                2 : "Adobe",
                3 : "Wide",
                4 : "ProPhoto",
                5 : "XYZ"
            }
            outputGamutText = "sRGB"
            if outputGamut in gamuts:
                outputGamutText = gamuts[outputGamut]

            # Spec will be used to configure the file read
            spec = ImageSpec()
            spec.attribute("raw:ColorSpace", outputGamutText)
            spec.attribute("raw:use_camera_wb", 1)
            spec.attribute("raw:auto_bright", 0)
            spec.attribute("raw:use_camera_matrix", 0)
            spec.attribute("raw:adjust_maximum_thr", 0.0)

            imageBuffer = ImageBuf()
            imageBuffer.reset( imagePath, 0, 0, spec )

        # Or we need to use dcraw to help the process along
        else:
            print( "\tUsing dcraw to convert raw, then OIIO to read file" )

            # Create a new temp dir for each image so there's no chance
            # of a name collision
            temp_dir = tempfile.mkdtemp()
            temp_dirs.append( temp_dir )

            imageName = os.path.split(imagePath)[-1]
            temp_file = os.path.join(temp_dir, "%s_temp.tiff" % imageName)

            if outputGamut is None:
                outputGamut = 1

            if dcrawVariant == "dcraw":
                cmd = "dcraw"
                args  = []
                #args += ['-v']
                args += ['-w', '-o', str(outputGamut), '-4', '-T', '-W']
                args += ['-c']
                if rawSaturationPoint > 0.0:
                    args += ['-S', str(int(rawSaturationPoint))]
                args += [imagePath]

                cmdargs = [cmd]
                cmdargs.extend(args)

                #print( "\tTemp_file : %s" % temp_file )
                print( "\tCommand   : %s" % " ".join(cmdargs) )

                with open(temp_file, "w") as temp_handle:
                    process = sp.Popen(cmdargs, stdout=temp_handle, stderr=sp.STDOUT)
                    process.wait()

            # Use the libraw dcraw_emu when dcraw doesn't support a camera yet
            else:
                cmd = "dcraw_emu"
                args  = []
                args += ['-w', '-o', str(outputGamut), '-4', '-T', '-W']

                #if rawSaturationPoint > 0.0:
                #    args += ['-c', str(float(rawSaturationPoint/16384.0))]
                if rawSaturationPoint > 0.0:
                    args += ['-S', str(int(rawSaturationPoint))]
                args += [imagePath]

                cmdargs = [cmd]
                cmdargs.extend(args)

                print( "\tCommand   : %s" % " ".join(cmdargs) )

                dcraw_emu_temp_file = "%s.tiff" % imageName
                process = sp.Popen(cmdargs, stderr=sp.STDOUT)
                process.wait()

                print( "\tMoving temp file to : %s" % temp_dir )
                shutil.move( dcraw_emu_temp_file, temp_file )

            #print( "Loading   : %s" % temp_file )
            imageBuffer = ImageBuf( temp_file )

    # Just load the image using OIIO
    else:
        #print( "Using OIIO ImageBuf read route" )
        imageBuffer = ImageBuf( imagePath )

    return imageBuffer
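For the branch above where OIIO reads the raw file directly, the key pieces are the "raw:*" configuration hints and ImageBuf.reset with a configuration ImageSpec. A minimal, self-contained sketch (the raw file name is a placeholder):

import OpenImageIO as oiio
from OpenImageIO import ImageBuf, ImageSpec

config = ImageSpec()
config.attribute("raw:ColorSpace", "sRGB")      # decode into sRGB primaries
config.attribute("raw:auto_bright", 0)          # keep exposure untouched

rawBuffer = ImageBuf()
rawBuffer.reset("IMG_0001.CR2", 0, 0, config)   # subimage 0, miplevel 0, with config hints
spec = rawBuffer.spec()                         # forces the header to be read
if rawBuffer.has_error:
    print(rawBuffer.geterror())
else:
    print("Decoded raw at %dx%d" % (spec.width, spec.height))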
Code Example #25
File: test_imagebufalgo.py  Project: opencai/oiio

def write(image, filename, format=oiio.UNKNOWN):
    if not image.has_error:
        image.write(filename, format)
    if image.has_error:
        print("Error writing", filename, ":", image.geterror())


######################################################################
# main test starts here

try:
    # Some handy images to work with
    gridname = os.path.join(OIIO_TESTSUITE_IMAGEDIR, "grid.tif")
    grid = ImageBuf(gridname)
    checker = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker(checker, 8, 8, 8, (0, 0, 0), (1, 1, 1))
    gray128 = make_constimage(128, 128, 3, oiio.HALF, (0.5, 0.5, 0.5))
    gray64 = make_constimage(64, 64, 3, oiio.HALF, (0.5, 0.5, 0.5))
    tahoetiny = ImageBuf("../oiiotool/src/tahoe-tiny.tif")

    # black
    # b = ImageBuf (ImageSpec(320,240,3,oiio.UINT8))
    b = ImageBufAlgo.zero(roi=oiio.ROI(0, 320, 0, 240, 0, 1, 0, 3))
    write(b, "black.tif", oiio.UINT8)

    # fill (including use of ROI)
    b = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.fill(b, (1, 0.5, 0.5))
    ImageBufAlgo.fill(b, (0, 1, 0), oiio.ROI(100, 180, 100, 180))
Code Example #26
File: test_imagebufalgo.py  Project: hybridetech/oiio
        for x in range(spec.x, spec.x + spec.width):
            p = image.getpixel(x, y)
            print("[", end="")
            for c in range(spec.nchannels):
                print(fmt.format(p[c]), end=" ")
            print("] ", end="")
        print("")


######################################################################
# main test starts here

try:
    # Some handy images to work with
    gridname = os.path.join(OIIO_TESTSUITE_IMAGEDIR, "grid.tif")
    grid = ImageBuf(gridname)
    checker = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker(checker, 8, 8, 8, (0, 0, 0), (1, 1, 1))
    gray128 = make_constimage(128, 128, 3, oiio.HALF, (0.5, 0.5, 0.5))
    gray64 = make_constimage(64, 64, 3, oiio.HALF, (0.5, 0.5, 0.5))
    tahoetiny = ImageBuf(OIIO_TESTSUITE_ROOT + "/oiiotool/src/tahoe-tiny.tif")

    # black
    # b = ImageBuf (ImageSpec(320,240,3,oiio.UINT8))
    b = ImageBufAlgo.zero(roi=oiio.ROI(0, 320, 0, 240, 0, 1, 0, 3))
    write(b, "black.tif", oiio.UINT8)

    # fill (including use of ROI)
    b = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.fill(b, (1, 0.5, 0.5))
    ImageBufAlgo.fill(b, (0, 1, 0), oiio.ROI(100, 180, 100, 180))
Code Example #27
def findOpticalFlow(inputImage1, 
    inputImage2,
    outputWarpedImage,
    outputFlowImage,
    verbose,
    opticalFlowImplementation="simpleflow"):

    oiioImageBuffer1 = ImageBuf( inputImage1 )
    ImageBufReorient(oiioImageBuffer1, oiioImageBuffer1.orientation)

    oiioImageBuffer2 = ImageBuf( inputImage2 )
    ImageBufReorient(oiioImageBuffer2, oiioImageBuffer2.orientation)

    if verbose:
        print( "load and convert 1 - %s" % inputImage1 )
    openCVImageBuffer1 = OpenCVImageBufferFromOIIOImageBuffer(oiioImageBuffer1)
    if verbose:
        print( "load and convert 2 - %s" % inputImage2 )
    openCVImageBuffer2 = OpenCVImageBufferFromOIIOImageBuffer(oiioImageBuffer2)

    if verbose:
        print( "resolution : %s" % str(openCVImageBuffer1.shape) )
        print( "calculate optical flow 1 -> 2")

    if opticalFlowImplementation == "old_farneback":
        if verbose:
            print( "older farneback implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        previous_flow = None
        pyramid_scale = 0.5
        pyramid_levels = 5
        window_size = 50
        iterations_per_pyramid_level = 20
        pixel_neighborhood_size = 3
        neighborhood_match_smoothing_factor = 1.0
        flags = cv2.OPTFLOW_FARNEBACK_GAUSSIAN

        if verbose:
            print( "calculate")
        opencvFlow = cv2.calcOpticalFlowFarneback(gray1, gray2, 
            previous_flow, pyramid_scale, pyramid_levels, window_size, iterations_per_pyramid_level, 
            pixel_neighborhood_size, neighborhood_match_smoothing_factor, flags)

    elif opticalFlowImplementation == "farneback":
        if verbose:
            print( "farneback implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_Farneback()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "dualtvl1":
        if verbose:
            print( "dualtvl1 implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.createOptFlow_DualTVL1()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "sparsetodense":
        if verbose:
            print( "sparse to dense implementation" )
        # Current set of constants... Ranges and good values should be documented
        if verbose:
            print( "calculate")
        opencvFlow = cv2.optflow.calcOpticalFlowSparseToDense(openCVImageBuffer1, openCVImageBuffer2, None,
            8, 128, 0.05, True, 500.0, 1.5)

    elif opticalFlowImplementation == "deepflow":
        if verbose:
            print( "deep flow implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_DeepFlow()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "dis":
        if verbose:
            print( "dis implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_DIS()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "pcaflow":
        if verbose:
            print( "pca flow implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_PCAFlow()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "simpleflow":
        if verbose:
            print( "simple flow implementation" )
        # Current set of constants... Ranges and good values should be documented
        opencvFlow = cv2.optflow.calcOpticalFlowSF(openCVImageBuffer1, openCVImageBuffer2, 
            3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10)

    else:
        print( "Unknown optical flow implementation : %s" % opticalFlowImplementation )
        opencvFlow = None

    if outputWarpedImage and (opencvFlow is not None):
        if verbose:
            print( "warping 1 -> 2")
        opencvWarped = applyOpticalFlow(openCVImageBuffer1, opencvFlow)

        if verbose:
            print( "converting and writing warped image - %s" % outputWarpedImage )
        oiioWarped = OIIOImageBufferFromOpenCVImageBuffer( opencvWarped )
        oiioWarped.write( outputWarpedImage )
    else:
        opencvWarped = None

    if outputFlowImage and (opencvFlow is not None):
        if verbose:
            print( "converting and writing flow image - %s" % outputFlowImage )

        oiioFlowBuffer = OIIOImageBufferFromOpenCVImageBuffer( opencvFlow )
        oiioFlowBuffer.write( outputFlowImage )

    return (opencvWarped, opencvFlow)
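A hedged usage sketch for the function above; the paths are placeholders, and the helpers it calls (ImageBufReorient, OpenCVImageBufferFromOIIOImageBuffer, applyOpticalFlow, OIIOImageBufferFromOpenCVImageBuffer) are assumed to be defined in the same module as in the snippet.

warped, flow = findOpticalFlow(
    "frame_0001.exr",          # previous frame (placeholder path)
    "frame_0002.exr",          # next frame (placeholder path)
    "frame_0001_warped.exr",   # warped output image
    "frame_0001_flow.exr",     # flow vectors output image
    verbose=True,
    opticalFlowImplementation="farneback",
)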
Code Example #28
File: test_imagebufalgo.py  Project: tdsmith/oiio
def write(image, filename, format=oiio.UNKNOWN):
    if not image.has_error:
        image.set_write_format(format)
        image.write(filename)
    if image.has_error:
        print "Error writing", filename, ":", image.geterror()


######################################################################
# main test starts here

try:
    # Some handy images to work with
    gridname = "../../../../../oiio-images/grid.tif"
    grid = ImageBuf(gridname)
    checker = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker(checker, 8, 8, 8, (0, 0, 0), (1, 1, 1))
    gray128 = make_constimage(128, 128, 3, oiio.HALF, (0.5, 0.5, 0.5))
    gray64 = make_constimage(64, 64, 3, oiio.HALF, (0.5, 0.5, 0.5))

    # black
    b = ImageBuf(ImageSpec(320, 240, 3, oiio.UINT8))
    ImageBufAlgo.zero(b)
    write(b, "black.tif")

    # fill (including use of ROI)
    b = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.fill(b, (1, 0.5, 0.5))
    ImageBufAlgo.fill(b, (0, 1, 0), oiio.ROI(100, 180, 100, 180))
    write(b, "filled.tif")
Code Example #29
def processTextures(excludeFolders):
    allTextures = folder.getFiles(rootFolder,
                                  excludeFolders,
                                  textureExts,
                                  all=True)
    selectedTextures = []

    for texture in allTextures:

        directory = Path(texture).parent
        filename = Path(texture).stem
        extension = Path(texture).suffix

        IMG_CACHE = oiio.ImageCache.create(True)

        frameBufferOrig = ImageBuf(str(texture))
        spec = frameBufferOrig.spec()

        if (spec.tile_width == spec.width or spec.tile_width == 0
                or frameBufferOrig.nmiplevels == 1):
            if Path(directory,
                    filename + mipmapPrefix + mipmapExtension).exists():
                pass
            else:
                selectedTextures.append(texture)

        IMG_CACHE.invalidate(str(texture))

    if len(selectedTextures) == 0:

        showUI("", "Nothing to do...")

    if len(selectedTextures) != 0:

        showUI(
            "Searching for Files",
            "Found " + str(len(selectedTextures)) +
            " files in scanline/No MipMap or not .exr format. We will make some :)",
        )
        time.sleep(4)

        for texture in tqdm(
                selectedTextures,
                desc="Complete",
                ncols=width,
                position=1,
                unit="file",
                ascii=True,
                bar_format=barFormat,
        ):

            directory = Path(texture).parent
            filename = Path(texture).stem
            extension = Path(texture).suffix

            showUI("Make tiled / MipMapped .exr",
                   "Current File: " + filename + extension)

            IMG_CACHE = oiio.ImageCache.create(True)

            frameBufferOrig2 = ImageBuf(str(texture))

            outPutFile = str(
                Path(directory, filename + mipmapPrefix + mipmapExtension))

            writeFramebuffer = threadAndStatus(writeTexture,
                                               [frameBufferOrig2, outPutFile],
                                               "Saving", 0, False)

            IMG_CACHE.invalidate(str(texture))

        showUI("", Fore.GREEN + "All textures converted...")

    tqdm.write(prefix + "Press Enter to exit or close the Terminal")
    wait_key()
Code Example #30
File: mkhdr.py  Project: nbn1985/general
def loadImageBuffer(imagePath, outputGamut=None):
    '''
    Load an image buffer. Manage raw formats if OIIO can't load them directly
    '''
    global temp_dirs

    # Raw camera files require special handling
    imageExtension = os.path.splitext(imagePath)[-1][1:].lower()
    if imageExtension in rawExtensions:

        # Either OIIO can read the data directly
        if oiioSupportsRaw():
            print("\tUsing OIIO ImageInput to read raw file")

            # Convert gamut number to text
            gamuts = {
                0: "raw",
                1: "sRGB",
                2: "Adobe",
                3: "Wide",
                4: "ProPhoto",
                5: "XYZ",
                6: "ACES",
            }
            outputGamutText = "sRGB"
            if outputGamut in gamuts:
                outputGamutText = gamuts[outputGamut]

            # Spec will be used to configure the file read
            spec = ImageSpec()
            spec.attribute("raw:ColorSpace", outputGamutText)
            spec.attribute("raw:use_camera_wb", 1)
            spec.attribute("raw:auto_bright", 0)
            spec.attribute("raw:use_camera_matrix", 0)
            spec.attribute("raw:adjust_maximum_thr", 0.0)

            # Read the image using the adjusted spec
            imageBuffer = oiio.ImageBuf()
            imageBuffer.reset(imagePath, 0, 0, spec)

        # Or we need to use dcraw to help the process along
        else:
            print("\tUsing dcraw to convert raw, then OIIO to read temp file")

            # Create a new temp dir for each image so there's no chance
            # of a name collision
            temp_dir = tempfile.mkdtemp()
            temp_dirs.append(temp_dir)

            imageName = os.path.split(imagePath)[-1]
            temp_file = os.path.join(temp_dir, "%s_temp.tiff" % imageName)

            if outputGamut is None:
                outputGamut = 1

            cmd = "dcraw"
            args = []
            #args += ['-v']
            args += ['-w', '-o', str(outputGamut), '-4', '-T', '-W', '-c']
            args += [imagePath]

            cmdargs = [cmd]
            cmdargs.extend(args)

            #print( "\tTemp_file : %s" % temp_file )
            print("\tCommand   : %s" % " ".join(cmdargs))

            with open(temp_file, "w") as temp_handle:
                process = sp.Popen(cmdargs,
                                   stdout=temp_handle,
                                   stderr=sp.STDOUT)
                process.wait()

            #print( "Loading   : %s" % temp_file )
            imageBuffer = ImageBuf(temp_file)

    # Just load the image using OIIO
    else:
        #print( "Using OIIO ImageBuf read route" )
        imageBuffer = ImageBuf(imagePath)

    return imageBuffer
Code Example #31
    # Some handy images to work with
    gridname = os.path.join(OIIO_TESTSUITE_IMAGEDIR, "grid.tif")
    grid = ImageBuf (gridname)
    checker = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker (checker, 8, 8, 8, (0,0,0), (1,1,1))
    gray128 = make_constimage (128, 128, 3, oiio.HALF, (0.5,0.5,0.5))
    gray64 = make_constimage (64, 64, 3, oiio.HALF, (0.5,0.5,0.5))
    tahoetiny = ImageBuf("../oiiotool/src/tahoe-tiny.tif")

    # black
    # b = ImageBuf (ImageSpec(320,240,3,oiio.UINT8))
    b = ImageBufAlgo.zero (roi=oiio.ROI(0,320,0,240,0,1,0,3))
    write (b, "black.tif", oiio.UINT8)

    # fill (including use of ROI)
    b = ImageBuf (ImageSpec(256,256,3,oiio.UINT8));
    ImageBufAlgo.fill (b, (1,0.5,0.5))
    ImageBufAlgo.fill (b, (0,1,0), oiio.ROI(100,180,100,180))
    write (b, "filled.tif", oiio.UINT8)

    # checker
    b = ImageBuf (ImageSpec(256,256,3,oiio.UINT8))
    ImageBufAlgo.checker (b, 64, 64, 64, (1,.5,.5), (.5,1,.5), 10, 5)
    write (b, "checker.tif", oiio.UINT8)

    # noise-uniform
    b = ImageBufAlgo.noise ("uniform", 0.25, 0.75, roi=ROI(0,64,0,64,0,1,0,3))
    write (b, "noise-uniform3.tif", oiio.UINT8)

    # noise-gaussian
    b = ImageBufAlgo.noise ("gaussian", 0.5, 0.1, roi=ROI(0,64,0,64,0,1,0,3));
Code Example #32
    def compare(self, diff_image_location=None, blur=10, raise_exception=True):
        """Compare the two given images

        Args:
            diff_image_location (str): file path for difference image.
                Written only if there are failures
            blur (float): image blur to apply before comparing
        """

        if not diff_image_location:
            diff_image_location = os.path.dirname(self._image_a_location)

        self.blur_images(blur)
        ImageBufAlgo.compare(
            self.image_a_buffer,
            self.image_b_buffer,
            self.fail_threshold,
            self.warn_threshold,
            self._compare_results,
        )
        diff_buffer = self.create_diff_buffer()

        if self.debug:
            self.image_a_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    os.path.basename(self._image_a_location),
                    self._file_ext,
                )
            )
            self.image_b_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    os.path.basename(self._image_b_location),
                    self._file_ext,
                )
            )

        if self._compare_results.nfail > 0:
            ImageBufAlgo.color_map(diff_buffer, diff_buffer, -1, 'inferno')
            remap_buffer = ImageBuf()
            multiplier = 5
            ImageBufAlgo.mul(
                remap_buffer,
                diff_buffer,
                (multiplier, multiplier, multiplier, 1.0),
            )
            ImageBufAlgo.add(remap_buffer, self.image_a_buffer, remap_buffer)
            msg = report_msg.format(
                failures=self._compare_results.nfail,
                warn=self._compare_results.nwarn,
                meanerror=self._compare_results.meanerror,
                rmserror=self._compare_results.rms_error,
                psnr=self._compare_results.PSNR
            )

            remap_buffer.write(
                '{}/{}-{}_diff{}'.format(
                    diff_image_location,
                    os.path.basename(self._image_a_location),
                    os.path.basename(self._image_b_location),
                    self._file_ext,
                )
            )
            self.image_a_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    '1_a',
                    self._file_ext,
                )
            )
            self.image_b_buffer.write(
                '{}/{}_debug{}'.format(
                    diff_image_location,
                    '1_b',
                    self._file_ext,
                )
            )
            if raise_exception:
                raise ImageDifferenceError(msg)
            else:
                print(msg)
Code Example #33
File: test_imagebufalgo.py  Project: gchatelet/oiio


######################################################################
# main test starts here

try:
    # Some handy images to work with
    gridname = "../../../../../oiio-images/grid.tif"
    grid = ImageBuf (gridname)
    checker = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker (checker, 8, 8, 8, (0,0,0), (1,1,1))
    gray128 = make_constimage (128, 128, 3, oiio.HALF, (0.5,0.5,0.5))

    # black
    b = ImageBuf (ImageSpec(320,240,3,oiio.UINT8))
    ImageBufAlgo.zero (b)
    write (b, "black.tif")

    # fill (including use of ROI)
    b = ImageBuf (ImageSpec(256,256,3,oiio.UINT8));
    ImageBufAlgo.fill (b, (1,0.5,0.5))
    ImageBufAlgo.fill (b, (0,1,0), oiio.ROI(100,180,100,180))
    write (b, "filled.tif")

    # checker
    b = ImageBuf (ImageSpec(256,256,3,oiio.UINT8))
    ImageBufAlgo.checker (b, 64, 64, 64, (1,.5,.5), (.5,1,.5), 10, 5)
    write (b, "checker.tif")

    # channels, channel_append
Code Example #34
File: test_imagebufalgo.py  Project: nburtnyk/oiio
        print "Error writing", filename, ":", image.geterror()


######################################################################
# main test starts here

try:
    # Some handy images to work with
    gridname = "../../../../../oiio-images/grid.tif"
    grid = ImageBuf(gridname)
    checker = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker(checker, 8, 8, 8, (0, 0, 0), (1, 1, 1))
    gray128 = make_constimage(128, 128, 3, oiio.HALF, (0.5, 0.5, 0.5))

    # black
    b = ImageBuf(ImageSpec(320, 240, 3, oiio.UINT8))
    ImageBufAlgo.zero(b)
    write(b, "black.tif")

    # fill (including use of ROI)
    b = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.fill(b, (1, 0.5, 0.5))
    ImageBufAlgo.fill(b, (0, 1, 0), oiio.ROI(100, 180, 100, 180))
    write(b, "filled.tif")

    # checker
    b = ImageBuf(ImageSpec(256, 256, 3, oiio.UINT8))
    ImageBufAlgo.checker(b, 64, 64, 64, (1, 0.5, 0.5), (0.5, 1, 0.5), 10, 5)
    write(b, "checker.tif")

    # channels, channel_append
Code Example #35
def mkhdr(outputPath, 
    inputPaths, 
    responseLUTPaths, 
    baseExposureIndex, 
    writeIntermediate = False, 
    outputGamut = 1,
    compression = None,
    compressionQuality = 0,
    rawSaturationPoint = -1.0,
    alignImages = False,
    dcrawVariant = None):
    '''
    Create an HDR image from a series of individual exposures
    If the images are non-linear, a series of response LUTs can be used to
    linearize the data
    '''

    global temp_dirs

    # Set up capture of stdout and stderr
    old_stdout, old_stderr = sys.stdout, sys.stderr
    redirected_stdout = sys.stdout = Logger()
    redirected_stderr = sys.stderr = Logger()

    # Create buffers for inputs
    inputBuffers = []
    inputAttributes = []

    # Read images
    for inputPath in inputPaths:
        print( "Reading input image : %s" % inputPath )
        # Read
        inputBufferRaw = loadImageBuffer( inputPath, outputGamut=outputGamut, 
            rawSaturationPoint=rawSaturationPoint,
            dcrawVariant=dcrawVariant )

        # Reset the orientation
        print( "\tRaw Orientation : %d" % inputBufferRaw.orientation)
        ImageBufReorient(inputBufferRaw, inputBufferRaw.orientation)

        # Get attributes
        (channelType, width, height, channels, orientation, metadata, inputSpec) = ImageAttributes(inputBufferRaw)

        # Cast to half by adding with a const half buffer.
        inputBufferHalf = ImageBufMakeConstant(width, height, channels, oiio.HALF)
        ImageBufAlgo.add(inputBufferHalf, inputBufferHalf, inputBufferRaw)

        # Get exposure-specific information
        exposure = getExposureInformation(metadata)

        print( "\tChannel Type : %s" % (channelType) )
        print( "\tWidth        : %s" % (width) )
        print( "\tHeight       : %s" % (height) )
        print( "\tChannels     : %s" % (channels) )
        print( "\tOrientation  : %s" % (orientation) )
        print( "\tExposure     : %s" % (exposure) )
        print( "\tMetadata #   : %s" % (len(metadata)) )

        # Store pixels and image attributes
        inputBuffers.append( inputBufferHalf )
        inputAttributes.append( (channelType, width, height, channels, orientation, metadata, exposure, inputSpec) )

    # Get the base exposure information
    # All other images will be scaled to match this exposure
    if baseExposureIndex >= 0:
        baseExposureIndex = max(0, min(len(inputPaths)-1, baseExposureIndex))
    else:
        multithreaded = True
        if multithreaded:
            threads = cpu_count()
            baseExposureIndex = findBaseExposureIndexMultithreaded(inputPaths, width, height, channels, threads)
        else:
            baseExposureIndex = findBaseExposureIndexSerial(inputBuffers, width, height, channels)

    baseExposureMetadata = inputAttributes[baseExposureIndex][5]
    baseExposureInfo = inputAttributes[baseExposureIndex][6]
    baseInputspec = inputAttributes[baseExposureIndex][7]

    print( "" )
    print( "Base exposure index : %d" % baseExposureIndex )
    print( "Base exposure info  : %s" % baseExposureInfo )

    # Find the lowest and highest exposures
    exposureAdjustments = [getExposureAdjustment(x[6], baseExposureInfo) for x in inputAttributes]

    minExposureOffsetIndex = exposureAdjustments.index(min(exposureAdjustments))
    maxExposureOffsetIndex = exposureAdjustments.index(max(exposureAdjustments))
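    # Note on naming: an input that is brighter (more exposed) than the base
    # needs a negative offset to match it, so the minimum offset belongs to
    # the most-exposed input and the maximum offset to the darkest one; that
    # is why the labels printed below look swapped relative to the variable names.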

    print( "Max exposure index  : %d" % minExposureOffsetIndex )
    print( "Min exposure index  : %d" % maxExposureOffsetIndex )

    print( "\nBegin processing\n" )

    # Accumulators for the weighted-average merge:
    # imageSum holds sum(weight * color), weightSum holds sum(weight)
    imageSum  = ImageBufMakeConstant(width, height, channels, oiio.HALF, 
        inputSpec=baseInputspec)
    weightSum = ImageBufMakeConstant(width, height, channels, oiio.HALF)

    # Re-used intermediate buffers
    color     = ImageBufMakeConstant(width, height, channels, oiio.HALF)
    weight    = ImageBufMakeConstant(width, height, channels, oiio.HALF)
    weightedColor = ImageBufMakeConstant(width, height, channels, oiio.HALF)

    # Process images
    for inputIndex in range(len(inputPaths)):
        inputPathComponents = (os.path.splitext( inputPaths[inputIndex] )[0], ".exr")
        intermediate = 0

        ImageBufAlgo.zero( color )
        ImageBufAlgo.zero( weight )
        ImageBufAlgo.zero( weightedColor )

        print( "Processing input image : %s" % inputPaths[inputIndex] )
        inputBuffer = inputBuffers[inputIndex]

        # Copy the input buffer data
        ImageBufAlgo.add(color, color, inputBuffer)

        if writeIntermediate:
            intermediatePath = "%s_int%d.float_buffer%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
            intermediate += 1
            ImageBufWrite(color, intermediatePath)

        # Find the image alignment matrix to align this exposure with the base exposure
        if alignImages:
            try:
                if inputIndex != baseExposureIndex:
                    if cv2:
                        print( "\tAligning image %d to base exposure %d " % (inputIndex, baseExposureIndex) )
                        warpMatrix = find2dAlignmentMatrix(inputBuffer, inputBuffers[baseExposureIndex])

                        # reformat for OIIO's warp
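                        # (OpenCV gives a 2x3 affine [[a, b, tx], [c, d, ty]];
                        # OIIO's warp() takes a 3x3 row-vector matrix, so the
                        # translation terms go into the last row.)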
                        w = list(map(float, warpMatrix.reshape(1, -1)[0]))
                        warpTuple = (w[0], w[1], 0.0, w[3], w[4], 0.0, w[2], w[5], 1.0)
                        print( warpTuple )

                        warped = ImageBuf()
                        result = ImageBufAlgo.warp(warped, color, warpTuple)
                        if result:
                            print( "\tImage alignment warp succeeded." )
                            if writeIntermediate:
                                intermediatePath = "%s_int%d.warped%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
                                intermediate += 1
                                ImageBufWrite(warped, intermediatePath)

                            color = warped
                        else:
                            print( "\tImage alignment warp failed." )
                            if writeIntermediate:
                                intermediate += 1
                    else:
                        print( "\tSkipping image alignment. OpenCV not defined" )
                        if writeIntermediate:
                            intermediate += 1
                else:
                    print( "\tSkipping alignment of base exposure to itself")
                    if writeIntermediate:
                        intermediate += 1

            except:
                print( "Exception in image alignment" )
                print( '-'*60 )
                traceback.print_exc()
                print( '-'*60 )

        # Compute the per-pixel weight for this exposure.  The lut list flags
        # the extreme exposures (1 = brightest input, 2 = darkest) so that
        # ImageBufWeight can treat the ends of the exposure range differently.
        print( "\tComputing image weight" )

        lut = []
        if inputIndex == minExposureOffsetIndex:
            lut.append(1)
        if inputIndex == maxExposureOffsetIndex:
            lut.append(2)
        if lut:
            print( "\tUsing LUT %s in weighting calculation" % lut )
        ImageBufWeight(weight, color, lut=lut)

        if writeIntermediate:
            intermediatePath = "%s_int%d.weight%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
            intermediate += 1
            ImageBufWrite(weight, intermediatePath)

        # Linearize using LUTs
        if responseLUTPaths:
            for responseLUTPath in responseLUTPaths:
                print( "\tApplying LUT %s" % responseLUTPath )
                ImageBufAlgo.ociofiletransform(color, color, os.path.abspath(responseLUTPath) )

                if writeIntermediate:
                    intermediatePath = "%s_int%d.linearized%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
                    intermediate += 1
                    ImageBufWrite(color, intermediatePath)

        # Get exposure offset
        inputExposureInfo = inputAttributes[inputIndex][6]
        exposureAdjustment = getExposureAdjustment(inputExposureInfo, baseExposureInfo)
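        # One stop corresponds to a factor of two in linear light, so a stop
        # offset d becomes a 2**d multiplier.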
        exposureScale = pow(2, exposureAdjustment)

        # Re-expose input
        print( "\tScaling by %s stops (%s mul)" % (exposureAdjustment, exposureScale) )
        ImageBufAlgo.mul(color, color, exposureScale)

        if writeIntermediate:
            intermediatePath = "%s_int%d.exposure_adjust%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
            intermediate += 1
            ImageBufWrite(color, intermediatePath)

        # Multiply color by weight
        print( "\tMultiply by weight" )

        ImageBufAlgo.mul(weightedColor, weight, color)

        if writeIntermediate:
            intermediatePath = "%s_int%d.color_x_weight%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
            intermediate += 1
            ImageBufWrite(weightedColor, intermediatePath)

        print( "\tAdd values into sum" )

        # Sum weighted color and weight
        ImageBufAlgo.add(imageSum,  imageSum,  weightedColor)
        ImageBufAlgo.add(weightSum, weightSum, weight)

        if writeIntermediate:
            intermediatePath = "%s_int%d.color_x_weight_sum%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
            intermediate += 1
            ImageBufWrite(imageSum, intermediatePath)

            intermediatePath = "%s_int%d.weight_sum%s" % (inputPathComponents[0], intermediate, inputPathComponents[1])
            intermediate += 1
            ImageBufWrite(weightSum, intermediatePath)

    # Divide out the accumulated weights to get the weighted average
    print( "Dividing out weights" )
    ImageBufAlgo.div(imageSum, imageSum, weightSum)

    # Write to disk
    print( "Writing result : %s" % outputPath )

    # Restore regular streams
    sys.stdout, sys.stderr = old_stdout, old_stderr

    additionalAttributes = {}
    additionalAttributes['inputPaths'] = " ".join(inputPaths)
    additionalAttributes['stdout'] = "".join(redirected_stdout.log)
    additionalAttributes['stderr'] = "".join(redirected_stderr.log)

    ImageBufWrite(imageSum, outputPath, 
        compression=compression,
        compressionQuality=compressionQuality,
        metadata=baseExposureMetadata,
        additionalAttributes=additionalAttributes)

    # Clean up temp folders
    for temp_dir in temp_dirs:
        #print( "Removing : %s" % temp_dir )
        shutil.rmtree(temp_dir)

    # Clear the list in place (removing entries while iterating over the same
    # list would skip items)
    del temp_dirs[:]
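
# For context, a minimal, hypothetical invocation of mkhdr(); the real script
# builds these arguments from its command-line parser, which is not part of
# this excerpt, and the exposure-bracket file names below are placeholders.
mkhdr("merged.exr",
      ["bracket_minus2.cr2", "bracket_0.cr2", "bracket_plus2.cr2"],
      responseLUTPaths=[],       # inputs assumed already linear, so no response LUTs
      baseExposureIndex=-1,      # a negative index picks the base exposure automatically
      writeIntermediate=False)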