Example #1
def blendLuminosity(dest, source):
    """
    Implements blending with luminosity mode,
    which is missing in Qt.
    The blended image retains the hue and saturation of dest,
    with the luminosity of source.
    We use the HLS color model:
    see https://docs.opencv.org/3.3.0/de/d25/imgproc_color_conversions.html
    Note blendColor and blendLuminosity are commuted versions of each other:
    blendLuminosity(img1, img2) = blendColor(img2, img1)
    @param dest: destination QImage
    @type dest: QImage
    @param source: source QImage
    @type source: QImage
    @return: The blended image
    @rtype: QImage same size and format as source

    """
    sourceBuf = QImageBuffer(source)[:, :, :3]
    destBuf = QImageBuffer(dest)[:, :, :3]
    hlsSourceBuf = rgb2hlsVec(sourceBuf[:, :, ::-1])
    hlsDestBuf = rgb2hlsVec(destBuf[:, :, ::-1])
    # copy source luminosity to dest
    hlsDestBuf[:, :, 1] = hlsSourceBuf[:, :, 1]
    blendBuf = hls2rgbVec(hlsDestBuf)
    img = QImage(source.size(), source.format())
    tmp = QImageBuffer(img)
    tmp[:, :, :3][:, :, ::-1] = blendBuf
    tmp[:, :, 3] = 255
    return img
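A minimal standalone sketch of the same channel swap, using only OpenCV on 8-bit BGR arrays (QImageBuffer, rgb2hlsVec and hls2rgbVec are project helpers; cv2.cvtColor stands in for them here):

import cv2

def blendLuminosityBGR(dest, source):
    # OpenCV HLS channel order is (H, L, S)
    hlsDest = cv2.cvtColor(dest, cv2.COLOR_BGR2HLS)
    hlsSource = cv2.cvtColor(source, cv2.COLOR_BGR2HLS)
    # keep hue and saturation of dest, take lightness from source
    hlsDest[:, :, 1] = hlsSource[:, :, 1]
    return cv2.cvtColor(hlsDest, cv2.COLOR_HLS2BGR)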
Example #2
 def __init__(self, cModel, QImg, target=None, size=0, border=0):
     """
     @param cModel: color model
     @type cModel: cmConverter
     @param QImg: color wheel
     @type QImg: vImage
     @param target: image to sample
     @type target: QImage
     @param size: color wheel diameter
     @type size: integer
     @param border: border size
     @type border: int
     """
     self.QImg = QImg  # not a back link !!!
     self.border = border
     if size == 0:
          self.size = min(QImg.width(), QImg.height()) - 2 * border
     else:
         self.size = size
     self.origin = 0
     # calculate target histogram
     self.targetHist = None
     if target is not None:
         # convert to current color space
         hsxImg = cModel.rgb2cmVec(QImageBuffer(target)[:, :, :3][:, :, ::-1])
         # get polar coordinates relative to the color wheel
         xyarray = self.QImg.GetPointVec(hsxImg).astype(int)
         maxVal = self.QImg.width()
         STEP = 10
         # build 2D histogram for xyarray
          H, xedges, yedges = np.histogram2d(xyarray[:, :, 0].ravel(), xyarray[:, :, 1].ravel(),
                                             bins=[np.arange(0, maxVal + STEP, STEP), np.arange(0, maxVal + STEP, STEP)],
                                             density=True)  # normed was removed from recent numpy
         w,h = QImg.width(), QImg.height()
         b = QImage(w, h, QImage.Format_ARGB32)
         b.fill(0)
         buf = QImageBuffer(b)
         # set the transparency of each pixel
         # proportionally to the height of its bin
         # get bin indices (u, v)
         u = xyarray[:,:,0] // STEP
         v = xyarray[:,:,1] // STEP
         # get heights of bins
         tmp = H[u,v]
         norma = np.amax(H)
         # color white
         buf[xyarray[:,:,1], xyarray[:,:,0],...] = 255
         # alpha channel
         buf[xyarray[:, :, 1], xyarray[:, :, 0], 3] = 90 + 128.0 * tmp / norma
         self.targetHist = b
         self.showTargetHist = False
     super().__init__(self.QImg.rPixmap)
     self.setPixmap(self.QImg.rPixmap)
     self.setOffset(QPointF(-border, -border))
     self.onMouseRelease = lambda p, x, y, z: 0
     self.rubberBand = None
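A toy illustration of the histogram trick above: the bin height of each sample point is looked up with integer division by the bin step (the numbers here are made up):

import numpy as np

maxVal, STEP = 100, 10
pts = np.random.randint(0, maxVal, size=(1000, 2))
edges = np.arange(0, maxVal + STEP, STEP)
H, _, _ = np.histogram2d(pts[:, 0], pts[:, 1], bins=[edges, edges], density=True)
u, v = pts[:, 0] // STEP, pts[:, 1] // STEP
heights = H[u, v]                          # bin height for each sample point
alpha = 90 + 128.0 * heights / np.amax(H)  # alpha ramp as in the snippet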
Example #3
 def setMask(self):
     """
     Build the layer mask from the image alpha channel.
      The image alpha channel is recorded
     in the red channel of the mask.
     """
     layer = self.graphicsScene.layer
     currentImg = layer.getCurrentImage()
     imgBuf = QImageBuffer(currentImg)
     # resize the alpha channel
     imgmask = cv2.resize(imgBuf[:, :, 3], (layer.width(), layer.height()))
     mask = QImageBuffer(layer.mask)
     mask[:, :, 2] = imgmask
     layer.applyToStack()
     layer.parentImage.onImageChanged()
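Note that cv2.resize, used above to fit the alpha channel to the layer, takes dsize as (width, height), the reverse of numpy shape order:

import cv2
import numpy as np

a = np.zeros((10, 20), dtype=np.uint8)  # shape (h, w) = (10, 20)
b = cv2.resize(a, (40, 30))             # dsize = (w, h)
assert b.shape == (30, 40)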
Example #4
def bLUeFloodFill(layer, x, y, color):
    """
    Flood fills a region of a drawing layer.
    x, y are the seed coordinates.
    @param layer:
    @type layer: QLayerImage
    @param x:
    @type x: float
    @param y:
    @type y: float
    @param color: filling color
    @type color: QColor
    """
    img = layer.sourceImg
    w, h = img.width(), img.height()
    buf0 = QImageBuffer(img)
    # preparing opencv data
    buf = np.ascontiguousarray(buf0[..., :3][..., ::-1])
    mask = np.zeros((h + 2, w + 2), dtype=np.uint8)
    # flood filling
    if 0 <= x < w and 0 <= y < h:
        cv2.floodFill(buf, mask, (int(x), int(y)),  # the seed point must be integer
                      (color.red(), color.green(), color.blue()))
    buf0[..., :3] = buf[..., ::-1]
    # set the alpha channel of the filled region
    buf0[mask[1:-1, 1:-1] == 1, 3] = color.alpha()
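A standalone check of the cv2.floodFill mask convention used above: the mask must be 2 pixels larger than the image in each dimension, and filled pixels are flagged in mask[1:-1, 1:-1]:

import cv2
import numpy as np

img = np.zeros((4, 4, 3), dtype=np.uint8)
mask = np.zeros((4 + 2, 4 + 2), dtype=np.uint8)
cv2.floodFill(img, mask, (0, 0), (255, 0, 0))
assert mask[1:-1, 1:-1].all()  # the uniform image is a single region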
Example #5
 def __init__(self, w, h, converter, hue, sat):
     """
     Build a linear gradient of size (w, h) with variable brightnesses
     and fixed hue and sat. The parameter converter defines the color space
     which is used (HSV, HSpB,...).
     @param w: image width
     @type w: int
     @param h: image height
     @type h: int
     @param converter: color space converter
     @type converter: cmConverter
     @param hue: hue value
     @type hue: int or float
     @param sat: saturation value
     @type sat: int or float
      @return: the gradient image
     @rtype: bImage
     """
     super().__init__(w, h, QImage.Format_ARGB32)
     self.cModel = converter
     imgBuf = QImageBuffer(self)
     # set alpha
     imgBuf[:, :, 3] = 255
     imgBuf = imgBuf[:, :, :3][:, :, ::-1]
     # build the array of (hue, sat, b), b in [0,1], shape=(w,3)
      a = np.zeros((w, 2), dtype=float) + [hue, sat]
     hsArray = np.concatenate(
         (a, (np.arange(w) / (w - 1))[..., np.newaxis]), axis=1)
     # convert to rgb and broadcast to imgBuf
     imgBuf[:, :, :] = converter.cm2rgbVec(hsArray[np.newaxis, ...])
     self.updatePixmap()
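A rough standalone analogue of this gradient, using OpenCV's HSV in place of the project's cmConverter (for uint8 images, OpenCV encodes hue in 0..180 and sat/value in 0..255):

import cv2
import numpy as np

w, h = 256, 20
hsv = np.zeros((h, w, 3), dtype=np.uint8)
hsv[..., 0] = 30                                       # fixed hue
hsv[..., 1] = 200                                      # fixed saturation
hsv[..., 2] = np.linspace(0, 255, w).astype(np.uint8)  # brightness ramp
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)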
Example #6
def cmsConvertQImage(image, cmsTransformation=None):
    """
    Apply a Cms transformation to a copy of a QImage and
    return the transformed image.
    If cmsTransformation is None, the input image is returned (no copy).
    @param image: image to transform
    @type image: QImage
    @param cmsTransformation: Cms transformation
    @type cmsTransformation: ImageCmsTransform
    @return: The converted QImage
    @rtype: QImage
    """
    if cmsTransformation is None:
        return image
    image = image.copy()
    buf = QImageBuffer(image)[:, :, :3][:, :, ::-1]
    # convert to the PIL context and apply cmsTransformation
    bufC = np.ascontiguousarray(buf)
    PIL_img = Image.frombuffer(
        'RGB', (image.width(), image.height()), bufC, 'raw', 'RGB', 0,
        1)  # these 3 weird parameters are recommended by a runtime warning !!!
    applyTransform(PIL_img, cmsTransformation, 1)  # 1=in place
    # back to the image buffer
    buf[...] = np.frombuffer(PIL_img.tobytes(),
                             dtype=np.uint8).reshape(buf.shape)
    return image
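One way to obtain a cmsTransformation for the function above is Pillow's ImageCms; a sketch, with a placeholder profile path:

from PIL import ImageCms

srgb = ImageCms.createProfile('sRGB')
monitor = ImageCms.getOpenProfile('/path/to/monitor.icc')  # placeholder path
transform = ImageCms.buildTransform(srgb, monitor, 'RGB', 'RGB')
# converted = cmsConvertQImage(qimg, cmsTransformation=transform)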
Example #7
  def maskErode():
      """
      Reduce the masked part of the image.
      """
      kernel = np.ones((5, 5), np.uint8)
     buf = QImageBuffer(layer.mask)
     # CAUTION dilate increases values (max filter), so it reduces the masked part of the image
     buf[:, :, 2] = cv2.dilate(buf[:, :, 2], kernel, iterations=1)
     for l in self.img.layersStack:
         l.updatePixmap(maskOnly=True)
     self.img.onImageChanged()
Example #8
 def setColorMaskOpacity(self, value):
     """
     Set mask alpha channel to value * 255 / 100
     @param value:
     @type value: int in range 0..100
     """
     buf = QImageBuffer(self.mask)
     buf[:, :, 3] = np.uint8(value * 255 / 100)
Example #9
 def setColorMaskOpacity(self, value):
     """
     Set mask alpha channel to value
     @param value:
     @type value: int in range 0..255
     """
     self.colorMaskOpacity = value
     buf = QImageBuffer(self.mask)
     buf[:, :, 3] = np.uint8(value)
Example #10
def clip(image, mask, inverted=False):
    """
    Clip an image by applying a mask to its alpha channel (in place).
    @param image: image to clip
    @type image: QImage
    @param mask: clipping mask
    @type mask: QImage
    @param inverted: invert the mask alpha channel
    @type inverted: boolean
    """
    bufImg = QImageBuffer(image)
    bufMask = QImageBuffer(mask)
    if inverted:
        bufMask = bufMask.copy()
        bufMask[:, :, 3] = 255 - bufMask[:, :, 3]
    bufImg[:, :, 3] = bufMask[:, :, 3]
Example #11
 def setPb(self, pb):
     """
      Set brightness and update the image.
      @param pb: perceived brightness
      @type pb: float in range 0..1
     """
     self.pb = pb
     self.hsArray[:, :, 2] = pb
     imgBuf = QImageBuffer(self)[:, :, :3][:, :, ::-1]
     imgBuf[:, :, :] = self.cModel.cm2rgbVec(self.hsArray)
     self.updatePixmap()
Example #12
 def maskErode():
     """
     Reduce the masked part of the image
     """
     buf = QImageBuffer(layer.mask)
     buf[:, :, 2] = vImage.maskErode(buf[:, :, 2])
     for l in self.img.layersStack:
         l.updatePixmap(maskOnly=True)
     self.img.prLayer.update()
     self.img.onImageChanged()
Example #13
 def maskSmooth():
     """
     Smooth the mask boundary
     """
     buf = QImageBuffer(layer.mask)
     buf[:, :, 2] = vImage.maskSmooth(buf[:, :, 2])
     for l in self.img.layersStack:
         l.updatePixmap(maskOnly=True)
     self.img.prLayer.update()
     self.img.onImageChanged()
Example #14
 def maskDilate():
     """
     Increase the masked part of the image
     """
     buf = QImageBuffer(layer.mask)
     buf[:, :, 2] = vImage.maskDilate(buf[:, :, 2])
     for l in self.img.layersStack:
         l.updatePixmap(maskOnly=True)
     self.img.prLayer.applyNone()
     self.img.onImageChanged()
Example #15
 def updatePixmap(self, maskOnly=False):
     """
      Synchronize qPixmap and rPixmap with the image layer and mask.
      If maskOnly is True, cmImage is not updated.
      If maskIsEnabled is False, the mask is not shown.
      If maskIsEnabled is True, then
          - if maskIsSelected is True, the mask is drawn over
            the layer as a color mask;
          - if maskIsSelected is False, the mask is drawn as an
            opacity mask, setting the image opacity to that of the mask
            (mode DestinationIn). The mask color is not used.
     @param maskOnly: default False
     @type maskOnly: boolean
     """
     currentImage = self.getCurrentImage()
     # apply color management to presentation layer
     if icc.COLOR_MANAGE and self.parentImage is not None and getattr(
             self, 'role', None) == 'presentation':
         # CAUTION : reset alpha channel
         img = convertQImage(currentImage,
                             transformation=self.parentImage.
                             colorTransformation)  # time 0.66 s for 15 Mpx.
         # restore alpha channel
          # img = img.convertToFormat(currentImage.format()) # TODO 15/10/18 done by convertQImage()
         buf0 = QImageBuffer(img)
         buf1 = QImageBuffer(currentImage)
         buf0[:, :, 3] = buf1[:, :, 3]
     else:
         img = currentImage
     qImg = img
     rImg = currentImage
     if self.maskIsEnabled:
         #qImg = vImage.visualizeMask(qImg, self.mask, color=self.maskIsSelected, clipping=self.isClipping)
         rImg = vImage.visualizeMask(rImg,
                                     self.mask,
                                     color=self.maskIsSelected,
                                     clipping=self.isClipping)
     self.qPixmap = QPixmap.fromImage(qImg)
     self.rPixmap = QPixmap.fromImage(rImg)
     self.setModified(True)
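A minimal sketch of the DestinationIn composition mentioned in the docstring (vImage.visualizeMask is project code; this shows only the Qt mechanism):

from PyQt5.QtGui import QPainter  # binding assumed; the project may use another Qt wrapper

def applyOpacityMask(img, mask):
    # DestinationIn keeps img's colors and replaces its alpha
    # channel with that of mask
    qp = QPainter(img)
    qp.setCompositionMode(QPainter.CompositionMode_DestinationIn)
    qp.drawImage(0, 0, mask)
    qp.end()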
Example #16
    def __init__(self,
                 name,
                 baseSize,
                 contourPath,
                 presetFilename=None,
                 image=None):
        """

        @param name:
        @type name: str
        @param baseSize: brush base size (pixels)
        @type baseSize: int
        @param contourPath: base shape of the brush family
        @type contourPath: QPainterPath
        @param presetFilename: preset file
        @type presetFilename: str
        @param image: preset image (alternative to presetFilename)
        @type image: QImage
        """
        self.name = name
        self.baseSize = baseSize
        # init the brush pixmap
        self.basePixmap = QPixmap(self.baseSize, self.baseSize)
        # to get an alpha channel, the pixmap must first be filled with an opacity < 255
        self.basePixmap.fill(QColor(0, 0, 0, 0))
        if self.name == 'eraser':
            self.basePixmap.fill(QColor(0, 0, 0, 255))
        self.contourPath = contourPath
        # init brush cursor
        self.baseCursor = QPixmap(self.baseSize, self.baseSize)
        self.baseCursor.fill(QColor(0, 0, 0, 0))
        qp = QPainter(self.baseCursor)
        pen = qp.pen()
        pen.setWidth(self.baseSize // 20)  # setWidth() expects an int
        qp.setPen(pen)  # needed!!
        qp.drawPath(contourPath)
        qp.end()
        self.__pxmp = None
        self.bOpacity = 1.0
        self.bFlow = 1.0
        self.bHardness = 1.0
        self.preset = None
        if presetFilename is not None:
            img = QImage(presetFilename)
        elif image is not None:
            img = image
        else:
            return
        img = img.convertToFormat(QImage.Format_ARGB32)
        buf = QImageBuffer(img)
        b = np.sum(buf[..., :3], axis=-1, dtype=float)
        b /= 3
        buf[..., 3] = b
        self.preset = QPixmap.fromImage(img)
Example #17
 def initHald(self):
     """
     Build a hald image (as a QImage) from identity 3D LUT.
     """
     if not self.cachesEnabled:
         return
     s = int(LUT3DIdentity.size**(3.0 / 2.0)) + 1
     buf0 = LUT3DIdentity.toHaldArray(s, s).haldBuffer
     # self.hald = QLayer(QImg=QImage(QSize(190,190), QImage.Format_ARGB32))
     self.hald = QImage(QSize(s, s), QImage.Format_ARGB32)
     buf1 = QImageBuffer(self.hald)
     buf1[:, :, :3] = buf0
     buf1[:, :, 3] = 255
     self.hald.parentImage = self.parentImage
Example #18
 def getHald(self):
     if not self.cachesEnabled:
         s = int(LUT3DIdentity.size**(3.0 / 2.0)) + 1
         buf0 = LUT3DIdentity.toHaldArray(s, s).haldBuffer
         # self.hald = QLayer(QImg=QImage(QSize(190,190), QImage.Format_ARGB32))
         hald = QImage(QSize(s, s), QImage.Format_ARGB32)
         buf1 = QImageBuffer(hald)
         buf1[:, :, :3] = buf0
         buf1[:, :, 3] = 255
         hald.parentImage = self.parentImage
         return hald
     if self.hald is None:
         self.initHald()
     return self.hald
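A rough numpy equivalent of LUT3DIdentity.toHaldArray for a LUT of size N: the N**3 LUT nodes are linearized row-major into an s x s image (exact node and channel ordering are project assumptions; N = 33 gives s = 190, which matches the commented-out QSize(190, 190)):

import numpy as np

N = 33                                    # assumed LUT size
s = int(N ** (3.0 / 2.0)) + 1             # 190 for N = 33
i = np.arange(N ** 3)
r, g, b = i % N, (i // N) % N, i // N ** 2
nodes = (np.stack((r, g, b), axis=-1) * 255.0 / (N - 1)).astype(np.uint8)
hald = np.zeros((s * s, 3), dtype=np.uint8)
hald[:N ** 3] = nodes                     # trailing pixels stay black
hald = hald.reshape(s, s, 3)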
Example #19
def PilImageToQImage(pilimg):
    """
    Converts a PIL image (mode RGB) to a QImage (format RGB32)
    @param pilimg: The PIL image, mode RGB
    @type pilimg: PIL image
    @return: the converted image
    @rtype: QImage
    """
    ############################################
    # CAUTION: PIL ImageQt causes a memory leak!!!
    # return ImageQt(pilimg)
    ############################################
    im_data = PilImgToRaw(pilimg)
    Qimg = QImage(im_data['im'].size[0], im_data['im'].size[1],
                  im_data['format'])
    buf = QImageBuffer(Qimg).ravel()
    buf[:] = np.frombuffer(im_data['data'], dtype=np.uint8)
    return Qimg
Example #20
def QImageToPilImage(qimg):
    """
    Converts a QImage (format ARGB32 or RGB32) to a PIL image
    @param qimg: The QImage to convert
    @type qimg: QImage
    @return: PIL image  object, mode RGB
    @rtype: PIL Image
    """
    a = QImageBuffer(qimg)
    if (qimg.format() == QImage.Format_ARGB32) or (qimg.format()
                                                   == QImage.Format_RGB32):
        # convert pixels from BGRA or BGRX to RGB
        a = np.ascontiguousarray(
            a[:, :, :3][:, :, ::-1]
        )  # ascontiguousarray is mandatory to speed up Image.fromarray (x3)
    else:
        raise ValueError("QImageToPilImage : unrecognized format : %s" %
                         qimg.format())
    return Image.fromarray(a)
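A round-trip sanity check for the two converters (QImage and QColor from the Qt bindings are assumed imported, as elsewhere in these snippets):

qimg = QImage(64, 64, QImage.Format_ARGB32)
qimg.fill(QColor(10, 20, 30))
pil = QImageToPilImage(qimg)   # -> PIL Image, mode RGB
back = PilImageToQImage(pil)   # -> QImage again
assert back.size() == qimg.size()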
Example #21
 def setBackgroundImage(self):
     img = QImage(QSize(256, 256), QImage.Format_ARGB32)
     img.fill(QColor(100, 100, 100))
     a = np.arange(256)
     buf = np.meshgrid(a, a)
     buf1 = QImageBuffer(img)[:, :, :3][:, :, ::-1]
     buf1[:, :, 0], buf1[:, :, 1] = buf
     buf1[:, :, 2] = 1
     buf2 = np.tensordot(buf1, self.invM, axes=(-1, -1)) * 255
     np.clip(buf2, 0, 255, out=buf2)
     buf1[...] = buf2
     qp = QPainter(img)
     qp.drawLine(self.R, self.G)
     qp.drawLine(self.G, self.B)
     qp.drawLine(self.B, self.R)
     b = (self.B + self.R + self.G) / 3.0
     qp.drawLine(b - QPointF(10, 0), b + QPointF(10, 0))
     qp.drawLine(b - QPointF(0, 10), b + QPointF(0, 10))
     qp.end()
     self.scene().addItem(QGraphicsPixmapItem(QPixmap.fromImage(img)))
Example #22
    def __init__(self, w, h, converter, bright=defaultBr, border=0):
        """
        Builds a (hue, sat) color wheel image of size (w, h)
        For fast display, the correspondence with RGB values is tabulated
        for each value of the brightness.
        @param w: image width
        @type w: int
        @param h: image height
        @type h: int
        @param converter: color space converter
        @type converter: cmConverter
        @param bright: image brightness
        @type bright: float
        @param border: image border
        @type border: int
        """
        w += 2 * border
        h += 2 * border
        super().__init__(w, h, QImage.Format_ARGB32)
        self.pb = bright
        self.hsArray = None
        self.cModel = converter
        # uninitialized ARGB image
        self.border = border
        imgBuf = QImageBuffer(self)
        # set alpha channel
        imgBuf[:, :, 3] = 255
        # get RGB buffer
        imgBuf = imgBuf[:, :, :3][:, :, ::-1]

        # init array of grid (cartesian) coordinates
        coord = np.dstack(np.meshgrid(np.arange(w), -np.arange(h)))

        # center  : i1 = i - cx, j1 = -j + cy
        cx = w / 2
        cy = h / 2
        coord = coord + [-cx, cy]  # np.array([-cx, cy])

        # init hue and sat arrays as polar coordinates.
        # arctan2 values are in range -pi, pi
        hue = np.arctan2(coord[:, :, 1],
                         coord[:, :, 0]) * (180.0 / np.pi) + self.rotation
        # hue range 0..360, sat range 0..1
        hue = hue - np.floor(hue / 360.0) * 360.0
        sat = np.linalg.norm(coord, axis=2, ord=2) / (cx - border)
        np.minimum(sat, 1.0, out=sat)
        # init a stack of image buffers, one for each brightness in integer range 0..100
        hsBuf = np.dstack((hue, sat))[np.newaxis, :]  # shape (1, h, w, 2)
        hsBuf = np.tile(hsBuf, (101, 1, 1, 1))  # (101, h, w, 2)
        pArray = np.arange(101, dtype=float) / 100.0
        pBuf = np.tile(pArray[:, np.newaxis, np.newaxis],
                       (1, h, w))  # 101, h, w
        hspBuf = np.stack((hsBuf[:, :, :, 0], hsBuf[:, :, :, 1], pBuf),
                          axis=-1)  # 101, h, w, 3
        # convert the buffers to rgb
        self.BrgbBuf = converter.cm2rgbVec(hspBuf)  # shape 101, h, w, 3
        p = int(bright * 100.0)
        # select the right image buffer
        self.hsArray = hspBuf[p, ...]
        imgBuf[:, :, :] = self.BrgbBuf[p, ...]
        self.updatePixmap()
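A quick check of the polar mapping used above (self.rotation assumed 0): after recentering, hue is the angle measured counter-clockwise from due east:

import numpy as np

coord = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])  # relative to center
hue = np.degrees(np.arctan2(coord[:, 1], coord[:, 0]))
hue -= np.floor(hue / 360.0) * 360.0
# hue -> [0., 90., 180.]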
Example #23
def rawPostProcess(rawLayer, pool=None):
    """
    Raw layer development.
    Processing order is the following:
         1 - postprocessing
         2 - profile look table
         3 - profile and user tone curve
         4 - contrast correction
         5 - saturation correction
    A pool of workers is used to apply the
    profile look table.
    A ValueError is raised if rawImage
    is not an attribute of rawLayer.parentImage.
    @param rawLayer: development layer
    @type rawLayer: QLayer
    @param pool: multiprocessing pool
    @type pool: multiprocessing.Pool
    """
    # postprocess output bits per channel
    output_bpc = 8
    max_output = 255 if output_bpc == 8 else 65535
    if rawLayer.parentImage.isHald:
        raise ValueError('Cannot build a 3D LUT from raw stack')

    # get adjustment form and rawImage
    adjustForm = rawLayer.getGraphicsForm()  # self.view.widget()
    options = adjustForm.options

    # show the Tone Curve form
    if options['cpToneCurve']:
        toneCurveShowFirst = adjustForm.showToneSpline()
    else:
        toneCurveShowFirst = False

    # get RawPy instance
    rawImage = getattr(rawLayer.parentImage, 'rawImage', None)
    if rawImage is None:
        raise ValueError("rawPostProcessing : not a raw image")
    currentImage = rawLayer.getCurrentImage()

    ##################
    # Control flags
    # postProcessCache is invalidated (reset to None) by graphicsRaw.updateLayer (graphicsRaw.dataChanged event handler).
    # bufCache_HSV_CV32 is invalidated (reset to None) by camera profile related events.
    doALL = rawLayer.postProcessCache is None
    if not doALL:
        parentImage = rawLayer.parentImage
        if rawLayer.half != parentImage.useThumb:
            rawLayer.postProcessCache, rawLayer.bufCache_HSV_CV32 = None, None
            doALL = True
    doCameraLookTable = options['cpLookTable'] and (
        doALL or rawLayer.bufCache_HSV_CV32 is None)
    half_size = rawLayer.parentImage.useThumb
    #################

    ######################################################################################################################
    # process raw image (16 bits mode)                        camera ------diag(multipliers)----> camera
    # post processing pipeline (from libraw):                   ^
    # - black subtraction                                       | CM=rawpyObj.rgb_xyz_matrix
    # - exposure correction                                     |
    # - white balance                                           |
    # - demosaic                                               XYZ
    # - data scaling to use full range
    # - conversion to output color space
    # - gamma curve and brightness correction : gamma(imax) = 1, imax = 8*white/brightness
    ######################################################################################################################

    use_auto_wb = options['Auto WB']
    use_camera_wb = options['Camera WB']
    # range 0.0..1.0 (1.0 = full preservation)
    exp_preserve_highlights = 0.99 if options['Preserve Highlights'] else 0.2
    if doALL:
        ##############################
        # get postprocessing parameters
        ##############################
        # no_auto_scale = False  don't use : green shift
        gamma = (2.222, 4.5)  # default REC BT 709 (exponent, slope)
        # gamma = (2.4, 12.92)  # sRGB (exponent, slope) cf. https://en.wikipedia.org/wiki/SRGB#The_sRGB_transfer_function_("gamma")
        exp_shift = adjustForm.expCorrection if not options['Auto Brightness'] else 0
        no_auto_bright = (not options['Auto Brightness'])

        bright = adjustForm.brCorrection  # default 1, should be > 0
        hv = adjustForm.overexpValue
        highlightmode = rawpy.HighlightMode.Clip if hv == 0 \
            else rawpy.HighlightMode.Ignore if hv == 1 \
            else rawpy.HighlightMode.Blend if hv == 2 \
            else rawpy.HighlightMode.ReconstructDefault
        dv = adjustForm.denoiseValue
        fbdd_noise_reduction = rawpy.FBDDNoiseReductionMode.Off if dv == 0 \
            else rawpy.FBDDNoiseReductionMode.Light if dv == 1 \
            else rawpy.FBDDNoiseReductionMode.Full
        #############################################
        # build sample images for a set of multipliers
        if adjustForm.sampleMultipliers:
            bufpost16 = np.empty((rawLayer.height(), rawLayer.width(), 3),
                                 dtype=np.uint16)
            m = adjustForm.rawMultipliers
            co = np.array([0.85, 1.0, 1.2])
            mults = itertools.product(m[0] * co, [m[1]], m[2] * co)
            adjustForm.samples = []
            for i, mult in enumerate(mults):
                adjustForm.samples.append(mult)
                mult = (mult[0], mult[1], mult[2], mult[1])
                print(mult, '   ', m)
                bufpost_temp = rawImage.postprocess(
                    half_size=half_size,
                    output_color=rawpy.ColorSpace.sRGB,
                    output_bps=output_bpc,
                    exp_shift=exp_shift,
                    no_auto_bright=no_auto_bright,
                    use_auto_wb=use_auto_wb,
                    use_camera_wb=False,  # options['Camera WB'],
                    user_wb=mult,
                    gamma=gamma,
                    exp_preserve_highlights=exp_preserve_highlights,
                    bright=bright,
                    highlight_mode=highlightmode,
                    fbdd_noise_reduction=rawpy.FBDDNoiseReductionMode.Off)
                row = i // 3
                col = i % 3
                w, h = int(bufpost_temp.shape[1] / 3), int(
                    bufpost_temp.shape[0] / 3)
                bufpost_temp = cv2.resize(bufpost_temp, (w, h))
                bufpost16[row * h:(row + 1) * h,
                          col * w:(col + 1) * w, :] = bufpost_temp
        # develop
        else:
            bufpost16 = rawImage.postprocess(
                half_size=half_size,
                output_color=rawpy.ColorSpace.raw,  # XYZ
                output_bps=output_bpc,
                exp_shift=exp_shift,
                no_auto_bright=no_auto_bright,
                use_auto_wb=use_auto_wb,
                use_camera_wb=use_camera_wb,
                user_wb=adjustForm.rawMultipliers,
                gamma=(1, 1),
                exp_preserve_highlights=exp_preserve_highlights,
                bright=bright,
                highlight_mode=highlightmode,
                fbdd_noise_reduction=fbdd_noise_reduction,
                median_filter_passes=1)
            # save image into post processing cache
            rawLayer.postProcessCache = cv2.cvtColor(
                bufpost16.astype(np.float32) / max_output, cv2.COLOR_RGB2HSV)
            rawLayer.half = half_size
            rawLayer.bufpost16 = bufpost16
    else:
        pass

    # postProcessCache is in raw color space.
    # and must be converted to linear RGB. We follow
    # the guidelines of Adobe dng spec. (chapter 6).
    # If we have a valid dng profile and valid ForwardMatrix1
    # and ForwardMatrix2 matrices, we first convert to XYZ_D50 using the interpolated
    # ForwardMatrix for T and next from XYZ_D50 to RGB.
    # If we have no valid dng profile, we reinit the multipliers and
    # apply a Bradford chromatic adaptation matrix.
    m1, m2, m3 = (adjustForm.asShotMultipliers[:3] if use_camera_wb
                  else adjustForm.rawMultipliers[:3])
    D = np.diag((1 / m1, 1 / m2, 1 / m3))
    tempCorrection = adjustForm.asShotTemp if use_camera_wb else adjustForm.tempCorrection
    MM = bradfordAdaptationMatrix(6500, tempCorrection)
    MM1 = bradfordAdaptationMatrix(6500, 5000)
    FM = None
    myHighlightPreservation = 0.8 if exp_preserve_highlights > 0.9 else 1.0
    if adjustForm.dngDict:
        try:
            FM = interpolatedForwardMatrix(adjustForm.tempCorrection,
                                           adjustForm.dngDict)
        except ValueError:
            pass
    if FM is not None:
        raw2sRGBMatrix = sRGB_lin2XYZInverse @ MM1 @ FM * myHighlightPreservation
    else:
        raw2sRGBMatrix = sRGB_lin2XYZInverse @ MM @ adjustForm.XYZ2CameraInverseMatrix @ D
    bufpost16 = np.tensordot(rawLayer.bufpost16, raw2sRGBMatrix, axes=(-1, -1))
    M = np.max(bufpost16) / 255.0
    bufpost16 = bufpost16 / M
    np.clip(bufpost16, 0, 255, out=bufpost16)
    rawLayer.postProcessCache = cv2.cvtColor(
        (bufpost16 / max_output).astype(np.float32), cv2.COLOR_RGB2HSV)

    # update histogram
    s = rawLayer.postProcessCache.shape
    tmp = bImage(s[1], s[0], QImage.Format_RGB32)
    buf = QImageBuffer(tmp)
    buf[:, :, :] = (rawLayer.postProcessCache[:, :, 2, np.newaxis] *
                    255).astype(np.uint8)
    rawLayer.linearImg = tmp

    if getattr(adjustForm, "toneForm", None) is not None:
        rawLayer.histImg = tmp.histogram(
            size=adjustForm.toneForm.scene().axeSize,
            bgColor=adjustForm.toneForm.scene().bgColor,
            range=(0, 255),
            chans=channelValues.Br)  # mode='Luminosity')
        adjustForm.toneForm.scene().quadricB.histImg = rawLayer.histImg
        adjustForm.toneForm.scene().update()

    # beginning of the camera profile phase : update buffers from the last post processed image
    bufHSV_CV32 = rawLayer.postProcessCache.copy()
    rawLayer.bufCache_HSV_CV32 = bufHSV_CV32.copy()

    ##########################
    # Profile look table
    # it must be applied to the linear buffer and
    # before tone curve (cf. Adobe dng spec. p. 65)
    ##########################
    if doCameraLookTable:
        hsvLUT = dngProfileLookTable(adjustForm.dngDict)
        if hsvLUT.isValid:
            divs = hsvLUT.divs
            steps = tuple(
                [360 / divs[0], 1.0 / (divs[1] - 1),
                 1.0 / (divs[2] - 1)])  # TODO -1 added 16/01/18 validate
            interp = chosenInterp(pool,
                                  currentImage.width() * currentImage.height())
            coeffs = interp(hsvLUT.data, steps, bufHSV_CV32, convert=False)
            bufHSV_CV32[:, :,
                        0] = np.mod(bufHSV_CV32[:, :, 0] + coeffs[:, :, 0],
                                    360)
            bufHSV_CV32[:, :, 1:] = bufHSV_CV32[:, :, 1:] * coeffs[:, :, 1:]
            np.clip(bufHSV_CV32, (0, 0, 0), (360, 1, 1), out=bufHSV_CV32)
            rawLayer.bufCache_HSV_CV32 = bufHSV_CV32.copy()
    else:
        pass
    #############
    # tone curve
    ############
    buf = adjustForm.dngDict.get('ProfileToneCurve', [])
    # apply profile tone curve, if any
    if buf:  # non empty list
        LUTXY = dngProfileToneCurve(buf).toLUTXY(maxrange=255)
        bufHSV_CV32[:, :, 2] = LUTXY[(bufHSV_CV32[:, :, 2] * 255).astype(
            np.uint16)] / 255.0
    # apply user tone curve
    toneForm = adjustForm.toneForm
    if toneForm is not None:
        if toneForm.isVisible():
            userLUTXY = toneForm.scene().quadricB.LUTXY
            bufHSV_CV32[:, :,
                        2] = userLUTXY[(bufHSV_CV32[:, :, 2] * 255).astype(
                            np.uint16)] / 255
    rawLayer.bufCache_HSV_CV32 = bufHSV_CV32.copy(
    )  # CAUTION : must be outside of if toneForm.

    # beginning of the contrast-saturation phase : update buffer from the last camera profile application
    bufHSV_CV32 = rawLayer.bufCache_HSV_CV32.copy()
    ###########
    # contrast and saturation correction (V channel).
    # We apply an automatic histogram equalization
    # algorithm, well suited for multimodal histograms.
    ###########

    if adjustForm.contCorrection > 0:
        # warp should be in range 0..1.
        # warp = 0 means that no additional warping is done, but
        # the histogram is always stretched.
        warp = max(0, (adjustForm.contCorrection - 1)) / 10
        bufHSV_CV32[:, :, 2], a, b, d, T = warpHistogram(
            bufHSV_CV32[:, :, 2],
            valleyAperture=0.05,
            warp=warp,
            preserveHigh=options['Preserve Highlights'],
            spline=None if rawLayer.autoSpline else rawLayer.getMmcSpline())
        # show the spline
        if rawLayer.autoSpline and options['manualCurve']:
            rawLayer.getGraphicsForm().setContrastSpline(a, b, d, T)
            rawLayer.autoSpline = False
    if adjustForm.satCorrection != 0:
        satCorr = adjustForm.satCorrection / 100  # range -0.5..0.5
        # approx. map -0.5...0.0...0.5 --> +inf...1.0...0.0
        alpha = 1.0 / (0.501 + satCorr) - 1.0
        # tabulate x**alpha
        LUT = np.power(np.arange(256) / 255, alpha)
        # convert saturation s to s**alpha
        bufHSV_CV32[:, :, 1] = LUT[(bufHSV_CV32[:, :, 1] * 255).astype(int)]
    # back to RGB
    bufpostF32_1 = cv2.cvtColor(
        bufHSV_CV32, cv2.COLOR_HSV2RGB)  #* 65535 # .astype(np.uint16)
    # np.clip(bufpostF32_1, 0, 1, out=bufpostF32_1) # TODO 8/11/18 removed

    ###################
    # apply gamma curve
    ###################
    bufpostF32_255 = rgbLinear2rgbVec(bufpostF32_1)
    # np.clip(bufpostF32_255, 0, 255, out=bufpostF32_255)  # clip not needed after rgbLinear2rgbVec thresholds correction 8/11/18
    #############################
    # Conversion to 8 bits/channel
    #############################
    bufpostUI8 = bufpostF32_255.astype(
        np.uint8
    )  # (bufpost16.astype(np.float32) / 256).astype(np.uint8) TODO 5/11/18 changed
    ###################################################
    # bufpostUI8 = (bufpost16/256).astype(np.uint8)
    #################################################
    if rawLayer.parentImage.useThumb:
        bufpostUI8 = cv2.resize(bufpostUI8,
                                (currentImage.width(), currentImage.height()))

    bufOut = QImageBuffer(currentImage)
    bufOut[:, :, :3][:, :, ::-1] = bufpostUI8
    # base layer : no need to forward the alpha channel
    rawLayer.updatePixmap()
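A standalone sketch of the saturation mapping s -> s**alpha used in the last phase; satCorr = 0 gives alpha close to 1 (identity), positive values give alpha < 1 (saturation boost):

import numpy as np

def satLUT(satCorr):
    # satCorr in -0.5..0.5 maps to alpha in +inf..1.0..~0.0
    alpha = 1.0 / (0.501 + satCorr) - 1.0
    return np.power(np.arange(256) / 255, alpha)

lut = satLUT(0.25)  # alpha ~ 0.33: boosts saturation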
Example #24
    def save(self, filename, quality=-1, compression=-1):
        """
        Overrides QImage.save().
        Writes the presentation layer to a file and returns a
        thumbnail with standard size (160x120 or 120x160).
        Raises IOError if the saving fails.
        @param filename:
        @type filename: str
        @param quality: integer value in range 0..100, or -1
        @type quality: int
        @param compression: integer value in range 0..100, or -1
        @type compression: int
        @return: thumbnail of the saved image
        @rtype: QImage
        """
        def transparencyCheck(buf):
            if np.any(buf[:, :, 3] < 255):
                dlgWarn('Transparency will be lost. Use PNG format instead')

        # don't save thumbnails
        if self.useThumb:
            return None
        # get the final image from the presentation layer.
        # This image is NOT color managed (prLayer.qPixmap
        # only is color managed)
        img = self.prLayer.getCurrentImage()
        # imagewriter and QImage.save are unusable for tif files,
        # due to bugs in libtiff, hence we use opencv imwrite.
        fileFormat = filename[-3:].upper()
        buf = QImageBuffer(img)
        if fileFormat == 'JPG':
            transparencyCheck(buf)
            buf = buf[:, :, :3]
            params = [cv2.IMWRITE_JPEG_QUALITY,
                      quality]  # quality range 0..100
        elif fileFormat == 'PNG':
            params = [cv2.IMWRITE_PNG_COMPRESSION,
                      compression]  # compression range 0..9
        elif fileFormat == 'TIF':
            transparencyCheck(buf)
            buf = buf[:, :, :3]
            params = []
        else:
            raise IOError(
                "Invalid File Format\nValid formats are jpg, png, tif ")
        if self.isCropped:
            # make slices
            w, h = self.width(), self.height()
            w1, w2 = int(self.cropLeft), w - int(self.cropRight)
            h1, h2 = int(self.cropTop), h - int(self.cropBottom)
            buf = buf[h1:h2, w1:w2, :]
        # build thumbnail from the (possibly) cropped image
        # choose thumb size
        wf, hf = buf.shape[1], buf.shape[0]
        if wf > hf:
            wt, ht = 160, 120
        else:
            wt, ht = 120, 160
        thumb = ndarrayToQImage(
            np.ascontiguousarray(buf[:, :, :3][:, :, ::-1]),
            format=QImage.Format_RGB888).scaled(wt, ht, Qt.KeepAspectRatio)
        written = cv2.imwrite(filename, buf, params)  # BGR order
        if not written:
            raise IOError("Cannot write file %s " % filename)
        # self.setModified(False) # cannot be reset if the image is modified again
        return thumb