Example #1
    def process_gpu3(self,
                     img: np.ndarray,
                     sub_factor: float = 1.0):
        """Process image data.

        Takes 1 combined numpy (Mat) array and converts to UMat on the fly
        Converts combined image to UMat on gpu then transparent interface
        produces 2 sub image UMats in OPENCL
        OPENCV transparent interface will use OPENCL for processing, uses
        scaleAdd routine for subtraction step
        Approx 2.7 ms processing time on macbook pro (i7 + Intel Iris Pro)
        """
        uimg = cv2.UMat(img)
        img_l = cv2.UMat(uimg, (0, self.height), (0, self.width))
        img_r = cv2.UMat(uimg, (0, self.height), (self.width, 2 * self.width))
        result = cv2.scaleAdd(
            cv2.remap(
                img_r,
                self.defX,
                self.defY,
                cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=0,
            ),
            -sub_factor,
            img_l,
        )
        return result
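For reference, cv2.scaleAdd(src1, alpha, src2) computes alpha * src1 + src2 (the classic BLAS AXPY primitive), so the call above evaluates img_l - sub_factor * remap(img_r, ...). A minimal standalone sketch of that identity on float data (illustrative values, not part of the original class):

import cv2
import numpy as np

a = np.random.rand(4, 4).astype(np.float32)  # stands in for the remapped right image
b = np.random.rand(4, 4).astype(np.float32)  # stands in for img_l
out = cv2.scaleAdd(a, -0.5, b)               # b - 0.5 * a in a single call
assert np.allclose(out, b - 0.5 * a)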
Example #2
 def process_cpu1(self, img, sub_factor=1.0):
     # Takes 1 combined numpy (Mat) array and converts to UMat on the fly from numpy sub-images
     # OpenCV's transparent interface will use OpenCL for processing
     # Approx. 7.2 ms processing time on a MacBook Pro (i7 + Intel Iris Pro)
     result = cv2.scaleAdd(
         cv2.remap(img[:, self.width:],
                   self.defXcpu,
                   self.defYcpu,
                   cv2.INTER_LINEAR,
                   borderMode=cv2.BORDER_CONSTANT,
                   borderValue=0), -sub_factor, img[:, 0:self.width])
     return result
Example #3
 def tick(self):
     self.i += 1
     board = self.board
     buff1 = self.buff_bordered1
     buff2 = self.buff_bordered2
     buff2_in_roi = self.buff2_in_roi
     # Wrap the border so the update is computed on a torus
     cv2.copyMakeBorder(
         board, 1, 1, 1, 1, dst=buff1, borderType=cv2.BORDER_WRAP
     )
     # Convolve to accumulate each cell's neighbourhood count
     cv2.filter2D(buff1, -1, morph_kernel, dst=buff2)
     # Zero out counts above 3; keep counts of 3 or less
     cv2.threshold(buff2, 3, 0, cv2.THRESH_TOZERO_INV, dst=buff2)
     # Add the interior counts onto the board, then re-binarise (> 2 -> 1)
     cv2.scaleAdd(buff2_in_roi, 1, board, dst=board)
     cv2.threshold(board, 2, 1, cv2.THRESH_BINARY, dst=board)
     # Periodically sprinkle random live cells onto the board
     if self.i >= self.noise_interval > 0:
         self.i = 0
         cv2.randu(self.noise_indices, 0, self.n_pixels)
         self.noise[:] = 0
         self.noise.ravel()[self.noise_indices] = 1
         cv2.bitwise_or(
             cv2.UMat(self.noise), board, dst=board
         )
     return board
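The buffers and the morph_kernel this method relies on live in the surrounding class and module, which the snippet does not show. A minimal setup sketch under assumed names and shapes (everything below is inferred from the usage above, not taken from the original project); tick() can then be attached to this class unchanged:

import cv2
import numpy as np

H = W = 256
morph_kernel = np.ones((3, 3), np.float32)  # neighbourhood-sum kernel assumed by tick()

class Automaton:
    def __init__(self, noise_interval=100):
        self.i = 0
        self.noise_interval = noise_interval
        self.n_pixels = H * W
        self.board = cv2.UMat(np.random.randint(0, 2, (H, W), np.uint8))
        self.buff_bordered1 = cv2.UMat(H + 2, W + 2, cv2.CV_8U)
        self.buff_bordered2 = cv2.UMat(H + 2, W + 2, cv2.CV_8U)
        # Interior ROI of the bordered buffer, aligned with the board
        self.buff2_in_roi = cv2.UMat(self.buff_bordered2, (1, H + 1), (1, W + 1))
        self.noise = np.zeros((H, W), np.uint8)
        self.noise_indices = np.zeros((64, 1), np.int32)  # filled by cv2.randu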
Example #4
 def process_gpu1(self, img_l, img_r, sub_factor=1.0):
     # Takes 2 UMat images as arguments;
     # OpenCV's transparent interface will use OpenCL for processing
     # Approx. 2.8 ms processing time on a MacBook Pro (i7 + Intel Iris Pro),
     # but your images need to be separate UMats already
     # result = cv2.subtract(img_l,
     #                       cv2.remap(cv2.multiply(img_r,sub_factor),self.defX,self.defY,cv2.INTER_LINEAR,
     #                                              borderMode=cv2.BORDER_CONSTANT,borderValue=0))
     result = cv2.scaleAdd(
         cv2.remap(img_r,
                   self.defX,
                   self.defY,
                   cv2.INTER_LINEAR,
                   borderMode=cv2.BORDER_CONSTANT,
                   borderValue=0), -sub_factor, img_l)
     return result
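The commented-out variant above does the same arithmetic in separate multiply and subtract kernels; scaleAdd folds the scale and the add into one pass, which is where the speedup comes from. A quick standalone check of the equivalence (illustrative data, with sub_factor = 0.5 and an array standing in for the remap output):

import cv2
import numpy as np

img_l = np.random.rand(8, 8).astype(np.float32)
warped = np.random.rand(8, 8).astype(np.float32)  # stands in for the cv2.remap output
fused = cv2.scaleAdd(warped, -0.5, img_l)         # img_l - 0.5 * warped in one pass
twostep = cv2.subtract(img_l, cv2.multiply(warped, 0.5))
assert np.allclose(fused, twostep)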
Example #5
    def process_cpu1(self,
                     img: np.ndarray,
                     sub_factor: float = 1.0):
        """Process image data.

        Takes 1 combined numpy (Mat) array and converts to UMat on the fly
        from numpy subimages
        OPENCV transparent interface will use OPENCL for processing
        Approx 7.2 ms processing time on macbook pro (i7 + Intel Iris Pro)
        """
        result = cv2.scaleAdd(
            cv2.remap(
                img[:, self.width:],
                self.defXcpu,
                self.defYcpu,
                cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=0,
            ),
            -sub_factor,
            img[:, 0:self.width],
        )
        return result
Example #6
    def process_gpu1(
        self,
        img_l: cv2.UMat,
        img_r: cv2.UMat,
        sub_factor: float = 1.0,
    ):
        """Process image data.

        Takes 2 UMat images as arguments,
        OPENCV transparent interface will use OPENCL for processing
        Approx 2.8 ms processing time on macbook pro (i7 + Intel Iris Pro),
        but your images need to be separate UMats already
        """
        # result = cv2.subtract(
        #     img_l,
        #     cv2.remap(
        #         cv2.multiply(img_r, sub_factor),
        #         self.defX,
        #         self.defY,
        #         cv2.INTER_LINEAR,
        #         borderMode=cv2.BORDER_CONSTANT,
        #         borderValue=0,
        #     ),
        # )
        result = cv2.scaleAdd(
            cv2.remap(
                img_r,
                self.defX,
                self.defY,
                cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=0,
            ),
            -sub_factor,
            img_l,
        )
        return result
Example #7
    def run(self):
        count = 1

        images = self.files[self.idxStart:self.idxEnd + 1]
        nImages = len(images)

        for img in images:
            print("%s: Process (%d/%d) %s." % (self.name, count, nImages, img))

            # Load the image.
            cvImg = cv2.imread(img, cv2.IMREAD_UNCHANGED)

            # Dummy image.
            dummy = np.zeros_like(cvImg[:, :, 0])

            # Balance the input image
            for i in range(cvImg.shape[2]):
                cvImg[:, :, i] = cv2.scaleAdd(cvImg[:, :, i], self.bf[i],
                                              dummy)
                cvImg[:, :, i] = cv2.multiply(cvImg[:, :, i],
                                              self.vcMask,
                                              dtype=cv2.CV_8UC1)

            # Get the name components of the file name.
            fn = os.path.split(img)[1]
            # ext = os.path.splitext( fn )[1]
            fn = os.path.splitext(fn)[0]

            # Only PNG is supported currently.
            ext = ".png"

            # Save the balanced image.
            cv2.imwrite(self.outDir + "/" + fn + ext, cvImg,
                        [cv2.IMWRITE_PNG_COMPRESSION, 0])

            count += 1
Example #8
def OpenCVRebalanceImage(frame, rfactor, gfactor, bfactor):
    offset = np.zeros(frame[:, :, 0].shape, dtype="uint8")
    frame[:, :, 0] = cv2.scaleAdd(frame[:, :, 0], bfactor, offset)
    frame[:, :, 1] = cv2.scaleAdd(frame[:, :, 1], gfactor, offset)
    frame[:, :, 2] = cv2.scaleAdd(frame[:, :, 2], rfactor, offset)
    return frame
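A usage sketch with a synthetic frame; note that the function writes the scaled channels back into the frame it is given and then returns it. The all-zero offset exists only because scaleAdd requires a second operand; the payoff over plain numpy scaling is that OpenCV saturate-casts uint8 results instead of wrapping:

import cv2
import numpy as np

frame = np.full((2, 2, 3), 200, np.uint8)  # stand-in BGR frame
out = OpenCVRebalanceImage(frame, rfactor=1.5, gfactor=1.0, bfactor=1.0)
# Red channel: 200 * 1.5 = 300 -> saturate-cast to 255, whereas
# (frame * 1.5).astype(np.uint8) would wrap around to 44.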
Example #9
        print("Use the average BGR values as the target.")
    else:
        targetBGR = np.array( [ centerPixel[1], centerPixel[1], centerPixel[1] ], dtype=int )
        print("Use the green channel as the target.")
    print("The target BGR values are (%d, %d, %d)." % ( targetBGR[0], targetBGR[1], targetBGR[2] ))

    # The balancing factors.
    bf = targetBGR / centerPixel
    print("The balancing factors are: {}".format( bf ))

    imgBalanced = np.zeros_like( imgOri, dtype=np.uint8 )

    # White balance.
    dummyZeroMatrix = np.zeros( [ imgOri.shape[0], imgOri.shape[1] ] , dtype=imgOri.dtype )
    for i in range( 3 ):
        imgBalanced[:, :, i] = cv2.scaleAdd( imgOri[:, :, i], targetBGR[i] / centerPixel[i], dummyZeroMatrix )

    # Save the image.
    namePart = os.path.splitext( os.path.split(args.input_image)[1] )[0]
    fn = namePart + "_Balanced.png"
    cv2.imwrite( fn, imgBalanced )
    print("The balanced image is saved as %s." % ( fn ))

    # Save the balancing factors as a text file.
    np.savetxt( args.bf, bf, fmt="%+e" )
    print("The balancing factors are saved in %s." % ( args.bf ))

    if args.vc:
        print("Begin calibrating vignetting effect.")
        # Convert the balanced image into grayscale image.
        gray = cv2.cvtColor( imgBalanced, cv2.COLOR_BGR2GRAY )
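The dummy zero matrix above contributes nothing; it is there only because scaleAdd needs a second operand. On uint8 data the same per-channel gain can be written without it via cv2.convertScaleAbs, which also saturate-casts to 8 bits (a standalone sketch of the alternative):

import cv2
import numpy as np

img = np.random.randint(0, 256, (4, 4, 3), np.uint8)
bf = np.array([1.1, 1.0, 0.9])  # illustrative per-channel balancing factors
balanced = np.zeros_like(img)
for i in range(3):
    # Same gain as the scaleAdd-with-zeros pattern, no dummy matrix needed
    balanced[:, :, i] = cv2.convertScaleAbs(img[:, :, i], alpha=bf[i])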
Example #10
print("loading")

import cv2
import numpy as np

img_orig = cv2.imread("Lenna.png")
img_orig = np.double(img_orig) / 255.0

mul = float(input("multiplier (default 1.0): ") or 1.0)
gamma = float(input("gamma (default 1.0): ") or 1.0)

img_res = cv2.pow(img_orig, gamma)
img_res = cv2.scaleAdd(img_res, mul - 1.0, img_res)

cv2.imshow("original", img_orig)
cv2.moveWindow("original", 0, 0)
cv2.imshow("result", img_res)
cv2.moveWindow("result", 512, 0)

#cv2.imshow("original, result", np.hstack( (img_orig, img_res) ))
#cv2.moveWindow("original, result", 0, 0)

cv2.waitKey(0)
cv2.destroyAllWindows()
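Since cv2.scaleAdd(x, mul - 1.0, x) evaluates (mul - 1) * x + x = mul * x, the two processing lines above implement result = mul * img**gamma without allocating a zero array. A quick numeric check of that identity:

import cv2
import numpy as np

x = np.random.rand(4, 4)
assert np.allclose(cv2.scaleAdd(x, 2.5 - 1.0, x), 2.5 * x)  # equals 2.5 * x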
Example #11
import numpy as np, cv2

image = cv2.imread("datas/images/contrast.jpg", cv2.IMREAD_GRAYSCALE)  # 영상 읽기
if image is None: raise Exception("영상 파일 읽기 오류 발생")

noimage = np.zeros(image.shape[:2], image.dtype)  # 더미 영상
avg = cv2.mean(image)[0] / 2.0  # 영상 화소 평균의 절반

dst1 = cv2.scaleAdd(image, 0.2, noimage) + 20  # 영상대비 감소
dst2 = cv2.scaleAdd(image, 2.0, noimage)  # 영상대비 증가
dst3 = cv2.addWeighted(image, 0.5, noimage, 0, avg)  # 명암대비 감소
dst4 = cv2.addWeighted(image, 2.0, noimage, 0, -avg)  # 명암대비 증가
dst5 = cv2.addWeighted(image, 2.0, noimage, 0, 0)  # 명암대비 증가

# Display the images
cv2.imshow("image", image)
cv2.imshow("dst1 - decrease contrast", dst1)
cv2.imshow("dst2 - increase contrast", dst2)
cv2.imshow("dst3 - decrease contrast using average", dst3)
cv2.imshow("dst4 - increase contrast using average", dst4)
cv2.imshow("dst5 - increase contrast without average", dst5)

cv2.imwrite("dst.jpg", dst1)
cv2.waitKey(0)
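cv2.addWeighted(src1, alpha, src2, beta, gamma) computes alpha * src1 + beta * src2 + gamma, so scaleAdd(a, alpha, b) is the special case beta = 1, gamma = 0; with an all-zero second operand, dst2 and dst5 above are therefore the same image. A standalone check:

import numpy as np, cv2

a = np.random.randint(0, 128, (4, 4), np.uint8)
z = np.zeros_like(a)
assert np.array_equal(cv2.scaleAdd(a, 2.0, z),
                      cv2.addWeighted(a, 2.0, z, 1.0, 0.0))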
Example #12
    def create_diff_frames(self,
                           vname,
                           oname,
                           datapath,
                           nframes=60,
                           crop=(1280, 720),
                           targsize=dfc.TARGETSZ):
        #{
        video = cv2.VideoCapture(vname)
        orig = cv2.VideoCapture(oname)

        # Calc entry frame to center capture window
        fps = round(video.get(cv2.CAP_PROP_FPS))
        fentry = round((fps * 10 / 2) - (nframes / 2))
        tframes = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        assert fentry >= 0 and fentry+nframes <= tframes,\
            (f"Invalid frame window: [{fentry}, {fentry+nframes}] "
             f"At FPS: {fps}, only {tframes} frames in {vname}")

        video.set(cv2.CAP_PROP_POS_FRAMES, fentry)
        orig.set(cv2.CAP_PROP_POS_FRAMES, fentry)

        # Get orientation from video header
        fwidth = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        fheight = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        is_portrait_orient, lft, rht, bot = df.crop_params(
            fwidth, fheight, crop)
        targorientsize = (targsize[1],
                          targsize[0]) if is_portrait_orient else targsize

        zblock = np.zeros((targsize[1], targsize[0], 3), dtype=np.uint8)
        fidx, vsuccess, osuccess, fakerprint = 0, True, True, None
        while (video.isOpened() and orig.isOpened()
               and vsuccess and osuccess and fidx < nframes):
            #{
            # Fakerframes are scaled Photoshop difference blend-mode images,
            # see: https://helpx.adobe.com/photoshop/using/blending-modes.htm

            vsuccess, videoframe = video.read()
            osuccess, origframe = orig.read()

            if vsuccess and osuccess:
                #{
                # Crop and then scale image to the CONVNet target size
                videoframe = cv2.resize(videoframe[0:bot, lft:rht, :],
                                        targorientsize,
                                        interpolation=cv2.INTER_AREA)
                origframe = cv2.resize(origframe[0:bot, lft:rht, :],
                                       targorientsize,
                                       interpolation=cv2.INTER_AREA)

                # Rotate to landscape
                if is_portrait_orient:
                    videoframe = cv2.rotate(videoframe,
                                            cv2.ROTATE_90_COUNTERCLOCKWISE)
                    origframe = cv2.rotate(origframe,
                                           cv2.ROTATE_90_COUNTERCLOCKWISE)

                # Diff and scale pixel values (usually scale up to fill [0,255] range)
                fakerframe = cv2.subtract(cv2.max(videoframe, origframe),
                                          cv2.min(videoframe, origframe))
                fakerframe = cv2.scaleAdd(fakerframe, 255 / fakerframe.max(),
                                          zblock)  # faster than np.multiply
                cv2.imwrite(f"{datapath}/fakerframe{fidx}.jpg", fakerframe)

                # Effectively fakerprint = reduce(sum, fakerframes)
                if fakerprint is None:
                    fakerprint = fakerframe.astype(np.uint16)
                else:
                    fakerprint = fakerprint + fakerframe  # faster than cv2.add
            #}

            fidx += 1
        #}

        video.release()
        orig.release()

        # Scale pixel values (usually scale down to fill [0,255] range)
        assert fakerprint is not None, f"OpenCV read failure, video: {vname}"
        fakerprint = cv2.scaleAdd(fakerprint, 255 / fakerprint.max(),
                                  zblock.astype(np.uint16))
        cv2.imwrite(f"{datapath}/fakerprint.jpg", fakerprint)
        return True
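Both scaleAdd calls in this method are pure rescales (the second operand is all zeros): they stretch the frame so its maximum lands at 255, which assumes the frame is not entirely zero since the code divides by its max. When the minimum is already 0 this matches a min-max normalisation; a standalone sketch of that equivalence:

import numpy as np, cv2

f = np.array([[0, 10], [20, 60]], np.uint8)
z = np.zeros_like(f)
stretched = cv2.scaleAdd(f, 255.0 / f.max(), z)  # stretch so the max lands on 255
reference = cv2.normalize(f, None, 0, 255, cv2.NORM_MINMAX)  # same result when f.min() == 0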
Example #13
# @Author  : sunyihuan
# @File    : white_balance.py

import cv2
import numpy as np
import os

bf = np.ones([3], dtype=float)

img = "E:/WLS_originalData/3660camera_data202007/X3_original/20200630102015.jpg"
# Load the image.
cvImg = cv2.imread(img, cv2.IMREAD_UNCHANGED)
vcMask = np.ones([cvImg.shape[0], cvImg.shape[1]], dtype=float)
# Balance the input image
for i in range(cvImg.shape[2]):
    cvImg[:, :, i] = cv2.scaleAdd(cvImg[:, :, i], bf[i],
                                  np.zeros_like(cvImg[:, :, i]))
    cvImg[:, :, i] = cv2.multiply(cvImg[:, :, i], vcMask, dtype=cv2.CV_8UC1)

# Get the name components of the file name.
fn = os.path.split(img)[1]
# ext = os.path.splitext( fn )[1]
fn = os.path.splitext(fn)[0]

# Only PNG is supported currently (the PNG compression flag below is
# ignored for other formats), so save with a .png extension.
ext = "_white_b.png"

# Save the balanced image.
# cv2.imwrite( args.output_dir + "/" + fn + "_Balanced" + ext, cvImg )
cv2.imwrite(fn + ext, cvImg, [cv2.IMWRITE_PNG_COMPRESSION, 0])
Example #14
image = cv2.merge([hue, saturation, value])
image = cv2.cvtColor(image.astype("uint8"), cv2.COLOR_HSV2BGR)
image = np.array(image, dtype=np.float32)


# function that defines the blur region along the vertical axis of the image
def alpha(x, l1, l2, d):
    return (0.5 * (np.tanh((x - l1) / (d + 0.1)) - np.tanh(
        (x - l2) / (d + 0.1))))


height, width, depth = image.shape

media = np.ones([3, 3], dtype=np.float32)
mask = cv2.scaleAdd(media, 1 / 9.0, np.zeros([3, 3], dtype=np.float32))  # 3x3 mean (box) kernel
image_2 = copy(image)  # requires: from copy import copy
for i in range(10):
    image_2 = cv2.filter2D(image_2, -1, mask, anchor=(1, 1))

result = np.zeros([height, width, depth])


def sety(l):
    global l1, l2, y, delta
    y = l
    l1 = y - int(delta / 2)
    l2 = y + int(delta / 2)
    applyTilt()
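The scaleAdd call above is used only to build a 3x3 mean (box) kernel: ones * 1/9 + zeros. The same mask can be written directly, and the ten filter2D passes with it approximate a wider Gaussian blur:

import numpy as np

mask = np.full((3, 3), 1.0 / 9.0, dtype=np.float32)  # identical to the scaleAdd construction

cv2.blur(image, (3, 3)) applies the same box kernel in a single call.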

Example #15
File: augmentor.py, Project: Peiiii/pymagic
def brightness(rng, img, brightness_range):
    brightness = rng_between(rng, brightness_range[0], brightness_range[1])
    return cv2.scaleAdd(img, brightness, np.zeros_like(img))
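A sketch of how this augmenter might be driven. rng_between is not shown in the snippet; the stand-in below assumes it draws a uniform float from the given range, which is a guess from its name:

import cv2
import numpy as np

def rng_between(rng, lo, hi):  # hypothetical stand-in for the project's helper
    return rng.uniform(lo, hi)

rng = np.random.default_rng(0)
img = np.full((4, 4), 100, np.uint8)
out = brightness(rng, img, (0.5, 1.5))  # random per-image gain; uint8 results saturate at 255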
Example #16
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, np.ones((5, 5),
                                                              np.uint8))
    threshRGB = cv2.merge((thresh, thresh, thresh))
    res = cv2.bitwise_and(target, threshRGB)

    #Standstill
    Standstill_im = (
        1 - Standstill_rate) * Standstill_im + Standstill_rate * thresh
    Standstill_im = cv2.bitwise_and(Standstill_im.astype('uint8'), thresh)
    _, sthresh = cv2.threshold(Standstill_im, 200, 255, cv2.THRESH_BINARY_INV)
    thresh_standstill = cv2.bitwise_and(thresh, cv2.bitwise_not(sthresh))

    #HeatMap
    imdiff = cv2.absdiff(thresh, prev_image)
    prev_image = thresh
    HeatMap_im = cv2.scaleAdd(imdiff, 5, ((1 - HeatMap_rate) *
                                          HeatMap_im).astype('uint8'))

    #Contour

    #Bounding box

    #res1 = np.vstack((target,cv2.merge((dst,dst,dst)),thresh,res))
    cv2.imshow("target", target)
    #cv2.imshow("dst", dst)
    #cv2.imshow("bg_thresh", bg_thresh)
    #cv2.imshow("hist_thresh", hist_thresh)
    #cv2.imshow("thresh", thresh)
    #cv2.imshow("result", res)

    #cv2.imshow("sthresh", sthresh)
    cv2.imshow("HeatMap_im", HeatMap_im)