Example #1
def equal_contrast(gauss):
    #step1
    alfa = 0.1
    ec = gauss
    ec0 = cv2.absdiff(ec, 0)
    ec0 = cv2.pow(ec0, alfa)
    ec0 = cv2.mean(ec0)
    ec0 = ec0[0]
    den = np.power(ec0, 1.0 / alfa)
    ec1 = cv2.divide(ec, den)

    #step2
    tal = 10.0
    ec2 = cv2.absdiff(ec1, 0)
    ec2 = cv2.min(tal, ec2)
    ec2 = cv2.pow(ec2, alfa)
    ec2 = cv2.mean(ec2)
    ec2 = ec2[0]
    den = np.power(ec2, 1.0 / alfa)
    ec3 = cv2.divide(ec1, den)
    ec3 = cv2.normalize(ec3.astype('float32'), None, 0.0, 1.0, cv2.NORM_MINMAX)

    #step3
    ele = cv2.divide(ec3, tal)
    exp1 = cv2.exp(ele)
    exp2 = cv2.exp(-ele)
    num = cv2.subtract(exp1, exp2)
    den = cv2.add(exp1, exp2)
    tanh = cv2.divide(num, den, scale=tal)  # tal * tanh(ec3 / tal)
    ec4 = tanh
    ec4 = cv2.normalize(ec4.astype('float32'), None, -1.0, 1.3,
                        cv2.NORM_MINMAX)
    return ec4
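A minimal usage sketch (an assumption: the input is a float32, Gaussian-smoothed grayscale face image, as the parameter name suggests; 'face.png' is a hypothetical path):

import cv2
import numpy as np

img = cv2.imread('face.png', cv2.IMREAD_GRAYSCALE)
gauss = cv2.GaussianBlur(np.float32(img), (0, 0), 2.0)
out = equal_contrast(gauss)
print(out.min(), out.max())  # about -1.0 and 1.3 after the final normalize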
Example #2
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # mean 1
    tmp = cv2.pow(cv2.absdiff(image, 0), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0 / ALPHA))

    # mean 2
    tmp = cv2.pow(cv2.min(cv2.absdiff(image, 0), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0 / ALPHA))

    # tanh
    exp_x = cv2.exp(cv2.divide(image, TAU))
    exp_negx = cv2.exp(cv2.divide(-image, TAU))
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = cv2.multiply(image, TAU)

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
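Both tantriggs snippets (this one and Example #3) rely on module-level GAMMA, ALPHA, TAU constants and a difference_of_gaussian() helper that are not shown. A minimal sketch of plausible definitions, assuming the defaults from Tan and Triggs' illumination-normalization paper (gamma 0.2, alpha 0.1, tau 10, DoG sigmas 1 and 2):

import cv2

GAMMA = 0.2   # assumed value
ALPHA = 0.1   # assumed value
TAU = 10.0    # assumed value

def difference_of_gaussian(image, sigma0=1.0, sigma1=2.0):
    # DoG as the difference of two Gaussian blurs; the sigmas are assumptions
    blur0 = cv2.GaussianBlur(image, (0, 0), sigma0)
    blur1 = cv2.GaussianBlur(image, (0, 0), sigma1)
    return cv2.subtract(blur0, blur1)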
Example #3
File: tantriggs.py Project: Nambu14/Ringo
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # mean 1
    tmp = cv2.pow(cv2.absdiff(image, 0), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # mean 2
    tmp = cv2.pow(cv2.min(cv2.absdiff(image, 0), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # tanh
    exp_x = cv2.exp(cv2.divide(image, TAU))
    exp_negx = cv2.exp(cv2.divide(-image, TAU))
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = cv2.multiply(image, TAU)

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
Example #4
    def ShowHomomorphicFilter(imgSrc):
        # imgSrc = cv.resize(imgSrc, (int(imgSrc.shape[1]/4), int(imgSrc.shape[0]/4)))
        imgLnSrc = imgSrc
        # constrain the value range first; otherwise log(0) on zero pixels yields infinities
        cv.normalize(imgLnSrc, imgLnSrc, 1, 255, cv.NORM_MINMAX)
        imgLnSrc = np.float64(imgLnSrc)
        cv.log(imgLnSrc, imgLnSrc)
        b, g, r = cv.split(imgLnSrc)
        H = ImageHandler.CreateHomomorphicFilterTemplate(
            imgLnSrc.shape[0] * 2, imgLnSrc.shape[1] * 2)
        b = ImageHandler.HandleFFTPerChannel(b, H)
        g = ImageHandler.HandleFFTPerChannel(g, H)
        r = ImageHandler.HandleFFTPerChannel(r, H)

        merged = cv.merge((b, g, r))
        imgOut = merged[0:imgSrc.shape[0], 0:imgSrc.shape[1],
                        0:imgSrc.shape[2]]
        # normalize twice: the values coming back are so large that exp would overflow to infinity
        cv.normalize(imgOut, imgOut, 0, 1, cv.NORM_MINMAX)
        cv.exp(imgOut, imgOut)
        cv.normalize(imgOut, imgOut, 0, 1, cv.NORM_MINMAX)

        cv.cvtColor(imgSrc, cv.COLOR_BGR2RGB, imgSrc)
        cv.cvtColor(imgOut, cv.COLOR_BGR2RGB, imgOut)
        plt.figure(1), plt.imshow(imgSrc)
        plt.figure(2), plt.imshow(imgOut)
        plt.show()
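ImageHandler.CreateHomomorphicFilterTemplate() and ImageHandler.HandleFFTPerChannel() are project helpers that are not shown. A hypothetical sketch of what they might look like, assuming a Gaussian high-frequency-emphasis template H(u,v) = (gh - gl) * (1 - exp(-D^2/d0^2)) + gl and zero-padded FFT filtering (the cut-off d0 and the gains are guesses):

import numpy as np

def CreateHomomorphicFilterTemplate(rows, cols, d0=30.0, gl=0.5, gh=2.0):
    # squared distance from the center of the padded grid
    u = np.arange(rows) - rows / 2.0
    v = np.arange(cols) - cols / 2.0
    D2 = u[:, None] ** 2 + v[None, :] ** 2
    return (gh - gl) * (1.0 - np.exp(-D2 / d0 ** 2)) + gl

def HandleFFTPerChannel(channel, H):
    # zero-pad to the template size, filter in the frequency domain,
    # and return the (still padded) real part; the caller crops afterwards
    F = np.fft.fftshift(np.fft.fft2(channel, s=H.shape))
    return np.real(np.fft.ifft2(np.fft.ifftshift(F * H)))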
Example #5
def noniternorm(img):
    b,g,r = cv2.split(img)
    b = np.float32(b)
    g = np.float32(g)
    r = np.float32(r)
    log_b = cv2.log(b)  # note: zero pixels give log(0) = -inf
    log_g = cv2.log(g)
    log_r = cv2.log(r)
    b = cv2.exp(log_b - cv2.mean(log_b)[0])
    g = cv2.exp(log_g - cv2.mean(log_g)[0])
    r = cv2.exp(log_r - cv2.mean(log_r)[0])
    # normalize takes (src, dst, alpha, beta, norm_type); the original call put
    # 0 in the dst slot and NORM_MINMAX in the beta slot
    b = cv2.normalize(b, None, 0, 1, cv2.NORM_MINMAX) * 255
    g = cv2.normalize(g, None, 0, 1, cv2.NORM_MINMAX) * 255
    r = cv2.normalize(r, None, 0, 1, cv2.NORM_MINMAX) * 255
    return cv2.merge((np.uint8(b),np.uint8(g),np.uint8(r)))
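This divides each channel by its geometric mean (exp of the mean log); zero pixels make cv2.log return -inf, so they should be lifted first. A usage sketch with a hypothetical input path:

import cv2
import numpy as np

img = cv2.imread('scene.jpg')
img = np.maximum(img, 1)   # keep log() away from zero pixels
balanced = noniternorm(img)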
Example #6
def sigmoid(x, height, width):
    zeroes = cv2.UMat(np.zeros((height, width)))
    ones = cv2.UMat(np.ones((height, width)))

    subbed = cv2.subtract(zeroes, x, dtype=cv2.CV_32F)
    added = cv2.add(ones, cv2.exp(subbed), dtype=cv2.CV_32F)
    return cv2.divide(ones, added, dtype=cv2.CV_32F)
Example #7
def saliency_feature(img):
    img_orig = img
    img = cv2.resize(img, (64, 64))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # h = cv2.getOptimalDFTSize(img.shape[0])
    # w = cv2.getOptimalDFTSize(img.shape[1])
    # print "Resizing (%d, %d) to (%d, %d)" % (img.shape[0], img.shape[1], h, w)
    # h = (h - img.shape[0])/2.0
    # w = (w - img.shape[1])/2.0
    # img = cv2.copyMakeBorder(img, int(math.floor(h)), int(math.ceil(h)), int(math.floor(w)), int(math.ceil(w)), cv2.BORDER_CONSTANT, value=0)

    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:,:,0], dft[:,:,1])
    L = cv2.log(A)
    h_n = (1./3**2)*np.ones((3,3))
    R = L - cv2.filter2D(L, -1, h_n)
    S = cv2.GaussianBlur(cv2.idft(np.dstack(cv2.polarToCart(cv2.exp(R), P)), flags=cv2.DFT_REAL_OUTPUT)**2, (0,0), 8)
    S = cv2.resize(cv2.normalize(S, None, 0, 1, cv2.NORM_MINMAX), (img_orig.shape[1],img_orig.shape[0]))

    # cv2.namedWindow('tmp1', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp1', img_orig)
    # cv2.namedWindow('tmp', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp', S)
    # cv2.waitKey()

    return S
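This is the spectral-residual saliency method of Hou and Zhang (CVPR 2007): the log-amplitude spectrum minus its local average is transformed back to the image domain, squared, and blurred. A usage sketch (hypothetical input path):

import cv2

img = cv2.imread('photo.jpg')
S = saliency_feature(img)   # float32 map in [0, 1], resized to the input size
cv2.imwrite('saliency.png', (S * 255).astype('uint8'))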
Example #8
def saliency_feature(img):
    img_orig = img
    img = cv2.resize(img, (64, 64))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # h = cv2.getOptimalDFTSize(img.shape[0])
    # w = cv2.getOptimalDFTSize(img.shape[1])
    # print "Resizing (%d, %d) to (%d, %d)" % (img.shape[0], img.shape[1], h, w)
    # h = (h - img.shape[0])/2.0
    # w = (w - img.shape[1])/2.0
    # img = cv2.copyMakeBorder(img, int(math.floor(h)), int(math.ceil(h)), int(math.floor(w)), int(math.ceil(w)), cv2.BORDER_CONSTANT, value=0)

    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:, :, 0], dft[:, :, 1])
    L = cv2.log(A)
    h_n = (1. / 3**2) * np.ones((3, 3))
    R = L - cv2.filter2D(L, -1, h_n)
    S = cv2.GaussianBlur(
        cv2.idft(np.dstack(cv2.polarToCart(cv2.exp(R), P)),
                 flags=cv2.DFT_REAL_OUTPUT)**2, (0, 0), 8)
    S = cv2.resize(cv2.normalize(S, None, 0, 1, cv2.NORM_MINMAX),
                   (img_orig.shape[1], img_orig.shape[0]))

    # cv2.namedWindow('tmp1', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp1', img_orig)
    # cv2.namedWindow('tmp', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp', S)
    # cv2.waitKey()

    return S
Example #9
File: augmentor.py Project: Peiiii/pymagic
def gamma(rng, img, gamma_range):
    gamma = rng_between(rng, gamma_range[0], gamma_range[1])
    k = 1.0 / gamma
    img = cv2.exp(k * cv2.log(img.astype('float32') + 1e-15))
    f = math.pow(255.0, 1 - k)
    img = img * f
    img = cv2.add(img, np.zeros_like(img), dtype=0)  # dtype=0 is cv2.CV_8U: saturates (clips) to [0, 255]
    return img
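rng_between() is a helper from the same project and is not included here; a plausible stand-in (an assumption, not the project's actual code), plus the imports the snippet needs:

import math
import random

import cv2
import numpy as np

def rng_between(rng, low, high):
    # hypothetical helper: uniform float in [low, high] drawn from `rng`,
    # assumed to be a random.Random instance
    return rng.uniform(low, high)

out = gamma(random.Random(0), np.full((4, 4), 128, np.uint8), (0.5, 2.0))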
Example #10
def noniternorm(img):
    b, g, r = cv2.split(img)
    b = np.float32(b)
    g = np.float32(g)
    r = np.float32(r)
    log_b = cv2.log(b)
    log_g = cv2.log(g)
    log_r = cv2.log(r)
    b = cv2.exp(log_b - cv2.mean(log_b)[0])
    g = cv2.exp(log_g - cv2.mean(log_g)[0])
    r = cv2.exp(log_r - cv2.mean(log_r)[0])
    # same fix as Example #5: the original call put 0 in the dst slot and
    # NORM_MINMAX in the beta slot of cv2.normalize
    b = cv2.normalize(b, None, 0, 1, cv2.NORM_MINMAX) * 255
    g = cv2.normalize(g, None, 0, 1, cv2.NORM_MINMAX) * 255
    r = cv2.normalize(r, None, 0, 1, cv2.NORM_MINMAX) * 255
    b = b.clip(max=255)
    g = g.clip(max=255)
    r = r.clip(max=255)
    return cv2.merge((np.uint8(b), np.uint8(g), np.uint8(r)))
Example #11
def dFFT2(img):
    """
    Calculated the discrete Fourier
    Transfrom of a 2D image
    :param img: image to be transformed
    :return: complex valued result
    """
    N=len(img)
    if N==1:
        return img

    even=dFFT2([img[k] for k in range(0,N,2)])
    odd= dFFT2([img[k] for k in range(1,N,2)])

    M = N // 2  # integer halving; N / 2 is a float in Python 3
    # np.exp handles complex scalars; cv2.exp only accepts real arrays
    l = [even[k] + np.exp(-2j * np.pi * k / N) * odd[k] for k in range(M)]
    r = [even[k] - np.exp(-2j * np.pi * k / N) * odd[k] for k in range(M)]

    return l+r
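With the fixes above (np.exp for the complex twiddle factor, integer halving, np.pi instead of 3.14), the recursion agrees with NumPy's FFT on power-of-two lengths:

import numpy as np

x = list(np.random.rand(8))
print(np.allclose(dFFT2(x), np.fft.fft(x)))  # True for power-of-two lengths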
Example #12
def split_r_part(hsv_v, nkernelSize, mean_illumination, ADJUST_DELTA=10):
    v_blur = cv2.GaussianBlur(hsv_v, (nkernelSize, nkernelSize), 0)
    v_log = cv2.log(hsv_v)
    v_blur_log = cv2.log(v_blur)
    r_part_log = v_log - v_blur_log
    r_part = cv2.exp(r_part_log)
    r_part = cv2.convertScaleAbs(r_part, alpha=mean_illumination)
    r_part_32F = r_part.astype(np.float32)
    r_part_32F -= ADJUST_DELTA
    return r_part_32F
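A usage sketch, assuming the V channel of an HSV conversion as input; cv2.log needs zero pixels lifted first (the path and parameters are placeholders):

import cv2
import numpy as np

bgr = cv2.imread('scene.jpg')
v = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)[:, :, 2]
v = np.maximum(v, 1).astype(np.float32)     # avoid log(0)
r_part = split_r_part(v, 31, mean_illumination=128.0)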
Example #13
    def __init__(self, image, parent=None):
        super(ResamplingWidget, self).__init__(parent)

        filt_size = 3
        em_radius = 2
        em_stddev = 5
        em_error = 0.01
        max_iter = 20

        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY).astype(np.float32)
        minimum, maximum, _, _ = cv.minMaxLoc(gray)
        kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]], np.float32)
        kernel /= (filt_size + 1) * (filt_size + 1)
        alpha0 = np.ones((2 * em_radius + 1, 2 * em_radius + 1), np.float32)
        alpha0[em_radius, em_radius] = 0
        alpha0 /= alpha0.size - 1
        alpha1 = np.zeros((2 * em_radius + 1, 2 * em_radius + 1), np.float32)
        gray0 = np.ravel(gray[em_radius:-em_radius, em_radius:-em_radius])

        block_rows = gray.shape[0] - 2 * em_radius
        block_cols = gray.shape[1] - 2 * em_radius
        resamp = np.array([])
        for i in range(-em_radius, em_radius + 1):
            for j in range(-em_radius, em_radius + 1):
                if i == 0 and j == 0:
                    continue
                block = gray[i + em_radius:i + em_radius + block_rows,
                             j + em_radius:j + em_radius + block_cols]
                block = np.reshape(block, (block.size, 1))
                resamp = block if resamp.size == 0 else np.hstack(
                    (resamp, block))

        i = 0
        sigma = em_stddev
        c1 = 1 / (sigma * np.sqrt(2 * np.pi))
        c2 = 2 * sigma**2
        p0 = 1 / (maximum - minimum)
        while cv.norm(alpha0, alpha1) > em_error and i < max_iter:
            filt = cv.filter2D(gray, cv.CV_32F, alpha1)
            resid0 = cv.absdiff(gray, filt)
            resid = resid0[em_radius:resid0.shape[0] - em_radius,
                           em_radius:resid0.shape[1] - em_radius]  # shape[1] for the column slice
            resid = cv.pow(cv.filter2D(resid, cv.CV_32F, kernel), 2)
            cond = c1 * (1 / cv.exp(resid / c2))
            post = cond / (cond + p0)

            sigma = np.sqrt(np.sum(post * resid) / np.sum(post)) / 2
            alpha0 = np.copy(alpha1)
            weights = np.reshape(post, (post.size, 1))
            # deriv = np.multiply()
            k = 0
Example #14
File: HDR.py Project: MicroGuitar/Raw
 def _merge(self, images, times):
     """
     :Description: use images, times, and CRF to merge HDRI
     :param images: image list
     :param times: times list
     :return hdr_img: HDRI(lux_img)
     """
     assert isinstance(images, list), 'images should be list'
     assert isinstance(times, list), 'times should be list'
     assert len(images) == len(
         times), "images length should be same as times"
     weights = self.__intensity_weight_256x_.copy()
     n_img = len(images)
     n_chn = images[0].shape[2]
     response_256x1x3 = self.__camera_response_256x1x3.copy()
     log_response = np.log(response_256x1x3)
     log_time = np.log(times)
     # log_hdr_img channel list
     hdr_chn_list = [0, 0, 0]
     img_avr_w_sum = np.zeros(images[0].shape[:2], dtype="float32")
     for i in range(n_img):
         src_chn_list = cv2.split(images[i])
         img_avr_w = np.zeros(images[0].shape[:2], dtype="float32")
         for cn in range(n_chn):
             img_cn_w = cv2.LUT(src_chn_list[cn], weights)
             img_avr_w += img_cn_w
         # average weight image over the n_chn channels of image i
         img_avr_w /= n_chn
         # per-pixel log response (log(lum)) of one image
         response_img = cv2.LUT(images[i], log_response)
         response_chn_list = cv2.split(response_img)
         for chn in range(n_chn):
             # img_avr_w: the image's average channel weight;
             # response_chn_list[chn]: this channel's log response;
             # log_time[i]: this image's log exposure time
             hdr_chn_list[chn] += cv2.multiply(
                 img_avr_w, response_chn_list[chn] - log_time[i])
         # running sum of the per-image average weights
         img_avr_w_sum += img_avr_w
     # reciprocal of the summed average weights
     img_avr_w_sum = 1.0 / img_avr_w_sum
     for cn in range(n_chn):
         hdr_chn_list[cn] = cv2.multiply(hdr_chn_list[cn], img_avr_w_sum)
     log_hdr_img = cv2.merge(hdr_chn_list)
     # this is lux; why it differs from the reference implementation by an
     # order of magnitude is unclear
     hdr_img = cv2.exp(log_hdr_img)
     return hdr_img
Example #15
    def process(self, images, times):
        """
        :Description: merge the HDRI with per-factor weights and tone-map it to an LDRI
        :param images: images list
        :param times: times list
        :return ldr_img: LDRI
        """
        assert isinstance(images, list), 'images should be list'
        assert isinstance(times, list), 'times should be list'
        assert len(images) == len(times), "images length should be same as times"
        start = time.time()
        gamma = self.__gamma
        contrast = self.__contrast
        saturation = self.__saturation
        sigma_space = self.__sigma_space
        sigma_color = self.__sigma_color

        hdr_img = self._merge(images, times)
        hdr_img_2d = hdr_img.reshape(hdr_img.shape[0], -1)  # flatten channels for minMaxLoc (was hard-coded 1024 x 1280*3)
        minval, maxvalue, _, _ = cv2.minMaxLoc(hdr_img_2d)
        img = (hdr_img - minval) / (maxvalue - minval)
        img = img.clip(1.0e-4)
        img = cv2.pow(img, 1.0 / gamma)

        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        log_img = np.log(gray_img)
        map_img = cv2.bilateralFilter(log_img, -1, sigma_color, sigma_space)
        minval, maxval, _, _ = cv2.minMaxLoc(map_img)
        scale = contrast / (maxval - minval)
        map_img = cv2.exp(map_img * (scale - 1.0) + log_img)
        img = self._mapLuminance(img, gray_img, map_img, saturation)
        img = cv2.pow(img, 1.0 / gamma)
        #no problem!!
        img = img.clip(None, 1.0)
        img = img * 255
        ldr_img = img.astype("uint8")
        end = time.time()
        print "spend time %f" % (end-start)
        return ldr_img
Example #16
def multi_scale_retinex(image, ksize=0, sigma=9):
    I = image.copy()
    I = cv2.convertScaleAbs(np.float32(I), alpha=1.0, beta=1.0)  # to uint8, +1 to avoid log(0)
    I = cv2.log(np.float32(I))  # dst dropped: the uint8 I would mismatch the float32 output

    l = cv2.GaussianBlur(image, (ksize, ksize), sigma)
    l = cv2.convertScaleAbs(np.float32(l), alpha=1.0, beta=1.0)
    l = cv2.log(np.float32(l))

    result = cv2.subtract(I, l, l)
    result = cv2.exp(result, result)
    frames = cv2.split(result)
    for i in range(3):
        frames[i] = cv2.normalize(frames[i],
                                  frames[i],
                                  alpha=0,
                                  beta=255,
                                  norm_type=cv2.NORM_MINMAX)
        frames[i] = cv2.convertScaleAbs(frames[i],
                                        frames[i],
                                        alpha=1 / 3,
                                        beta=0.0)
    return color_correction(cv2.merge(frames))
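color_correction() belongs to the surrounding project and is not shown; a no-op placeholder (an assumption) so the snippet runs:

def color_correction(image):
    # hypothetical stand-in for the project's color restoration step:
    # pass the merged retinex result through unchanged
    return image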
Example #17
File: HDR.py Project: huster280/HDR_Fusion
    def sovle_c_w(self, contrast, wellexp, images, shape, i, is_gray):
        if is_gray:
            contrast[i] = cv2.Laplacian(images[i][:, :, 0], cv2.CV_32F)
        else:
            gray = cv2.cvtColor(images[i], cv2.COLOR_RGB2GRAY)
            contrast[i] = cv2.Laplacian(gray, cv2.CV_32F)
        contrast[i] = np.abs(contrast[i])
        if self.wcon != 1:
            contrast[i] = cv2.pow(contrast[i], self.wcon)

        wellexp[i] = np.ones(shape, dtype="float32")
        if self.wexp != 0:
            splitted = [
                images[i][:, :, 0], images[i][:, :, 1], images[i][:, :, 2]
            ]
            for img_cn in splitted:
                expo = cv2.subtract(img_cn, 0.5, dtype=cv2.CV_32F)
                expo = cv2.pow(expo, 2.0)
                expo = -expo / 0.08
                # a larger '0.08' only pushes 'cv2.exp(expo)' closer to 1
                expo = cv2.exp(expo)
                wellexp[i] *= expo
            wellexp[i] = cv2.pow(wellexp[i], self.wexp)
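The well-exposedness factor is a Gaussian around mid-gray with sigma 0.2 (0.08 = 2 * 0.2**2), as in Mertens-style exposure fusion; a quick look at the curve:

import numpy as np

x = np.linspace(0.0, 1.0, 5)
print(np.round(np.exp(-((x - 0.5) ** 2) / 0.08), 3))  # peaks at 0.5, decays toward 0 and 1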
Example #18
def cal_sigma(x):
    # Gaussian exponent scale 1/(2*x^2); the def line is inferred from the
    # cal_sigma(sigma) call in the loop below
    return 1.0 / (2.0 * pow(x, 2))


print(b.shape, type(b[0, 0]))
print(b[0])
print(h.shape, type(h[0, 0]))
print(h[0])
while (1):
    cv2.imshow('image', img)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
    hv = cv2.getTrackbarPos('H', 'image')
    sv = cv2.getTrackbarPos('S', 'image')
    sigma = cv2.getTrackbarPos('Sigma', 'image')
    hv = hv - 90
    temp0 = h - ht
    temp1 = 0.5 * (temp0 - hv)
    temp2 = 0.1 * (s - sv)
    cv2.imshow('b image', temp1)
    cv2.imshow('g image', temp2)
    temp0 = cv2.pow(temp1, 2)
    temp1 = cv2.pow(temp2, 2)
    temp2 = temp0 + temp1
    temp0 = -(temp2 * cal_sigma(sigma))
    temp1 = cv2.exp(temp0)
    temp2 = temp1 * 255.0
    cv2.imshow('r image', temp2)

cv2.destroyAllWindows()
Example #19
# from the book "OpenCV-Python으로 배우는 영상 처리 및 응용" (Image Processing and Applications with OpenCV-Python)
import numpy as np, cv2

# Examples of creating numpy arrays
v1 = np.array([1, 2, 3], np.float32)          # from a 1-D list - row vector
v2 = np.array([[1], [2], [3]], np.float32)    # from a 2-D list (3 rows, 1 column) - column vector
v3 = np.array([[1, 2, 3]], np.float32)        # from a 2-D list (1 row, 3 columns) - general matrix

# OpenCV arithmetic functions accept numpy arrays only
v1_exp = cv2.exp(v1)                          # exponential of a vector
v2_exp = cv2.exp(v2)                          # exponential of a matrix
v3_exp = cv2.exp(v3)                          # exponential of a matrix
log = cv2.log(v1)                             # logarithm
sqrt = cv2.sqrt(v2)                           # square root
pow = cv2.pow(v3, 3)                          # element-wise third power

# Print the results
print("[v1] shape: %s elements: %s" % (v1.shape, v1))
print("[v2] shape: %s elements:\n%s" % (v2.shape, v2))
print("[v3] shape: %s elements: %s" % (v3.shape, v3))
print()

# Matrix info - OpenCV returns results as matrices; a row vector comes back as a column vector
print("[v1_exp] type: %s shape: %s" % (type(v1_exp), v1_exp.shape))  # result for the row-vector argument
print("[v2_exp] type: %s shape: %s" % (type(v2_exp), v2_exp.shape))  # result for the column-vector argument
print("[v3_exp] type: %s shape: %s" % (type(v3_exp), v3_exp.shape))  # result for the 1 x 3 matrix argument
print()

# Printing a column vector on one line - convert it to a row vector
print("[log] =", log.T)
print("[sqrt] =", np.ravel(sqrt))
Example #20
    1. ravel()
    2. flatten()
    3. arr.T

'''

import numpy as np
import cv2
from numpy.core.fromnumeric import ndim, size

v1 = np.array([1, 2, 3], np.float32)  # shape (3,) array (ndim = 1)
v2 = np.array([[1], [2], [3]],
              np.float32)  # 3 x 1 matrix (ndim = 2)  / print(ndim(v2)) == 2
v3 = np.array([[1, 2, 3]], np.float32)  # 1 x 3 matrix

v_exp = cv2.exp(v1)
m_exp = cv2.exp(v2)
q_exp = cv2.exp(v3)

print(v_exp.shape)
print(m_exp.shape)
print(q_exp.shape)

v_log = cv2.log(v1)
m_sqrt = cv2.sqrt(v2)
q_pow = cv2.pow(v3, 3)

print(v_log.shape)
print(m_sqrt.shape)
print(q_pow.shape)
Example #21
File: HDR.py Project: MicroGuitar/Raw
    def process(self, images, times, samples=70, random=False):
        """
        :Description: to calibrate CRF curve
        :param images: image list
        :param times: time list
        :param samples: number of sample points
        :param random: whether to sample at random positions
        :return: response_256x1x3, CRF array
        """
        assert isinstance(images, list), 'images should be list'
        assert isinstance(times, list), 'times should be list'
        assert len(images) == len(
            times), "images length should be same as times"
        LDR_SIZE = self.__LDR_SIZE
        w = self.__intensity_weight_256x_.copy()
        gamma = self.__gamma
        images = np.array(images, dtype="uint8")
        times = np.array(times, dtype="float32")
        n_img = len(images)
        n_chn = images[0].shape[2]
        img_channel_list = []
        for i in range(n_chn):
            tmp = []
            for j in range(n_img):
                img_channel = cv2.split(images[j])[i]
                tmp.append(img_channel)
            img_channel_list.append(tmp)
        img_shape = images[0].shape
        img_cols = img_shape[1]
        img_rows = img_shape[0]
        sample_points_list = []

        # choose sampling positions: at random or on a regular grid
        if random:
            n_samples = samples  # mirrors the grid branch; used below
            for i in range(samples):
                r = np.random.randint(0, img_rows)
                c = np.random.randint(0, img_cols)
                sample_points_list.append((r, c))
        else:
            x_points = int(np.sqrt(samples * img_cols / img_rows))
            y_points = samples // x_points
            n_samples = x_points * y_points
            step_x = img_cols // x_points
            step_y = img_rows // y_points
            r = step_x // 2
            for j in range(y_points):
                rr = r + j * step_y
                c = step_y // 2
                for i in range(x_points):
                    cc = c + i * step_x
                    sample_points_list.append((rr, cc))

        # solve the response curve via SVD
        response_list = []
        for z in range(n_chn):
            eq = 0
            A = np.zeros(
                (n_samples * n_img + LDR_SIZE + 1, LDR_SIZE + n_samples),
                dtype="float32")
            B = np.zeros((A.shape[0]), dtype="float32")
            for i in range(n_samples):
                r = sample_points_list[i][0]
                c = sample_points_list[i][1]
                for j in range(n_img):
                    val = img_channel_list[z][j][r, c]
                    A[eq, val] = w[val]
                    A[eq, LDR_SIZE + i] = -w[val]
                    B[eq] = w[val] * np.log(times[j])
                    eq += 1
            # pin the log exposure of F(128) at 0, i.e. treat that exposure as
            # the unit; the choice of anchor does not matter
            A[eq, LDR_SIZE // 2] = 1
            eq += 1
            for i in range(0, 254):
                A[eq, i] = gamma * w[i]
                A[eq, i + 1] = -2 * gamma * w[i]
                A[eq, i + 2] = gamma * w[i]
                eq += 1
            _, response = cv2.solve(A, B, flags=cv2.DECOMP_SVD)

            # convert ln(lum) back to lum
            response = cv2.exp(response)
            response_256x1 = response[:256]
            response_list.append(response_256x1)
        self.camera_response_256x1x3 = cv2.merge(response_list)
        # return a 256x1x3 ndarray
        return self.camera_response_256x1x3
Example #22
File: HDR.py Project: MicroGuitar/Raw
    def process(self, images):
        """
        :rtype: object
        :Description: combine per-factor weights to fuse the exposure stack (an HDR alternative)
        :param images: img list
        :return fusion_img: fusion_img
        """
        assert isinstance(images, list), 'images should be list'
        n_img = len(images)

        # camera images are 3-channel [b, g, r]
        # n_chn = images[0].shape[2]
        n_chn = 1
        rows, cols = images[0].shape[0], images[0].shape[1]
        shape = (rows, cols)
        # initialize the weights list
        weights = []
        for i in range(n_img):
            tmp = np.zeros(shape, dtype="float32")
            weights.append(tmp)
        weight_sum = np.zeros(shape, dtype="float32")
        # compute each image's weight and accumulate weight_sum
        for i in range(len(images)):
            # normalize img to speed up convergence
            img = images[i] / 255.0
            ##############################
            # img = images[i] / 65536.0
            img = img.astype("float32")

            #convert to gray
            if n_chn == 3:
                gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            else:
                gray = img
            # compute contrast; ddepth: cv2.CV_32F
            contrast = cv2.Laplacian(gray, cv2.CV_32F)
            contrast = abs(contrast)
            #
            mean = np.zeros(shape, dtype="float32")
            splitted = cv2.split(images[i])
            for img_cn in splitted:
                mean += img_cn
            mean /= n_chn
            # compute saturation
            saturation = np.zeros(shape, dtype="float32")
            for img_cn in splitted:
                deviation = img_cn - mean
                deviation = cv2.pow(deviation, 2.0)
                saturation += deviation
            saturation = cv2.sqrt(saturation)
            wellexp = np.ones(shape, dtype="float32")
            for img_cn in splitted:
                expo = cv2.subtract(img_cn, 0.5, dtype=cv2.CV_32F)
                expo = cv2.pow(expo, 2.0)
                expo = -expo / 0.08
                # a larger '0.08' only pushes 'cv2.exp(expo)' closer to 1
                expo = cv2.exp(expo)
                wellexp = cv2.multiply(wellexp, expo)
            # raise each factor to its configured exponent
            contrast = cv2.pow(contrast, self.wcon)
            saturation = cv2.pow(saturation, self.wsat)
            wellexp = cv2.pow(wellexp, self.wexp)

            weights[i] = contrast
            if n_chn == 3:
                weights[i] = cv2.multiply(weights[i], saturation)
            weights[i] = cv2.multiply(weights[i], wellexp) + 1e-12
            weight_sum += weights[i]

        maxlevel = int(np.log(min(rows, cols)) / np.log(2))
        # (maxlevel + 1) pyramid levels; the final blended pyramid is assembled below
        res_pyr = [0] * (maxlevel + 1)
        for i in range(len(images)):
            img_pyr = [0] * (maxlevel + 1)
            weight_pyr = [0] * (maxlevel + 1)
            ###############################
            img = images[i] / 255.0
            # img = images[i] / 65535.0
            img = img.astype("float32")
            img_pyr[0] = img
            weights[i] /= weight_sum
            weight_pyr[0] = weights[i]
            # following: buildPyramid(img, img_pyr, maxlevel)
            # buildPyramid(weights[i], weight_pyr, maxlevel)
            # TODO: verify this

            for lvl in range(maxlevel):
                img_pyr[lvl + 1] = cv2.pyrDown(img_pyr[lvl])
            for lvl in range(maxlevel):
                #size = width, height
                size = img_pyr[lvl].shape[:2][::-1]
                up = cv2.pyrUp(img_pyr[lvl + 1], dstsize=size)
                img_pyr[lvl] -= up

            for lvl in range(maxlevel):
                weight_pyr[lvl + 1] = cv2.pyrDown(weight_pyr[lvl])

            for lvl in range(maxlevel + 1):
                splitted = cv2.split(img_pyr[lvl])
                splitted2 = []
                for img_pyr_cn in splitted:
                    tmp = cv2.multiply(img_pyr_cn, weight_pyr[lvl])
                    splitted2.append(tmp)
                cv2.merge(splitted2, img_pyr[lvl])
                # accumulate the weighted Laplacian pyramids into res_pyr
                # the first image initializes res_pyr[0..maxlevel]
                if i == 0:
                    res_pyr[lvl] = img_pyr[lvl]
                # later images are added into res_pyr[0..maxlevel]
                else:
                    res_pyr[lvl] += img_pyr[lvl]

        # collapse the pyramid: upsample each level and add it into the level below
        for lvl in range(maxlevel, 0, -1):
            #size = width, height
            size = res_pyr[lvl - 1].shape[:2][::-1]
            up = cv2.pyrUp(res_pyr[lvl], dstsize=size)
            res_pyr[lvl - 1] += up
        dst_tmp = res_pyr[0]
        dst_tmp = dst_tmp * 255
        # dst_tmp = dst_tmp * 65535
        fusion_img = dst_tmp.astype("uint8")
        # fusion_img = dst_tmp.astype("uint16")
        return fusion_img
Example #23
def Retinex(input_img,filter=['Gauss'],ksize=[3],weight=[1],gstd=0,hstd=0): # gstd: std of the spatial Gaussian; hstd: std of the intensity-similarity Gaussian (bilateral only)
    '''Retinex illumination-equalization (SSR/MSR) pipeline:
        1. estimate the illumination component; 2. compute the reflectance component;
        3. enhance the reflectance component
        ***Note***
        input_img must be a single-channel image, i.e. len(input_img.shape) == 2
    '''
    multi_scale = len(filter)
    blur_img = []
    # check the input image dimensionality
    if(len(input_img.shape)!=2):
        raise RuntimeError('The dimensionality of input image must be 2')
    # check that filter, ksize and weight have the same length
    if(len(filter)!=len(ksize) or len(filter)!=len(weight)):
        raise RuntimeError('The number of filter, ksize and weight are not equal')
    # check the kernel sizes and the weight sum
    if(multi_scale==1):
        if (ksize[0] % 2 != 1):
            raise RuntimeError('Filter ksize must be odd')
        if(weight[0]!=1):
            raise RuntimeError('The weight must be 1')
    else:
        for i in ksize:
            if(i % 2 != 1):
                raise RuntimeError('Filter ksize must be odd')
        if(sum(weight)!=1):
            raise RuntimeError('The sum of weight must be 1')
    # estimate the illumination component
    def filter_blur(input_img=input_img,filter=filter[0],ksize=ksize[0],gstd=gstd,hstd=hstd):
        if (filter == 'Gauss'):
            blur = cv2.GaussianBlur(input_img, (ksize, ksize), gstd)
        elif (filter == 'Mean'):
            blur = cv2.blur(input_img, (ksize, ksize))
        elif (filter == 'Median'):
            blur = cv2.medianBlur(input_img, ksize)
        elif (filter == 'Bilateral'):
            blur = cv2.bilateralFilter(input_img, ksize, gstd, hstd)
        else:
            raise RuntimeError('Filter type error')
        return blur
    if(multi_scale==1):
        blur_img.append(filter_blur())
    else:
        for i in range(len(filter)):
            blur_img.append(filter_blur(filter=filter[i],ksize=ksize[i]))
    # compute the reflectance image: R(x,y) = f(x,y) / [f(x,y)*G(x,y)]
    # => log[R(x,y)] = log[f(x,y)] - log[f(x,y)*G(x,y)]
    # => R(x,y) = e^{log[f(x,y)] - log[f(x,y)*G(x,y)]}  (* denotes convolution)
    def replace_zeroes(input_img): # replace zero-intensity pixels
        min_nonzero=min(input_img[np.nonzero(input_img)]) # np.nonzero gives the row/column indices of the non-zero elements
        input_img[input_img==0]=min_nonzero
        return input_img
    if(multi_scale==1):
        dst_input=cv2.log(replace_zeroes(input_img)/255.0) # scale pixel values so cv2.log is well-behaved
        dst_blur=cv2.log(replace_zeroes(blur_img[0])/255.0)
        log_R=cv2.subtract(dst_input,dst_blur)
        exp_R=cv2.exp(log_R)
        dst_R=cv2.normalize(exp_R,None,0,255,cv2.NORM_MINMAX) # normalize pixel values to [0, 255]
        output_img=cv2.convertScaleAbs(dst_R) # convert to uint8 format
    else:
        intermediate_img = []
        dst_input = cv2.log(replace_zeroes(input_img) / 255.0)
        for i in range(len(blur_img)):
            dst_blur = cv2.log(replace_zeroes(blur_img[i]) / 255.0)
            log_R = cv2.subtract(dst_input, dst_blur)
            intermediate_img.append(weight[i]*log_R)
        exp_R = cv2.exp(sum(intermediate_img))
        dst_R = cv2.normalize(exp_R, None, 0, 255, cv2.NORM_MINMAX)
        output_img = cv2.convertScaleAbs(dst_R)
    # color enhancement (not implemented yet)
    return output_img
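A usage sketch of the MSR path with three Gaussian scales (the image path is a placeholder; kernel sizes must be odd and the weights must sum to 1):

import cv2

gray = cv2.imread('aerial.png', cv2.IMREAD_GRAYSCALE)
out = Retinex(gray, filter=['Gauss', 'Gauss', 'Gauss'],
              ksize=[15, 81, 201], weight=[0.25, 0.25, 0.5])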
Example #24
File: net.py Project: nalinaly/cnn-net
 def tanh(x):
     exp_x = cv2.exp(x)
     exp_x_ = cv2.exp(-x)
     fx = (exp_x - exp_x_) / (exp_x + exp_x_)
     return fx
Example #25
File: net.py Project: nalinaly/cnn-net
 def sigmoid(x):
     exp_x = cv2.exp(-x)
     fx = 1.0 / (1.0 + exp_x)
     return fx
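Treated as plain functions, both activations match their NumPy counterparts; note cv2.exp returns a column-shaped array for 1-D input, hence the ravel (a quick check):

import numpy as np

x = np.array([-1.0, 0.0, 1.0], np.float32)
print(np.allclose(np.ravel(tanh(x)), np.tanh(x), atol=1e-6))
print(np.allclose(np.ravel(sigmoid(x)), 1.0 / (1.0 + np.exp(-x)), atol=1e-6))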
Example #26
    ec1 = cv2.divide(ec, den)

    #step2
    tal = 10.0
    ec2 = cv2.absdiff(ec1, 0)
    ec2 = cv2.min(tal, ec2)
    ec2 = cv2.pow(ec2, alfa)
    ec2 = cv2.mean(ec2)
    ec2 = ec2[0]
    den = numpy.power(ec2, 1.0 / alfa)
    ec3 = cv2.divide(ec1, den)
    ec3 = cv2.normalize(ec3.astype('float32'), None, 0.0, 1.0, cv2.NORM_MINMAX)

    #step3
    ele = cv2.divide(ec3, tal)
    exp1 = cv2.exp(ele)
    exp2 = cv2.exp(-ele)
    num = cv2.subtract(exp1, exp2)
    den = cv2.add(exp1, exp2)
    tanh = cv2.divide(num, den, scale=tal)  # tal * tanh(ec3 / tal)
    ec4 = tanh
    ec4 = cv2.normalize(ec4.astype('float32'), None, -1.0, 1.3,
                        cv2.NORM_MINMAX)

    #if foto==332 or foto==343 or foto==358 or foto==654 or foto==658 or foto==662 or foto==665 or foto==668 or foto==672 or foto==919:
    #	cv2.imshow("Images", numpy.hstack([rosto, corrigida_gam, gaus, ec3, ec4]))
    #	cv2.waitKey(0)

    # Computing the Fourier transform of the image
Example #27
 def sigmoid(self, x):
     subbed = cv2.subtract(self.zeroes, x, dtype=cv2.CV_32F)
     added = cv2.add(self.ones, cv2.exp(subbed), dtype=cv2.CV_32F)
     return cv2.divide(self.ones, added, dtype=cv2.CV_32F)
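The method needs self.zeroes and self.ones from its host class, which is not shown; a minimal hypothetical harness (an assumption about the original code):

import cv2
import numpy as np

class Host:
    def __init__(self, height, width):
        self.zeroes = np.zeros((height, width), np.float32)
        self.ones = np.ones((height, width), np.float32)

    def sigmoid(self, x):
        subbed = cv2.subtract(self.zeroes, x, dtype=cv2.CV_32F)        # -x
        added = cv2.add(self.ones, cv2.exp(subbed), dtype=cv2.CV_32F)  # 1 + e^-x
        return cv2.divide(self.ones, added, dtype=cv2.CV_32F)         # 1 / (1 + e^-x)

print(Host(2, 2).sigmoid(np.zeros((2, 2), np.float32)))  # all 0.5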