Example #1
 def __calc_gradient_image(self, img, image_depth=cv2.CV_8UC1):
     deriv_img = cv2.GaussianBlur(img, self.__gauss_kernel_size, self.__gauss_sigma)
     sx = cv2.Sobel(deriv_img, image_depth, 1, 0, ksize=self.__ksize_gradient)
     sy = cv2.Sobel(deriv_img, image_depth, 0, 1, ksize=self.__ksize_gradient)
     self.__deriv_img = cv2.sqrt(cv2.add(cv2.pow(sx, 2), cv2.pow(sy, 2)))
     # self.__deriv_img = cv2.Laplacian(deriv_img, image_depth, ksize=self.__ksize_gradient)
     return self.__deriv_img
Example #2
 def _execute(self,x):
     x = x.astype(np.float32)
     x = x / ( np.mean( cv2.pow(np.abs(x), self.alpha) ) ** (1/self.alpha) + 1e-6)
     absx = np.abs(x)
     x = x / ( np.mean( cv2.pow(np.choose(absx > self.tau,(self.tau,absx)), self.alpha ) ) ** (1/self.alpha) + 1e-6 )
     x = self.tau * np.tanh( x / self.tau )
     return x
def getGradientImageInfo(gray):
	temp1=gray
	gx=np.array(0)
	gy=np.array(0)
	gd=gray
	gm=gray
	gx=cv2.Sobel(temp1, cv2.CV_16S, 1, 0, gx, 3, 1, 0, cv2.BORDER_DEFAULT)
	gy=cv2.Sobel(temp1, cv2.CV_16S, 0, 1, gy, 3, 1, 0, cv2.BORDER_DEFAULT)
	gm=cv2.add(cv2.pow(gx, 2), cv2.pow(gy, 2))		
	gm=pylab.sqrt(gm) 
	gd=np.arctan2(gy, gx)*(180/math.pi)  # gradient direction in degrees (arctan2 of gy, gx, not a sum of arctans)
	resolution=5
	gx=gx[::resolution*-1,::resolution]
	gy=gy[::resolution*-1,::resolution]
	gm=gm[::resolution*-1,::resolution]
	gd=gd[::resolution*-1,::resolution]
	X,Y = np.meshgrid( np.arange(0,2*math.pi,.2),np.arange(0,2*math.pi,.2))
	U = pylab.cos(X)
	V = pylab.sin(Y)
	q=matplotlib.pyplot.quiver(gx,gy)
#	key=matplotlib.pyplot.quiverkey(q, 1, 1, 5, 'test', coordinates='data', color='b')
	#matplotlib.pyplot.show()
	matplotlib.pyplot.close()
	#cv2.imshow('gd', gd)
	return gx,gy,gm,gd, resolution
Example #4
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # mean 1
    tmp = cv2.pow(cv2.absdiff(image, 0), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # mean 2
    tmp = cv2.pow(cv2.min(cv2.absdiff(image, 0), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # tanh
    exp_x = cv2.exp(cv2.divide(image, TAU))
    exp_negx = cv2.exp(cv2.divide(-image, TAU))
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = cv2.multiply(image, TAU)

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
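The snippet above relies on module-level names that are not shown (GAMMA, ALPHA, TAU and difference_of_gaussian). A minimal sketch of those definitions, assuming the usual defaults from the Tan-Triggs preprocessing paper; the exact constants and DoG sigmas below are assumptions, not taken from the source:

import cv2

GAMMA = 0.2   # assumed gamma for the power-law step
ALPHA = 0.1   # assumed exponent for the contrast-equalization means
TAU = 10.0    # assumed truncation threshold

def difference_of_gaussian(image, sigma0=1.0, sigma1=2.0):
    # Band-pass filtering as the difference of two Gaussian blurs.
    g0 = cv2.GaussianBlur(image, (0, 0), sigma0)
    g1 = cv2.GaussianBlur(image, (0, 0), sigma1)
    return cv2.subtract(g0, g1)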
Example #5
def Pyramid(img):
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
	YUV = cv2.resize(YUV,(40,40))
	Y,U,V = cv2.split(YUV)
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	img = cv2.resize(YUV,(26,26))
	kernel1 = np.ones((3,1),np.float32)
	kernel2 = np.ones((1,3),np.float32)
	kernel1[0] = -1
	kernel1[1] = 0
	kernel2[0] = [-1,0,1]
	dst = cv2.filter2D(img,cv2.CV_16S,kernel1)
	dstv1 = np.int16(dst)
	dstv2 = cv2.pow(dstv1,2)
	dst = cv2.filter2D(img,cv2.CV_16S,kernel2)
	dsth1 = np.int16(dst)
	dsth2 = cv2.pow(dsth1,2)
	dst1 = dsth2 + dstv2
	dst1 = np.float32(dst1)
	dstfinal = cv2.sqrt(dst1).astype(np.uint8)
	finalh =  dsth1
	finalv = dstv1
	finalm = dstfinal
	UporDown = (finalv > 0 ).astype(int)
	LeftorRight = 2*(finalh > 0).astype(int)
	absh = np.float32(np.abs(finalh))
	absv = np.float32(np.abs(finalv)) * 1.732  # weight the vertical response by sqrt(3)
	high = 4*(absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = []
	for x in range(6):
		hrt = np.zeros(out.shape[:2],np.uint8)
		features.append(hrt)
	for x in range(out.shape[:2][0]):
		for y in range(out.shape[:2][1]):
			z = out[x][y]
			if z == 4 or z == 6:
#				print "a",z
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
#				print "b",z
			else:
				features[z][x][y] = finalm[x][y]
#				print z
	kernelg1 = 0.125*np.ones((4,4),np.float32)
	kernelg2 = 0.25*np.ones((2,2),np.float32)
	lastFeatures = []	
	for img in features:
		tote = cv2.sumElems(img)[0]  # sumElems returns a 4-tuple; use the first channel
		tote = tote/img.size
		img = img/tote
		print(img)
		print(cv2.sumElems(img))
		print(img.size)
		lastFeatures.append(img)
	return lastFeatures
Example #6
def curvature_central(u):
    u=np.float32(u)
    u_x,u_y=np.gradient(u)
    norm=cv2.pow(np.power(u_x,2)+cv2.pow(u_y,2)+1E-10,0.5)
    N_x=cv2.divide(u_x,norm)
    N_y=cv2.divide(u_y,norm)
    N_xx,junk=np.gradient(N_x)
    junk,N_yy=np.gradient(N_y)
    return np.float32(N_xx+N_yy)
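A small self-contained check of the helper above: on a signed-distance function of a circle, the curvature of the level sets should be roughly 1/r at distance r from the center (the synthetic input below is an illustrative assumption):

import numpy as np
import cv2

y, x = np.mgrid[-50:51, -50:51].astype(np.float32)
u = np.sqrt(x ** 2 + y ** 2) - 25.0        # signed distance to a circle of radius 25
kappa = curvature_central(u)
print(kappa[50, 80])                       # point at radius 30; expect a value close to 1/30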
Example #7
 def expProcessing(self, sourceImg):
     # kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (11,11))
     # closed = cv2.morphologyEx(binaryImg, cv2.MORPH_CLOSE, kernel1)
     # div = np.float32(binaryImg)/(closed)
     # binaryImg = np.uint8(cv2.normalize(div, div, 0, 255, cv2.NORM_MINMAX))
     # res2 = cv2.cvtColor(res, cv2.COLOR_GRAY2BGR)
     gammaImg = np.float32(sourceImg) / 255.0  # scale to [0, 1] (replaces the legacy cv2.cv.ConvertScale call)
     gammaImg = cv2.pow(gammaImg, self.gammaCorrectionVal)
     return gammaImg
 def preprocess(self, image):
     image_in = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     corrected = np.uint8(cv2.pow(image_in / 255.0, 1.4) * 255)
     gray = cv2.cvtColor(corrected, cv2.COLOR_RGB2GRAY)
     thresh = cv2.threshold(gray, 0, 255,
                            cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
     return thresh
def random_manipulation(img, manipulation=None):
    if manipulation == None:
        manipulation = random.choice(MANIPULATIONS)

    if manipulation.startswith('jpg'):
        quality = int(manipulation[3:])
        out = BytesIO()
        im = Image.fromarray(img)
        im.save(out, format='jpeg', quality=quality)
        im_decoded = jpeg.JPEG(np.frombuffer(out.getvalue(),
                                             dtype=np.uint8)).decode()
        del out
        del im
    elif manipulation.startswith('gamma'):
        gamma = float(manipulation[5:])
        # alternatively use skimage.exposure.adjust_gamma
        # img = skimage.exposure.adjust_gamma(img, gamma)
        im_decoded = np.uint8(cv2.pow(img / 255., gamma) * 255.)
    elif manipulation.startswith('bicubic'):
        scale = float(manipulation[7:])
        im_decoded = cv2.resize(img, (0, 0),
                                fx=scale,
                                fy=scale,
                                interpolation=cv2.INTER_CUBIC)
    else:
        assert False
    return im_decoded
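The MANIPULATIONS list is not shown in this excerpt. A plausible definition that is consistent with the prefix parsing above (this exact list is an assumption, not taken from the source):

MANIPULATIONS = ['jpg70', 'jpg90',
                 'gamma0.8', 'gamma1.2',
                 'bicubic0.5', 'bicubic0.8', 'bicubic1.5', 'bicubic2.0']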
Example #10
def r2n(ros_msg):
    # if isinstance(ros_msg, Image): # have no idea why this doesn't work
    # if ros_msg._md5sum == OculusPing._md5sum:
    if ros_msg._type == "sonar_oculus/OculusPing":
        """
        If ping (OculusPing) is passed instead of ping.ping (sensor_msgs/Image),
        then gamma corrected image is decoded to return the original intensity.
        The type of returned image is np.float32.

        output = input ^ (gamma / 255.0)
        """
        img = r2n(ros_msg.ping)
        img = np.clip(
            cv2.pow(img / 255.0, 255.0 / ros_msg.fire_msg.gamma) * 255.0, 0,
            255)
        return np.float32(img)
    elif ros_msg._type == "sensor_msgs/Image":
        img = bridge.imgmsg_to_cv2(ros_msg, desired_encoding="passthrough")
        return np.array(img, "uint8")
    elif ros_msg._type == "sensor_msgs/PointCloud2":
        rows = ros_msg.width
        cols = sum(f.count for f in ros_msg.fields)
        return np.array([p for p in pc2.read_points(ros_msg)
                         ]).reshape(rows, cols)
    else:
        raise NotImplementedError("Not implemented from {} to numpy".format(
            str(type(ros_msg))))
def norm_illum_color(img, gamma=2.2):
    """ Normalizes illumination for colored image """
    img = np.float32(img)
    img /= 255.0
    img = cv2.pow(img, 1 / gamma) * 255
    img = np.uint8(img)
    return img
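A minimal usage sketch for the helper above (the file name is a placeholder). Note that it raises the normalized image to 1/gamma, so the default gamma=2.2 brightens mid-tones:

import cv2

img = cv2.imread("photo.jpg")            # BGR uint8
out = norm_illum_color(img, gamma=2.2)   # gamma-encoded, brighter mid-tones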
Example #12
def RMSD(target, master):
    # Note: use grayscale images only

    # Get width, height, and number of channels of the master image
    master_height, master_width = master.shape[:2]
    master_channel = len(master.shape)

    # Get width, height, and number of channels of the target image
    target_height, target_width = target.shape[:2]
    target_channel = len(target.shape)

    # Validate the height, width and channels of the input image
    if (master_height != target_height or master_width != target_width
            or master_channel != target_channel):

        return -1
    else:

        total_diff = 0.0
        dst = cv2.absdiff(master, target)
        dst = cv2.pow(dst, 2)
        mean = cv2.mean(dst)
        total_diff = mean[0]**(1 / 2.0)

        return total_diff
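A minimal usage sketch for the grayscale RMSD helper above (file names are placeholders):

import cv2

master = cv2.imread("master.png", cv2.IMREAD_GRAYSCALE)
target = cv2.imread("target.png", cv2.IMREAD_GRAYSCALE)
score = RMSD(target, master)   # 0.0 for identical images, -1 on a shape mismatch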
Example #13
def RMSD(target, master):
    # Get width, height, and number of channels of the master image
    master_height, master_width = master.shape[:2]
    master_channel = len(master.shape)

    # Get width, height, and number of channels of the target image
    target_height, target_width = target.shape[:2]
    target_channel = len(target.shape)

    # Validate the height, width and channels of the input image
    if (master_height != target_height or master_width != target_width
            or master_channel != target_channel):
        return -1
    else:
        total_diff = 0.0
        master_channels = cv2.split(master)
        target_channels = cv2.split(target)

        for i in range(0, len(master_channels), 1):
            dst = cv2.absdiff(master_channels[i], target_channels[i])
            dst = cv2.pow(dst, 2)
            mean = cv2.mean(dst)
            total_diff = total_diff + mean[0]**(1 / 2.0)

        return total_diff
Example #14
def correction(gray, gammaFactor):
    u"""
    skala szarości
    :param gray:
    :param gammaFactor:
    :return:
    """
    gamma_correction = 1.0 / gammaFactor

    img_tmp = gray / 255.0
    cv2.pow(img_tmp, gamma_correction, img_tmp)
    img_gamma = img_tmp * 255.0

    # convert back to uint8
    img_result = np.array(img_gamma, 'uint8')
    return img_result
Example #15
def farthest_point(defects, contour, centroid):
    if defects is not None and centroid is not None:
        s = defects[:, 0][:, 0]
        cx, cy = centroid
        x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
        y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
        xp = cv2.pow(cv2.subtract(x, cx), 2)
        yp = cv2.pow(cv2.subtract(y, cy), 2)
        dist = cv2.sqrt(cv2.add(xp, yp))
        dist_max_i = np.argmax(dist)
        if dist_max_i < len(s):
            farthest_defect = s[dist_max_i]
            farthest_point = tuple(contour[farthest_defect][0])
            return farthest_point
        else:
            return None
Example #16
def compute_ncc(l, r, mask):
    L_mean = cv2.mean(l, mask)
    R_mean = cv2.mean(r, mask)

    L = cv2.subtract(l, L_mean, mask=mask, dtype=cv2.CV_32S)
    R = cv2.subtract(r, R_mean, mask=mask, dtype=cv2.CV_32S)

    L2 = cv2.pow(L, 2)
    R2 = cv2.pow(R, 2)

    L2_sum = cv2.sumElems(L2)
    R2_sum = cv2.sumElems(R2)

    LR = cv2.multiply(L, R)
    LR_sum = cv2.sumElems(LR)

    return LR_sum[0] / sqrt(L2_sum[0] * R2_sum[0])
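For reference, the value returned above is the zero-mean normalized cross-correlation over the masked pixels, NCC = sum(L*R) / sqrt(sum(L^2) * sum(R^2)), where L and R are the mean-subtracted left and right patches.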
Example #17
def pycv_power(arr, exponent):
    """Raise the elements of a floating point matrix to a power. 
    It is 3-4 times faster than numpy's built-in power function/operator."""
    if arr.dtype not in [numpy.float32, numpy.float64]:
        arr = arr.astype('f')
    if arr.flags['C_CONTIGUOUS'] == False:
        arr = numpy.ascontiguousarray(arr)
    return cv2.pow(arr, exponent)
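A quick way to check the speed claim in the docstring; the array size and repetition count below are illustrative assumptions:

import timeit
import numpy
import cv2

arr = numpy.random.rand(2048, 2048).astype(numpy.float32)
t_np = timeit.timeit(lambda: numpy.power(arr, 2.2), number=20)
t_cv = timeit.timeit(lambda: cv2.pow(arr, 2.2), number=20)
print("numpy.power: %.3fs   cv2.pow: %.3fs" % (t_np, t_cv))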
Example #18
 def __call__(self, img, boxes=None):
     if len(img.shape) == 2:
         img = np.expand_dims(img, 0)
     out = np.zeros_like(img)
     for c in range(img.shape[0]):
         out[c] = cv2.pow(img[c] - img[c].min(), self.gamma)
     out = np.squeeze(out)
     return out if boxes is None else (out, boxes)
Example #19
	def apply_gamma_correction(self):
		new_img = copy.deepcopy(self.image)

		correction = 0.5
		# invGamma = 1.0 / correction
		new_img = new_img / 255.0
		new_img = cv2.pow(new_img, correction)
		return np.uint8(new_img * 255)
Example #21
def gammaCorrection(filename, pow_value=3):
	img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)

	transformed = img / 255.0
	transformed = cv2.pow(transformed, pow_value)
	transformed = np.uint8(transformed*255)

	return [img, transformed]
Example #22
 def local_normalization(float_im, k):
     blur = cv2.GaussianBlur(float_im, (0, 0), sigmaX=k, sigmaY=k)
     num = float_im - blur
     blur = cv2.GaussianBlur(num * num, (0, 0), sigmaX=k, sigmaY=k)
     den = cv2.pow(blur + 1e-7, 0.5)
     im = num / den
     # cv2.normalize(im, dst=im, alpha=0.0, beta=255., norm_type=cv2.NORM_MINMAX).astype('uint8')
     return im
Example #23
def convert_twin_weighted_mask(src1, src2, pow_opt):

    overlap_region = cv2.bitwise_and(src1, src2)
    image1 = convert_weighted_mask(src1)
    image2 = convert_weighted_mask(src2)
    if pow_opt != 0:
        image1 = cv2.pow(image1, pow_opt)
        image2 = cv2.pow(image2, pow_opt)
    image_summed_weight = image1 + image2 + 0.00000001
    result = image1 / image_summed_weight
    result2 = image2 / image_summed_weight
    # src1 = src1 / 255.
    # src2 = src2 / 255.
    # src1[overlap_region>0][0] = result[overlap_region>0][0]
    # src2[overlap_region>0] = result2[overlap_region>0][0]

    return result, result2
Example #24
 def preprocess(self):
     start = time()
     channel = self.chan_combo.currentIndex()
     if channel == 0:
         img = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)
     elif channel == 4:
         b, g, r = cv.split(self.image.astype(np.float64))
         img = cv.sqrt(cv.pow(b, 2) + cv.pow(g, 2) + cv.pow(r, 2))
     else:
         img = self.image[:, :, 3 - channel]
     kernel = 3
     border = kernel // 2
     shape = (img.shape[0] - kernel + 1, img.shape[1] - kernel + 1, kernel,
              kernel)
     strides = 2 * img.strides
     patches = np.lib.stride_tricks.as_strided(img,
                                               shape=shape,
                                               strides=strides)
     patches = patches.reshape((-1, kernel, kernel))
     mask = np.full((kernel, kernel), 255, dtype=np.uint8)
     mask[border, border] = 0
     progress = QProgressDialog(self.tr('Computing deviation...'),
                                self.tr('Cancel'), 0,
                                shape[0] * shape[1] - 1, self)
     progress.canceled.connect(self.cancel)
     progress.setWindowModality(Qt.WindowModal)
     blocks = [0] * shape[0] * shape[1]
     for i, patch in enumerate(patches):
         blocks[i] = self.minmax_dev(patch, mask)
         progress.setValue(i)
         if self.stopped:
             self.stopped = False
             return
     output = np.array(blocks).reshape(shape[:-2])
     output = cv.copyMakeBorder(output, border, border, border, border,
                                cv.BORDER_CONSTANT)
     self.low = output == -1
     self.high = output == +1
     self.min_combo.setEnabled(True)
     self.max_combo.setEnabled(True)
     self.filter_spin.setEnabled(True)
     self.process_button.setEnabled(False)
     self.process()
     self.info_message.emit(
         self.tr('Min/Max Deviation = {}'.format(elapsed_time(start))))
def farthest(defect, cnt, contour_centroid):
    if defect is not None and contour_centroid is not None:
        z = defect[:, 0][:, 0]
        cx, cy = contour_centroid

        x = np.array(cnt[z][:, 0][:, 0], dtype=np.float)
        y = np.array(cnt[z][:, 0][:, 1], dtype=np.float)
        xp = cv.pow(cv.subtract(x, cx), 2)
        yp = cv.pow(cv.subtract(y, cy), 2)
        distance = cv.sqrt(cv.add(xp, yp))
        MAX_distanceI = np.argmax(distance)

        if MAX_distanceI < len(z):
            FAR_defect = z[MAX_distanceI]
            FAR_point = tuple(cnt[FAR_defect][0])
            return FAR_point
        else:
            return None
Example #26
 def preprocess(self, image):
     image_in = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     corrected = np.uint8(cv2.pow(image_in / 255.0, 1.4) * 255)
     # scipy.misc.imsave("camera_data/gamma_corrected.jpg", corrected)
     gray = cv2.cvtColor(image_in, cv2.COLOR_RGB2GRAY)
     thresh = cv2.threshold(gray, 0, 255,
                            cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
     # scipy.misc.imsave("camera_data/thresh.jpg", thresh)
     return thresh
Example #27
 def save2jpg(self,fout):
     img8 = numpy.zeros((self.width,self.height,3),dtype= numpy.uint8)
     img = cv2.pow(self.img,1/2.2)
     cv2.convertScaleAbs(img,img8,256)
     proxy = cv2.resize(img8, (1280, 720), interpolation=cv2.INTER_AREA)
     #cv2.putText(proxy, "HELLO", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255),2)
     print "saving image: " + fout
     cv2.imwrite(fout, proxy)
     return "saving image: " + fout
Example #28
 def _gamma_correction(self, image, gamma):
     '''
     This relates to how cameras work (search the keywords for details).
     In short, when a camera stores an image it implicitly applies gamma
     encoding to mimic the brightness response of the human eye. When the
     true luminance needs to be shown on screen, gamma correction is applied
     to undo the effect of that encoding.
     '''
     np_img = np.uint8(cv2.pow(image / 255., gamma) * 255.)
     return np_img
def multi_frame_differecing(Frames_five):

    Threshold = 180
    height, width = Frames_five[0].shape

    # Which frame is computed
    cur_frame = 2

    # Values specified by the paper
    LAO1 = np.zeros((height, width), np.uint8)
    LAO2 = np.zeros((height, width), np.uint8)
    D = np.zeros((4, height, width), np.float32)
    Dif = np.zeros((4, height, width), np.float32)

    D[0] = Frames_five[cur_frame - 2] - Frames_five[cur_frame]
    D[1] = Frames_five[cur_frame - 1] - Frames_five[cur_frame]
    D[2] = Frames_five[cur_frame + 1] - Frames_five[cur_frame]
    D[3] = Frames_five[cur_frame + 2] - Frames_five[cur_frame]

    Dif[0] = cv.sqrt(cv.pow(D[0], 2))
    Dif[1] = cv.sqrt(cv.pow(D[1], 2))
    Dif[2] = cv.sqrt(cv.pow(D[2], 2))
    Dif[3] = cv.sqrt(cv.pow(D[3], 2))

    Dif[0] = (Dif[0]).astype('uint8')
    Dif[1] = (Dif[1]).astype('uint8')
    Dif[2] = (Dif[2]).astype('uint8')
    Dif[3] = (Dif[3]).astype('uint8')

    ret, Dif[0] = cv.threshold(Dif[0], Threshold, 255, cv.THRESH_BINARY)
    ret, Dif[1] = cv.threshold(Dif[1], Threshold, 255, cv.THRESH_BINARY)
    ret, Dif[2] = cv.threshold(Dif[2], Threshold, 255, cv.THRESH_BINARY)
    ret, Dif[3] = cv.threshold(Dif[3], Threshold, 255, cv.THRESH_BINARY)

    LAO1 = D[1, :, :] * D[2, :, :]
    LAO2 = D[0, :, :] * D[3, :, :]

    MR = np.zeros((height, width), np.uint8)

    MR = cv.bitwise_or(LAO1, LAO2)

    MR = MR.astype('uint8')

    return MR
Example #30
    def find_farthest_point(self):
        contour = self.handContour
        defects = self.defects
        centroid = self.handCenterPositions
        s = defects[:, 0][:, 0]
        cx, cy = centroid[0] if len(centroid) > 0 else (0, 0)

        x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
        y = np.array(contour[s][:, 0][:, 1], dtype=np.float)

        xp = cv2.pow(cv2.subtract(x, cx), 2)
        yp = cv2.pow(cv2.subtract(y, cy), 2)
        dist = cv2.sqrt(cv2.add(xp, yp))

        dist_max_i = np.argmax(dist)

        if dist_max_i < len(s):
            farthest_defect = s[dist_max_i]
            self.farthest_point = tuple(contour[farthest_defect][0])
Example #31
def AWDS(ref_file, dis_file):
    ref_img = cv2.imread(ref_file, cv2.IMREAD_GRAYSCALE)
    dis_img = cv2.imread(dis_file, cv2.IMREAD_GRAYSCALE)

    size, sigma = (25, 25), 0
    c, a = 0.0025 * 65535, 0.7
    ref_GM_0 = gradMag(ref_img)
    dis_GM_0 = gradMag(dis_img)
    mu1 = cv2.GaussianBlur(ref_GM_0, size, sigma)
    mu2 = cv2.GaussianBlur(dis_GM_0, size, sigma)
    weight_map = cv2.max(mu1, mu2)
    qualityMap = ((2 - a) * ref_GM_0 * dis_GM_0 + c) / \
                  (cv2.pow(ref_GM_0, 2) + cv2.pow(dis_GM_0, 2) - a * ref_GM_0 * dis_GM_0 + c)
    score_fine = cv2.sumElems(
        qualityMap * weight_map)[0] / cv2.sumElems(weight_map)[0]

    c, a = 0.0025 * 65535, -10
    ref_GM_1 = gradMag(cv2.blur(ref_img, (2, 2))[::2, ::2])
    dis_GM_1 = gradMag(cv2.blur(dis_img, (2, 2))[::2, ::2])
    mu1 = cv2.GaussianBlur(ref_GM_1, size, sigma)
    mu2 = cv2.GaussianBlur(dis_GM_1, size, sigma)
    weight_map1 = cv2.max(mu1, mu2)
    qualityMap1 = ((2 - a) * ref_GM_1 * dis_GM_1 + c) / \
                  (cv2.pow(ref_GM_1, 2) + cv2.pow(dis_GM_1, 2) - a * ref_GM_1 * dis_GM_1 + c)
    score_coarse = cv2.sumElems(
        qualityMap1 * weight_map1)[0] / cv2.sumElems(weight_map1)[0]

    def getGDoG(gb_size=(5, 5)):
        grad0 = ref_GM_0
        grad1 = gradMag(cv2.GaussianBlur(ref_img, (5, 5), 0))
        c, a = 0.0025 * 65535, -10
        GDoG = ((2 - a) * grad0 * grad1 + c) / (
            cv2.pow(grad0, 2) + cv2.pow(grad1, 2) - a * grad0 * grad1 + c)
        weight_map = cv2.max(cv2.GaussianBlur(grad0, (5, 5), 0),
                             cv2.GaussianBlur(grad1, (5, 5), 0))
        GDoG = cv2.sumElems(GDoG * weight_map)[0] / cv2.sumElems(weight_map)[0]
        norm_GDoG = sigmoid(2 * (97.49502237 * GDoG - 90.52996552))  # use the GDoG score computed above ('mean' is not defined yet at this point)
        return norm_GDoG

    GDoG = getGDoG(ref_img)
    mean = score_fine * (1 - GDoG) + GDoG * (score_coarse**4)

    return mean
def gamma_correction(image, gamma):
    """
    image Gamma Correction
    x: source img, array like
    gamma: >1 image darken; <1 image brighten
    """
    img = image / 255.0
    img = cv2.pow(img, gamma) * 255.0
    # img = img.clip(0,255)  # not needed: values stay in range, since 1 raised to any power is still 1
    return img.astype(np.uint8)
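As a quick numeric check of the docstring: for a mid-gray input of 128, gamma = 2.2 gives 255 * (128/255)**2.2 ≈ 56 (darker), while gamma = 0.45 gives ≈ 187 (brighter).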
Example #33
def arithmetic():
    img1 = cv2.imread('../data/women.jpg')[0:200, 0:200]
    img2 = cv2.imread('../data/stinkbug.png')[0:200, 0:200]
    add = cv2.add(img1, img2)
    cv2.imshow('add', add)
    diff = cv2.absdiff(img1, img2)
    cv2.imshow('diff', diff)
    power = cv2.pow(img1, 2)
    cv2.imshow('power', power)
    cv2.waitKey(0)
Example #34
def GuidedFiltF(img, r):
    eps = 0.04
    I = img
    I2 = cv2.pow(I, 2)
    mean_I = cv2.boxFilter(I, -1, ((2 * r) + 1, (2 * r) + 1))
    mean_I2 = cv2.boxFilter(I2, -1, ((2 * r) + 1, (2 * r) + 1))

    cov_I = mean_I2 - cv2.pow(mean_I, 2)

    var_I = cov_I

    a = cv2.divide(cov_I, var_I + eps)
    b = mean_I - (a * mean_I)

    mean_a = cv2.boxFilter(a, -1, ((2 * r) + 1, (2 * r) + 1))
    mean_b = cv2.boxFilter(b, -1, ((2 * r) + 1, (2 * r) + 1))

    q = (mean_a * I) + mean_b
    return (q)
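A minimal usage sketch, assuming a single-channel float image scaled to [0, 1] (the file name is a placeholder):

import cv2
import numpy as np

gray = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE)
I = np.float32(gray) / 255.0
smoothed = GuidedFiltF(I, 8)   # edge-preserving smoothing with a box window of radius 8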
Example #35
    def update(self, gt, pred):

        # color1 = visualize.flow2color(pred[0].asnumpy()[0].transpose(1,2,0))
        # color2 = visualize.flow2color(pred[1].asnumpy()[0].transpose(1,2,0))
        # color3 = visualize.flow2color(pred[2].asnumpy()[0].transpose(1,2,0))

        # visualize.plot(pred[0].asnumpy()[0,0], 'flownet-s2-prediction')
        # visualize.plot(color2, 'flownet-s1-prediction')
        # visualize.plot(color3, 'flownet-c-prediction')
        pred = pred[0].asnumpy()
        gt = gt[0].asnumpy()

        mask = (gt == gt)[:, 0, :, :]
        r = pred - gt
        r = cv2.pow(r, 2)
        r = cv2.pow(r.sum(axis=1), 0.5)

        self.sum_metric += r[mask].sum()
        self.num_inst += mask.sum()
def gamma_correction(ImageName, OUTPUT, sensitivity=0.4):
    image_v = cv2.imread(ImageName)
    image_v = image_v / 255.0
    gamma_corrected_image = cv2.pow(image_v, sensitivity)
    gamma_corrected_image *= 255
    gamma_corrected_image = gamma_corrected_image.astype(np.uint8)
    # cv2.imshow('Original Image',image_v)
    # cv2.imshow('Power Law Transformation',gamma_corrected_image)
    # cv2.waitKey(0)
    cv2.imwrite(OUTPUT, gamma_corrected_image)
Example #37
def build_is_hist(img):

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hsv = hsv.astype(np.float64)

    fh = np.array([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]])
    fv = fh.conj().T

    [H, S, I] = cv2.split(hsv)

    dIh = convolve(I, np.rot90(fh, 2))
    dIv = convolve(I, np.rot90(fv, 2))
    dIh[dIh == 0] = 0.00001
    dIv[dIv == 0] = 0.00001
    di = np.sqrt(dIh**2 + dIv**2).astype(np.uint32)

    dSh = convolve(S, np.rot90(fh, 2))
    dSv = convolve(S, np.rot90(fv, 2))
    dSh[dSh == 0] = 0.00001
    dSv[dSv == 0] = 0.00001
    ds = cv2.sqrt(cv2.pow(dSh, 2) + cv2.pow(dSv, 2)).astype(np.uint32)

    Imean = convolve(I, np.ones((5, 5)) / 25.0)
    Smean = convolve(S, np.ones((5, 5)) / 25.0)

    print("building Rho corrcoefs")
    rho = pearson(I, S, shape=(3, 3))
    rho[np.isnan(rho)] = 0
    rd = (rho * ds).astype(np.uint32)
    Hist_I = np.zeros((256, 1))
    Hist_S = np.zeros((256, 1))

    # TODO: needs optimizing
    print("building histograms...")
    for n in range(0, 255):
        temp = np.zeros(di.shape)
        temp[I == n] = di[I == n]
        Hist_I[n + 1] = np.sum(temp.flatten('F'))
        temp = np.zeros(di.shape)
        temp[I == n] = rd[I == n]
        Hist_S[n + 1] = np.sum(temp.flatten('F'))

    return Hist_I, Hist_S
Example #38
    def power_transformation(self, image):
        info = np.iinfo(
            image.dtype)  # Get the information of the incoming image type
        normalized = image.astype(
            np.float64) / info.max  # normalize the data to 0 - 1

        powered_img = cv2.pow(normalized, 1.2)
        powered_img = 255 * powered_img  # Now scale by 255
        powered_img = powered_img.astype(np.uint8)
        return powered_img
def farthest_point(defects, contour, centroid):
	s = defects[:,0][:,0]
	cx, cy = centroid
	
	x = np.array(contour[s][:,0][:,0], dtype=np.float)
	y = np.array(contour[s][:,0][:,1], dtype=np.float)
				
	xp = cv2.pow(cv2.subtract(x, cx), 2)
	yp = cv2.pow(cv2.subtract(y, cy), 2)
	dist = cv2.sqrt(cv2.add(xp, yp))

	dist_max_i = np.argmax(dist)

	if dist_max_i < len(s):
		farthest_defect = s[dist_max_i]
		farthest_point = tuple(contour[farthest_defect][0])
		return farthest_point
	else:
		return None	
def preprocessing(img):
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    equal = cv2.equalizeHist(img)
    ret, th = cv2.threshold(equal, 253, 0, cv2.THRESH_TOZERO)

    gamapowder = cv2.pow(th, 80)
    ret, th1 = cv2.threshold(gamapowder, 251, 255, cv2.THRESH_BINARY_INV)
    median = cv2.medianBlur(th1, 3)

    return img, equal, th, gamapowder, th1, median
Example #41
    def preprocess(self):
        channel = self.chan_combo.currentIndex()
        if channel == 0:
            img = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)
        elif channel == 4:
            b, g, r = cv.split(self.image.astype(np.float64))
            img = cv.sqrt(cv.pow(b, 2) + cv.pow(g, 2) + cv.pow(r, 2)).astype(np.uint8)
        else:
            img = self.image[:, :, 3 - channel]

        self.planes = [normalize_mat(cv.bitwise_and(np.full_like(img, 2**b), img), to_bgr=True) for b in range(8)]

        # rows, cols = img.shape
        # bits = 8
        # data = [np.binary_repr(img[i][j], width=bits) for i in range(rows) for j in range(cols)]
        # self.planes = [
        #     (np.array([int(i[b]) for i in data], dtype=np.uint8) * 2 ** (bits - b - 1)).reshape(
        #         (rows, cols)) for b in range(bits)]

        self.process()
Example #42
def getGradientImageInfo(I):
	g_x = cv2.Sobel(I, cv2.CV_64F, 1, 0)
	g_y = cv2.Sobel(I, cv2.CV_64F, 0, 1)  # ksize=3 as **kwargs

	X,Y = I.shape
	orientation = np.zeros(I.shape)
	magnitude = np.zeros(I.shape)
	sq_g_x = cv2.pow(g_x, 2)
	sq_g_y = cv2.pow(g_y, 2)
	fast_magnitude = cv2.pow(sq_g_x + sq_g_y, .5)

	# for x in range(X):
	# 	for y in range(Y):
	# 		orientation[x][y] = np.arctan2(g_y[x][y], g_x[x][y]) * (180 / math.pi)
	# 		magnitude[x][y] = math.sqrt(g_y[x][y] ** 2 + g_x[x][y] ** 2)


	#print fast_magnitude[0]
	#print magnitude[0]

	return fast_magnitude,orientation
Example #43
    def gammaCorrection(self, img):
        hsvArray = cv2.split(img)
        val = hsvArray[2]
        correction = 0.2
        inverse_gamma = 1.0 / correction
        val = val / 255.0
        val = cv2.pow(val, inverse_gamma)
        val = np.uint8(val * 255.0)
        hsvArray[2] = val

        img = cv2.merge((hsvArray))
        return img
Example #44
def imcv2_recolor(im, a = .1):
	t = [np.random.uniform()]
	t += [np.random.uniform()]
	t += [np.random.uniform()]
	t = np.array(t) * 2. - 1.

	# random amplify each channel
	im = im * (1 + t * a)
	mx = 255. * (1 + a)
	up = np.random.uniform() * 2 - 1
# 	im = np.power(im/mx, 1. + up * .5)
	im = cv2.pow(im/mx, 1. + up * .5)
	return np.array(im * 255., np.uint8)
Example #45
 def save2Thumbnail(self,fout):
     Tb = numpy.zeros((256,256,3),dtype= numpy.uint8)
     img8 = numpy.zeros((self.width,self.height,3),dtype= numpy.uint8)
     img = cv2.pow(self.img,1/2.2)
     cv2.convertScaleAbs(img,img8,256)
     big = max(self.width,self.height)
     small = min(self.width,self.height)
     scale = float(big)/float(small)
     newSize =  int(256 / scale)
     thumbnail = cv2.resize(img8, (256,newSize), interpolation=cv2.INTER_AREA)
     h, w = thumbnail.shape[:2]
     s =  int((256-h)/2.0)
     Tb[s:256-s,:,:] = thumbnail[:,:,:]
     cv2.imwrite(fout, Tb)
Example #46
def light_normalization(img):
    """
        Normalizes light conditions in the given B&W image
    """
    #Histogram equalization
    img = cv.equalizeHist(img)
    #Gamma correction with factor 0.8 (smaller factors -> more bright)
    img = img/255.0
    img = cv.pow(img,0.8)
    img = np.uint8(img*255)
    img = cv.fastNlMeansDenoising(img, None, 10, 7, 21)  # h=10, template window 7, search window 21
    #Gaussian filter to smooth
    img = cv.GaussianBlur(img,(3,3),0)
    return img
Example #47
def generateNewColor(img):
    b,g,r = cv2.split(img)
    c1 = VUtil.toBGR(np.uint8(np.arctan2(r,np.maximum(b,g))*255), 'gray')
    c2 = VUtil.toBGR(np.uint8(np.arctan2(g,np.maximum(r,b))*255), 'gray')
    c3 = VUtil.toBGR(np.uint8(np.arctan2(b,np.maximum(r,g))*255), 'gray')
    denominator = cv2.pow(r-g,2)+cv2.pow(r-b,2)+cv2.pow(g-b,2)
    l1 = VUtil.toBGR(cv2.pow(r-g,2)/denominator, 'gray')
    l2 = VUtil.toBGR(cv2.pow(r-b,2)/denominator, 'gray')
    l3 = VUtil.toBGR(cv2.pow(g-b,2)/denominator, 'gray')
    return np.vstack((np.hstack((c1,c2,c3)),np.hstack((l1,l2,l3))))
Example #48
def GammaCorrection(img, correction):
    """            
    Function definition
    +++++++++++++++++++
            
        .. py:function:: GammaCorrection(img, correction)

            Apply gamma correction on input image.
            
            :param uint8 img: grayscale image to be gamma corrected.
            :param float correction: gamma value.
               
            :rtype: uint8 img - two dimensional uint8 numpy array corresponding to gamma corrected image. 
    """
    img = img/255.0
    img = cv2.pow(img, correction)
    return np.uint8(img*255)
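A minimal usage sketch (the file name is a placeholder). Since the normalized image is raised directly to correction, values below 1.0 brighten and values above 1.0 darken:

import cv2

gray = cv2.imread("scan.png", cv2.IMREAD_GRAYSCALE)
brighter = GammaCorrection(gray, 0.5)
darker = GammaCorrection(gray, 2.0)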
Example #49
def log_chroma(img): 
    """Log-chromacity"""
    b,g,r = cv2.split(img)
    b = np.float32(b)
    g = np.float32(g)
    r = np.float32(r)
    sum = cv2.pow(b+g+r+0.1, 1/3.0)
    b = b/sum
    g = g/sum
    r = r/sum
    b = cv2.log(b)
    g = cv2.log(g)
    r = cv2.log(r)
    b = cv2.normalize(b, None, 0, 1, cv2.NORM_MINMAX)*255
    g = cv2.normalize(g, None, 0, 1, cv2.NORM_MINMAX)*255
    r = cv2.normalize(r, None, 0, 1, cv2.NORM_MINMAX)*255
    out = cv2.merge((np.uint8(b),np.uint8(g),np.uint8(r)))
    return out
Example #50
    def levels(self, minv, maxv, gamma=1.0, img=None):
        if img is None:
            img = self._img

        interval = maxv - minv

        _ = None
        if maxv < 255:
            _,img = cv2.threshold(img, maxv, 255, cv2.THRESH_TRUNC)
        if minv > 0:
            _,img = cv2.threshold(img, minv, 255, cv2.THRESH_TOZERO)
        if _ is not None:
            cv2.normalize(img, img, 0, 255, cv2.NORM_MINMAX)
        if gamma != 1.0:
            lut = np.array([i / 255.0 for i in range(256)])
            igamma = 1.0 / gamma
            lut = cv2.pow(lut, igamma) * 255.0
            abs64f = np.absolute(cv2.LUT(img, lut))
            img = np.uint8(abs64f)
        return _, img
Example #51
    def test_cudaarithm_arithmetic(self):
        npMat1 = np.random.random((128, 128, 3)) - 0.5
        npMat2 = np.random.random((128, 128, 3)) - 0.5

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
                                         cv.add(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
                                         cv.subtract(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
                                         cv.multiply(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
                                         cv.divide(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
                                         cv.absdiff(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
                                         cv.compare(npMat1, npMat2, cv.CMP_GE)))

        self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
                                         np.abs(npMat1)))

        self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
                                    cv.cuda.abs(cuMat1).download()))


        self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
                                                            npMat1))

        self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
                                         cv.pow(npMat1, 2)))
Example #52
def gamma_correction():
	cv2.destroyAllWindows()
	global pannelGammaCorrection, path, updated_path, gammaValue
	gamma = float(gammaValue.get())
	img = cv2.imread(updated_path,0)

	pathGammaCorr = 'Gamma Corrected.jpg'
	updated_path = pathGammaCorr

	img = image_resize(img)
	img = img/255.0
	img = cv2.pow(img, (1.0/gamma))
	
	# Write the image
	cv2.imwrite(pathGammaCorr, img*255)

	new = cv2.imread(pathGammaCorr)
	new = Image.fromarray(new)
	new = ImageTk.PhotoImage(new)

	pannelGammaCorrection = Label(image=new)
	pannelGammaCorrection.image = new
	pannelGammaCorrection.grid(row=0, column=2, columnspan=2, rowspan=24, sticky=W+E+N+S, padx=150, pady=10)
Example #53
def RMSD(questionID, target, master):
    # Get width, height, and number of channels of the master image
    master_height, master_width = master.shape[:2]
    master_channel = len(master.shape)

    # Get width, height, and number of channels of the target image
    target_height, target_width = target.shape[:2]
    target_channel = len(target.shape)

    # Validate the height, width and channels of the input image
    if (master_height != target_height or master_width != target_width or
            master_channel != target_channel):
        return -1
    else:
        nonZero_target = cv2.countNonZero(target)
        nonZero_master = cv2.countNonZero(master)

        if (questionID == 1):
            if (nonZero_target < 1200000):
                return -1
        elif (questionID == 2):
            if (nonZero_target < 700000):
                return -1
        else:
            return -1

        total_diff = 0.0
        master_channels = cv2.split(master)
        target_channels = cv2.split(target)

        for i in range(0, len(master_channels), 1):
            dst = cv2.absdiff(master_channels[i], target_channels[i])
            dst = cv2.pow(dst, 2)
            mean = cv2.mean(dst)
            total_diff = total_diff + mean[0]**(1 / 2.0)

        return total_diff
Example #54
def RMSD(target, master):
    # Note: use grayscale images only

    # Get width, height, and number of channels of the master image
    master_height, master_width = master.shape[:2]
    master_channel = len(master.shape)

    # Get width, height, and number of channels of the target image
    target_height, target_width = target.shape[:2]
    target_channel = len(target.shape)

    # Validate the height, width and channels of the input image
    if (master_height != target_height or master_width != target_width or
            master_channel != target_channel):
        return -1
    else:

        total_diff = 0.0
        dst = cv2.absdiff(master, target)
        dst = cv2.pow(dst, 2)
        mean = cv2.mean(dst)
        total_diff = mean[0]**(1 / 2.0)

        return total_diff
Example #55
def random_manipulation(img, manipulation=None):

    if manipulation == None:
        manipulation = np.random.choice(MANIPULATIONS)

    if manipulation.startswith('jpg'):
        quality = int(manipulation[3:])
        out = BytesIO()
        im = Image.fromarray(img)
        im.save(out, format='jpeg', quality=quality)
        im_decoded = jpeg.JPEG(np.frombuffer(out.getvalue(), dtype=np.uint8)).decode()
        del out
        del im
    elif manipulation.startswith('gamma'):
        gamma = float(manipulation[5:])
        # alternatively use skimage.exposure.adjust_gamma
        # img = skimage.exposure.adjust_gamma(img, gamma)
        im_decoded = np.uint8(cv2.pow(img / 255., gamma)*255.)
    elif manipulation.startswith('bicubic'):
        scale = float(manipulation[7:])
        im_decoded = cv2.resize(img,(0,0), fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC)
    else:
        return img
    return im_decoded
Example #56
def gamma_correction(img, correction):
    img = img/255.0
    img = cv2.pow(img, correction)
    return np.uint8(img*255)
Example #57
print("loading")

import cv2
import numpy as np

img_orig = cv2.imread("Lenna.png")
img_orig = np.double(img_orig) / 255.0

mul = float(input("multiplier (default 1.0): ") or 1.0)
gamma = float(input("gamma (default 1.0): ") or 1.0)

img_res = cv2.pow(img_orig, gamma)
img_res = cv2.scaleAdd(img_res, mul - 1.0, img_res)

cv2.imshow("original", img_orig)
cv2.moveWindow("original", 0, 0)
cv2.imshow("result", img_res)
cv2.moveWindow("result", 512, 0)

#cv2.imshow("original, result", np.hstack( (img_orig, img_res) ))
#cv2.moveWindow("original, result", 0, 0)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #58
def view_superwhite(img):
    threshold = 512/1023 
    img = img * (img > threshold)

    return img

if __name__ == '__main__':

    # read the TIFF data
    img = cv2.imread(File_name, cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH)

    # normalize to the 0..1 range
    normalized_val = 2**(4 * img.dtype.num) - 1
    img = img / normalized_val

    img = cv2.pow(img, 3.5)
    img = img * normalized_val
    img = np.uint8(img)
    img = img/normalized_val
    img = cv2.pow(img, 1/3.5)

    # preview the image
    cv2.imshow('bbb.tif', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # convert 0..1 -> 0..65535 for output
    out_img = img * normalized_val_uint8
    out_img = np.uint8(out_img)

    # save
Example #59
def correct_gamma(img, correction):

    temp = img.copy()/255.0
    temp = np.array(temp, dtype=np.float32)
    temp = cv2.pow(temp, 1./correction)*255.0
    return temp