Example #1
    def computeSphericalCoordinates(self):
        self.gammaG = 0.33333334
        self.gammaR = 0.33333334
        self.gammaB = 0.33333334
        self.theta = np.arctan((algo.G - self.gammaG) / (algo.R - self.gammaR))
        self.phi = np.arctan(
            (algo.B - self.gammaB) / cv2.sqrt((algo.G - self.gammaG)**2 +
                                              (algo.R - self.gammaR)**2))
        self.radius = cv2.sqrt((algo.B - self.gammaB)**2 +
                               (algo.G - self.gammaG)**2 +
                               (algo.R - self.gammaR)**2)

        self.deltaP = np.hstack(
            (self.theta.reshape(-1, 1), self.phi.reshape(-1, 1)))
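A minimal standalone sketch of the same conversion, assuming numpy is imported as np and that R, G, B are float32 planes with gamma = 1/3 per channel; np.arctan2 is used here so zero denominators do not raise warnings (note it returns full-range angles, unlike np.arctan):

def spherical_coordinates(R, G, B, gamma=1.0 / 3.0):
    # hypothetical helper, not part of the original class
    r, g, b = R - gamma, G - gamma, B - gamma
    theta = np.arctan2(g, r)
    phi = np.arctan2(b, np.sqrt(r ** 2 + g ** 2))
    radius = np.sqrt(r ** 2 + g ** 2 + b ** 2)
    return theta, phi, radius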
Example #2
def convert_to_nearest_label(label_path, image_size, apply_ignore=True):
    """
    Convert an RGB label image to a class-index label image
    :param label_path: File path of the RGB label image
    :param image_size: Size to resize the result image to
    :param apply_ignore: Apply the ignore label
    :return: 2D uint8 array of per-pixel class indices
    """
    label = np.array(
        Image.open(label_path).resize((image_size[0], image_size[1]),
                                      Image.ANTIALIAS))[:, :, :3]
    label = label.astype(np.float32)
    stacked_label = list()
    for index, mask in enumerate(label_mask):
        length = np.sum(cv2.pow(label - mask, 2), axis=2, keepdims=False)
        length = cv2.sqrt(length)
        stacked_label.append(length)

    stacked_label = np.array(stacked_label)
    stacked_label = np.transpose(stacked_label, [1, 2, 0])
    converted_to_classes = np.argmin(stacked_label, axis=2).astype(np.uint8)
    if apply_ignore:
        ignore_mask = (converted_to_classes == (len(label_mask) - 1)).astype(
            np.uint8)
        ignore_mask *= (256 - len(label_mask))
        converted_to_classes += ignore_mask

    return converted_to_classes
Example #3
def Pyramid(img):
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
	YUV = cv2.resize(YUV,(40,40))
	Y,U,V = cv2.split(YUV)
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	img = cv2.resize(YUV,(26,26))
	kernel1 = np.ones((3,1),np.float32)
	kernel2 = np.ones((1,3),np.float32)
	kernel1[0] = -1
	kernel1[1] = 0
	kernel2[0] = [-1,0,1]
	dst = cv2.filter2D(img,cv2.CV_16S,kernel1)
	dstv1 = np.int16(dst)
	dstv2 = cv2.pow(dstv1,2)
	dst = cv2.filter2D(img,cv2.CV_16S,kernel2)
	dsth1 = np.int16(dst)
	dsth2 = cv2.pow(dsth1,2)
	dst1 = dsth2 + dstv2
	dst1 = np.float32(dst1)
	dstfinal = cv2.sqrt(dst1).astype(np.uint8)
	finalh =  dsth1
	finalv = dstv1
	finalm = dstfinal
	UporDown = (finalv > 0 ).astype(int)
	LeftorRight = 2*(finalh > 0).astype(int)
	absh = np.float32(np.abs(finalh))
	absv = np.float32(np.abs(finalv)) * 1.732
	high = 4*(absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = []
	for x in range(6):
		hrt = np.zeros(out.shape[:2],np.uint8)
		features.append(hrt)
	for x in range(out.shape[:2][0]):
		for y in range(out.shape[:2][1]):
			z = out[x][y]
			if z == 4 or z == 6:
#				print "a",z
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
#				print "b",z
			else:
				features[z][x][y] = finalm[x][y]
#				print z
	kernelg1 = 0.125*np.ones((4,4),np.float32)
	kernelg2 = 0.25*np.ones((2,2),np.float32)
	lastFeatures = []
	for img in features:
		tote = cv2.sumElems(img)[0]  # sumElems returns a 4-tuple; keep the first channel
		tote = tote/img.size
		img = img/tote
		print(img)
		print(cv2.sumElems(img))
		print(img.size)
		lastFeatures.append(img)
	return lastFeatures
Example #4
def Corner(img, THRESHOLD):
    image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32)
    image *= 1. / 255

    Ix = cv2.Sobel(image, -1, 1, 0, ksize=3)
    Iy = cv2.Sobel(image, -1, 0, 1, ksize=3)

    Ix2 = Ix**2
    Iy2 = Iy**2
    Ixy = Ix * Iy

    Ix2 = cv2.GaussianBlur(Ix2, (3, 3), 0)
    Iy2 = cv2.GaussianBlur(Iy2, (3, 3), 0)
    Ixy = cv2.GaussianBlur(Ixy, (3, 3), 0)

    determinant = Ix2 * Iy2 - Ixy * Ixy
    trace = Ix2 + Iy2
    response = np.divide(determinant, trace)
    response[np.isnan(response)] = 0
    magnitude = cv2.sqrt(Ix**2 + Iy**2)
    orientations = np.arctan2(Iy, Ix)

    #     response = nonMaxSup(respons)
    keypoints = np.argwhere(response > THRESHOLD)
    keypoints = [
        cv2.KeyPoint(x[1], x[0], response[x[0], x[1]]) for x in keypoints
    ]
    outImage = cv2.drawKeypoints(img, keypoints, img)

    #     keyp = adaptiveNonMaxSup(image, response, 4)

    return keypoints, magnitude, np.rad2deg(orientations), outImage
Example #5
def GetFlowGradientMagnitude(flow, img_grad_x, img_grad_y):
    x1, x2 = cv2.split(cv2.Sobel(flow, cv2.CV_64F, 1, 0, ksize=5))
    y1, y2 = cv2.split(cv2.Sobel(flow, cv2.CV_64F, 0, 1, ksize=5))
    flow_grad_x = AbsoluteMaximum([x1, x2])
    flow_grad_y = AbsoluteMaximum([y1, y2])
    flow_gradient_magnitude = cv2.sqrt((flow_grad_x * flow_grad_x) +
                                       (flow_grad_y * flow_grad_y))
    reliability = np.zeros((flow.shape[0], flow.shape[1]))

    for x in tqdm(range(0, flow.shape[0]),
                  leave=True,
                  ascii=True,
                  desc="by one image length"):
        for y in range(1, flow.shape[1]):
            magn = (img_grad_x[x, y] * img_grad_x[x, y]) + (img_grad_y[x, y] *
                                                            img_grad_y[x, y])
            gradient_dir = np.array((img_grad_y[x, y], img_grad_x[x, y]))
            if (np.linalg.norm(gradient_dir) == 0):
                reliability[x, y] = 0
                continue
            gradient_dir = gradient_dir / np.linalg.norm(gradient_dir)
            center_pixel = np.array((x, y))
            p0 = center_pixel + gradient_dir
            p1 = center_pixel - gradient_dir
            if p0[0] < 0 or p1[0] < 0 or p0[1] < 0 or p1[1] < 0 or p0[
                    0] >= flow.shape[0] or p0[1] >= flow.shape[1] or p1[
                        0] >= flow.shape[0] or p1[1] >= flow.shape[1]:
                reliability[x, y] = -1000
                continue
            f0 = flow[int(p0[0]), int(p0[1])].dot(gradient_dir)
            f1 = flow[int(p1[0]), int(p1[1])].dot(gradient_dir)
            reliability[x, y] = f1 - f0

    return flow_gradient_magnitude, reliability
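Example #6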
def initial_ETF(input_img, size):
    global flowField
    global refinedETF
    global gradientMag
    
    src = cv2.imread(input_img, COLOUR_OR_GRAY)
    src_n = np.zeros(size, dtype = np.float32)
    src_n = cv2.normalize(src.astype('float32'), None, 0.0, 1.0, cv2.NORM_MINMAX)

    #Generate grad_x and grad_y
    grad_x = []
    grad_y = []
    grad_x = cv2.Sobel(src_n, cv2.CV_32FC1, 1, 0, ksize=5)
    grad_y = cv2.Sobel(src_n, cv2.CV_32FC1, 0, 1, ksize=5)
    
    #Compute gradient
    gradientMag = cv2.sqrt(grad_x**2.0 + grad_y**2.0) 
    gradientMag = cv2.normalize(gradientMag.astype('float32'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    h,w = src.shape[0], src.shape[1]
    for i in range(h):
        for j in range(w):
            u = grad_x[i][j]
            v = grad_y[i][j]
            n = np.array([v, u, 0.0])
            cv2.normalize(np.array([v, u, 0.0]).astype('float32'), n)
            flowField[i][j] = n
    rotateFlow(flowField, flowField, 90.0)
Example #7
def sobel(img):
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
    sobel_intensity = cv2.sqrt(
        cv2.addWeighted(cv2.pow(sobelx, 2.0), 1.0, cv2.pow(sobely, 2.0), 1.0,
                        0.0))
    return sobel_intensity
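cv2.magnitude computes the same sqrt(dx**2 + dy**2) in one call on floating-point inputs; a minimal equivalent sketch of the function above:

def sobel_magnitude(img):
    # sketch only: same result as the addWeighted/pow/sqrt chain above
    dx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    dy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
    return cv2.magnitude(dx, dy)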
Example #8
def farthest_point(defects, contour, centroid):
    if defects is not None and centroid is not None:
        s = defects[:, 0][:, 0]
        cx, cy = centroid

        x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
        y = np.array(contour[s][:, 0][:, 1], dtype=np.float)

        xp = cv2.pow(cv2.subtract(x, cx), 2)
        yp = cv2.pow(cv2.subtract(y, cy), 2)
        dist = cv2.sqrt(cv2.add(xp, yp))

        dist_max_i = np.argmax(dist)

        if dist_max_i < len(s):
            farthest_defect = s[dist_max_i]
            farthest_point = contour[farthest_defect][0]

            # squeeze the farthest point in the direction of the centroid
            # define the percentage of the centroid to squeeze
            pct_sq = 0.05

            if (farthest_point[0] > cx + (cx * pct_sq)):
                farthest_point[0] = farthest_point[0] - cx * pct_sq
            elif (farthest_point[0] < cx - (cx * pct_sq)):
                farthest_point[0] = farthest_point[0] + cx * pct_sq

            if (farthest_point[1] > cy + (cy * pct_sq)):
                farthest_point[1] = farthest_point[1] - cy * pct_sq
            elif (farthest_point[1] < cy - (cy * pct_sq)):
                farthest_point[1] = farthest_point[1] + cy * pct_sq

            return tuple(farthest_point)
        else:
            return None
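The cv2.pow / cv2.add / cv2.sqrt chain above is a plain Euclidean distance from the centroid; assuming the same float arrays x, y and scalars cx, cy, an equivalent one-line sketch (not part of the original function) is:

dist = np.hypot(x - cx, y - cy)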
Example #9
    def getCentroidandFingertip(self, contour, frame):
        if contour is not None:
            hull = cv2.convexHull(contour, returnPoints=False)
            self.defects = cv2.convexityDefects(contour, hull)

        moments = cv2.moments(contour)
        if moments['m00'] != 0:
            cx = int(moments['m10'] / moments['m00'])
            cy = int(moments['m01'] / moments['m00'])
            self.centroid = cx, cy
            cv2.circle(frame, self.centroid, 5, [255, 0, 255], -1)

        if self.centroid is not None and self.defects is not None:
            shape = self.defects[:, 0][:, 0]
            cx, cy = self.centroid

            x = np.array(contour[shape][:, 0][:, 0], dtype=np.float)
            y = np.array(contour[shape][:, 0][:, 1], dtype=np.float)

            pointX = cv2.pow(cv2.subtract(x, cx), 2)
            pointY = cv2.pow(cv2.subtract(y, cy), 2)
            distance = cv2.sqrt(cv2.add(pointX, pointY))
            maxDistanceIndex = np.argmax(distance)

            if maxDistanceIndex < len(shape):
                far_defect = shape[maxDistanceIndex]
                far_point = tuple(contour[far_defect][0])
                cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
                return far_point
            else:
                return None
Example #10
 def preprocess(self):
     start = time()
     channel = self.chan_combo.currentIndex()
     if channel == 0:
         img = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)
     elif channel == 4:
         b, g, r = cv.split(self.image.astype(np.float64))
         img = cv.sqrt(cv.pow(b, 2) + cv.pow(g, 2) + cv.pow(r, 2))
     else:
         img = self.image[:, :, 3 - channel]
     kernel = 3
     border = kernel // 2
     shape = (img.shape[0] - kernel + 1, img.shape[1] - kernel + 1, kernel,
              kernel)
     strides = 2 * img.strides
     patches = np.lib.stride_tricks.as_strided(img,
                                               shape=shape,
                                               strides=strides)
     patches = patches.reshape((-1, kernel, kernel))
     mask = np.full((kernel, kernel), 255, dtype=np.uint8)
     mask[border, border] = 0
     output = np.array([self.minmax_dev(patch, mask)
                        for patch in patches]).reshape(shape[:-2])
     output = cv.copyMakeBorder(output, border, border, border, border,
                                cv.BORDER_CONSTANT)
     self.low = output == -1
     self.high = output == +1
     self.process()
     self.info_message.emit(
         self.tr('Min/Max Deviation = {}'.format(elapsed_time(start))))
Example #11
 def __calc_gradient_image(self, img, image_depth=cv2.CV_8UC1):
     deriv_img = cv2.GaussianBlur(img, self.__gauss_kernel_size, self.__gauss_sigma)
     sx = cv2.Sobel(deriv_img, image_depth, 1, 0, ksize=self.__ksize_gradient)
     sy = cv2.Sobel(deriv_img, image_depth, 0, 1, ksize=self.__ksize_gradient)
     self.__deriv_img = cv2.sqrt(cv2.add(cv2.pow(sx, 2), cv2.pow(sy, 2)))
     # self.__deriv_img = cv2.Laplacian(deriv_img, image_depth, ksize=self.__ksize_gradient)
     return self.__deriv_img
Example #12
def flow2RGB(flow, max_flow_mag=5):
    """ Color-coded visualization of optical flow fields

        # Arguments
            flow: array of shape [:,:,2] containing optical flow
            max_flow_mag: maximal expected flow magnitude used to normalize. If max_flow_mag < 0 the maximal
            magnitude of the optical flow field will be used
    """
    hsv_mat = np.ones(shape=(flow.shape[0], flow.shape[1], 3),
                      dtype=np.float32) * 255
    ee = cv2.sqrt(flow[:, :, 0] * flow[:, :, 0] +
                  flow[:, :, 1] * flow[:, :, 1])
    angle = np.arccos(flow[:, :, 0] / ee)
    angle[flow[:, :, 0] == 0] = 0
    angle[flow[:, :, 1] < 0] = 6.2831853 - angle[flow[:, :, 1] < 0]
    angle = angle * 180 / 3.141
    hsv_mat[:, :, 0] = angle
    if max_flow_mag < 0:
        max_flow_mag = ee.max()
    hsv_mat[:, :, 1] = ee * 220.0 / max_flow_mag
    ret, hsv_mat[:, :, 1] = cv2.threshold(src=hsv_mat[:, :, 1],
                                          maxval=255,
                                          thresh=255,
                                          type=cv2.THRESH_TRUNC)
    rgb_mat = cv2.cvtColor(hsv_mat.astype(np.uint8), cv2.COLOR_HSV2BGR)
    return rgb_mat
Example #13
def compute_error(flow, gt_flow, invalid_mask):

    mag_flow = cv2.sqrt(gt_flow[:, :, 0] * gt_flow[:, :, 0] +
                        gt_flow[:, :, 1] * gt_flow[:, :, 1])

    ret, mask_to_large = cv2.threshold(src=mag_flow,
                                       thresh=900,
                                       maxval=1,
                                       type=cv2.THRESH_BINARY_INV)

    total_inp_mask = invalid_mask[:, :,
                                  0] + invalid_mask[:, :,
                                                    1] + invalid_mask[:, :, 2]
    ret, fg_mask = cv2.threshold(src=invalid_mask[:, :, 1],
                                 thresh=0.5,
                                 maxval=1,
                                 type=cv2.THRESH_BINARY)
    ret, total_mask = cv2.threshold(src=total_inp_mask,
                                    thresh=0.5,
                                    maxval=1,
                                    type=cv2.THRESH_BINARY)
    #mask_to_large = np.ones(fg_mask.shape)
    bg_mask = total_mask - fg_mask
    ee_base = computeEE(flow, gt_flow)
    result = dict()
    result["FG"] = computer_errors(ee_base, fg_mask * mask_to_large)
    result["BG"] = computer_errors(ee_base, bg_mask * mask_to_large)
    result["Total"] = computer_errors(ee_base, total_mask * mask_to_large)
    return result
Example #14
def findFarPoint(res, cx, cy, defects, max_con):
    try:
        s = defects[:,0][:,0]

        x = np.array(max_con[s][:,0][:,0], dtype = np.float)
        y = np.array(max_con[s][:,0][:,1], dtype = np.float)

        xp = cv.pow(cv.subtract(x, cx), 2)
        yp = cv.pow(cv.subtract(y, cy), 2)
        
        dist = cv.sqrt(cv.add(xp, yp))
        dist_max_i = np.argmax(dist)

        if dist_max_i < len(s):
            farthest_defect = s[dist_max_i]
            farthest_point = tuple(max_con[farthest_defect][0])

        cv.line(res, (cx,cy), farthest_point, (0,255,255), 2)
        
        return farthest_point
        
    except:
        farthest_point = 0
        
        return farthest_point
Example #15
def farthest(deflects, contour, center):
    """Megkeresi a középponttól legmesszebb levő convexity deflect-et, 
    ennek koordinátáival tér vissza"""

    if center is not None and deflects is not None:
        # determine the defect points
        d = deflects[:, 0][:, 0]
        cx, cy = center
        x = np.array(contour[d][:, 0][:, 0], dtype=np.float)
        y = np.array(contour[d][:, 0][:, 1], dtype=np.float)

        # compute the distances
        dx = cv2.pow(cv2.subtract(x, center[0]), 2)
        dy = cv2.pow(cv2.subtract(y, center[1]), 2)
        distance = cv2.sqrt(cv2.add(dx, dy))

        # return the farthest point
        max_distance = np.argmax(distance)
        if max_distance < len(d):
            f_deflect = d[max_distance]
            return tuple(contour[d[max_distance]][0])
        else:
            return None
    else:
        return None
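Example #16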
def filter_img(target_img, filter_type):
    """
    Performs the given edge detector on the given image

    Arguments:
        `target_img`: The image to detect edges from

        `filter_type`: The filter to be applied to the target image. Can be one
                       of 'canny', 'sobel' or 'none', if the image is to be
                       used as-is.

    Returns:
        An image containing the edges of the target image 
    """
    blurred_img = cv2.GaussianBlur(target_img, (5, 5), 1.4)
    filtered_img = None
    if filter_type == 'none':
        return target_img
    if filter_type == 'canny':
        filtered_img = cv2.Canny(blurred_img, 70, 210)
    else:
        dx = cv2.Sobel(blurred_img, cv2.CV_64F, 1, 0)
        dy = cv2.Sobel(blurred_img, cv2.CV_64F, 0, 1)
        edge_detected = cv2.sqrt(dx * dx + dy * dy)
        filtered_img = cv2.convertScaleAbs(edge_detected)
    return filtered_img
Example #17
    def LaplaceofGaussianClicked(self):
        img = cv2.imread('1.jpg')
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_out = cv2.GaussianBlur(gray, (5,5),0)

        x = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
        y = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
        sx = cv2.filter2D(img_out, cv2.CV_64F, x)
        sy = cv2.filter2D(img_out, cv2.CV_64F, y)

        hitung = cv2.sqrt((sx * sx) + (sy * sy))
        h, w = hitung.shape[:2]
        for i in np.arange(h):
            for j in np.arange(w):
                a = hitung.item(i, j)
                if a > 255:
                    a = 255
                elif a < 0:
                    a = 0
                hitung.itemset((i, j), a)  # write the clamped value back

        plt.imshow(hitung, cmap='gray', interpolation='bicubic')
        plt.xticks([]), plt.yticks([])
        print(hitung)
        plt.show()
Example #18
def getFurthestPoint(contourDefects, contour, centroid):
    """ get defect point furthest from centroid
    """
    furthestPoint = None

    if contourDefects.any() and all(centroid):  #data-structures have values
        defectsIndices = contourDefects[:,
                                        0][:,
                                           0]  # [all_defects, defectData][all_defects, contour_index_of_defectStartpoint]
        x_bar, y_bar = centroid

        x = np.array(contour[defectsIndices][:, 0][:, 0], dtype=np.float)
        y = np.array(contour[defectsIndices][:, 0][:, 1], dtype=np.float)

        x_sqDev = cv2.pow(cv2.subtract(x, x_bar), 2)
        y_sqDev = cv2.pow(cv2.subtract(y, y_bar), 2)

        distance = cv2.sqrt(cv2.add(x_sqDev, y_sqDev))
        maxDistIndex = np.argmax(distance)

        if maxDistIndex < len(defectsIndices):
            furthestDefect = defectsIndices[maxDistIndex]
            furthestPoint = tuple(contour[furthestDefect][0])

    return furthestPoint
Example #19
def create_std_dev_img(img):

    blur1 = cv2.multiply(cv2.blur(img[:,:,0],(3,3)),cv2.blur(img[:,:,0],(3,3)),None,1,cv2.CV_64F)
    blur3 = cv2.multiply(cv2.blur(img[:,:,1],(3,3)),cv2.blur(img[:,:,1],(3,3)),None,1,cv2.CV_64F)
    blur5 = cv2.multiply(cv2.blur(img[:,:,2],(3,3)),cv2.blur(img[:,:,2],(3,3)),None,1,cv2.CV_64F)

    blur2 = cv2.multiply(img[:,:,0],img[:,:,0],None,1,cv2.CV_64F)
    blur2 = cv2.blur(blur2,(3,3))
    blur4 = cv2.multiply(img[:,:,1],img[:,:,1],None,1,cv2.CV_64F)
    blur4 = cv2.blur(blur4,(3,3))
    blur6 = cv2.multiply(img[:,:,2],img[:,:,2],None,1,cv2.CV_64F)
    blur6 = cv2.blur(blur6,(3,3))

    blur_output = cv2.sqrt(cv2.absdiff(blur2,blur1))+cv2.sqrt(cv2.absdiff(blur4,blur3))+cv2.sqrt(cv2.absdiff(blur6,blur5))
    blur_output = np.uint8(blur_output)

    return blur_output
Example #20
def std_filter(image, kernel):
    assert (
        type(image) is np.ndarray and image.dtype == np.uint8 and len(image.shape) == 2
    ), "The input image has to be a uint8 2D numpy array."
    assert len(kernel) == 2, "The 'kernel' should be a tuple of 2 integers."
    image = image.astype(np.float)
    image = cv2.sqrt(cv2.blur(image ** 2, kernel) - cv2.blur(image, kernel) ** 2)
    return image
Example #21
def compute_DELTAE(imagename1,imagename2):
	img1 = cv2.imread(imagename1)
	img2 = cv2.imread(imagename2)

	
	

	img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2LAB)
	img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2LAB)
	#cv2.imwrite(imagename2+".png",img1)

	# s1 = cv2.absdiff(img1,img2)
	# s1 = np.float32(s1)
	# s1 = cv2.multiply(s1,s1)
	# s1 = cv2.sqrt(s1)

	# work in float32 so the differences and products below do not wrap/saturate in uint8
	L1,a1,b1 = cv2.split(img1.astype(np.float32))
	L2,a2,b2 = cv2.split(img2.astype(np.float32))

	dL = L1 - L2
	da = a1-a2
	db = b1-b2
	# cv2.imwrite(imagename2+".png",dL)
	
	# dL_2 = cv2.multiply(dL,dL)
	# da_2 = cv2.multiply(da,da)
	# db_2 = cv2.multiply(db,db)
	# dL_2 = np.float32(dL_2)
	# da_2 = np.float32(da_2)
	# db_2 = np.float32(db_2)
	# dE = cv2.sqrt( (dL_2) + (da_2) + (db_2))
	# mde = cv2.mean(dE)
	# print mde


	c1 = np.sqrt(cv2.multiply(a1,a1) + cv2.multiply(b1,b1))
	c2 = np.sqrt(cv2.multiply(a2,a2) + cv2.multiply(b2,b2))
	dCab = c1-c2
	dH = np.sqrt(cv2.multiply(da,da) + cv2.multiply(db,db) - cv2.multiply(dCab,dCab))
	sL = 1
	K1 = 0.045 #can be changed
	K2 = 0.015 #can be changed
	sC = 1+K1*c1
	sH = 1+K2 *c1
	kL = 1 #can be changed

	t1 = cv2.divide(dL,kL*sL)
	t2 = cv2.divide(dCab,sC)
	t3 = cv2.divide(dH,sH)
	t1 = cv2.multiply(t1,t1)
	t2 = cv2.multiply(t2,t2)
	t3 = cv2.multiply(t3,t3)
	t1 = np.float32(t1)
	t2 = np.float32(t2)
	t3 = np.float32(t3)
	dE = cv2.sqrt(t1+t2+t3)
	mde = cv2.mean(dE)
	return "{0:.4f}".format(mde[0])
Example #22
def local_sd(img, ksize=(3,3)):
    fimg = img.astype(np.float)
    # mean
    m = cv.blur(fimg, ksize)
    # mean squared
    m2 = cv.blur(np.multiply(fimg, fimg), ksize)
    # NB numerically unstable
    sd = cv.sqrt(m2 - m*m)
    return sd
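As the comment notes, m2 - m*m can dip slightly below zero from floating-point round-off, which corrupts the square root; a sketch of a clamped variant under the same assumptions (numpy as np, OpenCV as cv):

def local_sd_clamped(img, ksize=(3, 3)):
    fimg = img.astype(np.float64)
    m = cv.blur(fimg, ksize)
    m2 = cv.blur(fimg * fimg, ksize)
    var = np.maximum(m2 - m * m, 0)  # clamp round-off negatives to zero
    return cv.sqrt(var)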
Example #23
def prewitt(image):
    kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
    kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])

    vertical = ndimage.convolve(image.astype('double'), kernelx)
    horizontal = ndimage.convolve(image.astype('double'), kernely)

    return cv2.sqrt(cv2.pow(horizontal, 2) +
                    cv2.pow(vertical, 2)).astype('uint8')
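Example #24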
def multi_frame_differecing(Frames_five):

    Threshold = 180
    height, width = Frames_five[0].shape

    # Which frame is computed
    cur_frame = 2

    # Values specified by the paper
    LAO1 = np.zeros((height, width), np.uint8)
    LAO2 = np.zeros((height, width), np.uint8)
    D = np.zeros((4, height, width), np.float32)
    Dif = np.zeros((4, height, width), np.float32)

    D[0] = Frames_five[cur_frame - 2] - Frames_five[cur_frame]
    D[1] = Frames_five[cur_frame - 1] - Frames_five[cur_frame]
    D[2] = Frames_five[cur_frame + 1] - Frames_five[cur_frame]
    D[3] = Frames_five[cur_frame + 2] - Frames_five[cur_frame]

    Dif[0] = cv.sqrt(cv.pow(D[0], 2))
    Dif[1] = cv.sqrt(cv.pow(D[1], 2))
    Dif[2] = cv.sqrt(cv.pow(D[2], 2))
    Dif[3] = cv.sqrt(cv.pow(D[3], 2))

    Dif[0] = (Dif[0]).astype('uint8')
    Dif[1] = (Dif[1]).astype('uint8')
    Dif[2] = (Dif[2]).astype('uint8')
    Dif[3] = (Dif[3]).astype('uint8')

    ret, Dif[0] = cv.threshold(Dif[0], Threshold, 255, cv.THRESH_BINARY)
    ret, Dif[1] = cv.threshold(Dif[1], Threshold, 255, cv.THRESH_BINARY)
    ret, Dif[2] = cv.threshold(Dif[2], Threshold, 255, cv.THRESH_BINARY)
    ret, Dif[3] = cv.threshold(Dif[3], Threshold, 255, cv.THRESH_BINARY)

    LAO1 = D[1, :, :] * D[2, :, :]
    LAO2 = D[0, :, :] * D[3, :, :]

    MR = np.zeros((height, width), np.uint8)

    MR = cv.bitwise_or(LAO1, LAO2)

    MR = MR.astype('uint8')

    return MR
Example #25
def localSD(mat, n):
    
    mat=np.float32(mat)
    mu = cv2.blur(mat,(n,n))
    mdiff=mu-mat
    mat2=cv2.blur(np.float64(mdiff*mdiff),(n,n))
    sd = np.float32(cv2.sqrt(mat2))
    sdn=normalize(sd)

    return sdn
Example #27
def roberts(image):
    roberts_cross_v = np.array([[0, 0, 0], [0, 1, 0], [0, 0, -1]])

    roberts_cross_h = np.array([[0, 0, 0], [0, 0, 1], [0, -1, 0]])

    vertical = ndimage.convolve(image.astype('double'), roberts_cross_v)
    horizontal = ndimage.convolve(image.astype('double'), roberts_cross_h)

    return cv2.sqrt(cv2.pow(horizontal, 2) +
                    cv2.pow(vertical, 2)).astype('uint8')
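Example #28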
def get_sobel(img, kernel=None):
    sobelx = None
    sobely = None
    if kernel is not None:
        sobelx = cv2.Sobel(img, ddepth=-1, dx=1, dy=0, ksize=kernel)
        sobely = cv2.Sobel(img, ddepth=-1, dx=0, dy=1, ksize=kernel)
    else:
        sobelx = cv2.Sobel(img, ddepth=-1, dx=1, dy=0)
        sobely = cv2.Sobel(img, ddepth=-1, dx=0, dy=1)
    return cv2.sqrt(sobelx**2 + sobely**2)
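Example #29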
 def __thresh(self, img, tval=None):
     fram = cv2.absdiff(img, self.meancol).astype(np.float32)
     fram = cv2.multiply(fram, fram)
     fram = cv2.add(fram[:,:,1], fram[:,:,2])
     fram = (cv2.sqrt(fram)/2).astype(np.uint8)
     fram = fram/np.max(fram)
     fram = fram.astype(np.float32)
     fram = np.exp(-fram*10)
     if tval is not None:
         t,fram = cv2.threshold(fram, tval, 1.0, cv2.THRESH_BINARY)
     return fram
Example #30
def get_fingertip(defects_start_points, contour, centroid, frame_shape):
    farthest_points = []
    cx = centroid[0]
    cy = centroid[1]  # center point coordinates of hand contour:
    finger_tip_x = 0
    finger_tip_y = 0

    # retrieve the x and y coordinates of the defects start points:
    x = np.array(contour[defects_start_points][:, 0][:, 0], dtype=np.float)
    y = np.array(contour[defects_start_points][:, 0][:, 1], dtype=np.float)

    # calculate the euclidean distance from the centre to start points:
    Xpoints_subtract_Xcenter = cv2.pow(cv2.subtract(x, cx), 2)
    Ypoints_subtract_Ycenter = cv2.pow(cv2.subtract(y, cy), 2)
    distance = cv2.sqrt(
        cv2.add(Xpoints_subtract_Xcenter, Ypoints_subtract_Ycenter))
    max_distance_index = np.argmax(
        distance
    )  # fingertip point locates at the most distance from the center hand palm
    if max_distance_index < len(defects_start_points):
        finger_tip_index = defects_start_points[max_distance_index]
        finger_tip_x = contour[finger_tip_index][0][0]
        finger_tip_y = contour[finger_tip_index][0][1]

    # if fingertip point lies below the hand contour center point:
    if finger_tip_y > cy:
        find_finger_tip = False
        # Find all points that it is a distance from the center equal farthest_x_margin (right/left)
        for index in range(len(x)):
            x_p = int(x[index])
            y_p = int(y[index])
            if (x_p > cx - farthest_x_margin) and (
                    x_p < cx + farthest_x_margin) and (y_p < cy):
                farthest_points.append((x_p, y_p))

        # then find from the farthest points the closest one from the center and lies up to the center of the hand palm.
        for j in range(3):
            closest_x = closest_x_margin + j * 4
            for i in range(len(farthest_points)):
                if (farthest_points[i][0] > cx - closest_x) and (
                        farthest_points[i][0] < cx + closest_x):
                    finger_tip_x = farthest_points[i][0]
                    finger_tip_y = farthest_points[i][1]
                    find_finger_tip = True
                    break
            if find_finger_tip:
                break

    # update the finger tip point coordinate within the detection rectangle coordinates.
    finger_tip_x += int(detection_rec_x_start * frame_shape[1])
    finger_tip_y += int(detection_rec_y_start * frame_shape[0])
    finger_tip = (finger_tip_x, finger_tip_y)
    return finger_tip
Example #31
def contours(imgs):
    img, original = imgs

    height, width = img.shape
    mask = np.zeros((height, width), np.uint8)

    # ret, thresh = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
    contours, hierarchy = cv.findContours(img, cv.RETR_CCOMP, 4)
    img = rgb((img, original))
    height, width, channels = img.shape
    # search below this coordinates
    h_max = height * 4 / 5

    # maximum box width with respect to image width (proportion)
    w_max = width * 3 / 4

    selected_boxes = []
    # final_boxes = []

    for cnt in contours:
        rect = cv.minAreaRect(cnt)
        box = cv.boxPoints(rect)
        box = np.int0(box)

        y_max = box[np.argsort(box[:, 1])][-1][1]
        if y_max > h_max:
            x_min = box[np.argsort(box[:, 0])][0][0]
            x_max = box[np.argsort(box[:, 0])][-1][0]
            y_min = box[np.argsort(box[:, 1])][0][1]
            dx = x_max - x_min
            if dx < w_max:
                dy = y_max - y_min
                if dx > 0:
                    d = dy / dx
                    area = dx * dy
                    mask = cv.drawContours(mask, [box], 0, 255, cv.FILLED)
                    masked = cv.bitwise_and(original, original, mask=mask)
                    mean = cv.mean(masked)
                    l = cv.sqrt(mean[0] ** 2 + mean[1] ** 2 + mean[2] ** 2)[0]

                    if 0.05 < d < 0.3 and 1200 < area and l < 50:
                        selected_boxes.append(box)

    # n²: relation between boxes
    # for b0, b1 in [(x, y) for x in box for y in box]:

    return img

    img = original
    for box in selected_boxes:
        img = cv.drawContours(img, [box], 0, (0, 255, 0), 2)

    return img
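Example #32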
def get_farthest_point(defects, max_contour, centroid):
    """
        @params
        - defects : each convexity defect is represented as [[start_index, end_index, farthest_pt_index, fixpt_depth]]. Any deviation of the object from this hull can be considered as convexity defect.
        - max_contour : contour that has the biggest area. Ex :  [[[272 233]], [[271 234]]]
        - centroid : centroid of the max contour area
        @returns
        - farthest_point : farthest point from the centroid 
        @description:
            Get the farthest point from the centroid (center of the hand). That point should be the logically the vertice of the index finger.
    """
    if defects is not None and centroid is not None:
        # np.array([[[1,2,3,4]], [[5,6,7,8]], [[9,10,11,12]]])   => x[:, 0][:, 0]  => array([1, 5, 9])
        contours_start_indexes_defects = defects[:, 0][:, 0]
        x_of_contour_points = np.array(
            max_contour[contours_start_indexes_defects][:, 0][:, 0],
            dtype=np.float)
        y_of_contour_points = np.array(
            max_contour[contours_start_indexes_defects][:, 0][:, 1],
            dtype=np.float)

        cx, cy = centroid

        xp = cv.pow(cv.subtract(x_of_contour_points, cx), 2)
        yp = cv.pow(cv.subtract(y_of_contour_points, cy), 2)
        dist = cv.sqrt(cv.add(xp, yp))

        index_max_distance = -1
        not_blocking_counter = 0
        # To focus the index, the position of the point needs to be above the centroid of the hand
        condition_above = False
        while condition_above == False and not_blocking_counter < 5:
            not_blocking_counter += 1
            # index of point from the contour which has the biggest distance from one point of the contour to the centroid of the hand
            index_max_distance = np.argmax(dist)
            # !! Don't change it, this is how it works !!
            if index_max_distance < len(y_of_contour_points):
                if y_of_contour_points[index_max_distance] < cy:
                    condition_above = True
                else:
                    dist[index_max_distance] = -1

        if condition_above == False:
            return None
        else:
            if index_max_distance < len(contours_start_indexes_defects):
                farthest_defect = contours_start_indexes_defects[
                    index_max_distance]
                farthest_point = tuple(max_contour[farthest_defect][0])
                return farthest_point
            else:
                return None
Example #33
def logspectrum(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    h, w = frame.shape[:2]
    realInput = frame.astype(numpy.float64)
    dft_M = cv2.getOptimalDFTSize(w)
    dft_N = cv2.getOptimalDFTSize(h)
    dft_A = numpy.zeros((dft_N, dft_M, 2), dtype=numpy.float64)
    dft_A[:h, :w, 0] = realInput
    cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)
    image_Re, image_Im = cv2.split(dft_A)
    magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)
    log_spectrum = cv2.log(1.0 + magnitude)
    return log_spectrum
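A sketch of the same log-spectrum using NumPy's FFT instead of cv2.dft, assuming an already-grayscale float input (the cv2.getOptimalDFTSize padding above is skipped here, so values can differ slightly):

def logspectrum_np(frame_gray):
    # |F| from the complex FFT, then log(1 + |F|)
    F = np.fft.fft2(frame_gray.astype(np.float64))
    return np.log1p(np.abs(F))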
Example #34
def pearson(x, y, shape=(3, 3)):
    """Pearson Correlation Coeff filter"""
    mu_x = cv2.blur(x, shape)
    mu_y = cv2.blur(y, shape)
    xy = cv2.multiply(x, y)
    mu_xy = cv2.blur(xy, shape)

    cov = mu_xy - cv2.multiply(mu_x, mu_y)

    mu_x2 = cv2.blur(cv2.multiply(x, x), shape)
    mu_y2 = cv2.blur(cv2.multiply(y, y), shape)

    var_x = mu_x2 - cv2.multiply(mu_x, mu_x)
    var_x = var_x.astype(np.float64)
    sigma_x = cv2.sqrt(var_x)

    var_y = mu_y2 - cv2.multiply(mu_y, mu_y)
    var_y = var_y.astype(np.float64)
    sigma_y = cv2.sqrt(var_y)

    rho = cov / (cv2.multiply(sigma_x, sigma_y))
    return rho
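Example #35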
def varianceFilter(raw, isGaussian, ksize, gsigma, absnorm):
    raw64F = np.float64(raw)
    if isGaussian:
        mean = cv2.GaussianBlur(raw64F, (ksize, ksize), gsigma)
        meanSqr = cv2.GaussianBlur(raw64F**2, (ksize, ksize), gsigma)
        variance = cv2.absdiff(meanSqr, mean**2)
        variance = cv2.sqrt(variance)
        if absnorm <= 0:
            varianceNormed = cv2.normalize(variance,
                                           None,
                                           alpha=0,
                                           beta=255,
                                           norm_type=cv2.NORM_MINMAX)
            variance8Bit = np.uint8(varianceNormed)

        else:
            varianceNormed = variance / absnorm
            variance8Bit = np.uint8(varianceNormed)

    else:
        # box-filter branch; note the call is cv2.blur (lowercase), cv2.Blur does not exist
        mean = cv2.blur(raw64F, (ksize, ksize))
        meanSqr = cv2.blur(raw64F**2, (ksize, ksize))
        variance = cv2.absdiff(meanSqr, mean**2)
        variance = cv2.sqrt(variance)
        if absnorm <= 0:
            varianceNormed = cv2.normalize(variance,
                                           None,
                                           alpha=0,
                                           beta=255,
                                           norm_type=cv2.NORM_MINMAX)
            variance8Bit = np.uint8(varianceNormed)

        else:
            varianceNormed = variance / absnorm
            variance8Bit = np.uint8(varianceNormed)

    return variance8Bit
Example #36
    def contour_properties(cls):
        # get contour
        image = cv.imread("../lib/images/hand.png", 0)
        ret, image_thre = cv.threshold(image, 0, 255,
                                       cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
        hand_img_thr, hand_contours, hierarchy = cv.findContours(
            image_thre, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
        contour = hand_contours[0]

        cv.drawContours(image, [contour], -1, 0, 2)
        cv.imshow("Image", image)

        # height-to-width ratio
        x, y, w, h = cv.boundingRect(contour)
        print("h-w rate:", float(h) / w)

        # extent
        cnt_area = cv.contourArea(contour)
        rect_area = w * h
        print "extent:", float(cnt_area) / rect_area
        # solidity
        hull = cv.convexHull(contour)
        hull_area = cv.contourArea(hull)
        print "solidity:", float(cnt_area) / hull_area

        # Equivalent Diameter
        equi_diameter = cv.sqrt(4 * cnt_area / np.pi)

        # Direction
        (x, y), (MA, ma), angle = cv.fitEllipse(contour)
        print "direction:", angle

        # all pixel of contour-mask
        mask = np.zeros(image.shape[0:2], np.uint8)
        cv.drawContours(mask, [contour], 0, 255, -1)
        pixel_points = cv.transpose(np.array(np.nonzero(mask)))
        print(pixel_points)

        # max and min pixel location
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(image, mask=mask)
        print "min value ", min_val, "at", min_loc, "\n", "max value ", max_val, "at", max_loc

        # pole
        leftmost = tuple(contour[contour[:, :, 0].argmin()][0])
        rightmost = tuple(contour[contour[:, :, 0].argmax()][0])
        topmost = tuple(contour[contour[:, :, 1].argmin()][0])
        bottommost = tuple(contour[contour[:, :, 1].argmax()][0])
        print(leftmost, rightmost, topmost, bottommost)

        cv.waitKey(0)
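Example #37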
def get_std_dev_image(image, side = 5):
    # get the standard deviation for each pixel w.r.t. a side x side window

    # work with floats
    imgf = image.astype(np.float32)

    # get the mean
    mean = cv2.blur(imgf, (side, side))

    # get the mean of the squared image
    mean_sq = cv2.blur(cv2.multiply(imgf, imgf), (side, side))

    # std deviation = sqrt( expectation(x^2) - (expecation(x)^2) )
    std = cv2.sqrt(mean_sq - cv2.multiply(mean, mean))

    return std
Example #38
def calculate_parameters_from_mat(stack_of_y):
    weight = 1.0 / len(stack_of_y)
    yshape = stack_of_y[0].shape

    mean_y = np.zeros(yshape, dtype=np.float32)
    for y in stack_of_y:
        mean_y = cv2.addWeighted(mean_y, 1.0, y, weight, 0)

    mean_corrected_y = [y - mean_y for y in stack_of_y]

    element_wise_y_squared = [np.float32(y) * y for y in mean_corrected_y]
    mean_corrected_y_squared = np.zeros(yshape, dtype=np.float32)
    for y_squared in element_wise_y_squared:
        mean_corrected_y_squared = cv2.add(mean_corrected_y_squared, y_squared)

    mean_corrected_y_norm = cv2.sqrt(mean_corrected_y_squared)

    return mean_y, mean_corrected_y, mean_corrected_y_squared, mean_corrected_y_norm
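Example #39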
def farthest_point(defects, contour, centroid):
	s = defects[:,0][:,0]
	cx, cy = centroid
	
	x = np.array(contour[s][:,0][:,0], dtype=np.float)
	y = np.array(contour[s][:,0][:,1], dtype=np.float)
				
	xp = cv2.pow(cv2.subtract(x, cx), 2)
	yp = cv2.pow(cv2.subtract(y, cy), 2)
	dist = cv2.sqrt(cv2.add(xp, yp))

	dist_max_i = np.argmax(dist)

	if dist_max_i < len(s):
		farthest_defect = s[dist_max_i]
		farthest_point = tuple(contour[farthest_defect][0])
		return farthest_point
	else:
		return None	
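Example #40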
    def show_specturm(self, dft_result):
        """
        Show spectrun graph.
        """
        # Split fourier into real and imaginary parts
        image_Re, image_Im = cv2.split(dft_result)

        # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
        magnitude = cv2.sqrt(image_Re ** 2.0 + image_Im ** 2.0)

        # Compute log(1 + Mag)
        log_spectrum = cv2.log(1.0 + magnitude)

        # Rearrange the quadrants of Fourier image so that the origin is at
        # the image center
        # shift_dft(log_spectrum, log_spectrum)

        # normalize and display the results as rgb
        cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
        # plt.imshow(log_spectrum)
        # plt.show()
        cv2.imshow(self.spectrum_winname, log_spectrum)
Example #41
def remove_slp(img, gstd1=GSTD1, gstd2=GSTD2, gstd3=GSTD3, ksize=KSIZE, w=W):
    """Remove the SLP from kinect IR image
    
    The input image should be a float32 numpy array, and should NOT be a square root image
    Parameters
    ------------------
    img : (M, N) float ndarray
            Kinect NIR image with SLP pattern
    gstd1 : float
            Standard deviation of gaussian kernel 1
    gstd2 : float
            Standard deviation of gaussian kernel 2
    gstd3 : float
            Standard deviation of gaussian kernel 3
    ksize : int
            Size of kernel (should be odd)
    w   : float
            Weighting factor

    Returns
    ------------------
    img_noslp : (M,N) float ndarray
            Input image with SLP removed
    """
    gf1 = cv2.getGaussianKernel(ksize, gstd1)
    gf2 = cv2.getGaussianKernel(ksize, gstd2)
    gf3 = cv2.getGaussianKernel(ksize, gstd3)
    sqrtimg = cv2.sqrt(img)
    p1 = cv2.sepFilter2D(sqrtimg, -1, gf1, gf1)
    p2 = cv2.sepFilter2D(sqrtimg, -1, gf2, gf2)
    maxarr = np.maximum(0, (p1 - p2) / p2)
    minarr = np.minimum(w * maxarr, 1)
    p = 1 - minarr
    nc = cv2.sepFilter2D(p, -1, gf3, gf3) + EPS
    output = cv2.sepFilter2D(p*sqrtimg, -1, gf3, gf3)
    output = (output / nc) ** 2 # Since input is sqrted
    
    return output
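A hypothetical usage sketch based on the docstring above; the file name and read flags are assumptions, not from the original project:

# nir = cv2.imread("kinect_ir.png", cv2.IMREAD_ANYDEPTH).astype(np.float32)
# cleaned = remove_slp(nir)  # intensity image in, SLP-suppressed intensity image out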
Example #42
def dftSkew(im):

    # convert to grayscale
    # im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    h, w = im.shape[:2]

    realInput = im.astype(np.float64)

    # perform an optimally sized dft
    dft_M = cv2.getOptimalDFTSize(w)
    dft_N = cv2.getOptimalDFTSize(h)

    # copy A to dft_A and pad dft_A with zeros
    dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
    dft_A[:h, :w, 0] = realInput

    # no need to pad bottom part of dft_A with zeros because of
    # use of nonzeroRows parameter in cv2.dft()
    cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

    cv2.imshow("win", im)

    # Split fourier into real and imaginary parts
    image_Re, image_Im = cv2.split(dft_A)

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    magnitude = cv2.sqrt(image_Re ** 2.0 + image_Im ** 2.0)

    # Compute log(1 + Mag)
    log_spectrum = cv2.log(1.0 + magnitude)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    shift_dft(log_spectrum, log_spectrum)

    # normalize and display the results as rgb
    cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
    magMat = log_spectrum * 255
    magMat = np.uint8(np.around(magMat))
    cv2.imwrite("dft.png", magMat)
    rows = h
    cols = w
    #    //imwrite("imageText_mag.jpg",magImg);

    #    //Turn into binary image
    (_, magImg) = cv2.threshold(magMat, 160, 255, cv2.THRESH_BINARY)
    cv2.imwrite("dft1.png", magImg)
    #    //imwrite("imageText_bin.jpg",magImg);

    #    //Find lines with Hough Transformation
    pi180 = np.pi / 180
    linImg = np.zeros(magImg.shape)
    lines = cv2.HoughLines(magImg, 1, pi180, 100, 0, 0)
    print(lines)
    for line in lines[0]:
        rho = line[0]
        theta = line[1]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
        pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
        cv2.line(linImg, pt1, pt2, (255), 1)
    cv2.imwrite("dlines.png", linImg)
    #    //imwrite("imageText_line.jpg",linImg);
    #    if(lines.size() == 3){
    #        cout << "found three angels:" << endl;
    #            cout << lines[0][1]*180/CV_PI << endl << lines[1][1]*180/CV_PI << endl << lines[2][1]*180/CV_PI << endl << endl;
    #    }

    #    //Find the proper angel from the three found angels
    angel = 0
    piThresh = np.pi / 90
    pi2 = np.pi / 2
    for line in lines[0]:
        theta = line[1]
        if abs(theta) < piThresh or abs(theta - pi2) < piThresh:
            continue
        else:
            angel = theta
            break

    #    //Calculate the rotation angel
    #    //The image has to be square,
    #    //so that the rotation angel can be calculate right
    if angel < pi2:
        angel = angel
    else:
        angel = angel - np.pi

    if angel != pi2:
        angelT = rows * np.tan(angel) / cols
        angel = np.arctan(angelT)
    angelD = angel * 180 / np.pi

    # Rotate the image to recover
    rotMat = cv2.getRotationMatrix2D((cols / 2, rows / 2), angelD, 1.0)
    dstImg = cv2.warpAffine(im, rotMat, (cols, rows))
    cv2.imwrite("dresult.png", dstImg)
Example #43
__author__ = 'phillg07'

import cv2
import numpy as np

img = cv2.imread('/Users/phillg07/Pictures/tennis2.jpg', 0).astype(float)

mu = cv2.GaussianBlur(img, (41, 41), 20)
mu_2 = cv2.multiply(mu, mu)
mu_2g = cv2.GaussianBlur(mu_2, (41, 41), 20)
mu_sub = cv2.subtract(mu_2g, mu_2)
sigma = cv2.sqrt(mu_sub)

cv2.imshow('image', sigma)
cv2.waitKey(0)
cv2.destroyAllWindows()
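Example #44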
	def distance_points(self, start, end):
		return cv2.sqrt(cv2.add(cv2.pow(int(start[0]-end[0]),2),cv2.pow(int(start[1]-end[1]),2)))[0][0]
Example #45
import cv2
import numpy as np
#import hist

img = cv2.imread('sofseam.jpg',0)
im = cv2.imread('sofseam.jpg')
rows,cols = img.shape

dx = cv2.Sobel(img,cv2.CV_32F,1,0)
dy = cv2.Sobel(img,cv2.CV_32F,0,1)
dz = np.zeros(img.shape,np.float32)
dx2 = cv2.accumulateSquare(dx,dz)
dy2 = cv2.accumulateSquare(dy,dz)

lap = cv2.sqrt(dz)
#lap = cv2.convertScaleAbs(lap)

t = np.zeros(img.shape,np.float32)
t[0,:] = lap[0,:]

for r in range(1,rows):
    for c in xrange(0,cols):
        i = lap.item(r,c)
        
        if c==0:
            j = min(t[r-1,c:c+2])
        elif c==cols-1:
            j = min(t[r-1,c-1:c+1])
        else:
            j = min(t[r-1,c-1:c+2])
Example #46
def ReducedRun(img):
	img = cv2.resize(img,(40,40))
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
	Y,U,V = cv2.split(YUV)
	r,g,b = cv2.split(img)
	kernel1 = np.ones((3,1),np.float32)
	kernel2 = np.ones((1,3),np.float32)
	kernel1[0] = -1
	kernel1[1] = 0
	kernel2[0] = [-1,0,1]
	dst = cv2.filter2D(img,cv2.CV_16S,kernel1)
	rv,gv,bv = cv2.split(dst)
	rv1 = np.int16(rv)
	gv1 = np.float32(gv)
	bv1 = np.float32(bv)
	rv1 = cv2.pow(rv1,2)
	gv1 = cv2.pow(gv1,2)
	bv1 = cv2.pow(bv1,2)
	dst = cv2.filter2D(img,cv2.CV_16S,kernel2)
	rh,gh,bh = cv2.split(dst)
	rh1 = np.int16(rh)
	gh1 = np.float32(gh)
	bh1 = np.float32(bh)
	rh1 = cv2.pow(rh1,2)
	gh1 = cv2.pow(gh1,2)
	bh1 = cv2.pow(bh1,2)
	r1 = rh1 + rv1
	g1 = gh1 + gv1
	b1 = bh1 + bv1
	r1 = np.float32(r1)
	g1 = np.float32(g1)
	b1 = np.float32(b1)
	rfinal = cv2.sqrt(r1).astype(np.uint8)
	bfinal = cv2.sqrt(g1).astype(np.uint8)
	gfinal = cv2.sqrt(b1).astype(np.uint8)
	red = (rfinal > bfinal) & (rfinal > gfinal)
	red = red.astype(np.uint8)
	blue = (bfinal > rfinal) & (bfinal > gfinal)
	blue = blue.astype(np.uint8)
	green = (gfinal > bfinal) & (gfinal > rfinal)
	green = green.astype(np.uint8)
	redfh = red*rh
	bluefh = blue*bh
	greenfh = green*gh
	redfv = red*rv
	bluefv = blue*bv
	greenfv = green*gv
	redm = red*rfinal
	bluem = blue*bfinal
	greenm = green*gfinal
	finalh =  redfh + bluefh + greenfh
	finalv = redfv + bluefv + greenfv
	finalm = redm + bluem + greenm
	UporDown = (finalv > 0 ).astype(int)
	LeftorRight = 2*(finalh > 0).astype(int)
	absh = np.float32(np.abs(finalh))
	absv = np.float32(np.abs(finalv)) * 1.732
	high = 4*(absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = []
	for x in range(6):
		hrt = np.zeros(out.shape[:2],np.uint8)
		features.append(hrt)
	for x in range(out.shape[:2][0]):
		for y in range(out.shape[:2][1]):
			z = out[x][y]
			if z == 4 or z == 6:
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
			else:
				features[z][x][y] = finalm[x][y]
	lastFeatures = []	
	kernelg = np.ones((5,5),np.float32)
	for img in features:
		img1 =  cv2.filter2D(img,-1,kernelg)
		lastFeatures.append(img1)
	lastFeatures.append(finalm)
	lastFeatures.append(Y)
	lastFeatures.append(U)
	lastFeatures.append(V)

	return lastFeatures
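Example #47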
	def describe(self, image):
		# Convert to grayscale
		im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		features = []
		h, w = im.shape[:2]

		realInput = im.astype(np.float64)

		# perform an optimally sized dft
		dft_M = cv2.getOptimalDFTSize(w)
		dft_N = cv2.getOptimalDFTSize(h)

		# copy A to dft_A and pad dft_A with zeros
		dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
		dft_A[:h, :w, 0] = realInput

		# no need to pad bottom part of dft_A with zeros because of
		# use of nonzeroRows parameter in cv2.dft()
		cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

		# Split fourier into real and imaginary parts
		image_Re, image_Im = cv2.split(dft_A)

		# Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
		magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)

		# Compute log(1 + Mag)
		log_spectrum = cv2.log(1.0 + magnitude)

		# Rearrange the quadrants of Fourier image so that the origin is at
		# the image center
		self.shift_dft(log_spectrum, log_spectrum)

		# normalize and display the results as rgb
		cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)

		h, w = log_spectrum.shape[:2]
		
		# Compute the mean with a 3x3 mask
		if(self.maskSize == 3):
			i_h = 1
			while i_h < h:
				i_w = 1
				while i_w < w:
					s = log_spectrum[i_h-1, i_w-1] + log_spectrum[i_h-1, i_w] + log_spectrum[i_h-1, i_w+1] + log_spectrum[i_h, i_w-1] + log_spectrum[i_h, i_w] + log_spectrum[i_h, i_w+1] + log_spectrum[i_h+1, i_w-1] + log_spectrum[i_h+1, i_w] + log_spectrum[i_h+1, i_w+1]
					p = s / 9
					features.append(p)
					i_w += 3
				i_h += 3
		
		# Compute the mean with a 5x5 mask
		elif(self.maskSize == 5):
			i_h = 2
			while i_h < h:
				i_w = 2
				while i_w < w:
					s = log_spectrum[i_h-2, i_w-2] + log_spectrum[i_h-2, i_w-1] + log_spectrum[i_h-2, i_w] + log_spectrum[i_h-2, i_w+1] + log_spectrum[i_h-2, i_w+2]
					s += log_spectrum[i_h-1, i_w-2] + log_spectrum[i_h-1, i_w-1] + log_spectrum[i_h-1, i_w] + log_spectrum[i_h-1, i_w+1] + log_spectrum[i_h-1, i_w+2]
					s += log_spectrum[i_h, i_w-2] + log_spectrum[i_h, i_w-1] + log_spectrum[i_h, i_w] + log_spectrum[i_h, i_w+1] + log_spectrum[i_h, i_w+2]
					s += log_spectrum[i_h+1, i_w-2] + log_spectrum[i_h+1, i_w-1] + log_spectrum[i_h+1, i_w] + log_spectrum[i_h+1, i_w+1] + log_spectrum[i_h+1, i_w+2]
					s += log_spectrum[i_h+2, i_w-2] + log_spectrum[i_h+2, i_w-1] + log_spectrum[i_h+2, i_w] + log_spectrum[i_h+2, i_w+1] + log_spectrum[i_h+2, i_w+2]
					p = s / 25
					features.append(p)
					i_w += 5
				i_h += 5
		# Compute the mean with a 7x7 mask
		elif(self.maskSize == 7):
			i_h = 3
			while i_h < h - 7:
				i_w = 3
				while i_w < w - 7:
					s = log_spectrum[i_h-3, i_w-3] + log_spectrum[i_h-3, i_w-2] + log_spectrum[i_h-3, i_w-1] + log_spectrum[i_h-3, i_w] + log_spectrum[i_h-3, i_w+1] + log_spectrum[i_h-3, i_w+2] + log_spectrum[i_h-3, i_w+3]
					s += log_spectrum[i_h-2, i_w-3] + log_spectrum[i_h-2, i_w-2] + log_spectrum[i_h-2, i_w-1] + log_spectrum[i_h-2, i_w] + log_spectrum[i_h-2, i_w+1] + log_spectrum[i_h-2, i_w+2] + log_spectrum[i_h-2, i_w+3]
					s += log_spectrum[i_h-1, i_w-3] + log_spectrum[i_h-1, i_w-2] + log_spectrum[i_h-1, i_w-1] + log_spectrum[i_h-1, i_w] + log_spectrum[i_h-1, i_w+1] + log_spectrum[i_h-1, i_w+2] + log_spectrum[i_h-1, i_w+3]
					s += log_spectrum[i_h, i_w-3] + log_spectrum[i_h, i_w-2] + log_spectrum[i_h, i_w-1] + log_spectrum[i_h, i_w] + log_spectrum[i_h, i_w+1] + log_spectrum[i_h, i_w+2] + log_spectrum[i_h, i_w+3]
					s += log_spectrum[i_h+1, i_w-3] + log_spectrum[i_h+1, i_w-2] + log_spectrum[i_h+1, i_w-1] + log_spectrum[i_h+1, i_w] + log_spectrum[i_h+1, i_w+1] + log_spectrum[i_h+1, i_w+2] + log_spectrum[i_h+1, i_w+3]
					s += log_spectrum[i_h+2, i_w-3] + log_spectrum[i_h+2, i_w-2] + log_spectrum[i_h+2, i_w-1] + log_spectrum[i_h+2, i_w] + log_spectrum[i_h+2, i_w+1] + log_spectrum[i_h+2, i_w+2] + log_spectrum[i_h+2, i_w+3]
					s += log_spectrum[i_h+3, i_w-3] + log_spectrum[i_h+3, i_w-2] + log_spectrum[i_h+3, i_w-1] + log_spectrum[i_h+3, i_w] + log_spectrum[i_h+3, i_w+1] + log_spectrum[i_h+3, i_w+2] + log_spectrum[i_h+3, i_w+3]
					p = s / 49
					features.append(p)
					i_w += 7
				i_h += 7

		return features
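Example #48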
    fgmask = cv2.medianBlur(fgmask, 7)
    oldFgmask = fgmask.copy()
    image, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL,1)
    for contour in contours:
        x,y,w,h = cv2.boundingRect(contour)
        if w>40 and h>90:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2, lineType=cv2.LINE_AA)
            point = (int(x+w/2.0), int(y+h/2.0))
            points.add(point)
    for point in points:
        (xnew, ynew) = point
        if line1(xnew, ynew) > 0 and line2(xnew, ynew) < 0:
            pointInMiddle.add(point)
        for prevPoint in prev:
            (xold, yold) = prevPoint
            dist = cv2.sqrt((xnew-xold)*(xnew-xold)+(ynew-yold)*(ynew-yold))
            if dist[0] <= 120:
                if line1(xnew, ynew) >= 0 and line2(xnew, ynew) <= 0:
                    if line1(xold, yold) < 0: # Point entered from line above
                        pointFromAbove.add(point)
                    elif line2(xold, yold) > 0: # Point entered from line below
                        pointFromBelow.add(point)
                    else:   # Point was inside the block
                        if prevPoint in pointFromBelow:
                            pointFromBelow.remove(prevPoint)
                            pointFromBelow.add(point)

                        elif prevPoint in pointFromAbove:
                            pointFromAbove.remove(prevPoint)
                            pointFromAbove.add(point)
Example #49
def RunG(img):
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
	YUV = cv2.resize(YUV,(26,26))
	Y,U,V = cv2.split(YUV)
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	img = cv2.resize(YUV,(26,26))
	kernel1 = np.ones((3,1),np.float32)
	kernel2 = np.ones((1,3),np.float32)
	kernel1[0] = -1
	kernel1[1] = 0
	kernel2[0] = [-1,0,1]
	dst = cv2.filter2D(img,cv2.CV_16S,kernel1)
	dstv1 = np.int16(dst)
	dstv2 = cv2.pow(dstv1,2)
	dst = cv2.filter2D(img,cv2.CV_16S,kernel2)
	dsth1 = np.int16(dst)
	dsth2 = cv2.pow(dsth1,2)
	dst1 = dsth2 + dstv2
	dst1 = np.float32(dst1)
	dstfinal = cv2.sqrt(dst1).astype(np.uint8)
	finalh =  dsth1
	finalv = dstv1
	finalm = dstfinal
	UporDown = (finalv > 0 ).astype(int)
	LeftorRight = 2*(finalh > 0).astype(int)
	absh = np.float32(np.abs(finalh))
	absv = np.float32(np.abs(finalv)) * 1.732
	high = 4*(absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = []
	for x in range(6):
		hrt = np.zeros(out.shape[:2],np.uint8)
		features.append(hrt)
	kernelg = np.ones((6,6),np.float32)
	for x in range(out.shape[:2][0]):
		for y in range(out.shape[:2][1]):
			z = out[x][y]
			if z == 4 or z == 6:
#				print "a",z
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
#				print "b",z
			else:
				features[z][x][y] = finalm[x][y]
#				print z
	lastFeatures = []	
	for img in features:
		img1 =  cv2.filter2D(img,-1,kernelg)
		lastFeatures.append(img1)
	lastFeatures.append(finalm)
	lastFeatures.append(Y)
	lastFeatures.append(U)
	lastFeatures.append(V)

	integrals = []
	for img in lastFeatures:
		integ = cv2.integral(img)
		integrals.append(integ)
	print(integ.shape[:2])
	height, width = integ.shape[:2]
	a = readFeatures(26,26)
	results = []
	results = Evaluate(a, integrals,0,0)
	return results
Example #50
    # copy A to dft_A and pad dft_A with zeros
    dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
    dft_A[:h, :w, 0] = realInput

    # no need to pad bottom part of dft_A with zeros because of
    # use of nonzeroRows parameter in cv2.dft()
    cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

    cv2.imshow("win", im)

    # Split fourier into real and imaginary parts
    image_Re, image_Im = cv2.split(dft_A)

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)

    # Compute log(1 + Mag)
    log_spectrum = cv2.log(1.0 + magnitude)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    shift_dft(log_spectrum, log_spectrum)

    # normalize and display the results as rgb
    cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
    cv2.imshow("magnitude", log_spectrum)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #51
def find_hand_farthest_point(frame, hist, bill_center):
    MAX_DISTANCE_FROM_CENTER = 200
    
    hand_isolated_frame = hist_filter(frame, hist)

    gray = cv2.cvtColor(hand_isolated_frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, 0)
    #_,contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if contours is not None and len(contours) > 0:
        max_i = 0
        max_area = 0

        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)
            if area > max_area:
                max_area = area
                max_i = i

        largest_contour = contours[max_i]


        hull = cv2.convexHull(largest_contour)

        moments = cv2.moments(largest_contour)
        centroid = None
        if moments['m00'] != 0:
            cx = int(moments['m10']/moments['m00'])
            cy = int(moments['m01']/moments['m00'])
            centroid = (cx,cy)

        defects = None
        non_returnpoints_hull = cv2.convexHull(largest_contour, returnPoints=False)
        if non_returnpoints_hull is not None and len(non_returnpoints_hull) > 3 and len(largest_contour) > 3:
            defects = cv2.convexityDefects(largest_contour, non_returnpoints_hull)

        if centroid is not None and defects is not None and len(defects) > 0:
            s = defects[:,0][:,0]
            cx, cy = centroid

            x = np.array(largest_contour[s][:,0][:,0], dtype=np.float)
            y = np.array(largest_contour[s][:,0][:,1], dtype=np.float)

            to_delete = []
            for i in range(len(x)):
                if (x[i] - bill_center[0]) * (x[i] - bill_center[0])\
                + (y[i] - bill_center[1]) * (y[i] - bill_center[1])\
                > MAX_DISTANCE_FROM_CENTER * MAX_DISTANCE_FROM_CENTER:
                    to_delete.append(i)
            x = np.delete(x, to_delete)
            y = np.delete(y, to_delete)

            if len(x) > 0:
                xp = cv2.pow(cv2.subtract(x, cx), 2)
                yp = cv2.pow(cv2.subtract(y, cy), 2)
                dist = cv2.sqrt(cv2.add(xp, yp))

                dist_max_i = np.argmax(dist)

                if dist_max_i < len(s):
                    farthest_defect = s[dist_max_i]
                    farthest_point = tuple(largest_contour[farthest_defect][0])
                
                    for cnt in contours:
                        if cnt is not largest_contour:
                            cv2.drawContours(hand_isolated_frame, cnt, -1, (255,0,0), 3)
                        else:
                            cv2.drawContours(hand_isolated_frame, cnt, -1, (0,255,0), 3)

                    # draw the centroid and fingertip once all contours have been drawn
                    cv2.circle(hand_isolated_frame, centroid, 5, [0,255,0], -1)
                    cv2.circle(hand_isolated_frame, farthest_point, 5, [0,0,255], -1)
                    cv2.imshow("hand", hand_isolated_frame)
                    return dist[dist_max_i], farthest_point, hand_isolated_frame
            
    return None, None, None