Example #1
def process_frame(frame, center_ratio, marker_det, scale):
    alpha = None
    gamma = None
    pt_head = None
    pt_nose = None

    undist_orig = cvt.undo_distortion(frame)
    undist = cv2.resize(undist_orig,
                        (int(frame.shape[1] / 3), int(frame.shape[0] / 3)))

    center = (int(center_ratio[0] * undist.shape[1]),
              int(center_ratio[1] * undist.shape[0]))

    # pt_target is in the scaled frame coordinates, while marker coordinates are in full frame
    pt_target, theta, pt_rod, rod_centroid = find_target_position.get_target_position(
        undist, center, scale / 3, 1.0)
    pt_head, pt_nose = marker_det.detect_marker(frame)

    cv2.circle(undist, (int(rod_centroid[0]), int(rod_centroid[1])), 2,
               (0, 255, 255), 4)
    cv2.putText(undist, str(theta), (int(center[0]), int(center[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))

    if pt_nose is not None:
        pt_head = np.asarray([pt_head[0] / 3, pt_head[1] / 3])
        pt_nose = np.asarray([pt_nose[0] / 3, pt_nose[1] / 3])

        cv2.line(undist, (int(pt_head[0]), int(pt_head[1])),
                 (int(pt_target[0]), int(pt_target[1])), (0, 200, 50), 2,
                 cv2.LINE_AA)
        cv2.arrowedLine(undist, (int(pt_head[0]), int(pt_head[1])),
                        (int(pt_nose[0]), int(pt_nose[1])), (0, 255, 255), 2,
                        cv2.LINE_AA)
        cv2.arrowedLine(undist, (int(center[0]), int(center[1])),
                        (int(pt_target[0]), int(pt_target[1])),
                        (255, 255, 255), 2, cv2.LINE_AA)

        head_pose_vec = pt_nose - pt_head
        gt_vec = pt_head - pt_target
        # fastAtan2(y, x) returns the vector angle in degrees in [0, 360)
        alpha = cv2.fastAtan2(gt_vec[1], -gt_vec[0])
        gamma = cv2.fastAtan2(-head_pose_vec[1], head_pose_vec[0])
        cv2.putText(undist, str(alpha), (int(pt_head[0]), int(pt_head[1])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 200, 50))
        cv2.putText(undist, str(gamma), (int(pt_nose[0]), int(pt_nose[1])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))

    cv2.circle(undist, center, 5, (0, 255, 0), 6)

    return undist, alpha, gamma, theta, pt_target, pt_head, pt_nose
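Throughout these examples, cv2.fastAtan2(y, x) returns the angle of the vector (x, y) in degrees in the range [0, 360), with roughly 0.3 degrees of accuracy. A quick sanity check (a sketch, not part of the original code):

import cv2

print(cv2.fastAtan2(1.0, 1.0))    # ~45.0
print(cv2.fastAtan2(-1.0, 1.0))   # ~315.0 -- never negative, unlike math.atan2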
Example #2
def orientation(img, block_size, smooth=False):
    h, w = img.shape

    # add a reflected border to simplify kernel operations at the image borders
    borderedImg = cv2.copyMakeBorder(img, block_size, block_size, block_size,
                                     block_size, cv2.BORDER_DEFAULT)

    # apply a gradient along both axes
    sobelx = cv2.Sobel(borderedImg, cv2.CV_64F, 1, 0, ksize=3)
    sobely = cv2.Sobel(borderedImg, cv2.CV_64F, 0, 1, ksize=3)

    angles = np.zeros((h // block_size, w // block_size), np.float32)

    for i in xrange(w // block_size):
        for j in xrange(h // block_size):
            nominator = 0.
            denominator = 0.

            # calculate the summation of nominator (2*Gx*Gy)
            # and denominator (Gx^2 - Gy^2), where Gx and Gy
            # are the gradient values in the position (j, i)
            for k in xrange(block_size):
                for l in xrange(block_size):
                    posX = block_size - 1 + (i * block_size) + k
                    posY = block_size - 1 + (j * block_size) + l
                    valX = sobelx.item(posY, posX)
                    valY = sobely.item(posY, posX)

                    nominator += f(valX, valY)
                    denominator += g(valX, valY)

            # if the strength (norm) of the vector
            # is not greater than a threshold
            if math.sqrt(nominator**2 + denominator**2) < 1000000:
                angle = 0.
            else:
                # fastAtan2 already resolves the quadrant and returns degrees
                # in [0, 360), so halving it gives the orientation in
                # [0, 180) -- no radian-style adjustment is needed
                angle = cv2.fastAtan2(nominator, denominator) / 2.

            angles.itemset((j, i), angle)

    if smooth:
        angles = cv2.GaussianBlur(angles, (3, 3), 0, 0)
    return angles
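The doubled-angle block averaging above can also be done without pixel loops; a vectorized sketch, assuming block_size evenly divides the image and ignoring the reflected border used above:

import cv2
import numpy as np

def orientation_vectorized(img, block_size):
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
    h = img.shape[0] // block_size * block_size
    w = img.shape[1] // block_size * block_size
    bh, bw = h // block_size, w // block_size
    # per-block sums of 2*Gx*Gy and Gx^2 - Gy^2 via reshape-and-sum
    num = (2 * gx * gy)[:h, :w].reshape(bh, block_size, bw, block_size).sum(axis=(1, 3))
    den = (gx**2 - gy**2)[:h, :w].reshape(bh, block_size, bw, block_size).sum(axis=(1, 3))
    # halve the doubled angle; np.arctan2 resolves the quadrant, result in degrees
    angles = 0.5 * np.degrees(np.arctan2(num, den))
    angles[np.hypot(num, den) < 1e6] = 0.  # same magnitude threshold as above
    return angles.astype(np.float32)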
Example #4
def draw_arrow(im, x_from, y_from, x_to, y_to, thick_line):
    # relies on the module-level constants x_shift, y_shift and tipSize,
    # defined elsewhere in the source file
    if x_from == x_to and y_from == y_to:
        return im
    if x_from < x_to:
        y_from, y_to = y_from - y_shift, y_to - y_shift
    elif x_from > x_to:
        y_from, y_to = y_from + y_shift, y_to + y_shift

    if y_from < y_to:
        x_from, x_to = x_from - x_shift, x_to - x_shift
    elif y_from > y_to:
        x_from, x_to = x_from + x_shift, x_to + x_shift

    cv2.line(im, (int(x_from), int(y_from)), (int(x_to), int(y_to)), 0,
             thick_line)
    angle_deg = cv2.fastAtan2(y_to - y_from, x_to - x_from)
    angle_rad = angle_deg * math.pi / 180.0
    for sain in range(-1, 2, 2):
        x_tip = round(x_to -
                      tipSize * math.cos(angle_rad + sain * math.pi / 8))
        y_tip = round(y_to -
                      tipSize * math.sin(angle_rad + sain * math.pi / 8))
        cv2.line(im, (int(x_tip), int(y_tip)), (int(x_to), int(y_to)), 0,
                 thick_line)
    return im
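draw_arrow reads x_shift, y_shift and tipSize from module scope; they are defined elsewhere in its source file. A minimal usage sketch with assumed values for those constants:

import numpy as np
import cv2

x_shift, y_shift = 0, 0  # assumed: perpendicular offsets applied above
tipSize = 9              # assumed: arrow-head length in pixels

canvas = np.full((200, 200), 255, np.uint8)
canvas = draw_arrow(canvas, 30, 30, 170, 150, thick_line=2)
cv2.imwrite('arrow.png', canvas)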
Example #5
    def detect_hough_line(self):
        _grayimage = cv2.cvtColor(self._srcimage, cv2.COLOR_RGB2GRAY)
        _cannyimage = cv2.Canny(_grayimage,
                                CANNY_LOW_THRESHOLD,
                                CANNY_HIGH_THRESHOLD,
                                apertureSize=3)
        lines = cv2.HoughLinesP(_cannyimage,
                                1,
                                np.pi / 180,
                                160,
                                minLineLength=200,
                                maxLineGap=180)

        # find the longest detected line
        distance = []
        for line in lines:
            x1, y1, x2, y2 = line[0]
            dis = np.sqrt(pow((x2 - x1), 2) + pow((y2 - y1), 2))
            distance.append(dis)
        max_dis_index = distance.index(max(distance))
        max_line = lines[max_dis_index]
        x1, y1, x2, y2 = max_line[0]

        # get the rotation angle
        angle = cv2.fastAtan2((y2 - y1), (x2 - x1))
        centerpoint = (self._srcimage.shape[1] / 2,
                       self._srcimage.shape[0] / 2)
        rotate_mat = cv2.getRotationMatrix2D(centerpoint, angle, 1.0)  # get the rotation matrix
        correct_image = cv2.warpAffine(
            self._srcimage,
            rotate_mat, (self._srcimage.shape[1], self._srcimage.shape[0]),
            borderValue=(255, 255, 255))
        return correct_image
Example #6
    def detect(self, img):
        rectangles = self.model.detect_face(img, self.threshold)
        draw = img.copy()

        best_crop_img = img
        flag = False
        best_distance = 0

        # find the best face in img
        for rectangle in rectangles:
            if rectangle is not None:
                W = -int(rectangle[0]) + int(rectangle[2])
                H = -int(rectangle[1]) + int(rectangle[3])
                paddingH = 0.01 * W
                paddingW = 0.02 * H
                crop_img = img[int(rectangle[1] + paddingH):int(rectangle[3] -
                                                                paddingH),
                               int(rectangle[0] - paddingW):int(rectangle[2] +
                                                                paddingW)]
                if crop_img is None:
                    continue
                # skip degenerate crops (shape dims are never negative, so a
                # single < 1 check suffices)
                if crop_img.shape[0] < 1 or crop_img.shape[1] < 1:
                    continue

                eye_center = ((int(rectangle[5]) + int(rectangle[7])) / 2,
                              (int(rectangle[6]) + int(rectangle[8])) / 2)
                dy = int(rectangle[8]) - int(rectangle[6])
                dx = int(rectangle[7]) - int(rectangle[5])

                angle = cv2.fastAtan2(dy, dx)
                rot = cv2.getRotationMatrix2D(eye_center, angle, scale=1)
                rot_img = cv2.warpAffine(crop_img,
                                         rot,
                                         dsize=(crop_img.shape[1],
                                                crop_img.shape[0]))

                distance = np.sqrt(
                    (draw.shape[0] -
                     (int(rectangle[0]) + int(rectangle[2])) / 2)**2 +
                    (draw.shape[1] -
                     (int(rectangle[1]) + int(rectangle[3])) / 2)**2)
                if flag is False:
                    flag = True
                    best_crop_img = rot_img
                    best_distance = distance

                elif distance < best_distance:
                    best_crop_img = rot_img
                    best_distance = distance

                # cv2.rectangle(draw, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])),
                #               (0, 0, 255), 1)

                # for i in range(5, 15, 2):
                #     cv2.circle(draw, (int(rectangle[i + 0]), int(rectangle[i + 1])), 2, (0, 255, 0))

        return best_crop_img
Example #7
def oriented_gradient(grad_x, grad_y, degree, bin_size):
    """
    Returns the oriented gradient channel.

    :param grad_x: Gradient computed only for X axis.
    :param grad_y: Gradient computed only for Y axis.
    :param degree: Degree of the edge to be calculated
    :param bin_size: Degree margin around the given degree for which edges are calculated.

    For example, if degree is '30' and bin size is '10', this routine computes edges for the degree interval 20 to 40.
    """
    assert grad_x.shape == grad_y.shape

    lower_bound = degree - bin_size
    upper_bound = degree + bin_size

    rows, cols = grad_x.shape

    oriented = np.zeros((rows, cols), np.uint8)

    for i in xrange(rows):
        for j in xrange(cols):
            e_x = grad_x.item(i, j)
            e_y = grad_y.item(i, j)

            d = cv2.fastAtan2(e_y, e_x)

            if lower_bound < d < upper_bound:
                oriented.itemset((i, j), 255)

    return oriented
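The per-pixel loop can be replaced by cv2.phase, which computes fastAtan2-style degree angles for whole arrays; a vectorized sketch of the same channel:

import cv2
import numpy as np

def oriented_gradient_vectorized(grad_x, grad_y, degree, bin_size):
    # cv2.phase(x, y, angleInDegrees=True) is the array form of fastAtan2(y, x)
    angles = cv2.phase(grad_x.astype(np.float32),
                       grad_y.astype(np.float32), angleInDegrees=True)
    mask = (angles > degree - bin_size) & (angles < degree + bin_size)
    return np.where(mask, 255, 0).astype(np.uint8)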
Example #8
 def rotate_frame(self):
     self.set_button_false()
     cap = cv2.VideoCapture(video_path)
     ret, frame = cap.read()
     cap.release()
     frame2 = frame.copy()
     cv2.namedWindow('rotate frame', 0)
     cv2.setMouseCallback('rotate frame', self.get_points,
                          frame)  # report the coordinates of mouse clicks
     while 1:
         cv2.imshow('rotate frame', frame)
         k = cv2.waitKey(100)
         if k == ord('c'):  # reset the image
             frame = frame2.copy()
             cv2.setMouseCallback('rotate frame', self.get_points,
                                  frame)  # re-register the mouse callback
         elif k == 32 or k == 13:  # space or enter confirms and exits
             try:
                 theta = cv2.fastAtan2((self.y2 - self.y1),
                                       (self.x2 - self.x1))  # compute the rotation angle
             except ZeroDivisionError:
                 # defensive only: fastAtan2 handles x == 0 itself
                 print('zero')
                 theta = 90  # in degrees
             self.vth.view_para['theta'] = theta
             print(theta)  # log the assigned value
             cv2.destroyWindow('rotate frame')
             frame2 = self.vth.adjust_frame(frame2)
             self.show_frame(frame2, None, 0)
             break
         elif k == 27:  # Esc: exit without changes
             cv2.destroyAllWindows()
             break
Example #9
def load_and_align_data(image_dir, image_size, margin, gpu_memory_fraction,
                        sess, pnet, rnet, onet):
    # print (image_paths)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three stages
    factor = 0.709  # scale factor

    # print('Creating networks and loading parameters')
    # with tf.Graph().as_default():
    #     gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
    #     sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    #     with sess.as_default():
    #         pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    image_paths = []
    image_names = []
    for file in os.listdir(os.path.expanduser(image_dir)):
        image_paths.append(os.path.join(image_dir, file))
        image_names.append(file)

    tmp_image_paths = copy.copy(image_paths)
    print(tmp_image_paths)
    img_list = []
    for image in tmp_image_paths:
        img = misc.imread(os.path.expanduser(image), mode='RGB')
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(
            img, minsize, pnet, rnet, onet, threshold, factor)
        # print(bounding_boxes)
        # print("_ : ", _)
        if len(bounding_boxes) < 1:
            image_paths.remove(image)
            print("can't detect face, remove ", image)
            continue
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        # `_` is the 5-point landmark array from detect_face
        # (x coordinates in rows 0-4, y coordinates in rows 5-9)
        eye_center = ((_[0] + _[1]) / 2, (_[5] + _[6]) / 2)
        dy = _[6] - _[5]
        dx = _[1] - _[0]
        angle = cv2.fastAtan2(dy, dx)
        rot = cv2.getRotationMatrix2D(eye_center, angle, scale=1.0)
        cropped = cv2.warpAffine(cropped,
                                 rot,
                                 dsize=(cropped.shape[1], cropped.shape[0]))
        aligned = misc.imresize(cropped, (image_size, image_size),
                                interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list.append(prewhitened)
    images = np.stack(img_list)
    return images, image_names
Example #10
def line_angle(line):
    '''return angle range [-90., 90.)'''
    x0, y0, x1, y1 = line
    ang = cv2.fastAtan2(y1 - y0, x1 - x0)
    if ang < 90.:
        return ang
    elif 90. <= ang < 270.:
        return ang - 180.
    elif 270. <= ang:
        return ang - 360.
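A quick check of the wrapping, with lines given as (x0, y0, x1, y1) tuples:

print(line_angle((0, 0, 1, 1)))    # 45.0
print(line_angle((0, 0, -1, -1)))  # 45.0 -- 225 wraps to the same undirected line
print(line_angle((0, 0, 0, 1)))    # -90.0 -- the half-open boundary of [-90, 90)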
Example #11
 def calculate_gradient_direction(self):
     gx_Mat, gy_Mat = self.gradient_Mat
     direction_Mat = np.zeros(gx_Mat.shape)
     height, width = gx_Mat.shape
     for y in range(height):
         for x in range(width):
             # math.atan2 would need a * 180 / math.pi conversion;
             # fastAtan2 returns degrees directly
             direction = cv2.fastAtan2(gy_Mat[y, x], gx_Mat[y, x])
             direction_Mat[y, x] = direction
     self.direction_Mat = direction_Mat
Example #12
def warp_affine(image, points, scale=1.0):
    eye_center = ((points[0][0] + points[1][0]) / 2,
                  (points[0][1] + points[1][1]) / 2)
    dy = points[1][1] - points[0][1]
    dx = points[1][0] - points[0][0]
    # compute the rotation angle
    angle = cv2.fastAtan2(dy, dx)
    rot = cv2.getRotationMatrix2D(eye_center, angle, scale=scale)
    rot_img = cv2.warpAffine(image,
                             rot,
                             dsize=(image.shape[1], image.shape[0]))
    return rot_img
Example #13
def 轮廓求旋转角(cnt):  # "compute a contour's rotation angle"
    m = cv2.moments(cnt)
    xc = m['m10'] / m['m00']
    yc = m['m01'] / m['m00']
    # second-order central moments
    a = m['m20'] / m['m00'] - xc**2
    b = m['m11'] / m['m00'] - xc * yc
    c = m['m02'] / m['m00'] - yc**2
    θ = cv2.fastAtan2(2 * b, a - c) / 2

    (x, y), (w, h), _ = cv2.minAreaRect(cnt)
    power = w / h if w > h else h / w  # aspect ratio, always >= 1

    return θ, power - 1  # elongation: 0 for a square box
Example #14
def align_face(image, keypoints, scale=1.0):
    eye_center = (
        (keypoints['left_eye'][0] + keypoints['right_eye'][0]) * 0.5,
        (keypoints['left_eye'][1] + keypoints['right_eye'][1]) * 0.5,
    )
    dx = keypoints['right_eye'][0] - keypoints['left_eye'][0]
    dy = keypoints['right_eye'][1] - keypoints['left_eye'][1]

    angle = cv2.fastAtan2(dy, dx)
    rot_matrix = cv2.getRotationMatrix2D(eye_center, angle, scale=scale)
    rot_image = cv2.warpAffine(image,
                               rot_matrix,
                               dsize=(image.shape[1], image.shape[0]))
    return rot_image
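A usage sketch, assuming MTCNN-style output where keypoints is a dict with 'left_eye' and 'right_eye' (x, y) entries (the only keys the function reads); the file names are illustrative:

import cv2

img = cv2.imread('face.jpg')
keypoints = {'left_eye': (120, 140),   # assumed detector output
             'right_eye': (180, 132)}
aligned = align_face(img, keypoints)
cv2.imwrite('face_aligned.png', aligned)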
Example #15
def warp_affine(image, x1, y1, x2, y2, scale=1.0):
    eye_center = ((x1 + x2) / 2, (y1 + y2) / 2)

    dy = y2 - y1
    dx = x2 - x1
    # compute the rotation angle
    angle = cv2.fastAtan2(dy, dx)
    rot = cv2.getRotationMatrix2D(eye_center, angle, scale=scale)

    rot_img = cv2.warpAffine(image,
                             rot,
                             dsize=(image.shape[1], image.shape[0]))

    return rot_img
Example #16
def gradient(patch):
    dx, dy, _, _, _ = get_gradient(patch)
    height, width = patch.shape
    gradient_matrix = np.zeros((height, width, 4))

    for y in range(height):
        for x in range(width):
            gradient_matrix[y, x] = [
                dx[y, x], dy[y, x],
                linalg.norm([dx[y, x], dy[y, x]]),
                # `opencv` is presumably the cv2 module imported under an alias
                opencv.fastAtan2(dy[y, x], dx[y, x])
            ]

    return gradient_matrix
Example #17
    def gradient_matrix(self):
        h, w, _ = self.image.shape
        self.gradient_matrix = np.zeros((h, w, 4))
        for y in range(h):
            for x in range(w):
                d_x = float(self.I_x[y, x])
                d_y = float(self.I_y[y, x])

                # for this position store the gradient, its length, and its
                # angle (cv2.fastAtan2 returns degrees in [0, 360))
                self.gradient_matrix[y, x] = [
                    d_x, d_y,
                    LA.norm([d_x, d_y]),
                    cv2.fastAtan2(d_y, d_x)
                ]
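A vectorized sketch of the same matrix: cv2.cartToPolar returns the magnitudes and fastAtan2-style degree angles in a single call:

import cv2
import numpy as np

def gradient_matrix_vectorized(I_x, I_y):
    d_x = I_x.astype(np.float32)
    d_y = I_y.astype(np.float32)
    mag, ang = cv2.cartToPolar(d_x, d_y, angleInDegrees=True)
    return np.dstack([d_x, d_y, mag, ang])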
Example #18
def calc_degree(cnt):
    moments = cv2.moments(cnt)
    m00 = moments['m00']
    m10 = moments['m10']
    m01 = moments['m01']
    m20 = moments['m20']
    m11 = moments['m11']
    m02 = moments['m02']
    cx = int(m10 / m00)
    cy = int(m01 / m00)
    # principal-axis orientation from second-order central moments
    a = m20 / m00 - cx**2
    b = m11 / m00 - cx * cy
    c = m02 / m00 - cy**2
    theta = 0.5 * (cv2.fastAtan2(2 * b, a - c))
    return theta
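A sanity check with a synthetic contour (a sketch; the same moments-based formula appears in Example #13): a 100x20 rectangle rotated by 30 degrees should come back as roughly 30.

import cv2
import numpy as np

t = np.radians(30.0)
R = np.array([[np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)]])
rect = np.array([[-50., -10.], [50., -10.], [50., 10.], [-50., 10.]])
cnt = rect.dot(R.T).astype(np.int32).reshape(-1, 1, 2)
print(calc_degree(cnt))  # ~30.0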
Example #19
    def _computeOrientationHistogram(self, kp, nbrdRadius, binsNum, windowSigma, gaussianScaleFactor, computingDescrHist = False):
        histogram = [0.] * binsNum
        binsToDegreesRatio = binsNum/360.               # how we divide the various angles into bins
        x, y, scale = kp.getRoundedCoords()
        L = self.pyramid[kp.octave][0][scale]           # we select the smoothed image at the right octave and scale
        yUpperBound, xUpperBound = L.shape

        if computingDescrHist:
            # if we are computing the histogram for the descriptor
            # we need to adjust the point to the angle of the keypoint
            cosOri, sinOri = cos(radians(kp.orientation))/nbrdRadius, sin(radians(kp.orientation))/nbrdRadius

        k = 0
        for i in xrange(-nbrdRadius, nbrdRadius + 1):
            for j in xrange(-nbrdRadius, nbrdRadius + 1):
                if computingDescrHist:
                    i, j = i*cosOri - j*sinOri, i*sinOri + j*cosOri

                xOfst, yOfst = int(round(x + i)), int(round(y + j))

                if (xOfst + 1 >= xUpperBound) or (yOfst + 1 >= yUpperBound) or (xOfst - 1 < 0) or (yOfst - 1 < 0):
                    continue

                xDiff = L[yOfst][xOfst + 1] - L[yOfst][xOfst - 1]
                yDiff = L[yOfst + 1][xOfst] - L[yOfst - 1][xOfst]

                weight = exp((i**2 + j**2)*gaussianScaleFactor)
                magnitude = sqrt(xDiff**2 + yDiff**2)
                orientation = fastAtan2(yDiff, xDiff)                # NOTE: ori is an anticlockwise angle

                # if we are computing the histogram for the descriptor we adjust the detected orientation to achieve rotation independence
                if computingDescrHist:
                    orientation -= kp.orientation

                binNum = round(orientation*binsToDegreesRatio)
                if binNum >= binsNum:
                    binNum -= binsNum
                elif binNum < 0:
                    binNum += binsNum

                histogram[int(binNum)] += magnitude*weight
                k += 1

        if not computingDescrHist:
            #return histogram
            return self._smoothHistogram(histogram)
        else:
            return histogram
Example #20
def load_and_align_single_img(count, img, image_size, margin,
                              gpu_memory_fraction, sess, pnet, rnet, onet):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three stages
    factor = 0.709  # scale factor

    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet,
                                                      onet, threshold, factor)
    # print(bounding_boxes)
    # print("_ : ", _)
    if len(bounding_boxes) < 1:
        print("can't detect face, remove ")
        return [False, False]
    img_list = []
    nrof_faces = bounding_boxes.shape[0]
    det = bounding_boxes[:, 0:4]
    det_arr = []
    img_size = np.asarray(img.shape)[0:2]
    for i in range(nrof_faces):
        det_arr.append(np.squeeze(det[i]))
    for i, det in enumerate(det_arr):
        det = np.squeeze(det)
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        # scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        output_filename_n = "{}{}_{}{}".format("data/faces/", count, i, ".jpg")
        misc.imsave(output_filename_n, cropped)
        # `_` is the landmark array from detect_face
        # (x coordinates in rows 0-4, y coordinates in rows 5-9)
        eye_center = ((_[0][i] + _[1][i]) / 2, (_[5][i] + _[6][i]) / 2)
        dy = _[6][i] - _[5][i]
        dx = _[1][i] - _[0][i]
        angle = cv2.fastAtan2(dy, dx)
        rot = cv2.getRotationMatrix2D(eye_center, angle, scale=1.0)
        cropped = cv2.warpAffine(cropped,
                                 rot,
                                 dsize=(cropped.shape[1], cropped.shape[0]))
        aligned = misc.imresize(cropped, (image_size, image_size),
                                interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list.append(prewhitened)
    images = np.stack(img_list)
    return [True, images]
Example #21
def affine_transform(shape_face, frame):
    shape_img = frame.shape

    rows = shape_img[0]
    cols = shape_img[1]

    # slope of the line through two eye landmarks (points 37 and 46)
    w = float(shape_face[46][1]) - float(shape_face[37][1])
    w = float(w / (float(shape_face[46][0]) - float(shape_face[37][0])))

    angle = int(cv2.fastAtan2(w, 1))  # angle of that slope, in degrees

    affine = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    # rotating about the image center (cols/2, rows/2) is important here
    dst = cv2.warpAffine(frame, affine, (cols, rows))

    return dst
Example #22
def getOrientationAndMagnitude(image, show=False):
    '''
    Calculate orientation and magnitude of the gradient image
    and return it as vector arrays
    
    Uses cv2.fastAtan2 for fast orientation calc, cv2.magnitude
    for fast magnitude calculation
    
    Params:
        image (numpy array): grayscale image to compute this on
        show (bool): show intermediate steps
    
    Returns:
        (orientation, magnitude): numpy arrays
    '''
    sobelHorizontal = cv2.Sobel(image, cv2.CV_32F, 1, 0)
    sobelVertical = cv2.Sobel(image, cv2.CV_32F, 0, 1)

    h = sobelHorizontal
    v = sobelVertical

    orientation = np.empty(image.shape)
    magnitude = np.empty(image.shape)

    height, width = h.shape
    for y in range(height):
        for x in range(width):
            # note the argument order: fastAtan2(y, x) is called with
            # (h, v) = (dx, dy), so the angle is measured from the vertical
            orientation[y][x] = cv2.fastAtan2(h[y][x], v[y][x])

    magnitude = cv2.magnitude(h, v)

    if show:

        fig = figure()
        imshow(magnitude)
        matplotlib.pyplot.show()

        fig2 = figure()
        res = 7
        quiver(h[::res, ::res], -v[::res, ::res])
        imshow(image[::res, ::res], cmap=gray())
        matplotlib.pyplot.show()

    return orientation, magnitude
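Inside getOrientationAndMagnitude, the nested loop is the bottleneck; cv2.phase computes the whole orientation array at once. A drop-in sketch that preserves the argument order used above (h passed as the y-argument):

orientation = cv2.phase(v, h, angleInDegrees=True)  # per-pixel fastAtan2(h, v)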
Example #24
def hough_rotate_cv(image):
    """ not Long time consuming, not Strong generalization ability, not high accuracy, more super parameters"""
    img_np = utils.resize_by_percent(asarray(image), 1)
    if len(img_np.shape) == 3:
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    canny_image = cv2.Canny(img_np, 0, 255, apertureSize=3)
    # cv2.imshow('canny', canny_image)
    # cv2.waitKey(10)
    lines = cv2.HoughLinesP(canny_image,
                            1,
                            np.pi / 180,
                            160,
                            minLineLength=500,
                            maxLineGap=65)
    # lines = cv2.HoughLines(canny_image, 1, np.pi / 180, 160, max_theta=30, min_theta=0)

    # find the longest detected line
    distance = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        dis = np.sqrt(pow((x2 - x1), 2) + pow((y2 - y1), 2))
        distance.append(dis)
    max_dis_index = distance.index(max(distance))
    max_line = lines[max_dis_index]
    x1, y1, x2, y2 = max_line[0]

    # get the rotation angle
    angle = cv2.fastAtan2((y2 - y1), (x2 - x1))
    print(angle)

    if 0.5 <= angle <= 7:  # rotation thresholds set empirically to absorb detection error
        centerpoint = (image.shape[1] / 2, image.shape[0] / 2)
        rotate_mat = cv2.getRotationMatrix2D(centerpoint, angle, 1.0)  # get the rotation matrix
        correct_image = cv2.warpAffine(image,
                                       rotate_mat,
                                       (image.shape[1], image.shape[0]),
                                       borderValue=(255, 255, 255))

        # cv2.imshow('test', resize_by_percent(correct_image, 0.1))
        # cv2.waitKey(10)
        return correct_image
    else:
        return image
Example #25
def FLD(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)

    # Create default Fast Line Detector class
    fld = cv2.ximgproc.createFastLineDetector()

    # Get line vectors from the image
    lines = fld.detect(edges)
    if lines is not None:
        x1, y1, x2, y2 = lines[0][0]
        rotate_angle = cv2.fastAtan2((y2 - y1), (x2 - x1)) - 90
        line_on_image = fld.drawSegments(
            image, lines)  # draw founded red lines on the original image
    else:  # "lines is None" means this image don't need to be rotated
        line_on_image = 0
        rotate_angle = 0

    return thresh, line_on_image, rotate_angle
Example #26
def box2rect(box_4points):
    if box_4points.shape == (4, 2):
        b = box_4points
        center = tuple(np.sum(b, axis=0) / 4)
        width = (
            np.sqrt(pow(b[1][0] - b[2][0], 2) + pow(b[1][1] - b[2][1], 2)) +
            np.sqrt(pow(b[0][0] - b[3][0], 2) + pow(b[0][1] - b[3][1], 2))) / 2
        height = (
            np.sqrt(pow(b[1][0] - b[0][0], 2) + pow(b[1][1] - b[0][1], 2)) +
            np.sqrt(pow(b[2][0] - b[3][0], 2) + pow(b[2][1] - b[3][1], 2))) / 2
        dx = b[0][0] - b[1][0]
        dy = b[0][1] - b[1][1]
        if dx == 0 or dy == 0:
            theta = -90.0
        else:
            theta = -float(cv2.fastAtan2(dx, dy))
        if theta >= 0 or theta < -90:
            print("Error in  function_module box2rect")
        return center, (width, height), theta
    else:
        pass
Example #27
    def __rotate_calib(self, srcimage):
        blurimage = cv2.blur(srcimage, (3, 3))
        grayimage = cv2.cvtColor(blurimage, cv2.COLOR_BGR2GRAY)
        _, binimage = cv2.threshold(grayimage, 15, 255, cv2.THRESH_BINARY)
        cannyimage = cv2.Canny(binimage,
                               CANNY_LOW_THRESHOLD,
                               CANNY_HIGH_THRESHOLD,
                               apertureSize=3)
        lines = cv2.HoughLinesP(cannyimage,
                                1,
                                np.pi / 180,
                                160,
                                minLineLength=200,
                                maxLineGap=180)
        # find the longest detected line
        if lines is not None:
            distance = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                dis = np.sqrt(pow((x2 - x1), 2) + pow((y2 - y1), 2))
                distance.append(dis)
            max_dis_index = distance.index(max(distance))
            max_line = lines[max_dis_index]
            x1, y1, x2, y2 = max_line[0]

            # get the rotation angle
            angle = cv2.fastAtan2((y2 - y1), (x2 - x1))
            centerpoint = (srcimage.shape[1] / 2, srcimage.shape[0] / 2)
            rotate_mat = cv2.getRotationMatrix2D(centerpoint, angle,
                                                 1.0)  # rotation matrix
            inverse_rotate_mat = cv2.getRotationMatrix2D(
                centerpoint, -angle, 1.0)  # inverse rotation matrix
            correct_image = cv2.warpAffine(
                srcimage,
                rotate_mat, (srcimage.shape[1], srcimage.shape[0]),
                borderValue=(0, 0, 0))
            return correct_image, inverse_rotate_mat
        else:
            return srcimage, None
Example #28
def warp_affine(image, points, scale=1.0):
    dis1, dis2 = getDis(points[2][0], points[2][1], points[0][0], points[0][1],
                        points[1][0], points[1][1])
    eye_center = ((points[0][0] + points[1][0]) / 2,
                  (points[0][1] + points[1][1]) / 2)
    dy = points[1][1] - points[0][1]
    dx = points[1][0] - points[0][0]
    center = (points[2][0], points[2][1])
    # compute the rotation angle from the eye-to-eye vector
    angle = cv2.fastAtan2(dy, dx)
    print("angle:", angle)
    rot = cv2.getRotationMatrix2D(center, angle, scale=scale)  # rotation matrix
    rot_img = cv2.warpAffine(image,
                             rot,
                             dsize=(image.shape[1], image.shape[0]))
    delta_width = dis2 * 1
    delta_height1 = dis1 * 3
    delta_height2 = dis1 * 2
    x1 = max(round(center[0] - delta_width), 0)
    y1 = max(round(center[1] - delta_height1), 0)
    x2 = min(x1 + round(delta_width * 2), rot_img.shape[1])
    y2 = min(round(y1 + delta_height1 + delta_height2), rot_img.shape[0])
    return rot_img, (x1, y1, x2, y2)
Example #29
def orientFieldEstimation(orig_img):
    white = cv2.imread("white.jpg")  # blank canvas image, expected next to the script
    white = cv2.resize(white, (360, 360))
    img = np.float32(orig_img)

    rows = np.size(img, 0)
    cols = np.size(img, 1)

    shape_img = img.shape

    grad_x = np.zeros(shape_img, dtype=np.float32)
    grad_y = np.zeros(shape_img, dtype=np.float32)
    Vx = np.zeros(shape_img, dtype=np.float32)
    Vy = np.zeros(shape_img, dtype=np.float32)
    theta = np.zeros(shape_img, dtype=np.float32)
    phi_x_array = np.zeros(shape_img, dtype=np.float32)
    phi_y_array = np.zeros(shape_img, dtype=np.float32)
    magnitude_array = np.zeros(shape_img, dtype=np.float32)
    #or_array = np.zeros((22,22))

    grad_x = cv2.Sobel(img, cv2.CV_32FC1, 1, 0, ksize=3, borderType=cv2.BORDER_DEFAULT)
    grad_y = cv2.Sobel(img, cv2.CV_32FC1, 0, 1, ksize=3, borderType=cv2.BORDER_DEFAULT)

    block_div = 7
    right_angle  = math.pi / 2
    step = 14
    #orient_arr = list()

    m = 0
    n = 0

    for i in range(block_div, rows-block_div, step):
        for j in range(block_div, cols-block_div, step):
            sum_Vx = 0.0
            sum_Vy = 0.0
            for u in range(i-block_div, i+block_div):
                for v in range(j-block_div, j+block_div):
                    # truncate the first channel of the multichannel gradient to int
                    grad_x_value = int(grad_x[u, v, 0])
                    grad_y_value = int(grad_y[u, v, 0])

                    sum_Vx = sum_Vx + (2*grad_x_value * grad_y_value)
                    sum_Vy = sum_Vy + ((grad_x_value * grad_x_value) - (grad_y_value * grad_y_value))

            if (sum_Vx != 0):
                result = 0.5 * cv2.fastAtan2(sum_Vy, sum_Vx)
            else:
                result = 0.0

            magnitude = math.sqrt((sum_Vx * sum_Vx) + (sum_Vy * sum_Vy))
            phi_x = magnitude * math.cos(2*(math.radians(result)))
            phi_y = magnitude * math.sin(2*(math.radians(result)))
            if (phi_x != 0):
                orient = 0.5 * cv2.fastAtan2(phi_y, phi_x)
            else:
                orient = 0.0

            #print("Orientation of block [" + str(i) + ", " + str(j) + "] in degrees:")
            print(orient)
            #orient_arr.append(orient)


            '''
            if (n == 22 ):
                m = m + 1
                n = 0

            or_array[m][n] = orient

            n = n + 1
            '''




            # (i, j) index (row, col) but are drawn below as (x, y); the
            # transpose is compensated by the rotate + flip at the end
            X0 = i + block_div
            Y0 = j + block_div
            r = block_div

            #result_rad = result * math.pi / 180.0
            orient_deg = orient - 90
            orient_rad = math.radians(orient_deg)

            X1 = r * math.cos(orient_rad)+ X0
            X1 = int (X1)
            #print(X1)

            Y1 = r * math.sin(orient_rad)+ Y0
            Y1 = int (Y1)

            X2 = X0 - r * math.cos(orient_rad)
            X2 = int (X2)

            Y2 = Y0 - r * math.sin(orient_rad)
            Y2 = int (Y2)

            orient_img = cv2.line(orig_img,(X0,Y0) , (X1,Y1), (0,255,0), 3)
            #cv2.imshow('Oriented image', orient_img)
            white_img = cv2.line(white,(X0,Y0) , (X1,Y1), (0,255,0), 3)
            #cv2.imshow('Oriented skeleton', white_img)
            rotated_img = cv2.rotate(white_img, cv2.ROTATE_90_CLOCKWISE)
            #cv2.imshow('Oriented rotated skeleton', rotated_img)
            flip_horizontal_img = cv2.flip(rotated_img, 1)

    #print(orient_arr)
    #print(or_array)
    #print(len(orient_arr))
    return flip_horizontal_img
Example #30
def get_converted_img(path, learning):
    # 1) Load a color image in grayscale
    if path == 'nao':
        img = get_nao_image()
    else:
        img = cv2.imread(path,0)


    #blurred = cv2.blur(img, (5,5))

    if learning == 0:
        cv2.imshow("capture", img)
        cv2.waitKey(750)

    # 2) resize
    rsize = cv2.resize(img, (40, 40))

    # 3) edge detection
    #Gradient X
    ddepth = cv2.CV_16S
    scale = 1
    delta = 0

    #only do additional grayscaling if img is retrieved from nao instead of localfs
    if path == 'nao':
        gray = cv2.cvtColor(rsize,cv2.COLOR_BGR2GRAY)
        #Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
        grad_x = cv2.Sobel(gray,ddepth,1,0,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)
        grad_y = cv2.Sobel(gray,ddepth,0,1,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)
    else:
        grad_x = cv2.Sobel(rsize,ddepth,1,0,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)
        grad_y = cv2.Sobel(rsize,ddepth,0,1,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)


    # 4) and 5) convert to vectors, then create histograms (18)
    a = 0
    b = 0
    orientation = {}
    grad_bins = [0] * 18
    for x in grad_x:
        orientation[a] = {}
        for y in grad_x[a]:
            orientation[a][b] = cv2.fastAtan2(grad_y[a][b],grad_x[a][b])
            j_bin = int(orientation[a][b]/20)
            grad_bins[j_bin] = grad_bins[j_bin] + 1
            b = b + 1
        a = a + 1
        b = 0

    if DEBUG_WEBCAM or DEBUG_NAO:
        print(grad_bins)

    grad_binss = [0.0] * 18

    for bin in range(len(grad_bins)):
        grad_binss[bin] = grad_bins[bin]/256

    return grad_binss
Example #31
def calc_angle(pt1, pt2):  # angle calculation function
    d = pt1 - pt2
    return cv2.fastAtan2(float(d[1]), float(d[0]))
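calc_angle assumes array-like points so that pt1 - pt2 is an elementwise difference; a quick usage check:

import numpy as np

print(calc_angle(np.array([10., 10.]), np.array([0., 0.])))  # ~45.0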
Example #32
def getIrisForPupil(image, pupil, show=False):
    """
    Find the best iris radius for a given pupil. Always assumes there is one,
    so it will very likely return a result. But can also return a None, when
    there is absolutely no indication of an iris.
    
    Params:
        image (numpy array): color image to use
        pupil (ellipse): location of a pupil
        show (bool): show partial results
    
    Returns:
        (center, radius) for the detected iris, center is the same as center of the pupil
    """
    gray = getGray(image)
    orientation, magnitude = getOrientationAndMagnitude(gray, show=False)

    # pupil, and therefore also iris center
    center = (int(pupil[0][0]), int(pupil[0][1]))

    # average radius for pupil (since it is an ellipse)
    pupilRadius = (pupil[1][0] / 2 + pupil[1][1] / 2) / 2
    # max pupil radius will be at most 5 times pupil radius
    irisRadius = 5 * pupilRadius

    # 30 points laying between pupil and iris
    pupilSamples = getCircleSamples(center, min(irisRadius * 0.5, pupilRadius * 2))

    # 30 points laying on a circle that is bigger than iris
    irisSamples = getCircleSamples(center, irisRadius)

    # vote dict for different radii
    finalIrisRadiusVotes = dict()

    # for each sample point in the concentric circle that lies between pupil and iris
    for sample in range(len(pupilSamples)):
        # starting point for a line that goes from in between pupil and iris edge
        pupilSample = (int(pupilSamples[sample][0]), int(pupilSamples[sample][1]))
        # ending point for the line that ends at 5x pupil radius from the pupil center
        irisSample = (int(irisSamples[sample][0]), int(irisSamples[sample][1]))

        # line defined by pupilSample and irisSample points has the direction of
        # the normal for the iris circle

        # points in the image that lay on the line
        lineCoordinates = getLineCoordinates(pupilSample, irisSample)

        # normal vector for the pupil/iris circles
        sampleVector = (pupilSample[0] - center[0], pupilSample[1] - center[1])

        # length of the normal vector
        dist = sqrt(sampleVector[0] ** 2 + sampleVector[1] ** 2)

        # angle of the normal vector
        angle = cv2.fastAtan2(sampleVector[1], sampleVector[0])

        # loop over all the points on the line
        for s in lineCoordinates:
            # sometimes the line is outside the magnitude arrays; in that
            # case just continue the loop
            try:
                mag = magnitude[s[1] - 1][s[0] - 1]
            except IndexError:
                continue

            # only consider those points that have magnitude greater than 15 but lower than 30
            # since the gradient is a slow one
            if mag > 15 and mag < 30:
                # orientation at the point in question
                ori = orientation[s[1] - 1][s[0] - 1]

                # cleanup the angle so that it is a comparable number to the angle of the line we've
                # obtained earlier
                an = angle + ori - 90.0
                if an > 360.0:
                    an -= 360.0

                # angle difference should be +-3 degrees
                if an < 3 or an > 357:
                    # we have a good sample point with the right magnitude and orientation
                    # calculate the radius of the iris this would correspond to
                    radius = sqrt((s[0] - center[0]) ** 2 + (s[1] - center[1]) ** 2)
                    # Round radius to tens
                    radius = round(radius / 10.0) * 10.0
                    radius = int(radius)

                    # draw the sample that we have used
                    if show:
                        cv2.circle(image, (s[0], s[1]), 2, (255, 255, 0), 2)

                    # add the radius to the vote dict
                    if radius not in finalIrisRadiusVotes:
                        finalIrisRadiusVotes[radius] = 0

                    finalIrisRadiusVotes[radius] += 1

        # draw the line
        if show:
            cv2.line(image, pupilSample, irisSample, (0, 255, 0))

    # very rare, in normal real life images probably won't occur
    if len(finalIrisRadiusVotes) == 0:
        return None

    # order the radius dict by votes and grab the winner
    finalIrisRadius = max(finalIrisRadiusVotes.items(), key=operator.itemgetter(1))[0]

    # draw the winning radius
    if show:
        cv2.circle(image, center, finalIrisRadius, (255, 0, 255), 2)
        cv2.imshow("Iris Samples", image)

    return (center, finalIrisRadius)
Example #33
def crop_face(image, points):
    dis1, dis2 = getDis(points[2][0], points[2][1], points[0][0], points[0][1],
                        points[1][0], points[1][1])
    dy = points[1][1] - points[0][1]
    dx = points[1][0] - points[0][0]
    center = (points[2][0], points[2][1])
    print("center:", center)
    cv2.circle(image, center, radius=3, color=(0, 0, 255), thickness=2)
    print(dis1, dis2)
    # compute the rotation angle from the eye-to-eye vector
    angle = cv2.fastAtan2(dy, dx)

    delta_width = dis2 * 1
    delta_height1 = dis1 * 3
    delta_height2 = dis1 * 2
    x1 = max(round(center[0] - delta_width), 0)
    y1 = max(round(center[1] - delta_height1), 0)
    x2 = min(x1 + round(delta_width * 2), image.shape[1])
    y2 = min(round(y1 + delta_height1 + delta_height2), image.shape[0])

    polygon = np.array([
        (x1, y1),
        (x2, y1),
        (x2, y2),
        (x1, y2),
    ], np.int32)
    print("polygon:", polygon)
    #cv2.circle(image,(int(center[0]-delta_width),int(center[1]-delta_height)),radius =3,
    #       color = (0,0,255), thickness = 2)
    #cv2.circle(image,(int(center[0]+delta_width),int(center[1]+delta_height)),radius =3,
    #       color = (0,0,255), thickness = 2)
    # magic that makes sense if one understands numpy arrays
    poly = np.reshape(polygon, (4, 1, 2))
    cv2.polylines(image, [poly], 1, (0, 0, 255))
    M = cv2.getRotationMatrix2D(center, 360 - angle, 1)  # M.shape =  (2, 3)
    rotatedpolygon = cv2.transform(poly, M)
    print("rotatedpolygon:", rotatedpolygon.shape)
    cv2.polylines(image, [rotatedpolygon], True, (255, 255, 255))
    cv2.circle(image,
               (int(rotatedpolygon[0][0][0]), int(rotatedpolygon[0][0][1])),
               radius=4,
               color=(255, 0, 0),
               thickness=2)
    cv2.circle(image,
               (int(rotatedpolygon[1][0][0]), int(rotatedpolygon[1][0][1])),
               radius=4,
               color=(255, 0, 0),
               thickness=2)
    cv2.circle(image,
               (int(rotatedpolygon[2][0][0]), int(rotatedpolygon[2][0][1])),
               radius=4,
               color=(255, 0, 0),
               thickness=2)
    cv2.circle(image,
               (int(rotatedpolygon[3][0][0]), int(rotatedpolygon[3][0][1])),
               radius=4,
               color=(255, 0, 0),
               thickness=2)
    print("rotatedpolygon:", rotatedpolygon)
    x, y, w, h = cv2.boundingRect(rotatedpolygon)
    print(x, y, w, h)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite("crop_face.png", image)
    return (x, y, x + w, y + h)
Example #34
def angle(x1,y1,x2,y2):
    return cv2.fastAtan2(y2,x2) - cv2.fastAtan2(y1,x1)
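The raw difference of two fastAtan2 results lies in (-360, 360); when the signed smallest rotation is wanted, a common follow-up (a sketch, not part of the original) wraps it into [-180, 180):

def angle_diff_wrapped(x1, y1, x2, y2):
    return (angle(x1, y1, x2, y2) + 180.0) % 360.0 - 180.0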
Example #35
def findfingers(target_frame, mask):
    # target_frame = frame.copy()

    blurred = cv2.medianBlur(mask, 5)
    # _, blurred = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)

    eroded = cv2.erode(blurred,
                       cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9)), 2)
    dilated = cv2.dilate(eroded,
                         cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                         3)

    cv2.imshow('b', dilated)

    _, contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)

    contour_sizes = [(cv2.contourArea(contour), contour)
                     for contour in contours]

    try:
        biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]

        # cv2.drawContours(target_frame, [biggest_contour], -1,(30,30,255),3)

        hull = cv2.convexHull(biggest_contour, returnPoints=False)
        # cv2.drawContours(target_frame, [hull], -1,(30,30,255),3)

        bx, by, bw, bh = cv2.boundingRect(biggest_contour)
        # cv2.rectangle(target_frame, (bx,by), (bx+bw, by+bh) , [255, 0, 0], 3)
        cx = bx + int(bw / 2)
        cy = by + int(bh / 2)

        count = 0

        if len(hull) > 3:

            defects = cv2.convexityDefects(biggest_contour, hull)

                if defects is not None:  # avoid crashing (underlying bug not found)

                for i in range(defects.shape[0]):
                    s, e, f, _ = defects[i, 0]  # (s, e, f, d)
                    start = tuple(biggest_contour[s][0])
                    end = tuple(biggest_contour[e][0])
                    far = tuple(biggest_contour[f][0])

                    cv2.line(target_frame, start, far, [0, 255, 0], 2)
                    cv2.line(target_frame, far, end, [0, 255, 0], 2)
                    cv2.circle(target_frame, far, 5, [0, 0, 255], -1)

                    a = math.sqrt((end[0] - start[0])**2 +
                                  (end[1] - start[1])**2)
                    b = math.sqrt((far[0] - start[0])**2 +
                                  (far[1] - start[1])**2)
                    c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
                    inangle = abs(math.acos(
                        (b**2 + c**2 - a**2) / (2 * b * c)))  # cosine theorem

                    c_angle = cv2.fastAtan2(cy - start[1],
                                            cx - start[0])
                    # fastAtan2 returns degrees in [0, 360), so no radian conversion
                    dlength = math.sqrt((far[0] - start[0])**2 +
                                        (far[1] - start[1])**2)

                    if inangle <= (math.pi * 2 / 3) and inangle >= (
                            20. / 180 * math.pi
                    ) and c_angle > -30 and c_angle < 160 and dlength > 0.1 * bh:  # inangle between 20 and 120 degrees: treat as a finger valley
                        count += 1
                        cv2.circle(target_frame, far, 8, [211, 84, 0], -1)

                    # if count == 0:

        cv2.putText(target_frame, str(count + 1), bottomLeftCornerOfText, font,
                    fontScale, fontColor, lineType)

    except ValueError:
        pass

    # x0, y1 (like the font settings above) are module-level constants
    cv2.rectangle(target_frame, (0, 0), (x0, y1), (0, 255, 0), 3)