Example 1
import cv2

# frst, find_eye_region and gray_threshold are assumed to be defined at module
# level in the original project.


def main():
    vs = cv2.VideoCapture(0)
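    # property ids 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT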
    width = 640
    vs.set(3, width)
    height = 480
    vs.set(4, height)
    face_detector = cv2.CascadeClassifier(
        'haarcascade_frontalface_default.xml')

    while True:
        ret, pic = vs.read()
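        # channel 2 of the BGR frame is the red channel, used here as a grayscale proxy for the cascade detector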
        pic_gray = cv2.split(pic)[2]

        face_boxs = face_detector.detectMultiScale(
            pic_gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE)
        if len(face_boxs):
            for x, y, w, h in face_boxs:
                e_right, e_left = find_eye_region(pic[y:y + h, x:x + w])
                e_right_gray = cv2.split(e_right)[2]
                e_left_gray = cv2.split(e_left)[2]

                e_right_gray = cv2.GaussianBlur(e_right_gray, (15, 15), 1)
                e_left_gray = cv2.GaussianBlur(e_left_gray, (15, 15), 1)

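                # radius for the radial symmetry transform: ~10% of the eye-region width, forced to be odd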
                eye_radius = int(0.1 * (e_right.shape)[1])
                eye_radius = eye_radius if eye_radius % 2 == 1 else eye_radius + 1
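                # invert pixels darker than gray_threshold (the dark pupil becomes bright) and zero the rest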
                for i in range(e_left_gray.shape[0]):
                    for j in range(e_left_gray.shape[1]):
                        if e_left_gray[i][j] < gray_threshold:
                            e_left_gray[i][j] = 255 - e_left_gray[i][j]
                        else:
                            e_left_gray[i][j] = 0
                for i in range(e_right_gray.shape[0]):
                    for j in range(e_right_gray.shape[1]):
                        if e_right_gray[i][j] < gray_threshold:
                            e_right_gray[i][j] = 255 - e_right_gray[i][j]
                        else:
                            e_right_gray[i][j] = 0

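                # frst presumably returns the point of strongest radial symmetry (pupil-centre estimate) and its magnitude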
                loc, mags = frst.frst(e_left_gray, eye_radius, 0.8)
                cv2.circle(e_left, loc, 1, (0, 255, 0))
                loc1, mags = frst.frst(e_right_gray, eye_radius, 0.8)
                cv2.circle(e_right, loc1, 1, (0, 255, 0))
                cv2.imshow('left eye', e_left)
                cv2.imshow('left1 eye', e_left_gray)
                cv2.imshow('right eye', e_right)
                cv2.imshow('right1 eye', e_right_gray)

        cv2.imshow('frame', pic)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            vs.release()
            cv2.destroyAllWindows()
            break
Example 2
import cv2
import numpy as np

# frst and the module-level image path `iname` are assumed to come from the
# original project; `window` is expected to expose a show() method.


def process(window, radii, alpha, beta, tbIter, tra):
    frame = cv2.imread(iname)
    h0, w0 = frame.shape[:2]  # shape is (height, width)
    frame = cv2.resize(frame, (int(400 * w0 / h0), 400))  # fixed height of 400 px, aspect ratio preserved
    frame = cv2.GaussianBlur(frame, (15, 15), 0)
    grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
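    # adaptive thresholding with THRESH_BINARY_INV turns locally dark detail into white foreground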
    grayImg = cv2.adaptiveThreshold(grayImg, 255,
                                    cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY_INV, 11, 1)
    kernel = np.ones((3, 3), np.uint8)
    if tbIter > 0:
        grayImg = cv2.morphologyEx(grayImg,
                                   cv2.MORPH_CLOSE,
                                   kernel,
                                   iterations=int(tbIter))

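    # apply the radial symmetry transform, then min-max rescale its response to 0-255 (uint8) for display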
    grayImg = frst(grayImg, radii, alpha, beta, 0)
    print(np.min(grayImg), np.max(grayImg))
    hi = np.max(grayImg)
    lo = np.min(grayImg)
    grayImg = np.uint8(np.around((grayImg - lo) * 255 / (hi - lo)))
    grayImg = cv2.cvtColor(grayImg, cv2.COLOR_GRAY2BGR)
    # frame = frame[:grayImg.shape[0], :grayImg.shape[1], :]
    grayImg = grayImg[:frame.shape[0], :frame.shape[1]]
    print(frame.shape, grayImg.shape)
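    # blend the colourized frst response over the frame; tra controls the mixing weight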
    frame = cv2.addWeighted(frame, tra, grayImg, 1 - tra, 0)

    window.show(frame)
Example 3
import cv2

# frst and the show_img helper are assumed to be defined at module level in the
# original project.


def find_radial_symmetry(img):
    # find regions of interest
    firstImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

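    # the frst signature is project-specific; the arguments appear to be a radius, weighting
    # parameters and a mode flag ('BRIGHT' presumably selects bright radially symmetric regions)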
    firstImg = frst(firstImg, 200, 1, 0.5, 0.1, 'BRIGHT')
    # show_img(firstImg)

    firstImg = cv2.normalize(firstImg, None, 0.0, 1.0, cv2.NORM_MINMAX)
    # frstImage.convertTo(frstImage, CV_8U, 255.0);

    firstImg = cv2.convertScaleAbs(firstImg, None, 255.0, 0)
    show_img(firstImg)

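    # Otsu's method computes the binarization threshold automatically (the 0 passed as the threshold is ignored)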
    ret, thresh = cv2.threshold(firstImg, 0, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # show_img(ret)
    # cv::morphologyEx(inputImage, inputImage, operation, element, cv::Point(-1, -1), iterations);
    # bwMorph(frstImage, markers, cv::MORPH_CLOSE, cv::MORPH_ELLIPSE, 5);

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    markers = cv2.morphologyEx(firstImg, cv2.MORPH_CLOSE, kernel, iterations=5)

    print(len(markers))

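    # OpenCV 3.x findContours returns (image, contours, hierarchy); OpenCV 4.x returns only (contours, hierarchy)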
    im2, contours, hierarchy = cv2.findContours(markers, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    # print(len(contours))
    #
    # img = cv2.drawContours(img, contours, -1, (0,0,255), 8)
    # show_img(img)
    moments = []
    for cnt in contours:
        moment = cv2.moments(cnt)
        moments.append(moment)
        print(moment)

    # get the mass centers: centroid = (m10 / m00, m01 / m00)
    mass_centers = []
    for moment in moments:
        if moment['m00'] == 0:
            continue  # skip degenerate contours to avoid division by zero
        x, y = moment['m10'] / moment['m00'], moment['m01'] / moment['m00']
        mass_centers.append((int(x), int(y)))

    for center in mass_centers:
        print(center)
        cv2.circle(img, center, 20, (0, 255, 0), 2)
    show_img(img)
    return None
Example 4
import time

import cv2
from imutils.video import VideoStream  # assumption: the threaded camera wrapper from imutils

# frst and the module-level configuration values (camera_flag, left_eye_x, left_eye_y,
# eye_region_width, eye_region_height, resizex, resizey, gray_threshold, eye_radius,
# frst_threshold) are assumed to be defined elsewhere in the original project.


def loop(socket_link):
    magsum = 0
    magavg = 0
    locxsum = 0
    locysum = 0
    locxavg = 0
    locyavg = 0
    eye_state = False
    eye_left = False
    eye_right = False
    sumcount = 30
    # blink count (blinks required before the confirm command is sent)
    blink_count = 2

    loctestx = []
    loctesty = []

    vs = VideoStream(src=camera_flag).start()

    while True:
        frame = vs.read()
        frame = cv2.flip(frame, 0)  # mirror the frame (flipCode=0 flips vertically; 1 mirrors horizontally)
        # extract the eye region from the frame
        eye_region = frame[left_eye_y:left_eye_y + eye_region_height,
                           left_eye_x:left_eye_x + eye_region_width]
        eye_region = cv2.resize(eye_region, (resizex, resizey))

        # convert the eye region to grayscale
        gray = cv2.cvtColor(eye_region, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (15, 15), 1)

        # invert eye-region pixels darker than the threshold (the dark pupil becomes bright); zero the rest
        for i in range(resizex):
            for j in range(resizey):
                if gray[j][i] < gray_threshold:
                    gray[j][i] = 255 - gray[j][i]
                else:
                    gray[j][i] = 0
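        # frst presumably returns the pupil-centre estimate (loc) and the symmetry magnitude (mags)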
        loc, mags = frst.frst(gray, eye_radius, frst_threshold)

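        # calibration: average the location and magnitude over the first 30 frames to build a baseline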
        if sumcount > 0:
            locxsum += loc[0]
            locysum += loc[1]
            magsum += mags
            sumcount -= 1
            if sumcount == 0:
                magsum /= 30
                locxsum /= 30
                locysum /= 30
                try:
                    socket_link.send(('4').encode('utf-8'))
                    recvdata = socket_link.recv(1)
                except Exception:
                    print('client disconnected!')
                    break
                print("Mag average:", magsum, "X average:", locxsum,
                      "Y average:", locysum)
            else:
                continue

        # command codes sent to the client: 0 center, 1 right, 2 left, 3 confirm
        if mags < 0.3 * magsum and eye_state:
            blink_count = blink_count - 1
            if blink_count == 0:
                blink_count = 2
                try:
                    socket_link.send(('3').encode('utf-8'))
                    recvdata = socket_link.recv(1)
                except Exception:
                    print('client disconnected!')
                    break
            eye_state = False
            continue

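        # eye considered open when the symmetry magnitude exceeds half the baseline;
        # the horizontal offset from the baseline decides the gaze direction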
        elif mags > 0.5 * magsum:
            eye_state = True
            cv2.circle(eye_region, loc, 2, (0, 255, 0))
            loctestx.append(loc[0])
            loctesty.append(loc[1])
            if (not eye_left) and (not eye_right):
                if loc[0] - locxsum > 5:
                    try:
                        socket_link.send(('1').encode('utf-8'))
                        recvdata = socket_link.recv(1)
                    except Exception:
                        print('client disconnected!')
                        break
                    blink_count = 2
                    eye_left = True
                    continue
                elif loc[0] - locxsum < -5:
                    try:
                        socket_link.send(('2').encode('utf-8'))
                        recvdata = socket_link.recv(1)
                    except Exception:
                        print('client disconnected!')
                        break
                    blink_count = 2
                    eye_right = True
                    continue
            elif (loc[0] - locxsum < 3) and (loc[0] - locxsum > -2):
                try:
                    socket_link.send(('0').encode('utf-8'))
                    recvdata = socket_link.recv(1)
                except Exception:
                    print('client disconnected!')
                    break
                eye_left = False
                eye_right = False
        # cv2.imshow("tracking eye", eye_region)
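        # JPEG-encode the eye region and stream it to the client with a '5' prefix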
        ret, data = cv2.imencode('.jpg', eye_region)
        data = data.tobytes()  # tostring() is deprecated; tobytes() is the equivalent
        try:
            socket_link.send(('5').encode('utf-8') + data)
            # key = cv2.waitKey(1) & 0xFF
            # if key == ord('q'):
            # break
            recvdata = socket_link.recv(1)
        except Exception:
            print('client disconnected!')
            break
    socket_link.close()
    # cv2.destroyAllWindows()
    vs.stop()
    time.sleep(1)
    return