Example #1
    def show_pic(self):
        while True:
            # Read a frame
            success, frame = self.cap.read()
            if not success:
                break
            # Find all the faces and face encodings in the video frame
            face_locations = face_recognition.face_locations(frame)
            face_encodings = face_recognition.face_encodings(frame, face_locations)
            # Loop over each face in this video frame
            for (top, right, bottom, left), face_encoding in zip(
                    face_locations, face_encodings):
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a labelled box below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),
                              cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, 'face', (left + 6, bottom - 6), font, 1.0,
                            (255, 255, 255), 1)
            # Display the resulting image
            cv2.imshow('video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        self.cap.release()
        cv2.destroyAllWindows()
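This method expects a self.cap capture object created elsewhere. A minimal sketch of a wrapper class that would let show_pic run is given below; the class name FaceViewer and the default camera index are assumptions for illustration, not part of the original snippet.

import cv2
import face_recognition

class FaceViewer:
    def __init__(self, source=0):
        # source may be a camera index or a video file path
        self.cap = cv2.VideoCapture(source)

    # show_pic(self) from the example above would be defined here

if __name__ == '__main__':
    FaceViewer(0).show_pic()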
Example #2
    def get_frame(self):
        # Camera.last_access = time.time()
        # self.__init__()

        success, image = self.video.read()  # read a frame from the camera
        image = cv2.flip(image, -1)  # flip both axes, i.e. rotate 180 degrees
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = image[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)  # eye_cascade is assumed to be defined elsewhere
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                             (0, 0, 255), 2)

        face_count = len(faces)
        inType = "Found %d faces." % face_count
        print(face_count)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image, inType, (10, 500), font, 4, (255, 255, 255), 2,
                    cv2.LINE_AA)

        ret, jpeg = cv2.imencode('.jpg', image)

        return jpeg.tobytes()
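get_frame relies on self.video, self.faceCascade and an eye_cascade that the snippet does not define. A minimal sketch of that setup, assuming the cascade files bundled with opencv-python (cv2.data.haarcascades), might look like this:

import cv2

# assumed setup for the names used in get_frame above
eye_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_eye.xml')

class Camera:
    def __init__(self):
        self.video = cv2.VideoCapture(0)
        self.faceCascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')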
Example #3
def gen():
    """Video streaming generator function."""
    cap = cv2.VideoCapture('test.mp4')
    # `sub` is not defined in the original snippet; a MOG2 background
    # subtractor is assumed here so the generator is runnable
    sub = cv2.createBackgroundSubtractorMOG2()
    while cap.isOpened():
        #capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            # reopen the clip so the stream loops when it reaches the end
            cap = cv2.VideoCapture('test.mp4')
            continue
        if ret:
            image = cv2.resize(frame, (0, 0), None, 1, 1)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            fgmask = sub.apply(gray)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
            closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
            opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
            dilation = cv2.dilate(opening, kernel)
            retvalbin, bins = cv2.threshold(dilation, 220, 255,
                                            cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            minarea = 400
            maxarea = 50000
            for i in range(len(contours)):
                if hierarchy[0, i, 3] == -1:
                    area = cv2.contourArea(contours[i])
                    if minarea < area < maxarea:
                        cnt = contours[i]
                        M = cv2.moments(cnt)
                        cx = int(M['m10'] / M['m00'])
                        cy = int(M['m01'] / M['m00'])

                        x, y, w, h = cv2.boundingRect(cnt)

                        cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)

                        cv2.putText(image,
                                    str(cx) + "," + str(cy),
                                    (cx + 10, cy + 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 255),
                                    1)
                        cv2.drawMarker(image, (cx, cy), (0, 255, 255),
                                       cv2.MARKER_CROSS,
                                       markerSize=8,
                                       thickness=3,
                                       line_type=cv2.LINE_8)
            frame = cv2.imencode('.jpg', image)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            time.sleep(0.1)
            key = cv2.waitKey(20)
            if key == 27:
                break
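The multipart JPEG chunks yielded by gen() follow the MJPEG streaming convention, so they are typically fed to a streaming HTTP response. A minimal sketch using Flask (Flask is an assumption here; the original snippet does not show the web layer):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # stream the frames produced by gen() as an MJPEG response
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')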
Example #4
def visulize_result(image_path, result_path, save_path):
    results = json.load(open(result_path))
    im_bbox = {}
    for res in results:
        name = res['name']
        bbox = res['bbox']
        category = res['category']
        # collect every annotation for an image, not just the first one
        if name not in im_bbox:
            im_bbox[name] = []
        im_bbox[name].append([bbox, category])
    for im_name in im_bbox.keys():
        img_path = osp.join(image_path,im_name)
        image = cv2.imread(img_path)
        for ann in im_bbox[im_name]:
            bbox = ann[0]
            cat = ann[1]
            image = cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 3)
            image = cv2.putText(image, str(cat), (bbox[0], bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 10, (0, 0, 255), 3)
        img_save = osp.join(save_path,im_name)
        cv2.imwrite(img_save,image)
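Judging from how the function reads result_path, each entry in the JSON file is a dict with name, bbox and category keys. A hypothetical results list (values are illustrative only) would look like:

# illustrative only: the structure visulize_result expects in result_path
results = [
    {"name": "0001.jpg", "bbox": [48, 60, 220, 305], "category": 1},
    {"name": "0001.jpg", "bbox": [310, 95, 400, 210], "category": 2},
]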
Example #5
# store the whole directory listing in a list
images_list = glob(os.path.join(images_dir, "*.jpg"))

for img_filename in images_list:
    img = cv2.imread(img_filename)
    roi = img[269: 795, 537: 1416]
    #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    imgray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    imgray = cv2.GaussianBlur(imgray, (7,7), 0)

    ret, thresh = cv2.threshold(imgray, 4, 255, cv2.THRESH_BINARY_INV)

    contours, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)

    noOfContours = len(contours)
    if noOfContours == 0:
        # position, font and colour below are arbitrary choices; the original
        # call passed only the text and would not run
        cv2.putText(roi, "Warning", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    else:
        print("number of contours = " + str(noOfContours))

    for cnt in contours:
        (x, y, w, h) = cv2.boundingRect(cnt)
        cv2.rectangle(roi, (x,y), (x + w , y+ h), (255, 0, 0), 2)
         
    #cv2.imshow('image',img)  
    cv2.imshow('ROI',roi)  
    cv2.waitKey(0)           
    cv2.destroyAllWindows()
       
Example #6
def saveResult(img_file,
               img,
               boxes,
               dirname='./result/',
               verticals=None,
               texts=None):
    """ save text detection result one by one
        Args:
            img_file (str): image file name
            img (array): raw image context
            boxes (array): array of result file
                Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output
        Return:
            None
        """
    img = np.array(img)

    # make result file list
    filename, file_ext = os.path.splitext(os.path.basename(img_file))

    # result directory
    res_file = dirname + "res_" + filename + '.txt'
    res_img_file = dirname + "res_" + filename + '.jpg'

    if not os.path.isdir(dirname):
        os.mkdir(dirname)

    with open(res_file, 'w') as f:
        for i, box in enumerate(boxes):
            poly = np.array(box).astype(np.int32).reshape((-1))
            strResult = ','.join([str(p) for p in poly]) + '\r\n'
            f.write(strResult)

            poly = poly.reshape(-1, 2)
            cv2.polylines(img, [poly.reshape((-1, 1, 2))],
                          True,
                          color=(0, 0, 255),
                          thickness=2)
            ptColor = (0, 255, 255)
            if verticals is not None:
                if verticals[i]:
                    ptColor = (255, 0, 0)

            if texts is not None:
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                cv2.putText(img,
                            "{}".format(texts[i]),
                            (poly[0][0] + 1, poly[0][1] + 1),
                            font,
                            font_scale, (0, 0, 0),
                            thickness=1)
                cv2.putText(img,
                            "{}".format(texts[i]),
                            tuple(poly[0]),
                            font,
                            font_scale, (0, 255, 255),
                            thickness=1)

    # Save result image
    cv2.imwrite(res_img_file, img)
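A hypothetical call, assuming cv2, numpy and os are imported at module level and the detector produced one quadrilateral box:

# illustrative usage only; box coordinates and text are made up
img = cv2.imread('sample.jpg')
boxes = [[[10, 10], [200, 10], [200, 60], [10, 60]]]  # 4 corner points
saveResult('sample.jpg', img, boxes, dirname='./result/', texts=['hello'])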
Example #7
while True:
    img_url = urllib.request.urlopen(URL)
    image = np.array(bytearray(img_url.read()), np.uint8)
    frame = cv2.imdecode(image, -1)

    faces = classifier.detectMultiScale(frame, 1.5, 5)
    if len(faces) > 0:  # detectMultiScale returns an empty sequence, never None
        for x, y, w, h in faces:

            face_image = frame[y:y + h, x:x + w].copy()
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 5)
            if len(data) <= 100:
                data.append(face_image)
            else:
                cv2.putText(frame, 'complete', (200, 200),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)

    cv2.imshow('capture', frame)
    if cv2.waitKey(1) == ord('q'):
        break

cv2.destroyAllWindows()

name = input("enter name: ")
c = 0
for i in data:
    cv2.imwrite("images/" + name + '_' + str(c) + '.jpg', i)
    c = c + 1
for i in range(0, 12):
    plt.imshow(data[i])
    plt.show()
Example #8
import cv2
import numpy as np

faceDetect = cv2.CascadeClassifier(
    'opencv-3.3.0/data/haarcascades/haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)
# OpenCV 3.x exposes the LBPH recognizer through the face module
# (requires the contrib build); trained models are loaded with read()
rec = cv2.face.LBPHFaceRecognizer_create()
rec.read(
    '/home/pi/Desktop/Raspberry-Face-Recognition-master/trainer/trainer.yml')
id = 0
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceDetect.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        id, conf = rec.predict(gray[y:y + h, x:x + w])
        if (id == 1):
            print('neeraj')
        elif (id == 2):
            # modern cv2 draws on numpy arrays directly; the colour below is an
            # assumption, the original passed a bare intensity of 255
            cv2.putText(img, str(id), (x, y + h), font, 1, (255, 255, 255), 2)
    cv2.imshow('Face', img)
    if (cv2.waitKey(1) == ord('q')):
        break
cam.release()
cv2.destroyAllWindows()