import logging

import cv2

# ENTER_KEY, calculateScaledSize, detectFaces, drawLabel and
# loadCascadeClassifier are helper definitions from the surrounding modules.


def getUserPicture(outputWidth):

    camera = cv2.VideoCapture(0)

    if not camera.isOpened():
        logging.error("Arrgghhh! The camera is not working!")
        return None

    outputSize = calculateScaledSize(outputWidth, capture=camera)
    logging.debug("Reading camera...")
    readOk, image = camera.read()

    picWin = "Sonria..."
    cv2.namedWindow(picWin)

    key = -1

    while key != ENTER_KEY and readOk:
        image = cv2.resize(image, outputSize)
        drawLabel("Presione [Enter]...", image, (int(outputWidth/3), 50))
        cv2.imshow(picWin, image)
        key = cv2.waitKey(5) % 256
        readOk, image = camera.read()

    camera.release()
    cv2.destroyWindow(picWin)
    cv2.waitKey(1)

    logging.debug('Picture taken.')

    return image
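
A minimal usage sketch (illustrative, not from the original source; the output
file name is made up):

picture = getUserPicture(320)
if picture is not None:
    cv2.imwrite("snapshot.png", picture)  # hypothetical output path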
def recognizeVideo(faceRecognizer, videoFileName, subjects, haarFolder):
  faceCascade = loadCascadeClassifier(haarFolder + "/haarcascade_frontalface_alt2.xml")
  leftEyeCascade = loadCascadeClassifier(haarFolder + "/haarcascade_lefteye_2splits.xml")
  rightEyeCascade = loadCascadeClassifier(haarFolder + "/haarcascade_righteye_2splits.xml")

  # Fall back to the default camera when no file name is given.
  if not videoFileName:
    videoFileName = 0

  capture = cv2.VideoCapture(videoFileName)
  readOk, image = capture.read()

  if readOk:
    height, width, channels = image.shape
  else:
    logging.warning("Could not read capture!!")
    return

  minFaceSize = (int(width * 0.1), int(width * 0.1))
  rectColor = (255, 0, 0)
  rectThickness = 2
  fontColor = (255, 255, 255)
  fontScale = 0.8
  fontThickness = 1

  title = 'Face Recognizer App'
  cv2.namedWindow(title)

  while cv2.waitKey(10) == -1 and readOk:

    faces = detectFaces(image, faceCascade, leftEyeCascade, rightEyeCascade, minFaceSize)

    if len(faces) == 0:
      # No face found: skip ahead a few frames before trying again.
      for _ in range(3):
        cv2.imshow(title, image)
        readOk, image = capture.read()
        if not readOk:
          break

    else:
      for (x, y, w, h, _, _) in faces:
        face = cv2.cvtColor(image[y:y+h, x:x+w], cv2.COLOR_BGR2GRAY)
        faceGray = cv2.resize(face, (92, 112))
        (prediction, distance) = faceRecognizer.predict(faceGray)

        # Large distance means a poor match: treat the face as unknown.
        if distance > 140:
          predictionLegend = "Unknown subject"
        else:
          predictionLegend = "Predicted {0} - Distance {1}".format(subjects[prediction], distance)
        cv2.rectangle(image, (x,y), (x+w,y+h), rectColor, rectThickness)
        drawLabel(predictionLegend, image, (x-20, y-10))
    
    if readOk:
      cv2.imshow(title, image)
      readOk, image = capture.read()

  capture.release()
  cv2.destroyWindow(title)
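
A sketch of a possible caller, assuming an OpenCV contrib face recognizer
trained elsewhere (the model file, video path, label mapping and cascade
folder below are all illustrative):

recognizer = cv2.face.EigenFaceRecognizer_create()  # needs opencv-contrib-python
recognizer.read("recognizer.yml")   # model trained elsewhere (assumed)
subjects = {0: "Alice", 1: "Bob"}   # label -> name mapping (assumed)
recognizeVideo(recognizer, "input.avi", subjects, "data/haarcascades")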
Example No. 3
def main():
  args = configureArguments()
  configureLogging(args.log)

  windowTitle = "Test draw app"
  cv2.namedWindow(windowTitle)

  haarFolder = "/home/juan/ciberpunks/opencv-2.4.11/data/haarcascades"
  faceCascade = loadCascadeClassifier(haarFolder + "/haarcascade_frontalface_alt2.xml")
  leftEyeCascade = loadCascadeClassifier(haarFolder + "/haarcascade_lefteye_2splits.xml")
  rightEyeCascade = loadCascadeClassifier(haarFolder + "/haarcascade_righteye_2splits.xml")
  mouthCascade = loadCascadeClassifier(haarFolder + '/haarcascade_mcs_mouth.xml')

  color = (120,120,130)
  thickness = 2

  width = 600

  image = cv2.imread('/home/juan/ciberpunks/faces/news/[email protected]')

  if image is None:
    print('ERROR: could not read the image.')
    return

  image = cv2.resize(image, calculateScaledSize(width, image=image))

  minFaceSize = (10, 10)
  minEyeSize = (5, 5)

  faces = detectFaces(image, faceCascade, leftEyeCascade, rightEyeCascade, minFaceSize, minEyeSize)

  for (x, y, w, h, leftEyes, rightEyes) in faces:
    center = calculateCenter((x,y,w,h))

    cv2.line(image, (x,0), (x, width), color, thickness)
    cv2.line(image, (x+w,0), (x+w, width), color, thickness)
    cv2.line(image, (0,y), (width, y), color, thickness)
    cv2.line(image, (0,y+h), (width, y+h), color, thickness)

    drawLabel("Juan Gabriel", image, (x, y+20))

    cv2.imshow(windowTitle, image)
    cv2.waitKey(6000)
    
  cv2.destroyWindow(windowTitle)
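
The listing does not show how main() is invoked; the conventional entry-point
guard would be:

if __name__ == '__main__':
    main()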
Example No. 4
def draw_boxes(img, boxes, names):
    """ Draws bounding boxes of objects detected on given image """
    for box, tid in zip(boxes, names):
        # draw rectangle
        x, y, w, h = box
        x, y, w, h = int(x), int(y), int(w), int(h)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

        text = "%d" % (tid)
        img = common.drawLabel(img, text, (x, y))
    return img
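
A small usage sketch with made-up values (boxes are (x, y, w, h) tuples and
names are track ids):

frame = cv2.imread("frame.jpg")  # hypothetical input frame
frame = draw_boxes(frame, [(50, 60, 120, 160)], [7])
cv2.imwrite("annotated.jpg", frame)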
Example No. 5
def demo_video(video_file):
    detector = ObjectDetectorYolo(model='tiny-yolo-voc')
    mtracker = KalmanTracker(['person'], tracker='deep_sort')

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0
    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()
        imgcv = cap.read()

        if imgcv is not None:
            counter += 1
            detections = detector.run(imgcv)
            mtracker.update(imgcv, detections)
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.consecutive_invisible_count < 5:
                    state_current = get_pos(tracker.bbox)

                    try:
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current

                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)
            print(Incount, Outcount)

            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            common.drawLabel(imgcv,
                             "IN:%d  OUT:%d" % (Incount, Outcount), (10, 10),
                             size=1,
                             color=(0, 0, 255))
            common.showImage(draw_boxes(imgcv, cvboxes, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)  # average frames per second so far
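
get_pos and LINE are referenced above but not shown. A plausible sketch, under
the assumption that LINE defines the counting line and get_pos reports which
side of it a track's centre falls on (via the sign of the 2D cross product):

# Assumed shape of the helpers referenced above; not the original code.
LINE = {'x1': 0, 'y1': 300, 'x2': 640, 'y2': 300}  # counting line (illustrative)

def get_pos(bbox):
    x, y, w, h = bbox
    cx, cy = x + w / 2.0, y + h / 2.0  # centre of the bounding box
    # Cross product of the line vector with the vector to the box centre:
    # its sign says which side of the line the centre lies on.
    cross = ((LINE['x2'] - LINE['x1']) * (cy - LINE['y1'])
             - (LINE['y2'] - LINE['y1']) * (cx - LINE['x1']))
    return 'Positive' if cross > 0 else 'Negative'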
Example No. 6
def demo_video(video_file):
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    mtracker = MultiTracker(SingleTrackerType=CorrelationTracker)
    # mtracker = MultiTracker(SingleTrackerType=CorrelationTracker,
    #                         removalConfig=removalConfig)
    # mtracker = MultiTracker(SingleTrackerType = cv2.TrackerKCF_create)

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0

    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()

        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            mtracker.update(imgcv, common.toCvbox(detections))
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.visible_count > 3 and tracker.consecutive_invisible_count < 10:
                    state_current = get_pos(tracker.bbox)
                    try:
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current
                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)

            detections = to_bbox(cvboxes)
            print(Incount, Outcount)
            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            imgcv = common.drawLabel(imgcv,
                                     "IN:%d  OUT:%d" % (Incount, Outcount),
                                     (10, 10),
                                     color=(0, 0, 255))
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)  # average frames per second so far
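
As before, a hypothetical invocation with a made-up file name:

demo_video("entrance_cam.mp4")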