Code Example #1
    def run(self):
        started = time.time()
        while True:

            currentframe = cv.QueryFrame(self.capture)
            instant = time.time()  #Get timestamp of the frame

            self.processImage(currentframe)  #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  #Update the trigger_time
                    if instant > started + 10:  #Wait 10 seconds after the webcam starts, for luminosity adjustment etc.
                        print("Something is moving !")
                        if self.doRecord:  #set isRecording=True only if we record a video
                            self.isRecording = True
                cv.DrawContours(currentframe, self.currentcontours,
                                (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
            else:
                if instant >= self.trigger_time + 10:  #Record during 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(currentframe,
                               datetime.now().strftime("%b %d, %H:%M:%S"),
                               (25, 30), self.font, 0)  #Put date on the frame
                    cv.WriteFrame(self.writer, currentframe)  #Write the frame

            if self.show:
                cv.ShowImage("Image", currentframe)

            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  #Break if the user presses Esc or Enter.
                break
Code Example #2
    def run(self):
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  #Get timestamp of the frame

            self.processImage(curframe)  #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  #Update the trigger_time
                    if instant > started + 5:  #Wait 5 seconds after the webcam starts, for luminosity adjustment etc.
                        print("Something is moving !")
                        if self.doRecord:  #set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10:  #Record during 10 seconds
                    print("Stop recording")
                    self.isRecording = False
                else:
                    cv.PutText(curframe,
                               datetime.now().strftime("%b %d, %H:%M:%S"),
                               (25, 30), self.font, 0)  #Put date on the frame
                    cv.WriteFrame(self.writer, curframe)  #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603:  #Break if the user presses Esc (27, or 1048603 on builds that set high modifier bits).
                break
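
Code examples #1 and #2 target the legacy cv (OpenCV 1.x) Python bindings, which current OpenCV builds no longer ship. As a rough sketch only, the same timestamp-overlay-and-record loop could be written against the cv2 API as below; the device index, codec, frame rate, and output filename are assumptions, not values taken from the examples above.

import cv2
from datetime import datetime

# Sketch of the timestamp-overlay loop with the modern cv2 API.
# Device index, codec, frame rate, and output filename are assumptions.
capture = cv2.VideoCapture(0)
fps = 15
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter('capture.avi', cv2.VideoWriter_fourcc(*'MJPG'),
                         fps, (width, height))

while True:
    ret, frame = capture.read()
    if not ret:
        break
    # cv2.putText replaces cv.PutText: stamp the current date/time on the frame.
    cv2.putText(frame, datetime.now().strftime("%b %d, %H:%M:%S"), (25, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    writer.write(frame)
    cv2.imshow("Image", frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break

capture.release()
writer.release()
cv2.destroyAllWindows()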
Code Example #3
File: heartMonitor.py, Project: lfkopp/heart_monitor
    def draw_fft(self, frame, fft_data, min_bpm, max_bpm):
        w = frame.width
        h = int(frame.height * Annotator.FFT_HEIGHT)
        x = 0
        y = frame.height

        max_magnitude = max(d[1][0] for d in fft_data)

        def get_position(i):
            point_x = int(
                w *
                (float(fft_data[i][0] - min_bpm) / float(max_bpm - min_bpm)))
            point_y = int(y - ((h * fft_data[i][1][0]) / max_magnitude))
            return point_x, point_y

        line = [get_position(i) for i in range(len(fft_data))]

        cv.PolyLine(frame, [line], False, self.get_colour()[0], 3)

        # Label the largest bin
        max_bin = max(range(len(fft_data)), key=(lambda i: fft_data[i][1][0]))

        x, y = get_position(max_bin)
        c = self.get_colour()
        text = "%0.1f" % fft_data[max_bin][0]

        cv.PutText(frame, text, (x, y), self.small_font_outline, c[1])
        cv.PutText(frame, text, (x, y), self.small_font, c[0])

        # Pulse ring
        r = Annotator.SMALL_PULSE_SIZE
        phase = int(
            ((fft_data[max_bin][1][1] % (2 * numpy.pi)) / numpy.pi) * 180)
        cv.Ellipse(frame, (int(x - (r * 1.5)), int(y - r)), (int(r), int(r)),
                   0, 90, 90 - phase, c[1], Annotator.THIN + Annotator.BORDER)
        cv.Ellipse(frame, (int(x - (r * 1.5)), int(y - r)), (int(r), int(r)),
                   0, 90, 90 - phase, c[0], Annotator.THIN)
Code Example #4
path = os.path.dirname(os.path.abspath(__file__))

recognizer = cv2.face.createLBPHFaceRecognizer()
recognizer.load(os.path.join(path, 'trainer', 'trainer.yml'))
cascadePath = os.path.join(path, 'Classifiers', 'face.xml')
faceCascade = cv2.CascadeClassifier(cascadePath)

cam = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX  #Font for the label text
while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.2,
                                         minNeighbors=5,
                                         minSize=(100, 100),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        nbr_predicted, conf = recognizer.predict(gray[y:y + h, x:x + w])
        cv2.rectangle(im, (x - 50, y - 50), (x + w + 50, y + h + 50),
                      (225, 0, 0), 2)
        if (nbr_predicted == 7):
            nbr_predicted = 'Obama'
        elif (nbr_predicted == 2):
            nbr_predicted = 'Anirban'
        cv2.putText(im,
                    str(nbr_predicted) + "--" + str(conf), (x, y + h), font,
                    1.1, (0, 255, 0))  #Draw the label text
        cv2.imshow('im', im)
        cv2.waitKey(10)
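
Code example #4 relies on the factory name cv2.face.createLBPHFaceRecognizer(), which the opencv-contrib packages renamed in OpenCV 3.3+. A minimal sketch with the renamed calls follows; the model path, cascade location, and input image are placeholders, not files from the example.

import cv2

# Sketch using the renamed opencv-contrib face API (OpenCV 3.3+ / 4.x).
# 'trainer/trainer.yml' and 'face.jpg' are placeholder paths.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

frame = cv2.imread('face.jpg')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for (x, y, w, h) in face_cascade.detectMultiScale(gray, scaleFactor=1.2,
                                                  minNeighbors=5):
    # predict() returns the numeric label and a distance-style confidence.
    label, confidence = recognizer.predict(gray[y:y + h, x:x + w])
    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.putText(frame, "%d (%.0f)" % (label, confidence), (x, y + h),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
cv2.imwrite('labelled.jpg', frame)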
Code Example #5
recognizer.read('trainer/trainer.yml')
cascadePath = "Classifiers/face.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
path = 'dataSet'

cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)
while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.2,
                                         minNeighbors=5,
                                         minSize=(100, 100),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        nbr_predicted, conf = recognizer.predict(gray[y:y + h, x:x + w])
        cv2.rectangle(im, (x - 50, y - 50), (x + w + 50, y + h + 50),
                      (225, 0, 0), 2)
        if (nbr_predicted == 7):
            nbr_predicted = 'Obama'
        elif (nbr_predicted == 2):
            nbr_predicted = 'Anirban'
        cv2.putText(im,
                    str(nbr_predicted) + "--" + str(conf), (x, y + h), fontFace,
                    fontScale, fontColor)  #Draw the label text
        cv2.imshow('im', im)
        cv2.waitKey(10)
Code Example #6
recognizer = cv2.face.createLBPHFaceRecognizer()
recognizer.load('recognizer/trainingData.yml')

cascadePath = "/home/tharushi/opencv-3.1.0/data/haarcascades/haarcascade_frontalcatface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
cam = cv2.VideoCapture(0)
#font = cv2.InitFont(cv2.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1)
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x - 50, y - 50), (x + w + 50, y + h + 50),
                      (255, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
        if (conf < 50):
            if (Id == 1):
                Id = "Tharushi"
            else:
                Id = "Unknown"
        cv2.putText(img, str(Id), (x, y + h), font, 1, (255, 255, 255), 2)
    cv2.imshow('image', img)
    if (cv2.waitKey(10) & 0xFF == ord('q')):
        break

cam.release()
cv2.destroyAllWindows()
cv2.waitKey(1)  #Give the GUI loop one more tick so the windows actually close
#cv2.putText(frame, text, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, size, color, thickness)
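
The commented-out line above gives the signature the modern cv2.putText call expects. A tiny self-contained sketch of that call, using an arbitrary blank canvas and label text, might look like this.

import numpy as np
import cv2

# Arbitrary canvas and label, only to illustrate the cv2.putText signature
# referenced in the comment above.
canvas = np.zeros((120, 320, 3), dtype=np.uint8)
cv2.putText(canvas, "Id: Unknown", (10, 60),
            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
cv2.imwrite('label_demo.png', canvas)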
Code Example #7
    def run(self):
        #initiate font
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        # instantiate images
        hsv_img = cv.CreateImage(cv.GetSize(cv.QueryFrame(self.capture)), 8, 3)
        threshold_img1 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img1a = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        i = 0
        writer = cv.CreateVideoWriter('angle_tracking.avi', cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, cv.GetSize(hsv_img), 1)

        while True:
            # capture the image from the cam
            img = cv.QueryFrame(self.capture)

            # convert the image to HSV
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            # threshold the image to isolate two colors
            cv.InRangeS(hsv_img, (165, 145, 100), (250, 210, 160), threshold_img1)  # red
            cv.InRangeS(hsv_img, (0, 145, 100), (10, 210, 160), threshold_img1a)  # red again
            cv.Add(threshold_img1, threshold_img1a, threshold_img1)  # this is combining the two limits for red
            cv.InRangeS(hsv_img, (105, 180, 40), (120, 260, 100), threshold_img2)  # blue

            # determine the moments of the two objects
            threshold_img1 = cv.GetMat(threshold_img1)
            threshold_img2 = cv.GetMat(threshold_img2)
            moments1 = cv.Moments(threshold_img1, 0)
            moments2 = cv.Moments(threshold_img2, 0)
            area1 = cv.GetCentralMoment(moments1, 0, 0)
            area2 = cv.GetCentralMoment(moments2, 0, 0)

            # initialize the object centre coordinates
            x1 = y1 = x2 = y2 = 0

            # there can be noise in the video so ignore objects with small areas
            if (area1 > 200000):
                # x and y coordinates of the center of the object is found by dividing the 1,0 and 0,1 moments by the area
                x1 = int(cv.GetSpatialMoment(moments1, 1, 0) / area1)
                y1 = int(cv.GetSpatialMoment(moments1, 0, 1) / area1)

            # draw circle
            cv.Circle(img, (x1, y1), 2, (0, 255, 0), 20)

            # write x and y position
            cv.PutText(img, str(x1) + ', ' + str(y1), (x1, y1 + 20), font, 255)  # Draw the text

            if (area2 > 100000):
                # x and y coordinates of the center of the object is found by dividing the 1,0 and 0,1 moments by the area
                x2 = int(cv.GetSpatialMoment(moments2, 1, 0) / area2)
                y2 = int(cv.GetSpatialMoment(moments2, 0, 1) / area2)

                # draw circle
                cv.Circle(img, (x2, y2), 2, (0, 255, 0), 20)

            cv.PutText(img, str(x2) + ', ' + str(y2), (x2, y2 + 20), font, 255)  # Draw the text
            cv.Line(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv.CV_AA)
            # draw line and angle
            cv.Line(img, (x1, y1), (cv.GetSize(img)[0], y1), (100, 100, 100, 100), 4, cv.CV_AA)
            x1 = float(x1)
            y1 = float(y1)
            x2 = float(x2)
            y2 = float(y2)
            angle = int(math.atan2(y1 - y2, x2 - x1) * 180 / math.pi)  # atan2 avoids a zero division when the markers line up vertically
            cv.PutText(img, str(angle), (int(x1) + 50, (int(y2) + int(y1)) / 2), font, 255)

            # cv.WriteFrame(writer,img)

            # display frames to users
            cv.ShowImage('Target', img)
            cv.ShowImage('Threshold1', threshold_img1)
            cv.ShowImage('Threshold2', threshold_img2)
            cv.ShowImage('hsv', hsv_img)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        cv.DestroyAllWindows()  # destroy the windows only after the loop exits
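
The Moments/GetSpatialMoment calls in code example #7 map onto cv2.moments applied to a binary mask in the modern API. A minimal sketch of locating one blob centre that way follows; the input file and the HSV bounds are placeholders rather than the values used above.

import cv2

# Sketch of the centroid step from code example #7 with the cv2 API.
# 'frame.png' and the HSV bounds are placeholders.
img = cv2.imread('frame.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (105, 180, 40), (120, 255, 100))

m = cv2.moments(mask, binaryImage=True)
if m['m00'] > 0:  # guard against an empty mask
    cx = int(m['m10'] / m['m00'])  # spatial moment / area = centre x
    cy = int(m['m01'] / m['m00'])
    cv2.circle(img, (cx, cy), 10, (0, 255, 0), 2)
    cv2.putText(img, '%d, %d' % (cx, cy), (cx, cy + 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)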
Code Example #8
while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.5, 5)
    #faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        id, conf = rec.predict(gray[y:y + h, x:x + w])
        if id == 2:
            id = "Gayatri"
        elif id == 1:
            id = "alok"
        elif id == 3:
            id = "anjali"
        elif id == 4:
            id = "Gaurav"
        elif id == 5:
            id = "rahul"
        elif id == 6:
            id = "akshay"

        cv2.putText(img, str(id), (x, y + h), font, fontscale, fontcolor)
    cv2.imshow('img', img)

    if cv2.waitKey(1) == ord('q'):
        break
cap.release()

cv2.destroyAllWindows()
Code Example #9
File: heartMonitor.py, Project: lfkopp/heart_monitor
    def draw_bpm(self, frame, bpm):
        x, y, w, h = self.metrics
        c = self.get_colour()

        cv.PutText(frame, "%0.0f" % bpm, (x, y), self.large_font_outline, c[1])
        cv.PutText(frame, "%0.0f" % bpm, (x, y), self.large_font, c[0])