Example #1
def greenElaboration(filepath, showVideo=False):
    '''
    Script adapted from the arrota project, from webcam to video use.
    It uses the green channel mode to calculate the bpm series.
    This is a variant of the classic mode in which the series is kept as a circular list of per-second means.
    '''

    # variables
    bpms = list()  # series of [second, bpm] pairs: the bpm calculated for each second
    means = list()
    firstTime = True
    secondsBeforeNewLine = 20  # cosmetic: after how many per-second dots to print a new line
    fps = 0  # in a video, the fps is known
    videoLen = 0  # the number of frames in the video

    secondsInTheList = 30  # The seconds window to keep

    cap = cv2.VideoCapture(filepath)

    if not cap.isOpened():
        print("Unable to read the video")

    # Find OpenCV version
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    # take the fps and the number of frames of the video
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        videoLen = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        videoLen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # load the face cascade
    face_cascade = cv2.CascadeClassifier(
        os.path.join(sourcePath, 'haarcascade_frontalface_default.xml'))

    print("processing the video with the circular green mode...")
    print("the fps of the video is: " + str(fps) + " , " +
          "and the number of frames is: " + str(videoLen))
    print("please wait while it is calculating:")
    for currentFrame in range(0, videoLen):
        ret, frame = cap.read()
        #print(frame)
        try:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #Face detection
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            # process the ROI only if exactly one face is detected
            if len(faces) == 1:
                for (x, y, w, h) in faces:
                    reducedWidth = int(w * 0.63)
                    modifiedX = int(w * 0.15)
                    reduceHeigh = int(h * 0.25)
                    inizioY = int(h * 0.1)
                    fronteX = int(w - reducedWidth)

                    # recomputed every frame, since the face position can change
                    foreheadX = x + fronteX
                    foreheadY = y + inizioY
                    foreheadX2 = x + reducedWidth
                    foreheadY2 = y + reduceHeigh

                    #forehead detection
                    fronte = frame[foreheadY:foreheadY2, foreheadX:foreheadX2]
                    face = frame[y:y + h, x + modifiedX:x + w - modifiedX]

                    means.append(np.mean(fronte[:, :, 1]))

            else:  # TO CHANGE?: if no face is detected, use the previous mean as the mean
                if len(means) > 0:
                    means.append(means[len(means) - 1])

            # calculate roughly once per second instead of for every frame
            if (currentFrame > 0 and currentFrame % fps == 0):
                bpm = bpm_elaboration(means, fps)
                bpms.append([currentFrame / fps, bpm])
                print('.', end='', flush=True)  # one dot printed per second
                # print a new line every secondsBeforeNewLine seconds
                if int(currentFrame / fps) % secondsBeforeNewLine == 0:
                    print()

                # drop the oldest second once the window holds secondsInTheList seconds
                if (len(means) >= secondsInTheList * fps):
                    del means[0:int(fps)]

            # draw the forehead ROI on the frame
            if showVideo:
                cv2.rectangle(frame, (foreheadX, foreheadY),
                              (foreheadX2, foreheadY2), (0, 255, 0),
                              2)  # the rectangle drawn on the forehead

            if showVideo:  # show the video
                cv2.imshow('Video', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        except Exception as e:
            print("[exception GreenElaboration] At frame: " +
                  str(currentFrame) + " of " + str(videoLen) +
                  " there is no frame. Terminating...")
            #print("[exception GreenElaboration] cap.opened: "+str(cap.isOpened()))
            break  #just for now

    #bpm = bpm_elaboration(means, fps)
    print()  # move to a new line so the next print starts clean

    # clear up the capture
    cap.release()
    return bpms
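
A minimal usage sketch for the function above (assuming matplotlib is installed; 'sample.mp4' is a placeholder path, not one used by the project):

import matplotlib.pyplot as plt

bpms = greenElaboration('sample.mp4', showVideo=False)  # list of [second, bpm] pairs

if bpms:
    seconds = [point[0] for point in bpms]
    values = [point[1] for point in bpms]
    plt.plot(seconds, values)
    plt.xlabel('time (s)')
    plt.ylabel('bpm')
    plt.title('Green-channel bpm series')
    plt.show()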
Example #2
def dynGreenElaboration(filepath, showVideo=False):
    '''
    Script adapted from the arrota project, from webcam to video use.
    It uses the dynamic green mode to calculate the bpm series.
    '''

    # variables
    bpms = list()  # series of [second, bpm] pairs: the bpm calculated for each second
    means = list()
    firstTime = True
    secondOfFrame = 10  # window of seconds after which the saved means are cleared; 10 seconds by default
    # 1 means the means list is cleared every second, right after the calculation
    secondsBeforeNewLine = 20  # cosmetic: after how many per-second dots to print a new line
    fps = 0  # in a video, the fps is known
    videoLen = 0  # the number of frames in the video

    cap = cv2.VideoCapture(filepath)

    if not cap.isOpened():
        print("Unable to read the video")

    # Find OpenCV version
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    # take the fps and the number of frames of the video
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        videoLen = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        videoLen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    face_cascade = cv2.CascadeClassifier(
        os.path.join(sourcePath, 'haarcascade_frontalface_default.xml'))
    print(
        "[info dynGreenElaboration] Processing the video with the dynamic green mode..."
    )

    #print("[info dynGreenElaboration]the fps of the video is: "+str(fps)+" , "+ "and the number of frames is: "+str(videoLen) )
    #print("[info dynGreenElaboration]please wait while it is calculating:")
    for currentFrame in range(0, videoLen):
        ret, frame = cap.read()
        #print(frame)
        try:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #Face detection
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            if len(faces) == 1:
                for (x, y, w, h) in faces:
                    reducedWidth = int(w * 0.63)
                    modifiedX = int(w * 0.15)
                    reduceHeigh = int(h * 0.25)
                    inizioY = int(h * 0.1)
                    fronteX = int(w - reducedWidth)
                    fronteY = int(h - reduceHeigh)

                    face = frame[y:y + h, x + modifiedX:x + w - modifiedX]
                    if firstTime:
                        foreheadX = x + fronteX
                        foreheadY = y + inizioY
                        foreheadX2 = x + reducedWidth
                        foreheadY2 = y + reduceHeigh
                        firstTime = False
                    # forehead ROI
                    fronte = frame[foreheadY:foreheadY2, foreheadX:foreheadX2]
                    # keep only the green channel
                    green = fronte[:, :, 1]
                    fronte = np.zeros((green.shape[0], green.shape[1], 3),
                                      dtype=green.dtype)
                    fronte[:, :, 1] = green
                    means.append(np.mean(fronte[:, :, 1]))

            else:  # if no face is detected, use the previous mean as the mean
                if len(means) > 0:
                    means.append(means[len(means) - 1])
            # calculate roughly once per second instead of for every frame
            if (currentFrame > 0 and currentFrame % fps == 0):
                if (int(currentFrame / fps) % secondOfFrame == 0):
                    bpm = bpm_elaboration(means, fps)
                    bpms.append([currentFrame / fps, bpm])
                    means = []

                print('.', end='', flush=True)  # one dot printed per second
                if (int(currentFrame / fps) % secondsBeforeNewLine == 0):
                    print()
            # draw the forehead ROI on the frame
            if showVideo:
                cv2.rectangle(frame, (foreheadX, foreheadY),
                              (foreheadX2, foreheadY2), (0, 255, 0),
                              2)  # the rectangle drawn on the forehead

            if showVideo:  # show the video
                cv2.imshow('Video', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        except Exception as e:
            print("[exception dynGreenElaboration] At frame: " +
                  str(currentFrame) + " of " + str(videoLen) +
                  " there is no frame. Terminating...")
            #print("[exception dynGreenElaboration] cap.opened: "+str(cap.isOpened()))
            break  #just for now

    #bpm = bpm_elaboration(means, fps)
    print()  # move to a new line so the next print starts clean

    # clear up the capture
    cap.release()
    return bpms
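
The helper bpm_elaboration is not shown in these examples. A plausible FFT-based sketch of what such a helper could look like, assuming the means are sampled at fps Hz and the heart rate lies between 45 and 180 bpm (the function name and band limits are assumptions, not the project's actual implementation):

import numpy as np

def bpm_elaboration_sketch(means, fps):
    # hypothetical stand-in for the project's bpm_elaboration helper
    signal = np.asarray(means, dtype=float)
    signal = signal - signal.mean()  # remove the DC component
    # power spectrum of the green-channel means
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / fps)
    power = np.abs(np.fft.rfft(signal)) ** 2
    # keep only frequencies in a plausible heart-rate band (0.75-3 Hz, i.e. 45-180 bpm)
    band = (freqs >= 0.75) & (freqs <= 3.0)
    if not band.any():
        return 0
    peak = freqs[band][np.argmax(power[band])]
    return peak * 60.0  # Hz -> beats per minute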
Example #3
    def greenElaboration(self):
        '''
        Script adapted from the arrota project. It uses the green channel mode to calculate the bpm series.
        In this case everything happens once per frame, instead of inside the usual while loop within this method.
        '''

        self.frame = self.vs.read()
        # skip the first seconds to let the camera focus settle
        if time.clock() > 3:

            #Face detection
            gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)

            if len(faces) == 1:  #The detection is of just 1 face
                if self.debug == True:
                    cv2.putText(self.frame, "Please, don't move",
                                (self.leftOffset, self.lowHeightOffset),
                                cv2.FONT_HERSHEY_SIMPLEX, self.tsize - 2,
                                self.redCol, 1, cv2.LINE_AA)  # just a warning
                for (x, y, w, h) in faces:
                    # skin detection: pick the forehead region
                    reducedWidth = int(w * 0.63)
                    modifiedX = int(w * 0.15)
                    reduceHeigh = int(h * 0.25)
                    inizioY = int(h * 0.1)
                    frontX = int(w - reducedWidth)

                    # Picking the coordinates of the forehead
                    self.foreheadY = y + inizioY
                    self.foreheadX = x + frontX
                    self.foreheadX2 = x + reducedWidth
                    self.foreheadY2 = y + reduceHeigh

                    # initial time and start the savings
                    if self.firstTime:
                        self.firstTime = False
                        self.t0 = time.clock()
                        self.vs.startSaving()
                    # Note: the rectangle is drawn further down, once the forehead ROI has been determined
                    #cv2.rectangle(frame, (foreheadX, foreheadY), (foreheadX2, foreheadY2), greenCol, 2) # the rectangle that will be shown on the forehead
            else:

                if len(faces) == 0:
                    #print("[info] Green WebcamThreadCirc: no face detected")
                    if self.debug == True:
                        cv2.putText(self.frame, "No face detected...",
                                    (self.leftOffset, self.lowHeightOffset),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, self.redCol)
                else:
                    #print("[info] Green WebcamThreadCirc: too many facec detected")
                    if self.debug == True:
                        cv2.putText(self.frame, "Too many faces...",
                                    (self.leftOffset, self.lowHeightOffset),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, self.redCol)

            if len(self.times) > 9 and len(self.means) > 0:
                # wait a few frames before starting the bpm calculation
                # THE CALCULATION
                # calculate roughly once per second: the user won't notice the difference between time+0.5 and time+1 seconds
                if time.clock() - self.timeBetweenCalculations >= 1:
                    self.timeBetweenCalculations = time.clock()

                    # determines the means for the new frames
                    if (not self.vs.isNotSaving):
                        # TO DO: circular means/greens
                        if (len(self.bpms) >= self.secondsInTheList):
                            del self.bpms[0]
                            del self.means[0:int(self.fps)]
                            del self.times[0:int(self.fps)]

                        self.means, self.times = self.determineMeans(
                            self.vs, self.means, self.times, self.foreheadX,
                            self.foreheadY, self.foreheadX2, self.foreheadY2)
                        if self.debug == True:
                            cv2.rectangle(
                                self.frame, (self.foreheadX, self.foreheadY),
                                (self.foreheadX2, self.foreheadY2),
                                self.greenCol, 2
                            )  # the rectangle that will be shown on the forehead
                    #print("After determine red : "+str(len(red_means))+ "and times: "+str(len(times)))

                    # when the two lengths differ, for some reason
                    if len(self.times) != len(self.means):
                        print(
                            "[info check len(times) != len(means)] different len: "
                            + str(len(self.times)) + " , " +
                            str(len(self.means)))
                        self.means = adjustList(self.means, self.times)
                    self.fps = fps_elaboration(self.times)

                    self.bpm = bpm_elaboration(self.means, self.fps,
                                               self.times)
                    self.bpms.append([time.clock() - self.t0, int(self.bpm)])
                    # used when there is a time window to investigate; 30 stands for 30 seconds

                    if (self.fps * 30 > 250):
                        max_samples = int(self.fps) * 30
                else:
                    # TO DO: 35 is static, we should use a proportional time
                    self.fremaining = int(35 + self.t0 - time.clock())  # probably to be replaced with a dynamic remaining
                    #print("time passed after the last check: "+ str(timeBetweenCalculations))

                # this way we don't show non-positive bpm values (startup or wrong calculations)
                if self.bpm > 0:
                    if self.debug == True:
                        cv2.putText(self.frame, "bpm: " + str(int(self.bpm)),
                                    (self.foreheadX - 25, self.foreheadY - 45),
                                    cv2.FONT_HERSHEY_SIMPLEX, self.tsize - 2,
                                    self.redCol)  # display the bpm
            else:
                if self.debug == True:
                    cv2.putText(self.frame, "Starting...",
                                (self.leftOffset, self.highHeightOffset),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                self.redCol)  # second message
                    cv2.putText(self.frame, "Please, don't move",
                                (self.leftOffset, self.lowHeightOffset + 40),
                                cv2.FONT_HERSHEY_SIMPLEX, self.tsize - 2,
                                self.redCol, 1, cv2.LINE_AA)  # just a warning

                # calculate roughly once per second instead of every frame, to keep the fps up
                if time.clock() - self.timeBetweenCalculations >= 1:
                    self.timeBetweenCalculations = time.clock()
                    # determines the means for the new frames
                    if (not self.vs.isNotSaving):
                        self.means, self.times = self.determineMeans(
                            self.vs, self.means, self.times, self.foreheadX,
                            self.foreheadY, self.foreheadX2, self.foreheadY2)
                        if self.debug == True:
                            cv2.rectangle(
                                self.frame, (self.foreheadX, self.foreheadY),
                                (self.foreheadX2, self.foreheadY2),
                                self.greenCol, 2
                            )  # the rectangle that will be shown on the forehead
                    if self.debug == True:
                        cv2.imshow('Webcam', self.frame)

        else:
            if self.debug == True:
                cv2.putText(self.frame, "Loading...",
                            (self.leftOffset, self.highHeightOffset),
                            cv2.FONT_HERSHEY_SIMPLEX, self.tsize - 2,
                            self.redCol)  # first message
                cv2.putText(self.frame, "Please, take off your glasses",
                            (self.leftOffset, self.lowHeightOffset),
                            cv2.FONT_HERSHEY_SIMPLEX, self.tsize - 2,
                            self.redCol, 1, cv2.LINE_AA)  # just a warning
                #cv2.imshow('Webcam', frame)
        '''	
		if time.clock() >= 180:	# this is ok only if the investigation time is under 180 seconds
			print("[Debug time clock>180] over the time break")
			self.vs.stop()
			cv2.destroyAllWindows()
			return -3
		'''

        if self.debug == True:
            cv2.imshow('Webcam', self.frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):  # press Q on the keyboard to stop showing the camera feed
            print("return in green webcamThread")
            self.stop()
            return

        #last calculation after the loop
        #print("last before rppg. red : "+str(len(red_means))+ " and times: "+str(len(times)))
        '''		
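
All four examples pick the forehead region with the same proportions of the detected face box. A small standalone sketch of that geometry (the helper name is hypothetical; the proportions are the ones used in the code above):

def forehead_roi_sketch(frame, face_box):
    # face_box is the (x, y, w, h) rectangle returned by the Haar cascade
    x, y, w, h = face_box
    reducedWidth = int(w * 0.63)
    reducedHeight = int(h * 0.25)
    startY = int(h * 0.1)
    frontX = int(w - reducedWidth)
    # forehead rectangle, expressed in frame coordinates:
    # a horizontal band from 37% to 63% of the face width,
    # between 10% and 25% of the face height
    foreheadX, foreheadY = x + frontX, y + startY
    foreheadX2, foreheadY2 = x + reducedWidth, y + reducedHeight
    return frame[foreheadY:foreheadY2, foreheadX:foreheadX2]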
Example #4
def greenElaboration(camSource=0):
    '''
    Script adapted from the arrota project. It uses the green channel mode to calculate the bpm series.
    The input is the camera source number (0 is the default one).
    '''

    # calling the thread responsible for the webcam readings
    vs = web.WebcamStreamForHRV(
        src=camSource).start()  #to change when this becomes a function
    face_cascade = cv2.CascadeClassifier(
        os.path.join(sourcePath, 'haarcascade_frontalface_default.xml'))

    #variables
    frame_width = vs.getWidth()
    frame_height = vs.getHeight()

    redCol = (0, 0, 255)  # red color in BGR
    greenCol = (0, 255, 0)  # green color in BGR
    tsize = 3  # font scale of the text elements

    max_samples = 300  # number of frames to collect before end
    remaining = -1  # -1 means the countdown has not started yet
    t0 = 0
    bpm = 0

    times = list()
    bpms = list()  # series of [second, bpm] pairs: the bpm calculated for each second
    means = list()

    firstTime = True
    secondsBeforeNewLine = 20  # cosmetic: after how many per-second dots to print a new line
    red_means = list()
    blue_means = list()

    # precomputed values, so they are not recalculated every frame
    leftOffset = int(frame_width / 21)  # about 30 px of offset with a 640 px frame width
    highHeightOffset = int(frame_height * 3 / 24)  # 60 px of offset with a 480 px frame height
    lowHeightOffset = int(frame_height * 21 / 24)  # 420 px of offset with a 480 px frame height
    heightOffsetSeconds = int(frame_height * 5 / 24)  # 100 px of offset with a 480 px frame height
    timeBetweenCalculations = 0  # used to wait before recalculating the bpm

    print("starting the green webcam mode...")
    print("please wait while it is calculating:")

    # first loop: read the frames, save the means and work on them
    while True:
        frame = vs.read()
        # skip the first seconds to let the camera focus settle
        if time.clock() > 3:
            # display the countdown once it has started
            if max_samples - len(means) >= 0 and remaining != -1:
                cv2.putText(frame, str(remaining),
                            (leftOffset, heightOffsetSeconds),
                            cv2.FONT_HERSHEY_SIMPLEX, tsize, redCol)

            #Face detection
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            if len(faces) == 1:  #The detection is of just 1 face
                cv2.putText(frame, "Please, don't move",
                            (leftOffset, lowHeightOffset),
                            cv2.FONT_HERSHEY_SIMPLEX, tsize - 2, redCol, 1,
                            cv2.LINE_AA)  # just a warning
                for (x, y, w, h) in faces:
                    # skin detection: pick the forehead region
                    reducedWidth = int(w * 0.63)
                    modifiedX = int(w * 0.15)
                    reduceHeigh = int(h * 0.25)
                    inizioY = int(h * 0.1)
                    frontX = int(w - reducedWidth)

                    # Picking the coordinates of the forehead
                    foreheadY = y + inizioY
                    foreheadX = x + frontX
                    foreheadX2 = x + reducedWidth
                    foreheadY2 = y + reduceHeigh

                    # initial time and start the savings
                    if firstTime:
                        firstTime = False
                        t0 = time.clock()
                        vs.startSaving()
                    # Note: the rectangle is drawn further down, once the forehead ROI has been determined
                    #cv2.rectangle(frame, (foreheadX, foreheadY), (foreheadX2, foreheadY2), greenCol, 2) # the rectangle that will be shown on the forehead
            else:
                if len(faces) == 0:
                    cv2.putText(frame, "No face detected...",
                                (leftOffset, lowHeightOffset),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, redCol)
                else:
                    cv2.putText(frame, "Too many faces...",
                                (leftOffset, lowHeightOffset),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, redCol)

            if len(times) > 9 and len(means) > 0:
                # wait a few frames before starting the bpm calculation
                # THE CALCULATION
                # calculate roughly once per second: the user won't notice the difference between time+0.5 and time+1 seconds
                if time.clock() - timeBetweenCalculations >= 1:
                    timeBetweenCalculations = time.clock()

                    # determines the means for the new frames
                    if (not vs.isNotSaving):
                        means, times = determineMeans(vs, means, times,
                                                      foreheadX, foreheadY,
                                                      foreheadX2, foreheadY2)
                        cv2.rectangle(
                            frame, (foreheadX, foreheadY),
                            (foreheadX2, foreheadY2), greenCol, 2
                        )  # the rectangle that will be shown on the forehead
                    #print("After determine red : "+str(len(red_means))+ "and times: "+str(len(times)))

                    # when the two lengths differ, for some reason
                    if len(times) != len(means):
                        print(
                            "[info check len(times) != len(means)] different len: "
                            + str(len(times)) + " , " + str(len(means)))
                        means = adjustList(means, times)
                    fps = fps_elaboration(times)

                    bpm = bpm_elaboration(means, fps, times)
                    bpms.append([time.clock() - t0, int(bpm)])
                    # used when there is a time window to investigate; 30 stands for 30 seconds
                    if (fps * 30 > 250):
                        max_samples = int(fps) * 30
                else:
                    # TO DO: 35 is static, we should use a proportional time
                    remaining = int(35 + t0 - time.clock())  # probably to be replaced with a dynamic remaining
                    #print("time passed after the last check: "+ str(timeBetweenCalculations))

                # this way we don't show non-positive bpm values (startup or wrong calculations)
                if bpm > 0:
                    cv2.putText(frame, "bpm: " + str(int(bpm)),
                                (foreheadX - 25, foreheadY - 45),
                                cv2.FONT_HERSHEY_SIMPLEX, tsize - 2,
                                redCol)  # display the bpm
            else:
                cv2.putText(frame, "Starting...",
                            (leftOffset, highHeightOffset),
                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                            redCol)  # second message
                cv2.putText(frame, "Please, don't move",
                            (leftOffset, lowHeightOffset + 40),
                            cv2.FONT_HERSHEY_SIMPLEX, tsize - 2, redCol, 1,
                            cv2.LINE_AA)  # just a warning
                # calculate roughly once per second instead of every frame, to keep the fps up
                if time.clock() - timeBetweenCalculations >= 1:
                    timeBetweenCalculations = time.clock()
                    # determines the means for the new frames
                    if (not vs.isNotSaving):
                        means, times = determineMeans(vs, means, times,
                                                      foreheadX, foreheadY,
                                                      foreheadX2, foreheadY2)
                        cv2.rectangle(
                            frame, (foreheadX, foreheadY),
                            (foreheadX2, foreheadY2), greenCol, 2
                        )  # the rectangle that will be shown on the forehead
                    cv2.imshow('Webcam', frame)

        else:
            cv2.putText(frame, "Loading...", (leftOffset, highHeightOffset),
                        cv2.FONT_HERSHEY_SIMPLEX, tsize - 2,
                        redCol)  # first message
            cv2.putText(frame, "Please, take off your glasses",
                        (leftOffset, lowHeightOffset),
                        cv2.FONT_HERSHEY_SIMPLEX, tsize - 2, redCol, 1,
                        cv2.LINE_AA)  # just a warning
            #cv2.imshow('Webcam', frame)

        # this is ok only if the investigation time is under 180 seconds
        if time.clock() >= 180:
            print("[Debug time clock>180] over the time break")
            vs.stop()
            cv2.destroyAllWindows()
            return -3

        if len(times) >= max_samples:
            #print("times> max samples. red : "+str(len(red_means))+ "and times: "+str(len(times)))
            break

        cv2.imshow('Webcam', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):  # press Q on the keyboard to stop showing the camera feed
            break

    #last calculation after the loop
    #print("last before rppg. red : "+str(len(red_means))+ " and times: "+str(len(times)))
    try:
        bpm = bpm_elaboration(means, fps, times)
        bpms.append([time.clock() - t0, int(bpm)])
    except ValueError as e:  # the Butterworth filter step failed
        print(" last bpm.ValueError: {0}".format(e))
        vs.stop()
        cv2.destroyAllWindows()
        return -1
    except Exception as e:
        print("last calculation. Unknown error: " + (str(e)))
        vs.stop()
        cv2.destroyAllWindows()
        return -2

    #print("[info] the calculated bpm is : " + str(bpm))
    vs.stopSaving()

    # second loop: show the result, without computing anything further
    while True:
        frame = vs.read()
        cv2.putText(frame, "Press 'q' to quit", (leftOffset, highHeightOffset),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, redCol)
        cv2.putText(frame, "Final bpm: " + str(int(bpm)),
                    (leftOffset, heightOffsetSeconds),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, redCol)
        cv2.imshow('Webcam', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # do a bit of cleanup
    vs.stop()
    cv2.destroyAllWindows()
    return bpms
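
A minimal usage sketch for the webcam variant above; the negative error codes are the ones returned in the function body:

result = greenElaboration(camSource=0)

if isinstance(result, int) and result < 0:
    # -1: the Butterworth step raised a ValueError
    # -2: unknown error in the last calculation
    # -3: the 180-second time limit was reached
    print("acquisition failed with code", result)
else:
    # result is a list of [elapsed seconds, bpm] pairs
    for second, bpm in result:
        print("t=%.1fs bpm=%d" % (second, bpm))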