Example #1
# Assumed imports: numpy and matplotlib are certain from the code below;
# Webcam comes from a local webcam helper module.
import numpy as np
import matplotlib.pyplot as plt
from webcam import Webcam  # local module (assumption)


class MUcamera:
    def __init__(self):
        self.w = Webcam()

        self.w.start()
        self.im = self.w.grab_image()
        self.w.register_callback(self.average_intensity, 1)
        self.avg_intensity = []
        self.images = []
        self.filt_list = []
        self.f, self.ax = plt.subplots(1, 1)
        self.day = []

    def average_intensity(self, image=None):
        # Registered as a webcam callback (which passes a frame) but also
        # called directly from the plot method below; fall back to the
        # initially grabbed image when no frame is supplied.
        if image is None:
            image = self.im
        pix_val = list(image.getdata())
        pixel_intensity = []
        for x in pix_val:
            avg = sum(x) / len(x)
            pixel_intensity.append(avg)
        self.avg_pixel = np.average(pixel_intensity)
        self.avg_intensity.append(self.avg_pixel)
        return self.avg_intensity

    def average_intensity_filtered(self):
        # Width-3 moving average over the collected intensities, computed
        # once at least five samples exist.
        width = 3
        if len(self.avg_intensity) >= 5:
            for x in range(len(self.avg_intensity) - 2):
                self.filt_list.append(
                    (self.avg_intensity[x] + self.avg_intensity[x + 1] +
                     self.avg_intensity[x + 2]) / width)
        return self.filt_list


    def daytime(self):
        # Mean pixel intensity at or above an empirical threshold of 95
        # counts as daytime.
        self.average = np.mean(np.mean(self.im, axis=1))
        is_day = self.average >= 95
        print(is_day)
        return is_day

    def most_common_color(self):
        w, h = self.im.size
        pixels = self.im.getcolors(w * h)
        print(len(pixels))
        most_frequent_pixel = pixels[0]
        for count, color in pixels:
            if count > most_frequent_pixel[0]:
                most_frequent_pixel = (count, color)
        print(most_frequent_pixel[0] / len(pixels), most_frequent_pixel)
        return most_frequent_pixel

    def stop(self):
        self.w.stop()
        self.daytime()
        self.most_common_color()
        self.average_intensity_mean_plot()
        self.average_intensity_filtered_plot()

    def average_intensity_mean_plot(self):
        self.average_intensity()
        self.ax.plot(self.avg_intensity, 'C1')
        self.ax.set_xlabel('Image Number')
        self.ax.set_ylabel('Intensity')
        self.ax.set_title('Image Intensity')

    def average_intensity_filtered_plot(self):
        self.average_intensity_filtered()
        self.ax.plot(self.filt_list, 'C2')
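
A minimal usage sketch for the class above (a sketch, assuming the local webcam module delivers roughly one frame per minute, as other examples in this listing state):

    import time

    cam = MUcamera()   # starts the webcam and registers the callback
    time.sleep(300)    # let the callback collect a few samples
    cam.stop()         # stops capture, prints daytime/most-common-color, plots
    plt.show()
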
Example #2

# Assumed imports: PyQt5, pyqtgraph, OpenCV and numpy are certain from the
# code below. Webcam, Video, Process and Communicate come from this project's
# local modules (their import paths are not shown in the source).
import os
import sys
import json
import numpy as np
import cv2
import pyqtgraph as pg
from PyQt5 import QtCore
from PyQt5.QtGui import QFont, QImage, QPixmap
from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton,
                             QComboBox, QLabel, QStatusBar, QMessageBox,
                             QFileDialog, QDesktopWidget)


# QThread has been dropped from the bases: PyQt does not support inheriting
# from two QObject subclasses, and nothing below uses the thread API.
class GUI(QMainWindow):
    def __init__(self):
        if os.path.exists("data.json"):
            os.remove("data.json")

        super(GUI, self).__init__()
        self.initUI()
        self.webcam = Webcam()
        self.video = Video()
        self.input = self.webcam
        self.dirname = ""
        print("Input: webcam")
        self.statusBar.showMessage("Input: webcam", 5000)
        self.btnOpen.setEnabled(False)
        self.process = Process()
        self.status = False
        self.frame = np.zeros((10, 10, 3), np.uint8)
        #self.plot = np.zeros((10,10,3),np.uint8)
        self.bpm = 0

    def initUI(self):
        #set font
        font = QFont()
        font.setPointSize(16)

        #widgets
        self.btnStart = QPushButton("Start", self)
        self.btnStart.move(440, 520)
        self.btnStart.setFixedWidth(200)
        self.btnStart.setFixedHeight(50)
        self.btnStart.setFont(font)
        self.btnStart.clicked.connect(self.run)

        self.btnOpen = QPushButton("Open", self)
        self.btnOpen.move(230, 520)
        self.btnOpen.setFixedWidth(200)
        self.btnOpen.setFixedHeight(50)
        self.btnOpen.setFont(font)
        self.btnOpen.clicked.connect(self.openFileDialog)

        self.cbbInput = QComboBox(self)
        self.cbbInput.addItem("Webcam")
        self.cbbInput.addItem("Video")
        self.cbbInput.setCurrentIndex(0)
        self.cbbInput.setFixedWidth(200)
        self.cbbInput.setFixedHeight(50)
        self.cbbInput.move(20, 520)
        self.cbbInput.setFont(font)
        self.cbbInput.activated.connect(self.selectInput)
        #-------------------

        self.lblDisplay = QLabel(self)  #label to show frame from camera
        self.lblDisplay.setGeometry(10, 10, 640, 480)
        self.lblDisplay.setStyleSheet("background-color: #000000")

        self.lblROI = QLabel(self)  #label to show face with ROIs
        self.lblROI.setGeometry(660, 10, 200, 200)
        self.lblROI.setStyleSheet("background-color: #000000")

        self.lblHR = QLabel(self)  #label to show HR change over time
        self.lblHR.setGeometry(900, 20, 300, 40)
        self.lblHR.setFont(font)
        self.lblHR.setText("Frequency: ")

        self.lblHR2 = QLabel(self)  #label to show stable HR
        self.lblHR2.setGeometry(900, 70, 300, 40)
        self.lblHR2.setFont(font)
        self.lblHR2.setText("Heart rate: ")

        # self.lbl_Age = QLabel(self) #label to show stable HR
        # self.lbl_Age.setGeometry(900,120,300,40)
        # self.lbl_Age.setFont(font)
        # self.lbl_Age.setText("Age: ")

        # self.lbl_Gender = QLabel(self) #label to show stable HR
        # self.lbl_Gender.setGeometry(900,170,300,40)
        # self.lbl_Gender.setFont(font)
        # self.lbl_Gender.setText("Gender: ")

        #dynamic plot
        self.signal_Plt = pg.PlotWidget(self)

        self.signal_Plt.move(660, 220)
        self.signal_Plt.resize(480, 192)
        self.signal_Plt.setLabel('bottom', "Signal")

        self.fft_Plt = pg.PlotWidget(self)

        self.fft_Plt.move(660, 425)
        self.fft_Plt.resize(480, 192)
        self.fft_Plt.setLabel('bottom', "FFT")

        self.timer = pg.QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(200)

        self.statusBar = QStatusBar()
        self.statusBar.setFont(font)
        self.setStatusBar(self.statusBar)

        #event close
        self.c = Communicate()
        self.c.closeApp.connect(self.close)

        #event change combobox index

        #config main window
        self.setGeometry(100, 100, 1160, 640)
        #self.center()
        self.setWindowTitle("Heart rate monitor")
        self.show()

    def update(self):
        #z = np.random.normal(size=1)
        #u = np.random.normal(size=1)
        self.signal_Plt.clear()
        self.signal_Plt.plot(self.process.samples[20:], pen='g')

        self.fft_Plt.clear()
        self.fft_Plt.plot(np.column_stack(
            (self.process.freqs, self.process.fft)),
                          pen='g')

    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def closeEvent(self, event):
        reply = QMessageBox.question(self, "Message",
                                     "Are you sure want to quit",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            event.accept()
            self.input.stop()
            cv2.destroyAllWindows()
        else:
            event.ignore()

    def selectInput(self):
        self.reset()
        if self.cbbInput.currentIndex() == 0:
            self.input = self.webcam
            print("Input: webcam")
            self.btnOpen.setEnabled(False)
            self.statusBar.showMessage("Input: webcam", 5000)
        elif self.cbbInput.currentIndex() == 1:
            self.input = self.video
            print("Input: video")
            self.btnOpen.setEnabled(True)
            self.statusBar.showMessage("Input: video", 5000)

    def mousePressEvent(self, event):
        self.c.closeApp.emit()


    def key_handler(self):
        """
        The cv2 window must be focused for keypresses to be detected.
        """
        self.pressed = cv2.waitKey(1) & 255  # wait up to 1 ms for a keypress
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def openFileDialog(self):
        # PyQt5's getOpenFileName returns a (filename, filter) tuple.
        self.dirname, _ = QFileDialog.getOpenFileName(
            self, 'OpenFile', r"C:\Users\uidh2238\Desktop\test videos")
        self.statusBar.showMessage("File name: " + self.dirname, 5000)

    def reset(self):
        self.process.reset()
        self.lblDisplay.clear()
        self.lblDisplay.setStyleSheet("background-color: #000000")

    @QtCore.pyqtSlot()
    def main_loop(self):
        frame = self.input.get_frame()

        self.process.frame_in = frame
        self.process.run()

        cv2.imshow("Processed", frame)

        self.frame = self.process.frame_out  #get the frame to show in GUI
        self.f_fr = self.process.frame_ROI  #get the face to show in GUI
        #print(self.f_fr.shape)
        self.bpm = self.process.bpm  #get the bpm change over the time

        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
        cv2.putText(self.frame, "FPS {:.2f}".format(self.process.fps),
                    (20, 460), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 255), 2)
        img = QImage(self.frame, self.frame.shape[1], self.frame.shape[0],
                     self.frame.strides[0], QImage.Format_RGB888)
        self.lblDisplay.setPixmap(QPixmap.fromImage(img))

        self.f_fr = cv2.cvtColor(self.f_fr, cv2.COLOR_RGB2BGR)
        #self.lblROI.setGeometry(660,10,self.f_fr.shape[1],self.f_fr.shape[0])
        self.f_fr = self.f_fr.copy()  # ensure a contiguous buffer for QImage
        f_img = QImage(self.f_fr, self.f_fr.shape[1], self.f_fr.shape[0],
                       self.f_fr.strides[0], QImage.Format_RGB888)
        self.lblROI.setPixmap(QPixmap.fromImage(f_img))

        self.lblHR.setText("Freq: " + str(float("{:.2f}".format(self.bpm))))

        if len(self.process.bpms) > 50:
            # Show the HR once it is stable: no sample deviates from the
            # window mean by more than 5 bpm (roughly 3 s of data).
            if np.max(np.abs(self.process.bpms - np.mean(self.process.bpms))) < 5:
                self.lblHR2.setText(
                    "Heart rate: {:.2f} bpm".format(np.mean(self.process.bpms)))

        #entry = "FPS: " + str(float("{:.2f}".format(self.process.fps)))
        #entry = "Freq: " + str(float("{:.2f}".format(self.bpm)))
        #entry = "Heart rate: " + str(float("{:.2f}".format(np.mean(self.process.bpms))))

        entry = [
            "FPS: {:.2f}".format(self.process.fps),
            "Freq: {:.2f}".format(self.bpm),
            "Heart rate: {:.2f}".format(np.mean(self.process.bpms))
        ]
        with open('data.json', 'a') as outfile:
            outfile.write(json.dumps(entry))
            outfile.write(",")

        #self.lbl_Age.setText("Age: "+str(self.process.age))
        #self.lbl_Gender.setText("Gender: "+str(self.process.gender))
        #self.make_bpm_plot()#need to open a cv2.imshow() window to handle a pause
        #QtTest.QTest.qWait(10)#wait for the GUI to respond
        self.key_handler()  # otherwise the GUI cannot refresh

    def run(self):
        # Connected to btnStart.clicked; toggles capture on and off.
        self.reset()
        self.input.dirname = self.dirname
        if self.input.dirname == "" and self.input == self.video:
            print("choose a video first")
            self.statusBar.showMessage("choose a video first", 5000)
            return
        if not self.status:
            self.status = True
            self.input.start()
            self.btnStart.setText("Stop")
            self.cbbInput.setEnabled(False)
            self.btnOpen.setEnabled(False)
            self.lblHR2.clear()
            while self.status:
                self.main_loop()
        else:
            self.status = False
            self.input.stop()
            self.btnStart.setText("Start")
            self.cbbInput.setEnabled(True)
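
The example never shows how the window is created; a minimal launcher sketch, assuming the imports listed above:

    if __name__ == "__main__":
        app = QApplication(sys.argv)  # one application object per Qt process
        gui = GUI()                   # __init__ calls initUI(), which calls show()
        sys.exit(app.exec_())         # hand control to the Qt event loop
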
Example #3

# Same assumed imports as Example #2; this variant also uses QtWidgets
# directly and a local plotXY helper for make_bpm_plot.
from PyQt5 import QtWidgets


# As in Example #2, QThread is dropped: PyQt cannot inherit from two QObject
# subclasses.
class GUI(QtWidgets.QMainWindow):
    def __init__(self):
        super(GUI, self).__init__()
        self.initUI()
        self.webcam = Webcam()
        self.video = Video()
        self.input = self.webcam
        self.dirname = ""
        print("Input: webcam")
        self.statusBar.showMessage("Input: webcam", 5000)
        self.btnOpen.setEnabled(False)
        self.process = Process()
        self.status = False
        self.frame = np.zeros((10, 10, 3), np.uint8)
        self.plot = np.zeros((10, 10, 3), np.uint8)
        self.bpm = 0

    def initUI(self):

        #set font
        font = QFont()
        font.setPointSize(16)

        #widgets
        self.btnStart = QtWidgets.QPushButton("Start", self)
        self.btnStart.move(680, 400)
        self.btnStart.setFixedWidth(200)
        self.btnStart.setFixedHeight(50)
        self.btnStart.setFont(font)
        self.btnStart.clicked.connect(self.run)

        self.btnOpen = QtWidgets.QPushButton("Open", self)
        self.btnOpen.move(680, 340)
        self.btnOpen.setFixedWidth(200)
        self.btnOpen.setFixedHeight(50)
        self.btnOpen.setFont(font)
        self.btnOpen.clicked.connect(self.openFileDialog)

        self.cbbInput = QtWidgets.QComboBox(self)
        self.cbbInput.addItem("Webcam")
        self.cbbInput.addItem("Video")
        self.cbbInput.setCurrentIndex(0)
        self.cbbInput.setFixedWidth(200)
        self.cbbInput.setFixedHeight(50)
        self.cbbInput.move(680, 280)
        self.cbbInput.setFont(font)
        self.cbbInput.activated.connect(self.selectInput)

        self.lblDisplay = QtWidgets.QLabel(
            self)  #label to show frame from camera
        self.lblDisplay.setGeometry(10, 10, 640, 480)
        self.lblDisplay.setStyleSheet("background-color: #000000")

        self.lblROI = QtWidgets.QLabel(self)  #label to show face with ROIs
        self.lblROI.setGeometry(660, 10, 256, 256)
        self.lblROI.setStyleSheet("background-color: #000000")

        self.lblHR = QtWidgets.QLabel(self)  #label to show HR change over time
        self.lblHR.setGeometry(680, 450, 300, 40)
        self.lblHR.setFont(font)

        self.lblHR2 = QtWidgets.QLabel(self)  #label to show stable HR
        self.lblHR2.setGeometry(680, 485, 300, 40)
        self.lblHR2.setFont(font)

        self.lblPlot = QtWidgets.QLabel(self)  #label to show plot

        self.statusBar = QtWidgets.QStatusBar()
        self.statusBar.setFont(font)
        self.setStatusBar(self.statusBar)

        #event close
        self.c = Communicate()
        self.c.closeApp.connect(self.close)

        #event change combobox index

        #config main window
        self.setGeometry(100, 100, 950, 540)
        #self.center()
        self.setWindowTitle("Heart rate monitor")
        self.show()

    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def closeEvent(self, event):
        cv2.destroyAllWindows()

    def selectInput(self):
        self.reset()
        if self.cbbInput.currentIndex() == 0:
            self.input = self.webcam
            print("Input: webcam")
            self.btnOpen.setEnabled(False)
            self.statusBar.showMessage("Input: webcam", 5000)
        elif self.cbbInput.currentIndex() == 1:
            self.input = self.video
            print("Input: video")
            self.btnOpen.setEnabled(True)
            self.statusBar.showMessage("Input: video", 5000)

    def mousePressEvent(self, event):
        self.c.closeApp.emit()

    def make_bpm_plot(self):

        plotXY([[self.process.times[20:], self.process.samples[20:]],
                [self.process.freqs, self.process.fft]],
               labels=[False, True],
               showmax=[False, "bpm"],
               label_ndigits=[0, 0],
               showmax_digits=[0, 1],
               skip=[3, 3],
               name="Plot",
               bg=None)


    def key_handler(self):
        """
        cv2 window must be focused for keypresses to be detected.
        """
        self.pressed = cv2.waitKey(1) & 255  # wait up to 1 ms for a keypress
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def openFileDialog(self):
        # PyQt5's getOpenFileName returns a (filename, filter) tuple.
        self.dirname, _ = QFileDialog.getOpenFileName(
            self, 'OpenFile', r"C:\Users\uidh2238\Desktop\test videos")
        self.statusBar.showMessage("File name: " + self.dirname, 5000)

    def reset(self):
        self.process.reset()
        self.lblDisplay.clear()
        self.lblDisplay.setStyleSheet("background-color: #000000")

    def main_loop(self):

        frame = self.input.get_frame()

        self.process.frame_in = frame
        self.process.run()

        #cv2.imshow("Processed", frame)

        self.frame = self.process.frame_out  #get the frame to show in GUI
        self.f_fr = self.process.frame_ROI  #get the face to show in GUI
        self.bpm = self.process.bpm  #get the bpm change over the time

        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
        cv2.putText(self.frame, "FPS {:.2f}".format(self.process.fps),
                    (20, 460), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 255), 2)
        img = QImage(self.frame, self.frame.shape[1], self.frame.shape[0],
                     self.frame.strides[0], QImage.Format_RGB888)
        self.lblDisplay.setPixmap(QPixmap.fromImage(img))

        self.f_fr = cv2.cvtColor(self.f_fr, cv2.COLOR_RGB2BGR)
        #self.lblROI.setGeometry(660,10,self.f_fr.shape[1],self.f_fr.shape[0])
        self.f_fr = self.f_fr.copy()  # ensure a contiguous buffer for QImage
        f_img = QImage(self.f_fr, self.f_fr.shape[1], self.f_fr.shape[0],
                       self.f_fr.strides[0], QImage.Format_RGB888)
        self.lblROI.setPixmap(QPixmap.fromImage(f_img))

        self.lblHR.setText("Freq: " + str(float("{:.2f}".format(self.bpm))))

        if len(self.process.bpms) > 50:
            # Show the HR once it is stable: no sample deviates from the
            # window mean by more than 5 bpm (roughly 3 s of data).
            if np.max(np.abs(self.process.bpms - np.mean(self.process.bpms))) < 5:
                self.lblHR2.setText(
                    "Heart rate: {:.2f} bpm".format(np.mean(self.process.bpms)))

        self.make_bpm_plot()  # needs an open cv2.imshow() window to handle a pause
        #QtTest.QTest.qWait(10)  # wait for the GUI to respond
        self.key_handler()  # otherwise the GUI cannot refresh

    def run(self):
        # Connected to btnStart.clicked; toggles capture on and off.
        self.reset()
        self.input.dirname = self.dirname
        if self.input.dirname == "" and self.input == self.video:
            print("choose a video first")
            self.statusBar.showMessage("choose a video first", 5000)
            return
        if not self.status:
            self.status = True
            self.input.start()
            self.btnStart.setText("Stop")
            self.cbbInput.setEnabled(False)
            self.btnOpen.setEnabled(False)
            self.lblHR2.clear()
            while self.status:
                self.main_loop()
        else:
            self.status = False
            self.input.stop()
            self.btnStart.setText("Start")
            self.cbbInput.setEnabled(True)
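
Both GUI variants gate the displayed heart rate on the same stability test: once more than 50 bpm samples exist, the value is shown only if no sample deviates from the window mean by more than 5 bpm. A standalone sketch of that check, with hypothetical sample values:

    import numpy as np

    bpms = np.array([72.1, 71.8, 72.5, 73.0, 71.9] * 11)  # 55 hypothetical samples
    if len(bpms) > 50 and np.max(np.abs(bpms - bpms.mean())) < 5:
        print("Heart rate: {:.2f} bpm".format(bpms.mean()))  # stable -> display
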
Example #4

# Assumed imports: numpy, OpenCV, scipy and imutils are certain from the code
# below; Webcam is a local module (assumption).
import sys
import time
import numpy as np
import cv2
import imutils
from scipy import fftpack, signal
from webcam import Webcam  # local module (assumption)


class VidMag:
    def __init__(self):
        self.webcam = Webcam()
        self.buffer_size = 40
        self.fps = 0
        self.times = []
        self.t0 = time.time()
        self.data_buffer = []
        #self.vidmag_frames = []
        self.frame_out = np.zeros((10, 10, 3), np.uint8)
        self.webcam.start()
        print("init")

    #-------------- COLOR MAGNIFICATION ---------------------#
    def build_gaussian_pyramid(self, src, level=3):
        s = src.copy()
        pyramid = [s]
        for i in range(level):
            s = cv2.pyrDown(s)
            pyramid.append(s)
        return pyramid

    def gaussian_video(self, video_tensor, levels=3):
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_gaussian_pyramid(frame, level=levels)
            gaussian_frame = pyr[-1]
            if i == 0:
                vid_data = np.zeros(
                    (video_tensor.shape[0], gaussian_frame.shape[0],
                     gaussian_frame.shape[1], 3))
            vid_data[i] = gaussian_frame
        return vid_data

    def temporal_ideal_filter(self, tensor, low, high, fps, axis=0):
        fft = fftpack.fft(tensor, axis=axis)
        frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)
        bound_low = (np.abs(frequencies - low)).argmin()
        bound_high = (np.abs(frequencies - high)).argmin()
        fft[:bound_low] = 0
        fft[bound_high:-bound_high] = 0
        fft[-bound_low:] = 0
        iff = fftpack.ifft(fft, axis=axis)
        return np.abs(iff)

    def amplify_video(self, gaussian_vid, amplification=70):
        return gaussian_vid * amplification

    def reconstruct_video(self, amp_video, origin_video, levels=3):
        final_video = np.zeros(origin_video.shape)
        for i in range(0, amp_video.shape[0]):
            img = amp_video[i]
            for x in range(levels):
                img = cv2.pyrUp(img)
            img = img + origin_video[i]
            final_video[i] = img
        return final_video

    def magnify_color(self,
                      data_buffer,
                      fps,
                      low=0.4,
                      high=2,
                      levels=3,
                      amplification=30):
        gau_video = self.gaussian_video(data_buffer, levels=levels)
        filtered_tensor = self.temporal_ideal_filter(gau_video, low, high, fps)
        amplified_video = self.amplify_video(filtered_tensor,
                                             amplification=amplification)
        final_video = self.reconstruct_video(amplified_video,
                                             data_buffer,
                                             levels=levels)
        #print("c")
        return final_video

    #-------------------------------------------------------------#

    #------------------- MOTION MAGNIFICATION ---------------------#
    #build laplacian pyramid for video
    def laplacian_video(self, video_tensor, levels=3):
        tensor_list = []
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_laplacian_pyramid(frame, levels=levels)
            if i == 0:
                for k in range(levels):
                    tensor_list.append(
                        np.zeros((video_tensor.shape[0], pyr[k].shape[0],
                                  pyr[k].shape[1], 3)))
            for n in range(levels):
                tensor_list[n][i] = pyr[n]
        return tensor_list

    #Build Laplacian Pyramid
    def build_laplacian_pyramid(self, src, levels=3):
        gaussianPyramid = self.build_gaussian_pyramid(src, levels)
        pyramid = []
        for i in range(levels, 0, -1):
            GE = cv2.pyrUp(gaussianPyramid[i])
            L = cv2.subtract(gaussianPyramid[i - 1], GE)
            pyramid.append(L)
        return pyramid

    #reconstruct video from laplacian pyramid
    def reconstruct_from_tensorlist(self, filter_tensor_list, levels=3):
        final = np.zeros(filter_tensor_list[-1].shape)
        for i in range(filter_tensor_list[0].shape[0]):
            up = filter_tensor_list[0][i]
            for n in range(levels - 1):
                up = cv2.pyrUp(up) + filter_tensor_list[n + 1][i]
            final[i] = up
        return final

    #butterworth bandpass filter
    def butter_bandpass_filter(self, data, lowcut, highcut, fs, order=5):
        omega = 0.5 * fs
        low = lowcut / omega
        high = highcut / omega
        b, a = signal.butter(order, [low, high], btype='band')
        y = signal.lfilter(b, a, data, axis=0)
        return y

    def magnify_motion(self,
                       video_tensor,
                       fps,
                       low=0.4,
                       high=1.5,
                       levels=3,
                       amplification=30):
        lap_video_list = self.laplacian_video(video_tensor, levels=levels)
        filter_tensor_list = []
        for i in range(levels):
            filter_tensor = self.butter_bandpass_filter(
                lap_video_list[i], low, high, fps)
            filter_tensor *= amplification
            filter_tensor_list.append(filter_tensor)
        recon = self.reconstruct_from_tensorlist(filter_tensor_list)
        final = video_tensor + recon
        return final

    #-------------------------------------------------------------#

    def buffer_to_tensor(self, buffer):
        # mainLoop resizes frames to width 256, i.e. 256x192 for 4:3 input.
        tensor = np.zeros((len(buffer), 192, 256, 3), dtype="float")
        for i in range(len(buffer)):
            tensor[i] = buffer[i]
        return tensor

    def run_color(self):
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)
        #print(self.data_buffer)

        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            #self.vidmag_frames = self.vidmag_frames[-self.buffer_size:]
            L = self.buffer_size

        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_color(data_buffer=tensor, fps=self.fps)
            #print(final_vid[0].shape)
            #self.vidmag_frames.append(final_vid[-1])
            #print(self.fps)
            self.frame_out = final_vid[-1]

    def run_motion(self):
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)
        #print(L)

        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            #self.vidmag_frames = self.vidmag_frames[-self.buffer_size:]
            L = self.buffer_size

        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_motion(video_tensor=tensor, fps=self.fps)
            #print(self.fps)
            #self.vidmag_frames.append(final_vid[-1])
            self.frame_out = final_vid[-1]

    def key_handler(self):
        """
        A plotting or camera frame window must have focus for keypresses to be
        detected.
        """
        self.pressed = cv2.waitKey(1) & 255  # wait up to 1 ms for a keypress
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def mainLoop(self):
        frame = self.webcam.get_frame()
        f1 = imutils.resize(frame, width=256)
        #crop_frame = frame[100:228,200:328]
        self.data_buffer.append(f1)
        self.run_color()
        #print(frame)

        #if len(self.vidmag_frames) > 0:
        #print(self.vidmag_frames[0])
        cv2.putText(frame, "FPS " + str(float("{:.2f}".format(self.fps))),
                    (20, 420), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)

        #frame[100:228,200:328] = cv2.convertScaleAbs(self.vidmag_frames[-1])
        cv2.imshow("Original", frame)
        #f2 = imutils.resize(cv2.convertScaleAbs(self.vidmag_frames[-1]), width = 640)
        f2 = imutils.resize(cv2.convertScaleAbs(self.frame_out), width=640)

        cv2.imshow("Color amplification", f2)

        self.key_handler()  # otherwise the GUI cannot refresh
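temporal_ideal_filter is the heart of the color-magnification path: it zeroes every FFT bin outside the [low, high] Hz band along the time axis. A self-contained sketch on a synthetic signal (numpy/scipy only, no webcam required), using the same 0.4-2 Hz bounds as magnify_color:

    import numpy as np
    from scipy import fftpack

    fps = 30.0
    t = np.arange(120) / fps                      # 4 s of samples
    sig = np.sin(2 * np.pi * 1.0 * t) + np.sin(2 * np.pi * 5.0 * t)

    fft = fftpack.fft(sig, axis=0)
    freqs = fftpack.fftfreq(sig.shape[0], d=1.0 / fps)
    lo = (np.abs(freqs - 0.4)).argmin()           # lower band edge bin
    hi = (np.abs(freqs - 2.0)).argmin()           # upper band edge bin
    fft[:lo] = 0
    fft[hi:-hi] = 0
    fft[-lo:] = 0
    filtered = np.abs(fftpack.ifft(fft, axis=0))  # 1 Hz survives, 5 Hz is gone
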
Example #5

# Assumed imports: torch, torchvision and OpenCV are certain from the code
# below. Webcam, download_file_from_google_drive,
# get_model_instance_segmentation, get_prediction and the MODEL_PATH, SOURCE
# and NUMBER_OF_CLASSES constants come from this project's local modules
# (their import paths are not shown in the source).
import os
import sys
import cv2
import torch
from torchvision import transforms


def main():
    # Initialised up front so the finally block can run safely even if an
    # early statement (e.g. a missing sys.argv[1]) raises.
    collector = None
    movie = None
    recording = False
    try:
        camera_type = sys.argv[1]

        recording = len(sys.argv) == 3 and sys.argv[2] == "record"

        if camera_type == "webcam":

            collector = Webcam(video_width=640, video_height=480)
            collector.start()

        else:
            print("No such camera {camera_type}")
            collector = None
            exit(-1)

        if not os.path.isfile(MODEL_PATH):
            print("Downloading model, please wait...")
            download_file_from_google_drive(SOURCE, MODEL_PATH)
            print("Done downloading the model.")

        # get device
        device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        # initialise model
        model = get_model_instance_segmentation(NUMBER_OF_CLASSES)
        # Load the weights ensured above (the source hard-coded
        # './models/frcnn_hands.pth' here; using MODEL_PATH keeps the download
        # check and the load consistent).
        model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
        model.to(device)
        model.eval()

        if recording:
            movie = cv2.VideoWriter(
                f'./recordings/hand_frcnn_{camera_type}.avi',
                cv2.VideoWriter_fourcc(*'DIVX'), 8, (640, 480))

        with torch.no_grad():

            while collector.started:

                image, _ = collector.read()

                if image is not None:

                    orig = image.copy()

                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    image = transforms.ToTensor()(image).to(device)

                    out = model([image])

                    boxes = get_prediction(pred=out, threshold=.7)

                    try:

                        for box in boxes:
                            cv2.rectangle(img=orig,
                                          pt1=(box[0], box[1]),
                                          pt2=(box[2], box[3]),
                                          color=(0, 255, 255),
                                          thickness=2)

                        if recording:
                            movie.write(orig)

                        cv2.imshow("mask", orig)
                        k = cv2.waitKey(1)

                        if k == ord('q'):
                            collector.stop()

                    except Exception as e:
                        print(e)

    finally:
        print("Stopping stream.")
        if collector is not None:
            collector.stop()
        if recording and movie is not None:
            movie.release()
        cv2.destroyAllWindows()
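
A command-line entry point matching the argv parsing above (the script filename is whatever file this example lives in):

    if __name__ == "__main__":
        # usage: python <script>.py webcam [record]
        main()
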
Example #6
# Assumed imports: numpy, scipy, PIL, matplotlib and the statistics / math /
# time modules are certain from the code below; Webcam is a local module.
import time
import math as mth
import statistics
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageChops
from scipy import signal
from webcam import Webcam  # local module (assumption)


class MUCamera:
    def __init__(self):
        self.MU = Webcam()
        self.img_intensity = []
        self.img_time = []
        self.MU.start()
        self.filtered_average = []
        self.MU.register_callback(self._average_intensity, 1)
        self.img = []
        self.euclidean_dist = None

    def _average_intensity(self, image):
        '''
        Callback that does the actual calculations.
        :param image: the image handed in by the webcam callback registered
            in __init__
        :return: the average intensity of the captured image; also appends
            the intensity, capture time and image object to running lists
        '''
        self.img_intensity.append(np.mean(np.mean(image)))
        self.img_time.append(time.time())
        self.img.append(image)
        return np.mean(np.mean(image))

    def average_intensity(self):
        '''
        The function that should actually be called if you want to know the
        average intensity.
        :return: the average intensity of the image most recently retrieved
            from the webcam
        '''
        while len(self.img_intensity) < 1:
            pass  # busy-wait until the callback delivers the first frame
        return self.img_intensity[-1]

    def filtered_average_intensity(self):
        '''
        Passes the average intensity list from _average_intensity() through a
        zero-phase Butterworth filter.
        https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html
        :return: a list of the filtered average intensities
        '''
        b, a = signal.butter(5, 0.025)
        self.filtered_average = signal.filtfilt(b, a, self.img_intensity)

    def intensity_plot(self):
        '''
        Plots the raw average intensities and the filtered intensities.
        Raw is a solid black line, filtered is a dashed red line.
        :return: a plot
        '''
        t0 = self.img_time[0]
        t = [(x - t0) / 60 for x in self.img_time]
        y = self.img_intensity
        y_filtered = self.filtered_average

        plt.plot(t, y, 'k', t, y_filtered, 'r--')
        plt.xlabel('Minutes')
        plt.ylabel('Average Intensity')
        plt.legend(
            ('Average Image Intensity', 'Smoothed Average Image Intensity'),
            loc='best')
        plt.title(
            'Average Image Intensity from 5:45 PM to 8:30 AM PST, May 29th')
        plt.grid()
        plt.show()

    def daytime(self, threshold=75):
        '''
        Determines whether it is night or day from the retrieved webcam image.
        :param threshold: the average intensity used to decide the time of
            day; below it is night, otherwise day
        :return: True if daytime, False if nighttime
        '''
        while len(self.img_intensity) < 1:
            pass  # busy-wait until the callback delivers the first frame
        img = self.img[-1]
        intensity = np.mean(np.mean(img))
        return intensity >= threshold

    def common_color(self):
        '''
        Calculates the most common color in the retrieved webcam image
        Uses statistics library
        :return: color that occurs the most in a tuple (R,G,B)
        '''
        img = self.img[-1].getdata()
        m = statistics.mode(img)
        return m

    def stop(self):
        '''
        Function that terminates the callback function that is started in __init__
        Will call the filtering function and plotting function
        :return:
        '''
        self.MU.stop()
        self.filtered_average_intensity()
        self.intensity_plot()

    def motion(self):
        '''
        Determines whether motion took place between two images.
        Waits until 25 images have been retrieved so the two compared images
        are actually different; the webcam updates about once a minute, so 25
        images should be long enough.
        :return: True if motion occurred, False otherwise
        '''
        while len(self.img) < 25:
            pass  # busy-wait; see the note in the docstring

        img1 = self.img[-25]
        img2 = self.img[-1]
        img3 = ImageChops.subtract(img1, img2)
        self.euclidean_dist = mth.sqrt(np.sum(np.array(img3.getdata())**2))

        return self.euclidean_dist > 8000

    def highlight_motion(self):
        '''
        Creates an image that highlights the motion between two webcam images
        in red. Waits until 25 images have been retrieved so the two compared
        images are actually different; the webcam updates about once a minute,
        so 25 images should be long enough.
        :return: the second picture, with the differing pixels highlighted in red
        '''
        while len(self.img) < 25:
            pass
        img1 = self.img[-25]
        img2 = self.img[-1]

        img3 = ImageChops.subtract(img1, img2)
        img2_data = np.array(img2)  # np.array (not asarray) yields a writable copy
        img3_data = np.asarray(img3)
        for i in range(len(img3_data[1, :])):
            for j in range(len(img3_data[:, i])):
                avg = np.mean(img3_data[j, i])
                if avg > 35 and j > 250:
                    img2_data[j, i] = [255, 0, 0]

        img_new = Image.fromarray(img2_data, 'RGB')
        img_new.show()

    def event(self):
        '''
        Determines if there is an event going on in the quad, based on the
        color and Euclidean distance of two images of the quad.
        :return: True if there is an event, False otherwise.
        Also displays the test cases: the image with the grey square is the
        baseline everything is compared against (grey was the most common
        color in a crop the size of the square); the image with the white
        square is the case where there is an event.
        '''
        while len(self.img_intensity) < 1:
            pass

        pxl_coor = (250, 365, 500, 470)
        # np.array (not asarray) yields writable copies; arrays viewing PIL
        # image data are read-only.
        img_grey_large = np.array(self.img[-1])
        img_event = np.array(self.img[-1])
        img = self.img[-1].crop(pxl_coor)
        baseline = np.array(img)

        for i in range(len(baseline[1, :])):
            for j in range(len(baseline[:, i])):
                baseline[j, i] = [170, 170, 168]

        for i in range(249, 500):
            for j in range(365, 470):
                img_grey_large[j, i] = [170, 170, 168]
                img_event[j, i] = [255, 255, 255]

        img_grey = Image.fromarray(baseline, 'RGB')
        img_grey_large = Image.fromarray(img_grey_large, 'RGB')
        img_event = Image.fromarray(img_event, 'RGB')

        img_compare = ImageChops.subtract(img, img_grey)
        euclidean_dist = mth.sqrt(np.sum(np.array(img_compare.getdata())**2))

        img_grey_large.show()
        img_event.show()

        return euclidean_dist > 8000
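
A short usage sketch for MUCamera (a sketch; the webcam is assumed to deliver roughly one frame per minute, as the docstrings state):

    cam = MUCamera()                # starts capture, registers _average_intensity
    print(cam.average_intensity())  # blocks until the first frame arrives
    print(cam.daytime())            # True once mean intensity clears the threshold
    print(cam.motion())             # blocks until 25 frames exist, then compares
    cam.stop()                      # stops capture, filters the data and plots it
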
Example #7
# Assumed imports, as in Example #1: numpy, matplotlib and the local webcam
# module.
import numpy as np
import matplotlib.pyplot as plt
from webcam import Webcam  # local module (assumption)


class MUcamera:
    def __init__(self):
        self.w = Webcam()
        self.w.start()
        self.im = self.w.grab_image()
        self.w.register_callback(self.average_intensity, 1)
        self.avg_intensity = []
        self.images = []
        self.filtered = []
        self.f, self.ax = plt.subplots(1, 1)

    # Find the average intensity of an image delivered by the callback.

    def average_intensity(self, image):
        pix_val = list(image.getdata())
        pixel_intensity = []
        for x in pix_val:
            avg = sum(x) / len(x)
            pixel_intensity.append(avg)
        self.avg_pixel = np.average(pixel_intensity)
        self.avg_intensity.append(self.avg_pixel)
        return self.avg_intensity

    # Moving-average filter of the intensities, with a window width of 3.

    def average_intensity_filtered(self):

        width = 3
        if len(self.avg_intensity) >= 5:
            for x in range(len(self.avg_intensity) - 2):
                self.filtered.append(
                    (self.avg_intensity[x] + self.avg_intensity[x + 1] +
                     self.avg_intensity[x + 2]) / width)
            return self.filtered
        else:
            self.filtered = self.avg_intensity
            return self.filtered

    # Stop the webcam and produce the plots and summaries.

    def stop(self):
        self.w.stop()
        self.average_intensity_mean_plot()
        self.average_intensity_filtered_plot()
        self.daytime()
        self.most_common_color()

    # Plot the raw average intensity.
    def average_intensity_mean_plot(self):

        self.ax.plot(self.avg_intensity, 'C1', label='Average')
        self.ax.legend()
        self.ax.set_xlabel('Image Number')
        self.ax.set_ylabel('Intensity')
        self.ax.set_title('Image Intensity')

    # Plot the filtered average intensity.

    def average_intensity_filtered_plot(self):
        self.average_intensity_filtered()
        self.ax.plot(self.filtered, 'C2', label='Filtered')
        self.ax.legend()

    # Report whether it is daytime or nighttime.

    def daytime(self):
        self.average = np.mean(np.mean(self.im, axis=1))
        is_day = self.average >= 95
        print(is_day)
        return is_day


    # Report the most common color.

    def most_common_color(self):
        w, h = self.im.size
        pixels = self.im.getcolors(w * h)
        most_frequent_pixel = pixels[0]
        for count, color in pixels:
            if count > most_frequent_pixel[0]:
                most_frequent_pixel = (count, color)
        proportion = most_frequent_pixel[0] / len(pixels)
        print('The most common color is {}'.format(most_frequent_pixel[1]),
              'with a count of {}'.format(most_frequent_pixel[0]),
              'and the proportion of pixels is {}'.format(proportion))
        return most_frequent_pixel
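
The width-3 filter above is a plain moving average; for comparison, an equivalent numpy one-liner (a sketch with hypothetical sample values):

    sample = [100.0, 102.0, 98.0, 101.0, 99.0]
    filtered = np.convolve(sample, np.ones(3) / 3, mode='valid')
    print(filtered)  # the same values the loop appends to self.filtered
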
Example #8
# Assumed setup: cv2 and cv2.aruco are certain from the code below; the
# dictionary choice, the calibration values (mtx, dist), the marker length
# and the local webcam helper are assumptions filled in for completeness.
import cv2
import cv2.aruco as aruco
import numpy as np
from webcam import Webcam  # local module (assumption)

webcam = Webcam()  # assumed API: get_current_frame() / stop()
webcam.start()
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)  # assumed dictionary
markerLength = 0.05  # marker side length in meters (assumption)
mtx = np.load('calib_mtx.npy')    # hypothetical files holding the camera
dist = np.load('calib_dist.npy')  # matrix and distortion coefficients

arucoParams = aruco.DetectorParameters_create()

#stream = cv2.VideoCapture('http://104.38.59.31:8080/video')

# Use the next line if your camera has a username and password
# stream = cv2.VideoCapture('protocol://*****:*****@IP:port/1')

while True:
    img = webcam.get_current_frame()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=arucoParams)
    print("type {} value: {}".format(type(ids), id))
    if type(ids) is np.ndarray:
        if ids.any() != None:
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, markerLength, mtx, dist)
            imgAruco = aruco.drawDetectedMarkers(img, corners, ids,
                                                 (0, 255, 0))
            imgAruco = aruco.drawAxis(imgAruco, mtx, dist, rvec, tvec, 0.07)

    else:
        # print("marker not detected")

        imgAruco = img
    cv2.imshow("aruco", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        webcam.stop()
        break
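
The pose estimate above is only used for drawing; a hedged sketch of reading it out (tvec is the marker translation in camera coordinates, in the units of markerLength):

    # inside the `if ids is not None:` branch, after estimatePoseSingleMarkers:
    distance = np.linalg.norm(tvec[0][0])  # Euclidean camera-to-marker distance
    print("marker {} at {:.2f} m".format(ids[0][0], distance))
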
Example #9
# Assumed imports: numpy and the local webcam module. compare_ssim has been
# removed from skimage.measure; its current name is structural_similarity.
import numpy as np
from webcam import Webcam  # local module (assumption)
from skimage.metrics import structural_similarity as ssim


def mse(imageA, imageB):
    # the 'Mean Squared Error' between the two images is the
    # sum of the squared difference between the two images;
    # NOTE: the two images must have the same dimension
    err = np.sum((imageA.astype("float") - imageB.astype("float"))**2)
    err /= float(imageA.shape[0] * imageA.shape[1])
    return err


if __name__ == "__main__":
    camera = Webcam()
    if camera.isConnected():
        try:
            print("copying output")
            camera.start()
            while True:
                image1 = camera.getFrame()
                image2 = camera.getFrame()
                # channel_axis=2 assumes color (H, W, 3) frames; omit it for
                # grayscale input.
                cor = ssim(image1, image2, channel_axis=2)
                if cor < 0.78:
                    print("getting frame:   ", cor)
                #camera.show(image1)
        except (KeyboardInterrupt, SystemExit):
            print("exiting")
            camera.stop()
    else:
        print("camera not connected")