import sys
import time

import cv2
import imutils
import numpy as np
from scipy import fftpack, signal

# Project-local helpers assumed to be importable from this repository:
# Webcam, RelevancePropagation, VideoRecorder, post_processing.


def real_time_lrp(conf):
    """Method to display feature relevance scores in real time.

    Args:
        conf: Dictionary consisting of configuration parameters.
    """
    record_video = conf["playback"]["record_video"]

    webcam = Webcam()
    lrp = RelevancePropagation(conf)

    if record_video:
        recorder = VideoRecorder(conf)

    while True:
        t0 = time.time()

        frame = webcam.get_frame()
        heatmap = lrp.run(frame)
        heatmap = post_processing(frame, heatmap, conf)
        cv2.imshow("LRP", heatmap)

        if record_video:
            recorder.record(heatmap)

        t1 = time.time()
        fps = 1.0 / (t1 - t0)
        print("{:.1f} FPS".format(fps))

        if cv2.waitKey(1) & 0xFF == 27:
            print("Escape pressed.")
            break

    if record_video:
        recorder.release()

    webcam.turn_off()
    cv2.destroyAllWindows()
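
# Invocation sketch: only conf["playback"]["record_video"] is read directly
# above, so any further keys required by RelevancePropagation, VideoRecorder
# or post_processing are assumptions of this example:
#
#   conf = {"playback": {"record_video": False}}
#   real_time_lrp(conf)
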
class VidMag:
    """Eulerian video magnification (color and motion) of webcam frames."""

    def __init__(self):
        self.webcam = Webcam()
        self.buffer_size = 40
        self.fps = 0
        self.times = []
        self.t0 = time.time()
        self.data_buffer = []
        self.frame_out = np.zeros((10, 10, 3), np.uint8)
        self.webcam.start()
        print("init")

    #--------------COLOR MAGNIFICATION---------------------#
    def build_gaussian_pyramid(self, src, levels=3):
        s = src.copy()
        pyramid = [s]
        for i in range(levels):
            s = cv2.pyrDown(s)
            pyramid.append(s)
        return pyramid
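
    # With levels=3, a 256x192 frame shrinks to 32x24; pyramid[-1] is this
    # coarsest level, which gaussian_video() keeps for every frame.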

    def gaussian_video(self, video_tensor, levels=3):
        # Keep only the coarsest Gaussian level of each frame.
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_gaussian_pyramid(frame, levels=levels)
            gaussian_frame = pyr[-1]
            if i == 0:
                vid_data = np.zeros(
                    (video_tensor.shape[0], gaussian_frame.shape[0],
                     gaussian_frame.shape[1], 3))
            vid_data[i] = gaussian_frame
        return vid_data

    def temporal_ideal_filter(self, tensor, low, high, fps, axis=0):
        # Ideal temporal band-pass: zero every FFT bin outside [low, high] Hz.
        fft = fftpack.fft(tensor, axis=axis)
        frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)
        bound_low = (np.abs(frequencies - low)).argmin()
        bound_high = (np.abs(frequencies - high)).argmin()
        # Guard: if bound_low were 0, fft[-bound_low:] below would zero the
        # entire spectrum through the negative-index slice.
        bound_low = max(bound_low, 1)
        fft[:bound_low] = 0
        fft[bound_high:-bound_high] = 0
        fft[-bound_low:] = 0
        iff = fftpack.ifft(fft, axis=axis)
        return np.abs(iff)
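
    # Worked example of the bin selection above: 40 frames at fps=20 give
    # fftfreq bins spaced 0.5 Hz apart, so low=0.4 snaps to bin 1 (0.5 Hz)
    # and high=2.0 to bin 4; the slices then pass roughly the 0.5-1.5 Hz
    # band together with its mirrored negative frequencies.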

    def amplify_video(self, gaussian_vid, amplification=70):
        return gaussian_vid * amplification

    def reconstruct_video(self, amp_video, origin_video, levels=3):
        final_video = np.zeros(origin_video.shape)
        for i in range(0, amp_video.shape[0]):
            img = amp_video[i]
            for x in range(levels):
                img = cv2.pyrUp(img)
            img = img + origin_video[i]
            final_video[i] = img
        return final_video

    def magnify_color(self,
                      data_buffer,
                      fps,
                      low=0.4,
                      high=2,
                      levels=3,
                      amplification=30):
        gau_video = self.gaussian_video(data_buffer, levels=levels)
        filtered_tensor = self.temporal_ideal_filter(gau_video, low, high, fps)
        amplified_video = self.amplify_video(filtered_tensor,
                                             amplification=amplification)
        final_video = self.reconstruct_video(amplified_video,
                                             data_buffer,
                                             levels=levels)
        #print("c")
        return final_video
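
    # Self-contained smoke test for the color pipeline on synthetic frames
    # (the shapes and fps are arbitrary; the webcam is bypassed on purpose):
    #
    #   vm = VidMag.__new__(VidMag)            # skip __init__ / webcam start
    #   frames = np.random.rand(32, 64, 64, 3)
    #   out = vm.magnify_color(frames, fps=30.0)
    #   assert out.shape == frames.shape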

    #-------------------------------------------------------------#

    #-------------------MOTION MAGNIFICATION---------------------#
    #Build a Laplacian pyramid for every frame of the video
    def laplacian_video(self, video_tensor, levels=3):
        tensor_list = []
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_laplacian_pyramid(frame, levels=levels)
            if i == 0:
                for k in range(levels):
                    tensor_list.append(
                        np.zeros((video_tensor.shape[0], pyr[k].shape[0],
                                  pyr[k].shape[1], 3)))
            for n in range(levels):
                tensor_list[n][i] = pyr[n]
        return tensor_list

    #Build Laplacian Pyramid
    def build_laplacian_pyramid(self, src, levels=3):
        gaussianPyramid = self.build_gaussian_pyramid(src, levels)
        pyramid = []
        for i in range(levels, 0, -1):
            GE = cv2.pyrUp(gaussianPyramid[i])
            L = cv2.subtract(gaussianPyramid[i - 1], GE)
            pyramid.append(L)
        return pyramid

    #Reconstruct the video from the filtered Laplacian pyramid
    def reconstruct_from_tensorlist(self, filter_tensor_list, levels=3):
        final = np.zeros(filter_tensor_list[-1].shape)
        for i in range(filter_tensor_list[0].shape[0]):
            up = filter_tensor_list[0][i]
            for n in range(levels - 1):
                up = cv2.pyrUp(up) + filter_tensor_list[n + 1][i]
            final[i] = up
        return final
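
    # Note: only the band-passed Laplacian detail layers are reconstructed
    # here; the coarsest Gaussian residual never enters the list, which is
    # consistent with magnify_motion adding the result onto the originals.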

    #Butterworth band-pass filter; cutoffs are normalized by the Nyquist
    #frequency (0.5 * fs), as scipy.signal.butter expects
    def butter_bandpass_filter(self, data, lowcut, highcut, fs, order=5):
        omega = 0.5 * fs
        low = lowcut / omega
        high = highcut / omega
        b, a = signal.butter(order, [low, high], btype='band')
        y = signal.lfilter(b, a, data, axis=0)
        return y

    def magnify_motion(self,
                       video_tensor,
                       fps,
                       low=0.4,
                       high=1.5,
                       levels=3,
                       amplification=30):
        lap_video_list = self.laplacian_video(video_tensor, levels=levels)
        filter_tensor_list = []
        for i in range(levels):
            filter_tensor = self.butter_bandpass_filter(
                lap_video_list[i], low, high, fps)
            filter_tensor *= amplification
            filter_tensor_list.append(filter_tensor)
        recon = self.reconstruct_from_tensorlist(filter_tensor_list,
                                                 levels=levels)
        final = video_tensor + recon
        return final
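
    # The motion path can be smoke-tested the same way as magnify_color:
    #
    #   vm = VidMag.__new__(VidMag)
    #   frames = np.random.rand(32, 64, 64, 3)
    #   out = vm.magnify_motion(frames, fps=30.0)
    #   assert out.shape == frames.shape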

    #-------------------------------------------------------------#

    def buffer_to_tensor(self, buffer):
        # The fixed (192, 256) frame shape assumes a 4:3 source resized to
        # width=256 in mainLoop.
        tensor = np.zeros((len(buffer), 192, 256, 3), dtype="float")
        for i in range(len(buffer)):
            tensor[i] = buffer[i]
        return tensor

    def run_color(self):
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)

        # Keep only the most recent buffer_size frames and timestamps.
        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            L = self.buffer_size

        # Magnify only once the buffer is full.
        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_color(data_buffer=tensor, fps=self.fps)
            self.frame_out = final_vid[-1]

    def run_motion(self):
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)

        # Keep only the most recent buffer_size frames and timestamps.
        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            L = self.buffer_size

        # Magnify only once the buffer is full.
        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_motion(video_tensor=tensor, fps=self.fps)
            self.frame_out = final_vid[-1]

    def key_handler(self):
        """
        A plotting or camera frame window must have focus for keypresses to be
        detected.
        """
        self.pressed = cv2.waitKey(1) & 0xFF  # wait up to 1 ms for a keypress
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def mainLoop(self):
        frame = self.webcam.get_frame()
        f1 = imutils.resize(frame, width=256)
        self.data_buffer.append(f1)
        self.run_color()

        cv2.putText(frame, "FPS {:.2f}".format(self.fps), (20, 420),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)

        cv2.imshow("Original", frame)
        f2 = imutils.resize(cv2.convertScaleAbs(self.frame_out), width=640)
        cv2.imshow("Color amplification", f2)

        self.key_handler()  # without this call the GUI windows never update
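
    # Driver sketch: mainLoop() handles exactly one frame, so the caller is
    # expected to spin it; pressing Esc inside key_handler ends the process:
    #
    #   vm = VidMag()
    #   while True:
    #       vm.mainLoop()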


import os

from PIL import Image
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QMainWindow

# Project-local helpers assumed to be importable: Ui_MainWindow, Webcam,
# to_str_digits, make_square.


class Studio(QMainWindow, Ui_MainWindow):
    def __init__(self, *args, **kwargs):
        super(Studio, self).__init__(*args, **kwargs)
        self.setupUi(self)

        # Device
        self.device_default = 0
        self.device = self.device_default

        # Webcam
        self.webcam = Webcam()

        # Image
        self.image_dir = 'outputs'
        self.image_ext = 'jpg'
        self.num_images_max_default = 10
        self.num_images_max = self.num_images_max_default
        self.num_images = 0

        self.saved_width_default = 416  # in pixels
        self.saved_height_default = 416
        self.saved_width = self.saved_width_default
        self.saved_height = self.saved_height_default

        self.flip_image = False
        self.cb_flip_image.stateChanged.connect(self.change_flip_image)

        # Filename prefix
        self.filename_prefix = 'class_memo'

        # Recording flag
        self.is_recording = False

        # Timer
        self.timer_is_on = False
        self.timer_duration = 500  # msec
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.process_image)

        # Plot min/max
        self.plot_min = 0.0
        self.plot_max = -1.0

        # Initialize
        self.initialize()

    def open_webcam(self):

        # Release any previously opened device.
        if self.webcam.is_open():
            self.webcam.release()

        self.webcam.open(self.device)
        self.process_image()

        # Show message
        self.show_message('webcam is opened.')

        # Start the timer
        if not self.timer_is_on:
            self.start_timer()

    def start_timer(self):
        self.timer_is_on = True
        self.timer.start(self.timer_duration)

    def stop_timer(self):
        self.timer_is_on = False
        self.timer.stop()

    def change_flip_image(self):

        if self.cb_flip_image.isChecked():
            self.flip_image = True
        else:
            self.flip_image = False

    def start_recording(self):

        self.is_recording = True
        self.num_images = 0
        self.show_message('recording frames.')

    def finish_recording(self):

        self.is_recording = False
        self.show_message('recording is finished.')

    def show_message(self, msg):
        text = 'Status: ' + msg
        self.lb_status.setText(text)

    def show_num_images(self):

        text = '{}/{}'.format(self.num_images, self.num_images_max)
        self.lb_num_images.setText(text)

    def get_image_path(self, n):

        str_num = to_str_digits(n, num_digits=5)
        filename = self.filename_prefix + '_' + str_num + '.' + self.image_ext

        path = os.path.join(self.image_dir, filename)

        return path

    def save_image(self):
        # Save the current frame as a square image of the configured size.

        self.num_images += 1

        if self.num_images <= self.num_images_max:
            image_path = self.get_image_path(self.num_images)
            frame = self.webcam.get_frame()
            image = Image.fromarray(frame)
            size = (self.saved_width, self.saved_height)
            image = make_square(image)

            image = image.resize(size)
            image.save(image_path)

        else:
            self.num_images = self.num_images_max
            self.finish_recording()

        # Show the number of images
        self.show_num_images()

    def process_image(self):

        if self.webcam.is_open():

            # Show the frame; pass the row stride explicitly so frames with
            # padded or odd-width rows are not rendered skewed.
            frame = self.webcam.read()
            image = QImage(frame.data, frame.shape[1], frame.shape[0],
                           frame.strides[0], QImage.Format_RGB888)

            # Flip the image horizontally
            image_flipped = image.mirrored(True, False)

            if self.flip_image:
                pixmap = QPixmap.fromImage(image_flipped)
            else:
                pixmap = QPixmap.fromImage(image)

            self.lb_image.setPixmap(pixmap)

            # Record frame
            if self.is_recording:
                self.save_image()

    def initialize(self):

        # Connect the signal and slot
        self.cb_device.activated[str].connect(self.set_device)
        self.edit_num_images_max.textChanged.connect(self.set_num_images_max)
        self.edit_saved_width.textChanged.connect(self.set_saved_width)
        self.edit_saved_height.textChanged.connect(self.set_saved_height)
        self.edit_filename_prefix.textChanged.connect(self.set_filename_prefix)

        self.btn_open.clicked.connect(self.open_webcam)
        self.btn_record.clicked.connect(self.start_recording)

        # UI
        text = str(self.num_images_max)
        self.edit_num_images_max.setText(text)

        text = str(self.saved_width)
        self.edit_saved_width.setText(text)

        text = str(self.saved_height)
        self.edit_saved_height.setText(text)

        text = str(self.filename_prefix)
        self.edit_filename_prefix.setText(text)

    def set_device(self):

        value = self.cb_device.currentIndex()

        try:
            value = int(value)
        except (TypeError, ValueError):
            value = self.device_default

        self.device = value

    def set_num_images_max(self):

        value = self.edit_num_images_max.text()

        try:
            value = int(value)
        except ValueError:
            value = self.num_images_max_default

        self.num_images_max = value

    def set_saved_width(self):

        value = self.edit_saved_width.text()

        try:
            value = int(value)
        except ValueError:
            value = self.saved_width_default

        self.saved_width = value

    def set_saved_height(self):

        value = self.edit_saved_height.text()

        try:
            value = int(value)
        except ValueError:
            value = self.saved_height_default

        self.saved_height = value

    def set_filename_prefix(self):

        value = self.edit_filename_prefix.text()
        self.filename_prefix = value

    def add_widget(self, widget):

        widget.setParent(self.central_widget)
        self.view_layout.addWidget(widget)

    def remove_widget(self, widget):

        self.view_layout.removeWidget(widget)
        widget.setParent(None)

    def refresh_view(self):

        text = 'Remaining time: {} (sec)'.format(self.num_images)
        self.lb_num_images.setText(text)

    def closeEvent(self, event):

        self.webcam.release()
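

# Launcher sketch for the Studio window, assuming a conventional PyQt5 entry
# point; QApplication is the only name introduced beyond the imports above.
if __name__ == "__main__":
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    studio = Studio()
    studio.show()
    sys.exit(app.exec_())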