Example #1
    def extract_motion(self):
        if self.motion_extraction_method == "average":
            # Average the cropped image pixels
            avg = np.average(self.cropped_image)
            return avg

        elif self.motion_extraction_method == "flow":
            # On the first iteration no motion can be computed, because
            # optical flow requires two frames; we can, however, already
            # detect the key points to track.
            if self.previous_cropped_image is None:
                self.previous_cropped_image = float_to_uint8(
                    self.cropped_image.copy())
                self.motion_key_points = cv2.goodFeaturesToTrack(
                    self.previous_cropped_image,
                    mask=None,
                    **self.feature_params)
                # goodFeaturesToTrack returns None when nothing is found
                if self.motion_key_points is None or len(
                        self.motion_key_points) < 1:
                    self.trigger_error("No motion key points found.")
                return 0.0

            p1, st, err = cv2.calcOpticalFlowPyrLK(
                self.previous_cropped_image,
                float_to_uint8(self.cropped_image), self.motion_key_points,
                None, **self.lk_params)
            if p1 is None:
                return np.nan

            # Select the points that were successfully tracked
            good_new = p1[st == 1]
            good_old = self.motion_key_points[st == 1]

            # Update the previous frame and key points for the next iteration
            self.previous_cropped_image = float_to_uint8(
                self.cropped_image.copy())
            self.motion_key_points = good_new.reshape(-1, 1, 2)

            # If no points survived, tracking has been lost
            if len(good_new) == 0 or len(good_old) == 0:
                return np.nan

            # Mean (dx, dy) displacement of the tracked points this frame
            raw_motion_estimation = list(np.mean(good_old - good_new, axis=0))
            self.motion_data.append(raw_motion_estimation)
            # PCA:
            #   1. Find the first eigenvector of the motion covariance,
            #      keeping only the primary motion component.
            #   2. Project the motion data onto that first component.
            #   3. Low-pass filter the projected motion.
            if len(self.motion_data) >= 2:
                x, y = np.transpose(self.motion_data)
                coords = np.vstack([x, y])
                cov_mat = np.cov(coords)
                eig_vals, eig_vecs = np.linalg.eig(cov_mat)
                sort_indices = np.argsort(eig_vals)[::-1]
                # Eigenvectors are the columns of eig_vecs, so transpose
                # before unpacking to get the principal component first
                evec1, evec2 = eig_vecs[:, sort_indices].T
                reduced_data = np.array(self.motion_data).dot(evec1)
                motion_estimation = reduced_data[-1]
                return motion_estimation
            else:
                return 0.0
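
Example #1 unpacks self.feature_params and self.lk_params without showing them. A plausible sketch of those dicts, modeled on OpenCV's standard Shi-Tomasi / Lucas-Kanade tracking parameters; the exact values here are assumptions, not taken from the source:

import cv2

# Hypothetical corner-detection parameters for cv2.goodFeaturesToTrack
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Hypothetical Lucas-Kanade parameters for cv2.calcOpticalFlowPyrLK
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))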
Example #2
    def locate(calibration_video_data, fps,
               freq_min=0.1, freq_max=1.0, amplification=500,
               pyramid_levels=9, skip_levels_at_top=4, temporal_threshold=0.7,
               threshold=20, threshold_type=cv2.THRESH_BINARY,
               save_calibration_image=False, verbose=False):
        logging.info("Beginning processing calibration frames...")
        # Perform motion extraction
        op, raw = eulerian_magnification_bandpass(calibration_video_data, fps, freq_min, freq_max, amplification,
                                                  skip_levels_at_top=skip_levels_at_top, pyramid_levels=pyramid_levels,
                                                  threshold=temporal_threshold,
                                                  verbose=verbose)
        logging.info("Done processing calibration frames.")
        # Generate normed average frame (0-255 grayscale)
        logging.info("Finding peak region...")
        avg_frame = np.array(np.average(op, axis=0))
        avg_norm = ((avg_frame - avg_frame.min()) / (avg_frame.max() - avg_frame.min()))
        avg = float_to_uint8(avg_norm)
        # Find largest region
        ret, thresh = cv2.threshold(avg, threshold, 255, threshold_type)  # Threshold image
        thresh_copy = copy.deepcopy(thresh)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # Find contours
        if not contours:
            return None
        c = max(contours, key=cv2.contourArea)  # Largest contour by area
        area = cv2.contourArea(c)
        logging.info("Found peak region.")
        # Get bounding box
        x, y, w, h = cv2.boundingRect(c)

        # if save_calibration_image:
        #     logging.info('Creating calibration image.')
        #     total_avg = float_to_uint8(np.average(calibration_video_data, axis=0))
        #     contour_img = copy.deepcopy(total_avg)
        #     cv2.drawContours(contour_img, contours, -1, (0, 255, 0), 3)
        #     avg_copy = copy.deepcopy(avg)
        #     drawn = cv2.rectangle(total_avg + avg_copy, (x, y), (x + w, y + h), 255, 2)
        #
        #     avg_raw_frame = np.array(np.average(raw, axis=0))
        #     avg_raw_norm = ((avg_raw_frame - avg_raw_frame.min()) / (avg_raw_frame.max() - avg_raw_frame.min()))
        #     avg_raw = float_to_uint8(avg_raw_norm)
        #     avg_original = float_to_uint8(np.average(calibration_video_data, axis=0))
        #     row0 = np.hstack((avg_original, avg_raw, avg))
        #     row1 = np.hstack((thresh_copy, contour_img,  drawn))
        #     calibration = np.vstack((row0, row1))
        #     i = 0
        #     while os.path.exists("calibration%s.png" % i):
        #         i += 1
        #     cv2.imwrite(r'calibration%s.png' % i, calibration)
        #     logging.info('Calibration image saved.')

        if verbose:
            print('contour area:{4} - x:{0}, y:{1}, w:{2}, h:{3}'.format(x, y, w, h, area))

        return x, y, w, h
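
A minimal usage sketch for locate; the frame-stack shape, dtype, and fps value are assumptions for illustration:

import numpy as np

# Assumed input: a (num_frames, height, width) grayscale float stack at 30 fps
calibration_video_data = np.random.rand(120, 240, 320).astype(np.float32)
roi = locate(calibration_video_data, fps=30.0, verbose=True)
if roi is not None:
    x, y, w, h = roi
    print('ROI at ({0}, {1}) with size {2}x{3}'.format(x, y, w, h))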
Example #3
    def update_ui(self):
        if self.visualize == 'pyqtgraph':
            if self.state == "calibration":
                if self.calibration_buffer_idx < self.calibration_buffer_target_length:
                    self.set_window_title(
                        'Capturing calibration frames... {0}/{1}'.format(
                            self.calibration_buffer_idx,
                            self.calibration_buffer_target_length))
                    self.display_frame = self.current_frame
                    self.set_image(self.display_frame)
                else:
                    self.set_window_title('Measuring...')
            elif self.state == "measure":
                # First iteration
                if self.cropped_image is None:
                    self.set_plot_autoscale(True)
                    return
                self.display_frame = float_to_uint8(self.cropped_image)
                if self.motion_extraction_method == 'flow':
                    mask = np.zeros_like(self.display_frame)
                    for point in self.motion_key_points:
                        # cv2.circle requires integer pixel coordinates
                        a, b = point.ravel().astype(int)
                        mask = cv2.circle(mask, (a, b), 2, (255, 255, 255), -1)
                    # Add the mask once, after all points are drawn
                    self.display_frame = cv2.add(self.display_frame, mask)
                # Show an animated trailing ellipsis; switch titles once the
                # measurement buffer is full
                dots = '.' * (len(self.filtered_data) % 4)
                if len(self.filtered_data) < self.measure_buffer_length:
                    self.set_window_title('Building Measurement Buffer' + dots)
                else:
                    self.set_window_title('Measuring' + dots)
                if len(self.peak_times) > 0:
                    self.ui["peak_plot"].setData(
                        self.peak_times,
                        np.take(self.filtered_data, self.peak_indices))
                if len(self.filtered_data) >= 2 and len(self.t) >= 2:
                    self.set_plot_x_range(min(self.t), max(self.t))
                    self.ui["raw_signal"].setData(self.t, self.filtered_data)
                self.ui["capture_image"].setImage(self.display_frame)
                if len(self.freq) >= 2 and len(self.t) >= 2:
                    self.ui["frequency_plot"].setData(
                        np.array(self.t)[-len(self.freq):], self.freq)
                    self.ui["bpm_text"].setText('{0:#.4} BPM'.format(
                        self.freq[-1]))
            elif self.state == "error":
                self.ui["bpm_text"].setText('??? BPM')
                self.set_window_title(
                    "Error: Recalibrating due to poor signal in {0}s.".format(
                        self.error_reset_delay -
                        (time.time() - self.reset_start_time)))

            pg.QtGui.QApplication.processEvents()
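
float_to_uint8 is called throughout these examples but never defined. A minimal sketch of what such a helper presumably does, assuming the input image is already normalized to [0, 1]:

import numpy as np

def float_to_uint8(img):
    # Assumed behavior: map a float image in [0, 1] to 8-bit grayscale
    return np.clip(img * 255.0, 0, 255).astype(np.uint8)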
Example #4
    def update_ui(self):
        if self.visualize == 'pyqtgraph':
            if self.state == "calibration":
                if self.calibration_buffer_idx < self.calibration_buffer_target_length:
                    # self.set_window_title(
                    #     'Capturing calibration frames... {0}/{1}'.format(
                    #         self.calibration_buffer_idx,
                    #         self.calibration_buffer_target_length))
                    self.display_frame = self.current_frame
                    # self.set_image(self.display_frame)
                else:
                    pass
                    # self.set_window_title('Measuring...')
            elif self.state == "measure":
                # First iteration
                if self.cropped_image is None:
                    # self.set_plot_autoscale(True)
                    return
                self.display_frame = float_to_uint8(self.cropped_image)
                if self.motion_extraction_method == 'flow':
                    mask = np.zeros_like(self.display_frame)
                    for point in self.motion_key_points:
                        # cv2.circle requires integer pixel coordinates
                        a, b = point.ravel().astype(int)
                        mask = cv2.circle(mask, (a, b), 2, (255, 255, 255), -1)
                    self.display_frame = cv2.add(self.display_frame, mask)
                # self.set_window_title('Building Measurement Buffer.' + '.'.join(
                #     ['' for _ in range(0, len(self.filtered_data) % 4)]))
                if len(self.peak_times) > 0:
                    pass
                    # self.ui["peak_plot"].setData(
                    #     self.peak_times,
                    #     np.take(self.filtered_data, self.peak_indices))
                # self.set_window_title(
                #     'Measuring.' + '.'.join(['' for _ in range(0, len(self.filtered_data) % 4)]))
                if len(self.filtered_data) >= 2 and len(self.t) >= 2:
                    pass
                    # self.set_plot_x_range(min(self.t), max(self.t))
                    # self.ui["raw_signal"].setData(self.t, self.filtered_data)
                # self.ui["capture_image"].setImage(self.display_frame)
                if len(self.freq) >= 2 and len(self.t) >= 2:
                    #self.ui["frequency_plot"].setData(np.array(self.t)[-len(self.freq):], self.freq)
                    #self.ui["bpm_text"].setText('{0:#.4} BPM'.format(self.freq[-1]))
                    print('{0:#.4}'.format(self.freq[-1]))

                    self.sio.emit('breath.ping', data={"jwt": str(self.jwt), "value": str('{0:#.4}'.format(self.freq[-1])) })
                    arr_me.append(round(float('{0:#.4}'.format(self.freq[-1])), 3))
            elif self.state == "error":
                pass
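
Example #4 emits readings over self.sio and appends to a global arr_me, neither of which is shown. A sketch of the setup it presumably relies on, using the python-socketio client; the server URL and the jwt source are assumptions:

import socketio

arr_me = []                            # global list of rounded BPM readings
sio = socketio.Client()
sio.connect('http://localhost:5000')   # assumed server URL
# The measuring class would then be given these handles, e.g.
#     self.sio = sio
#     self.jwt = jwt                   # assumed auth token obtained elsewhere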
Example #5
    def run(self):
        self.benchmarker.add_tag('Measurement Loop')
        self.benchmarker.add_tag('Frame Capture')
        self.benchmarker.add_tag('Calibration Measurement')
        while self.cap.isOpened():
            self.loop_start_time = time.time()

            self.benchmarker.tick_start('Frame Capture')
            # Capture the frame (quit if the frame is a bool, meaning end of stream)
            self.current_frame = self.next_frame()
            if isinstance(self.current_frame, bool):
                break
            self.benchmarker.tick_end('Frame Capture')

            if self.state == 'initialize':
                self.initialize()
                self.state = 'calibration'
            # Calibration phase
            elif self.state == 'calibration':
                # The beginning of the calibration phase is just acquiring enough images to calibrate
                if self.calibration_buffer_idx < self.calibration_buffer_target_length:
                    # Fill frame buffer
                    self.calibration_buffer[
                        self.calibration_buffer_idx][:] = self.current_frame
                    self.calibration_buffer_idx += 1
                    # Update the progress bar
                    self.calibration_progress_bar.update(1)
                # Once enough images have been acquired, the locate function is run to find the ROI
                else:
                    logging.info(
                        "Finished capturing calibration frames. Beginning calibration..."
                    )
                    # Detect the FPS if needed
                    self.detect_fps()
                    # Fill FPS dependent variables
                    self.peak_minimum_sample_distance = int(
                        np.floor(self.fps / self.freq_max))
                    self.benchmarker.tick_start('Calibration Measurement')
                    # Run the localizer
                    location = self.locate(
                        self.calibration_buffer,
                        self.fps,
                        save_calibration_image=self.save_calibration_image,
                        freq_min=self.freq_min,
                        freq_max=self.freq_max,
                        temporal_threshold=self.temporal_threshold,
                        threshold=int(np.round(self.threshold * 255)))
                    self.benchmarker.tick_end('Calibration Measurement')
                    # If the localizer fails, try again
                    if location is None:
                        logging.info(
                            "Failed finding ROI during calibration. Retrying..."
                        )
                        self.calibration_buffer_idx = 0
                        continue
                    # If the localizer didn't fail, save the values and reduce the bounding box as requested
                    self.x, self.y, self.w, self.h = location
                    self.x, self.y, self.w, self.h = reduce_bounding_box(
                        self.x, self.y, self.w, self.h,
                        self.maximum_bounding_box_area)
                    logging.info("Finished calibration.")
                    logging.info("Beginning measuring...")
                    self.calibration_progress_bar.close()

                    self.state = 'measure'
            elif self.state == 'measure':
                if self.save_all_data and self.video_out is None:
                    self.video_out = cv2.VideoWriter(
                        str(self.capture_target) + '.avi',
                        cv2.VideoWriter_fourcc(*'MSVC'), self.fps,
                        (self.w, self.h))
                self.benchmarker.tick_start('Measurement Loop')
                # Crop to the bounding box
                self.cropped_image = self.current_frame[self.y:self.y + self.h,
                                                        self.x:self.x + self.w]
                # Check for full buffer and popleft
                for b in self.buffers:
                    if len(b) >= self.measure_buffer_length:
                        b.popleft()

                current_motion_value = self.extract_motion()
                self.data.append(current_motion_value)

                # Append to the temporal domain
                if len(self.t) == 0:
                    self.t.append(0.)
                else:
                    self.t.append(self.t[-1] + (1. / self.fps))
                # If the raw data is to be saved, add it to the dedicated list
                if self.save_all_data:
                    self.video_out.write(float_to_uint8(self.cropped_image))
                    self.all_data.append((self.t[-1], current_motion_value))
                if len(self.data) > self.measure_initialization_length:
                    # Perform the measurement
                    self.measure()
                    # Look for errors
                    if (not self.disable_error_detection
                            and self.detect_errors()):
                        self.trigger_error("error detection found poor signal")
                self.benchmarker.tick_end('Measurement Loop')
            elif self.state == 'error':
                if (time.time() - self.reset_start_time
                        >= self.error_reset_delay):
                    logging.info('Benchmark Report...\r\n' +
                                 self.benchmarker.get_report())
                    self.reset()
                    self.state = 'calibration'

            # Update the UI once the internal state has been set (will do nothing if visualize is None)
            self.update_ui()
            # Sleep the loop as needed to sync to the desired FPS
            self.sync_to_fps()

        logging.info("Capture closed.")

        self.cap.release()

        if self.save_all_data:
            self.video_out.release()
            np.save(str(self.capture_target) + '.npy', self.all_data)
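
reduce_bounding_box is not defined in these examples. A minimal sketch of one plausible implementation, inferred from the call site in Example #5: shrink the box about its center until its area is at most maximum_bounding_box_area:

def reduce_bounding_box(x, y, w, h, max_area):
    # Assumed contract: scale (w, h) down about the box center so that
    # w * h <= max_area, preserving the aspect ratio
    if max_area is None or w * h <= max_area:
        return x, y, w, h
    scale = (float(max_area) / (w * h)) ** 0.5
    new_w, new_h = max(1, int(w * scale)), max(1, int(h * scale))
    return x + (w - new_w) // 2, y + (h - new_h) // 2, new_w, new_h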
Example #6
if run_original:
    from base import eulerian_magnification_bandpass
    logging.info("Beginning processing frames...")
    op = eulerian_magnification_bandpass(vid_gray,
                                         fps,
                                         freq_min,
                                         freq_max,
                                         amplification,
                                         skip_levels_at_top=skip_levels_at_top,
                                         pyramid_levels=pyramid_levels,
                                         verbose=verbose)
    avg = np.array(np.average(op, axis=0))
    avg = ((avg - avg.min()) / (avg.max() - avg.min()))

    # Find largest region
    avg = float_to_uint8(avg)
    cv2.imwrite(r'C:\Users\kevin\Desktop\average.png', avg)
    ret, thresh = cv2.threshold(avg, 20, 255, cv2.THRESH_BINARY)
    cv2.imwrite(r'C:\Users\kevin\Desktop\thresh.png', thresh)
    # OpenCV >= 4 returns (contours, hierarchy), matching Example #2
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    c = max(contours, key=cv2.contourArea)
    print(cv2.contourArea(c))
    # Draw it
    x, y, w, h = cv2.boundingRect(c)
    print('x:{0}, y:{1}, w:{2}, h:{3}'.format(x, y, w, h))
    drawn = cv2.rectangle(avg, (x, y), (x + w, y + h), 255, 2)
    cv2.imwrite(r'C:\Users\kevin\Desktop\contour.png', drawn)

    logging.info("Done processing frames.")
    logging.info("Writing output file...")