Beispiel #1
0
    def calculate_sharpness_video_capture(
            self,
            cv_video_capture: CVVideoCapture,
            frame_start=0,
            frame_end=None,
            batch_size=200,
            gray_scale_conversion_code=cv2.COLOR_BGR2GRAY,
            progress_tracker: CVProgressTracker = None):
        """Compute a per-frame sharpness score over a range of video frames.

        The frame range is split across one worker process per CPU core.
        Each worker writes its scores into a shared ctypes double array and
        bumps a shared progress value; a repeating timer mirrors that value
        into *progress_tracker* twice a second while the workers run.

        :param cv_video_capture: open video source; its file handle is passed
            to each worker so they can open the file independently.
        :param frame_start: first frame index to score.
        :param frame_end: exclusive end frame; falsy means "to the end".
        :param batch_size: number of frames each worker reads per batch.
        :param gray_scale_conversion_code: cv2 conversion code applied before
            the sharpness kernels (self.kernel_x / self.kernel_y).
        :param progress_tracker: optional tracker; marked running, updated
            periodically, and completed when all workers have joined.
        :return: numpy array of ``frame_count`` sharpness values.
        """
        frame_count = int(cv_video_capture.get_frame_count())
        if frame_end:
            cv_video_capture.set_position_frame(frame_start)
            # clamp to the requested window, never below zero
            frame_count = max(min(frame_end - frame_start, frame_count), 0)

        if progress_tracker:
            progress_tracker.running = True

        # shared state: one double per frame, a progress counter, and a lock
        # serializing access to the underlying capture file
        frame_sharpness_ctype = multiprocessing.Array('d', frame_count)
        progress_value = multiprocessing.Value('d')
        progress_value.value = 0
        lock_video_capture = RLock()

        worker_count = multiprocessing.cpu_count()
        task_per_worker = frame_count // worker_count
        # first (worker_count - 1) workers take equal slices...
        args_list = [
            (task_per_worker * i, task_per_worker * (i + 1), frame_start,
             frame_count, batch_size, self.kernel_x, self.kernel_y,
             frame_sharpness_ctype, cv_video_capture.file_handle,
             progress_value, lock_video_capture, gray_scale_conversion_code)
            for i in range(0, worker_count - 1)
        ]
        # ...and the last worker absorbs the division remainder
        args_list.append(
            (task_per_worker * (worker_count - 1), frame_count, frame_start,
             frame_count, batch_size, self.kernel_x, self.kernel_y,
             frame_sharpness_ctype, cv_video_capture.file_handle,
             progress_value, lock_video_capture, gray_scale_conversion_code))

        processes = [
            Process(target=_calculate_sharpness_video_capture_worker,
                    args=arg_tuple) for arg_tuple in args_list
        ]

        # Only build and start the mirroring timer when a tracker exists;
        # previously the timer (and a closure over a possibly-None tracker)
        # was constructed unconditionally, and `running` was set twice.
        if progress_tracker:
            def update_progress_tracker():
                progress_tracker.progress = progress_value.value

            progress_timer = RepeatingTimer(0.5, update_progress_tracker)
            progress_timer.start()

        for p in processes:
            p.start()
        for p in processes:
            p.join()
        if progress_tracker:
            progress_timer.cancel()
            progress_tracker.complete()
        return np.array(frame_sharpness_ctype)
Beispiel #2
0
 def test_sharpness_acceptance(sharpness_calculated: np.ndarray,
                               frame_window_size,
                               z_score=1.0,
                               median_absolute_deviation=True,
                               progress_tracker: CVProgressTracker = None):
     # single sided, only reject if sharpness < (-sigma_bound * \sigma)
     # median absolute deviation ref http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
     frame_window_size = int(round(frame_window_size))
     sigma_bound = abs(z_score)
     result = np.array([], dtype=np.bool_)
     frame_count = sharpness_calculated.shape[0]
     if progress_tracker:
         progress_tracker.running = True
     for i in range(0, frame_count, frame_window_size):
         window = sharpness_calculated[i:i +
                                       min(frame_window_size, frame_count -
                                           i)]  # type: np.ndarray
         result_window = np.ones_like(window, dtype=np.bool_)
         if median_absolute_deviation:
             window_median = np.median(stats.trimboth(window, 0.1))
             diff = window - window_median
             abs_diff = np.abs(diff)
             median_deviation = np.median(abs_diff)
             window_z_score = \
                 (
                 0.6745 * diff) / median_deviation if median_deviation else 0.
             result_window[window_z_score < -sigma_bound] = False
         else:
             window_mean = stats.trimboth(window, 0.1).mean()
             window_std = window.std()
             diff = (window - window_mean)
             result_window[diff < -sigma_bound * window_std] = False
         result = np.concatenate((result, result_window))
         if progress_tracker:
             progress_tracker.progress = i / frame_count
     if progress_tracker:
         progress_tracker.complete()
     return result
    # --- driver snippet: score a clip for sharpness, then run the acceptance
    # test over ~2-second windows (fragment; enclosing scope not shown) ---
    #  filename = 'C:/Users/Yifei/unixhome/develop/sealab/keyframe/data/GP017728.MP4'
    #  filename_out = 'C:/Users/Yifei/unixhome/develop/sealab/keyframe/data' \
    #  '/GP017728_out.avi'
    # filename = '/home/yifei/develop/sealab/keyframe/data/GP017728.MP4'
    # filename_out = '/home/yifei/develop/sealab/keyframe/data/GP017728_out.avi'
    filename = '/home/yifei/develop/sealab/keyframe/data/GOPR7728.MP4'
    filename_out = '/home/yifei/develop/sealab/keyframe/data/GOPR7728_out_200.avi'
    video_cap = CVVideoCapture(filename)
    frame_rate = video_cap.get_frame_rate()

    start_time = datetime.datetime.now()

    # print progress values to stdout as the tracker advances
    def callback(arg):
        print(arg.progress)

    progress_tracker = CVProgressTracker(callback)

    # num_frames = 1000
    num_frames = int(video_cap.get_frame_count())
    print('frame count = ' + str(video_cap.get_frame_count()))
    cvsharpness = CVSharpness()
    # score every frame; blocks until all worker processes have joined
    sharpness_measure = cvsharpness.calculate_sharpness_video_capture(
        frame_start=0,
        frame_end=num_frames,
        cv_video_capture=video_cap,
        progress_tracker=progress_tracker)
    print(sharpness_measure.shape[0])
    # window of ~2 seconds of footage; reject frames more than 1 robust
    # z-score below the window's center
    result_arr = cvsharpness.test_sharpness_acceptance(sharpness_measure,
                                                       frame_rate * 2,
                                                       z_score=1)
    print("sharpness done")
Beispiel #4
0
    def test_optical_flow_video_capture(
            self,
            cv_video_capture: CVVideoCapture,
            distance_limit,
            frame_acceptance_np: np.ndarray,
            frame_start=0,
            frame_end=None,
            batch_size=200,
            gray_scale_conversion_code=cv2.COLOR_BGR2GRAY,
            progress_tracker: CVProgressTracker = None):
        """Refine a frame-acceptance mask using optical-flow distance.

        Runs in two passes. Pass 1 splits the frame range across one worker
        process per CPU core; each worker updates a shared byte array of
        acceptance flags, skipping ``skip_window_both_end`` frames at its
        range boundaries. The final pass re-examines those boundary windows
        sequentially in range but with one process per range. Progress is
        reported as 0-70% for pass 1 and 70-100% for the final pass.

        :param cv_video_capture: open video source; its file handle is passed
            to the workers.
        :param distance_limit: optical-flow distance threshold for rejection.
        :param frame_acceptance_np: initial boolean mask, one entry per frame.
        :param frame_start: first frame index to test.
        :param frame_end: exclusive end frame; falsy means "to the end".
        :param batch_size: frames per worker batch.
        :param gray_scale_conversion_code: cv2 conversion applied before flow.
        :param progress_tracker: optional tracker, completed after both passes.
        :return: refined boolean acceptance mask (copy, dtype bool).
        """
        frame_count = int(cv_video_capture.get_frame_count())
        if frame_end:
            cv_video_capture.set_position_frame(frame_start)
            # clamp to the requested window, never below zero
            frame_count = max(min(frame_end - frame_start, frame_count), 0)

        if progress_tracker:
            progress_tracker.running = True

        # shared acceptance flags seeded from the incoming numpy mask
        frame_acceptance_ctype = \
            multiprocessing.Array('b', frame_acceptance_np.tolist())

        progress_value = multiprocessing.Value('d')
        progress_value.value = 0
        lock_video_capture = multiprocessing.RLock()

        # frames within one second of a worker's range boundary are skipped
        # in pass 1 and re-checked in the final pass
        skip_window_both_end = int(cv_video_capture.get_frame_rate())
        worker_count = multiprocessing.cpu_count()
        task_per_worker = frame_count // worker_count
        # first (worker_count - 1) workers take equal slices...
        args_list = [
            (task_per_worker * i, task_per_worker * (i + 1), frame_start,
             frame_count, batch_size, distance_limit, self.feature_params,
             self.lucas_kanade_params, frame_acceptance_ctype,
             cv_video_capture.file_handle, progress_value, lock_video_capture,
             gray_scale_conversion_code, skip_window_both_end)
            for i in range(0, worker_count - 1)
        ]
        # ...and the last worker absorbs the division remainder
        args_list.append(
            (task_per_worker * (worker_count - 1), frame_count, frame_start,
             frame_count, batch_size, distance_limit, self.feature_params,
             self.lucas_kanade_params, frame_acceptance_ctype,
             cv_video_capture.file_handle, progress_value, lock_video_capture,
             gray_scale_conversion_code, skip_window_both_end))

        processes = [
            Process(target=_test_optical_flow_capture_worker, args=arg_tuple)
            for arg_tuple in args_list
        ]

        # only build and start the mirroring timer when a tracker exists
        if progress_tracker:
            def update_progress_tracker():
                # pass 1 accounts for the first 70% of overall progress
                progress_tracker.progress = \
                    progress_value.value / worker_count * 0.7

            progress_timer = RepeatingTimer(0.1, update_progress_tracker)
            progress_timer.start()

        for p in processes:
            p.start()
        for p in processes:
            p.join()

        print('[OpticalFlow] final pass')

        # re-examine the boundary windows that pass 1 skipped
        final_pass_ranges = generate_multiprocessing_final_pass_ranges(
            frame_acceptance_ctype, frame_count, task_per_worker,
            worker_count, skip_window_both_end)

        final_pass_arg_list = [
            (range_i[0], range_i[1], frame_start, frame_count, batch_size,
             distance_limit, self.feature_params, self.lucas_kanade_params,
             frame_acceptance_ctype, cv_video_capture.file_handle,
             progress_value, lock_video_capture, gray_scale_conversion_code)
            for range_i in final_pass_ranges
        ]

        final_pass_processes = [
            Process(target=_test_optical_flow_capture_worker, args=arg_tuple)
            for arg_tuple in final_pass_arg_list
        ]

        progress_value.value = 0
        if progress_tracker:
            def update_progress_tracker_final_pass():
                # final pass covers the remaining 30%
                progress_tracker.progress = \
                    0.7 + progress_value.value / worker_count * 0.3

            progress_timer.function = update_progress_tracker_final_pass

        # BUG FIX: join() was indented inside the start loop, which ran the
        # final-pass workers one at a time; start them all, then join them
        # all (matching pass 1 and the commented-out loop in the original).
        for p in final_pass_processes:
            p.start()
        for p in final_pass_processes:
            p.join()

        if progress_tracker:
            progress_timer.cancel()
            progress_tracker.complete()

        return np.array(frame_acceptance_ctype, dtype=np.bool_).copy()
Beispiel #5
0
        def worker_function(progress_changed, state_changed):
            """Run the enabled filters (sharpness, correlation, optical flow)
            in sequence, narrowing self.frame_acceptance_np after each stage.

            progress_changed / state_changed are signal-like objects whose
            ``emit`` is called with a 0..1 progress value resp. a status
            string. Each stage recomputes only when its params changed or no
            cached acceptance is loaded; a recompute forces every later stage
            to recompute as well (the *_updated flags chain the stages).
            """
            sharpness_filter_updated = False
            # start from "every frame accepted"
            self.frame_acceptance_np = np.ones(
                [int(self.cv_video_cap.get_frame_count())], dtype=np.bool_)

            if self.sharpness_filter:
                progress_changed.emit(0)
                state_changed.emit('Running sharpness filter...')

                # forward tracker progress to the UI signal
                def callback(obj):
                    progress_changed.emit(obj.progress)

                sharpness_filter = self.sharpness_filter[
                    'filter']  # type: CVSharpness
                sharpness_value = self.sharpness_filter['calculation']
                if sharpness_value is None:
                    # calculate and save
                    progress_changed.emit(0)
                    state_changed.emit(
                        'Analyzing video for image sharpness...')
                    print('sharpness recalculating')
                    sharpness_value = sharpness_filter.calculate_sharpness_video_capture(
                        cv_video_capture=self.cv_video_cap,
                        progress_tracker=CVProgressTracker(callback),
                        batch_size=self.params_batch_count)
                    # persist the raw scores so later runs can skip this step
                    sharpness_filter.save_calculation_file(
                        sharpness_value, self.cv_video_cap)
                progress_changed.emit(1)
                state_changed.emit('Running sharpness acceptance test...')
                if (self.sharpness_filter['params'] != self.sharpness_filter['params_loaded']) or \
                        (self.sharpness_filter['acceptance_loaded'] is None):
                    # different params
                    # NOTE(review): window_size comes from
                    # self.sharpness_filter['params'] but z_score from
                    # self.params_sharpness — confirm this mix is intended.
                    sharpness_acceptance = \
                        sharpness_filter.test_sharpness_acceptance(
                            sharpness_calculated=sharpness_value,
                            frame_window_size=self.sharpness_filter['params']['window_size'],
                            z_score=self.params_sharpness['z_score']
                        )
                    self.sharpness_filter['acceptance'] = sharpness_acceptance
                    self.sharpness_filter[
                        'acceptance_loaded'] = sharpness_acceptance
                    sharpness_filter.save_params_file(
                        self.sharpness_filter['params'], self.cv_video_cap)
                    sharpness_filter.save_acceptance_file(
                        sharpness_acceptance, self.cv_video_cap)
                    sharpness_filter_updated = True

                # status string: frames before/after this stage
                original_count = np.sum(self.frame_acceptance_np)
                current_count = np.sum(
                    self.sharpness_filter['acceptance_loaded'])
                self.sharpness_filter_status = (
                    "[%d] => [%d] frames (%.2f%% dropped)" %
                    (original_count, current_count,
                     (original_count - current_count) / original_count * 100))
                progress_changed.emit(1)
                state_changed.emit('Sharpness filter done...')
                self.frame_acceptance_np = self.sharpness_filter[
                    'acceptance_loaded']
            else:
                # stage disabled: treat as "changed" so the next stage
                # recomputes against the unfiltered mask
                sharpness_filter_updated = True

            correlation_filter_updated = False
            if sharpness_filter_updated:
                # requires recalculation
                correlation_filter_updated = True

            if self.correlation_filter:
                progress_changed.emit(0)
                state_changed.emit('Running correlation filter...')

                # forward tracker progress to the UI signal
                def callback(obj):
                    progress_changed.emit(obj.progress)

                # correlation filter enabled
                correlation_filter = self.correlation_filter[
                    'filter']  # type: CVCorrelation

                if correlation_filter_updated:
                    # upstream mask changed; cached acceptance is stale
                    self.correlation_filter['acceptance_loaded'] = None
                if (self.correlation_filter['params'] != self.correlation_filter['params_loaded']) or \
                        (self.correlation_filter['acceptance_loaded'] is None):

                    progress_changed.emit(0)
                    state_changed.emit(
                        'Removing still frames using cross correlation...')
                    print('correlation recalculating')
                    # different params
                    correlation_acceptance = \
                        correlation_filter.test_correlation_video_capture(
                            cv_video_capture=self.cv_video_cap,
                            correlation_limit=self.correlation_filter['params']['threshold'],
                            frame_acceptance_np=self.frame_acceptance_np,
                            progress_tracker=CVProgressTracker(callback),
                            batch_size=self.params_batch_count,
                        )
                    self.correlation_filter[
                        'acceptance'] = correlation_acceptance
                    self.correlation_filter[
                        'acceptance_loaded'] = correlation_acceptance
                    correlation_filter.save_params_file(
                        self.correlation_filter['params'], self.cv_video_cap)
                    correlation_filter.save_acceptance_file(
                        correlation_acceptance, self.cv_video_cap)
                    correlation_filter_updated = True

                # status string: frames before/after this stage
                original_count = np.sum(self.frame_acceptance_np)
                current_count = np.sum(
                    self.correlation_filter['acceptance_loaded'])
                self.correlation_filter_status = (
                    "[%d] => [%d] frames (%.2f%% dropped)" %
                    (original_count, current_count,
                     (original_count - current_count) / original_count * 100))
                progress_changed.emit(1)
                state_changed.emit('Correlation filter done...')
                self.frame_acceptance_np = self.correlation_filter[
                    'acceptance_loaded']
            else:
                # stage disabled: propagate "changed" downstream
                correlation_filter_updated = True

            opticalflow_filter_updated = False
            if correlation_filter_updated:
                # requires recalculation
                opticalflow_filter_updated = True

            if self.opticalflow_filter:
                # optical_flow enabled
                progress_changed.emit(1)
                state_changed.emit('Running optical flow filter...')

                # forward tracker progress to the UI signal
                def callback(obj):
                    progress_changed.emit(obj.progress)

                opticalflow_filter = self.opticalflow_filter[
                    'filter']  # type: CVOpticalFlow
                if opticalflow_filter_updated:
                    # upstream mask changed; cached acceptance is stale
                    self.opticalflow_filter['acceptance_loaded'] = None
                # NOTE(review): params compared via json.dumps(sort_keys=True)
                # here, but by plain dict equality in the stages above —
                # presumably the params contain non-comparable nesting; verify.
                if (json.dumps(self.opticalflow_filter['params'], sort_keys=True) !=
                        json.dumps(self.opticalflow_filter['params_loaded'], sort_keys=True)) or \
                        (self.opticalflow_filter['acceptance_loaded'] is None):
                    # different params
                    progress_changed.emit(0)
                    state_changed.emit(
                        'Calculating distance between frames using optical flow...'
                    )
                    print('opticalflow recalculating')
                    opticalflow_acceptance = \
                        opticalflow_filter.test_optical_flow_video_capture(
                            cv_video_capture=self.cv_video_cap,
                            distance_limit=self.opticalflow_filter['params']['threshold'],
                            frame_acceptance_np=self.frame_acceptance_np,
                            progress_tracker=CVProgressTracker(callback),
                            batch_size=self.params_batch_count,
                        )
                    self.opticalflow_filter[
                        'acceptance'] = opticalflow_acceptance
                    self.opticalflow_filter[
                        'acceptance_loaded'] = opticalflow_acceptance
                    opticalflow_filter.save_params_file(
                        self.opticalflow_filter['params'], self.cv_video_cap)
                    opticalflow_filter.save_acceptance_file(
                        opticalflow_acceptance, self.cv_video_cap)

                # status string: frames before/after this stage
                original_count = np.sum(self.frame_acceptance_np)
                current_count = np.sum(
                    self.opticalflow_filter['acceptance_loaded'])
                self.opticalflow_filter_status = (
                    "[%d] => [%d] frames (%.2f%% dropped)" %
                    (original_count, current_count,
                     (original_count - current_count) / original_count * 100))
                progress_changed.emit(1)
                state_changed.emit('Optical flow filter done...')
                self.frame_acceptance_np = self.opticalflow_filter[
                    'acceptance_loaded']
            else:
                opticalflow_filter_updated = True

            progress_changed.emit(1)
            state_changed.emit('All filters done!')
            print('all filters done')
            # brief pause so the final status remains visible
            sleep(1)