    def apply_segmentation(self):
        index = int(self.slider.value()) - 1 - self.cluster_range[0]
        segments = []
        curr_lbl = -1
        curr_segment = [0, 0]
        last_idx = -1
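        # Merge runs of identical cluster labels into [start_frame, end_frame] segments.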
        for i, c in enumerate(self.clusterings[index]):
            if i == 0:
                last_idx = 0
                curr_lbl = c

            elif c == curr_lbl:
                last_idx = i

            else:
                curr_segment[1] = self.indices[last_idx]
                segments.append(curr_segment)
                curr_segment = [self.indices[last_idx], 0]
                curr_lbl = c
                last_idx = i

        curr_segment[1] = self.indices[last_idx]
        segments.append(curr_segment)

        segmentation = self.project.create_segmentation("Auto Segmentation",
                                                        dispatch=False)
        for s in segments:
            segmentation.create_segment2(frame2ms(s[0], self.fps),
                                         frame2ms(s[1], self.fps),
                                         mode=SegmentCreationMode.INTERVAL,
                                         inhibit_overlap=False,
                                         dispatch=False)

        self.project.dispatch_changed()

    def _generate_segments(self, clustering, timestamps, fps):
        # Collapse runs of identical cluster labels into segment dicts with
        # frame (f_start/f_stop) and millisecond (t_start/t_stop) boundaries.
        segments = []
        current_lbl = -1
        start = timestamps[0]
        for i, lbl in enumerate(clustering):
            if lbl != current_lbl or i == len(clustering) - 1:
                if current_lbl != -1:
                    stop = timestamps[i]
                    # The closed segment belongs to current_lbl, not to the
                    # label that starts the next segment.
                    segments.append(
                        dict(label=current_lbl,
                             f_start=start,
                             f_stop=stop,
                             t_start=frame2ms(start, fps),
                             t_stop=frame2ms(stop, fps)))
                current_lbl = lbl
                start = timestamps[i]

        return segments
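
A minimal, purely illustrative input/output for the helper above (labels, timestamps and the resulting values are assumptions, not project data, and frame2ms is assumed to compute frame / fps * 1000):

clustering = [0, 0, 1]    # one cluster label per sampled frame
timestamps = [0, 10, 20]  # frame index of each sample
# at fps=25.0 the helper would return one closed segment:
expected = [dict(label=0, f_start=0, f_stop=20, t_start=0, t_stop=800)]
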
    def run_concurrent(self, project, sign_progress):
        movie_path = project.movie_descriptor.get_movie_path()
        video_capture = cv2.VideoCapture(movie_path)
        fps = video_capture.get(cv2.CAP_PROP_FPS)
        for i, scr in enumerate(project.screenshots):  #type: Screenshot
            if self.aborted:
                break

            sign_progress(float(i) / len(project.screenshots))
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, scr.frame_pos)
            scr.movie_timestamp = frame2ms(scr.frame_pos, fps)
            ret, frame = video_capture.read()
            a_dicts = []
            if scr.annotation_item_ids is not None:
                for a_id in scr.annotation_item_ids:
                    annotation_dict = project.get_by_id(a_id)
                    if annotation_dict is not None:
                        a_dicts.append(annotation_dict.serialize())
            if len(a_dicts) > 0:
                try:
                    # Render all annotation dicts of this screenshot onto the
                    # frame (indexing with the screenshot counter was a bug).
                    annotation = render_annotations(frame, a_dicts)
                    if annotation is not None:
                        annotation = annotation.astype(np.uint8)
                except Exception:
                    annotation = None
            else:
                annotation = None

            scr.set_img_movie(frame)
            scr.img_blend = annotation

        video_capture.release()
        return None
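
Every example in this listing converts frame indices with frame2ms; its definition is not shown here, but a minimal sketch of such a helper (an assumption about its behaviour, not the project's actual code) is:

def frame2ms(frame, fps):
    # Convert a frame index to a timestamp in milliseconds at a constant frame rate.
    return int(round(frame / fps * 1000))
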
Example #4
    def run_concurrent(self, args, callback):
        movie_path = args[0]
        start = args[1]
        end = args[2]
        resolution = args[3]
        fps = args[4]
        margins = args[5]

        start *= resolution
        length = np.clip(int(end - start), 1, None)

        video_capture = cv2.VideoCapture(movie_path)
        video_capture.set(cv2.CAP_PROP_POS_FRAMES, start)

        width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

        progress_counter = 0
        hist_counter = 0
        model = None

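        # Step through the clip, analysing only every `resolution`-th frame.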
        for i in range(length):
            if i % resolution == 0:
                video_capture.set(cv2.CAP_PROP_POS_FRAMES, i + start)
                ret, frame = video_capture.read()
            else:
                continue

            if frame is None:
                break

            # Get sub frame if there are any margins
            if margins is not None:
                frame = frame[margins[1]:margins[3], margins[0]:margins[2]]
                # cv2.imshow("", frame)
                # cv2.waitKey(5)

            t = time.time()
            t_total = time.time()

            # Colorspace Conversion
            frame_lab = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2Lab)
            palette_input_width = 300
            if model is None:
                if palette_input_width < frame.shape[0]:
                    rx = palette_input_width / frame.shape[0]
                    frame_lab_input = cv2.resize(frame_lab, None, None, rx, rx,
                                                 cv2.INTER_CUBIC)
                else:
                    # Already small enough; keep the Lab frame (not the BGR one).
                    frame_lab_input = frame_lab
                model = PaletteExtractorModel(frame_lab_input,
                                              n_pixels=400,
                                              num_levels=8)
            t_read = time.time() - t
            t = time.time()
            # Histogram
            hist = np.divide(calculate_histogram(frame_lab, 16),
                             (width * height))
            t_hist = time.time() - t
            t = time.time()
            # hist = None
            palette = color_palette(frame_lab,
                                    n_merge_steps=200,
                                    n_merge_per_lvl=20,
                                    image_size=150.0,
                                    n_pixels=400,
                                    seeds_input_width=palette_input_width,
                                    seeds_model=model)
            t_palette = time.time() - t
            t = time.time()
            # palette = None

            # Color Features
            frame_lab = cv2.cvtColor(
                frame.astype(np.float32) / 255, cv2.COLOR_BGR2Lab)
            color_bgr = np.mean(frame, axis=(0, 1))
            color_lab = np.mean(frame_lab, axis=(0, 1))

            feature_mat = np.zeros(shape=8)
            feature_mat[0:3] = color_lab
            feature_mat[3:6] = color_bgr
            feature_mat[6] = lab_to_sat(lab=color_lab, implementation="luebbe")
            feature_mat[7] = lab_to_sat(lab=color_lab,
                                        implementation="pythagoras")

            t_features = time.time() - t
            t = time.time()
            # Spatial
            rx = 250 / frame.shape[0]
            frame = cv2.resize(frame, None, None, rx, rx, cv2.INTER_CUBIC)
            eout, enorm, edenorm = get_spacial_frequency_heatmap(
                frame, method="edge-mean", normalize=False)
            cout, cnorm, cdenorm = get_spacial_frequency_heatmap(
                frame, method="color-var", normalize=False)
            hout, hnorm, hdenorm = get_spacial_frequency_heatmap(
                frame, method="hue-var", normalize=False)
            lout, lnorm, ldenorm = get_spacial_frequency_heatmap(
                frame, method="luminance-var", normalize=False)

            t_spatial = time.time() - t
            t = time.time()
            if self.aborted:
                return

            max_p_length = 1000
            palette_mat = np.zeros(shape=(max_p_length, 6))
            count = max_p_length
            if len(palette.tree[0]) < max_p_length:
                count = len(palette.tree[0])
            palette_mat[:len(palette.merge_dists), 0] = palette.merge_dists
            palette_mat[:count, 1] = palette.tree[0][:count]
            palette_mat[:count, 2:5] = palette.tree[1][:count]
            palette_mat[:count, 5] = palette.tree[2][:count]

            yielded_result = dict(
                frame_pos=i + start,
                time_ms=frame2ms(i + start, fps),
                hist=hist,
                palette=palette_mat,
                features=feature_mat,
                spatial_edge=np.array(
                    [np.amax(edenorm), np.mean(edenorm)], dtype=np.float32),
                spatial_color=np.array(
                    [np.amax(cdenorm), np.mean(cdenorm)], dtype=np.float32),
                spatial_hue=np.array(
                    [np.amax(hdenorm), np.mean(hdenorm)], dtype=np.float32),
                spatial_luminance=np.array(
                    [np.amax(ldenorm), np.mean(ldenorm)], dtype=np.float32))
            callback.emit([yielded_result, (i + start) / end])

            t_store = time.time() - t
            if self.profile:
                print("Total", round(time.time() - t_total, 4),
                      "Read", round(t_read, 4),
                      "Features", round(t_features, 4),
                      "Histogram:", round(t_hist, 4),
                      "Palette", round(t_palette, 4),
                      "Spatial", round(t_spatial, 4),
                      "Store", round(t_store, 4))
            hist_counter += 1
            progress_counter += 1

        video_capture.release()
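
The callback used above behaves like a Qt signal carrying [result_dict, progress]; a hypothetical receiver slot (name and print format are assumptions) could unpack the emitted payload like this:

def on_colorimetry_result(payload):
    # payload is the emitted list: a per-frame result dict and a progress float in [0, 1].
    result, progress = payload
    print("frame", result["frame_pos"], "at", result["time_ms"], "ms",
          "({:.0%} done)".format(progress))
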
Example #5
    def run_concurrent(self, args, sign_progress):
        annotation_id = args[0]
        bbox = tuple(args[1])
        movie_path = args[2]
        fps = args[5]
        start_frame = ms_to_frames(args[3], fps)
        end_frame = ms_to_frames(args[4], fps)
        method = args[6]
        resolution = args[7]

        keys = []

        # TRACKING
        if method == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif method == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif method == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif method == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif method == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif method == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        else:
            raise ValueError("Tracking method not identifiable: " + str(method))

        # Read video
        capture = cv2.VideoCapture(movie_path)

        # Exit if video not opened.
        if not capture.isOpened():
            raise RuntimeError("Tracking: Could not open video.")

        # Read first frame.
        capture.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        ok, frame = capture.read()
        if not ok:
            raise RuntimeError("Tracking: Could not read Frame.")

        # Initialize tracker with first frame and bounding box
        ok = tracker.init(frame, bbox)

        for i in range(start_frame, end_frame, 1):
            sign_progress(
                round(float(i - start_frame) / (end_frame - start_frame), 2))
            # Read a new frame
            ok, frame = capture.read()
            if not ok:
                break

            # Update tracker
            ok, bbox = tracker.update(frame)

            # Draw bounding box
            if ok:
                # Tracking success
                if i % resolution == 0:
                    time_ms = frame2ms(i, fps)  # avoid shadowing the time module
                    pos = [bbox[0], bbox[1]]
                    keys.append([time_ms, pos])

                    p1 = (int(bbox[0]), int(bbox[1]))
                    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                    # cv2.imshow("Returned", frame)
                    # cv2.waitKey(30)

        capture.release()
        return [annotation_id, keys]
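
For readability, the positional args consumed in Example #5 map roughly as follows (a hypothetical invocation; the concrete values are assumptions):

args = ["annotation-uuid",     # args[0]: id of the annotation to attach the track to
        (50, 60, 120, 140),    # args[1]: initial bounding box (x, y, w, h)
        "movie.mp4",           # args[2]: path to the video file
        0, 4000,               # args[3]/args[4]: tracking range in milliseconds
        25.0,                  # args[5]: frames per second
        "KCF",                 # args[6]: tracker type
        5]                     # args[7]: store a key every 5th frame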