def prepare(self, project: VIANProject, targets: List[IProjectContainer], fps, class_objs=None):
    """
    This function is called before the analysis takes place.
    Since it runs in the main thread, we can access our project
    and gather all the data we need.
    """
    super(ColorPaletteAnalysis, self).prepare(project, targets, fps, class_objs)

    args = []
    fps = project.movie_descriptor.fps
    if not isinstance(targets, list):
        targets = [targets]

    for tgt in targets:
        semseg = None
        if isinstance(tgt, Screenshot):
            if class_objs is not None:
                semseg = tgt.get_connected_analysis("SemanticSegmentationAnalysis")
                if len(semseg) > 0:
                    semseg = semseg[0]
                else:
                    semseg = None
        args.append([ms_to_frames(tgt.get_start(), fps),
                     ms_to_frames(tgt.get_end(), fps),
                     project.movie_descriptor.movie_path,
                     tgt.get_id(),
                     project.movie_descriptor.get_letterbox_rect(),
                     semseg])
    return args
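# The helpers ms_to_frames and frame2ms are used throughout these prepare()
# methods to convert between millisecond timestamps and frame indices.
# A minimal sketch of the conversion they are assumed to perform (the actual
# VIAN implementations may differ, e.g. in rounding behavior):

def ms_to_frames(time_ms, fps):
    # Convert a millisecond timestamp to a frame index at the given frame rate
    return int(round(time_ms / 1000 * fps))

def frame2ms(frame, fps):
    # Convert a frame index back to a millisecond timestamp
    return int(round(frame / fps * 1000))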
def prepare(self, project: VIANProject, targets: List[IProjectContainer], fps, class_objs=None):
    """
    This function is called before the analysis takes place.
    Since it runs in the main thread, we can access our project
    and gather all the data we need.
    """
    super(SemanticSegmentationAnalysis, self).prepare(project, targets, fps, class_objs)

    args = []
    fps = project.movie_descriptor.fps
    for tgt in targets:
        if tgt.get_type() == SCREENSHOT_GROUP:
            # Screenshot groups are expanded into one argument list per screenshot
            for s in tgt.screenshots:
                args.append([ms_to_frames(s.get_start(), fps),
                             ms_to_frames(s.get_end(), fps),
                             project.movie_descriptor.movie_path,
                             s.get_id()])
        else:
            args.append([ms_to_frames(tgt.get_start(), fps),
                         ms_to_frames(tgt.get_end(), fps),
                         project.movie_descriptor.movie_path,
                         tgt.get_id()])
    return args
def prepare(self, project: VIANProject, targets: List[IProjectContainer], fps, class_objs=None):
    """
    This function is called before the analysis takes place.
    Since it runs in the main thread, we can access our project
    and gather all the data we need.
    """
    super(BarcodeAnalysis, self).prepare(project, targets, fps, class_objs)

    if project.folder is None and self.output_dir is None:
        raise ValueError("Z-Projections need a directory-based project or an output_dir")
    elif project.folder is not None:
        self.output_dir = project.data_dir

    args = []
    fps = project.movie_descriptor.fps
    for tgt in targets:
        semseg = None
        if isinstance(tgt, Screenshot):
            if class_objs is not None:
                semseg = tgt.get_connected_analysis("SemanticSegmentationAnalysis")
                if len(semseg) > 0:
                    semseg = semseg[0]
                else:
                    semseg = None
        args.append(dict(start=ms_to_frames(tgt.get_start(), fps),
                         end=ms_to_frames(tgt.get_end(), fps),
                         movie_path=project.movie_descriptor.movie_path,
                         target=tgt.get_id(),
                         margins=project.movie_descriptor.get_letterbox_rect(),
                         semseg=semseg))
    return args
def auto_screenshot(project: VIANProject, method, distribution, n, segmentation, hdf5_manager):
    frame_ms = []

    if method == "Uniform Distribution":
        if distribution == "N - Per Segment":
            # Place n evenly spaced screenshots within each segment
            for s in segmentation.segments:
                delta = (s.get_end() - s.get_start()) / n
                k = s.get_start()
                while k < s.get_end():
                    frame_ms.append(k)
                    k += delta
        elif distribution == "N - Complete":
            # Place n evenly spaced screenshots over the whole movie
            delta = project.movie_descriptor.duration / n
            k = 0
            while k < project.movie_descriptor.duration:
                frame_ms.append(k)
                k += delta
        elif distribution == "Every N-th Frame":
            k = 0
            while k < project.movie_descriptor.duration:
                frame_ms.append(k)
                k += frame2ms(n, project.movie_descriptor.fps)

    elif method == "Most Informative":
        # Per segment, pick the frames whose color histograms differ the most
        for s in segmentation.segments:
            idx_start = int(ms_to_frames(s.get_start(), project.movie_descriptor.fps)
                            / project.colormetry_analysis.resolution)
            idx_end = int(ms_to_frames(s.get_end(), project.movie_descriptor.fps)
                          / project.colormetry_analysis.resolution)
            indices = range(idx_start, idx_end, 1)

            hists = hdf5_manager.col_histograms()[indices]
            hists = np.reshape(hists, newshape=(hists.shape[0],
                                                hists.shape[1] * hists.shape[2] * hists.shape[3]))
            # L2-normalize each histogram before comparing them
            hists /= np.sqrt(np.sum(hists ** 2, axis=1, keepdims=True))
            result = select_rows(hists, np.clip(n, 1, hists.shape[0]))
            frame_ms.extend([frame2ms((f + idx_start) * project.colormetry_analysis.resolution,
                                      project.movie_descriptor.fps)
                             for f in result])

    # Convert the collected timestamps (ms) to frame indices
    frame_pos = []
    for f in frame_ms:
        frame_pos.append(ms_to_frames(f, project.movie_descriptor.fps))
    return frame_pos
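# select_rows is assumed to pick the n most mutually dissimilar rows from the
# normalized histogram matrix. A minimal sketch using greedy farthest-point
# sampling under that assumption (the actual implementation may differ):

import numpy as np

def select_rows(rows, n):
    # Start from the first row, then repeatedly add the row that is farthest
    # (in Euclidean distance) from everything selected so far.
    selected = [0]
    dist = np.linalg.norm(rows - rows[0], axis=1)
    for _ in range(int(n) - 1):
        idx = int(np.argmax(dist))
        selected.append(idx)
        dist = np.minimum(dist, np.linalg.norm(rows - rows[idx], axis=1))
    return selected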
def prepare(self, project, targets, fps, class_objs=None):
    super(MovieMosaicAnalysis, self).prepare(project, targets, fps, class_objs)

    args = []
    for t in targets:
        start = ms_to_frames(t.get_start(), fps)
        if t.get_type() == MOVIE_DESCRIPTOR:
            # The movie descriptor's end point is taken as-is
            end = t.get_end()
        else:
            end = ms_to_frames(t.get_end(), fps)
        args.append([start, end, project.movie_descriptor.get_movie_path(), t.get_id()])
    return args
def prepare(self, project: VIANProject, targets: List[Segmentation], fps, class_objs=None):
    """
    This function is called before the analysis takes place.
    Since it runs in the main thread, we can access our project
    and gather all the data we need.
    """
    super(BarcodeAnalysisJob, self).prepare(project, targets, fps, class_objs)

    # Since multiple_result is True, we want to generate a barcode for each
    # segmentation. Thus an array of arguments has to be returned: one
    # argument array per segmentation.
    args = []
    movie_path = project.movie_descriptor.get_movie_path()

    # Targets are segmentations
    for tgt in targets:
        name = tgt.get_name()

        # Collect all segment start and end points as frame indices
        segments = []
        if tgt.get_type() == SEGMENTATION:
            for segm in tgt.segments:
                start = ms_to_frames(segm.get_start(), fps)
                end = ms_to_frames(segm.get_end(), fps)
                segments.append([start, end])
        elif tgt.get_type() == MOVIE_DESCRIPTOR:
            segments.append([0, ms_to_frames(project.movie_descriptor.duration, fps)])
        else:
            segments.append([ms_to_frames(tgt.get_start(), fps),
                             ms_to_frames(tgt.get_end(), fps)])

        args.append([segments, movie_path, name, tgt.get_id()])
    return args
def get_auto_text(self, property_name, time_ms, fps):
    if property_name == "Current Time":
        return ms_to_string(time_ms)
    elif property_name == "Current Frame":
        return str(ms_to_frames(time_ms, fps))
    elif property_name == "Movie Name":
        return self.movie_name
    elif property_name == "Movie Path":
        return self.movie_path
    elif property_name == "Movie ID":
        return self.movie_id
    elif property_name == "Year":
        return self.year
    elif property_name == "Source":
        return self.source
    elif property_name == "Duration":
        return ms_to_string(self.duration)
    elif property_name == "Notes":
        return self.notes
    else:
        return "Invalid Property"
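# ms_to_string is assumed to format a millisecond timestamp for display as
# HH:MM:SS. A minimal sketch under that assumption (the actual VIAN helper
# may include milliseconds or other options):

def ms_to_string(time_ms):
    seconds = int(time_ms / 1000)
    h = seconds // 3600
    m = (seconds % 3600) // 60
    s = seconds % 60
    return "{:02d}:{:02d}:{:02d}".format(h, m, s)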
def prepare(self, project: VIANProject):
    if project.colormetry_analysis is None:
        start = 0
    else:
        # Resume an existing colormetry run at its current index
        self.colormetry_analysis = project.colormetry_analysis
        self.resolution = self.colormetry_analysis.resolution
        start = self.colormetry_analysis.current_idx

    if start == 0:
        # Start (or restart) from scratch with a fresh colormetry analysis
        self.colormetry_analysis = project.create_colormetry(resolution=self.resolution)
        self.colormetry_analysis.clear()

    self.duration = project.movie_descriptor.duration
    frame_duration = ms_to_frames(self.duration, project.movie_descriptor.fps)
    return [project.movie_descriptor.get_movie_path(),
            start,
            frame_duration,
            self.resolution,
            project.movie_descriptor.fps,
            project.movie_descriptor.get_letterbox_rect()]
def on_import(self, project: VIANProject, fps):
    mode = 0
    scr_paths = []
    timestamps = []
    segment_ranges = []
    segment_ids = []

    # If a time location is given, we just want to parse the screenshot
    # locations from the file names and place them in the project.
    if self.checkBox_UseLocation.isChecked() and self.lineEdit_Delimiter.text() != "":
        # The spin boxes are 1-based; convert to 0-based token indices (-1 = unused)
        idx_h = self.sB_PositionTimeH.value() - 1
        idx_m = self.sB_PositionTimeM.value() - 1
        idx_s = self.sB_PositionTimeS.value() - 1
        idx_ms = self.sB_PositionTimeMS.value() - 1
        idx_segment = self.sB_PositionSegment.value() - 1

        has_time_location = (idx_h >= 0 or idx_m >= 0 or idx_s >= 0 or idx_ms >= 0)

        if has_time_location:
            # Mode 0: place screenshots by the timestamp parsed from the file name
            mode = 0
            for f in self.files:
                directory, file = os.path.split(f)
                file = file.split(".")[0]
                file = file.split(self.lineEdit_Delimiter.text())
                try:
                    t_hour = 0
                    t_min = 0
                    t_sec = 0
                    t_milli = 0
                    if idx_h >= 0:
                        t_hour = int(file[idx_h])
                    if idx_m >= 0:
                        t_min = int(file[idx_m])
                    if idx_s >= 0:
                        t_sec = int(file[idx_s])
                    if idx_ms >= 0:
                        t_milli = int(file[idx_ms])
                    time_ms = ts_to_ms(t_hour, t_min, t_sec, t_milli)
                    timestamps.append(time_ms)
                    scr_paths.append(f)
                except Exception as e:
                    log_error("Error in Screenshot Import", str(e))
                    continue

        elif idx_segment >= 0:
            # Mode 1: place screenshots by the segment id parsed from the file name
            mode = 1
            for f in self.files:
                directory, file = os.path.split(f)
                file = file.split(".")[0]
                file = file.split(self.lineEdit_Delimiter.text())
                try:
                    segment_id = int(file[idx_segment])
                    scr_paths.append(f)
                    segment_ids.append(segment_id - 1)
                except Exception as e:
                    log_error("Error in Screenshot Import", str(e))
                    continue

            for s in project.get_main_segmentation().segments:
                segment_ranges.append([ms_to_frames(s.get_start(), fps),
                                       ms_to_frames(s.get_end(), fps)])
    else:
        # Mode 2: no location information; hand over only the paths
        mode = 2
        scr_paths = self.files

    args = dict(mode=mode,
                movie_path=project.movie_descriptor.get_movie_path(),
                scr_paths=scr_paths,
                segment_ids=segment_ids,
                segment_ranges=segment_ranges,
                timestamps=timestamps)

    importer = ScreenshotImporter(args)
    self.main_window.run_job_concurrent(importer)
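# ts_to_ms combines the parsed hour/minute/second/millisecond fields into a
# single millisecond timestamp. A minimal sketch of the assumed arithmetic:

def ts_to_ms(hour, minute, second, milli):
    return ((hour * 60 + minute) * 60 + second) * 1000 + milli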
def run_concurrent(self, args, sign_progress):
    annotation_id = args[0]
    bbox = tuple(args[1])
    movie_path = args[2]
    fps = args[5]
    start_frame = ms_to_frames(args[3], fps)
    end_frame = ms_to_frames(args[4], fps)
    method = args[6]
    resolution = args[7]
    keys = []

    # TRACKING
    if method == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    elif method == 'MIL':
        tracker = cv2.TrackerMIL_create()
    elif method == 'KCF':
        tracker = cv2.TrackerKCF_create()
    elif method == 'TLD':
        tracker = cv2.TrackerTLD_create()
    elif method == 'MEDIANFLOW':
        tracker = cv2.TrackerMedianFlow_create()
    elif method == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()
    else:
        raise ValueError("Tracking method not identifiable: " + str(method))

    # Open the video
    capture = cv2.VideoCapture(movie_path)
    if not capture.isOpened():
        raise RuntimeError("Tracking: Could not open video.")

    # Read the first frame
    capture.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    ok, frame = capture.read()
    if not ok:
        raise RuntimeError("Tracking: Could not read frame.")

    # Initialize the tracker with the first frame and bounding box
    ok = tracker.init(frame, bbox)

    for i in range(start_frame, end_frame, 1):
        sign_progress(round(float(i - start_frame) / (end_frame - start_frame), 2))

        # Read a new frame
        ok, frame = capture.read()
        if not ok:
            break

        # Update the tracker
        ok, bbox = tracker.update(frame)

        if ok:
            # Tracking success: store a key every <resolution> frames
            if i % resolution == 0:
                time = frame2ms(i, fps)
                pos = [bbox[0], bbox[1]]
                keys.append([time, pos])
            # Draw the bounding box onto the frame
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

    capture.release()
    return [annotation_id, keys]
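# A hypothetical argument list for the tracking job above, illustrating the
# positional layout that run_concurrent unpacks (all values are made up):

example_args = [
    "annotation-uuid",     # args[0]: annotation_id
    (120, 80, 64, 48),     # args[1]: initial bounding box (x, y, w, h)
    "/path/to/movie.mp4",  # args[2]: movie_path
    1000,                  # args[3]: start time in ms
    5000,                  # args[4]: end time in ms
    29.97,                 # args[5]: fps
    "KCF",                 # args[6]: tracking method
    5                      # args[7]: resolution, store a key every 5th frame
]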