def yolo_drawbbox(in_frame_folder, in_detection_result_file, out_folder=None,
                  start=1, end=None, show_video=False):
    in_frame_folder = os.path.abspath(in_frame_folder)
    in_detection_result_file = os.path.abspath(in_detection_result_file)
    if out_folder is not None:
        out_folder = os.path.abspath(out_folder)

    pickler = BATRPickle(in_file=in_detection_result_file)
    sorted_frames_filenames = sorted(os.listdir(in_frame_folder))

    for frame_n, frame_filename in enumerate(sorted_frames_filenames, start=start):
        frame_filename = os.path.join(in_frame_folder, frame_filename)
        detected_objects = pickler.unpickle(f"frame{frame_n:06d}")
        frame = cv2.imread(frame_filename)

        for obj in detected_objects:
            Yolov3.draw_bboxes(frame, obj)

        if out_folder is not None:
            cv2.imwrite(os.path.join(out_folder, f"frame{frame_n:06d}.jpg"), frame)

        # Only open a display window when the caller asked for one.
        if show_video:
            cv2.imshow("image", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        if frame_n == end:
            break

    cv2.destroyAllWindows()
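# Illustrative invocation (the paths are examples, not values from the
# project config):
#
#     yolo_drawbbox("input/videos/frames", "input/detections.tar.gz",
#                   out_folder="output/bboxes", show_video=True)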
def detect_and_save(video: Video, start=0, end=None, detector=None):
    """
    Create a file containing object detection results for the frames of a video.

    :param video: object of the Video class
    :param start: starting frame number (default: 0)
    :param end: ending frame number (default: last frame)
    :param detector: detector to use for object detection
    """
    in_frame_folder = os.path.abspath(video.frames_path)
    out_file_name = os.path.abspath(video.detection_path)

    pickler = BATRPickle(out_file=out_file_name)
    sorted_frames_filenames = sorted(os.listdir(in_frame_folder))

    for frame_n, frame_filename in enumerate(sorted_frames_filenames, start=start):
        frame_filename = os.path.join(in_frame_folder, frame_filename)
        detected_objects = detector.detect(frame_filename)
        pickler.pickle(detected_objects, f"frame{frame_n:06d}")

        if frame_n == end:
            break

    # Drop the reference so the pickler can close its output archive.
    del pickler
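# Illustrative usage, assuming the project's Yolov3 detector can be
# constructed with no arguments (its actual constructor signature may differ):
#
#     detect_and_save(Video("input/videos/wales_shortened.mp4"), detector=Yolov3())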
def main():
    video = Video(CONFIG["input_video_file"])

    output_video = None
    if CONFIG["create_masked_video"]:
        # VideoWriter expects (width, height); frame.shape is (height, width, channels).
        output_video = cv2.VideoWriter(CONFIG["output_video_file"],
                                       cv2.VideoWriter_fourcc(*"mp4v"), 30,
                                       tuple(reversed(video[0].shape[:-1])))

    background = None
    if CONFIG["subtract_background"]:
        background = cv2.imread(CONFIG["background_file"])

    pickler = BATRPickle(in_file=CONFIG["input_mask_file"])

    for frame_n in range(CONFIG["offset"], CONFIG["end"]):
        print(frame_n)
        frame = cv2.cvtColor(video[frame_n], cv2.COLOR_BGR2RGB)
        deeplab_result = pickler.unpickle(f"frame{frame_n:06d}")

        if CONFIG["subtract_background"]:
            # Intended as a symmetric frame/background difference; note that
            # uint8 arithmetic wraps around rather than saturating.
            frame = (background - frame) + (frame - background)

        # Assumes vis_segmentation returns the rendered visualization.
        out_frame = vis_segmentation(deeplab_result.resized_image,
                                     deeplab_result.segmentation_map)

        if CONFIG["display_image"] or CONFIG["display_video"]:
            cv2.imshow("output", out_frame)
            if CONFIG["display_image"]:
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            elif CONFIG["display_video"]:
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        if CONFIG["create_masked_video"] and output_video:
            output_video.write(out_frame)

    if CONFIG["display_video"]:
        cv2.destroyAllWindows()
    if CONFIG["create_masked_video"] and output_video:
        output_video.release()
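# The wrap-around caveat above can be avoided with OpenCV's saturating
# absolute difference; a sketch of the equivalent, wrap-safe step:
#
#     diff = cv2.absdiff(frame, background)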
def yolo_object_tracker(video: Video, start=0, end=None):
    in_frame_folder = os.path.abspath(video.frames_path)
    in_foreground_mask_folder = os.path.abspath(video.foreground_masks_path)
    in_detection_result_file = os.path.abspath(video.detection_path)

    pickler = BATRPickle(in_file=in_detection_result_file)
    sorted_frames_filenames = sorted(os.listdir(in_frame_folder))
    store = []

    for frame_n, frame_filename in enumerate(sorted_frames_filenames, start=start):
        frame_file_abs_path = os.path.join(in_frame_folder, frame_filename)
        foreground_mask_abs_path = os.path.join(in_foreground_mask_folder, frame_filename)

        detected_objects = pickler.unpickle(f"frame{frame_n:06d}")
        frame = cv2.imread(frame_file_abs_path)
        mask = cv2.imread(foreground_mask_abs_path)

        for obj in detected_objects:
            # Crop the object and its foreground mask using the detection bbox.
            obj_image = frame[obj.ya():obj.yb(), obj.xa():obj.xb()]
            obj_mask = mask[obj.ya():obj.yb(), obj.xa():obj.xb()]

            # Skip degenerate crops (bboxes that fall entirely outside the frame).
            if obj_mask.size > 0 and obj_image.size > 0:
                track_object(obj=obj, obj_mask=obj_mask, obj_image=obj_image,
                             _store=store, _frame_n=frame_n,
                             _store_data_path=video.store_data_path)

        print(f"Frame# {frame_n}")
        if frame_n == end:
            break

    with open(video.store_path, "wb") as f:
        pickle.dump(store, f)

    cv2.destroyAllWindows()
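# Empty crops come from bboxes that extend past the image border. A small
# clamping helper makes the valid crop bounds explicit (a sketch; the
# project's own correct_bbox() plays a similar role elsewhere):
def clamp_bbox(xa, ya, xb, yb, frame):
    """Clamp (xa, ya, xb, yb) to the frame so slices are never out of range."""
    h, w = frame.shape[:2]
    return max(0, xa), max(0, ya), min(w, xb), min(h, yb)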
def yolo_deeplab(in_frame_folder, in_detection_result_file, in_mask_file,
                 out_frame_folder, start=1, end=None, show_video=False):
    in_frame_folder = os.path.abspath(in_frame_folder)
    out_frame_folder = os.path.abspath(out_frame_folder)
    in_mask_file = os.path.abspath(in_mask_file)
    in_detection_result_file = os.path.abspath(in_detection_result_file)

    background = cv2.imread("input/testavg.jpg")

    pickler = BATRPickle(in_file=in_detection_result_file)
    mask_pickler = BATRPickle(in_file=in_mask_file)

    sorted_frames_filenames = sorted(os.listdir(in_frame_folder))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    store = []

    for frame_n, frame_filename in enumerate(sorted_frames_filenames, start=start):
        out_frame = np.copy(background)
        frame_filename = os.path.join(in_frame_folder, frame_filename)
        detected_objects = pickler.unpickle(f"frame{frame_n:06d}")
        frame = cv2.imread(frame_filename)

        # Binarize the DeepLab segmentation mask for this frame, then round-trip
        # it through disk to obtain a 3-channel uint8 alpha image.
        mask = mask_pickler.unpickle(f"frame{frame_n:06d}")
        mask[mask > 0] = 255
        cv2.imwrite("mask.jpg", mask)
        _alpha = cv2.imread("mask.jpg")
        _alpha = cv2.dilate(_alpha, kernel, iterations=3)
        _alpha = np.float32(_alpha) / 255

        for obj in detected_objects:
            # YOLO class labels are stored as stringified bytes, e.g. "b'car'".
            if obj.type != "b'car'":
                continue

            obj_image = frame[obj.ya:obj.yb, obj.xa:obj.xb]
            track_object(obj, obj_image, store)

            # Alpha-blend the detected object onto the static background.
            _forg = np.float32(obj_image)
            _back = np.float32(background[obj.ya:obj.yb, obj.xa:obj.xb])
            _forg = cv2.multiply(_alpha[obj.ya:obj.yb, obj.xa:obj.xb], _forg)
            _back = cv2.multiply(1.0 - _alpha[obj.ya:obj.yb, obj.xa:obj.xb], _back)
            out_frame[obj.ya:obj.yb, obj.xa:obj.xb] = cv2.add(_forg, _back)

        draw_object_track(out_frame, store)
        cv2.imwrite(f"{out_frame_folder}/frame{frame_n:06d}.jpg", out_frame)
        print(frame_n)

        if show_video:
            cv2.imshow("image", out_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        if frame_n == end:
            break

    cv2.destroyAllWindows()
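# A minimal in-memory alternative to the mask.jpg round-trip above (a sketch,
# not part of the original pipeline): cv2.merge() replicates the mask to three
# channels directly, so no temporary file is needed.
def make_alpha(mask, kernel, iterations=3):
    """Turn a single-channel {0, 255} mask into a float32 3-channel alpha in [0, 1]."""
    mask = cv2.dilate(np.uint8(mask), kernel, iterations=iterations)
    alpha = cv2.merge([mask, mask, mask])  # HxW -> HxWx3
    return np.float32(alpha) / 255.0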
def main():
    output_video = None
    if Config.create_masked_video:
        # Deferred: the writer needs a frame size, which is only known once
        # the first frame has been read.
        pass

    background = cv2.imread(Config.background_file)

    pickler = BATRPickle(in_file=Config.input_mask_file)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

    # Object tracks accumulated across frames; resetting this inside the loop
    # would discard all tracks.
    store = []

    for frame_n in range(Config.offset, Config.end):
        out_frame = np.copy(background)
        print(frame_n)
        frame = cv2.imread(f"input/videos/frames/frame{frame_n:06d}.jpg")
        results = pickler.unpickle(f"frame{frame_n:06d}")
        r = results[0]
        n = r['rois'].shape[0]
        colors = visualize.random_colors(n)

        if Config.subtract_background:
            # Intended as a symmetric frame/background difference; note that
            # uint8 arithmetic wraps around rather than saturating.
            frame = (background - frame) + (frame - background)

        for i in range(n):
            # Mask R-CNN ROIs are stored as (y1, x1, y2, x2).
            y1, x1, y2, x2 = r['rois'][i]
            obj = DetectedObject(_type=class_names[r['class_ids'][i]],
                                 _probability=1,
                                 image=frame[y1:y2, x1:x2],
                                 _xa=x1, _ya=y1, _xb=x2, _yb=y2,
                                 _w=abs(x1 - x2), _h=abs(y1 - y2))

            cx_axis = int((obj.xa + obj.xb) / 2)
            cy_axis = int((obj.ya + obj.yb) / 2)

            # Round-trip the instance mask through disk to obtain a 3-channel
            # uint8 alpha image.
            mask = np.float32(255 * r['masks'][:, :, i])
            cv2.imwrite("mask.jpg", mask)
            _alpha = cv2.imread("mask.jpg")

            # Alpha-blend the object crop onto the background.
            _forg = np.float32(obj.image)
            _back = np.float32(background[obj.ya:obj.yb, obj.xa:obj.xb])
            _alpha = np.float32(_alpha) / 255
            _forg = cv2.multiply(_alpha[obj.ya:obj.yb, obj.xa:obj.xb], _forg)
            _back = cv2.multiply(1.0 - _alpha[obj.ya:obj.yb, obj.xa:obj.xb], _back)
            # Composited crop; currently unused because the output below uses
            # apply_mask instead.
            output = cv2.add(_forg, _back)
            _forg = cv2.dilate(_forg, kernel, iterations=3)

            out_frame = visualize.apply_mask(frame, r['masks'][:, :, i],
                                             random.choice(colors))

        track_for_object(out_frame, store, "car")

        if Config.display_image or Config.display_video:
            cv2.imshow("output", out_frame)
            if Config.display_image:
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            elif Config.display_video:
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        cv2.imwrite(f"output/ferozpur10012019_maskrcnn/frame{frame_n:06d}.jpg", out_frame)

    if Config.display_video:
        cv2.destroyAllWindows()
    if Config.create_masked_video and output_video:
        output_video.release()
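# For reference, a sketch of what a Matterport-style apply_mask does (the real
# implementation lives in the Mask R-CNN visualize module; alpha and scaling
# details may differ):
def apply_mask_sketch(image, mask, color, alpha=0.5):
    """Blend `color` into `image` wherever the instance `mask` is set."""
    for c in range(3):
        image[:, :, c] = np.where(mask == 1,
                                  image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                  image[:, :, c])
    return image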
CONFIG["end"] = 1000 CONFIG["input_video_file"] = "input/videos/wales_shortened.mp4" CONFIG["input_mask_file"] = "input/maskrcnn0-5000.tar.gz" CONFIG["input_opticalflow_file"] = "input/optical_flow0-750.tar.xz" CONFIG["display_video"] = False CONFIG["display_image"] = False CONFIG["display_object_info"] = False CONFIG["create_video"] = True CONFIG["output_video_file"] = "output/wales_shortened_iou.avi" video = Video(CONFIG["input_video_file"]) pickled_masks = BATRPickle(in_file=CONFIG["input_mask_file"]) output_video = None if CONFIG["create_video"]: output_video = cv2.VideoWriter(CONFIG["output_video_file"], cv2.VideoWriter_fourcc(*"X264"), 30, tuple(reversed(video[0].shape[:-1])), True) optical_flow_tar = tarfile.open(CONFIG["input_opticalflow_file"], "r:xz") store = [] for frame_n in range(0, min(len(video), CONFIG["end"])): print(frame_n) frame = video[frame_n + 1] optical_flow_member = optical_flow_tar.getmember(
import cv2
import numpy as np
import tarfile

from utilities.utils import BATRPickle

optical_flow_tar = tarfile.open("input/optical_flow0-750.tar.xz", "r:xz")
pickled_masks = BATRPickle("input/maskrcnn0-5000.tar.gz")

run_video = True
prev_frame = None

# Iterate over every optical-flow image stored in the archive.
for member in optical_flow_tar.getmembers():
    file = optical_flow_tar.extractfile(member)
    # np.frombuffer decodes the raw bytes without copying (np.fromstring is
    # deprecated for binary data).
    content = np.frombuffer(file.read(), np.uint8)
    image = cv2.imdecode(content, cv2.IMREAD_COLOR)

    cv2.imshow("optical flow", image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()

# for file in optical_flow_tar.allextractedfiles():
#     content = np.frombuffer(file.read(), np.uint8)
#     image = cv2.imdecode(content, cv2.IMREAD_COLOR)
#     gradient = np.gradient(image)
#     hypot = np.uint8(np.hypot(gradient[0], gradient[1]))
#     output_video.write(hypot)
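# A runnable version of the gradient-magnitude step from the commented-out
# experiment above (a sketch; video writing is omitted): np.gradient yields
# one array per axis, and np.hypot combines the y/x gradients elementwise.
def gradient_magnitude(image):
    """Edge-strength image from the spatial gradients of `image`."""
    gy, gx = np.gradient(np.float32(image))[:2]
    return np.uint8(np.clip(np.hypot(gy, gx), 0, 255))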
    _store.append(new_object)
    return _store[-1].color, len(_store) - 1


def track_for_object(frame, _store):
    # Draw each object's trail by joining consecutive centroids.
    for o in _store:
        for _c, c in enumerate(o.centroids[:-1]):
            c_next = o.centroids[_c + 1]
            cv2.line(frame, c, c_next, (0, 255, 255), 4)


offset = 0
counter = 0

video = BATRVideoCapture("input/videos/wales_shortened.mp4", offset)
background = cv2.imread("input/wales_background.jpg")
pickler = BATRPickle(in_file="input/frames0-500.tar.bz2")
output_video = cv2.VideoWriter("output/tracks.mp4", cv2.VideoWriter_fourcc(*"mp4v"),
                               30, video.dimensions)
unpickler = pickler.unpickle()

store = []
run_video = True

for image in video.frames:
    _filename = None
    try:
        _filename, results = next(unpickler)
    except StopIteration:
        break

    r = results[0]
    N = r['rois'].shape[0]
def improved_fill_missing_detection(video: Video):
    in_detection_result_file = os.path.abspath(video.detection_path)
    pickler = BATRPickle(in_file=in_detection_result_file)

    tracking_path = os.path.join(os.path.dirname(in_detection_result_file),
                                 "improved_detections.tar.gz")
    output_pickler = BATRPickle(out_file=tracking_path)

    sorted_frames_abs_filenames = sorted(
        os.path.join(video.frames_path, filename)
        for filename in os.listdir(video.frames_path))

    last_serial = 0
    previously_detected_objects = []

    for frame_n, frame_filename in enumerate(sorted_frames_abs_filenames, start=0):
        frame = cv2.imread(frame_filename)
        detected_objects = pickler.unpickle(f"frame{frame_n:06d}")

        # Find out which objects carry over from the previous frame and which are new.
        for obj in detected_objects:
            set_tracker(obj, frame)
            nn_index, nn_distance = nearest_neighbor(obj, previously_detected_objects)
            if nn_distance > 0.5 and not math.isinf(nn_distance):
                previously_detected_objects[nn_index].tracked_in_next_frame = True
                obj.serial = previously_detected_objects[nn_index].serial
            else:
                # This is a new object.
                obj.serial = last_serial
                last_serial += 1

        # Separate out objects that were not re-detected in the current frame.
        missed_obj = [obj for obj in previously_detected_objects
                      if not hasattr(obj, "tracked_in_next_frame")]

        # Try to recover each missed object in the current frame.
        for obj in missed_obj:
            nn_index, nn_distance = nearest_neighbor(obj, detected_objects)

            # Double check: maybe it is already being tracked.
            if not math.isinf(nn_index):
                intersect = intersection(obj.bbox(), detected_objects[nn_index].bbox())
                if obj.w * obj.h == 0 or intersect / (obj.w * obj.h) > 0.7:
                    continue

            ok, bbox = obj.tracker.update(frame)
            bbox = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])

            # Found in the current frame by the tracker.
            if ok:
                lost_and_found = DetectedObject(obj.type, obj.probability,
                                                _x=bbox[0], _y=bbox[1],
                                                _w=bbox[2], _h=bbox[3])
                lost_and_found.correct_bbox(frame)
                lost_and_found.tracker = obj.tracker
                lost_and_found.serial = obj.serial
                detected_objects.append(lost_and_found)

        previously_detected_objects = list(detected_objects)

        # Draw rectangles around objects and check that their bboxes are correct.
        detected_objects_without_trackers = []
        for obj in detected_objects:
            cv2.rectangle(frame, obj.pt_a(), obj.pt_b(), Color.GREEN.value, 4)
            write_text(frame, f"{obj.serial}", (obj.cx(), obj.cy()))
            # Strip the (unpicklable) tracker before serializing.
            detected_objects_without_trackers.append(
                DetectedObject(obj.type, obj.probability, obj.x, obj.y, obj.w, obj.h))

        for obj in detected_objects_without_trackers:
            object_bbox_check(frame, obj)

        output_pickler.pickle(detected_objects_without_trackers, f"frame{frame_n:06d}")
        cv2.imwrite(f"output/frame{frame_n:06d}.jpg", frame)
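# For reference, a minimal sketch of the rectangle-overlap helper the recovery
# step relies on (the real intersection() lives elsewhere in the project; the
# (xa, ya, xb, yb) bbox layout is assumed from DetectedObject above):
def intersection_area(box_a, box_b):
    """Overlap area, in pixels, of two (xa, ya, xb, yb) boxes."""
    xa = max(box_a[0], box_b[0])
    ya = max(box_a[1], box_b[1])
    xb = min(box_a[2], box_b[2])
    yb = min(box_a[3], box_b[3])
    return max(0, xb - xa) * max(0, yb - ya)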