def process_images(self, images, tstamps, detections_of_interest=None):
    """Run the TensorFlow object detector on each frame and append regions.

    Args:
        images: iterable of frames convertible via ``np.array`` (channel order
            is flipped below before inference).
        tstamps: timestamps aligned 1:1 with ``images``.
        detections_of_interest: unused; kept for interface compatibility.

    Side effects:
        Appends one Region (bbox contour + classification Property) per
        detection scoring at or above CONFIDENCE_MIN to ``self.response``.
    """
    image_tensor = self._detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = self._detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = self._detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = self._detection_graph.get_tensor_by_name('detection_classes:0')
    num_pred_detections = self._detection_graph.get_tensor_by_name('num_detections:0')
    for frame, tstamp in zip(images, tstamps):
        frame_np = np.array(frame)
        # Channel flip (RGB<->BGR) — presumably the graph was trained on the
        # other channel order; TODO confirm against the model's input spec.
        frame_np = frame_np[..., ::-1]
        frame_np_expanded = np.expand_dims(frame_np, axis=0)
        (detections, scores, classes, num) = self._sess.run(
            [detection_boxes, detection_scores, detection_classes,
             num_pred_detections],
            feed_dict={image_tensor: frame_np_expanded})
        num = int(num[0])
        for det_idx in range(num):
            try:
                confidence = float(scores[0, det_idx])
                if confidence < CONFIDENCE_MIN:
                    continue
                # TF detection boxes are [ymin, xmin, ymax, xmax] per box.
                xmin = float(detections[0, det_idx, 1])
                ymin = float(detections[0, det_idx, 0])
                xmax = float(detections[0, det_idx, 3])
                ymax = float(detections[0, det_idx, 2])
                contour = [
                    Point(xmin, ymin),
                    Point(xmax, ymin),
                    Point(xmax, ymax),
                    Point(xmin, ymax),
                ]
                prop = Property(
                    property_type=self.prop_type,
                    server=self.server_name,
                    ver=self.version,
                    value=str(self.label_map[str(int(classes[0, det_idx]))]),
                    confidence=confidence,
                    confidence_min=CONFIDENCE_MIN)
                region = Region(contour, [prop])
                self.response.append_region(t=tstamp, region=region)
            except Exception:
                # Best-effort per detection: log and keep processing the
                # remaining detections/frames. (`as e` was unused — removed.)
                log.error(traceback.format_exc())
def process_images(self, images, tstamps, prev_regions=None):
    """Detect objects in each frame with the mmdetection model.

    Boxes scoring at or above ``self.confidence_min`` are normalized to the
    frame size and appended to ``self.response`` as regions, each carrying a
    single classification Property. ``prev_regions`` is accepted for
    interface compatibility and ignored.
    """
    for image, ts in zip(images, tstamps):
        height, width = image.shape[0], image.shape[1]
        results = inference_detector(self.model, image)
        # One entry per class; each entry is a set of (x0, y0, x1, y1, score) rows.
        for class_idx, boxes in enumerate(results):
            try:
                for x0, y0, x1, y1, score in boxes:
                    if score < self.confidence_min:
                        continue
                    class_label = self.model.CLASSES[class_idx]
                    # Normalize pixel coordinates to [0, 1]; helper clamps them.
                    contour = create_bbox_contour_from_points(
                        x0 / width, y0 / height,
                        x1 / width, y1 / height,
                        bound=True)
                    box_fraction = compute_box_area(contour)
                    detection_prop = Property(
                        confidence=float(score),
                        confidence_min=self.confidence_min,
                        ver=self.version,
                        server=self.name,
                        value=class_label,
                        property_type=self.prop_type,
                        fraction=box_fraction,
                    )
                    region = Region(contour=contour, props=[detection_prop])
                    self.response.append_region(t=ts, region=region)
            except Exception:
                log.error(traceback.format_exc())
def process_images(self, images, tstamps, prev_detections=None):
    """Run the Caffe detection net on each frame and record detections.

    Each output row at or above ``self.confidence_min`` becomes a Region with
    one classification Property on ``self.response``. ``prev_detections`` is
    accepted for interface compatibility and ignored.
    """
    for image, ts in zip(images, tstamps):
        blob = self.transformer.preprocess("data", image)
        self.net.blobs["data"].data[...] = blob
        output = self.net.forward()[LAYER_NAME]
        # Row layout used here: col 1 = class index, col 2 = confidence,
        # cols 3..6 = xmin, ymin, xmax, ymax (normalized).
        for row in range(output.shape[2]):
            try:
                score = float(output[0, 0, row, 2])
                if score < self.confidence_min:
                    continue
                class_index = int(output[0, 0, row, 1])
                class_label = self.labelmap[class_index]
                x0 = float(output[0, 0, row, 3])
                y0 = float(output[0, 0, row, 4])
                x1 = float(output[0, 0, row, 5])
                y1 = float(output[0, 0, row, 6])
                contour = create_bbox_contour_from_points(
                    x0, y0, x1, y1, bound=True)
                box_fraction = compute_box_area(contour)
                detection_prop = Property(
                    confidence=score,
                    confidence_min=self.confidence_min,
                    ver=self.version,
                    server=self.name,
                    value=class_label,
                    property_type=self.prop_type,
                    fraction=box_fraction,
                )
                self.response.append_region(
                    t=ts, region=Region(contour=contour, props=[detection_prop]))
            except Exception:
                log.error(traceback.format_exc())
def process_properties(self):
    """Extract frames from the request video and write them locally and/or to S3.

    Frames are fanned out to two WorkerManager thread pools (local write,
    S3 upload). If the contents key already exists on S3 the extraction is
    skipped and the existing URL is recorded. In all cases an "extraction"
    track is appended to ``self.response`` and the workers are shut down.
    """
    self._s3_write_manager = WorkerManager(
        func=self._upload_frame_helper,
        n=self.n_threads,
        max_queue_size=100,
        parallelization="thread",
    )
    self._local_write_manager = WorkerManager(
        func=self._write_frame_helper,
        n=self.n_threads,
        max_queue_size=100,
        parallelization="thread",
    )
    self.last_tstamp = 0.0
    log.info("Processing")
    self.video_url = self.response.request.url
    self.med_ret = MediaRetriever(self.video_url)
    self.contents_file_key = get_contents_file_s3_key(
        self.video_url, self._sample_rate)
    video_id = self.contents_file_key.split("/")[0]
    if self._local_dir is not None:
        self._mklocaldirs("{}/{}".format(self._local_dir, video_id))
        self._mklocaldirs("{}/{}/frames".format(self._local_dir, video_id))
        if os.path.exists("{}/{}".format(self._local_dir,
                                         self.contents_file_key)):
            log.info("Local Video already exists")
    try:
        # A pre-existing contents key on S3 means this video was already
        # extracted: reuse its URL and skip re-extraction.
        self._s3_client.head_object(Bucket=self._s3_bucket,
                                    Key=self.contents_file_key)
        new_url = self._s3_url_format.format(bucket=self._s3_bucket,
                                             s3_key=self.contents_file_key)
        log.info("Video already exists")
        p = Property(
            server=self.name,
            ver=self.version,
            value=new_url,
            property_type="extraction",
            property_id=1,
        )
        track = VideoAnn(t1=0.0, t2=float(self.last_tstamp), props=[p])
        self.response.append_track(track)
        self._s3_write_manager.kill_workers_on_completion()
        self._local_write_manager.kill_workers_on_completion()
        return
    except Exception:
        # head_object raises when the key is absent — proceed with extraction.
        # (Was a bare `except:` — narrowed to Exception.)
        pass
    contents = []
    log.info("Getting frames")
    for i, (frame, tstamp_secs) in enumerate(
            self.med_ret.get_frames_iterator(sample_rate=self._sample_rate)):
        # round(), not int(): int(16.016 * 1000) == 16015 due to float
        # representation, while round() gives 16016 — this matches the
        # ThreadPoolExecutor-based extractor elsewhere in this file.
        tstamp = round(tstamp_secs * 1000)
        if i % 100 == 0:
            log.info("...tstamp: " + str(tstamp))
        log.debug("tstamp: " + str(tstamp))
        if frame is None:
            continue
        frame = np.ascontiguousarray(frame[:, :, ::-1])  # RGB to BGR
        self.last_tstamp = tstamp
        data = {"frame": frame, "tstamp": tstamp, "video_id": video_id}
        contents.append((video_id, tstamp))
        if self._local_dir is not None:
            self._local_write_manager.queue.put(data)
        if self._s3_bucket is not None:
            self._s3_write_manager.queue.put(data)
    if self._s3_bucket is not None:
        self._add_contents_to_s3(contents)
    if self._local_dir is not None:
        self._add_contents_to_local(contents)
    self.response.url_original = self.video_url
    new_url = self._s3_url_format.format(bucket=self._s3_bucket,
                                         s3_key=self.contents_file_key)
    self.response.url = new_url
    p = Property(
        server=self.name,
        ver=self.version,
        value=new_url,
        property_type="extraction",
        property_id=1,
    )
    track = VideoAnn(t1=0.0, t2=float(self.last_tstamp), props=[p])
    self.response.append_track(track)
    self._s3_write_manager.kill_workers_on_completion()
    self._local_write_manager.kill_workers_on_completion()
def process_images(self, images, tstamps, prev_regions):
    """Classify each frame (optionally cropped to a previous region) with Caffe.

    For each frame, the top-N labels — after skipping LOGOEXCLUDE labels off
    the top position (always keeping at least one candidate) — are attached
    either to the matching previous region's props or to a new region on
    ``self.response``.

    Args:
        images: frames aligned with ``tstamps`` and ``prev_regions``.
        tstamps: timestamps, one per frame.
        prev_regions: per-frame dict exposing "contour"/"props" via .get(),
            or None to classify the whole frame.
    """
    assert len(images) == len(tstamps) == len(prev_regions)
    for frame, tstamp, prev_region in zip(images, tstamps, prev_regions):
        log.debug("caffe classifier tstamp: " + str(tstamp))
        try:
            if prev_region is not None:
                frame = crop_image_from_bbox_contour(
                    frame, prev_region.get("contour"))
            im = self.transformer.preprocess("data", frame)
            self.net.blobs["data"].data[...] = im
            probs = self.net.forward()[self.layer_name]
            target_shape = (1, len(self.labels))
            # Fixed: was `(probs.shape == target_shape) is False`.
            if probs.shape != target_shape:
                log.debug("Changing shape " + str(probs.shape) + "->" +
                          str(target_shape))
                probs = np.reshape(probs, target_shape)
            props = []
            for p in probs:
                # Highest-probability indices first.
                p_indexes = np.flip(np.argsort(p), 0)
                # Drop excluded labels from the top, keeping at least one.
                while len(p_indexes) > 1:
                    label = self.labels[p_indexes[0]]
                    log.debug("label: " + str(label))
                    if label in LOGOEXCLUDE:
                        p_indexes = np.delete(p_indexes, 0)
                    else:
                        break
                # Slicing already caps at top_n; the old inner
                # `if i == self.top_n: break` was redundant, and its `i`
                # shadowed the outer loop index.
                for index in p_indexes[:self.top_n]:
                    label = self.labels[index]
                    confidence = p[index]
                    # TODO remove this unknown
                    if confidence < self.confidence_min:
                        label = "Unknown"
                    prop = Property(
                        server=self.name,
                        ver=self.version,
                        value=label,
                        property_type=self.prop_type,
                        confidence=float(confidence),
                        confidence_min=float(self.confidence_min),
                    )
                    if prev_region is not None:
                        prev_region.get("props").append(prop)
                    else:
                        props.append(prop)
            if prev_region is None:
                self.response.append_region(t=tstamp,
                                            region=Region(props=props))
        except Exception as e:
            # Fixed: traceback.print_exc() returns None (it prints to
            # stderr), so the old code logged "None".
            log.error(traceback.format_exc())
            log.error(e)
def process_properties(self, dump_video=True, dump_images=False,
                       tstamps_Of_Interest=None):
    """Render annotated frames and dump them as a video and/or images.

    Frames with region annotations get boxes/labels drawn; frames that were
    processed but have no regions are grayed out; unprocessed frames are
    skipped. Output goes to a local dump folder, optionally uploaded to S3,
    and a media summary is appended to self.response.

    Args:
        dump_video: fallback when the request carries no "dump_video" flag.
        dump_images: fallback when the request carries no "dump_images" flag.
        tstamps_Of_Interest: optional list of timestamps to render; when
            None, frames are sampled from the media at 1 fps.
            NOTE(review): an empty list falls through both branches below
            and yields no frames — confirm that is intended.
    """
    self.last_tstamp = 0.0
    assert (self.response)
    self.med_ret = MediaRetriever(self.response.url)
    self.w, self.h = self.med_ret.get_w_h()
    # Sanitize the media id derived from the URL basename.
    media_id = os.path.basename(self.response.url).rsplit(".", 1)[0]
    self.media_id = "".join(
        [e for e in media_id if e.isalnum() or e in ["/", "."]])
    self.content_type_map = {}
    # If there is no flag in the request (or no request API) we'll get None.
    self.dump_video = None
    self.dump_images = None
    try:
        self.dump_video = self.request.get("dump_video")
        self.dump_images = self.request.get("dump_images")
    except Exception:
        log.error(
            "Unable to get flags from request dump_video or dump_images")
        pass
    # Fall back to the keyword defaults when the request gave no flags.
    if self.dump_video is None:
        self.dump_video = dump_video
    if self.dump_images is None:
        self.dump_images = dump_images
    if self.dump_video is False and self.dump_images is False:
        log.warning(
            "Not dumping anything--you might want to dump something.")
        return
    dump_folder = self.pushing_folder + '/' + self.media_id + '/'
    self.dumping_folder_url = dump_folder
    if dump_folder:
        if not os.path.exists(dump_folder):
            os.makedirs(dump_folder)
    if self.dump_video:
        # `vid` is only defined on this path; later writes/release are
        # guarded by the same flag.
        filename = dump_folder + '/video.mp4'
        fps = 1
        frameSize = self.med_ret.shape
        frameSize = (self.w, self.h)  # overrides the line above
        fourcc = cv2.VideoWriter_fourcc(*'H264')
        log.info("filename: " + filename)
        log.info("fourcc: " + str(fourcc))
        log.info("type(fourcc): " + str(type(fourcc)))
        log.info("fps: " + str(fps))
        log.info("type(fps): " + str(type(fps)))
        log.info("frameSize: " + str(frameSize))
        log.info("type(frameSize): " + str(type(frameSize)))
        vid = cv2.VideoWriter(filename, fourcc, fps, frameSize)
        self.content_type_map[os.path.basename(filename)] = 'video/mp4'
    # Text-drawing parameters shared by all cv2.putText calls below.
    face = cv2.FONT_HERSHEY_SIMPLEX
    scale = 0.65
    thickness = 2
    # we get the image_annotation tstamps
    tstamps = self.response.get_timestamps()
    tstamp_frame_anns = self.response.get_timestamps_from_frames_ann()
    log.debug('tstamps: ' + str(tstamps))
    log.debug('tstamps_dets: ' + str(tstamp_frame_anns))
    # we get the frame iterator
    frames_iterator = []
    if tstamps_Of_Interest:
        if type(tstamps_Of_Interest) is list:
            for t in tstamps_Of_Interest:
                frame = self.med_ret.get_frame(tstamp=t)
                frames_iterator.append((frame, t))
    elif tstamps_Of_Interest is None:
        try:
            frames_iterator = self.med_ret.get_frames_iterator(
                sample_rate=1.0)
        except Exception:
            log.error(traceback.format_exc())
            raise Exception("Error loading media")
    for i, (img, tstamp) in enumerate(frames_iterator):
        self.last_tstamp = tstamp
        if img is None:
            log.warning("Invalid frame")
            continue
        if tstamp is None:
            log.warning("Invalid tstamp")
            continue
        # log.info('tstamp: ' + str(tstamp))
        if tstamp in tstamp_frame_anns:
            log.debug("drawing frame for tstamp: " + str(tstamp))
            # we get the regions annotated at this timestamp
            regions = self.response.get_regions_from_tstamp(tstamp)
            # log.info(json.dumps(image_ann, indent=2))
            for region in regions:
                rand_color = get_rand_bgr()
                p0, p1 = p0p1_from_bbox_contour(region['contour'], self.w,
                                                self.h)
                # Anchor label text just inside the box; move it up when the
                # box bottom is within 30px of the frame edge.
                anchor_point = [p0[0] + 3, p1[1] - 3]
                if abs(p1[1] - self.h) < 30:
                    anchor_point = [p0[0] + 3, int(p1[1] / 2) - 3]
                img = cv2.rectangle(img, p0, p1, rand_color, thickness)
                prop_strs = get_props_from_region(region)
                # NOTE(review): this `i` shadows the outer frame index.
                for i, prop in enumerate(prop_strs):
                    img = cv2.putText(
                        img, prop,
                        (anchor_point[0], anchor_point[1] + i * 25), face,
                        1.0, rand_color, thickness)
        elif tstamp in tstamps:
            # Processed but no regions: gray the frame out as a visual cue.
            log.debug("Making frame gray")
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        else:
            log.debug("No processed frame")
            continue
        # Include the timestamp
        img = cv2.putText(img, str(tstamp), (20, 20), face, scale,
                          [255, 255, 255], thickness)
        if self.dump_video:
            # we add the frame
            log.debug("Adding frame")
            vid.write(img)
        if self.dump_images:
            # we dump the frame
            outfn = "{}/{}.jpg".format(dump_folder, tstamp)
            log.debug("Writing to file: {}".format(outfn))
            cv2.imwrite(outfn, img)
            self.content_type_map[os.path.basename(outfn)] = 'image/jpeg'
    if self.dump_video:
        vid.release()
    if self.s3_bucket:
        try:
            self.upload_files(dump_folder)
        except Exception:
            log.error(traceback.format_exc())
        # Clean up the local copies after upload when using the default
        # dump location.
        if self.pushing_folder == DEFAULT_DUMP_FOLDER:
            log.info('Removing files in ' + dump_folder)
            shutil.rmtree(dump_folder)
    # Record what was dumped on the response as a media summary.
    props = []
    if self.dump_images:
        props.append(
            Property(
                server=self.name,
                ver=self.version,
                value=self.dumping_folder_url,
                property_type="dumped_images",
                property_id=1,
            ))
    if self.dump_video:
        dumped_video_url = self.dumping_folder_url + '/video.mp4'
        # Collapse doubled slashes, then restore the scheme separator.
        dumped_video_url = dumped_video_url.replace('//', '/')
        dumped_video_url = dumped_video_url.replace('https:/', 'https://')
        props.append(
            Property(
                server=self.name,
                ver=self.version,
                value=dumped_video_url,
                property_type="dumped_video",
                property_id=2,
            ))
    media_summary = VideoAnn(t1=0.0, t2=self.last_tstamp, props=props)
    self.response.append_media_summary(media_summary)
def process_properties(self):
    """Extract frames from the request video and write them locally and/or to S3.

    Frame writes/uploads are fanned out through a ThreadPoolExecutor. If the
    contents key already exists on S3 the extraction is skipped and the
    existing URL is recorded. An "extraction" track is appended to
    ``self.response`` on success; on media-load failure ``self.code`` is set
    and the method returns early.
    """
    self.last_tstamp = 0.0
    log.info("Processing")
    self.video_url = self.response.request.url
    try:
        log.info(f"Loading media from url: {self.response.request.url}")
        self.med_ret = MediaRetriever(self.video_url)
    except Exception as e:
        log.error(e)
        # Fixed: traceback.print_exc() returns None (it prints to stderr),
        # so the old code logged "None" instead of the traceback.
        log.error(traceback.format_exc())
        self.code = Codes.ERROR_LOADING_MEDIA
        return
    self.contents_file_key = get_contents_file_s3_key(self.video_url,
                                                      self._sample_rate)
    video_id = self.contents_file_key.split("/")[0]
    if self._local_dir is not None:
        self._mklocaldirs("{}/{}".format(self._local_dir, video_id))
        self._mklocaldirs("{}/{}/frames".format(self._local_dir, video_id))
        if os.path.exists("{}/{}".format(self._local_dir,
                                         self.contents_file_key)):
            log.info("Local Video already exists")
    try:
        # A pre-existing contents key on S3 means this video was already
        # extracted: reuse its URL and skip re-extraction.
        self._s3_client.head_object(
            Bucket=self._s3_bucket, Key=self.contents_file_key
        )
        new_url = self._s3_url_format.format(
            bucket=self._s3_bucket, s3_key=self.contents_file_key
        )
        log.info("Video already exists")
        p = Property(
            server=self.name,
            ver=self.version,
            value=new_url,
            property_type="extraction",
            property_id=1,
        )
        track = VideoAnn(t1=0.0, t2=float(self.last_tstamp), props=[p])
        self.response.append_track(track)
        return
    except Exception:
        # head_object raises when the key is absent — proceed to extract.
        pass
    contents = []
    log.info("Getting frames")
    with ThreadPoolExecutor(max_workers=self.n_threads) as writer:
        for i, (frame, tstamp_secs) in enumerate(
            self.med_ret.get_frames_iterator(sample_rate=self._sample_rate)
        ):
            # int(16.016 * 1000) == 16015, but round(16.016 * 1000) == 16016
            tstamp = round(tstamp_secs * 1000)
            if i % 100 == 0:
                log.info("...tstamp: " + str(tstamp))
            log.debug("tstamp: " + str(tstamp))
            if frame is None:
                continue
            frame = np.ascontiguousarray(frame[:, :, ::-1])  # RGB to BGR
            self.last_tstamp = tstamp
            data = {"frame": frame, "tstamp": tstamp, "video_id": video_id}
            contents.append((video_id, tstamp))
            if self._local_dir is not None:
                writer.submit(self._write_frame_helper, data)
            if self._s3_bucket is not None:
                writer.submit(self._upload_frame_helper, data)
    if self._s3_bucket is not None:
        self._add_contents_to_s3(contents)
    if self._local_dir is not None:
        self._add_contents_to_local(contents)
    new_url = self._s3_url_format.format(
        bucket=self._s3_bucket, s3_key=self.contents_file_key
    )
    p = Property(
        server=self.name,
        ver=self.version,
        value=new_url,
        property_type="extraction",
        property_id=1,
    )
    track = VideoAnn(t1=0.0, t2=float(self.last_tstamp), props=[p])
    self.response.append_track(track)
def process_properties(self, dump_video=True, dump_images=False,
                       tstamps_of_interest=None):
    """Dump annotated media as a video and/or individual frames.

    Request flags override the keyword defaults; video dumping is only
    possible for video media, and image media always dumps images. Results
    are copied to the local dir and/or uploaded to S3, and recorded on the
    response as a media summary.

    Args:
        dump_video: default for dumping an annotated video.
        dump_images: default for dumping individual annotated frames.
        tstamps_of_interest: optional list of timestamps to render; when
            None, frames are sampled from the media at 1 fps.
    """
    assert (isinstance(self.response, Response))
    self.med_ret = MediaRetriever(self.response.url)
    dump_video = (self.request.get("dump_video") or dump_video) and \
        self.med_ret.is_video
    # `or self.med_ret.is_image` already forces image dumping for image
    # media — the old follow-up `if dump_images is False and
    # self.med_ret.is_image:` branch was unreachable and has been removed.
    dump_images = (self.request.get("dump_images") or dump_images) or \
        self.med_ret.is_image
    if dump_video is False and dump_images is False:
        log.warning("`dump_video` and `dump_images` are both false."
                    " Unable to proceed.")
        return
    log.debug(f"Dumping Video: {dump_video}")
    log.debug(f"Dumping Frames: {dump_images}")
    # we get the frame iterator
    frames_iterator = []
    if tstamps_of_interest is not None:
        if type(tstamps_of_interest) is list:
            for t in tstamps_of_interest:
                frame = self.med_ret.get_frame(tstamp=t)
                frames_iterator.append((frame, t))
    else:
        try:
            frames_iterator = self.med_ret.get_frames_iterator(
                sample_rate=1.0)
        except Exception:
            log.error(traceback.format_exc())
            raise Exception("Error loading media")
    vid_file, images_dir, max_tstamp = self.dump_data(
        frames_iterator, dump_video=dump_video, dump_images=dump_images)
    # Build one Property per dumped artifact; property_id distinguishes
    # local (3/4) from S3 (1/2) and image (1/3) from video (2/4).
    props = []
    if self.local_dir is not None and dump_video:
        local_vid_path = self.copy_video(vid_file.name)
        p = Property(
            server=self.name,
            ver=self.version,
            value=local_vid_path,
            property_type="dumped_video",
            property_id=4,
        )
        props.append(p)
    if self.local_dir is not None and dump_images:
        local_frames_paths = self.copy_frames(images_dir.name)
        ps = [
            Property(
                server=self.name,
                ver=self.version,
                value=path,
                property_type="dumped_image",
                property_id=3,
            ) for path in local_frames_paths
        ]
        props.extend(ps)
    if self.s3_bucket is not None and dump_video:
        s3_vid_url = self.upload_video(vid_file.name)
        p = Property(
            server=self.name,
            ver=self.version,
            value=s3_vid_url,
            property_type="dumped_video",
            property_id=2,
        )
        props.append(p)
    if self.s3_bucket is not None and dump_images:
        s3_frames_urls = self.upload_frames(images_dir.name)
        ps = [
            Property(
                server=self.name,
                ver=self.version,
                value=url,
                property_type="dumped_image",
                property_id=1,
            ) for url in s3_frames_urls
        ]
        props.extend(ps)
    # Release the temporary artifacts produced by dump_data.
    images_dir.cleanup()
    vid_file.close()
    media_summary = VideoAnn(t1=0.0, t2=max_tstamp, props=props)
    self.response.append_media_summary(media_summary)