def gst_record(options):
    # If the video path is a local file and not a uri, prepend file:// to the path
    if os.path.isfile(options.input_video_path):
        options_source = "file://" + options.input_video_path
    else:
        options_source = options.input_video_path

    options_metadata_file = options.metadata_file_path
    if not options_metadata_file:
        options_metadata_file = default_metadata_record_path

    # Populate the request to provide to the VAServing library
    string_request = ('{{'
                      '"source": {{'
                      '"type": "uri",'
                      '"uri": "{source}"'
                      '}},'
                      '"destination": {{'
                      '"type": "file",'
                      '"path": "{fp}",'
                      '"format": "json-lines"'
                      '}},'
                      '"parameters": {{'
                      '"recording_prefix": "{output_video_folder}",'
                      '"max-size-time": {max_size_chunks}'
                      '}}'
                      '}}')
    string_request = string_request.format(
        source=options_source,
        fp=options_metadata_file,
        output_video_folder=options.output_video_folder,
        max_size_chunks=options.max_time)
    request = json.loads(string_request)

    # Start the recording; once complete, stop VAServing
    VAServing.start({'log_level': 'INFO'})
    pipeline = VAServing.pipeline("object_detection", "2")
    pipeline.start(request)
    status = pipeline.status()
    while not status.state.stopped():
        time.sleep(0.1)
        status = pipeline.status()
    VAServing.stop()

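# Illustrative usage sketch (not part of the sample): an argparse wrapper that
# produces the option attributes gst_record() reads (input_video_path,
# metadata_file_path, output_video_folder, max_time). Flag names and defaults
# here are assumptions for demonstration only.
import argparse

def parse_record_args(args=None):
    parser = argparse.ArgumentParser(
        description="Record a video source while saving inference metadata")
    parser.add_argument("--input-video-path", dest="input_video_path", required=True)
    parser.add_argument("--metadata-file-path", dest="metadata_file_path", default=None)
    parser.add_argument("--output-video-folder", dest="output_video_folder",
                        default="/tmp/video_output")
    # Assumed to map to GStreamer's max-size-time (nanoseconds per recorded segment)
    parser.add_argument("--max-time", dest="max_time", type=int, default=2000000000)
    return parser.parse_args(args)

# gst_record(parse_record_args())
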
def gst_playback(options):
    location = ""
    start_pts = 0
    if os.path.isdir(options.input_video_path):
        location = options.input_video_path + "/*.mp4"
    else:
        start_pts = get_timestamp_from_filename(options.input_video_path)
        location = options.input_video_path

    # Populate the request to provide to the VAServing library
    module = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'preproc_callbacks/insert_metadata.py')
    metadata_args = {
        "metadata_file_path": options.metadata_file_path,
        "offset_timestamp": start_pts
    }
    request = {
        "source": {
            "type": "path",
            "path": location
        },
        "parameters": {
            "module": module,
            "kwarg": metadata_args
        }
    }

    # Start the playback; once complete, stop VAServing
    record_playback_file_dir = os.path.dirname(os.path.realpath(__file__))
    VAServing.start({
        'log_level': 'INFO',
        'pipeline_dir': os.path.join(record_playback_file_dir, "pipelines")
    })
    pipeline = VAServing.pipeline("recording_playback", "playback")
    pipeline.start(request)
    status = pipeline.status()
    while not status.state.stopped():
        time.sleep(0.1)
        status = pipeline.status()
    VAServing.stop()

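# Hypothetical sketch of the get_timestamp_from_filename() helper used above.
# It assumes recorded segments end with an underscore-separated numeric
# timestamp in the file name (e.g. rec_1638284400000000000.mp4); the sample's
# real naming scheme and parsing logic may differ.
def get_timestamp_from_filename(video_path):
    stem = os.path.splitext(os.path.basename(video_path))[0]
    try:
        return int(stem.rsplit("_", 1)[-1])
    except ValueError:
        return 0
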
def loop(self, reqs, _pipeline, _version="1"):
    pipeline = VAServing.pipeline(_pipeline, _version)
    instance_id = pipeline.start(source=reqs["source"],
                                 destination=reqs["destination"],
                                 tags=reqs["tags"],
                                 parameters=reqs["parameters"])
    if instance_id is None:
        print("Pipeline {} version {} Failed to Start".format(
            _pipeline, _version), flush=True)
        return -1

    fps = 0
    while True:
        status = pipeline.status()
        print(status, flush=True)
        if status.state.stopped():
            print("Pipeline {} Version {} Instance {} Ended with {}".format(
                _pipeline, _version, instance_id, status.state.name),
                flush=True)
            if status.state is Pipeline.State.COMPLETED:
                fps = status.avg_fps
            print("Status analysis: Timing {0} {1} {2} {3} {4}".format(
                reqs["start_time"], status.start_time, status.elapsed_time,
                reqs["user"], reqs["source"]["uri"]), flush=True)
            break
        if status.state is Pipeline.State.ABORTED or status.state is Pipeline.State.ERROR:
            return -1
        time.sleep(self._pause)

    pipeline.stop()
    print("exiting va pipeline", flush=True)
    return fps

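# Illustrative request dictionary (assumption, for demonstration only) with the
# keys loop() reads: source, destination, tags, parameters, plus the user and
# start_time fields echoed in the timing log line.
example_reqs = {
    "source": {"type": "uri", "uri": "file:///home/video/input.mp4"},
    "destination": {"type": "file", "path": "/tmp/results.jsonl",
                    "format": "json-lines"},
    "tags": {"camera": "cam-01"},
    "parameters": {"detection-device": "CPU"},
    "user": "load-test-0",
    "start_time": time.time(),
}
# fps = runner.loop(example_reqs, "object_detection", "1")   # runner: hypothetical instance
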
def ProcessMediaStream(self, requestIterator, context):
    requests_received = 0
    responses_sent = 0
    # First message from the client is (must be) MediaStreamDescriptor
    request = next(requestIterator)
    requests_received += 1
    # Extract message IDs
    request_seq_num = request.sequence_number
    request_ack_seq_num = request.ack_sequence_number
    # State object per client
    client_state = State(request.media_stream_descriptor)
    self._logger.info(
        "[Received] SeqNum: {0:07d} | "
        "AckNum: {1}\nMediaStreamDescriptor:\n{2}".format(
            request_seq_num,
            request_ack_seq_num,
            client_state.media_stream_descriptor,
        ))
    # First message response ...
    media_stream_message = extension_pb2.MediaStreamMessage(
        sequence_number=1,
        ack_sequence_number=request_seq_num,
        media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
            media_descriptor=media_pb2.MediaDescriptor(
                timescale=client_state.media_stream_descriptor
                .media_descriptor.timescale)),
    )
    responses_sent += 1
    yield media_stream_message

    final_pipeline_parameters = {}
    if self._version.startswith("debug"):
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        location = os.path.join(tempfile.gettempdir(), "vaserving",
                                self._version, timestamp)
        os.makedirs(os.path.abspath(location))
        final_pipeline_parameters = {
            "location": os.path.join(location, "frame_%07d.jpeg")
        }

    try:
        if self._pipeline_parameter_arg:
            pipeline_parameters = {}
            if os.path.isfile(self._pipeline_parameter_arg):
                with open(self._pipeline_parameter_arg) as json_file:
                    pipeline_parameters = json.load(json_file)
            else:
                pipeline_parameters = json.loads(self._pipeline_parameter_arg)
            final_pipeline_parameters.update(pipeline_parameters)
    except ValueError:
        self._logger.error("Issue loading json parameters: {}".format(
            self._pipeline_parameter_arg))
        raise

    self._logger.info("Pipeline Name : {}".format(self._pipeline))
    self._logger.info("Pipeline Version : {}".format(self._version))
    self._logger.info(
        "Pipeline Parameters : {}".format(final_pipeline_parameters))

    detect_input = Queue(maxsize=self._input_queue_size)
    detect_output = Queue()

    # Start object detection pipeline
    # It will wait until it receives frames via the detect_input queue
    detect_pipeline = VAServing.pipeline(self._pipeline, self._version)
    detect_pipeline.start(
        source={
            "type": "application",
            "class": "GStreamerAppSource",
            "input": detect_input,
            "mode": "push",
        },
        destination={
            "type": "application",
            "class": "GStreamerAppDestination",
            "output": detect_output,
            "mode": "frames",
        },
        parameters=final_pipeline_parameters,
    )

    # Process rest of the MediaStream message sequence
    for request in requestIterator:
        try:
            if requests_received - responses_sent >= self._input_queue_size:
                queued_samples = self._get_queued_samples(detect_output,
                                                          block=True)
            else:
                queued_samples = []
            # Read request id, sent by client
            request_seq_num = request.sequence_number
            self._logger.info(
                "[Received] SeqNum: {0:07d}".format(request_seq_num))
            requests_received += 1
            gva_sample = self._generate_gva_sample(client_state, request)
            detect_input.put(gva_sample)
            queued_samples.extend(self._get_queued_samples(detect_output))
            if context.is_active():
                for gva_sample in queued_samples:
                    if gva_sample:
                        media_stream_message = self._generate_media_stream_message(
                            gva_sample)
                        responses_sent += 1
                        self._logger.info("[Sent] AckSeqNum: {0:07d}".format(
                            media_stream_message.ack_sequence_number))
                        yield media_stream_message
            else:
                break
            if detect_pipeline.status().state.stopped():
                break
        except:
            log_exception(self._logger)
            raise

    if detect_pipeline.status().state.stopped():
        try:
            raise Exception(detect_pipeline.status().state)
        except:
            log_exception(self._logger)
            raise

    # After the server has finished processing all the request iterator objects
    # Push a None object into the input queue.
    # When the None object comes out of the output queue, we know we've finished
    # processing all requests
    gva_sample = None
    if not detect_pipeline.status().state.stopped():
        detect_input.put(None)
        gva_sample = detect_output.get()
    while gva_sample:
        media_stream_message = self._generate_media_stream_message(gva_sample)
        responses_sent += 1
        self._logger.info("[Sent] AckSeqNum: {0:07d}".format(
            media_stream_message.ack_sequence_number))
        if context.is_active():
            yield media_stream_message
        gva_sample = detect_output.get()

    self._logger.info(
        "Done processing messages: Received: {}, Sent: {}".format(
            requests_received, responses_sent))
    self._logger.info("MediaStreamDescriptor:\n{0}".format(
        client_state.media_stream_descriptor))

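# Hypothetical sketch of the _get_queued_samples() helper referenced above,
# assuming it drains whatever GVA samples are currently queued on the
# detection pipeline's output, optionally blocking for at least one sample
# (used above to apply back-pressure). The actual extension may implement
# this differently.
def _get_queued_samples(self, queue, block=False):
    samples = []
    if block:
        samples.append(queue.get())
    while not queue.empty():
        samples.append(queue.get_nowait())
    return samples
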
" DRIVER_RESPONSETOPIC: Edgex-command-response\n"\ " volumes:\n"\ " - ./res/device-mqtt-go/:/res/\n"\ "version: '3.7'" with open(compose_dest, 'w') as override_file: override_file.write(COMPOSE.format(**parameters["edgexbridge"])) print("Generated EdgeX Compose Override:\n{}\n\n".format(compose_dest)) else: # Raise error if compose override does not exist, expecting the generation to # complete at least once. if os.path.isfile(compose_dest): pipeline_name = "object_detection" pipeline_version = "edgex" VAServing.start({'log_level': 'INFO', "ignore_init_errors":False}) pipeline = VAServing.pipeline(pipeline_name, pipeline_version) source = {"uri":args.source, "type":"uri"} destination = {"type":"mqtt", "host":args.destination, "topic":'edgex_bridge/'+args.topic} pipeline.start(source=source, destination=destination, parameters=parameters) start_time = None start_size = 0 VAServing.wait() else: print("ERROR: Invoke edgex_bridge.py with '--generate' to prepare EdgeX Foundry.") except KeyboardInterrupt: pass except Exception: print("Error processing script: {}".format(traceback.print_exc())) VAServing.stop()
    return parser.parse_args(args)


if __name__ == "__main__":
    args = parse_args()
    decode_output = Queue()
    detect_input = Queue()
    detect_output = Queue()
    VAServing.start({'log_level': 'INFO', "ignore_init_errors": True})

    parameters = None
    if args.parameters:
        parameters = json.loads(args.parameters)

    # Start object detection pipeline
    # It will wait until it receives frames via the detect_input queue
    detect_pipeline = VAServing.pipeline(args.pipeline, args.pipeline_version)
    detect_pipeline.start(
        source={
            "type": "application",
            "class": "GStreamerAppSource",
            "input": detect_input,
            "mode": args.source_mode
        },
        destination={
            "type": "application",
            "class": "GStreamerAppDestination",
            "output": detect_output,
            "mode": "frames"
        },
        parameters=parameters)

    # Start decode only pipeline.
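    # The fragment ends above. What follows is a hypothetical sketch (not the
    # sample's actual continuation) of the shuttling loop such a script
    # typically runs once both pipelines are started: decoded samples are
    # forwarded into detect_input, and detection results are drained from
    # detect_output until a falsy end-of-stream sentinel appears.
    #
    # sample = decode_output.get()
    # while sample is not None:
    #     detect_input.put(sample)
    #     while not detect_output.empty():
    #         result = detect_output.get()
    #         if result:
    #             print(result, flush=True)
    #     sample = decode_output.get()
    # detect_input.put(None)        # propagate end-of-stream to the detection pipeline
    # result = detect_output.get()
    # while result:
    #     print(result, flush=True)
    #     result = detect_output.get()
    # VAServing.stop()
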
def loop(self, sensor, location, uri, algorithm, algorithmName,
         options={}, topic="analytics"):
    try:
        VAServing.start({
            'model_dir': '/home/models',
            'pipeline_dir': '/home/pipelines',
            'max_running_pipelines': 1,
        })
        try:
            source = {
                "type": "uri",
                "uri": uri,
            }
            destination = {
                "type": "mqtt",
                "host": mqtthost,
                "clientid": algorithm,
                "topic": topic
            }
            tags = {
                "sensor": sensor,
                "location": location,
                "algorithm": algorithmName,
                "office": {
                    "lat": office[0],
                    "lon": office[1]
                },
            }
            parameters = {
                "inference-interval": every_nth_frame,
                "recording_prefix": "/tmp/rec/" + sensor
            }
            parameters.update(options)

            pipeline = VAServing.pipeline(self._pipeline, self._version)
            instance_id = pipeline.start(source=source,
                                         destination=destination,
                                         tags=tags,
                                         parameters=parameters)
            if instance_id is None:
                raise Exception(
                    "Pipeline {} version {} Failed to Start".format(
                        self._pipeline, self._version))

            self._stop = Event()
            while not self._stop.is_set():
                status = pipeline.status()
                print(status, flush=True)
                if status.state.stopped():
                    print("Pipeline {} Version {} Instance {} Ended with {}".format(
                        self._pipeline, self._version, instance_id,
                        status.state.name), flush=True)
                    break
                if status.avg_fps > 0 and status.state is Pipeline.State.RUNNING:
                    avg_pipeline_latency = status.avg_pipeline_latency
                    if not avg_pipeline_latency:
                        avg_pipeline_latency = 0
                    self._db.update(
                        algorithm, {
                            "sensor": sensor,
                            "performance": status.avg_fps,
                            "latency": avg_pipeline_latency * 1000,
                            "cpu": psutil.cpu_percent(),
                            "memory": psutil.virtual_memory().percent,
                        })
                self._stop.wait(3)
            self._stop = None
            pipeline.stop()
        except:
            print(traceback.format_exc(), flush=True)
        VAServing.stop()
    except:
        print(traceback.format_exc(), flush=True)

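# Illustrative placeholders (assumptions) for the module-level settings loop()
# references above; the real sample defines these elsewhere, typically from its
# environment or configuration.
mqtthost = "localhost:1883"     # MQTT broker that receives inference results
office = (0.0, 0.0)             # (lat, lon) tagged onto each result
every_nth_frame = 6             # becomes the inference-interval parameter
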
def gst_record(options):
    # If the video path is a local file and not a uri, prepend file:// to the path
    if os.path.isfile(options.input_video_path):
        options_source = "file://" + options.input_video_path
    else:
        options_source = options.input_video_path

    options_metadata_file = options.metadata_file_path
    if not options_metadata_file:
        options_metadata_file = default_metadata_record_path

    # Check if we have write permissions for the metadata file location
    try:
        file_handler = open(options_metadata_file, 'w')
        file_handler.close()
    except IOError:
        print("No write permissions for metadata file location")
        return -1

    # If the metadata file already exists, delete it
    if os.path.exists(options_metadata_file):
        os.remove(options_metadata_file)

    # If the output video directory doesn't exist, create it
    if not os.path.isdir(options.output_video_folder):
        os.mkdir(options.output_video_folder)

    # Check if the directory has write permissions
    try:
        file_check_write_permissions = os.path.join(
            options.output_video_folder, "checkDirWritable.txt")
        file_handler = open(file_check_write_permissions, 'w')
        file_handler.close()
        os.remove(file_check_write_permissions)
    except IOError:
        print("No write permissions for video output directory")
        return -1

    # Populate the request to provide to the VAServing library
    request = {
        "source": {
            "type": "uri",
            "uri": options_source
        },
        "destination": {
            "type": "file",
            "path": options_metadata_file,
            "format": "json-lines"
        },
        "parameters": {
            "recording_prefix": options.output_video_folder,
            "max-size-time": options.max_time
        }
    }

    # Start the recording; once complete, stop VAServing
    record_playback_file_dir = os.path.dirname(os.path.realpath(__file__))
    VAServing.start({
        'log_level': 'INFO',
        'pipeline_dir': os.path.join(record_playback_file_dir, "pipelines")
    })
    pipeline = VAServing.pipeline("object_detection", "segment_record")
    pipeline.start(request)
    status = pipeline.status()
    while not status.state.stopped():
        time.sleep(0.1)
        status = pipeline.status()
    VAServing.stop()

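# Illustrative end-to-end usage (assumption): record a clip with its inference
# metadata, then play the recorded segments back with the metadata re-inserted.
# The SimpleNamespace objects stand in for the sample's parsed command line
# options; paths and the max_time value are placeholders.
from types import SimpleNamespace

record_options = SimpleNamespace(
    input_video_path="/home/video/input.mp4",
    metadata_file_path="/tmp/recording/metadata.txt",
    output_video_folder="/tmp/recording",
    max_time=2000000000)    # assumed nanoseconds, per GStreamer max-size-time

playback_options = SimpleNamespace(
    input_video_path="/tmp/recording",    # directory of recorded *.mp4 segments
    metadata_file_path="/tmp/recording/metadata.txt")

# gst_record(record_options)
# gst_playback(playback_options)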