示例#1
0
 def __init__(self):
     """Initialize the runner: bring up VAServing and set the poll pause."""
     super(RunVA, self).__init__()
     serving_config = {
         'model_dir': '/home/models',
         'pipeline_dir': '/home/pipelines',
         'max_running_pipelines': 1,
     }
     print("vaserving args: {} ".format(serving_config), flush=True)
     VAServing.start(serving_config)
     # Delay (seconds) between successive pipeline status polls.
     self._pause = 0.05
示例#2
0
def pipelines_name_version_post(name, version):  # noqa: E501
    """pipelines_name_version_post

    Start new instance of pipeline.
    Specify the source and destination parameters as URIs # noqa: E501

    :param name: pipeline name
    :type name: str
    :param version: pipeline version
    :type version: str
    :param pipeline_request: request body (read from connexion, not a parameter)
    :type pipeline_request: dict | bytes

    :rtype: None
    """

    # Lazy %-style args (consistent with the error log below) so the message
    # is only rendered when DEBUG logging is enabled.
    logger.debug("POST on /pipelines/%s/%s", name, version)
    if connexion.request.is_json:
        try:
            pipeline_id, err = VAServing.pipeline_instance(
                name, version, connexion.request.get_json())
            # pipeline_instance returns (id, None) on success or (None, err).
            if pipeline_id is not None:
                return pipeline_id
            return (err, HTTPStatus.BAD_REQUEST)
        except Exception as error:
            logger.error('Exception in pipelines_name_version_post %s', error)
            return ('Unexpected error', HTTPStatus.INTERNAL_SERVER_ERROR)

    return ('Invalid Request, Body must be valid JSON', HTTPStatus.BAD_REQUEST)
def main(options):
    """Serve the REST API on Tornado, then stop VAServing on exit."""
    try:
        rest_app = connexion.App(__name__,
                                 port=options.port,
                                 specification_dir='rest_api/',
                                 server='tornado')
        rest_app.add_api('video-analytics-serving.yaml',
                         arguments={'title': 'Video Analytics Serving API'})
        logger.info("Starting Tornado Server on port: %s", options.port)
        rest_app.run(port=options.port, server='tornado')
    except (KeyboardInterrupt, SystemExit):
        logger.info("Keyboard Interrupt or System Exit")
    except Exception as error:
        logger.error("Error Starting Tornado Server: %s", error)
    # Always shut the serving framework down, whatever stopped the server.
    VAServing.stop()
    logger.info("Exiting")
示例#4
0
def gst_record(options):
    """Record a source with the "object_detection" pipeline (version "2").

    Starts VAServing, runs the pipeline until it stops, then shuts
    VAServing down. Metadata is written as json-lines to the requested
    (or default) metadata file.
    """
    # If the input is a plain local file (not already a URI), prefix file://
    if os.path.isfile(options.input_video_path):
        options_source = "file://" + options.input_video_path
    else:
        options_source = options.input_video_path

    options_metadata_file = options.metadata_file_path
    if not options_metadata_file:
        options_metadata_file = default_metadata_record_path

    # Build the request as a dict directly instead of interpolating a JSON
    # string: string formatting produced invalid JSON whenever a path
    # contained characters needing JSON escaping (quotes, backslashes).
    request = {
        "source": {
            "type": "uri",
            "uri": options_source
        },
        "destination": {
            "type": "file",
            "path": options_metadata_file,
            "format": "json-lines"
        },
        "parameters": {
            "recording_prefix": options.output_video_folder,
            "max-size-time": options.max_time
        }
    }

    # Start the recording, once complete, stop VAServing
    VAServing.start({'log_level': 'INFO'})
    pipeline = VAServing.pipeline("object_detection", "2")
    pipeline.start(request)
    status = pipeline.status()
    while (not status.state.stopped()):
        time.sleep(0.1)
        status = pipeline.status()
    VAServing.stop()
def gst_playback(options):
    """Play back a recording, re-inserting metadata via a preproc module."""
    if os.path.isdir(options.input_video_path):
        # A directory of recorded segments: play every mp4 inside it.
        location = options.input_video_path + "/*.mp4"
        start_pts = 0
    else:
        # A single file: recover its start timestamp from the filename.
        location = options.input_video_path
        start_pts = get_timestamp_from_filename(options.input_video_path)

    # Callback module that injects the recorded metadata back into frames.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    module = os.path.join(script_dir, 'preproc_callbacks/insert_metadata.py')
    request = {
        "source": {
            "type": "path",
            "path": location
        },
        "parameters": {
            "module": module,
            "kwarg": {
                "metadata_file_path": options.metadata_file_path,
                "offset_timestamp": start_pts
            }
        }
    }

    # Start playback, wait for the pipeline to finish, then stop VAServing.
    pipeline_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "pipelines")
    VAServing.start({'log_level': 'INFO', 'pipeline_dir': pipeline_dir})
    pipeline = VAServing.pipeline("recording_playback", "playback")
    pipeline.start(request)
    while not pipeline.status().state.stopped():
        time.sleep(0.1)
    VAServing.stop()
示例#6
0
    def loop(self, reqs, _pipeline, _version="1"):
        """Run one pipeline instance to completion.

        Returns the average fps on COMPLETED, -1 on failure to start or
        on ABORTED/ERROR termination.
        """
        pipeline = VAServing.pipeline(_pipeline, _version)
        instance_id = pipeline.start(source=reqs["source"],
                                     destination=reqs["destination"],
                                     tags=reqs["tags"],
                                     parameters=reqs["parameters"])
        if instance_id is None:
            print("Pipeline {} version {} Failed to Start".format(
                _pipeline, _version), flush=True)
            return -1

        fps = 0
        while True:
            status = pipeline.status()
            print(status, flush=True)

            if status.state.stopped():
                print(
                    "Pipeline {} Version {} Instance {} Ended with {}".format(
                        _pipeline, _version, instance_id, status.state.name),
                    flush=True)

                if status.state is Pipeline.State.COMPLETED:
                    fps = status.avg_fps
                    print("Status analysis: Timing {0} {1} {2} {3} {4}".format(
                        reqs["start_time"], status.start_time,
                        status.elapsed_time, reqs["user"],
                        reqs["source"]["uri"]), flush=True)
                    break

                # Abnormal termination: bail out without stopping (it
                # already stopped on its own).
                if status.state in (Pipeline.State.ABORTED,
                                    Pipeline.State.ERROR):
                    return -1
            time.sleep(self._pause)

        pipeline.stop()
        print("exiting va pipeline", flush=True)
        return fps
示例#7
0
    parser.add_argument("--parameters", action="store",
                        dest="parameters",
                        type=str, default=os.getenv('PARAMETERS', '{}'))

    if (isinstance(args, dict)):
        args = ["--{}={}".format(key, value)
                for key, value in args.items() if value]

    return parser.parse_args(args)

if __name__ == "__main__":

    args = parse_args()
    try:
        # ignore_init_errors lets the server come up even if some pipelines
        # fail to load at startup.
        VAServing.start({'log_level': 'INFO', "ignore_init_errors": True,
                         'max_running_pipelines': args.max_running_pipelines})

        # create gRPC server and start running
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=args.max_running_pipelines))
        extension_pb2_grpc.add_MediaGraphExtensionServicer_to_server(
            MediaGraphExtension(args.pipeline, args.version, args.debug, args.parameters), server)
        server.add_insecure_port(f'[::]:{args.port}')
        print("Starting Protocol Server Application on port", args.port)
        server.start()
        server.wait_for_termination()
        VAServing.stop()

    except Exception as error:
        # Report the failure instead of exiting silently, then make sure
        # VAServing is shut down before signalling the error via exit code.
        print("Error starting gRPC server: {}".format(error), flush=True)
        VAServing.stop()
        sys.exit(-1)
    def ProcessMediaStream(self, requestIterator, context):
        """gRPC bidirectional-stream handler for a media stream session.

        Consumes MediaStreamMessage requests, feeds frames into a VAServing
        pipeline through an input Queue, and yields acknowledgement /
        inference messages back to the client as results come out of the
        output Queue.

        :param requestIterator: iterator of incoming MediaStreamMessage
            requests; the first one must carry a MediaStreamDescriptor
        :param context: gRPC servicer context (used to check liveness)
        """
        requests_received = 0
        responses_sent = 0
        # First message from the client is (must be) MediaStreamDescriptor
        request = next(requestIterator)
        requests_received += 1
        # Extract message IDs
        request_seq_num = request.sequence_number
        request_ack_seq_num = request.ack_sequence_number
        # State object per client
        client_state = State(request.media_stream_descriptor)
        self._logger.info("[Received] SeqNum: {0:07d} | "
                          "AckNum: {1}\nMediaStreamDescriptor:\n{2}".format(
                              request_seq_num,
                              request_ack_seq_num,
                              client_state.media_stream_descriptor,
                          ))
        # First message response: acknowledge the descriptor, echoing back
        # the client's timescale.
        media_stream_message = extension_pb2.MediaStreamMessage(
            sequence_number=1,
            ack_sequence_number=request_seq_num,
            media_stream_descriptor=extension_pb2.MediaStreamDescriptor(
                media_descriptor=media_pb2.MediaDescriptor(
                    timescale=client_state.media_stream_descriptor.
                    media_descriptor.timescale)),
        )
        responses_sent += 1
        yield media_stream_message

        # Debug versions dump every frame as a jpeg under a per-run temp dir.
        final_pipeline_parameters = {}
        if self._version.startswith("debug"):
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            location = os.path.join(tempfile.gettempdir(), "vaserving",
                                    self._version, timestamp)
            os.makedirs(os.path.abspath(location))
            final_pipeline_parameters = {
                "location": os.path.join(location, "frame_%07d.jpeg")
            }

        # Pipeline parameters may be given either as a path to a JSON file
        # or as an inline JSON string; user values override debug defaults.
        try:
            if self._pipeline_parameter_arg:
                pipeline_parameters = {}
                if os.path.isfile(self._pipeline_parameter_arg):
                    with open(self._pipeline_parameter_arg) as json_file:
                        pipeline_parameters = json.load(json_file)
                else:
                    pipeline_parameters = json.loads(
                        self._pipeline_parameter_arg)
                final_pipeline_parameters.update(pipeline_parameters)
        except ValueError:
            self._logger.error("Issue loading json parameters: {}".format(
                self._pipeline_parameter_arg))
            raise

        self._logger.info("Pipeline Name : {}".format(self._pipeline))
        self._logger.info("Pipeline Version : {}".format(self._version))
        self._logger.info(
            "Pipeline Parameters : {}".format(final_pipeline_parameters))
        # Bounded input queue applies back-pressure on the request stream.
        detect_input = Queue(maxsize=self._input_queue_size)
        detect_output = Queue()
        # Start object detection pipeline
        # It will wait until it receives frames via the detect_input queue
        detect_pipeline = VAServing.pipeline(self._pipeline, self._version)
        detect_pipeline.start(
            source={
                "type": "application",
                "class": "GStreamerAppSource",
                "input": detect_input,
                "mode": "push",
            },
            destination={
                "type": "application",
                "class": "GStreamerAppDestination",
                "output": detect_output,
                "mode": "frames",
            },
            parameters=final_pipeline_parameters,
        )

        # Process rest of the MediaStream message sequence
        for request in requestIterator:
            try:
                # If the pipeline has fallen behind by a full queue's worth,
                # block until at least one result is available before
                # accepting more input.
                if requests_received - responses_sent >= self._input_queue_size:
                    queued_samples = self._get_queued_samples(detect_output,
                                                              block=True)
                else:
                    queued_samples = []
                # Read request id, sent by client
                request_seq_num = request.sequence_number
                self._logger.info(
                    "[Received] SeqNum: {0:07d}".format(request_seq_num))
                requests_received += 1
                gva_sample = self._generate_gva_sample(client_state, request)
                detect_input.put(gva_sample)
                # Drain any results that are already available (non-blocking).
                queued_samples.extend(self._get_queued_samples(detect_output))
                if context.is_active():
                    for gva_sample in queued_samples:
                        if gva_sample:
                            media_stream_message = self._generate_media_stream_message(
                                gva_sample)
                            responses_sent += 1
                            self._logger.info(
                                "[Sent] AckSeqNum: {0:07d}".format(
                                    media_stream_message.ack_sequence_number))
                            yield media_stream_message
                else:
                    # Client went away; stop consuming its stream.
                    break
                if detect_pipeline.status().state.stopped():
                    break
            except:  # NOTE(review): bare except also traps KeyboardInterrupt
                log_exception(self._logger)
                raise

        # A stopped pipeline at this point means it ended abnormally mid-stream;
        # raise so the error state is logged and propagated to the client.
        if detect_pipeline.status().state.stopped():
            try:
                raise Exception(detect_pipeline.status().state)
            except:  # NOTE(review): bare except, see above
                log_exception(self._logger)
                raise

        # After the server has finished processing all the request iterator objects
        # Push a None object into the input queue.
        # When the None object comes out of the output queue, we know we've finished
        # processing all requests
        gva_sample = None
        if not detect_pipeline.status().state.stopped():
            detect_input.put(None)
            gva_sample = detect_output.get()
        while gva_sample:
            media_stream_message = self._generate_media_stream_message(
                gva_sample)
            responses_sent += 1
            self._logger.info("[Sent] AckSeqNum: {0:07d}".format(
                media_stream_message.ack_sequence_number))
            if context.is_active():
                yield media_stream_message
            gva_sample = detect_output.get()

        self._logger.info(
            "Done processing messages: Received: {}, Sent: {}".format(
                requests_received, responses_sent))
        self._logger.info("MediaStreamDescriptor:\n{0}".format(
            client_state.media_stream_descriptor))

def main(options):
    """Run the connexion REST API on a Tornado server; stop VAServing after."""
    try:
        application = connexion.App(__name__,
                                    port=options.port,
                                    specification_dir='rest_api/',
                                    server='tornado')
        application.add_api('video-analytics-serving.yaml',
                            arguments={'title': 'Video Analytics Serving API'})
        logger.info("Starting Tornado Server on port: %s", options.port)
        application.run(port=options.port, server='tornado')
    except (KeyboardInterrupt, SystemExit):
        logger.info("Keyboard Interrupt or System Exit")
    except Exception as error:
        logger.error("Error Starting Tornado Server: %s", error)
    # Shut down the serving backend regardless of how the server exited.
    VAServing.stop()
    logger.info("Exiting")


if __name__ == '__main__':
    # Bring the VAServing backend up first; the REST server is useless
    # without it, so a startup failure here is fatal.
    try:
        VAServing.start()
    except Exception as error:
        logger.error("Error Starting VA Serving: %s", error)
        sys.exit(1)
    # Run the REST front end; unexpected errors are logged, not re-raised.
    try:
        main(VAServing.options)
    except Exception as error:
        logger.error("Unexpected Error: %s", error)
                "      DRIVER_RESPONSECLIENTID: edgex-mqtt-command-sub\n"\
                "      DRIVER_RESPONSETOPIC: Edgex-command-response\n"\
                "    volumes:\n"\
                "      - ./res/device-mqtt-go/:/res/\n"\
                "version: '3.7'"
            with open(compose_dest, 'w') as override_file:
                override_file.write(COMPOSE.format(**parameters["edgexbridge"]))
                print("Generated EdgeX Compose Override:\n{}\n\n".format(compose_dest))

        else:
            # Raise error if compose override does not exist, expecting the generation to
            # complete at least once.
            if os.path.isfile(compose_dest):
                pipeline_name = "object_detection"
                pipeline_version = "edgex"
                VAServing.start({'log_level': 'INFO', "ignore_init_errors":False})
                pipeline = VAServing.pipeline(pipeline_name, pipeline_version)
                source = {"uri":args.source, "type":"uri"}
                destination = {"type":"mqtt",
                               "host":args.destination,
                               "topic":'edgex_bridge/'+args.topic}
                pipeline.start(source=source, destination=destination, parameters=parameters)
                start_time = None
                start_size = 0
                VAServing.wait()
            else:
                print("ERROR: Invoke edgex_bridge.py with '--generate' to prepare EdgeX Foundry.")
    except KeyboardInterrupt:
        pass
    except Exception:
        print("Error processing script: {}".format(traceback.print_exc()))
示例#11
0
    va_serving_args.append("--max_running_pipelines")
    va_serving_args.append(str(max_running_pipelines))
    return va_serving_args


if __name__ == "__main__":

    args, va_serving_args = parse_args()
    logger = get_logger("Main")
    try:
        server_args = append_default_server_args(
            va_serving_args, args.max_running_pipelines
        )

        try:
            VAServing.start(server_args)
        except Exception as error:
            logger.error(error)
            logger.error("Exception encountered during VAServing start")
            raise

        if (
                (args.pipeline_name and not args.pipeline_version)
                or (not args.pipeline_name and args.pipeline_version)
        ):
            logger.error("Pipeline name or version set but not both")
            raise ValueError('Pipeline name or version set but not both')

        # create gRPC server and start running
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=args.max_running_pipelines)
示例#12
0
    if (isinstance(args, dict)):
        args = [
            "--{}={}".format(key, value) for key, value in args.items()
            if value
        ]

    return parser.parse_args(args)


if __name__ == "__main__":
    args = parse_args()
    decode_output = Queue()
    detect_input = Queue()
    detect_output = Queue()
    VAServing.start({'log_level': 'INFO', "ignore_init_errors": True})
    parameters = None
    if args.parameters:
        parameters = json.loads(args.parameters)
    # Start object detection pipeline
    # It will wait until it receives frames via the detect_input queue
    detect_pipeline = VAServing.pipeline(args.pipeline, args.pipeline_version)
    detect_pipeline.start(source={
        "type": "application",
        "class": "GStreamerAppSource",
        "input": detect_input,
        "mode": args.source_mode
    },
                          destination={
                              "type": "application",
                              "class": "GStreamerAppDestination",
示例#13
0
    va_serving_args.append("--max_running_pipelines")
    va_serving_args.append(str(max_running_pipelines))
    return va_serving_args


if __name__ == "__main__":

    args, va_serving_args = parse_args()
    logger = get_logger("Main")
    try:
        server_args = append_default_server_args(
            va_serving_args, args.max_running_pipelines
        )

        try:
            VAServing.start(server_args)
            pipelines = VAServing.pipelines()
            pipeline_versions = defaultdict(list)
            for pipeline in pipelines:
                pipeline_versions[pipeline.name()].append(pipeline.version())
            if args.pipeline_name not in pipeline_versions:
                raise Exception("Unknown Pipeline: {}".format(args.pipeline_name))
            if (args.debug) and (not args.pipeline_version.startswith("debug")):
                args.pipeline_version = "debug_{}".format(args.pipeline_version)
            if args.pipeline_version not in pipeline_versions[args.pipeline_name]:
                raise Exception(
                    "Unknown Pipeline Version: {}".format(args.pipeline_version)
                )
        except Exception as error:
            logger.error(error)
            logger.error("Exception encountered during VAServing start")
示例#14
0
 def close(self):
     """Shut down the VAServing framework."""
     VAServing.stop()
示例#15
0
    def loop(self,
             sensor,
             location,
             uri,
             algorithm,
             algorithmName,
             options=None,
             topic="analytics"):
        """Run one VAServing pipeline instance until it stops or is signalled.

        Publishes inference results to MQTT and periodically records
        fps/latency/cpu/memory metrics to the database while running.

        :param sensor: sensor id, used in tags and the recording prefix
        :param location: sensor location, attached to result tags
        :param uri: media source URI
        :param algorithm: MQTT client id and database record key
        :param algorithmName: human-readable algorithm name for tags
        :param options: extra pipeline parameters merged over the defaults
        :param topic: MQTT destination topic (default "analytics")
        """
        # None default instead of a mutable {}: a shared dict default is
        # reused across calls and can silently leak state between them.
        if options is None:
            options = {}
        try:
            VAServing.start({
                'model_dir': '/home/models',
                'pipeline_dir': '/home/pipelines',
                'max_running_pipelines': 1,
            })

            try:
                source = {
                    "type": "uri",
                    "uri": uri,
                }
                destination = {
                    "type": "mqtt",
                    "host": mqtthost,
                    "clientid": algorithm,
                    "topic": topic
                }
                tags = {
                    "sensor": sensor,
                    "location": location,
                    "algorithm": algorithmName,
                    "office": {
                        "lat": office[0],
                        "lon": office[1]
                    },
                }
                parameters = {
                    "inference-interval": every_nth_frame,
                    "recording_prefix": "/tmp/rec/" + sensor
                }
                # Caller-supplied options override the defaults above.
                parameters.update(options)

                pipeline = VAServing.pipeline(self._pipeline, self._version)
                instance_id = pipeline.start(source=source,
                                             destination=destination,
                                             tags=tags,
                                             parameters=parameters)

                if instance_id is None:
                    raise Exception(
                        "Pipeline {} version {} Failed to Start".format(
                            self._pipeline, self._version))

                self._stop = Event()
                while not self._stop.is_set():
                    status = pipeline.status()
                    print(status, flush=True)

                    if status.state.stopped():
                        print(
                            "Pipeline {} Version {} Instance {} Ended with {}".
                            format(self._pipeline, self._version, instance_id,
                                   status.state.name),
                            flush=True)
                        break

                    if status.avg_fps > 0 and status.state is Pipeline.State.RUNNING:
                        avg_pipeline_latency = status.avg_pipeline_latency
                        # Latency may be unset before samples accumulate;
                        # report 0 rather than None.
                        if not avg_pipeline_latency:
                            avg_pipeline_latency = 0

                        self._db.update(
                            algorithm, {
                                "sensor": sensor,
                                "performance": status.avg_fps,
                                "latency": avg_pipeline_latency * 1000,
                                "cpu": psutil.cpu_percent(),
                                "memory": psutil.virtual_memory().percent,
                            })

                    # Poll every 3 seconds, or wake immediately on stop().
                    self._stop.wait(3)

                self._stop = None
                pipeline.stop()
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt and
                # SystemExit are no longer swallowed here.
                print(traceback.format_exc(), flush=True)

            VAServing.stop()
        except Exception:
            print(traceback.format_exc(), flush=True)
def gst_record(options):
    """Record a source to segmented video plus a json-lines metadata file.

    Returns -1 when the metadata file or the output directory is not
    writable; otherwise runs the "segment_record" pipeline to completion.
    """
    # Prefix file:// when given a plain local file path instead of a URI.
    if os.path.isfile(options.input_video_path):
        source_uri = "file://" + options.input_video_path
    else:
        source_uri = options.input_video_path

    metadata_path = options.metadata_file_path or default_metadata_record_path

    # Probe write permission on the metadata location (creates the file).
    try:
        with open(metadata_path, 'w'):
            pass
    except IOError:
        print("No write permissions for metadata file location")
        return -1

    # Remove any pre-existing metadata file so the run starts clean.
    if os.path.exists(metadata_path):
        os.remove(metadata_path)

    # Create the output video directory if it is missing.
    if not os.path.isdir(options.output_video_folder):
        os.mkdir(options.output_video_folder)

    # Probe write permission on the output directory with a throwaway file.
    try:
        probe_path = os.path.join(options.output_video_folder,
                                  "checkDirWritable.txt")
        with open(probe_path, 'w'):
            pass
        os.remove(probe_path)
    except IOError:
        print("No write permissions for video output directory")
        return -1

    # Populate the request to provide to VAServing library
    request = {
        "source": {
            "type": "uri",
            "uri": source_uri
        },
        "destination": {
            "type": "file",
            "path": metadata_path,
            "format": "json-lines"
        },
        "parameters": {
            "recording_prefix": options.output_video_folder,
            "max-size-time": options.max_time
        }
    }

    # Start the recording, once complete, stop VAServing
    pipelines_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "pipelines")
    VAServing.start({'log_level': 'INFO', 'pipeline_dir': pipelines_dir})
    pipeline = VAServing.pipeline("object_detection", "segment_record")
    pipeline.start(request)
    while not pipeline.status().state.stopped():
        time.sleep(0.1)
    VAServing.stop()