def __init__(self, pipeline, version="2"):
    """Bind this runner to *pipeline*/*version* and load serving configs.

    Loads model and pipeline definitions from the fixed /home locations,
    then captures the GLib main-loop context used to drive processing.
    """
    super().__init__()
    self._pipeline = pipeline
    self._version = version
    # DB handle for the 'algorithms' index.  NOTE(review): assumes dbhost
    # and office are module-level configuration -- confirm against the file.
    self._db = DBIngest(host=dbhost, index="algorithms", office=office)
    self._maincontext = GLib.MainLoop().get_context()
    self._stop = None
    # Model/pipeline definitions must be loaded before requests are served.
    ModelManager.load_config("/home/models", {})
    PipelineManager.load_config("/home/pipelines", 1)
def main(options):
    """Configure managers and serve the Video Analytics API over Tornado.

    :param options: parsed CLI options (pipeline_dir, model_dir, port).
    """
    PipelineManager.load_config(
        os.path.join(CONFIG_PATH, options.pipeline_dir),
        MAX_RUNNING_PIPELINES)
    ModelManager.load_config(os.path.join(CONFIG_PATH, options.model_dir))
    # Tornado needs an event loop bound to this thread.
    asyncio.set_event_loop(asyncio.new_event_loop())
    api_app = connexion.App(__name__, specification_dir='./openapi/')
    api_app.app.json_encoder = encoder.JSONEncoder
    api_app.add_api('openapi.yaml',
                    arguments={'title': 'Video Analytics API'})
    logger.info("Starting Tornado Server on port: {p}".format(p=options.port))
    api_app.run(server='tornado', port=options.port)
def main(options):
    """Configure managers and serve the Video Analytics Serving API.

    :param options: parsed CLI options (config_path, pipeline_dir,
        max_running_pipelines, model_dir, port, network preferences).
    """
    PipelineManager.load_config(
        os.path.join(options.config_path, options.pipeline_dir),
        options.max_running_pipelines)
    ModelManager.load_config(
        os.path.join(options.config_path, options.model_dir),
        parse_network_preference(options))
    api_app = connexion.App(__name__, specification_dir='./openapi/')
    api_app.app.json_encoder = encoder.JSONEncoder
    api_app.add_api('openapi.yaml',
                    arguments={'title': 'Video Analytics Serving API'})
    logger.info("Starting Tornado Server on port: {p}".format(p=options.port))
    api_app.run(server='tornado', port=options.port)
def test_get_loaded_models_empty(self):
    """get_loaded_models() must return an empty list when no models are loaded."""
    ModelManager.models = None
    self.assertEqual(ModelManager.get_loaded_models(), [])
def models_get():  # noqa: E501
    """Return the models currently loaded by the ModelManager.

    :rtype: List[ModelVersion]
    """
    logger.debug("GET on /models")
    return ModelManager.get_loaded_models()
def _add_default_models(self):
    """Point every GVA inference element at the default network for its device.

    Elements are collected first, then mutated, so the pipeline's element
    iterator is never invalidated mid-walk.
    """
    targets = list(filter(
        lambda elem: elem.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES,
        self.pipeline.iterate_elements()))
    for elem in targets:
        resolved = ModelManager.get_default_network_for_device(
            elem.get_property("device"), elem.get_property("model"))
        logger.debug("Setting model to {} for element {}".format(
            resolved, elem.get_name()))
        elem.set_property("model", resolved)
def models_get():  # noqa: E501
    """models_get

    Return supported models  # noqa: E501

    :rtype: List[ModelVersion] on success, or an ('Unexpected error',
        HTTPStatus.INTERNAL_SERVER_ERROR) tuple on failure.
    """
    try:
        logger.debug("GET on /models")
        return ModelManager.get_loaded_models()
    except Exception as error:
        # Fix: the original logged the wrong endpoint name
        # ('pipelines_name_version_get') and concatenated str + Exception,
        # which itself raises TypeError.  Use lazy %-formatting instead.
        logger.error("models_get %s", error)
        return ('Unexpected error', HTTPStatus.INTERNAL_SERVER_ERROR)
def _add_default_models(self, args):
    """Rewrite the ffmpeg '-vf' filter list so GVA inference filters carry
    a concrete model path.

    For every filter whose type is a GVA inference type, the 'model'
    parameter is replaced by the default network for the filter's device
    ('CPU' when no 'device' parameter is present; numeric device ids are
    mapped back to symbolic names via DEVICEID_MAP).  *args* is mutated in
    place; other filters pass through untouched.  No-op when '-vf' is
    absent.

    :param args: mutable list of ffmpeg command-line arguments.
    """
    vf_index = args.index('-vf') if ('-vf' in args) else None
    if vf_index is None:  # fix: identity comparison instead of '== None'
        return
    new_filters = []
    for _filter in args[vf_index + 1].split(','):
        filter_params = self._get_filter_params(_filter)
        # Guard clause: non-inference filters are forwarded unchanged.
        if filter_params['type'] not in FFmpegPipeline.GVA_INFERENCE_FILTER_TYPES:
            new_filters.append(_filter)
            continue
        device = 'CPU'
        if 'device' in filter_params:
            device = FFmpegPipeline.DEVICEID_MAP[int(filter_params['device'])]
        filter_params['model'] = ModelManager.get_default_network_for_device(
            device, filter_params['model'])
        new_filters.append(self._join_filter_params(filter_params))
    args[vf_index + 1] = ','.join(new_filters)
def _add_default_models(self, args):
    """Post-process the ffmpeg '-vf' filter chain in *args* (mutated in place).

    - GVA inference filters whose model contains the VA_DEVICE_DEFAULT
      placeholder get the default network for their device substituted in
      ('CPU' by default; integer device ids map through DEVICEID_MAP).
    - The metaconvert filter has its 'source=NULL' / 'tags=NULL'
      placeholders replaced with the request's source URI and a tags file
      written under /tmp.
    - All other filters pass through unchanged.  No-op when '-vf' is absent.

    :param args: mutable list of ffmpeg command-line arguments.
    """
    vf_index = args.index('-vf') if ('-vf' in args) else None
    if vf_index is None:  # fix: identity comparison instead of '== None'
        return
    filters = args[vf_index + 1].split(',')
    new_filters = []
    for _filter in filters:
        filter_params = self._get_filter_params(_filter)
        if ((filter_params['type'] in FFmpegPipeline.GVA_INFERENCE_FILTER_TYPES)
                and ("VA_DEVICE_DEFAULT" in filter_params['model'])):
            device = "CPU"
            if ("device" in filter_params):
                device = filter_params["device"]
                if isinstance(filter_params['device'], int):
                    # Numeric ids map back to symbolic device names.
                    device = FFmpegPipeline.DEVICEID_MAP[int(
                        filter_params['device'])]
            filter_params[
                "model"] = ModelManager.get_default_network_for_device(
                    device, filter_params["model"])
            new_filters.append(self._join_filter_params(filter_params))
        elif (filter_params['type'] == "metaconvert"):
            # use the file to send the tags info to ffmpeg
            tmp_file = "/tmp/timestamp"
            if "tags" in self.request:
                try:
                    # NOTE(review): tmp_tags is overwritten each iteration,
                    # so only the LAST tag reaches the file -- preserved
                    # here; confirm whether all tags were intended.
                    tmp_tags = ""
                    for key in self.request["tags"]:
                        tmp_tags = "{\"%s\":%s}" % (
                            key, self.request["tags"][key])
                    with open(tmp_file, 'w') as f:
                        f.write(tmp_tags)
                except Exception:
                    # Best-effort: a bad tag must not abort filter rewriting.
                    logger.error("Error adding tags")
            # Escape ':' for ffmpeg filter-option syntax.
            source_uri = "source=" + "'" + self.request["source"][
                "uri"].replace(":", "\\:") + "'"
            filter_params_str = self._join_filter_params(
                filter_params).replace("source=NULL", source_uri)
            filter_params_str = filter_params_str.replace(
                "tags=NULL", "tags=file|" + tmp_file)
            new_filters.append(filter_params_str)
        else:
            new_filters.append(_filter)
    args[vf_index + 1] = ','.join(new_filters)
def test_load_config(self):
    """Smoke-test: load model definitions from the relative models directory
    and dump what was registered."""
    ModelManager.load_config("../models")
    print(ModelManager.models)
def __init__(self):
    """Initialize the runner: load serving configs and arm the main loop.

    Model and pipeline definitions are read from the fixed /home locations;
    self._noop is scheduled to fire every 1000 ms on the GLib main loop.
    """
    super().__init__()
    ModelManager.load_config("/home/models", {})
    PipelineManager.load_config("/home/pipelines", 1)
    self._maincontext = GLib.MainLoop().get_context()
    GLib.timeout_add(1000, self._noop)