def main():
    """Run YOLO batch inference over the task's input data.

    Builds the single-image applier (loads model weights), wraps it in a
    batch applier with the default full-image inference mode, and processes
    every input image, writing results with the '_yolo' suffix.
    """
    applier = YOLOSingleImageApplier()
    mode_config = InfModeFullImage.make_default_config(model_result_suffix='_yolo')
    batch_applier = BatchInferenceApplier(
        single_image_inference=applier,
        default_inference_mode_config=mode_config,
        config_validator=YoloJsonConfigValidator())
    batch_applier.run_inference()
def main():
    """Run YOLO batch inference, serially or in parallel.

    First instantiates the applier WITHOUT loading model weights: this parses,
    merges and validates all configs, which tells us how many inference
    processes were requested. Based on that count, either the usual serial
    applier (weights loaded into this process) or the multiprocess applier
    (each worker builds its own applier) is used.
    """
    # Config-only construction; weights are loaded later, and only where needed.
    probe_applier = YOLOSingleImageApplier(_load_model_weights=False)
    process_count = probe_applier._config[NUM_PROCESSES]

    mode_config = InfModeFullImage.make_default_config(model_result_suffix='_yolo')
    validator = common.YoloJsonConfigValidator()

    if process_count == 1:
        # Serial path: load the weights into the probe applier and reuse it.
        probe_applier._construct_and_fill_model()
        batch_applier = BatchInferenceApplier(
            single_image_inference=probe_applier,
            default_inference_mode_config=mode_config,
            config_validator=validator)
    else:
        # Parallel path: each worker process constructs a fresh applier
        # (calling the class with no arguments, weights loaded in-worker).
        batch_applier = BatchInferenceMultiprocessApplier(
            single_image_inference_initializer=YOLOSingleImageApplier,
            num_processes=process_count,
            default_inference_mode_config=mode_config,
            config_validator=validator)

    batch_applier.run_inference()
def __init__(self, logger, model_applier: SingleImageInferenceInterface, conn_config, cache):
    """Set up a lightweight servicer for internal usage.

    Stores the logger and model applier and prepares a default full-image
    inference mode config. Note: ``cache`` is accepted for signature
    compatibility with the active servicer but is not stored here.
    """
    self.logger = logger
    self.model_applier = model_applier
    default_mode_config = InfModeFullImage.make_default_config(model_result_suffix=MODEL_RESULT_SUFFIX)
    self._default_inference_mode_config = default_mode_config
    self.logger.info('Created InactiveRPCServicer for internal usage', extra=conn_config)
def main():
    """Run PyTorch segmentation batch inference over the task's input data.

    Brings up the model (class-mapping configs processed, weights loaded to
    the GPU), then processes every input image with the default full-image
    inference mode, writing results with the '_pytorch_segm_example' suffix.
    Other inference modes exist — see supervisely_lib/nn/hosted/inference_modes.py.
    """
    applier = PytorchSegmentationApplier(model_factory_fn=model_factory_fn)
    # Full image as model input is the default; results get this suffix.
    mode_config = InfModeFullImage.make_default_config(model_result_suffix='_pytorch_segm_example')
    # IO wrapper reading inputs and saving results in supervisely format
    # within the context of a supervisely agent task.
    batch_applier = BatchInferenceApplier(
        single_image_inference=applier,
        default_inference_mode_config=mode_config)
    batch_applier.run_inference()
def __init__(self, logger, model_applier: SingleImageInferenceInterface, conn_config, cache):
    """Set up the agent RPC servicer.

    Creates the agent API client (tagged with the task id), a thread pool
    for concurrent work, bounded queues for the download and final
    processing stages, and a default full-image inference mode config.
    """
    self.logger = logger
    self.model_applier = model_applier
    self.image_cache = cache

    # API client bound to this task via the 'x-task-id' metadata header.
    api = AgentAPI(token=conn_config['token'],
                   server_address=conn_config['server_address'],
                   ext_logger=self.logger)
    api.add_to_metadata('x-task-id', conn_config['task_id'])
    self.api = api

    self.thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
    # Bounded queues provide back-pressure between pipeline stages.
    self.download_queue = Queue(maxsize=self.QUEUE_MAX_SIZE)
    self.final_processing_queue = Queue(maxsize=self.QUEUE_MAX_SIZE)

    self._default_inference_mode_config = InfModeFullImage.make_default_config(
        model_result_suffix=MODEL_RESULT_SUFFIX)
    self.logger.info('Created AgentRPCServicer', extra=conn_config)