def run():
    """Entry point for the face-recognizer gRPC server.

    Parses command-line arguments, selects the face-detector backend,
    wires the recognizer into the middleware agent, then serves gRPC
    requests until interrupted with Ctrl-C.
    """
    setup_logging()

    # Parse arguments:
    parser = argparse.ArgumentParser(description='A face recognizer server')
    parser.add_argument('-m', '--model',
                        default='/root/frpyc/lbpcascade_frontalface.xml',
                        # FIX: help text was missing its closing parenthesis.
                        help='path to the face detector model (default lbpcascade_frontalface.xml)')
    parser.add_argument('-c', '--usecpu', action='store_true',
                        help='use cpu (gpu is default)')
    parser.add_argument('-s', '--skip', action='store_true',
                        help='immediately return zero detected faces')
    parser.add_argument('-f', '--faces', type=str, default='/root/frpyc/images',
                        help='directory with faces to be learnt and recognized')
    args = parser.parse_args()

    # Decide whether to use DB or not. NOTE: --faces has a non-empty default,
    # so the else-branch is reachable only via an explicitly empty "-f ''".
    if args.faces:
        img_dir = Path(args.faces)
        if not img_dir.is_dir():
            print('%s is not directory' % args.faces)
            sys.exit(1)
    else:
        logging.error("This implementation of recognizer cannot work with database. A directory with faces to be "
                      "trained from must be specified (syntax: \"-f directory\").")
        return

    # Setting appropriate face detector; --skip takes precedence over --usecpu.
    if args.skip:
        detector = Detectors.non
    elif args.usecpu:
        detector = Detectors.cpu
    else:
        detector = Detectors.gpu

    logging.info('Starting server on %s:%s, using %s.' % (RS_HOST, str(RS_PORT), str(detector)))

    # Instantiate the recognizer:
    recognizer = RecognizerGrpcServer(args.model, detector, img_dir)

    # Integrating and starting middleware agent. The agent exposes the
    # "recognize" probe and is told it is ready before it starts.
    agent = ComponentAgent({"recognize": recognizer.probe_image_recognition},
                           None, recognizer.finalize, recognizer.initialize)
    agent.set_ready()
    recognizer.set_agent(agent)
    agent.start()

    # Start the recognizer's own gRPC endpoint:
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicers.add_RecognizerServerServicer_to_server(recognizer, server)
    server.add_insecure_port(RS_HOST + ":" + str(RS_PORT))
    server.start()

    # grpc server.start() does not block; idle-loop until Ctrl-C, then
    # stop the server with no grace period.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info('^C received, ending')
        server.stop(0)
def start_client_controller(
        wait_signal_frequency=DEFAULT_WAIT_SIGNAL_FREQUENCY) -> None:
    """
    Brings up the Client Controller's internal and external gRPC interfaces
    over a single shared ClientModel, then loops forever running the liveness
    check that disconnects clients which have gone silent. Never returns;
    suitable for running in a dedicated thread or process.
    """
    setup_logging()

    # Shared state between the internal and external interfaces.
    client_model = ClientModel(wait_signal_frequency)

    internal_server = start_grpc_server(
        servicer=ClientControllerInternal(client_model),
        adder=servicers.add_ClientControllerInternalServicer_to_server,
        host=CLIENT_CONTROLLER_HOST,
        port=CLIENT_CONTROLLER_PORT)
    external_server = start_grpc_server(
        servicer=ClientControllerExternal(client_model),
        adder=mw_servicers.add_ClientControllerExternalServicer_to_server,
        host=CLIENT_CONTROLLER_EXTERNAL_HOST,
        port=CLIENT_CONTROLLER_EXTERNAL_PORT,
        threads=MAX_CLIENTS)

    try:
        while True:
            time.sleep(client_model.liveness_check_frequency)
            client_model.update_distances()

            # A connected client whose last call is older than the cutoff is
            # considered dead. Collect first, disconnect after, so the client
            # map is never mutated while being iterated.
            cutoff = time.perf_counter() - client_model.liveness_check_frequency
            stale_clients = [
                (app_name, client_id)
                for app_name, per_app in client_model.clients.items()
                for client_id, client in per_app.items()
                if client.status == ClientStatus.CONNECTED
                and client.last_call < cutoff
            ]

            for app, id_ in stale_clients:
                logging.info("Cancelling the call for client (%s:%s)" % (app, id_))
                client_model.disconnect_client(app, id_)
    except KeyboardInterrupt:
        print("ClientController: ^C received, ending")
        external_server.stop(0)
        internal_server.stop(0)
        # NOTE(review): this `return` is the tail of a method whose `def`
        # starts before this chunk; it replies ACCEPTED to the judge call.
        return predictor_pb.JudgeReply(
            result=predictor_pb.JudgeResult.Value("ACCEPTED"))

    def OnScenarioDone(self, request, context):
        """
        Reports scenario completion and provides a path to the corresponding
        measurement data file.

        Rebuilds the Scenario from the protobuf request, marks it completed in
        the scenario generator, and hands its measurement data file to the
        predictor. Returns an empty CallbackAck.
        """
        scenario: Scenario = Scenario.init_from_pb(request, self.applications)
        app_name = scenario.controlled_probe.component.application.name
        logging.info(
            f"Received ScenarioDone notification for scenario {scenario.id_} of app {app_name}"
        )
        # Both bookkeeping steps happen under the lock — presumably because
        # other RPC handlers touch the same structures concurrently.
        with self._lock:
            # Remove scenario from the list of to-be-done scenarios
            self._scenario_generator.scenario_completed(scenario)
            # Ingest the scenario's measurement file under a name derived
            # from the scenario itself.
            self._single_process_predictor.process_measurement_file(
                MeasurementAggregator.compose_measurement_name_from_scenario(
                    scenario), scenario.filename_data)
        return predictor_pb.CallbackAck()

    def _register_probe(self, probe: Probe) -> None:
        # Index the probe by its alias. NOTE(review): the dict is named
        # `_probes_by_id` yet is keyed on `probe.alias` — confirm that an
        # alias doubles as the id here.
        self._probes_by_id[probe.alias] = probe


if __name__ == "__main__":
    setup_logging()
    # Serve the aggregator on the predictor endpoint; block=True presumably
    # makes start_grpc_server block forever — consistent with this being the
    # final statement.
    start_grpc_server(PerformanceDataAggregator(),
                      add_PredictorServicer_to_server, PREDICTOR_HOST,
                      PREDICTOR_PORT, block=True)