import torch
from xmlrpc.server import SimpleXMLRPCServer

# parse_args() and EvaluatorServer are assumed to be defined earlier in this module.

if __name__ == '__main__':
    # Module-level flags; server methods can flip these to stop or pause request handling
    stop_server = False
    pause_server = False

    # Use the forkserver start method so worker processes do not inherit state via fork
    torch.multiprocessing.set_start_method("forkserver")

    args = parse_args()
    n_cpus = args.n_cpus
    port_number = args.port_number
    server_url = '0.0.0.0'  # listen on all interfaces

    # Server settings
    print('=============== Server Settings:')
    print('Server URL: ', server_url)
    print('Port Number:', port_number)
    print('Num CPUs:', n_cpus)
    print()

    # Initialize the evaluator and start the XML-RPC server
    eval_server = EvaluatorServer(n_cpus)
    server = SimpleXMLRPCServer((server_url, port_number))
    server.register_instance(eval_server)

    # Run until cancellation
    try:
        while not stop_server:
            server.handle_request()
    except Exception as e:
        server.server_close()  # close the listening socket before exiting
        print("ERROR: ", str(e))
        print(repr(e))
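
# Example client usage (a sketch, not part of this script): register_instance()
# exposes EvaluatorServer's public methods over XML-RPC, so a client can call
# them by name. `evaluate` below is a hypothetical method name; substitute the
# actual methods defined on EvaluatorServer.
#
#   import xmlrpc.client
#
#   proxy = xmlrpc.client.ServerProxy("http://<server-ip>:<port_number>")
#   result = proxy.evaluate(payload)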