def create_docker_connection(cleanup=True, start_clipper=True):
    logger.info("Creating DockerContainerManager")
    cm = DockerContainerManager(clipper_query_port=find_unbound_port(),
                                clipper_management_port=find_unbound_port(),
                                clipper_rpc_port=find_unbound_port(),
                                redis_port=find_unbound_port())
    cl = ClipperConnection(cm)
    if cleanup:
        cl.stop_all()
        docker_client = get_docker_client()
        docker_client.containers.prune(
            filters={"label": CLIPPER_DOCKER_LABEL})
    if start_clipper:
        # Try to start Clipper in a retry loop here to address flaky tests
        # as described in https://github.com/ucbrise/clipper/issues/352
        while True:
            try:
                logger.info("Starting Clipper")
                cl.start_clipper()
                time.sleep(1)
                break
            except docker.errors.APIError as e:
                logger.info(
                    "Problem starting Clipper: {}\nTrying again.".format(e))
                cl.stop_all()
                cm = DockerContainerManager(
                    clipper_query_port=find_unbound_port(),
                    clipper_management_port=find_unbound_port(),
                    clipper_rpc_port=find_unbound_port(),
                    redis_port=find_unbound_port())
                cl = ClipperConnection(cm)
    else:
        cl.connect()
    return cl
def create_docker_connection(cleanup=False,
                             start_clipper=False,
                             cleanup_name='default-cluster',
                             new_name='default-cluster',
                             use_centralized_log=False):
    logger.info("Creating DockerContainerManager")
    cl = None
    assert cleanup or start_clipper, \
        "You must set at least one of {cleanup, start_clipper} to be true."
    if cleanup:
        logger.info("Cleaning up Docker cluster {}".format(cleanup_name))
        cm = DockerContainerManager(
            cluster_name=cleanup_name,
            clipper_query_port=find_unbound_port(),
            clipper_management_port=find_unbound_port(),
            clipper_rpc_port=find_unbound_port(),
            fluentd_port=find_unbound_port(),
            redis_port=find_unbound_port(),
            prometheus_port=find_unbound_port(),
        )
        cl = ClipperConnection(cm)
        cl.stop_all(graceful=False)
    if start_clipper:
        # Try to start Clipper in a retry loop here to address flaky tests
        # as described in https://github.com/ucbrise/clipper/issues/352
        logger.info("Starting up Docker cluster {}".format(new_name))
        while True:
            cm = DockerContainerManager(
                cluster_name=new_name,
                clipper_query_port=find_unbound_port(),
                clipper_management_port=find_unbound_port(),
                clipper_rpc_port=find_unbound_port(),
                fluentd_port=find_unbound_port(),
                redis_port=find_unbound_port(),
                prometheus_port=find_unbound_port(),
                use_centralized_log=use_centralized_log)
            cl = ClipperConnection(cm)
            try:
                logger.info("Starting Clipper")
                cl.start_clipper()
                time.sleep(1)
                break
            except docker.errors.APIError as e:
                logger.info(
                    "Problem starting Clipper: {}\nTrying again.".format(e))
                cl.stop_all()
    return cl
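# A minimal usage sketch for create_docker_connection() above, e.g. from a
# test's setup and teardown. The cluster names are illustrative assumptions,
# not values taken from the original code.
def example_test_setup_and_teardown():
    # Tear down any leftover 'default-cluster' and start a fresh test cluster.
    cl = create_docker_connection(cleanup=True,
                                  start_clipper=True,
                                  cleanup_name='default-cluster',
                                  new_name='example-test-cluster')
    try:
        pass  # run test logic against cl here
    finally:
        # Clean up the cluster created above without starting a new one.
        create_docker_connection(cleanup=True,
                                 start_clipper=False,
                                 cleanup_name='example-test-cluster')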
def setup_clipper():
    app_name = 'inceptionv3-app'
    model_name = 'inceptionv3-model'
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=incept_predict,
        pytorch_model=incept,
        num_replicas=10,
        batch_size=1,
        pkgs_to_install=['pillow', 'pyarrow', 'torch', 'torchvision'])
    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s
    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
    print("url: ",
          "http://{addr}/{app_name}/predict".format(addr="",
                                                    app_name=app_name))
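# A hedged query sketch for the 'inceptionv3-app' set up above, assuming the
# Clipper query frontend is reachable on localhost:1337 and that incept_predict
# expects raw image bytes. The image path is a placeholder.
import base64
import json
import requests


def query_inception(image_path, addr="localhost:1337"):
    with open(image_path, "rb") as f:
        # Byte inputs are sent base64-encoded through Clipper's JSON REST API.
        payload = base64.b64encode(f.read()).decode()
    url = "http://{addr}/inceptionv3-app/predict".format(addr=addr)
    r = requests.post(url,
                      headers={"Content-Type": "application/json"},
                      data=json.dumps({"input": payload}))
    return r.json()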
def main():
    # Set up the container manager.
    # k8 = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080",
    #                                 useInternalIP=True)
    # clipper_conn = ClipperConnection(k8)
    swarm = DockerContainerManager()
    clipper_conn = ClipperConnection(swarm)
    clipper_conn.stop_all()
    clipper_conn.start_clipper()

    # Register the application.
    clipper_conn.register_application(name="sum-app",
                                      input_type="doubles",
                                      default_output="-1.0",
                                      slo_micros=10000000)

    # Deploy the model.
    python_deployer.deploy_python_closure(clipper_conn,
                                          name="sum-model",
                                          version=1,
                                          input_type="doubles",
                                          func=sum)

    # Link the application to the model.
    clipper_conn.link_model_to_app(app_name="sum-app", model_name="sum-model")

    # Test the deployment with a sample query.
    headers = {"Content-type": "application/json"}
    response = requests.post(
        "http://localhost:1337/sum-app/predict",
        headers=headers,
        data=json.dumps({"input": list(np.random.random(10))})).json()
    print(response)
def __init__(self, sleep_time):
    from clipper_admin import ClipperConnection, DockerContainerManager
    # from clipper_admin.deployers import python as python_deployer
    from clipper_admin.deployers import pytorch as pt_deployer
    self.clipper_conn = ClipperConnection(DockerContainerManager())
    try:
        self.clipper_conn.connect()
        self.clipper_conn.stop_all()
    except Exception:
        pass
    self.clipper_conn.start_clipper()
    self.clipper_conn.register_application(name="hello-world",
                                           input_type="strings",
                                           default_output="-1.0",
                                           slo_micros=10**8)
    model = Model()

    def policy(ptmodel, x):
        # Latency stub: decode the board and mask payloads (the decoded arrays
        # are not used), sleep per input, and return random outputs.
        batch = len(x)
        arr = []
        for j in x:
            xs, masks = j.split("###")
            res = np.frombuffer(base64.decodestring(xs), dtype=np.float32)
            res = res.reshape((64, 19, 19, 3))
            res = np.frombuffer(base64.decodestring(masks), dtype=np.float32)
            res = res.reshape((64, 362))
        for i in x:
            time.sleep(sleep_time)
        return [
            np.random.rand(64).astype(np.float32) for i in range(batch)
        ]

    pt_deployer.deploy_pytorch_model(self.clipper_conn,
                                     name="policy",
                                     version=1,
                                     input_type="strings",
                                     func=policy,
                                     pytorch_model=model)
    self.clipper_conn.link_model_to_app(app_name="hello-world",
                                        model_name="policy")
def __init__(self, shape, model_name):
    from clipper_admin import ClipperConnection, DockerContainerManager
    from clipper_admin.deployers import python as python_deployer
    from clipper_admin.deployers import pytorch as pytorch_deployer
    self.clipper_conn = ClipperConnection(DockerContainerManager())
    try:
        self.clipper_conn.connect()
        self.clipper_conn.stop_all()
    except Exception:
        pass
    self.clipper_conn.start_clipper()
    self.clipper_conn.register_application(name="hello-world",
                                           input_type="strings",
                                           default_output="-1.0",
                                           slo_micros=10**8)
    ptmodel = get_model(model_name)

    def policy(model, x):
        print(len(x))
        batch = len(x)
        arr = []
        for j in x:
            print(type(j), len(j))
            res = np.frombuffer(base64.decodestring(j), dtype=np.float32)
            print(res.shape)
            arr += [res]
        x = np.array(arr)
        x = x.reshape((-1, ) + shape[1:])
        print("new shape", x.shape)
        return evaluate_model(model, x).reshape((batch, shape[0]))

    pytorch_deployer.deploy_pytorch_model(self.clipper_conn,
                                          name="policy",
                                          version=1,
                                          input_type="strings",
                                          func=policy,
                                          pytorch_model=ptmodel)
    self.clipper_conn.link_model_to_app(app_name="hello-world",
                                        model_name="policy")
def deployModelToClipper():
    """Deploy model to clipper and replace its entry."""
    global app_name, model_name, model_version
    print('Deploying model to clipper, model_name={}, model_version={}'.format(
        model_name, model_version))

    # Setup clipper and deploy model
    clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
    try:
        clipper_conn.start_clipper()
    except:
        clipper_conn.connect()
    try:
        # input_type must be bytes as inputs will be serialized into bytes with pickle
        clipper_conn.register_application(name=app_name,
                                          input_type="bytes",
                                          default_output="-1.0",
                                          slo_micros=1000000)
    except Exception as e:
        print(e)
    try:
        deploy_python_closure(clipper_conn,
                              name=model_name,
                              version=model_version,
                              input_type="bytes",
                              batch_size=1,
                              func=predict,
                              base_image='hysia-clipper-base-container-gpu')
    except Exception as e:
        print(e)
    try:
        clipper_conn.link_model_to_app(app_name=app_name,
                                       model_name=model_name)
    except Exception as e:
        print(e)
    replaceDefaultEntry()
    print('{} deployed to clipper!'.format(model_name))
def setup_clipper():
    app_name = 'predict-app'
    model_name = "predict-model"
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    deploy_python_closure(clipper_conn,
                          name=model_name,
                          version='1',
                          input_type="bytes",
                          func=join_predict)
    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s
    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
    print("url: ",
          "http://{addr}/{app_name}/predict".format(addr="",
                                                    app_name=app_name))
def register(model_name, sess, func):
    """
    Register a TF session with its function.

    Input:
        - model_name: name of the model, string
        - sess: TF session
        - func: the function that runs the TF session
    Return:
        - clipper connection
    """
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    deploy_tensorflow_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1.0',
        input_type='strings',
        func=func,
        tf_sess_or_saved_model_path=sess,
    )
    print(model_name, "registered")
    return clipper_conn
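# A hypothetical usage sketch for register() above. The serving function and
# all names here are illustrative assumptions; the application registration
# and linking mirror the other snippets in this file.
def register_and_link(sess):
    def serve(sess, inputs):
        # Assumed serving function: must return one string result per input.
        return [str(len(x)) for x in inputs]

    conn = register("example-tf-model", sess, serve)
    conn.register_application(name="example-app",
                              input_type="strings",
                              default_output="-1.0",
                              slo_micros=10000000)
    conn.link_model_to_app(app_name="example-app",
                           model_name="example-tf-model")
    return conn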
def setup_clipper():
    app_name = 'resnet101-app'
    model_name = 'resnet101-model'
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=resnet_predict,
        pytorch_model=resnet101,
        pkgs_to_install=['pillow', 'torch', 'torchvision'])
    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s
    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
    print("query_address: ", clipper_conn.get_query_addr())
    print("app_name: ", app_name)
    print("model_name: ", model_name)
    print("url: ",
          "http://{addr}/{app_name}/predict".format(
              addr=clipper_conn.get_query_addr(), app_name=app_name))
    help="The Docker container registry to which to push the freshly built model image"
)
args = parser.parse_args()

arg_errs = []
if not args.model_name:
    arg_errs.append("The name of the model being deployed must be specified!")
if not args.model_version:
    arg_errs.append(
        "The version of the model being deployed must be specified!")
if not args.model_data_path:
    arg_errs.append(
        "The path to the serialized R model data must be specified!")
if len(arg_errs) > 0:
    for err in arg_errs:
        print(err)
    raise ClipperException()

# Note: This container manager is only necessary for
# creating a connection object that can be used to build the model
cm = DockerContainerManager()
conn = ClipperConnection(cm)
conn.build_model(args.model_name, args.model_version, args.model_data_path,
                 CLIPPER_R_CONTAINER_BASE_IMAGE, args.registry)
def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)
model = nn.Linear(3, 1)


def predict(model, inputs):
    inputs = shift(inputs)
    inputs = torch.tensor(inputs).float()
    pred = model(inputs)
    pred = pred.data.numpy()
    return [str(x) for x in pred]


APP_NAME = "test-app"
MODEL_NAME = "test-pytorch-model"

# Setup clipper and deploy pytorch model
clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
try:
    clipper_conn.start_clipper()
    clipper_conn.register_application(name=APP_NAME,
                                      input_type="doubles",
                                      default_output="-1.0",
                                      slo_micros=1000000)
    deploy_pytorch_model(clipper_conn,
                         name=MODEL_NAME,
                         version="1",
                         input_type="doubles",
                         func=predict,
                         pytorch_model=model,
                         pkgs_to_install=pip_deps)
    clipper_conn.link_model_to_app(app_name=APP_NAME, model_name=MODEL_NAME)
except:
def signal_handler(signal, frame):
    logging.info("\nGracefully stopping server...")
    conn = ClipperConnection(DockerContainerManager())
    conn.stop_all()
    sys.exit(0)
from clipper_admin import ClipperConnection, DockerContainerManager

clipper_conn = ClipperConnection(
    DockerContainerManager(extra_container_kwargs={'cpuset_cpus': '0-35'}))
clipper_conn.start_clipper(cache_size=0)
def __del__(self):
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
def get_new_connection_instance(cluster_name, use_centralized_log):
    return ClipperConnection(
        DockerContainerManager(cluster_name=cluster_name,
                               use_centralized_log=use_centralized_log))
parser = argparse.ArgumentParser()
# parser.add_argument('-n', '--num_nodes', type=int, default=3)
parser.add_argument('node_id', type=int)
args = parser.parse_args()
# num_nodes = args.num_nodes
node_id = args.node_id

clipper_conn = ClipperConnection(
    DockerContainerManager(
        cluster_name='clipper_cluster_{}'.format(node_id),
        docker_ip_address='localhost',
        clipper_query_port=1337 + node_id,
        clipper_management_port=2337 + node_id,
        clipper_rpc_port=7000 + node_id,
        redis_ip=None,
        redis_port=6379 + node_id,
        prometheus_port=9090 + node_id,
        # WARNING: DO NOT CHANGE THE RULE OF NETWORK NAMES
        # SINCE THIS IS USED BY reset.sh TO IDENTIFY CLIPPER CONTAINERS
        docker_network='clipper_network_{}'.format(node_id),
        extra_container_kwargs={})
)  # for node_id in range(args.num_nodes)]

try:
    clipper_conn.start_clipper()
    clipper_conn.register_application(name="default",
                                      input_type="string",
                                      default_output="",
                                      slo_micros=100000)
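# A hedged query sketch for the per-node cluster above: each node's query
# frontend listens on port 1337 + node_id, and the "default" application is
# registered with string inputs. The input text is a placeholder.
import json
import requests


def query_node(node_id, text):
    url = "http://localhost:{}/default/predict".format(1337 + node_id)
    r = requests.post(url,
                      headers={"Content-Type": "application/json"},
                      data=json.dumps({"input": text}))
    return r.json()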
        version='1.0',
        input_type='strings',
        func=func,
        tf_sess_or_saved_model_path=sess,
    )
    print(model_name, "registered")
    return clipper_conn


# Stop Clipper on Ctrl-C
def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)


if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    clipper_conn = ClipperConnection(
        DockerContainerManager(use_centralized_log=False))
    clipper_conn.stop_all()
    clipper_conn.start_clipper()
    print('Clipper Started')
    try:
        while True:
            time.sleep(2)
    except Exception as e:
        clipper_conn.stop_all()
    print("done")
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as py_deployer
import random

cl = ClipperConnection(DockerContainerManager())

clipper_url = "http://localhost:1337/pong/predict"
headers = {'Content-Type': 'application/json'}
start = datetime.now()
clipper_response = requests.post(clipper_url,
                                 headers=headers,
                                 data=json.dumps(req_json))
def create_clipper_conn():
    conn = ClipperConnection(DockerContainerManager())
    conn.start_clipper()
    return conn