def create_docker_connection(cleanup=True, start_clipper=True):
    """Build a ClipperConnection backed by a DockerContainerManager on fresh ports.

    Args:
        cleanup: if True, stop any running Clipper deployment and prune its
            labeled containers before (optionally) starting a new one.
        start_clipper: if True, start Clipper (retrying on Docker API errors);
            otherwise just connect to an already-running deployment.

    Returns:
        A ClipperConnection bound to the (possibly freshly started) cluster.
    """
    logger.info("Creating DockerContainerManager")

    def _fresh_connection():
        # Each attempt gets brand-new unbound ports to dodge collisions.
        manager = DockerContainerManager(
            clipper_query_port=find_unbound_port(),
            clipper_management_port=find_unbound_port(),
            clipper_rpc_port=find_unbound_port(),
            redis_port=find_unbound_port())
        return ClipperConnection(manager)

    cl = _fresh_connection()
    if cleanup:
        cl.stop_all()
        docker_client = get_docker_client()
        docker_client.containers.prune(
            filters={"label": CLIPPER_DOCKER_LABEL})
    if start_clipper:
        # Retry loop to address flaky tests, see
        # https://github.com/ucbrise/clipper/issues/352
        while True:
            try:
                logger.info("Starting Clipper")
                cl.start_clipper()
                time.sleep(1)
                break
            except docker.errors.APIError as e:
                logger.info(
                    "Problem starting Clipper: {}\nTrying again.".format(e))
                cl.stop_all()
                # Rebuild the connection on new ports before retrying.
                cl = _fresh_connection()
    else:
        cl.connect()
    return cl
def setup_clipper():
    """Deploy the InceptionV3 PyTorch model to a running Clipper cluster.

    Connects to an existing local Docker deployment, deploys the model with
    10 replicas, registers the application with a 10 s SLO, links the two,
    and prints the predict endpoint URL.
    """
    app_name = 'inceptionv3-app'
    model_name = 'inceptionv3-model'

    conn = ClipperConnection(DockerContainerManager())
    conn.connect()

    # Inputs arrive as raw bytes (serialized images).
    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=incept_predict,
        pytorch_model=incept,
        num_replicas=10,
        batch_size=1,
        pkgs_to_install=['pillow', 'pyarrow', 'torch', 'torchvision'])

    conn.register_application(
        name=app_name,
        input_type="bytes",
        default_output="-1.0",
        slo_micros=10000000)  # 10s
    conn.link_model_to_app(app_name=app_name, model_name=model_name)

    print(
        "url: ",
        "http://{addr}/{app_name}/predict".format(addr="", app_name=app_name))
def create_kubernetes_connection(cleanup=True, start_clipper=True, connect=True, with_proxy=False, num_frontend_replicas=1):
    """Create a ClipperConnection backed by a KubernetesContainerManager.

    Args:
        cleanup: stop any existing Clipper deployment first.
        start_clipper: start a new Clipper deployment from the ECR images.
        connect: attempt to connect (best-effort; failures are logged).
        with_proxy: route through a local kubectl proxy at 127.0.0.1:8080.
        num_frontend_replicas: number of query-frontend replicas to start.

    Returns:
        The ClipperConnection (connected only if `connect` succeeded).
    """
    logger.info("Creating KubernetesContainerManager")
    if with_proxy:
        cm = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080")
    else:
        cm = KubernetesContainerManager()
    cl = ClipperConnection(cm)
    if cleanup:
        cl.stop_all()
        # Give kubernetes some time to clean up
        time.sleep(20)
        logger.info("Done cleaning up clipper")
    if start_clipper:
        logger.info("Starting Clipper")
        cl.start_clipper(
            query_frontend_image=
            "568959175238.dkr.ecr.us-west-1.amazonaws.com/clipper/query_frontend:{}".
            format(clipper_version),
            mgmt_frontend_image=
            "568959175238.dkr.ecr.us-west-1.amazonaws.com/clipper/management_frontend:{}".
            format(clipper_version),
            num_frontend_replicas=num_frontend_replicas)
        time.sleep(1)
    if connect:
        # BUG FIX: the ClipperException handler used to appear AFTER the
        # generic `except Exception` handler, which made it unreachable
        # (ClipperException is caught by Exception first). The specific
        # handler now comes first, and failures are logged instead of being
        # silently swallowed; connect remains best-effort.
        try:
            cl.connect()
        except ClipperException as e:
            logger.info("Failed to connect to Clipper: {}".format(e))
        except Exception as e:
            logger.info("Failed to connect to Clipper: {}".format(e))
    return cl
def create_kubernetes_connection(cleanup=False, start_clipper=False, connect=False, with_proxy=False, num_frontend_replicas=1, cleanup_name='default-cluster', new_name='default-cluster', connect_name='default-cluster', service_types=None, namespace='default'):
    """Clean up, start, and/or connect to a Kubernetes-backed Clipper cluster.

    At least one of `cleanup`, `start_clipper`, or `connect` must be True.
    Each phase may target a different cluster name (`cleanup_name`,
    `new_name`, `connect_name`).

    Returns:
        The ClipperConnection from the last phase executed (connect is
        best-effort; its failures are swallowed).
    """
    logger.info("Creating KubernetesContainerManager")
    cl = None
    assert cleanup or start_clipper or connect, "You must set at least one of {cleanup, start_clipper, connect} to be true."

    kubernetes_proxy_addr = "127.0.0.1:8080" if with_proxy else None

    def _manager(cluster, **extra):
        # All three phases share the same base manager configuration.
        return KubernetesContainerManager(
            cluster_name=cluster,
            useInternalIP=USE_MINIKUBE,
            service_types=service_types,
            kubernetes_proxy_addr=kubernetes_proxy_addr,
            **extra)

    if cleanup:
        logger.info("Cleaning up Kubernetes Cluster {}".format(cleanup_name))
        cl = ClipperConnection(_manager(cleanup_name))
        cl.stop_all()
        logger.info("Done cleaning up clipper")

    if start_clipper:
        logger.info("Starting up Kubernetes Cluster {}".format(new_name))
        cl = ClipperConnection(
            _manager(
                new_name,
                namespace=namespace,
                create_namespace_if_not_exists=True))
        cl.start_clipper(num_frontend_replicas=num_frontend_replicas)

    if connect:
        try:
            cl = ClipperConnection(_manager(connect_name))
            cl.connect()
        except Exception:
            # Best-effort connect: leave cl as-is on failure.
            pass

    return cl
def main(version, label):
    """Deploy the `feature_sum` closure to a Kubernetes-backed Clipper.

    Args:
        version: model version string/number to register.
        label: single label attached to the deployed model.
    """
    from clipper_admin import ClipperConnection, KubernetesContainerManager

    conn = ClipperConnection(KubernetesContainerManager(useInternalIP=True))
    conn.connect()

    # Import the deployer only once we are connected (matches original order).
    from clipper_admin.deployers import python as python_deployer

    local_registry = 'localhost:5000'
    python_deployer.deploy_python_closure(
        conn,
        name="sum-model",
        version=version,
        input_type="doubles",
        func=feature_sum,
        labels=[label],
        registry=local_registry)
class Clip(object):
    """Stands up a local Clipper cluster and deploys a synthetic 'policy'
    model that simulates per-request latency via `time.sleep`.

    NOTE(review): reconstructed from flattened source; confirm the sleep
    loop's nesting relative to the decode loop against the original layout.
    """

    def __init__(self, sleep_time):
        from clipper_admin import ClipperConnection, DockerContainerManager
        from clipper_admin.deployers import pytorch as pt_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        try:
            # Tear down any previous deployment; ignore failures if none exists.
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(
            name="hello-world",
            input_type="strings",
            default_output="-1.0",
            slo_micros=10**8)
        model = Model()

        def policy(ptmodel, x):
            """Decode each request (board tensors + masks), sleep to simulate
            work, and return random policy vectors — outputs are synthetic."""
            batch = len(x)
            for j in x:
                xs, masks = j.split("###")
                # BUG FIX: base64.decodestring was deprecated and removed in
                # Python 3.9; base64.decodebytes is the exact replacement.
                # The decoded values are intentionally discarded (res is
                # overwritten) — the decode itself is the simulated work.
                res = np.frombuffer(
                    base64.decodebytes(xs), dtype=np.float32)
                res = res.reshape((64, 19, 19, 3))
                res = np.frombuffer(
                    base64.decodebytes(masks), dtype=np.float32)
                res = res.reshape((64, 362))
            for i in x:
                time.sleep(sleep_time)
            return [
                np.random.rand(64).astype(np.float32) for i in range(batch)
            ]

        pt_deployer.deploy_pytorch_model(
            self.clipper_conn,
            name="policy",
            version=1,
            input_type="strings",
            func=policy,
            pytorch_model=model)
        self.clipper_conn.link_model_to_app(
            app_name="hello-world", model_name="policy")
class Clip(object):
    """Stands up a local Clipper cluster and deploys a PyTorch 'policy'
    model that evaluates batched, base64-encoded float32 inputs."""

    def __init__(self, shape, model_name):
        from clipper_admin import ClipperConnection, DockerContainerManager
        from clipper_admin.deployers import pytorch as pytorch_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        try:
            # Tear down any previous deployment; ignore failures if none exists.
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(
            name="hello-world",
            input_type="strings",
            default_output="-1.0",
            slo_micros=10**8)
        ptmodel = get_model(model_name)

        def policy(model, x):
            """Decode each base64 payload into float32, stack into a batch of
            `shape`, and run the model; returns (batch, shape[0]) outputs."""
            print(len(x))
            batch = len(x)
            arr = []
            for j in x:
                print(type(j), len(j))
                # BUG FIX: base64.decodestring was deprecated and removed in
                # Python 3.9; base64.decodebytes is the exact replacement.
                res = np.frombuffer(base64.decodebytes(j), dtype=np.float32)
                print(res.shape)
                arr += [res]
            x = np.array(arr)
            x = x.reshape((-1, ) + shape[1:])
            print("new shape", x.shape)
            return evaluate_model(model, x).reshape((batch, shape[0]))

        pytorch_deployer.deploy_pytorch_model(
            self.clipper_conn,
            name="policy",
            version=1,
            input_type="strings",
            func=policy,
            pytorch_model=ptmodel)
        self.clipper_conn.link_model_to_app(
            app_name="hello-world", model_name="policy")
def deployModelToClipper():
    """Deploy model to clipper and replace its entry.

    Best-effort idempotent deployment: each step (register, deploy, link)
    is attempted independently and failures are printed so re-running
    against an existing deployment does not abort.
    """
    global app_name, model_name, model_version
    print('Deploying model to clipper, model_name={}, model_version={}'.format(
        model_name, model_version))
    # Setup clipper and deploy model
    clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
    try:
        clipper_conn.start_clipper()
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. Narrowed to Exception; semantics unchanged for the
    # intended case (Clipper already running -> just connect).
    except Exception:
        clipper_conn.connect()
    try:
        # input_type must be bytes as inputs will be serialized into bytes with pickle
        clipper_conn.register_application(
            name=app_name,
            input_type="bytes",
            default_output="-1.0",
            slo_micros=1000000)
    except Exception as e:
        print(e)
    try:
        deploy_python_closure(
            clipper_conn,
            name=model_name,
            version=model_version,
            input_type="bytes",
            batch_size=1,
            func=predict,
            base_image='hysia-clipper-base-container-gpu')
    except Exception as e:
        print(e)
    try:
        clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
    except Exception as e:
        print(e)
    replaceDefaultEntry()
    print('{} deployed to clipper!'.format(model_name))
def setup_clipper():
    """Deploy the `join_predict` closure to a running Clipper cluster.

    Connects to an existing local Docker deployment, deploys the closure,
    registers the application with a 10 s SLO, links them, and prints the
    predict endpoint URL.
    """
    app_name = 'predict-app'
    model_name = "predict-model"
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    # CONSISTENCY FIX: use the model_name variable instead of repeating the
    # "predict-model" literal (same value, single source of truth).
    deploy_python_closure(
        clipper_conn,
        name=model_name,
        version='1',
        input_type="bytes",
        func=join_predict)
    clipper_conn.register_application(
        name=app_name,
        input_type="bytes",
        default_output="-1.0",
        slo_micros=10000000)  # 10s
    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
    print(
        "url: ",
        "http://{addr}/{app_name}/predict".format(addr="", app_name=app_name))
def setup_clipper():
    """Deploy the ResNet-101 PyTorch model to a running Clipper cluster.

    Connects to an existing local Docker deployment, deploys the model,
    registers the application with a 10 s SLO, links them, and prints the
    query address and predict endpoint URL.
    """
    app_name = 'resnet101-app'
    model_name = 'resnet101-model'
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=resnet_predict,
        pytorch_model=resnet101,
        pkgs_to_install=['pillow', 'torch', 'torchvision'])
    clipper_conn.register_application(
        name=app_name,
        input_type="bytes",
        default_output="-1.0",
        slo_micros=10000000)  # 10s
    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
    print("query_adress: ", clipper_conn.get_query_addr())
    # BUG FIX: these two prints omitted the values they were meant to show
    # (they printed only the label with a dangling trailing comma).
    print("app_name: ", app_name)
    print("model_name: ", model_name)
    print("url: ",
          "http://{addr}/{app_name}/predict".format(
              addr=clipper_conn.get_query_addr(), app_name=app_name))
def register(model_name, sess, func):
    """
    Register a tf session with its function
    Input:
    - model_name: name of the model, string
    - sess: TF session
    - func: the function that runs the TF session
    Return:
    - clipper connection
    """
    conn = ClipperConnection(DockerContainerManager())
    conn.connect()
    # Version is fixed at '1.0'; inputs are serialized strings.
    deploy_tensorflow_model(
        clipper_conn=conn,
        name=model_name,
        version='1.0',
        input_type='strings',
        func=func,
        tf_sess_or_saved_model_path=sess)
    print(model_name, "registered")
    return conn
# turn into predictions with open('submission.csv') as fh: lines = fh.readlines()[1:] # ignore first line preds = [line.strip().split(',')[1] for line in lines] return preds # pickle function and write to appropriate location s = six.StringIO() c = CloudPickler(s, 2) c.dump(libffm) serialized_prediction_function = s.getvalue() filepath = 'docker/lib/func.pkl' with open(filepath, 'w') as fh: fh.write(serialized_prediction_function) # refresh creds os.system('gcloud container clusters get-credentials redis-cluster') os.system('kubectl cluster-info') clipper_conn = ClipperConnection(KubernetesContainerManager(clipper_ip, useInternalIP=True)) clipper_conn.connect() # Build model and deploy to clipper version = int(time.time()) clipper_conn.build_and_deploy_model('ffm', version, 'strings', 'docker/lib', 'clipper/python-closure-container:develop', container_registry='ryanhoque') # Uncomment the following if first time #clipper_conn.link_model_to_app(app_name="testbed", model_name="ffm") # finally deploy new version of model to clipper (set version as timestamp) print('Successfully deployed model ffm version ' + str(version) + ' to Clipper.')