Example #1
def create_docker_connection(cleanup=True, start_clipper=True):
    logger.info("Creating DockerContainerManager")
    cm = DockerContainerManager(clipper_query_port=find_unbound_port(),
                                clipper_management_port=find_unbound_port(),
                                clipper_rpc_port=find_unbound_port(),
                                redis_port=find_unbound_port())
    cl = ClipperConnection(cm)
    if cleanup:
        cl.stop_all()
        docker_client = get_docker_client()
        docker_client.containers.prune(filters={"label": CLIPPER_DOCKER_LABEL})
    if start_clipper:
        # Try to start Clipper in a retry loop here to address flaky tests
        # as described in https://github.com/ucbrise/clipper/issues/352
        while True:
            try:
                logger.info("Starting Clipper")
                cl.start_clipper()
                time.sleep(1)
                break
            except docker.errors.APIError as e:
                logger.info(
                    "Problem starting Clipper: {}\nTrying again.".format(e))
                cl.stop_all()
                cm = DockerContainerManager(
                    clipper_query_port=find_unbound_port(),
                    clipper_management_port=find_unbound_port(),
                    clipper_rpc_port=find_unbound_port(),
                    redis_port=find_unbound_port())
                cl = ClipperConnection(cm)
    else:
        cl.connect()
    return cl
Example #2
def create_kubernetes_connection(cleanup=False,
                                 start_clipper=False,
                                 connect=False,
                                 with_proxy=False,
                                 num_frontend_replicas=1,
                                 cleanup_name='default-cluster',
                                 new_name='default-cluster',
                                 connect_name='default-cluster',
                                 service_types=None,
                                 namespace='default'):
    logger.info("Creating KubernetesContainerManager")
    cl = None
    assert cleanup or start_clipper or connect, "You must set at least one of {cleanup, start_clipper, connect} to be true."

    if with_proxy:
        kubernetes_proxy_addr = "127.0.0.1:8080"
    else:
        kubernetes_proxy_addr = None

    if cleanup:
        logger.info("Cleaning up Kubernetes Cluster {}".format(cleanup_name))
        cm = KubernetesContainerManager(
            cluster_name=cleanup_name,
            useInternalIP=USE_MINIKUBE,
            service_types=service_types,
            kubernetes_proxy_addr=kubernetes_proxy_addr)
        cl = ClipperConnection(cm)
        cl.stop_all()
        logger.info("Done cleaning up clipper")

    if start_clipper:
        logger.info("Starting up Kubernetes Cluster {}".format(new_name))
        cm = KubernetesContainerManager(
            cluster_name=new_name,
            kubernetes_proxy_addr=kubernetes_proxy_addr,
            namespace=namespace,
            useInternalIP=USE_MINIKUBE,
            service_types=service_types,
            create_namespace_if_not_exists=True)
        cl = ClipperConnection(cm)
        cl.start_clipper(num_frontend_replicas=num_frontend_replicas)

    if connect:
        try:
            cm = KubernetesContainerManager(
                cluster_name=connect_name,
                useInternalIP=USE_MINIKUBE,
                service_types=service_types,
                kubernetes_proxy_addr=kubernetes_proxy_addr)
            cl = ClipperConnection(cm)
            cl.connect()
        except Exception:
            # Swallow connection errors; the caller receives None if no
            # connection could be established.
            pass

    return cl
Example #3
def create_docker_connection(cleanup=False,
                             start_clipper=False,
                             cleanup_name='default-cluster',
                             new_name='default-cluster',
                             use_centralized_log=False):
    logger.info("Creating DockerContainerManager")
    cl = None
    assert cleanup or start_clipper, "You must set at least one of {cleanup, start_clipper} to be true."

    if cleanup:
        logger.info("Cleaning up Docker cluster {}".format(cleanup_name))
        cm = DockerContainerManager(
            cluster_name=cleanup_name,
            clipper_query_port=find_unbound_port(),
            clipper_management_port=find_unbound_port(),
            clipper_rpc_port=find_unbound_port(),
            fluentd_port=find_unbound_port(),
            redis_port=find_unbound_port(),
            prometheus_port=find_unbound_port(),
        )
        cl = ClipperConnection(cm)
        cl.stop_all(graceful=False)

    if start_clipper:
        # Try to start Clipper in a retry loop here to address flaky tests
        # as described in https://github.com/ucbrise/clipper/issues/352
        logger.info("Starting up Docker cluster {}".format(new_name))

        while True:
            cm = DockerContainerManager(
                cluster_name=new_name,
                clipper_query_port=find_unbound_port(),
                clipper_management_port=find_unbound_port(),
                clipper_rpc_port=find_unbound_port(),
                fluentd_port=find_unbound_port(),
                redis_port=find_unbound_port(),
                prometheus_port=find_unbound_port(),
                use_centralized_log=use_centralized_log)
            cl = ClipperConnection(cm)
            try:
                logger.info("Starting Clipper")
                cl.start_clipper()
                time.sleep(1)
                break
            except docker.errors.APIError as e:
                logger.info(
                    "Problem starting Clipper: {}\nTrying again.".format(e))
                cl.stop_all()
    return cl
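Example #4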
def setup_clipper():
    app_name = 'inceptionv3-app'
    model_name = 'inceptionv3-model'
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()

    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=incept_predict,
        pytorch_model=incept,
        num_replicas=10,
        batch_size=1,
        pkgs_to_install=['pillow', 'pyarrow', 'torch', 'torchvision'])

    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s

    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)

    print(
        "url: ", "http://{addr}/{app_name}/predict".format(
            addr=clipper_conn.get_query_addr(), app_name=app_name))
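A hedged client-side sketch for querying the app registered above. It assumes the query frontend is reachable at Clipper's default address (localhost:1337) and that, as in the other examples in this collection, 'bytes' inputs are sent base64-encoded in the JSON "input" field; cat.jpg is a placeholder path.

import base64
import json
import requests

# Read and base64-encode the image, as Clipper's JSON API expects for 'bytes' inputs.
with open("cat.jpg", "rb") as f:
    img = base64.b64encode(f.read()).decode()

headers = {"Content-type": "application/json"}
response = requests.post("http://localhost:1337/inceptionv3-app/predict",
                         headers=headers,
                         data=json.dumps({"input": img}))
print(response.json())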
Example #5
def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(
        KubernetesContainerManager(useInternalIP=True))
    # clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)
Example #6
def main():
    # Setup container manager.
    # k8 = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080",
    #                                 useInternalIP=True)
    # clipper_conn = ClipperConnection(k8)
    swarm = DockerContainerManager()
    clipper_conn = ClipperConnection(swarm)
    clipper_conn.stop_all()
    clipper_conn.start_clipper()

    # Register application.
    clipper_conn.register_application(name="sum-app", 
                                      input_type="doubles", 
                                      default_output="-1.0", 
                                      slo_micros=10000000)

    # Model deployment.
    python_deployer.deploy_python_closure(clipper_conn, 
                                          name="sum-model", 
                                          version=1, 
                                          input_type="doubles", 
                                          func=sum)

    # Link application to model.
    clipper_conn.link_model_to_app(app_name="sum-app", 
                                   model_name="sum-model")

    # Test
    headers = {"Content-type": "application/json"}
    response = requests.post("http://localhost:1337/sum-app/predict", 
                             headers=headers, 
                             data=json.dumps({"input": list(np.random.random(10))})).json()
    print(response)
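Example #7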
def create_kubernetes_connection(cleanup=True,
                                 start_clipper=True,
                                 connect=True,
                                 with_proxy=False,
                                 num_frontend_replicas=1):
    logger.info("Creating KubernetesContainerManager")
    if with_proxy:
        cm = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080")
    else:
        cm = KubernetesContainerManager()
    cl = ClipperConnection(cm)
    if cleanup:
        cl.stop_all()
        # Give kubernetes some time to clean up
        time.sleep(20)
        logger.info("Done cleaning up clipper")
    if start_clipper:
        logger.info("Starting Clipper")
        cl.start_clipper(
            query_frontend_image=
            "568959175238.dkr.ecr.us-west-1.amazonaws.com/clipper/query_frontend:{}".
            format(clipper_version),
            mgmt_frontend_image=
            "568959175238.dkr.ecr.us-west-1.amazonaws.com/clipper/management_frontend:{}".
            format(clipper_version),
            num_frontend_replicas=num_frontend_replicas)
        time.sleep(1)
    if connect:
        try:
            cl.connect()
        except Exception:
            # ClipperException subclasses Exception, so one handler covers both.
            pass
    return cl
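Example #8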
def main(version, label):
    from clipper_admin import ClipperConnection, KubernetesContainerManager
    clipper_conn = ClipperConnection(
        KubernetesContainerManager(useInternalIP=True))
    clipper_conn.connect()
    from clipper_admin.deployers import python as python_deployer
    registry = 'localhost:5000'
    python_deployer.deploy_python_closure(clipper_conn,
                                          name="sum-model",
                                          version=version,
                                          input_type="doubles",
                                          func=feature_sum,
                                          labels=[label],
                                          registry=registry)
Example #9
    def __init__(self, sleep_time):
        from clipper_admin import ClipperConnection, DockerContainerManager
        #from clipper_admin.deployers import python as python_deployer
        from clipper_admin.deployers import pytorch as pt_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        try:
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(name="hello-world",
                                               input_type="strings",
                                               default_output="-1.0",
                                               slo_micros=10**8)
        model = Model()

        def policy(ptmodel, x):
            # Dummy policy: decode the inputs, sleep to simulate work, and
            # return random outputs.
            batch = len(x)
            for j in x:
                xs, masks = j.split("###")
                # base64.decodestring was removed in Python 3.9; decodebytes
                # is its replacement and expects bytes, hence .encode().
                board_arr = np.frombuffer(base64.decodebytes(xs.encode()),
                                          dtype=np.float32).reshape((64, 19, 19, 3))
                mask_arr = np.frombuffer(base64.decodebytes(masks.encode()),
                                         dtype=np.float32).reshape((64, 362))
            for _ in x:
                time.sleep(sleep_time)
            return [
                np.random.rand(64).astype(np.float32) for _ in range(batch)
            ]

        pt_deployer.deploy_pytorch_model(self.clipper_conn,
                                         name="policy",
                                         version=1,
                                         input_type="strings",
                                         func=policy,
                                         pytorch_model=model)

        self.clipper_conn.link_model_to_app(app_name="hello-world",
                                            model_name="policy")
Example #10
    def __init__(self, shape, model_name):
        from clipper_admin import ClipperConnection, DockerContainerManager
        from clipper_admin.deployers import python as python_deployer
        from clipper_admin.deployers import pytorch as pytorch_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        try:
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(name="hello-world",
                                               input_type="strings",
                                               default_output="-1.0",
                                               slo_micros=10**8)
        ptmodel = get_model(model_name)

        def policy(model, x):
            print(len(x))
            batch = (len(x))
            arr = []
            for j in x:
                print(type(j), len(j))
                # decodestring was removed in Python 3.9; decodebytes expects bytes.
                res = np.frombuffer(base64.decodebytes(j.encode()),
                                    dtype=np.float32)
                print(res.shape)
                arr += [res]
            x = np.array(arr)
            x = x.reshape((-1, ) + shape[1:])
            print("new shape", x.shape)
            return evaluate_model(model, x).reshape((batch, shape[0]))

        pytorch_deployer.deploy_pytorch_model(self.clipper_conn,
                                              name="policy",
                                              version=1,
                                              input_type="strings",
                                              func=policy,
                                              pytorch_model=ptmodel)

        self.clipper_conn.link_model_to_app(app_name="hello-world",
                                            model_name="policy")
Example #11
def deployModelToClipper():
    """Deploy model to clipper and replace its entry."""
    global app_name, model_name, model_version

    print('Deploying model to clipper, model_name={}, model_version={}'.format(
        model_name, model_version))

    # Setup clipper and deploy model
    clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
    try:
        clipper_conn.start_clipper()
    except Exception:
        # Clipper is already running; connect to the existing cluster instead.
        clipper_conn.connect()
    try:
        # input_type must be bytes as inputs will be serialized into bytes with pickle
        clipper_conn.register_application(name=app_name,
                                          input_type="bytes",
                                          default_output="-1.0",
                                          slo_micros=1000000)
    except Exception as e:
        print(e)
    try:
        deploy_python_closure(clipper_conn,
                              name=model_name,
                              version=model_version,
                              input_type="bytes",
                              batch_size=1,
                              func=predict,
                              base_image='hysia-clipper-base-container-gpu')
    except Exception as e:
        print(e)
    try:
        clipper_conn.link_model_to_app(app_name=app_name,
                                       model_name=model_name)
    except Exception as e:
        print(e)

    replaceDefaultEntry()
    print('{} deployed to clipper!'.format(model_name))
Example #12
def setup_clipper():

    app_name = 'predict-app'
    model_name = "predict-model"
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()

    deploy_python_closure(clipper_conn,
                          name="predict-model",
                          version='1',
                          input_type="bytes",
                          func=join_predict)

    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s

    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)

    print(
        "url: ", "http://{addr}/{app_name}/predict".format(
            addr=clipper_conn.get_query_addr(), app_name=app_name))
Example #13
def setup_clipper():
  app_name = 'resnet101-app'
  model_name = 'resnet101-model'
  clipper_conn = ClipperConnection(DockerContainerManager())
  clipper_conn.connect()
  
  pytorch_deployer.deploy_pytorch_model(clipper_conn=clipper_conn,
          name=model_name,
          version='1',
          input_type='bytes',
          func=resnet_predict,
          pytorch_model=resnet101,
          pkgs_to_install=['pillow', 'torch', 'torchvision'])

  clipper_conn.register_application(name=app_name,
          input_type="bytes",
          default_output="-1.0",
          slo_micros=10000000)  # 10s

  clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
  print("query_adress: ", clipper_conn.get_query_addr())
  print("app_name: ", )
  print("model_name: ", )
  print("url: ", "http://{addr}/{app_name}/predict".format(addr=clipper_conn.get_query_addr(),app_name=app_name))
Example #14
def register(model_name, sess, func):
    """
    Register a tf session with its function 

    Input: 
    - model_name: name of the model, string
    - sess: TF session
    - func: the function that runs the TF session 

    Return:
    - clipper connection 
    """
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    deploy_tensorflow_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1.0',
        input_type='strings',
        func=func,
        tf_sess_or_saved_model_path=sess,
    )
    print(model_name, "registered")
    return clipper_conn
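Example #15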
    # turn into predictions
    with open('submission.csv') as fh:
        lines = fh.readlines()[1:] # ignore first line
    preds = [line.strip().split(',')[1] for line in lines]
    return preds

# Pickle the prediction function and write it to the model data directory.
s = six.BytesIO()  # pickle output is binary, so BytesIO rather than StringIO
c = CloudPickler(s, 2)
c.dump(libffm)
serialized_prediction_function = s.getvalue()
filepath = 'docker/lib/func.pkl'
with open(filepath, 'wb') as fh:
    fh.write(serialized_prediction_function)

# refresh creds
os.system('gcloud container clusters get-credentials redis-cluster')
os.system('kubectl cluster-info')

clipper_conn = ClipperConnection(KubernetesContainerManager(clipper_ip, useInternalIP=True))
clipper_conn.connect()

# Build model and deploy to clipper
version = int(time.time())
clipper_conn.build_and_deploy_model('ffm', version, 'strings', 'docker/lib', 'clipper/python-closure-container:develop', container_registry='ryanhoque')
# Uncomment the following if first time
#clipper_conn.link_model_to_app(app_name="testbed", model_name="ffm") 

# Log the freshly deployed model version (the version is the deployment timestamp)
print('Successfully deployed model ffm version ' + str(version) + ' to Clipper.')
Example #16
        version='1.0',
        input_type='strings',
        func=func,
        tf_sess_or_saved_model_path=sess,
    )
    print(model_name, "registered")
    return clipper_conn


# Stop Clipper on Ctrl-C
def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)


if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    clipper_conn = ClipperConnection(
        DockerContainerManager(use_centralized_log=False))
    clipper_conn.stop_all()
    clipper_conn.start_clipper()
    print('Clipper Started')
    try:
        while True:
            time.sleep(2)
    except Exception as e:
        clipper_conn.stop_all()

    print("done")
        help=
        "The Docker container registry to which to push the freshly built model image"
    )

    args = parser.parse_args()
    arg_errs = []

    if not args.model_name:
        arg_errs.append(
            "The name of the model being deployed must be specified!")
    if not args.model_version:
        arg_errs.append(
            "The version of the model being deployed must be specified!")
    if not args.model_data_path:
        arg_errs.append(
            "The path to the serialized R model data must be specified!")

    if len(arg_errs) > 0:
        for err in arg_errs:
            print(err)
        raise ClipperException()

    # Note: This container manager is only necessary for
    # creating a connection object that can be used to build the model
    cm = DockerContainerManager()

    conn = ClipperConnection(cm)

    conn.build_model(args.model_name, args.model_version, args.model_data_path,
                     CLIPPER_R_CONTAINER_BASE_IMAGE, args.registry)
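Example #18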
def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)
Example #19
accuracy_op = graph.get_tensor_by_name('Mean_1:0')
oX = graph.get_tensor_by_name('Placeholder:0')
oY = graph.get_tensor_by_name('Placeholder_1:0')


def predict(X):
    print("inputs {}".format(X))
    result = sess.run(load_infer_op, feed_dict={oX: X})
    ret = [str(i) for i in result]
    print("return is {}".format(ret))
    return ret


manager = KubernetesContainerManager(kubernetes_proxy_addr=K8S_ADDR,
                                     namespace=K8S_NS)
clipper_conn = ClipperConnection(manager)
clipper_conn.connect()

# clipper_conn.delete_application(APP_NAME)
# clipper_conn.register_application(
#   name = APP_NAME, input_type = 'doubles', default_output = '0', slo_micros = 100000000)

deploy_tensorflow_model(clipper_conn,
                        name=PREDICT_NAME,
                        version=VERSION,
                        input_type="doubles",
                        func=predict,
                        tf_sess_or_saved_model_path=sess,
                        registry=REGISTRY,
                        pkgs_to_install=['tensorflow'])
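Example #20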
model = nn.Linear(3, 1)


def predict(model, inputs):
    inputs = shift(inputs)
    inputs = torch.tensor(inputs).float()
    pred = model(inputs)
    pred = pred.data.numpy()
    return [str(x) for x in pred]


APP_NAME = "test-app"
MODEL_NAME = "test-pytorch-model"

# Setup clipper and deploy pytorch model
clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
try:
    clipper_conn.start_clipper()
    clipper_conn.register_application(name=APP_NAME,
                                      input_type="doubles",
                                      default_output="-1.0",
                                      slo_micros=1000000)
    deploy_pytorch_model(clipper_conn,
                         name=MODEL_NAME,
                         version="1",
                         input_type="doubles",
                         func=predict,
                         pytorch_model=model,
                         pkgs_to_install=pip_deps)
    clipper_conn.link_model_to_app(app_name=APP_NAME, model_name=MODEL_NAME)
except Exception as e:
    # The snippet is truncated here; log the error as a minimal handler.
    print(e)
Example #21
def get_new_connection_instance(cluster_name, use_centralized_log):
    return ClipperConnection(
        DockerContainerManager(cluster_name=cluster_name,
                               use_centralized_log=use_centralized_log))
Example #22
from clipper_admin import ClipperConnection, DockerContainerManager

clipper_conn = ClipperConnection(DockerContainerManager(extra_container_kwargs={'cpuset_cpus':'0-35'}))
clipper_conn.start_clipper(cache_size=0)
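A note on the flags above: extra_container_kwargs={'cpuset_cpus': '0-35'} is passed through to Docker and pins the Clipper containers to CPU cores 0-35, which keeps scheduling stable for benchmarking, while cache_size=0 disables the query frontend's prediction cache so every request actually reaches the model.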
Example #23
from clipper_admin import ClipperConnection, KubernetesContainerManager
from clipper_admin.deployers import python as python_deployer
import requests, json, numpy as np
import time

clipper_conn = ClipperConnection(
    KubernetesContainerManager("https://35.197.66.133", useInternalIP=True))

clipper_conn.connect()
addr = clipper_conn.get_query_addr()
headers = {"Content-type": "application/json"}

# sample from 5 arbitrary valid feature vectors

possible_data = [
    "27,17,45,28,2,28,27,29,28,1,1,,23,68fd1e64,960c983b,9fbfbfd5,38c11726,25c83c98,7e0ccccf,fe06fd10,062b5529,a73ee510,ca53fc84,67360210,895d8bbb,4f8e2224,f862f261,b4cc2435,4c0041e5,e5ba7672,b4abdd09,21ddcdc9,5840adea,36a7ab86,,32c7478e,85e4d73f,010f6491,ee63dd9b",
    "1,1,19,7,1,3,1,7,7,1,1,,2,09ca0b81,8947f767,a87e61f7,c4ba2a67,25c83c98,7e0ccccf,ce6020cc,062b5529,a73ee510,b04d3cfe,70dcd184,899eb56b,aca22cf9,b28479f6,a473257f,88f592e4,d4bb7bd8,bd17c3da,1d04f4a4,a458ea53,82bdc0bb,,32c7478e,5bdcd9c4,010f6491,cca57dcc",
    "8,11,38,9,316,25,8,11,10,1,1,,9,05db9164,09e68b86,aa8c1539,85dd697c,25c83c98,7e0ccccf,bc252bd0,5b392875,a73ee510,ef5c0d3c,0bd0c3b3,d8c29807,c0e6befc,8ceecbc8,d2f03b75,c64d548f,e5ba7672,63cdbb21,cf99e5de,5840adea,5f957280,,55dd3565,1793a828,e8b83407,b7d9c3bc",
    ",4,13,20,17700,,0,20,1,,0,,20,68fd1e64,08d6d899,9143c832,f56b7dd5,0942e0a7,7e0ccccf,e88f1cec,0b153874,a73ee510,3b08e48b,8f410860,ae1bb660,b8eec0b1,b28479f6,bffbd637,bad5ee18,776ce399,bbf70d82,,,0429f84b,,be7c41b4,c0d61a5c,,",
    "16,18,5203,8,0,0,4,49,10,0,1,,0,05db9164,9f7e1d07,0253bbf5,d6420627,4cf72387,,0db090eb,0b153874,a73ee510,3b08e48b,10e6a64f,31adfaee,38b5339a,07d13a8f,3e25e5f5,1621c7f4,e5ba7672,6a58e423,21ddcdc9,5840adea,bcc7a461,,32c7478e,3214afd4,ea9a246c,e7ecb821"
]

fh = open('predictions.log', 'a')
while True:
    # poisson process with rate 0.1
    interarrival_time = np.random.exponential(scale=10)
    time.sleep(interarrival_time)
    num_requests = int(np.random.random() * 10) + 1  # uniform between 1 and 10
    inputs = []
    for _ in range(num_requests):
        inputs.append(possible_data[int(np.random.random() * 5)])
Example #24
def frontend(args):
    print(args)
    batch_size = args.batch_size
    num_models = args.num_models
    func = predict
    red_func = predict

    assert args.redundancy_mode in ["none", "equal"]
    redundancy_mode = 0

    if args.queue_mode == "single_queue":
        queue_mode = 0
    elif args.queue_mode == "rr":
        queue_mode = 1
    else:
        assert False, "Unrecognized queue mode '{}'".format(args.queue_mode)

    model_instance_ip_port = []
    red_model_instance_ip_port = []
    cur_port = base_port
    if num_models < len(args.f_ips):
        # Round to the nearest int so as not to launch more models than needed.
        num_between = int(len(args.f_ips) / num_models + 0.5)
        chosen_indices = list(range(0, len(args.f_ips), num_between))
        print("Range is", chosen_indices)

        # Shift our chosen indices so that they are evenly distributed
        # throughout the clients.
        delta = len(args.f_ips) - chosen_indices[-1]
        shift = delta // 2
        if len(args.f_ips) == 15:
            shift += 1
        chosen_indices = [i + shift for i in chosen_indices]
        print("Shifted range is", chosen_indices)
        for i in chosen_indices:
            model_instance_ip_port.append((args.f_ips[i], cur_port))

    else:
        for i in range(num_models):
            model_instance_ip_port.append(
                    (args.f_ips[i % len(args.f_ips)], cur_port))

            # Wrap around to the next port number if we will need to repeat workers.
            if i % len(args.f_ips) == len(args.f_ips) - 1:
                cur_port += 1

    print("Model instance ip, port:", model_instance_ip_port)
    client_ip_port = []
    if len(args.f_client_ports) != len(args.f_client_ips):
        assert len(args.f_client_ports) == 1
        args.f_client_ports *= len(args.f_client_ips)
    client_ip_port = [(ip, int(port)) for ip, port in zip(args.f_client_ips, args.f_client_ports)]
    cm = DistributedParmDockerContainerManager(
        model_instance_ip_port=model_instance_ip_port,
        red_model_instance_ip_port=red_model_instance_ip_port,
        client_ip_port=client_ip_port)
    clipper_conn = ClipperConnection(cm, distributed=True)
    frontend_args = {
        "redundancy_mode": redundancy_mode,
        "queue_mode": queue_mode,
        "num_models": num_models,
        "num_redundant_models": 0,
        "batch_size": batch_size,
        "mode": args.f_mode,
    }

    clipper_conn.start_clipper(frontend_args=frontend_args)

    red_input_type = "bytes"
    pytorch_deployer.create_endpoint(
            clipper_conn=clipper_conn,
            name="bg",
            input_type="bytes",
            func=func,
            pytorch_model=model,
            pkgs_to_install=['pillow'],
            num_replicas=num_models,
            batch_size=batch_size,
            num_red_replicas=0,
            red_func=red_func,
            red_input_type=red_input_type,
            red_pytorch_model=red_model,
            prefer_original=False,
            slo_micros=10000000 * 10)

    sleep_time = 5
    print("Sleeping for", sleep_time, "seconds to let things start up")
    time.sleep(sleep_time)

    cm.run_clients(wait=False)

    # Listen on a Unix domain socket to determine when we should quit.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind('/home/ubuntu/bg_sock')
    sock.listen(5)
    (clientsocket, address) = sock.accept()

    print("Stopping all clients")
    cm.stop_all_clients()

    print("Sending response")
    clientsocket.sendall('1'.encode())
    clientsocket.close()
    sock.close()

    print("Stopping all")
    clipper_conn.stop_all()
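Example #25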
X = ckd.iloc[:300, ]
X = X.loc[:, X.columns != 'classification']
X.fillna(0, inplace=True)
y = ckd.iloc[:300, -1]

# train a classifier
model = GradientBoostingClassifier(random_state=2019)
model.fit(X, y)

# First we need to import Clipper
from clipper_admin import ClipperConnection, KubernetesContainerManager
from clipper_admin.deployers.python import deploy_python_closure

# Create a Clipper connection
clipper_conn = ClipperConnection(
    KubernetesContainerManager(useInternalIP=True,
                               kubernetes_proxy_addr="127.0.0.1:8080"))

# Start a Clipper cluster or connect to a running one
clipper_conn.start_clipper()

# Register an app called 'kddtutorial'. This creates a REST endpoint.
clipper_conn.register_application(name="kddtutorial",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=10000000)


# Access the trained model via closure capture
def predict(inputs):
    global model
Example #26
    parser = argparse.ArgumentParser()
    # parser.add_argument('-n', '--num_nodes', type=int, default=3)
    parser.add_argument('node_id', type=int)
    args = parser.parse_args()

    # num_nodes = args.num_nodes
    node_id = args.node_id

    clipper_conn = ClipperConnection(
        DockerContainerManager(
            cluster_name='clipper_cluster_{}'.format(node_id),
            docker_ip_address='localhost',
            clipper_query_port=1337 + node_id,
            clipper_management_port=2337 + node_id,
            clipper_rpc_port=7000 + node_id,
            redis_ip=None,
            redis_port=6379 + node_id,
            prometheus_port=9090 + node_id,
            # WARNING: DO NOT CHANGE THE NETWORK NAMING RULE, SINCE IT IS
            # USED BY reset.sh TO IDENTIFY CLIPPER CONTAINERS.
            docker_network='clipper_network_{}'.format(node_id),
            extra_container_kwargs={})
    )  # for node_id in range(args.num_nodes)]

    try:
        clipper_conn.start_clipper()
        clipper_conn.register_application(name="default",
                                          input_type="string",
                                          default_output="",
                                          slo_micros=100000)
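Example #27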
    def __del__(self):
        clipper_conn = ClipperConnection(DockerContainerManager())
        clipper_conn.stop_all()
Example #28
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as py_deployer
from datetime import datetime
import json
import random
import requests

cl = ClipperConnection(DockerContainerManager())
clipper_url = "http://localhost:1337/pong/predict"

headers = {'Content-Type': 'application/json'}
start = datetime.now()
clipper_response = requests.post(clipper_url,
                                 headers=headers,
                                 data=json.dumps(req_json))
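Example #29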
def frontend(args):
    print(args)
    batch_size = args.batch_size
    num_models = args.num_models
    num_redundant_models = args.num_redundant_models
    func = predict
    red_func = predict

    if args.redundancy_mode == "none" or args.redundancy_mode == "equal":
        redundancy_mode = 0
    elif args.redundancy_mode == "coded":
        redundancy_mode = 2
    elif args.redundancy_mode == "cheap":
        redundancy_mode = 3
    else:
        assert False, "Unrecognized redundancy mode '{}'".format(
            args.redundancy_mode)

    if args.queue_mode == "single_queue":
        queue_mode = 0
        single_queue = True
    else:
        # Round robin
        queue_mode = 1
        single_queue = False

    assert len(args.f_ips) == num_models + num_redundant_models
    if len(args.f_ports) != len(args.f_ips):
        assert len(args.f_ports) == 1
        args.f_ports *= len(args.f_ips)

    model_instance_ip_port = []
    red_model_instance_ip_port = []
    for i in range(len(args.f_ips)):
        if i < num_models:
            model_instance_ip_port.append(
                (args.f_ips[i], int(args.f_ports[i])))
        else:
            red_model_instance_ip_port.append(
                (args.f_ips[i], int(args.f_ports[i])))

    client_ip_port = []
    if len(args.f_client_ports) != len(args.f_client_ips):
        assert len(args.f_client_ports) == 1
        args.f_client_ports *= len(args.f_client_ips)
    client_ip_port = [
        (ip, int(port))
        for ip, port in zip(args.f_client_ips, args.f_client_ports)
    ]
    cm = DistributedParmDockerContainerManager(
        model_instance_ip_port=model_instance_ip_port,
        red_model_instance_ip_port=red_model_instance_ip_port,
        client_ip_port=client_ip_port)
    clipper_conn = ClipperConnection(cm, distributed=True)
    frontend_args = {
        "redundancy_mode": redundancy_mode,
        "queue_mode": queue_mode,
        "num_models": num_models,
        "num_redundant_models": num_redundant_models,
        "batch_size": batch_size,
        "mode": args.f_mode
    }

    clipper_conn.start_clipper(frontend_args=frontend_args)

    if args.redundancy_mode == "coded":
        red_input_type = "floats"
    else:
        red_input_type = "bytes"

    pytorch_deployer.create_endpoint(clipper_conn=clipper_conn,
                                     name="example",
                                     input_type="bytes",
                                     func=func,
                                     pytorch_model=model,
                                     pkgs_to_install=['pillow'],
                                     num_replicas=num_models,
                                     batch_size=batch_size,
                                     num_red_replicas=num_redundant_models,
                                     red_func=red_func,
                                     red_input_type=red_input_type,
                                     red_pytorch_model=red_model,
                                     prefer_original=False,
                                     slo_micros=10000000 * 10)

    sleep_time = 5
    print("Sleeping for", sleep_time, "seconds to let things start up")
    time.sleep(sleep_time)

    total_time = cm.run_clients()
    print(total_time)

    with open(args.f_outfile, 'w') as outfile:
        outfile.write("{:.4f}".format(total_time))

    clipper_conn.stop_all()
Example #30
def signal_handler(signal, frame):
    logging.info("\nGracefully stopping server...")
    conn = ClipperConnection(DockerContainerManager())
    conn.stop_all()
    sys.exit(0)
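A minimal wiring sketch for the handler above, mirroring the registration pattern in Example #16; the import list is an assumption, since the original snippet does not show it.

# Wiring sketch (mirrors Example #16); the imports are assumed.
import logging
import signal
import sys

from clipper_admin import ClipperConnection, DockerContainerManager

signal.signal(signal.SIGINT, signal_handler)  # invoke the handler on Ctrl-C
signal.pause()  # block until a signal arrives (Unix only)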