Example #1
# In[ ]:

class_names[classifier.predict([[5.84, 3.05, 3.76, 1.2]])]

# In[ ]:

import signal
import sys
import time

import numpy as np

from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer

# In[ ]:

clipper_conn = ClipperConnection(DockerContainerManager())

# In[ ]:

try:
    clipper_conn.start_clipper()
except Exception:
    print("Clipper already running")
    clipper_conn.connect()

# In[ ]:

# Register an application called "hello-world". This will create
# a prediction REST endpoint at http://localhost:1337/hello-world/predict
clipper_conn.register_application(name="hello-world",
                                  input_type="doubles",
                                  default_output="-1.0",  # assumed default output
                                  slo_micros=100000)      # assumed 100 ms SLO
def feature_sum(xs):
    return [str(sum(x)) for x in xs]
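

# The __main__ block below calls a predict() client helper that is not defined in
# this snippet. A minimal sketch (assumed), posting JSON queries to the
# "simple-example" REST endpoint created by create_endpoint() below:
import json
import requests


def predict(addr, inputs, batch=False):
    url = "http://{}/simple-example/predict".format(addr)
    payload = {"input_batch": inputs} if batch else {"input": list(inputs)}
    r = requests.post(url, headers={"Content-type": "application/json"},
                      data=json.dumps(payload))
    print(r.json())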


# Stop Clipper on Ctrl-C
def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)


if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.start_clipper()
    python_deployer.create_endpoint(clipper_conn, "simple-example", "doubles",
                                    feature_sum)
    time.sleep(2)

    # For batch inputs set this number > 1
    batch_size = 1

    try:
        while True:
            if batch_size > 1:
                predict(
                    clipper_conn.get_query_addr(),
                    [list(np.random.random(200)) for i in range(batch_size)],
                    batch=True)
            else:
                # assumed single-query path for batch_size == 1
                predict(clipper_conn.get_query_addr(),
                        list(np.random.random(200)))
            time.sleep(0.2)
    except Exception:
        clipper_conn.stop_all()
Example #4
import os
import urllib
import sys
from StringIO import StringIO

from pyspark.sql.functions import *
from pyspark.ml.classification import *
from pyspark.ml.evaluation import *
from pyspark.ml.feature import *
from pyspark.sql import SparkSession

import pandas as pd
import pyspark.ml.feature
from clipper_admin import ClipperConnection, DockerContainerManager

spark = SparkSession \
    .builder \
    .appName("clipper-pyspark") \
    .getOrCreate()
sc = spark.sparkContext

clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.start_clipper()

data = spark.createDataFrame(pd.read_csv('iris.csv', header=None, names=['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']))

# vectorize all numerical columns into a single feature column
feature_cols = data.columns[:-1]
assembler = pyspark.ml.feature.VectorAssembler(inputCols=feature_cols, outputCol='features')
data = assembler.transform(data)

# convert text labels into indices
data = data.select(['features', 'class'])
label_indexer = pyspark.ml.feature.StringIndexer(inputCol='class', outputCol='label').fit(data)
data = label_indexer.transform(data)

# only select the features and label column
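# A plausible continuation (assumed): keep only the assembled features and the
# numeric label, then fit a simple classifier from pyspark.ml.classification.
data = data.select(['features', 'label'])
lr = LogisticRegression(maxIter=10, regParam=0.3)
model = lr.fit(data)
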
# digit_nn_model_concat.py
from clipper_admin import ClipperConnection, DockerContainerManager
clipper_conn = ClipperConnection(DockerContainerManager())

# clipper_conn.start_clipper()
clipper_conn.connect()

clipper_conn.register_application(
    name="query_agent",
    input_type="doubles",
    default_output="-1.0",
    slo_micros=10000000)  # 10,000,000 micros == 10 sec

clipper_conn.get_all_apps()

#################################################
############## Define Own Function ##############
#################################################

# def feature_sum(xs):
# return [str(sum(x)) for x in xs]


def query_agent_function(xs):
    # xs is a list of x; type(x): numpy.ndarray
    import requests, json, time, sys, numpy as np
    headers = {"Content-type": "application/json"}
    clipper_url = "192.168.56.101"  # default: "localhost"
    app_name = "digit"
    results = []
    for x in xs:
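        # Assumed completion: post each input to the "digit" app's REST endpoint
        # (Clipper's default query port 1337) and collect the returned outputs.
        url = "http://{}:1337/{}/predict".format(clipper_url, app_name)
        req_json = json.dumps({"input": [float(v) for v in x]})
        r = requests.post(url, headers=headers, data=req_json)
        results.append(str(r.json()["output"]))
    return results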
Example #6
def create_docker_connection(cleanup=True, start_clipper=True):
    logger.info("Creating DockerContainerManager")
    cm = DockerContainerManager(
        clipper_query_port=find_unbound_port(),
        clipper_management_port=find_unbound_port(),
        clipper_rpc_port=find_unbound_port(),
        redis_port=find_unbound_port())
    cl = ClipperConnection(cm)
    if cleanup:
        cl.stop_all()
        docker_client = get_docker_client()
        docker_client.containers.prune(filters={"label": CLIPPER_DOCKER_LABEL})
    if start_clipper:
        # Try to start Clipper in a retry loop here to address flaky tests
        # as described in https://github.com/ucbrise/clipper/issues/352
        while True:
            try:
                logger.info("Starting Clipper")
                cl.start_clipper()
                time.sleep(1)
                break
            except docker.errors.APIError as e:
                logger.info(
                    "Problem starting Clipper: {}\nTrying again.".format(e))
                cl.stop_all()
                cm = DockerContainerManager(
                    clipper_query_port=find_unbound_port(),
                    clipper_management_port=find_unbound_port(),
                    clipper_rpc_port=find_unbound_port(),
                    redis_port=find_unbound_port())
                cl = ClipperConnection(cm)
    else:
        cl.connect()
    return cl
Example #7
    parser = argparse.ArgumentParser()
    # parser.add_argument('-n', '--num_nodes', type=int, default=3)
    parser.add_argument('node_id', type=int)
    args = parser.parse_args()

    # num_nodes = args.num_nodes
    node_id = args.node_id

    clipper_conn = ClipperConnection(
        DockerContainerManager(
            cluster_name='clipper_cluster_{}'.format(node_id),
            docker_ip_address='localhost',
            clipper_query_port=1337 + node_id,
            clipper_management_port=2337 + node_id,
            clipper_rpc_port=7000 + node_id,
            redis_ip=None,
            redis_port=6379 + node_id,
            prometheus_port=9090 + node_id,
            # WARNING: do not change the network naming rule, since it is
            # used by reset.sh to identify Clipper containers.
            docker_network='clipper_network_{}'.format(node_id),
            extra_container_kwargs={})
    )  # for node_id in range(args.num_nodes)]

    try:
        clipper_conn.start_clipper()
        clipper_conn.register_application(name="default",
                                          input_type="string",
                                          default_output="",
                                          slo_micros=100000)
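    # Assumed completion (the try block above has no handler in this snippet),
    # mirroring the connect-if-already-running pattern used in the other examples:
    except Exception:
        print("Clipper already running; connecting instead")
        clipper_conn.connect()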
Example #8

def pipeline_merge(input_lin, clipper_conn):
    out_lin_1 = predict(clipper_conn.get_query_addr(), "linear1", [input_lin])
    out_lin_2 = predict(clipper_conn.get_query_addr(), "linear2", [out_lin_1])

    out_lin_3 = predict(clipper_conn.get_query_addr(), "linear3", [input_lin])
    out_lin_4 = predict(clipper_conn.get_query_addr(), "linear4", [out_lin_3])

    out_lin_5 = predict(clipper_conn.get_query_addr(), "linear5",
                        [out_lin_2, out_lin_4])
    return out_lin_5


# Set up the Clipper connection
from clipper_admin import ClipperConnection, KubernetesContainerManager
clipper_conn = ClipperConnection(
    KubernetesContainerManager(useInternalIP=True))
clipper_conn.connect()

# rand_input = Lineage(np.random.random_sample())
# output = pipeline_merge(rand_input, clipper_conn)
# print(type(output))
# print(output.val)
# print(output.graph)
# print(output.input_node)
# print(output.used)

# dot = Digraph(comment='Merge Example')
# for n in output.graph.nodes:
# 	dot.node(n)
# for e in output.graph.edges:
# 	dot.edge(e[0], e[1])
Example #9
from clipper_admin import ClipperConnection, ParmDockerContainerManager
clipper_conn = ClipperConnection(ParmDockerContainerManager(), False)
clipper_conn.stop_all()
#!/usr/bin/env python
# Start long-lived Clipper cluster
import os, sys
from clipper_admin import ClipperConnection, KubernetesContainerManager
from clipper_admin.deployers import python as python_deployer

if len(sys.argv) != 3:
    print('Usage: python start-clipper.py clipper-cluster-IP redis-service-IP')
    print("For example, python start-clipper.py 35.197.66.133 10.59.247.82")
    sys.exit(1)

clipper_ip = 'https://' + sys.argv[1]
redis_ip = 'https://' + sys.argv[2]
clipper_conn = ClipperConnection(
    KubernetesContainerManager(clipper_ip, useInternalIP=True))
try:
    clipper_conn.stop_all()
    clipper_conn.stop_all_model_containers()
    clipper_conn.start_clipper()
    clipper_conn.register_application(name="testbed",
                                      input_type="strings",
                                      default_output="-1.0",
                                      slo_micros=100000000)
    clipper_conn.get_all_apps()
except Exception as e:
    print(e)
Example #11
from clipper_admin import ClipperConnection, DockerContainerManager


def create_clipper_conn():
    conn = ClipperConnection(DockerContainerManager())
    conn.start_clipper()
    return conn
Example #12
    "/home/sdn-nfv/Desktop/clipper/clipper_admin"
]

os.environ["PATH"] = os.pathsep.join(pathlist) + os.environ["PATH"]

import logging
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.exceptions import ClipperException
import pickle
from numpy import array
import numpy as np
import requests, json

from sklearn.ensemble import RandomForestClassifier

clipper_conn = ClipperConnection(DockerContainerManager())

# In[25]:

FEATURE_SET = "uid,history,connection_state_description,src_packets,src_bytes,dst_bytes,dIp,sIp,duration,dst_packets,sP,dP"
FEATURE_SET = FEATURE_SET.split(",")

# In[26]:

try:
    clipper_conn.start_clipper()
except Exception:
    print("Clipper already running")
    clipper_conn.connect()

# In[28]:
Example #13
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers.pytorch import deploy_pytorch_model


clipper_conn = ClipperConnection(DockerContainerManager())

try:
    clipper_conn.connect()
except Exception:
    clipper_conn.start_clipper()

clipper_conn.register_application(name="logo-detector", input_type="strings", default_output="no logo", slo_micros=10000000)
clipper_conn.get_all_apps()
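
# A client queries the "logo-detector" endpoint with the image serialized as a
# string. A minimal sketch (assumptions: base64-encoded image bytes, Clipper's
# default query address; query_logo_detector is a hypothetical helper name):
import base64
import json
import requests


def query_logo_detector(addr, img_path):
    with open(img_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    r = requests.post("http://{}/logo-detector/predict".format(addr),
                      headers={"Content-type": "application/json"},
                      data=json.dumps({"input": encoded}))
    return r.json()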

from pathlib import Path
from io import BytesIO

import numpy as np
from PIL import Image
import requests
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torchvision
from torchvision.transforms import transforms

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # pylint: disable=maybe-no-member

CLASSES = [
        'ADAC', 'FCB', 'HP', 'adidas', 'aldi', 'apple', 'becks', 'bmw',
        'carlsberg', 'chimay', 'cocacola', 'corona', 'dhl', 'erdinger', 'esso',
Example #14
import os
import sys
sys.path.insert(0,
                '/Users/xunzhang/Desktop/2018/github/clipper/clipper_admin/')
import tensorflow as tf
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers.tensorflow import deploy_tensorflow_model

cur_dir = os.path.dirname(os.path.abspath(__file__))

app_name = "tf-lr-app"
model_name = "tf-lr-model"

if __name__ == "__main__":
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    clipper_conn.start_clipper()
    clipper_conn.register_application(name=app_name,
                                      input_type="integers",
                                      default_output="rabbit",
                                      slo_micros=100000)
    print(os.path.abspath("data"))
    clipper_conn.build_and_deploy_model(
        name=model_name,
        version=1,
        input_type="integers",
        model_data_path=os.path.abspath("data"),
        base_image="xunzhang/tf_lr_container:latest",
        num_replicas=1)
    clipper_conn.link_model_to_app(app_name, model_name)
    print(clipper_conn.get_clipper_logs())
# clipper_start
from clipper_admin import ClipperConnection, DockerContainerManager
clipper_conn = ClipperConnection(DockerContainerManager())

clipper_conn.start_clipper()
clipper_conn.connect()

clipper_conn.register_application(
    name="digit",
    input_type="doubles",
    default_output="-1.0",
    slo_micros=10000000)  # 10,000,000 micros == 10 sec

clipper_conn.get_all_apps()

#################################################
######### Define Own Prediction Function ########
#################################################

import sklearn
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib

model_path = "../../models/sklearn/"
model_name = "dig_nn_model.sav"
clf = joblib.load(model_path + model_name)


def clf_predict(xs):
    return clf.predict(xs)
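

# The deployment step is not shown above. A minimal sketch (assumed), deploying
# clf_predict as a Python closure and linking it to the registered "digit" app;
# the model name "digit-model" is hypothetical:
from clipper_admin.deployers import python as python_deployer

python_deployer.deploy_python_closure(clipper_conn,
                                      name="digit-model",
                                      version=1,
                                      input_type="doubles",
                                      func=clf_predict,
                                      pkgs_to_install=["scikit-learn"])
clipper_conn.link_model_to_app(app_name="digit", model_name="digit-model")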
Example #16
import argparse
import signal
import sys
import time

from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer


def signal_handler(signal, frame):
    print("Stopping Clipper...")
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    sys.exit(0)
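

# The __main__ block below uses image_size() and query() helpers that are not shown
# in this snippet. Minimal sketches (assumed): image_size() runs inside the deployed
# container and returns one string per input image; query() base64-encodes an image
# file and posts it to the app's REST endpoint.
def image_size(imgs):
    import io
    from PIL import Image
    sizes = []
    for raw in imgs:
        img = Image.open(io.BytesIO(bytes(bytearray(raw))))
        sizes.append("{}x{}".format(*img.size))
    return sizes


def query(addr, filename):
    import base64
    import json
    import requests
    url = "http://{}/image-example/predict".format(addr)
    with open(filename, "rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    r = requests.post(url, headers={"Content-type": "application/json"},
                      data=json.dumps({"input": encoded}))
    print(r.json())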


if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)

    parser = argparse.ArgumentParser(
        description='Use Clipper to Query Images.')
    parser.add_argument('image', nargs='+', help='Path to an image')
    imgs = parser.parse_args().image

    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.start_clipper()
    python_deployer.create_endpoint(clipper_conn=clipper_conn,
                                    name="image-example",
                                    input_type="bytes",
                                    func=image_size,
                                    pkgs_to_install=['pillow'])
    time.sleep(2)
    try:
        for f in imgs:
            if f.endswith('.jpg') or f.endswith('.png'):
                query(clipper_conn.get_query_addr(), f)
    except Exception as e:
        print("Exception while querying: {}".format(e))
    clipper_conn.get_clipper_logs()
    clipper_conn.stop_all()