Пример #1
0
def main():
    """Start a local Clipper cluster, deploy a summing model, and issue one test query."""
    # Docker backend; a Kubernetes manager could be swapped in instead:
    # manager = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080",
    #                                      useInternalIP=True)
    manager = DockerContainerManager()
    conn = ClipperConnection(manager)
    conn.stop_all()
    conn.start_clipper()

    # Register the serving application (10 s latency objective).
    conn.register_application(name="sum-app",
                              input_type="doubles",
                              default_output="-1.0",
                              slo_micros=10000000)

    # Deploy the builtin `sum` as the model closure.
    python_deployer.deploy_python_closure(conn,
                                          name="sum-model",
                                          version=1,
                                          input_type="doubles",
                                          func=sum)

    # Route requests for "sum-app" to "sum-model".
    conn.link_model_to_app(app_name="sum-app", model_name="sum-model")

    # Smoke-test the deployment with a random input vector.
    payload = json.dumps({"input": list(np.random.random(10))})
    reply = requests.post("http://localhost:1337/sum-app/predict",
                          headers={"Content-type": "application/json"},
                          data=payload).json()
    print(reply)
Пример #2
0
def setup_clipper():
    """Deploy InceptionV3 to an already-running Clipper cluster and expose it as an app."""
    app_name = 'inceptionv3-app'
    model_name = 'inceptionv3-model'
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()

    # Deploy the model first so the link below can succeed immediately.
    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=incept_predict,
        pytorch_model=incept,
        num_replicas=10,
        batch_size=1,
        pkgs_to_install=['pillow', 'pyarrow', 'torch', 'torchvision'])

    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s

    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)

    # Bug fix: the address was hard-coded to "" which printed a broken URL;
    # use the query-frontend address reported by the connection instead.
    print(
        "url: ", "http://{addr}/{app_name}/predict".format(
            addr=clipper_conn.get_query_addr(), app_name=app_name))
Пример #3
0
class Clip(object):
    """Start Clipper locally and deploy a dummy "policy" model that sleeps per input.

    The deployed closure decodes base64 payloads of the form
    "<boards>###<masks>" but discards the decoded arrays and returns random
    vectors, so it is only useful for measuring serving latency with a
    configurable per-input delay.
    """

    def __init__(self, sleep_time):
        from clipper_admin import ClipperConnection, DockerContainerManager
        from clipper_admin.deployers import pytorch as pt_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        # Best-effort teardown of any previous cluster before starting fresh.
        try:
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(name="hello-world",
                                               input_type="strings",
                                               default_output="-1.0",
                                               slo_micros=10**8)
        model = Model()

        def policy(ptmodel, x):
            batch = len(x)
            for j in x:
                xs, masks = j.split("###")
                # Bug fix: base64.decodestring was removed in Python 3.9;
                # b64decode accepts both str and bytes input.
                res = np.frombuffer(base64.b64decode(xs), dtype=np.float32)
                res = res.reshape((64, 19, 19, 3))
                res = np.frombuffer(base64.b64decode(masks),
                                    dtype=np.float32)
                res = res.reshape((64, 362))
            # Simulate model latency: one sleep per input in the batch.
            for i in x:
                time.sleep(sleep_time)
            return [
                np.random.rand(64).astype(np.float32) for i in range(batch)
            ]

        pt_deployer.deploy_pytorch_model(self.clipper_conn,
                                         name="policy",
                                         version=1,
                                         input_type="strings",
                                         func=policy,
                                         pytorch_model=model)

        self.clipper_conn.link_model_to_app(app_name="hello-world",
                                            model_name="policy")
Пример #4
0
class Clip(object):
    """Start Clipper locally and deploy `model_name` behind the "hello-world" app.

    Inputs arrive as base64-encoded float32 buffers; the policy closure
    decodes them, reshapes the batch to `shape`, and evaluates the model.
    """

    def __init__(self, shape, model_name):
        from clipper_admin import ClipperConnection, DockerContainerManager
        from clipper_admin.deployers import python as python_deployer
        from clipper_admin.deployers import pytorch as pytorch_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        # Best-effort teardown of any previous cluster before starting fresh.
        try:
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(name="hello-world",
                                               input_type="strings",
                                               default_output="-1.0",
                                               slo_micros=10**8)
        ptmodel = get_model(model_name)

        def policy(model, x):
            print(len(x))
            batch = len(x)
            arr = []
            for j in x:
                print(type(j), len(j))
                # Bug fix: base64.decodestring was removed in Python 3.9;
                # b64decode accepts both str and bytes input.
                res = np.frombuffer(base64.b64decode(j), dtype=np.float32)
                print(res.shape)
                arr += [res]
            x = np.array(arr)
            x = x.reshape((-1, ) + shape[1:])
            print("new shape", x.shape)
            return evaluate_model(model, x).reshape((batch, shape[0]))

        pytorch_deployer.deploy_pytorch_model(self.clipper_conn,
                                              name="policy",
                                              version=1,
                                              input_type="strings",
                                              func=policy,
                                              pytorch_model=ptmodel)

        self.clipper_conn.link_model_to_app(app_name="hello-world",
                                            model_name="policy")
Пример #5
0
def deployModelToClipper():
    """Deploy model to clipper and replace its entry.

    Each step is best-effort: registration/deployment/linking errors are
    printed (they typically mean the entity already exists from a prior run).
    """
    global app_name, model_name, model_version

    print('Deploying model to clipper, model_name={}, model_version={}'.format(
        model_name, model_version))

    # Setup clipper and deploy model
    clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
    # Start a new cluster; if one is already running, just connect to it.
    # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        clipper_conn.start_clipper()
    except Exception:
        clipper_conn.connect()
    try:
        # input_type must be bytes as inputs will be serialized into bytes with pickle
        clipper_conn.register_application(name=app_name,
                                          input_type="bytes",
                                          default_output="-1.0",
                                          slo_micros=1000000)
    except Exception as e:
        print(e)
    try:
        deploy_python_closure(clipper_conn,
                              name=model_name,
                              version=model_version,
                              input_type="bytes",
                              batch_size=1,
                              func=predict,
                              base_image='hysia-clipper-base-container-gpu')
    except Exception as e:
        print(e)
    try:
        clipper_conn.link_model_to_app(app_name=app_name,
                                       model_name=model_name)
    except Exception as e:
        print(e)

    replaceDefaultEntry()
    print('{} deployed to clipper!'.format(model_name))
Пример #6
0
def setup_clipper():
    """Deploy the join-predict closure to a running Clipper cluster and expose it."""
    app_name = 'predict-app'
    model_name = "predict-model"
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()

    deploy_python_closure(clipper_conn,
                          name=model_name,
                          version='1',
                          input_type="bytes",
                          func=join_predict)

    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s

    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)

    # Bug fix: the address was hard-coded to "" which printed a broken URL;
    # use the query-frontend address reported by the connection instead.
    print(
        "url: ", "http://{addr}/{app_name}/predict".format(
            addr=clipper_conn.get_query_addr(), app_name=app_name))
Пример #7
0
def setup_clipper():
  """Deploy ResNet-101 to a running Clipper cluster and print the query endpoint."""
  app_name = 'resnet101-app'
  model_name = 'resnet101-model'
  clipper_conn = ClipperConnection(DockerContainerManager())
  clipper_conn.connect()

  pytorch_deployer.deploy_pytorch_model(clipper_conn=clipper_conn,
          name=model_name,
          version='1',
          input_type='bytes',
          func=resnet_predict,
          pytorch_model=resnet101,
          pkgs_to_install=['pillow', 'torch', 'torchvision'])

  clipper_conn.register_application(name=app_name,
          input_type="bytes",
          default_output="-1.0",
          slo_micros=10000000)  # 10s

  clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
  print("query_adress: ", clipper_conn.get_query_addr())
  # Bug fix: these two prints were missing their value arguments and
  # printed only the labels.
  print("app_name: ", app_name)
  print("model_name: ", model_name)
  print("url: ", "http://{addr}/{app_name}/predict".format(addr=clipper_conn.get_query_addr(),app_name=app_name))
Пример #8
0
    elif sys.argv[2] == "sess-frozen":
        sess = load_from_frozen()
        deploy_tensorflow_model(clipper_conn,
                                model_name,
                                version=1,
                                input_type="integers",
                                func=predict,
                                tf_sess_or_saved_model_path=sess)
    elif sys.argv[2] == "checkpoint":
        deploy_tensorflow_model(
            clipper_conn,
            model_name,
            version=1,
            input_type="integers",
            func=predict,
            tf_sess_or_saved_model_path=CHECKPOINT_PATH.split('/')[0])
    elif sys.argv[2] == "frozen":
        deploy_tensorflow_model(clipper_conn,
                                model_name,
                                version=1,
                                input_type="integers",
                                func=predict,
                                tf_sess_or_saved_model_path=FROZEN_GRAPH_PATH)
    else:
        print("invalid usage")
        print(
            "usage: python deploy.py --mode checkpoint|frozen|checkpoint|sess-frozen"
        )
        exit(1)
    clipper_conn.link_model_to_app(app_name, model_name)
    global model
    pred = model.predict(inputs)
    return [str(p) for p in pred]


# Point to the gradient boosting model
# NOTE(review): `model = model` is a no-op self-assignment — presumably a
# placeholder for selecting the trained gradient-boosting model; verify.
model = model

# Deploy the 'predict' function as a model
deploy_python_closure(
    clipper_conn,
    name="gb-model",
    version=1,
    input_type="doubles",
    func=predict,
    pkgs_to_install=['scikit-learn', 'pandas', 'numpy', 'scipy'],
    registry="gkip")

# Routes requests for the application 'kddtutorial' to the model 'gb-model'
clipper_conn.link_model_to_app(app_name="kddtutorial", model_name="gb-model")

# Smoke-test the deployment with one row from the dataset.
inputs = X.loc[200, X.columns != 'classification']  # use random data point
headers = {"Content-type": "application/json"}
addr = clipper_conn.get_query_addr()
response = requests.post("http://%s/%s/predict" % (addr, 'kddtutorial'),
                         headers=headers,
                         data=json.dumps({"input": list(inputs)})).json()
print(response)

# Tear the whole Clipper cluster down after the smoke test.
clipper_conn.stop_all()
Пример #10
0
# In[ ]:


from clipper_admin.deployers import python as python_deployer
# We specify which packages to install in the pkgs_to_install arg.
# For example, if we wanted to install xgboost and psycopg2, we would use
# pkgs_to_install = ['xgboost', 'psycopg2']
python_deployer.deploy_python_closure(clipper_conn, name='xgboost-model', version=1,
     input_type="integers", func=predict, pkgs_to_install=['xgboost'])


# In[ ]:


# Route queries for the 'xgboost-test' app to the freshly deployed model.
clipper_conn.link_model_to_app('xgboost-test', 'xgboost-model')


# In[ ]:


import requests, json
# Get Address
addr = clipper_conn.get_query_addr()
# Post Query
response = requests.post(
     "http://%s/%s/predict" % (addr, 'xgboost-test'),
     headers={"Content-type": "application/json"},
     data=json.dumps({
         'input': get_test_point()
     }))
Пример #11
0
    KubernetesContainerManager(useInternalIP=True))
# clipper_conn = ClipperConnection(DockerContainerManager())
# Start the cluster, then register and deploy two linear-model apps
# ("linear1"/"linear2"), each with a 100 ms latency objective.
clipper_conn.start_clipper()

#Deploy lin_model_1
clipper_conn.register_application(name="linear1",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)
deploy_python_closure(clipper_conn,
                      name="lin-model-1",
                      version=1,
                      input_type="doubles",
                      func=lin_model_1,
                      registry="hsubbaraj")
clipper_conn.link_model_to_app(app_name="linear1", model_name="lin-model-1")
print("deployed model 1")

#Deploy lin_model_2
clipper_conn.register_application(name="linear2",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)
deploy_python_closure(clipper_conn,
                      name="lin-model-2",
                      version=1,
                      input_type="doubles",
                      func=lin_model_2,
                      registry="hsubbaraj")
clipper_conn.link_model_to_app(app_name="linear2", model_name="lin-model-2")
print("deployed model 2")
Пример #12
0
def predict(delay):
    """Forecast 30 days ahead with the (module-level) Prophet model `model_r`.

    Note: `delay` is currently unused; earlier (now removed) code used it to
    select the last `delay` rows of the forecast. It is kept in the signature
    for compatibility with the deployed closure's callers.

    Returns:
        str: the full forecast DataFrame serialized as JSON.
    """
    future_r = model_r.make_future_dataframe(periods=30, freq='D')
    forecast_r = model_r.predict(future_r)
    return forecast_r.to_json()


from clipper_admin.deployers import python as python_deployer

# Deploy the Prophet forecasting closure; fbprophet is pinned to 0.4
# inside the model container.
python_deployer.deploy_python_closure(
    clipper_conn,
    name="p1model",
    version=1,
    input_type="strings",
    func=predict,
    pkgs_to_install=['pandas', 'fbprophet==0.4'])

clipper_conn.link_model_to_app(app_name="p1app", model_name="p1model")

#import requests, json, numpy as np
#headers = {"Content-type": "application/json"}
#datas = json.dumps({"input": list(np.random.random(10))})

#requests.post("http://10.65.47.80:1337/p1app/predict", headers=headers, data=datas).json()
# Setup clipper and deploy pytorch model
clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
try:
    clipper_conn.start_clipper()
    clipper_conn.register_application(name=APP_NAME,
                                      input_type="doubles",
                                      default_output="-1.0",
                                      slo_micros=1000000)
    deploy_pytorch_model(clipper_conn,
                         name=MODEL_NAME,
                         version="1",
                         input_type="doubles",
                         func=predict,
                         pytorch_model=model,
                         pkgs_to_install=pip_deps)
    clipper_conn.link_model_to_app(app_name=APP_NAME, model_name=MODEL_NAME)
except Exception:  # bug fix: bare except also swallowed KeyboardInterrupt/SystemExit
    # Cluster (and app/model) already exist from a previous run: just attach.
    clipper_conn.connect()

# Check all apps
print(clipper_conn.get_all_apps())

# Test inference
# inputs = np.array([[1., 2., 3.], [2., 3., 4.], [3., 4., 5.]])
# print(predict(model, inputs))
inputs = np.array([1., 2., 3.]).tolist(
)  # Inputs can only be one-dimensional or there will be json serialization error
# Bug fix: the header was misspelled "aplication/json".
headers = {"Content-type": "application/json"}
result = requests.post("http://localhost:1337/" + APP_NAME + "/predict",
                       headers=headers,
                       data=json.dumps({"input": inputs})).json()
# Bug fix: `return results` was a SyntaxError at module level (and the
# variable is named `result`); print the response instead.
print(result)


#################################################
#################################################
#################################################

from clipper_admin.deployers import python as python_deployer

# Deploy the query-agent closure (version 9) under the "doubles" input type.
python_deployer.deploy_python_closure(clipper_conn,
                                      name="query-agent-model",
                                      input_type="doubles",
                                      func=query_agent_function,
                                      version=9)

clipper_conn.link_model_to_app(app_name="query_agent",
                               model_name="query-agent-model")

# Debugging
clipper_conn.set_model_version(name="query-agent-model", version="3")

import requests, json, time, sys, numpy as np
headers = {"Content-type": "application/json"}
clipper_url = "192.168.56.101"  # default: "localhost"
app_name = "query_agent"
# data_input = json.dumps({"input": list(np.random.random(input_size))})
data_input = json.dumps(
    {"input": list(np.random.randint(0, 16, 64).astype(float))})
# Bug fix: the Python 2 print statement was a SyntaxError under Python 3.
print(requests.post("http://" + clipper_url + ":1337/" + app_name + "/predict",
                    headers=headers,
                    data=data_input).json())
Пример #15
0
	TESTDATA = StringIO(inputs[0])
	data = spark.createDataFrame(read_csv(TESTDATA, header=None, names=['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']))
	feature_cols = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width']
	assembler = pyspark.ml.feature.VectorAssembler(inputCols=feature_cols, outputCol='features')
	data = assembler.transform(data)
	data = data.select(['features', 'class'])
	label_indexer = pyspark.ml.feature.StringIndexer(inputCol='class', outputCol='label').fit(data)
	data = label_indexer.transform(data)
	data = data.select(['features', 'label'])
	output = model.transform(data).select("prediction").rdd.flatMap(lambda x: x).collect()
	return output

deploy_pyspark_model(
    clipper_conn,
    name="iris-output",
    version=1,
    # Bug fix: "string" is not a valid Clipper input type and would not
    # match the "strings" type the application is registered with below.
    input_type="strings",
    func=predict,
    pyspark_model=model,
    sc=sc,
    pkgs_to_install=["pandas"])

clipper_conn.register_application(
    name="iris-app",
    input_type="strings",
    default_output="-1",
    slo_micros=9000000)  # will return default value in 9 seconds

clipper_conn.link_model_to_app(app_name="iris-app", model_name="iris-output")

Пример #16
0
#     )

deploy_pytorch_model(
    clipper_conn,
    name="superresolution-model",
    version=1,
    input_type="bytes",
    func=image_enhance,
    pytorch_model=model,
    base_image='custom-model-image',
    pkgs_to_install=['opencv-python','numpy','six', 'Pillow','wheel',]
    )

print("linking model to app...")

clipper_conn.link_model_to_app(app_name="superresolution", model_name="superresolution-model")

def query(addr, filename):
    """POST the image at `filename` (base64-encoded) to the superresolution
    app at `addr` and print the JSON reply."""
    url = "http://%s/superresolution/predict" % addr
    req_json = json.dumps({
        "input":
        base64.b64encode(open(filename, "rb").read()).decode() # bytes to unicode
    })
    headers = {'Content-type': 'application/json'}
    r = requests.post(url, headers=headers, data=req_json)
    print(r.json())


print("deployed... do a query")

query(clipper_conn.get_query_addr(),'LR/baboon.png')
Пример #17
0
######### Define Own Prediction Function ########
#################################################

import sklearn
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib

# Load the persisted scikit-learn MLP classifier from disk.
model_path = "../../models/sklearn/"
model_name = "nn_model.sav"
clf = joblib.load(model_path + model_name)


def clf_predict(xs):
    """Run the loaded classifier on a batch of feature vectors."""
    return clf.predict(xs)


#################################################
#################################################
#################################################

from clipper_admin.deployers import python as python_deployer

# Ship the closure to Clipper and wire it to the breast-cancer app.
python_deployer.deploy_python_closure(clipper_conn,
                                      name="nn-model",
                                      version=1,
                                      input_type="doubles",
                                      func=clf_predict)

clipper_conn.link_model_to_app(app_name="breast-cancer", model_name="nn-model")

# clipper_conn.stop_all()
Пример #18
0
	cols = ['age','workclass','fnlwgt','education','education-num','marital-status', \
		'occupation','relationship','race','sex','capital-gain', \
		'capital-loss','hours-per-week','native-country','label']
	TESTDATA = StringIO(inputs[0])
	data = spark.createDataFrame(read_csv(TESTDATA, header=None, names=cols))
	feature_cols = cols[:-1]
	assembler = pyspark.ml.feature.VectorAssembler(inputCols=feature_cols, outputCol='features')
	data = assembler.transform(data)
	data = data.select(['features', 'label'])
	output = model.transform(data).select("prediction").rdd.flatMap(lambda x: x).collect()
	return output

deploy_pyspark_model(
    clipper_conn,
    name="pyspark-test",
    version=1,
    # Bug fix: "string" is not a valid Clipper input type and would not
    # match the "strings" type the application is registered with below.
    input_type="strings",
    func=predict,
    pyspark_model=model,
    sc=sc,
    pkgs_to_install=["pandas"])

clipper_conn.register_application(
    name="pyspark-app",
    input_type="strings",
    default_output="-1",
    slo_micros=9000000)  # will return default value in 9 seconds

clipper_conn.link_model_to_app(app_name="pyspark-app", model_name="pyspark-test")

# For example, if we wanted to install xgboost and psycopg2, we would use
# pkgs_to_install = ['xgboost', 'psycopg2']
print("Deploy predict function closure using Clipper...")
python_deployer.deploy_python_closure(clipper_conn,
                                      name='xgboost-model',
                                      version=1,
                                      input_type="doubles",
                                      func=predict,
                                      pkgs_to_install=['xgboost'])

# Give the model container a moment to come up before linking/querying.
time.sleep(5)

# In[8]:

print("Link Clipper connection to model application...")
clipper_conn.link_model_to_app('xgboost-airlines', 'xgboost-model')

# In[22]:

import requests, json
# Get Address
addr = clipper_conn.get_query_addr()
print(
    "Model predict for a single instance via Python requests POST request & parse response..."
)

# Post Query
response = requests.post("http://%s/%s/predict" % (addr, 'xgboost-airlines'),
                         headers={"Content-type": "application/json"},
                         data=json.dumps({'input': get_test_point(0)}))
result = response.json()
Пример #20
0
# Start a fresh local Clipper cluster.
clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.start_clipper()
clipper_addr = clipper_conn.get_query_addr()


def preprocess(inputs):
    """Round the numeric fields of one CSV record to 1 decimal place.

    `inputs` is a batch whose first element is a comma-separated string;
    the last field is treated as the class label and left untouched.
    Returns a single-element list containing the transformed record.
    """
    fields = (inputs[0]).split(",")
    rounded = [round(float(v), 1) for v in fields[:-1]]
    rounded.append(fields[-1])
    return [(str(rounded))[1:-1]]


python_deployer.deploy_python_closure(
    clipper_conn,
    name="process-iris",  # The name of the model in Clipper
    version=1,  # A unique identifier to assign to this model.
    # Bug fix: "string" is not a valid Clipper input type and would not
    # match the "strings" type the application is registered with below.
    input_type="strings",
    func=preprocess  # The model function to deploy
)

clipper_conn.register_application(
    name="process-app",
    input_type="strings",
    default_output="-1",
    slo_micros=9000000)  # will return default value in 9 seconds

clipper_conn.link_model_to_app(app_name="process-app", model_name="process-iris")


Пример #21
0
        response = apply_forecast_impl(parameters[0].decode("utf-8").split(','))
        code = '202' 
    except Exception as e:
        response = str(e)
        code = '500'
    return [str(code+', '+response) for _ in parameters]

if __name__ == '__main__':
    # setup logging format
    format = "%(asctime)-15s %(message)s"
    logging.basicConfig(
        filename='./timeseries/log.log', level=logging.DEBUG, format=format)
    # set up logging to console
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.ERROR)
    logging.getLogger().addHandler(console)

    signal.signal(signal.SIGINT, signal_handler)
    conn = ClipperConnection(DockerContainerManager())
    conn.start_clipper()
    try:
        conn.register_application(name="forecast",
                                  input_type="strings",
                                  default_output="500, Error executing call.",
                                  slo_micros=100000000)
        python_deployer.deploy_python_closure(
            conn,
            name="do-forecast",
            version=1,
            input_type="strings",
            func=do_forecast,
            base_image='wamsiv/timeseries:latest')
        conn.link_model_to_app(app_name="forecast", model_name='do-forecast')
        # Bug fix: subprocess.getoutput expects a command *string*; it was
        # passed a single-element list, which (with the implied shell=True)
        # does not run the command as intended.
        print(subprocess.getoutput(
            "docker update --restart=on-failure "
            "$(docker ps -a | grep 'clipper/query_frontend:0.3.0' | awk '{ print $1 }')"))
        input("Server started. Press ctrl+c to stop server.\n")
    except Exception as e:
        logging.error("Encountered {}. Stopping server...".format(e))
        conn.stop_all()
    conn.stop_all()
Пример #22
0
            clipper_query_port=1337 + node_id,
            clipper_management_port=2337 + node_id,
            clipper_rpc_port=7000 + node_id,
            redis_ip=None,
            redis_port=6379 + node_id,
            prometheus_port=9090 + node_id,
            # WARING: DO NOT CHANGE THE RULE OF NETWORK NAMES
            docker_network='clipper_network_{}'.format(node_id),
            # SINCE THIS IS USED BY reset.sh TO IDENTIFY CLIPPER CONTAINERS
            extra_container_kwargs={})
    )  # for node_id in range(args.num_nodes)]

    try:
        clipper_conn.start_clipper()
        clipper_conn.register_application(name="default",
                                          input_type="string",
                                          default_output="",
                                          slo_micros=100000)

        python_deployer.deploy_python_closure(clipper_conn,
                                              name="echo-model",
                                              version=1,
                                              input_type="string",
                                              func=echo_model)
        clipper_conn.link_model_to_app(app_name="default",
                                       model_name="echo-model")
    except:
        exit(1)

    exit(0)
Пример #23
0
oY = graph.get_tensor_by_name('Placeholder_1:0')


def predict(X):
    """Run the loaded TF inference op on batch `X`; return results as strings."""
    print("inputs {}".format(X))
    result = sess.run(load_infer_op, feed_dict={oX: X})
    ret = [str(i) for i in result]
    print("return is {}".format(ret))
    return ret


# Attach to a Kubernetes-managed Clipper cluster that is already running.
manager = KubernetesContainerManager(kubernetes_proxy_addr=K8S_ADDR,
                                     namespace=K8S_NS)
clipper_conn = ClipperConnection(manager)
clipper_conn.connect()

# clipper_conn.delete_application(APP_NAME)
# clipper_conn.register_application(
#   name = APP_NAME, input_type = 'doubles', default_output = '0', slo_micros = 100000000)

# Deploy the session-backed closure and route the app's traffic to it.
deploy_tensorflow_model(clipper_conn,
                        name=PREDICT_NAME,
                        version=VERSION,
                        input_type="doubles",
                        func=predict,
                        tf_sess_or_saved_model_path=sess,
                        registry=REGISTRY,
                        pkgs_to_install=['tensorflow'])

clipper_conn.link_model_to_app(app_name=APP_NAME, model_name=PREDICT_NAME)
Пример #24
0
    res = []
    for x in xs:
        x = x.reshape(1, -1)
        res.append(str(model.predict(x)))
    print("res: ", res)
    return res


# In[43]:

from clipper_admin.deployers import python as python_deployer
# We specify which packages to install in the pkgs_to_install arg.
# For example, if we wanted to install xgboost and psycopg2, we would use
# pkgs_to_install = ['xgboost', 'psycopg2']
python_deployer.deploy_python_closure(clipper_conn,
                                      name='simple-model',
                                      version=1,
                                      input_type="floats",
                                      pkgs_to_install=['sklearn'],
                                      func=predict)

# In[44]:

# Linking may fail (app missing or already linked); surface the error
# without aborting the notebook.
try:
    clipper_conn.link_model_to_app('simple_model', 'simple-model')
except ClipperException as e:
    print(e)

addr = clipper_conn.get_query_addr()
print(addr)
Пример #25
0
            args = json.loads(param)
            zone = args['zone']
            date = str(args['date'])
            schedule = args['schedule']
            temps, actions = execute_schedule(date, schedule, models[zone], 65)
            ret.append(temps)
        return ret

    from clipper_admin.deployers import python as python_deployer
    python_deployer.deploy_python_closure(clipper_conn,
                                          name='thermal-model-ciee',
                                          version=1,
                                          input_type='strings',
                                          func=execute_thermal_model,
                                          base_image="xbospy")
    clipper_conn.link_model_to_app(app_name="ciee_thermal",
                                   model_name="thermal-model-ciee")
except:
    clipper_conn.connect()

import time
import requests

# Give the freshly-deployed model containers time to come up.
time.sleep(10)
inp = json.dumps({
    'zone': 'http://buildsys.org/ontologies/ciee#CentralZone',
    'date': '2018-02-06 00:00:00 UTC',
    'schedule': normal_schedule
})
# Bug fix: the Python 2 print statement was a SyntaxError under Python 3.
print(inp)

resp = requests.post('http://localhost:1337/ciee_thermal/predict',
                     data=json.dumps({'input': inp}))
# clipper_start
from clipper_admin import ClipperConnection, DockerContainerManager

clipper_conn = ClipperConnection(DockerContainerManager())

clipper_conn.start_clipper()
clipper_conn.connect()

# Register an app that serves vectors of doubles with a 100 ms SLO.
clipper_conn.register_application(name="hello-world",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)

clipper_conn.get_all_apps()


def feature_sum(xs):
    """Return the sum of each input vector, stringified for Clipper."""
    return [str(sum(x)) for x in xs]


from clipper_admin.deployers import python as python_deployer

python_deployer.deploy_python_closure(clipper_conn,
                                      name="sum-model",
                                      version=1,
                                      input_type="doubles",
                                      func=feature_sum)

clipper_conn.link_model_to_app(app_name="hello-world", model_name="sum-model")

# clipper_conn.stop_all()
Пример #27
0
	clipper_conn.start_clipper()
	clipper_conn.register_application(name="pytorch-example-2", input_type="doubles", default_output="-1.0", slo_micros=100000)
	# model = nn.Linear(1, 1)

# Define a shift function to normalize prediction inputs
	def pred(model, inputs):
		preds = []
		for i in inputs:
			preds.append(np.random.rand(1,10))
		return preds

		# return [model(torch.FloatTensor(np.reshape(i, (-1, 3, 32, 32))).data.numpy()) for i in inputs]
		# return [str(model(torch.FloatTensor(np.reshape(i, (1, 3, 32, 32)))).data.numpy()) for i in inputs]

	deploy_pytorch_model(
		clipper_conn,
	    name="pytorch-nn",
	    version=1,
	    input_type="doubles",
	    func=pred,
	    pytorch_model=net,
	    registry="hsubbaraj"
	    )

	clipper_conn.link_model_to_app(app_name="pytorch-example-2", model_name="pytorch-nn")
	time.sleep(2)
	print("deployed model")



Пример #28
0
def predict(model, inputs):
    """Normalize `inputs` with `shift`, run the torch model, and return strings.

    Returns the prediction array as a nested list with every element
    converted to str (Clipper outputs must be string-serializable).
    """
    trans = shift(inputs)
    pred = model(trans)
    pred = pred.data.numpy()
    # Bug fix: np.str was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin str is the documented replacement.
    return pred.astype(str).tolist()


# Deploy the closure together with the torch model it wraps, then route the
# "Shoe" app's requests to it.
deploy_pytorch_model(clipper_conn,
                     name="example",
                     version=1,
                     input_type="doubles",
                     func=predict,
                     pytorch_model=model)

clipper_conn.link_model_to_app(app_name="Shoe", model_name="example")
Пример #29
0
    slo_micros=10000000)  # 10,000,000 micros == 10 sec

clipper_conn.get_all_apps()

#################################################
######### Define Own Prediction Function ########
#################################################

import sklearn
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib
from clipper_admin.deployers import python as python_deployer

# Deploy one "digit-nn-model" version per pretrained network checkpoint.
for version_postfix in ["10x1k", "10x2k", "20x1k", "15x2k"]:

    model_path = "../../models/sklearn/"
    model_name = "dig_nn_model_" + version_postfix + ".sav"
    clf = joblib.load(model_path + model_name)

    # NOTE(review): clf_predict closes over the module-level `clf`, which is
    # rebound on every loop iteration — whether each deployed version keeps
    # "its own" model depends on how the deployer serializes the closure;
    # verify before relying on it.
    def clf_predict(xs):
        return clf.predict(xs)

    python_deployer.deploy_python_closure(clipper_conn,
                                          name="digit-nn-model",
                                          version=version_postfix,
                                          input_type="doubles",
                                          func=clf_predict)

clipper_conn.link_model_to_app(app_name="digit", model_name="digit-nn-model")
# clipper_conn.stop_all()
Пример #30
0
    data_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(train_mean, train_std)])
    image = get_image(data_transforms, image_url)
    pred = np.argmax(model(image).detach().numpy())
    return CLASSES[pred]

model = get_model()

# Stop any running logo-detector containers before redeploying.
clipper_conn.stop_models(model_names='logo-detector') 
deploy_pytorch_model(
    clipper_conn,
    name="logo-detector",
    version=1,
    input_type="strings",
    func=predict,
    pytorch_model=model,
    pkgs_to_install=['Pillow', 'torchvision', 'torch', 'numpy', 'requests']
    )
clipper_conn.get_clipper_logs()

clipper_conn.link_model_to_app(app_name="logo-detector", model_name="logo-detector")

# Inspect the deployment: linked models, replica count, and cluster logs.
clipper_conn.get_linked_models(app_name="logo-detector")

clipper_conn.cm.get_num_replicas(name="logo-detector", version="1")

clipper_conn.get_clipper_logs()