Example #1
import json

import numpy as np
import requests
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer


def main():
    # Set up the container manager.
    # k8 = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080",
    #                                 useInternalIP=True)
    # clipper_conn = ClipperConnection(k8)
    swarm = DockerContainerManager()
    clipper_conn = ClipperConnection(swarm)
    clipper_conn.stop_all()
    clipper_conn.start_clipper()

    # Register application.
    clipper_conn.register_application(name="sum-app", 
                                      input_type="doubles", 
                                      default_output="-1.0", 
                                      slo_micros=10000000)

    # Deploy the model.
    python_deployer.deploy_python_closure(clipper_conn, 
                                          name="sum-model", 
                                          version=1, 
                                          input_type="doubles", 
                                          func=sum)

    # Link application to model.
    clipper_conn.link_model_to_app(app_name="sum-app", 
                                   model_name="sum-model")

    # Test
    headers = {"Content-type": "application/json"}
    response = requests.post("http://localhost:1337/sum-app/predict", 
                             headers=headers, 
                             data=json.dumps({"input": list(np.random.random(10))})).json()
    print(response)
Example #2
def setup_clipper():
    app_name = 'inceptionv3-app'
    model_name = 'inceptionv3-model'
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()

    pytorch_deployer.deploy_pytorch_model(
        clipper_conn=clipper_conn,
        name=model_name,
        version='1',
        input_type='bytes',
        func=incept_predict,
        pytorch_model=incept,
        num_replicas=10,
        batch_size=1,
        pkgs_to_install=['pillow', 'pyarrow', 'torch', 'torchvision'])

    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s

    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)

    print("url: ", "http://{addr}/{app_name}/predict".format(
        addr=clipper_conn.get_query_addr(), app_name=app_name))
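# --- Query sketch (not part of the original example) ---
# A minimal client call for the app registered above. It assumes the query
# frontend is reachable on localhost:1337 and that "dog.jpg" exists locally;
# for a "bytes" application the input is typically sent as a base64-encoded
# string.
import base64
import json

import requests

with open("dog.jpg", "rb") as f:
    encoded_image = base64.b64encode(f.read()).decode()

response = requests.post(
    "http://localhost:1337/inceptionv3-app/predict",
    headers={"Content-type": "application/json"},
    data=json.dumps({"input": encoded_image}))
print(response.json())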
Example #3
class Clip(object):
    def __init__(self, sleep_time):
        from clipper_admin import ClipperConnection, DockerContainerManager
        #from clipper_admin.deployers import python as python_deployer
        from clipper_admin.deployers import pytorch as pt_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        try:
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(name="hello-world",
                                               input_type="strings",
                                               default_output="-1.0",
                                               slo_micros=10**8)
        model = Model()

        def policy(ptmodel, x):
            # Each input is "<observations>###<masks>", both base64-encoded
            # float32 buffers; they are decoded here, but this dummy policy
            # ignores them and just simulates per-input latency.
            batch = len(x)
            for j in x:
                xs, masks = j.split("###")
                obs = np.frombuffer(base64.b64decode(xs),
                                    dtype=np.float32).reshape((64, 19, 19, 3))
                mask = np.frombuffer(base64.b64decode(masks),
                                     dtype=np.float32).reshape((64, 362))
            for _ in x:
                time.sleep(sleep_time)
            return [
                np.random.rand(64).astype(np.float32) for _ in range(batch)
            ]

        pt_deployer.deploy_pytorch_model(self.clipper_conn,
                                         name="policy",
                                         version=1,
                                         input_type="strings",
                                         func=policy,
                                         pytorch_model=model)

        self.clipper_conn.link_model_to_app(app_name="hello-world",
                                            model_name="policy")
Example #4
class Clip(object):
    def __init__(self, shape, model_name):
        from clipper_admin import ClipperConnection, DockerContainerManager
        from clipper_admin.deployers import python as python_deployer
        from clipper_admin.deployers import pytorch as pytorch_deployer
        self.clipper_conn = ClipperConnection(DockerContainerManager())
        try:
            self.clipper_conn.connect()
            self.clipper_conn.stop_all()
        except Exception:
            pass
        self.clipper_conn.start_clipper()
        self.clipper_conn.register_application(name="hello-world",
                                               input_type="strings",
                                               default_output="-1.0",
                                               slo_micros=10**8)
        ptmodel = get_model(model_name)

        def policy(model, x):
            print(len(x))
            batch = (len(x))
            arr = []
            for j in x:
                print(type(j), len(j))
                res = np.frombuffer(base64.b64decode(j), dtype=np.float32)
                print(res.shape)
                arr += [res]
            x = np.array(arr)
            x = x.reshape((-1, ) + shape[1:])
            print("new shape", x.shape)
            return evaluate_model(model, x).reshape((batch, shape[0]))

        pytorch_deployer.deploy_pytorch_model(self.clipper_conn,
                                              name="policy",
                                              version=1,
                                              input_type="strings",
                                              func=policy,
                                              pytorch_model=ptmodel)

        self.clipper_conn.link_model_to_app(app_name="hello-world",
                                            model_name="policy")
Example #5
def deployModelToClipper():
    """Deploy model to clipper and replace its entry."""
    global app_name, model_name, model_version

    print('Deploying model to clipper, model_name={}, model_version={}'.format(
        model_name, model_version))

    # Setup clipper and deploy model
    clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
    try:
        clipper_conn.start_clipper()
    except Exception:
        clipper_conn.connect()
    try:
        # input_type must be bytes as inputs will be serialized into bytes with pickle
        clipper_conn.register_application(name=app_name,
                                          input_type="bytes",
                                          default_output="-1.0",
                                          slo_micros=1000000)
    except Exception as e:
        print(e)
    try:
        deploy_python_closure(clipper_conn,
                              name=model_name,
                              version=model_version,
                              input_type="bytes",
                              batch_size=1,
                              func=predict,
                              base_image='hysia-clipper-base-container-gpu')
    except Exception as e:
        print(e)
    try:
        clipper_conn.link_model_to_app(app_name=app_name,
                                       model_name=model_name)
    except Exception as e:
        print(e)

    replaceDefaultEntry()
    print('{} deployed to clipper!'.format(model_name))
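# --- Client sketch (not part of the original example) ---
# As the comment above notes, inputs to this app are pickled into bytes, so a
# client would pickle its payload and base64-encode it before posting. The
# frontend address and the payload contents below are assumptions; app_name is
# the module-level name used in deployModelToClipper().
import base64
import json
import pickle

import requests

payload = {"example": "whatever the predict() function expects"}
encoded = base64.b64encode(pickle.dumps(payload)).decode()
response = requests.post(
    "http://localhost:1337/{}/predict".format(app_name),
    headers={"Content-type": "application/json"},
    data=json.dumps({"input": encoded}))
print(response.json())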
Example #6
def setup_clipper():

    app_name = 'predict-app'
    model_name = "predict-model"
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()

    deploy_python_closure(clipper_conn,
                          name="predict-model",
                          version='1',
                          input_type="bytes",
                          func=join_predict)

    clipper_conn.register_application(name=app_name,
                                      input_type="bytes",
                                      default_output="-1.0",
                                      slo_micros=10000000)  # 10s

    clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)

    print("url: ", "http://{addr}/{app_name}/predict".format(
        addr=clipper_conn.get_query_addr(), app_name=app_name))
Example #7
def setup_clipper():
  app_name = 'resnet101-app'
  model_name = 'resnet101-model'
  clipper_conn = ClipperConnection(DockerContainerManager())
  clipper_conn.connect()
  
  pytorch_deployer.deploy_pytorch_model(clipper_conn=clipper_conn,
          name=model_name,
          version='1',
          input_type='bytes',
          func=resnet_predict,
          pytorch_model=resnet101,
          pkgs_to_install=['pillow', 'torch', 'torchvision'])

  clipper_conn.register_application(name=app_name,
          input_type="bytes",
          default_output="-1.0",
          slo_micros=10000000)  # 10s

  clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)
  print("query_adress: ", clipper_conn.get_query_addr())
  print("app_name: ", )
  print("model_name: ", )
  print("url: ", "http://{addr}/{app_name}/predict".format(addr=clipper_conn.get_query_addr(),app_name=app_name))
Example #8
clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.start_clipper()
clipper_addr = clipper_conn.get_query_addr()

def preprocess(inputs):
	inputArr = (inputs[0]).split(",")
	floats = inputArr[:-1]
	rounded = [round(float(i),1) for i in floats]
	rounded.append(inputArr[-1])
	output = [(str(rounded))[1:-1]]
	return output

python_deployer.deploy_python_closure(
    clipper_conn,
    name="process-iris",  # The name of the model in Clipper
    version=1,  # A unique identifier to assign to this model.
    input_type="string",  # The type of data the model function expects as input
    func=preprocess # The model function to deploy
)

clipper_conn.register_application(
    name="process-app",
    input_type="strings",
    default_output="-1",
    slo_micros=9000000) #will return default value in 9 seconds

clipper_conn.link_model_to_app(app_name="process-app", model_name="process-iris")


Example #9
import json
from datetime import datetime
from clipper_admin import ClipperConnection, DockerContainerManager
clipper_conn = ClipperConnection(DockerContainerManager())
from thermal_model import get_model_per_zone, normal_schedule, dr_schedule, execute_schedule

try:
    clipper_conn.start_clipper()
    default_output = json.dumps([-1] * 24)
    clipper_conn.register_application(name="ciee_thermal",
                                      input_type="string",
                                      default_output=default_output,
                                      slo_micros=1000000000)
    print('apps', clipper_conn.get_all_apps())
    models = get_model_per_zone("2018-01-30 00:00:00 PST")

    # model parameters:
    #   zone: string
    #   date: string
    #   schedule: [(hsp, csp), ... x 24 ...]
    def execute_thermal_model(params):
        """
        Accepts list of JSON string as argument
        """
        ret = []
        for param in params:
            args = json.loads(param)
            zone = args['zone']
            date = str(args['date'])
            schedule = args['schedule']
            temps, actions = execute_schedule(date, schedule, models[zone], 65)
Example #10
from clipper_admin import ClipperConnection, DockerContainerManager
import p1_agg_clipper

clipper_conn = ClipperConnection(DockerContainerManager())

clipper_conn.start_clipper()

clipper_conn.register_application(name="p1app",
                                  input_type="strings",
                                  default_output="-1.0",
                                  slo_micros=100000)

clipper_conn.get_all_apps()

model_r = p1_agg_clipper.main()


def feature_sum(xs):
    forecast_df = p1_agg_clipper.main()
    return [str(x) for x in forecast_df.yhat]
    #return [str(sum(x)) for x in xs]


def predict(model, inputs):
    # deploy_pytorch_model below calls this function as func(model, inputs).
    future_r = model_r.make_future_dataframe(periods=30, freq='D')
    forecast_r = model_r.predict(future_r)
    #forecast_r.index = forecast_r['ds']
    #forecast
    #pred_r = pd.DataFrame(forecast_r['yhat'][len(forecast_r)-delay:len(forecast_r)])
    #pred_r=pred_r.reset_index()
    inputs = shift(inputs)
    inputs = torch.tensor(inputs).float()
    pred = model(inputs)
    pred = pred.data.numpy()
    return [str(x) for x in pred]
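# --- Completion sketch (not part of the original example) ---
# The p1app application registered above never gets a model deployed or linked
# to it; the missing steps would look roughly like this. The model name
# "p1-model" and the use of the python closure deployer are assumptions.
from clipper_admin.deployers import python as python_deployer

python_deployer.deploy_python_closure(clipper_conn,
                                      name="p1-model",
                                      version=1,
                                      input_type="strings",
                                      func=feature_sum)
clipper_conn.link_model_to_app(app_name="p1app", model_name="p1-model")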


APP_NAME = "test-app"
MODEL_NAME = "test-pytorch-model"

# Setup clipper and deploy pytorch model
clipper_conn = ClipperConnection(DockerContainerManager(redis_port=6380))
try:
    clipper_conn.start_clipper()
    clipper_conn.register_application(name=APP_NAME,
                                      input_type="doubles",
                                      default_output="-1.0",
                                      slo_micros=1000000)
    deploy_pytorch_model(clipper_conn,
                         name=MODEL_NAME,
                         version="1",
                         input_type="doubles",
                         func=predict,
                         pytorch_model=model,
                         pkgs_to_install=pip_deps)
    clipper_conn.link_model_to_app(app_name=APP_NAME, model_name=MODEL_NAME)
except Exception:
    clipper_conn.connect()

# Check all apps
print(clipper_conn.get_all_apps())
Example #12
        response = apply_forecast_impl(parameters[0].decode("utf-8").split(','))
        code = '202' 
    except Exception as e:
        response = str(e)
        code = '500'
    return [str(code+', '+response) for _ in parameters]

if __name__ == '__main__':
    # setup logging format
    format = "%(asctime)-15s %(message)s"
    logging.basicConfig(
        filename='./timeseries/log.log', level=logging.DEBUG, format=format)
    # set up logging to console
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.ERROR)
    logging.getLogger().addHandler(console)

    signal.signal(signal.SIGINT, signal_handler)
    conn = ClipperConnection(DockerContainerManager())
    conn.start_clipper()
    try:
        conn.register_application(name="forecast",input_type="strings",default_output="500, Error executing call.",slo_micros=100000000)
        python_deployer.deploy_python_closure(conn, name="do-forecast", version=1, input_type="strings", func=do_forecast, base_image='wamsiv/timeseries:latest')
        conn.link_model_to_app(app_name="forecast", model_name='do-forecast')
        print(subprocess.getoutput(["docker update --restart=on-failure $(docker ps -a | grep 'clipper/query_frontend:0.3.0' | awk '{ print $1 }')"]))
        input("Server started. Press ctrl+c to stop server.\n")
    except Exception as e:
        logging.error("Encountered {}. Stopping server...".format(e))
        conn.stop_all()
    conn.stop_all()
Example #13
import logging
import xgboost as xgb
import numpy as np
from sklearn.metrics import mean_absolute_error
import joblib
import pandas as pd
from datetime import datetime
import pickle
import time
import matplotlib.pyplot as plt
plt.show(block=True)

from clipper_admin import ClipperConnection, DockerContainerManager
clipper_conn = ClipperConnection(DockerContainerManager())
print("Start Clipper...")
clipper_conn.start_clipper()
print("Register Clipper application...")
clipper_conn.register_application('xgboost-airlines', 'doubles',
                                  'default_pred', 100000)

# In[17]:

training_examples = pd.read_pickle(
    "../data/processed/airlines_training_examples.pkl")
f1 = open("../data/processed/airlines_training_targets.pkl", 'rb')
training_targets = pickle.load(f1)
f1.close()
test_examples = pd.read_pickle("../data/processed/airlines_test_examples.pkl")


def get_train_points():
    return training_examples.values.tolist()

Example #14

def load_from_ckp():
    pass


def load_from_frozen():
    pass


if __name__ == "__main__":
    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.stop_all()
    clipper_conn.start_clipper()
    clipper_conn.register_application(name=app_name,
                                      input_type="integers",
                                      default_output="rabbit",
                                      slo_micros=100000)
    if len(sys.argv) != 3:
        print("invalid usage")
        print(
            "usage: python deploy.py --mode checkpoint|frozen|sess-checkpoint|sess-frozen"
        )
        exit(1)
    if sys.argv[2] == "sess-checkpoint":
        sess = load_from_ckp()
        deploy_tensorflow_model(clipper_conn,
                                model_name,
                                version=1,
                                input_type="integers",
                                func=predict,
                                tf_sess_or_saved_model_path=sess)
Example #15
    model_dir = '.'
    print(
        '    app_name="%s"\n    model_name="%s"\n    input_type="%s"\n    slo_micros=%d'
        % (app_name, model_name, input_type, slo_micros))

    clipper_conn = ClipperConnection(DockerContainerManager())
    clipper_conn.connect()
    #-----------------------------------------------------------------------
    # spark = SparkSession.builder.appName("clipper-pyspark").getOrCreate()
    # spark.sparkContext.setLogLevel("DEBUG")
    #-----------------------------------------------------------------------
    info = clipper_conn.get_app_info(app_name)
    if (info is None):
        print('    Registering app %s' % app_name)
        clipper_conn.register_application(name=app_name,
                                          input_type=input_type,
                                          default_output="None",
                                          slo_micros=slo_micros)
        version = '1'
        new_app = True
    else:
        if len(info['linked_models']) > 0:
            model_name = info['linked_models'][0]
            version = str(
                int(clipper_conn.get_current_model_version(model_name)) + 1)
        else:
            version = '1'
        new_app = False
    print('    version: %s' % (version))

    #-----------------------------------------------------------------------
    if args.deploy == 'python':
Example #16
# In[26]:

try:
    clipper_conn.start_clipper()
except Exception:
    print("Clipper already running")
    clipper_conn.connect()

# In[28]:

# We will register it to deploy a simple model.

try:
    clipper_conn.register_application(name='simple_model',
                                      input_type='floats',
                                      default_output="-1.0",
                                      slo_micros=100000)
except ClipperException as e:
    print(str(e))

# In[29]:

with open('model.pickle', 'rb') as handle:
    model = pickle.load(handle)

# In[42]:


def predict(xs):
    print("xs: ", xs)
    res = []
Example #17
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer
from clipper_admin.exceptions import ClipperException
from docker.errors import APIError
import json
import requests

# Set up connection
clipper_conn = ClipperConnection(DockerContainerManager())

try:
    clipper_conn.start_clipper()
except (APIError, ClipperException):
    clipper_conn.connect()

# Register the Sum application
clipper_conn.register_application(name="Sum",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)


# Define model func
def feature_sum(xs):
    return [str(sum(x)) for x in xs]


# Deploy python model
python_deployer.deploy_python_closure(clipper_conn,
                                      name="sum-model",
                                      version=1,
                                      input_type="doubles",
                                      func=feature_sum)
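# --- Link + query sketch (not part of the original example) ---
# The snippet above deploys sum-model but never links it to the Sum
# application, so queries would only return the default output. The remaining
# link plus a test query might look like this (the input values are arbitrary).
clipper_conn.link_model_to_app(app_name="Sum", model_name="sum-model")

addr = clipper_conn.get_query_addr()
response = requests.post(
    "http://{}/Sum/predict".format(addr),
    headers={"Content-type": "application/json"},
    data=json.dumps({"input": [1.1, 2.2, 3.3]}))
print(response.json())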
Example #18
			optimizer.step()
			# print statistics
			running_loss += loss.item()
			if i % 2000 == 1999:    # print every 2000 mini-batches
				print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
				running_loss = 0.0

	print('Finished Training')



	# try:
	clipper_conn = ClipperConnection(KubernetesContainerManager(useInternalIP=True))
	# clipper_conn = ClipperConnection(DockerContainerManager())
	clipper_conn.start_clipper()
	clipper_conn.register_application(name="pytorch-example-2", input_type="doubles", default_output="-1.0", slo_micros=100000)
	# model = nn.Linear(1, 1)

# Define the prediction function (here it just returns dummy random outputs)
	def pred(model, inputs):
		preds = []
		for i in inputs:
			preds.append(np.random.rand(1,10))
		return preds

		# return [model(torch.FloatTensor(np.reshape(i, (-1, 3, 32, 32))).data.numpy()) for i in inputs]
		# return [str(model(torch.FloatTensor(np.reshape(i, (1, 3, 32, 32)))).data.numpy()) for i in inputs]

	deploy_pytorch_model(
		clipper_conn,
	    name="pytorch-nn",
Example #19
            clipper_query_port=1337 + node_id,
            clipper_management_port=2337 + node_id,
            clipper_rpc_port=7000 + node_id,
            redis_ip=None,
            redis_port=6379 + node_id,
            prometheus_port=9090 + node_id,
            # WARNING: DO NOT CHANGE THE RULE OF NETWORK NAMES
            docker_network='clipper_network_{}'.format(node_id),
            # SINCE THIS IS USED BY reset.sh TO IDENTIFY CLIPPER CONTAINERS
            extra_container_kwargs={})
    )  # for node_id in range(args.num_nodes)]

    try:
        clipper_conn.start_clipper()
        clipper_conn.register_application(name="default",
                                          input_type="string",
                                          default_output="",
                                          slo_micros=100000)

        python_deployer.deploy_python_closure(clipper_conn,
                                              name="echo-model",
                                              version=1,
                                              input_type="string",
                                              func=echo_model)
        clipper_conn.link_model_to_app(app_name="default",
                                       model_name="echo-model")
    except Exception:
        exit(1)

    exit(0)
Example #20
def lin_model_5(inputs):
    print(inputs)
    l = [3.1 * x[0] + 1.3 * x[1] - 10.8 for x in inputs]
    print(l)
    return l


clipper_conn = ClipperConnection(
    KubernetesContainerManager(useInternalIP=True))
# clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.start_clipper()

#Deploy lin_model_1
clipper_conn.register_application(name="linear1",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)
deploy_python_closure(clipper_conn,
                      name="lin-model-1",
                      version=1,
                      input_type="doubles",
                      func=lin_model_1,
                      registry="hsubbaraj")
clipper_conn.link_model_to_app(app_name="linear1", model_name="lin-model-1")
print("deployed model 1")

#Deploy lin_model_2
clipper_conn.register_application(name="linear2",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)
Example #21
from clipper_admin import ClipperConnection, KubernetesContainerManager
clipper_conn = ClipperConnection(
    KubernetesContainerManager(useInternalIP=True))
clipper_conn.connect()

clipper_conn.register_application(name="hello-world",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=100000)
clipper_conn.get_all_apps()


def feature_sum(xs):
    return [str(sum(x)) for x in xs]


from clipper_admin.deployers import python as python_deployer

registry = 'localhost:5000'
python_deployer.deploy_python_closure(clipper_conn,
                                      name="sum-model",
                                      version=1,
                                      input_type="doubles",
                                      func=feature_sum,
                                      registry=registry)
Example #22
        os.unlink(tmpout.name) 

    return upscaled


from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers.pytorch import deploy_pytorch_model
from torch import nn

clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.stop_all()
# clipper_conn.connect()
clipper_conn.start_clipper()


clipper_conn.register_application(name="superresolution", input_type="bytes", default_output="undefined", slo_micros=100000)


print("going to deploy...")

# deploy_pytorch_model(
#     clipper_conn,
#     name="superresolution-model",
#     version=1,
#     input_type="bytes",
#     func=image_enhance,
#     pytorch_model=model,
#     pkgs_to_install=['opencv-python','numpy','six', 'Pillow', 'wheel', 'certifi']
#     )

deploy_pytorch_model(
Example #23
# In[ ]:


try:
    clipper_conn.start_clipper()
except Exception:
    print("Clipper already running")
    clipper_conn.connect()


# In[ ]:


# We will register it to deploy an xgboost model.
clipper_conn.register_application('xgboost-test', 'integers', 'default_pred', 100000)


# In[ ]:


def get_test_point():
    return [np.random.randint(255) for _ in range(784)]


# In[ ]:


# Create a training matrix.
dtrain = xgb.DMatrix(get_test_point(), label=[0])
# We then create parameters, watchlist, and specify the number of rounds
Example #24
# First we need to import Clipper
from clipper_admin import ClipperConnection, KubernetesContainerManager
from clipper_admin.deployers.python import deploy_python_closure

# Create a Clipper connection
clipper_conn = ClipperConnection(
    KubernetesContainerManager(useInternalIP=True,
                               kubernetes_proxy_addr="127.0.0.1:8080"))

# Start a Clipper cluster or connect to a running one
clipper_conn.start_clipper()

# Register an app called 'kddtutorial'. This would create a REST endpoint
clipper_conn.register_application(name="kddtutorial",
                                  input_type="doubles",
                                  default_output="-1.0",
                                  slo_micros=10000000)


# Access the trained model via closure capture
def predict(inputs):
    global model
    pred = model.predict(inputs)
    return [str(p) for p in pred]


# Point to the gradient boosting model
model = model

# Deploy the 'predict' function as a model
deploy_python_closure(
Example #25
# clipper_start
from clipper_admin import ClipperConnection, DockerContainerManager
clipper_conn = ClipperConnection(DockerContainerManager())

clipper_conn.start_clipper()
clipper_conn.connect()

clipper_conn.register_application(
    name="digit",
    input_type="doubles",
    default_output="-1.0",
    slo_micros=10000000)  # 10,000,000 micros == 10 sec

clipper_conn.get_all_apps()

#################################################
######### Define Own Prediction Function ########
#################################################

import sklearn
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib
from clipper_admin.deployers import python as python_deployer

for version_postfix in ["10x1k", "10x2k", "20x1k", "15x2k"]:

    model_path = "../../models/sklearn/"
    model_name = "dig_nn_model_" + version_postfix + ".sav"
    clf = joblib.load(model_path + model_name)
Example #26
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers.pytorch import deploy_pytorch_model


clipper_conn = ClipperConnection(DockerContainerManager())

try:
    clipper_conn.connect()
except Exception:
    clipper_conn.start_clipper()

clipper_conn.register_application(name="logo-detector", input_type="strings", default_output="no logo", slo_micros=10000000)
clipper_conn.get_all_apps()

from pathlib import Path
from io import BytesIO

import numpy as np
from PIL import Image
import requests
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torchvision
from torchvision.transforms import transforms

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # pylint: disable=maybe-no-member

CLASSES = [
        'ADAC', 'FCB', 'HP', 'adidas', 'aldi', 'apple', 'becks', 'bmw',
        'carlsberg', 'chimay', 'cocacola', 'corona', 'dhl', 'erdinger', 'esso',