Example #1
    def __init__(self, configurations=None):
        # avoid a mutable default argument; fall back to an empty dict
        super().__init__(configurations or {})
        host = utils.getKey('be.tfsrv.host', dicts=settings)
        port = utils.getKey('be.tfsrv.rest_port', dicts=settings)
        # probe the TensorFlow Serving REST endpoint so an unreachable
        # server is reported at construction time
        with closing(socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)) as sock:
            if sock.connect_ex((host, int(port))) != 0:
                logging.critical(
                    "Failed to connect to {}:{}".format(host, port))
        self.base_url = "http://" + host + ":" + port + "/v1/models/{}:predict"
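The trailing "{}" in base_url is filled with a model name when a predict request is issued. A minimal standalone sketch (host, port, and model name are hypothetical):

    base_url = "http://" + "127.0.0.1" + ":" + "8501" + "/v1/models/{}:predict"
    print(base_url.format("resnet50"))
    # -> http://127.0.0.1:8501/v1/models/resnet50:predict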
Example #2
    def run(self, switch_configs):
        try:
            if not runtime.FGs['enable_sandbox'] and bool(
                    utils.getKey('encrypted', dicts=switch_configs)):
                self.state.value = State.Error.value
                raise RuntimeError(
                    "model is encrypted, but sandbox is not available")

            # cleaning state
            self.state.value = State.Cleaning.value
            target_model = utils.getKey('model', dicts=switch_configs)
            target_implhash = target_model.get('implhash')
            target_version = target_model.get('version')
            if target_implhash is None:
                target_implhash = model.generateModelImplHashByExtractInfo(
                    target_model)
            target_model_config = model.loadModelInfoFromStorage(
                target_implhash, target_version)
            for i in range(self.backend_configs['inferprocnum']):
                if self.inferproc_th[i] is not None:
                    self.inferproc_th[i].terminate()
                    logging.debug("th: >>> {} is terminated".format(i))
            self.state.value = State.Cleaned.value

            # loading state
            self.state.value = State.Loading.value
            self.model_configs = target_model_config
            self.model_path = os.path.join(self.backend_configs['storage'],
                                           "models", target_implhash,
                                           target_version)

            # load customized model pre-process and post-process functions
            sys.path.append(self.model_path)
            self.predp = importlib.import_module('pre_dataprocess')
            self.postdp = importlib.import_module('post_dataprocess')
            self.state.value = State.Loaded.value

            # start inference process
            for i in range(self.backend_configs['inferprocnum']):
                self.inferproc_th[i] = Process(target=self.predictor,
                                               args=(
                                                   switch_configs,
                                                   self.inferproc_state[i],
                                                   self.state,
                                               ))
                self.inferproc_th[i].start()
            self.state.value = State.Running.value
        except Exception:
            self.state.value = State.Error.value
            raise  # bare raise preserves the original traceback
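run() walks a small state machine. A sketch of the states it moves through, assuming a State enum with the member names referenced above (the numeric values here are illustrative):

    from enum import Enum

    class State(Enum):
        Error = -1
        Cleaning = 1
        Cleaned = 2
        Loading = 3
        Loaded = 4
        Running = 5

    # Happy path: Cleaning -> Cleaned -> Loading -> Loaded -> Running;
    # any exception drops the backend into Error before re-raising.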
Example #3
def parseValidBackendInfo(info):
    # fill in defaults for any missing backend settings
    # (note: this mutates the passed-in dict in place)
    temp_backend_info = info
    if temp_backend_info.get('storage') is None:
        temp_backend_info['storage'] = utils.getKey('storage',
                                                    dicts=settings,
                                                    env_key='AISRV_STORAGE')
    if temp_backend_info.get('preheat') is None:
        temp_backend_info['preheat'] = utils.getKey('preheat', dicts=settings)
    if temp_backend_info.get('batchsize') is None:
        temp_backend_info['batchsize'] = 1
    if temp_backend_info.get('inferprocnum') is None:
        temp_backend_info['inferprocnum'] = 1

    regulator.ConstrainBackendInfo(temp_backend_info)
    return temp_backend_info
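A hypothetical call, assuming this module is importable and settings is populated; only 'impl' is supplied, and the remaining keys fall back to settings or hard defaults:

    info = {'impl': 'tfsrv.frozen'}          # illustrative impl string
    configs = parseValidBackendInfo(info)
    # configs now also carries 'storage', 'preheat',
    # 'batchsize' (default 1) and 'inferprocnum' (default 1)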
Example #4
def checkModelExist(model_hash, version):
    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    model_path = os.path.join(storage_path, "models", model_hash, version)
    return os.path.exists(model_path)
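This encodes the storage layout used throughout: <storage>/models/<model_hash>/<version>. A hypothetical check (the hash and version values are made up):

    if not checkModelExist("9a1b2c3d", "1"):
        print("model 9a1b2c3d:1 is not installed")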
Example #5
    def _loadModel(self, load_configs):
        try:
            # load tensorflow session
            # derive the model type from the second segment of the impl
            # string (e.g. "tfpy.frozen" -> "frozen") and validate it
            model_type = utils.getKey(
                'm',
                {'m': self.model_configs['impl'].split(".")[1]},
                v=ModelTypeValidator)
            # model_type = utils.getKey('mode', dicts=load_configs, v=ModelTypeValidator)
            if model_type == ModelType.Frozen:
                self.__loadFrozenModel()
            elif model_type == ModelType.Unfrozen:
                self.__loadUnfrozenModel()
            # set input/output tensor
            tensor_map = json.loads(
                self.model_configs['modelext']).get('tensors')
            self.input_tensor_vec = []
            for it in tensor_map['input']:
                self.input_tensor_vec.append(
                    self.model_object.graph.get_tensor_by_name(it))
            self.output_tensor_vec = []
            for it in tensor_map['output']:
                self.output_tensor_vec.append(
                    self.model_object.graph.get_tensor_by_name(it))

            return True
        except Exception:
            self.output_tensor_vec = []
            self.input_tensor_vec = []
            raise
Example #6
def dumpModelInfoToStorage(model_hash, version, detail):
    if not checkModelExist(model_hash, version):
        raise ValueError("model not exist")

    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    dist_path = os.path.join(storage_path, "models",
                             model_hash, version, "distros.json")
    with open(dist_path, 'w') as dist_file:
        dist_file.write(json.dumps(detail, indent=2))
Example #7
def listModels(simple=False):
    # NOTE: the 'simple' flag is accepted but currently unused
    model_list = []
    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    for m in os.listdir(os.path.join(storage_path, "models")):
        for v in os.listdir(os.path.join(storage_path, "models", m)):
            detail = loadModelInfoFromStorage(m, v)
            model_list.append(detail)
    return {'models': model_list}
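listModels simply pairs the two directory levels below <storage>/models. The same traversal sketched with pathlib (an alternative formulation, not the project's code):

    from pathlib import Path

    def list_model_dirs(storage_path):
        # yields (implhash, version) pairs from <storage>/models/<hash>/<version>
        for m in Path(storage_path, "models").iterdir():
            for v in m.iterdir():
                yield m.name, v.name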
Example #8
    def __inferBatch(self, passby_lists):
        # feed one batch to the TFLite interpreter and collect every
        # output tensor
        ori = utils.getKey('shape', dicts=passby_lists)
        self.model_object.set_tensor(self.input_details[0]['index'], ori)
        self.model_object.invoke()
        infer_lists = []
        for detail in self.output_details:
            infer_lists.append(
                self.model_object.get_tensor(detail['index']).squeeze())
        return infer_lists
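This is the standard TensorFlow Lite interpreter cycle: set_tensor -> invoke -> get_tensor. A minimal standalone sketch of the same cycle (the model path and zeroed input are placeholders):

    import numpy as np
    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path="model.tflite")  # placeholder
    interpreter.allocate_tensors()
    inp = interpreter.get_input_details()[0]
    out = interpreter.get_output_details()[0]
    interpreter.set_tensor(inp['index'],
                           np.zeros(inp['shape'], dtype=inp['dtype']))
    interpreter.invoke()
    result = interpreter.get_tensor(out['index']).squeeze()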
Example #9
def createModel(model):
    model['implhash'] = generateModelImplHashByExtractInfo(model)

    if checkModelExist(model['implhash'], model['version']):
        raise RuntimeError("model already exists")
    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    os.makedirs(os.path.join(storage_path, "models",
                             model['implhash'], model['version']))
    dumpModelInfoToStorage(model['implhash'], model['version'], model)
Example #10
def deleteModel(model):
    if model.get('implhash') is None:
        model['implhash'] = generateModelImplHashByExtractInfo(model)
    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    model_path = os.path.join(storage_path, "models",
                              model['implhash'], model['version'])
    if not os.path.exists(model_path):
        raise DeleteModelError(msg="model not exist")
    shutil.rmtree(model_path)
Example #11
    def _loadModel(self, load_configs):
        model_type = utils.getKey(
            'm',
            {'m': self.model_configs['impl'].split(".")[1]},
            v=ModelTypeValidator)
        if model_type == ModelType.StructureEmbed:
            return self._loadStructEmbedModel(load_configs)
        if model_type == ModelType.StructureSplit:
            return self._loadStructSplitModel(load_configs)
        return True
Example #12
    def _getDevice(self, load_configs):
        device = utils.getKey(
            'be.trpy.device', dicts=settings, level=utils.Access.Optional)
        logging.debug(device)
        if device is None:
            device = 'cpu'
        logging.debug("torch get device on: {}".format(device))
        if 'cuda' in device:
            torch.backends.cudnn.benchmark = True
            logging.debug("set torch.backends.cudnn.benchmark = True")
        return torch.device(device)
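The returned torch.device is what models and tensors are later moved to. A standalone sketch of the same selection logic (the 'be.trpy.device' lookup is replaced by a CUDA availability check here):

    import torch

    device_name = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    if 'cuda' in device_name:
        # benchmark mode speeds up convolutions with fixed input shapes
        torch.backends.cudnn.benchmark = True
    device = torch.device(device_name)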
Example #13
def loadModelInfoFromStorage(model_hash, version):
    if not checkModelExist(model_hash, version):
        raise ValueError("model not exist")

    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    dist_path = os.path.join(storage_path, "models",
                             model_hash, version, "distros.json")
    with open(dist_path, 'r') as dist_file:
        detail = json.load(dist_file)
    return detail
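Taken together with checkModelExist and dumpModelInfoToStorage, the per-model layout on disk looks like this (reconstructed from the code above; file names other than distros.json are examples drawn from the other snippets):

    <AISRV_STORAGE>/
        models/
            <implhash>/
                <version>/
                    distros.json        # metadata written by dumpModelInfoToStorage
                    param.pth           # e.g. torch parameters (Example #14)
                    pre_dataprocess.py  # customized pre/post-process (Example #2)
                    post_dataprocess.py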
Example #14
    def _loadParameter(self, load_configs):
        path = os.path.join(self.model_path, "param.pth")
        device = utils.getKey(
            'be.trpy.device', dicts=settings, level=utils.Access.Optional)
        if device is None:
            device = 'cpu'
        logging.debug("torch load parameters on: {}".format(device))
        parameters = torch.load(path, map_location=device)['state_dict']

        mixed_mode = utils.getKey('be.trpy.mixed_mode', settings)
        if mixed_mode == '1':
            current_state = self.model_object.state_dict()
            new_state = collections.OrderedDict()
            for key, _ in current_state.items():
                if (key in parameters
                        and parameters[key].size() == current_state[key].size()):
                    new_state[key] = parameters[key]
                else:
                    new_state[key] = current_state[key]
                    logging.warning(
                        'no pre-trained parameters found for {}'.format(key))
            self.model_object.load_state_dict(new_state)
        else:
            self.model_object.load_state_dict(parameters)
        self.model_object.eval()
Example #15
def initializeBackend(info, passby_model=None):
    configs = parseValidBackendInfo(info)
    # configs['queue.in'] = redis.Redis(connection_pool=runtime.Conns['redis.pool'])
    # TODO(arth): move to LoadModels
    # configs['encrypted'] = utils.getKey('encrypted', dicts=init_data)
    # if runtime.FGs['enable_sandbox']:
    #     configs['a64'] = utils.getKey('a64key', dicts=init_data, level=utils.Access.Optional)
    #     configs['pvt'] = utils.getKey('pvtpth', dicts=init_data, level=utils.Access.Optional)

    backend_instance = None
    impl_backend = utils.getKey(
        'm', dicts={'m': configs['impl'].split(".")[0]}, v=sb.Validator)

    if impl_backend == sb.Type.TfPy:
        from serving.backend import tensorflow_python as tfpy
        backend_instance = tfpy.TfPyBackend(configs)

    if impl_backend == sb.Type.TfSrv:
        from serving.backend import tensorflow_serving as tfsrv
        backend_instance = tfsrv.TfSrvBackend(configs)

    if impl_backend == sb.Type.Torch:
        from serving.backend import torch_python as trpy
        backend_instance = trpy.TorchPyBackend(configs)

    if impl_backend == sb.Type.RknnPy:
        from serving.backend import rknn_python as rknnpy
        backend_instance = rknnpy.RKNNPyBackend(configs)

    if impl_backend == sb.Type.TfLite:
        from serving.backend import tensorflow_lite as tflite
        backend_instance = tflite.TfLiteBackend(configs)

    if backend_instance is None:
        raise CreateAndLoadModelError(
            msg="unknown error, failed to create backend")
    bid = str(len(runtime.BEs))
    runtime.BEs[bid] = backend_instance
    logging.debug(runtime.BEs)
    return {'code': 0, 'msg': bid}
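The dispatch keys off the first segment of the 'impl' string, so an impl such as "tfsrv.frozen" selects the TensorFlow Serving backend. A hypothetical invocation (assuming the module is importable; a real info dict may require more keys):

    resp = initializeBackend({'impl': 'tfsrv.frozen'})   # illustrative
    bid = resp['msg']             # backend id, the key into runtime.BEs
    backend = runtime.BEs[bid]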
Example #16
def unpackImageBundleAndImportWithDistro(detail):
    bundle_id = detail.pop('bundle')
    given_implhash = detail['implhash']
    given_version = detail['version']

    bundle_path = os.path.join("/tmp", bundle_id + ".tar.gz")
    if not os.path.exists(bundle_path):
        raise ImportModelDistroError(msg="failed to find temporary bundle")
    validateModelInfo(detail)

    # decompress image
    with tarfile.open(bundle_path, "r") as tar:
        tar.extractall("/tmp")

    # configs.json -> distros.json, make ImageBundle to DistroBundle
    target_config = {}
    content_path = os.path.join("/tmp", detail['implhash'])
    with open(os.path.join(content_path, "configs.json"), 'r') as config_file:
        target_config = json.loads(config_file.read())

    target_implhash = target_config['implhash']
    target_version = target_config['version']
    if target_implhash != given_implhash:
        raise ImportModelDistroError(msg="incompatible model image")
    # NOTE: target_version is read but never compared against given_version
    os.remove(os.path.join(content_path, "configs.json"))

    # import bundle
    storage = utils.getKey('storage', dicts=settings, env_key='AISRV_STORAGE')
    target_model_path = os.path.join(storage, "models", given_implhash)

    if not os.path.exists(target_model_path):
        os.makedirs(target_model_path)
    if os.path.exists(os.path.join(target_model_path, given_version)):
        raise ImportModelDistroError(
            msg="model already exist with specific version")
    shutil.move(content_path, os.path.join(target_model_path, given_version))
    dumpModelInfoToStorage(given_implhash, given_version, detail)
Example #17
def buildImageBundleFromDistroBundle(model):
    storage_path = utils.getKey(
        'storage', dicts=settings, env_key='AISRV_STORAGE')
    if not model.get('implhash'):
        model['implhash'] = generateModelImplHashByExtractInfo(model)
    model_hash = model['implhash']
    model_version = model['version']
    # model_path = os.path.join(storage_path, "models", model_hash, model_version)

    tmp_path = os.path.join("/tmp", model_hash)
    if os.path.exists(tmp_path):
        shutil.rmtree(tmp_path)
    shutil.copytree(os.path.join(storage_path, "models",
                                 model_hash, model_version), tmp_path)

    # distros.json -> configs.json
    distro = loadModelInfoFromStorage(model_hash, model_version)

    del distro['threshold']
    del distro['mapping']
    if 'disthash' in distro:
        del distro['disthash']
    os.remove(os.path.join(tmp_path, "distros.json"))

    with open(os.path.join(tmp_path, "configs.json"), 'w') as config_file:
        config_file.write(json.dumps(distro, indent=2))

    if os.path.exists(os.path.join(tmp_path, "__pycache__")):
        shutil.rmtree(os.path.join(tmp_path, "__pycache__"))

    # compress
    with tarfile.open(os.path.join("/tmp", model_hash + ".tar.gz"),
                      "w:gz") as tar:
        tar.add(tmp_path, arcname=model_hash)
    # clean tmp files
    shutil.rmtree(tmp_path)
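buildImageBundleFromDistroBundle and unpackImageBundleAndImportWithDistro (Example #16) are inverses: distros.json becomes configs.json on export and is converted back on import. The tar round trip in isolation (standalone; paths and names are illustrative):

    import tarfile

    with tarfile.open("/tmp/abc123.tar.gz", "w:gz") as tar:   # pack
        tar.add("/tmp/abc123", arcname="abc123")
    with tarfile.open("/tmp/abc123.tar.gz", "r") as tar:      # unpack
        tar.extractall("/tmp")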
Example #18
dev_serial = None
dev_debug = True

FGs = {
    'enable_sandbox': True,
    'enable_device_validation': False,
    'enable_regulator': True,
    'use_native_stat': True
}
Ps = {'native_stat': lambda: print("IOI")}

BEs = {}
Conns = {
    'redis.pool':
    redis.ConnectionPool(host=utils.getKey('redis.host', dicts=settings),
                         port=utils.getKey('redis.port', dicts=settings),
                         db=5),
}

main_process_pid = 0


def init_main_process_pid():
    global main_process_pid
    main_process_pid = os.getpid()


def default_dev_validator():
    global dev_serial
    dsn = sandbox.default_device_serial()