Example #1
    def predict_image(module_name):
        # Reject requests for modules that are not registered as CV modules.
        if request.path.split("/")[-1] not in cv_module:
            return {"error": "Module {} is not available.".format(module_name)}
        req_id = request.data.get("id")
        global use_gpu, batch_size_dict
        # Images may arrive as base64 strings in the "image" form field.
        img_base64 = request.form.getlist("image")
        # Collect every other form field as extra information for the predictor.
        extra_info = {}
        for item in list(request.form.keys()):
            extra_info.update({item: request.form.getlist(item)})

        # Fields carrying a serialized "b64s" payload are decoded back into
        # OpenCV images; everything else is passed through unchanged.
        for key in extra_info.keys():
            if isinstance(extra_info[key], list):
                extra_info[key] = utils.base64s_to_cvmats(
                    eval(extra_info[key][0])["b64s"]) if isinstance(
                        extra_info[key][0], str
                    ) and "b64s" in extra_info[key][0] else extra_info[key]

        file_name_list = []
        if img_base64 != []:
            # Decode each base64 image and write it to a uniquely named file.
            for item in img_base64:
                ext = item.split(";")[0].split("/")[-1]
                if ext not in ["jpeg", "jpg", "png"]:
                    return {"result": "Unrecognized file type"}
                filename = req_id + "_" \
                           + utils.md5(str(time.time()) + item[0:20]) \
                           + "." \
                           + ext
                img_data = base64.b64decode(item.split(',')[-1])
                file_name_list.append(filename)
                with open(filename, "wb") as fp:
                    fp.write(img_data)
        else:
            # Fall back to multipart file uploads when no base64 field is sent.
            file = request.files.getlist("image")
            for item in file:
                file_name = req_id + "_" + item.filename
                item.save(file_name)
                file_name_list.append(file_name)
        # Resolve the prediction function: use the explicit mapping if one
        # exists, otherwise derive the name from the module's type string.
        module = default_module_manager.get_module(module_name)
        predict_func_name = cv_module_method.get(module_name, "")
        if predict_func_name != "":
            predict_func = eval(predict_func_name)
        else:
            module_type = module.type.split("/")[-1].replace("-", "_").lower()
            predict_func = eval("predict_" + module_type)
        batch_size = batch_size_dict.get(module_name, 1)
        if file_name_list == []:
            file_name_list = None
        if extra_info == {}:
            extra_info = None
        results = predict_func(module, file_name_list, req_id, batch_size,
                               extra_info)
        r = {"results": str(results)}
        return r
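
For reference, the handler accepts images either as base64 strings in the "image" form field or as multipart uploads. Below is a minimal client sketch for both styles; the route pattern /predict/image/<module_name>, the host, the port, and the way the request id travels are assumptions for illustration, not something stated in the example.

# Hypothetical client for predict_image; URL, port, and field names are
# assumptions made for illustration only.
import base64

import requests


def call_predict_image(image_path, module_name,
                       host="http://127.0.0.1:8866"):
    # Build a data-URI-style string so that the handler's
    # item.split(";")[0].split("/")[-1] resolves to a recognized extension.
    with open(image_path, "rb") as fp:
        b64 = base64.b64encode(fp.read()).decode("utf-8")
    payload = {"image": "data:image/jpeg;base64," + b64}
    url = "{}/predict/image/{}".format(host, module_name)
    return requests.post(url, data=payload).json()


def call_predict_image_upload(image_path, module_name,
                              host="http://127.0.0.1:8866"):
    # Upload the raw file instead; the handler falls back to
    # request.files.getlist("image") when no base64 field is present.
    url = "{}/predict/image/{}".format(host, module_name)
    with open(image_path, "rb") as fp:
        return requests.post(url, files={"image": fp}).json()

Either path ends with the handler writing each image to disk (named from the request id, plus an md5 of the payload in the base64 case) and dispatching to the resolved predict_* function.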
Example #2
    def predict_text(module_name):
        # Reject requests for modules that are not registered as NLP modules.
        if request.path.split("/")[-1] not in nlp_module:
            return {"error": "Module {} is not available.".format(module_name)}
        req_id = request.data.get("id")
        # Collect every form field (e.g. the input sentences) as model input.
        inputs = {}
        for item in list(request.form.keys()):
            inputs.update({item: request.form.getlist(item)})
        # Save any uploaded files to disk and pass their paths along as extras.
        files = {}
        for file_key in list(request.files.keys()):
            files[file_key] = []
            for file in request.files.getlist(file_key):
                file_name = req_id + "_" + file.filename
                files[file_key].append(file_name)
                file.save(file_name)
        # Run the NLP prediction with the module's configured batch size.
        module = default_module_manager.get_module(module_name)
        results = predict_nlp(module=module,
                              input_text=inputs,
                              req_id=req_id,
                              batch_size=batch_size_dict.get(module_name, 1),
                              extra=files)
        return results
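
The text handler forwards every form field to predict_nlp as input_text and passes any uploaded files as extras, so a caller only needs to post its sentences as repeated form fields. A minimal sketch, assuming a hypothetical route /predict/text/<module_name> and the field name "text":

# Hypothetical client for predict_text; URL, port, and the "text" field name
# are assumptions, since the handler accepts any form keys.
import requests


def call_predict_text(sentences, module_name,
                      host="http://127.0.0.1:8866"):
    # Posting a list sends one "text" field per sentence, which the handler
    # gathers with request.form.getlist.
    url = "{}/predict/text/{}".format(host, module_name)
    return requests.post(url, data={"text": sentences}).json()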