Example #1
    def __init__(self, *args):
        # TVM execution context (CPU backend is hardcoded)
        ctx = tvm.cpu()

        model_path = args[0] + '/'
        input_shape = shape_str_to_npshape(args[1])
        input_type = datatype_str_to_nptype(args[2])
        output_shape = shape_str_to_npshape(args[3])
        output_type = datatype_str_to_nptype(args[4])

        if input_type is None:
            print("Invalid input_type")
            return None
        if output_type is None:
            print("Invalid output_type")
            return None
        self.input_dims = [nns.TensorShape(input_shape, input_type)]
        self.output_dims = [nns.TensorShape(output_shape, output_type)]

        loaded_json = open(model_path + 'deploy_graph.json').read()
        loaded_lib = tvm.module.load(model_path + 'deploy_lib.tar')
        loaded_params = bytearray(
            open(model_path + 'deploy_param.params', 'rb').read())

        self.module = graph_runtime.create(loaded_json, loaded_lib, ctx)
        self.module.load_params(loaded_params)
        return None
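For context, a minimal sketch of an invoke() method that could pair with the constructor above, assuming the NNStreamer python3 tensor_filter convention (invoke receives and returns lists of numpy buffers); the graph input name 'data' and the buffer layout are assumptions that depend on the compiled model.

    def invoke(self, input_array):
        # Hypothetical sketch: reinterpret the flat input buffer using the
        # declared shape/type and run the TVM graph runtime module.
        in_shape = self.input_dims[0].getDims()
        in_type = self.input_dims[0].getType()
        data = input_array[0].view(in_type).reshape(in_shape)

        self.module.set_input('data', data)  # 'data' is a placeholder name
        self.module.run()

        # Return the first output as a single flat buffer
        return [self.module.get_output(0).asnumpy().flatten()]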
Example #2
    def __init__(self, *args):
        # Parse arguments
        model_path = args[0]
        input_shapes = shapes_str_to_npshapes(args[1])
        input_types = datatypes_str_to_nptypes(args[2])
        output_shapes = shapes_str_to_npshapes(args[3])
        output_types = datatypes_str_to_nptypes(args[4])
        input_names = names_str_to_strarray(args[5])
        output_names = names_str_to_strarray(args[6])
        for input_type in input_types:
            if input_type is None:
                print("Invalid input_type")
                return None
        for output_type in output_types:
            if output_type is None:
                print("Invalid output_type")
                return None
        if (len(input_shapes) > 4 or len(input_types) > 4
                or len(input_names) > 4
                or len(input_shapes) != len(input_types)
                or len(input_shapes) != len(input_names)):
            print("Invalid input count: (%d,%d,%d)".format(
                len(input_shapes), len(input_types), len(input_names)))
            return None
        if (len(output_shapes) > 4 or len(output_types) > 4
                or len(output_names) > 4
                or len(output_shapes) != len(output_types)
                or len(output_shapes) != len(output_names)):
            print("Invalid output count: (%d,%d,%d)".format(
                len(output_shapes), len(output_types), len(output_names)))
            return None
        self.input_dims = []
        self.output_dims = []
        for i in range(len(input_shapes)):
            input_dim = nns.TensorShape(input_shapes[i], input_types[i])
            self.input_dims.append(input_dim)
        for i in range(len(output_shapes)):
            output_dim = nns.TensorShape(output_shapes[i], output_types[i])
            self.output_dims.append(output_dim)
        self.input_names = input_names
        self.output_names = output_names

        # Initialize TVM runtime session with given binary
        session = rpc.LocalSession()
        session.upload(os.path.join(model_path, "mod.so"))
        lib = session.load_module("mod.so")
        ctx = session.cpu()  # TODO: Hardcoded CPU backend

        # Load graph and create a module
        self.graph = open(os.path.join(model_path, "mod.json")).read()
        self.module = runtime.create(self.graph, lib, ctx)

        # Load params
        self.params = bytearray(
            open(os.path.join(model_path, "mod.params"), "rb").read())
        self.module.load_params(self.params)
        return None
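As with Example #1, a hypothetical invoke() sketch for this filter, again assuming the NNStreamer python3 tensor_filter convention; here every input is fed to the TVM module by the name parsed from args[5].

    def invoke(self, input_array):
        # Hypothetical sketch: feed each declared input by name, run the
        # module, and return one flat buffer per declared output.
        for i, name in enumerate(self.input_names):
            dim = self.input_dims[i]
            data = input_array[i].view(dim.getType()).reshape(dim.getDims())
            self.module.set_input(name, data)
        self.module.run()

        return [self.module.get_output(i).asnumpy().flatten()
                for i in range(len(self.output_dims))]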
Example #3
    def convert(self, input_array):
        # Pass the incoming buffer through unchanged as a single int32
        # tensor whose leading dimension matches the buffer length, at a
        # fixed framerate of 10/1.
        rate_n = 10
        rate_d = 1
        dim = [len(input_array[0]), 1, 1, 1]
        ttype = np.int32
        tensors_info = [nns.TensorShape(dim, ttype)]

        return tensors_info, input_array, rate_n, rate_d
Example #4
    def convert(self, input_array):
        # Parse the incoming buffer as JSON and re-emit two uint8 tensors:
        # the NUL-terminated "json_string" field and the re-serialized
        # "json_object" field.
        json_data = json.loads(input_array[0].tobytes())
        json_string = (json_data["json_string"] + '\0').encode()
        json_object = json.dumps(json_data["json_object"]).encode()

        output_array1 = np.frombuffer(json_string, dtype=np.uint8)
        output_array2 = np.frombuffer(json_object, dtype=np.uint8)
        raw_data = [output_array1, output_array2]

        rate_n = 10
        rate_d = 1
        dim1 = [len(raw_data[0]), 1, 1, 1]
        dim2 = [len(raw_data[1]), 1, 1, 1]
        ttype = np.uint8
        tensors_info = [nns.TensorShape(dim1, ttype), nns.TensorShape(dim2, ttype)]

        return tensors_info, raw_data, rate_n, rate_d
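To make the expected payload concrete, here is a hypothetical helper (not part of the original converter) that builds a one-tensor input for the convert() above; only the keys "json_string" and "json_object" come from the parser, the sample values are invented.

    @staticmethod
    def make_example_input():
        # Hypothetical helper: the two keys are the ones convert() reads;
        # the values are made-up samples.
        payload = json.dumps({
            "json_string": "hello world",
            "json_object": {"label": "cat", "score": 0.9},
        }).encode()
        return [np.frombuffer(payload, dtype=np.uint8)]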
Example #5
    def __init__(self, *args):
        # Parse arguments
        model_path = args[0]
        input_shapes = util.shapes_str_to_npshapes(args[1])
        input_types = util.datatypes_str_to_nptypes(args[2])
        input_names = util.names_str_to_strarray(args[3])
        num_fragments = int(args[4])
        target_uri = args[5]

        for input_type in input_types:
            if input_type is None:
                print("Invalid input_type")
                return None
        if (len(input_shapes) > 4 or len(input_types) > 4
                or len(input_names) > 4
                or len(input_shapes) != len(input_types)
                or len(input_shapes) != len(input_names)):
            print("Invalid input count: (%d,%d,%d)".format(
                len(input_shapes), len(input_types), len(input_names)))
            return None
        self.input_dims = []
        for i, input_shape in enumerate(input_shapes):
            input_dim = nns.TensorShape(input_shape, input_types[i])
            self.input_dims.append(input_dim)
        output_dim = nns.TensorShape([1, 1, 1, 1], np.int32)
        self.output_dims = [output_dim]
        self.input_names = input_names
        self.num_fragments = num_fragments
        self.target_uri = target_uri

        # Initialize fragment runner
        self.offload_from = num_fragments - 1  # Initial setting
        model_path_head = os.path.join(model_path, 'mod')
        self.interpreters = runner.load_model(model_path_head, num_fragments)

        # Connect to target
        self.is_connected = False
        self.connect_to_target()
        return None
Example #6
    def setInputDim(self, input_dims):
        if len(input_dims) != 1:
            print("Only one input tensor is allowed")
            return None

        self.input_dims = input_dims
        self.output_dims = [nns.TensorShape(self.input_dims[0].getDims(),
                                            self.input_dims[0].getType())]

        # Override the output dimensions when a new size is configured
        dims = self.output_dims[0].getDims()
        if self.new_x > 0:
            dims[1] = self.new_x
        if self.new_y > 0:
            dims[2] = self.new_y

        return self.output_dims
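setInputDim() above relies on self.new_x and self.new_y; purely as a hypothetical sketch (the argument names and order are assumptions, not the original code), they might be initialized from the filter's custom arguments like this.

    def __init__(self, *args):
        # Hypothetical constructor sketch: take the target x/y sizes from
        # the custom arguments; 0 means "keep the input dimension".
        self.new_x = int(args[0]) if len(args) > 0 else 0
        self.new_y = int(args[1]) if len(args) > 1 else 0
        self.input_dims = []
        self.output_dims = []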
Example #7
    def convert(self, input_array):
        # Parse the incoming buffer as a flexbuffer map
        data = input_array[0].tobytes()
        root = flexbuffers.GetRoot(data)
        tensors = root.AsMap

        num_tensors = tensors['num_tensors'].AsInt
        rate_n = tensors['rate_n'].AsInt
        rate_d = tensors['rate_d'].AsInt
        raw_data = []
        tensors_info = []

        for i in range(num_tensors):
            # Each "tensor_{i}" entry is a vector whose elements at index
            # 1, 2, and 3 hold the type code, the 4-element dimension
            # vector, and the raw data blob, respectively.
            tensor_key = "tensor_{idx}".format(idx=i)
            tensor = tensors[tensor_key].AsVector
            ttype = convert_to_numpy_type(tensor[1].AsInt)
            tdim = tensor[2].AsTypedVector
            dim = []
            for j in range(4):
                dim.append(tdim[j].AsInt)
            tensors_info.append(nns.TensorShape(dim, ttype))
            raw_data.append(np.frombuffer(tensor[3].AsBlob, dtype=np.uint8))

        return tensors_info, raw_data, rate_n, rate_d
Example #8
    def __init__(self, *args):
        self.input_dims = [nns.TensorShape([D1, D2, D3, D4], np.uint8)]
        self.output_dims = [nns.TensorShape([D1, D2, D3, D4], np.uint8)]
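With identical input and output shapes declared above, a passthrough invoke() is the natural companion; a minimal sketch, again assuming the NNStreamer python3 tensor_filter convention used in the other examples.

    def invoke(self, input_array):
        # Hypothetical passthrough: hand the buffers back unchanged
        return input_array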