Example 1
def run_models_for_device(flags, args, dev):
    conf = config_parser.parse(flags.config)
    for name, model_conf in conf["models"].items():
        if not flags.model_name or name == flags.model_name:
            MaceLogger.info("Run model %s" % name)
            model_conf = config_parser.normalize_model_config(model_conf)
            run_model_for_device(flags, args, dev, name, model_conf)
Example 2
def normalize_model_config(conf, model_output=None, org_model_dir=None):
    conf = normalize_graph_config(conf, model_output, org_model_dir)
    if ModelKeys.subgraphs in conf:
        nor_subgraphs = {}
        if isinstance(conf[ModelKeys.subgraphs], list):
            nor_subgraph = normalize_graph_config(conf[ModelKeys.subgraphs][0],
                                                  model_output, org_model_dir)
            conf[ModelKeys.input_tensors] = \
                nor_subgraph[ModelKeys.input_tensors]
            conf[ModelKeys.output_tensors] = \
                nor_subgraph[ModelKeys.output_tensors]
            set_default_config_value(nor_subgraph, conf)
            nor_subgraphs[ModelKeys.default_graph] = nor_subgraph
        else:
            for graph_name, subgraph in conf[ModelKeys.subgraphs].items():
                nor_subgraph = normalize_graph_config(subgraph, model_output,
                                                      org_model_dir)
                set_default_config_value(nor_subgraph, conf)
                nor_subgraphs[graph_name] = nor_subgraph

        conf[ModelKeys.subgraphs] = nor_subgraphs

        model_base_conf = copy.deepcopy(conf)
        del model_base_conf[ModelKeys.subgraphs]
        subgraphs = conf[ModelKeys.subgraphs]
        for net_name, subgraph in subgraphs.items():
            net_conf = copy.deepcopy(model_base_conf)
            net_conf.update(subgraph)
            subgraphs[net_name] = net_conf

    MaceLogger.summary(conf)
    return conf
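
A hedged sketch of the two subgraphs layouts this function accepts; the ModelKeys names come from the code above, while the graph names, tensors and values are made up for illustration:

# Single-subgraph config given as a list: only the first entry is normalized,
# stored under ModelKeys.default_graph, and its input/output tensors are
# lifted to the top level of conf.
list_style_conf = {
    ModelKeys.platform: "tensorflow",
    ModelKeys.runtime: "cpu",
    ModelKeys.subgraphs: [{
        ModelKeys.input_tensors: "input",
        ModelKeys.output_tensors: "output",
    }],
}

# Multi-subgraph config given as a dict: every named subgraph is normalized,
# inherits missing values from the top level (set_default_config_value) and
# is finally merged with a deep copy of the top-level settings.
dict_style_conf = {
    ModelKeys.platform: "tensorflow",
    ModelKeys.runtime: "cpu",
    ModelKeys.subgraphs: {
        "graph0": {ModelKeys.input_tensors: "in0",
                   ModelKeys.output_tensors: "out0"},
        "graph1": {ModelKeys.input_tensors: "in1",
                   ModelKeys.output_tensors: "out1"},
    },
}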
Example 3
def run_models(flags, args):
    if flags.device_conf:
        device_conf = config_parser.parse_device_info(flags.device_conf)
        device.ArmLinuxDevice.set_devices(device_conf)

    run_devices = device.choose_devices(flags.target_abi, flags.target_socs)
    MaceLogger.info("Run on devices: %s" % run_devices)

    for device_id in run_devices:
        dev = device.create_device(flags.target_abi, device_id)
        run_models_for_device(flags, args, dev)
Example 4
def get_version(flags):
    device_id = get_cur_device_id(flags)
    if device_id is not None:
        android_device = device.create_device(flags.target_abi, device_id)
        target_props = android_device.info()
        target_soc = target_props["ro.board.platform"]
        android_ver = int(target_props["ro.build.version.release"])
        apu_version = get_apu_version(True, android_ver, target_soc)
    else:
        apu_version = 4
        MaceLogger.warning("Can not get unique device ID, MACE select the"
                           " latest apu version: %s" % apu_version)
    sys.exit(apu_version)
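
Because get_version() reports its result through sys.exit(), a caller has to read the APU version from the process exit status. A minimal sketch, assuming the function is exposed by a script such as tools/python/apu_utils.py (the exact script path and sub-command are assumptions):

import subprocess

# Hypothetical invocation; only the exit-code handling is the point here.
ret = subprocess.run(["python3", "tools/python/apu_utils.py", "get-version",
                      "--target_abi", "arm64-v8a"])
apu_version = ret.returncode  # sys.exit(apu_version) becomes the exit status
print("Detected APU version:", apu_version)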
Example 5
def generate_input_data(input_file, input_node, input_shape, input_ranges,
                        input_data_type):
    np.random.seed()
    for i in range(len(input_node)):
        data = np.random.random(input_shape[i]) * (
            input_ranges[i][1] - input_ranges[i][0]) + input_ranges[i][0]
        input_file_name = util.formatted_file_name(input_file, input_node[i])
        MaceLogger.info('Generate input file: %s' % input_file_name)
        if input_data_type[i] == mace_pb2.DT_FLOAT:
            np_data_type = np.float32
        elif input_data_type[i] == mace_pb2.DT_INT32:
            np_data_type = np.int32
        else:
            mace_check(False, "Unsupported input data type: %s"
                       % input_data_type[i])

        data.astype(np_data_type).tofile(input_file_name)
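
A hedged usage sketch with made-up node names, shapes and ranges; one random tensor file is written per input node, named after the given prefix via util.formatted_file_name():

generate_input_data(
    input_file="/tmp/mace_run/model_input",      # illustrative prefix
    input_node=["input0", "input1"],
    input_shape=[[1, 224, 224, 3], [1, 10]],
    input_ranges=[[-1.0, 1.0], [0.0, 10.0]],
    input_data_type=[mace_pb2.DT_FLOAT, mace_pb2.DT_INT32])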
Example 6
def get_cur_device_id(flags):
    run_devices = device.choose_devices(flags.target_abi, "all")
    run_device = None
    device_num = len(run_devices)
    if device_num == 0:  # for CI
        MaceLogger.warning("No Android devices are plugged in, "
                           "you need to copy `apu` so files by yourself.")
    elif device_num > 1:  # for CI
        MaceLogger.warning("More than one Android devices are plugged in, "
                           "you need to copy `apu` so files by yourself.")
    else:
        run_device = run_devices[0]

    return run_device
Example 7
def run_models(flags, args):
    if flags.device_conf:
        device_conf = config_parser.parse_device_info(flags.device_conf)
        device.ArmLinuxDevice.set_devices(device_conf)

    run_devices = device.choose_devices(flags.target_abi, flags.target_socs)
    MaceLogger.info("Run on devices: %s" % run_devices)

    for device_id in run_devices:
        if len(flags.devices_to_run) > 0 and \
                device_id not in flags.devices_to_run.split(','):
            continue
        dev = device.create_device(flags.target_abi, device_id)
        run_models_for_device(flags, args, dev)
Example 8
def merge_opencl_binaries(opencl_binaries, output_file):
    # Keys unpacked below are bytes, so compare against a bytes literal.
    platform_info_key = b'mace_opencl_precompiled_platform_info_key'

    kvs = {}
    for binary in opencl_binaries:
        if not os.path.exists(binary):
            MaceLogger.warning("OpenCL bin %s not found" % binary)
            continue

        with open(binary, "rb") as f:
            binary_array = np.fromfile(f, dtype=np.uint8)

        idx = 0
        size, = struct.unpack("Q", binary_array[idx:idx + 8])
        idx += 8
        for _ in range(size):
            key_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            key, = struct.unpack(
                str(key_size) + "s", binary_array[idx:idx + key_size])
            idx += key_size
            value_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            if key == platform_info_key and key in kvs:
                mace_check(
                    (kvs[key] == binary_array[idx:idx + value_size]).all(),
                    "There exists more than one OpenCL version for models:"
                    " %s vs %s " %
                    (kvs[key], binary_array[idx:idx + value_size]))
            else:
                kvs[key] = binary_array[idx:idx + value_size]
            idx += value_size

    output_byte_array = bytearray()
    data_size = len(kvs)
    output_byte_array.extend(struct.pack("Q", data_size))
    for key, value in kvs.items():
        key_size = len(key)
        output_byte_array.extend(struct.pack("i", key_size))
        output_byte_array.extend(struct.pack(str(key_size) + "s", key))
        value_size = len(value)
        output_byte_array.extend(struct.pack("i", value_size))
        output_byte_array.extend(value)

    np.array(output_byte_array).tofile(output_file)
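
For reference, a small test-only sketch (not part of MACE) that writes a file in the same layout merge_opencl_binaries() parses: a native-order uint64 entry count, then per entry an int32 key size, the key bytes, an int32 value size and the value bytes:

import struct


def write_kv_binary(kvs, output_file):
    # kvs maps bytes keys to bytes values, e.g. {b"prog_a": b"\x01\x02"}.
    buf = bytearray()
    buf.extend(struct.pack("Q", len(kvs)))
    for key, value in kvs.items():
        buf.extend(struct.pack("i", len(key)))
        buf.extend(struct.pack("%ds" % len(key), key))
        buf.extend(struct.pack("i", len(value)))
        buf.extend(value)
    with open(output_file, "wb") as f:
        f.write(buf)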
Example 9
def merge_opencl_parameters(params_files, output_file):
    kvs = {}
    for params in params_files:
        if not os.path.exists(params):
            MaceLogger.warning("Tune param %s not found" % params)
            continue

        with open(params, "rb") as f:
            binary_array = np.fromfile(f, dtype=np.uint8)

        idx = 0
        size, = struct.unpack("Q", binary_array[idx:idx + 8])
        idx += 8
        for _ in range(size):
            key_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            key, = struct.unpack(
                str(key_size) + "s", binary_array[idx:idx + key_size])
            idx += key_size
            value_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            kvs[key] = binary_array[idx:idx + value_size]
            idx += value_size

    output_byte_array = bytearray()
    data_size = len(kvs)
    output_byte_array.extend(struct.pack("Q", data_size))
    for key, value in kvs.items():
        key_size = len(key)
        output_byte_array.extend(struct.pack("i", key_size))
        output_byte_array.extend(struct.pack(str(key_size) + "s", key))
        value_size = len(value)
        output_byte_array.extend(struct.pack("i", value_size))
        output_byte_array.extend(value)

    np.array(output_byte_array).tofile(output_file)
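
A hedged usage sketch; the file names are made up but follow the <model>_tuned_opencl_parameter.<device>.<soc>.bin pattern used by run_model_for_device() below:

params_files = [
    "build/mobilenet/opencl/mobilenet_tuned_opencl_parameter.Pixel4.msmnile.bin",
    "build/mobilenet/opencl/mobilenet_tuned_opencl_parameter.MI9.sdm855.bin",
]
merge_opencl_parameters(params_files,
                        "build/mobilenet/opencl/merged_tune_params.bin")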
Example 10
def run_model_for_device(flags, args, dev, model_name, model_conf):
    target_abi = flags.target_abi
    install_dir = run_target.default_install_dir(target_abi) + "/" + model_name
    sysdir = install_dir + "/interior"
    dev.mkdir(sysdir)

    runtime_list = []
    for graph_name, graph_conf in model_conf[ModelKeys.subgraphs].items():
        runtime = graph_conf[ModelKeys.runtime]
        runtime_list.append(runtime)
        mace_check(runtime != DeviceType.APU or target_abi == "arm64-v8a",
                   "APU runtime only supports arm64-v8a")

    # install models to devices
    workdir = flags.output + "/" + model_name
    model_file = model_name + ".pb"
    model_data_file = model_name + ".data"
    model_path = workdir + "/model/" + model_file
    model_data_path = workdir + "/model/" + model_data_file
    if os.path.exists(model_path) and os.path.exists(model_data_path):
        dev.install(Target(model_path), install_dir)
        dev.install(Target(model_data_path), install_dir)
    else:
        MaceLogger.warning("No models exist in %s, use --model_file and"
                           " --model_data_file specified in args" % model_path)

    if ModelKeys.check_tensors in model_conf:
        model_conf[ModelKeys.output_tensors] = model_conf[
            ModelKeys.check_tensors]
        model_conf[ModelKeys.output_shapes] = model_conf[
            ModelKeys.check_shapes]

    model_file_path = ""
    if not flags.gencode_model:
        model_file_path = install_dir + "/" + model_file
    model_data_file_path = ""
    if not flags.gencode_param:
        model_data_file_path = install_dir + "/" + model_data_file

    input_tensors_info = config_parser.find_input_tensors_info(
        model_conf[ModelKeys.subgraphs], model_conf[ModelKeys.input_tensors])
    output_tensors_info = config_parser.find_output_tensors_info(
        model_conf[ModelKeys.subgraphs], model_conf[ModelKeys.output_tensors])

    model_args = {
        "model_name": model_name,
        "model_file": model_file_path,
        "model_data_file": model_data_file_path,
        "input_node": ",".join(model_conf[ModelKeys.input_tensors]),
        "input_shape": join_2d_array(
            input_tensors_info[ModelKeys.input_shapes]),
        "output_node": ",".join(model_conf[ModelKeys.output_tensors]),
        "output_shape": join_2d_array(
            output_tensors_info[ModelKeys.output_shapes]),
        "input_data_format": ",".join([
            df.name
            for df in input_tensors_info[ModelKeys.input_data_formats]
        ]),
        "output_data_format": ",".join([
            df.name
            for df in output_tensors_info[ModelKeys.output_data_formats]
        ])
    }

    opts = [
        "--%s='%s'" % (arg_key, arg_val)
        for arg_key, arg_val in model_args.items()
    ] + args
    should_generate_data = (flags.validate or flags.tune
                            or "--benchmark" in opts)

    if should_generate_data:
        tmpdirname = tempfile.mkdtemp()
        input_file_prefix = tmpdirname + "/" + model_name

        if ModelKeys.validation_inputs_data in model_conf:
            input_tensor = model_conf[ModelKeys.input_tensors]
            input_data = model_conf[ModelKeys.validation_inputs_data]
            mace_check(
                len(input_tensor) == len(input_data),
                "len(input_tensor) != len(validate_data)")

            for i in range(len(input_tensor)):
                util.download_or_get_file(
                    model_conf[ModelKeys.validation_inputs_data][i], "",
                    util.formatted_file_name(input_file_prefix,
                                             input_tensor[i]))
        else:
            generate_input_data(input_file_prefix,
                                model_conf[ModelKeys.input_tensors],
                                input_tensors_info[ModelKeys.input_shapes],
                                input_tensors_info[ModelKeys.input_ranges],
                                input_tensors_info[ModelKeys.input_data_types])

        dev.install(Target(tmpdirname), install_dir + "/validate_in")
        target_input_file = "%s/validate_in/%s" % (install_dir, model_name)
        target_output_dir = "%s/validate_out" % install_dir
        dev.mkdir(target_output_dir)
        target_output_file = target_output_dir + "/" + model_name
        opts += [
            "--input_file=%s" % target_input_file,
            "--output_file=%s" % target_output_file
        ]

    # run
    envs = flags.envs.split(" ") + ["MACE_INTERNAL_STORAGE_PATH=%s" % sysdir]
    if flags.tune:
        envs += [
            "MACE_TUNING=1",
            "MACE_RUN_PARAMETER_PATH=%s/interior/tune_params" % install_dir
        ]
        opts += ["--round=0"]

    mace_check(flags.vlog_level >= 0,
               "vlog_level should be non-negative")
    envs += ["MACE_CPP_MIN_VLOG_LEVEL=%s" % flags.vlog_level]

    build_dir = flags.build_dir + "/" + target_abi
    libs = []
    if DeviceType.HEXAGON in runtime_list:
        libs += ["third_party/nnlib/%s/libhexagon_controller.so" % target_abi]
    elif DeviceType.HTA in runtime_list:
        libs += ["third_party/hta/%s/libhta_hexagon_runtime.so" % target_abi]
    elif DeviceType.APU in runtime_list:
        apu_libs = get_apu_so_paths(dev)
        libs += apu_libs

    cpp_shared_lib_path = os.path.join(build_dir,
                                       "install/lib/libc++_shared.so")
    if os.path.exists(cpp_shared_lib_path):
        libs.append(cpp_shared_lib_path)

    target = Target(build_dir + "/install/bin/mace_run",
                    libs,
                    opts=opts,
                    envs=envs)
    run_target.run_target(target_abi, install_dir, target, dev)

    if DeviceType.GPU in runtime_list:
        opencl_dir = workdir + "/opencl"
        util.mkdir_p(opencl_dir)
        dev.pull(
            Target(install_dir + "/interior/mace_cl_compiled_program.bin"),
            "%s/%s_compiled_opencl_kernel.%s.%s.bin" %
            (opencl_dir, model_name, dev.info()["ro.product.model"].replace(
                ' ', ''), dev.info()["ro.board.platform"]))
        if flags.tune:
            dev.pull(
                Target(install_dir + "/interior/tune_params"),
                "%s/%s_tuned_opencl_parameter.%s.%s.bin" %
                (opencl_dir, model_name,
                 dev.info()["ro.product.model"].replace(
                     ' ', ''), dev.info()["ro.board.platform"]))

    if flags.validate:
        validate_model_file = util.download_or_get_model(
            model_conf[ModelKeys.model_file_path],
            model_conf[ModelKeys.model_sha256_checksum], tmpdirname)

        validate_weight_file = ""
        if ModelKeys.weight_file_path in model_conf:
            validate_weight_file = util.download_or_get_model(
                model_conf[ModelKeys.weight_file_path],
                model_conf[ModelKeys.weight_sha256_checksum], tmpdirname)

        dev.pull(Target(target_output_dir), tmpdirname + "/validate_out")
        output_file_prefix = tmpdirname + "/validate_out/" + model_name
        validate.validate(model_conf[ModelKeys.platform], validate_model_file,
                          validate_weight_file, input_file_prefix,
                          output_file_prefix,
                          input_tensors_info[ModelKeys.input_shapes],
                          output_tensors_info[ModelKeys.output_shapes],
                          input_tensors_info[ModelKeys.input_data_formats],
                          output_tensors_info[ModelKeys.output_data_formats],
                          input_tensors_info[ModelKeys.input_tensors],
                          output_tensors_info[ModelKeys.output_tensors],
                          flags.validate_threshold,
                          input_tensors_info[ModelKeys.input_data_types],
                          flags.backend, "", "")
    if should_generate_data:
        shutil.rmtree(tmpdirname)
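
join_2d_array() is not shown in this snippet; a plausible sketch of the helper assumed above, which joins per-tensor shapes into the colon/comma form mace_run usually takes on the command line (the exact body is an assumption, not MACE's implementation):

def join_2d_array(arr):
    # e.g. [[1, 224, 224, 3], [1, 10]] -> "1,224,224,3:1,10"
    return ":".join(",".join(str(v) for v in row) for row in arr)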
Example 11
def normalize_model_config(conf):
    conf = copy.deepcopy(conf)
    if ModelKeys.subgraphs in conf:
        subgraph = conf[ModelKeys.subgraphs][0]
        del conf[ModelKeys.subgraphs]
        conf.update(subgraph)

    conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
    conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])

    if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
        conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
    else:
        if ModelKeys.data_type in conf:
            conf[ModelKeys.data_type] = parse_internal_data_type(
                conf[ModelKeys.data_type])
        else:
            conf[ModelKeys.data_type] = mace_pb2.DT_HALF

    # parse input
    conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_tensors] = [
        str(i) for i in conf[ModelKeys.input_tensors]
    ]
    input_count = len(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_shapes] = [
        parse_int_array(shape)
        for shape in to_list(conf[ModelKeys.input_shapes])
    ]
    mace_check(
        len(conf[ModelKeys.input_shapes]) == input_count,
        "input node count and shape count do not match")

    input_data_types = [
        parse_data_type(dt)
        for dt in to_list(conf.get(ModelKeys.input_data_types, ["float32"]))
    ]

    if len(input_data_types) == 1 and input_count > 1:
        input_data_types = [input_data_types[0]] * input_count
    mace_check(
        len(input_data_types) == input_count,
        "the number of input_data_types should be "
        "the same as input tensors")
    conf[ModelKeys.input_data_types] = input_data_types

    input_data_formats = [
        parse_data_format(df)
        for df in to_list(conf.get(ModelKeys.input_data_formats, ["NHWC"]))
    ]
    if len(input_data_formats) == 1 and input_count > 1:
        input_data_formats = [input_data_formats[0]] * input_count
    mace_check(
        len(input_data_formats) == input_count,
        "the number of input_data_formats should be "
        "the same as input tensors")
    conf[ModelKeys.input_data_formats] = input_data_formats

    input_ranges = [
        parse_float_array(r)
        for r in to_list(conf.get(ModelKeys.input_ranges, ["-1.0,1.0"]))
    ]
    if len(input_ranges) == 1 and input_count > 1:
        input_ranges = [input_ranges[0]] * input_count
    mace_check(
        len(input_ranges) == input_count,
        "the number of input_ranges should be "
        "the same as input tensors")
    conf[ModelKeys.input_ranges] = input_ranges

    # parse output
    conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_tensors] = [
        str(i) for i in conf[ModelKeys.output_tensors]
    ]
    output_count = len(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_shapes] = [
        parse_int_array(shape)
        for shape in to_list(conf[ModelKeys.output_shapes])
    ]
    mace_check(
        len(conf[ModelKeys.output_shapes]) == output_count,
        "output node count and shape count do not match")

    output_data_types = [
        parse_data_type(dt)
        for dt in to_list(conf.get(ModelKeys.output_data_types, ["float32"]))
    ]
    if len(output_data_types) == 1 and output_count > 1:
        output_data_types = [output_data_types[0]] * output_count
    mace_check(
        len(output_data_types) == output_count,
        "the number of output_data_types should be "
        "the same as output tensors")
    conf[ModelKeys.output_data_types] = output_data_types

    output_data_formats = [
        parse_data_format(df)
        for df in to_list(conf.get(ModelKeys.output_data_formats, ["NHWC"]))
    ]
    if len(output_data_formats) == 1 and output_count > 1:
        output_data_formats = [output_data_formats[0]] * output_count
    mace_check(
        len(output_data_formats) == output_count,
        "the number of output_data_formats should be "
        "the same as output tensors")
    conf[ModelKeys.output_data_formats] = output_data_formats

    if ModelKeys.check_tensors in conf:
        conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
        conf[ModelKeys.check_shapes] = [
            parse_int_array(shape)
            for shape in to_list(conf[ModelKeys.check_shapes])
        ]
        mace_check(
            len(conf[ModelKeys.check_tensors]) == len(
                conf[ModelKeys.check_shapes]),
            "check tensors count and shape count do not match.")

    MaceLogger.summary(conf)

    return conf
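
A hedged illustration of the defaulting rules above, with made-up tensor names: a per-input setting given once is broadcast to every input, and omitted settings fall back to float32, NHWC and the range [-1.0, 1.0]:

raw_conf = {
    ModelKeys.platform: "tensorflow",
    ModelKeys.runtime: "cpu",
    ModelKeys.input_tensors: ["in0", "in1"],
    ModelKeys.input_shapes: ["1,224,224,3", "1,10"],
    ModelKeys.input_data_formats: "NHWC",   # single value, both inputs
    ModelKeys.output_tensors: "out",
    ModelKeys.output_shapes: "1,1001",
}
normalized = normalize_model_config(raw_conf)
# Expected (per the code above):
#   normalized[ModelKeys.input_data_types] == [DT_FLOAT, DT_FLOAT]
#   normalized[ModelKeys.input_ranges]     == [[-1.0, 1.0], [-1.0, 1.0]]
#   normalized[ModelKeys.data_type]        == mace_pb2.DT_HALF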
Example 12
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from utils import device
from utils.util import MaceLogger

cwd = os.path.dirname(__file__)

# TODO: Remove bazel deps
try:
    device.execute("bazel version")
except:  # noqa
    MaceLogger.warning("No bazel, use cmake.")
    device.execute("bash tools/cmake/cmake-generate-proto-py-host.sh")
else:
    try:
        device.execute("bazel build //mace/proto:mace_py")
        device.execute("cp -f bazel-genfiles/mace/proto/mace_pb2.py %s" % cwd)

        device.execute("bazel build //mace/proto:micro_mem_py")
        device.execute("cp -f bazel-genfiles/mace/proto/micro_mem_pb2.py %s" %
                       cwd)

        device.execute("bazel build //third_party/caffe:caffe_py")
        device.execute(
            "cp -f bazel-genfiles/third_party/caffe/caffe_pb2.py %s" % cwd)
    except:  # noqa
        MaceLogger.error("Failed in proto files' building")
Example 13
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from utils import device
from utils.util import MaceLogger

cwd = os.path.dirname(__file__)

# TODO: Remove bazel deps
try:
    device.execute("bazel build //mace/proto:mace_py")
    device.execute("cp -f bazel-genfiles/mace/proto/mace_pb2.py %s" % cwd)

    device.execute("bazel build //third_party/caffe:caffe_py")
    device.execute("cp -f bazel-genfiles/third_party/caffe/caffe_pb2.py %s" %
                   cwd)
except:  # noqa
    MaceLogger.warning("No bazel, use cmake.")
Example 14
                        default="arm64-v8a",
                        help="Target ABI: only support arm64-v8a")
    parser.add_argument("--target_soc",
                        type=str,
                        default="all",
                        help="serialno for adb connection")
    parser.add_argument("--apu_path",
                        type=str,
                        default="",
                        help="path for storing apu so files on device")

    return parser.parse_known_args()


if __name__ == "__main__":
    flags, args = parse_args()
    run_devices = device.choose_devices(flags.target_abi, flags.target_soc)
    device_num = len(run_devices)
    if device_num == 0:  # for CI
        MaceLogger.warning("No Android devices are plugged in, "
                           "you need to copy `apu` so files by yourself.")
    elif device_num > 1:  # for CI
        MaceLogger.warning("More than one Android devices are plugged in, "
                           "you need to copy `apu` so files by yourself.")
    else:
        device_id = run_devices[0]
        android_device = device.create_device(flags.target_abi, device_id)
        apu_so_paths = get_apu_so_paths(android_device)
        for apu_so_path in apu_so_paths:
            device.execute("cp -f %s %s" % (apu_so_path, flags.apu_path), True)
Example 15
        f.write(source)


if __name__ == '__main__':
    flags = parse_args()
    codegen_dir = "mace/codegen/models"
    device.execute("rm -rf %s/*" % codegen_dir)

    models = []
    if flags.config:
        conf = config_parser.parse(flags.config)

        for name, model_conf in conf["models"].items():
            model_conf = config_parser.normalize_model_config(model_conf)
            if not flags.model_name or name == flags.model_name:
                MaceLogger.info("Encrypt model %s" % name)
                encrypt(name, "build/%s/model/%s.pb" % (name, name),
                        "build/%s/model/%s.data" % (name, name),
                        model_conf[ModelKeys.runtime],
                        codegen_dir + "/" + name, not flags.no_obfuscate,
                        flags.gencode_model, flags.gencode_param)
                models.append(name)
                os.rename("%s/%s/%s.pb" % (codegen_dir, name, name),
                          "build/%s/model/%s.pb" % (name, name))
                os.rename("%s/%s/%s.data" % (codegen_dir, name, name),
                          "build/%s/model/%s.data" % (name, name))
    else:
        device_type = config_parser.parse_device_type(flags.device)
        encrypt(flags.model_name, flags.model_file, flags.params_file,
                device_type, codegen_dir, not flags.no_obfuscate,
                flags.gencode_model, flags.gencode_param)