def validate_onnx_model(model_file, input_file, mace_out_file,
                        input_names, input_shapes, input_data_formats,
                        output_names, output_shapes, output_data_formats,
                        validation_threshold, input_data_types,
                        backend, log_file):
    import onnx
    if backend == "tensorflow":
        from onnx_tf.backend import prepare
        print("validate on onnx tensorflow backend.")
    elif backend == "caffe2" or backend == "pytorch":
        from caffe2.python.onnx.backend import prepare
        print("validate on onnx caffe2 backend.")
    else:
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "onnx backend framework '" + backend + "' is invalid.")
    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")

    model = onnx.load(model_file)
    input_dict = {}
    for i in range(len(input_names)):
        input_value = load_data(
            util.formatted_file_name(input_file, input_names[i]),
            input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        # ONNX backends expect NCHW; transpose NHWC inputs.
        if input_data_formats[i] == DataFormat.NHWC and \
                len(input_shapes[i]) == 4:
            input_value = input_value.transpose((0, 3, 1, 2))
        input_dict[input_names[i]] = input_value
    onnx_outputs = []
    for i in range(len(output_names)):
        out_shape = output_shapes[i][:]
        if output_data_formats[i] == DataFormat.NHWC and \
                len(out_shape) == 4:
            out_shape[1], out_shape[2], out_shape[3] = \
                out_shape[3], out_shape[1], out_shape[2]
        onnx_outputs.append(
            onnx.helper.make_tensor_value_info(output_names[i],
                                               onnx.TensorProto.FLOAT,
                                               out_shape))
    model.graph.output.extend(onnx_outputs)

    rep = prepare(model)
    output_values = rep.run(input_dict)

    for i in range(len(output_names)):
        out_name = output_names[i]
        value = output_values[out_name].flatten()
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        # MACE output is NHWC; transpose to NCHW before comparing.
        if output_data_formats[i] == DataFormat.NHWC and \
                len(output_shapes[i]) == 4:
            mace_out_value = mace_out_value.reshape(output_shapes[i]) \
                .transpose((0, 3, 1, 2))
        compare_output(output_names[i], mace_out_value, value,
                       validation_threshold, log_file)
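
# Usage sketch (hypothetical, not called by the tools below): validating a
# single-input/single-output ONNX model against the caffe2 backend. The file
# prefixes and the "float32" data-type value are illustrative assumptions
# about what load_data() accepts.
#
#   validate_onnx_model("mobilenet.onnx",
#                       "/tmp/model_input", "/tmp/mace_out",
#                       ["input"], [[1, 3, 224, 224]], [DataFormat.NCHW],
#                       ["output"], [[1, 1000]], [DataFormat.NCHW],
#                       0.995, ["float32"], "caffe2", "")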
def validate_tf_model(model_file, input_file, mace_out_file,
                      input_names, input_shapes, input_data_formats,
                      output_names, output_shapes, output_data_formats,
                      validation_threshold, input_data_types, log_file):
    import tensorflow as tf
    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")

    tf.reset_default_graph()
    input_graph_def = tf.GraphDef()
    with open(model_file, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)

    with tf.Session() as session:
        with session.graph.as_default() as graph:
            # Import the frozen graph once, into the session's graph.
            tf.import_graph_def(input_graph_def, name="")
            input_dict = {}
            for i in range(len(input_names)):
                input_value = load_data(
                    util.formatted_file_name(input_file, input_names[i]),
                    input_data_types[i])
                input_value = input_value.reshape(input_shapes[i])
                if input_data_formats[i] == DataFormat.NCHW and \
                        len(input_shapes[i]) == 4:
                    input_value = input_value.transpose((0, 2, 3, 1))
                elif input_data_formats[i] == DataFormat.OIHW and \
                        len(input_shapes[i]) == 4:
                    # OIHW -> HWIO
                    input_value = input_value.transpose((2, 3, 1, 0))
                input_node = graph.get_tensor_by_name(
                    normalize_tf_tensor_name(input_names[i]))
                input_dict[input_node] = input_value

            output_nodes = []
            for name in output_names:
                output_nodes.append(
                    graph.get_tensor_by_name(
                        normalize_tf_tensor_name(name)))
            output_values = session.run(output_nodes, feed_dict=input_dict)

            for i in range(len(output_names)):
                output_file_name = util.formatted_file_name(
                    mace_out_file, output_names[i])
                mace_out_value = load_data(output_file_name)
                if output_data_formats[i] == DataFormat.NCHW and \
                        len(output_shapes[i]) == 4:
                    mace_out_value = mace_out_value. \
                        reshape(output_shapes[i]).transpose((0, 2, 3, 1))
                compare_output(output_names[i], mace_out_value,
                               output_values[i], validation_threshold,
                               log_file)
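
# Usage sketch (hypothetical paths and tensor names): validating a frozen
# TensorFlow graph. Tensor names are given without the ":0" suffix, which
# normalize_tf_tensor_name() is assumed to append.
#
#   validate_tf_model("mobilenet.pb", "/tmp/model_input", "/tmp/mace_out",
#                     ["input"], [[1, 224, 224, 3]], [DataFormat.NHWC],
#                     ["MobilenetV1/Predictions/Reshape_1"], [[1, 1001]],
#                     [DataFormat.NHWC], 0.995, ["float32"], "")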
def validate_caffe_model(model_file, input_file, mace_out_file,
                         weight_file, input_names, input_shapes,
                         input_data_formats, output_names, output_shapes,
                         output_data_formats, validation_threshold,
                         log_file):
    os.environ['GLOG_minloglevel'] = '1'  # suppress Caffe verbose prints
    import caffe
    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")
    if not os.path.isfile(weight_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input weight file '" + weight_file + "' does not exist!")

    caffe.set_mode_cpu()

    net = caffe.Net(model_file, caffe.TEST, weights=weight_file)

    for i in range(len(input_names)):
        input_value = load_data(
            util.formatted_file_name(input_file, input_names[i]))
        input_value = input_value.reshape(input_shapes[i])
        if input_data_formats[i] == DataFormat.NHWC and \
                len(input_shapes[i]) == 4:
            input_value = input_value.transpose((0, 3, 1, 2))
        input_blob_name = input_names[i]
        # An input may be exposed under its layer's top blob name.
        if input_names[i] in net.top_names:
            input_blob_name = net.top_names[input_names[i]][0]
        new_shape = input_value.shape
        net.blobs[input_blob_name].reshape(*new_shape)
        for index in range(input_value.shape[0]):
            net.blobs[input_blob_name].data[index] = input_value[index]

    net.forward()

    for i in range(len(output_names)):
        value = net.blobs[output_names[i]].data
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        if output_data_formats[i] == DataFormat.NHWC and \
                len(output_shapes[i]) == 4:
            mace_out_value = mace_out_value.reshape(output_shapes[i]) \
                .transpose((0, 3, 1, 2))
        compare_output(output_names[i], mace_out_value, value,
                       validation_threshold, log_file)
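
# Usage sketch (hypothetical files): Caffe validation needs both the deploy
# prototxt and the trained weights.
#
#   validate_caffe_model("deploy.prototxt", "/tmp/model_input",
#                        "/tmp/mace_out", "weights.caffemodel",
#                        ["data"], [[1, 3, 224, 224]], [DataFormat.NCHW],
#                        ["prob"], [[1, 1000]], [DataFormat.NCHW],
#                        0.995, "")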
def validate_with_file(output_names, output_shapes, mace_out_file,
                       validation_outputs_data, validation_threshold,
                       log_file):
    for i in range(len(output_names)):
        if validation_outputs_data[i].startswith("http://") or \
                validation_outputs_data[i].startswith("https://"):
            validation_file_name = util.formatted_file_name(
                mace_out_file, output_names[i] + '_validation')
            six.moves.urllib.request.urlretrieve(validation_outputs_data[i],
                                                 validation_file_name)
        else:
            validation_file_name = validation_outputs_data[i]
        value = load_data(validation_file_name)
        out_shape = output_shapes[i]
        if len(out_shape) == 4:
            # Validation data is stored as NCHW; reorder the shape and
            # transpose the values back to NHWC before comparing.
            out_shape[1], out_shape[2], out_shape[3] = \
                out_shape[3], out_shape[1], out_shape[2]
            value = value.reshape(out_shape).transpose((0, 2, 3, 1))
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        compare_output(output_names[i], mace_out_value, value,
                       validation_threshold, log_file)
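
# Usage sketch (hypothetical URL): golden outputs may be given either as
# local files or as http(s) URLs, one entry per output tensor.
#
#   validate_with_file(["prob"], [[1, 1000]], "/tmp/mace_out",
#                      ["https://example.com/prob_golden.data"], 0.995, "")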
def generate_input_data(input_file, input_node, input_shape, input_ranges,
                        input_data_type):
    np.random.seed()
    for i in range(len(input_node)):
        # Draw uniform random data in [range_min, range_max).
        data = np.random.random(input_shape[i]) * (
            input_ranges[i][1] - input_ranges[i][0]) + input_ranges[i][0]
        input_file_name = util.formatted_file_name(input_file,
                                                   input_node[i])
        MaceLogger.info('Generate input file: %s' % input_file_name)
        if input_data_type[i] == mace_pb2.DT_FLOAT:
            np_data_type = np.float32
        elif input_data_type[i] == mace_pb2.DT_INT32:
            np_data_type = np.int32
        else:
            # Fail loudly instead of hitting a NameError below.
            mace_check(False, "Unsupported input data type: %s"
                       % input_data_type[i])

        data.astype(np_data_type).tofile(input_file_name)
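
# Usage sketch (hypothetical values): one random file is written per input
# tensor, with a per-tensor file name derived by util.formatted_file_name().
#
#   generate_input_data("/tmp/model_input", ["data"], [[1, 224, 224, 3]],
#                       [[-1.0, 1.0]], [mace_pb2.DT_FLOAT])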
def run_model_for_device(flags, args, dev, model_name, model_conf):
    runtime = flags.runtime
    target_abi = flags.target_abi
    install_dir = run_target.default_install_dir(target_abi) \
        + "/" + model_name
    sysdir = install_dir + "/interior"
    dev.mkdir(sysdir)

    if not runtime:
        runtime = model_conf[ModelKeys.runtime]
        if runtime == DeviceType.CPU_GPU:
            runtime = DeviceType.GPU
    else:
        runtime = config_parser.parse_device_type(runtime)

    # install models to devices
    workdir = flags.output + "/" + model_name
    model_file = model_name + ".pb"
    model_data_file = model_name + ".data"
    model_path = workdir + "/model/" + model_file
    model_data_path = workdir + "/model/" + model_data_file
    if os.path.exists(model_path) and os.path.exists(model_data_path):
        dev.install(Target(model_path), install_dir)
        dev.install(Target(model_data_path), install_dir)
    else:
        MaceLogger.warning("No models exist in %s, use --model_file and"
                           " --model_data_file specified in args"
                           % model_path)

    if ModelKeys.check_tensors in model_conf:
        model_conf[ModelKeys.output_tensors] = model_conf[
            ModelKeys.check_tensors]
        model_conf[ModelKeys.output_shapes] = model_conf[
            ModelKeys.check_shapes]

    model_file_path = ""
    if not flags.gencode_model:
        model_file_path = install_dir + "/" + model_file
    model_data_file_path = ""
    if not flags.gencode_param:
        model_data_file_path = install_dir + "/" + model_data_file

    model_args = {
        "model_name": model_name,
        "model_file": model_file_path,
        "model_data_file": model_data_file_path,
        "input_node": ",".join(model_conf[ModelKeys.input_tensors]),
        "input_shape": join_2d_array(model_conf[ModelKeys.input_shapes]),
        "output_node": ",".join(model_conf[ModelKeys.output_tensors]),
        "output_shape": join_2d_array(model_conf[ModelKeys.output_shapes]),
        "input_data_format": ",".join(
            [df.name for df in model_conf[ModelKeys.input_data_formats]]),
        "output_data_format": ",".join(
            [df.name for df in model_conf[ModelKeys.output_data_formats]]),
        "device": runtime.name
    }

    opts = ["--%s=%s" % (arg_key, arg_val) for arg_key, arg_val in
            model_args.items()] + args
    should_generate_data = (flags.validate or flags.tune
                            or "--benchmark" in opts)

    if should_generate_data:
        tmpdirname = tempfile.mkdtemp()
        input_file_prefix = tmpdirname + "/" + model_name
        if ModelKeys.validation_inputs_data in model_conf:
            input_tensor = model_conf[ModelKeys.input_tensors]
            input_data = model_conf[ModelKeys.validation_inputs_data]
            mace_check(len(input_tensor) == len(input_data),
                       "len(input_tensor) != len(validate_data)")
            for i in range(len(input_tensor)):
                util.download_or_get_file(
                    model_conf[ModelKeys.validation_inputs_data][i], "",
                    util.formatted_file_name(input_file_prefix,
                                             input_tensor[i]))
        else:
            generate_input_data(input_file_prefix,
                                model_conf[ModelKeys.input_tensors],
                                model_conf[ModelKeys.input_shapes],
                                model_conf[ModelKeys.input_ranges],
                                model_conf[ModelKeys.input_data_types])

        dev.install(Target(tmpdirname), install_dir + "/validate_in")
        target_input_file = "%s/validate_in/%s" % (install_dir, model_name)
        target_output_dir = "%s/validate_out" % install_dir
        dev.mkdir(target_output_dir)
        target_output_file = target_output_dir + "/" + model_name
        opts += ["--input_file=%s" % target_input_file,
                 "--output_file=%s" % target_output_file]

    # run
    envs = flags.envs.split(" ") + ["MACE_INTERNAL_STORAGE_PATH=%s" % sysdir]
    if flags.tune:
        envs += ["MACE_TUNING=1",
                 "MACE_RUN_PARAMETER_PATH=%s/interior/tune_params"
                 % install_dir]
        opts += ["--round=0"]
    if flags.vlog_level > 0:
        envs += ["MACE_CPP_MIN_VLOG_LEVEL=%s" % flags.vlog_level]

    build_dir = flags.build_dir + "/" + target_abi
    libs = []
    if model_conf[ModelKeys.runtime] == DeviceType.HEXAGON:
        libs += ["third_party/nnlib/%s/libhexagon_controller.so"
                 % target_abi]
    elif model_conf[ModelKeys.runtime] == DeviceType.APU:
        libs += ["third_party/apu/libapu-frontend.so"]
    target = Target(build_dir + "/install/bin/mace_run", libs,
                    opts=opts, envs=envs)
    run_target.run_target(target_abi, install_dir, target,
                          device_ids=flags.target_socs)

    if runtime == DeviceType.GPU:
        opencl_dir = workdir + "/opencl"
        util.mkdir_p(opencl_dir)
        dev.pull(
            Target(install_dir + "/interior/mace_cl_compiled_program.bin"),
            "%s/%s_compiled_opencl_kernel.%s.%s.bin" % (
                opencl_dir, model_name,
                dev.info()["ro.product.model"].replace(' ', ''),
                dev.info()["ro.board.platform"]))
        if flags.tune:
            dev.pull(
                Target(install_dir + "/interior/tune_params"),
                "%s/%s_tuned_opencl_parameter.%s.%s.bin" % (
                    opencl_dir, model_name,
                    dev.info()["ro.product.model"].replace(' ', ''),
                    dev.info()["ro.board.platform"]))

    if flags.validate:
        validate_model_file = util.download_or_get_model(
            model_conf[ModelKeys.model_file_path],
            model_conf[ModelKeys.model_sha256_checksum],
            tmpdirname)
        validate_weight_file = ""
        if ModelKeys.weight_file_path in model_conf:
            validate_weight_file = util.download_or_get_model(
                model_conf[ModelKeys.weight_file_path],
                model_conf[ModelKeys.weight_sha256_checksum],
                tmpdirname)
        dev.pull(Target(target_output_dir), tmpdirname + "/validate_out")
        output_file_prefix = tmpdirname + "/validate_out/" + model_name
        validate.validate(model_conf[ModelKeys.platform],
                          validate_model_file,
                          validate_weight_file,
                          input_file_prefix,
                          output_file_prefix,
                          model_conf[ModelKeys.input_shapes],
                          model_conf[ModelKeys.output_shapes],
                          model_conf[ModelKeys.input_data_formats],
                          model_conf[ModelKeys.output_data_formats],
                          model_conf[ModelKeys.input_tensors],
                          model_conf[ModelKeys.output_tensors],
                          flags.validate_threshold,
                          model_conf[ModelKeys.input_data_types],
                          flags.backend,
                          "",
                          "")
    if should_generate_data:
        shutil.rmtree(tmpdirname)
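
# Usage sketch (hypothetical driver loop): this function is normally driven
# by the CLI; a direct call would look like the following, assuming `flags`
# carries the parsed command-line arguments, `device` is a connected device
# wrapper with mkdir/install/pull, and `models` maps model names to their
# parsed YAML configs.
#
#   for name, conf in models.items():
#       run_model_for_device(flags, extra_args, device, name, conf)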