Code example #1
def get_node_metadata(model):
    # Map each tensor name in the graph's value_info to a
    # (element type, dimension tuple) pair.
    value_info = {}
    for val in model.graph.value_info:
        value_info[val.name] = (
            val.type.tensor_type.elem_type,
            common.proto_val_to_dimension_tuple(val),
        )
    return value_info
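A minimal usage sketch for this helper, assuming an ONNX model whose value_info has been populated by shape inference (the model path is a placeholder):

# Hedged usage sketch: run shape inference first so that
# model.graph.value_info is populated, then collect the metadata.
import onnx

model = onnx.shape_inference.infer_shapes(onnx.load("models/example.onnx"))
metadata = get_node_metadata(model)
for name, (elem_type, dims) in metadata.items():
    print(name, elem_type, dims)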
Code example #2
def main():
    if len(sys.argv) < 3:
        print("Model file or scaling factor unspecified.", file=sys.stderr)
        sys.exit(1)

    file_name = sys.argv[1]
    scaling_factor = int(sys.argv[2])
    file_path = "models/" + file_name
    model_name = file_name[:-5]  # name without the '.onnx' extension
    model = onnx.load(file_path)
    graph_def = model.graph

    # Generating input
    input_dims = common.proto_val_to_dimension_tuple(model.graph.input[0])
    input_array = np.random.random(input_dims)
    # input_array = numpy.ones(input_dims, dtype=float)
    print("Generated random input of dimension " + str(input_dims))
    np.save("debug/" + model_name + "/" + model_name + "_input", input_array)

    (chunk, cnt) = common.numpy_float_array_to_fixed_point_val_str(
        input_array, scaling_factor)
    with open("debug/" + model_name + "/" + model_name + "_input.inp",
              "w") as f:
        f.write(chunk)

    model_name_to_val_dict = {
        init_vals.name: numpy_helper.to_array(init_vals).tolist()
        for init_vals in model.graph.initializer
    }

    preprocess_batch_normalization(graph_def, model_name_to_val_dict)

    chunk_n = ""
    cnt_n = 0
    for init_vals in model.graph.initializer:
        (chunk_1, cnt_1) = common.numpy_float_array_to_fixed_point_val_str(
            np.asarray(model_name_to_val_dict[init_vals.name],
                       dtype=np.float32),
            scaling_factor,
        )
        chunk_n += chunk_1
        cnt_n += cnt_1

    with open("debug/" + model_name + "/" + model_name + "_weights.inp",
              "w") as f:
        f.write(chunk_n)

    with open(
            "debug/" + model_name + "/" + model_name +
            "_combined_input_weights.inp", "w") as f:
        f.write(chunk + chunk_n)

    print("Total " + str(cnt + cnt_n) + " integers were written in " +
          model_name + "_combined_input_weights.inp")
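The heavy lifting is delegated to common.numpy_float_array_to_fixed_point_val_str. A self-contained sketch of the presumed encoding (scale each float by 2**scaling_factor and round to an integer) follows; the real EzPC helper may differ in rounding and output formatting:

# Sketch of the presumed fixed-point encoding; not the actual EzPC helper.
import numpy as np

def to_fixed_point_val_str(arr, scaling_factor):
    # Scale by 2**scaling_factor and round to the nearest integer.
    scaled = np.round(arr * (1 << scaling_factor)).astype(np.int64)
    flat = scaled.flatten()
    # One integer per line, plus the count of integers produced.
    return "\n".join(str(v) for v in flat) + "\n", flat.size

chunk, cnt = to_fixed_point_val_str(np.random.random((1, 3, 32, 32)), 12)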
Code example #3
def inferShapes(model):
    if DEBUG:
        print(model.graph.value_info)

    # Record shapes for the graph inputs and outputs so ONNX shape
    # inference can propagate them (avoiding the built-in name input).
    for graph_input in model.graph.input:
        model.graph.value_info.append(
            make_tensor_value_info(
                graph_input.name,
                common.get_data_type(graph_input),
                common.proto_val_to_dimension_tuple(graph_input),
            ))

    for graph_output in model.graph.output:
        model.graph.value_info.append(
            make_tensor_value_info(
                graph_output.name,
                common.get_data_type(graph_output),
                common.proto_val_to_dimension_tuple(graph_output),
            ))

    if DEBUG:
        print(model.graph.value_info)

    for init_vals in model.graph.initializer:
        model.graph.value_info.append(
            make_tensor_value_info(init_vals.name, init_vals.data_type,
                                   tuple(init_vals.dims)))

    if DEBUG:
        print("Shape inference *****************")
        print(model.graph.value_info)

    inferred_model = onnx.shape_inference.infer_shapes(model)

    if DEBUG:
        print("Printing shape ******************")
        print(inferred_model.graph.value_info)
        print("Done ******************")

    return inferred_model
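A hedged usage sketch (the model path is a placeholder; DEBUG and common are module-level names from the surrounding EzPC file):

# Hypothetical usage: annotate inputs, outputs, and initializers, then let
# ONNX propagate shapes to every intermediate value in the graph.
import onnx

model = onnx.load("models/example.onnx")  # placeholder path
inferred = inferShapes(model)
for val in inferred.graph.value_info:
    print(val.name, common.proto_val_to_dimension_tuple(val))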
Code example #4
def __init__(self, node):
    self.name = node.name
    if isinstance(node, ValueInfoProto):  # graph input
        self.shape = list(common.proto_val_to_dimension_tuple(node))
        self.data_type = node.type.tensor_type.elem_type
        # When weights are stripped from the model by the server,
        # the doc_string field is set to this exact MPC_MODEL_WEIGHTS
        # magic keyword.
        if node.doc_string == "MPC_MODEL_WEIGHTS":
            self.party = AST.Party.SERVER
        else:
            self.party = AST.Party.CLIENT
    elif isinstance(node, TensorProto):  # initializers (weights)
        self.shape = list(node.dims)
        self.data_type = node.data_type
        self.party = AST.Party.SERVER
    else:
        assert False, "Unexpected input type"
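The enclosing class is not shown in this snippet. Assuming it is named Node (a hypothetical name), construction over a loaded model would look like:

# Hypothetical usage; "Node" is an assumed name for the class whose
# __init__ appears above, and model is a loaded onnx.ModelProto.
inputs = [Node(inp) for inp in model.graph.input]           # CLIENT or SERVER
weights = [Node(init) for init in model.graph.initializer]  # always SERVER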
Code example #5
File: create_input.py  Project: shas19/EzPC
def main():
    if len(sys.argv) < 3:
        print("Model file or scaling factor unspecified.", file=sys.stderr)
        sys.exit(1)

    file_name = sys.argv[1]
    scaling_factor = int(sys.argv[2])
    file_path = 'models/' + file_name
    model_name = file_name[:-5]  # name without the '.onnx' extension
    model = onnx.load(file_path)
    graph_def = model.graph

    # Generating input
    input_dims = common.proto_val_to_dimension_tuple(model.graph.input[0])
    input_array = np.random.random(input_dims)
    # input_array = numpy.ones(input_dims, dtype=float)
    print('Generated random input of dimension ' + str(input_dims))
    np.save('debug/' + model_name + '/' + model_name + '_input', input_array)

    (chunk, cnt) = common.numpy_float_array_to_fixed_point_val_str(
        input_array, scaling_factor)

    model_name_to_val_dict = {
        init_vals.name: numpy_helper.to_array(init_vals).tolist()
        for init_vals in model.graph.initializer
    }

    preprocess_batch_normalization(graph_def, model_name_to_val_dict)

    for init_vals in model.graph.initializer:
        (chunk_1, cnt_1) = common.numpy_float_array_to_fixed_point_val_str(
            np.asarray(model_name_to_val_dict[init_vals.name],
                       dtype=np.float32), scaling_factor)
        chunk += chunk_1
        cnt += cnt_1

    with open('debug/' + model_name + '/' + model_name + '_input.h',
              'w') as f:
        f.write(chunk)

    print('Total ' + str(cnt) + ' integers were written in ' + model_name +
          '_input.h')
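Note that the script opens files under debug/<model_name>/ without creating that directory, so it must exist beforehand. A hedged preparation sketch (the model name is a placeholder):

# The script writes into debug/<model_name>/ but never creates it.
import os

model_name = "example"  # placeholder for file_name[:-5]
os.makedirs("debug/" + model_name, exist_ok=True)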
Code example #6
File: onnx_run_tf.py  Project: shas19/EzPC
def main():
    # First read the ONNX file
    if len(sys.argv) < 2:
        print("TF python file unspecified.", file=sys.stderr)
        sys.exit(1)

    file_name = sys.argv[1]
    file_path = 'models/' + file_name
    model_name = file_name[:-5]  # name without the '.onnx' extension
    model = onnx.load(file_path)
    model = preprocess_for_tf(model)

    x = np.load('debug/' + model_name + '/' + model_name + '_input.npy')
    x = x.astype(np.float32)

    input_name = model.graph.input[0].name
    output_name = model.graph.output[0].name

    if len(sys.argv) > 2:
        intermediate_layer_value_info_name = 'tf_' + sys.argv[2]
        intermediate_layer_value_info = helper.make_tensor_value_info(
            intermediate_layer_value_info_name, TensorProto.FLOAT, [])
        model.graph.output.extend([intermediate_layer_value_info])
        output = prepare(model).run(x)
        pred = getattr(output, intermediate_layer_value_info_name)
        np.save('debug/' + model_name + '/' + model_name + '_debug', pred)
        with open('debug/onnx_debug.txt', 'w') as f:
            f.write(common.numpy_float_array_to_float_val_str(pred))
        print("Saving the onnx runtime intermediate output for " +
              intermediate_layer_value_info.name)
        sys.exit()

    output = prepare(model).run(x)
    pred = getattr(output, output_name)
    np.save('debug/' + model_name + '/' + model_name + '_output', pred)
    with open('debug/onnx_output.txt', 'w') as f:
        f.write(common.numpy_float_array_to_float_val_str(pred))
    output_dims = common.proto_val_to_dimension_tuple(model.graph.output[0])
    print("Saving the onnx runtime output of dimension " + str(output_dims))
Code example #7
File: onnx_run.py  Project: rasswanth-s/EzPC
file_name = sys.argv[1]
file_path = 'models/' + file_name
model_name = file_name[:-5]  # name without the '.onnx' extension
model = onnx.load(file_path)
sess = onnxruntime.InferenceSession(file_path)

x = np.load('debug/' + model_name + '/' + model_name + '_input.npy')
x = x.astype(np.float32)

input_name = model.graph.input[0].name

if len(sys.argv) > 2:
    # Expose the requested intermediate tensor as an extra graph output,
    # save the modified model, and rerun it to dump that tensor.
    intermediate_layer_value_info = helper.ValueInfoProto()
    intermediate_layer_value_info.name = sys.argv[2]
    model.graph.output.extend([intermediate_layer_value_info])
    onnx.save(model, file_path + '_1')
    sess = onnxruntime.InferenceSession(file_path + '_1')
    pred = sess.run([intermediate_layer_value_info.name], {input_name: x})
    np.save('debug/' + model_name + '/' + model_name + '_debug', pred)
    with open('debug/onnx_debug.txt', 'w') as f:
        f.write(common.numpy_float_array_to_float_val_str(pred))
    print("Saving the onnx runtime intermediate output for " +
          intermediate_layer_value_info.name)
    sys.exit()

pred = sess.run(None, {input_name: x})
np.save('debug/' + model_name + '/' + model_name + '_output', pred)
with open('debug/onnx_output.txt', 'w') as f:
    f.write(common.numpy_float_array_to_float_val_str(pred))
output_dims = common.proto_val_to_dimension_tuple(model.graph.output[0])
print("Saving the onnx runtime output of dimension " + str(output_dims))
Code example #8
def main():
    sys.setrecursionlimit(10000)
    # First read the ONNX file
    if len(sys.argv) < 2:
        print("TF python file unspecified.", file=sys.stderr)
        sys.exit(1)
    file_name = sys.argv[1]
    file_path = 'models/' + file_name
    model_name = file_name[:-5]  # name without the '.onnx' extension

    # load the model and extract the graph
    model = onnx.load(file_path)
    graph_def = model.graph

    if DEBUG:
        print(model.graph.value_info)
    # Before shape inference runs, model.graph.value_info must already
    # carry the shapes of all graph inputs, outputs, and initializers;
    # they are appended below.
    model.graph.value_info.append(
        make_tensor_value_info(
            model.graph.input[0].name, TensorProto.FLOAT,
            common.proto_val_to_dimension_tuple(model.graph.input[0])))
    model.graph.value_info.append(
        make_tensor_value_info(
            model.graph.output[0].name, TensorProto.FLOAT,
            common.proto_val_to_dimension_tuple(model.graph.output[0])))

    if DEBUG:
        print(model.graph.value_info)

    for init_vals in model.graph.initializer:
        model.graph.value_info.append(
            make_tensor_value_info(init_vals.name, TensorProto.FLOAT,
                                   tuple(init_vals.dims)))

    if DEBUG:
        print("Shape inference *****************")
        print(model.graph.value_info)

    inferred_model = onnx.shape_inference.infer_shapes(model)

    if DEBUG:
        print("Printing shape ******************")
        print(inferred_model.graph.value_info)
        print("Done ******************")

    # value_info: dictionary of name -> (type, dimension tuple)
    value_info = {}
    for val in inferred_model.graph.value_info:
        value_info[val.name] = (val.type.tensor_type.elem_type,
                                common.proto_val_to_dimension_tuple(val))

    # Iterate through the ONNX graph nodes and translate them to SeeDot AST nodes
    program = None
    innermost_let_ast_node = None
    node_name_to_out_var_dict = {}
    out_var_count = 0
    mtdAST = MtdAST()

    (program, innermost_let_ast_node, out_var_count) = process_input_variables(
        program, innermost_let_ast_node, node_name_to_out_var_dict,
        out_var_count, mtdAST, graph_def, value_info)

    process_onnx_nodes(innermost_let_ast_node, node_name_to_out_var_dict,
                       out_var_count, mtdAST, graph_def, value_info)

    PrintAST().visit(program)

    common.write_debug_info(node_name_to_out_var_dict)

    with open('debug/' + model_name + '/' + model_name + '.pkl', 'wb') as f:
        pickle.dump(program, f)
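The pickled program can be reloaded later, provided the SeeDot AST classes are importable at unpickle time. A hedged sketch:

# Reload the dumped SeeDot AST; the AST classes from the EzPC/Athos
# codebase must be importable when unpickling.
import pickle

model_name = "example"  # placeholder
with open('debug/' + model_name + '/' + model_name + '.pkl', 'rb') as f:
    program = pickle.load(f)
PrintAST().visit(program)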