def test_load_linear_regressor(m_):
    shape_dict = {"m": m_}
    # Build the linear regression training graph directly from the API.
    m = pm.parameter("m")
    mu = pm.parameter(name="mu", default=1.0)
    x = pm.input("x", shape=(m))
    y = pm.input("y")
    w = pm.state("w", shape=(m))
    graph = pm.linear_regressor_train(x, w, y, mu, m)

    # Reference graph constructed by the test helper should match node-for-node.
    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    assert len(test_graph.nodes.keys()) == len(graph.nodes.keys())
    assert op_counts(test_graph) == op_counts(graph)

    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph(keys, input_info)
    np.testing.assert_allclose(test_res, out_info["w"])

    # Lower both graphs and check that structure and results still agree.
    test_graph_lowered, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    test_flatten_pass = pm.Lower({})
    flattened_g = flatten_pass(new_graph)
    ref_lowered = test_flatten_pass(test_graph_lowered, {})
    assert len(ref_lowered.nodes.keys()) == len(flattened_g.nodes.keys())
    assert op_counts(ref_lowered) == op_counts(flattened_g)

    all_vals = flattened_g(keys, input_info)
    np.testing.assert_allclose(new_out_info["w"], all_vals)
def test_multi_dim_norm():
    with pm.Node(name="elem") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m, n))
        w = pm.state("w", shape=(m, n))
        i = pm.index(0, m - 1, name="i")
        j = pm.index(0, n - 1, name="j")
        w[i, j] = w[i, j] * x[i, j]

    m_ = 3
    n_ = 4
    x_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    w_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))

    # Coarse evaluation should match the elementwise NumPy product.
    coarse_eval = graph("w", x=x_, w=w_)
    np_result = x_ * w_
    np.testing.assert_allclose(coarse_eval, np_result)

    shape_pass = pm.NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)

    # After lowering, individual elements are addressed by their flattened names.
    lower_pass = pm.Lower({})
    lowered_graph = lower_pass(graph_shapes, {})
    input_info = {}
    for i in range(m_):
        for j in range(n_):
            input_info[f"w/w({i}, {j})"] = w_[i, j]
            input_info[f"x/x({i}, {j})"] = x_[i, j]
    fine_grained_eval = lowered_graph("w/w(2, 3)", input_info)
    assert fine_grained_eval == np_result[2, 3]
def test_conv_embedded_values(x_shape, w_shape, params):
    shape_dict = {"n": x_shape[0], "ic": x_shape[1], "ih": x_shape[2], "iw": x_shape[3],
                  "nf": w_shape[0], "kh": w_shape[2], "kw": w_shape[3],
                  "stride": params["stride"], "pad": params["pad"]}
    graph, input_info0, out_info, keys = conv(x_shape, w_shape, params, coarse=True, debug_matrix=True)
    ngraph, input_info1, out_info, keys = conv(x_shape, w_shape, params, coarse=False, debug_matrix=True)
    lower_pass = pm.Lower({})
    lowered = lower_pass(ngraph)

    # Round-trip the lowered graph through protobuf serialization and verify
    # the node structure is preserved.
    pb_path = f"{OUTPATH}/{graph.name}.srdfg"
    pm.pb_store(lowered, OUTPATH)
    node = pm.pb_load(pb_path)
    assert len(node.nodes) == len(lowered.nodes)
    assert list(node.nodes.keys()) == list(lowered.nodes.keys())
def from_onnx(filepath, infer_shapes=True, use_filename=True, lower=False, verbose=False):
    onnx_proto, graph_name = load_onnx_proto(filepath)
    onnx_proto = update_node_names(onnx_proto)
    onnx_proto = update_edge_names(onnx_proto)
    attr = get_model_attributes(onnx_proto)
    if infer_shapes:
        onnx_graph = shape_inference.infer_shapes(onnx_proto).graph
    else:
        onnx_graph = onnx_proto.graph

    # Fail early if the model contains an op that PolyMath cannot translate.
    for n in onnx_graph.node:
        if n.op_type not in NODE_NAMES and n.name not in NODE_NAMES:
            raise RuntimeError(f"Support for {n.op_type} or {n.name} is not currently "
                               f"included in PolyMath")

    graph = generate_srdfg(onnx_graph, verbose=verbose)
    if use_filename:
        graph_name = filepath.split("/")[-1].split(".")[0]
    graph.set_name(graph_name)

    # Optionally lower ONNX-level ops to srDFG primitives.
    if lower:
        lower_pass = pm.Lower(ONNX_OP_NAMES)
        graph = lower_pass(graph)
    return graph
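# Illustrative usage sketch (not part of the library): "models/lenet.onnx" is a
# hypothetical path, and the calls assume every op in the model appears in NODE_NAMES.
def example_from_onnx_usage():
    coarse_graph = from_onnx("models/lenet.onnx")                # coarse srDFG, named after the file
    lowered_graph = from_onnx("models/lenet.onnx", lower=True)   # ONNX ops lowered to srDFG primitives
    return coarse_graph, lowered_graph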
def create_training_graph(graph, loss_func="cross_entropy", optimizer="sgd", **optimizer_kwargs):
    # Apply autodiff to produce gradient/update nodes, then lower the result
    # to PolyMath's DNN training primitives.
    autodiff_pass = pm.AutoDiffGraph(loss_func, optimizer, optimizer_kwargs)
    train_graph = autodiff_pass(graph)
    lower_pass = pm.Lower(pm.DNN_TRAINING_OPS)
    lowered_train_graph = lower_pass(train_graph)
    return lowered_train_graph
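# Illustrative sketch: chaining from_onnx with create_training_graph. The model path
# is hypothetical, and the `lr` keyword assumes the chosen optimizer accepts a
# learning-rate argument.
def example_create_training_graph():
    inference_graph = from_onnx("models/lenet.onnx")
    return create_training_graph(inference_graph, loss_func="cross_entropy",
                                 optimizer="sgd", lr=0.01)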
def generate_tvm(graph, input_dict, filepath, context_dict=None):
    assert len(input_dict) > 0
    # Map each input name to its shape (arrays contribute their .shape directly).
    shape_dict = {k: v.shape if isinstance(v, np.ndarray) else v for k, v in input_dict.items()}
    shape_dict['populate'] = False
    shape_pass = pm.NormalizeGraph(shape_dict)
    lower_pass = pm.Lower(TVM_OPS)
    tvm_pass = TVMPass()
    shaped = shape_pass(graph)
    lowered = lower_pass(shaped)
    tvm_pass(lowered)
    # Note: `filepath` and `context_dict` are currently unused.
    return tvm_pass.tvm_ir['tvm_code']
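# Illustrative sketch: generate_tvm consumes a graph plus an input dictionary mapping
# input names to concrete arrays (or shape values). The use of the `linear` test
# helper and the output path are assumptions for illustration only.
def example_generate_tvm():
    graph, input_info, out_info, keys = linear(m=3, coarse=True)
    tvm_code = generate_tvm(graph, input_info, "outputs/linear_tvm.py")
    return tvm_code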
def test_linear(m_):
    shape_dict = {"m": m_}
    graph, input_info, out_info, keys = linear(**shape_dict, coarse=True)
    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph(keys, input_info)
    np.testing.assert_allclose(test_res, out_info["w"])

    graph, input_info, new_out_info, keys = linear(**shape_dict)
    flatten_pass = pm.Lower({})
    flattened_g = flatten_pass(new_graph)
    all_vals = flattened_g(keys, input_info)
    np.testing.assert_allclose(new_out_info["w"], all_vals)
def test_translate_multi_dense(x1_shape, w1_shape, w2_shape):
    graph, input_info, out_info, keys = two_layer_dense(x1_shape, w1_shape, w2_shape,
                                                        coarse=True, debug_matrix=True)
    tinput_info = copy.deepcopy(input_info)
    res0 = graph(keys, tinput_info)
    np.testing.assert_allclose(res0, out_info["y"].astype(res0.dtype))

    graph, input_info, out_info, keys = two_layer_dense(x1_shape, w1_shape, w2_shape,
                                                        coarse=False, debug_matrix=True)
    lower_pass = pm.Lower({})
    lowered_graph = lower_pass(graph)
    res = lowered_graph(keys, input_info)
    np.testing.assert_allclose(np.asarray(res).reshape(out_info["y"].shape),
                               out_info["y"].astype(res[0].dtype))
def test_conv_embedded_values(x_shape, w_shape, params):
    shape_dict = {"n": x_shape[0], "ic": x_shape[1], "ih": x_shape[2], "iw": x_shape[3],
                  "nf": w_shape[0], "kh": w_shape[2], "kw": w_shape[3],
                  "stride": params["stride"], "pad": params["pad"]}
    graph, input_info0, out_info, keys = conv(x_shape, w_shape, params, coarse=True, debug_matrix=True)
    ngraph, input_info1, out_info, keys = conv(x_shape, w_shape, params, coarse=False, debug_matrix=True)
    lower_pass = pm.Lower({})
    lowered = lower_pass(ngraph)

    # The lowered graph should reproduce the reference convolution output.
    res0 = np.asarray(lowered(keys, input_info1)).reshape(out_info["out"].shape)
    np.testing.assert_allclose(res0, out_info["out"])

    tabla_path = f"{OUTPATH}/{graph.name}_tabla.json"
    tabla_ir, tabla_graph = pm.generate_tabla(graph, shape_dict, tabla_path,
                                              context_dict=input_info1, add_kwargs=True, debug=True)
def generate_dnnweaver(graph, input_dict, filepath, debug=False, add_kwargs=False, context_dict=None):
    shape_dict = {k: v.shape if isinstance(v, np.ndarray) else v for k, v in input_dict.items()}
    shape_dict['populate'] = False
    shape_pass = pm.NormalizeGraph(shape_dict, debug=debug)
    lower_pass = pm.Lower(DNNWEAVER_OPS, debug=debug)
    dnnw_pass = DNNWeaverPass(debug=debug)
    shaped = shape_pass(graph)
    lowered = lower_pass(shaped)
    result = dnnw_pass(lowered)
    return dnnw_pass.dnnw_ir['dnnweaver_code'], result
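# Illustrative sketch: generate_dnnweaver returns both the generated code string and
# the transformed graph. The `conv` helper arguments and the output path below are
# hypothetical values chosen for illustration.
def example_generate_dnnweaver():
    graph, input_info, out_info, keys = conv((1, 1, 8, 8), (2, 1, 3, 3),
                                             {"stride": 1, "pad": 0},
                                             coarse=True, debug_matrix=True)
    code, result = generate_dnnweaver(graph, input_info, "outputs/conv_dnnweaver.py")
    return code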
def test_reco():
    m_ = 3
    n_ = 3
    k_ = 2
    graph, input_info, out_info, keys = reco(m=m_, n=n_, k=k_, coarse=True)
    shape_val_pass = pm.NormalizeGraph({"m": m_, "n": n_, "k": k_})
    new_graph = shape_val_pass(graph)
    test_res = new_graph(keys, input_info)
    np.testing.assert_allclose(test_res[0], out_info["w1"])
    np.testing.assert_allclose(test_res[1], out_info["w2"])

    graph, input_info, new_out_info, keys = reco(m=m_, n=n_, k=k_)
    flatten_pass = pm.Lower({})
    flattened_g = flatten_pass(new_graph)
    all_vals = flattened_g(keys, input_info)
    # The lowered graph returns a flat list of values: the first m_*k_ entries
    # belong to w1, the remainder to w2.
    out1 = np.asarray(list(all_vals[0:6])).reshape(new_out_info["w1"].shape)
    out2 = np.asarray(list(all_vals[6:])).reshape(new_out_info["w2"].shape)
    np.testing.assert_allclose(new_out_info["w1"], out1)
    np.testing.assert_allclose(new_out_info["w2"], out2)
def test_load_nested_linear_regressor(m_):
    shape_dict = {"m": m_}
    with pm.Node(name="nested_linear") as graph:
        m = pm.parameter(name="m")
        mu = pm.parameter(name="mu", default=1.0)
        x = pm.input("x", shape=(m))
        y = pm.input("y")
        w = pm.state("w", shape=(m))
        pm.linear_regressor_train(x, w, y, mu, m, name="linear_regressor")
        j = pm.index(0, m - 1, name="j")
        tw = (w[j] - 4).set_name("tw")

    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph("tw", input_info)
    np.testing.assert_allclose(test_res, (out_info["w"] - 4))

    ref_graph, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    keys = [f"tw/tw({i},)" for i in range(m_)]
    flattened_g = flatten_pass(new_graph)
    all_vals = flattened_g(keys, input_info)
    # Check the lowered per-element results against the reference weights.
    np.testing.assert_allclose(all_vals, new_out_info["w"] - 4)
def generate_tabla(graph, input_dict, filepath, context_dict=None, add_kwargs=False, debug=True):
    assert len(input_dict) > 0
    shape_pass = pm.NormalizeGraph(input_dict, debug=debug)
    context_dict = context_dict or {}
    lower_pass = pm.Lower({}, debug=debug)
    print("Starting graph normalization...")
    shaped = shape_pass(graph)
    print("Finished graph normalization. Executing lower pass.")
    lowered = lower_pass(shaped)
    print("Finished graph lowering, generating TABLA dfg.")

    # Drop context entries that no longer correspond to nodes in the lowered graph.
    for k in list(context_dict.keys()):
        if k not in lowered.nodes:
            context_dict.pop(k)

    tabla_pass = TablaPass(context_dict, add_kwargs=add_kwargs, debug=debug)
    res = tabla_pass(lowered)
    print(f"Finished generating TABLA dfg, now storing to JSON file at {filepath}.")
    tabla_nodes = [node for _, node in tabla_pass.dfg.items()]
    with open(filepath, "w") as f:
        json.dump(tabla_nodes, f, indent=4)
    return tabla_nodes, res
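# Illustrative sketch mirroring test_conv_embedded_values above: shape parameters go
# in as input_dict, and per-node values as context_dict. The `linear` helper usage
# and output path are hypothetical.
def example_generate_tabla():
    graph, input_info, out_info, keys = linear(m=3, coarse=True)
    tabla_nodes, res = generate_tabla(graph, {"m": 3}, "outputs/linear_tabla.json",
                                      context_dict=input_info, add_kwargs=True)
    return tabla_nodes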