def test_load_linear_regressor(m_):
    """End-to-end check of the linear-regressor training graph.

    Builds the graph by hand, compares node/op counts against the `linear`
    reference generator, then normalizes shapes, executes, lowers, and
    verifies the trained weights match at both the coarse and lowered levels.
    """
    shapes = {"m": m_}

    # Hand-built training graph.
    m = pm.parameter("m")
    mu = pm.parameter(name="mu", default=1.0)
    # NOTE(review): `(m)` is just `m`, not a 1-tuple — presumably pm.input
    # accepts a scalar shape; confirm against the pm API.
    x = pm.input("x", shape=(m))
    y = pm.input("y")
    w = pm.state("w", shape=(m))
    graph = pm.linear_regressor_train(x, w, y, mu, m)

    # Reference (coarse) graph must match structurally.
    ref_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    assert len(ref_graph.nodes.keys()) == len(graph.nodes.keys())
    assert op_counts(ref_graph) == op_counts(graph)

    # Concretize shapes and check the executed weight update.
    normalize = pm.NormalizeGraph(shapes)
    normalized = normalize(graph)
    result = normalized(keys, input_info)
    np.testing.assert_allclose(result, out_info["w"])

    # Lower both graphs and compare structure again.
    ref_lowerable, input_info, lowered_out_info, keys = linear(m=m_)
    lower_pass = pm.Lower({})
    ref_lower_pass = pm.Lower({})
    lowered = lower_pass(normalized)
    ref_lowered = ref_lower_pass(ref_lowerable, {})
    assert len(ref_lowered.nodes.keys()) == len(lowered.nodes.keys())
    assert op_counts(ref_lowered) == op_counts(lowered)

    # Lowered execution must reproduce the reference weights.
    lowered_vals = lowered(keys, input_info)
    np.testing.assert_allclose(lowered_out_info["w"], lowered_vals)
def gen_from_shape(graph_type, input_shape, params=None):
    """Build a training graph of the requested kind for a given input shape.

    Args:
        graph_type: One of "linear", "logistic", or "svm".
        input_shape: Shape passed to the input/state tensors; also used as
            the default for the ``m`` parameter.
        params: Unused; kept for interface compatibility with callers.

    Returns:
        The training graph produced by the matching ``pm.*_train`` builder.

    Raises:
        ValueError: If ``graph_type`` is not a recognized kind (previously
            this fell through and silently returned None).
    """
    # Dispatch table: (trainer function, graph name). The tensor setup is
    # identical for every kind, so it is built once below.
    trainers = {
        "linear": (pm.linear_regressor_train, "linear_regressor"),
        "logistic": (pm.logistic_regressor_train, "logistic_regressor"),
        "svm": (pm.svm_classifier_train, "svm_classifier"),
    }
    if graph_type not in trainers:
        raise ValueError(f"unknown graph_type: {graph_type!r}")
    train_fn, graph_name = trainers[graph_type]

    x = pm.input(name="x", shape=input_shape)
    w = pm.state(name="w", shape=input_shape)
    y = pm.input(name="y")
    mu = pm.parameter(name="mu", default=1.0)
    m = pm.parameter(name="m", default=input_shape)
    return train_fn(x, w, y, mu, m, name=graph_name)
def test_load_nested_linear_regressor(m_):
    """Check a linear-regressor graph nested inside an enclosing pm.Node.

    Trains inside a "nested_linear" node, derives ``tw = w - 4``, and
    verifies the computed values at both the coarse and lowered levels.
    """
    shape_dict = {"m": m_}
    with pm.Node(name="nested_linear") as graph:
        m = pm.parameter(name="m")
        mu = pm.parameter(name="mu", default=1.0)
        # NOTE(review): `(m)` is just `m`, not a 1-tuple — presumably
        # pm.input accepts a scalar shape; confirm against the pm API.
        x = pm.input("x", shape=(m))
        y = pm.input("y")
        w = pm.state("w", shape=(m))
        pm.linear_regressor_train(x, w, y, mu, m, name="linear_regressor")
        j = pm.index(0, m - 1, name="j")
        tw = (w[j] - 4).set_name("tw")

    # Reference inputs/outputs from the coarse generator.
    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)

    # Normalize shapes and check the derived tensor at the coarse level.
    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph("tw", input_info)
    np.testing.assert_allclose(test_res, (out_info["w"] - 4))

    # Lower and re-check element-wise.
    ref_graph, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    keys = [f"tw/tw({i},)" for i in range(m_)]
    flattened_g = flatten_pass(new_graph)
    all_vals = flattened_g(keys, input_info)
    # BUG FIX: the original computed `all_vals` but asserted nothing,
    # leaving the lowered path unverified. Mirror the coarse-level check:
    # each lowered tw element should equal the trained weight minus 4.
    np.testing.assert_allclose(all_vals, new_out_info["w"] - 4)