def __init__(self,
                 step_size=0.0005,
                 max_num_epochs=500,
                 init_param_scale=0.01):
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size
        self.init_param_scale = init_param_scale

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        self.w = nodes.ValueNode(node_name="w")  # to hold the parameter vector
        self.b = nodes.ValueNode(
            node_name="b")  # to hold the bias parameter (scalar)

        self.affine = nodes.VectorScalarAffineNode(
            self.w, self.x, self.b,
            node_name="affine")  # to hold a affine transform
        #self.active = nodes.ExpNode(self.affine, node_name="active") # to hold a activation function node using exp
        self.prediction = nodes.ExpNode(
            self.affine, node_name="predict")  # to hold a prediction node
        self.objective = nodes.PoissonLikehoodNode(
            self.prediction, self.y,
            node_name="objective")  # to hold a negative log likehood node
        # Group nodes into types to construct computation graph function
        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.w, self.b]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                    self.parameters,
                                                    self.prediction,
                                                    self.objective)
Example #2
    def __init__(self, l2_reg=1, step_size=.005, max_num_epochs=5000):
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size
        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        self.w = nodes.ValueNode(node_name="w")  # to hold the parameter vector
        self.b = nodes.ValueNode(
            node_name="b")  # to hold the bias parameter (scalar)
        self.l2_reg = nodes.ValueNode(
            node_name="l2_reg")  # to hold the reg parameter (scalar); unused below,
        # since the scalar l2_reg argument is passed to the penalty node directly
        self.prediction = nodes.VectorScalarAffineNode(x=self.x,
                                                       w=self.w,
                                                       b=self.b,
                                                       node_name="prediction")
        self.squareloss = nodes.SquaredL2DistanceNode(a=self.prediction,
                                                      b=self.y,
                                                      node_name="square loss")
        self.l2penalty = nodes.L2NormPenaltyNode(l2_reg=l2_reg,
                                                 w=self.w,
                                                 node_name="l2 penalty")
        self.objective = nodes.SumNode(a=self.squareloss,
                                       b=self.l2penalty,
                                       node_name="objective")

        # Group nodes into types to construct computation graph function
        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.w, self.b]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                    self.parameters,
                                                    self.prediction,
                                                    self.objective)
    def __init__(self, l2_reg=1, step_size=.005, max_num_epochs=5000):
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") 
        self.y = nodes.ValueNode(node_name="y") 
        self.w = nodes.ValueNode(node_name="w") 
        self.b = nodes.ValueNode(node_name="b") 
        self.prediction = nodes.VectorScalarAffineNode(x=self.x, 
                                                       w=self.w, 
                                                       b=self.b,
                                                       node_name="prediction")

        self.square_loss = nodes.SquaredL2DistanceNode(a = self.prediction,
                                                       b = self.y,
                                                       node_name = "square_loss")

        self.l2_reg_loss = nodes.L2NormPenaltyNode(l2_reg = l2_reg,
                                                   w = self.w,
                                                   node_name = "l2_reg_loss")

        self.total_loss = nodes.SumNode(a=self.square_loss,
                                        b=self.l2_reg_loss,
                                        node_name="total_loss")

        self.graph = graph.ComputationGraphFunction(inputs=[self.x],
                                                    outcomes=[self.y],
                                                    parameters=[self.w,self.b],
                                                    prediction=self.prediction,
                                                    objective=self.total_loss)       
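These constructors only build the graph; the actual training loop lives in a fit() method not shown in these snippets. A minimal sketch of the SGD loop such a ridge-regression class is usually paired with, assuming (this is an assumption, not shown above) that ComputationGraphFunction exposes set_parameters, get_gradients, and increment_parameters:

    def fit(self, X, y):
        num_instances, num_ftrs = X.shape
        y = y.reshape(-1)

        # Small random initialization for the parameter nodes "w" and "b"
        init_values = {"w": np.random.standard_normal(num_ftrs),
                       "b": np.array(0.0)}
        self.graph.set_parameters(init_values)

        for epoch in range(self.max_num_epochs):
            for j in np.random.permutation(num_instances):
                # Forward and backward pass on one example:
                # returns the objective value and a dict of parameter gradients
                obj, grads = self.graph.get_gradients(input_values={"x": X[j]},
                                                      outcome_values={"y": y[j]})
                # Plain SGD step against each gradient
                steps = {name: -self.step_size * g for name, g in grads.items()}
                self.graph.increment_parameters(steps)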
Example #4
    def __init__(self, l2_reg=1, step_size=.005,  max_num_epochs = 5000):
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") # to hold a vector input
        self.y = nodes.ValueNode(node_name="y") # to hold a scalar response
        self.w = nodes.ValueNode(node_name="w") # to hold the parameter vector
        self.b = nodes.ValueNode(node_name="b") # to hold the bias parameter (scalar)
        self.prediction = nodes.VectorScalarAffineNode(x=self.x, 
                                                       w=self.w, 
                                                       b=self.b,
                                                       node_name="prediction")
        
        # Square Loss
        self.square_loss = nodes.SquaredL2DistanceNode(a = self.prediction,
                                                       b = self.y,
                                                       node_name = "square_loss")
        # L2 Regularization Loss
        self.l2_reg_loss = nodes.L2NormPenaltyNode(l2_reg = l2_reg,
                                                   w = self.w,
                                                   node_name = "l2_reg_loss")

        # Total Loss
        self.total_loss = nodes.SumNode(a=self.square_loss,
                                        b=self.l2_reg_loss,
                                        node_name="total_loss")

        # Computational Graph
        self.graph = graph.ComputationGraphFunction(inputs=[self.x],
                                                    outcomes=[self.y],
                                                    parameters=[self.w,self.b],
                                                    prediction=self.prediction,
                                                    objective=self.total_loss)       
Example #5
    def __init__(self, l2_reg=1, step_size=.005,  max_num_epochs = 5000):
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") # to hold a vector input
        self.y = nodes.ValueNode(node_name="y") # to hold a scalar response
        self.w = nodes.ValueNode(node_name="w") # to hold the parameter vector
        self.b = nodes.ValueNode(node_name="b") # to hold the bias parameter (scalar)
        self.prediction = nodes.VectorScalarAffineNode(x=self.x, w=self.w, b=self.b,
                                                 node_name="prediction")
    def test_SumNode(self):
        max_allowed_rel_err = 1e-5
        a = nodes.ValueNode("a")
        b = nodes.ValueNode("b")
        dims = ()
        a_val = np.array(np.random.standard_normal(dims))
        b_val = np.array(np.random.standard_normal(dims))
        sum_node = nodes.SumNode(a, b, "sum node")

        init_vals = {"a":a_val, "b":b_val}
        max_rel_err = test_utils.test_node_backward(sum_node, init_vals, delta=1e-7)
        self.assertTrue(max_rel_err < max_allowed_rel_err)
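test_utils.test_node_backward is not shown in these snippets. Purely as an illustration of the technique (not the actual test_utils code), a finite-difference check of a node's backward() could look like this: weight the node's output with random coefficients to get a scalar J, compare the analytic gradient from backward() against (J(x + delta) - J(x)) / delta for each input component, and report the worst relative error.

import numpy as np

def numeric_backward_check(node, init_vals, delta=1e-7):
    """Sketch of a finite-difference gradient check for a single node."""
    preds = node.get_predecessors()
    for p in preds:
        p.out = np.array(init_vals[p.node_name], dtype=float)
        p.d_out = np.zeros(p.out.shape)

    out = node.forward()
    weights = np.random.standard_normal(np.shape(out))  # scalarization J = sum(weights * out)
    node.d_out = weights
    node.backward()

    max_rel_err = 0.0
    for p in preds:
        analytic = p.d_out
        it = np.nditer(p.out, flags=["multi_index"])
        while not it.finished:
            idx = it.multi_index
            base = np.sum(weights * node.forward())
            p.out[idx] += delta                       # perturb one input component
            plus = np.sum(weights * node.forward())
            p.out[idx] -= delta                       # restore it
            numeric = (plus - base) / delta
            denom = max(abs(numeric), abs(float(analytic[idx])), 1e-12)
            max_rel_err = max(max_rel_err, abs(numeric - float(analytic[idx])) / denom)
            it.iternext()
    return max_rel_err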
Example #7
    def __init__(self,
                 num_hidden_units=10,
                 step_size=.005,
                 init_param_scale=0.01,
                 max_num_epochs=5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
Example #8
    def test_AffineNode(self):
        W = nodes.ValueNode(node_name="W")
        x = nodes.ValueNode(node_name="x")
        b = nodes.ValueNode(node_name="b")
        affine_node = nodes.AffineNode(W, x, b, "affine")
        m = 8
        d = 5
        init_vals = {"W":np.random.randn(m,d),
                     "b":np.random.randn(m),
                     "x":np.random.randn(d)}

        max_rel_err = test_utils.test_node_backward(affine_node, init_vals, delta=1e-7)
        max_allowed_rel_err = 1e-5
        self.assertTrue(max_rel_err < max_allowed_rel_err)
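The test above pins down the AffineNode interface: W is an m-by-d matrix, x a d-vector, b an m-vector, and the output an m-vector. A sketch of such a node under the same protocol (an illustration only; the real nodes.AffineNode may differ in details):

import numpy as np

class AffineNode(object):
    """Sketch: out = W x + b for a matrix W and vectors x, b."""
    def __init__(self, W, x, b, node_name):
        self.W = W
        self.x = x
        self.b = b
        self.node_name = node_name
        self.out = None
        self.d_out = None

    def forward(self):
        self.out = np.dot(self.W.out, self.x.out) + self.b.out
        self.d_out = np.zeros(self.out.shape)
        return self.out

    def backward(self):
        self.W.d_out += np.outer(self.d_out, self.x.out)   # dJ/dW = d_out x^T
        self.x.d_out += np.dot(self.W.out.T, self.d_out)   # dJ/dx = W^T d_out
        self.b.d_out += self.d_out                          # dJ/db = d_out
        return self.d_out

    def get_predecessors(self):
        return [self.W, self.x, self.b]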
    def __init__(self, l2_reg=1, step_size=.005,  max_num_epochs = 5000):
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") # to hold a vector input
        self.y = nodes.ValueNode(node_name="y") # to hold a scalar response
        self.w = nodes.ValueNode(node_name="w") # to hold the parameter vector
        self.b = nodes.ValueNode(node_name="b") # to hold the bias parameter (scalar)
        self.prediction = nodes.VectorScalarAffineNode(x=self.x, w=self.w, b=self.b,
                                                 node_name="prediction")
        
        self.square_loss = nodes.SquaredL2DistanceNode(a=self.prediction, b=self.y,
                                                       node_name="square loss")
        self.l2_penalty = nodes.L2NormPenaltyNode(l2_reg=l2_reg, w=self.w,
                                                  node_name="l2 penalty")
        self.objective = nodes.SumNode(a=self.square_loss, b=self.l2_penalty,
                                       node_name="objective")
    def test_backward_VectorScalarAffineNode(self):
        max_allowed_rel_err = 1e-5
        w = nodes.ValueNode("w")
        x = nodes.ValueNode("x")
        b = nodes.ValueNode("b")
        affine_node = nodes.VectorScalarAffineNode(x, w, b, "affine node")
        num_ftrs = 5
        init_vals = {
            "w": np.random.randn(num_ftrs),
            "x": np.random.randn(num_ftrs),
            "b": np.array(np.random.randn())
        }
        max_rel_err = test_utils.test_node_backward(affine_node,
                                                    init_vals,
                                                    delta=1e-7)
        self.assertTrue(max_rel_err < max_allowed_rel_err)

    def test_L2NormPenaltyNode(self):
        max_allowed_rel_err = 1e-5
        l2_reg = np.array(4.0)
        w = nodes.ValueNode("w")
        l2_norm_node = nodes.L2NormPenaltyNode(l2_reg, w, "l2 norm node")
        d = (5)
        init_vals = {"w": np.array(np.random.standard_normal(d))}
        max_rel_err = test_utils.test_node_backward(l2_norm_node, init_vals, delta=1e-7)
        self.assertTrue(max_rel_err < max_allowed_rel_err)
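For reference, a compact sketch of the penalty node being tested above, under the same protocol (here l2_reg is a fixed scalar baked into the node, not a graph input): the forward value is l2_reg * ||w||^2 and the gradient with respect to w is 2 * l2_reg * w.

import numpy as np

class L2NormPenaltyNode(object):
    """Sketch: out = l2_reg * ||w||^2 for a fixed scalar l2_reg."""
    def __init__(self, l2_reg, w, node_name):
        self.l2_reg = np.array(l2_reg)
        self.w = w
        self.node_name = node_name
        self.out = None
        self.d_out = None

    def forward(self):
        self.out = self.l2_reg * np.sum(self.w.out ** 2)
        self.d_out = np.zeros(self.out.shape)
        return self.out

    def backward(self):
        self.w.d_out += self.d_out * 2.0 * self.l2_reg * self.w.out
        return self.d_out

    def get_predecessors(self):
        return [self.w]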
Example #12
    def test_TanhNode(self):
        a = nodes.ValueNode(node_name="a")
        tanh_node = nodes.TanhNode(a, "tanh")
        m = 8
        d = 5
        init_vals = {"a":np.random.randn(m,d)}

        max_rel_err = test_utils.test_node_backward(tanh_node, init_vals, delta=1e-7)
        max_allowed_rel_err = 1e-5
        self.assertTrue(max_rel_err < max_allowed_rel_err)
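And a matching sketch for the tanh activation tested above; the backward pass uses d tanh(a)/da = 1 - tanh(a)^2 = 1 - out^2 (illustrative only):

import numpy as np

class TanhNode(object):
    """Sketch: out = tanh(a), applied elementwise."""
    def __init__(self, a, node_name):
        self.a = a
        self.node_name = node_name
        self.out = None
        self.d_out = None

    def forward(self):
        self.out = np.tanh(self.a.out)
        self.d_out = np.zeros(self.out.shape)
        return self.out

    def backward(self):
        self.a.d_out += self.d_out * (1.0 - self.out ** 2)
        return self.d_out

    def get_predecessors(self):
        return [self.a]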
Example #13
    def __init__(self, num_hidden_units=10, step_size=.005, init_param_scale=0.01, max_num_epochs = 5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") # to hold a vector input
        self.y = nodes.ValueNode(node_name="y") # to hold a scalar response
        ##TODO
        self.W1 = nodes.ValueNode(node_name="W1")
        self.b1 = nodes.ValueNode(node_name="b1")
        self.W2 = nodes.ValueNode(node_name="w2")
        self.b2 = nodes.ValueNode(node_name="b2")
        self.L = nodes.AffineNode(self.W1, self.x, self.b1, node_name="L")
        self.h = nodes.TanhNode(self.L, node_name="h")
        self.prediction = nodes.VectorScalarAffineNode(self.h, self.W2, self.b2, node_name="prediction")
        self.objective = nodes.SquaredL2DistanceNode(a=self.prediction, b=self.y,
                                                 node_name="objective")
        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.W1, self.b1, self.W2, self.b2]
        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                             self.parameters, self.prediction,
                                                             self.objective)
    def __init__(self, num_hidden_units=10, step_size=.005, init_param_scale=0.01, max_num_epochs = 5000):
        self.num_hidden_units = num_hidden_units  #nodes in 1 hidden layer
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") # to hold a vector input
        self.y = nodes.ValueNode(node_name="y") # to hold a scalar response
        ## TODO
        self.W1= nodes.ValueNode(node_name="W1")
        self.W2= nodes.ValueNode(node_name="W2")
        self.b1= nodes.ValueNode(node_name="b1")
        self.b2= nodes.ValueNode(node_name="b2")

        self.comb_1=nodes.AffineNode(w=self.W1, x=self.x, b=self.b1, node_name="affine_comb_1")
        self.tanh_1=nodes.TanhNode(a=self.comb_1, node_name="hidden_layer_1_res")
        self.prediction=nodes.VectorScalarAffineNode(x=self.tanh_1, w=self.W2, b=self.b2, node_name="prediction")
        self.obj_function=nodes.SquaredL2DistanceNode(a=self.prediction, b=self.y, node_name="objective_function")
        
        # Group nodes into types to construct computation graph function
        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.W1, self.W2, self.b1, self.b2]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes, self.parameters, self.prediction, self.obj_function)
    def __init__(self, num_hidden_units=10, step_size=.005, init_param_scale=0.01, max_num_epochs = 5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x") # to hold a vector input
        self.y = nodes.ValueNode(node_name="y") # to hold a scalar response
        self.W1 = nodes.ValueNode(node_name="W1") # parameter matrix for first affine layer
        self.b1 = nodes.ValueNode(node_name="b1") # bias vector for first affine layer
        self.w2 = nodes.ValueNode(node_name="w2") # parameter vectormatrix for final affine layer
        self.b2 = nodes.ValueNode(node_name="b2") # bias scalar for final affine layer
        # nodes for nonlinear hidden layer
        self.L = nodes.AffineNode(self.W1, self.x, self.b1, node_name="Affine") 
        self.h = nodes.TanhNode(self.L, node_name="Tanh")
        # Prediction and objective
        self.prediction = nodes.VectorScalarAffineNode(x=self.h, w=self.w2, b=self.b2,
                                                       node_name="prediction")
        self.objective = nodes.SquaredL2DistanceNode(a=self.prediction, b=self.y,
                                                     node_name="square loss")

        # Group nodes into types to construct computation graph function
        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.W1, self.b1, self.w2, self.b2]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                    self.parameters, self.prediction,
                                                    self.objective)
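All of these MLP constructors store num_hidden_units and init_param_scale but only use them at training time. A sketch of how a matching fit() typically initializes the parameter nodes to small random values before the SGD loop (shapes follow the graph built above; set_parameters is the same assumed ComputationGraphFunction method as in the earlier ridge sketch):

    def fit(self, X, y):
        num_instances, num_ftrs = X.shape
        s = self.init_param_scale
        init_values = {
            "W1": s * np.random.standard_normal((self.num_hidden_units, num_ftrs)),
            "b1": s * np.random.standard_normal(self.num_hidden_units),
            "w2": s * np.random.standard_normal(self.num_hidden_units),
            "b2": s * np.array(np.random.randn()),
        }
        self.graph.set_parameters(init_values)
        # ...then run SGD epochs exactly as in the ridge-regression sketch above...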
    def test_backward_SquaredL2DistanceNode(self):
        max_allowed_rel_err = 1e-5
        a = nodes.ValueNode("a")
        b = nodes.ValueNode("b")
        node = nodes.SquaredL2DistanceNode(a, b, "L2 dist node")
        dims = ()
        init_vals = {
            "a": np.array(np.random.standard_normal(dims)),
            "b": np.array(np.random.standard_normal(dims))
        }
        max_rel_err = test_utils.test_node_backward(node,
                                                    init_vals,
                                                    delta=1e-7)
        self.assertTrue(max_rel_err < max_allowed_rel_err)

        # Not used for linear regression, but can also apply the
        # node to higher dimensional arrays
        dims = (10)
        init_vals = {
            "a": np.array(np.random.standard_normal(dims)),
            "b": np.array(np.random.standard_normal(dims))
        }
        max_rel_err = test_utils.test_node_backward(node,
                                                    init_vals,
                                                    delta=1e-7)
        self.assertTrue(max_rel_err < max_allowed_rel_err)

        dims = (10, 10)
        init_vals = {
            "a": np.array(np.random.standard_normal(dims)),
            "b": np.array(np.random.standard_normal(dims))
        }
        max_rel_err = test_utils.test_node_backward(node,
                                                    init_vals,
                                                    delta=1e-7)
        self.assertTrue(max_rel_err < max_allowed_rel_err)
Example #17
    def __init__(self,
                 num_hidden_units=10,
                 step_size=.005,
                 init_param_scale=0.01,
                 max_num_epochs=5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        self.W1 = nodes.ValueNode(
            node_name="W1")  # to hold the parameter matrix
        self.w2 = nodes.ValueNode(
            node_name="w2")  # to hold the parameter vector
        self.b1 = nodes.ValueNode(
            node_name="b1")  # to hold the bias parameter (vector)
        self.b2 = nodes.ValueNode(
            node_name="b2")  # to hold the bias parameter (scalar)
        self.affine = nodes.AffineNode(W=self.W1,
                                       x=self.x,
                                       b=self.b1,
                                       node_name="affine")
        self.tanh = nodes.TanhNode(a=self.affine, node_name="tanh")
        self.prediction = nodes.VectorScalarAffineNode(x=self.tanh,
                                                       w=self.w2,
                                                       b=self.b2,
                                                       node_name="prediction")
        self.objective = nodes.SquaredL2DistanceNode(a=self.prediction,
                                                     b=self.y,
                                                     node_name="square loss")

        # Group nodes into types to construct computation graph function
        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.W1, self.b1, self.w2, self.b2]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                    self.parameters,
                                                    self.prediction,
                                                    self.objective)
Example #18
    def __init__(self,
                 num_hidden_units=10,
                 step_size=.005,
                 init_param_scale=0.01,
                 max_num_epochs=5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        ## TODO
        self.W1 = nodes.ValueNode(node_name="W1")
        self.w2 = nodes.ValueNode(node_name="w2")
        self.b1 = nodes.ValueNode(node_name="b1")
        self.b2 = nodes.ValueNode(node_name="b2")

        self.hidden = nodes.AffineNode(W=self.W1,
                                       x=self.x,
                                       b=self.b1,
                                       node_name="hidden")

        self.tanh = nodes.TanhNode(a=self.hidden, node_name="tanh")

        self.prediction = nodes.VectorScalarAffineNode(x=self.tanh,
                                                       w=self.w2,
                                                       b=self.b2,
                                                       node_name="prediction")

        self.loss = nodes.SquaredL2DistanceNode(a=self.prediction,
                                                b=self.y,
                                                node_name="loss")

        self.graph = graph.ComputationGraphFunction(
            inputs=[self.x],
            outcomes=[self.y],
            parameters=[self.W1, self.w2, self.b1, self.b2],
            prediction=self.prediction,
            objective=self.loss)
    def __init__(self,
                 num_hidden_units=10,
                 step_size=.005,
                 init_param_scale=0.01,
                 max_num_epochs=5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        self.b1 = nodes.ValueNode(node_name='b1')
        self.b2 = nodes.ValueNode(node_name='b2')
        self.W1 = nodes.ValueNode(node_name='W1')
        #         self.W2 = nodes.ValueNode(node_name='W2')
        self.w2 = nodes.ValueNode(
            node_name='w2')  # must be named "w2" (not "W2") to match mlp_regression.t.py
        self.L = nodes.AffineNode(W=self.W1,
                                  x=self.x,
                                  b=self.b1,
                                  node_name='L')
        self.h = nodes.TanhNode(a=self.L, node_name='h')
        self.prediction = nodes.AffineNode(W=self.w2,
                                           x=self.h,
                                           b=self.b2,
                                           node_name='prediction')
        self.objective = nodes.SquaredL2DistanceNode(self.y,
                                                     self.prediction,
                                                     node_name='objective')

        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.W1, self.b1, self.w2, self.b2]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                    self.parameters,
                                                    self.prediction,
                                                    self.objective)
Example #20
    def __init__(self,
                 num_hidden_units=10,
                 step_size=.005,
                 init_param_scale=0.01,
                 max_num_epochs=5000):
        self.num_hidden_units = num_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        ## TODO
        self.w1 = nodes.ValueNode(
            node_name="w1"
        )  # to hold a matrix parameter w1 for the hidden layer
        self.w2 = nodes.ValueNode(
            node_name="w2"
        )  # to hold a vector parameter input w2 for prediction
        self.b1 = nodes.ValueNode(
            node_name="b1")  # to hold a vector bias input for the hidden layer
        self.b2 = nodes.ValueNode(
            node_name="b2")  # to hold a scalar bias for prediction
        self.affine = nodes.AffineNode(
            self.w1, self.x, self.b1,
            node_name="affine")  # to hold an affine transform
        self.active = nodes.TanhNode(
            self.affine, node_name="active"
        )  # to hold an activation function node using tanh
        self.prediction = nodes.VectorScalarAffineNode(
            self.active, self.w2, self.b2,
            node_name="predict")  # to hold a prediction node fed by the hidden activation
        self.objective = nodes.SquaredL2DistanceNode(
            self.prediction, self.y,
            node_name="objective")  # to hold a square-loss node
Example #21
    def __init__(
        self,
        n_hidden_units=10,
        l2_reg=0,
        step_size=0.005,
        init_param_scale=0.01,
        max_num_epochs=5000,
    ):
        self.n_hidden_units = n_hidden_units
        self.init_param_scale = init_param_scale
        self.max_num_epochs = max_num_epochs
        self.step_size = step_size
        self.l2_reg = l2_reg

        # Build computation graph
        self.x = nodes.ValueNode(node_name="x")  # to hold a vector input
        self.y = nodes.ValueNode(node_name="y")  # to hold a scalar response
        self.W1 = nodes.ValueNode(
            node_name="W1")  # to hold the parameter matrix
        self.W2 = nodes.ValueNode(
            node_name="W2")  # to hold the parameter vector
        self.b1 = nodes.ValueNode(
            node_name="b1")  # to hold the bias parameter (vector)
        self.b2 = nodes.ValueNode(
            node_name="b2")  # to hold the bias parameter (scalar)

        f1 = nodes.AffineNode(x=self.x,
                              W=self.W1,
                              b=self.b1,
                              node_name="Hidden Layer")
        a1 = nodes.TanhNode(a=f1, node_name="Hidden Activation")
        self.prediction = nodes.VectorScalarAffineNode(x=a1,
                                                       w=self.W2,
                                                       b=self.b2,
                                                       node_name="Output")

        data_loss = nodes.SquaredL2DistanceNode(a=self.prediction,
                                                b=self.y,
                                                node_name="Data Loss")
        reg_loss1 = nodes.L2NormPenaltyNode(l2_reg=self.l2_reg,
                                            w=self.W1,
                                            node_name="W1 Decay")
        reg_loss2 = nodes.L2NormPenaltyNode(l2_reg=self.l2_reg,
                                            w=self.W2,
                                            node_name="W2 Decay")
        total_reg_loss = nodes.SumNode(a=reg_loss1,
                                       b=reg_loss2,
                                       node_name="Regularization Loss")

        self.objective = nodes.SumNode(a=data_loss,
                                       b=total_reg_loss,
                                       node_name="Total Loss")

        self.inputs = [self.x]
        self.outcomes = [self.y]
        self.parameters = [self.W1, self.W2, self.b1, self.b2]

        self.graph = graph.ComputationGraphFunction(self.inputs, self.outcomes,
                                                    self.parameters,
                                                    self.prediction,
                                                    self.objective)
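Once trained, predictions for new inputs come from running the forward pass up to the prediction node. A short sketch, assuming ComputationGraphFunction exposes a get_prediction method (an assumed name, like the other graph methods above):

    def predict(self, X):
        # Forward pass per row, collecting the value of the prediction node
        return np.array([self.graph.get_prediction(input_values={"x": row})
                         for row in X])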