Example #1
0
    def run(self, xs):
        """
        Run the RNN over a batch of words.

        `xs` is a list of length L (one node per character position); each
        node has shape (batch_size x self.num_chars) of one-hot rows.  The
        characters are folded into a single hidden-state node of shape
        (batch_size x hidden_size), which is then projected to a
        (batch_size x 5) node of per-language scores (logits).
        """
        "*** YOUR CODE HERE ***"
        # Initial hidden state comes from the first character alone.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(xs[0], self.w), self.b))
        # Fold each remaining character into the running hidden state.
        for ch in xs[1:]:
            input_part = nn.AddBias(nn.Linear(ch, self.w), self.b)
            recurrent_part = nn.AddBias(nn.Linear(hidden, self.w_hidden),
                                        self.b_hidden)
            hidden = nn.ReLU(nn.Add(input_part, recurrent_part))
        # Final affine layer maps the word summary to 5 language logits.
        return nn.AddBias(nn.Linear(hidden, self.w_last), self.b_last)
Example #2
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Two hidden ReLU layers: 784 -> dim1 -> dim2.
        hidden1 = nn.ReLU(nn.AddBias(nn.Linear(x, self.w0), self.b0))
        hidden2 = nn.ReLU(nn.AddBias(nn.Linear(hidden1, self.w1), self.b1))
        # Output layer is affine only (no ReLU) so logits keep full range.
        return nn.AddBias(nn.Linear(hidden2, self.w2), self.b2)
Example #3
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Three hidden ReLU layers, applied in sequence.
        activation = x
        for weight, bias in ((self.m1, self.b1),
                             (self.m2, self.b2),
                             (self.m3, self.b3)):
            activation = nn.ReLU(nn.AddBias(nn.Linear(activation, weight), bias))
        # Final affine layer produces the raw logits (no activation).
        return nn.AddBias(nn.Linear(activation, self.m4), self.b4)
Example #4
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Two hidden ReLU layers followed by an affine output layer.
        hidden_a = nn.ReLU(nn.AddBias(nn.Linear(x, self.w1), self.b1))
        hidden_b = nn.ReLU(nn.AddBias(nn.Linear(hidden_a, self.w2), self.b2))
        return nn.AddBias(nn.Linear(hidden_b, self.w3), self.b3)
    def run(self, x):
        """
        Runs the model for a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        "*** YOUR CODE HERE ***"
        # One hidden ReLU layer, then an affine output layer.  The output
        # has no activation so predicted y-values may be negative.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, self.m_1), self.b_1))
        return nn.AddBias(nn.Linear(hidden, self.m_2), self.b_2)
    def run(self, x):
        """
        Runs the model for a batch of examples.

        Your model should predict a node with shape (batch_size x 10),
        containing scores. Higher scores correspond to greater probability of
        the image belonging to a particular class.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # One hidden ReLU layer, then an affine output layer that yields
        # the raw class scores (no activation on the logits).
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, self.m_1), self.b_1))
        return nn.AddBias(nn.Linear(hidden, self.m_2), self.b_2)
    def __init__(self):
        # Our dataset contains words from five different languages, and the
        # combined alphabets of the five languages contain a total of 47 unique
        # characters.
        # You can refer to self.num_chars or len(self.languages) in your code
        self.num_chars = 47
        self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]

        # Initialize your model parameters here
        "*** YOUR CODE HERE ***"
        self.hidden_lsize = 250
        # Negative multiplier: parameter updates in this framework apply
        # `multiplier * gradient`, so descent needs a negative rate.
        self.learning_rate = -.01
        self.batch_size = 200

        # NOTE: the constructor must only *declare* parameters.  The original
        # code also tried to build the forward graph here using an undefined
        # variable `x`, which raised a NameError at construction time; graph
        # building belongs in run().
        #
        # Input-to-hidden transform (one-hot char -> hidden state).
        self.w = nn.Parameter(self.num_chars, self.hidden_lsize)
        self.b = nn.Parameter(1, self.hidden_lsize)
        # Hidden-to-hidden (recurrent) transform.
        self.w_hidden = nn.Parameter(self.hidden_lsize, self.hidden_lsize)
        self.b_hidden = nn.Parameter(1, self.hidden_lsize)
        # Hidden-to-output transform: one score per language (5 classes).
        self.w_last = nn.Parameter(self.hidden_lsize, len(self.languages))
        self.b_last = nn.Parameter(1, len(self.languages))
Example #8
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Four hidden ReLU layers, applied in sequence.
        hidden = x
        for weight, bias in ((self.w1, self.b1), (self.w2, self.b2),
                             (self.w3, self.b3), (self.w4, self.b4)):
            hidden = nn.ReLU(nn.AddBias(nn.Linear(hidden, weight), bias))
        # Output layer: linear transform only (no bias, no activation).
        return nn.Linear(hidden, self.w5)
Example #9
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Three hidden ReLU layers, then an affine output layer.
        act = x
        for weight, bias in ((self.weight1, self.bia1),
                             (self.weight2, self.bia2),
                             (self.weight3, self.bia3)):
            act = nn.ReLU(nn.AddBias(nn.Linear(act, weight), bias))
        return nn.AddBias(nn.Linear(act, self.weight4), self.bia4)
Example #10
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Three hidden layers, each an affine transform followed by ReLU.
        act = x
        for weight, bias in ((self.w_1, self.b_1),
                             (self.w_2, self.b_2),
                             (self.w_3, self.b_3)):
            act = nn.ReLU(nn.AddBias(nn.Linear(act, weight), bias))
        # Output vector: affine only, no ReLU on the logits.
        return nn.AddBias(nn.Linear(act, self.output_wt), self.output_bias)
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Two hidden ReLU layers followed by an affine output layer.
        hidden_one = nn.ReLU(nn.AddBias(nn.Linear(x, self.weight1), self.b1))
        hidden_two = nn.ReLU(
            nn.AddBias(nn.Linear(hidden_one, self.weight2), self.b2))
        return nn.AddBias(nn.Linear(hidden_two, self.weight3), self.b3)
Example #12
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        # Two hidden ReLU layers, then an affine layer for the logits.
        hidden_a = nn.ReLU(nn.AddBias(nn.Linear(x, self.m1), self.b1))
        hidden_b = nn.ReLU(nn.AddBias(nn.Linear(hidden_a, self.m2), self.b2))
        return nn.AddBias(nn.Linear(hidden_b, self.m3), self.b3)
Example #13
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        self.layers is an alternating flat list [w, b, w, b, ...]; all pairs
        except the last form hidden ReLU layers, the last pair is the affine
        output layer.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Walk the (weight, bias) pairs of every hidden layer.
        weights = self.layers[0:-2:2]
        biases = self.layers[1:-2:2]
        for weight, bias in zip(weights, biases):
            x = nn.ReLU(nn.AddBias(nn.Linear(x, weight), bias))
        # Last layer: affine only, no ReLU, so logits keep their full range.
        return nn.AddBias(nn.Linear(x, self.layers[-2]), self.layers[-1])
Example #14
0
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Two hidden ReLU layers, then a final affine layer for the logits.
        layer_one = nn.ReLU(nn.AddBias(nn.Linear(x, self.w1), self.b1))
        layer_two = nn.ReLU(nn.AddBias(nn.Linear(layer_one, self.w2), self.b2))
        return nn.AddBias(nn.Linear(layer_two, self.w3), self.b3)
    def run(self, x):
        """
        Run the classifier on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        # Two hidden ReLU layers followed by an affine output layer.
        hidden_first = nn.ReLU(nn.AddBias(nn.Linear(x, self.w1), self.b1))
        hidden_second = nn.ReLU(
            nn.AddBias(nn.Linear(hidden_first, self.w2), self.b2))
        return nn.AddBias(nn.Linear(hidden_second, self.w3), self.b3)
Example #16
0
    def run(self, x):
        """
        Runs the model for a batch of examples.

        Your model should predict a node with shape (batch_size x 10),
        containing scores. Higher scores correspond to greater probability of
        the image belonging to a particular class.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # f(x) = relu(x . W1 + b1) . W2 + b2
        # Hidden layer: affine transform followed by ReLU.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, self.m1), self.bias1))
        # Output layer: affine only.  Applying ReLU to the output would clamp
        # negative logits to zero, so the raw pre-activation scores are
        # returned (the original computed an unused ReLU node here).
        return nn.AddBias(nn.Linear(hidden, self.m2), self.bias2)
Example #17
0
 def run(self, xs):
     """Summarize a word (a list of one-hot character nodes) into logits.

     Each element of `xs` has shape (batch_size x num_chars); the loop folds
     them into a running `layer` node, and the final affine transform maps
     that summary to per-language scores.

     NOTE(review): `layer` is seeded with Linear(xs[0], weight[0]) and the
     loop's first iteration adds Linear(xs[0], weight[0]) again, so the first
     character effectively contributes twice — confirm this is intentional.
     """
     layer = nn.Linear(nn.DataNode(xs[0].data), self.weight[0])
     for x in xs:
         # next_state = relu((x . W0 + state) . W1 + b1)
         layer = nn.ReLU(
             nn.AddBias(
                 nn.Linear(nn.Add(nn.Linear(x, self.weight[0]), layer),
                           self.weight[1]), self.bias[1]))
     # Final affine layer maps the word summary to the output scores.
     return nn.AddBias(nn.Linear(layer, self.weight[2]), self.bias[2])
Example #18
0
 def run(self, x):
     """Run the model on a batch of examples.

     Inputs:
         x: a node with shape (batch_size x num_features)
     Returns:
         the network's output node (raw scores / predictions)
     """
     # Hidden layer: affine transform followed by ReLU.
     first_layer_output = nn.AddBias(nn.Linear(x, self.W1), self.b1)
     hidden_layer_1_output = nn.ReLU(first_layer_output)
     # Output layer: affine only (no activation on the final output).
     return nn.AddBias(nn.Linear(hidden_layer_1_output, self.W2), self.b2)
Example #19
0
    def run(self, xs):
        """
        Runs the model for a batch of examples.

        `xs` is a list of length L (one node per character position); each
        node has shape (batch_size x self.num_chars) of one-hot rows.  An RNN
        folds the characters into a single hidden-state node, and two fully
        connected layers plus a final projection turn that summary into a
        (batch_size x 5) node of per-language scores.

        Inputs:
            xs: a list with L elements (one per character), where each element
                is a node with shape (batch_size x self.num_chars)
        Returns:
            A node with shape (batch_size x 5) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"
        # Encode the first character into the initial hidden state.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(xs[0], self.w1), self.b1))
        # Fold the remaining characters in one at a time:
        #   new_state = state . w2 + relu(char . w1 + b1)
        for ch in xs[1:]:
            recurrent = nn.Linear(hidden, self.w2)
            encoded = nn.ReLU(nn.AddBias(nn.Linear(ch, self.w1), self.b1))
            hidden = nn.Add(recurrent, encoded)
        # Two fully connected ReLU layers on top of the word summary.
        out = nn.ReLU(nn.AddBias(nn.Linear(hidden, self.end_w1), self.end_b1))
        out = nn.ReLU(nn.AddBias(nn.Linear(out, self.end_w2), self.end_b2))
        # Project down to the per-language scores.
        return nn.Linear(out, self.shrinker)
Example #20
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        # Hidden layer: affine transform followed by ReLU.
        affine_one = nn.Linear(x, self.w1)
        pre_activation = nn.AddBias(affine_one, self.b1)
        hidden = nn.ReLU(pre_activation)
        # Output layer: affine only, so predictions may be negative.
        affine_two = nn.Linear(hidden, self.w2)
        return nn.AddBias(affine_two, self.b2)
Example #21
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        self.layers holds [w_in, b_in, ..., w_out, b_out].

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        w_in, b_in = self.layers[0], self.layers[1]
        w_out, b_out = self.layers[-2], self.layers[-1]
        # Hidden ReLU layer, then affine output.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, w_in), b_in))
        return nn.AddBias(nn.Linear(hidden, w_out), b_out)
Example #22
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        "*** YOUR CODE HERE ***"
        # Hidden layer: affine transform followed by ReLU.
        affine_in = nn.Linear(x, self.weight0)
        hidden = nn.ReLU(nn.AddBias(affine_in, self.bias0))
        # Output layer: affine only (no activation).
        affine_out = nn.Linear(hidden, self.weight1)
        return nn.AddBias(affine_out, self.bias1)
Example #23
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        # Hidden layer with ReLU non-linearity.
        pre_act = nn.AddBias(nn.Linear(x, self.m1), self.b1)
        hidden = nn.ReLU(pre_act)
        # Affine output layer (no activation on predictions).
        return nn.AddBias(nn.Linear(hidden, self.m2), self.b2)
Example #24
0
    def run(self, xs):
        """
        Fold a list of one-hot character nodes into per-language logits.

        Each element of `xs` has shape (batch_size x num_chars); the RNN
        maintains a hidden-state node and the final affine layer maps it
        to the output scores.
        """
        # Hidden state from the first character.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(xs[0], self.W), self.b))
        # Recurrent update for each remaining character.
        for ch in xs[1:]:
            combined = nn.Add(nn.Linear(ch, self.W),
                              nn.Linear(hidden, self.W_hidden))
            hidden = nn.ReLU(nn.AddBias(combined, self.b))
        # Output layer: affine transform of the word summary.
        return nn.AddBias(nn.Linear(hidden, self.W_last), self.b_last)
Example #25
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        "*** YOUR CODE HERE ***"
        # Hidden ReLU layer, then an affine output layer.
        affine_hidden = nn.Linear(x, self.W1)
        hidden = nn.ReLU(nn.AddBias(affine_hidden, self.b1))
        affine_out = nn.Linear(hidden, self.W2)
        return nn.AddBias(affine_out, self.b2)
Example #26
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        "*** YOUR CODE HERE ***"
        # Hidden layer: affine transform + ReLU.
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, self.w0), self.b0))
        # Output layer: affine only.
        return nn.AddBias(nn.Linear(hidden, self.w1), self.b1)
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        # Hidden layer: affine transform followed by the ReLU non-linearity.
        pre_activation = nn.AddBias(nn.Linear(x, self.w1), self.b1)
        activated = nn.ReLU(pre_activation)
        # Output layer: affine only so predictions can take any sign.
        return nn.AddBias(nn.Linear(activated, self.w2), self.b2)
Example #28
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        "*** YOUR CODE HERE ***"
        # Hidden ReLU layer followed by an affine output layer.
        hidden = nn.ReLU(
            nn.AddBias(nn.Linear(x, self.first_weight), self.bias_1))
        return nn.AddBias(nn.Linear(hidden, self.second_weight), self.bias_2)
Example #29
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        # Hidden layer: affine transform + ReLU.
        affine_hidden = nn.Linear(x, self.weights1)
        hidden = nn.ReLU(nn.AddBias(affine_hidden, self.bias1))
        # Output layer: affine only (no activation).
        affine_out = nn.Linear(hidden, self.weightsout)
        return nn.AddBias(affine_out, self.biasout)
Example #30
0
    def run(self, x):
        """
        Run the regression model on a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        "*** YOUR CODE HERE ***"
        # One hidden ReLU layer, then an affine output layer.
        hidden_state = nn.ReLU(nn.AddBias(nn.Linear(x, self.w1), self.b1))
        return nn.AddBias(nn.Linear(hidden_state, self.w2), self.b2)