示例#1
0
文件: models.py 项目: Solasel/ML
    def get_prediction(self, x):
        """
        Calculates the predicted class for a single data point `x`.

        The class is the sign of the perceptron score w . x, with a score
        of exactly zero mapped to the positive class.

        Returns: 1 or -1 (ints, as the docstring promises; the original
        returned the floats 1.0/-1.0, which compare equal but leak an
        inconsistent type to callers)
        """
        # Non-negative score -> positive class, otherwise negative.
        return 1 if nn.as_scalar(nn.DotProduct(x, self.w)) >= 0 else -1
示例#2
0
    def get_prediction(self, x):
        """
        Calculates the predicted class for a single data point `x`.

        Returns: 1 or -1
        """
        "*** YOUR CODE HERE ***"
        # Evaluate the perceptron score w . x and threshold it at zero.
        score = nn.as_scalar(nn.DotProduct(self.w, x))
        if score >= 0:
            return 1
        return -1
示例#3
0
    def run(self, x):
        """
        Calculates the score assigned by the perceptron to a data point x.

        Inputs:
            x: a node with shape (1 x dimensions)
        Returns: a node containing a single number (the score)
        """
        # The score is the dot product of the input with the weights node.
        weights = self.get_weights()
        return nn.DotProduct(x, weights)
示例#4
0
 def train(self, dataset):
     """
     Train the perceptron until convergence.

     Sweeps the dataset one example at a time and, for every misclassified
     example (i.e. y * (w . x) <= 0), applies the perceptron update rule
     w <- w + y * x. Training stops after a full pass with no mistakes.
     """
     done = False
     while not done:
         # Iterate until a full pass over the data makes no updates,
         # i.e. every product y * (w . x) is strictly positive.
         done = True
         for x, y in dataset.iterate_once(1):
             score = nn.DotProduct(x, self.w)
             margin = nn.DotProduct(score, y)
             if nn.as_scalar(margin) <= 0:
                 done = False
                 # BUG FIX: the original updated with the model's (wrong)
                 # prediction, which on a mistake equals -y, so the weights
                 # moved AWAY from the correct class (w <- w - y*x). The
                 # perceptron rule updates toward the true label y.
                 self.w.update(x, nn.as_scalar(y))
 def run(self, x):
     """
     Calculates the score assigned by the perceptron to a data point x.
     Inputs:
         x: a node with shape (1 x dimensions)
     Returns: a node containing a single number (the score)
     """
     "*** YOUR CODE HERE ***"
     # The perceptron's score is the dot product of the weights with x.
     score = nn.DotProduct(self.w, x)
     return score
示例#6
0
    def run(self, x):
        """
        Calculates the score assigned by the perceptron to a data point x.

        Inputs:
            x: a node with shape (1 x dimensions)
        Returns: a node containing a single number (the score)
        """
        "*** YOUR CODE HERE ***"
        # Score node = dot product of the current weights with the input.
        weights = self.get_weights()
        return nn.DotProduct(weights, x)
示例#7
0
 def run(self, x):
     """
     Calculates the score assigned by the perceptron to a data point x.

     Inputs:
         x: a node with shape (1 x dimensions)
     Returns: a node containing a single number (the score)

     The score is the dot (scalar) product w . x, which is "equivalent"
     to the cosine distance up to normalization.
     """
     "*** YOUR CODE HERE ***"
     result = nn.DotProduct(self.w, x)
     return result
示例#8
0
    def run(self, x):
        """
        Calculates the score assigned by the perceptron to a data point x.

        Inputs:
            x: a node with shape (1 x dimensions)
        Returns: a node containing a single number (the score)
        """
        "*** YOUR CODE HERE ***"
        # The perceptron's score is simply x . w as a graph node.
        w = self.get_weights()
        return nn.DotProduct(x, w)
示例#9
0
    def get_prediction(self, x):
        """
        Calculates the predicted class for a single data point `x`.

        Returns: 1 or -1
        """
        "*** YOUR CODE HERE ***"
        # Non-negative score (x . w) -> class 1; negative score -> class -1.
        return 1 if nn.as_scalar(nn.DotProduct(x, self.w)) >= 0 else -1
示例#10
0
    def run(self, x):
        """
        Calculates the score assigned by the perceptron to a data point x.

        Inputs:
            x: a node with shape (1 x dimensions)
        Returns: a node containing a single number (the score)
        """
        "*** YOUR CODE HERE ***"
        # return the score - i.e the the dot product of the given weight and the weight vector
        score = nn.DotProduct(x, self.get_weights())
        return score
示例#11
0
    def get_prediction(self, x):
        """
        Calculates the predicted class for a single data point `x`.

        The class is the sign of the perceptron score w . x, with a score
        of exactly zero mapped to the positive class.

        Returns: 1 or -1
        """
        "*** YOUR CODE HERE ***"
        score = nn.as_scalar(nn.DotProduct(self.w, x))
        # Use the exact decision boundary: non-negative score -> positive
        # class. The original compared against -0.000001, which wrongly
        # classified scores in (-1e-6, 0) as positive.
        if score >= 0:
            return 1
        return -1
示例#12
0
    def train(self, dataset):
        """
        Trains the model.

        Runs repeated full passes over the dataset, taking one gradient-
        descent step per batch, until the most recently computed batch
        loss falls below 1e-5.
        """
        "*** YOUR CODE HERE ***"
        # Seed the loop with an arbitrary nonzero loss node so the while
        # condition can be evaluated before the first epoch runs.
        loss = nn.DotProduct(self.w1, self.w1)

        while nn.as_scalar(loss) >= .00001:
            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                grads = nn.gradients(loss, self.weights)
                # Step each parameter against its gradient.
                for param, grad in zip(self.weights, grads):
                    param.update(grad, -self.learning_rate)
示例#13
0
    def run(self, x):
        """
        Calculates the score assigned by the perceptron to a data point x.

        Inputs:
            x: a node with shape (1 x dimensions)
        Returns: a node containing a single number (the score)
        """
        "*** YOUR CODE HERE ***"
        # Build and return the score node x . w.
        score = nn.DotProduct(x, self.w)
        return score
示例#14
0
    def run(self, xs):
        """
        Runs the model for a batch of examples.

        Although words have different lengths, our data processing guarantees
        that within a single batch, all words will be of the same length (L).

        Here `xs` will be a list of length L. Each element of `xs` will be a
        node with shape (batch_size x self.num_chars), where every row in the
        array is a one-hot vector encoding of a character. For example, if we
        have a batch of 8 three-letter words where the last word is "cat", then
        xs[1] will be a node that contains a 1 at position (7, 0). Here the
        index 7 reflects the fact that "cat" is the last word in the batch, and
        the index 0 reflects the fact that the letter "a" is the inital (0th)
        letter of our combined alphabet for this task.

        Your model should use a Recurrent Neural Network to summarize the list
        `xs` into a single node of shape (batch_size x hidden_size), for your
        choice of hidden_size. It should then calculate a node of shape
        (batch_size x 5) containing scores, where higher scores correspond to
        greater probability of the word originating from a particular language.

        Inputs:
            xs: a list with L elements (one per character), where each element
                is a node with shape (batch_size x self.num_chars)
        Returns:
            A node with shape (batch_size x 5) containing predicted scores
                (also called logits)
        """
        "*** YOUR CODE HERE ***"

        # First summarize xs into a single node via the recurrence below.
        # NOTE(review): this initial z_curr is a DotProduct node, unlike the
        # nn.Linear used inside the loop; it is only returned when
        # len(xs) == 1 and looks inconsistent with the recurrence — confirm.
        z_curr = nn.DotProduct(self.w, xs[0])
        # Initial hidden state derived from the first character.
        # f_initial is defined elsewhere in the class — presumably it maps
        # xs[0] to a (batch_size x hidden) node; verify against its definition.
        h_curr = self.f_initial(xs[0])

        for i in range(1, len(xs)):
            # Recurrence: combine the current character with the previous
            # hidden state, z_t = xs[t] @ W + h_{t-1} @ W_hidden.
            z_curr = nn.Add(nn.Linear(xs[i], self.w), nn.Linear(h_curr, self.w_hidden))
            h_curr = z_curr
        # NOTE(review): the docstring promises a (batch_size x 5) logits node,
        # but no output layer maps the hidden state to 5 scores here — verify
        # the shape of self.w / self.w_hidden against the rest of the class.
        return z_curr
示例#15
0
 def run(self, x):
     """Return the perceptron's score node: the dot product w . x."""
     score = nn.DotProduct(self.w, x)
     return score