Code Example #1
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     "*** YOUR CODE HERE ***"
     converged = False
     while not converged:
         converged = True
         for x, y in dataset.iterate_once(1):
             y_star = self.get_prediction(x)
             y = nn.as_scalar(y)
             if y_star != y:
                 converged = False
                 self.w.update(x, y)
Code Example #2
 def train(self, dataset):
     """
     Trains the model.
     """
     batch_size = 50
     loss = None
     while True:
         for x, y in dataset.iterate_once(batch_size):
             loss = self.get_loss(x, y)
             gradient = nn.gradients(loss, self.params)
             for i in range(len(self.params)):
                 self.params[i].update(gradient[i], -self.learningRate)
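          # `loss` still holds the node from the epoch's final batch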
         if loss and nn.as_scalar(loss) < self.trainEndLoss:
             break
Code Example #3
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     for x, y in dataset.iterate_forever(self.batch_sizes):
          loss = self.get_loss(x, y)
         if nn.as_scalar(loss) < .001:
             break
         grad_w1, grad_b1, grad_w2, grad_b2 = nn.gradients(loss, [self.W1, self.b1, self.W2, self.b2])
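          # NOTE: the positive multiplier assumes self.learningRate is stored
          # as a negative step size (not shown); otherwise negate it here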
         self.W1.update(grad_w1, self.learningRate)
         self.b1.update(grad_b1, self.learningRate)
         self.W2.update(grad_w2, self.learningRate)
         self.b2.update(grad_b2, self.learningRate)
Code Example #4
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        alpha = -0.005
        convergent = False
        epoch = 0
        while not convergent:
            epoch += 1
            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                grad_param_list = nn.gradients(loss, self.param_list)
                for i in range(len(self.param_list)):
                    self.param_list[i].update(grad_param_list[i], alpha)

            final_loss = self.get_loss(
                nn.Constant(dataset.x), nn.Constant(dataset.y)
            )  # Convergence condition ripped off from autograder.py
            if nn.as_scalar(final_loss) < 0.02:
                print(str(epoch) + " - " + str(nn.as_scalar(final_loss)))
                convergent = True
Code Example #5
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     while True:
         convergence = True
         for x, y in dataset.iterate_once(1):
             y = nn.as_scalar(y)
             y_pred = self.get_prediction(x)
             if y_pred != y:
                 convergence = False
                 self.w.update(x, y)
         if convergence:
             break
Code Example #6
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 1
     loss = float('inf')
     while loss >= .02:
         for x, y in dataset.iterate_once(batch_size):
             loss = self.get_loss(x, y)
             grads = nn.gradients(loss, self.params)
             loss = nn.as_scalar(loss)
             for i in range(len(self.params)):
                 self.params[i].update(grads[i], -self.lr)
Code Example #7
    def get_prediction(self, x):
        """
        Calculates the predicted class for a single data point `x`.

        Returns: 1 or -1
        """
        "*** YOUR CODE HERE ***"
        # self.run(x) returns the score w · x as a 1x1 node
        if nn.as_scalar(self.run(x)) >= 0:
            return 1
        else:
            return -1
Code Example #8
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"

        loss = 1
        while loss >= 0.01:
            for x, y in dataset.iterate_once(self.batchSize):
                loss = self.get_loss(x, y)
                gradWRTList = nn.gradients(loss, self.parametersList)
                for i in range(len(self.parametersList)):
                    self.parametersList[i].update(gradWRTList[i], -self.learningRate)
                loss = nn.as_scalar(loss)
Code Example #9
    def train(self, dataset):
        """
        Trains the model.
        """
        currLoss = float("inf")
        params = [self.w0, self.b0, self.w1, self.b1]

        while currLoss > 0.015:
            for x, y in dataset.iterate_once(50):
                currLoss = self.get_loss(x, y)
                gradients = nn.gradients(currLoss, params)
                currLoss = nn.as_scalar(currLoss)

                for p in range(len(params)):
                    params[p].update(gradients[p], -0.01)
Code Example #10
File: models.py Project: byeongminP/CS188
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 100
      acc = 0
      while acc < .85:
          for x, y in dataset.iterate_once(batch_size):
              # build the loss node once and reuse it for the gradients
              loss = self.get_loss(x, y)
              grads = nn.gradients(loss, self.hyp)
             for i in range(len(self.hyp)):
                 self.hyp[i].update(grads[i], -self.multiplier)
         acc = dataset.get_validation_accuracy()
Code Example #11
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        training_loss = 1.0
        while training_loss > 0.02:
            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                gradient = nn.gradients(loss, self.weights + self.bias)
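                # NOTE: the positive multiplier assumes self.learning_rate is
                # stored as a negative step size; otherwise negate it here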
                for i in range(3):
                    self.weights[i].update(gradient[i], self.learning_rate)
                    self.bias[i].update(gradient[i + 3], self.learning_rate)

                training_loss = nn.as_scalar(loss)
Code Example #12
    def get_prediction(self, x):
        """
		Calculates the predicted class for a single data point `x`.

		Returns: 1 or -1
		"""
        "*** YOUR CODE HERE ***"
        # get the dot product w · x as a scalar value
        a = nn.as_scalar(self.run(x))
        # return 1 if the score is non-negative, else -1
        if a >= 0:
            return 1
        else:
            return -1
Code Example #13
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 200
     loss = 0.01
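      # initialized exactly at the threshold so the loop body runs at least once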
     while loss >= 0.01:
         for x, y in dataset.iterate_once(batch_size):
             square_loss = self.get_loss(x, y)
             loss = nn.as_scalar(square_loss)
             gradients_list = nn.gradients(square_loss, self.para_list)
             for i in range(len(gradients_list)):
                 self.para_list[i].update(gradients_list[i],
                                          -self.LearningRate)
Code Example #14
File: models.py Project: vshirsat/CS188-Project5
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     flag = True
     for x, y in dataset.iterate_once(1):
         y_pred = self.get_prediction(x)
         y_true = nn.as_scalar(y)
         if y_pred != y_true:
             direction = x
             multiplier = y_true
             self.w.update(direction, multiplier)
             flag = False
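      # if any example was misclassified, recurse to make another full pass;
      # recursion depth grows with the number of passes needed to converge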
     if not flag:
         self.train(dataset)
Code Example #15
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 1
     run = True
     while run:
         run = False
         for x, y in dataset.iterate_once(batch_size):
             y_hat = self.get_prediction(x)
             y = nn.as_scalar(y)
             if y_hat != y:
                 self.w.update(direction=x, multiplier=y)
                 run = True
Code Example #16
 def train(self, dataset):
     """
      Trains the model. Stops when the batch loss drops below .015.
     """
     "*** YOUR CODE HERE ***"
     for batch in dataset.iterate_forever(self.batchSize):
         loss = self.get_loss(batch[0], batch[1])
         if nn.as_scalar(loss) < .015:
             break
         gradients = nn.gradients(loss, self.weights + self.bias)
         weight_grads = gradients[:3]
         bias_grads = gradients[3:]
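          # NOTE: the positive multiplier assumes self.learning_rate is stored
          # as a negative step size; otherwise negate it for gradient descent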
         for i in range(3):
             self.weights[i].update(weight_grads[i], self.learning_rate)
             self.bias[i].update(bias_grads[i], self.learning_rate)
Code Example #17
 def train(self, dataset):
     """
     Trains the model.
     """
     for x, y in dataset.iterate_forever(self.batch_size):
         loss = self.get_loss(x, y)
         if nn.as_scalar(loss) <= 0.001:
             break
         grads = nn.gradients(loss, [self.w1, self.b1, self.w2, self.b2, self.w3, self.b3])
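          # update() adds multiplier * direction to each parameter, so the
          # negative learning rate steps opposite the gradient (descent)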
         self.w1.update(grads[0], -self.learning_rate)
         self.b1.update(grads[1], -self.learning_rate)
         self.w2.update(grads[2], -self.learning_rate)
         self.b2.update(grads[3], -self.learning_rate)
         self.w3.update(grads[4], -self.learning_rate)
         self.b3.update(grads[5], -self.learning_rate)
Code Example #18
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     loss_sca = 1
     while loss_sca > 0.01:
         for x, y in dataset.iterate_once(10):
             loss = self.get_loss(x, y)
             loss_sca = nn.as_scalar(loss)
             g = nn.gradients(loss, [self.m0, self.b0, self.m1, self.b1])
             self.m0.update(g[0], -self.learningRate)
             self.b0.update(g[1], -self.learningRate)
             self.m1.update(g[2], -self.learningRate)
             self.b1.update(g[3], -self.learningRate)
Code Example #19
File: models.py Project: sid-mishra910/Pacman-AI
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     while True:
         loss = 0
          for x, y in dataset.iterate_once(50):
              # reuse one loss node for both the gradients and the epoch total
              batch_loss = self.get_loss(x, y)
              gradients = nn.gradients(batch_loss, [self.w, self.bias])
              self.w.update(gradients[0], -self.alpha)
              self.bias.update(gradients[1], -self.alpha)
              loss += nn.as_scalar(batch_loss)
         if loss < 0.6:
             break
Code Example #20
File: s.py Project: zsano1/Intro-to-AI
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     while True:
          for x, y in dataset.iterate_once(100):
              loss = self.get_loss(x, y)
              g_w1, g_w2, g_b1, g_b2 = nn.gradients(loss, [self.w1, self.w2, self.b1, self.b2])
              self.w1.update(g_w1, -0.005)
              self.w2.update(g_w2, -0.005)
              self.b1.update(g_b1, -0.005)
              self.b2.update(g_b2, -0.005)
          # x and y still hold the last batch from the loop above
          if nn.as_scalar(self.get_loss(x, y)) < 0.02:
              return
Code Example #21
File: models.py Project: dhruvsirohi1/AI-projects
    def train(self, dataset):
        """
        Train the perceptron until convergence.
        """
        "*** YOUR CODE HERE ***"
        while True:
            err = 0
            for x, y in dataset.iterate_once(1):
                pred = self.get_prediction(x)

                if nn.as_scalar(y) != pred:
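                    # labels are +1/-1, so on a mistake -pred equals the true
                    # label: this is the standard perceptron update w += y * x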
                    self.w.update(x, -1 * pred)
                    err = err + 1
            if err == 0:
                break
Code Example #22
File: models.py Project: archkt/cs188
    def train(self, dataset):
        """
        Train the perceptron until convergence.
        """
        "*** YOUR CODE HERE ***"

        while True:
            converged = True
            for x, y in dataset.iterate_once(1):
                scalar_y = nn.as_scalar(y)
                if self.get_prediction(x) != scalar_y:
                    # misclassified: nudge the weights toward y * x
                    self.get_weights().update(x, scalar_y)
                    converged = False
            if converged:
                break
Code Example #23
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 1
      check = True
      # loop until a complete pass over the data makes no updates
      while check:
          check = False
          for x, y_star in dataset.iterate_once(batch_size):
              y = self.get_prediction(x)
              # if the prediction already matches the true label, do nothing;
              # otherwise nudge the weight vector by y_star * x
              if y == nn.as_scalar(y_star):
                  continue
              self.get_weights().update(x, nn.as_scalar(y_star))
              check = True
Code Example #24
File: models.py Project: soojung96/machinelearning
    def get_loss(self, x, y):
        """
        Computes the loss for a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
            y: a node with shape (batch_size x 1), containing the true y-values
                to be used for training
        Returns: a loss node
        """
        "*** YOUR CODE HERE ***"
        predicted_y = self.run(x)
        loss = nn.SquareLoss(predicted_y, y)
        print("loss: " + str(nn.as_scalar(loss)))
        return loss
Code Example #25
File: models.py Project: AliR19/ENSE496AC
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     while True:
         for x, y in dataset.iterate_once(self.batch_size):
              loss = self.get_loss(x, y)
              gradient = nn.gradients(loss, [self.w0, self.w1, self.b0, self.b1])
              self.w0.update(gradient[0], -0.005)
              self.w1.update(gradient[1], -0.005)
              self.b0.update(gradient[2], -0.005)
              self.b1.update(gradient[3], -0.005)
          # check convergence on the full dataset once per epoch
          if nn.as_scalar(self.get_loss(nn.Constant(dataset.x), nn.Constant(dataset.y))) < 0.02:
              return
Code Example #26
File: models.py Project: cleomart/Machine-Learning
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 1
     update = True
     while update:
         update = False
         for x, y in dataset.iterate_once(batch_size):
             true_y = nn.as_scalar(y)
             predict_y = self.get_prediction(x)
             if true_y != predict_y:
                 update = True
                 self.w.update(x, true_y)
Code Example #27
 def train(self, dataset):
     """
     Train the perceptron until convergence.
     """
     "*** YOUR CODE HERE ***"
     no_mistakes = False
     while not no_mistakes: 
         no_mistakes = True 
         for x, y in dataset.iterate_once(batch_size=1):
             y_true = nn.as_scalar(y)
             pred = self.get_prediction(x)
             if y_true != pred: 
                 no_mistakes = False
                 self.w.update(x, y_true)
     return
Code Example #28
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     for x, y in dataset.iterate_forever(5):
         fn_loss = self.get_loss(x, y)
         if nn.as_scalar(fn_loss) <= 0.000007:
             break
          # get gradients and continuously update the weights
         grad_1, grad_b1, grad_2, grad_b2 = nn.gradients(
             fn_loss, [self.l1, self.l1b, self.two, self.l2b])
         self.l1.update(grad_1, -self.multiplier)
         self.l1b.update(grad_b1, -self.multiplier)
         self.two.update(grad_2, -self.multiplier)
         self.l2b.update(grad_b2, -self.multiplier)
Code Example #29
    def train(self, dataset):
        """
        Train the perceptron until convergence.
        """
        batch_size = 1
        is_done = False

        while not is_done:
            is_done = True
            for x, y in dataset.iterate_once(batch_size):
                predicted_y = self.get_prediction(x)
                true_y = nn.as_scalar(y)
                if predicted_y == true_y:
                    continue
                is_done = False
                self.w.update(x, true_y)
Code Example #30
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     for x, y in dataset.iterate_forever(self.batch_size):
          params = [self.weight1, self.weight2, self.bias1, self.bias2]
          grads = nn.gradients(self.get_loss(x, y), params)
          # NOTE: the positive multiplier assumes self.learning_rate is stored
          # as a negative step size; otherwise negate it for gradient descent
          for i in range(len(params)):
              params[i].update(grads[i], self.learning_rate)
         if nn.as_scalar(
                 self.get_loss(nn.Constant(dataset.x), nn.Constant(
                     dataset.y))) < .02:
             return