Example #1
    def train(self, dataset):
        """
        Trains the model.
        """

        while dataset.get_validation_accuracy() < 0.97:
            for x, y in dataset.iterate_once(50):
                loss = self.get_loss(x, y)
                if nn.as_scalar(loss) > 0.02:
                    grad_wrt_w1, grad_wrt_w2, grad_wrt_w3, grad_wrt_b1, grad_wrt_b2, grad_wrt_b3 \
                     = nn.gradients(loss, [self.w1, self.w2, self.w3, self.b1, self.b2, self.b3])
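                    # update() adds multiplier * direction, so self.multiplier is presumably negative here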
                    self.w1.update(grad_wrt_w1, self.multiplier)
                    self.w2.update(grad_wrt_w2, self.multiplier)
                    self.w3.update(grad_wrt_w3, self.multiplier)
                    self.b1.update(grad_wrt_b1, self.multiplier)
                    self.b2.update(grad_wrt_b2, self.multiplier)
                    self.b3.update(grad_wrt_b3, self.multiplier)
Example #2
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     accuracy = 0
     turn = 0
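     # iterate_forever never stops on its own: check validation accuracy every 64 batches and break at 0.85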
     for x, y in dataset.iterate_forever(self.batch_size):
         turn += 1
         if turn % 64 == 0:
             accuracy = dataset.get_validation_accuracy()
             print(accuracy)
         if accuracy >= 0.85:
             break
         loss = self.get_loss(x, y)
         mixedGrad = nn.gradients(loss, self.m)
         for i in range(self.layers):
             self.m[i].update(mixedGrad[i], -self.multiplier)
Example #3
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     while True:
         for x, y in dataset.iterate_once(self.batch_size):
             loss = self.get_loss(x, y)
             gradients = nn.gradients(
                 loss,
                 [self.w0, self.b0, self.w1, self.b1, self.w2, self.b2])
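             # gradients come back one per parameter, in the same order as the list above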
             self.w0.update(gradients[0], -self.learning_rate)
             self.b0.update(gradients[1], -self.learning_rate)
             self.w1.update(gradients[2], -self.learning_rate)
             self.b1.update(gradients[3], -self.learning_rate)
             self.w2.update(gradients[4], -self.learning_rate)
             self.b2.update(gradients[5], -self.learning_rate)
         if dataset.get_validation_accuracy() >= 0.975:
             break
Example #4
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     for x, y_ in dataset.iterate_forever(self.batch_size):
         loss = self.get_loss(x, y_)
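         # loss-based stopping: quit as soon as one batch's loss is below 0.002 (no validation check)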
         if nn.as_scalar(loss) < 0.002:
             break
         else:
             g_w1, g_w2, g_w3, g_b1, g_b2, g_b3 = nn.gradients(
                 loss,
                 [self.w1, self.w2, self.w3, self.b1, self.b2, self.b3])
             self.w3.update(g_w3, -self.lr)
             self.b3.update(g_b3, -self.lr)
             self.w2.update(g_w2, -self.lr)
             self.b2.update(g_b2, -self.lr)
             self.w1.update(g_w1, -self.lr)
             self.b1.update(g_b1, -self.lr)
Example #5
 def train(self, dataset):
     """
     Trains the model.
     """
     for epoch in range(5):
         for x, y in dataset.iterate_once(20):
             loss = self.get_loss(x, y)
             # square loss is never negative, so descending with -learning_rate
             # is always the correct direction
             if nn.as_scalar(loss) != 0.0:
                 grad = nn.gradients(loss,
                                     [self.w1, self.w2, self.w3, self.w_h])
                 self.w1.update(grad[0], -self.learning_rate)
                 self.w2.update(grad[1], -self.learning_rate)
                 self.w3.update(grad[2], -self.learning_rate)
                 self.w_h.update(grad[3], -self.learning_rate)
Example #6
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        batch_size = int(self.batch_size_ratio * dataset.x.shape[0])
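        # grow batch_size until it divides the dataset evenly; the iterator presumably requires an exact split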
        while len(dataset.x) % batch_size != 0:
            batch_size += 1
        while True:
            for x, y in dataset.iterate_once(batch_size):

                loss = self.get_loss(x, y)
                params = self.get_parameters()
                gradients = nn.gradients(loss, params)
                for i in range(len(params)):
                    param = params[i]
                    param.update(gradients[i], -self.learning_rate)
            if dataset.get_validation_accuracy() > self.threshold:
                break
Example #7
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     learning_rates = [-0.0062, -0.0061, -0.0051, -0.0063]
     while True:
         for x, y in dataset.iterate_once(1):
             params = [
                 self.first_weight, self.second_weight, self.bias_1,
                 self.bias_2
             ]
             gradient = nn.gradients(self.get_loss(x, y), params)
             self.first_weight.update(gradient[0], learning_rates[0])
             self.second_weight.update(gradient[1], learning_rates[1])
             self.bias_1.update(gradient[2], learning_rates[2])
             self.bias_2.update(gradient[3], learning_rates[3])
         if dataset.get_validation_accuracy() > .97:
             return
Example #8
 def train(self, dataset):
     """
     Trains the model.
     """
     avg_loss = 1
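     # run whole epochs until the mean per-example loss falls below loss_margin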
     while avg_loss > self.loss_margin:
         sum_loss = 0
         count = 0
         for x, y in dataset.iterate_once(1):
             loss = self.get_loss(x, y)
             grad_wrt_w1, grad_wrt_b1, grad_wrt_w2, grad_wrt_b2 = nn.gradients(
                 loss, [self.w1, self.b1, self.w2, self.b2])
             self.w1.update(grad_wrt_w1, -self.alpha)
             self.b1.update(grad_wrt_b1, -self.alpha)
             self.w2.update(grad_wrt_w2, -self.alpha)
             self.b2.update(grad_wrt_b2, -self.alpha)
             sum_loss += nn.as_scalar(loss)
             count += 1
         avg_loss = sum_loss / count
Example #9
    def train(self, dataset):
        """
        Trains the model.
        """

        multiplier = -0.01
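        # get_large is presumably a helper defined elsewhere in this module that picks a large batch size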
        batch_size = get_large(dataset.x.shape[0])
        loss_int = 1
        while loss_int > 0.0001:
            for x, y in dataset.iterate_once(batch_size):
                loss = self.get_loss(x, y)
                grad_wrt_w1, grad_wrt_b1, grad_wrt_w2, grad_wrt_b2 = nn.gradients(
                    loss, [self.w1, self.b1, self.w2, self.b2])
                self.w1.update(grad_wrt_w1, multiplier)
                self.b1.update(grad_wrt_b1, multiplier)
                self.w2.update(grad_wrt_w2, multiplier)
                self.b2.update(grad_wrt_b2, multiplier)
                new_loss = self.get_loss(x, y)
                loss_int = nn.as_scalar(new_loss)
Example #10
 def train(self, dataset):
     """
     Trains the model.
     """
     count = 0
     for x, y in dataset.iterate_forever(batch_size=5):
         loss = self.get_loss(x, y)
         # check validation accuracy only every 100 batches to keep training fast
         if count % 100 == 0:
             if dataset.get_validation_accuracy() > 0.81:
                 break
         # reuse the loss node computed above rather than rebuilding it
         gradient_list = nn.gradients(
             loss, [self.weights, self.bias, self.weights2, self.bias2])
         self.weights.update(gradient_list[0], -self.learning_rate)
         self.bias.update(gradient_list[1], -self.learning_rate)
         self.weights2.update(gradient_list[2], -self.learning_rate)
         self.bias2.update(gradient_list[3], -self.learning_rate)
         count += 1
Example #11
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        i = 0
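        # validation accuracy is checked every m batches; m shrinks near the target (see below)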
        m = 10000
        for x, y_true in dataset.iterate_forever(self.batch_size):
            loss = self.get_loss(x, y_true)

            parameters = self.weights + self.biases
            a = self.learning_rate

            gradients = nn.gradients(loss, parameters)

            for gradient, parameter in zip(gradients, parameters):
                parameter.update(gradient, -a)

            i += 1
            if i % m == 0:
                acc = dataset.get_validation_accuracy()
                # decay the learning rate as validation accuracy climbs
                if acc > 0.94:
                    self.learning_rate = 0.2
                if acc > 0.95:
                    self.learning_rate = 0.1
                if acc > 0.96:
                    self.learning_rate = 0.005
                # close to the target, check accuracy far more often
                if acc > 0.969:
                    m = 10
                if acc >= 0.97:
                    return
Example #12
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     validation_accuracy = 0
     while validation_accuracy < 0.975:
         for x, y in dataset.iterate_once(self.batch_size):
             curr_loss = self.get_loss(x, y)
             parameters = []
             for i in range(self.layer_num):
                 parameters.append(self.network_weights[i])
                 parameters.append(self.network_biases[i])
             grad = nn.gradients(curr_loss, parameters)
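             # update() adds learning_rate * grad, so self.learning_rate is presumably stored negative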
             for i in range(self.layer_num):
                 self.network_weights[i].update(grad[2 * i],
                                                self.learning_rate)
                 self.network_biases[i].update(grad[2 * i + 1],
                                               self.learning_rate)
         validation_accuracy = dataset.get_validation_accuracy()
Example #13
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        #update weights and biases to minimize your cost function (given by get_loss)
        for x, y in dataset.iterate_forever(self.batch_size):
            loss_node = self.get_loss(x, y)
            grad_wrt_w1, grad_wrt_w2, grad_wrt_b1, grad_wrt_b2 = nn.gradients(
                loss_node, [self.w1, self.w2, self.b1, self.b2])
            self.w1.update(grad_wrt_w1, self.multiplier)
            self.b1.update(grad_wrt_b1, self.multiplier)
            self.w2.update(grad_wrt_w2, self.multiplier)
            self.b2.update(grad_wrt_b2, self.multiplier)

            if nn.as_scalar(
                    self.get_loss(nn.Constant(dataset.x), nn.Constant(
                        dataset.y))) < 0.02:
                return
Example #14
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     batch_size = self.batch_size
     inv_learning = -self.learning_rate
     while True:
         for x, y in dataset.iterate_once(batch_size):
             grad = nn.gradients(
                 self.get_loss(x, y),
                 [self.weight1, self.weight2, self.bias1, self.bias2])
             self.weight1.update(grad[0], inv_learning)
             self.weight2.update(grad[1], inv_learning)
             self.bias1.update(grad[2], inv_learning)
             self.bias2.update(grad[3], inv_learning)
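         # x, y still refer to the epoch's final batch, so only that batch's loss is tested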
         if nn.as_scalar(self.get_loss(x, y)) < 0.02:
             break
Example #15
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     alpha = 0.15
     while dataset.get_validation_accuracy() < 0.85:
         for xs, y in dataset.iterate_once(100):
             loss = self.get_loss(xs, y)
             grad = nn.gradients(loss, [
                 self.W, self.b, self.W_hidden, self.b_hidden, self.W_end,
                 self.b_end
             ])
             self.W.update(grad[0], -alpha)
             self.b.update(grad[1], -alpha)
             self.W_hidden.update(grad[2], -alpha)
             self.b_hidden.update(grad[3], -alpha)
             self.W_end.update(grad[4], -alpha)
             self.b_end.update(grad[5], -alpha)
Example #16
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"

        accuracy = 0

        paramlist = [
            self.weight1, self.b1, self.weight2, self.b2, self.weight3, self.b3
        ]

        while accuracy < .975:
            for t1, t2 in dataset.iterate_once(50):
                currloss = self.get_loss(t1, t2)
                gradient = nn.gradients(currloss, paramlist)

                for i in range(6):
                    paramlist[i].update(gradient[i], -self.learningrate)
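                # validation accuracy is re-checked after every batch, which is slow but simple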
                accuracy = dataset.get_validation_accuracy()
Example #17
    def train(self, dataset):
        """
        Trains the model.
        """
        for x, y in dataset.iterate_forever(self.batch_size):
            loss = self.get_loss(x, y)
            multiplier = -self.learning_rate
            if dataset.get_validation_accuracy() < 0.975:
                grad_wrt_w1, grad_wrt_w2, grad_wrt_b1, grad_wrt_b2 = nn.gradients(
                    loss,
                    [self.weights1, self.weights2, self.bias1, self.bias2])

                self.weights1.update(grad_wrt_w1, multiplier)
                self.bias1.update(grad_wrt_b1, multiplier)

                self.weights2.update(grad_wrt_w2, multiplier)
                self.bias2.update(grad_wrt_b2, multiplier)
            else:
                return
Example #18
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"

        while True:

            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                grad = nn.gradients(loss, [self.w, self.wh, self.wf])

                self.w.update(grad[0], -0.005)
                self.wh.update(grad[1], -0.005)
                self.wf.update(grad[2], -0.005)

            accuracy = dataset.get_validation_accuracy()
            print(accuracy)
            if accuracy >= 0.86:
                return
Example #19
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     # params to update
     params = [self.w, self.w_hidden, self.w_final]
     multiplier = self.learning_rate * -1
     # stop when validation accuracy reaches 0.86 for the autograder
     while dataset.get_validation_accuracy() < 0.86:
         # retrieve batches of training examples
         for n, m in dataset.iterate_once(self.batch_size):
             # construct loss node
             getLoss = self.get_loss(n, m)
             # gradients of the loss with respect to the parameters
             gradi = nn.gradients(getLoss, params)
             # update our parameters
             self.w.update(gradi[0], multiplier)
             self.w_hidden.update(gradi[1], multiplier)
             self.w_final.update(gradi[2], multiplier)
Example #20
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        while True:

            for x, y in dataset.iterate_once(1):
                gradient = nn.gradients(self.get_loss(x, y),
                                        [self.w1, self.w2, self.b1, self.b2])

                self.w1.update(gradient[0], -0.01)
                self.w2.update(gradient[1], -0.01)
                self.b1.update(gradient[2], -0.01)
                self.b2.update(gradient[3], -0.01)

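            # after each epoch, stop once the loss over the entire training set drops below 0.02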
            if nn.as_scalar(
                    self.get_loss(nn.Constant(dataset.x), nn.Constant(
                        dataset.y))) < 0.02:
                return
Example #21
 def train(self, dataset):
     """
     Trains the model.
     """
     while True:
         for x, y in dataset.iterate_once(self.batch_size):
             loss = self.get_loss(x, y)
             gradient = nn.gradients(loss, self.paramArray)
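             # gradient[i] follows the order of self.paramArray; self.lr presumably encodes a negative step size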
             self.w0.update(gradient[0], self.lr)
             self.w1.update(gradient[1], self.lr)
             self.w2.update(gradient[2], self.lr)
             self.b0.update(gradient[3], self.lr)
             self.b1.update(gradient[4], self.lr)
             self.b2.update(gradient[5], self.lr)
         if nn.as_scalar(
                 self.get_loss(nn.Constant(dataset.x), nn.Constant(
                     dataset.y))) <= .02:
             break
Example #22
    def train(self, dataset):
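        # assumes module-level imports: from functools import partial; from itertools import chain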
        try:
            dataset.get_validation_accuracy()
        except Exception:

            def get_accuracy(s):
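                # no separate validation set: approximate accuracy as 1 minus the mean per-batch loss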
                sample = list(
                    map(lambda p: nn.as_scalar(self.get_loss(*p)),
                        s.iterate_once(self.batchSize)))
                return 1 - sum(sample) / len(sample)

            dataset.get_validation_accuracy = partial(get_accuracy, dataset)

        while dataset.get_validation_accuracy() < self.targetAccuracy:
            for x, y in dataset.iterate_once(self.batchSize):
                gradients = nn.gradients(self.get_loss(x, y),
                                         self.weight + self.bias)
                for param, grad in zip(chain(self.weight, self.bias),
                                       gradients):
                    param.update(grad, -self.learningRate)
Example #23
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        while True:
            for x, y in dataset.iterate_once(1):
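                # computePolyFeatures is presumably a model helper that expands x into polynomial features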
                loss = self.get_loss(self.computePolyFeatures(x), y)
                gradients = nn.gradients(loss, [self.weight, self.bias])
                self.weight.update(gradients[0], -self.alpha)
                self.bias.update(gradients[1], -self.alpha)

            # measure the loss over the full dataset once per epoch
            total_loss = nn.as_scalar(
                self.get_loss(nn.Constant(dataset.x), nn.Constant(dataset.y)))
            print(total_loss)
            if total_loss < 0.15:
                return
Example #24
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        flag = True
        while flag:
            for x, y in dataset.iterate_once(self.batch_size):
                grad_wrt_m1, grad_wrt_b1, grad_wrt_m2, grad_wrt_b2 = nn.gradients(
                    self.get_loss(x, y), [self.m1, self.b1, self.m2, self.b2])

                self.m1.update(grad_wrt_m1, -self.alpha)
                self.b1.update(grad_wrt_b1, -self.alpha)
                self.m2.update(grad_wrt_m2, -self.alpha)
                self.b2.update(grad_wrt_b2, -self.alpha)

            if nn.as_scalar(
                    self.get_loss(nn.Constant(dataset.x), nn.Constant(
                        dataset.y))) < 0.02:
                flag = False
Example #25
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        validation_accuracy = dataset.get_validation_accuracy()
        while validation_accuracy <= .975:
            print(validation_accuracy)
            for x, y in dataset.iterate_once(1):
                loss = self.get_loss(x, y)
                grad_wrt_w1, grad_wrt_b1, grad_wrt_w2, grad_wrt_b2 = nn.gradients(
                    loss, [self.w1, self.b1, self.w2, self.b2])
                self.w1.update(grad_wrt_w1, -0.005)
                self.b1.update(grad_wrt_b1, -0.005)
                self.w2.update(grad_wrt_w2, -0.005)
                self.b2.update(grad_wrt_b2, -0.005)

            validation_accuracy = dataset.get_validation_accuracy()
Example #26
    def train(self, dataset):
        """
		Trains the model.
		"""
        omega = float("inf")

        while omega > 0.019:
            i = 0
            omega = 0
            for x, y in dataset.iterate_once(1):
                # build the loss node once and reuse it for both the running
                # total and the gradient computation
                loss = self.get_loss(x, y)
                omega += nn.as_scalar(loss)
                gradients = nn.gradients(loss, [self.w1, self.w2, self.w3])
                # self.learning_rate is presumably negative, making this a descent step
                self.w1.update(gradients[0], self.learning_rate)
                self.w2.update(gradients[1], self.learning_rate)
                self.w3.update(gradients[2], self.learning_rate)
                i += 1

            # omega is now the mean per-example loss for the epoch
            omega = omega / i
Example #27
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        adjustable_rate = -0.09
        while True:

            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                gradients = nn.gradients(
                    loss, [self.w, self.w_hidden, self.output_w])
                learning_rate = min(-0.004, adjustable_rate)

                self.w.update(gradients[0], learning_rate)
                self.w_hidden.update(gradients[1], learning_rate)
                self.output_w.update(gradients[2], learning_rate)

            adjustable_rate += 0.002
            if dataset.get_validation_accuracy() >= 0.89:
                return
Example #28
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        while True:
            for x, y in dataset.iterate_once(self.batch_size):
                gradient = nn.gradients(
                    self.get_loss(x, y),
                    [self.weight0, self.weight1, self.bias0, self.bias1])
                self.weight0.update(gradient[0], -0.01)
                self.weight1.update(gradient[1], -0.01)
                self.bias0.update(gradient[2], -0.01)
                self.bias1.update(gradient[3], -0.01)

            if nn.as_scalar(
                    self.get_loss(nn.Constant(dataset.x), nn.Constant(
                        dataset.y))) < 0.02:
                print("done2")
                return
Example #29
    def train(self, dataset):
        """
        Trains the model.
        """
        "*** YOUR CODE HERE ***"
        # flag1/flag2 make sure each learning-rate change below fires only once
        flag1 = True
        flag2 = True
        while True:
            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                grad = nn.gradients(loss, [
                    self.w1, self.b1, self.w2, self.b2, self.w3, self.b3,
                    self.w4, self.b4
                ])

                self.w1.update(grad[0], -self.rate)
                self.b1.update(grad[1], -self.rate)
                self.w2.update(grad[2], -self.rate)
                self.b2.update(grad[3], -self.rate)
                self.w3.update(grad[4], -self.rate)
                self.b3.update(grad[5], -self.rate)
                self.w4.update(grad[6], -self.rate)
                self.b4.update(grad[7], -self.rate)

            # as accuracy improves, lower the learning rate and grow the batch
            # size to fine-tune without overshooting
            if dataset.get_validation_accuracy() > 0.96 and flag1:
                self.rate = 0.005
                self.batch_size = 15
                flag1 = False
            if dataset.get_validation_accuracy() > 0.97 and flag2:
                self.rate = 0.001
                self.batch_size = 20
                flag2 = False
            if dataset.get_validation_accuracy() > 0.973:
                return
Example #30
 def train(self, dataset):
     """
     Trains the model.
     """
     "*** YOUR CODE HERE ***"
     batch_size = 10
     check = False
     while True:
         for xs, y in dataset.iterate_once(batch_size):
             loss = self.get_loss(xs, y)
             grad_w1, grad_w2, grad_w3, grad_b1, grad_b2 = nn.gradients(
                 loss, [self.w1, self.w2, self.w3, self.b1, self.b2])
             self.w1.update(grad_w1, self.learning_rate)
             self.w2.update(grad_w2, self.learning_rate)
             self.w3.update(grad_w3, self.learning_rate)
             self.b1.update(grad_b1, self.learning_rate)
             self.b2.update(grad_b2, self.learning_rate)
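             # `check` is armed only after an epoch ends above 0.81 validation accuracy (see below)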
             if check and dataset.get_validation_accuracy() > 0.84:
                 return
         if dataset.get_validation_accuracy() > 0.81:
             check = True