Exemplo n.º 1
0
    def learn(self, Xtrain, ytrain):
        """Batch gradient descent with backtracking line search (Algorithm 2).

        Records the weight vector and elapsed wall-clock time (ms) at every
        iteration (up to 1000) in self.weightArr / self.concat for later
        inspection. Converges when successive training errors differ by
        less than the tolerance.
        """
        start_ms = int(datetime.datetime.now(tz=pytz.utc).timestamp() * 1000)
        numsamples = Xtrain.shape[0]
        featuresNum = Xtrain.shape[1]
        self.weights = np.random.rand(featuresNum)
        err = float('inf')  # previous error; inf forces at least one iteration
        tolerance = 0.00001
        Xless = Xtrain[:, self.params['features']]
        errRange = scrReg.geterror(np.dot(Xless, self.weights), ytrain)
        maxIterCounter = 0
        # Pre-allocate history buffers for up to 1000 iterations.
        self.weightArr = [[0 for _ in range(featuresNum)] for _ in range(1000)]
        self.weightTimeArr = [[0 for _ in range(featuresNum)] for _ in range(1000)]
        self.concat = []
        while abs(errRange - err) > tolerance:
            err = errRange
            # Gradient of mean squared error: X^T (Xw - y) / n
            g = np.divide(np.dot(Xless.T, np.subtract(np.dot(Xless, self.weights), ytrain)), numsamples)
            # lineSearch updates self.weights in place when it finds a better point.
            self.lineSearch(self.weights, errRange, g, Xless, ytrain, numsamples)
            errRange = scrReg.geterror(np.dot(Xless, self.weights), ytrain)
            if maxIterCounter < 1000:
                self.weightArr[maxIterCounter] = self.weights
                now_ms = int(datetime.datetime.now(tz=pytz.utc).timestamp() * 1000)
                self.concat.append(now_ms - start_ms)
                maxIterCounter += 1

        if maxIterCounter < 1000:
            # Bug fix: the original `del self.weightArr[maxIterCounter - 1: -1]`
            # kept the final pre-allocated zero row while dropping a recorded
            # entry; trim everything past the last recorded iteration instead.
            del self.weightArr[maxIterCounter:]
Exemplo n.º 2
0
    def learn(self, Xtrain, ytrain):
        """Stochastic linear regression trained with Adam.

        Bug fix: the first/second moment accumulators (m, v) were never
        updated after being initialized to 0, and the bias correction
        divided by (1 - beta) instead of (1 - beta**t), so every update
        behaved like the very first step.
        """
        self.weights = np.random.randn(Xtrain.shape[1])
        self.noofruns = 5
        self.error = np.zeros(1000)
        n = Xtrain.shape[0]
        stepsize = 0.01
        beta1 = 0.9
        beta2 = 0.999
        m = 0  # first-moment (mean) accumulator
        v = 0  # second-moment (uncentered variance) accumulator
        step = 0  # global timestep for bias correction

        for t in range(self.params["iteration"]):
            for j in range(n):
                step += 1
                prediction = Xtrain[j].T.dot(self.weights)
                gradient = (prediction - ytrain[j]) * Xtrain[j]
                m = beta1 * m + (1 - beta1) * gradient
                v = beta2 * v + (1 - beta2) * np.power(gradient, 2)
                # Bias-corrected moment estimates (Kingma & Ba, 2015).
                mhat = m / (1 - beta1 ** step)
                vhat = v / (1 - beta2 ** step)
                self.weights = self.weights - stepsize * mhat / (
                    np.sqrt(vhat) + 10e-8)
                self.error[t] += error.geterror(np.dot(Xtrain, self.weights),
                                                ytrain)
Exemplo n.º 3
0
    def learn(self, Xtrain, ytrain):
        """Batch gradient descent with a backtracking line search.

        Iterates until the training error changes by less than the
        configured tolerance between successive steps.
        """
        numsamples = Xtrain.shape[0]
        self.weights = np.random.rand(Xtrain.shape[1]) * self.params['regwgt']
        previous = np.Infinity  # forces at least one iteration
        tolerance = self.params['tolerance']
        stepsize = self.params['stepsize']

        current = error.geterror(Xtrain.dot(self.weights), ytrain)
        while abs(current - previous) > tolerance:
            previous = current
            # Mean-squared-error gradient: X^T (Xw - y) / n
            residual = np.dot(Xtrain, self.weights) - ytrain
            gradient = Xtrain.T.dot(residual) / numsamples
            # Pick a stepsize via line search before taking the step.
            stepsize = self.lsearch(Xtrain, ytrain, self.weights, gradient, current)
            self.weights = self.weights - stepsize * gradient
            current = error.geterror(np.dot(Xtrain, self.weights), ytrain)
Exemplo n.º 4
0
    def learn(self, Xtrain, ytrain):
        """Proximal gradient descent for regularized linear regression.

        Precomputes the Gram matrix once, takes a fixed gradient step on
        the smooth squared-error part, then applies the proximal operator
        for the regularizer, until the error stops changing.
        """
        n = Xtrain.shape[0]
        regwgt = self.params['regwgt']
        self.weights = np.zeros(Xtrain.shape[1])
        prev_cost = np.Infinity
        tolerance = self.params['tolerance']
        # Hoist the expensive products out of the loop: they never change.
        XX = Xtrain.T.dot(Xtrain) / n
        Xy = Xtrain.T.dot(ytrain) / n
        # Fixed stepsize small enough for convergence on this quadratic.
        stepsize = 1 / (2 * np.linalg.norm(XX))

        cost = error.geterror(Xtrain.dot(self.weights), ytrain)
        while abs(cost - prev_cost) > tolerance:
            prev_cost = cost
            # Gradient step on the smooth part: w - eta*XX*w + eta*Xy
            gradient_step = self.weights - stepsize * np.dot(XX, self.weights)
            gradient_step = gradient_step + stepsize * Xy
            # Proximal map handles the non-smooth regularizer.
            self.weights = self.prox(gradient_step, stepsize, regwgt)
            cost = error.geterror(Xtrain.dot(self.weights), ytrain)
Exemplo n.º 5
0
    def learn(self, Xtrain, ytrain):
        """Stochastic gradient descent with a 1/(1+epoch) decaying stepsize.

        Bug fix: the original accumulated into self.error without ever
        initializing it in this method (raising AttributeError unless some
        other learner had set it first); it is now created on demand,
        matching the sibling learners that zero-initialize their traces.
        """
        n = Xtrain.shape[0]
        self.weights = np.random.rand(Xtrain.shape[1]) * self.params['regwgt']
        no = self.params["stepsize"]  # initial stepsize eta_0
        # Create the per-epoch error trace if it does not exist yet
        # (guarded so repeated-run accumulation elsewhere still works).
        if not hasattr(self, 'error'):
            self.error = np.zeros(self.params["epochs"])

        for i in range(self.params["epochs"]):
            nt = no / (1 + i)  # decayed stepsize; constant within an epoch
            for j in range(n):
                prediction = Xtrain[j].T.dot(self.weights)
                gradient = (prediction - ytrain[j]) * Xtrain[j]
                self.weights = np.subtract(self.weights, nt * gradient)

            self.error[i] += error.geterror(np.dot(Xtrain, self.weights),
                                            ytrain)
Exemplo n.º 6
0
 def lineSearch(self, w, cw, g, Xless, ytrain, numsamples):
     """Backtracking line search: shrink the step until the error improves.

     Updates self.weights in place with the first candidate that beats the
     current error `cw` by more than the tolerance; leaves self.weights
     untouched if no improving step is found within 100 shrinkages.

     Bug fix: the original printed "Could not improve the solution"
     whenever the loop counter reached 100, even when the 100th trial
     step actually succeeded; an explicit success flag removes that
     false negative.
     """
     shrink = 0.7          # multiplicative backoff factor (tau)
     tolerance = 0.000001  # required decrease before accepting a step
     eta = 1               # initial (largest) stepsize
     improved = False
     for _ in range(100):
         candidate = w - eta * g
         candidate_err = scrReg.geterror(np.dot(Xless, candidate), ytrain)
         if candidate_err < cw - tolerance:
             self.weights = candidate
             improved = True
             break
         eta = shrink * eta
     if not improved:
         print("Could not improve the solution")
Exemplo n.º 7
0
    def lsearch(self, Xtrain, ytrain, weight, gradient, cw):
        """Backtracking line search; returns an improving stepsize or 0.

        Starts at stepsize 1.0 and halves it until the candidate weights
        reduce the error `cw` by more than the tolerance; returns 0 when
        no improving step is found within maxiteration trials (so the
        caller takes no step).

        Bug fixes vs. the original: the candidate's error is evaluated
        BEFORE the accept/shrink decision (the original shrank the
        stepsize before evaluating, so the returned stepsize was one
        halving smaller than the one actually tested), the loop counter
        is no longer incremented both manually and by the for-loop, and
        the duplicated return statement is gone.
        """
        shrink = 0.5
        tolerance = self.params['tolerance']
        stepsize = 1.0
        obj = cw  # error at the current weights; an accepted step must beat it
        for _ in range(self.params['maxiteration']):
            candidate = self.weights - stepsize * gradient
            cost = error.geterror(np.dot(Xtrain, candidate), ytrain)
            if cost < obj - tolerance:
                return stepsize
            stepsize = shrink * stepsize
        # No improving step found: signal the caller not to move.
        return 0
Exemplo n.º 8
0
    def learn(self, Xtrain, ytrain):
        """Stochastic gradient descent with bias-corrected momentum.

        Bug fix: the momentum accumulator v was never updated (it stayed
        0), and the bias correction divided by (1 - beta) — so
        vhat = (1 - beta) * gradient / (1 - beta) reconstructed the raw
        gradient exactly and the method degenerated to plain SGD. The
        accumulator is now carried across steps and corrected by
        (1 - beta**t).
        """
        self.weights = np.random.randn(Xtrain.shape[1])
        self.noofruns = 5
        self.error = np.zeros(1000)
        n = Xtrain.shape[0]
        stepsize = 0.01
        beta = 0.9
        v = 0  # exponential moving average of the gradient
        step = 0  # global timestep for bias correction

        for t in range(self.params["iteration"]):
            for j in range(n):
                step += 1
                prediction = Xtrain[j].T.dot(self.weights)
                gradient = (prediction - ytrain[j]) * Xtrain[j]
                v = beta * v + (1 - beta) * gradient
                # Bias-corrected momentum estimate.
                vhat = v / (1 - beta ** step)
                self.weights = self.weights - stepsize * vhat
                self.error[t] += error.geterror(np.dot(Xtrain, self.weights),
                                                ytrain)