Code example #1
    def learn(self, Xtrain, ytrain):
        # Batch gradient ascent on the logistic log-likelihood.
        # (A Newton/IRLS alternative would instead use
        #  w = w + 0.1*inv(X' * diag(p .* (1 - p)) * X) * X' * (y - p)
        #  together with a log-likelihood convergence check.)
        self.weights = np.ones(Xtrain.shape[1])

        # w <- w + step_size * X.T * (y - p)
        for i in range(500):
            p = utils.sigmoid(np.dot(Xtrain, self.weights))  #(500*9) * (9*1) = 500*1
            self.weights = self.weights + self.step_size * np.dot(Xtrain.T, (ytrain - p))  #(500*9) * (500*1) = (9*1)
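Almost every snippet in this listing calls a `utils.sigmoid` helper that is not shown here. A minimal, numerically stabilized sketch of such a helper (an assumption, not the actual utility from any of these projects) would be:

import numpy as np

def sigmoid(xvec):
    # Elementwise logistic function 1 / (1 + exp(-x)); clipping the input
    # keeps np.exp from overflowing for very negative scores.
    xvec = np.clip(xvec, -100, 100)
    return 1.0 / (1.0 + np.exp(-xvec))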
Code example #2
    def forward(self, x_t: np.float64):
        self.t += 1
        t = self.t
        h = self.h[t-1]

        self.input_gate[t] = sigmoid(np.dot(self.W_hi, h) +
                                     np.dot(self.W_xi, x_t) + self.b_i)

        # forget gate: the recurrent term should use its own weight matrix
        # (W_hf is assumed to be defined alongside W_xf and b_f, rather than reusing W_hi)
        self.forget_gate[t] = sigmoid(np.dot(self.W_hf, h) +
                                      np.dot(self.W_xf, x_t) + self.b_f)

        self.output_gate[t] = sigmoid(np.dot(self.W_ho, h) +
                                      np.dot(self.W_xo, x_t) + self.b_o)

        self.cell_update[t] = tanh(np.dot(self.W_hj, h) +
                                   np.dot(self.W_xj, x_t) + self.b_j)

        self.c[t] = self.input_gate[t] * self.cell_update[t] + \
            self.forget_gate[t] * self.c[t-1]
        self.ct[t] = tanh(self.c[t])
        self.h[t] = self.output_gate[t] * self.ct[t]

        self.x[t] = x_t

        return self.h[t]
    def learn(self, Xtrain, ytrain):
       """ Learns using the traindata """

       # Initialize weights with the linear regression (ordinary least squares) solution
       #Xless = Xtrain[:,self.params['features']]
       weights = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T,Xtrain)), Xtrain.T),ytrain)



       # Newton-Raphson (IRLS): w <- w - H^{-1} * grad, repeated until the
       # residual norm stops changing by more than the tolerance
       p = utils.sigmoid(np.dot(Xtrain, weights))
       tolerance = 0.1
       error = float('inf')
       err = np.linalg.norm(np.subtract(ytrain, p))
       while np.abs(error - err) > tolerance:
           P = np.diag(p)
           
           I = np.identity(P.shape[0])
           # Hessian of the log-likelihood: -X^T P (I - P) X
           Hess_inv = -np.linalg.inv(np.dot(np.dot(Xtrain.T, np.dot(P, (I - P))), Xtrain))
           # Gradient of the log-likelihood: X^T (y - p)
           First_Grad = np.dot(Xtrain.T, np.subtract(ytrain, p))
           weights= weights - (np.dot(Hess_inv, First_Grad))
           p = utils.sigmoid(np.dot(Xtrain, weights))

           # track successive residual norms for the convergence test
           error = err
           err = np.linalg.norm(np.subtract(ytrain, p))

       self.weights = weights
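Several of these snippets (for example #2, #8, #14 and #15) are variants of Newton's method, also known as iteratively reweighted least squares (IRLS), for logistic regression. A compact, self-contained sketch of the same update, assuming 0/1 labels (a hypothetical helper, not taken from any of the classes above):

import numpy as np

def irls_logistic(X, y, max_iter=20, tol=1e-6):
    # Newton-Raphson / IRLS for logistic regression with 0/1 labels.
    w = np.zeros(X.shape[1])
    for _ in range(max_iter):
        p = 1.0 / (1.0 + np.exp(-X.dot(w)))    # predicted probabilities
        grad = X.T.dot(y - p)                  # gradient of the log-likelihood
        weights = p * (1.0 - p)                # diagonal of P(I - P)
        hessian = -(X.T * weights).dot(X)      # Hessian of the log-likelihood
        step = np.linalg.solve(hessian, grad)  # H^{-1} grad
        w = w - step                           # Newton update
        if np.linalg.norm(step) < tol:
            break
    return w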
Code example #4
    def learn(self, Xtrain, ytrain):
        # Batch gradient ascent on the logistic log-likelihood.
        # (A Newton/IRLS alternative would instead use
        #  w = w + 0.1*inv(X' * diag(p .* (1 - p)) * X) * X' * (y - p)
        #  together with a log-likelihood convergence check.)
        self.weights = np.ones(Xtrain.shape[1])

        # w <- w + step_size * X.T * (y - p)
        for i in range(500):
            p = utils.sigmoid(np.dot(Xtrain, self.weights))  #(500*9) * (9*1) = 500*1
            self.weights = self.weights + self.step_size * np.dot(Xtrain.T, (ytrain - p))  #(500*9) * (500*1) = (9*1)
Code example #5
    def learn(self, X, y):
        w = np.array([randrange(-10, 10) for i in range(0, X.shape[1])])

        epoch_error = []
        alpha = .1
        #self.regwgt = self.params['regwgt']
        for epoch in range(0, 500):

            small_p = utils.sigmoid(np.dot(X, w))
            err_old = geterror(
                [1 if x > .5 else 0 for x in utils.sigmoid(np.dot(X, w))], y)
            if self.regularizer == 'basic':
                w = w - alpha * (X.T.dot(np.subtract(small_p, y)))
            elif self.regularizer == 'l2':
                w = w - alpha * (X.T.dot(np.subtract(small_p, y)) +
                                 2 * self.regwgt * w)
            elif self.regularizer == 'l1':
                w = self.prox_func(
                    w - alpha * (X.T.dot(np.subtract(small_p, y))),
                    self.regwgt)
            elif self.regularizer == 'elasticNet':
                w = self.prox_func(
                    w - alpha * (X.T.dot(np.subtract(small_p, y)) +
                                 ((self.lmbda1 * (1 - self.lmbda2))) * w),
                    self.lmbda1 * self.lmbda2)

            err_new = geterror(
                [1 if x > .5 else 0 for x in utils.sigmoid(np.dot(X, w))], y)
            if err_new > err_old:
                alpha = alpha * .1

            epoch_error.append(err_new)
        self.weights = w
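Example #5 calls a `self.prox_func` in its l1 and elasticNet branches. The standard choice is the soft-thresholding operator, i.e. the proximal operator of the L1 penalty; a minimal sketch of that assumed helper:

import numpy as np

def soft_threshold(w, lam):
    # Proximal operator of lam * ||w||_1: shrink each weight toward zero by lam.
    return np.sign(w) * np.maximum(np.abs(w) - lam, 0.0)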
Code example #6
 def _costFunction(self, Xtrain, Ytrain, tempWeights):
     #print Xtrain.shape,tempWeights.shape
     #print np.dot(Xtrain,tempWeights).shape
     return np.sum(
         (-Ytrain) * np.log(utils.sigmoid(np.dot(Xtrain, tempWeights)))
         - (1 - Ytrain) * np.log(1 - utils.sigmoid(np.dot(Xtrain, tempWeights)))
     ) / Xtrain.shape[0]
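The cost in example #6 returns NaN or inf whenever the sigmoid saturates to exactly 0 or 1. A common safeguard, shown here as an assumption rather than as part of the original code, is to clip the probabilities before taking logarithms:

import numpy as np

def cross_entropy_cost(Xtrain, Ytrain, weights, eps=1e-12):
    # Mean negative log-likelihood for 0/1 labels, clipped for numerical stability.
    p = 1.0 / (1.0 + np.exp(-np.dot(Xtrain, weights)))
    p = np.clip(p, eps, 1.0 - eps)
    return np.mean(-Ytrain * np.log(p) - (1.0 - Ytrain) * np.log(1.0 - p))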
Code example #7
 def runNN(self, inputs):
   assert len(inputs) == NN.ni, 'incorrect number of inputs'
   self.ai = np.tanh(inputs)
   for j in range(NN.nh):
       self.ah[j] = sigmoid(sum([self.ai[i] * self.wi[i][j] for i in range(NN.ni)]))
   for k in range(NN.no):
       self.ao[k] = sigmoid(sum([self.ah[j] * self.wo[j][k] for j in range(NN.nh)]))
   return self.ao
Code example #8
    def learn(self, Xtrain, ytrain):
        Xshape = Xtrain.shape
        regwgt = self.params['regwgt']
        w = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T, Xtrain)), Xtrain.T),
                   ytrain)
        err = float('INF')
        p = utils.sigmoid(np.dot(Xtrain, w))
        tolerance = 0.1
        numsamples = Xshape[0]
        XX_n = np.dot(Xtrain.T, Xtrain) / numsamples
        eeta = 1 / np.dot(2, np.linalg.norm((XX_n)))
        stepsize = 0.1
        while True:
            P = np.diag(p)
            I = np.identity(p.shape[0])

            # weight update based on type of regularizer selected
            if self.params['regularizer'] == 'l1':
                gradient = np.dot(Xtrain.T, np.subtract(ytrain, p))
                hessian_inv = -np.linalg.inv(
                    np.dot(np.dot(Xtrain.T, np.dot(P, (I - P))), Xtrain))
                w = np.subtract(w, np.dot(hessian_inv, gradient))
                w = utils.proximalOperator(w, regwgt, eeta)

            elif self.params['regularizer'] == 'l2':
                gradient = np.dot(Xtrain.T, np.subtract(
                    ytrain, p)) + regwgt * self.regularizer[1](w)
                hessian_inv = -np.linalg.inv(
                    np.dot(np.dot(Xtrain.T, np.dot(P, (I - P))), Xtrain) +
                    regwgt)
                w = np.subtract(w, stepsize * np.dot(hessian_inv, gradient))

            elif self.params['regularizer'] == 'elastic':
                gradient = np.dot(Xtrain.T, np.subtract(
                    ytrain, p)) + regwgt * self.regularizer[1](w)
                hessian_inv = -np.linalg.inv(
                    np.dot(np.dot(Xtrain.T, np.dot(P, (I - P))), Xtrain) +
                    regwgt)
                w = np.subtract(w, stepsize * np.dot(hessian_inv, gradient))
                w = utils.proximalOperator(w, regwgt, eeta)

            else:
                gradient = np.dot(Xtrain.T, np.subtract(ytrain, p))
                hessian_inv = -np.linalg.inv(
                    np.dot(np.dot(Xtrain.T, np.dot(P, (I - P))), Xtrain))
                w = np.subtract(w, np.dot(hessian_inv, gradient))
            p = utils.sigmoid(np.dot(Xtrain, w))

            newerr = np.linalg.norm(np.subtract(ytrain, p))
            if abs(err - newerr) < tolerance:
                break
            elif newerr - err > 0:
                stepsize /= 10
            err = newerr

        self.weights = w

        return self.weights
Code example #9
    def predict(self, Xtest):
        ah = utils.sigmoid(np.dot(Xtest, self.wi.T))
        ah = np.hstack((np.ones((ah.shape[0], 1)), ah))

        ytest = utils.sigmoid(np.dot(ah, self.wo.T))
        ytest[ytest >= 0.5] = 1
        ytest[ytest < 0.5] = 0

        return ytest
Code example #10
    def predict(self, Xtest):
        hidden = utils.sigmoid(np.dot(Xtest, self.w_input.T))
        ytest = utils.sigmoid(np.dot(hidden, self.w_output.T))

        for i in range(len(ytest)):
            if ytest[i] <= 0.5:
                ytest[i] = 0
            else:
                ytest[i] = 1
        return ytest
Code example #11
 def update(self, inp, out):
     nput = np.array(inp).reshape(self.ni, self.no)
     h = utils.sigmoid(np.dot(self.wi, nput))
     yhat = utils.sigmoid(np.dot(self.wo, h))
     # y_i = np.array(out).reshape(1, 1)
     # if we fail, then the result is not accurate
     delta_i_first = -out / yhat + (1 - out) / (1 - yhat)
     delta_i = np.dot(np.dot(delta_i_first, yhat), 1 - yhat)
     # delta_i = np.dot(np.dot(self.dtransfer(np.dot(y_i, yhat)), yhat), 1 - yhat)
     self.wo = self.wo - self.stepsize * np.dot(delta_i, h.T)
     hadamard = self.wo.T * h * (1 - h)
     self.wi = self.wi - self.stepsize * np.dot(delta_i * hadamard, nput.T)
Code example #12
 def predict(self, Xtest):
     diffSigmoid = np.vectorize(lambda x: utils.dsigmoid(x))
     z1 = np.dot(self.wi, Xtest.T)  #hL*n
     a2 = np.array(
         [utils.sigmoid(z1[:, x]) for x in xrange(Xtest.shape[0])])  #n*hL
     a2 = np.insert(a2, 0, 1, axis=1)
     z2 = np.dot(self.wo, a2.T)  #k*n
     a3f = np.array(
         [utils.sigmoid(z2[:, x]) for x in xrange(Xtest.shape[0])])  #n*k
     ytest = np.array([
         1 if a3f[x, 1] > a3f[x, 0] else 0 for x in xrange(Xtest.shape[0])
     ])
     return ytest
Code example #13
 def predict(self, Xtest):
     ylayer1 = np.dot(Xtest, self.wi)
     hiddenOut = utils.sigmoid(ylayer1)
     ylayer2 = hiddenOut.dot(self.wo)
     ylayer2 = np.add(ylayer2, self.biasO)
     predicted = utils.sigmoid(ylayer2)
     ytest = np.zeros(Xtest.shape[0])
     for index in range(Xtest.shape[0]):
         if predicted[index, 0] > predicted[index, 1]:
             ytest[index] = np.float64(0)
         else:
             ytest[index] = np.float64(1)
     return ytest
Code example #14
    def learn(self, Xtrain, ytrain):
        """ Learns using the traindata """

        # Initialize weights with the linear regression (ordinary least squares) solution
        #Xless = Xtrain[:,self.params['features']]
        weights =  np.dot(np.dot(np.linalg.pinv(np.dot(Xtrain.T,Xtrain)), Xtrain.T),ytrain)
        
        
        numsamples = Xtrain.shape[0]
        numofEpochs = 10
        # for epoch in range(numofEpochs):
        #     p = np.random.permutation(numsamples)
        #     for i in range(Xtrain.shape[0]):
        #         #error =  np.dot(Xtrain,self.weights) - ytrain

        #         pone = self.probabilityOfOne(self.weights, Xtrain[i])
        #         # update weights
        #         prod = ytrain[p][i] - pone



        #         self.weights = self.weights - np.dot(np.dot(np.linalg.pinv( np.dot( (Xtrain[p] * pone) , np.dot( (np.identity(Xtrain.shape[1]) - pone), Xtrain[p] ) )   ), Xtrain[p].T), prod)  

        

        # Newton-Raphson (IRLS) with L2 and L1 penalty terms in the gradient:
        # iterate until the residual norm stops changing by more than the tolerance
        p = utils.sigmoid(np.dot(Xtrain, weights))
        tolerance = 0.1
        error = float('inf')
        err = np.linalg.norm(np.subtract(ytrain, p))
        while np.abs(error - err) > tolerance:
            P = np.diag(p)

            I = np.identity(P.shape[0])
            #Hess_inv =-np.linalg.inv(np.dot(np.dot(np.dot(Xtrain.T,self.P),np.subtract(I,self.P)),Xtrain))
            #Hess_inv=-np.linalg.inv(np.dot(np.dot(Xtrain.T,np.dot(P,(I-P))),Xtrain))
            Hess_inv=-np.linalg.inv(np.dot(np.dot(Xtrain.T,np.dot(P,(I-P))),Xtrain))
            # Gradient of the penalized log-likelihood: X^T (y - p) minus the
            # derivatives of the L2 and L1 penalty terms
            First_Grad = (np.dot(Xtrain.T, np.subtract(ytrain, p))
                          - 2 * self.params['regwgt'] * utils.dl2(weights)
                          - self.params['regwgt'] * utils.dl1(weights))
            weights= weights - (np.dot(Hess_inv, First_Grad))
            p = utils.sigmoid(np.dot(Xtrain, weights))

            # track successive residual norms for the convergence test
            error = err
            err = np.linalg.norm(np.subtract(ytrain, p))

        self.weights = weights
Code example #15
    def learn(self,Xtrain, ytrain):
        Xshape = Xtrain.shape
        regwgt = self.params['regwgt']
        w = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T,Xtrain)),Xtrain.T),ytrain)
        err = float('INF')
        p = utils.sigmoid(np.dot(Xtrain, w))
        tolerance = 0.1
        numsamples = Xshape[0]
        XX_n = np.dot(Xtrain.T,Xtrain)/numsamples
        eeta = 1 / np.dot(2, np.linalg.norm((XX_n)))
        stepsize = 0.1
        while True:
            P = np.diag(p)
            I = np.identity(p.shape[0])

            # weight update based on type of regularizer selected
            if self.params['regularizer'] == 'l1':
                gradient = np.dot(Xtrain.T, np.subtract(ytrain, p))
                hessian_inv = -np.linalg.inv(np.dot(np.dot (Xtrain.T, np.dot(P, (I - P))), Xtrain))
                w = np.subtract(w, np.dot(hessian_inv, gradient))
                w = utils.proximalOperator(w, regwgt, eeta)

            elif self.params['regularizer'] == 'l2':
                gradient = np.dot(Xtrain.T, np.subtract(ytrain, p))+ regwgt*self.regularizer[1](w)
                hessian_inv = -np.linalg.inv(np.dot(np.dot (Xtrain.T, np.dot(P, (I - P))), Xtrain) + regwgt)
                w = np.subtract(w, stepsize*np.dot(hessian_inv, gradient))

            elif self.params['regularizer'] == 'elastic':
                gradient = np.dot(Xtrain.T, np.subtract(ytrain, p)) + regwgt * self.regularizer[1](w)
                hessian_inv = -np.linalg.inv(np.dot(np.dot(Xtrain.T, np.dot(P, (I - P))), Xtrain) + regwgt)
                w = np.subtract(w, stepsize * np.dot(hessian_inv, gradient))
                w = utils.proximalOperator(w, regwgt, eeta)

            else:
                gradient = np.dot(Xtrain.T, np.subtract(ytrain, p))
                hessian_inv = -np.linalg.inv(np.dot(np.dot(Xtrain.T,np.dot(P,(I - P))), Xtrain))
                w = np.subtract(w, np.dot(hessian_inv, gradient))
            p = utils.sigmoid(np.dot(Xtrain, w))

            newerr = np.linalg.norm(np.subtract(ytrain,p))
            if abs(err - newerr)<tolerance:
                break
            elif newerr - err > 0:
                stepsize /= 10
            err = newerr

        self.weights = w

        return self.weights
 def logit_cost(self, theta, X, y):
     # print np.shape(X)
     tt = X.shape[0]
     theta = np.reshape(theta, (len(theta), 1))
     y = np.reshape(y, (len(y), 1))
     J = (1.0 / tt) * (
         -np.transpose(y).dot(np.log(utils.sigmoid(X.dot(theta))))
         - np.transpose(1 - y).dot(np.log(1 - utils.sigmoid(X.dot(theta))))
     )
     grad = (1.0 / tt) * (
         -np.transpose(X).dot(y * np.divide(utils.sigmoid_der(X.dot(theta)), utils.sigmoid(X.dot(theta))))
         + np.transpose(X).dot((1 - y) * np.divide(utils.sigmoid_der(X.dot(theta)), 1 - utils.sigmoid(X.dot(theta))))
     )
     # When you write your own minimizers, you will also return a gradient here
     # print 'X^T theta %f' %X.dot(theta)[0]
     return {"loss": J, "gradient": grad}
Code example #17
    def learn(self, X, y):
        hls = self.params['nh']
        w2 = np.zeros(shape=(X.shape[1], hls))
        w1 = np.array([randrange(-100, 100) for i in range(0, hls)])
        alpha = .001

        for epoch in range(0, self.params['epochs']):
            state = np.random.get_state()
            np.random.shuffle(X)
            np.random.set_state(state)
            np.random.shuffle(y)
            for t in range(0, np.shape(X)[0]):

                y2 = utils.sigmoid(np.dot(X[t], w2))
                y1 = self.sigmoid(np.dot(y2, w1.T))
                del1 = y1 - y[t]
                grad1 = np.dot(del1.T, y2)
                del2 = np.array([(w1 * del1)[i] * y2[i] * (1 - y2[i])
                                 for i in range(len(w1))])
                grad2 = np.array([X[t] * i for i in del2]).T

                w2 = w2 - alpha * grad2
                w1 = w1 - alpha * grad1
        self.wi = w2
        self.wo = w1
Code example #18
 def predict(self, Xtest):
     """
         Use the parameters computed in self.learn to give predictions on new
         observations.
         """
     ytest = np.zeros(Xtest.shape[0], dtype=int)
     if (self.params['kernel'] == 'hamming'):
         print('')
         Ktest = np.zeros([Xtest.shape[0], self.params['k']])
         for i in range (0, Xtest.shape[0]):
             for j in range (0, self.params['k']):
                 Ktest[i][j] = self.hamming(Xtest[i], self.kcentre[j])
     
     
     else:
         
         Ktest = np.dot(Xtest, self.kcentre.T)
     ### YOUR CODE HERE
     sig = np.dot(Ktest, self.weights)
     sig = utils.sigmoid(sig)
     #print (sig)
     sig = np.round(sig)
     #print (sig)
     for i in range (0, ytest.shape[0]):
         ytest[i] = int(sig[i])
     ### END YOUR CODE
     #print (ytest)
     assert len(ytest) == Xtest.shape[0]
     return ytest
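Example #18 relies on a `self.hamming(x, c)` kernel between a sample and a kernel centre that is not shown. One plausible definition (an assumption) counts the features on which the two discrete vectors agree, so that more similar points produce larger kernel values:

import numpy as np

def hamming_kernel(x, centre):
    # Number of positions where two discrete feature vectors agree (assumed definition).
    return np.sum(np.asarray(x) == np.asarray(centre))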
Code example #19
 def predict(self, Xtest):
     ytest = np.dot(Xtest, self.weights)
     ytest = utils.sigmoid(ytest)
     ytest[ytest >= 0.5] = 1
     ytest[ytest < 0.5] = 0
     ytest = np.squeeze(ytest)
     return ytest
Code example #20
    def predict(self, Xtest):
        """
        Use the parameters computed in self.learn to give predictions on new
        observations.
        """
        ### YOUR CODE HERE
        value = utils.sigmoid(np.dot(Xtest, self.weights.T))
        ytest = np.zeros(value.shape)
        for i in range(value.shape[0]):
            maxIndex = 0
            maxValue = 0
            for j in range(value.shape[1]):
                if value[i][j]>maxValue:
                    maxIndex = j
                    maxValue = value[i][j]
            ytest[i][maxIndex] = 1
        # debug output: print each one-hot prediction row
        for i in ytest:
            print(i)
        ytest = self.y_digit(ytest)



        ### END YOUR CODE
        assert len(ytest) == Xtest.shape[0]
        return ytest
Code example #21
 def train(self, state, gangliaI, reward, T):
     self.train_U = np.dot(self.w.T,
                           state)  # #    self.C2 * self.train_U + self.C1 *
     self.train_I = utils.sigmoid(self.train_U)
     self.error_out = gangliaI - self.train_I
     self.w += T * reward * self.ETA * np.outer(
         state, self.error_out) * self.train_I * (1. - self.train_I)
Code example #22
    def logit_cost(self, theta, X, y):
        """
        Compute cost for logistic regression using theta as the parameters.
        """

        cost = 0.0
        num_samples = X.shape[0]

        ### YOUR CODE HERE
        for cnt in range(num_samples):
            cost += -y[cnt] * np.log(utils.sigmoid(np.dot(theta, X[cnt, :])))
            cost += -(1 - y[cnt]) * np.log(
                1 - utils.sigmoid(np.dot(theta, X[cnt, :])))
        ### END YOUR CODE

        return cost
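The per-sample loop in example #22 can also be written without a loop. A sketch of an equivalent vectorized computation (same total cost, hypothetical helper name):

import numpy as np

def logit_cost_vectorized(theta, X, y):
    # Total negative log-likelihood over all samples, equivalent to the loop above.
    p = 1.0 / (1.0 + np.exp(-np.dot(X, theta)))
    return np.sum(-y * np.log(p) - (1.0 - y) * np.log(1.0 - p))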
Code example #23
    def predict(self, Xtest):

        ytest = np.zeros(Xtest.shape[0], dtype=int)
        ytest = utils.sigmoid(np.dot(Xtest, self.weights)) >= 0.5

        assert len(ytest) == Xtest.shape[0]
        return ytest
Code example #24
    def get_reparam_func(
            target_image: torch.Tensor
    ) -> Callable[[torch.Tensor], torch.Tensor]:
        minimum = target_image.min()
        value_range = target_image.max() - minimum

        return lambda x: (utilities.sigmoid(x) * value_range) + minimum
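A short usage sketch for example #24, assuming `utilities.sigmoid` behaves like `torch.sigmoid` and that `get_reparam_func` is callable at module level: the returned function maps any unconstrained tensor into the value range of the target image, which is what makes it convenient for optimizing in an unconstrained parameter space.

import torch

target = torch.rand(3, 64, 64) * 255.0   # hypothetical target image
reparam = get_reparam_func(target)
latent = torch.randn(3, 64, 64, requires_grad=True)
image = reparam(latent)                   # values lie within [target.min(), target.max()]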
Code example #25
 def predict(self, Xtest):
     temp=self.weights[self.weights ==0]
     print("The shape of temp is",temp.shape)
     yvec = np.dot(Xtest, self.weights)
     ytest=utils.sigmoid(yvec)
     ytest[ytest >= 0.5] = 1     
     ytest[ytest < 0.5] = 0    
     return ytest
Code example #26
    def logit_cost(self, theta, X, y):
        #print np.shape(X)
        tt = X.shape[0]
        theta = np.reshape(theta, (len(theta), 1))
        y = np.reshape(y, (len(y), 1))
        J = (1. / tt) * (
            -np.transpose(y).dot(np.log(utils.sigmoid(X.dot(theta)))) -
            np.transpose(1 - y).dot(np.log(1 - utils.sigmoid(X.dot(theta)))))
        grad = (1. / tt) * (
            -np.transpose(X).dot(y * np.divide(utils.sigmoid_der(X.dot(theta)),
                                               utils.sigmoid(X.dot(theta)))) +
            np.transpose(X).dot(
                (1 - y) * np.divide(utils.sigmoid_der(X.dot(theta)),
                                    1 - utils.sigmoid(X.dot(theta)))))

        #print 'X^T theta %f' %X.dot(theta)[0]
        return {'loss': J, 'gradient': grad}
Code example #27
    def logit_cost_grad(self, theta, X, y):
        """
        Compute gradients of the cost with respect to theta.
        """
        grad = np.zeros(len(theta))
        y_hat = utils.sigmoid(np.dot(X, theta))
        grad = np.dot(X.T, (y_hat - y)) / y.shape[0] + self.params['regwgt']*self.regularizer[1](theta)

        return grad
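A gradient like the one in example #27 can be sanity-checked against centered finite differences of the corresponding cost. A small sketch, assuming the cost is available as a plain function `cost(theta, X, y)`:

import numpy as np

def max_gradient_error(cost, grad, theta, X, y, eps=1e-6):
    # Largest absolute difference between the analytic gradient and a
    # centered finite-difference estimate of it.
    numeric = np.zeros_like(theta)
    for i in range(len(theta)):
        step = np.zeros_like(theta)
        step[i] = eps
        numeric[i] = (cost(theta + step, X, y) - cost(theta - step, X, y)) / (2 * eps)
    return np.max(np.abs(numeric - grad(theta, X, y)))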
Code example #28
 def logit_cost_grad(self, theta, X, y):
     """
     Compute gradients of the cost with respect to theta.
     """
     grad = np.zeros(len(theta))
     ### YOUR CODE HERE
     grad=np.dot((utils.sigmoid(np.dot(X, theta.T)) - y).T,X)+self.params['regwgt']*self.regularizer[1](theta)
     #ask ta
     return grad
Code example #29
    def logit_cost(self, theta, X, y):
        """
        Compute cost for logistic regression using theta as the parameters.
        """
        cost = 0.0
        y_hat = utils.sigmoid(np.dot(X, theta))
        cost = (np.dot(-y.T, np.log(y_hat)) - np.dot((1 - y).T, np.log(1 - y_hat)))/X.shape[0] + self.params['regwgt']*self.regularizer[0](theta)

        return cost
Code example #30
    def update_knowledge(self, alpha, txn, bit_matrix):
        """Takes alpha and transition matrix (expects a 2d array)"""
        # first convert state binary to int to get the row in coherence matrix
        row_ptr = utilities.bool2int(self.knowledge_state)
        # get the corresponding probabilites from the matrix
        coh_prob_tx = txn[row_ptr]
        ones_list = np.zeros(number_of_bits)
        dissonance_list = []
        disagreements = []

        for index, curr_bit_state in enumerate(self.knowledge_state):
            # now look for neighbors who disagree in this bit value

            neigh_disagreement_count = self.count_dissimilar_neighbors(index)

            # compute d as (# of neighbors disagree on bit/# of neighbors)
            if len(self.neighbors) > 0:
                d = neigh_disagreement_count / len(self.neighbors)
            else:
                d = 0

            #TODO: Handle the viral parameter - in general, if d = 0 and viral is set,
            #TODO: it should not be possible to make that transition

            if d > 0:
                dissonance = utilities.sigmoid(d, self.tau)

            else:
                dissonance = 0

            dissonance_list.append(dissonance)

            # keeping track of disagreement of bits/total neighbors
            disagreements.append(d)
            # transition probabilities given social pressure for moving to a state
            # with a '1' at this bit
            ones_list[index] = (1 -
                                dissonance if curr_bit_state else dissonance)

        zeros_list = 1 - ones_list
        tmp_soc_mat = ones_list * bit_matrix + zeros_list * (1 - bit_matrix)

        # Probabilities for each state given social pressure
        soc_prob_tx = np.prod(tmp_soc_mat, 1)
        #TODO logs soc_prob_tx for each agent at each time step

        probs = alpha * soc_prob_tx + (1 - alpha) * coh_prob_tx
        self.next_state_probs = probs
        self.soc_probs = soc_prob_tx
        self.next_state = utilities.int2bool(
            np.random.choice(range(2**number_of_bits), 1, p=probs)[0],
            number_of_bits)
        self.dissonance_lst = dissonance_list
        self.state_disagreements = disagreements

        return soc_prob_tx
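Example #30 calls `utilities.sigmoid(d, self.tau)` with a second argument. A plausible reading (an assumption about that utility) is a logistic function whose steepness is controlled by tau:

import numpy as np

def sigmoid(x, tau=1.0):
    # Logistic function with steepness/temperature parameter tau (assumed form).
    return 1.0 / (1.0 + np.exp(-x / tau))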
Code example #31
    def predict(self, Xtest):
        #Xtest = Xtest2[:,:-1]
        xvec = np.dot(Xtest, self.weights)
        ytest = utils.sigmoid(xvec)
        ytest[ytest >= 0.5] = 1     
        ytest[ytest < 0.5] = 0
 	print("Logistic Q3:"),
        print time.time() - self.tlo

        return ytest
Code example #32
    def predict(self, Xtest):
        xvec = np.dot(Xtest, self.weights)
        ytest = utils.sigmoid(xvec)
        ym = utils.mean(ytest)
        ytest[ytest >= ym] = 1     
        ytest[ytest < ym] = 0    
 	print("Logistic Q2:"),
        print time.time() - self.tlo

        return ytest
Code example #33
    def fit(self, Xtrain, ytrain):
        print("We're in fit in LogitReg")
        self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T,Xtrain)),Xtrain.T),ytrain)
        self.xvec = np.dot(Xtrain, self.weights)#1000x1
        self.p = utils.sigmoid(self.xvec) #1000x1
        self.P = np.diagflat(self.p) #1000x1000

        lambdaa = 0.01
        for reps in range(self.reps):
            # Regularized Newton step:
            # w <- w + (1/n) * inv(X^T P (I - P) X + lambda*I) X^T (y - p)
            hessian = np.dot(np.dot(np.dot(Xtrain.T, self.P),
                                    np.eye(Xtrain.shape[0]) - self.P), Xtrain) \
                + lambdaa * np.eye(Xtrain.shape[1])
            self.weights += (1 / Xtrain.shape[0]) * np.dot(
                np.dot(np.linalg.inv(hessian), Xtrain.T), (ytrain - self.p))
            # refresh probabilities with the updated weights
            self.p = utils.sigmoid(np.dot(Xtrain, self.weights))
            self.P = np.diagflat(self.p)
Code example #34
File: classalgorithms.py Project: PawPatel/Learning
    def learn(self, Xtrain, ytrain):
        iterate = 10 #set num of iterations to perform descent
        I = np.identity(Xtrain.shape[0])

        #initialize the weights using linear regression
        self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T, Xtrain)), Xtrain.T), ytrain)

        for i in range(iterate):
            p = utils.sigmoid(np.dot(Xtrain, self.weights))
            P = np.diag(p)
            self.weights += np.dot(np.linalg.inv(np.dot(np.dot(Xtrain.T, P), np.dot(I-P, Xtrain)) + self.alpha*np.diag(np.absolute(self.weights))), np.dot(Xtrain.T, ytrain - p) + self.alpha*self.weights**2)  
Code example #35
File: classalgorithms.py Project: PawPatel/Learning
    def learn(self, Xtrain, ytrain):
        iterate = 10 #set num of iterations to perform descent
        I = np.identity(Xtrain.shape[0])

        #initialize the weights using linear regression
        self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T, Xtrain)), Xtrain.T), ytrain)

        for i in range(iterate):
            p = utils.sigmoid(np.dot(Xtrain, self.weights))
            P = np.diag(p)
            # Use Hessian or Not (for Madelon data set, Hessian is expensive to compute) First line is Newton, Second is Grad Desc
            self.weights += np.dot(np.dot(np.linalg.inv(np.dot(np.dot(Xtrain.T, P), np.dot(I-P, Xtrain))), Xtrain.T), ytrain - p)  
Code example #36
    def learn(self, Xtrain, ytrain):
        xvec = np.dot(Xtrain, self.weights)
        p = utils.sigmoid(np.dot(Xtrain, self.weights))  #(500*9) * (9*1) = 500*1
        P = np.diagflat(p)

        for j in range(500):
            for i in range(Xtrain.shape[0]):
                xvec = np.dot(Xtrain[i], self.weights)  #(1*9) * (9*1) = 500*1
                delta = np.divide((2*ytrain[i]-1)*np.sqrt(np.square(xvec)+1)-xvec,np.square(xvec)+1)
                delta = np.dot(Xtrain[i].T,delta)
                first_term = np.divide((2*ytrain[i]-1)*xvec - np.sqrt(np.square(xvec)+1)-xvec,np.power(np.square(xvec)+1,3/2))
                second_term = 2*xvec*np.divide((2*ytrain[i]-1)*np.sqrt(np.square(xvec)+1)-xvec,np.square(np.square(xvec)+1))
                hessian = np.dot(Xtrain[i].T,Xtrain[i])*(first_term-second_term)
                self.weights = self.weights + self.step_size * delta/hessian #(500*9) * (500*1) = (9*1)
Code example #37
File: classalgorithms.py Project: PawPatel/Learning
    def learn(self, Xtrain, ytrain, stepsize, iterate):
        self.stepsize = stepsize
        self.iterate = iterate

        #I = np.identity(Xtrain.shape[0])

        #initialize the weights using linear regression
        self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xtrain.T, Xtrain)), Xtrain.T), ytrain)

        for i in range(self.iterate):
            p = utils.sigmoid(np.dot(Xtrain, self.weights))
            #P = np.diag(p)
            # Use Hessian or Not (for Madelon data set, Hessian is expensive to compute) First line is Newton, Second is Grad Desc
            #self.weights += self.stepsize*np.dot(np.dot(np.linalg.inv(np.dot(np.dot(Xtrain.T, P), np.dot(I-P, Xtrain))), Xtrain.T), ytrain - p)  
            # gradient ascent on the log-likelihood: w <- w + eta * X^T (y - p)
            self.weights += self.stepsize*np.dot(Xtrain.T, ytrain - p)
Code example #38
File: main.py Project: mjenczmyk/neural-networks
def main():

    learning_rate = 0.6
    beta = 0.7
    inner_size = 128
    
    topology = [INPUT_SIZE, inner_size, OUTPUT_SIZE]
    activation = lambda x: sigmoid(x, beta)
    activation_diff = lambda x: sigmoid_diff(x, beta)

    network = NeuralNetwork(topology, activation, activation_diff)

    for i in xrange(200):
        network.training_session(DATA, 100, learning_rate)
        network.test_effectiveness(DATA, 200)
Code example #39
 def learn(self, Xtrain, ytrain):
     self.weights = np.ones(Xtrain.shape[1])
     centropy = utils.cross_entropy(Xtrain, ytrain, self.weights) + self.regloss(self.regularizer)
     converged = False
     iters = 0
     while not converged:
         output = utils.sigmoid(np.dot(Xtrain, self.weights))
         error = output - ytrain
         self.weights = self.weights - self.alpha * (np.dot(Xtrain.T, error) + self.reggradient(self.regularizer))
         newcentropy = utils.cross_entropy(Xtrain, ytrain, self.weights) + self.regloss(self.regularizer)
         if abs(centropy - newcentropy) <= self.ep:
             converged = True
         centropy = newcentropy
         iters += 1
         if iters > self.max_iter:
             converged = True
Code example #40
 def learn(self, Xtrain, ytrain):
     self.weights = np.ones(Xtrain.shape[1])
     centropy = utils.cross_entropy(Xtrain, ytrain, self.weights)
     converged = False
     iters = 0
     while not converged:
         output = utils.sigmoid(np.dot(Xtrain, self.weights))
          # if the log-likelihood is used instead of cross-entropy, the error sign flips (error = -error) and the update rule becomes w = w + delta(w)
         error = output - ytrain
         self.weights = self.weights - self.alpha * np.dot(Xtrain.T, error)
         newcentropy = utils.cross_entropy(Xtrain, ytrain, self.weights)
         if abs(centropy - newcentropy) <= self.ep:
             converged = True
         centropy = newcentropy
         iters += 1
         if iters > self.max_iter:
             converged = True
Code example #41
	def setUp(self):
		weights_in = -2 + np.random.rand(NN.ni, NN.nh) * 4
		weights_out = -2 + np.random.rand(NN.nh, NN.no) * 4
		self.net1 = NN()
		self.net1.wi = weights_in.copy()
		self.net1.wo = weights_out.copy()
		self.net2 = NN()
		self.net2.wi = weights_in.copy()
		self.net2.wo = weights_out.copy()
		inputs = -2 + np.random.rand(4) * 8
		for i in range(NN.ni):
			self.net1.ai[i] = np.tanh(inputs[i])
		for j in range(NN.nh):
			self.net1.ah[j] = sigmoid(sum([self.net1.ai[i] * self.net1.wi[i][j] for i in range(NN.ni)]))
		for k in range(NN.no):
			self.net1.ao[k] = sum([self.net1.ah[j] * self.net1.wo[j][k] for j in range(NN.nh)])
		self.net1.ao = np.where(self.net1.ao > 0.5, 1.0, 0.0)
		self.net2.activate(inputs)
Code example #42
	def constrain_bounded(self, which, lower, upper):
		"""Set bounded constraints. 

		Arguments
		---------
		which -- np.array(dtype=int), or regular expression object or string
		upper -- (float) the upper bound on the constraint
		lower -- (float) the lower bound on the constraint
		"""
		matches = self.grep_param_names(which)
		assert not np.any(matches[:,None]==self.all_constrained_indices()), "Some indices are already constrained"
		assert lower < upper, "lower bound must be smaller than upper bound!"
		self.constrained_bounded_indices.append(matches)
		self.constrained_bounded_uppers.append(upper)
		self.constrained_bounded_lowers.append(lower)
		#check to ensure constraint is in place
		x = self.get_param()
		for i,xx in enumerate(x):
			if ((xx<=lower)|(xx>=upper)) & (i in matches):
				x[i] = sigmoid(xx)*(upper-lower) + lower
		self.set_param(x)
Code example #43
	def expand_param(self,x):
		""" takes the vector x, which is then modified (by untying, reparameterising or inserting fixed values), and then call self.set_param"""
		#work out how many places are fixed, and where they are. tricky logic!
		Nfix_places = 0
		if len(self.tied_indices):
			Nfix_places += np.hstack(self.tied_indices).size-len(self.tied_indices)
		if len(self.constrained_fixed_indices):
			Nfix_places += np.hstack(self.constrained_fixed_indices).size
		if Nfix_places:
			fix_places = np.hstack(self.constrained_fixed_indices+[t[1:] for t in self.tied_indices])
		else:
			fix_places = []
		free_places = np.setdiff1d(np.arange(Nfix_places+x.size,dtype=int),fix_places)
		#put the models values in the vector xx
		xx = np.zeros(Nfix_places+free_places.size,dtype=np.float64)
		xx[free_places] = x
		[np.put(xx,i,v) for i,v in zip(self.constrained_fixed_indices, self.constrained_fixed_values)]
		[np.put(xx,i,v) for i,v in [(t[1:],xx[t[0]]) for t in self.tied_indices] ]
		xx[self.constrained_positive_indices] = np.exp(xx[self.constrained_positive_indices])
		xx[self.constrained_negative_indices] = -np.exp(xx[self.constrained_negative_indices])
		[np.put(xx,i,low+sigmoid(xx[i])*(high-low)) for i,low,high in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
		self.set_param(xx)
Code example #44
 def predict(self, Xtest, weights=None):
     if weights is None:
         weights = self.weights
     p = utils.sigmoid(np.dot(Xtest, weights))
     p = utils.threshold_probs(p)
     return p
 def predict(self, Xtest):
     probs = utils.sigmoid(Xtest.dot(self.weights))
     ytest = utils.threshold_probs(probs)
     return ytest
Code example #46
 def predict(self, Xtest):
     # print self.weights
     ytest = utils.sigmoid(np.dot(Xtest, self.weights))
     ytest[ytest >= 0.5] = 1
     ytest[ytest < 0.5] = 0
     return ytest
Code example #47
File: classalgorithms.py Project: PawPatel/Learning
    def predict(self, Xtest):
        pred = utils.sigmoid(np.dot(Xtest, self.weights))
        pred[pred < .5] = 0
        pred[pred >= .5] = 1

        return pred 
Code example #48
 def predict(self, Xtest):
     p = utils.sigmoid(np.dot(Xtest, self.weights))
     p = utils.threshold_probs(p)
     return p
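Several of the predict snippets (for instance examples #44 and #48) call `utils.threshold_probs` rather than thresholding inline. Given how the neighbouring examples threshold at 0.5, a minimal sketch of that assumed helper is:

import numpy as np

def threshold_probs(probs, threshold=0.5):
    # Map predicted probabilities to hard 0/1 class labels.
    return (np.asarray(probs) >= threshold).astype(int)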