Code example #1
    def learn(self, X, y):
        # Batch gradient descent on the logistic loss with a 1/i decaying step size
        self.weights = np.random.randn(X.shape[1], 1)

        maxiter = self.params["epochs"]
        for i in range(1, maxiter + 1):  # 1-indexed so the 1/i decay is defined
            eta = self.params["stepsize"] / i
            # Gradient of the negative log-likelihood: X^T (sigmoid(Xw) - y)
            self.weights -= eta * X.T @ (utils.sigmoid(X @ self.weights) - y)
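Every snippet on this page calls utils.sigmoid, whose body is not shown. A minimal sketch of the conventional definition, assuming nothing beyond NumPy:

    import numpy as np

    def sigmoid(z):
        # Logistic function; clipping keeps np.exp from overflowing
        z = np.clip(z, -500, 500)
        return 1.0 / (1.0 + np.exp(-z))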
Code example #2
    def learn(self, X, y):
        kernel = self.params['kernel']
        centers = self.params['centers']
        # Use the first `centers` training samples as the kernel centers
        self.centers = X[:centers]

        if kernel == 'linear':
            numsamples, numfeatures = X.shape
            Ktrain = np.zeros((numsamples, centers))
            for i in range(numsamples):
                for j in range(centers):
                    Ktrain[i, j] = self.linear(self.centers[j], X[i])

        elif kernel == 'hamming':
            # The Hamming path receives 1-D input; reshape so each sample is a row.
            # Allocate the kernel matrix after the reshape so rows match samples.
            X = X.reshape(-1, 1)
            numsamples, numfeatures = X.shape
            Ktrain = np.zeros((numsamples, centers))
            for i in range(numsamples):
                for j in range(centers):
                    Ktrain[i, j] = self.hamming_distance(self.centers[j], X[i])
        else:
            raise ValueError(
                'KernelLogisticRegression -> can only handle linear and hamming kernels'
            )

        self.weights = np.random.rand(centers, 1)

        # Stochastic gradient descent over the kernelised representation
        for i in range(self.params['epochs']):
            for j in np.random.permutation(numsamples):
                k = Ktrain[j].reshape(1, -1)
                h = utils.sigmoid(k @ self.weights)
                self.weights = self.weights - self.params['stepsize'] * (
                    k.T @ (h - y[j]))
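This snippet calls self.linear and self.hamming_distance, which are defined elsewhere in the class and not shown here. Plausible one-line bodies, labelled as assumptions since the originals are unavailable:

    def linear(self, center, x):
        # Linear kernel: plain inner product (assumed definition)
        return np.dot(center, x)

    def hamming_distance(self, center, x):
        # Count of positions where the two vectors disagree (assumed definition)
        return np.sum(center != x)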
Code example #3
    def learn(self, X, y):
        # Batch gradient descent on the logistic loss with a fixed step size
        self.weights = np.random.rand(X.shape[1], 1)

        for i in range(self.params['epochs']):
            h = utils.sigmoid(X @ self.weights)
            self.weights = self.weights - self.params['stepsize'] * (
                X.T @ (h - y))
Code example #4
File: classalgorithms.py Project: YangAusDu/466
    def predict(self, Xtest):
        # Threshold the predicted probabilities at 0.5
        output = utils.sigmoid(np.dot(Xtest, self.weights))
        threshold_probs = 0.5
        ypred = np.zeros(len(Xtest))
        for index in range(len(output)):
            if output[index] >= threshold_probs:
                ypred[index] = 1
            else:
                ypred[index] = 0
        return np.reshape(ypred, [len(Xtest), 1])
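The element-wise loop above can be collapsed into one vectorized comparison; a self-contained sketch of the equivalent operation:

    import numpy as np

    output = np.array([0.2, 0.7, 0.5])
    ypred = (output >= 0.5).astype(float)  # same result as the loop
    print(ypred)  # [0. 1. 1.]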
Code example #5
    def predict(self, Xtest):
        numsamples = Xtest.shape[0]
        predictions = []

        # Classify each sample by thresholding its predicted probability at 0.5
        for i in range(numsamples):
            prob = utils.sigmoid(self.weights.T @ Xtest[i, :])
            if prob < 0.5:
                predictions.append(0)
            else:
                predictions.append(1)

        return np.reshape(predictions, [numsamples, 1])
Code example #6
File: classalgorithms.py Project: YangAusDu/466
    def learn(self, X, y):
        self.weights = np.zeros(X.shape[1])
        X = np.array(X)
        y = np.array(y)
        stepsize = self.params['stepsize']
        epochs = self.params['epochs']

        # Stochastic gradient descent: one update per sample, in shuffled order
        for epoch in range(epochs):
            indices = np.arange(len(X))
            np.random.shuffle(indices)
            for each_index in indices:
                gradient = utils.sigmoid(np.dot(X[each_index], self.weights)) - y[each_index]
                self.weights = self.weights - stepsize * gradient * X[each_index]
Code example #7
    def predict(self, Xtest):
        numsamples = Xtest.shape[0]
        predictions = []

        # Kernelise the test samples against the stored centers
        K = np.zeros((numsamples, self.params['centers']))
        for n in range(numsamples):
            for i, C in enumerate(self.centers):
                K[n, i] = hamming(Xtest[n], C)

        # Threshold each predicted probability at 0.5
        for n in range(numsamples):
            prob = utils.sigmoid(K[n] @ self.weights)
            if prob < 0.5:
                predictions.append(0)
            else:
                predictions.append(1)

        return np.reshape(predictions, [numsamples, 1])
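The bare hamming call here (and in example #9) is presumably SciPy's, which returns the fraction, not the count, of disagreeing entries:

    from scipy.spatial.distance import hamming

    hamming([1, 0, 1], [1, 1, 1])  # 0.333... : one of three entries differs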
Code example #8
    def predict(self, Xtest):
        kernel = self.params['kernel']
        centers = self.params['centers']

        if kernel == 'linear':
            numsamples, numfeatures = Xtest.shape
            Ktest = np.zeros((numsamples, centers))
            for i in range(numsamples):
                for j in range(centers):
                    Ktest[i, j] = self.linear(self.centers[j], Xtest[i])

        elif kernel == 'hamming':
            # Reshape 1-D input to a column and allocate afterwards, as in learn
            Xtest = Xtest.reshape(-1, 1)
            numsamples, numfeatures = Xtest.shape
            Ktest = np.zeros((numsamples, centers))
            for i in range(numsamples):
                for j in range(centers):
                    Ktest[i, j] = self.hamming_distance(
                        self.centers[j], Xtest[i])
        else:
            raise ValueError(
                'KernelLogisticRegression -> can only handle linear and hamming kernels'
            )

        return np.round(utils.sigmoid(np.dot(Ktest, self.weights)))
Code example #9
    def learn(self, X, y):
        numsamples = X.shape[0]

        K = np.zeros((numsamples, self.params['centers']))
        self.weights = np.random.randn(self.params['centers'], 1)
        # Sample distinct training points to act as kernel centers
        index = np.random.choice(numsamples, size=self.params['centers'],
                                 replace=False)
        self.centers = X[index].copy()

        # Kernelise the training samples against the centers
        for n in range(numsamples):
            for i, C in enumerate(self.centers):
                K[n, i] = hamming(X[n], C)

        assert (K @ self.weights).shape == y.shape

        # Batch gradient descent with a 1/i decaying step size
        maxiter = self.params["epochs"]
        for i in range(1, maxiter + 1):
            eta = self.params["stepsize"] / i
            self.weights -= eta * K.T @ (utils.sigmoid(K @ self.weights) - y)
Code example #10
    def learn(self, X, y):
        """
        Mini-batch SGD with AdaGrad step sizes.
        """
        self.weights = np.zeros(X.shape[1])
        self.g = 0  # running sum of squared gradients (AdaGrad accumulator)

        for p in range(self.epochs):
            Xr, yr = transform.randomize_data(X, y)
            for k in range(0, X.shape[0], self.batch_size):
                gradient = 0
                for i in range(k, min(k + self.batch_size, X.shape[0])):
                    # Only this line differs from the linear-regression version
                    dot = utils.sigmoid(np.dot(Xr[i], self.weights))
                    error = dot - yr[i]
                    gradient += error * Xr[i]

                gt_square = np.square(gradient / self.batch_size)

                # AdaGrad: scale the step by the inverse root of the accumulated
                # squared gradients; the epsilon guards against division by zero
                self.g += gt_square
                step_size = np.reciprocal(np.sqrt(self.g) + 1e-8)
                self.weights = self.weights - step_size * (gradient / self.batch_size)
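A self-contained toy step showing the AdaGrad bookkeeping in isolation; the epsilon term is an addition to guard features whose accumulated gradient is still zero:

    import numpy as np

    g_acc = np.zeros(3)                          # accumulated squared gradients
    grad = np.array([0.5, -0.2, 0.0])            # toy mini-batch gradient
    g_acc += np.square(grad)
    step = np.reciprocal(np.sqrt(g_acc) + 1e-8)  # per-feature step sizes
    weights = np.zeros(3) - step * grad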
Code example #11
    def predict(self, Xtest, threshold=0.5):
        probs = utils.sigmoid(np.dot(Xtest, self.weights))
        ytest = utils.threshold_probs(probs, threshold=threshold)

        return ytest
Code example #12
    def predict(self, Xtest):
        # Rounding the probabilities is equivalent to thresholding at 0.5
        return np.round(utils.sigmoid(np.dot(Xtest, self.weights)))
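For reference, a minimal end-to-end sketch that wires the batch-gradient-descent learn (example #3) and the rounding predict (example #12) into one runnable class. The class name, the params keys, and the inlined sigmoid are assumptions based on the snippets above, not the original project's code:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-np.clip(z, -500, 500)))

    class LogitRegSketch:
        def __init__(self, params):
            self.params = params  # expects 'epochs' and 'stepsize', as above

        def learn(self, X, y):
            self.weights = np.random.rand(X.shape[1], 1)
            for i in range(self.params['epochs']):
                h = sigmoid(X @ self.weights)
                self.weights -= self.params['stepsize'] * (X.T @ (h - y))

        def predict(self, Xtest):
            return np.round(sigmoid(Xtest @ self.weights))

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 3))
    y = (X @ np.array([[1.5], [-2.0], [0.5]]) > 0).astype(float)
    model = LogitRegSketch({'epochs': 100, 'stepsize': 0.01})
    model.learn(X, y)
    accuracy = np.mean(model.predict(X) == y)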