Code example #1
    def fit(self, X, y):
        n, d = X.shape

        # Initial guess
        self.w = np.zeros(d)
        utils.check_gradient(self, X, y)
        (self.w, f) = minimizers.findMin(self.funObj, self.w, self.maxEvals,
                                         self.verbose, X, y)
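
All of these examples rely on a course-style helper minimizers.findMin(funObj, w, maxEvals, verbose, *args) that returns a (w, f) pair. Its implementation is not shown here; as a rough, minimal stand-in (the real routine likely uses a backtracking line search rather than a fixed step), a plain gradient-descent version could look like the sketch below, assuming funObj(w, *args) returns the loss and its gradient:

import numpy as np

def findMin_sketch(funObj, w, maxEvals, verbose, *args, alpha=1e-3, optTol=1e-4):
    # Minimal stand-in for minimizers.findMin: fixed-step gradient descent.
    # Assumes funObj(w, *args) returns (f, g), the loss and its gradient.
    f, g = funObj(w, *args)
    for evals in range(1, maxEvals + 1):
        w = w - alpha * g                       # take one gradient step
        f, g = funObj(w, *args)
        if verbose:
            print("%d - loss: %.3f" % (evals, f))
        if np.linalg.norm(g, np.inf) < optTol:  # gradient is (nearly) zero
            break
    return w, f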
Code example #2
    def fit(self):

        (self.w, self.alpha, f,
         _) = minimizers.findMin(self.funObj, self.w, self.alpha,
                                 self.maxEvals, self.verbose, self.X, self.y)

        print("Training error: %.3f" %
              utils.classification_error(self.predict(self.X), self.y))
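
The utils.classification_error helper used above is not shown either; presumably it just measures the fraction of predictions that disagree with the true labels, along these lines:

import numpy as np

def classification_error(yhat, y):
    # Fraction of examples whose predicted label differs from the true label.
    return np.mean(yhat != y)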
Code example #3
    def fit(self, X, y):
        n, d = X.shape
        self.n_classes = self.k = np.unique(y).size
        # Initial guess
        self.w = np.zeros(d * self.k)
        # utils.check_gradient(self, X, y)
        (self.w, f) = minimizers.findMin(self.funObj, self.w, self.maxEvals,
                                         self.verbose, X, y)

        self.w = np.reshape(self.w, (d, self.k))
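
Once self.w has been reshaped into a (d, self.k) weight matrix, prediction for this multi-class model would typically take the argmax over the k per-class scores. A hypothetical companion predict method (not part of the original snippet, and equally applicable to the one-vs-all classifiers in the examples below) might look like:

    def predict(self, X):
        # Score every class and return the index of the best one per row.
        return np.argmax(X @ self.w, axis=1)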
Code example #4
File: linear_model.py  Project: jaysc96/CS340
    def fit(self, X, y):
        n, d = X.shape
        self.n_classes = np.unique(y).size

        # Initial guess
        self.W = np.zeros((d, self.n_classes))

        for i in range(self.n_classes):
            ytmp = y.copy().astype(float)
            ytmp[y == i] = 1
            ytmp[y != i] = -1

            self.W[:, i], _ = minimizers.findMin(self.funObj, self.W[:, i],
                                         self.maxEvals,
                                         self.verbose,
                                         X, ytmp)
Code example #5
    def fit(self, X, y):
        n, d = X.shape
        self.n_classes = np.unique(y).size

        # Initial guess
        self.W = np.zeros((d, self.n_classes))

        for i in range(self.n_classes):
            ytmp = y.copy().astype(float)
            ytmp[y == i] = 1
            ytmp[y != i] = -1

            # self.W[:, i] = np.linalg.lstsq(np.dot(X.T, X), np.dot(X.T, ytmp))[0]
            self.w = np.zeros(d)

            self.W[:, i] = minimizers.findMin(self.funObj, self.w, self.maxEvals,
                                              self.verbose, X, ytmp)[0]
Code example #6
File: linear_model.py  Project: jaysc96/CS340
    def fit(self, X, y):
        n, d = X.shape    
        w0 = np.zeros(d)
        minimize = lambda ind: minimizers.findMin(self.funObj, 
                                                  w0[ind], 
                                                  self.maxEvals, 0, 
                                                  X[:, ind], y)
        selected = set()
        selected.add(0) # always include the bias variable 
        minLoss = np.inf
        oldLoss = 0
        bestFeature = -1

        while minLoss != oldLoss:
            oldLoss = minLoss

            if self.verbose > 1:
                print("Epoch %d " % len(selected))
                print("Selected feature: %d" % (bestFeature))
                print("Min Loss: %.3f\n" % minLoss)

            for i in range(d):
                if i in selected:
                    continue
                
                selected_new = selected | {i} # add "i" to the set
                # Fit the model with feature 'i' added, then score it with
                # the L0 penalty (lammy * number of non-zero weights)
                w = np.zeros(d)
                w[list(selected_new)], loss = minimize(list(selected_new))
                loss += self.lammy * np.count_nonzero(w)
                if loss < minLoss:
                    minLoss = loss
                    bestFeature = i
                    w0 = w

            selected.add(bestFeature)

        
        # re-train the model one last time using the selected features
        self.w = w0
        self.w[list(selected)], _ = minimize(list(selected))
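
Both forward-selection examples (#6 and #7) assume a funObj that returns the loss and its gradient on the selected columns; the actual objective is not shown and could just as well be a logistic loss. As an illustration, a squared-error funObj compatible with the findMin call signature would be:

    def funObj(self, w, X, y):
        # Squared-error loss and gradient; any smooth loss that returns
        # an (f, g) pair plugs into minimizers.findMin the same way.
        residual = X @ w - y
        f = 0.5 * residual.dot(residual)
        g = X.T @ residual
        return f, g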
Code example #7
    def fit(self, X, y):
        n, d = X.shape
        w0 = np.zeros(d)
        minimize = lambda ind: minimizers.findMin(self.funObj, w0[ind],
                                                  self.maxEvals, 0, X[:, ind], y)
        selected = set()
        selected.add(0)  # always include the bias variable
        minLoss = np.inf
        oldLoss = 0
        minScore = np.inf
        minIndex = -1

        while minLoss != oldLoss:
            oldLoss = minLoss
            if self.verbose > 1:
                print("Epoch %d " % len(selected))
                print("Selected feature: %d" % (bestFeature))
                print("Min Loss: %.3f\n" % minLoss)

            for i in range(d):
                if i in selected:
                    continue

                selected_new = selected | {i}  # add "i" to the set

                # Fit the model with feature 'i' added, then keep the
                # best-scoring candidate feature.
                new_w, value = minimize(list(selected_new))
                if value < minScore:
                    minScore = value
                    minIndex = i

            selected.add(minIndex)
            minLoss = minScore

        # re-train the model one last time using the selected features
        self.w = w0
        self.w[list(selected)], _ = minimize(list(selected))
Code example #8
File: linear_model.py  Project: clementfung/distbayes
    def fit(self):

        (self.w, self.alpha, f,
         _) = minimizers.findMin(self.funObj, self.w, self.alpha,
                                 self.maxEvals, self.verbose, self.X, self.y)
Code example #9
    def minimize(ind):
        return minimizers.findMin(self.funObj, w0[ind], self.alpha,
                                  self.maxEvals, self.verbose,
                                  self.X[:, ind], self.y)