Example #1
    def compress(self, X):
        n = X.shape[0]

        # Compute Euclidean distances
        D = common.euclidean_dist_squared(X, X)
        D = np.sqrt(D)

        G = D.copy()  # copy so that editing the graph doesn't also modify D
        for i in range(n):  # Construct the kNN graph G
            # Disconnect (zero out) all but the self.nn nearest neighbours of point i
            G[i, :][D[i, :] > np.partition(D[i, :], self.nn)[self.nn]] = 0

        # Use geodesic distances
        D = common.dijkstra(G)  # D is symmetric

        # If two points are disconnected (distance is Inf)
        # then set their distance to the maximum
        # distance in the graph, to encourage them to be far apart.
        D[np.isinf(D)] = D[~np.isinf(D)].max()

        # Initialize low-dimensional representation with PCA
        pca = PCA(self.k)
        pca.fit(X)
        Z = pca.transform(X)

        # Solve for the minimizer
        z, f = findMin.findMin(self._fun_obj_z, Z.flatten(), 500, D)
        Z = z.reshape(n, self.k)
        return Z
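The graph step above keeps, for each point, only its self.nn nearest neighbours, then runs Dijkstra's algorithm to get geodesic distances (this is ISOMAP). As a rough self-contained sketch of the same idea, assuming scipy's shortest-path routine in place of the course's common module:

import numpy as np
from scipy.sparse.csgraph import shortest_path

def geodesic_distances(X, nn):
    # Pairwise Euclidean distances
    D = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1))
    G = D.copy()
    for i in range(X.shape[0]):
        # Keep only each point's nn nearest neighbours as graph edges;
        # zero entries are treated as "no edge" by shortest_path.
        G[i, D[i] > np.partition(D[i], nn)[nn]] = 0
    return shortest_path(G, method="D", directed=False)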
Example #2
    def fit(self, X):
        n, d = X.shape
        k = self.k
        self.mu = np.mean(X, 0)
        X = X - self.mu

        # Randomly initialize Z and W
        z = np.random.randn(n * k)
        w = np.random.randn(k * d)

        for i in range(10):  # do 10 "outer loop" iterations
            z, f = findMin.findMin(self._fun_obj_z, z, 10, w, X, k)
            w, f = findMin.findMin(self._fun_obj_w, w, 10, z, X, k)
            print('Iteration %d, loss = %.1f' % (i, f))

        self.W = w.reshape(k, d)
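The helpers _fun_obj_z and _fun_obj_w aren't shown. A minimal sketch of what they plausibly compute, the squared reconstruction error ||ZW - X||^2 and its gradients with respect to Z and W (written as standalone functions; an assumption, not the course's actual code):

import numpy as np

def _fun_obj_z(z, w, X, k):
    n, d = X.shape
    Z = z.reshape(n, k)
    W = w.reshape(k, d)
    R = Z @ W - X                # residual of the factorization
    f = 0.5 * np.sum(R ** 2)     # squared reconstruction error
    g = R @ W.T                  # gradient with respect to Z
    return f, g.flatten()

def _fun_obj_w(w, z, X, k):
    n, d = X.shape
    Z = z.reshape(n, k)
    W = w.reshape(k, d)
    R = Z @ W - X
    f = 0.5 * np.sum(R ** 2)
    g = Z.T @ R                  # gradient with respect to W
    return f, g.flatten()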
Example #3
    def compress(self, X):
        n, d = X.shape
        k = self.k
        X = X - self.mu
        # We didn't enforce that W was orthogonal,
        # so we need to optimize to find Z
        # (or do some matrix operations)
        z = np.zeros(n * k)
        z, f = findMin.findMin(self._fun_obj_z, z, 100, self.W.flatten(), X, k)
        Z = z.reshape(n, k)
        return Z
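The "(or do some matrix operations)" comment points at a closed-form alternative: with W fixed, minimizing ||ZW - (X - mu)||_F^2 over Z is ordinary least squares, giving Z = (X - mu) W^T (W W^T)^{-1}. A sketch of that route (a standalone function for illustration, not part of the original class):

import numpy as np

def compress_closed_form(X, W, mu):
    # Solve min_Z ||Z W - (X - mu)||_F^2 in closed form:
    # Z = (X - mu) W^T (W W^T)^{-1}
    Xc = X - mu
    return np.linalg.solve(W @ W.T, W @ Xc.T).T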
Example #4
    def fit(self, X, y):
        n, d = X.shape
        self.X = X
        K = self.kernel_fun(X, X, **self.kernel_args)
        common.check_gradient(self, K, y, n, verbose=self.verbose)
        self.u, f = findMin.findMin(self.funObj,
                                    np.zeros(n),
                                    self.maxEvals,
                                    K,
                                    y,
                                    verbose=self.verbose)
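kernel_fun and kernel_args are supplied by the caller. One plausible choice matching the kernel_fun(X1, X2, **kernel_args) calling convention is the Gaussian RBF kernel (an assumption; the example doesn't show which kernel it uses):

import numpy as np

def kernel_RBF(X1, X2, sigma=1.0):
    # Squared Euclidean distances between all pairs of rows
    D2 = (np.sum(X1 ** 2, axis=1)[:, None]
          + np.sum(X2 ** 2, axis=1)[None, :]
          - 2 * X1 @ X2.T)
    return np.exp(-D2 / (2 * sigma ** 2))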
Example #5
    def fit(self, X, y):
        n, d = X.shape

        # Initial guess
        self.w = np.zeros(d)
        common.check_gradient(self, X, y)
        (self.w, f) = findMin.findMin(self.funObj,
                                      self.w,
                                      self.maxEvals,
                                      X,
                                      y,
                                      verbose=self.verbose)
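funObj is defined elsewhere in the class. A minimal sketch of the usual objective here, the logistic loss with labels in {-1, +1} (an assumption about what funObj computes):

import numpy as np

def funObj(w, X, y):
    # Logistic loss and gradient, assuming y has entries in {-1, +1}
    yXw = y * (X @ w)
    f = np.sum(np.log(1.0 + np.exp(-yXw)))
    g = -X.T @ (y / (1.0 + np.exp(yXw)))
    return f, g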
Example #6
    def fit(self, X, y):
        n, d = X.shape
        self.n_classes = np.unique(y).size
        self.W = np.zeros((self.n_classes, d))
        self.w = self.W.flatten()
        common.check_gradient(self, X, y)

        # Optimizer takes kd and returns kd
        (self.w, f) = findMin.findMin(self.funObj,
                                      self.w,
                                      self.maxEvals,
                                      X,
                                      y,
                                      verbose=self.verbose)
        self.W = self.w.reshape((self.n_classes, d))
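As the comment says, the optimizer works on the flattened k-by-d weight matrix. A plausible sketch of a softmax (multinomial logistic) objective under that convention, assuming integer labels in {0, ..., k-1} (again not the course's exact code):

import numpy as np

def funObj_softmax(w, X, y):
    n, d = X.shape
    k = np.unique(y).size
    W = w.reshape(k, d)
    S = X @ W.T                              # n-by-k class scores
    S -= S.max(axis=1, keepdims=True)        # for numerical stability
    P = np.exp(S) / np.exp(S).sum(axis=1, keepdims=True)
    f = -np.sum(np.log(P[np.arange(n), y]))  # negative log-likelihood
    Y = np.zeros((n, k))
    Y[np.arange(n), y] = 1                   # one-hot labels
    g = (P - Y).T @ X                        # k-by-d gradient
    return f, g.flatten()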
Example #7
    def compress(self, X):
        n = X.shape[0]
        k = self.k

        # Compute Euclidean distances
        D = common.euclidean_dist_squared(X, X)
        D = np.sqrt(D)

        # Initialize low-dimensional representation with PCA
        pca = PCA(k)
        pca.fit(X)
        Z = pca.transform(X)

        # Solve for the minimizer
        z, f = findMin.findMin(self._fun_obj_z, Z.flatten(), 500, D)
        Z = z.reshape(n, k)
        return Z
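Unlike Example #1, this version skips the graph and geodesic steps and fits the embedding directly to the Euclidean distances, i.e. classic multi-dimensional scaling (MDS). A sketch of what _fun_obj_z plausibly minimizes, the MDS stress (an assumption; the real helper isn't shown):

import numpy as np

def _fun_obj_z(z, D):
    # Stress: f(Z) = sum_{i<j} (||z_i - z_j|| - D_ij)^2
    n = D.shape[0]
    k = z.size // n
    Z = z.reshape(n, k)
    f = 0.0
    g = np.zeros((n, k))
    for i in range(n):
        for j in range(i + 1, n):
            diff = Z[i] - Z[j]
            dij = np.linalg.norm(diff)
            r = dij - D[i, j]
            f += r ** 2
            if dij > 0:  # gradient of the norm is diff / dij
                g[i] += 2 * r * diff / dij
                g[j] -= 2 * r * diff / dij
    return f, g.flatten()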
Example #8
    def fit(self, X, y):
        if y.ndim == 1:
            y = y[:, None]

        self.layer_sizes = [X.shape[1]] + self.hidden_layer_sizes + [y.shape[1]]
        self.classification = y.shape[1] > 1  # assume it's classification iff y has more than 1 column

        # random init
        scale = 0.01
        weights = list()
        for i in range(len(self.layer_sizes) - 1):
            W = scale * np.random.randn(self.layer_sizes[i + 1], self.layer_sizes[i])
            b = scale * np.random.randn(1, self.layer_sizes[i + 1])
            weights.append((W, b))
        weights_flat = _flatten_weights(weights)

        # utils.check_gradient(self, X, y, len(weights_flat), epsilon=1e-6)
        weights_flat_new, f = findMin.findMin(self.funObj, weights_flat, self.max_iter, X, y, verbose=True)
        self.weights = _unflatten_weights(weights_flat_new, self.layer_sizes)
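_flatten_weights and _unflatten_weights come from the surrounding module. Plausible implementations consistent with how fit uses them (assumptions, since the originals aren't shown): flatten concatenates every (W, b) pair into one vector, and unflatten reverses that using layer_sizes.

import numpy as np

def _flatten_weights(weights):
    return np.concatenate([w.flatten() for pair in weights for w in pair])

def _unflatten_weights(weights_flat, layer_sizes):
    weights = []
    counter = 0
    for i in range(len(layer_sizes) - 1):
        W_size = layer_sizes[i + 1] * layer_sizes[i]
        b_size = layer_sizes[i + 1]
        W = weights_flat[counter:counter + W_size].reshape(
            layer_sizes[i + 1], layer_sizes[i])
        counter += W_size
        b = weights_flat[counter:counter + b_size][None]
        counter += b_size
        weights.append((W, b))
    return weights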
Example #9
    def fit(self, X, y):
        n, d = X.shape
        self.n_classes = np.unique(y).size

        # Initial guess
        self.W = np.zeros((self.n_classes, d))

        for i in range(self.n_classes):
            ytmp = y.copy().astype(float)
            ytmp[y == i] = 1
            ytmp[y != i] = -1

            self.w = self.W[i]
            common.check_gradient(self, X, ytmp)
            (self.W[i], f) = findMin.findMin(self.funObj,
                                             self.W[i],
                                             self.maxEvals,
                                             X,
                                             ytmp,
                                             verbose=self.verbose)
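This is the one-vs-all reduction: one binary classifier per class, trained with +1 for that class and -1 for everything else. A matching predict would pick the class with the largest linear score (a sketch, assuming the rest of the class looks like the other linear models here):

import numpy as np

def predict(self, X):
    # One-vs-all prediction: the class with the highest score wins
    return np.argmax(X @ self.W.T, axis=1)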
Example #10
    def fit(self, X, y):
        n, d = X.shape
        minimize = lambda ind: findMin.findMin(self.funObj,
                                               np.zeros(len(ind)),
                                               self.maxEvals,
                                               X[:, ind],
                                               y,
                                               verbose=0)
        selected = set()
        selected.add(0)
        minLoss = np.inf
        oldLoss = 0
        bestFeature = -1

        while minLoss != oldLoss:
            oldLoss = minLoss
            print("Epoch %d " % len(selected))
            print("Selected feature: %d" % (bestFeature))
            print("Min Loss: %.3f\n" % minLoss)

            for i in range(d):  # find the best feature
                if i in selected:
                    continue

                # Tentatively add feature "i" to the selected set,
                # then compute the loss and update minLoss/bestFeature
                selected_new = selected | {i}
                w, f = minimize(list(selected_new))
                # Add the L0 term
                f += self.L0_lambda * len(selected_new)

                if f <= minLoss:
                    minLoss = f
                    bestFeature = i

            selected.add(bestFeature)

        self.w = np.zeros(d)
        self.w[list(selected)], _ = minimize(list(selected))
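Since self.w is zero outside the selected set, unselected features contribute nothing to any linear prediction. A matching predict, assuming a binary classifier with labels in {-1, +1} (a sketch, not part of the original example):

import numpy as np

def predict(self, X):
    # Features outside the selected set have weight 0
    return np.sign(X @ self.w)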