Example #1
    def fit(self, X, sess, learning_rate=0.05, inner_iteration=50,
            iteration=20, batch_size=40, verbose=False):
        ## The first layer is the input layer, so its size must match the number of features in X.
        assert X.shape[1] == self.layers_sizes[0]
        ## initialize the low-rank part L and the sparse part S
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)

        if verbose:
            print ("X shape: ", X.shape)
            print ("L shape: ", self.L.shape)
            print ("S shape: ", self.S.shape)

        for it in range(iteration):
            if verbose:
                print ("Out iteration: " , it)
            ## alternating project, first project to L
            self.L = np.array(X - self.S, dtype=float)
            ## Using L to train the auto-encoder
            self.SAE.fit(self.L, sess=sess,
                         iteration=inner_iteration,
                         learning_rate=learning_rate,
                         batch_size=batch_size,
                         verbose=verbose)
            ## get the optimized L as the autoencoder's reconstruction
            self.L = self.SAE.getRecon(X = self.L, sess = sess)
            ## alternating project, now project to S and shrink S
            self.S = SHR.l21shrink(self.lambda_, (X - self.L).T).T

        return self.L, self.S
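
The fit loop above delegates the sparse step to SHR.l21shrink, which is not shown in this example. As a rough reference only, here is a minimal sketch of a column-wise l2,1 shrinkage (block soft-thresholding) operator, assuming the usual proximal operator of the l2,1 norm; the function name and signature mirror the call above, but this is not the repository's actual code. Note that fit applies it to (X - L).T and transposes back, so the shrinkage acts on rows of X - L, i.e. whole samples are shrunk or zeroed as potential outliers.

import numpy as np

def l21shrink(eps, X):
    # Column-wise l2,1 shrinkage (block soft-thresholding), sketched under the
    # assumption that SHR.l21shrink is the proximal operator of eps * ||.||_2,1:
    # each column is pulled toward zero relative to its l2 norm, and columns
    # whose norm is at most eps are zeroed entirely.
    out = X.astype(float).copy()
    col_norms = np.linalg.norm(out, axis=0)
    for j, n in enumerate(col_norms):
        if n > eps:
            out[:, j] *= 1.0 - eps / n
        else:
            out[:, j] = 0.0
    return out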
Example #2
import os
import sys

import numpy as np


def test_shrinkage():

    # Take the data from the data folder of the RobustAutoencoder repo ->

    x_data = np.random.randn(1000, 500) + 1
    # Take the data from the data folder of the RobustAutoencoder repo <-

    # import other shrinkage ->
    re_path = os.path.abspath(os.path.join(
        '../RobustAutoencoder/model/'))  # path is relative to notebook path.
    if re_path not in sys.path:
        sys.path.append(re_path)

    from shrink import l21shrink as other21
    from shrink import l1shrink as other1
    # import other shrinkage <-

    # compare shrinking ->
    eps = np.linspace(0, 2, 20)

    test21_fail = False
    for ee in eps:
        other_m = other21.l21shrink(ee, x_data)
        my_m = l21shrink(ee, x_data)

        print("Test shrink21 fails:   ", np.any(other_m != my_m))

        test21_fail = np.any(other_m != my_m)
        if test21_fail:
            break

    test1_fail = False
    for ee in eps:
        other_m = other1.shrink(ee, x_data.ravel())
        my_m = shrink(ee, x_data.ravel())

        print("Test shrink1 fails:   ", np.any(other_m != my_m))

        test1_fail = np.any(other_m != my_m)
        if test1_fail:
            break
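
The test above compares locally defined l21shrink and shrink functions against the versions shipped in the RobustAutoencoder repo; the local functions are assumed to be defined earlier in the notebook. For the element-wise case, a minimal sketch of what such a shrink could look like is given below, assuming it implements standard l1 soft-thresholding; the name and argument order simply mirror the call other1.shrink(ee, x_data.ravel()).

import numpy as np

def shrink(eps, x):
    # Element-wise l1 soft-thresholding: sign(x) * max(|x| - eps, 0).
    # Entries within [-eps, eps] become zero; all others move toward zero by eps.
    return np.sign(x) * np.maximum(np.abs(x) - eps, 0.0)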
Example #3
    def fit(self,
            X,
            sess,
            learning_rate=0.15,
            inner_iteration=50,
            iteration=20,
            batch_size=133,
            re_init=False,
            verbose=False):
        ## The first layer is the input layer, so its size must match the number of features in X.
        assert X.shape[1] == self.layers_sizes[0]
        ## initialize L, S
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)
        ##LS0 = self.L + self.S
        ## report the shapes of the input X and of L, S
        if verbose:
            print("X shape: ", X.shape)
            print("L shape: ", self.L.shape)
            print("S shape: ", self.S.shape)

        for it in range(iteration):
            if verbose:
                print("Out iteration: ", it)
            ## alternating project, first project to L
            self.L = X - self.S
            ## Using L to train the auto-encoder
            self.AE.fit(self.L,
                        sess=sess,
                        iteration=inner_iteration,
                        learning_rate=learning_rate,
                        batch_size=batch_size,
                        init=re_init,
                        verbose=verbose)
            ## get the optimized L as the autoencoder's reconstruction
            self.L = self.AE.getRecon(X=self.L, sess=sess)
            ## alternating project, now project to S and shrink S
            self.S = SHR.l21shrink(self.lambda_, (X - self.L).T).T
        return self.L, self.S
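
A hypothetical driver for the method above is sketched below. The class name RobustL21Autoencoder and its constructor arguments are guesses inferred from the attributes fit uses (layers_sizes, lambda_, AE) and may not match the actual class in the RobustAutoencoder repo; a TF1-style session is assumed because fit takes sess explicitly.

import numpy as np
import tensorflow as tf

X = np.random.randn(2000, 784)  # rows are samples, columns are features

with tf.Session() as sess:  # TF1-style session, as implied by the sess argument
    # Hypothetical class and constructor signature; check the repo for the real API.
    rae = RobustL21Autoencoder(sess=sess, lambda_=0.1, layers_sizes=[784, 400, 200])
    L, S = rae.fit(X, sess=sess, inner_iteration=50, iteration=20,
                   batch_size=133, verbose=True)
    # L is the autoencoder's reconstruction of the clean part; rows of S with
    # nonzero l2 norm flag the samples treated as outliers.
    outlier_idx = np.where(np.linalg.norm(S, axis=1) > 0)[0]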