Code Example #1
    def fit(self, X, sess, learning_rate=0.15, inner_iteration=50,
            iteration=20, batch_size=50, verbose=False):
        ## The first layer must match the input layer, so the sizes must agree.
        assert X.shape[1] == self.layers_sizes[0]

        ## initialize L, S, and mu (parameter of the shrinkage operator)
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)

        mu = X.size / (4.0 * nplin.norm(X, 1))
        print("shrink parameter:", self.lambda_ / mu)
        LS0 = self.L + self.S

        XFnorm = nplin.norm(X, 'fro')
        if verbose:
            print("X shape: ", X.shape)
            print("L shape: ", self.L.shape)
            print("S shape: ", self.S.shape)
            print("mu: ", mu)
            print("XFnorm: ", XFnorm)

        for it in range(iteration):
            if verbose:
                print("Out iteration: ", it)
            ## alternating project, first project to L
            self.L = X - self.S
            ## Using L to train the auto-encoder
            self.AE.fit(X = self.L, sess = sess,
                                    iteration = inner_iteration,
                                    learning_rate = learning_rate,
                                    batch_size = batch_size,
                                    verbose = verbose)
            ## get the optimized L
            self.L = self.AE.getRecon(X = self.L, sess = sess)
            ## alternating project, now project to S
            self.S = SHR.shrink(self.lambda_/mu, (X - self.L).reshape(X.size)).reshape(X.shape)

            ## break criterion 1: the L and S are close enough to X
            c1 = nplin.norm(X - self.L - self.S, 'fro') / XFnorm
            ## break criterion 2: there are no changes in L and S
            c2 = np.min([mu,np.sqrt(mu)]) * nplin.norm(LS0 - self.L - self.S) / XFnorm

            if verbose:
                print("c1: ", c1)
                print("c2: ", c2)

            if c1 < self.error and c2 < self.error:
                print("early break")
                break
            ## save L + S for c2 check in the next iteration
            LS0 = self.L + self.S
            
        return self.L, self.S
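
The SHR.shrink call above is the shrinkage (soft-thresholding) operator, i.e. the proximal operator of the l1 norm; it is what makes S sparse. A minimal NumPy sketch, assuming the same elementwise semantics as the shrink module imported as SHR:

import numpy as np

def shrink(epsilon, x):
    # Elementwise soft-thresholding: sign(x) * max(|x| - epsilon, 0).
    # Entries with magnitude <= epsilon are zeroed; the rest move toward
    # zero by epsilon, which is what makes S sparse.
    return np.sign(x) * np.maximum(np.abs(x) - epsilon, 0.0)
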
Code Example #2
import os
import sys

import numpy as np


def test_shrinkage():

    # Take the data from the data folder of the RobustAutoencoder repo ->
    x_data = np.random.randn(1000, 500) + 1
    # Take the data from the data folder of the RobustAutoencoder repo <-

    # import other shrinkage ->
    re_path = os.path.abspath(os.path.join(
        '../RobustAutoencoder/model/'))  # path is relative to notebook path.
    if re_path not in sys.path:
        sys.path.append(re_path)

    from shrink import l21shrink as other21
    from shrink import l1shrink as other1
    # import other shrinkage <-

    # compare shrinking ->
    eps = np.linspace(0, 2, 20)

    test21_fail = False
    for ee in eps:
        other_m = other21.l21shrink(ee, x_data)
        my_m = l21shrink(ee, x_data)

        print("Test shrink21 fails:   ", np.any(other_m != my_m))

        test21_fail = np.any(other_m != my_m)
        if test21_fail:
            break

    test1_fail = False
    for ee in eps:
        other_m = other1.shrink(ee, x_data.ravel())
        my_m = shrink(ee, x_data.ravel())

        print("Test shrink1 fails:   ", np.any(other_m != my_m))

        test1_fail = np.any(other_m != my_m)
        if test1_fail:
            break
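
The local shrink and l21shrink under test are assumed to be defined earlier in the notebook. The elementwise l1 version is sketched under Code Example #1; a minimal sketch of the column-wise l2,1 version:

import numpy as np

def l21shrink(epsilon, x):
    # Column-wise l2,1 shrinkage: shrink the l2 norm of each column by
    # epsilon, and zero out columns whose norm does not exceed epsilon.
    output = np.zeros_like(x)
    norms = np.linalg.norm(x, ord=2, axis=0)
    for j, n in enumerate(norms):
        if n > epsilon:
            output[:, j] = x[:, j] * (1.0 - epsilon / n)
    return output
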
Code Example #3
    def fit(self,
            train_dataset,
            path,
            model_name,
            iteration=30,
            batch_size=128,
            learning_rate=1e-4,
            epochs=20,
            verbose=False):
        # Initialize L, S (dtype: tensor)
        X = train_dataset.tensors[0]
        self.L = torch.zeros(X.size())
        self.S = torch.zeros(X.size())

        # Calculate mu (parameter of the shrinkage operator)
        X_numpy = X.detach().cpu().numpy()
        # np.linalg.norm with ord=1 expects a 1-D or 2-D array, so reshape
        # the (square) images to 2-D just for this computation.
        mu = X_numpy.size / (4.0 * np.linalg.norm(
            X_numpy.reshape(-1, X_numpy.shape[-1] * X_numpy.shape[-1]), 1))
        print("Shrink parameter:", self.lambda_ / mu)
        LS0 = self.L + self.S

        XFnorm = torch.norm(X, 'fro')  # Frobenius norm
        if verbose:
            print("X shape:", X.shape)
            print("mu:", mu)
            print("XFnorm:", XFnorm)

        for it in range(iteration):
            print('iteration:', it)
            if verbose:
                print("Out iteration:", it)

            self.L = X - self.S
            # Convert L to a train_loader
            ae_dataset = Data.TensorDataset(self.L)
            ae_train_loader = Data.DataLoader(dataset=ae_dataset,
                                              batch_size=batch_size,
                                              shuffle=True)

            # Use L to train autoencoder and get optimized(reconstructed) L
            model = self.ae.train(device=self.device,
                                  model=self.dae,
                                  train_loader=ae_train_loader,
                                  learning_rate=learning_rate,
                                  epochs=epochs)
            recon_loader = Data.DataLoader(dataset=ae_dataset,
                                           batch_size=1,
                                           shuffle=False)
            self.L = self.ae.reconstruction(self.device, model,
                                            recon_loader).detach().cpu()
            # Alternate project of S
            self.S = SHR.shrink(self.lambda_ / mu,
                                (X - self.L).reshape(-1)).reshape(X.shape)

            # Break criterion 1: L and S are close enough to X
            c1 = torch.norm((X - self.L - self.S), 'fro') / XFnorm
            # Break criterion 2: there is no change for L and S
            c2 = np.min([mu, np.sqrt(mu)]) * torch.norm(LS0 - self.L - self.S) / XFnorm
            self.errors.append(c1)

            if it == iteration - 1:
                print("save autoencoder:")
                torch.save(model.state_dict(),
                           path + 'model_rda_' + model_name + '.pth')
                # plots
                print("plot examples of reconstruction:")
                self.plot(path, X[:10], self.L[:10])
            if verbose:
                print("c1:", c1)
                print("c2:", c2)

            if c1 < self.error and c2 < self.error:
                print("early break")
                break

            LS0 = self.L + self.S

        return self.L
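
The inner loop above wraps L in a fresh TensorDataset/DataLoader on every outer iteration. A self-contained sketch of that plumbing (toy shapes only; no model involved):

import torch
import torch.utils.data as Data

L = torch.randn(64, 28, 28)                 # stands in for self.L
ae_dataset = Data.TensorDataset(L)
ae_train_loader = Data.DataLoader(dataset=ae_dataset,
                                  batch_size=16,
                                  shuffle=True)

for (batch,) in ae_train_loader:            # each item is a 1-tuple of tensors
    print(batch.shape)                      # torch.Size([16, 28, 28])
    break
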
Code Example #4
File: RobustCNNVAE.py  Project: huiminren/RobustVAE
    def fit(self,
            X,
            path="",
            num_gen=10,
            iteration=20,
            num_epoch=100,
            batch_size=64,
            verbose=False):
        # initialize L, S, and mu (parameter of the shrinkage operator)
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)

        # nplin.norm with ord=1 expects a 1-D or 2-D array, so reshape the
        # (square) images to 2-D just for the norm computations.
        # https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.linalg.norm.html
        X_norm = X.reshape(-1, X.shape[-1] * X.shape[-1])

        mu = X_norm.size / (4.0 * nplin.norm(X_norm, 1))
        print("shrink parameter:", self.lambda_ / mu)
        LS0 = self.L + self.S

        XFnorm = nplin.norm(X_norm, 'fro')
        if verbose:
            print("X shape: ", X.shape)
            print("L shape: ", self.L.shape)
            print("S shape: ", self.S.shape)
            print("mu: ", mu)
            print("XFnorm: ", XFnorm)

        for it in range(iteration):
            if verbose:
                print("Out iteration: ", it)
            ## alternating project, first project to L
            self.L = X - self.S
            ## Using L to train the auto-encoder
            self.cnnvae.fit(X_in=self.L,
                            path=path,
                            file_name="vae_loss" + str(it) + ".npy",
                            num_epoch=num_epoch,
                            batch_size=batch_size)
            ## get the optimized L
            self.L = self.cnnvae.reconstructor(self.L)
            ## alternating project, now project to S
            self.S = SHR.shrink(self.lambda_ / mu,
                                (X - self.L).reshape(X.size)).reshape(X.shape)

            ## break criterion 1: the L and S are close enough to X
            c1 = nplin.norm((X - self.L - self.S).reshape(
                -1, X.shape[-1] * X.shape[-1]), 'fro') / XFnorm
            ## break criterion 2: there are no changes in L and S
            c2 = np.min([mu, np.sqrt(mu)]) * nplin.norm(LS0 - self.L - self.S) / XFnorm
            self.errors.append(c1)

            if it % 1 == 0:  # always true: plot generated images every iteration
                print("generation images:")
                self.cnnvae.gen_plot(FLAG_gen=True,
                                     x="",
                                     num_gen=num_gen,
                                     path=path,
                                     fig_name="generator_" + str(it) + ".png")

            if it == iteration - 1:
                print("generate fid images")
                self.cnnvae.generation_fid(path=path)

            if verbose:
                print("c1: ", c1)
                print("c2: ", c2)

            if c1 < self.error and c2 < self.error:
                print("early break")
                break
            ## save L + S for c2 check in the next iteration
            LS0 = self.L + self.S

        return self.L, self.S, np.array(self.errors)
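
A quick sanity check of the reshape used for mu above (illustrative values only; square images assumed):

import numpy as np
import numpy.linalg as nplin

X = np.random.rand(8, 28, 28)                      # batch of square images
X_norm = X.reshape(-1, X.shape[-1] * X.shape[-1])  # (8, 784): 2-D for nplin.norm
mu = X_norm.size / (4.0 * nplin.norm(X_norm, 1))   # ord=1 on 2-D: max absolute column sum
print(X_norm.shape, mu)
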
Code Example #5
    def fit(self, X, path="", num_gen=10, iteration=20, num_epoch=100,
            batch_size=64, verbose=False):

        """
        X: input data
        path: path for saving losses and generated images
        num_gen: number of generated images
        iteration: number of outer iterations
        num_epoch: number of epochs for each VAE fit (inner iteration)
        batch_size: batch size of the VAE
        """
        
        ## initialize L, S, and mu (parameter of the shrinkage operator)
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)
        
        mu = X.size / (4.0 * nplin.norm(X, 1))
        print("shrink parameter:", self.lambda_ / mu)
        LS0 = self.L + self.S

        XFnorm = nplin.norm(X, 'fro')
        if verbose:
            print("X shape: ", X.shape)
            print("L shape: ", self.L.shape)
            print("S shape: ", self.S.shape)
            print("mu: ", mu)
            print("XFnorm: ", XFnorm)
        
        
        for it in range(iteration):
            if verbose:
                print("Out iteration: ", it)
            ## alternating project, first project to L
            self.L = X - self.S
            ## Using L to train the auto-encoder
            self.vae.fit(X=self.L, path=path, file_name="vae_loss" + str(it),
                         num_epoch=num_epoch, batch_size=batch_size)
            ## get the optimized L
            self.L = self.vae.reconstructor(self.L)
            ## alternating project, now project to S
            self.S = SHR.shrink(self.lambda_/mu, (X - self.L).reshape(X.size)).reshape(X.shape)

            ## break criterion 1: the L and S are close enough to X
            c1 = nplin.norm(X - self.L - self.S, 'fro') / XFnorm
            ## break criterion 2: there are no changes in L and S
            c2 = np.min([mu,np.sqrt(mu)]) * nplin.norm(LS0 - self.L - self.S) / XFnorm
            self.errors.append(c1)
            
            # generate sample images for visual check and FID computation
            if it == iteration-1:
                print("generation images:")
                self.vae.gen_plot(FLAG_gen=True, x="", num_gen=num_gen,
                                  path=path, fig_name="generator_" + str(it) + ".png")
                print("generate fid images")
                self.vae.generation_fid(path=path)
            
            if verbose:
                print("c1: ", c1)
                print("c2: ", c2)

            if c1 < self.error and c2 < self.error:
                print("early break")
                break
            ## save L + S for c2 check in the next iteration
            LS0 = self.L + self.S
        
        return self.L, self.S, np.array(self.errors)
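
All five variants share the same early-stopping test: c1 checks that L + S reconstructs X to within a relative tolerance, and c2 checks that L + S has stopped moving between outer iterations. Pulled out as a standalone helper (a sketch; the names X, L, S, LS0 and tol are illustrative, with 2-D arrays assumed):

import numpy as np
import numpy.linalg as nplin

def converged(X, L, S, LS0, mu, tol):
    XFnorm = nplin.norm(X, 'fro')
    # c1: relative residual of the decomposition X ~ L + S
    c1 = nplin.norm(X - L - S, 'fro') / XFnorm
    # c2: scaled change of L + S since the previous outer iteration
    c2 = min(mu, np.sqrt(mu)) * nplin.norm(LS0 - L - S) / XFnorm
    return c1 < tol and c2 < tol
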