Example 1
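# These snippets are fit_transform methods excerpted from a larger class.
# The imports below are what they appear to rely on (an assumption, not part
# of the original listing); softplus_inverse, activateVariable, getOptimizer,
# and nndsvda_init are project helpers not shown here, with plausible
# sketches given further down the page.
import numpy as np
import numpy.random as rand
import tensorflow as tf
from tqdm import trange
from sklearn.metrics import auc, roc_curve
from sklearn.decomposition._nmf import _beta_divergence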
    def fit_transform(self, X, y, W_init):
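        """Encoder-based (amortized) supervised matrix factorization.

        Jointly fits a decoder W, a linear encoder (A_enc, B_enc) that maps
        X to latent scores S, and a logistic head (phi, B) by minibatch
        gradient descent on a reconstruction + cross-entropy objective.

        X : (N, p) data matrix; y : (N,) binary labels;
        W_init : (n_components, p) initial decoder.
        Returns the latent scores for all rows of X.
        """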

        ################################
        # Declare our latent variables #
        ################################
        if self.decoderActiv == 'softplus':
            W_l = softplus_inverse(W_init)
            W_r = tf.Variable(W_l.astype(np.float32))
        else:
            W_r = tf.Variable(W_init.astype(np.float32))

        phi = tf.Variable(np.zeros((self.n_components, 1)).astype(np.float32))
        B = tf.Variable(.1 * rand.randn(1).astype(np.float32))

        #######################
        # Change X to float32 #
        #######################
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        N, p = X.shape

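        #################################
        # Initialize the linear encoder #
        #################################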
        A_enci = 1 / np.sqrt(p) * rand.randn(p, self.n_components).astype(
            np.float32)
        B_enci = 1 / np.sqrt(self.n_components) * rand.randn(
            self.n_components).astype(np.float32)
        A_enc = tf.Variable(A_enci)
        B_enc = tf.Variable(B_enci)

        #####################
        # Get the optimizer #
        #####################
        optimizer = getOptimizer(self.LR, self.trainingMethod)
        optimizer2 = getOptimizer(.1 * self.LR, self.trainingMethod)

        losses_gen = np.zeros(self.nIter)
        losses_sup = np.zeros(self.nIter)

        trainable_variables = [W_r, phi, B, A_enc, B_enc]

        ############################
        # Actually train the model #
        ############################
        for t in trange(self.nIter):
            idx = rand.choice(N, size=self.batchSize, replace=False)
            X_batch = X[idx]
            Y_batch = y[idx]
            with tf.GradientTape() as tape:
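                # Forward pass: activate W and rescale its rows to norm
                # sqrt(p), encode the minibatch into latent scores S,
                # reconstruct X, and score y from S via the logistic head.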
                W_un = activateVariable(W_r, self.decoderActiv)
                W = tf.math.l2_normalize(W_un, axis=1) * np.sqrt(p)
                S_latent = tf.matmul(X_batch, A_enc) + B_enc
                S = activateVariable(S_latent, self.factorActiv)
                X_recon = tf.matmul(S, W)
                loss_gen = tf.nn.l2_loss(X_batch - X_recon) / N
                y_hat = tf.matmul(S, phi) + B
                ce = tf.nn.sigmoid_cross_entropy_with_logits(
                    Y_batch, tf.squeeze(y_hat))
                loss_sup = tf.reduce_mean(ce)
                loss_reg = tf.nn.l2_loss(phi)
                loss = loss_gen + self.reg * loss_reg + self.mu * loss_sup

            gradients = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(gradients, trainable_variables))

            losses_gen[t] = loss_gen.numpy()
            losses_sup[t] = loss_sup.numpy()

        self.losses_gen = losses_gen
        self.losses_sup = losses_sup

        W_f = activateVariable(W_r, self.decoderActiv)
        W_ = tf.math.l2_normalize(W_f, axis=1) * np.sqrt(p)
        S_r = tf.matmul(X, A_enc) + B_enc
        S_f = activateVariable(S_r, self.factorActiv)

        self.components_ = W_.numpy()
        Scores = S_f.numpy()
        self.phi_ = phi.numpy()
        self.b_ = B.numpy()
        self.A_enc = A_enc.numpy()
        self.B_enc = B_enc.numpy()
        self.reconstruction_err_ = _beta_divergence(X,
                                                    Scores,
                                                    self.components_,
                                                    beta=self.beta)
        yh = np.dot(Scores, self.phi_) + self.b_
        fpr, tpr, _ = roc_curve(y, yh)
        self.sup_loss_ = auc(fpr, tpr)
        return Scores
Example 2
    def fit_transform(self, X, y, S_init=None, W_init=None):
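        """Full-batch variant: the latent scores S are optimized directly as
        a free variable (no encoder). When S_init is None, both S and W are
        initialized with NNDSVDA.

        X : (N, p) data matrix; y : (N,) binary labels.
        Returns the fitted latent scores S.
        """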

        ######################################
        # Initialize the scores and features #
        ######################################
        if S_init is None:
            S_init, W_init = nndsvda_init(X, self.n_components)

        ################################
        # Declare our latent variables #
        ################################
        if self.decoderActiv == 'softplus':
            W_l = softplus_inverse(W_init)
            W_r = tf.Variable(W_l.astype(np.float32))
        else:
            W_r = tf.Variable(W_init.astype(np.float32))
        if self.factorActiv == 'softplus':
            S_l = softplus_inverse(S_init)
            S_r = tf.Variable(S_l.astype(np.float32))
        else:
            S_r = tf.Variable(S_init.astype(np.float32))
        phi = tf.Variable(np.zeros((self.n_components, 1)).astype(np.float32))
        B = tf.Variable(.1 * rand.randn(1).astype(np.float32))

        X_recon_orig = np.dot(S_init, W_init)

        #######################
        # Change X to float32 #
        #######################
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        N, p = X.shape

        #####################
        # Get the optimizer #
        #####################
        optimizer = getOptimizer(self.LR, self.trainingMethod)
        optimizer2 = getOptimizer(.1 * self.LR, self.trainingMethod)

        losses_gen = np.zeros(self.nIter)
        losses_sup = np.zeros(self.nIter)

        trainable_variables = [W_r, phi, B, S_r]
        trainable_variables2 = [S_r]

        ############################
        # Actually train the model #
        ############################
        for t in trange(self.nIter):

            with tf.GradientTape() as tape:
                W_un = activateVariable(W_r, self.decoderActiv)
                W = tf.math.l2_normalize(W_un, axis=1) * np.sqrt(p)
                S = activateVariable(S_r, self.factorActiv)
                X_recon = tf.matmul(S, W)
                loss_gen = tf.nn.l2_loss(X - X_recon) / N
                y_hat = tf.matmul(S, phi) + B
                ce = tf.nn.sigmoid_cross_entropy_with_logits(
                    y, tf.squeeze(y_hat))
                loss_sup = tf.reduce_mean(ce)
                loss_reg = tf.nn.l2_loss(phi)
                loss = loss_gen + self.reg * loss_reg + self.mu * loss_sup
            gradients = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(gradients, trainable_variables))

            losses_gen[t] = loss_gen.numpy()
            losses_sup[t] = loss_sup.numpy()

        self.losses_gen = losses_gen
        self.losses_sup = losses_sup

        W_f = activateVariable(W_r, self.decoderActiv)
        W_ = tf.math.l2_normalize(W_f, axis=1) * np.sqrt(p)
        S_f = activateVariable(S_r, self.factorActiv)

        self.components_ = W_.numpy()
        Scores = S_f.numpy()
        self.phi_ = phi.numpy()
        self.b_ = B.numpy()
        self.reconstruction_err_ = _beta_divergence(X,
                                                    Scores,
                                                    self.components_,
                                                    beta=self.beta)
        yh = y_hat.numpy()
        fpr, tpr, _ = roc_curve(y, yh)
        self.sup_loss_ = auc(fpr, tpr)
        return Scores
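
# nndsvda_init is not shown in this listing. Below is a minimal sketch of a
# plausible implementation (an assumption) that wraps scikit-learn's NNDSVDA
# initializer; the real helper may differ.
from sklearn.decomposition._nmf import _initialize_nmf

def nndsvda_init(X, n_components):
    # Returns initial scores S (n_samples, k) and components W (k, n_features)
    # so that X is approximately S @ W.
    S, W = _initialize_nmf(X, n_components, init='nndsvda')
    return S, W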
Example 3
    def fit_transform(self, X, y):
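        """Joint factorization of X into S and W (NNDSVDA initialization)
        in which only the first n_blessed latent components feed the
        supervised logistic head.

        Returns the fitted latent scores S.
        """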

        ######################################
        # Initialize the scores and features #
        ######################################
        S_init, W_init = nndsvda_init(X, self.n_components)

        ################################
        # Declare our latent variables #
        ################################
        if self.decoderActiv == 'softplus':
            W_l = softplus_inverse(W_init)
            W_r = tf.Variable(W_l.astype(np.float32))
        else:
            W_r = tf.Variable(W_init.astype(np.float32))
        if self.factorActiv == 'softplus':
            S_l = softplus_inverse(S_init)
            S_r = tf.Variable(S_l.astype(np.float32))
        else:
            S_r = tf.Variable(S_init.astype(np.float32))
        phi = tf.Variable(np.zeros((self.n_blessed, 1)).astype(np.float32))
        B = tf.Variable(.1 * rand.randn(1).astype(np.float32))

        #######################
        # Change X to float32 #
        #######################
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        N, p = X.shape

        #####################
        # Get the optimizer #
        #####################
        optimizer = getOptimizer(self.LR, self.trainingMethod)

        losses = np.zeros(self.nIter)

        trainable_variables = [W_r, S_r]

        ############################
        # Actually train the model #
        ############################
        for t in trange(self.nIter):

            with tf.GradientTape() as tape:
                W = activateVariable(W_r, self.decoderActiv)
                S = activateVariable(S_r, self.factorActiv)
                X_recon = tf.matmul(S, W)
                loss_gen = tf.nn.l2_loss(X - X_recon) / N
                y_hat = tf.matmul(S[:, :self.n_blessed], phi) + B
                # Binary cross-entropy on the single logit computed from the
                # blessed components.
                ce = tf.nn.sigmoid_cross_entropy_with_logits(
                    y, tf.squeeze(y_hat))
                loss_sup = tf.reduce_mean(ce)
                loss = loss_gen + self.mu * loss_sup

            gradients = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(gradients, trainable_variables))

        W_f = activateVariable(W_r, self.decoderActiv)
        S_f = activateVariable(S_r, self.factorActiv)

        self.components_ = W_f.numpy()
        Scores = S_f.numpy()
        self.phi_ = phi.numpy()
        self.b_ = B.numpy()
        self.reconstruction_err_ = _beta_divergence(X,
                                                    Scores,
                                                    self.components_,
                                                    beta=self.beta)
        yh = y_hat.numpy()
        fpr, tpr, _ = roc_curve(y, yh)
        self.sup_loss_ = auc(fpr, tpr)
        return Scores
Example 4
    def fit_transform(self, X, y, W_init):
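        """Two-phase variant of the encoder-based model: first pretrain the
        encoder and decoder on the unsupervised reconstruction objective
        (softplus activations, mean-square penalties on W and S), then train
        all parameters jointly with the supervised cross-entropy term.

        X : (N, p) data matrix; y : (N,) binary labels;
        W_init : (n_components, p) initial decoder.
        Returns the latent scores for all rows of X.
        """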

        ################################
        # Declare our latent variables #
        ################################
        if self.decoderActiv == 'softplus':
            W_l = softplus_inverse(W_init)
            W_r = tf.Variable(W_l.astype(np.float32))
        else:
            W_r = tf.Variable(W_init.astype(np.float32))

        phi = tf.Variable(np.zeros((self.n_components, 1)).astype(np.float32))
        B = tf.Variable(.1 * rand.randn(1).astype(np.float32))


        #######################
        # Change X to float32 #
        #######################
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        N, p = X.shape

        A_enci = 1 / np.sqrt(p) * rand.randn(p, self.n_components).astype(
            np.float32)
        B_enci = 1 / np.sqrt(self.n_components) * rand.randn(
            self.n_components).astype(np.float32)
        A_enc = tf.Variable(A_enci)
        B_enc = tf.Variable(B_enci)

        #####################
        # Get the optimizer #
        #####################
        optimizer = getOptimizer(self.LR, self.trainingMethod)
        optimizer2 = getOptimizer(self.LR, self.trainingMethod)
        optimizer3 = getOptimizer(self.LR, self.trainingMethod)

        losses_gen = np.zeros(self.nIter)
        losses_sup = np.zeros(self.nIter)

        trainable_variables = [W_r, phi, B, A_enc, B_enc]
        trainable_variables3 = [W_r, A_enc, B_enc]
        trainable_variables2 = [phi, B, A_enc, B_enc]

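        ############################
        # Pretrain encoder/decoder #
        ############################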
        for t in trange(2000):
            idx = rand.choice(N, size=self.batchSize, replace=False)
            X_batch = X[idx]
            with tf.GradientTape() as tape:
                W = tf.nn.softplus(W_r)
                reg_W = tf.reduce_mean(tf.square(W))
                S_latent = tf.matmul(X_batch, A_enc) + B_enc
                S = tf.nn.softplus(S_latent)
                reg_S = tf.reduce_mean(tf.square(S))
                X_recon = tf.matmul(S, W)
                loss_gen = tf.reduce_mean(tf.square(X_recon - X_batch))
                loss = loss_gen + reg_W + reg_S

            gradients3 = tape.gradient(loss, trainable_variables3)
            optimizer3.apply_gradients(zip(gradients3, trainable_variables3))

        ############################
        # Actually train the model #
        ############################
        for t in trange(self.nIter):
            idx = rand.choice(N, size=self.batchSize, replace=False)
            X_batch = X[idx]
            Y_batch = y[idx]
            with tf.GradientTape() as tape:
                W = tf.nn.softplus(W_r)
                reg_W = tf.reduce_mean(tf.square(W))
                S_latent = tf.matmul(X_batch, A_enc) + B_enc
                S = tf.nn.softplus(S_latent)
                reg_S = tf.reduce_mean(tf.square(S))
                X_recon = tf.matmul(S, W)
                loss_gen = tf.reduce_mean(tf.square(X_recon - X_batch))
                y_hat = tf.matmul(S, phi) + B
                ce = tf.nn.sigmoid_cross_entropy_with_logits(
                    Y_batch, tf.squeeze(y_hat))
                loss_sup = tf.reduce_mean(ce)
                loss_reg = tf.nn.l2_loss(phi)
                loss = loss_gen + self.reg * loss_reg + self.mu * loss_sup + reg_W + reg_S
            if t == 0:
                print(np.mean((W_init - W.numpy())**2))
                X_r = X_recon.numpy()
                print(np.mean((X_r - X_batch)**2))

            gradients = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(gradients, trainable_variables))

            losses_gen[t] = loss_gen.numpy()
            losses_sup[t] = loss_sup.numpy()
            # (Disabled) optional extra supervised-only update of
            # [phi, B, A_enc, B_enc] via optimizer2 and trainable_variables2.

        self.losses_gen = losses_gen
        self.losses_sup = losses_sup

        W_ = tf.nn.softplus(W_r)
        S_r = tf.matmul(X, A_enc) + B_enc
        S_f = tf.nn.softplus(S_r)

        self.components_ = W_.numpy()
        Scores = S_f.numpy()
        self.phi_ = phi.numpy()
        self.b_ = B.numpy()
        self.A_enc = A_enc.numpy()
        self.B_enc = B_enc.numpy()
        self.W_ = W_
        self.reconstruction_err_ = _beta_divergence(X,
                                                    Scores,
                                                    self.components_,
                                                    beta=self.beta)
        yh = np.dot(Scores, self.phi_) + self.b_
        fpr, tpr, _ = roc_curve(y, yh)
        self.sup_loss_ = auc(fpr, tpr)
        return Scores
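
# The snippets above also rely on three small helpers that are not shown.
# Minimal sketches of plausible implementations follow (assumptions, not the
# original code); activation names other than 'softplus'/'relu' and optimizer
# names other than 'Adam'/'SGD'/'Nadam' are illustrative guesses.
import numpy as np
import tensorflow as tf

def softplus_inverse(x):
    # Inverse of softplus, log(exp(x) - 1), written stably for positive x.
    x = np.asarray(x, dtype=np.float64)
    return x + np.log(-np.expm1(-x))

def activateVariable(v, activ):
    # Map an unconstrained variable to its constrained (e.g. nonnegative) form.
    if activ == 'softplus':
        return tf.nn.softplus(v)
    if activ == 'relu':
        return tf.nn.relu(v)
    return v  # identity / linear

def getOptimizer(lr, method):
    # Return a Keras optimizer with the requested learning rate.
    if method == 'Nadam':
        return tf.keras.optimizers.Nadam(learning_rate=lr)
    if method == 'SGD':
        return tf.keras.optimizers.SGD(learning_rate=lr)
    return tf.keras.optimizers.Adam(learning_rate=lr)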