Example #1 (score: 0)
    def transform_Y(self, X, y):
        """Infer latent scores for new data X with labels y.

        The fitted parameters (components W, phi, b) stay frozen; only the
        raw score variable is optimized against the reconstruction L2 loss
        plus mu-weighted supervised sigmoid cross-entropy.

        Returns (Scores, losses): activated scores as a numpy array and the
        per-iteration total loss trace.
        """
        # NNDSVDA initialization for the scores (feature part discarded).
        S_init, _ = nndsvda_init(X, self.n_components)

        # Frozen parameters learned during fit.
        W = tf.constant(self.components_.astype(np.float32))
        Phi_ = self.phi_
        B = self.b_

        # Raw (pre-activation) score variable; invert softplus so the
        # activated variable starts exactly at the NNDSVDA initialization.
        if self.factorActiv == 'softplus':
            S_r = tf.Variable(softplus_inverse(S_init).astype(np.float32))
        else:
            S_r = tf.Variable(S_init.astype(np.float32))

        # Cast inputs for tensorflow.
        X = X.astype(np.float32)
        y = y.astype(np.float32)

        optimizer = getOptimizer(self.LR, self.trainingMethod)
        trainable_variables = [S_r]
        losses = np.zeros(self.nIter)

        # Gradient-descent refinement of the scores only.
        for step in trange(self.nIter):
            with tf.GradientTape() as tape:
                S = activateVariable(S_r, self.factorActiv)
                X_recon = tf.matmul(S, W)
                y_hat = tf.matmul(S, Phi_) + B
                ce = tf.nn.sigmoid_cross_entropy_with_logits(
                    y, tf.squeeze(y_hat))
                loss_sup = tf.reduce_mean(ce)
                loss = tf.nn.l2_loss(X - X_recon) + self.mu * loss_sup

            losses[step] = loss.numpy()

            grads = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(grads, trainable_variables))

        Scores = activateVariable(S_r, self.factorActiv).numpy()
        return Scores, losses
Example #2 (score: 0)
    def transform_noY(self, X):
        """Infer latent scores for new data X without labels.

        Unsupervised twin of transform_Y: the fitted components W stay
        frozen while the raw score variable is optimized against the
        reconstruction L2 loss only.

        Returns (Scores, losses): activated scores as a numpy array and the
        per-iteration loss trace.
        """
        # NNDSVDA initialization for the scores (feature part discarded).
        S_init, _ = nndsvda_init(X, self.n_components)

        # Frozen components from fit.
        W = tf.constant(self.components_.astype(np.float32))

        # Raw (pre-activation) score variable; invert softplus so the
        # activated variable starts exactly at the NNDSVDA initialization.
        if self.factorActiv == 'softplus':
            S_r = tf.Variable(softplus_inverse(S_init).astype(np.float32))
        else:
            S_r = tf.Variable(S_init.astype(np.float32))

        # Cast input for tensorflow.
        X = X.astype(np.float32)

        optimizer = getOptimizer(self.LR, self.trainingMethod)
        trainable_variables = [S_r]
        losses = np.zeros(self.nIter)

        # Gradient-descent refinement of the scores only.
        for step in trange(self.nIter):
            with tf.GradientTape() as tape:
                S = activateVariable(S_r, self.factorActiv)
                X_recon = tf.matmul(S, W)
                loss = tf.nn.l2_loss(X - X_recon)

            losses[step] = loss.numpy()

            grads = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(grads, trainable_variables))

        Scores = activateVariable(S_r, self.factorActiv).numpy()
        return Scores, losses
Example #3 (score: 0)
    def fit_transform(self, X, y):
        """Jointly fit components W, scores S, and the supervised head
        (phi, b) on data X with labels y, returning the activated scores.

        Fixes relative to the original:
        - N (number of samples, used to scale loss_gen) was never defined
          in this method; it is now taken from X.shape.
        - The loss trace allocated in `losses` was never filled; it is now
          recorded every iteration.
        - `loss_sup` was a per-example vector, making `loss` non-scalar;
          it is reduced to its mean so the total loss is well-defined.
        - The final S was activated with decoderActiv instead of
          factorActiv (the activation used for S inside the loop).
        - `self.components` (missing underscore) is now `self.components_`.
        """
        ######################################
        # Initialize the scores and features #
        ######################################
        S_init, W_init = nndsvda_init(X, self.n_components)

        ################################
        # Declare our latent variables #
        ################################
        # Raw variables; invert softplus so the activated variables start
        # exactly at the NNDSVDA initialization.
        if self.decoderActiv == 'softplus':
            W_r = tf.Variable(softplus_inverse(W_init).astype(np.float32))
        else:
            W_r = tf.Variable(W_init.astype(np.float32))
        if self.factorActiv == 'softplus':
            S_r = tf.Variable(softplus_inverse(S_init).astype(np.float32))
        else:
            S_r = tf.Variable(S_init.astype(np.float32))
        # Supervised head: only the first n_blessed factors feed the label.
        phi = tf.Variable(np.zeros((self.n_blessed, 1)).astype(np.float32))
        B = tf.Variable(.1 * rand.randn(1).astype(np.float32))

        #######################
        # Change X to float32 #
        #######################
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        # N was previously undefined here (NameError in the loop).
        N, p = X.shape

        #####################
        # Get the optimizer #
        #####################
        optimizer = getOptimizer(self.LR, self.trainingMethod)

        losses = np.zeros(self.nIter)

        trainable_variables = [W_r, S_r]

        ############################
        # Actually train the model #
        ############################
        for t in trange(self.nIter):

            with tf.GradientTape() as tape:
                W = activateVariable(W_r, self.decoderActiv)
                S = activateVariable(S_r, self.factorActiv)
                X_recon = tf.matmul(S, W)
                loss_gen = tf.nn.l2_loss(X - X_recon) / N
                y_hat = tf.matmul(S[:, :self.n_blessed], phi) + B
                # NOTE(review): softmax over a single logit column is
                # degenerate (always 1) — sigmoid CE, as used in the other
                # examples, may be intended here. Kept, but reduced to a
                # scalar so the total loss and its trace are well-defined.
                loss_sup = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(y, y_hat))
                loss = loss_gen + self.mu * loss_sup

            # Record the loss trace (previously allocated but never filled).
            losses[t] = loss.numpy()

            gradients = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(gradients, trainable_variables))

        W_f = activateVariable(W_r, self.decoderActiv)
        # Use factorActiv for S, matching the activation inside the loop.
        S_f = activateVariable(S_r, self.factorActiv)

        self.components_ = W_f.numpy()
        Scores = S_f.numpy()
        self.phi_ = phi.numpy()
        self.b_ = B.numpy()
        self.reconstruction_err_ = _beta_divergence(X,
                                                    Scores,
                                                    self.components_,
                                                    beta=self.beta)
        # Despite the name, sup_loss_ stores the ROC AUC of the final fit.
        yh = y_hat.numpy()
        fpr, tpr, _ = roc_curve(y, yh)
        self.sup_loss_ = auc(fpr, tpr)
        return Scores
Example #4 (score: 0)
    def fit_transform(self, X, y, S_init=None, W_init=None):
        """Jointly fit row-normalized components W, scores S, and the
        supervised head (phi, b) on data X with labels y; return the
        activated scores.

        Parameters
        ----------
        X, y : array-like data and binary labels.
        S_init, W_init : optional warm-start factors; when S_init is None
            both are initialized with NNDSVDA.

        Fixes relative to the original:
        - The final S was activated with decoderActiv instead of
          factorActiv (the activation used for S inside the loop).
        - Removed dead code: an unused second optimizer, an unused
          second trainable-variable list, the unused X_recon_orig
          product, and the vacuous `if 1 == 1:` / nInner scaffolding.
        """
        ######################################
        # Initialize the scores and features #
        ######################################
        if S_init is None:
            S_init, W_init = nndsvda_init(X, self.n_components)

        ################################
        # Declare our latent variables #
        ################################
        # Raw variables; invert softplus so the activated variables start
        # exactly at the provided initialization.
        if self.decoderActiv == 'softplus':
            W_r = tf.Variable(softplus_inverse(W_init).astype(np.float32))
        else:
            W_r = tf.Variable(W_init.astype(np.float32))
        if self.factorActiv == 'softplus':
            S_r = tf.Variable(softplus_inverse(S_init).astype(np.float32))
        else:
            S_r = tf.Variable(S_init.astype(np.float32))
        phi = tf.Variable(np.zeros((self.n_components, 1)).astype(np.float32))
        B = tf.Variable(.1 * rand.randn(1).astype(np.float32))

        #######################
        # Change X to float32 #
        #######################
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        N, p = X.shape

        #####################
        # Get the optimizer #
        #####################
        optimizer = getOptimizer(self.LR, self.trainingMethod)

        losses_gen = np.zeros(self.nIter)
        losses_sup = np.zeros(self.nIter)

        trainable_variables = [W_r, phi, B, S_r]

        ############################
        # Actually train the model #
        ############################
        for t in trange(self.nIter):

            with tf.GradientTape() as tape:
                W_un = activateVariable(W_r, self.decoderActiv)
                # Constrain each component to L2 norm sqrt(p) so scale
                # lives in S rather than W.
                W = tf.math.l2_normalize(W_un, axis=1) * np.sqrt(p)
                S = activateVariable(S_r, self.factorActiv)
                X_recon = tf.matmul(S, W)
                loss_gen = tf.nn.l2_loss(X - X_recon) / N
                y_hat = tf.matmul(S, phi) + B
                ce = tf.nn.sigmoid_cross_entropy_with_logits(
                    y, tf.squeeze(y_hat))
                loss_sup = tf.reduce_mean(ce)
                # L2 penalty on the supervised weights only.
                loss_reg = tf.nn.l2_loss(phi)
                loss = loss_gen + self.reg * loss_reg + self.mu * loss_sup
            gradients = tape.gradient(loss, trainable_variables)
            optimizer.apply_gradients(zip(gradients, trainable_variables))

            losses_gen[t] = loss_gen.numpy()
            losses_sup[t] = loss_sup.numpy()

        self.losses_gen = losses_gen
        self.losses_sup = losses_sup

        W_f = activateVariable(W_r, self.decoderActiv)
        W_ = tf.math.l2_normalize(W_f, axis=1) * np.sqrt(p)
        # Use factorActiv for S, matching the activation inside the loop.
        S_f = activateVariable(S_r, self.factorActiv)

        self.components_ = W_.numpy()
        Scores = S_f.numpy()
        self.phi_ = phi.numpy()
        self.b_ = B.numpy()
        self.reconstruction_err_ = _beta_divergence(X,
                                                    Scores,
                                                    self.components_,
                                                    beta=self.beta)
        # Despite the name, sup_loss_ stores the ROC AUC of the final fit.
        yh = y_hat.numpy()
        fpr, tpr, _ = roc_curve(y, yh)
        self.sup_loss_ = auc(fpr, tpr)
        return Scores