Example no. 1
        def loadmat(ds_id, split, labels):
            mat = sslbookdata.sslbookdata.pkg_loadmat(f'data/data{ds_id}.mat')

            if np.min(mat['y']) == -1:
                mat['y'][mat['y'] == -1] = 0

            mat['y'] = init_matrix(Y=np.reshape(mat['y'], (-1, )),
                                   labeledIndexes=[True] * mat['y'].shape[0])

            if self.use_splits:
                try:
                    allsplits = sslbookdata.sslbookdata.pkg_loadmat(
                        f'data/splits{ds_id}-labeled{labels}.mat')
                    labeledIndexes = allsplits['idxLabs'][split, :] - 1
                except Exception as e:
                    raise ValueError("Could not load splits") from e

                return mat, labeledIndexes
            else:
                return mat, None
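
A minimal usage sketch for the loader above (hypothetical values; assumes the `sslbookdata` package and the benchmark `.mat` files are available, and that `self.use_splits` is True):

    # Hypothetical call: benchmark dataset 5, split 0, 100 labeled points.
    # `mat` holds the data; `labeledIndexes` gives the 0-based indices of
    # the labeled instances for that split.
    mat, labeledIndexes = loadmat(ds_id=5, split=0, labels=100)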
Example no. 2
        def load_secstr(split, labels, extra_unlabeled=False):
            def explode(Xin):
                m, d0 = Xin.shape
                ks = np.unique(Xin)
                k = len(ks)
                d1 = k * d0
                X = np.zeros((m, d1), dtype='u1')
                l = 0
                for i in range(k):
                    X[:, l:l + d0] = Xin == ks[i]
                    l = l + d0
                return X

            mat = sslbookdata.sslbookdata.pkg_loadmat('data/data8.mat')

            if np.min(mat['y']) == -1:
                mat['y'][mat['y'] == -1] = 0

            mat['y'] = init_matrix(Y=np.reshape(mat['y'], (-1, )),
                                   labeledIndexes=[True] * mat['y'].shape[0])

            mat['X'] = explode(mat['T'])

            if extra_unlabeled:
                mat2 = sslbookdata.sslbookdata.pkg_loadmat(
                    'data/data8extra.mat')
                Xextra = explode(mat2['T'])
                yextra_shape = (Xextra.shape[0], mat['y'].shape[1])
                mat['y'] = np.concatenate(
                    [mat['y'], np.zeros(yextra_shape)], axis=0)
                mat['X'] = np.concatenate([mat['X'], Xextra], axis=0)

            allsplits = sslbookdata.sslbookdata.pkg_loadmat(
                f'data/splits8-labeled{labels}.mat')
            labeledIndexes = allsplits['idxLabs'][split, :] - 1

            return mat, labeledIndexes
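
To make the `explode` helper concrete, a small NumPy sketch (values illustrative):

    import numpy as np

    Xin = np.array([[0, 1],
                    [1, 2]])
    # Unique values ks = [0, 1, 2], so explode() emits k * d0 = 3 * 2 = 6 columns:
    # block i is the binary mask (Xin == ks[i]) over all original columns.
    # explode(Xin) -> [[1, 0, 0, 1, 0, 0],
    #                  [0, 0, 1, 0, 0, 1]]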
Example no. 3
    def LGCLVO(self,
               X,
               W,
               Y,
               labeledIndexes,
               mu=99.0,
               useEstimatedFreq=True,
               tuning_iter=0,
               hook=None,
               constant_prop=False,
               useZ=True,
               normalize_rows=True):

        labeledIndexes, noisyIndexes = labeledIndexes

        Y = np.copy(Y)
        #We make a deep copy of labeledIndexes
        labeledIndexes = np.array(labeledIndexes)
        lids = np.where(labeledIndexes)[0]
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        W = 0.5 * (W + W.transpose())

        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]

        D = gutils.deg_matrix(W, flat=True)
        if useEstimatedFreq is not None:
            if isinstance(useEstimatedFreq, bool):
                estimatedFreq = np.sum(Y[labeledIndexes], axis=0) / num_labeled
            else:
                estimatedFreq = useEstimatedFreq

        else:
            estimatedFreq = np.repeat(1 / num_classes, num_classes)

        if scipy.sparse.issparse(W):
            l = np.sum(labeledIndexes)

            itertool_prod = [[i, j] for i in range(l) for j in range(l)]

            row = np.asarray([lids[i] for i in range(l)])
            col = np.asarray([i for i in range(l)])
            data = np.asarray([1.0] * l)
            temp_Y = _to_np(
                scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=(W.shape[0], l)))

            PL = LGC_iter_TF(X,
                             W,
                             Y=temp_Y,
                             labeledIndexes=labeledIndexes,
                             alpha=1 / (1 + mu),
                             num_iter=1000)

            PL = PL[labeledIndexes, :]
            PL[range(PL.shape[0]), range(PL.shape[0])] = 0  #Set diagonal to 0

            del temp_Y

            row = np.asarray(
                [lids[x[0]] for x in itertool_prod if x[0] != x[1]])
            col = np.asarray(
                [lids[x[1]] for x in itertool_prod if x[0] != x[1]])
            data = [PL[x[0], x[1]] for x in itertool_prod if x[0] != x[1]]
            P = scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=W.shape).tocsr()

        else:
            #Identity matrix
            I = np.identity(W.shape[0])
            #Get graph laplacian
            L = gutils.lap_matrix(W, which_lap='sym')
            #Propagation matrix
            P = np.zeros(W.shape)
            P[np.ix_(labeledIndexes,
                     labeledIndexes)] = np.linalg.inv(I + 0.5 *
                                                      (L + L.transpose()) /
                                                      mu)[np.ix_(
                                                          labeledIndexes,
                                                          labeledIndexes)]
            P[labeledIndexes, labeledIndexes] = 0  #Zero the diagonal of the labeled block
            P[np.ix_(labeledIndexes, labeledIndexes)] = P[np.ix_(
                labeledIndexes, labeledIndexes)] / np.sum(P[np.ix_(
                    labeledIndexes, labeledIndexes)],
                                                          axis=0,
                                                          keepdims=False)

        W = scipy.sparse.csr_matrix(W)

        Z = []

        detected_noisylabels = []
        suggested_labels = []
        where_noisylabels = []
        Q_values = []

        Y_flat = np.argmax(Y, axis=1)

        def divide_row_by_sum(e):

            e = _to_np(e)
            if normalize_rows:
                e = e / np.sum(e + 1e-100, axis=1, keepdims=True)
                return e
            else:
                return e

        def find_argmin(Q, class_to_unlabel):
            id_min_line = np.argmin(Q[:, class_to_unlabel])
            id_min_col = class_to_unlabel
            return id_min_line, id_min_col, Q[id_min_line, id_min_col]

        #######################################################################################
        '''BEGIN iterations'''

        Q = None
        cleanIndexes = np.copy(labeledIndexes)
        for i_iter in range(tuning_iter):

            found_noisy = True

            if np.sum(labeledIndexes) > 0 and found_noisy:
                '''Z matrix - The binary values of the current Y are replaced with their corresponding D entries.
                    Then, each row is normalized so that it sums to its estimated influence.
                '''
                useZ = False  # NOTE: hard-coded override of the `useZ` argument
                if i_iter >= 0:  # always true; kept for structure
                    if (not self.use_baseline) or Q is None:
                        if useZ:
                            Z = gutils.calc_Z(Y,
                                              labeledIndexes,
                                              D,
                                              estimatedFreq,
                                              weigh_by_degree=False)
                            F = P @ Z
                            if scipy.sparse.issparse(F):
                                F = np.asarray(F.toarray())

                            #Compute graph gradient
                            Q = (divide_row_by_sum(F) - divide_row_by_sum(Z))
                        else:
                            F = P @ Y
                            if scipy.sparse.issparse(F):
                                F = np.asarray(F.toarray())
                            Q = (divide_row_by_sum(F) - divide_row_by_sum(Y))

                #During label tuning, we'll also 'unlabel' the argmax
                unlabeledIndexes = np.logical_not(cleanIndexes)
                if self.early_stop:
                    Q[np.sum(F, axis=1) == 0.0, :] = 9999

                Q[unlabeledIndexes, :] = np.inf

                #Find minimum unlabeled index
                if constant_prop:
                    expectedNumLabels = estimatedFreq * np.sum(labeledIndexes)
                    actualNumLabels = np.sum(Y[labeledIndexes, :], axis=0)
                    temp = expectedNumLabels - actualNumLabels
                    class_priority = np.argsort(temp)

                    found_noisy = False
                    for class_to_unlabel in class_priority:
                        id_min_line, id_min_col, val = find_argmin(
                            Q, class_to_unlabel)
                        if val < 0:
                            #This means that the class would have a different label under the modified label prop
                            found_noisy = True
                            break

                else:
                    id_min = np.argmin(Q)
                    id_min_line = id_min // num_classes
                    id_min_col = id_min % num_classes  #The class previously assigned to instance X_{id_min_line}
                    found_noisy = Q[id_min_line, id_min_col] < 0

                if found_noisy:

                    id_max_col = np.argmax(
                        Q[id_min_line, :])  #The new, suggested class

                    detected_noisylabels.append(id_min_col)
                    where_noisylabels.append(id_min_line)

                    suggested_labels.append(id_max_col)
                    Q_values.append(1 + Q[id_min_line, id_min_col])

                    #Unlabel OP
                    if not labeledIndexes[id_min_line]:
                        raise Exception(
                            "Error: unlabeled instance was selected")
                    if Y[id_min_line, id_min_col] != 1:
                        raise Exception("Error: picked wrong class to unlabel")

                    labeledIndexes[id_min_line] = False
                    cleanIndexes[id_min_line] = False

                    Y[id_min_line, id_min_col] = 0
                    if self.relabel:
                        labeledIndexes[id_min_line] = True
                        Y[id_min_line, :] = 0
                        Y[id_min_line, id_max_col] = 1

            if hook is not None:
                hook._step(step=(i_iter + 1),
                           X=X,
                           W=W,
                           Y=Y,
                           labeledIndexes=labeledIndexes)
        '''
        MATPLOTLIB stuff (debug plotting)
        '''

        #import cv2 as cv
        #ret2,th2 = cv.threshold(255*np.asarray(Q_values).astype(np.uint8),0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)

        import matplotlib
        matplotlib.use("TkAgg")
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(5 * 3, 2 * 3))
        ax = fig.add_subplot()
        #ax.plot(np.arange(len(Q_values)),Q_values)
        ax.scatter(np.arange(len(Q_values)),
                   Q_values,
                   c=noisyIndexes[where_noisylabels])
        ax.set_xlabel("#Labels Removed", fontsize=22)
        ax.set_ylabel("Consistency with LGC", fontsize=22)

        ax.axvline(np.sum(noisyIndexes), color='red')

        # We change the fontsize of minor ticks label
        ax.tick_params(axis='both', which='major', labelsize=18)
        ax.tick_params(axis='both', which='minor', labelsize=18)

        # For the minor ticks, use no labels; default NullFormatter.
        ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.1))
        fig.tight_layout()
        plt.axhline(np.max(Q_values[0:(1 + np.sum(noisyIndexes))]),
                    color='green')
        plt.grid(True, axis='y', linestyle='-', alpha=0.5, which='major')
        plt.grid(True, axis='y', linestyle='--', alpha=0.5, which='minor')

        #plt.axvline(th2,color='purple')
        plt.savefig(
            '/home/klaus/eclipse-workspace/NoisyGSSL/results/python_plotly/' +
            'mnist_alpha=0.99_noise=0.3_thresh_static.png')

        #print(th2)
        plt.show()
        '''END iterations'''
        LOG.info(
            "NUMBER OF DETECTED NOISY INSTANCES:{}".format(
                len(detected_noisylabels)), LOG.ll.FILTER)

        return Y, labeledIndexes
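
For reference, the propagation that the leave-one-out criterion above builds on is the Local and Global Consistency (LGC) closed form; a sketch consistent with the code, where $\mathcal{L}$ is the symmetric normalized Laplacian:

$$F = \left(I + \mathcal{L}/\mu\right)^{-1} Y, \qquad \alpha = \frac{1}{1 + \mu}.$$

The diagonal of the labeled block of the propagation matrix $P$ is zeroed, and $Q = \operatorname{norm}(PY) - \operatorname{norm}(Y)$ (rows normalized to sum to one) is negative exactly where propagation from the remaining labels disagrees with an instance's current label, which is how candidate noisy labels are ranked for removal.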
Example no. 4
    def __RF(self, X, W, Y, labeledIndexes, n_estimators, hook=None):
        rf = RandomForestClassifier(n_estimators=n_estimators, verbose=2)
        rf.fit(X[labeledIndexes, :], np.argmax(Y[labeledIndexes, :], axis=1))
        pred = rf.predict(X)

        return init_matrix(pred, np.ones(X.shape[0]).astype(bool))
    def __MR(self, X, W, Y, labeledIndexes, p, optimize_labels, hook=None):
        """
            -------------------------------------------------------------
                INITIALIZATION
            --------------------------------------------------------------
        """
        ORACLE_Y = Y.copy()
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        l = np.reshape(np.array(np.where(labeledIndexes)), (-1))
        num_lab = l.shape[0]

        if not isinstance(p, int):
            p = int(p * num_lab)

        if p > Y.shape[0]:
            p = Y.shape[0]
            LOG.warn("Warning: p greater than the number of labeled indexes",
                     LOG.ll.CLASSIFIER)
        #W = gutils.scipy_to_np(W)
        #W =  0.5* (W + W.T)
        L = gutils.lap_matrix(W)
        D = gutils.deg_matrix(W, flat=True, pwr=-1.0)

        L = 0.5 * (L + L.T)

        def check_symmetric(a, tol=1e-8):
            return np.allclose(a, a.T, atol=tol)

        def is_pos_sdef(x):
            return np.all(np.linalg.eigvals(x) >= -1e-06)

        import scipy.sparse
        sym_err = L - L.T
        sym_check_res = np.all(np.abs(sym_err.data) < 1e-7)  # tune this value
        assert sym_check_res
        """---------------------------------------------------------------------------------------------------
                EIGENFUNCTION EXTRACTION
        ---------------------------------------------------------------------------------------------------
        """
        import time
        start_time = time.time()
        eigenVectors, eigenValues = W.load_eigenfunctions(p)

        time_elapsed = time.time() - start_time
        LOG.info("Took {} seconds to calculate eigenvectors".format(
            int(time_elapsed)))
        U = eigenVectors
        LAMBDA = eigenValues
        """
        -------------------------------------------------------------------------
            Import and setup Tensorflow
        ------------------------------------------------------------------------------
        """
        import tensorflow as tf
        import tf_labelprop.gssl.classifiers.lgc_lvo_aux as aux
        gpus = tf.config.experimental.list_physical_devices('GPU')

        #tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*8)])
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        """
        -------------------------------------------------------------------------
            Define Constants on GPU
        ------------------------------------------------------------------------------
        """
        U, X, Y = [tf.constant(x.astype(np.float32)) for x in [U, X, Y]]
        _U_times_U = tf.multiply(U, U)
        N = X.shape[0]
        """
            -----------------------------------------------------------------------------
            DEFINE VARS
            --------------------------------------------------------------------------------
        """
        MU = tf.Variable(0.1, name="MU")

        LAMBDA = tf.constant(LAMBDA.astype(np.float32), name="LAMBDA")
        PI = tf.Variable(tf.ones(shape=(tf.shape(Y)[0], ), dtype=tf.float32),
                         name="PI")
        _l = LAMBDA.numpy()
        """
            -----------------------------------------------------------------------------
            DEFINE FORWARD
            --------------------------------------------------------------------------------
        """

        def forward(Y, U, PI, mode='train', p=None, remove_diag=True):
            if p is None:
                p = 99999

            pi_Y = aux.spd_matmul(aux.to_sp_diag(tf.abs(PI)), Y)

            alpha = self.get_alpha(MU)
            """
                Maybe apply custom convolution to LAMBDA, otherwise just fit LGC's alpha using the corresponding filter 1/(1-alpha + alpha*lambda)
            """
            #tf.print(alpha)
            a = alpha - alpha * LAMBDA
            lambda_tilde = 1 / (1 - a)
            """ Set entries corresponding to eigvector e_i to zero for i > p """
            lambda_tilde = tf.where(
                tf.less_equal(tf.range(0, lambda_tilde.shape[0]), p),
                lambda_tilde, 0 * lambda_tilde)

            _self_infl = aux.mult_each_row_by(
                tf.square(U), by=lambda_tilde
            )  #Square each element of U, then dot product of each row with lambda_tilde
            B = _self_infl
            _self_infl = tf.reduce_sum(_self_infl, axis=1)

            A = aux.mult_each_col_by((tf.transpose(U) @ pi_Y), by=lambda_tilde)
            _P_op = U @ (A)
            if not remove_diag:
                _diag_P_op = tf.zeros_like(
                    aux.mult_each_col_by(pi_Y, by=_self_infl))
            else:
                _diag_P_op = aux.mult_each_col_by(pi_Y, by=_self_infl)

            if mode == 'eval':
                return aux.divide_by_row(_P_op - _diag_P_op)
            else:
                return A, B, aux.divide_by_row(_P_op - _diag_P_op)

        def forward_eval(Y, U, PI, mode='train', p=None, remove_diag=True):
            if p is None:
                p = 99999

            pi_Y = aux.spd_matmul(aux.to_sp_diag(tf.abs(PI)), Y)

            alpha = self.get_alpha(MU)
            """
                Maybe apply custom convolution to LAMBDA, otherwise just fit LGC's alpha using the corresponding filter 1/(1-alpha + alpha*lambda)
            """
            #tf.print(alpha)
            a = alpha - alpha * LAMBDA
            lambda_tilde = 1 / (1 - a)
            """ Set entries corresponding to eigvector e_i to zero for i > p """
            lambda_tilde = tf.where(
                tf.less_equal(tf.range(0, lambda_tilde.shape[0]), p),
                lambda_tilde, 0 * lambda_tilde)

            _self_infl = aux.mult_each_row_by(
                tf.square(U), by=lambda_tilde
            )  #Square each element of U, then dot product of each row with lambda_tilde
            _self_infl = tf.reduce_sum(_self_infl, axis=1)

            A = aux.mult_each_col_by((tf.transpose(U) @ pi_Y), by=lambda_tilde)
            _P_op = U @ (A)
            if not remove_diag:
                _diag_P_op = tf.zeros_like(
                    aux.mult_each_col_by(pi_Y, by=_self_infl))
            else:
                _diag_P_op = aux.mult_each_col_by(pi_Y, by=_self_infl)

            return aux.divide_by_row(_P_op - _diag_P_op)

        """
            -----------------------------------------------------------------------------
                DEFINE LOSSES and learning schedule
            --------------------------------------------------------------------------------
        """
        losses = {
            'xent':
            lambda y_, y: tf.reduce_mean(-tf.reduce_sum(y_ * tf.cast(
                tf.math.log(aux.smooth_labels(y, factor=0.01)), tf.float32),
                                                        axis=[1])),
            'sq_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.square(y_ - y), axis=[1])),
            'abs_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.abs(y_ - y), axis=[1])),
            'hinge':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.maximum(1. - y_ * y, tf.zeros_like(y)),
                              axis=1))
        }

        NUM_ITER = 10
        Y_l = tf.gather(Y, indices=np.where(labeledIndexes)[0], axis=0)
        U_l = tf.gather(U, indices=np.where(labeledIndexes)[0], axis=0)
        PI_l = tf.gather(PI, indices=np.where(labeledIndexes)[0], axis=0)
        """
            -----------------------------------------------------------------------------
            LEARNING
            --------------------------------------------------------------------------------
        """
        L = []
        df = pd.DataFrame()
        max_acc, min_loss = [0, np.inf]
        best_p = np.inf
        for i in range(NUM_ITER, 0, -1):
            MU.assign(i)

            A, B, _ = forward(Y_l, U_l, PI_l, mode='train')

            a1 = np.zeros_like(Y_l)
            a2 = np.zeros_like(Y_l)

            for i1 in range(p):
                a2 += mult_each_col_by(X=Y_l, by=B[:, i1])
                a1 += mult_each_col_by(
                    np.tile(A[i1, :][None, :], [a1.shape[0], 1]), U_l[:, i1])

                pred_L = aux.divide_by_row(a1 - a2)

                loss_sq = losses['sq_loss'](pred_L, Y_l)
                loss = losses['xent'](pred_L, Y_l)

                loss_xent = losses['xent'](pred_L, Y_l)

                acc = aux.accuracy(Y_l, pred_L)
                _not_lab = np.where(np.logical_not(labeledIndexes))[0]

                if self.DEBUG:
                    acc_true = aux.accuracy(
                        tf.gather(ORACLE_Y, indices=_not_lab, axis=0),
                        tf.gather(forward_eval(Y, U, PI, mode='eval', p=i1),
                                  indices=_not_lab,
                                  axis=0))
                    prop = np.max(
                        pd.value_counts(tf.argmax(pred_L, 1).numpy(),
                                        normalize=True).values)
                else:
                    acc_true = 0
                    prop = 0

                L.append(
                    np.array(
                        [i, i1, loss_sq, loss, loss_xent, acc, acc_true,
                         prop])[None, :])
                if (max_acc < acc) or (acc == max_acc and min_loss > loss):
                    print(
                        f"acc: {acc}, p: {i1}, Mu: {int(MU.numpy())}, alpha: {self.get_alpha(MU.numpy()).numpy()}"
                    )
                    best_p = int(i1)
                    best_MU = int(MU.numpy())
                    max_acc = acc
                    min_loss = loss.numpy()
                    """
                    if self.DEBUG:
                        alpha = self.get_alpha(MU)
                        I = np.identity(Y.shape[0], dtype = np.float32)
                        match_true = tf.gather(np.linalg.inv(I- alpha*(I - gutils.lap_matrix(W,'sym')))@Y,_not_lab,axis=0)
                        F = forward_eval(Y,U,PI,mode='eval',p=best_p)
                        
                        match_approx = tf.gather(F,indices=_not_lab,axis=0)
                        match = aux.accuracy(match_true, match_approx)
                        
                        print(f"Match rate {np.round(100*match,3)} ")
                        print(f"LGC_acc = {np.round(100*aux.accuracy(match_true,tf.gather(ORACLE_Y,indices=_not_lab,axis=0)),3)} ")
                        print(f"LGCLVO_acc = {np.round(100*aux.accuracy(match_approx,tf.gather(ORACLE_Y,indices=_not_lab,axis=0)),3)} ")
                    """

            if i % 1 == 0:  # always true: log every outer iteration
                """ Print info """
                if hook is not None:
                    if self.hook_iter_mode == "labeled":
                        plot_y = np.zeros_like(Y)
                        plot_y[labeledIndexes] = Y_l.numpy()
                    else:
                        MU.assign(best_MU)
                        plot_y = tf.clip_by_value(
                            forward(Y, U, PI, p=best_p, mode='eval'), 0,
                            999999).numpy()

                    hook._step(step=i,
                               X=X,
                               W=W,
                               Y=plot_y,
                               labeledIndexes=labeledIndexes)
                alpha = self.get_alpha(MU)

                LOG.info(
                    f"Acc: {max_acc.numpy():.3f};  Loss: {loss.numpy():.3f}; alpha = {alpha.numpy():.3f};"
                )

        if self.DEBUG:
            df = pd.DataFrame(np.concatenate(L, axis=0),
                              index=range(len(L)),
                              columns=[
                                  'i', 'p', 'loss_sq', 'loss', 'loss_xent',
                                  'acc', 'acc_true', 'prop'
                              ])
            self.create_3d_mesh(df)

        print(f"BEst mu: {best_MU}; best p: {best_p}")
        MU.assign(best_MU)
        print(MU)

        return forward_eval(Y, U, PI, mode='eval', p=None).numpy()
        # NOTE: everything below this return is unreachable experimental code.
        """
        ----------------------------------------------------
            PART 2
        -------------------------------------------------
        """

        opt = tf.keras.optimizers.Adam(0.05)

        max_acc = 0
        for i in range(7000):
            #MU.assign(i)
            with tf.GradientTape() as t:
                _, _, pred_L = forward(Y_l,
                                       U_l,
                                       tf.gather(
                                           PI,
                                           indices=np.where(labeledIndexes)[0],
                                           axis=0),
                                       mode='train',
                                       p=best_p)
                loss_sq = losses['sq_loss'](pred_L, Y_l)
                loss = losses['xent'](pred_L, Y_l)

                loss_xent = losses['xent'](pred_L, Y_l)

            acc = aux.accuracy(Y_l, pred_L)
            _not_lab = np.where(np.logical_not(labeledIndexes))[0]
            acc_true = aux.accuracy(
                tf.gather(ORACLE_Y, indices=_not_lab, axis=0),
                tf.gather(forward(Y, U, PI, mode='eval')[0],
                          indices=_not_lab,
                          axis=0))

            L.append(
                np.array([i, loss_sq, loss, loss_xent, acc,
                          acc_true])[None, :])
            """
                Project labels such that they sum up to the original amount
            """
            pi = PI.numpy()
            pi[labeledIndexes] = np.sum(
                labeledIndexes) * pi[labeledIndexes] / (np.sum(
                    pi[labeledIndexes]))
            PI.assign(pi)
            """
                TRAINABLE VARIABLES GO HERE
            """
            trainable_variables = []
            if optimize_labels:
                trainable_variables.append(PI)
            """
                Apply gradients
            """
            gradients = t.gradient(loss, trainable_variables)
            opt.apply_gradients(zip(gradients, trainable_variables))

            if acc > max_acc:
                print(max_acc)
                best_trainable_variables = [
                    k.numpy() for k in trainable_variables
                ]
                max_acc = acc
                min_loss = loss
                counter_since_best = 0

        for k in range(len(trainable_variables)):
            trainable_variables[k].assign(best_trainable_variables[k])

        return forward(Y, U, PI, mode='eval', p=None).numpy()
        """
        
        for c in df.columns:
            if c.startswith('loss'):
                df[c] = (df[c] - df[c].min())/(df[c].max()-df[c].min())
        
        for c in df.columns:
            if not c in 'i':
                plt.plot(df['i'],df[c],label=c)
        plt.legend()
        plt.show()
        
        #plt.scatter(range(lambda_tilde.shape[0]),np.log10(lambda_tilde/LAMBDA),s=2)
        #plt.show()
        """
        return tf.clip_by_value(forward(Y, U, PI, mode='eval')[0], 0,
                                999999).numpy()
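
The eigenfunction variant above avoids the explicit matrix inverse by filtering the Laplacian spectrum; a sketch matching the in-code comment: with eigenpairs $(\lambda_i, u_i)$ stacked in $U$,

$$\tilde{\lambda}_i = \frac{1}{1 - \alpha + \alpha\lambda_i}, \qquad F = U\,\mathrm{diag}(\tilde{\lambda})\,U^\top \Pi Y,$$

truncated to the first $p$ eigenvectors, with the self-influence (diagonal) term subtracted row by row, as computed by `_self_infl`.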
    def __MR(self, X, W, Y, labeledIndexes, p, optimize_labels, hook=None):
        """
            -------------------------------------------------------------
                INITIALIZATION
            --------------------------------------------------------------
        """

        ORACLE_Y = Y.copy()
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        l = np.reshape(np.array(np.where(labeledIndexes)), (-1))
        num_lab = l.shape[0]

        if not isinstance(p, int):
            p = int(p * num_lab)

        if p > Y.shape[0]:
            p = Y.shape[0]
            LOG.warn("Warning: p greater than the number of labeled indexes",
                     LOG.ll.CLASSIFIER)
        #W = gutils.scipy_to_np(W)
        #W =  0.5* (W + W.T)
        L = gutils.lap_matrix(W, which_lap='sym')
        D = gutils.deg_matrix(W, flat=True, pwr=-1.0)

        L = 0.5 * (L + L.T)

        def check_symmetric(a, tol=1e-8):
            return np.allclose(a, a.T, atol=tol)

        def is_pos_sdef(x):
            return np.all(np.linalg.eigvals(x) >= -1e-06)

        import scipy.sparse
        sym_err = L - L.T
        sym_check_res = np.all(np.abs(sym_err.data) < 1e-7)  # tune this value
        assert sym_check_res
        """---------------------------------------------------------------------------------------------------
                EIGENFUNCTION EXTRACTION
        ---------------------------------------------------------------------------------------------------
        """
        import time
        start_time = time.time()

        import os.path as osp
        from tf_labelprop.settings import INPUT_FOLDER

        cache_eigvec = osp.join(INPUT_FOLDER, 'eigenVectors.npy')
        cache_eigval = osp.join(INPUT_FOLDER, 'eigenValues.npy')

        if False:  # disabled: load cached eigendecomposition from disk
            eigenValues, eigenVectors = np.load(cache_eigval), np.load(
                cache_eigvec)
            eigenVectors = eigenVectors[:, :p]
            eigenValues = eigenValues[:p]
        else:

            eigenVectors, eigenValues = W.load_eigenfunctions(p)

            time_elapsed = time.time() - start_time
            LOG.info("Took {} seconds to calculate eigenvectors".format(
                int(time_elapsed)))
            idx = eigenValues.argsort()
            eigenValues = eigenValues[idx]
            LOG.debug(eigenValues)
            assert eigenValues[0] <= eigenValues[eigenValues.shape[0] - 1]
            eigenVectors = eigenVectors[:, idx]
            np.save(cache_eigval, arr=eigenValues)
            np.save(cache_eigvec, arr=eigenVectors)
        U = eigenVectors
        LAMBDA = eigenValues

        U = U[:, np.argsort(LAMBDA)]
        LAMBDA = LAMBDA[np.argsort(LAMBDA)]

        import tensorflow as tf

        gpus = tf.config.experimental.list_physical_devices('GPU')

        #tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*8)])
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        """
        -------------------------------------------------------------------------
            Define Constants on GPU
        ------------------------------------------------------------------------------
        """
        U, X, Y = [tf.constant(x.astype(np.float32)) for x in [U, X, Y]]

        _U_times_U = tf.multiply(U, U)
        N = X.shape[0]

        def to_sp_diag(x):
            n = tf.cast(x.shape[0], tf.int64)
            indices = tf.concat([
                tf.range(n, dtype=tf.int64)[None, :],
                tf.range(n, dtype=tf.int64)[None, :]
            ],
                                axis=0)
            return tf.sparse.SparseTensor(indices=tf.transpose(indices),
                                          values=x,
                                          dense_shape=[n, n])

        @tf.function
        def smooth_labels(labels, factor=0.001):
            # smooth the labels
            labels = tf.cast(labels, tf.float32)
            labels *= (1 - factor)
            labels += (factor / tf.cast(tf.shape(labels)[0], tf.float32))
            # returned the smoothed labels
            return labels

        @tf.function
        def divide_by_row(x, eps=1e-07):
            x = tf.maximum(x, 0 * x)
            x = x + eps  # [N,C]    [N,1]
            return x / (tf.reduce_sum(x, axis=-1)[:, None])

        def spd_matmul(x, y):
            return tf.sparse.sparse_dense_matmul(x, y)

        def mult_each_row_by(X, by):
            """ Elementwise multiplies each row by a given row vector.
            
                For a 2D tensor, this also corresponds to multiplying each column by the respective scalar in the given row vector.
                
                Args:
                    X (Tensor)  
                    by (Tensor[shape=(C,)]): row vector, one scalar per column of X
            
            """
            #[N,C] * [1,C]
            return X * by[None, :]

        def mult_each_col_by(X, by):
            #[N,C] * [N,1]
            return X * by[:, None]

        @tf.function
        def accuracy(y_true, y_pred):
            acc = tf.cast(
                tf.equal(tf.argmax(y_true, axis=-1),
                         tf.argmax(y_pred, axis=-1)), tf.float32)
            acc = tf.cast(acc, tf.float32)
            return tf.reduce_mean(acc)

        """
            -----------------------------------------------------------------------------
            DEFINE VARS
            --------------------------------------------------------------------------------
        """

        MU = tf.Variable(0.1, name="MU")

        LAMBDA = tf.constant(LAMBDA.astype(np.float32), name="LAMBDA")
        PI = tf.Variable(tf.ones(shape=(tf.shape(Y)[0], ), dtype=tf.float32),
                         name="PI")
        _l = LAMBDA.numpy()
        CUTOFF = tf.Variable(0.0, name='CUTOFF')
        CUTOFF_K = tf.Variable(1.0)

        @tf.function
        def get_alpha(MU):
            return tf.pow(2.0, -tf.math.reciprocal(tf.abs(100 * MU)))

        @tf.function
        def to_prob(x):
            return tf.nn.softmax(x, axis=1)

        @tf.function
        def cutoff(x):
            return 1.0 / (1.0 + tf.exp(-CUTOFF_K * (CUTOFF - x)))

        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Conv1D(8, kernel_size=5, padding='same'))
        model.add(tf.keras.layers.Activation('relu'))
        model.add(tf.keras.layers.Conv1D(8, kernel_size=5, padding='same'))
        model.add(tf.keras.layers.Activation('relu'))
        model.add(tf.keras.layers.Conv1D(1, kernel_size=3, padding='same'))

        model.add(tf.keras.layers.Flatten())
        """
            -----------------------------------------------------------------------------
            DEFINE FORWARD
            --------------------------------------------------------------------------------
        """

        @tf.function
        def forward(Y, U, PI, mode='train', remove_diag=True):
            if mode == 'train':
                U = tf.gather(U, indices=np.where(labeledIndexes)[0], axis=0)
                Y = tf.gather(Y, indices=np.where(labeledIndexes)[0], axis=0)
                #F = tf.gather(F,indices=np.where(labeledIndexes)[0],axis=0)

                PI = tf.gather(PI, indices=np.where(labeledIndexes)[0], axis=0)

            pi_Y = spd_matmul(to_sp_diag(tf.abs(PI)), Y)

            alpha = get_alpha(MU)
            """
                Maybe apply custom convolution to LAMBDA, otherwise just fit LGC's alpha using the corresponding filter 1/(1-alpha + alpha*lambda)
            """
            if not self.custom_conv:
                lambda_tilde = tf.math.reciprocal(1 - alpha + alpha * LAMBDA)
            else:
                #lambda_tilde = tf.math.reciprocal(1-alpha + alpha*LAMBDA)
                _lambda = (LAMBDA -
                           tf.reduce_mean(LAMBDA)) / tf.math.reduce_std(LAMBDA)
                lambda_tilde = tf.clip_by_value(
                    2 * tf.nn.sigmoid(
                        tf.reshape(model(_lambda[None, :, None]), (-1, ))), 0,
                    1)
                lambda_tilde = tf.sort(lambda_tilde, direction='DESCENDING')
            lambda_tilde = tf.reshape(divide_by_row(lambda_tilde[None, :]),
                                      (-1, ))

            _self_infl = mult_each_row_by(
                tf.square(U), by=lambda_tilde
            )  #Square each element of U, then dot product of each row with lambda_tilde
            _self_infl = tf.reduce_sum(_self_infl, axis=1)

            _P_op = U @ (mult_each_col_by(
                (tf.transpose(U) @ pi_Y), by=lambda_tilde))
            if not remove_diag:
                _diag_P_op = tf.zeros_like(
                    mult_each_col_by(pi_Y, by=_self_infl))
            else:
                _diag_P_op = mult_each_col_by(pi_Y, by=_self_infl)
            return divide_by_row(_P_op - _diag_P_op), lambda_tilde, pi_Y

        """
            -----------------------------------------------------------------------------
                DEFINE LOSSES and learning schedule
            --------------------------------------------------------------------------------
        """
        losses = {
            'xent':
            lambda y_, y: tf.reduce_mean(-tf.reduce_sum(y_ * tf.cast(
                tf.math.log(smooth_labels(y, factor=0.01)), tf.float32),
                                                        axis=[1])),
            'sq_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.square(y_ - y), axis=[1])),
            'abs_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.abs(y_ - y), axis=[1])),
            'hinge':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.maximum(1. - y_ * y, tf.zeros_like(y)),
                              axis=1))
        }

        NUM_ITER = 700
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            0.5, decay_steps=200, decay_rate=0.9, staircase=False)

        opt = tf.keras.optimizers.Adam(0.05)

        Y_l = tf.gather(Y, indices=np.where(labeledIndexes)[0], axis=0)

        #import matplotlib.pyplot as plt
        #import matplotlib
        #matplotlib.use('tkagg')
        import pandas as pd
        """
            -----------------------------------------------------------------------------
            LEARNING
            --------------------------------------------------------------------------------
        """
        L = []
        df = pd.DataFrame()
        max_acc, min_loss = [0, np.inf]
        counter_since_best = 0
        for i in range(NUM_ITER):
            #MU.assign(i)
            with tf.GradientTape() as t:
                # no need to watch a variable:
                # trainable variables are always watched
                pred_L, lambda_tilde, pi_Y = forward(Y, U, PI, mode='train')
                loss_sq = losses['sq_loss'](pred_L, Y_l)
                loss = losses['xent'](pred_L, Y_l)

                loss_xent = losses['xent'](pred_L, Y_l)

            acc = accuracy(Y_l, pred_L)
            _not_lab = np.where(np.logical_not(labeledIndexes))[0]
            acc_true = accuracy(
                tf.gather(ORACLE_Y, indices=_not_lab, axis=0),
                tf.gather(forward(Y, U, PI, mode='eval')[0],
                          indices=_not_lab,
                          axis=0))

            L.append(
                np.array([i, loss_sq, loss, loss_xent, acc,
                          acc_true])[None, :])
            """
                TRAINABLE VARIABLES GO HERE
            """
            if self.custom_conv:
                trainable_variables = model.weights
            else:
                trainable_variables = [MU]
            if optimize_labels:
                trainable_variables.append(PI)

            if acc > max_acc:
                print(max_acc)
                best_trainable_variables = [
                    k.numpy() for k in trainable_variables
                ]
                max_acc = acc
                min_loss = loss
                counter_since_best = 0
            elif acc <= max_acc:

                counter_since_best += 1
                if counter_since_best > 2000:
                    break
            """
                Apply gradients
            """
            gradients = t.gradient(loss, trainable_variables)
            opt.apply_gradients(zip(gradients, trainable_variables))
            """
                Project labels such that they sum up to the original amount
            """
            pi = PI.numpy()
            pi[labeledIndexes] = np.sum(
                labeledIndexes) * pi[labeledIndexes] / (np.sum(
                    pi[labeledIndexes]))
            PI.assign(pi)

            if i % 10 == 0:
                """ Print info """
                if hook is not None:
                    if self.hook_iter_mode == "labeled":
                        plot_y = np.zeros_like(Y)
                        plot_y[labeledIndexes] = Y_l.numpy()
                    else:
                        plot_y = tf.clip_by_value(
                            forward(Y, U, PI, mode='eval')[0], 0,
                            999999).numpy()
                    hook._step(step=i,
                               X=X,
                               W=W,
                               Y=plot_y,
                               labeledIndexes=labeledIndexes)
                alpha = get_alpha(MU)
                PI_l = tf.gather(PI,
                                 indices=np.where(labeledIndexes)[0],
                                 axis=0)
                LOG.info(
                    f"Acc: {acc.numpy():.3f}; ACC_TRUE:{acc_true.numpy():.3f}  Loss: {loss.numpy():.3f}; alpha = {alpha.numpy():.3f}; PI mean = {tf.reduce_mean(PI_l).numpy():.3f} "
                )

        #plt.scatter(range(lambda_tilde.shape[0]),np.log10(lambda_tilde/LAMBDA),s=2)
        #plt.show()
        for k in range(len(trainable_variables)):
            trainable_variables[k].assign(best_trainable_variables[k])
        return tf.clip_by_value(forward(Y, U, PI, mode='eval')[0], 0,
                                999999).numpy()
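
A note on the `get_alpha` parameterization used above: MU is optimized without constraints while alpha stays inside (0, 1), since

$$\alpha(\mu) = 2^{-1/|100\mu|}.$$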
Example no. 7
    def __MR(self, X, W, Y, labeledIndexes, p, tuning_iter, hook=None):
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y[np.logical_not(labeledIndexes)] = 0
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0
        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        l = np.reshape(np.array(np.where(labeledIndexes)), (-1))
        num_lab = l.shape[0]

        if not isinstance(p, int):
            p = int(p * num_lab)
        if p > Y.shape[0]:
            p = Y.shape[0]
            LOG.warn("Warning: p greater than the number of labeled indexes",
                     LOG.ll.FILTER)

        W = scipy_to_np(W)
        L = gutils.lap_matrix(W, which_lap='sym')
        D = gutils.deg_matrix(W)

        def check_symmetric(a, tol=1e-8):
            return np.allclose(a, a.T, atol=tol)

        if check_symmetric(L):
            # NOTE: the `eigvals` keyword is deprecated since SciPy 1.5; newer versions use subset_by_index=(1, p).
            E = sp.eigh(L, D, eigvals=(1, p))[1]
        else:
            LOG.warn("Warning: Laplacian not symmetric", LOG.ll.FILTER)
            eigenValues, eigenVectors = sp.eig(L, D)
            idx = eigenValues.argsort()
            eigenValues = eigenValues[idx]
            assert eigenValues[0] <= eigenValues[eigenValues.shape[0] - 1]
            eigenVectors = eigenVectors[:, idx]
            E = eigenVectors[:, 1:(p + 1)]

        e_lab = E[labeledIndexes, :]
        """ TIKHONOV REGULARIZATION. Currently set to 0."""
        TIK = np.zeros(shape=e_lab.shape)
        try:
            A = np.linalg.inv(e_lab.T @ e_lab + TIK.T @ TIK) @ e_lab.T
        except:
            A = np.linalg.pinv(e_lab.T @ e_lab + TIK.T @ TIK) @ e_lab.T
        F = np.zeros(shape=Y.shape)

        y_m = np.argmax(Y, axis=1)[labeledIndexes]

        for i in range(Y.shape[1]):
            c = np.ones(num_lab)
            c[y_m != i] = -1
            a = A @ np.transpose(c)
            LOG.debug(a, LOG.ll.FILTER)
            for j in np.arange(F.shape[0]):
                F[j, i] = np.dot(a, E[j, :])

        ERmat = -1 * np.ones((Y.shape[0], ))

        Y_amax = np.argmax(Y, axis=1)
        for i in np.where(labeledIndexes)[0]:
            ERmat[i] = np.square(Y[i, Y_amax[i]] - F[i, Y_amax[i]])

        removed_Lids = np.argsort(ERmat)
        removed_Lids = removed_Lids[::-1]

        labeledIndexes = np.array(labeledIndexes)
        Y = np.copy(Y)
        for i in range(tuning_iter):
            labeledIndexes[removed_Lids[i]] = False
            if hook is not None:
                hook._step(step=i,
                           X=X,
                           W=W,
                           Y=Y,
                           labeledIndexes=labeledIndexes)

        return Y, labeledIndexes
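
The filter above is a (Tikhonov-regularized) least-squares fit on the first $p$ Laplacian eigenvectors; a sketch of what the code computes, with $E_l$ the labeled rows of $E$ and $c \in \{-1, +1\}^{l}$ the one-vs-rest targets of one class:

$$a = (E_l^\top E_l + \Gamma^\top \Gamma)^{-1} E_l^\top c, \qquad F_{ji} = \langle a, E_{j\cdot} \rangle,$$

and the removal score of a labeled instance is the squared residual $(Y_{i,\hat{y}_i} - F_{i,\hat{y}_i})^2$, so the labels that the smooth eigenbasis fit reproduces worst are unlabeled first.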
Example no. 8
def apply_noise(Y, labeledIndexes, A, seed=None, deterministic=True):
    """ Corrupts a set percentage of initial labels with noise.
    
    Args:
        Y (`[NDArray[int].shape[N,C]`) : Matrix encoding initial beliefs.
        A (`[NDArray[int].shape[C,C]`): Transition probabilities between each class.
        labeledIndexes (`NDArray[bool].shape[N]`) : determines which indices are to be considered as labeled.
        seed (float) : Optional. Used to reproduce results. 
        
    Returns:
        `NDArray[int].shape[N,C]` : Belief matrix after corruption.
        
    """
    np.random.seed(seed)
    old_A = np.copy(np.asarray(A))
    if not np.all(old_A <= 1):
        LOG.debug(old_A, LOG.ll.NOISE)
        raise Exception("trans. mat has value >1")
    old_Y = np.copy(Y)
    is_flat = np.ndim(Y) == 1
    if is_flat:
        Y = gutils.init_matrix(Y, labeledIndexes)
    c = Y.shape[1]
    n = Y.shape[0]

    Y = Y[labeledIndexes, :]
    Y_flat = np.argmax(Y, axis=1)

    vec = np.random.RandomState(seed).permutation(Y.shape[0])
    assert vec is not None
    cursor = np.zeros((c), dtype=np.int32)

    if deterministic:
        A = transition_count_mat(Y, A)
    else:

        class_freq = [int(np.sum(Y[:, i])) for i in range(c)]

        num_clean = np.sum(labeledIndexes) * sum(
            [old_A[i, i] for i in range(c)]) / c

        num_clean = int(np.round(num_clean))
        num_noisy = np.sum(labeledIndexes) - num_clean

        ##########
        perm = np.random.permutation(Y.shape[0])[0:num_noisy]
        A = np.zeros((c, c))
        for i in range(c):
            A[i, i] = class_freq[i]

        for my_id in perm:
            j = np.argmax(Y[my_id, :])
            A[j, j] -= 1
            new_j = j
            while new_j == j:
                new_j = np.random.choice(c)
            A[j, new_j] += 1

        assert np.sum(A) == np.sum(labeledIndexes)
        LOG.debug(A, LOG.ll.NOISE)
        ###############

    for i in np.arange(Y_flat.shape[0]):
        current_class = Y_flat[vec[i]]
        while A[current_class, cursor[current_class]] == 0:
            cursor[current_class] += 1
            assert cursor[current_class] < c
        Y_flat[vec[i]] = cursor[current_class]
        A[current_class, cursor[current_class]] -= 1

    noisy_Y = np.zeros(shape=(n, c))
    labeledIndexes_where = np.where(labeledIndexes)[0]
    for l in range(Y_flat.shape[0]):
        noisy_Y[labeledIndexes_where[l], Y_flat[l]] = 1
    noisy_Y[np.logical_not(labeledIndexes), :] = old_Y[
        np.logical_not(labeledIndexes), :]
    LOG.info(
        "Changed a fraction {} of the labeled entries".format(
            np.round(1 - gutils.accuracy(np.argmax(Y, axis=1), Y_flat), 6)),
        LOG.ll.NOISE)

    if is_flat:
        old_Y[labeledIndexes] = np.argmax(noisy_Y[labeledIndexes], axis=1)
        return old_Y
    else:
        return noisy_Y
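
A minimal usage sketch for `apply_noise` (illustrative values; rows of the transition matrix must sum to 1, and `gutils`/`transition_count_mat` must be importable as in the module above):

    import numpy as np

    # Two classes: each labeled instance keeps its class w.p. 0.8, flips w.p. 0.2.
    A = np.array([[0.8, 0.2],
                  [0.2, 0.8]])
    Y = np.array([0, 1, 0, 1])  # flat labels are accepted and returned flat
    labeledIndexes = np.array([True, True, True, False])
    noisy_Y = apply_noise(Y, labeledIndexes, A, seed=42, deterministic=True)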
Example no. 9
    def LGCLVO(self,
               X,
               W,
               Y,
               labeledIndexes,
               mu=99.0,
               lgc_iter=10000,
               hook=None,
               which_loss="xent"):
        if which_loss is None:
            return Y, labeledIndexes

        Y = np.copy(Y).astype(np.float32)
        #We make a deep copy of labeledIndexes
        labeledIndexes = np.array(labeledIndexes)
        lids = np.where(labeledIndexes)[0]
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        """ Ensure that it is symmetric """
        W = 0.5 * (W + W.transpose())

        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]

        if True:  # NOTE: the dense closed-form branch below is currently disabled
            l = np.sum(labeledIndexes)

            itertool_prod = [[i, j] for i in range(l) for j in range(l)]

            row = np.asarray([lids[i] for i in range(l)])
            col = np.asarray([i for i in range(l)])
            data = np.asarray([1.0] * l)
            temp_Y = _to_np(
                scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=(W.shape[0], l)))

            PL = LGC_iter_TF(X,
                             W,
                             Y=temp_Y,
                             labeledIndexes=labeledIndexes,
                             alpha=1 / (1 + mu),
                             num_iter=lgc_iter).astype(np.float32)

            PL = PL[labeledIndexes, :]
            PL[range(PL.shape[0]), range(PL.shape[0])] = 0  #Set diagonal to 0

            del temp_Y

            row = np.asarray(
                [lids[x[0]] for x in itertool_prod if x[0] != x[1]])
            col = np.asarray(
                [lids[x[1]] for x in itertool_prod if x[0] != x[1]])
            data = [PL[x[0], x[1]] for x in itertool_prod if x[0] != x[1]]
            P = scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=W.shape).tocsr()
        else:
            #Identity matrix
            I = np.identity(W.shape[0])
            #Get graph laplacian
            L = gutils.lap_matrix(W, which_lap='sym')
            L = 0.5 * (L + L.transpose())
            #Propagation matrix
            P = np.zeros(W.shape).astype(np.float32)
            P[np.ix_(labeledIndexes,
                     labeledIndexes)] = np.linalg.inv(I - (1 / (1 + mu)) *
                                                      (I - L))[np.ix_(
                                                          labeledIndexes,
                                                          labeledIndexes)]
            P[labeledIndexes, labeledIndexes] = 0
            P[np.ix_(labeledIndexes, labeledIndexes)] = P[np.ix_(
                labeledIndexes, labeledIndexes)] / np.sum(P[np.ix_(
                    labeledIndexes, labeledIndexes)],
                                                          axis=0,
                                                          keepdims=False)
            PL = P[np.ix_(labeledIndexes, labeledIndexes)]

        W = scipy.sparse.csr_matrix(W)

        def divide_row_by_sum(e):
            e = _to_np(e)
            e = e / np.sum(e + 1e-100, axis=1, keepdims=True)
            return e

        PL = divide_row_by_sum(PL)

        import tensorflow as tf
        A = PL
        B = Y[labeledIndexes, :]
        PTP = np.transpose(A) @ A
        PT = np.transpose(A)
        SIGMA = lambda: tf.linalg.tensor_diag(
            tf.clip_by_value(_SIGMA, 0.0, tf.float32.max))
        C = lambda: tf.linalg.tensor_diag(_C)

        _SIGMA = tf.Variable(np.ones((PL.shape[0], ), dtype=np.float32))

        _C = tf.Variable(_SIGMA)

        to_prob = lambda x: tf.nn.softmax(x, axis=1)
        xent = lambda y_, y: tf.reduce_mean(-tf.reduce_sum(
            y_ * tf.cast(tf.math.log(y + 1e-06), tf.float32), axis=[1]))

        sq_loss = lambda y_, y: tf.reduce_mean(
            tf.reduce_sum(tf.square(y_ - y), axis=[1]))
        norm_s = lambda: _SIGMA * tf.gather(
            tf.math.reciprocal_no_nan(
                tf.reduce_sum(to_prob(A @ SIGMA() @ B), axis=0)),
            tf.argmax(B, axis=1))

        if which_loss == "xent":
            loss = lambda: xent(to_prob(A @ SIGMA() @ B), B)
        elif which_loss == "mse":
            loss = lambda: sq_loss(
                to_prob(A @ SIGMA() @ B), B
            )  #+ 1*tf.reduce_sum(tf.square( tf.reduce_mean(to_prob(A@SIGMA()@B),axis=0) - tf.reduce_mean(B,axis=0)))

        acc = lambda: 100.0 * tf.math.reduce_mean(
            tf.cast(
                tf.equal(tf.argmax(to_prob(A @ SIGMA() @ B), axis=1),
                         tf.argmax(B, axis=1)), tf.float32))

        #0.99 - 0.07466477900743484
        #0.9 - 0.0856625959277153

        opt = tf.keras.optimizers.Adam(learning_rate=0.7)

        #for i in range(2000):
        #    opt.minimize(loss, [_C])
        #    print(loss().numpy())

        #for i in range(200):
        #    opt.minimize(loss, [_C])
        #    print(loss().numpy())

        np.set_printoptions(precision=3)
        #raise ""

        #0.99 - 0.06267
        #0.9 - 0.06164

        for i in range(5000):
            opt.minimize(loss, [_SIGMA])
            #_SIGMA.assign(norm_s())
            print("LOO loss: {}".format(loss().numpy()))

        self.Fl = (lambda: to_prob(A @ SIGMA() @ B))().numpy()

        Y[labeledIndexes, :] = self.Fl

        return Y, labeledIndexes
        # NOTE: the remainder of this method is unreachable experimental code.
        """
        
        Yl = Y[labeledIndexes,:]
        it_counter = 0
        loss  = np.inf
        LR = 0.1
        for i in range(1000000):
            grad_SIGMA = 2*np.transpose(C@A)@((C@A@SIGMA@B)-B)@np.transpose(B)
            grad_C = 2*(C@A@SIGMA@B-B)@(np.transpose(B)@np.transpose(SIGMA)@np.transpose(A))
            
            SIGMA -= LR*(np.diag(np.diagonal(grad_SIGMA)))
            C -= LR*(np.diag(np.diagonal(grad_C)))
            
            SIGMA =  np.maximum(SIGMA,np.zeros_like(SIGMA))
            new_loss = np.sum(np.square((C@A)@SIGMA@B - B))
            if new_loss > loss:
                LR *= 0.5
                it_counter += 1
                if it_counter == 10:
                    break
            else:
                it_counter = 0
                loss = new_loss
                print(new_loss)
        """

        return Y, labeledIndexes
        for _ in range(10):
            Yl = Y[labeledIndexes, :]
            PL_masked = PL * (Yl @ np.transpose(Yl))

            labeled_ids = np.where(labeledIndexes)[0]
            for i, l_id in enumerate(labeled_ids):
                den = np.square(np.max(Y[l_id, :])) * np.sum(
                    np.square(PL[:, i]))
                den += 1e-30
                num = np.sum(PL_masked[:, i])
                Y[l_id, :] *= (num / den)

        return Y, labeledIndexes
Example no. 10
    def __LGC_iter_TF(self,
                      X,
                      W,
                      Y,
                      labeledIndexes,
                      alpha=0.1,
                      useEstimatedFreq=True,
                      num_iter=1000,
                      hook=None):
        from tf_labelprop.gssl.classifiers._lgc_tf import LGC_iter_TF
        """ Init """
        import scipy.sparse
        if not scipy.sparse.issparse(W):
            W = scipy.sparse.csr_matrix(W)
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0
        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        """ Estimate frequency of classes"""
        num_labeled = Y[labeledIndexes].shape[0]
        num_classes = Y.shape[1]
        if useEstimatedFreq is not None:
            if isinstance(useEstimatedFreq, bool):
                estimatedFreq = np.sum(Y[labeledIndexes], axis=0) / num_labeled
            else:
                estimatedFreq = useEstimatedFreq

        else:
            estimatedFreq = np.repeat(1 / num_classes, num_classes)
        omega = estimatedFreq
        """  """
        mu = (1 - alpha) / alpha
        n = Y.shape[0]
        c = Y.shape[1]
        print(np.concatenate([Y, np.ones((n, 1))], axis=1))
        """ stuff that has matrix multiplication with theta """
        PY1 = LGC_iter_TF(X, W, np.concatenate([Y, np.ones((n, 1))], axis=1),
                          labeledIndexes, alpha, num_iter, hook)
        PY1 = np.asarray(PY1)
        F_lgc, theta_1n = (1 / mu) * PY1[:, :-1], (1 / mu) * PY1[:, -1]
        theta_1n_ratio = (theta_1n /
                          (np.sum(theta_1n)))[:, np.newaxis]  #Shape: nx1
        """ Intermediate calc """
        zeta = n * omega - np.sum(F_lgc, axis=0)  #Shape: 1xc
        zeta = np.reshape(zeta, (1, c))

        ypsilon = np.ones(shape=(n,1)) - np.sum(F_lgc,axis=1)[:,np.newaxis] -\
             theta_1n_ratio * (n - np.sum(F_lgc.flatten())) #Shape: nx1

        F = F_lgc
        F += theta_1n_ratio @ zeta
        F += (1 / c) * (ypsilon @ np.ones((1, c)))
        import pandas as pd
        print(pd.Series(np.argmax(F, axis=1)).value_counts() / n)

        log_args = [
            np.round(x, 3)
            for x in [np.sum(F, axis=1)[0:10],
                      np.sum(F, axis=0), n * omega]
        ]
        LOG.info(
            "F sum on rows: {} (expected 1,1,...,1); F sum col: {} (expected {})"
            .format(*log_args))

        return F
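
The post-processing above is a class-mass normalization on top of LGC; a sketch of the final combination the code assembles, with $\omega$ the estimated class frequencies:

$$F = F_{\mathrm{lgc}} + \hat{\theta}\,\zeta + \tfrac{1}{c}\,\upsilon\,\mathbf{1}_c^\top, \qquad \zeta = n\omega - \mathbf{1}^\top F_{\mathrm{lgc}},$$

so that each row of $F$ sums to one and column $j$ sums to $n\omega_j$, which is exactly what the closing LOG.info call checks.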
Example no. 11
def select_input(dataset, seed, use_chapelle_splits=False, labeled_percent=None,
                 num_labeled=None, ensure_one_per_class=True, is_stratified=False,
                 **kwargs):
    """ Gets the input dataset, according to some specification.
    
    Currently, the following keyword arguments are required:
    
        * dataset : identifies the dataset. Currently, this may be
        
            1. The name of any of the toy datasets.
            2. `sk_gaussian` to use `sklearn's` ``make_blobs`` command at runtime.
               requires ``dataset_sd`` config to determine the dispersion.            
            3. `sk_spiral` to use `sklearn's` ``make_moons`` command at runtime.
               requires ``dataset_sd`` config to determine the dispersion.
        
        * seed : Specifies the seed for reproducibility purposes.
        * labeled_percent or num_labeled : Specifies the percentage/amount of instances to be marked as 'labeled'.
        
        Args:
            `**kwargs`: Key-value pairs with the configuration options of the input.
            
        Returns:
            (tuple): tuple containing:
                1. (`NDArray[float].shape[N,D]`) : An input matrix, describing N instances of dimension D.
                2. (Union[:class:`tf_labelprop.gssl.graph.gssl_affmat.AffMat`,None]) : An affinity matrix, when applicable.
                3. (`NDArray[float].shape[N,C]`) : A belief matrix corresponding to the clean labels. Every row is one-hot, marking down the correct label.
                4. (`NDArray[bool].shape[N]`): A boolean array, indicating which instances are to be interpreted as labeled.
        
        Raises:
            KeyError: If one of the required keys is not found.
    """
    
    """
    -------------------------------------------------------------------
        Read Dataset X,Y
    -------------------------------------------------------------------
    """
    assert issubclass(dataset, GSSLDataset)
    kwargs = dict(kwargs)
    
    if dataset == ChapelleDataset and use_chapelle_splits:

        if num_labeled is None:
            raise KeyError("To use Chapelle's datasets with the custom benchmark splits, please specify `num_labeled` directly")
        ds = dataset(split=seed, labels=num_labeled, use_splits=True, **kwargs)
        ds_x, _, ds_y = ds.load()
        labeledIndexes = np.array(ds.labeledIndexes)

        
    else:
        ds_x, _, ds_y = dataset(**kwargs).load()
        if len(ds_y.shape) == 1:
            print(ds_y.shape)
            print(np.min(ds_y))
            print(np.max(ds_y))
            ds_y = gutils.init_matrix(ds_y, ds_y >= 0)
        """
        -------------------------------------------------------------------
            Define Labeled Indices
        -------------------------------------------------------------------
        """
        _where_known = np.where(np.max(ds_y, axis=1) > 0)[0]
        
        if num_labeled is not None:
            labeled_percent = num_labeled / len(_where_known)
        if (num_labeled is None) and (labeled_percent is None):
            raise KeyError("Please use 'labeled_percent' or 'num_labeled' as a key in the input specification.")
        
        print(labeled_percent)
        
        labeledIndexes = np.array([False] * ds_y.shape[0])

        labeledIndexes[_where_known] = gutils.split_indices(ds_y[_where_known, :],
                                                            split_p=labeled_percent,
                                                            ensure_one_per_class=ensure_one_per_class,
                                                            is_stratified=is_stratified,
                                                            seed=seed)
        print(np.mean(labeledIndexes))
    
    return ds_x.astype(np.float32), _, ds_y.astype(np.float32), labeledIndexes
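
A minimal usage sketch (hypothetical: `SomeToyDataset` stands in for any GSSLDataset subclass):

    # Hypothetical dataset class; 10% of the instances with known labels
    # are marked as labeled.
    X, W, Y, labeledIndexes = select_input(dataset=SomeToyDataset,
                                           seed=42,
                                           labeled_percent=0.1,
                                           ensure_one_per_class=True)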