Example #1
    def load_eigenfunctions(self, m, which_lap='sym', D=None, remove_first_eig=False):
        """ Extract ``m`` eigenvectors and eigenvalues of the Laplacian, in non-decreasing order.

            Args:
                m (int) : number of eigenvectors to extract
                which_lap (str) : chooses the type of Laplacian. One of ``sym``, ``comb`` or ``rw``.
                D (`NDArray[float].shape[N,N]`) : extra matrix for the generalized eigenvalue problem
                remove_first_eig (bool) : whether to discard the first eigenpair

            Returns:
                Pair[NDArray[float].shape[N,M], NDArray[float].shape[M]] : matrix of eigenvectors, and vector of eigenvalues

        """
        if self.cache_dir is not None:
            eigvec_path = osp.join(self.cache_dir,f'eigvec_{which_lap}.npy')
            eigval_path = osp.join(self.cache_dir,f'eigval_{which_lap}.npy')
            files_exist = (osp.isfile(eigvec_path)) and (osp.isfile(eigval_path))
            if files_exist:
                LOG.info(f"Loading eigenfunctions in {eigvec_path} ...")
                EIGVAL = np.load(eigval_path)
                EigVec = np.load(eigvec_path)
                if EIGVAL.shape[0] >= m:
                    return EigVec[:,:m], EIGVAL[:m]

        
        L = lap_matrix(self,which_lap)
        print(f"Extracting {m} eigenvectors for matrix L (shape: {L.shape}, #edges= {L.data.shape}")
        m = min(m,L.shape[0]-1)
        eigVec, eigVal = extract_lap_eigvec(L,m,D,remove_first_eig)
        if self.cache_dir is not None:
            LOG.info(f"Saving eigenfunctions to {eigvec_path} ...")
            np.save(eigvec_path,eigVec)
            np.save(eigval_path,eigVal)
        return eigVec, eigVal
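
For reference, here is a minimal standalone sketch of the same compute-then-cache pattern, using `scipy.sparse.linalg.eigsh` on a symmetric Laplacian. The helper name `cached_eigsh` and the cache layout are illustrative assumptions, not part of the API above:

    import os.path as osp

    import numpy as np
    from scipy.sparse.linalg import eigsh

    def cached_eigsh(L, m, cache_dir=None, tag='sym'):
        """Smallest-m eigenpairs of a symmetric (sparse) Laplacian, with .npy caching."""
        if cache_dir is not None:
            vec_path = osp.join(cache_dir, f'eigvec_{tag}.npy')
            val_path = osp.join(cache_dir, f'eigval_{tag}.npy')
            if osp.isfile(vec_path) and osp.isfile(val_path):
                vec, val = np.load(vec_path), np.load(val_path)
                if val.shape[0] >= m:  #cache already holds at least m eigenpairs
                    return vec[:, :m], val[:m]
        m = min(m, L.shape[0] - 1)  #eigsh requires m < N
        val, vec = eigsh(L, k=m, which='SM')  #smallest eigenvalues, non-decreasing
        if cache_dir is not None:
            np.save(vec_path, vec)
            np.save(val_path, val)
        return vec, val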
Example #2
    def __LGC(self, X, W, Y, labeledIndexes, alpha=0.1, hook=None):
        import scipy.sparse
        if scipy.sparse.issparse(W):
            W = W.todense()

        Y = self.CLEAN_UNLABELED_ROWS(Y, labeledIndexes)
        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        #Get D^{-1/2} (computed here but unused below; S is built directly from the Laplacian)
        d_sqrt = gutils.deg_matrix(W, pwr=-1 / 2)

        I = np.identity(Y.shape[0])
        S = I - gutils.lap_matrix(W, which_lap='sym')

        from tf_labelprop.gssl.graph.gssl_utils import scipy_to_np as to_np
        F = (np.matmul(np.linalg.inv(I - alpha * S), Y))

        return to_np(F)
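
The line above is the closed-form LGC solution F = (I - alpha*S)^(-1) Y with S = I - L_sym. A self-contained numpy sketch of the same formula on a toy 4-node chain graph (graph and labels invented for illustration):

    import numpy as np

    #Toy 4-node chain graph
    W = np.array([[0, 1, 0, 0],
                  [1, 0, 1, 0],
                  [0, 1, 0, 1],
                  [0, 0, 1, 0]], dtype=float)
    d = W.sum(axis=1)
    S = W / np.sqrt(np.outer(d, d))  #D^{-1/2} W D^{-1/2} = I - L_sym
    Y = np.zeros((4, 2))
    Y[0, 0] = 1.0  #node 0 labeled as class 0
    Y[3, 1] = 1.0  #node 3 labeled as class 1
    alpha = 0.1
    F = np.linalg.inv(np.eye(4) - alpha * S) @ Y
    print(F.argmax(axis=1))  #[0 0 1 1]: each unlabeled node follows its nearest label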
Example #3
    def __GTAM(self,X,W,Y,labeledIndexes,mu = 99.0,useEstimatedFreq=True,num_iter = None,
             constant_prop=False,hook=None):
        '''BEGIN initialization'''
        Y = self.CLEAN_UNLABELED_ROWS(Y, labeledIndexes)
        labeledIndexes = np.array(labeledIndexes)

        
        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        
        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]
        
        
        
        """ Estimate frequency of classes"""
        if isinstance(useEstimatedFreq,bool):
            if useEstimatedFreq == False:
                estimatedFreq = np.repeat(1/num_classes,num_classes)
            elif useEstimatedFreq == True:
                estimatedFreq = np.sum(Y[labeledIndexes],axis=0) / num_labeled
        LOG.debug("Estimated frequency: {}".format(estimatedFreq),LOG.ll.CLASSIFIER)

        
        
        
        D = gutils.deg_matrix(W, flat=True)
        #Identity matrix
        I = np.identity(W.shape[0])
        #Get graph laplacian
        L = gutils.lap_matrix(W, which_lap='sym')
        #Propagation matrix
        from scipy.linalg import inv as invert
        P = invert(I - 1 / (1 + mu) * (I - L)) * mu / (1 + mu)
        
        P_t = P.transpose()
        #Matrix A
        A = ((P_t @ L) @ P) + mu* ((P_t - I) @ (P - I))
        A = 0.5*(A + A.transpose())
        
        if hook is not None:
            import scipy.sparse
            W = scipy.sparse.coo_matrix(W)
        
        Z = []
        Q = None
        
        
        #Determine nontuning iter
        if num_iter is None:
            num_iter = num_unlabeled
        else:
            num_iter = min(num_iter,num_unlabeled)
            
        id_min_line, id_min_col = -1,-1
        '''END initialization'''
        #######################################################################################
        '''BEGIN iterations'''
        for i in np.arange(num_iter):

            '''Z matrix - The binary values of current Y are replaced with their corresponding D entries.
                Then, each row is normalized so that it sums to its estimated influence.
            '''
            ul = np.logical_not(labeledIndexes)
            
            Z = gutils.calc_Z(Y, labeledIndexes, D, estimatedFreq,weigh_by_degree=True)


            if Q is None:
                #Compute graph gradient
                Q = np.matmul(A,Z)
                if hook is not None:
                    Q_pure = np.copy(Q)
                
                Q[labeledIndexes,:] = np.inf
                
            else:
                Q[id_min_line,:] = np.inf
                d_sj = np.sum(Z[labeledIndexes,id_min_col])
                d_sj1 = d_sj + Z[id_min_line,id_min_col]
                Q[ul, id_min_col] = (d_sj / d_sj1) * Q[ul, id_min_col] \
                    + (Z[id_min_line, id_min_col] / d_sj1) * A[ul, id_min_line]
            
            #Find minimum unlabeled index
            
            if constant_prop:
                expectedNumLabels = estimatedFreq * sum(labeledIndexes)
                actualNumLabels = np.sum(Y[labeledIndexes], axis=0)
                class_to_label = np.argmax(expectedNumLabels - actualNumLabels)
                id_min_col = class_to_label
                id_min_line = np.argmin(Q[:, class_to_label])
            else:
                id_min = np.argmin(Q)
                id_min_line = id_min // num_classes
                id_min_col = id_min % num_classes
            
                
            
            #Update Y and labeledIndexes
            labeledIndexes[id_min_line] = True
            Y[id_min_line,id_min_col] = 1
            
            
            
            #Maybe plot current iteration
            
            
            if hook is not None:
                hook._step(step=i,Y=Y,labeledIndexes=labeledIndexes,P=P,Z=Z,Q=Q_pure,
                           id_min_line=id_min_line,id_min_col=id_min_col)
        '''END iterations'''    
        ######################################################################################################
        if self.return_labels:
            return np.asarray(Z)
        else:
            return np.asarray(P @ Z)
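
The `// num_classes` and `% num_classes` pair used above is just `np.unravel_index` applied to the argmin of the flattened gradient matrix; a quick check with a random stand-in for Q:

    import numpy as np

    rng = np.random.default_rng(0)
    num_classes = 3
    Q = rng.normal(size=(5, num_classes))  #stand-in for the graph gradient A @ Z
    id_min = np.argmin(Q)
    id_min_line = id_min // num_classes  #instance to label
    id_min_col = id_min % num_classes  #class to assign
    assert (id_min_line, id_min_col) == np.unravel_index(np.argmin(Q), Q.shape)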
Example #4
    def LDST(self,
             X,
             W,
             Y,
             labeledIndexes,
             mu=99.0,
             useEstimatedFreq=True,
             tuning_iter=0,
             hook=None,
             constant_prop=False,
             useZ=False,
             weigh_by_degree=False):
        '''BEGIN initialization'''
        Y = self.CLEAN_UNLABELED_ROWS(Y, labeledIndexes)
        labeledIndexes = np.array(labeledIndexes)

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]
        """ Estimate frequency of classes"""
        if isinstance(useEstimatedFreq, bool):
            if useEstimatedFreq == False:
                estimatedFreq = np.repeat(1 / num_classes, num_classes)
            elif useEstimatedFreq == True:
                estimatedFreq = np.sum(Y[labeledIndexes], axis=0) / num_labeled

        D = gutils.deg_matrix(W, flat=True)
        #Identity matrix
        I = np.identity(W.shape[0])
        #Get graph laplacian
        L = gutils.lap_matrix(W, which_lap='sym')
        #Propagation matrix
        from scipy.linalg import inv as invert
        P = invert(I - 1 / (1 + mu) * (I - L)) * mu / (1 + mu)

        P_t = P.transpose()
        #Matrix A
        A = ((P_t @ L) @ P) + mu * ((P_t - I) @ (P - I))
        A = 0.5 * (A + A.transpose())
        import scipy.sparse
        if hook is not None:
            W = scipy.sparse.coo_matrix(W)
        Z = []

        #######################################################################################
        '''BEGIN iterations'''
        for i in np.arange(tuning_iter):
            '''Z matrix - The binary values of current Y are replaced with their corresponding D entries.
                Then, each row is normalized so that it sums to its estimated influence.
            '''
            if useZ:
                Z = gutils.calc_Z(Y,
                                  labeledIndexes,
                                  D,
                                  estimatedFreq,
                                  weigh_by_degree=weigh_by_degree,
                                  reciprocal=False)
                Q = np.matmul(A, Z)
            else:
                Q = np.matmul(A, Y)

            #During label tuning, we'll also 'unlabel' the argmax

            unlabeledIndexes = np.logical_not(labeledIndexes)
            temp = Q[unlabeledIndexes, :]
            Q[unlabeledIndexes, :] = -np.inf
            id_max = np.argmax(Q)

            id_max_line = id_max // num_classes
            id_max_col = id_max % num_classes

            Q[unlabeledIndexes, :] = temp

            Q[labeledIndexes, :] = np.inf

            #Find minimum unlabeled index
            if constant_prop:
                id_min_line = np.argmin(Q[:, id_max_col])
                id_min_col = id_max_col
            else:
                id_min = np.argmin(Q)
                id_min_line = id_min // num_classes
                id_min_col = id_min % num_classes

            #Label OP
            labeledIndexes[id_min_line] = True
            Y[id_min_line, id_min_col] = 1

            #Unlabel OP
            labeledIndexes[id_max_line] = False
            Y[id_max_line, id_max_col] = 0

            if hook is not None:
                hook._step(step=i,
                           X=X,
                           W=W,
                           Y=Y,
                           labeledIndexes=labeledIndexes,
                           l_i=id_max_line,
                           l_j=id_max_col,
                           ul_i=id_min_line,
                           ul_j=id_min_col)
        '''END iterations'''

        return Y, labeledIndexes
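
A note on the propagation matrix built above: with alpha = 1/(1+mu), P = inv(I - (1/(1+mu))*(I-L)) * mu/(1+mu) is exactly (1-alpha) * inv(I - alpha*(I-L)), i.e. standard LGC propagation scaled by (1-alpha). A small numerical check, with a toy symmetric matrix standing in for the normalized Laplacian:

    import numpy as np

    mu = 99.0
    alpha = 1 / (1 + mu)
    L = np.array([[1.0, -0.5, 0.0],
                  [-0.5, 1.0, -0.5],
                  [0.0, -0.5, 1.0]])  #toy symmetric stand-in for L_sym
    I = np.eye(3)
    P = np.linalg.inv(I - 1 / (1 + mu) * (I - L)) * mu / (1 + mu)
    P_lgc = (1 - alpha) * np.linalg.inv(I - alpha * (I - L))
    assert np.allclose(P, P_lgc)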
Example #5
    def LGCLVO(self,
               X,
               W,
               Y,
               labeledIndexes,
               mu=99.0,
               useEstimatedFreq=True,
               tuning_iter=0,
               hook=None,
               constant_prop=False,
               useZ=True,
               normalize_rows=True):

        labeledIndexes, noisyIndexes = labeledIndexes

        Y = np.copy(Y)
        #We make a deep copy of labeledindexes
        labeledIndexes = np.array(labeledIndexes)
        lids = np.where(labeledIndexes)[0]
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        W = 0.5 * (W + W.transpose())

        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]

        D = gutils.deg_matrix(W, flat=True)
        if useEstimatedFreq is not None:
            if isinstance(useEstimatedFreq, bool):
                estimatedFreq = np.sum(Y[labeledIndexes], axis=0) / num_labeled
            else:
                estimatedFreq = useEstimatedFreq
        else:
            estimatedFreq = np.repeat(1 / num_classes, num_classes)

        if scipy.sparse.issparse(W):
            l = np.sum(labeledIndexes)

            itertool_prod = [[i, j] for i in range(l) for j in range(l)]

            row = np.asarray([lids[i] for i in range(l)])
            col = np.asarray([i for i in range(l)])
            data = np.asarray([1.0] * l)
            temp_Y = _to_np(
                scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=(W.shape[0], l)))

            PL = LGC_iter_TF(X,
                             W,
                             Y=temp_Y,
                             labeledIndexes=labeledIndexes,
                             alpha=1 / (1 + mu),
                             num_iter=1000)

            PL = PL[labeledIndexes, :]
            PL[range(PL.shape[0]), range(PL.shape[0])] = 0  #Set diagonal to 0

            del temp_Y

            row = np.asarray(
                [lids[x[0]] for x in itertool_prod if x[0] != x[1]])
            col = np.asarray(
                [lids[x[1]] for x in itertool_prod if x[0] != x[1]])
            data = [PL[x[0], x[1]] for x in itertool_prod if x[0] != x[1]]
            P = scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=W.shape).tocsr()
        else:
            #Identity matrix
            I = np.identity(W.shape[0])
            #Get graph laplacian
            L = gutils.lap_matrix(W, which_lap='sym')
            #Propagation matrix
            P = np.zeros(W.shape)
            lab_ix = np.ix_(labeledIndexes, labeledIndexes)
            P[lab_ix] = np.linalg.inv(I + 0.5 * (L + L.transpose()) / mu)[lab_ix]
            P[labeledIndexes, labeledIndexes] = 0
            P[lab_ix] = P[lab_ix] / np.sum(P[lab_ix], axis=0, keepdims=False)

        W = scipy.sparse.csr_matrix(W)

        Z = []

        detected_noisylabels = []
        suggested_labels = []
        where_noisylabels = []
        Q_values = []

        Y_flat = np.argmax(Y, axis=1)

        def divide_row_by_sum(e):
            e = _to_np(e)
            if normalize_rows:
                e = e / np.sum(e + 1e-100, axis=1, keepdims=True)
            return e

        def find_argmin(Q, class_to_unlabel):
            id_min_line = np.argmin(Q[:, class_to_unlabel])
            id_min_col = class_to_unlabel
            return id_min_line, id_min_col, Q[id_min_line, id_min_col]

        #######################################################################################
        '''BEGIN iterations'''

        Q = None
        cleanIndexes = np.copy(labeledIndexes)
        for i_iter in range(tuning_iter):

            found_noisy = True

            if np.sum(labeledIndexes) > 0 and found_noisy:
                '''Z matrix - The binary values of current Y are replaced with their corresponding D entries.
                    Then, each row is normalized so that it sums to its estimated influence.
                '''
                useZ = False  #NOTE: hard-coded, overriding the useZ argument
                if i_iter >= 0:
                    if (not self.use_baseline) or Q is None:
                        if useZ:
                            Z = gutils.calc_Z(Y,
                                              labeledIndexes,
                                              D,
                                              estimatedFreq,
                                              weigh_by_degree=False)
                            F = P @ Z
                            if scipy.sparse.issparse(F):
                                F = np.asarray(F.toarray())

                            #Compute graph gradient
                            Q = (divide_row_by_sum(F) - divide_row_by_sum(Z))
                        else:
                            F = P @ Y
                            if scipy.sparse.issparse(F):
                                F = np.asarray(F.toarray())
                            Q = (divide_row_by_sum(F) - divide_row_by_sum(Y))
                #import scipy.stats

                #During label tuning, we'll also 'unlabel' the argmax
                unlabeledIndexes = np.logical_not(cleanIndexes)
                if self.early_stop:
                    Q[np.sum(F, axis=1) == 0.0, :] = 9999

                Q[unlabeledIndexes, :] = np.inf

                #Find minimum unlabeled index
                if constant_prop:
                    expectedNumLabels = estimatedFreq * np.sum(labeledIndexes)
                    actualNumLabels = np.sum(Y[labeledIndexes, :], axis=0)
                    temp = expectedNumLabels - actualNumLabels
                    class_priority = np.argsort(temp)

                    found_noisy = False
                    for class_to_unlabel in class_priority:
                        id_min_line, id_min_col, val = find_argmin(
                            Q, class_to_unlabel)
                        if val < 0:
                            #This means that the class would have a different label under the modified label prop
                            found_noisy = True
                            break

                else:
                    id_min = np.argmin(Q)
                    id_min_line = id_min // num_classes
                    id_min_col = id_min % num_classes  #The class previously assigned to instance X_{id_min_line}
                    found_noisy = Q[id_min_line, id_min_col] < 0

                if found_noisy:

                    id_max_col = np.argmax(
                        Q[id_min_line, :])  #The new, suggested class

                    detected_noisylabels.append(id_min_col)
                    where_noisylabels.append(id_min_line)

                    suggested_labels.append(id_max_col)
                    Q_values.append(1 + Q[id_min_line, id_min_col])

                    #Unlabel OP
                    if not labeledIndexes[id_min_line]:
                        raise Exception(
                            "Error: unlabeled instance was selected")
                    if Y[id_min_line, id_min_col] != 1:
                        raise Exception("Error: picked wrong class to unlabel")

                    labeledIndexes[id_min_line] = False
                    cleanIndexes[id_min_line] = False

                    if Y[id_min_line, id_min_col] != 1:
                        raise Exception(
                            "Tried to remove label from unlabeled instance")

                    Y[id_min_line, id_min_col] = 0
                    if self.relabel:
                        labeledIndexes[id_min_line] = True
                        Y[id_min_line, :] = 0
                        Y[id_min_line, id_max_col] = 1

            if hook is not None:
                hook._step(step=(i_iter + 1),
                           X=X,
                           W=W,
                           Y=Y,
                           labeledIndexes=labeledIndexes)
        '''
        MATPLOTLIB stuff 
        '''

        import cv2 as cv  #only used by the commented-out Otsu threshold below

        #ret2,th2 = cv.threshold(255*np.asarray(Q_values).astype(np.uint8),0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)

        import matplotlib
        matplotlib.use("TkAgg")
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(5 * 3, 2 * 3))
        ax = fig.add_subplot()
        #ax.plot(np.arange(len(Q_values)),Q_values)
        ax.scatter(np.arange(len(Q_values)),
                   Q_values,
                   c=noisyIndexes[where_noisylabels])
        ax.set_xlabel("#Labels Removed", fontsize=22)
        ax.set_ylabel("Consistency with LGC", fontsize=22)

        ax.axvline(np.sum(noisyIndexes), color='red')

        # We change the fontsize of minor ticks label
        ax.tick_params(axis='both', which='major', labelsize=18)
        ax.tick_params(axis='both', which='minor', labelsize=18)

        # For the minor ticks, use no labels; default NullFormatter.
        ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.1))
        fig.tight_layout()
        plt.axhline(np.max(Q_values[0:(1 + np.sum(noisyIndexes))]),
                    color='green')
        plt.grid(True, axis='y', linestyle='-', alpha=0.5, which='major')
        plt.grid(True, axis='y', linestyle='--', alpha=0.5, which='minor')

        #plt.axvline(th2,color='purple')
        plt.savefig(
            '/home/klaus/eclipse-workspace/NoisyGSSL/results/python_plotly/' +
            'mnist_alpha=0.99_noise=0.3_thresh_static.png')

        #print(th2)
        plt.show()
        '''END iterations'''
        LOG.info(
            "NUMBER OF DETECTED NOISY INSTANCES:{}".format(
                len(detected_noisylabels)), LOG.ll.FILTER)

        return Y, labeledIndexes
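
The core of the detection loop above is the consistency matrix Q = rownorm(P @ Y) - rownorm(Y): an entry Q[i, c] < 0 at a labeled instance's assigned class means the leave-one-out propagation disagrees with that label. A toy numpy sketch (P and Y invented for illustration):

    import numpy as np

    def row_norm(M, eps=1e-100):
        return M / np.sum(M + eps, axis=1, keepdims=True)

    P = np.array([[0.0, 0.9, 0.1],  #stand-in leave-one-out propagation matrix
                  [0.9, 0.0, 0.1],  #(zero diagonal: no self-influence)
                  [0.1, 0.1, 0.0]])
    Y = np.array([[1.0, 0.0],  #instance 0: class 0
                  [1.0, 0.0],  #instance 1: class 0
                  [0.0, 1.0]])  #instance 2: class 1 (inconsistent with neighbors)
    Q = row_norm(P @ Y) - row_norm(Y)
    print(Q[np.arange(3), Y.argmax(axis=1)])  #[-0.1 -0.1 -1.0]: instance 2 is most negative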
Example #6
    def LDST(self,
             X,
             W,
             Y,
             labeledIndexes,
             mu=99.0,
             useEstimatedFreq=True,
             tuning_iter=0,
             hook=None,
             constant_prop=False,
             useZ=True):
        Y = self.CLEAN_UNLABELED_ROWS(Y, labeledIndexes)
        labeledIndexes = np.array(labeledIndexes)

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]
        """ Estimate frequency of classes"""
        if isinstance(useEstimatedFreq, bool):
            if useEstimatedFreq == False:
                estimatedFreq = np.repeat(1 / num_classes, num_classes)
            elif useEstimatedFreq == True:
                estimatedFreq = np.sum(Y[labeledIndexes], axis=0) / num_labeled

        D = gutils.deg_matrix(W, flat=True)
        #Identity matrix
        I = np.identity(W.shape[0])
        #Get graph laplacian
        L = gutils.lap_matrix(W, which_lap='sym')
        #Propagation matrix
        from scipy.linalg import inv as invert
        P = invert(I - 1 / (1 + mu) * (I - L)) * mu / (1 + mu)

        P_t = P.transpose()
        #Matrix A
        A = ((P_t @ L) @ P) + mu * ((P_t - I) @ (P - I))
        A = 0.5 * (A + A.transpose())
        import scipy.sparse
        if hook is not None:
            W = scipy.sparse.coo_matrix(W)
        Z = []

        #######################################################################################
        '''BEGIN iterations'''
        for i_iter in np.arange(tuning_iter):

            if np.sum(labeledIndexes) > 0:
                '''Z matrix - The binary values of current Y are replaced with their corresponding D entries.
                    Then, each row is normalized so that it sums to its estimated influence.
                '''

                if useZ:
                    Z = gutils.calc_Z(Y,
                                      labeledIndexes,
                                      D,
                                      estimatedFreq,
                                      weigh_by_degree=self.weigh_by_degree)
                    #Compute graph gradient
                    Q = np.matmul(A, Z)

                else:
                    Q = np.matmul(A, Y)

                for i_labeled in np.where(labeledIndexes)[0]:
                    assigned_class = np.argmax(Y[i_labeled, :])
                    other_classes = list(range(Y.shape[1]))
                    other_classes.remove(assigned_class)

                    best_other = min(Q[i_labeled, j] for j in other_classes)

                    if self.gradient_fix:
                        Q[i_labeled, assigned_class] = -best_other
                    Q[i_labeled, other_classes] = -np.inf
                #During label tuning, we'll also 'unlabel' the argmax
                unlabeledIndexes = np.logical_not(labeledIndexes)
                Q[unlabeledIndexes, :] = -np.inf

                #Find minimum unlabeled index
                if constant_prop:
                    raise ""
                    """expectedNumLabels = estimatedFreq * sum(labeledIndexes)
                    actualNumLabels = np.sum(Y[labeledIndexes],axis=0)
                    class_to_unlabel = np.argmax(actualNumLabels - expectedNumLabels)
                    
                    id_max_line = np.argmax(Q[:,class_to_unlabel])
                    id_max_col = class_to_unlabel
                    """

                else:
                    id_max = np.argmax(Q)
                    id_max_line = id_max // num_classes
                    id_max_col = id_max % num_classes

                if Y[id_max_line, id_max_col] != 1:
                    print(Y[id_max_line, :])
                    raise Exception(
                        "Tried to remove label from unlabeled instance")

                #Unlabel OP
                labeledIndexes[id_max_line] = False
                Y[id_max_line, id_max_col] = 0

            if hook is not None:
                hook._step(step=i_iter + 1,
                           X=X,
                           W=W,
                           Y=Y,
                           labeledIndexes=labeledIndexes)
        '''END iterations'''
        return Y, labeledIndexes
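
A condensed sketch of the masking logic above (ignoring `gradient_fix`): each labeled row of Q is restricted to its assigned class and all unlabeled rows are blocked, so the global argmax can only pick a labeled instance together with its current label. Toy values for Q and Y:

    import numpy as np

    num_classes = 3
    Q = np.array([[0.2, 0.9, 0.1],
                  [0.5, 0.3, 0.8],
                  [0.4, 0.6, 0.2]])
    Y = np.array([[1., 0., 0.],
                  [0., 0., 1.],
                  [0., 1., 0.]])
    labeled = np.array([True, True, False])
    for i in np.where(labeled)[0]:
        assigned = np.argmax(Y[i, :])
        others = [j for j in range(num_classes) if j != assigned]
        Q[i, others] = -np.inf
    Q[~labeled, :] = -np.inf
    id_max = np.argmax(Q)
    print(id_max // num_classes, id_max % num_classes)  #1 2: unlabel instance 1, class 2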
Example #7

    def __MR(self, X, W, Y, labeledIndexes, p, optimize_labels, hook=None):
        """
            -------------------------------------------------------------
                INITIALIZATION
            --------------------------------------------------------------
        """

        ORACLE_Y = Y.copy()
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        l = np.reshape(np.array(np.where(labeledIndexes)), (-1))
        num_lab = l.shape[0]

        if not isinstance(p, int):
            p = int(p * num_lab)

        if p > Y.shape[0]:
            p = Y.shape[0]
            LOG.warn("Warning: p greater than the number of instances",
                     LOG.ll.CLASSIFIER)
        #W = gutils.scipy_to_np(W)
        #W =  0.5* (W + W.T)
        L = gutils.lap_matrix(W, which_lap='sym')
        D = gutils.deg_matrix(W, flat=True, pwr=-1.0)

        L = 0.5 * (L + L.T)

        def check_symmetric(a, tol=1e-8):
            return np.allclose(a, a.T, atol=tol)

        def is_pos_sdef(x):
            return np.all(np.linalg.eigvals(x) >= -1e-06)

        import scipy.sparse
        sym_err = L - L.T
        sym_check_res = np.all(np.abs(sym_err.data) < 1e-7)  # tune this value
        assert sym_check_res
        """---------------------------------------------------------------------------------------------------
                EIGENFUNCTION EXTRACTION
        ---------------------------------------------------------------------------------------------------
        """
        import time
        start_time = time.time()

        import os.path as osp
        from tf_labelprop.settings import INPUT_FOLDER

        cache_eigvec = osp.join(INPUT_FOLDER, 'eigenVectors.npy')
        cache_eigval = osp.join(INPUT_FOLDER, 'eigenValues.npy')

        if False:  #Cache loading disabled: eigenfunctions are always recomputed below
            eigenValues, eigenVectors = np.load(cache_eigval), np.load(
                cache_eigvec)
            eigenVectors = eigenVectors[:, :p]
            eigenValues = eigenValues[:p]
        else:

            eigenVectors, eigenValues = W.load_eigenfunctions(p)

            time_elapsed = time.time() - start_time
            LOG.info("Took {} seconds to calculate eigenvectors".format(
                int(time_elapsed)))
            idx = eigenValues.argsort()
            eigenValues = eigenValues[idx]
            LOG.debug(eigenValues)
            assert eigenValues[0] <= eigenValues[eigenValues.shape[0] - 1]
            eigenVectors = eigenVectors[:, idx]
            np.save(cache_eigval, arr=eigenValues)
            np.save(cache_eigvec, arr=eigenVectors)
        U = eigenVectors
        LAMBDA = eigenValues

        U = U[:, np.argsort(LAMBDA)]
        LAMBDA = LAMBDA[np.argsort(LAMBDA)]

        import tensorflow as tf

        gpus = tf.config.experimental.list_physical_devices('GPU')

        #tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*8)])
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        """
        -------------------------------------------------------------------------
            Define Constants on GPU
        ------------------------------------------------------------------------------
        """
        U, X, Y = [tf.constant(x.astype(np.float32)) for x in [U, X, Y]]

        _U_times_U = tf.multiply(U, U)
        N = X.shape[0]

        def to_sp_diag(x):
            n = tf.cast(x.shape[0], tf.int64)
            indices = tf.concat([
                tf.range(n, dtype=tf.int64)[None, :],
                tf.range(n, dtype=tf.int64)[None, :]
            ],
                                axis=0)
            return tf.sparse.SparseTensor(indices=tf.transpose(indices),
                                          values=x,
                                          dense_shape=[n, n])

        @tf.function
        def smooth_labels(labels, factor=0.001):
            # smooth the labels
            labels = tf.cast(labels, tf.float32)
            labels *= (1 - factor)
            labels += (factor / tf.cast(tf.shape(labels)[0], tf.float32))
            # return the smoothed labels
            return labels

        @tf.function
        def divide_by_row(x, eps=1e-07):
            x = tf.maximum(x, 0 * x)
            x = x + eps  # [N,C]    [N,1]
            return x / (tf.reduce_sum(x, axis=-1)[:, None])

        def spd_matmul(x, y):
            return tf.sparse.sparse_dense_matmul(x, y)

        def mult_each_row_by(X, by):
            """ Elementwise multiplies each row by a given row vector.

                For a 2D tensor, this also corresponds to multiplying each column by the respective scalar in the given row vector.

                Args:
                    X (Tensor)
                    by (Tensor[shape=(C,)]): row vector

            """
            #[N,C] * [1,C]
            return X * by[None, :]

        def mult_each_col_by(X, by):
            #[N,C] * [N,1]
            return X * by[:, None]

        @tf.function
        def accuracy(y_true, y_pred):
            acc = tf.cast(
                tf.equal(tf.argmax(y_true, axis=-1),
                         tf.argmax(y_pred, axis=-1)), tf.float32)
            acc = tf.cast(acc, tf.float32)
            return tf.reduce_mean(acc)

        """
            -----------------------------------------------------------------------------
            DEFINE VARS
            --------------------------------------------------------------------------------
        """

        MU = tf.Variable(0.1, name="MU")

        LAMBDA = tf.constant(LAMBDA.astype(np.float32), name="LAMBDA")
        PI = tf.Variable(tf.ones(shape=(tf.shape(Y)[0], ), dtype=tf.float32),
                         name="PI")
        _l = LAMBDA.numpy()
        CUTOFF = tf.Variable(0.0, name='CUTOFF')
        CUTOFF_K = tf.Variable(1.0)

        @tf.function
        def get_alpha(MU):
            return tf.pow(2.0, -tf.math.reciprocal(tf.abs(100 * MU)))

        @tf.function
        def to_prob(x):
            return tf.nn.softmax(x, axis=1)

        @tf.function
        def cutoff(x):
            return 1.0 / (1.0 + tf.exp(-CUTOFF_K * (CUTOFF - x)))

        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Conv1D(8, kernel_size=5, padding='same'))
        model.add(tf.keras.layers.Activation('relu'))
        model.add(tf.keras.layers.Conv1D(8, kernel_size=5, padding='same'))
        model.add(tf.keras.layers.Activation('relu'))
        model.add(tf.keras.layers.Conv1D(1, kernel_size=3, padding='same'))

        model.add(tf.keras.layers.Flatten())
        """
            -----------------------------------------------------------------------------
            DEFINE FORWARD
            --------------------------------------------------------------------------------
        """

        @tf.function
        def forward(Y, U, PI, mode='train', remove_diag=True):
            if mode == 'train':
                U = tf.gather(U, indices=np.where(labeledIndexes)[0], axis=0)
                Y = tf.gather(Y, indices=np.where(labeledIndexes)[0], axis=0)
                #F = tf.gather(F,indices=np.where(labeledIndexes)[0],axis=0)

                PI = tf.gather(PI, indices=np.where(labeledIndexes)[0], axis=0)

            pi_Y = spd_matmul(to_sp_diag(tf.abs(PI)), Y)

            alpha = get_alpha(MU)
            """
                Maybe apply custom convolution to LAMBDA, otherwise just fit LGC's alpha using the corresponding filter 1/(1-alpha + alpha*lambda)
            """
            if not self.custom_conv:
                lambda_tilde = tf.math.reciprocal(1 - alpha + alpha * LAMBDA)
            else:
                #lambda_tilde = tf.math.reciprocal(1-alpha + alpha*LAMBDA)
                _lambda = (LAMBDA -
                           tf.reduce_mean(LAMBDA)) / tf.math.reduce_std(LAMBDA)
                lambda_tilde = tf.clip_by_value(
                    2 * tf.nn.sigmoid(
                        tf.reshape(model(_lambda[None, :, None]), (-1, ))), 0,
                    1)
                lambda_tilde = tf.sort(lambda_tilde, direction='DESCENDING')
            lambda_tilde = tf.reshape(divide_by_row(lambda_tilde[None, :]),
                                      (-1, ))

            _self_infl = mult_each_row_by(
                tf.square(U), by=lambda_tilde
            )  #Square each element of U, then dot product of each row with lambda_tilde
            _self_infl = tf.reduce_sum(_self_infl, axis=1)

            _P_op = U @ (mult_each_col_by(
                (tf.transpose(U) @ pi_Y), by=lambda_tilde))
            if not remove_diag:
                _diag_P_op = tf.zeros_like(
                    mult_each_col_by(pi_Y, by=_self_infl))
            else:
                _diag_P_op = mult_each_col_by(pi_Y, by=_self_infl)
            return divide_by_row(_P_op - _diag_P_op), lambda_tilde, pi_Y

        """
            -----------------------------------------------------------------------------
                DEFINE LOSSES and learning schedule
            --------------------------------------------------------------------------------
        """
        losses = {
            'xent':
            lambda y_, y: tf.reduce_mean(-tf.reduce_sum(y_ * tf.cast(
                tf.math.log(smooth_labels(y, factor=0.01)), tf.float32),
                                                        axis=[1])),
            'sq_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.square(y_ - y), axis=[1])),
            'abs_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.abs(y_ - y), axis=[1])),
            'hinge':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.maximum(1. - y_ * y, tf.zeros_like(y)),
                              axis=1))
        }

        NUM_ITER = 700
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            0.5, decay_steps=200, decay_rate=0.9, staircase=False)  #defined but not used below

        opt = tf.keras.optimizers.Adam(0.05)

        Y_l = tf.gather(Y, indices=np.where(labeledIndexes)[0], axis=0)

        #import matplotlib.pyplot as plt
        #import matplotlib
        #matplotlib.use('tkagg')
        import pandas as pd
        """
            -----------------------------------------------------------------------------
            LEARNING
            --------------------------------------------------------------------------------
        """
        L = []
        df = pd.DataFrame()
        max_acc, min_loss = 0, np.inf
        counter_since_best = 0
        for i in range(NUM_ITER):
            #MU.assign(i)
            with tf.GradientTape() as t:
                # no need to watch a variable:
                # trainable variables are always watched
                pred_L, lambda_tilde, pi_Y = forward(Y, U, PI, mode='train')
                loss_sq = losses['sq_loss'](pred_L, Y_l)
                loss = losses['xent'](pred_L, Y_l)

                loss_xent = losses['xent'](pred_L, Y_l)

            acc = accuracy(Y_l, pred_L)
            _not_lab = np.where(np.logical_not(labeledIndexes))[0]
            acc_true = accuracy(
                tf.gather(ORACLE_Y, indices=_not_lab, axis=0),
                tf.gather(forward(Y, U, PI, mode='eval')[0],
                          indices=_not_lab,
                          axis=0))

            L.append(
                np.array([i, loss_sq, loss, loss_xent, acc,
                          acc_true])[None, :])
            """
                TRAINABLE VARIABLES GO HERE
            """
            if self.custom_conv:
                trainable_variables = model.weights
            else:
                trainable_variables = [MU]
            if optimize_labels:
                trainable_variables.append(PI)

            if acc > max_acc:
                print(max_acc)
                best_trainable_variables = [
                    k.numpy() for k in trainable_variables
                ]
                max_acc = acc
                min_loss = loss
                counter_since_best = 0
            else:
                counter_since_best += 1
                if counter_since_best > 2000:
                    break
            """
                Apply gradients
            """
            gradients = t.gradient(loss, trainable_variables)
            opt.apply_gradients(zip(gradients, trainable_variables))
            """
                Project labels such that they sum up to the original amount
            """
            pi = PI.numpy()
            pi[labeledIndexes] = np.sum(
                labeledIndexes) * pi[labeledIndexes] / (np.sum(
                    pi[labeledIndexes]))
            PI.assign(pi)

            if i % 10 == 0:
                """ Print info """
                if hook is not None:
                    if self.hook_iter_mode == "labeled":
                        plot_y = np.zeros_like(Y)
                        plot_y[labeledIndexes] = Y_l.numpy()
                    else:
                        plot_y = tf.clip_by_value(
                            forward(Y, U, PI, mode='eval')[0], 0,
                            999999).numpy()
                    hook._step(step=i,
                               X=X,
                               W=W,
                               Y=plot_y,
                               labeledIndexes=labeledIndexes)
                alpha = get_alpha(MU)
                PI_l = tf.gather(PI,
                                 indices=np.where(labeledIndexes)[0],
                                 axis=0)
                LOG.info(
                    f"Acc: {acc.numpy():.3f}; ACC_TRUE:{acc_true.numpy():.3f}  Loss: {loss.numpy():.3f}; alpha = {alpha.numpy():.3f}; PI mean = {tf.reduce_mean(PI_l).numpy():.3f} "
                )

        #plt.scatter(range(lambda_tilde.shape[0]),np.log10(lambda_tilde/LAMBDA),s=2)
        #plt.show()
        for k in range(len(trainable_variables)):
            trainable_variables[k].assign(best_trainable_variables[k])
        return tf.clip_by_value(forward(Y, U, PI, mode='eval')[0], 0,
                                999999).numpy()
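
The filter `lambda_tilde = 1/(1 - alpha + alpha*lambda)` used in `forward` is the spectral form of LGC propagation: with a full eigendecomposition L = U diag(lambda) U^T, applying the filter to the eigenvalues reproduces inv((1-alpha)I + alpha*L). A small numpy check with a toy symmetric PSD matrix:

    import numpy as np

    alpha = 0.9
    L = np.array([[1.0, -1.0, 0.0],
                  [-1.0, 2.0, -1.0],
                  [0.0, -1.0, 1.0]]) / 2.0  #toy symmetric PSD stand-in for L_sym
    lam, U = np.linalg.eigh(L)
    lambda_tilde = 1.0 / (1.0 - alpha + alpha * lam)
    Y = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])
    F_spectral = U @ np.diag(lambda_tilde) @ U.T @ Y
    F_direct = np.linalg.inv((1 - alpha) * np.eye(3) + alpha * L) @ Y
    assert np.allclose(F_spectral, F_direct)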
Example #8
    def __MR(self, X, W, Y, labeledIndexes, p, optimize_labels, hook=None):
        """
            -------------------------------------------------------------
                INITIALIZATION
            --------------------------------------------------------------
        """
        ORACLE_Y = Y.copy()
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        l = np.reshape(np.array(np.where(labeledIndexes)), (-1))
        num_lab = l.shape[0]

        if not isinstance(p, int):
            p = int(p * num_lab)

        if p > Y.shape[0]:
            p = Y.shape[0]
            LOG.warn("Warning: p greater than the number of instances",
                     LOG.ll.CLASSIFIER)
        #W = gutils.scipy_to_np(W)
        #W =  0.5* (W + W.T)
        L = gutils.lap_matrix(W)
        D = gutils.deg_matrix(W, flat=True, pwr=-1.0)

        L = 0.5 * (L + L.T)

        def check_symmetric(a, tol=1e-8):
            return np.allclose(a, a.T, atol=tol)

        def is_pos_sdef(x):
            return np.all(np.linalg.eigvals(x) >= -1e-06)

        import scipy.sparse
        sym_err = L - L.T
        sym_check_res = np.all(np.abs(sym_err.data) < 1e-7)  # tune this value
        assert sym_check_res
        """---------------------------------------------------------------------------------------------------
                EIGENFUNCTION EXTRACTION
        ---------------------------------------------------------------------------------------------------
        """
        import time
        start_time = time.time()
        eigenVectors, eigenValues = W.load_eigenfunctions(p)

        time_elapsed = time.time() - start_time
        LOG.info("Took {} seconds to calculate eigenvectors".format(
            int(time_elapsed)))
        U = eigenVectors
        LAMBDA = eigenValues
        """
        -------------------------------------------------------------------------
            Import and setup Tensorflow
        ------------------------------------------------------------------------------
        """
        import tensorflow as tf
        import tf_labelprop.gssl.classifiers.lgc_lvo_aux as aux
        gpus = tf.config.experimental.list_physical_devices('GPU')

        #tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*8)])
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        """
        -------------------------------------------------------------------------
            Define Constants on GPU
        ------------------------------------------------------------------------------
        """
        U, X, Y = [tf.constant(x.astype(np.float32)) for x in [U, X, Y]]
        _U_times_U = tf.multiply(U, U)
        N = X.shape[0]
        """
            -----------------------------------------------------------------------------
            DEFINE VARS
            --------------------------------------------------------------------------------
        """
        MU = tf.Variable(0.1, name="MU")

        LAMBDA = tf.constant(LAMBDA.astype(np.float32), name="LAMBDA")
        PI = tf.Variable(tf.ones(shape=(tf.shape(Y)[0], ), dtype=tf.float32),
                         name="PI")
        _l = LAMBDA.numpy()
        """
            -----------------------------------------------------------------------------
            DEFINE FORWARD
            --------------------------------------------------------------------------------
        """

        def forward(Y, U, PI, mode='train', p=None, remove_diag=True):
            if p is None:
                p = 99999

            pi_Y = aux.spd_matmul(aux.to_sp_diag(tf.abs(PI)), Y)

            alpha = self.get_alpha(MU)
            """
                Maybe apply custom convolution to LAMBDA, otherwise just fit LGC's alpha using the corresponding filter 1/(1-alpha + alpha*lambda)
            """
            #tf.print(alpha)
            a = alpha - alpha * LAMBDA
            lambda_tilde = 1 / (1 - a)
            """ Set entries corresponding to eigvector e_i to zero for i > p """
            lambda_tilde = tf.where(
                tf.less_equal(tf.range(0, lambda_tilde.shape[0]), p),
                lambda_tilde, 0 * lambda_tilde)

            _self_infl = aux.mult_each_row_by(
                tf.square(U), by=lambda_tilde
            )  #Square each element of U, then dot product of each row with lambda_tilde
            B = _self_infl
            _self_infl = tf.reduce_sum(_self_infl, axis=1)

            A = aux.mult_each_col_by((tf.transpose(U) @ pi_Y), by=lambda_tilde)
            _P_op = U @ (A)
            if not remove_diag:
                _diag_P_op = tf.zeros_like(
                    aux.mult_each_col_by(pi_Y, by=_self_infl))
            else:
                _diag_P_op = aux.mult_each_col_by(pi_Y, by=_self_infl)

            if mode == 'eval':
                return aux.divide_by_row(_P_op - _diag_P_op)
            else:
                return A, B, aux.divide_by_row(_P_op - _diag_P_op)

        def forward_eval(Y, U, PI, mode='train', p=None, remove_diag=True):
            if p is None:
                p = 99999

            pi_Y = aux.spd_matmul(aux.to_sp_diag(tf.abs(PI)), Y)

            alpha = self.get_alpha(MU)
            """
                Maybe apply custom convolution to LAMBDA, otherwise just fit LGC's alpha using the corresponding filter 1/(1-alpha + alpha*lambda)
            """
            #tf.print(alpha)
            a = alpha - alpha * LAMBDA
            lambda_tilde = 1 / (1 - a)
            """ Set entries corresponding to eigvector e_i to zero for i > p """
            lambda_tilde = tf.where(
                tf.less_equal(tf.range(0, lambda_tilde.shape[0]), p),
                lambda_tilde, 0 * lambda_tilde)

            _self_infl = aux.mult_each_row_by(
                tf.square(U), by=lambda_tilde
            )  #Square each element of U, then dot product of each row with lambda_tilde
            _self_infl = tf.reduce_sum(_self_infl, axis=1)

            A = aux.mult_each_col_by((tf.transpose(U) @ pi_Y), by=lambda_tilde)
            _P_op = U @ (A)
            if not remove_diag:
                _diag_P_op = tf.zeros_like(
                    aux.mult_each_col_by(pi_Y, by=_self_infl))
            else:
                _diag_P_op = aux.mult_each_col_by(pi_Y, by=_self_infl)

            return aux.divide_by_row(_P_op - _diag_P_op)

        """
            -----------------------------------------------------------------------------
                DEFINE LOSSES and learning schedule
            --------------------------------------------------------------------------------
        """
        losses = {
            'xent':
            lambda y_, y: tf.reduce_mean(-tf.reduce_sum(y_ * tf.cast(
                tf.math.log(aux.smooth_labels(y, factor=0.01)), tf.float32),
                                                        axis=[1])),
            'sq_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.square(y_ - y), axis=[1])),
            'abs_loss':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.abs(y_ - y), axis=[1])),
            'hinge':
            lambda y_, y: tf.reduce_mean(
                tf.reduce_sum(tf.maximum(1. - y_ * y, tf.zeros_like(y)),
                              axis=1))
        }

        NUM_ITER = 10
        Y_l = tf.gather(Y, indices=np.where(labeledIndexes)[0], axis=0)
        U_l = tf.gather(U, indices=np.where(labeledIndexes)[0], axis=0)
        PI_l = tf.gather(PI, indices=np.where(labeledIndexes)[0], axis=0)
        """
            -----------------------------------------------------------------------------
            LEARNING
            --------------------------------------------------------------------------------
        """
        import pandas as pd
        L = []
        df = pd.DataFrame()
        max_acc, min_loss = 0, np.inf
        best_p = np.inf
        for i in range(NUM_ITER, 0, -1):
            MU.assign(i)

            A, B, _ = forward(Y_l, U_l, PI_l, mode='train')

            a1 = np.zeros_like(Y_l)
            a2 = np.zeros_like(Y_l)

            for i1 in range(p):
                a2 += aux.mult_each_col_by(X=Y_l, by=B[:, i1])
                a1 += aux.mult_each_col_by(
                    np.tile(A[i1, :][None, :], [a1.shape[0], 1]), U_l[:, i1])

                pred_L = aux.divide_by_row(a1 - a2)

                loss_sq = losses['sq_loss'](pred_L, Y_l)
                loss = losses['xent'](pred_L, Y_l)

                loss_xent = losses['xent'](pred_L, Y_l)

                acc = aux.accuracy(Y_l, pred_L)
                _not_lab = np.where(np.logical_not(labeledIndexes))[0]

                if self.DEBUG:
                    acc_true = aux.accuracy(
                        tf.gather(ORACLE_Y, indices=_not_lab, axis=0),
                        tf.gather(forward_eval(Y, U, PI, mode='eval', p=i1),
                                  indices=_not_lab,
                                  axis=0))
                    prop = np.max(
                        pd.value_counts(tf.argmax(pred_L, 1).numpy(),
                                        normalize=True).values)
                else:
                    acc_true = 0
                    prop = 0

                L.append(
                    np.array(
                        [i, i1, loss_sq, loss, loss_xent, acc, acc_true,
                         prop])[None, :])
                if (max_acc < acc) or (acc == max_acc and min_loss > loss):
                    print(
                        f"acc: {acc}, p: {i1}, Mu: {int(MU.numpy())}, alpha: {self.get_alpha(MU.numpy()).numpy()}"
                    )
                    best_p = int(i1)
                    best_MU = int(MU.numpy())
                    max_acc = acc
                    min_loss = loss.numpy()
                    """
                    if self.DEBUG:
                        alpha = self.get_alpha(MU)
                        I = np.identity(Y.shape[0], dtype = np.float32)
                        match_true = tf.gather(np.linalg.inv(I- alpha*(I - gutils.lap_matrix(W,'sym')))@Y,_not_lab,axis=0)
                        F = forward_eval(Y,U,PI,mode='eval',p=best_p)
                        
                        match_approx = tf.gather(F,indices=_not_lab,axis=0)
                        match = aux.accuracy(match_true, match_approx)
                        
                        print(f"Match rate {np.round(100*match,3)} ")
                        print(f"LGC_acc = {np.round(100*aux.accuracy(match_true,tf.gather(ORACLE_Y,indices=_not_lab,axis=0)),3)} ")
                        print(f"LGCLVO_acc = {np.round(100*aux.accuracy(match_approx,tf.gather(ORACLE_Y,indices=_not_lab,axis=0)),3)} ")
                    """

            if i % 1 == 0:
                """ Print info """
                if not hook is None:
                    if self.hook_iter_mode == "labeled":
                        plot_y = np.zeros_like(Y)
                        plot_y[labeledIndexes] = Y_l.numpy()
                    else:
                        MU.assign(best_MU)
                        plot_y = tf.clip_by_value(
                            forward(Y, U, PI, p=best_p, mode='eval'), 0,
                            999999).numpy()

                    hook._step(step=i,
                               X=X,
                               W=W,
                               Y=plot_y,
                               labeledIndexes=labeledIndexes)
                alpha = self.get_alpha(MU)

                LOG.info(
                    f"Acc: {max_acc.numpy():.3f};  Loss: {loss.numpy():.3f}; alpha = {alpha.numpy():.3f};"
                )

        if self.DEBUG:
            df = pd.DataFrame(np.concatenate(L, axis=0),
                              index=range(len(L)),
                              columns=[
                                  'i', 'p', 'loss_sq', 'loss', 'loss_xent',
                                  'acc', 'acc_true', 'prop'
                              ])
            self.create_3d_mesh(df)

        print(f"BEst mu: {best_MU}; best p: {best_p}")
        MU.assign(best_MU)
        print(MU)

        return forward_eval(Y, U, PI, mode='eval', p=None).numpy()

        #NOTE: everything below this return is unreachable
        """
        ----------------------------------------------------
            PART 2
        -------------------------------------------------
        
        
        """

        opt = tf.keras.optimizers.Adam(0.05)

        max_acc = 0
        for i in range(7000):
            #MU.assign(i)
            with tf.GradientTape() as t:
                _, _, pred_L = forward(Y_l,
                                       U_l,
                                       tf.gather(
                                           PI,
                                           indices=np.where(labeledIndexes)[0],
                                           axis=0),
                                       mode='train',
                                       p=best_p)
                loss_sq = losses['sq_loss'](pred_L, Y_l)
                loss = losses['xent'](pred_L, Y_l)

                loss_xent = losses['xent'](pred_L, Y_l)

            acc = aux.accuracy(Y_l, pred_L)
            _not_lab = np.where(np.logical_not(labeledIndexes))[0]
            acc_true = aux.accuracy(
                tf.gather(ORACLE_Y, indices=_not_lab, axis=0),
                tf.gather(forward(Y, U, PI, mode='eval')[0],
                          indices=_not_lab,
                          axis=0))

            L.append(
                np.array([i, loss_sq, loss, loss_xent, acc,
                          acc_true])[None, :])
            """
                Project labels such that they sum up to the original amount
            """
            pi = PI.numpy()
            pi[labeledIndexes] = np.sum(
                labeledIndexes) * pi[labeledIndexes] / (np.sum(
                    pi[labeledIndexes]))
            PI.assign(pi)
            """
                TRAINABLE VARIABLES GO HERE
            """
            trainable_variables = []
            if optimize_labels:
                trainable_variables.append(PI)
            """
                Apply gradients
            """
            gradients = t.gradient(loss, trainable_variables)
            opt.apply_gradients(zip(gradients, trainable_variables))

            if acc > max_acc:
                print(max_acc)
                best_trainable_variables = [
                    k.numpy() for k in trainable_variables
                ]
                max_acc = acc
                min_loss = loss
                counter_since_best = 0

        for k in range(len(trainable_variables)):
            trainable_variables[k].assign(best_trainable_variables[k])

        return forward(Y, U, PI, mode='eval', p=None).numpy()
        """
        
        for c in df.columns:
            if c.startswith('loss'):
                df[c] = (df[c] - df[c].min())/(df[c].max()-df[c].min())
        
        for c in df.columns:
            if not c in 'i':
                plt.plot(df['i'],df[c],label=c)
        plt.legend()
        plt.show()
        
        #plt.scatter(range(lambda_tilde.shape[0]),np.log10(lambda_tilde/LAMBDA),s=2)
        #plt.show()
        """
        return tf.clip_by_value(forward(Y, U, PI, mode='eval')[0], 0,
                                999999).numpy()
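
The `remove_diag` branch in `forward`/`forward_eval` relies on the fact that the diagonal of P = U diag(g) U^T is sum_k g_k * U[i,k]^2, so each instance's self-influence can be subtracted without ever materializing P. A numpy check with random stand-ins for U and the filter values:

    import numpy as np

    rng = np.random.default_rng(1)
    N, p, C = 6, 4, 3
    U = np.linalg.qr(rng.normal(size=(N, p)))[0]  #orthonormal columns as stand-in eigenvectors
    g = rng.uniform(0.5, 1.5, size=p)  #stand-in spectral filter values
    Y = rng.uniform(size=(N, C))
    P = U @ np.diag(g) @ U.T
    self_infl = np.sum(np.square(U) * g[None, :], axis=1)  #equals np.diag(P)
    assert np.allclose(self_infl, np.diag(P))
    F_loo = P @ Y - self_infl[:, None] * Y  #remove each row's self-contribution
    assert np.allclose(F_loo, (P - np.diag(np.diag(P))) @ Y)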
Example #9
    def __MR(self, X, W, Y, labeledIndexes, p, tuning_iter, hook=None):
        Y = np.copy(Y)
        if Y.ndim == 1:
            Y[np.logical_not(labeledIndexes)] = 0
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0
        if W.shape[0] != Y.shape[0]:
            raise ValueError("W,Y shape not compatible")

        l = np.reshape(np.array(np.where(labeledIndexes)), (-1))
        num_lab = l.shape[0]

        if not isinstance(p, int):
            p = int(p * num_lab)
        if p > Y.shape[0]:
            p = Y.shape[0]
            LOG.warn("Warning: p greater than the number of instances",
                     LOG.ll.FILTER)

        W = scipy_to_np(W)
        L = gutils.lap_matrix(W, which_lap='sym')
        D = gutils.deg_matrix(W)

        def check_symmetric(a, tol=1e-8):
            return np.allclose(a, a.T, atol=tol)

        if check_symmetric(L):
            E = sp.eigh(L, D, eigvals=(1, p))[1]
        else:
            LOG.warn("Warning: Laplacian not symmetric", LOG.ll.FILTER)
            eigenValues, eigenVectors = sp.eig(L, D)
            idx = eigenValues.argsort()
            eigenValues = eigenValues[idx]
            assert eigenValues[0] <= eigenValues[eigenValues.shape[0] - 1]
            eigenVectors = eigenVectors[:, idx]
            E = eigenVectors[:, 1:(p + 1)]

        e_lab = E[labeledIndexes, :]
        """ TIKHONOV REGULARIZATION. Currently set to 0."""
        TIK = np.zeros(shape=e_lab.shape)
        try:
            A = np.linalg.inv(e_lab.T @ e_lab + TIK.T @ TIK) @ e_lab.T
        except np.linalg.LinAlgError:
            A = np.linalg.pinv(e_lab.T @ e_lab + TIK.T @ TIK) @ e_lab.T
        F = np.zeros(shape=Y.shape)

        y_m = np.argmax(Y, axis=1)[labeledIndexes]

        for i in range(Y.shape[1]):
            c = np.ones(num_lab)
            c[y_m != i] = -1
            a = A @ np.transpose(c)
            LOG.debug(a, LOG.ll.FILTER)
            for j in np.arange(F.shape[0]):
                F[j, i] = np.dot(a, E[j, :])

        ERmat = -1 * np.ones((Y.shape[0], ))

        Y_amax = np.argmax(Y, axis=1)
        for i in np.where(labeledIndexes)[0]:
            ERmat[i] = np.square(Y[i, Y_amax[i]] - F[i, Y_amax[i]])

        removed_Lids = np.argsort(ERmat)
        removed_Lids = removed_Lids[::-1]

        labeledIndexes = np.array(labeledIndexes)
        Y = np.copy(Y)
        for i in range(tuning_iter):
            labeledIndexes[removed_Lids[i]] = False
            if not hook is None:
                hook._step(step=i,
                           X=X,
                           W=W,
                           Y=Y,
                           labeledIndexes=labeledIndexes)

        return Y, labeledIndexes
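# A minimal sketch (illustrative only, not the library's API) of the core of
# __MR above: regress +1/-1 class indicators onto the leading Laplacian
# eigenvectors, then use the smooth reconstruction F to score how much each
# labeled point disagrees with the fit. The constant first eigenvector is
# assumed to have been dropped, as in the code above.
import numpy as np

def ls_fit_on_eigvecs(E, Y_onehot, labeledIndexes):
    """E: (n, p) eigenvectors; Y_onehot: (n, c) one-hot labels -> (n, c) fit."""
    mask = np.asarray(labeledIndexes, dtype=bool)
    E_l = E[mask, :]
    # pinv keeps this stable when E_l.T @ E_l is rank-deficient.
    A = np.linalg.pinv(E_l.T @ E_l) @ E_l.T
    C = 2.0 * Y_onehot[mask, :] - 1.0    # +1 for the class, -1 otherwise
    return E @ (A @ C)

# Labeled points with a large squared gap (Y - F)^2 at their own class are the
# first candidates for removal, mirroring the ERmat ranking above.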
Example #10
    def LGCLVO(self,
               X,
               W,
               Y,
               labeledIndexes,
               mu=99.0,
               lgc_iter=10000,
               hook=None,
               which_loss="xent"):
        if which_loss is None:
            return Y, labeledIndexes

        Y = np.copy(Y).astype(np.float32)
        #We make a deep copy of labeledindexes
        labeledIndexes = np.array(labeledIndexes)
        lids = np.where(labeledIndexes)[0]
        if Y.ndim == 1:
            Y = gutils.init_matrix(Y, labeledIndexes)
        Y[np.logical_not(labeledIndexes), :] = 0

        if not W.shape[0] == Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        """ Ensure that it is symmetric """
        W = 0.5 * (W + W.transpose())

        num_labeled = Y[labeledIndexes].shape[0]
        num_unlabeled = Y.shape[0] - num_labeled
        num_classes = Y.shape[1]

        use_iterative_propagation = True
        if use_iterative_propagation:
            l = np.sum(labeledIndexes)

            itertool_prod = [[i, j] for i in range(l) for j in range(l)]

            row = np.asarray([lids[i] for i in range(l)])
            col = np.asarray([i for i in range(l)])
            data = np.asarray([1.0] * l)
            temp_Y = _to_np(
                scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=(W.shape[0], l)))

            PL = LGC_iter_TF(X,
                             W,
                             Y=temp_Y,
                             labeledIndexes=labeledIndexes,
                             alpha=1 / (1 + mu),
                             num_iter=lgc_iter).astype(np.float32)

            PL = PL[labeledIndexes, :]
            PL[range(PL.shape[0]), range(PL.shape[0])] = 0  #Set diagonal to 0

            del temp_Y

            row = np.asarray(
                [lids[x[0]] for x in itertool_prod if x[0] != x[1]])
            col = np.asarray(
                [lids[x[1]] for x in itertool_prod if x[0] != x[1]])
            data = [PL[x[0], x[1]] for x in itertool_prod if x[0] != x[1]]
            P = scipy.sparse.coo_matrix((data, (row, col)),
                                        shape=W.shape).tocsr()
        else:
            #Identity matrix
            I = np.identity(W.shape[0])
            #Get graph laplacian
            L = gutils.lap_matrix(W, which_lap='sym')
            L = 0.5 * (L + L.transpose())
            #Propagation matrix
            P = np.zeros(W.shape).astype(np.float32)
            P[np.ix_(labeledIndexes, labeledIndexes)] = np.linalg.inv(
                I - (1 / (1 + mu)) * (I - L))[np.ix_(labeledIndexes,
                                                     labeledIndexes)]
            P[labeledIndexes, labeledIndexes] = 0
            P[np.ix_(labeledIndexes, labeledIndexes)] = P[np.ix_(
                labeledIndexes, labeledIndexes)] / np.sum(P[np.ix_(
                    labeledIndexes, labeledIndexes)],
                                                          axis=0,
                                                          keepdims=False)
            PL = P[np.ix_(labeledIndexes, labeledIndexes)]

        W = scipy.sparse.csr_matrix(W)

        def divide_row_by_sum(e):
            e = _to_np(e)
            e = e / np.sum(e + 1e-100, axis=1, keepdims=True)
            return e

        PL = divide_row_by_sum(PL)

        import tensorflow as tf
        A = PL
        B = Y[labeledIndexes, :]
        PTP = np.transpose(A) @ A
        PT = np.transpose(A)
        _SIGMA = tf.Variable(np.ones((PL.shape[0], ), dtype=np.float32))
        _C = tf.Variable(_SIGMA)

        SIGMA = lambda: tf.linalg.tensor_diag(
            tf.clip_by_value(_SIGMA, 0.0, tf.float32.max))
        C = lambda: tf.linalg.tensor_diag(_C)

        to_prob = lambda x: tf.nn.softmax(x, axis=1)
        xent = lambda y_, y: tf.reduce_mean(-tf.reduce_sum(
            y_ * tf.cast(tf.math.log(y + 1e-06), tf.float32), axis=[1]))

        sq_loss = lambda y_, y: tf.reduce_mean(
            tf.reduce_sum(tf.square(y_ - y), axis=[1]))
        norm_s = lambda: _SIGMA * tf.gather(
            tf.math.reciprocal_no_nan(
                tf.reduce_sum(to_prob(A @ SIGMA() @ B), axis=0)),
            tf.argmax(B, axis=1))

        if which_loss == "xent":
            loss = lambda: xent(to_prob(A @ SIGMA() @ B), B)
        elif which_loss == "mse":
            loss = lambda: sq_loss(
                to_prob(A @ SIGMA() @ B), B
            )  #+ 1*tf.reduce_sum(tf.square( tf.reduce_mean(to_prob(A@SIGMA()@B),axis=0) - tf.reduce_mean(B,axis=0)))

        acc = lambda: 100.0 * tf.math.reduce_mean(
            tf.cast(
                tf.equal(tf.argmax(to_prob(A @ SIGMA() @ B), axis=1),
                         tf.argmax(B, axis=1)), tf.float32))

        opt = tf.keras.optimizers.Adam(learning_rate=0.7)

        for i in range(5000):
            opt.minimize(loss, [_SIGMA])
            #_SIGMA.assign(norm_s())  # optional renormalization of SIGMA
            if i % 500 == 0:
                LOG.debug("LOO loss: {}".format(loss().numpy()), LOG.ll.FILTER)

        self.Fl = (lambda: to_prob(A @ SIGMA() @ B))().numpy()

        Y[labeledIndexes, :] = self.Fl

        return Y, labeledIndexes
        """
        
        Yl = Y[labeledIndexes,:]
        it_counter = 0
        loss  = np.inf
        LR = 0.1
        for i in range(1000000):
            grad_SIGMA = 2*np.transpose(C@A)@((C@A@SIGMA@B)-B)@np.transpose(B)
            grad_C = 2*(C@A@SIGMA@B-B)@(np.transpose(B)@np.transpose(SIGMA)@np.transpose(A))
            
            SIGMA -= LR*(np.diag(np.diagonal(grad_SIGMA)))
            C -= LR*(np.diag(np.diagonal(grad_C)))
            
            SIGMA =  np.maximum(SIGMA,np.zeros_like(SIGMA))
            new_loss = np.sum(np.square((C@A)@SIGMA@B - B))
            if new_loss > loss:
                LR *= 0.5
                it_counter += 1
                if it_counter == 10:
                    break
            else:
                it_counter = 0
                loss = new_loss
                print(new_loss)
        """

        return Y, labeledIndexes
        for _ in range(10):
            Yl = Y[labeledIndexes, :]
            PL_masked = PL * (Yl @ np.transpose(Yl))

            labeled_ids = np.where(labeledIndexes)[0]
            for i, l_id in enumerate(labeled_ids):
                den = np.square(np.max(Y[l_id, :])) * np.sum(
                    np.square(PL[:, i]))
                den += 1e-30
                num = np.sum(PL_masked[:, i])
                Y[l_id, :] *= (num / den)

        return Y, labeledIndexes
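# A toy sketch (synthetic data; mirrors the opt.minimize(...) pattern used
# above, assuming a TF2-era Keras optimizer) of the leave-one-out idea in
# LGCLVO: learn a diagonal reweighting sigma of the labeled points so that
# each labeled point is well predicted from the *other* labeled points.
import numpy as np
import tensorflow as tf

rng = np.random.default_rng(0)
A = rng.random((5, 5)).astype(np.float32)
np.fill_diagonal(A, 0.0)                    # no self-influence (the LOO part)
A = A / A.sum(axis=1, keepdims=True)        # row-normalize, as divide_row_by_sum does
B = np.eye(3, dtype=np.float32)[rng.integers(0, 3, 5)]  # 5 one-hot labels, 3 classes

sigma = tf.Variable(np.ones(5, dtype=np.float32))
loss = lambda: tf.reduce_mean(-tf.reduce_sum(
    B * tf.math.log(tf.nn.softmax(A @ tf.linalg.diag(sigma) @ B, axis=1) + 1e-6),
    axis=1))
opt = tf.keras.optimizers.Adam(learning_rate=0.1)
for _ in range(200):
    opt.minimize(loss, [sigma])
print("toy LOO loss:", loss().numpy())      # decreases from its start value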
Example #11
    def __SIIS(self,X,W,Y,labeledIndexes,m,alpha,beta,rho,max_iter,hook=None):
        Y = self.CLEAN_UNLABELED_ROWS(Y, labeledIndexes)
        
        if not W.shape[0] == Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        
        if m is None:
            m = W.shape[0]

        c = Y.shape[1]

        D = gutils.deg_matrix(W, pwr=1.0)
        L = gutils.lap_matrix(W, which_lap='sym')
        
        U, SIGMA = W.load_eigenfunctions(m=m, remove_first_eig=False)

        U = scipy.sparse.csr_matrix(U)
        SIGMA = _to_np(scipy.sparse.diags([SIGMA], [0]))

        J = gutils.labels_indicator(labeledIndexes)

        """ Edge matrix P, so that the row norms of P@F measure label variation across edges """
        P = SIISClassifier.edge_mat(W)
        
        """ Initialize params """
        LAMB_1 = np.ones((P.shape[0],c))
        LAMB_2 = np.ones((Y.shape[0],c))
        mu = 1.0
        mu_max = 10000000.0
        eps = 1/(10000)
        
        """ Reusable matrices """
        JU = _to_np(J@U)
        PU = _to_np(P@U)
        PU_T = PU.transpose()
        JU_T = JU.transpose()
        
        
        
        A = np.zeros((m,c))
        Q = None
        B = None
        
        improvement  = 1
        iter = 0
        
        """ TODO: Tensorflow version 
            import tensorflow as tf
            with tf.Session() as sess:
                A = tf.Variable(1e-06*tf.ones((m,c),dtype=tf.float64))
                sess.run(tf.global_variables_initializer())
                
                C = tf.reduce_sum(tf.linalg.norm(tf.matmul(PU,A),axis=1)) +\
                 alpha*tf.reduce_sum(tf.linalg.norm(tf.matmul(_to_np(U)[labeledIndexes,:],A)-Y[labeledIndexes,:],axis=1)) +\
                 beta* tf.trace(tf.matmul(tf.matmul(tf.transpose(A),SIGMA),A))
                opt = tf.train.AdamOptimizer(learning_rate=0.5*1e-02)
                opt_min = opt.minimize(C)
                sess.run(tf.global_variables_initializer())
                for i in range(2000):
                    sess.run(opt_min)
                    LOG.debug(sess.run(C),LOG.ll.CLASSIFIER)
                LOG.debug(sess.run(C),LOG.ll.CLASSIFIER)    
                F = _to_np(U)@sess.run(A)
                
                LOG.debug(F.shape,LOG.ll.CLASSIFIER)
            
        
        """
        while iter <= max_iter and improvement > eps:
            
            """ Update Q """
            N = PU@A - (1/mu)*LAMB_1
            N_norm = np.linalg.norm(N, axis=1)
            
            
            to_zero = N_norm <= (1/mu)
            mult = ((N_norm - (1/mu))/N_norm)
            N = N * mult[:,np.newaxis]
            
            
            N[to_zero,:] = 0.0
            Q = N 
            
            """ Update B """
            M = JU@A - Y - (1/mu)*LAMB_2
            M_norm = np.linalg.norm(M,axis=1)
            to_zero = M_norm <= (alpha/mu)
            mult = ((M_norm - (alpha/mu))/M_norm)
            M = M * mult[:,np.newaxis]
            M[to_zero,:] = 0.0 
            B = M
            
            
            old_A = A
            """ Update A """
            A_inv_term = 2*beta*SIGMA + mu*PU_T@PU + mu*JU_T@JU
            A_inv_term = np.linalg.inv(A_inv_term)
            A = A_inv_term @ (PU_T@LAMB_1 + JU_T@LAMB_2 +
                              mu*PU_T@Q + mu*JU_T@(B + Y))

            """ Update Lagrangian coeffs """
            LAMB_1 = LAMB_1 + mu*(Q - PU@A)
            LAMB_2 = LAMB_2 + mu*(B - JU@A + Y)
            """ Update penalty coefficient """
            mu = min(rho*mu, mu_max)

            improvement = np.max(np.abs(A - old_A)) / max(
                np.max(np.abs(old_A)), 1e-30)

            LOG.debug("Iter {}".format(iter), LOG.ll.CLASSIFIER)
            iter += 1
        
        C = (np.sum(np.linalg.norm(PU@A, axis=1)) +
             alpha*np.sum(np.linalg.norm(JU@A - Y, axis=1)) +
             beta*np.trace(A.T@SIGMA@A))
        LOG.debug("Iter {} - Cost {}".format(iter, C), LOG.ll.CLASSIFIER)

        F = U@A

        """ Convert each row of F to a one-hot decision """
        for i in range(F.shape[0]):
            mx = np.argmax(F[i, :])
            F[i, :] = 0.0
            F[i, mx] = 1.0

        return F
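# A hedged sketch (illustrative only) of the proximal step used for Q and B in
# the ADMM loop above: row-wise "group soft-thresholding", i.e. the proximal
# operator of the L2,1 norm. Rows with L2 norm below the threshold are zeroed;
# the remaining rows shrink toward zero.
import numpy as np

def row_soft_threshold(M, tau):
    """Shrink each row of M by tau in L2 norm; zero rows with norm <= tau."""
    norms = np.linalg.norm(M, axis=1)
    scale = np.where(norms > tau, (norms - tau) / np.maximum(norms, 1e-30), 0.0)
    return M * scale[:, np.newaxis]

M = np.array([[3.0, 4.0], [0.1, 0.1]])
print(row_soft_threshold(M, 1.0))   # [[2.4, 3.2], [0., 0.]]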
Example #12
    def __LGC(self,
              X,
              W,
              Y,
              labeledIndexes,
              alpha=0.1,
              useEstimatedFreq=None,
              hook=None):
        """ Init """
        import scipy.sparse
        if scipy.sparse.issparse(W):
            W = W.todense()
        Y = self.CLEAN_UNLABELED_ROWS(Y, labeledIndexes)
        if not W.shape[0] == Y.shape[0]:
            raise ValueError("W,Y shape not compatible")
        """ Estimate frequency of classes"""
        num_labeled = Y[labeledIndexes].shape[0]
        num_classes = Y.shape[1]
        if not useEstimatedFreq is None:
            if isinstance(useEstimatedFreq, bool):
                estimatedFreq = np.sum(Y[labeledIndexes], axis=0) / num_labeled
            else:
                estimatedFreq = useEstimatedFreq

        else:
            estimatedFreq = np.repeat(1 / num_classes, num_classes)
        omega = estimatedFreq
        """  """
        mu = (1 - alpha) / alpha
        n = Y.shape[0]
        c = Y.shape[1]

        I = np.identity(Y.shape[0])
        S = I - gutils.lap_matrix(W, which_lap='sym')
        """ stuff that has matrix multiplication with theta """
        theta = (1 / mu) * np.asarray(np.linalg.inv(I - alpha * S))
        F_lgc = (theta @ Y) * mu
        theta_1n = np.sum(theta, axis=1).flatten()
        theta_1n_ratio = (theta_1n /
                          (np.sum(theta_1n)))[:, np.newaxis]  #Shape: nx1
        """ Intermediate calc """
        zeta = n * omega - np.sum(F_lgc, axis=0)  #Shape: 1xc
        zeta = np.reshape(zeta, (1, c))

        ypsilon = np.ones(shape=(n,1)) - np.sum(F_lgc,axis=1)[:,np.newaxis] -\
             theta_1n_ratio * (n - np.sum(F_lgc.flatten())) #Shape: nx1

        F = F_lgc
        F += theta_1n_ratio @ zeta
        F += (1 / c) * (ypsilon @ np.ones((1, c)))

        log_args = [
            np.round(x, 3)
            for x in [np.sum(F, axis=1)[0:10],
                      np.sum(F, axis=0), n * omega]
        ]
        LOG.info(
            "F sum on rows: {} (expected 1,1,...,1); F sum col: {} (expected {})"
            .format(*log_args))

        return F
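# A toy numeric check (small dense graph; illustrative only) of the
# class-mass-constrained LGC above: after the zeta and ypsilon corrections,
# every row of F sums to 1 and every column sums to n * omega.
import numpy as np

n, c, alpha = 6, 2, 0.9
W = np.ones((n, n)) - np.eye(n)                  # complete graph, no self-loops
d = W.sum(axis=1)
S = W / np.sqrt(np.outer(d, d))                  # D^{-1/2} W D^{-1/2}
Y = np.zeros((n, c)); Y[0, 0] = Y[1, 1] = 1.0    # one labeled point per class
mu = (1 - alpha) / alpha
omega = np.array([0.5, 0.5])                     # uniform class frequencies

theta = (1 / mu) * np.linalg.inv(np.eye(n) - alpha * S)
F_lgc = mu * (theta @ Y)
ratio = (theta.sum(axis=1) / theta.sum())[:, None]
zeta = (n * omega - F_lgc.sum(axis=0))[None, :]
ypsilon = 1 - F_lgc.sum(axis=1, keepdims=True) - ratio * (n - F_lgc.sum())
F = F_lgc + ratio @ zeta + (1 / c) * (ypsilon @ np.ones((1, c)))
print(F.sum(axis=1))   # ~[1, 1, ..., 1]
print(F.sum(axis=0))   # ~n * omega = [3, 3]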