Code example #1
 def __init__(self,
              numScales,
              numLayers,
              adjacencyMatrix,
              computeBound=True,
              normalize=True,
              alpha=2,
              beta=2,
              K=20):
     super().__init__(numScales, numLayers, adjacencyMatrix)
     S = graphTools.adjacencyToLaplacian(self.W)  # Laplacian
     if normalize:
         self.S = graphTools.normalizeLaplacian(S)
     else:
         self.S = S
     self.E, self.V = graphTools.computeGFT(self.S, order='increasing')
     eMax = np.max(np.diag(self.E))
     # Eigenvalues at (roughly) the first and third quartiles of the spectrum
     x1 = np.diag(self.E)[int(np.floor(self.N / 4))]
     x2 = np.diag(self.E)[int(np.ceil(3 * self.N / 4))]
     # Low-pass average operator
     #self.U = self.V[:, 0] # v1
     self.U = (1 / self.N) * np.ones(self.N)
     # Construct wavelets
     self.H, self.B, self.C = monicCubicWavelet(self.V, self.E, self.J,
                                                alpha, beta, x1, x2, K,
                                                eMax, computeBound)
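
The snippet above swaps the first-eigenvector projection (the commented-out v1 line) for the uniform averaging operator U = (1/N) * 1. A minimal sketch with plain NumPy and toy data (nothing here comes from the repo's graphTools API) showing that applying this U to a graph signal just takes its mean:

    import numpy as np

    N = 5
    x = np.random.rand(N)         # toy graph signal
    U = (1 / N) * np.ones(N)      # low-pass average operator, as above
    assert np.isclose(U @ x, x.mean())
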
Code example #2
 def __init__(self,
              numScales,
              numLayers,
              adjacencyMatrix,
              normalize=True,
              R=3,
              doWarping=True):
     super().__init__(numScales, numLayers, adjacencyMatrix)
     S = graphTools.adjacencyToLaplacian(self.W)  # Laplacian
     if normalize:
         self.S = graphTools.normalizeLaplacian(S)
     else:
         self.S = S
     self.E, self.V = graphTools.computeGFT(self.S, order='increasing')
     eMax = np.max(np.diag(self.E))
     if R > self.J:
         R = self.J - 1
     # Low-pass average operator
     #self.U = self.V[:, 0] # v1
     self.U = (1 / self.N) * np.ones(self.N)
     # Construct wavelets
     self.H = tightHannWavelet(self.V, self.E, self.J, R, eMax, doWarping)
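
Both wavelet constructors above read eMax off the spectrum of the (optionally normalized) Laplacian. A minimal sketch with plain NumPy and a toy triangle graph (an illustrative assumption, not the repo's graphTools code) of why normalization keeps that value well behaved: the normalized Laplacian I - D^{-1/2} W D^{-1/2} of an undirected graph always has eigenvalues in [0, 2]:

    import numpy as np

    W = np.ones((3, 3)) - np.eye(3)        # toy undirected adjacency (triangle)
    Dmh = np.diag(1 / np.sqrt(W.sum(1)))   # D^{-1/2}
    Ln = np.eye(3) - Dmh @ W @ Dmh         # normalized Laplacian
    e = np.linalg.eigvalsh(Ln)
    assert e.min() >= -1e-10 and e.max() <= 2 + 1e-10
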
Code example #3
    # Note that monic cubic polynomials and tight Hann wavelets have other
    # parameters that are set, by default, to the values used in the
    # respective papers.

    # We want to determine which eigenbasis to use. We try to use the Laplacian
    # since it's the same one used in the wavelet cases, and it seems to be the
    # more "interpretable" choice. If the Laplacian doesn't exist (which can
    # happen if the graph is directed or has negative edge weights), then we
    # use the eigenbasis of the adjacency matrix.
    if doGFT:
        if G.L is not None:
            S = G.L
            if normalizeGSOforGFT:
                S = graphTools.normalizeLaplacian(S)
            _, GFT = graphTools.computeGFT(S, order='increasing')
        else:
            S = G.W
            if normalizeGSOforGFT:
                S = graphTools.normalizeAdjacency(S)
            _, GFT = graphTools.computeGFT(S, order='totalVariation')

        GFT = GFT.conj().T

    # The bound will only be computed for the monic cubic polynomials (they
    # have an easier closed-form expression), so we need to make sure that
    # model is present before computing the bound.
    if computeBound and doMonicCubic:
        # B is the maximum value of the filter on each scale (vector of size J)
        B = modelsGST[monicCubicName].getFilterBound()
        # Pick the maximum filter bound
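
A minimal sketch with plain NumPy and a toy Laplacian (hypothetical data; graphTools.computeGFT is not reproduced) of the GFT convention in the branch above: eigendecompose the symmetric Laplacian, take GFT = V^H as the analysis operator, and recover the signal with the synthesis operator V:

    import numpy as np

    L = np.array([[ 2., -1., -1.],
                  [-1.,  1.,  0.],
                  [-1.,  0.,  1.]])   # toy combinatorial Laplacian
    e, V = np.linalg.eigh(L)          # eigenvalues in increasing order
    GFT = V.conj().T                  # as in GFT = GFT.conj().T above
    x = np.random.rand(3)             # toy graph signal
    xHat = GFT @ x                    # spectral coefficients
    assert np.allclose(V @ xHat, x)   # perfect reconstruction
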
Code example #4
            ##############

            #\\\ Optimizer options
            #   (If different from the default ones, change here.)
            thisOptimAlg = optimAlg
            thisLearningRate = learningRate
            thisBeta1 = beta1
            thisBeta2 = beta2

            #\\\ GSO
            # The coarsening technique is defined for the normalized and
            # rescaled Laplacian, whereas for the other models we use the
            # normalized adjacency
            if 'crs' in thisModel:
                L = graphTools.normalizeLaplacian(G.L)
                EL, VL = graphTools.computeGFT(L, order='increasing')
                S = 2 * L / np.max(np.real(EL)) - np.eye(nNodes)
            else:
                S = G.S.copy() / np.max(np.real(G.E))

            modelDict['GSO'] = S

            ################
            # ARCHITECTURE #
            ################

            thisArchit = callArchit(**modelDict)

            #############
            # OPTIMIZER #
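
A minimal sketch with plain NumPy (toy Laplacian; the coarsening models themselves are not reproduced) of the rescaling in the 'crs' branch above, S = 2 * L / eMax - I: it maps the Laplacian spectrum from [0, eMax] onto [-1, 1], the interval that Chebyshev-style filters on coarsened graphs commonly assume:

    import numpy as np

    L = np.array([[ 2., -1., -1.],
                  [-1.,  1.,  0.],
                  [-1.,  0.,  1.]])            # toy Laplacian
    eMax = np.max(np.linalg.eigvalsh(L))
    S = 2 * L / eMax - np.eye(3)               # rescaled GSO, as above
    eS = np.linalg.eigvalsh(S)
    assert eS.min() >= -1 - 1e-10 and eS.max() <= 1 + 1e-10
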
Code example #5
 def __init__(self,
              G,
              nTrain,
              nValid,
              nTest,
              sourceNodes,
              tMax=None,
              dataType=np.float64,
              device='cpu'):
     # Initialize parent
     super().__init__()
     # store attributes
     self.dataType = dataType
     self.device = device
     self.nTrain = nTrain
     self.nValid = nValid
     self.nTest = nTest
     # If no tMax is specified, set it to the maximum possible.
     if tMax is None:
         tMax = G.N
     #\\\ Generate the samples
     # Get the largest eigenvalue of the weighted adjacency matrix
     EW, VW = graph.computeGFT(G.W, order='totalVariation')
     eMax = np.max(EW)
     # Normalize the matrix so that it doesn't explode
     Wnorm = G.W / eMax
     # total number of samples
     nTotal = nTrain + nValid + nTest
     # sample source nodes
     sampledSources = np.random.choice(sourceNodes, size=nTotal)
     # sample diffusion times
     sampledTimes = np.random.choice(tMax, size=nTotal)
     # Since the signals are generated as W^t * delta, this reduces to
     # selecting a column of W^t (the column corresponding to the source
     # node). Therefore, we generate an array of size tMax x N x N with all
     # the powers of the matrix, and then simply select the corresponding
     # column for the corresponding time.
     lastWt = np.eye(G.N, G.N)
     Wt = lastWt.reshape([1, G.N, G.N])
     for t in range(1, tMax):
         lastWt = lastWt @ Wnorm
         Wt = np.concatenate((Wt, lastWt.reshape([1, G.N, G.N])), axis=0)
     x = Wt[sampledTimes, :, sampledSources]
     # Now, we have the signals and the labels
     signals = x  # nTotal x N (CS notation)
     # Finally, we have to match the source nodes to the corresponding
     # labels, which start at 0 and increase in integer steps.
     nodesToLabels = {}
     for it in range(len(sourceNodes)):
         nodesToLabels[sourceNodes[it]] = it
     labels = [nodesToLabels[v] for v in sampledSources]  # nTotal
     # Split and save them
     self.samples['train']['signals'] = signals[0:nTrain, :]
     self.samples['train']['targets'] = np.array(labels[0:nTrain])
     self.samples['valid']['signals'] = signals[nTrain:nTrain + nValid, :]
     self.samples['valid']['targets'] = np.array(labels[nTrain:nTrain +
                                                        nValid])
     self.samples['test']['signals'] = signals[nTrain + nValid:nTotal, :]
     self.samples['test']['targets'] = np.array(labels[nTrain +
                                                       nValid:nTotal])
     # Change data to specified type and device
     self.astype(self.dataType)
     self.to(self.device)
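
A minimal sketch with plain NumPy and toy sizes (hypothetical values, not the dataset's) verifying the trick in the comments above: W^t @ delta_i is exactly column i of W^t, so stacking all powers of W lets every (time, source) sample be gathered with one fancy-indexing call:

    import numpy as np

    N, tMax = 4, 3
    Wnorm = np.random.rand(N, N)
    Wnorm /= np.max(np.abs(np.linalg.eigvals(Wnorm)))   # normalize as above
    Wt = np.stack([np.linalg.matrix_power(Wnorm, t) for t in range(tMax)])
    delta = np.zeros(N); delta[2] = 1.                  # delta at source node 2
    assert np.allclose(Wt[2] @ delta, Wt[2][:, 2])      # column selection
    # Fancy indexing, as in x = Wt[sampledTimes, :, sampledSources]:
    x = Wt[np.array([0, 2]), :, np.array([1, 2])]       # shape (2, N)
    assert np.allclose(x[1], Wt[2][:, 2])
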
Code example #6
File: dataTools.py  Project: sklin93/Graph-WaveNet
    def __init__(self, G, K, nTrain, nValid, nTest, horizon, F_t=5,
                 pooltype='weighted', FPoolDecay=0.8, EPoolDecay=0.8,
                 sigmaSpatial=1, sigmaTemporal=0, rhoSpatial=0,
                 rhoTemporal=0, dataType=np.float64, device='cpu'):
        # store attributes
        assert K % F_t == 0  # for cleaner F prediction
        self.K = K
        self.dataType = dataType
        self.device = device
        self.nTrain = nTrain
        self.nValid = nValid
        self.nTest = nTest
        self.horizon = horizon
        self.sigmaSpatial = sigmaSpatial
        self.sigmaTemporal = sigmaTemporal
        self.rhoSpatial = rhoSpatial
        self.rhoTemporal = rhoTemporal
        #\\\ Generate the samples
        # Get the largest eigenvalue of the weighted adjacency matrix
        EW, VW = graph.computeGFT(G.W, order='totalVariation')
        eMax = np.max(EW)
        # Normalize the matrix so that it doesn't explode
        Wnorm = G.W / eMax
        # total number of samples
        nTotal = nTrain + nValid + nTest
        # x_0
        x_t = np.random.rand(nTotal, G.N)
        x = [x_t]
        # Temporal noise
        tempNoise = np.random.multivariate_normal(
            np.zeros(self.horizon),
            self.sigmaTemporal ** 2 * np.eye(self.horizon)
            + self.rhoTemporal ** 2 * np.ones((self.horizon, self.horizon)),
            (nTotal, G.N))
        tempNoise = np.transpose(tempNoise, (2, 0, 1))  # (horizon, nTotal, G.N)
        # Create LS
        A = Wnorm # = A x_t + w (Gaussian noise)
        for t in range(self.horizon-1):
            # spatialNoise (for each t): (nTotal, G.N)
            spatialNoise = np.random.multivariate_normal(
                np.zeros(G.N),
                self.sigmaSpatial ** 2 * np.eye(G.N)
                + self.rhoSpatial ** 2 * np.ones((G.N, G.N)),
                nTotal)
            x_tplus1 = np.matmul(x_t, A) + spatialNoise + tempNoise[t, :, :]
            x_t = x_tplus1
            x.append(x_t)

        x = np.stack(x, axis=-1) # (nTotal, G.N, horizon)
        
        # synthetic F (coarse temporal) and E (coarse spatial)
        F = self._gen_F(x, F_t, pooltype, FPoolDecay)  # (nTotal, horizon//F_t, G.N)
        E = self._gen_E(x, G, pooltype, EPoolDecay)  # (nTotal, horizon, G.nCommunities)
        FE = np.stack((F,E), axis=-1) # combined signal, along feature dimension
        # # signals and labels for F
        # F_idxer = np.arange(K//F_t)[None, :] + np.arange((horizon-K)//F_t+1)[:, None]
        # F_signals = F[:, F_idxer[:-K//F_t], :]
        # F_labels = F[:, F_idxer[K//F_t:], :]
        # # signals and labels for E
        # E_idxer = np.arange(K)[None, :] + np.arange(horizon-K+1)[:, None]
        # E_signals = E[:, E_idxer[:-K], :]
        # E_labels = E[:, E_idxer[K:], :]

        # sliding window indexer
        idxer = np.arange(K)[None, :] + np.arange(horizon-K+1)[:, None]
        signals = FE[:, idxer[:-K], :, :]
        labels = FE[:, idxer[K:], :, :]
        # Split and save them
        self.samples = {}
        self.samples['train'] = {}
        self.samples['train']['x'] = signals[0:nTrain, :]
        self.samples['train']['y'] = labels[0:nTrain, :]
        self.samples['val'] = {} 
        self.samples['val']['x'] = signals[nTrain:nTrain+nValid, :]
        self.samples['val']['y'] = labels[nTrain:nTrain+nValid, :]
        self.samples['test'] = {}
        self.samples['test']['x'] = signals[nTrain+nValid:nTotal, :]
        self.samples['test']['y'] = labels[nTrain+nValid:nTotal, :]

        # Change data to specified type and device
        self.astype(self.dataType)
        self.to(self.device)
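
A minimal sketch with plain NumPy and toy values (hypothetical horizon and K) of the broadcast-built sliding-window indexer used above: adding a row of within-window offsets to a column of start positions yields every length-K window at once, and shifting the window set by K pairs each input window with the window K steps ahead:

    import numpy as np

    horizon, K = 7, 2
    idxer = np.arange(K)[None, :] + np.arange(horizon - K + 1)[:, None]
    # idxer -> [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]
    seq = 10 * np.arange(horizon)                       # toy 1-D series
    inputs, targets = seq[idxer[:-K]], seq[idxer[K:]]   # past -> K steps ahead
    assert np.array_equal(inputs[0], np.array([0, 10]))
    assert np.array_equal(targets[0], np.array([20, 30]))
    assert inputs.shape == targets.shape
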