Example #1
0
            # Compute the graph Fourier transform (eigendecomposition) of
            # Ghat; Ghat.E is read below to obtain the largest eigenvalue.
            Ghat.computeGFT()
            # Normalize the graph shift operator by its largest (real)
            # eigenvalue so repeated graph shifts stay numerically bounded.
            # NOTE(review): assumes Ghat.W is the GSO and Ghat.E its
            # eigenvalues — confirm against the graphTools API.
            S = Ghat.W / np.max(np.real(Ghat.E))
            # permIdentity leaves the node order unchanged but returns the
            # ordering explicitly; `order` is stored with the model below so
            # results can be mapped back to the original node numbering.
            S, order = graphTools.permIdentity(S)
            #S = torch.tensor(S).to(device)

            ################
            # ARCHITECTURE #
            ################

            #\\\\\\\
            #\\\ MODEL: GIN
            #\\\\\\\\\\\\

            # Instantiate the Selection GNN from the hyperparameter dict:
            # graph filtering (F, K, bias), nonlinearity (sigma), pooling
            # (N, rho, alpha), readout MLP (dimLayersMLP), and the
            # normalized GSO S as the graph structure.
            GIN = archit.SelectionGNN(hParamsGIN['F'], hParamsGIN['K'],
                                      hParamsGIN['bias'], hParamsGIN['sigma'],
                                      hParamsGIN['N'], hParamsGIN['rho'],
                                      hParamsGIN['alpha'],
                                      hParamsGIN['dimLayersMLP'], S)
            GIN.to(device)  # Move architecture to the selected device
            # Optimizer: ADAM with externally defined learning rate and
            # moment-decay coefficients (beta1, beta2).
            optimizer = optim.Adam(GIN.parameters(),
                                   lr=learningRate,
                                   betas=(beta1, beta2))
            # Loss function: lossFunction is a callable/class defined outside
            # this excerpt; it is instantiated fresh here.
            chosenLoss = lossFunction()
            # Model: bind architecture, loss, and optimizer into a single
            # wrapper, tagged with its name, save directory, and node order.
            modelBind = model.Model(GIN, chosenLoss, optimizer,
                                    hParamsGIN['name'], saveDir, order)

            ############
            # TRAINING # (and VALIDATION)
Example #2
0
        #\\\ Ordering
        # Normalize the stored GSO G.S by the largest diagonal entry of G.E
        # (presumably the eigenvalue matrix — confirm against graphTools),
        # then obtain the identity node ordering.
        S, order = graphTools.permIdentity(G.S / np.max(np.diag(G.E)))
        # order is an np.array with the ordering of the nodes with respect
        # to the original GSO (the original GSO is kept in G.S).

        ################
        # ARCHITECTURE #
        ################

        # Build the Selection GNN from the hyperparameter dictionary; the
        # inline comments group the constructor arguments by stage.
        thisArchit = archit.SelectionGNN(  # Graph filtering
            hParamsSelGNNnpl['F'],
            hParamsSelGNNnpl['K'],
            hParamsSelGNNnpl['bias'],
            # Nonlinearity
            hParamsSelGNNnpl['sigma'],
            # Pooling
            hParamsSelGNNnpl['N'],
            hParamsSelGNNnpl['rho'],
            hParamsSelGNNnpl['alpha'],
            # MLP
            hParamsSelGNNnpl['dimLayersMLP'],
            # Structure
            S)
        # This is necessary to move all the learnable parameters to be
        # stored in the device (mostly, if it's a GPU)
        thisArchit.to(device)

        #############
        # OPTIMIZER #
        #############

        # Select the optimizer by name; only the 'ADAM' branch header is
        # visible — its body continues past the end of this excerpt.
        if thisTrainer == 'ADAM':
Example #3
0
            ################
            # Override parameters with grid parameters.
            # Overwrite the architecture hyperparameters with the values of
            # the current grid-search combination (features, filter taps,
            # number of nodes kept at the single pooling stage).
            hParamsPolynomial['F'] = nFeatures
            hParamsPolynomial['K'] = nShifts
            hParamsPolynomial['N'] = [nNodes]

            # Announce the (F, K) combination being trained, if verbose.
            if doPrint:
                print('COMBINATION {0}, {1}'.format(str(hParamsPolynomial['F']), str(hParamsPolynomial['K'])))

            # Build the Selection GNN for this grid point; the inline
            # comments group the constructor arguments by stage. S is the
            # (normalized) GSO defined earlier, outside this excerpt.
            thisArchit = archit.SelectionGNN(  # Graph filtering
                hParamsPolynomial['F'],
                hParamsPolynomial['K'],
                hParamsPolynomial['bias'],
                # Nonlinearity
                hParamsPolynomial['sigma'],
                # Pooling
                hParamsPolynomial['N'],
                hParamsPolynomial['rho'],
                hParamsPolynomial['alpha'],
                # MLP
                hParamsPolynomial['dimLayersMLP'],
                # Structure
                S)
            # This is necessary to move all the learnable parameters to be
            # stored in the device (mostly, if it's a GPU)
            thisArchit.to(device)

            #############
            # OPTIMIZER #
            #############

            # Select the optimizer by name; only the 'ADAM' branch header is
            # visible — its body continues past the end of this excerpt.
            if thisTrainer == 'ADAM':