Example #1
def objective(trial):
    ################################
    #### INIT OPTIM AND MODEL ######
    ################################
    # Sample this trial's hyperparameters (suggest_float replaces Optuna's
    # deprecated suggest_loguniform/suggest_uniform API).
    learningRate = trial.suggest_float('learning_rate', 1.5e-3, 1e-2, log=True)
    # doLearningRateDecay = True  # learning rate decay flag
    # learningRateDecayRate = 0.9  # decay rate
    # learningRateDecayPeriod = 1  # epochs between lr updates
    trainingOptions['learningRateDecayRate'] = \
        trial.suggest_float('learningRateDecayRate', 0.8, 0.999)
    # Fixed at 1; use trial.suggest_int('learningRateDecayPeriod', 1, 10) to tune.
    trainingOptions['learningRateDecayPeriod'] = 1
    beta1 = trial.suggest_float('beta1', 0.89, 0.91)
    beta2 = trial.suggest_float('beta2', 0.99, 0.9999)
    trainingOptions['trial'] = trial
    #\\\ Optimizer
    thisOptim = optim.Adam(arch.parameters(), lr=learningRate,
                           betas=(beta1, beta2))

    #\\\ Model
    trainable_model = model.Model(arch.to(device), lossFunction(), thisOptim,
                                  trainer, evaluator, device,
                                  args.model_name, '')
    print("Training model %s..." % trainable_model, end=' ', flush=True)

    thisTrainVars = trainable_model.train(data, nEpochs, batchSize,
                                          **trainingOptions)

    # Save the variables
    lossTrain = thisTrainVars['lossTrain']
    costTrain = thisTrainVars['costTrain']
    lossValid = thisTrainVars['lossValid']
    costValid = thisTrainVars['costValid']

    print("OK", flush = True)

    #######################################
    ####### EVALUATION       ##############
    #######################################

    thisEvalVars = trainable_model.evaluate(data)
    costBest = thisEvalVars['costBest']
    costLast = thisEvalVars['costLast']
    print("Last Evaluation \t%s: %6.2f%% [Best] %6.2f%% [Last]" % (trainable_model, costBest * 100, costLast * 100))
    return costBest
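
Since objective returns the best validation cost, it plugs straight into an Optuna study. A minimal sketch of the driving loop; the direction and trial budget here are illustrative, not taken from the project:

import optuna

# Minimize, since objective() returns a cost figure.
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)  # trial budget is a placeholder
print('Best hyperparameters:', study.best_params)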
Example #2
            thisOptim = optim.RMSprop(thisArchit.parameters(),
                                      lr=learningRate,
                                      alpha=beta1)

        ########
        # LOSS #
        ########

        thisLossFunction = lossFunction  # (change here if different from default)

        #########
        # MODEL #
        #########

        SelGNNnpl = model.Model(thisArchit, thisLossFunction, thisOptim,
                                thisName, saveDir, order)

        modelsGNN[thisName] = SelGNNnpl

        writeVarValues(
            varsFile, {
                'name': thisName,
                'thisTrainer': thisTrainer,
                'thisLearningRate': thisLearningRate,
                'thisBeta1': thisBeta1,
                'thisBeta2': thisBeta2
            })

        if doPrint:
            print("OK.")
Example #3
            elif thisTrainer == 'RMSprop':
                thisOptim = optim.RMSprop(thisArchit.parameters(),
                                          lr=learningRate,
                                          alpha=beta1)

            ########
            # LOSS #
            ########

            thisLossFunction = lossFunction

            #########
            # MODEL #
            #########

            Polynomial = model.Model(thisArchit, thisLossFunction, thisOptim,
                                     thisName, saveDir, order)

            modelsGNN[thisName] = Polynomial

            writeVarValues(
                varsFile, {
                    'name': thisName,
                    'thisTrainer': thisTrainer,
                    'thisLearningRate': thisLearningRate,
                    'thisBeta1': thisBeta1,
                    'thisBeta2': thisBeta2
                })

        ###################################################################
        #                                                                   #
        #                    TRAINING                                       #
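
The snippet is cut off at the TRAINING banner; in the full script, the populated modelsGNN dictionary is handed to the batch trainer, as Example #9 shows:

train.MultipleModels(modelsGNN, data, nEpochs=nEpochs, batchSize=batchSize,
                     **trainingOptions)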
Example #4
                                      alpha=beta1)

        ########
        # LOSS #
        ########

        # Initialize the loss function
        thisLossFunction = loss.adaptExtraDimensionLoss(lossFunction)

        #########
        # MODEL #
        #########

        # Create the model
        modelCreated = model.Model(thisArchit, thisLossFunction, thisOptim,
                                   thisTrainer, thisEvaluator, thisDevice,
                                   thisName, saveDir)

        # Store it
        modelsGNN[thisName] = modelCreated

        # Write the main hyperparameters
        writeVarValues(
            varsFile, {
                'name': thisName,
                'thisOptimizationAlgorithm': thisOptimAlg,
                'thisTrainer': thisTrainer,
                'thisEvaluator': thisEvaluator,
                'thisLearningRate': thisLearningRate,
                'thisBeta1': thisBeta1,
                'thisBeta2': thisBeta2
Example #5
            #\\\\\\\\\\\\

            GIN = archit.SelectionGNN(hParamsGIN['F'], hParamsGIN['K'],
                                      hParamsGIN['bias'], hParamsGIN['sigma'],
                                      hParamsGIN['N'], hParamsGIN['rho'],
                                      hParamsGIN['alpha'],
                                      hParamsGIN['dimLayersMLP'], S)
            GIN.to(device)  # Move architecture to the selected device
            # Optimizer
            optimizer = optim.Adam(GIN.parameters(),
                                   lr=learningRate,
                                   betas=(beta1, beta2))
            # Loss function
            chosenLoss = lossFunction()
            # Model
            modelBind = model.Model(GIN, chosenLoss, optimizer,
                                    hParamsGIN['name'], saveDir, order)

            ############
            # TRAINING # (and VALIDATION)
            ############

            if doPrint:
                print("\t%15s: Training..." % GINname, end=' ', flush=True)

            modelBind.train(data, nEpochs, batchSize, **trainingOptions)

            if doPrint:
                print("OK", flush=True)

            ##############
            # EVALUATION #
Example #6
        thisOptim = optim.SGD(thisArchit.parameters(), lr=learningRate)
    elif thisTrainer == 'RMSprop':
        thisOptim = optim.RMSprop(thisArchit.parameters(),
                                  lr=learningRate, alpha=beta1)

    ########
    # LOSS #
    ########

    thisLossFunction = lossFunction

    #########
    # MODEL #
    #########

    EdgeVariant = model.Model(thisArchit, thisLossFunction, thisOptim,
                              thisName, saveDir, order)

    modelsGNN[thisName] = EdgeVariant

    writeVarValues(varsFile,
                   {'name': thisName,
                    'thisTrainer': thisTrainer,
                    'thisLearningRate': thisLearningRate,
                    'thisBeta1': thisBeta1,
                    'thisBeta2': thisBeta2})

###################################################################
#                                                                   #
#                    TRAINING                                       #
#                                                                   #
#####################################################################
Example #7

        elif thisTrainer == 'RMSprop':
            thisOptim = optim.RMSprop(thisArchit.parameters(),
                                      lr=learningRate,
                                      alpha=beta1)

        ########
        # LOSS #
        ########

        thisLossFunction = loss.adaptExtraDimensionLoss(lossFunction)

        #########
        # MODEL #
        #########

        modelCreated = model.Model(thisArchit, thisLossFunction, thisOptim,
                                   thisName, saveDir)

        modelsGNN[thisName] = modelCreated

        writeVarValues(
            varsFile, {
                'name': thisName,
                'thisTrainer': thisTrainer,
                'thisLearningRate': thisLearningRate,
                'thisBeta1': thisBeta1,
                'thisBeta2': thisBeta2
            })

        if doPrint:
            print("OK")
Example #8
File: main3.py Project: juanelenter/DNAi
                        nn.ReLU, [N],
                        gml.NoPool, [1], [1],
                        S,
                        batchnorm=True), torch.nn.Linear(N, 1, bias=True))

################################
#### INIT OPTIM AND MODEL ######
################################

#\\\ Optimizer
thisOptim = optim.Adam(arch.parameters(),
                       lr=learningRate,
                       betas=(beta1, beta2))

#\\\ Model
trainable_model = model.Model(arch.to(device), lossFunction(), thisOptim,
                              trainer, evaluator, device, args.model_name, '')

#######################################
####### ACTUAL TRAINING ###############
#######################################

print("Training model %s..." % trainable_model, end=' ', flush=True)

# Train
trainingOptions = {}
trainingOptions['saveDir'] = ''
trainingOptions['printInterval'] = 1
trainingOptions['validationInterval'] = validationInterval
trainingOptions['doEarlyStopping'] = True
trainingOptions['earlyStoppingLag'] = args.early_stopping
trainingOptions['doLearningRateDecay'] = doLearningRateDecay
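
The snippet cuts off while trainingOptions is still being populated; once complete, the dictionary is unpacked into the training call exactly as Example #1 does. Continuing with the same names:

thisTrainVars = trainable_model.train(data, nEpochs, batchSize,
                                      **trainingOptions)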
Example #9
def train_net(data, h_parameters, phi=None):
    # At this point we can read off the number of nodes (provisionally; it
    # may change later, once the graph is created and options such as forcing
    # connectedness come into effect)
    nNodes = data.selectedAuthor['all']['wordFreq'].shape[1]

    #########
    # GRAPH #
    #########

    # Create graph
    nodesToKeep = []  # stores the list of nodes kept after all modifications
    # to the graph, so we can then update the data samples accordingly; since
    # lists are mutable (passed by reference), the function can fill this
    # list in place without needing to return it
    G = graphTools.Graph('fuseEdges', nNodes,
                         data.selectedAuthor['train']['WAN'], 'sum',
                         graphNormalizationType, keepIsolatedNodes,
                         forceUndirected, forceConnected, nodesToKeep)
    G.computeGFT()  # Compute the GFT of the stored GSO

    # And re-update the number of nodes for changes in the graph (due to
    # enforced connectedness, for instance)
    if phi is None:
        nNodes = G.N
        nodesToKeep = np.array(nodesToKeep)
        # And re-update the data (keep only the nodes that remain after
        # removing isolated nodes or enforcing connectedness)
        data.samples['train']['signals'] = \
            data.samples['train']['signals'][:, nodesToKeep]
        data.samples['valid']['signals'] = \
            data.samples['valid']['signals'][:, nodesToKeep]
        data.samples['test']['signals'] = \
            data.samples['test']['signals'][:, nodesToKeep]
    else:
        nNodes = phi.shape[0]

    # Once data is completely formatted and in appropriate fashion, change its
    # type to torch and move it to the appropriate device
    data.astype(torch.float64)
    data.to(device)

    ##################################################################
    #                                                                   #
    #                    MODELS INITIALIZATION                          #
    #                                                                   #
    #####################################################################

    # Override parameters with grid parameters.
    hParamsPolynomial['F'] = h_parameters[0]
    hParamsPolynomial['K'] = h_parameters[1]

    # This is the dictionary where we store the models (in a model.Model
    # class, that is then passed to training).
    modelsGNN = {}

    # If a new model is to be added, it should be initialized here.

    # \\\\\\\\\\\\
    # \\\ MODEL 2: Polynomial GNN
    # \\\\\\\\\\\\

    thisName = hParamsPolynomial['name']

    ##############
    # PARAMETERS #
    ##############

    # \\\ Optimizer options
    #   (If different from the default ones, change here.)
    thisTrainer = trainer
    thisLearningRate = learningRate
    thisBeta1 = beta1
    thisBeta2 = beta2

    if phi is None:
        # \\\ Ordering
        S, order = graphTools.permIdentity(G.S / np.max(np.diag(G.E)))
        # order is an np.array with the ordering of the nodes with respect
        # to the original GSO (the original GSO is kept in G.S).
    else:
        # Compute the eigenvalues of the matrix. np.linalg.eig returns them
        # as a 1-D array, so take the maximum of the real parts directly
        # (wrapping e in np.diag would build a matrix whose off-diagonal
        # zeros distort the maximum).
        e, V = np.linalg.eig(phi)
        # \\\ Ordering
        highest_eig_val = np.max(e.real)

        if highest_eig_val == 0:
            S, order = graphTools.permIdentity(phi)
        else:
            S, order = graphTools.permIdentity(phi / highest_eig_val)
        # order is an np.array with the ordering of the nodes with respect
        # to the original GSO (the original GSO is kept in G.S).

    ################
    # ARCHITECTURE #
    ################

    hParamsPolynomial['N'] = [nNodes]

    if doPrint:
        print('')
        print('COMBINATION {0}, {1}'.format(str(hParamsPolynomial['F']),
                                            str(hParamsPolynomial['K'])))

    thisArchit = archit.SelectionGNN(  # Graph filtering
        hParamsPolynomial['F'],
        hParamsPolynomial['K'],
        hParamsPolynomial['bias'],
        # Nonlinearity
        hParamsPolynomial['sigma'],
        # Pooling
        hParamsPolynomial['N'],
        hParamsPolynomial['rho'],
        hParamsPolynomial['alpha'],
        # MLP
        hParamsPolynomial['dimLayersMLP'],
        # Structure
        S)
    # This is necessary to move all the learnable parameters to be
    # stored in the device (mostly, if it's a GPU)
    thisArchit.to(device)

    #############
    # OPTIMIZER #
    #############

    if thisTrainer == 'ADAM':
        thisOptim = optim.Adam(thisArchit.parameters(),
                               lr=learningRate,
                               betas=(beta1, beta2))
    elif thisTrainer == 'SGD':
        thisOptim = optim.SGD(thisArchit.parameters(), lr=learningRate)
    elif thisTrainer == 'RMSprop':
        thisOptim = optim.RMSprop(thisArchit.parameters(),
                                  lr=learningRate,
                                  alpha=beta1)

    ########
    # LOSS #
    ########

    thisLossFunction = lossFunction

    #########
    # MODEL #
    #########

    Polynomial = model.Model(thisArchit, thisLossFunction, thisOptim, thisName,
                             saveDir, order)

    modelsGNN[thisName] = Polynomial

    ###################################################################
    #                                                                   #
    #                    TRAINING                                       #
    #                                                                   #
    #####################################################################

    ############
    # TRAINING #
    ############

    # On top of the rest of the training options, we pass the identification
    # of this specific data split realization.

    # This is the function that trains the models stored in the dictionary
    # modelsGNN on the dataset data, with the specified training options.
    train.MultipleModels(modelsGNN,
                         data,
                         nEpochs=nEpochs,
                         batchSize=batchSize,
                         **trainingOptions)

    # Return the trained model, keyed by the name set in hParamsPolynomial.
    return modelsGNN[thisName]
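
A hypothetical invocation of train_net, shown only to make the signature concrete; the feature list h_parameters[0] and filter-tap list h_parameters[1] below are placeholders (a SelectionGNN expects one more entry in F than in K):

# Placeholder grid point: input features plus two layers of 32 features,
# with 5 filter taps per layer.
trainedModel = train_net(data, h_parameters=([1, 32, 32], [5, 5]))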