Example #1
0
import os, math, torch, kora.install.rdkit, pandas as pd
from model.parsing import parse_train_args
from data_model.data import construct_loader
from util import Standardizer, create_logger, get_loss_func
from model.main import GNN
import csv
import numpy as np
from model.training import *

# Parse CLI options and seed torch so runs are reproducible.
args = parse_train_args()
torch.manual_seed(args.seed)

# Build the data loaders, then a target standardizer from the
# training-set statistics and the task-appropriate loss.
train_loader, val_loader = construct_loader(args)
train_dataset = train_loader.dataset
mean = train_dataset.mean
std = train_dataset.std
stdzer = Standardizer(mean, std, args.task)
loss = get_loss_func(args)

# load model
model = GNN(args, train_dataset.num_node_features,
            train_dataset.num_edge_features).to(args.device)
print('Model architecture: ', model)
if 'best_model' in args.model_path:
    # A 'best_model' checkpoint stores the raw state dict directly.
    state_dict = torch.load(args.model_path, map_location=args.device)
else:
    # Other checkpoints wrap the weights under 'model_state_dict'.
    checkpoint = torch.load(args.model_path, map_location=args.device)
    state_dict = checkpoint['model_state_dict']
model.load_state_dict(state_dict)


def train_and_save_predictions(loader, preds_path, viz_dir=None, viz_ids=None):
    # predict on train data
Example #2
0
    def train(self, X, T, **params):
        """Fit the network weights on inputs X and targets T.

        Optional keyword parameters (consumed from **params):
            verbose    -- print the optimizer's stop reason (default False)
            Lambda     -- L2 weight-penalty coefficient (default 0.)
            niter      -- max optimizer iterations (default 1000)
            wprecision -- weight-change stop tolerance (default 1e-10)
            fprecision -- objective-change stop tolerance (default 1e-10)
            wtracep    -- record the weight trace in the optimizer (default False)
            ftracep    -- record the objective trace (default False)
            optim      -- optimizer choice, 'scg' or 'steepest' (default 'scg')
            explore    -- forwarded to Standardizer on first fit (default False)

        Returns the optimizer's result dict (keys include 'w', 'f', and
        possibly 'ftrace' / 'reason').
        Raises ValueError for an unknown `optim` value.
        """
        verbose = params.pop('verbose', False)
        # training parameters
        _lambda = params.pop('Lambda', 0.)

        # parameters for scg
        niter = params.pop('niter', 1000)
        wprecision = params.pop('wprecision', 1e-10)
        fprecision = params.pop('fprecision', 1e-10)
        wtracep = params.pop('wtracep', False)
        ftracep = params.pop('ftracep', False)

        # optimization
        optim = params.pop('optim', 'scg')

        # Fit standardizers lazily on the first call.  `is None` (not
        # `== None`) avoids invoking a possibly-overloaded __eq__ on the
        # standardizer object.
        if self.stdX is None:
            explore = params.pop('explore', False)
            self.stdX = Standardizer(X, explore)
        Xs = self.stdX.standardize(X)
        if self.stdT is None and self.stdTarget:
            self.stdT = Standardizer(T)
            T = self.stdT.standardize(T)

        def gradientf(weights):
            # Gradient of the objective w.r.t. the flat weight vector.
            self.unpack(weights)
            Y, Z = self.forward(Xs)
            error = self._errorf(T, Y)
            return self.backward(error, Z, T, _lambda)

        def optimtargetf(weights):
            """Optimization target function: MSE plus L2 weight penalty."""
            self.unpack(weights)
            Y, _ = self.forward(Xs)
            # Stack the non-bias rows (1:) of every layer's weight matrix
            # into one column vector, so biases are excluded from the penalty.
            Wnb = np.array([])
            for i in range(self._nLayers):
                layer = self._W[i][1:, ].reshape(
                    self._W[i].size - self._W[i][0, ].size, 1)
                Wnb = layer if len(Wnb) == 0 else np.vstack((Wnb, layer))
            wpenalty = _lambda * np.dot(Wnb.flat, Wnb.flat)
            return self._objectf(T, Y, wpenalty)

        if optim == 'scg':
            result = scg(self.cp_weight(),
                         gradientf,
                         optimtargetf,
                         wPrecision=wprecision,
                         fPrecision=fprecision,
                         nIterations=niter,
                         wtracep=wtracep,
                         ftracep=ftracep,
                         verbose=False)
            self.unpack(result['w'][:])
            self.f = result['f']
        elif optim == 'steepest':
            result = steepest(self.cp_weight(),
                              gradientf,
                              optimtargetf,
                              nIterations=niter,
                              xPrecision=wprecision,
                              fPrecision=fprecision,
                              xtracep=wtracep,
                              ftracep=ftracep)
            self.unpack(result['w'][:])
        else:
            # Previously an unrecognized optimizer fell through with
            # `result` unbound, raising NameError below; fail fast instead.
            raise ValueError(
                "Unknown optim %r; expected 'scg' or 'steepest'" % optim)
        if ftracep:
            self.ftrace = result['ftrace']
        if 'reason' in result and verbose:
            print(result['reason'])

        return result