Example #1
import os
import csv
import math
import numpy as np
import pandas as pd
import torch
import kora.install.rdkit  # Colab helper: importing this module installs RDKit
from model.parsing import parse_train_args
from model.main import GNN
from model.training import *
from data_model.data import construct_loader
from util import Standardizer, create_logger, get_loss_func

args = parse_train_args()
torch.manual_seed(args.seed)

train_loader, val_loader = construct_loader(args)
mean = train_loader.dataset.mean
std = train_loader.dataset.std
stdzer = Standardizer(mean, std, args.task)
loss = get_loss_func(args)

# load model
model = GNN(args, train_loader.dataset.num_node_features,
            train_loader.dataset.num_edge_features).to(args.device)
print('Model architecture: ', model)
# 'best_model' checkpoints store the state dict directly; other checkpoints
# wrap it under the 'model_state_dict' key
if 'best_model' in args.model_path:
    state_dict = torch.load(args.model_path, map_location=args.device)
else:
    checkpoint = torch.load(args.model_path, map_location=args.device)
    state_dict = checkpoint['model_state_dict']
model.load_state_dict(state_dict)


def train_and_save_predictions(loader, preds_path, viz_dir=None, viz_ids=None):
    # predict on train data
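    # NOTE: the original body is truncated at this point; the lines below are
    # a hypothetical sketch only. They assume each batch exposes `.y` targets
    # and that `stdzer(out, rev=True)` maps standardized model output back to
    # the original target scale (viz_dir/viz_ids handling is omitted).
    model.eval()
    all_targets, all_preds = [], []
    with torch.no_grad():
        for batch in loader:
            batch = batch.to(args.device)
            out = model(batch).cpu().numpy()
            all_preds.extend(stdzer(out, rev=True))
            all_targets.extend(batch.y.cpu().numpy())
    pd.DataFrame({'target': all_targets,
                  'prediction': all_preds}).to_csv(preds_path, index=False)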
Example #2
    def train(self, X, T, **params):

        verbose = params.pop('verbose', False)
        # training parameters
        _lambda = params.pop('Lambda', 0.)

        # parameters for the SCG optimizer
        niter = params.pop('niter', 1000)
        wprecision = params.pop('wprecision', 1e-10)
        fprecision = params.pop('fprecision', 1e-10)
        wtracep = params.pop('wtracep', False)
        ftracep = params.pop('ftracep', False)

        # optimization
        optim = params.pop('optim', 'scg')

        # lazily create the input/target standardizers on the first call
        if self.stdX is None:
            explore = params.pop('explore', False)
            self.stdX = Standardizer(X, explore)
        Xs = self.stdX.standardize(X)
        if self.stdT is None and self.stdTarget:
            self.stdT = Standardizer(T)
            T = self.stdT.standardize(T)

        def gradientf(weights):
            # gradient of the objective with respect to the packed weights
            self.unpack(weights)
            Y, Z = self.forward(Xs)
            error = self._errorf(T, Y)
            return self.backward(error, Z, T, _lambda)

        def optimtargetf(weights):
            """Optimization target: the objective (0.5 * MSE) plus an L2
            penalty on all non-bias weights."""
            self.unpack(weights)
            Y, _ = self.forward(Xs)
            # collect the non-bias weights of every layer into one column vector
            Wnb = np.array([])
            for i in range(self._nLayers):
                wflat = self._W[i][1:, ].reshape(
                    self._W[i].size - self._W[i][0, ].size, 1)
                Wnb = wflat if len(Wnb) == 0 else np.vstack((Wnb, wflat))
            wpenalty = _lambda * np.dot(Wnb.flat, Wnb.flat)
            return self._objectf(T, Y, wpenalty)

        if optim == 'scg':
            result = scg(self.cp_weight(),
                         gradientf,
                         optimtargetf,
                         wPrecision=wprecision,
                         fPrecision=fprecision,
                         nIterations=niter,
                         wtracep=wtracep,
                         ftracep=ftracep,
                         verbose=False)
            self.unpack(result['w'][:])
            self.f = result['f']
        elif optim == 'steepest':
            result = steepest(self.cp_weight(),
                              gradientf,
                              optimtargetf,
                              nIterations=niter,
                              xPrecision=wprecision,
                              fPrecision=fprecision,
                              xtracep=wtracep,
                              ftracep=ftracep)
            self.unpack(result['w'][:])
        else:
            raise ValueError("unknown optim '%s'" % optim)
        if ftracep:
            self.ftrace = result['ftrace']
        if 'reason' in result and verbose:
            print(result['reason'])

        return result
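# A hypothetical usage sketch (not part of the original source), assuming
# `net` is an instance of the class this method belongs to, e.g. the
# NeuralNet of Example #3, and X, T are numpy arrays; every keyword shown
# maps to one of the params popped above:
#
#     result = net.train(X, T, optim='scg', niter=500, Lambda=0.01,
#                        wprecision=1e-8, fprecision=1e-8, verbose=True)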
Example #3
class NeuralNet:
    """ neural network class for regression
        
        Parameters
        ----------
        nunits: list
            the number of inputs, hidden units, and outputs

        Methods
        -------
        set_hunit
            update/initialize the hidden-layer weights

        pack 
            pack multiple weights of each layer into one vector

        forward
            forward processing of neural network

        backward
            back-propagation of neural network

        train
            train the neural network

        use
            apply the trained network for prediction

        Attributes
        ----------
        _nLayers
            the number of weight layers (len(nunits) - 1)

        rho
            learning rate

        _W
            weights
        _weights
            weights as one flat vector (_W holds views into _weights)

        stdX
            standardization class for data
        stdT
            standardization class for target

    """
    def __init__(self, nunits):
        self._nLayers = len(nunits) - 1
        self.rho = [1] * self._nLayers
        self._W = []
        wdims = []
        lenweights = 0
        for i in range(self._nLayers):
            nwr = nunits[i] + 1
            nwc = nunits[i + 1]
            wdims.append((nwr, nwc))
            lenweights = lenweights + nwr * nwc

        self._weights = np.random.uniform(-0.1, 0.1, lenweights)
        start = 0  # fixed index error 20110107
        for i in range(self._nLayers):
            end = start + wdims[i][0] * wdims[i][1]
            # each layer matrix is a reshaped view into _weights, so writing
            # to _weights in unpack() updates every _W[i] as well
            self._W.append(self._weights[start:end].reshape(wdims[i]))
            start = end

        self.stdX = None
        self.stdT = None
        self.stdTarget = True

    def add_ones(self, w):
        return np.hstack((np.ones((w.shape[0], 1)), w))

    def get_nlayers(self):
        return self._nLayers

    def set_hunit(self, w):
        for i in range(self._nLayers - 1):
            if w[i].shape != self._W[i].shape:
                print("set_hunit: shapes do not match!")
                break
            else:
                self._W[i][:] = w[i][:]

    def pack(self, w):
        # flatten each layer's weights and concatenate into one vector
        # (a list is required: newer numpy rejects a bare map object here)
        return np.hstack([np.ravel(wi) for wi in w])

    def unpack(self, weights):
        self._weights[:] = weights[:]  # unpack

    def cp_weight(self):
        return self._weights.copy()  # a detached copy of the flat weight vector

    def RBF(self, X, m=None, s=None):
        if m is None: m = np.mean(X)
        if s is None: s = 2  #np.std(X)
        r = 1. / (np.sqrt(2 * np.pi) * s)
        return r * np.exp(-(X - m)**2 / (2 * s**2))

    def forward(self, X):
        t = X
        Z = []  # inputs to each layer, saved for back-propagation

        for i in range(self._nLayers):
            Z.append(t)
            if i == self._nLayers - 1:
                # linear output layer
                t = np.dot(self.add_ones(t), self._W[i])
            else:
                # tanh hidden layer
                t = np.tanh(np.dot(self.add_ones(t), self._W[i]))
        return (t, Z)

    def backward(self, error, Z, T, lmb=0):
        delta = error
        N = T.size
        dws = []
        # walk the layers from output back to input
        for i in range(self._nLayers - 1, -1, -1):
            rh = float(self.rho[i]) / N
            if i == 0:
                lmbterm = 0
            else:
                # L2 penalty applies to the non-bias rows only
                lmbterm = lmb * np.vstack((np.zeros(
                    (1, self._W[i].shape[1])), self._W[i][1:, ]))
            dws.insert(0,
                       (-rh * np.dot(self.add_ones(Z[i]).T, delta) + lmbterm))
            if i != 0:
                # propagate the error through the tanh derivative
                delta = np.dot(delta, self._W[i][1:, :].T) * (1 - Z[i]**2)
        return self.pack(dws)

    def _errorf(self, T, Y):
        return T - Y

    def _objectf(self, T, Y, wpenalty):
        return 0.5 * np.mean(np.square(T - Y)) + wpenalty

    def train(self, X, T, **params):

        verbose = params.pop('verbose', False)
        # training parameters
        _lambda = params.pop('Lambda', 0.)

        # parameters for the SCG optimizer
        niter = params.pop('niter', 1000)
        wprecision = params.pop('wprecision', 1e-10)
        fprecision = params.pop('fprecision', 1e-10)
        wtracep = params.pop('wtracep', False)
        ftracep = params.pop('ftracep', False)

        # optimization
        optim = params.pop('optim', 'scg')

        # lazily create the input/target standardizers on the first call
        if self.stdX is None:
            explore = params.pop('explore', False)
            self.stdX = Standardizer(X, explore)
        Xs = self.stdX.standardize(X)
        if self.stdT is None and self.stdTarget:
            self.stdT = Standardizer(T)
            T = self.stdT.standardize(T)

        def gradientf(weights):
            # gradient of the objective with respect to the packed weights
            self.unpack(weights)
            Y, Z = self.forward(Xs)
            error = self._errorf(T, Y)
            return self.backward(error, Z, T, _lambda)

        def optimtargetf(weights):
            """Optimization target: the objective (0.5 * MSE) plus an L2
            penalty on all non-bias weights."""
            self.unpack(weights)
            Y, _ = self.forward(Xs)
            # collect the non-bias weights of every layer into one column vector
            Wnb = np.array([])
            for i in range(self._nLayers):
                wflat = self._W[i][1:, ].reshape(
                    self._W[i].size - self._W[i][0, ].size, 1)
                Wnb = wflat if len(Wnb) == 0 else np.vstack((Wnb, wflat))
            wpenalty = _lambda * np.dot(Wnb.flat, Wnb.flat)
            return self._objectf(T, Y, wpenalty)

        if optim == 'scg':
            result = scg(self.cp_weight(),
                         gradientf,
                         optimtargetf,
                         wPrecision=wprecision,
                         fPrecision=fprecision,
                         nIterations=niter,
                         wtracep=wtracep,
                         ftracep=ftracep,
                         verbose=False)
            self.unpack(result['w'][:])
            self.f = result['f']
        elif optim == 'steepest':
            result = steepest(self.cp_weight(),
                              gradientf,
                              optimtargetf,
                              nIterations=niter,
                              xPrecision=wprecision,
                              fPrecision=fprecision,
                              xtracep=wtracep,
                              ftracep=ftracep)
            self.unpack(result['w'][:])
        else:
            raise ValueError("unknown optim '%s'" % optim)
        if ftracep:
            self.ftrace = result['ftrace']
        if 'reason' in result and verbose:
            print(result['reason'])

        return result

    def use(self, X, retZ=False):
        if self.stdX:
            Xs = self.stdX.standardize(X)
        else:
            Xs = X
        Y, Z = self.forward(Xs)
        if self.stdT is not None:
            Y = self.stdT.unstandardize(Y)
        if retZ:
            return Y, Z
        return Y
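# A minimal usage sketch (hypothetical, not part of the original source):
# it assumes numpy is imported as np and that the external Standardizer and
# scg helpers this class depends on are importable.
if __name__ == '__main__':
    X = np.linspace(0, 10, 50).reshape(-1, 1)   # 50 one-dimensional inputs
    T = np.sin(X)                               # regression targets
    net = NeuralNet([1, 10, 1])                 # 1 input, 10 tanh hidden units, 1 output
    net.train(X, T, niter=200, optim='scg')
    Y = net.use(X)                              # predictions, mapped back to target scale
    print('final objective:', net.f)            # set by train() when optim == 'scg'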
Example #4
class NeuralNetLog:
    """Variant of NeuralNet with a sigmoid output layer, trained against a
    negative log-likelihood objective (see _objectf); targets are expected
    to be 0/1 class indicators."""
    def __init__(self, nunits):
        self._nLayers = len(nunits) - 1
        self.rho = [1] * self._nLayers
        self._W = []
        wdims = []
        lenweights = 0
        for i in range(self._nLayers):
            nwr = nunits[i] + 1
            nwc = nunits[i + 1]
            wdims.append((nwr, nwc))
            lenweights = lenweights + nwr * nwc

        self._weights = np.random.uniform(-0.1, 0.1, lenweights)
        start = 0  # fixed index error 20110107
        for i in range(self._nLayers):
            end = start + wdims[i][0] * wdims[i][1]
            # each layer matrix is a reshaped view into _weights, so writing
            # to _weights in unpack() updates every _W[i] as well
            self._W.append(self._weights[start:end].reshape(wdims[i]))
            start = end

        self.stdX = None
        self.stdT = None
        self.stdTarget = True

    def add_ones(self, w):
        return np.hstack((np.ones((w.shape[0], 1)), w))

    def get_nlayers(self):
        return self._nLayers

    def set_hunit(self, w):
        for i in range(self._nLayers - 1):
            if w[i].shape != self._W[i].shape:
                print("set_hunit: shapes do not match!")
                break
            else:
                self._W[i][:] = w[i][:]

    def pack(self, w):
        # flatten each layer's weights and concatenate into one vector
        # (a list is required: newer numpy rejects a bare map object here)
        return np.hstack([np.ravel(wi) for wi in w])

    def unpack(self, weights):
        self._weights[:] = weights[:]  # unpack

    def cp_weight(self):
        return self._weights.copy()  # a detached copy of the flat weight vector

    def RBF(self, X, m=None, s=None):
        if m is None: m = np.mean(X)
        if s is None: s = 2  #np.std(X)
        r = 1. / (np.sqrt(2 * np.pi) * s)
        return r * np.exp(-(X - m)**2 / (2 * s**2))

    def forward(self, X):
        t = X
        Z = []  # inputs to each layer, saved for back-propagation

        for i in range(self._nLayers):
            Z.append(t)
            if i == self._nLayers - 1:
                # sigmoid output layer
                t = 1 / (1 + np.exp(-np.dot(self.add_ones(t), self._W[i])))
            else:
                # tanh hidden layer
                t = np.tanh(np.dot(self.add_ones(t), self._W[i]))
        return (t, Z)

    def backward(self, error, Z, T, lmb=0):
        delta = error
        N = T.size
        dws = []
        for i in range(self._nLayers - 1, -1, -1):
            rh = float(self.rho[i]) / N
            if i == 0:
                lmbterm = 0
            else:
                lmbterm = lmb * np.vstack((np.zeros(
                    (1, self._W[i].shape[1])), self._W[i][1:, ]))
            dws.insert(0,
                       (-rh * np.dot(self.add_ones(Z[i]).T, delta) + lmbterm))
            if i != 0:
                # propagate the error through the tanh derivative
                delta = np.dot(delta, self._W[i][1:, :].T) * (1 - Z[i]**2)
        return self.pack(dws)

    def _errorf(self, T, Y):
        return T - Y

    def _objectf(self, T, Y, wpenalty):
        # negative log-likelihood plus the L2 weight penalty
        return -np.sum(T * np.log(Y)) + wpenalty

    def train(self, X, T, **params):
        verbose = params.pop('verbose', False)
        # training parameters
        _lambda = params.pop('Lambda', 0)

        # parameters for the SCG optimizer
        niter = params.pop('niter', 1000)
        wprecision = params.pop('wprecision', 1e-10)
        fprecision = params.pop('fprecision', 1e-10)
        wtracep = params.pop('wtracep', False)
        ftracep = params.pop('ftracep', False)

        # optimization
        optim = params.pop('optim', 'scg')

        if self.stdX is None:
            explore = params.pop('explore', False)
            self.stdX = Standardizer(X, explore)
        Xs = self.stdX.standardize(X)
        # target standardization is disabled here (note the trailing
        # 'and False'), presumably because the targets are class indicators
        if self.stdT is None and self.stdTarget and False:
            self.stdT = Standardizer(T)
            T = self.stdT.standardize(T)

        def gradientf(weights):
            # gradient of the objective with respect to the packed weights
            self.unpack(weights)
            Y, Z = self.forward(Xs)
            error = self._errorf(T, Y)
            return self.backward(error, Z, T, _lambda)

        def optimtargetf(weights):
            """Optimization target: negative log-likelihood plus an L2
            penalty on all non-bias weights."""
            self.unpack(weights)
            Y, _ = self.forward(Xs)
            # collect the non-bias weights of every layer into one column vector
            Wnb = np.array([])
            for i in range(self._nLayers):
                wflat = self._W[i][1:, ].reshape(
                    self._W[i].size - self._W[i][0, ].size, 1)
                Wnb = wflat if len(Wnb) == 0 else np.vstack((Wnb, wflat))
            wpenalty = _lambda * np.dot(Wnb.flat, Wnb.flat)
            return self._objectf(T, Y, wpenalty)

        if optim == 'scg':
            result = scg(self.cp_weight(),
                         gradientf,
                         optimtargetf,
                         wPrecision=wprecision,
                         fPrecision=fprecision,
                         nIterations=niter,
                         wtracep=wtracep,
                         ftracep=ftracep,
                         verbose=False)
            self.unpack(result['w'][:])
            self.f = result['f']
        elif optim == 'steepest':
            result = steepest(self.cp_weight(),
                              gradientf,
                              optimtargetf,
                              nIterations=niter,
                              xPrecision=wprecision,
                              fPrecision=fprecision,
                              xtracep=wtracep,
                              ftracep=ftracep)
            self.unpack(result['w'][:])
        else:
            raise ValueError("unknown optim '%s'" % optim)
        if ftracep:
            self.ftrace = result['ftrace']
        if 'reason' in result and verbose:
            print(result['reason'])

        return result

    def use(self, X, retZ=False):
        if self.stdX:
            Xs = self.stdX.standardize(X)
        else:
            Xs = X
        Y, Z = self.forward(Xs)
        if self.stdT is not None:
            Y = self.stdT.unstandardize(Y)
        if retZ:
            return Y, Z
        return Y
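# A minimal usage sketch for NeuralNetLog (hypothetical, not part of the
# original source): binary classification with 0/1 indicator targets, which
# matches the sigmoid output layer and log-likelihood objective above.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(100, 2)                                      # 2-D inputs
    T = (X[:, 0] + X[:, 1] > 0).astype(float).reshape(-1, 1)   # indicator targets
    net = NeuralNetLog([2, 5, 1])    # 2 inputs, 5 tanh hidden units, sigmoid output
    net.train(X, T, niter=300, optim='scg')
    probs = net.use(X)               # sigmoid outputs in (0, 1)
    print('training accuracy:', np.mean((probs > 0.5) == T))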