Example no. 1
0
def testModel(outName,
              DM=None,
              testSet='all',
              ep=None,
              reTest=False,
              batchSize=20):
    """Test a saved model on *testSet* and return its predictions.

    Predictions are cached to ``testP-<testSet>-Ep<ep>.npz`` inside the
    run's output folder; the cache is reused unless ``reTest`` is True.
    Outputs are mapped back through ``DM.transOut`` with the saved
    normalization statistics before being returned as ``(yP, ycP)``.
    """
    dictMaster = loadMaster(outName)
    if ep is None:
        ep = dictMaster['nEpoch']
    outFolder = nameFolder(outName)
    testFile = os.path.join(
        outFolder, 'testP-{}-Ep{}.npz'.format(testSet, ep))

    if os.path.exists(testFile) and reTest is False:
        # reuse the cached prediction file
        print('load saved test result')
        saved = np.load(testFile, allow_pickle=True)
        return saved['yP'], saved['ycP']

    statTup = loadStat(outName)
    model = loadModel(outName, ep=ep)
    # build the test data model when the caller did not pass one in
    if DM is None:
        DM = dbBasin.DataModelFull(dictMaster['dataName'])
    varTup = (dictMaster['varX'], dictMaster['varXC'],
              dictMaster['varY'], dictMaster['varYC'])
    # test for full sequence for now
    sd = '1979-01-01'
    ed = '2020-01-01'
    dataTup = DM.extractData(varTup, testSet, sd, ed)
    dataTup = DM.transIn(dataTup, varTup, statTup=statTup)
    sizeLst = trainBasin.getSize(dataTup)
    # NaN option [2, 2, 0, 0] is a train-time setting; fall back to
    # [0, 0, 0, 0] for testing
    if dictMaster['optNaN'] == [2, 2, 0, 0]:
        dictMaster['optNaN'] = [0, 0, 0, 0]
    dataTup = trainBasin.dealNaN(dataTup, dictMaster['optNaN'])
    x, xc = dataTup[0], dataTup[1]
    ny = sizeLst[2]
    # test model - point by point
    yOut, ycOut = trainBasin.testModel(
        model, x, xc, ny, batchSize=batchSize)
    yP = DM.transOut(yOut, statTup[2], dictMaster['varY'])
    ycP = DM.transOut(ycOut, statTup[3], dictMaster['varYC'])
    np.savez(testFile, yP=yP, ycP=ycP)
    return yP, ycP
Example no. 2
0
def trainModel(outName):
    """Train the model configured in the master dict of *outName*.

    Loads and normalizes the training data, builds the loss and model
    from the master dict, trains ``saveEpoch`` epochs at a time (saving
    a checkpoint after each chunk) and writes the loss history to
    ``loss.csv`` in the output folder.
    """
    outFolder = nameFolder(outName)
    dictP = loadMaster(outName)

    # assemble the training data model
    DF = dbBasin.DataFrameBasin(dictP['dataName'])
    varKeys = ('varX', 'varXC', 'varY', 'varYC')
    dictVar = {key: dictP[key] for key in varKeys}
    DM = dbBasin.DataModelBasin(DF, subset=dictP['trainSet'], **dictVar)
    if dictP['borrowStat'] is not None:
        # reuse normalization statistics from another run
        DM.loadStat(dictP['borrowStat'])
    DM.trans(mtdX=dictP['mtdX'], mtdXC=dictP['mtdXC'],
             mtdY=dictP['mtdY'], mtdYC=dictP['mtdYC'])
    DM.saveStat(outFolder)
    dataTup = trainBasin.dealNaN(DM.getData(), dictP['optNaN'])

    # loss and model
    lossFun = getattr(crit, dictP['crit'])()
    model = defineModel(dataTup, dictP)
    if torch.cuda.is_available():
        lossFun = lossFun.cuda()
        model = model.cuda()

    # optimizer (only AdaDelta is supported here)
    if dictP['optim'] != 'AdaDelta':
        raise RuntimeError('optimizor function not specified')
    optim = torch.optim.Adadelta(model.parameters())

    nEp = dictP['nEpoch']
    sEp = dictP['saveEpoch']
    logFile = os.path.join(outFolder, 'log')
    if os.path.exists(logFile):
        os.remove(logFile)  # start a fresh log for this run
    lossLst = list()
    for k in range(0, nEp, sEp):
        model, optim, lossEp = trainBasin.trainModel(
            dataTup, model, lossFun, optim, batchSize=dictP['batchSize'],
            nEp=sEp, cEp=k, logFile=logFile,
            optBatch=dictP['optBatch'], nIterEp=dictP['nIterEp'])
        # checkpoint after every chunk of sEp epochs
        saveModelState(outName, k + sEp, model, optim=optim)
        lossLst.extend(lossEp)

    lossFile = os.path.join(outFolder, 'loss.csv')
    pd.DataFrame(lossLst).to_csv(lossFile, index=False, header=False)
Example no. 3
0
def testModel(outName, DF=None, testSet='all', ep=None, reTest=False,
              batchSize=20):
    """Test a saved model state on *testSet*, caching predictions.

    Returns ``(yP, ycP)``, reloaded from the cached ``.npz`` file when
    it exists and ``reTest`` is False; otherwise runs the model and
    writes a fresh cache.
    """
    dictP = loadMaster(outName)
    if ep is None:
        ep = dictP['nEpoch']
    outFolder = nameFolder(outName)
    testFile = os.path.join(
        outFolder, 'testP-{}-Ep{}.npz'.format(testSet, ep))

    if os.path.exists(testFile) and reTest is False:
        # reuse the cached prediction file
        print('load saved test result')
        saved = np.load(testFile, allow_pickle=True)
        return saved['yP'], saved['ycP']

    # build the test data model when the caller did not pass one in
    if DF is None:
        DF = dbBasin.DataFrameBasin(dictP['dataName'])
    varKeys = ('varX', 'varXC', 'varY', 'varYC')
    dictVar = {key: dictP[key] for key in varKeys}
    DM = dbBasin.DataModelBasin(DF, subset=testSet, **dictVar)
    DM.loadStat(outFolder)
    dataTup = trainBasin.dealNaN(DM.getData(), dictP['optNaN'])

    # rebuild the network and restore the saved state
    model = defineModel(dataTup, dictP)
    model = loadModelState(outName, ep, model)

    # test model - point by point
    x, xc = dataTup[0], dataTup[1]
    ny = np.shape(dataTup[2])[2]
    yOut, ycOut = trainBasin.testModel(
        model, x, xc, ny, batchSize=batchSize)
    yP = DM.transOutY(yOut)
    ycP = DM.transOutYC(ycOut)
    np.savez(testFile, yP=yP, ycP=ycP)
    return yP, ycP
Example no. 4
0
outName = 'weathering-FPR2QC-t365-B10'
ep = 100

# save: load the pickled full model and keep only its state dict
outFolder = basinFull.nameFolder(outName)
modelFile = os.path.join(outFolder, 'model_ep{}'.format(ep))
model = torch.load(modelFile)
modelStateFile = os.path.join(outFolder, 'modelState_ep{}'.format(ep))
torch.save(model.state_dict(), modelStateFile)

# load: rebuild the network from the master dict and restore the state
dictP = basinFull.loadMaster(outName)
DF = dbBasin.DataFrameBasin(dictP['dataName'])
dictVar = {key: dictP[key] for key in ('varX', 'varXC', 'varY', 'varYC')}
DM = dbBasin.DataModelBasin(DF, subset='A10', **dictVar)
DM.loadStat(outFolder)
dataTup = DM.getData()
nx, nxc, ny, nyc, nt, ns = trainBasin.getSize(dataTup)
dataTup = trainBasin.dealNaN(dataTup, dictP['optNaN'])
# pick the network class by name
modelDct = {'CudnnLSTM': rnn.CudnnLstmModel,
            'LstmModel': rnn.LstmModel}
if dictP['modelName'] not in modelDct:
    raise RuntimeError('Model not specified')
model = modelDct[dictP['modelName']](
    nx=nx + nxc, ny=ny + nyc, hiddenSize=dictP['hiddenSize'])
model.load_state_dict(torch.load(modelStateFile))
Example no. 5
0
def trainModel(outName):
    """Train a basin model from the master dict of *outName*.

    Extracts and normalizes the training data, builds the LSTM network
    and loss from the master dict, trains ``saveEpoch`` epochs at a
    time (checkpointing after each chunk) and writes the loss history
    to ``loss.csv`` in the output folder.
    """
    outFolder = nameFolder(outName)
    dictP = loadMaster(outName)

    # data: extract, normalize, patch NaNs
    DM = dbBasin.DataModelFull(dictP['dataName'])
    varTup = (dictP['varX'], dictP['varXC'], dictP['varY'], dictP['varYC'])
    dataTup = DM.extractData(varTup, dictP['subset'], dictP['sd'], dictP['ed'])
    if dictP['borrowStat'] is None:
        dataTup, statTup = DM.transIn(dataTup, varTup)
    else:
        # normalize with statistics borrowed from another run
        statTup = loadStat(dictP['borrowStat'])
        dataTup = DM.transIn(dataTup, varTup, statTup=statTup)
    dataTup = trainBasin.dealNaN(dataTup, dictP['optNaN'])
    wrapStat(outName, statTup)

    nx, nxc, ny, nyc, nt, ns = trainBasin.getSize(dataTup)
    # define loss
    lossFun = getattr(crit, dictP['crit'])()
    if dictP['crit'] == 'SigmaLoss':
        # SigmaLoss needs twice the output width (value + sigma)
        ny = ny * 2
        nyc = nyc * 2
    # define model: pick the network class by name
    modelDct = {'CudnnLSTM': rnn.CudnnLstmModel,
                'LstmModel': rnn.LstmModel}
    if dictP['modelName'] not in modelDct:
        raise RuntimeError('Model not specified')
    model = modelDct[dictP['modelName']](
        nx=nx + nxc, ny=ny + nyc, hiddenSize=dictP['hiddenSize'])

    if torch.cuda.is_available():
        lossFun = lossFun.cuda()
        model = model.cuda()

    # optimizer (only AdaDelta is supported here)
    if dictP['optim'] != 'AdaDelta':
        raise RuntimeError('optimizor function not specified')
    optim = torch.optim.Adadelta(model.parameters())

    nEp = dictP['nEpoch']
    sEp = dictP['saveEpoch']
    logFile = os.path.join(outFolder, 'log')
    if os.path.exists(logFile):
        os.remove(logFile)  # start a fresh log for this run
    lossLst = list()
    for k in range(0, nEp, sEp):
        model, optim, lossEp = trainBasin.trainModel(
            dataTup, model, lossFun, optim,
            batchSize=dictP['batchSize'], nEp=sEp, cEp=k, logFile=logFile)
        # checkpoint after every chunk of sEp epochs
        saveModel(outName, k + sEp, model, optim=optim)
        lossLst.extend(lossEp)

    lossFile = os.path.join(outFolder, 'loss.csv')
    pd.DataFrame(lossLst).to_csv(lossFile, index=False, header=False)
Example no. 6
0
# load data
# NOTE(review): siteNoLst is expected to be defined earlier in the
# session -- it is not visible in this fragment.
sd = '1982-01-01'
ed = '2018-12-31'
DM = dbBasin.DataModelFull.new('test', siteNoLst, sdStr=sd, edStr=ed)

# define inputs
# varX = gridMET.varLst+['datenum', 'sinT', 'cosT']
varX = ['datenum', 'sinT', 'cosT', 'runoff']
varXC = None
# NOTE(review): codeLst comes from outside this fragment -- confirm.
varY = codeLst
varYC = None
varTup = [varX, varXC, varY, varYC]
dataTupRaw = DM.extractData(varTup, 'all', sd, ed)
# normalize; statTup keeps the normalization statistics
dataTup, statTup = DM.transIn(dataTupRaw, varTup)
dataTup = trainBasin.dealNaN(dataTup, [1, 1, 0, 0])
sizeLst = trainBasin.getSize(dataTup)
[nx, nxc, ny, nyc, nt, ns] = sizeLst
# draw one random training batch: window length 365, 100 samples
xTensor, yTensor = trainBasin.subsetRandom(dataTup, [365, 100], sizeLst)

# scratch re-implementation of the subset step, unrolled for inspection
dataLst = dataTup
batchSize = [365, 100]
# rewrite subset
[x, xc, y, yc] = dataLst
[rho, nbatch] = batchSize
[nx, nxc, ny, nyc, nt, ns] = sizeLst
# nbatch random indices along the last (sample) axis
iS = np.random.randint(0, ns, [nbatch])
# mask of observed (non-NaN) targets after the first rho time steps
matB = ~np.isnan(y[rho:, :, :])

# observation counts: s1 per (time, sample); s2 per sample
s1 = np.sum(matB, axis=2)
s2 = np.sum(matB, axis=(0, 2))
Example no. 7
0
# normalization methods derived from the variable lists
mtdY = dbBasin.io.extractVarMtd(varY)

varYC = None
mtdYC = dbBasin.io.extractVarMtd(varYC)
# NOTE(review): subset semantics ('rmYr5' / 'pkYr5') inferred from the
# names only -- confirm against the dbBasin subset definitions.
trainSet = 'rmYr5'
testSet = 'pkYr5'

# NOTE(review): DF, varX, varXC, mtdX and mtdXC come from earlier in
# the session -- not visible in this fragment.
d1 = dbBasin.DataModelBasin(DF,
                            subset=trainSet,
                            varX=varX,
                            varY=varY,
                            varXC=varXC,
                            varYC=varYC)
d1.trans(mtdX=mtdX, mtdXC=mtdXC, mtdY=mtdY, mtdYC=mtdYC)
dataLst = d1.getData()
dataLst = trainBasin.dealNaN(dataLst, [1, 1, 0, 0])

# train
importlib.reload(test)  # pick up live edits to the experimental module
sizeLst = trainBasin.getSize(dataLst)
[nx, nxc, ny, nyc, nt, ns] = sizeLst
model = test.LSTM(nx + nxc, ny + nyc, 256).cuda()
lossFun = crit.RmseLoss().cuda()
optim = torch.optim.Adadelta(model.parameters())

rho = 365
nbatch = 20
nEp = 100
# mask of observed (non-NaN) targets after the first rho time steps
matB = ~np.isnan(dataLst[2][rho:, :, :])
# number of (time, sample) pairs with at least one observation
nD = np.sum(np.any(matB, axis=2))
if nbatch * rho > nD: