Example #1
def transOut(self, data, stat, var):
    mtd = self.extractVarMtd(var)
    # de-normalize data back to output units
    t0 = time.time()
    if data.shape[-1] == 0:
        out = None
    else:
        out = transform.transOutAll(data, mtd, stat)
    t1 = time.time() - t0
    print('transform out {:.2f}s'.format(t1))
    return out
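transform.transOutAll is project code, not a public library. A minimal sketch of what such an inverse transform typically does, assuming each stat is a (mean, std) pair and method names like 'norm' and 'log-norm'; the function body and the log(x+1) convention are assumptions, not the project's actual implementation:

import numpy as np

def trans_out_all_sketch(data, mtdLst, statLst):
    """Hypothetical stand-in for transform.transOutAll: invert per-variable
    normalization, assuming data is [time, site, var] and each stat is the
    (mean, std) pair saved when the data was normalized."""
    out = np.full(data.shape, np.nan)
    for k, (mtd, (mean, std)) in enumerate(zip(mtdLst, statLst)):
        x = data[..., k] * std + mean        # undo the z-score
        if mtd == 'log-norm':
            x = np.exp(x) - 1                # undo a log(x+1)-style transform
        out[..., k] = x
    return out

# usage: two variables, one plain z-scored, one log-then-z-scored
data = np.random.randn(10, 3, 2)
out = trans_out_all_sketch(data, ['norm', 'log-norm'], [(0.0, 1.0), (2.0, 0.5)])
print(out.shape)  # (10, 3, 2)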
Example #2
    loss.backward()
    optim.step()
    pred = yP.detach().cpu().numpy()
    lossLst.append(loss.item())
    print(i, loss.item())

fig, ax = plt.subplots(1, 1)
# ax.plot(errLst, lossLst, '*')
ax.plot(range(len(lossLst)), lossLst)
fig.show()

xT = torch.from_numpy(xx).float().cuda()
yP = model(xT)
pred = yP.detach().cpu().numpy()
obs = yy
qP = transform.transOutAll(pred, ['log-norm'], statLst=statY)
qT = transform.transOutAll(obs, ['log-norm'], statLst=statY)
prcp = xx[:, :, 0]
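The inference pattern above moves a numpy array to the GPU, runs the model, and pulls the prediction back. A minimal self-contained sketch of the same round trip (the tiny stand-in model, shapes, and names are assumptions); wrapping the forward pass in torch.no_grad() avoids building an autograd graph at test time:

import numpy as np
import torch
import torch.nn as nn

model = nn.Linear(4, 1)                            # stand-in for the trained model
xx = np.random.rand(365, 8, 4).astype(np.float32)  # assumed [time, site, feature]

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device).eval()
xT = torch.from_numpy(xx).to(device)
with torch.no_grad():        # no autograd graph needed at inference time
    yP = model(xT)
pred = yP.cpu().numpy()
print(pred.shape)            # (365, 8, 1)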

fig, ax = plt.subplots(1, 1)
k = np.random.randint(0, ns)
# k=0
t2 = np.datetime64(wqData.info.iloc[k]['date'], 'D')
t1 = t2 - np.timedelta64(365, 'D')
t = np.arange(t1, t2)
axplot.plotTS(ax, t, [pred[k, :], obs[k, :]])
# axplot.plotTS(ax.twinx(), t, [prcp[k,:]],cLst='g',styLst='--')
# axplot.plotTS(ax, t, [qP[k, :], qT[k, :]])
ax.set_title(str(k))
fig.show()
Example #3
wT = s1 / s2                      # temporal weight per (time, site)
wS = s2 / np.sum(s2)              # site sampling weights (sum to 1)
iS = np.random.choice(ns, nbatch, p=wS)
iT = np.zeros(nbatch, dtype=int)  # int dtype so the draws can index arrays
for k in range(nbatch):
    iT[k] = np.random.choice(nt - rho, p=wT[:, iS[k]]) + rho
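The draw above is two-stage: first pick nbatch sites with probability wS, then for each chosen site pick a time index from that site's temporal weights, shifted by the window length rho. A self-contained sketch with toy weights (the shapes and the weight construction are assumptions):

import numpy as np

rng = np.random.default_rng(0)
ns, nt, rho, nbatch = 5, 100, 10, 32

s2 = rng.random((nt - rho, ns)) + 1e-6
wT = s2 / s2.sum(axis=0)           # temporal weights: each column sums to 1
wS = s2.sum(axis=0) / s2.sum()     # site weights: sum to 1

iS = rng.choice(ns, nbatch, p=wS)  # stage 1: which site
iT = np.zeros(nbatch, dtype=int)
for k in range(nbatch):            # stage 2: which time step within that site
    iT[k] = rng.choice(nt - rho, p=wT[:, iS[k]]) + rho
print(iS[:5], iT[:5])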

# plot weights
fig, axes = plt.subplots(ns, 1, figsize=(12, 3))
for k in range(ns):
    axes[k].plot(np.arange(nt), wT[:, k])
    ax2 = axes[k].twinx()
    ax2.plot(np.arange(nt), y[:, k, 1], 'r*')
fig.show()

df = dbBasin.readSiteTS(siteNoLst[0],
                        varLst=varY + varX,
                        freq='D',
                        sd=np.datetime64(sd),
                        ed=np.datetime64(ed))
fig, ax = plt.subplots(1, 1, figsize=(12, 3))
k = 1
j = 15
yTemp = yTensor.detach().cpu().numpy()
xTemp = xTensor.detach().cpu().numpy()
datenum = transform.transOutAll(xTemp[:, :, -3:-2], ['norm'], [statTup[0][-3]])
ax.plot(dataTupRaw[0][:, 0, -3], dataTup[2][:, 0, k], 'k*')
# ax.plot(df['datenum'], np.log(df[varTup[2][k]]+1), 'k*')
ax.plot(datenum[:, j, 0], yTemp[:, j, k], 'r.')
fig.show()
Example #4
    loss.backward()
    optim.step()
    pred = yP.detach().cpu().numpy()
    lossLst.append(loss.item())
    print(i, loss.item())

fig, ax = plt.subplots(1, 1)
# ax.plot(errLst, lossLst, '*')
ax.plot(range(len(lossLst)), lossLst)
fig.show()

xT = torch.from_numpy(xx).float().cuda()
yP = model(xT)
pred = yP.detach().cpu().numpy()
obs = yy
qP = transform.transOutAll(pred[:, :, 0], ['log-norm'], statLst=statY)
qT = transform.transOutAll(obs[:, :, 0], ['log-norm'], statLst=statY)
prcp = xx[:, :, 0]

fig, axes = plt.subplots(2, 1)
k = np.random.randint(0, ns)
# k=0
t2 = np.datetime64(wqData.info.iloc[k]['date'], 'D')
t1 = t2 - np.timedelta64(365, 'D')
t = np.arange(t1, t2)
axplot.plotTS(axes[0],
              t, [pred[k, :, 0], obs[k, :, 0]],
              styLst=['*', '--'],
              cLst='rb')
axplot.plotTS(axes[1],
              t, [pred[k, :, 20], obs[k, :, 10]],
              styLst=['*', '--'],
              cLst='rb')
Example #5
(x, xc) = trainTS.dealNaN((x, xc), [1, 1])
nt = x.shape[0]
xT = torch.from_numpy(np.concatenate([x, np.tile(xc, [nt, 1, 1])],
                                     axis=-1)).float()
if torch.cuda.is_available():
    xT = xT.cuda()

model = model.eval()  # equivalent to model.train(mode=False)
# yP, gate = model(xT)
yP, b, gate = model(xT)
yO = yP.detach().cpu().numpy()
# gate = gate.detach().cpu().numpy()

predY = transform.transOut(yO[:, :, 0], mtdY[0], statY[0])
predYC = transform.transOutAll(yO[:, :, 1:], mtdYC, statYC)
obsY = dfY.values
obsYC = dfYC.values

t = dfY.index.values
fig, axes = plt.subplots(4, 1)
axplot.plotTS(axes[0], t, [predY, obsY], styLst='---', cLst='rb')
axes[0].set_title('streamflow')
axes[0].set_xticks([])
codePdf = usgs.codePdf
for k, code in enumerate(codeLst):
    axplot.plotTS(axes[k + 1],
                  t, [predYC[:, 0, k], obsYC[:, k]],
                  styLst='-*',
                  cLst='rb')
    axes[k + 1].set_title(code + ' ' + codePdf.loc[code]['shortName'])
Example #6
codeLst2 = [
    '00095', '00400', '00405', '00600', '00605', '00618', '00660', '00665',
    '00681', '00915', '00925', '00930', '00935', '00940', '00945', '00950',
    '00955', '70303', '71846', '80154'
]

# plot hist
importlib.reload(axplot)
importlib.reload(transform)
importlib.reload(usgs)

varRLst = [code + '-R' for code in usgs.newC]
mtdLst = waterQuality.extractVarMtd(varRLst)
matRN, stat = transform.transInAll(matR, mtdLst)
matRN2 = transform.transOutAll(matRN, mtdLst, stat)
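transInAll and transOutAll form a normalize/denormalize pair, so matRN2 should reproduce matR wherever the transform is invertible. A quick round-trip sketch with a simple z-score stand-in for the pair (the real project functions also handle log transforms and NaNs):

import numpy as np

def trans_in(x):                 # stand-in for transform.transInAll
    mean, std = np.nanmean(x), np.nanstd(x)
    return (x - mean) / std, (mean, std)

def trans_out(xn, stat):         # stand-in for transform.transOutAll
    mean, std = stat
    return xn * std + mean

x = np.random.rand(20, 4)
xn, stat = trans_in(x)
assert np.allclose(trans_out(xn, stat), x)  # the round trip recovers the input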

fig, axes = plt.subplots(5, 4)
ticks = [-0.5, 0, 0.5, 1]
for k, code in enumerate(codeLst2):
    j, i = utils.index2d(k, 5, 4)
    ax = axes[j, i]
    siteNoCode = dictSite[code]
    indS = [siteNoLst.index(siteNo) for siteNo in siteNoCode]
    ic = usgs.newC.index(code)
    data = matRN2[indS, :, ic]
    x1 = utils.flatData(data)
    x2 = utils.rmExt(x1, p=5)

    s, p = scipy.stats.kstest((x2 - np.mean(x2)) / np.std(x2), 'laplace')
    # s, p = scipy.stats.shapiro(x2)
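The sample is standardized before kstest because scipy compares against the standard Laplace when no parameters are given. An alternative is to fit loc and scale and pass them via args; a sketch on synthetic data (note that dividing by np.std only approximately matches the standard Laplace, whose scale is std/sqrt(2)):

import numpy as np
import scipy.stats

x = np.random.laplace(loc=3.0, scale=2.0, size=1000)

# kstest with no args tests against Laplace(loc=0, scale=1), so either
# standardize the sample first or pass fitted parameters explicitly.
loc, scale = scipy.stats.laplace.fit(x)
s, p = scipy.stats.kstest(x, 'laplace', args=(loc, scale))
print(s, p)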
Example #7
        try:
            yP = model(xT)
        except Exception:
            pass
    optim.zero_grad()
    yP = model(xT)
    loss = lossFun(yP, yT)
    loss.backward()
    optim.step()
    ct = time.time() - t0
    logStr = 'Epoch {} Loss {:.3f} time {:.2f}'.format(iEp, loss.item(), ct)
    lossLst.append(loss.item())
    print(logStr)

fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(range(nEp), lossLst)
fig.show()

# test
xA = np.expand_dims(x, axis=1)
xF = torch.from_numpy(xA).float().cuda()
yF = model(xF)
yO = yF.detach().cpu().numpy()[:, 0, :]
yOut = transform.transOutAll(yO, mtdY, statY)
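np.expand_dims(x, axis=1) inserts a singleton batch axis so a single series shaped [time, feature] matches the [time, batch, feature] layout a non-batch-first recurrent model expects. A sketch with a stand-in LSTM (the sizes are assumptions):

import numpy as np
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=3, hidden_size=8)   # expects [time, batch, feature]
x = np.random.rand(50, 3).astype(np.float32)  # one series: [time, feature]

xA = np.expand_dims(x, axis=1)                # -> [time, 1, feature]
with torch.no_grad():
    yF, _ = lstm(torch.from_numpy(xA))
yO = yF.numpy()[:, 0, :]                      # drop the batch axis: [time, hidden]
print(yO.shape)                               # (50, 8)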

# plot
fig, ax = plt.subplots(1, 1, figsize=(16, 6))
# axplot.plotTS(ax, df.index, [y, yO], styLst='--', cLst='kr')
# tBar = [np.d]  # incomplete and unused below
axplot.plotTS(ax, df.index, [Y, yOut], styLst='--', cLst='kr')
fig.show()