Example #1
    def __init__(self, *, nx, ny, hiddenSize, dr=0.8):
        super(LstmModel, self).__init__()
        self.nx = nx
        self.ny = ny
        self.hiddenSize = hiddenSize
        self.ct = 0
        self.nLayer = 1
        self.linearIn = torch.nn.Linear(nx, hiddenSize)
        # two stacked CudnnLstm layers with a 16-unit bottleneck between them
        self.lstm = rnn.CudnnLstm(inputSize=hiddenSize, hiddenSize=16, dr=dr)
        self.lstm2 = rnn.CudnnLstm(inputSize=16, hiddenSize=hiddenSize, dr=dr)
        self.linearOut = torch.nn.Linear(hiddenSize, ny)
        self.gpu = 1
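The linear-in, LSTM, linear-out pattern above is the backbone of every model on this page. A minimal, self-contained sketch of the same pattern using only stock torch.nn (plain nn.LSTM stands in for rnn.CudnnLstm; the class name and shapes are illustrative assumptions, not the original code):

import torch
import torch.nn as nn

class TinyLstm(nn.Module):
    # hypothetical stand-in: plain nn.LSTM instead of rnn.CudnnLstm
    def __init__(self, *, nx, ny, hiddenSize, dr=0.8):
        super().__init__()
        self.linearIn = nn.Linear(nx, hiddenSize)
        self.lstm = nn.LSTM(hiddenSize, 16)    # 16-unit bottleneck, as above
        self.dropout = nn.Dropout(dr)          # stands in for CudnnLstm's internal dr
        self.lstm2 = nn.LSTM(16, hiddenSize)
        self.linearOut = nn.Linear(hiddenSize, ny)

    def forward(self, x):                      # x: (time, batch, nx)
        h = torch.relu(self.linearIn(x))
        h, _ = self.lstm(h)
        h, _ = self.lstm2(self.dropout(h))
        return self.linearOut(h)               # (time, batch, ny)

out = TinyLstm(nx=5, ny=1, hiddenSize=64)(torch.randn(30, 8, 5))
print(out.shape)  # torch.Size([30, 8, 1])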
Example #2
    def __init__(self,
                 *,
                 nx,
                 ny,
                 nobs,
                 hiddenSize,
                 nkernel=(10, 5),
                 kernelSize=(3, 3),
                 stride=(2, 1),
                 dr=0.5,
                 poolOpt=None,
                 cnndr=0.0):
        # two convolutional layers
        super(CNN1dLCmodel, self).__init__()
        self.nx = nx
        self.ny = ny
        self.obs = nobs
        self.hiddenSize = hiddenSize
        nlayer = len(nkernel)
        self.features = nn.Sequential()
        ninchan = 1  # hard-coded input channels (1 for FDC, 4 for SMAP); should be made configurable
        Lout = nobs
        for ii in range(nlayer):
            ConvLayer = cnn.CNN1dkernel(
                ninchannel=ninchan,
                nkernel=nkernel[ii],
                kernelSize=kernelSize[ii],
                stride=stride[ii],
            )
            self.features.add_module("CnnLayer%d" % (ii + 1), ConvLayer)
            if cnndr != 0.0:
                self.features.add_module("dropout%d" % (ii + 1),
                                         nn.Dropout(p=cnndr))
            ninchan = nkernel[ii]
            Lout = cnn.calConvSize(lin=Lout,
                                   kernel=kernelSize[ii],
                                   stride=stride[ii])
            self.features.add_module("Relu%d" % (ii + 1), nn.ReLU())
            if poolOpt is not None:
                self.features.add_module("Pooling%d" % (ii + 1),
                                         nn.MaxPool1d(poolOpt[ii]))
                Lout = cnn.calPoolSize(lin=Lout, kernel=poolOpt[ii])
        self.Ncnnout = int(
            Lout * nkernel[-1])  # total number of CNN features after convolution
        Nf = self.Ncnnout + nx
        self.linearIn = torch.nn.Linear(Nf, hiddenSize)
        self.lstm = rnn.CudnnLstm(inputSize=hiddenSize,
                                  hiddenSize=hiddenSize,
                                  dr=dr)
        self.linearOut = torch.nn.Linear(hiddenSize, ny)
        self.gpu = 1
        self.name = "CNN1dLCmodel"
        self.is_legacy = True
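The Lout bookkeeping tracks how each conv/pool layer shrinks the sequence, so that the flattened feature count fed to linearIn is known up front. A small sketch of the standard Conv1d/MaxPool1d output-length formula that cnn.calConvSize and cnn.calPoolSize presumably implement (the helper below is my own; their exact signatures are assumptions), checked against torch:

import math
import torch
import torch.nn as nn

def conv_out_len(lin, kernel, stride=1, padding=0, dilation=1):
    # standard PyTorch Conv1d / MaxPool1d output-length formula
    return math.floor((lin + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1

lin = 100
conv = nn.Conv1d(1, 10, kernel_size=3, stride=2)
pool = nn.MaxPool1d(2)
x = torch.randn(4, 1, lin)
lconv = conv_out_len(lin, kernel=3, stride=2)
lpool = conv_out_len(lconv, kernel=2, stride=2)  # MaxPool1d default stride == kernel
print(conv(x).shape[-1] == lconv, pool(conv(x)).shape[-1] == lpool)  # True True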
Example #3
    def __init__(self,
                 *,
                 nx,
                 ny,
                 ct,
                 opt=1,
                 hiddenSize=64,
                 cnnSize=32,
                 cp1=(64, 3, 2),
                 cp2=(128, 5, 2),
                 dr=0.5):
        super(LstmCnnForcast, self).__init__()

        if opt == 1:
            # the CNN output seeds the LSTM initial hidden state, so the sizes must match
            cnnSize = hiddenSize

        self.nx = nx
        self.ny = ny
        self.ct = ct
        self.ctRm = True
        self.hiddenSize = hiddenSize
        self.opt = opt
        self.cnnSize = cnnSize
        self.name = "LstmCnnForcast"
        self.is_legacy = True

        if opt == 1:
            self.cnn = cnn.Cnn1d(nx=nx + 1,
                                 nt=ct,
                                 cnnSize=cnnSize,
                                 cp1=cp1,
                                 cp2=cp2)
        if opt == 2:
            self.cnn = cnn.Cnn1d(nx=1,
                                 nt=ct,
                                 cnnSize=cnnSize,
                                 cp1=cp1,
                                 cp2=cp2)

        self.lstm = rnn.CudnnLstm(inputSize=hiddenSize,
                                  hiddenSize=hiddenSize,
                                  dr=dr)
        self.linearIn = torch.nn.Linear(nx + cnnSize, hiddenSize)
        self.linearOut = torch.nn.Linear(hiddenSize, ny)
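For opt == 1 the CNN summary vector can initialize the LSTM hidden state, which is why cnnSize is forced to hiddenSize above. A minimal illustration of seeding h0 this way with stock torch.nn (names and shapes are assumptions, not the hydroDL API):

import torch
import torch.nn as nn

hiddenSize, batch = 64, 8
lstm = nn.LSTM(input_size=hiddenSize, hidden_size=hiddenSize)
cnn_summary = torch.randn(batch, hiddenSize)  # pretend CNN output, one vector per series
h0 = cnn_summary.unsqueeze(0)                 # (num_layers, batch, hidden)
c0 = torch.zeros_like(h0)
x = torch.randn(30, batch, hiddenSize)        # (time, batch, features)
out, _ = lstm(x, (h0, c0))
print(out.shape)  # torch.Size([30, 8, 64])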
Example #4
    def __init__(self, *, nx, ny, hiddenSize, dr=0.5):
        super(CudnnLstmModel, self).__init__()
        self.nx = nx
        self.ny = ny
        self.hiddenSize = hiddenSize
        self.ct = 0
        self.nLayer = 1
        self.linearIn = torch.nn.Linear(nx, hiddenSize)
        # compare version numbers numerically; the original string comparison
        # torch.__version__ > "1.9" misorders "1.10" before "1.9"
        if tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2]) > (1, 9):
            # 2021-10-24. SCP: incorporate the newer torch LSTM to avoid the "weights not contiguous in memory" issue
            self.lstm = torch.nn.LSTM(hiddenSize, hiddenSize, 2, dropout=dr)
        else:
            self.lstm = rnn.CudnnLstm(inputSize=hiddenSize,
                                      hiddenSize=hiddenSize,
                                      dr=dr)
        self.linearOut = torch.nn.Linear(hiddenSize, ny)
        self.gpu = 1
        self.name = "CudnnLstmModel"
        self.is_legacy = True
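The version gate above originally compared strings, which breaks for torch 1.10+ since "1.10" sorts before "1.9". An equivalent numeric check with the packaging library (assuming it is available; it ships as a pip dependency) avoids hand-rolled parsing:

from packaging import version
import torch

use_native_lstm = version.parse(torch.__version__) > version.parse("1.9")
print(use_native_lstm)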
Example #5
    def __init__(self,
                 *,
                 nx,
                 ny,
                 ct,
                 opt=1,
                 hiddenSize=64,
                 cnnSize=32,
                 cp1=(64, 3, 2),
                 cp2=(128, 5, 2),
                 dr=0.5):
        super(LstmCnnCond, self).__init__()

        # opt == 1: cnn output as initial state of LSTM (h0)
        # opt == 2: cnn output as additional output of LSTM
        # opt == 3: cnn output as constant input of LSTM

        if opt == 1:
            cnnSize = hiddenSize

        self.nx = nx
        self.ny = ny
        self.ct = ct
        self.ctRm = False
        self.hiddenSize = hiddenSize
        self.opt = opt
        self.name = "LstmCnnCond"
        self.is_legacy = True

        self.cnn = cnn.Cnn1d(nx=nx, nt=ct, cnnSize=cnnSize, cp1=cp1, cp2=cp2)

        self.lstm = rnn.CudnnLstm(inputSize=hiddenSize,
                                  hiddenSize=hiddenSize,
                                  dr=dr)
        if opt == 3:
            self.linearIn = torch.nn.Linear(nx + cnnSize, hiddenSize)
        else:
            self.linearIn = torch.nn.Linear(nx, hiddenSize)
        if opt == 2:
            self.linearOut = torch.nn.Linear(hiddenSize + cnnSize, ny)
        else:
            self.linearOut = torch.nn.Linear(hiddenSize, ny)
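For opt == 3 the CNN vector is repeated along the time axis and concatenated to every input step, which is why linearIn takes nx + cnnSize features; opt == 2 instead concatenates it to the LSTM output, widening linearOut. A small shape sketch of the opt == 3 case in plain torch (names are illustrative):

import torch

T, batch, nx, cnnSize = 30, 8, 5, 32
x = torch.randn(T, batch, nx)             # forcing time series
z = torch.randn(batch, cnnSize)           # CNN summary vector
z_rep = z.unsqueeze(0).expand(T, -1, -1)  # repeat along time: (T, batch, cnnSize)
x_aug = torch.cat([x, z_rep], dim=-1)     # opt == 3 input: (T, batch, nx + cnnSize)
print(x_aug.shape)  # torch.Size([30, 8, 37])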
Example #6
# second conv block; the first (conv1/pool1, built from cp1) precedes this excerpt
cIn = cOut
cOut, f, p = cp2
conv2 = nn.Conv1d(cIn, cOut, f).cuda()
pool2 = nn.MaxPool1d(p).cuda()
lTmp = cnn.calCnnSize(lTmp, f, 0, 1, 1) / p  # sequence length after conv + pooling

flatLength = int(cOut * lTmp)  # flattened feature count feeding the fully connected head
fc1 = nn.Linear(flatLength, cnnSize).cuda()
fc2 = nn.Linear(cnnSize, cnnSize).cuda()

if opt == 3:
    linearIn = torch.nn.Linear(nx + cnnSize, hiddenSize).cuda()
else:
    linearIn = torch.nn.Linear(nx, hiddenSize).cuda()
lstm = rnn.CudnnLstm(inputSize=hiddenSize, hiddenSize=hiddenSize, dr=dr).cuda()

if opt == 2:
    linearOut = torch.nn.Linear(hiddenSize + cnnSize, ny).cuda()
else:
    linearOut = torch.nn.Linear(hiddenSize, ny).cuda()

# forward
# Conv1d expects (batch, channels, length); the data are stored time-major as
# (time, batch, feature), hence the permute
x1 = torch.cat([cTrain, xTrain[0:ct, :, :]], 2).permute(1, 2, 0)
x1 = pool1(F.relu(conv1(x1)))
x1 = pool2(F.relu(conv2(x1)))
x1 = x1.view(-1, flatLength)  # flatten conv features per series
x1 = F.relu(fc1(x1))
x1 = fc2(x1)

x2 = xTrain[ct:, :, :]  # timesteps after the conditioning window go to the LSTM branch
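The excerpt ends here. Given the layers defined above, under opt == 3 the forward pass would plausibly continue by tiling the CNN summary x1 along time, concatenating it to x2, and running the linearIn, lstm, linearOut stack; the lines below are a reconstruction from those layer shapes, not the original source:

# hypothetical continuation, assuming opt == 3 (reconstruction, not the source)
x1Rep = x1.unsqueeze(0).expand(x2.shape[0], -1, -1)  # (time - ct, batch, cnnSize)
x = torch.cat([x2, x1Rep], dim=2)                    # (time - ct, batch, nx + cnnSize)
h = F.relu(linearIn(x))
h, (hn, cn) = lstm(h)
yOut = linearOut(h)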