Example #1
    def __init__(self, *, nx, nt, cnnSize=32, cp1=(64, 3, 2), cp2=(128, 5, 2)):
        super(Cnn1d, self).__init__()
        self.nx = nx
        self.nt = nt
        cOut, f, p = cp1
        self.conv1 = nn.Conv1d(nx, cOut, f)
        self.pool1 = nn.MaxPool1d(p)
        lTmp = int(cnn.calConvSize(nt, f, 0, 1, 1) / p)

        cIn = cOut
        cOut, f, p = cp2
        self.conv2 = nn.Conv1d(cIn, cOut, f)
        self.pool2 = nn.MaxPool1d(p)
        lTmp = int(cnn.calConvSize(lTmp, f, 0, 1, 1) / p)

        self.flatLength = int(cOut * lTmp)
        self.fc1 = nn.Linear(self.flatLength, cnnSize)
        self.fc2 = nn.Linear(cnnSize, cnnSize)
        self.name = "Cnn1d"
        self.is_legacy = True
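
The flattened size self.flatLength depends on the sequence length left after each conv/pool stage. The helper cnn.calConvSize is library-specific and its exact argument order is not shown here, so the sketch below is only an assumption: a self-contained reimplementation of the standard PyTorch Conv1d/MaxPool1d output-length formula, checked against real layers with illustrative values (nt=100, 8 input channels).

import math

import torch
import torch.nn as nn


def conv1d_out_len(lin, kernel, padding=0, stride=1, dilation=1):
    # Standard output-length formula shared by nn.Conv1d and nn.MaxPool1d.
    return math.floor((lin + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)


# Mirror cp1=(64, 3, 2) above: Conv1d with kernel size 3, then MaxPool1d(2).
nt, nx = 100, 8  # illustrative values, not from the library
x = torch.randn(1, nx, nt)
y = nn.MaxPool1d(2)(nn.Conv1d(nx, 64, 3)(x))
assert y.shape[-1] == conv1d_out_len(nt, 3) // 2  # 98 // 2 == 49

Repeating the same arithmetic for cp2=(128, 5, 2) gives a second-stage length of 22, so flatLength would be 128 * 22 = 2816 for these illustrative inputs.
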
Example #2
    def __init__(self,
                 *,
                 nx,
                 ny,
                 nobs,
                 hiddenSize,
                 nkernel=(10, 5),
                 kernelSize=(3, 3),
                 stride=(2, 1),
                 dr=0.5,
                 poolOpt=None,
                 cnndr=0.0):
        # two convolutional layers
        super(CNN1dLCmodel, self).__init__()
        self.nx = nx
        self.ny = ny
        self.obs = nobs
        self.hiddenSize = hiddenSize
        nlayer = len(nkernel)
        self.features = nn.Sequential()
        ninchan = 1  # TODO: remove this hardcoded value (4 for SMAP, 1 for FDC)
        Lout = nobs
        for ii in range(nlayer):
            ConvLayer = cnn.CNN1dkernel(
                ninchannel=ninchan,
                nkernel=nkernel[ii],
                kernelSize=kernelSize[ii],
                stride=stride[ii],
            )
            self.features.add_module("CnnLayer%d" % (ii + 1), ConvLayer)
            if cnndr != 0.0:
                self.features.add_module("dropout%d" % (ii + 1),
                                         nn.Dropout(p=cnndr))
            ninchan = nkernel[ii]
            Lout = cnn.calConvSize(lin=Lout,
                                   kernel=kernelSize[ii],
                                   stride=stride[ii])
            self.features.add_module("Relu%d" % (ii + 1), nn.ReLU())
            if poolOpt is not None:
                self.features.add_module("Pooling%d" % (ii + 1),
                                         nn.MaxPool1d(poolOpt[ii]))
                Lout = cnn.calPoolSize(lin=Lout, kernel=poolOpt[ii])
        self.Ncnnout = int(
            Lout * nkernel[-1])  # total CNN feature number after convolution
        Nf = self.Ncnnout + nx
        self.linearIn = torch.nn.Linear(Nf, hiddenSize)
        self.lstm = rnn.CudnnLstm(inputSize=hiddenSize,
                                  hiddenSize=hiddenSize,
                                  dr=dr)
        self.linearOut = torch.nn.Linear(hiddenSize, ny)
        self.gpu = 1
        self.name = "CNN1dLCmodel"
        self.is_legacy = True
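
For concreteness, here is how Lout and Ncnnout evolve through the loop above with the default hyperparameters (nkernel=(10, 5), kernelSize=(3, 3), stride=(2, 1), poolOpt=None, so no pooling), assuming cnn.calConvSize applies the standard unpadded Conv1d length formula. The values nobs=100 and nx=5 are illustrative only.

# Worked length calculation for the defaults above (illustrative inputs).
nobs, nx = 100, 5
Lout = nobs
Lout = (Lout - 3) // 2 + 1  # layer 1: kernel 3, stride 2 -> 49
Lout = (Lout - 3) // 1 + 1  # layer 2: kernel 3, stride 1 -> 47
Ncnnout = Lout * 5          # 47 * 5 = 235 flattened CNN features
Nf = Ncnnout + nx           # 240 inputs to linearIn
print(Lout, Ncnnout, Nf)

The Nf -> hiddenSize projection in linearIn suggests that the forward pass concatenates the flattened CNN features with the nx inputs before feeding the LSTM, but that part is not shown in this snippet.
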
Example #3
    def __init__(self,
                 *,
                 nx,
                 ny,
                 rho,
                 nkernel=(10, 5),
                 kernelSize=(3, 3),
                 stride=(2, 1),
                 padding=(1, 1),
                 dr=0.5,
                 poolOpt=None):
        # two convolutional layers
        super(LstmCnn1d, self).__init__()
        self.nx = nx
        self.ny = ny
        self.rho = rho
        nlayer = len(nkernel)
        self.features = nn.Sequential()
        ninchan = nx
        Lout = rho
        for ii in range(nlayer):
            # First layer: no dimension reduction
            ConvLayer = cnn.CNN1dkernel(
                ninchannel=ninchan,
                nkernel=nkernel[ii],
                kernelSize=kernelSize[ii],
                stride=stride[ii],
                padding=padding[ii],
            )
            self.features.add_module("CnnLayer%d" % (ii + 1), ConvLayer)
            ninchan = nkernel[ii]
            Lout = cnn.calConvSize(lin=Lout,
                                   kernel=kernelSize[ii],
                                   stride=stride[ii])
            if poolOpt is not None:
                self.features.add_module("Pooling%d" % (ii + 1),
                                         nn.MaxPool1d(poolOpt[ii]))
                Lout = cnn.calPoolSize(lin=Lout, kernel=poolOpt[ii])
        self.Ncnnout = int(
            Lout * nkernel[-1])  # total CNN feature number after convolution
        self.name = "LstmCnn1d"
        self.is_legacy = True
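
Unlike the previous example, this constructor passes padding to each cnn.CNN1dkernel. Assuming that wrapper behaves like a plain nn.Conv1d for shape purposes, the defaults shrink the length only at the first (strided) layer; a small self-contained check with hypothetical values nx=8 and rho=365:

import torch
import torch.nn as nn

# Rebuild the two convolutions from the defaults above (nkernel=(10, 5),
# kernelSize=(3, 3), stride=(2, 1), padding=(1, 1)) with plain Conv1d layers.
# nx=8 channels and rho=365 time steps are illustrative, not from the library.
x = torch.randn(1, 8, 365)
h = nn.Conv1d(8, 10, 3, stride=2, padding=1)(x)  # -> (1, 10, 183)
h = nn.Conv1d(10, 5, 3, stride=1, padding=1)(h)  # -> (1, 5, 183)
print(h.shape[1] * h.shape[-1])                  # 5 * 183 = 915 flattened features

Note that the calConvSize call inside the loop does not forward padding, so the recorded Lout may differ from the actual layer output if that helper defaults to zero padding.
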