Example #1
    def __init__(self, img_size_list, input_channel, hidden_channels,
                 kernel_size, num_layers, bidirectional=False):
        super(ConvLSTMNetwork, self).__init__()

        self.hidden_channels = hidden_channels
        self.num_layers = num_layers
        self.bidirectional = bidirectional

        # Stack num_layers ConvLSTM layers; each layer's output channels
        # (doubled when bidirectional) become the next layer's input channels.
        convlstm_layer = []
        for i in range(num_layers):
            layer = convlstm.ConvLSTM(img_size_list[i],
                                      input_channel,
                                      hidden_channels[i],
                                      kernel_size[i],
                                      0.2, 0.,
                                      batch_first=True,
                                      bias=True,
                                      peephole=True,
                                      layer_norm=True,
                                      return_sequence=config.SEQUENCE_OUTPUT,
                                      bidirectional=self.bidirectional)
            convlstm_layer.append(layer)
            input_channel = hidden_channels[i] * (2 if self.bidirectional else 1)

        self.convlstm_layer = torch.nn.ModuleList(convlstm_layer)
        self.flatten = Flatten()
        # Classification head over the flattened output of the last layer.
        self.linear2 = torch.nn.Linear(
            hidden_channels[-1] * (2 if self.bidirectional else 1) * 16, 2)
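
Example #1 shows only the constructor. A minimal sketch of a matching forward pass, assuming each stacked ConvLSTM layer can be called on a batch-first (batch, seq, channels, height, width) tensor and returns the tensor to feed into the next layer; the exact return signature depends on this project's ConvLSTM implementation:

    def forward(self, x):
        # x: (batch, seq_len, channels, height, width) since batch_first=True.
        # Assumption: each layer returns a single tensor; unpack an
        # (output, state) tuple instead if the implementation returns one.
        for layer in self.convlstm_layer:
            x = layer(x)
        x = self.flatten(x)        # flatten per-sample features
        return self.linear2(x)     # 2-way output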
Example #2
    def __init__(self, s_dim, a_dim):
        super(CNet, self).__init__()
        # ConvLSTM feature extractor over the state sequence.
        self.convlstm = convlstm.ConvLSTM(input_dim=1, hidden_dim=1,
                                          kernel_size=(KS, KS),
                                          num_layers=CONVLSTM_LAYERS,
                                          batch_first=True)
        # State branch, action branch, and the scalar output layer.
        self.fcs = nn.Linear(RESOLUTION_sum, 1000)
        self.fcs.weight.data.normal_(0.1, 0.02)
        self.fca = nn.Linear(2 * a_dim, 1000)
        self.fca.weight.data.normal_(0.1, 0.02)
        self.out = nn.Linear(1000, 1)
        self.out.weight.data.normal_(0, 0.1)
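
This constructor looks like a critic that merges a state branch and an action branch into a single value. A hypothetical forward sketch under that assumption; how the ConvLSTM output is flattened to RESOLUTION_sum features, and the exact return signature of convlstm.ConvLSTM, are guesses that would have to match the original project:

    def forward(self, s, a):
        # Assumed return: (per-layer outputs, last states); take the last
        # layer's output and flatten it to one feature vector per sample.
        layer_outputs, _ = self.convlstm(s)
        features = layer_outputs[-1].flatten(start_dim=1)
        # Merge state and action branches into a scalar value; `a` is
        # assumed to already carry 2*a_dim features (see self.fca).
        return self.out(torch.relu(self.fcs(features) + self.fca(a)))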
Example #3
    def __init__(self, input_width, input_height, hidden_channels,
                 kernel_size):
        super(ConvLSTM, self).__init__()
        self.input_height = input_height
        self.input_width = input_width

        # Single-layer ConvLSTM over one-channel frames.
        input_channels = 1
        self.lstm = convlstm.ConvLSTM(
            input_channels, hidden_channels, kernel_size, num_layers=1,
            batch_first=True)

        # Project the flattened hidden maps back to one value per pixel.
        input_size = input_width * input_height
        hidden_size = hidden_channels * input_size
        self.out = nn.Linear(hidden_size, input_size)
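
A forward pass for this wrapper would run the frame sequence through the ConvLSTM and feed the flattened hidden maps of the last time step to the linear head, matching the hidden_size computed above. A sketch, assuming the ConvLSTM returns (per-layer outputs, last states) with batch-first tensors:

    def forward(self, x):
        # x: (batch, seq_len, 1, input_height, input_width)
        layer_outputs, _ = self.lstm(x)      # assumed return signature
        h = layer_outputs[-1][:, -1]         # last layer, last time step
        h = h.reshape(h.size(0), -1)         # (batch, hidden_channels * H * W)
        return self.out(h)                   # (batch, input_height * input_width)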
Example #4
    def __init__(self, s_dim, a_dim):
        super(ANet, self).__init__()
        # ConvLSTM feature extractor, then a linear layer producing 2*a_dim action outputs.
        self.convlstm = convlstm.ConvLSTM(input_dim=1, hidden_dim=1,
                                          kernel_size=(KS, KS),
                                          num_layers=CONVLSTM_LAYERS,
                                          batch_first=True)
        self.fc1 = nn.Linear(RESOLUTION_sum, 2 * a_dim)
        self.fc1.weight.data.normal_(0, 0.1)
Example #5
    def __init__(self, in_chans, hidden_chans, kernel_size, layers, bias,
                 dropout):
        super(LSTMLayer, self).__init__()
        # Placeholder for the initial hidden/cell state.
        self.h0c0 = None
        self.lstm = convlstm.ConvLSTM(in_chans, hidden_chans, kernel_size,
                                      layers, bias, dropout)
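
The h0c0 attribute suggests the layer is meant to carry recurrent state between calls. A hypothetical stateful forward sketch; the lstm(x, state) call signature and its (output, state) return are assumptions about this ConvLSTM variant:

    def forward(self, x):
        # Reuse the state from the previous call; None asks the ConvLSTM
        # to start from a fresh zero state (assumed behaviour).
        out, self.h0c0 = self.lstm(x, self.h0c0)
        return out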
Example #6
import torch
import convlstm

SEQ_LEN = 16
BATCH_SIZE = 8
IN_CHANS = 256
HIDDEN_CHANS = 150
HEIGHT = 32
WIDTH = 32
KERNEL_SIZE = 5
LAYERS = 3
DROPOUT = 0.5

# Build a dummy (seq_len, batch, channels, height, width) input on the GPU.
size = SEQ_LEN * BATCH_SIZE * IN_CHANS * HEIGHT * WIDTH
data = torch.arange(size).reshape(SEQ_LEN, BATCH_SIZE, IN_CHANS, HEIGHT,
                                  WIDTH).float().cuda()
lstm = convlstm.ConvLSTM(IN_CHANS,
                         HIDDEN_CHANS,
                         KERNEL_SIZE,
                         LAYERS,
                         dropout=DROPOUT).cuda()
out = lstm(data)[0]

# The output sequence keeps the spatial size and carries HIDDEN_CHANS channels.
assert tuple(out.size()) == (SEQ_LEN, BATCH_SIZE, HIDDEN_CHANS, HEIGHT,
                             WIDTH)
print(out.mean())