Example 1
import torch.nn as nn
# ConvLSTMCell is assumed to be importable from the surrounding project (not shown here).

class Decode_ConvLSTM(nn.Module):
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(Decode_ConvLSTM, self).__init__()

        self._check_kernel_size_consistency(kernel_size)

        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim  = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.height, self.width = input_size

        self.input_dim  = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers

        cell_list = []
        for i in range(0, self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]

            cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
                                          input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))

        self.cell_list = nn.ModuleList(cell_list)
        
        self.conv_model = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 128, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 96, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(96),
            nn.ReLU(),
            nn.Conv2d(96, 32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, input_dim, kernel_size=3, stride=1, padding=1)
        )

        self.deconv_model = nn.Sequential(
            nn.ConvTranspose2d(hidden_dim[0], 32, kernel_size=3, stride=2, padding=1,
                               output_padding=1, groups=1, bias=True),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 32, kernel_size=5, stride=2, padding=2,
                               output_padding=1, groups=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1)
        )
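For context, a minimal instantiation sketch; every size below is an assumption chosen for illustration, not taken from the original project.

# Hypothetical sizes, for illustration only.
decoder = Decode_ConvLSTM(input_size=(16, 16),   # (height, width) of the ConvLSTM feature maps
                          input_dim=1,           # channels of each input frame
                          hidden_dim=[64, 64],   # one hidden size per layer
                          kernel_size=(3, 3),    # extended to every layer by _extend_for_multilayer
                          num_layers=2,
                          batch_first=True)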
Example 2
import torch.nn as nn
# ConvLSTMCell is assumed to come from the same project (a (input_size, hidden_size) cell).

class GenerativeCell(nn.Module):
    def __init__(self, input_size, hidden_size, error_init_size=None):
        """
        Create a generative cell (error, top_down_state, r_state) -> r_state
        :param input_size: {'error': error_size, 'up_state': r_state_size}, r_state_size can be 0
        :param hidden_size: int, dimensionality of the produced hidden (r) state
        :param error_init_size: tuple, full size of initial (null) error
        """
        super(GenerativeCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.error_init_size = error_init_size
        self.memory = ConvLSTMCell(input_size['error'] + input_size['up_state'], hidden_size)
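A hedged construction sketch; the dictionary keys follow the docstring above, and all concrete sizes are made up for illustration.

# Hypothetical sizes, for illustration only.
cell = GenerativeCell(input_size={'error': 6, 'up_state': 16},  # channels of error and top-down state
                      hidden_size=3,                            # channels of the produced r_state
                      error_init_size=(1, 6, 32, 32))           # full shape of the initial null error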
Example 3
import torch.nn as nn
# ConvReLU, TransConvReLU and ConvLSTMCell are assumed to be defined in the project.

class BasicPredNet(nn.Module):
    def __init__(self, in_channels):
        super(BasicPredNet, self).__init__()
        self.conv1 = ConvReLU(in_channels=in_channels,
                              out_channels=8,
                              kernel_size=3)
        self.conv2 = ConvReLU(in_channels=8, out_channels=8, kernel_size=3)
        self.conv3 = ConvReLU(in_channels=8, out_channels=8, kernel_size=3)
        self.conv4 = ConvReLU(in_channels=8, out_channels=4, kernel_size=1)
        # ConvReLU alternative not implemented; the recurrent cell is used instead:
        # self.lstm = ConvReLU(in_channels=4, out_channels=8, kernel_size=3)
        self.lstm = ConvLSTMCell(4, 8)
        self.trans_conv5 = TransConvReLU(in_channels=8,
                                         out_channels=8,
                                         kernel_size=1)
        self.trans_conv6 = TransConvReLU(in_channels=8,
                                         out_channels=8,
                                         kernel_size=3)
        self.trans_conv7 = TransConvReLU(in_channels=8,
                                         out_channels=8,
                                         kernel_size=3)
        self.trans_conv8 = TransConvReLU(in_channels=8,
                                         out_channels=3,
                                         kernel_size=3)
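A usage sketch, assuming the (not shown) forward pass maps a batch of frames to a next-frame prediction; both the input shape and the call signature are assumptions.

import torch

net = BasicPredNet(in_channels=3)
frames = torch.randn(2, 3, 64, 64)  # hypothetical batch of RGB frames
pred = net(frames)                  # assumed to return a 3-channel prediction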
Example 4
import numpy as np
import tensorflow as tf  # TF1-style API (tf.placeholder, tf.nn.dynamic_rnn)

batch_size = 32  # assumed value; batch_size is not defined in the original snippet
timesteps = 15
shape = [40, 40]
kernel = [3, 3]
channels = 1
filters = 12

# Create a placeholder for videos.
inputs = tf.placeholder(tf.float32,
                        [batch_size, timesteps] + shape + [channels])
print(inputs)
# generate_movies() is assumed to be defined elsewhere; it is not part of this snippet.
noisy_movies, shifted_movies = generate_movies()
print(np.shape(noisy_movies))

# Add the ConvLSTM step.
from ConvLSTMCell import ConvLSTMCell
cell = ConvLSTMCell(shape, filters, kernel)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=inputs.dtype)

# There's also a ConvGRUCell that is more memory efficient.
from ConvGRUCell import ConvGRUCell
cell = ConvGRUCell(shape, filters, kernel)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=inputs.dtype)

# It's also possible to use 2D or 4D input instead of 3D.
shape = [100]
kernel = [3]
inputs = tf.placeholder(tf.float32,
                        [batch_size, timesteps] + shape + [channels])
cell = ConvLSTMCell(shape, filters, kernel)
outputs, state = tf.nn.bidirectional_dynamic_rnn(cell, cell, inputs,
                                                 dtype=inputs.dtype)
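Finally, a sketch of evaluating the graph with the TF1 session API; the random feed is an assumption standing in for real data.

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Random data shaped like the placeholder above, for illustration only.
    feed = np.random.rand(*([batch_size, timesteps] + shape + [channels])).astype(np.float32)
    forward_out, backward_out = sess.run(outputs, {inputs: feed})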