Example #1
0
File: local.py Project: lengjia/RRL
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   """Configure a 1D locally-connected layer (only 'valid' padding allowed)."""
   super(LocallyConnected1D, self).__init__(**kwargs)
   # Normalize and validate padding first; everything else is independent.
   normalized_padding = conv_utils.normalize_padding(padding)
   if normalized_padding != 'valid':
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.padding = normalized_padding
   # Canonicalize geometry arguments to 1-tuples.
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.use_bias = use_bias
   # Resolve string identifiers into activation/initializer/regularizer/
   # constraint objects.
   self.activation = activations.get(activation)
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.bias_initializer = initializers.get(bias_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_constraint = constraints.get(bias_constraint)
   # Inputs are (batch, steps, features).
   self.input_spec = InputSpec(ndim=3)
Example #2
0
  def build(self, input_shape):
    """Create this recurrent layer's weights from `input_shape`.

    Expects a 3D (batch, timesteps, input_dim) shape; when a list of shapes
    is passed (multiple inputs), only the first is used.
    """
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    input_shape = tensor_shape.TensorShape(input_shape).as_list()

    # Stateful layers need a fixed batch size so states can be preallocated.
    batch_size = input_shape[0] if self.stateful else None
    self.input_dim = input_shape[2]
    self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))

    # Single recurrent state tensor; stateful layers reset it immediately.
    self.states = [None]
    if self.stateful:
      self.reset_states()

    # Input projection: (input_dim, units).
    self.kernel = self.add_weight(
        shape=(self.input_dim, self.units),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    # State-to-state projection: (units, units).
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    if self.use_bias:
      self.bias = self.add_weight(
          shape=(self.units,),
          name='bias',
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None
    self.built = True
Example #3
0
File: local.py Project: lengjia/RRL
 def build(self, input_shape):
   """Create the locally-connected 1D kernel (and bias).

   Args:
     input_shape: 3D shape (batch, length, input_dim); axis 2 must be
       fully defined.

   Raises:
     ValueError: if axis 2 of `input_shape` is undefined.
   """
   input_shape = tensor_shape.TensorShape(input_shape).as_list()
   input_dim = input_shape[2]
   if input_dim is None:
     # Concatenate into a single message: passing the shape as a second
     # positional arg makes the exception render as a tuple.
     raise ValueError('Axis 2 of input should be fully-defined. '
                      'Found shape: ' + str(input_shape))
   output_length = conv_utils.conv_output_length(
       input_shape[1], self.kernel_size[0], self.padding, self.strides[0])
   # One unshared kernel per output position:
   # (positions, receptive-field size, filters).
   self.kernel_shape = (output_length, self.kernel_size[0] * input_dim,
                        self.filters)
   self.kernel = self.add_weight(
       shape=self.kernel_shape,
       initializer=self.kernel_initializer,
       name='kernel',
       regularizer=self.kernel_regularizer,
       constraint=self.kernel_constraint)
   if self.use_bias:
     # Bias is also unshared across positions.
     self.bias = self.add_weight(
         shape=(output_length, self.filters),
         initializer=self.bias_initializer,
         name='bias',
         regularizer=self.bias_regularizer,
         constraint=self.bias_constraint)
   else:
     self.bias = None
   self.input_spec = InputSpec(ndim=3, axes={2: input_dim})
   self.built = True
Example #4
0
 def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
     """1D pooling base layer; strides default to the pool size."""
     super(_Pooling1D, self).__init__(**kwargs)
     effective_strides = pool_size if strides is None else strides
     self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
     self.strides = conv_utils.normalize_tuple(effective_strides, 1, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     # Pooling operates on (batch, steps, features) inputs.
     self.input_spec = InputSpec(ndim=3)
Example #5
0
    def build(self, input_shape):
        """Build every per-layer conv sublayer and collect their weights.

        `input_shape` is 5D (batch, time, rows, cols, channels); this code
        hard-codes channels_last ordering.
        """
        self.input_spec = [InputSpec(shape=input_shape)]
        self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}

        for l in range(self.nb_layers):
            # LSTM gate convolutions for the R (representation) units; the
            # candidate gate 'c' uses the main activation, the others the
            # inner (gate) activation.
            for c in ['i', 'f', 'c', 'o']:
                act = self.LSTM_activation if c == 'c' else self.LSTM_inner_activation
                self.conv_layers[c].append(
                    Convolution2D(self.R_stack_sizes[l],
                                  self.R_filt_sizes[l],
                                  padding='same',
                                  data_format="channels_last",
                                  activation=act))
            # Prediction (Ahat) convolution; the pixel layer (l == 0) uses relu.
            act = 'relu' if l == 0 else self.A_activation
            self.conv_layers['ahat'].append(
                Convolution2D(self.stack_sizes[l],
                              self.Ahat_filt_sizes[l],
                              padding='same',
                              data_format="channels_last",
                              activation=act))
            # 'a' convolutions feed errors upward; none needed at the top layer.
            if l < self.nb_layers - 1:
                self.conv_layers['a'].append(
                    Convolution2D(self.stack_sizes[l + 1],
                                  self.A_filt_sizes[l],
                                  padding='same',
                                  data_format="channels_last",
                                  activation=self.A_activation))

        self.upsample = UpSampling2D(data_format="channels_last")  # upsampling
        self.pool = MaxPooling2D(data_format="channels_last")  # downsampling

        self._trainable_weights = []
        nb_row, nb_col = (input_shape[-3], input_shape[-2])
        # Super model
        for c in sorted(self.conv_layers.keys()):
            for l in range(len(self.conv_layers[c])):
                # Spatial dims shrink by 2x per level (pooling between levels).
                ds_factor = 2**l
                # Input channel count depends on which sublayer is being built.
                if c == 'ahat':
                    nb_channels = self.R_stack_sizes[l]
                elif c == 'a':
                    nb_channels = 2 * self.stack_sizes[l]
                else:  # i, c, o, f
                    nb_channels = self.stack_sizes[l] * 2 + self.R_stack_sizes[
                        l]
                    if l < self.nb_layers - 1:
                        nb_channels += self.R_stack_sizes[l + 1]
                in_shape = (input_shape[0], nb_row // ds_factor,
                            nb_col // ds_factor, nb_channels
                            )  # up -> downsampling
                self.conv_layers[c][l].build(in_shape)
                self._trainable_weights += self.conv_layers[c][
                    l].trainable_weights

        # Three states per layer: representation, cell, error.
        self.states = [None] * self.nb_layers * 3  # ['r', 'c', 'e']
        if self.extrap_start_time is not None:
            self.t_extrap = K.variable(np.array(self.extrap_start_time),
                                       'int32')
            # Two extra states are used when extrapolating past t_extrap.
            self.states += [None] * 2
Example #6
0
    def build(self, input_shape):
        """Create the locally-connected 2D kernel (and bias).

        Raises:
          ValueError: if either spatial dimension of the input is undefined.
        """
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_last':
            input_row, input_col = input_shape[1:-1]
            input_filter = input_shape[3]
        else:
            input_row, input_col = input_shape[2:]
            input_filter = input_shape[1]
        if input_row is None or input_col is None:
            raise ValueError('The spatial dimensions of the inputs to '
                             ' a LocallyConnected2D layer '
                             'should be fully-defined, but layer received '
                             'the inputs shape ' + str(input_shape))

        output_row = conv_utils.conv_output_length(input_row,
                                                   self.kernel_size[0],
                                                   self.padding,
                                                   self.strides[0])
        output_col = conv_utils.conv_output_length(input_col,
                                                   self.kernel_size[1],
                                                   self.padding,
                                                   self.strides[1])
        self.output_row = output_row
        self.output_col = output_col
        # One unshared kernel per output position:
        # (positions, receptive-field size, filters).
        self.kernel_shape = (output_row * output_col, self.kernel_size[0] *
                             self.kernel_size[1] * input_filter, self.filters)
        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            # Bias is also unshared per output position.
            self.bias = self.add_weight(shape=(output_row, output_col,
                                               self.filters),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Pin the channel axis so later calls must match input_filter.
        if self.data_format == 'channels_first':
            self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
        else:
            self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
        self.built = True
Example #7
0
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    """Store LSTM hyperparameters; weights are created later in build()."""
    super(LSTM, self).__init__(**kwargs)
    self.units = units
    self.use_bias = use_bias
    self.unit_forget_bias = unit_forget_bias
    # Resolve string identifiers into activation / initializer /
    # regularizer / constraint objects.
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp both dropout fractions into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    # Two per-sample states (h and c), each of width `units`.
    self.state_spec = [
        InputSpec(shape=(None, self.units)),
        InputSpec(shape=(None, self.units))
    ]
Example #8
0
 def build(self, input_shape):
   """Build the wrapped layer on the per-timestep shape (time axis removed)."""
   input_shape = tensor_shape.TensorShape(input_shape).as_list()
   assert len(input_shape) >= 3
   self.input_spec = InputSpec(shape=input_shape)
   # Child layer sees (batch,) + trailing dims with the time axis dropped.
   child_input_shape = [input_shape[0]] + input_shape[2:]
   if not self.layer.built:
     self.layer.build(child_input_shape)
     self.layer.built = True
   # NOTE(review): base build() is invoked with no shape argument here —
   # confirm the parent Wrapper.build signature accepts that.
   super(TimeDistributed, self).build()
Example #9
0
 def build(self, input_shape):
   """Build Dense weights, then register constraints (legacy Keras API)."""
   assert len(input_shape) >= 2
   input_dim = input_shape[-1]
   super(Dense, self).build(input_shape)
   self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
   # Legacy constraint registration: maps weight tensor -> constraint in a
   # `self.constraints` dict (pre-2.x Keras convention).
   if self.kernel_constraint:
     self.constraints[self.kernel] = self.kernel_constraint
   if self.use_bias and self.bias_constraint:
     self.constraints[self.bias] = self.bias_constraint
Example #10
0
 def __init__(self, rate, data_format=None, **kwargs):
   """Spatial dropout over 5D inputs; validates `data_format`."""
   super(SpatialDropout3D, self).__init__(rate, **kwargs)
   # Fall back to the backend's global image ordering when unspecified.
   resolved_format = K.image_data_format() if data_format is None else data_format
   if resolved_format not in {'channels_last', 'channels_first'}:
     raise ValueError('data_format must be in '
                      '{"channels_last", "channels_first"}')
   self.data_format = resolved_format
   self.input_spec = InputSpec(ndim=5)
Example #11
0
 def __init__(self,
              pool_size=(2, 2, 2),
              strides=None,
              padding='valid',
              data_format=None,
              **kwargs):
     """3D pooling base layer; strides fall back to the pool size."""
     super(_Pooling3D, self).__init__(**kwargs)
     effective_strides = pool_size if strides is None else strides
     self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
     self.strides = conv_utils.normalize_tuple(effective_strides, 3, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     # 5D inputs: (batch, dim1, dim2, dim3, channels) or channels-first.
     self.input_spec = InputSpec(ndim=5)
Example #12
0
    def build(self, input_shape):
        """Create the dense kernel and optional bias from the input shape."""
        assert len(input_shape) >= 2
        last_dim = input_shape[-1]

        # Kernel maps the last input axis onto `units`.
        self.kernel = self.add_weight((last_dim, self.units),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.bias = None
        if self.use_bias:
            self.bias = self.add_weight((self.units, ),
                                        name='bias',
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        # Accept any rank >= 2 input whose last axis matches the kernel.
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
        self.built = True
Example #13
0
 def __init__(self,
              return_sequences=False,
              return_state=False,
              go_backwards=False,
              stateful=False,
              unroll=False,
              implementation=0,
              **kwargs):
   """Abstract recurrent layer: stores rollout flags; no weights yet."""
   super(Recurrent, self).__init__(**kwargs)
   # Recurrent layers consume (batch, timesteps, features) and support masks.
   self.supports_masking = True
   self.input_spec = [InputSpec(ndim=3)]
   self.state_spec = None
   # Rollout / output-shape options.
   self.implementation = implementation
   self.return_sequences = return_sequences
   self.return_state = return_state
   self.go_backwards = go_backwards
   self.stateful = stateful
   self.unroll = unroll
   # Subclasses that implement dropout override these.
   self.dropout = 0
   self.recurrent_dropout = 0
Example #14
0
 def build(self, input_shape):
     """Create the PReLU `alpha` weights, broadcasting over shared axes."""
     input_shape = tensor_shape.TensorShape(input_shape).as_list()
     param_shape = input_shape[1:]
     self.param_broadcast = [False] * len(param_shape)
     if self.shared_axes is not None:
         # Shared axes collapse to size 1 so alpha broadcasts across them.
         # shared_axes are 1-indexed (axis 0 is the batch axis).
         for i in self.shared_axes:
             param_shape[i - 1] = 1
             self.param_broadcast[i - 1] = True
     self.alpha = self.add_weight(shape=param_shape,
                                  name='alpha',
                                  initializer=self.alpha_initializer,
                                  regularizer=self.alpha_regularizer,
                                  constraint=self.alpha_constraint)
     # Set input spec
     # Pin only the non-shared axes; shared axes may vary between calls.
     axes = {}
     if self.shared_axes:
         for i in range(1, len(input_shape)):
             if i not in self.shared_axes:
                 axes[i] = input_shape[i]
     self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
     self.built = True
  def build(self, input_shape):
    """Create gamma/beta and moving statistics along `self.axis`.

    Raises:
      ValueError: if the normalized axis has an undefined dimension.
    """
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    dim = input_shape[self.axis]
    if dim is None:
      raise ValueError('Axis ' + str(self.axis) + ' of '
                       'input tensor should have a defined dimension '
                       'but the layer received an input with shape ' +
                       str(input_shape) + '.')
    self.input_spec = InputSpec(ndim=len(input_shape), axes={self.axis: dim})
    shape = (dim,)

    # Learnable scale (gamma), only when scaling is enabled.
    if self.scale:
      self.gamma = self.add_weight(
          shape,
          name='gamma',
          initializer=self.gamma_initializer,
          regularizer=self.gamma_regularizer,
          constraint=self.gamma_constraint)
    else:
      self.gamma = None
    # Learnable offset (beta), only when centering is enabled.
    if self.center:
      self.beta = self.add_weight(
          shape,
          name='beta',
          initializer=self.beta_initializer,
          regularizer=self.beta_regularizer,
          constraint=self.beta_constraint)
    else:
      self.beta = None
    # Non-trainable running statistics.
    self.moving_mean = self.add_weight(
        shape,
        name='moving_mean',
        initializer=self.moving_mean_initializer,
        trainable=False)
    self.moving_variance = self.add_weight(
        shape,
        name='moving_variance',
        initializer=self.moving_variance_initializer,
        trainable=False)
    self.built = True
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              return_sequences=False,
              go_backwards=False,
              stateful=False,
              **kwargs):
   """Base state for a 2D convolutional-recurrent layer."""
   super(ConvRecurrent2D, self).__init__(**kwargs)
   # Convolution geometry, normalized to canonical 2-tuples/strings.
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
   self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                   'dilation_rate')
   self.padding = conv_utils.normalize_padding(padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   # Recurrence flags.
   self.return_sequences = return_sequences
   self.go_backwards = go_backwards
   self.stateful = stateful
   # Inputs are 5D: (batch, time, rows, cols, channels) or channels-first.
   self.input_spec = InputSpec(ndim=5)
Example #17
0
    def build(self, input_shape):
        """Create the depthwise kernel (and bias) from the input shape.

        Args:
          input_shape: 4D shape; the channel axis must be defined.

        Raises:
          ValueError: if rank is not 4 or the channel dimension is `None`.
        """
        if len(input_shape) < 4:
            # Concatenate into a single message: a second positional arg
            # would make the exception render as a tuple.
            raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
                             'Received input shape: ' + str(input_shape))
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = 3
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs to '
                             '`DepthwiseConv2D` '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        # One stack of `depth_multiplier` filters per input channel.
        depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],
                                  input_dim, self.depth_multiplier)

        self.depthwise_kernel = self.add_weight(
            shape=depthwise_kernel_shape,
            initializer=self.depthwise_initializer,
            name='depthwise_kernel',
            regularizer=self.depthwise_regularizer,
            constraint=self.depthwise_constraint)

        if self.use_bias:
            # One bias per output channel (input channels x multiplier).
            self.bias = self.add_weight(shape=(input_dim *
                                               self.depth_multiplier, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True
Example #18
0
 def __init__(self, **kwargs):
   """Flatten layer: requires inputs of rank >= 3."""
   super(Flatten, self).__init__(**kwargs)
   self.input_spec = InputSpec(min_ndim=3)
Example #19
0
 def __init__(self, dims, **kwargs):
   """Permute layer: `dims` lists target axis positions (batch excluded)."""
   super(Permute, self).__init__(**kwargs)
   self.dims = tuple(dims)
   # +1 accounts for the implicit batch axis, which `dims` does not cover.
   expected_rank = len(self.dims) + 1
   self.input_spec = InputSpec(ndim=expected_rank)
Example #20
0
 def __init__(self, size=2, data_format=None, **kwargs):
     """Depth-to-space rearrangement with block size `size` on 4D inputs."""
     super(DepthToSpace, self).__init__(**kwargs)
     self.size = size
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
Example #21
0
 def __init__(self, rate, **kwargs):
   """Spatial dropout over 3D (batch, timesteps, channels) inputs."""
   super(SpatialDropout1D, self).__init__(rate, **kwargs)
   self.input_spec = InputSpec(ndim=3)
  def build(self, input_shape):
    """Create fused ConvLSTM kernels and slice out per-gate views (i, f, c, o).

    Kernels store all four gates stacked along the last axis
    (filters * 4) and are then sliced into per-gate tensors.
    """
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # TODO(fchollet): better handling of input spec
    self.input_spec = InputSpec(shape=input_shape)

    if self.stateful:
      self.reset_states()
    else:
      # initial states: 2 all-zero tensor of shape (filters)
      self.states = [None, None]

    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape[channel_axis] is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    # Fused shapes: last dim holds all four gates.
    kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
    self.kernel_shape = kernel_shape
    recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)

    self.kernel = self.add_weight(
        kernel_shape,
        initializer=self.kernel_initializer,
        name='kernel',
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    self.recurrent_kernel = self.add_weight(
        recurrent_kernel_shape,
        initializer=self.recurrent_initializer,
        name='recurrent_kernel',
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    if self.use_bias:
      self.bias = self.add_weight(
          (self.filters * 4,),
          initializer=self.bias_initializer,
          name='bias',
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
      if self.unit_forget_bias:
        # Initialize the forget-gate slice of the bias to 1.
        bias_value = np.zeros((self.filters * 4,))
        bias_value[self.filters:self.filters * 2] = 1.
        K.set_value(self.bias, bias_value)
    else:
      self.bias = None

    # Per-gate views into the fused kernels; gate order is i, f, c, o.
    self.kernel_i = self.kernel[:, :, :, :self.filters]
    self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
    self.kernel_f = self.kernel[:, :, :, self.filters:self.filters * 2]
    self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters:
                                                    self.filters * 2]
    self.kernel_c = self.kernel[:, :, :, self.filters * 2:self.filters * 3]
    self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2:
                                                    self.filters * 3]
    self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
    self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]

    if self.use_bias:
      self.bias_i = self.bias[:self.filters]
      self.bias_f = self.bias[self.filters:self.filters * 2]
      self.bias_c = self.bias[self.filters * 2:self.filters * 3]
      self.bias_o = self.bias[self.filters * 3:]
    else:
      self.bias_i = None
      self.bias_f = None
      self.bias_c = None
      self.bias_o = None
    self.built = True
Example #23
0
  def build(self, input_shape):
    """Create fused LSTM kernels and slice out per-gate views (i, f, c, o).

    Weights are stored fused with all four gates stacked along the last
    axis (units * 4), then sliced into per-gate tensors.
    """
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Stateful layers need a fixed batch size so states can be preallocated.
    batch_size = input_shape[0] if self.stateful else None
    self.input_dim = input_shape[2]
    self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))

    # Two states: hidden (h) and cell (c).
    self.states = [None, None]
    if self.stateful:
      self.reset_states()

    self.kernel = self.add_weight(
        shape=(self.input_dim, self.units * 4),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 4),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)

    if self.use_bias:
      if self.unit_forget_bias:

        # Wrap the configured initializer so the forget-gate slice
        # (second `units` block) starts at ones.
        def bias_initializer(_, *args, **kwargs):
          return K.concatenate([
              self.bias_initializer((self.units,), *args, **kwargs),
              initializers.Ones()((self.units,), *args, **kwargs),
              self.bias_initializer((self.units * 2,), *args, **kwargs),
          ])
      else:
        bias_initializer = self.bias_initializer
      self.bias = self.add_weight(
          shape=(self.units * 4,),
          name='bias',
          initializer=bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None

    # Per-gate views into the fused kernels; gate order is i, f, c, o.
    self.kernel_i = self.kernel[:, :self.units]
    self.kernel_f = self.kernel[:, self.units:self.units * 2]
    self.kernel_c = self.kernel[:, self.units * 2:self.units * 3]
    self.kernel_o = self.kernel[:, self.units * 3:]

    self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
    self.recurrent_kernel_f = self.recurrent_kernel[:, self.units:
                                                    self.units * 2]
    self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2:
                                                    self.units * 3]
    self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]

    if self.use_bias:
      self.bias_i = self.bias[:self.units]
      self.bias_f = self.bias[self.units:self.units * 2]
      self.bias_c = self.bias[self.units * 2:self.units * 3]
      self.bias_o = self.bias[self.units * 3:]
    else:
      self.bias_i = None
      self.bias_f = None
      self.bias_c = None
      self.bias_o = None
    self.built = True
 def __init__(self, data_format=None, **kwargs):
   """Global 3D pooling base layer over 5D inputs."""
   super(_GlobalPooling3D, self).__init__(**kwargs)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(ndim=5)
 def __init__(self, **kwargs):
   """Global 1D pooling base layer over (batch, steps, features) inputs."""
   super(_GlobalPooling1D, self).__init__(**kwargs)
   self.input_spec = InputSpec(ndim=3)
Example #26
0
    def __init__(self,
                 stack_sizes,
                 R_stack_sizes,
                 A_filt_sizes,
                 Ahat_filt_sizes,
                 R_filt_sizes,
                 pixel_max=1.,
                 error_activation='relu',
                 A_activation='relu',
                 LSTM_activation='tanh',
                 LSTM_inner_activation='hard_sigmoid',
                 output_mode='error',
                 extrap_start_time=None,
                 use_roi_loss=False,
                 threshold=None,
                 **kwargs):
        """Validate and store PredNet configuration.

        NOTE(review): argument validation uses `assert`, which is stripped
        under `python -O`; consider raising ValueError instead.
        """
        self.stack_sizes = stack_sizes  # output_dim for each layer
        self.nb_layers = len(stack_sizes)  # layer num
        assert len(
            R_stack_sizes
        ) == self.nb_layers, 'len(R_stack_sizes) must equal len(stack_sizes)'
        self.R_stack_sizes = R_stack_sizes  # R state dim
        assert len(A_filt_sizes) == (
            self.nb_layers -
            1), 'len(A_filt_sizes) must equal len(stack_sizes) - 1'
        self.A_filt_sizes = A_filt_sizes  # A2E filter
        assert len(
            Ahat_filt_sizes
        ) == self.nb_layers, 'len(Ahat_filt_sizes) must equal len(stack_sizes)'
        self.Ahat_filt_sizes = Ahat_filt_sizes  # Ahat2E filter
        assert len(R_filt_sizes) == (
            self.nb_layers), 'len(R_filt_sizes) must equal len(stack_sizes)'
        self.R_filt_sizes = R_filt_sizes  # R_l+1, El 2 R_l filter

        self.extrap_start_time = extrap_start_time
        self.use_roi_loss = use_roi_loss
        self.threshold = threshold

        self.pixel_max = pixel_max
        # Resolve activation identifiers to callables.
        self.error_activation = activations.get(error_activation)
        self.A_activation = activations.get(A_activation)
        self.LSTM_activation = activations.get(LSTM_activation)
        self.LSTM_inner_activation = activations.get(LSTM_inner_activation)

        # output_mode may be a default mode or a per-layer mode like 'R0'.
        default_output_modes = ['prediction', 'error', 'all']
        layer_output_modes = [
            layer + str(n) for n in range(self.nb_layers)
            for layer in ['R', 'E', 'A', 'Ahat']
        ]
        assert output_mode in default_output_modes + layer_output_modes, 'Invalid output_mode: ' + str(
            output_mode)
        self.output_mode = output_mode
        if self.output_mode in layer_output_modes:
            # Split e.g. 'Ahat2' into type 'Ahat' and layer index 2.
            self.output_layer_type = self.output_mode[:-1]
            self.output_layer_num = int(self.output_mode[-1])
        else:
            self.output_layer_type = None
            self.output_layer_num = None

        # self.dim_ordering = 'tf'
        # Negative axes assume channels_last ordering.
        self.channel_axis = -1
        self.row_axis = -3
        self.column_axis = -2

        super(PredNet, self).__init__(**kwargs)
        self.input_spec = [InputSpec(ndim=5)
                           ]  # (batch_num, time_step, row, column, channel)
Example #27
0
 def __init__(self, n, **kwargs):
   """Repeat a 2D (batch, features) input `n` times along a new time axis."""
   super(RepeatVector, self).__init__(**kwargs)
   self.n = n
   self.input_spec = InputSpec(ndim=2)
 def __init__(self, size=size_mult, data_format=None, **kwargs):
     """Bilinear 2D upsampling layer.

     NOTE(review): the default `size=size_mult` references a module-level
     name evaluated at def time — confirm `size_mult` is defined where this
     class lives.
     """
     super(BilinearUpSampling2D, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.size = conv_utils.normalize_tuple(size, 2, 'size')
     self.input_spec = InputSpec(ndim=4)
Example #29
0
    def build(self, input_shape):
        """Create fused ConvLSTM2D kernels, state specs, and per-gate slices.

        Kernels store all four gates stacked along the last axis
        (filters * 4) and are then sliced into per-gate views i, f, c, o.
        """
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
        # Stateful layers need a fixed batch size for state preallocation.
        batch_size = input_shape[0] if self.stateful else None
        self.input_spec[0] = InputSpec(shape=(batch_size, None) +
                                       input_shape[2:])

        if self.stateful:
            self.reset_states()
        else:
            # initial states: 2 all-zero tensor of shape (filters)
            self.states = [None, None]

        if self.data_format == 'channels_first':
            channel_axis = 2
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        # State spec pins only the channel axis; spatial dims stay free.
        state_shape = [None] * 4
        state_shape[channel_axis] = input_dim
        state_shape = tuple(state_shape)
        self.state_spec = [
            InputSpec(shape=state_shape),
            InputSpec(shape=state_shape)
        ]
        # Fused shapes: last dim holds all four gates.
        kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
        self.kernel_shape = kernel_shape
        recurrent_kernel_shape = self.kernel_size + (self.filters,
                                                     self.filters * 4)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=recurrent_kernel_shape,
            initializer=self.recurrent_initializer,
            name='recurrent_kernel',
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters * 4, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
            if self.unit_forget_bias:
                # Initialize the forget-gate slice of the bias to 1.
                bias_value = np.zeros((self.filters * 4, ))
                bias_value[self.filters:self.filters * 2] = 1.
                K.set_value(self.bias, bias_value)
        else:
            self.bias = None

        # Per-gate views into the fused kernels; gate order is i, f, c, o.
        self.kernel_i = self.kernel[:, :, :, :self.filters]
        self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
        self.kernel_f = self.kernel[:, :, :, self.filters:self.filters * 2]
        self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.
                                                        filters:self.filters *
                                                        2]
        self.kernel_c = self.kernel[:, :, :, self.filters * 2:self.filters * 3]
        self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters *
                                                        2:self.filters * 3]
        self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
        self.recurrent_kernel_o = self.recurrent_kernel[:, :, :,
                                                        self.filters * 3:]

        if self.use_bias:
            self.bias_i = self.bias[:self.filters]
            self.bias_f = self.bias[self.filters:self.filters * 2]
            self.bias_c = self.bias[self.filters * 2:self.filters * 3]
            self.bias_o = self.bias[self.filters * 3:]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
        self.built = True