# Example 1 (Ejemplo n.º 1)
# 0
    def call(self, inputs, params=None):
        """Run the separable convolution, optionally with external weights.

        Args:
            inputs: 4-D input tensor; layout given by ``self.data_format``.
            params: Optional mapping from variable names (e.g.
                ``'<layer_name>/depthwise_kernel:0'``) to replacement tensors.
                When ``params`` is None or does not supply a depthwise kernel,
                the stock Keras ``SeparableConv2D.call`` is used instead.

        Returns:
            The convolution output, with bias and activation applied when
            configured on the layer.
        """
        # BUG FIX: the original did ``params[...]``, which raised TypeError
        # when ``params`` was None (the default) and KeyError when the key
        # was absent; ``.get`` after a None-check covers both cases.
        if params is None or params.get(self.name + '/depthwise_kernel:0') is None:
            return super(layers.SeparableConv2D, self).call(inputs)
        depthwise_kernel = params.get(self.name + '/depthwise_kernel:0')
        pointwise_kernel = params.get(self.name + '/pointwise_kernel:0')
        bias = params.get(self.name + '/bias:0')
        # Apply the actual ops.  The batch and channel positions in the
        # strides tuple depend on the data layout.
        if self.data_format == 'channels_last':
            strides = (1, ) + self.strides + (1, )
        else:
            strides = (1, 1) + self.strides
        outputs = nn.separable_conv2d(
            inputs,
            depthwise_kernel,
            pointwise_kernel,
            strides=strides,
            padding=self.padding.upper(),
            rate=self.dilation_rate,
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       ndim=4))

        if self.use_bias:
            outputs = nn.bias_add(outputs,
                                  bias,
                                  data_format=conv_utils.convert_data_format(
                                      self.data_format, ndim=4))

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
# Example 2 (Ejemplo n.º 2)
# 0
  def call(self, inputs):
    """Apply the separable convolution, then optional bias and activation.

    ``nn.separable_conv2d`` (with ``rate=``) only supports channels-last
    input here, so channels-first data is transposed to NHWC around the op.

    Args:
      inputs: 4-D input tensor in the layer's ``data_format``.

    Returns:
      The convolution output in the same layout as ``inputs``.
    """
    if self.data_format == 'channels_first':
      # Reshape to channels last
      inputs = array_ops.transpose(inputs, (0, 2, 3, 1))

    # Apply the actual ops.
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=(1,) + self.strides + (1,),
        padding=self.padding.upper(),
        rate=self.dilation_rate)

    if self.data_format == 'channels_first':
      # Reshape to channels first
      outputs = array_ops.transpose(outputs, (0, 3, 1, 2))

    # BUG FIX: ``if self.bias:`` asked for the truth value of a TF variable,
    # which raises in graph mode; test against None instead (matching the
    # ``bias is not None`` convention used by the sibling implementations).
    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs
# Example 3 (Ejemplo n.º 3)
# 0
def separable_conv1d(value,
                     depthwise_kernel,
                     pointwise_kernel,
                     bias=None,
                     strides=(1, ),
                     padding='VALID',
                     data_format='NWC',
                     dilation_rate=(1, ),
                     name=None):
    """1-D separable convolution, implemented via ``nn.separable_conv2d``.

    The input is lifted to 4-D by inserting a dummy spatial dimension of
    size 1 (height), the 2-D separable conv is applied, and the dummy
    dimension is squeezed away again.

    Args:
        value: 3-D input tensor in ``NWC`` or ``NCW`` layout.
        depthwise_kernel: 3-D depthwise filter.
        pointwise_kernel: 3-D pointwise (1x1) filter.
        bias: Optional bias vector; added when not None.
        strides: 1-tuple of stride along the width dimension.
        padding: ``'VALID'`` or ``'SAME'``.
        data_format: ``'NWC'`` (channels last) or ``'NCW'``.
        dilation_rate: 1-tuple of dilation along the width dimension.
        name: Optional op name.

    Returns:
        3-D output tensor in the same layout as ``value``.
    """
    if data_format[-1] == 'C':
        # Stride is duplicated into both spatial slots; the dummy height
        # dimension has size 1, so its stride value is inert.
        strides = (1, ) + strides * 2 + (1, )
        data_format = 'NHWC'
        axis = 1
    else:
        strides = (1, 1) + strides * 2
        data_format = 'NCHW'
        axis = 2
    # BUG FIX: the 1-D rate was passed straight to the 2-D op; expand it to
    # (height=1, width=rate) as the Keras backend does, so non-default
    # dilation rates are sized correctly for ``nn.separable_conv2d``.
    if len(dilation_rate) == 1:
        dilation_rate = (1, ) + tuple(dilation_rate)
    value = array_ops.expand_dims(value, axis=axis)
    depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
    pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
    value = nn.separable_conv2d(input=value,
                                depthwise_filter=depthwise_kernel,
                                pointwise_filter=pointwise_kernel,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                dilations=dilation_rate,
                                name=name)
    if bias is not None:
        value = nn.bias_add(value, bias, data_format=data_format)
    value = array_ops.squeeze(value, axis=axis)
    return value
# Example 4 (Ejemplo n.º 4)
# 0
  def call(self, inputs):
    """Apply the (non-dilated) separable convolution, bias, and activation.

    ``nn.separable_conv2d`` is invoked with channels-last strides, so
    channels-first input is transposed to NHWC around the op.

    Args:
      inputs: 4-D input tensor in the layer's ``data_format``.

    Returns:
      The convolution output in the same layout as ``inputs``.
    """
    if self.data_format == 'channels_first':
      # Reshape to channels last
      inputs = array_ops.transpose(inputs, (0, 2, 3, 1))

    # Apply the actual ops.
    outputs = nn.separable_conv2d(
        inputs,
        self.depthwise_kernel,
        self.pointwise_kernel,
        strides=(1,) + self.strides + (1,),
        padding=self.padding.upper())

    if self.data_format == 'channels_first':
      # Reshape to channels first
      outputs = array_ops.transpose(outputs, (0, 3, 1, 2))

    # BUG FIX: ``if self.bias:`` asked for the truth value of a TF variable,
    # which raises in graph mode; test against None instead (matching the
    # ``bias is not None`` convention used by the sibling implementations).
    if self.bias is not None:
      outputs = nn.bias_add(
          outputs,
          self.bias,
          data_format=utils.convert_data_format(self.data_format, ndim=4))

    if self.activation is not None:
      return self.activation(outputs)
    return outputs
# Example 5 (Ejemplo n.º 5)
# 0
 def forward(self, inputs):
     """Run the separable convolution, then optional bias and activation.

     The batch/channel slots of the strides tuple are placed according to
     whether ``self.data_format`` is channels-last (ends in 'C') or
     channels-first.
     """
     if self.data_format[-1] == 'C':
         full_strides = (1,) + self.strides + (1,)
     else:
         full_strides = (1, 1) + self.strides
     result = nn.separable_conv2d(input=inputs,
                                  depthwise_filter=self.depthwise_kernel,
                                  pointwise_filter=self.pointwise_kernel,
                                  strides=full_strides,
                                  padding=self.padding,
                                  data_format=self.data_format,
                                  dilations=self.dilation_rate)
     if self.use_bias:
         result = nn.bias_add(result,
                              self.bias,
                              data_format=self.data_format)
     if self.activation is not None:
         result = self.activation(result)
     return result
# Example 6 (Ejemplo n.º 6)
# 0
    def call(self, inputs):
        """Run the separable convolution using the *masked* kernels.

        Uses ``self.masked_depthwise_kernel`` / ``self.masked_pointwise_kernel``
        (presumably pruning-masked weights — confirm against the class
        definition) instead of the raw variables, then applies bias and
        activation as configured.
        """
        # Apply the actual ops.
        if self.data_format == 'channels_last':
            strides = (1, ) + self.strides + (1, )
        else:
            strides = (1, 1) + self.strides

        outputs = nn.separable_conv2d(inputs,
                                      self.masked_depthwise_kernel,
                                      self.masked_pointwise_kernel,
                                      strides=strides,
                                      padding=self.padding.upper(),
                                      rate=self.dilation_rate,
                                      data_format=utils.convert_data_format(
                                          self.data_format, ndim=4))

        # NOTE(review): the rank-1/2/3 dispatch below looks copied from a
        # generic N-D conv base class; for a 2-D separable conv only the
        # rank == 2 branch should fire. The branches are mutually exclusive,
        # so the sequential ifs are safe.
        if self.bias is not None:
            if self.data_format == 'channels_first':
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                if self.rank == 2:
                    outputs = nn.bias_add(outputs,
                                          self.bias,
                                          data_format='NCHW')
                if self.rank == 3:
                    # As of Mar 2017, direct addition is significantly slower than
                    # bias_add when computing gradients. To use bias_add, we collapse Z
                    # and Y into a single dimension to obtain a 4D input tensor.
                    outputs_shape = outputs.shape.as_list()
                    outputs_4d = array_ops.reshape(outputs, [
                        outputs_shape[0], outputs_shape[1],
                        outputs_shape[2] * outputs_shape[3], outputs_shape[4]
                    ])
                    outputs_4d = nn.bias_add(outputs_4d,
                                             self.bias,
                                             data_format='NCHW')
                    outputs = array_ops.reshape(outputs_4d, outputs_shape)
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')

        if self.activation is not None:
            return self.activation(outputs)
        return outputs