class MultiConv(Layer):
    """Applies several convolution layers to one input and concatenates
    their outputs along a configurable channels axis.

    Parameters mirror the Keras ``Layer`` base class, plus:
        convs: iterable of already-constructed convolution layers to run
            in parallel on the same input.
        channelsAxis: axis passed to ``Concatenate`` for merging branch
            outputs (default -1, i.e. channels-last).
    """

    def __init__(self, convs, channelsAxis=-1, trainable=True, name=None, dtype=None):
        super().__init__(trainable, name, dtype)
        self.convs = convs
        self.concat = Concatenate(channelsAxis)

    def build(self, input_shape):
        # Build every branch against the shared input shape, then size the
        # concatenation layer from the branches' computed output shapes.
        for conv in self.convs:
            conv.build(input_shape)
        branch_shapes = [conv.compute_output_shape(input_shape) for conv in self.convs]
        self.concat.build(branch_shapes)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        # Run each branch on the same input and merge the results.
        branch_outputs = [conv.call(inputs) for conv in self.convs]
        return self.concat.call(branch_outputs)
def build(self, input_shape):
    """Builds the inception block's sublayers for the given input shape:
    a 1x1 bottleneck conv, the parallel conv branches (via ``_build_conv``),
    a residual branch (via ``_build_residual``), a channel-axis concatenation,
    and a final batch normalisation sized to the merged output."""
    # name scope used to make sure weights get unique names
    with K.name_scope(self.name):
        self.conv_layers = []
        self.residual_layers = []

        bottleneck_name = 'conv1D_bottleneck_0'
        # name scope used to make sure weights get unique names
        with K.name_scope(bottleneck_name):
            self.bot_layers = Conv1D(filters=self.nb_filters,
                                     kernel_size=1,
                                     strides=self.strides,
                                     padding=self.padding,
                                     use_bias=False,
                                     name=bottleneck_name)
            self.bot_layers.build(input_shape)
        bottleneck_shape = self.bot_layers.compute_output_shape(input_shape)

        # _build_conv appends each parallel branch's output shape to this
        # list; the residual branch's shape is appended afterwards so the
        # concatenation is sized over all branches.
        branch_shapes = []
        self._build_conv(bottleneck_shape, output_shape=branch_shapes)
        branch_shapes.append(self._build_residual(input_shape))

        merge_layer = Concatenate(axis=2)
        merge_layer.build(branch_shapes)
        merged_shape = merge_layer.compute_output_shape(branch_shapes)

        with K.name_scope('norm_0'):
            self.batchnorm = BatchNormalization()
            self.batchnorm.build(merged_shape)

        super(inception, self).build(input_shape)