def bottleneck(block_id, inputs, output_filters, expansion_factor):
    # Inverted residual block: 1x1 expansion -> 3x3 depthwise -> 1x1 projection.
    bn_name = bn_name_template.format(block_id) + '_'
    input_channels = KK.int_shape(inputs)[-1]  # static channel count of the input
    expanded_filters = output_filters * expansion_factor
    hidden = layers.Conv2D(filters=expanded_filters, kernel_size=(1, 1),
                           strides=1, padding='same',
                           name=bn_name + 'conv0')(inputs)
    hidden = layers.BatchNormalization(name=bn_name + 'bn0')(hidden)
    hidden = layers.ReLU(6., name=bn_name + 'relu0')(hidden)
    hidden = layers.DepthwiseConv2D(kernel_size=(3, 3), strides=1,
                                    padding='same',
                                    name=bn_name + 'depth_conv0')(hidden)
    hidden = layers.BatchNormalization(name=bn_name + 'bn1')(hidden)
    hidden = layers.ReLU(6., name=bn_name + 'relu1')(hidden)
    hidden = layers.Conv2D(filters=output_filters, kernel_size=(1, 1),
                           strides=1, padding='same',
                           name=bn_name + 'conv1')(hidden)
    hidden = layers.BatchNormalization(name=bn_name + 'bn2')(hidden)
    # A residual connection is only valid when input and output channel
    # counts match.
    if input_channels == output_filters:
        print("************** Residual connection made. **************")
        hidden = layers.Add(name=bn_name + 'add0')([inputs, hidden])
    else:
        print("!!!!!!!!!!!!!! No residual connection made. !!!!!!!!!!!!!!")
        print("input_channels: {}\toutput_filters: {}".format(
            input_channels, output_filters))
    return hidden
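# A minimal usage sketch for the block above, assuming `bn_name_template` is a
# module-level format string (the one below is a stand-in) and that `layers`
# and `KK` are `keras.layers` and `keras.backend`. Shapes are illustrative.
bn_name_template = 'bottleneck{}'
example_in = layers.Input(shape=(32, 32, 16))
# 16 input channels == 16 output filters, so the residual add branch is taken.
example_out = bottleneck(0, example_in, output_filters=16, expansion_factor=6)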
def build(self, input_shape):
    # Build a 6-channel backbone from a 3-channel ImageNet model: copy the
    # pretrained weights into the first three input channels and keep the
    # random initialization for the remaining three.
    base_model = self.model(3, 'imagenet')
    ext_model = self.model(6, None)
    ext_weights = []
    for base_weight, ext_weight in zip(base_model.get_weights(),
                                       ext_model.get_weights()):
        if base_weight.shape != ext_weight.shape:
            ext_weight[:, :, :3, :] = base_weight
        ext_weights.append(ext_weight)
    ext_model.set_weights(ext_weights)
    self.backbone = ext_model
    # One spectrally normalized conv stack per backbone stage.
    self.shortcuts = [
        models.Sequential([
            addon_layers.SpectralNormalization(
                SameConv(filters, 3, use_bias=False)),
            layers.ReLU(),
            layers.BatchNormalization(),
            addon_layers.SpectralNormalization(
                SameConv(filters, 3, use_bias=False)),
            layers.ReLU(),
            layers.BatchNormalization()
        ]) for filters in [32, 32, 64, 128, 256, 512]
    ]
    self.up2 = ResizeByScale(2)
    super().build(input_shape)
def __init__(self, shape):
    self.re_rate = 0.9  # L2 regularization weight
    self.model = models.Sequential()
    self.model.add(
        layers.Conv3D(16, (3, 3, 3),
                      kernel_regularizer=regularizers.l2(self.re_rate),
                      input_shape=shape))
    self.model.add(layers.ReLU())
    self.model.add(
        layers.Conv3D(16, (3, 3, 3),
                      kernel_regularizer=regularizers.l2(self.re_rate)))
    self.model.add(layers.ReLU())
    self.model.add(layers.MaxPooling3D((2, 2, 2)))
    self.model.add(layers.Dropout(rate=0.25))
    self.model.add(
        layers.Conv3D(32, (3, 3, 3),
                      kernel_regularizer=regularizers.l2(self.re_rate)))
    self.model.add(layers.ReLU())
    self.model.add(
        layers.Conv3D(32, (3, 3, 3),
                      kernel_regularizer=regularizers.l2(self.re_rate)))
    self.model.add(layers.ReLU())
    self.model.add(layers.MaxPooling3D((2, 2, 2)))
    self.model.add(layers.Dropout(rate=0.25))
    self.model.add(layers.Flatten())
    self.model.add(layers.Dense(16))
    self.model.add(layers.Dense(4, activation='softmax'))
def gen_model(shape):
    inputs = layers.Input(shape=shape)
    # First conv layer: four parallel convolutions with different kernel
    # widths, concatenated along the channel axis.
    c_1 = layers.Conv2D(48, (3, 8), padding='same')(inputs)
    c_2 = layers.Conv2D(32, (3, 32), padding='same')(inputs)
    c_3 = layers.Conv2D(16, (3, 64), padding='same')(inputs)
    c_4 = layers.Conv2D(16, (3, 90), padding='same')(inputs)
    conv_1 = layers.Concatenate()([c_1, c_2, c_3, c_4])
    x = layers.BatchNormalization()(conv_1)
    x = layers.ReLU()(x)
    # x = layers.MaxPooling2D((5, 5))(x)
    x = layers.AveragePooling2D((5, 5))(x)
    # Second conv layer
    x = layers.Conv2D(224, 5)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # x = layers.MaxPooling2D((6, 4))(x)
    x = layers.AveragePooling2D((6, 4))(x)
    # Output layer
    x = layers.Flatten()(x)
    # x = layers.Dropout(0.5)(x)
    x = layers.Dense(64)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs, x)
    return model
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1):
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)
    if strides == (1, 1):
        x = inputs
    else:
        # Pad manually so the strided depthwise conv can use 'valid' padding.
        x = layers.ZeroPadding2D(((0, 1), (0, 1)),
                                 name='conv_pad_%d' % block_id)(inputs)
    x = layers.DepthwiseConv2D((3, 3),
                               padding='same' if strides == (1, 1) else 'valid',
                               depth_multiplier=depth_multiplier,
                               strides=strides,
                               use_bias=False,
                               name='conv_dw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same',
                      use_bias=False,
                      strides=(1, 1),
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='conv_pw_%d_bn' % block_id)(x)
    return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
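# Hedged usage sketch for the block above: `layers` and `backend` are assumed
# to be `keras.layers` and `keras.backend`; shapes and filter counts are
# illustrative, not MobileNet's exact schedule.
feat = layers.Input(shape=(112, 112, 32))
y = _depthwise_conv_block(feat, 64, alpha=1.0, block_id=1)  # keeps 112x112
y = _depthwise_conv_block(y, 128, alpha=1.0, strides=(2, 2), block_id=2)  # 56x56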
def residual_block_entry(x, nb_filters):
    """
    Create a residual block using Depthwise Separable Convolutions
    x         : input into residual block
    nb_filters: number of filters
    """
    shortcut = x

    # First Depthwise Separable Convolution
    x = layers.SeparableConv2D(nb_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Second Depthwise Separable Convolution
    x = layers.SeparableConv2D(nb_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Create pooled feature maps, reduce spatial size by 75%
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add strided convolution to the identity link to match the output of the
    # residual block for the add operation
    shortcut = layers.Conv2D(nb_filters, (1, 1), strides=(2, 2),
                             padding='same')(shortcut)
    shortcut = layers.BatchNormalization()(shortcut)

    x = layers.add([x, shortcut])
    return x
def build_generator(latent_dim):
    latent_input = layers.Input(shape=(latent_dim,))
    class_input = layers.Input(shape=(1,), dtype='int32')
    # Condition the latent vector on the class label via an embedding.
    emb = layers.Embedding(10, latent_dim,
                           embeddings_initializer='glorot_normal')(class_input)
    emb = layers.Flatten()(emb)
    generator_input = layers.Multiply()([latent_input, emb])
    x = layers.Dense(1024)(generator_input)
    x = layers.ReLU()(x)
    x = layers.Dense(128 * 7 * 7)(x)
    x = layers.ReLU()(x)
    # Note: this reshape assumes the 'channels_first' image data format.
    x = layers.Reshape((128, 7, 7))(x)
    x = layers.UpSampling2D((2, 2))(x)
    x = layers.Conv2D(256, 5, padding='same',
                      bias_initializer='glorot_normal')(x)
    x = layers.ReLU()(x)
    x = layers.UpSampling2D((2, 2))(x)
    x = layers.Conv2D(128, 5, padding='same',
                      bias_initializer='glorot_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(1, 2, padding='same',
                      bias_initializer='glorot_normal')(x)
    fake_image = layers.Activation('tanh')(x)
    generator = keras.models.Model(inputs=[latent_input, class_input],
                                   outputs=fake_image)
    return generator
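# Hedged usage sketch: drawing one conditional sample from the generator
# above (latent size and class label are illustrative; the output shape
# assumes the channels_first format noted in the comment inside the builder).
import numpy as np
gen = build_generator(latent_dim=100)
z = np.random.normal(size=(1, 100))
label = np.array([[3]])
fake = gen.predict([z, label])  # -> (1, 1, 28, 28)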
def convo_block(y, filtros, num_conv, res=0):
    # `filtros` is the number of output filters; `res` toggles a residual
    # shortcut instead of max pooling.
    if res:
        x = y
    s = 2  # only the first convolution in the block downsamples
    for i in range(num_conv):
        y = layers.Conv2D(filtros, kernel_size=(3, 3), strides=(s, s),
                          padding='same')(y)
        s = 1
        y = layers.BatchNormalization()(y)
        y = layers.GaussianNoise(0.3)(y)
        if i < num_conv - 1:
            y = layers.ReLU()(y)
    if res:
        # Project the shortcut with a strided 1x1 convolution so shapes match.
        x = layers.Conv2D(filtros, kernel_size=(1, 1), strides=(2, 2),
                          padding='same')(x)
        y = layers.add([x, y])
        y = layers.ReLU()(y)
    else:
        y = layers.MaxPooling2D(pool_size=(2, 2))(y)
    return y
def conv_block(n_filters, x, strides=(2, 2)):
    """
    Create Block of Convolutions with feature pooling
    Increase the number of filters by 4X
    n_filters: number of filters
    x        : input into the block
    """
    # construct the identity link
    # increase filters by 4X to match shape when added to output of block
    shortcut = layers.Conv2D(4 * n_filters, (1, 1), strides=strides)(x)
    shortcut = layers.BatchNormalization()(shortcut)

    # construct the 1x1, 3x3, 1x1 convolution block
    # feature pooling when strides=(2, 2)
    x = layers.Conv2D(n_filters, (1, 1), strides=strides)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    x = layers.Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # increase the number of filters by 4X
    x = layers.Conv2D(4 * n_filters, (1, 1), strides=(1, 1))(x)
    x = layers.BatchNormalization()(x)

    # add the identity link to the output of the convolution block
    x = layers.add([x, shortcut])
    x = layers.ReLU()(x)
    return x
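# Hedged sketch: chaining the projection block above into a ResNet-style
# stage (input shape and filter count are illustrative).
stage_in = layers.Input(shape=(56, 56, 64))
stage = conv_block(64, stage_in)               # downsample to 28x28x256
stage = conv_block(64, stage, strides=(1, 1))  # keep 28x28, refine features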
def make_model(self): """ Makes the action-value neural network model using Keras. :return: action-value neural network. :rtype: Keras' model. """ model = models.Sequential() model.add( layers.Dense(self.hidden_units[0], activation=activations.linear, input_dim=self.input_shape)) model.add(layers.ReLU()) model.add( layers.Dense(self.hidden_units[1], activation=activations.linear)) model.add(layers.ReLU()) model.add( layers.Dense(self.hidden_units[2], activation=activations.linear)) model.add(layers.ReLU()) model.add(layers.Dense(self.output_size, activation=activations.linear)) model.compile(loss=losses.mse, optimizer=optimizers.Adam(lr=self.learning_rate)) model.summary() return model
def make_model(self):
    self.model = models.Sequential()
    # input layer
    self.model.add(layers.Conv2D(4, 2, input_shape=self.state_input_shape))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.ReLU())
    self.model.add(layers.Dropout(0.2))
    # max pooling
    self.model.add(layers.MaxPooling2D(pool_size=4))
    self.model.add(layers.Dropout(0.2))
    self.model.add(layers.Flatten())
    # hidden layer with 8 neurons
    self.model.add(layers.Dense(8))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.ReLU())
    # output layer (4-neuron output)
    self.model.add(layers.Dense(self.action_output_size))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Activation('linear'))
    self.model.compile(optimizer=optimizers.Adam(lr=self.alpha),
                       loss='mse',
                       metrics=['accuracy'])
def create_value_head(self, previous_block):
    value_block = layers.Conv2D(
        filters=1,
        kernel_size=(1, 1),
        strides=1,
        padding="same",
        data_format="channels_last",
        activation="linear",
        use_bias=False,
        kernel_regularizer=regularizers.l2(
            l=self.regularization_const))(previous_block)
    # The conv above is channels_last, so normalize over the last axis.
    value_block = layers.BatchNormalization(axis=-1)(value_block)
    value_block = layers.ReLU()(value_block)
    value_block = layers.Flatten()(value_block)
    value_block = layers.Dense(
        units=256,
        activation="linear",
        use_bias=False,
        kernel_regularizer=regularizers.l2(l=self.regularization_const),
    )(value_block)
    value_block = layers.ReLU()(value_block)
    value_block = layers.Dense(
        units=1,
        activation="tanh",
        use_bias=False,
        kernel_regularizer=regularizers.l2(l=self.regularization_const),
        name="value_head")(value_block)
    return value_block
def depthwise_block(x, nb_filters, alpha, strides):
    """
    Create a Depthwise Separable Convolution block
    x         : input tensor
    nb_filters: number of filters
    alpha     : width multiplier
    strides   : strides
    """
    # Apply the width multiplier to the number of feature maps
    filters = int(nb_filters * alpha)

    # Pad manually when striding so the depthwise conv can use 'valid' padding
    if strides == (2, 2):
        x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
        padding = 'valid'
    else:
        padding = 'same'

    # Depthwise Convolution
    x = layers.DepthwiseConv2D((3, 3), strides, padding=padding)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Pointwise Convolution
    x = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    return x
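# Hedged usage sketch: a MobileNet-style stem followed by two of the blocks
# above (shapes and the alpha value are illustrative).
img = layers.Input(shape=(224, 224, 3))
y = layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same')(img)
y = depthwise_block(y, 64, alpha=0.75, strides=(1, 1))
y = depthwise_block(y, 128, alpha=0.75, strides=(2, 2))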
def convBlock(input, filters):
    x = layers.Conv2D(filters, (3, 3), padding='same')(input)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    return layers.ReLU()(x)
def _depthwise_conv_block_td(inputs, pointwise_conv_filters, alpha,
                             depth_multiplier=1, strides=(1, 1), block_id=1):
    channel_axis = 3
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)
    if strides == (1, 1):
        x = inputs
    else:
        # Pad inside TimeDistributed since the input here is 5D (time axis).
        x = TimeDistributed(layers.ZeroPadding2D(((0, 1), (0, 1))),
                            name='conv_pad_td_%d' % block_id)(inputs)
    x = TimeDistributed(layers.DepthwiseConv2D(
        (3, 3),
        padding='same' if strides == (1, 1) else 'valid',
        depth_multiplier=depth_multiplier,
        strides=strides,
        use_bias=False), name='conv_dw_td_%d' % block_id)(x)
    x = TimeDistributed(layers.BatchNormalization(axis=channel_axis),
                        name='conv_dw_td_%d_bn' % block_id)(x)
    x = layers.ReLU(6., name='conv_dw_td_%d_relu' % block_id)(x)
    x = TimeDistributed(layers.Conv2D(pointwise_conv_filters, (1, 1),
                                      padding='same',
                                      use_bias=False,
                                      strides=(1, 1)),
                        name='conv_pw_td_%d' % block_id)(x)
    x = TimeDistributed(layers.BatchNormalization(axis=channel_axis),
                        name='conv_pw_td_%d_bn' % block_id)(x)
    return layers.ReLU(6., name='conv_pw_td_%d_relu' % block_id)(x)
def upConvBlock(input, skip, filters):
    x = layers.UpSampling2D((2, 2))(input)
    # Merge the upsampled path with the encoder skip connection.
    x = layers.Conv2D(filters, (3, 3), padding='same')(
        layers.Concatenate()([x, skip]))
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # Integer division: Conv2D expects an integer filter count.
    x = layers.Conv2D(filters // 2, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    return layers.ReLU()(x)
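# Hedged sketch: wiring convBlock and upConvBlock into a tiny U-Net-style
# encoder/decoder (depth, shapes, and filter counts are illustrative).
unet_in = layers.Input(shape=(64, 64, 3))
d1 = convBlock(unet_in, 32)
p1 = layers.MaxPooling2D((2, 2))(d1)
d2 = convBlock(p1, 64)
u1 = upConvBlock(d2, d1, 64)  # upsample back to 64x64 and fuse the skip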
def ResBlock(x, filters):
    res = x
    x = L.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
    x = L.ReLU()(x)
    x = L.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
    x = L.ReLU()(x)
    x = L.Add()([x, res])
    return x
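# Hedged sketch: stacking the identity ResBlock above. The input channel
# count must equal `filters` for the Add to be valid; `L` is assumed to be
# the module's alias for `keras.layers`.
res_in = L.Input(shape=(32, 32, 64))
res_out = ResBlock(res_in, 64)
res_out = ResBlock(res_out, 64)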
def residual_unit(input, size, filters, downsample, kernel_initializer,
                  bias_initializer, kernel_regularizer, bias_regularizer):
    """
    Residual unit using pre-activation as described in:
    https://arxiv.org/pdf/1603.05027v2.pdf

    Note that we use 1x1 convolutions to transform the dimension of residuals
    so we can increase filters and downsample spatial dimensions. Ideally we
    would do zero-padding along the filter axis and downsample through stride
    2 or some type of pooling. However, this would require more code
    complexity (while reducing parameter complexity). We may implement a
    lambda layer doing exactly this later on.

    :param input: The input tensor.
    :param size: The size of the convolutional filters.
    :param filters: The number of filters in each convolutional layer of the
        residual unit.
    :param downsample: Whether to downsample at the beginning of the layer.
        If so we downsample by 2 and we use 1x1 convolutions to resize the
        residual.
    :param kernel_initializer: Kernel initializer for all layers in module.
    :param bias_initializer: Bias initializer for all layers in module.
    :param kernel_regularizer: Kernel regularizer for all layers in module.
    :param bias_regularizer: Bias regularizer for all layers in module.
    :return: The output of the residual unit, which consists of the sum of
        the output of the previous layer and the output of the layers in the
        residual unit.
    """
    strides = 2 if downsample else 1

    def get_convolution(filters, strides):
        return lyr.Conv2D(filters, size, strides=strides, padding='same',
                          kernel_initializer=kernel_initializer,
                          bias_initializer=bias_initializer,
                          kernel_regularizer=kernel_regularizer,
                          bias_regularizer=bias_regularizer)

    # Pre-activation ordering: BN -> ReLU -> conv, twice.
    int1 = lyr.BatchNormalization()(input)
    int2 = lyr.ReLU()(int1)
    int3 = get_convolution(filters, strides)(int2)
    int4 = lyr.BatchNormalization()(int3)
    int5 = lyr.ReLU()(int4)
    int6 = get_convolution(filters, 1)(int5)

    # If downsampling we use convolutional filters to increase filters
    # and reduce the size of the image. This gets dimensions to match.
    if downsample:
        res = get_convolution(filters, 2)(input)
    else:
        res = input

    out = lyr.Add()([res, int6])
    return out
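# Hedged usage sketch for the pre-activation unit above: `lyr` is assumed to
# be the module's alias for `keras.layers`; the initializers are plain
# defaults chosen for illustration.
unit_in = lyr.Input(shape=(32, 32, 16))
u = residual_unit(unit_in, 3, 16, downsample=False,
                  kernel_initializer='he_normal', bias_initializer='zeros',
                  kernel_regularizer=None, bias_regularizer=None)
u = residual_unit(u, 3, 32, downsample=True,
                  kernel_initializer='he_normal', bias_initializer='zeros',
                  kernel_regularizer=None, bias_regularizer=None)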
def bottleneck_identity_block(inputs, filters=(64, 64, 256),
                              activation='relu', kernel_size=(3, 3)):
    """
    Follows the bottleneck architecture implementation of an identity block.

    :param inputs: input tensor
    :param filters: filter counts for the three convolutions
    :param activation: 'relu' or 'prelu'
    :param kernel_size: kernel size of the middle convolution
    :return: output tensor of the block
    """
    # Set residual and shortcut as inputs for clarity
    shortcut = inputs
    residual = inputs
    f1, f2, f3 = filters  # Filters take specific numbers only

    # First convolution layer -> BN -> activation
    residual = Conv2D(f1, kernel_size=(1, 1), strides=(1, 1),
                      padding='valid')(residual)
    residual = BatchNormalization()(residual)
    if activation == 'relu':
        residual = layers.ReLU()(residual)
    elif activation == 'prelu':
        residual = layers.PReLU()(residual)
    else:
        raise NotImplementedError

    # Second convolution layer -> BN -> activation
    # Padding is 'same' to ensure same dims
    residual = Conv2D(f2, kernel_size=kernel_size, strides=(1, 1),
                      padding='same')(residual)
    residual = BatchNormalization()(residual)
    if activation == 'relu':
        residual = layers.ReLU()(residual)
    elif activation == 'prelu':
        residual = layers.PReLU()(residual)
    else:
        raise NotImplementedError

    # Third convolution layer -> BN (no activation before the add)
    residual = Conv2D(f3, kernel_size=(1, 1), strides=(1, 1),
                      padding='valid')(residual)
    residual = BatchNormalization()(residual)

    # Add shortcut to residual, then pass through the activation
    output = layers.Add()([shortcut, residual])
    if activation == 'relu':
        output = layers.ReLU()(output)
    elif activation == 'prelu':
        output = layers.PReLU()(output)
    else:
        raise NotImplementedError
    return output
def _inverted_res_block(self, inputs, expansion, stride, alpha, filters,
                        block_id):
    channel_axis = -1
    in_channels = backend.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = self._make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = layers.Conv2D(expansion * in_channels,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        # The first block skips the expansion step.
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
                                 name=prefix + 'pad')(x)
    x = layers.DepthwiseConv2D(kernel_size=3,
                               strides=stride,
                               activation=None,
                               use_bias=False,
                               padding='same' if stride == 1 else 'valid',
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)
    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = layers.Conv2D(pointwise_filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      activation=None,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project_BN')(x)

    # Residual connection only when the block preserves shape.
    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
def build_model(self):
    """Build a critic (value) network that maps (state, action) pairs
    to Q-values."""
    # Define input layers
    states = layers.Input(shape=(self.state_size,), name='states')
    actions = layers.Input(shape=(self.action_size,), name='actions')

    # Add hidden layer(s) for state pathway
    # (assumes `from keras import regularizers`)
    # net_states = layers.Dense(units=32, activation='relu')(states)
    # net_states = layers.Dense(units=64, activation='relu')(net_states)
    net_states = layers.Dense(
        units=400, kernel_regularizer=regularizers.l2(1e-6))(states)
    net_states = layers.BatchNormalization()(net_states)
    net_states = layers.ReLU()(net_states)
    net_states = layers.Dense(
        units=300, kernel_regularizer=regularizers.l2(1e-6))(net_states)
    net_states = layers.BatchNormalization()(net_states)
    net_states = layers.ReLU()(net_states)

    # Add hidden layer(s) for action pathway
    # net_actions = layers.Dense(units=32, activation='relu')(actions)
    # net_actions = layers.Dense(units=64, activation='relu')(net_actions)
    net_actions = layers.Dense(
        units=400, kernel_regularizer=regularizers.l2(1e-6))(actions)
    net_actions = layers.BatchNormalization()(net_actions)
    net_actions = layers.ReLU()(net_actions)
    net_actions = layers.Dense(
        units=300, kernel_regularizer=regularizers.l2(1e-6))(net_actions)
    net_actions = layers.BatchNormalization()(net_actions)
    net_actions = layers.ReLU()(net_actions)

    # Try different layer sizes, activations, batch normalization,
    # regularizers, etc.

    # Combine state and action pathways
    net = layers.Add()([net_states, net_actions])
    net = layers.Activation('relu')(net)

    # Add more layers to the combined network if needed

    # Add final output layer to produce action values (Q values)
    Q_values = layers.Dense(units=1, name='q_values')(net)

    # Create Keras model
    self.model = models.Model(inputs=[states, actions], outputs=Q_values)

    # Define optimizer and compile model for training with built-in loss
    optimizer = optimizers.Adam()
    self.model.compile(optimizer=optimizer, loss='mse')

    # Compute action gradients (derivative of Q values w.r.t. the actions)
    action_gradients = K.gradients(Q_values, actions)

    # Define an additional function to fetch action gradients (to be used
    # by the actor model)
    self.get_action_gradients = K.function(
        inputs=[*self.model.input, K.learning_phase()],
        outputs=action_gradients)
def video_middle_part(video_part_in):
    video_part_in = layers.BatchNormalization()(video_part_in)
    video_part_in = layers.ReLU()(video_part_in)  # (None, 15, 4, 16)
    video_part_in = layers.Conv2D(16, (3, 3), strides=(2, 1),
                                  padding='same')(video_part_in)
    video_part_in = layers.BatchNormalization()(video_part_in)
    video_part_in = layers.ReLU()(video_part_in)  # (None, 5, 4, 16)
    video_part_in = Flatten()(video_part_in)  # 320
    return video_part_in
def bottleneck_projection_block(inputs, filters, activation='relu',
                                kernel_size=(3, 3), strides=(2, 2)):
    # Set residual and shortcut as inputs for clarity
    shortcut = inputs
    residual = inputs
    f1, f2, f3 = filters  # Filters take specific numbers only

    # First convolution layer -> BN -> activation
    residual = Conv2D(f1, kernel_size=(1, 1), strides=strides,
                      padding='valid')(residual)
    residual = BatchNormalization()(residual)
    if activation == 'relu':
        residual = layers.ReLU()(residual)
    elif activation == 'prelu':
        residual = layers.PReLU()(residual)
    else:
        raise NotImplementedError

    # Second convolution layer -> BN -> activation
    # Padding is 'same' to ensure same dims
    residual = Conv2D(f2, kernel_size=kernel_size, strides=(1, 1),
                      padding='same')(residual)
    residual = BatchNormalization()(residual)
    if activation == 'relu':
        residual = layers.ReLU()(residual)
    elif activation == 'prelu':
        residual = layers.PReLU()(residual)
    else:
        raise NotImplementedError

    # Third convolution layer -> BN (no activation before the add)
    residual = Conv2D(f3, kernel_size=(1, 1), strides=(1, 1),
                      padding='valid')(residual)
    residual = BatchNormalization()(residual)

    # Projection of shortcut through a convolution with the same strides as
    # the first conv layer
    shortcut = Conv2D(f3, kernel_size=(1, 1), strides=strides,
                      padding='valid')(shortcut)
    shortcut = BatchNormalization()(shortcut)

    # Add shortcut to residual, pass through activation
    output = layers.Add()([shortcut, residual])
    if activation == 'relu':
        output = layers.ReLU()(output)
    elif activation == 'prelu':
        output = layers.PReLU()(output)
    else:
        raise NotImplementedError
    return output
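# Hedged sketch: a ResNet-style stage built from the two bottleneck blocks
# defined above (input shape and filter counts are illustrative).
stage_input = layers.Input(shape=(56, 56, 256))
z1 = bottleneck_projection_block(stage_input, (128, 128, 512))  # downsamples to 28x28
z1 = bottleneck_identity_block(z1, (128, 128, 512))             # keeps the shape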
def value_head(x):
    # Note: the 'relu' activations on the Conv1D and first Dense layer are
    # each followed by an explicit ReLU layer, so the extra ReLUs are
    # redundant but harmless.
    x = layers.Conv1D(1, 1, activation='relu', padding='same')(x)
    x = layers.BatchNormalization(axis=1)(x)
    x = layers.ReLU()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.ReLU()(x)
    x = layers.Dense(1, activation='tanh', name='value_head')(x)
    return x
def audio_middle_part(audio_part_in):
    audio_part_in = layers.BatchNormalization()(audio_part_in)
    audio_part_in = layers.ReLU()(audio_part_in)  # (None, 15, 4, 16)
    audio_part_in = layers.Conv2D(16, (5, 3), strides=(3, 1),
                                  padding='same')(audio_part_in)
    audio_part_in = layers.BatchNormalization()(audio_part_in)
    audio_part_in = layers.ReLU()(audio_part_in)  # (None, 5, 4, 16)
    audio_part_in = Flatten()(audio_part_in)  # 320
    return audio_part_in
def MyModel_Conv2D(name='MyModel_Conv2D',
                   sta_num=20,
                   lt_shape=(3, 15, 20),
                   st_shape=(72, 20),
                   wd_shape=(4, 1),
                   ef_shape=(4, 24),
                   op_shape=(1, 20),
                   optimizer='adam',
                   metrics=['mae'],
                   loss='mse') -> Model:
    print('model name:', name)
    lt = layers.Input(shape=lt_shape)
    st = layers.Input(shape=st_shape)
    lt_wd = layers.Input(shape=(lt_shape[0], wd_shape[1]))
    st_wd = layers.Input(shape=(wd_shape[1],))
    lt_ef = layers.Input(shape=(lt_shape[0], ef_shape[1]))
    st_ef = layers.Input(shape=(ef_shape[1],))

    # Long-term branch: ConvLSTM over the (time, height, width) volume.
    y1 = layers.Reshape((lt_shape[0], lt_shape[1], lt_shape[2], 1))(lt)
    y1 = layers.ConvLSTM2D(filters=64, kernel_size=(6, 6), padding='same',
                           return_sequences=True)(y1)
    y1 = layers.Dropout(0.5)(y1)
    y1 = layers.ConvLSTM2D(filters=64, kernel_size=(6, 6), padding='same',
                           return_sequences=True)(y1)
    y1 = layers.Dropout(0.25)(y1)
    y1 = layers.Dense(1)(y1)
    y1 = layers.Reshape(lt_shape)(y1)
    y1 = layers.TimeDistributed(attention())(y1)
    y1 = layers.Flatten()(y1)

    # Short-term branch: plain 2D convolutions.
    y2 = layers.Reshape((st_shape[0], st_shape[1], 1))(st)
    y2 = layers.Conv2D(filters=64, kernel_size=(6, 6), padding='same')(y2)
    y2 = layers.ReLU()(y2)
    y2 = layers.Conv2D(filters=64, kernel_size=(6, 6), padding='same')(y2)
    y2 = layers.ReLU()(y2)
    y2 = layers.Dense(1)(y2)
    y2 = layers.Reshape(st_shape)(y2)
    y2 = attention()(y2)

    # External-factor branch: weather and extra features through an LSTM.
    ltef = layers.concatenate([lt_wd, lt_ef])
    stef = layers.concatenate([st_wd, st_ef])
    stef = layers.Reshape((1, wd_shape[1] + ef_shape[1]))(stef)
    ef = layers.concatenate([ltef, stef], axis=1)
    ef = layers.LSTM(64, return_sequences=True)(ef)
    ef = layers.Dropout(0.2)(ef)
    ef = layers.LSTM(64, return_sequences=True)(ef)
    ef = layers.Dropout(0.2)(ef)
    ef = layers.ReLU()(ef)
    ef = attention()(ef)

    # Fuse the three branches and map to one output per station.
    y = layers.concatenate([y1, y2, ef])
    y = layers.Dense(sta_num)(y)
    model = Model([lt, st, lt_wd, st_wd, lt_ef, st_ef], [y], name=name)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
def binarize(self, fuse):
    p = layers.Conv2D(64, (3, 3), padding='same',
                      kernel_initializer='he_normal', use_bias=False)(fuse)
    p = layers.BatchNormalization()(p)
    p = layers.ReLU()(p)
    p = layers.Conv2DTranspose(64, (2, 2), strides=(2, 2),
                               kernel_initializer='he_normal',
                               use_bias=False)(p)
    p = layers.BatchNormalization()(p)
    p = layers.ReLU()(p)
    # Final upsampling to a single-channel probability map.
    p = layers.Conv2DTranspose(1, (2, 2), strides=(2, 2),
                               kernel_initializer='he_normal',
                               activation='sigmoid')(p)
    return p
def fully_connected(x, output_dims, bn=True, relu6=False, activation=True):
    x = layers.Dense(output_dims)(x)
    if bn:
        x = layers.BatchNormalization()(x)
    if activation:
        if relu6:
            x = layers.ReLU(6.)(x)
        else:
            x = layers.ReLU()(x)
    return x
def conv_bn_relu(x, output_channels, bn=True, relu6=False):
    # According to the MobileNet paper, ReLU6 is more robust when quantizing
    # the model.
    x = layers.Conv2D(output_channels, [1, 1],
                      kernel_initializer='he_normal',
                      use_bias=not bn)(x)
    if bn:
        x = layers.BatchNormalization()(x)
    if relu6:
        x = layers.ReLU(6.)(x)
    else:
        x = layers.ReLU()(x)
    return x
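# Hedged usage sketch for the two helpers above (the head dimensions are
# illustrative; `layers` is assumed to be `keras.layers`).
head_in = layers.Input(shape=(28, 28, 8))
h = conv_bn_relu(head_in, 32, relu6=True)
h = layers.GlobalAveragePooling2D()(h)
h = fully_connected(h, 10, bn=False, activation=False)  # raw logits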
def test_relu_tf_ops():
    inputs = layers.Input((3,))
    # Test that the `relu` op gets used.
    outputs = layers.ReLU()(inputs)
    assert outputs.op.name.lower().endswith('/relu')
    # Test that the `leakyrelu` op gets used.
    outputs = layers.ReLU(negative_slope=0.2)(inputs)
    assert outputs.op.name.lower().endswith('/leakyrelu')
    # Test that the `relu6` op gets used.
    outputs = layers.ReLU(max_value=6)(inputs)
    assert outputs.op.name.lower().endswith('/relu6')