def build_model_arc(self):
    """Assemble the DPCNN-style classification graph and store it on ``self.tf_model``.

    Builds: region embedding -> base conv block -> log2(seq_len)-2 resnet
    (pyramid) blocks -> global max pooling + dense head. Layer kwargs come
    from ``self.hyper_parameters``; the label count from the processor
    fixes the output dimension.
    """
    # Number of output classes, taken from the fitted label vocabulary.
    output_dim = len(self.pre_processor.label2idx)
    config = self.hyper_parameters
    embed_model = self.embedding.embed_model
    # Region-embedding stage: one Conv1D with BN/PReLU/Dropout, all
    # configured via the hyper-parameter dict.
    layers_region = [
        L.Conv1D(**config['region_embedding']),
        L.BatchNormalization(),
        L.PReLU(),
        L.Dropout(**config['region_dropout'])
    ]
    # Classification head applied after the pyramid: pool over time,
    # dense + BN + PReLU + dropout, then the output projection.
    layers_main = [
        L.GlobalMaxPooling1D(),
        L.Dense(**config['dense']),
        L.BatchNormalization(),
        L.PReLU(),
        L.Dropout(**config['dropout']),
        L.Dense(output_dim, **config['activation'])
    ]
    if isinstance(embed_model, keras.Model):
        first_layer_output = embed_model.output
        first_layer_input = embed_model.inputs
    else:
        # NOTE(review): in the non-Model branch the same object is used as
        # both graph input and output — presumably embed_model is then a
        # bare input tensor; confirm against the embedding implementations.
        first_layer_output = embed_model
        first_layer_input = embed_model
    tensor_out = first_layer_output
    # build region tensors
    for layer in layers_region:
        tensor_out = layer(tensor_out)
    # build the base pyramid layer
    tensor_out = self.conv_block(tensor_out, **config['conv_block'])
    # build the above pyramid layers while `steps > 2`
    # NOTE(review): `.shape[1].value` is TF1-style Dimension access; under
    # TF2 `shape[1]` is already an int (or None) — verify the targeted TF
    # version before reuse.
    seq_len = tensor_out.shape[1].value
    if seq_len is None:
        raise ValueError('`sequence_length` should be explicitly assigned, but it is `None`.')
    # Each resnet block halves the sequence (pyramid); stop once the
    # remaining length would drop to ~4 steps (floor(log2(n)) - 2 blocks).
    for i in range(floor(log2(seq_len)) - 2):
        tensor_out = self.resnet_block(tensor_out, stage=i + 1, **config['resnet_block'])
    for layer in layers_main:
        tensor_out = layer(tensor_out)
    self.tf_model = tf.keras.Model(first_layer_input, tensor_out)
def conv_block(self, inputs, filters: int, kernel_size: int = 3,
               activation: str = 'linear', shortcut: bool = True):
    """Apply a DPCNN convolution block: two pre-activation conv units
    (BatchNorm -> PReLU -> same-padded Conv1D), optionally with a
    residual shortcut back to ``inputs``.

    Args:
        inputs: input tensor of shape (batch, steps, features).
        filters: number of Conv1D filters in each unit.
        kernel_size: Conv1D kernel size (default 3, per DPCNN).
        activation: activation applied inside each Conv1D
            (default 'linear' — the PReLU provides the nonlinearity).
        shortcut: when True, add ``inputs`` to the block output
            (requires matching shapes).

    Returns:
        The output tensor of the block.
    """
    # BUG FIX: the original built one [BN, PReLU, Conv1D] unit and did
    # `layers_conv_unit * 2`, which duplicates *references* to the same
    # layer objects — both "units" shared a single BN/PReLU/Conv1D and
    # therefore shared weights. Instantiate fresh layers for each unit so
    # the two conv units are independent, as the DPCNN block intends.
    layers_conv_block = []
    for _ in range(2):
        layers_conv_block.append(L.BatchNormalization())
        layers_conv_block.append(L.PReLU())
        layers_conv_block.append(
            L.Conv1D(filters=filters,
                     kernel_size=kernel_size,
                     strides=1,
                     padding='same',
                     activation=activation))
    tensor_out = inputs
    for layer in layers_conv_block:
        tensor_out = layer(tensor_out)
    if shortcut:
        tensor_out = L.Add()([inputs, tensor_out])
    return tensor_out