Example #1
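All of the snippets below are excerpts from a larger project. They assume roughly the following context (a sketch inferred from the code; the exact import paths may differ, and `KMaxPoolingLayer`, `AttentionWeightedAverageLayer`, and `resnet_block` are custom helpers defined elsewhere in that project):

from math import floor, log2

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers as L  # `L` is the layers alias used throughout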
def downsample(inputs,
               pool_type: str = 'max',
               sorted: bool = True,  # noqa: A002
               stage: int = 1):
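    """Downsample the temporal dimension of `inputs` using the strategy named by
    `pool_type`: strided max pooling, k-max pooling (keeping half of the
    timesteps), a strided convolution followed by batch normalization, or no
    pooling when `pool_type` is None.
    """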
    layers_pool = []
    if pool_type == 'max':
        layers_pool.append(
            L.MaxPooling1D(pool_size=3,
                           strides=2,
                           padding='same',
                           name=f'pool_{stage}'))
    elif pool_type == 'k_max':
        k = int(inputs.shape[1].value / 2)
        layers_pool.append(
            KMaxPoolingLayer(k=k, sorted=sorted, name=f'pool_{stage}'))
    elif pool_type == 'conv':
        layers_pool.append(
            L.Conv1D(filters=inputs.shape[-1].value,
                     kernel_size=3,
                     strides=2,
                     padding='same',
                     name=f'pool_{stage}'))
        layers_pool.append(L.BatchNormalization())
    elif pool_type is None:
        layers_pool = []
    else:
        raise ValueError(f'unsupported pooling type `{pool_type}`!')

    tensor_out = inputs
    for layer in layers_pool:
        tensor_out = layer(tensor_out)
    return tensor_out
Example #2
    def build_model_arc(self):
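        """Build the model graph: spatial dropout, a bidirectional GRU, and a
        Conv1D applied in sequence, then three parallel pooling heads (global
        max pooling, attention-weighted average, global average pooling) whose
        outputs are concatenated and passed through the fully connected layers.
        """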
        output_dim = len(self.pre_processor.label2idx)
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model

        layers_rcnn_seq = [
            L.SpatialDropout1D(**config['spatial_dropout']),
            L.Bidirectional(L.GRU(**config['rnn_0'])),
            L.Conv1D(**config['conv_0'])
        ]

        layers_sensor = [
            L.GlobalMaxPooling1D(),
            AttentionWeightedAverageLayer(),
            L.GlobalAveragePooling1D()
        ]
        layer_concat = L.Concatenate(**config['concat'])

        layers_full_connect = [
            L.Dropout(**config['dropout']),
            L.Dense(**config['dense']),
            L.Dense(output_dim, **config['activation_layer'])
        ]

        tensor = embed_model.output
        for layer in layers_rcnn_seq:
            tensor = layer(tensor)

        tensors_sensor = [layer(tensor) for layer in layers_sensor]
        tensor_output = layer_concat(tensors_sensor)

        for layer in layers_full_connect:
            tensor_output = layer(tensor_output)

        self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
Example #3
    def build_model_arc(self):
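        """Build the model graph: spatial dropout on the embeddings, four
        parallel Conv1D branches each followed by k-max pooling and flattening,
        concatenation of the branch outputs, and a fully connected classifier
        head.
        """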
        output_dim = len(self.pre_processor.label2idx)
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model

        layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
        layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
        layers_sensor = [KMaxPoolingLayer(**config['maxpool_i4']), L.Flatten()]
        layer_concat = L.Concatenate(**config['merged_tensor'])
        layers_seq = []
        layers_seq.append(L.Dropout(**config['dropout']))
        layers_seq.append(L.Dense(**config['dense']))
        layers_seq.append(L.Dense(output_dim, **config['activation_layer']))

        embed_tensor = layer_embed_dropout(embed_model.output)
        tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
        tensors_sensor = []
        for tensor_conv in tensors_conv:
            tensor_sensor = tensor_conv
            for layer_sensor in layers_sensor:
                tensor_sensor = layer_sensor(tensor_sensor)
            tensors_sensor.append(tensor_sensor)
        tensor = layer_concat(tensors_sensor)
        # tensor = L.concatenate(tensors_sensor, **config['merged_tensor'])
        for layer in layers_seq:
            tensor = layer(tensor)

        self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
Example #4
    def build_model_arc(self):
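        """Build the model graph: a Conv1D, max pooling, a GRU, and a dense
        output layer applied sequentially to the embedding output.
        """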
        output_dim = len(self.pre_processor.label2idx)
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model
        layers_seq = []
        layers_seq.append(L.Conv1D(**config['conv_layer']))
        layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
        layers_seq.append(L.GRU(**config['gru_layer']))
        layers_seq.append(L.Dense(output_dim, **config['activation_layer']))

        tensor = embed_model.output
        for layer in layers_seq:
            tensor = layer(tensor)

        self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
Example #5
    def build_model_arc(self):
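        """Build the model graph: a region block (Conv1D, batch normalization,
        PReLU, dropout), a base conv block, a stack of resnet blocks whose
        depth follows log2 of the sequence length, and a head of global max
        pooling plus fully connected layers.
        """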
        output_dim = len(self.pre_processor.label2idx)
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model

        layers_region = [
            L.Conv1D(**config['region_embedding']),
            L.BatchNormalization(),
            L.PReLU(),
            L.Dropout(**config['region_dropout'])
        ]

        layers_main = [
            L.GlobalMaxPooling1D(),
            L.Dense(**config['dense']),
            L.BatchNormalization(),
            L.PReLU(),
            L.Dropout(**config['dropout']),
            L.Dense(output_dim, **config['activation'])
        ]

        tensor_out = embed_model.output

        # build region tensors
        for layer in layers_region:
            tensor_out = layer(tensor_out)

        # build the base pyramid layer
        tensor_out = conv_block(tensor_out, **config['conv_block'])
        # build the upper pyramid layers while `steps > 2`
        seq_len = tensor_out.shape[1].value
        if seq_len is None:
            raise ValueError(
                '`sequence_length` should be explicitly assigned, but it is `None`.'
            )
        for i in range(floor(log2(seq_len)) - 2):
            tensor_out = resnet_block(tensor_out,
                                      stage=i + 1,
                                      **config['resnet_block'])
        for layer in layers_main:
            tensor_out = layer(tensor_out)

        self.tf_model = tf.keras.Model(embed_model.inputs, tensor_out)
Example #6
    def build_model_arc(self):
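        """Build the model graph: spatial dropout, four parallel Conv1D
        branches, and three pooling heads per branch (global max pooling,
        attention-weighted average, global average pooling); the four branch
        outputs of each head are concatenated into a view, the views are then
        concatenated together and passed through the fully connected layers.
        """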
        output_dim = len(self.pre_processor.label2idx)
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model

        layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
        layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
        layers_sensor = []
        layers_sensor.append(L.GlobalMaxPooling1D())
        layers_sensor.append(AttentionWeightedAverageLayer())
        layers_sensor.append(L.GlobalAveragePooling1D())
        layer_view = L.Concatenate(**config['v_col3'])
        layer_allviews = L.Concatenate(**config['merged_tensor'])
        layers_seq = []
        layers_seq.append(L.Dropout(**config['dropout']))
        layers_seq.append(L.Dense(**config['dense']))
        layers_seq.append(L.Dense(output_dim, **config['activation_layer']))

        embed_tensor = layer_embed_dropout(embed_model.output)
        tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
        tensors_matrix_sensor = []
        for tensor_conv in tensors_conv:
            tensor_sensors = [
                layer_sensor(tensor_conv) for layer_sensor in layers_sensor
            ]
            # tensor_sensors.append(L.GlobalMaxPooling1D()(tensor_conv))
            # tensor_sensors.append(AttentionWeightedAverageLayer()(tensor_conv))
            # tensor_sensors.append(L.GlobalAveragePooling1D()(tensor_conv))
            tensors_matrix_sensor.append(tensor_sensors)
        tensors_views = [
            layer_view(list(tensors))
            for tensors in zip(*tensors_matrix_sensor)
        ]
        tensor = layer_allviews(tensors_views)
        # tensors_v_cols = [L.concatenate(tensors, **config['v_col3']) for tensors
        #                   in zip(*tensors_matrix_sensor)]
        # tensor = L.concatenate(tensors_v_cols, **config['merged_tensor'])
        for layer in layers_seq:
            tensor = layer(tensor)

        self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
Example #7
    def build_model_arc(self):
        """
        build model architectural
        """
        output_dim = len(self.pre_processor.label2idx)
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model

        layer_conv = L.Conv1D(**config['layer_conv'], name='layer_conv')
        layer_lstm = L.LSTM(**config['layer_lstm'], name='layer_lstm')
        layer_dropout = L.Dropout(**config['layer_dropout'],
                                  name='layer_dropout')
        layer_time_distributed = L.TimeDistributed(
            L.Dense(output_dim, **config['layer_time_distributed']),
            name='layer_time_distributed')
        layer_activation = L.Activation(**config['layer_activation'])

        tensor = layer_conv(embed_model.output)
        tensor = layer_lstm(tensor)
        tensor = layer_dropout(tensor)
        tensor = layer_time_distributed(tensor)
        output_tensor = layer_activation(tensor)

        self.tf_model = keras.Model(embed_model.inputs, output_tensor)
Example #8
def conv_block(inputs,
               filters: int,
               kernel_size: int = 3,
               activation: str = 'linear',
               shortcut: bool = True):
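    """Apply two (BatchNormalization, PReLU, Conv1D) units to `inputs` and,
    when `shortcut` is True, add the original input back as a residual
    connection.
    """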
    layers_conv_unit = []
    layers_conv_unit.append(L.BatchNormalization())
    layers_conv_unit.append(L.PReLU())
    layers_conv_unit.append(
        L.Conv1D(filters=filters,
                 kernel_size=kernel_size,
                 strides=1,
                 padding='same',
                 activation=activation))
    # note: list multiplication reuses the same layer objects, so the two
    # (BatchNormalization, PReLU, Conv1D) units below share their weights
    layers_conv_block = layers_conv_unit * 2

    tensor_out = inputs
    for layer in layers_conv_block:
        tensor_out = layer(tensor_out)

    if shortcut:
        tensor_out = L.Add()([inputs, tensor_out])

    return tensor_out
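
A minimal usage sketch composing the two module-level helpers above (`conv_block` from Example #8 and `downsample` from Example #1), assuming the context imports listed at the top; the input shape and variable names are illustrative, not from the original code:

# Hypothetical composition of the helpers above; names and shapes are illustrative.
example_inputs = L.Input(shape=(128, 64))       # (sequence_length, embedding_dim)
x = conv_block(example_inputs, filters=64)      # filters must match the channel dim when shortcut=True
x = downsample(x, pool_type='max')              # pool_size=3, strides=2 -> roughly halves the timesteps
example_model = tf.keras.Model(example_inputs, x)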