Example #1
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # Reduce the tensor to a vector.
        if len(output_node.shape) > 2:
            output_node = reduction.SpatialReduction().build(hp, output_node)

        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        if dropout > 0:
            output_node = layers.Dropout(dropout)(output_node)
        output_node = layers.Dense(self.shape[-1])(output_node)
        if isinstance(self.loss, keras.losses.BinaryCrossentropy):
            output_node = layers.Activation(activations.sigmoid, name=self.name)(
                output_node
            )
        else:
            output_node = layers.Softmax(name=self.name)(output_node)
        return output_node
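
This fixed-or-tunable pattern (use `self.dropout` when the user pinned it, otherwise register an `hp.Choice`) recurs in almost every example below. Outside of a running search, a registered choice simply resolves to its default, which is easy to verify with a standalone HyperParameters object (a minimal sketch, assuming the `keras_tuner` package):

import keras_tuner

hp = keras_tuner.HyperParameters()
# With no tuner driving the search, Choice registers the hyperparameter
# and returns its default value.
dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0.0)
assert dropout == 0.0
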
Example #2
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        if len(input_node.shape) > 2:
            return layers.Flatten()(input_node)
        return input_node
Example #3
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node
        output_node = reduction.Flatten().build(hp, output_node)

        use_batchnorm = self.use_batchnorm
        if use_batchnorm is None:
            use_batchnorm = hp.Boolean("use_batchnorm", default=False)
        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        for i in range(utils.add_to_hp(self.num_layers, hp)):
            units = utils.add_to_hp(self.num_units, hp,
                                    "units_{i}".format(i=i))
            output_node = layers.Dense(units)(output_node)
            if use_batchnorm:
                output_node = layers.BatchNormalization()(output_node)
            output_node = layers.ReLU()(output_node)
            if dropout > 0:
                output_node = layers.Dropout(dropout)(output_node)
        return output_node
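
Example #3 resolves its hyperparameters through `utils.add_to_hp`, which accepts either a plain Python value or a KerasTuner hyperparameter object; Examples #7, #9 and #15 use it the same way. A hedged sketch of such a helper (the exact autokeras signature and argument names are assumptions):

import keras_tuner

def add_to_hp(hp_or_value, hps, name=None):
    """Return plain values unchanged; register HyperParameter objects on hps."""
    if not isinstance(
        hp_or_value, keras_tuner.engine.hyperparameters.HyperParameter
    ):
        return hp_or_value
    kwargs = hp_or_value.get_config()
    if name is None:
        name = hp_or_value.name
    kwargs.pop("conditions", None)
    kwargs.pop("name", None)
    # Dispatch to the matching registration method, e.g. hps.Choice / hps.Int.
    return getattr(hps, hp_or_value.__class__.__name__)(name=name, **kwargs)
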
Example #4
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError('Expect the input tensor to have '
                             'exactly 3 dimensions for rnn models, '
                             'but got {shape}'.format(shape=input_node.shape))

        feature_size = shape[-1]
        output_node = input_node

        bidirectional = self.bidirectional
        if bidirectional is None:
            bidirectional = hp.Boolean('bidirectional', default=True)
        layer_type = self.layer_type or hp.Choice(
            'layer_type', ['gru', 'lstm'], default='lstm')
        num_layers = self.num_layers or hp.Choice('num_layers', [1, 2, 3],
                                                  default=2)
        rnn_layers = {'gru': layers.GRU, 'lstm': layers.LSTM}
        in_layer = rnn_layers[layer_type]
        for i in range(num_layers):
            return_sequences = True
            if i == num_layers - 1:
                return_sequences = self.return_sequences
            if bidirectional:
                output_node = layers.Bidirectional(
                    in_layer(feature_size,
                             return_sequences=return_sequences))(output_node)
            else:
                output_node = in_layer(
                    feature_size,
                    return_sequences=return_sequences)(output_node)
        return output_node
Example #5
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node
        output_node = reduction.Flatten().build(hp, output_node)

        num_layers = self.num_layers or hp.Choice("num_layers", [1, 2, 3],
                                                  default=2)
        use_batchnorm = self.use_batchnorm
        if use_batchnorm is None:
            use_batchnorm = hp.Boolean("use_batchnorm", default=False)
        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        for i in range(num_layers):
            units = hp.Choice(
                "units_{i}".format(i=i),
                [16, 32, 64, 128, 256, 512, 1024],
                default=32,
            )
            output_node = layers.Dense(units)(output_node)
            if use_batchnorm:
                output_node = layers.BatchNormalization()(output_node)
            output_node = layers.ReLU()(output_node)
            if dropout > 0:
                output_node = layers.Dropout(dropout)(output_node)
        return output_node
Example #6
    def build(self, hp, inputs=None):
        if self.num_classes:
            expected = self.num_classes if self.num_classes > 2 else 1
            if self.output_shape[-1] != expected:
                raise ValueError('The data doesn\'t match the expected shape. '
                                 'Expecting {} but got {}'.format(
                                     expected, self.output_shape[-1]))
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # Reduce the tensor to a vector.
        if len(output_node.shape) > 2:
            output_node = reduction.SpatialReduction().build(hp, output_node)

        if self.dropout_rate is not None:
            dropout_rate = self.dropout_rate
        else:
            dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5],
                                     default=0)

        if dropout_rate > 0:
            output_node = layers.Dropout(dropout_rate)(output_node)
        output_node = layers.Dense(self.output_shape[-1])(output_node)
        if self.loss == 'binary_crossentropy':
            output_node = layers.Activation(activations.sigmoid,
                                            name=self.name)(output_node)
        else:
            output_node = layers.Softmax(name=self.name)(output_node)
        return output_node
Example #7
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError(
                "Expect the input tensor of RNNBlock to have dimensions of "
                "[batch_size, time_steps, vec_len], "
                "but got {shape}".format(shape=input_node.shape)
            )

        feature_size = shape[-1]
        output_node = input_node

        bidirectional = utils.add_to_hp(self.bidirectional, hp)
        layer_type = utils.add_to_hp(self.layer_type, hp)
        num_layers = utils.add_to_hp(self.num_layers, hp)
        rnn_layers = {"gru": layers.GRU, "lstm": layers.LSTM}
        in_layer = rnn_layers[layer_type]
        for i in range(num_layers):
            return_sequences = True
            if i == num_layers - 1:
                return_sequences = self.return_sequences
            if bidirectional:
                output_node = layers.Bidirectional(
                    in_layer(feature_size, return_sequences=return_sequences)
                )(output_node)
            else:
                output_node = in_layer(
                    feature_size, return_sequences=return_sequences
                )(output_node)
        return output_node
Example #8
    def build(self, hp, inputs=None):
        """
        # Arguments
             hp: HyperParameters. The hyperparameters for building the model.
             inputs: Tensor of shape [batch_size, seq_len]

        # Returns
            Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
        """
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        pretraining = self.pretraining or hp.Choice(
            'pretraining', ['random', 'glove', 'fasttext', 'word2vec', 'none'],
            default='none')
        embedding_dim = self.embedding_dim or hp.Choice(
            'embedding_dim', [32, 64, 128, 256, 512], default=128)
        num_heads = self.num_heads or hp.Choice('num_heads', [8, 16, 32],
                                                default=8)

        dense_dim = self.dense_dim or hp.Choice(
            'dense_dim', [128, 256, 512, 1024, 2048], default=2048)
        dropout_rate = self.dropout_rate or hp.Choice(
            'dropout_rate', [0.0, 0.25, 0.5], default=0)

        ffn = tf.keras.Sequential([
            layers.Dense(dense_dim, activation="relu"),
            layers.Dense(embedding_dim),
        ])

        layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        dropout1 = layers.Dropout(dropout_rate)
        dropout2 = layers.Dropout(dropout_rate)
        # Token and Position Embeddings
        input_node = nest.flatten(inputs)[0]
        token_embedding = Embedding(max_features=self.max_features,
                                    pretraining=pretraining,
                                    embedding_dim=embedding_dim,
                                    dropout_rate=dropout_rate).build(
                                        hp, input_node)
        maxlen = input_node.shape[-1]
        batch_size = tf.shape(input_node)[0]
        positions = self.pos_array_funct(maxlen, batch_size)
        position_embedding = Embedding(max_features=maxlen,
                                       pretraining=pretraining,
                                       embedding_dim=embedding_dim,
                                       dropout_rate=dropout_rate).build(
                                           hp, positions)
        output_node = tf.keras.layers.Add()(
            [token_embedding, position_embedding])
        attn_output = MultiHeadSelfAttention(embedding_dim,
                                             num_heads).build(hp, output_node)
        attn_output = dropout1(attn_output)
        add_inputs_1 = tf.keras.layers.Add()([output_node, attn_output])
        out1 = layernorm1(add_inputs_1)
        ffn_output = ffn(out1)
        ffn_output = dropout2(ffn_output)
        add_inputs_2 = tf.keras.layers.Add()([out1, ffn_output])
        output = layernorm2(add_inputs_2)
        return output
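
`self.pos_array_funct` is referenced but not shown. A minimal sketch of what it plausibly does, assuming it only broadcasts the position indices 0..maxlen-1 across the batch (batch_size may be a scalar tensor such as tf.shape(x)[0]):

import tensorflow as tf

def pos_array_funct(maxlen, batch_size):
    """Integer position indices of shape [batch_size, maxlen]."""
    positions = tf.range(start=0, limit=maxlen, delta=1)  # [maxlen]
    positions = tf.expand_dims(positions, 0)              # [1, maxlen]
    return tf.tile(positions, [batch_size, 1])            # [batch_size, maxlen]
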
Example #9
    def build(self, hp, inputs=None):
        """
        # Arguments
             hp: HyperParameters. The hyperparameters for building the model.
             inputs: Tensor of shape [batch_size, seq_len]

        # Returns
            Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
        """
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        pretraining = utils.add_to_hp(self.pretraining, hp)
        embedding_dim = utils.add_to_hp(self.embedding_dim, hp)
        num_heads = utils.add_to_hp(self.num_heads, hp)

        dense_dim = utils.add_to_hp(self.dense_dim, hp)
        dropout = utils.add_to_hp(self.dropout, hp)

        ffn = tf.keras.Sequential(
            [
                layers.Dense(dense_dim, activation="relu"),
                layers.Dense(embedding_dim),
            ]
        )

        layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        dropout1 = layers.Dropout(dropout)
        dropout2 = layers.Dropout(dropout)
        # Token and Position Embeddings
        input_node = nest.flatten(inputs)[0]
        token_embedding = Embedding(
            max_features=self.max_features,
            pretraining=pretraining,
            embedding_dim=embedding_dim,
            dropout=dropout,
        ).build(hp, input_node)
        maxlen = input_node.shape[-1]
        batch_size = tf.shape(input_node)[0]
        positions = self.pos_array_funct(maxlen, batch_size)
        position_embedding = Embedding(
            max_features=maxlen,
            pretraining=pretraining,
            embedding_dim=embedding_dim,
            dropout=dropout,
        ).build(hp, positions)
        output_node = tf.keras.layers.Add()([token_embedding, position_embedding])
        attn_output = MultiHeadSelfAttention(embedding_dim, num_heads).build(
            hp, output_node
        )
        attn_output = dropout1(attn_output)
        add_inputs_1 = tf.keras.layers.Add()([output_node, attn_output])
        out1 = layernorm1(add_inputs_1)
        ffn_output = ffn(out1)
        ffn_output = dropout2(ffn_output)
        add_inputs_2 = tf.keras.layers.Add()([out1, ffn_output])
        return layernorm2(add_inputs_2)
Example #10
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        dropout = self.dropout or hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)
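        # NOTE: `or` above treats an explicit dropout of 0.0 as unset and
        # registers the "dropout" hyperparameter anyway; Example #1 avoids
        # this with an explicit `is not None` check.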

        if dropout > 0:
            output_node = layers.Dropout(dropout)(output_node)
        output_node = reduction.Flatten().build(hp, output_node)
        output_node = layers.Dense(self.shape[-1], name=self.name)(output_node)
        return output_node
Example #11
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        kernel_size = self.kernel_size or hp.Choice("kernel_size", [3, 5, 7],
                                                    default=3)
        num_blocks = self.num_blocks or hp.Choice("num_blocks", [1, 2, 3],
                                                  default=2)
        num_layers = self.num_layers or hp.Choice("num_layers", [1, 2],
                                                  default=2)
        separable = self.separable
        if separable is None:
            separable = hp.Boolean("separable", default=False)

        if separable:
            conv = layer_utils.get_sep_conv(input_node.shape)
        else:
            conv = layer_utils.get_conv(input_node.shape)

        max_pooling = self.max_pooling
        if max_pooling is None:
            max_pooling = hp.Boolean("max_pooling", default=True)
        pool = layer_utils.get_max_pooling(input_node.shape)

        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        for i in range(num_blocks):
            for j in range(num_layers):
                output_node = conv(
                    hp.Choice(
                        "filters_{i}_{j}".format(i=i, j=j),
                        [16, 32, 64, 128, 256, 512],
                        default=32,
                    ),
                    kernel_size,
                    padding=self._get_padding(kernel_size, output_node),
                    activation="relu",
                )(output_node)
            if max_pooling:
                output_node = pool(
                    kernel_size - 1,
                    padding=self._get_padding(kernel_size - 1, output_node),
                )(output_node)
            if dropout > 0:
                output_node = layers.Dropout(dropout)(output_node)
        return output_node
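
`self._get_padding` is not shown in either ConvBlock example. A plausible sketch, assuming the intent is to fall back to "same" padding once a spatial dimension becomes too small to survive another "valid" convolution or pooling step:

    @staticmethod
    def _get_padding(kernel_size, output_node):
        # Keep 'valid' only while every spatial dimension still dwarfs the
        # kernel, so stacked conv/pool blocks cannot shrink a dimension away.
        if all(kernel_size * 2 <= length
               for length in output_node.shape[1:-1]):
            return "valid"
        return "same"
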
Example #12
    def build(self, hp, inputs=None):
        """
        # Arguments
             hp: HyperParameters. The hyperparameters for building the model.
             inputs: Tensor of shape [batch_size, seq_len, embedding_dim]

        # Returns
            Self-Attention outputs of shape `[batch_size, seq_len, embedding_dim]`.
        """
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError('Expect the input tensor to have '
                             '3 dimensions for multi-head self-attention, '
                             'but got {shape}'.format(shape=input_node.shape))
        # input.shape = [batch_size, seq_len, embedding_dim]
        head_size = self.head_size or hp.Choice(
            'head_size', [32, 64, 128, 256, 512], default=128)
        num_heads = self.num_heads
        if num_heads is None:
            num_heads = 8

        if head_size % num_heads != 0:
            raise ValueError(f"embedding dimension = {head_size} should be "
                             f"divisible by number of heads = {num_heads}")
        projection_dim = head_size // num_heads
        query_dense = layers.Dense(head_size)
        key_dense = layers.Dense(head_size)
        value_dense = layers.Dense(head_size)
        combine_heads = layers.Dense(head_size)
        batch_size = tf.shape(input_node)[0]
        query = query_dense(input_node)  # (batch_size, seq_len, head_size)
        key = key_dense(input_node)  # (batch_size, seq_len, head_size)
        value = value_dense(input_node)  # (batch_size, seq_len, head_size)
        query, key, value = [
            self.separate_heads(var, batch_size, num_heads, projection_dim)
            for var in [query, key, value]
        ]
        attention, weights = self.attention(query, key, value)
        attention = tf.transpose(attention, perm=[
            0, 2, 1, 3
        ])  # (batch_size, seq_len, num_heads, projection_dim)
        # Use the resolved head_size; self.head_size may be None when tuned.
        concat_attention = tf.reshape(
            attention, (batch_size, tf.shape(attention)[1],
                        head_size))  # (batch_size, seq_len, head_size)
        output = combine_heads(
            concat_attention)  # (batch_size, seq_len, head_size)
        return output
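
`self.separate_heads` and `self.attention` are referenced but not shown. A minimal sketch consistent with the shape comments above, i.e. standard scaled dot-product attention (the library's actual code may differ in details):

import tensorflow as tf

def separate_heads(x, batch_size, num_heads, projection_dim):
    """[batch, seq_len, head_size] -> [batch, num_heads, seq_len, projection_dim]."""
    x = tf.reshape(x, (batch_size, -1, num_heads, projection_dim))
    return tf.transpose(x, perm=[0, 2, 1, 3])

def attention(query, key, value):
    """Scaled dot-product attention; returns (outputs, weights)."""
    score = tf.matmul(query, key, transpose_b=True)
    dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
    weights = tf.nn.softmax(score / tf.math.sqrt(dim_key), axis=-1)
    return tf.matmul(weights, value), weights
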
Example #13
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        if self.reduction_type is None:
            reduction_type = hp.Choice(REDUCTION_TYPE,
                                       [FLATTEN, GLOBAL_MAX, GLOBAL_AVG])
            with hp.conditional_scope(REDUCTION_TYPE, [reduction_type]):
                return self._build_block(hp, output_node, reduction_type)
        else:
            return self._build_block(hp, output_node, self.reduction_type)
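
`self._build_block` is not shown here, but judging from the dispatch in Examples #17 and #18 it plausibly maps each reduction type to the corresponding layer (a sketch using the same module-level constants, not the library's exact code):

    def _build_block(self, hp, output_node, reduction_type):
        if reduction_type == FLATTEN:
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == GLOBAL_MAX:
            output_node = layer_utils.get_global_max_pooling(
                output_node.shape)()(output_node)
        elif reduction_type == GLOBAL_AVG:
            output_node = layer_utils.get_global_average_pooling(
                output_node.shape)()(output_node)
        return output_node
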
Example #14
    def build(self, hp, inputs=None):
        if self.output_dim and self.output_shape[-1] != self.output_dim:
            raise ValueError('The data doesn\'t match the output_dim. '
                             'Expecting {} but got {}'.format(
                                 self.output_dim, self.output_shape[-1]))
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        dropout_rate = self.dropout_rate or hp.Choice(
            'dropout_rate', [0.0, 0.25, 0.5], default=0)

        if dropout_rate > 0:
            output_node = layers.Dropout(dropout_rate)(output_node)
        output_node = reduction.Flatten().build(hp, output_node)
        output_node = layers.Dense(self.output_shape[-1],
                                   name=self.name)(output_node)
        return output_node
Example #15
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        kernel_size = utils.add_to_hp(self.kernel_size, hp)

        separable = self.separable
        if separable is None:
            separable = hp.Boolean("separable", default=False)

        if separable:
            conv = layer_utils.get_sep_conv(input_node.shape)
        else:
            conv = layer_utils.get_conv(input_node.shape)

        max_pooling = self.max_pooling
        if max_pooling is None:
            max_pooling = hp.Boolean("max_pooling", default=True)
        pool = layer_utils.get_max_pooling(input_node.shape)

        dropout = utils.add_to_hp(self.dropout, hp)

        for i in range(utils.add_to_hp(self.num_blocks, hp)):
            for j in range(utils.add_to_hp(self.num_layers, hp)):
                output_node = conv(
                    utils.add_to_hp(
                        self.filters, hp, "filters_{i}_{j}".format(i=i, j=j)
                    ),
                    kernel_size,
                    padding=self._get_padding(kernel_size, output_node),
                    activation="relu",
                )(output_node)
            if max_pooling:
                output_node = pool(
                    kernel_size - 1,
                    padding=self._get_padding(kernel_size - 1, output_node),
                )(output_node)
            if dropout > 0:
                output_node = layers.Dropout(dropout)(output_node)
        return output_node
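
Both ConvBlock examples pick the concrete layer class through `layer_utils`. A plausible sketch of those rank-dispatch helpers (hedged; Keras has no SeparableConv3D, so the 3-D separable case presumably falls back to a regular Conv3D):

from tensorflow.keras import layers

def get_conv(shape):
    # Rank 3 input -> Conv1D, rank 4 -> Conv2D, rank 5 -> Conv3D.
    return [layers.Conv1D, layers.Conv2D, layers.Conv3D][len(shape) - 3]

def get_sep_conv(shape):
    return [
        layers.SeparableConv1D,
        layers.SeparableConv2D,
        layers.Conv3D,  # no separable 3-D conv in Keras
    ][len(shape) - 3]
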
Example #16
    def build(self, hp, inputs=None):
        """
        # Arguments
             hp: HyperParameters. The hyperparameters for building the model.
             inputs: Tensor of shape [batch_size, seq_len, embedding_dim]

        # Returns
            Self-Attention outputs of shape `[batch_size, seq_len, embedding_dim]`.
        """
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        num_heads = self.num_heads
        head_size = (
            self.head_size
            or hp.Choice("head_size_factor", [4, 8, 16, 32, 64], default=16)
            * num_heads
        )

        projection_dim = head_size // num_heads
        query_dense = layers.Dense(head_size)
        key_dense = layers.Dense(head_size)
        value_dense = layers.Dense(head_size)
        combine_heads = layers.Dense(head_size)
        batch_size = tf.shape(input_node)[0]
        query = query_dense(input_node)  # (batch_size, seq_len, head_size)
        key = key_dense(input_node)  # (batch_size, seq_len, head_size)
        value = value_dense(input_node)  # (batch_size, seq_len, head_size)
        query, key, value = [
            self.separate_heads(var, batch_size, num_heads, projection_dim)
            for var in [query, key, value]
        ]
        attention, weights = self.attention(query, key, value)
        attention = tf.transpose(attention, perm=[
            0, 2, 1, 3
        ])  # (batch_size, seq_len, num_heads, projection_dim)
        # Use the resolved head_size; self.head_size is None when the size is
        # derived from the tuned head_size_factor.
        concat_attention = tf.reshape(
            attention, (batch_size, tf.shape(attention)[1],
                        head_size))  # (batch_size, seq_len, head_size)
        output = combine_heads(
            concat_attention)  # (batch_size, seq_len, head_size)
        return output
Example #17
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        reduction_type = self.reduction_type or hp.Choice(
            'reduction_type', ['flatten', 'global_max', 'global_avg'],
            default='global_avg')
        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'global_max':
            output_node = layer_utils.get_global_max_pooling(
                output_node.shape)()(output_node)
        elif reduction_type == 'global_avg':
            output_node = layer_utils.get_global_average_pooling(
                output_node.shape)()(output_node)
        return output_node
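
The `layer_utils.get_global_*_pooling` helpers are not shown; a sketch consistent with the rank-based dispatch used elsewhere (the average-pooling variant would be analogous):

from tensorflow.keras import layers

def get_global_max_pooling(shape):
    # Rank 3 input -> 1D, rank 4 -> 2D, rank 5 -> 3D global max pooling.
    return [
        layers.GlobalMaxPooling1D,
        layers.GlobalMaxPooling2D,
        layers.GlobalMaxPooling3D,
    ][len(shape) - 3]
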
Example #18
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        reduction_type = self.reduction_type or hp.Choice(
            'reduction_type', ['flatten', 'global_max', 'global_avg'],
            default='global_avg')

        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'global_max':
            output_node = tf.math.reduce_max(output_node, axis=-2)
        elif reduction_type == 'global_avg':
            output_node = tf.math.reduce_mean(output_node, axis=-2)
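        # 'global_min' is only reachable when self.reduction_type is set
        # explicitly; the hp.Choice above does not offer it.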
        elif reduction_type == 'global_min':
            output_node = tf.math.reduce_min(output_node, axis=-2)

        return output_node
Example #19
def test_validate_num_inputs_error():
    with pytest.raises(ValueError) as info:
        utils.validate_num_inputs([1, 2, 3], 2)

    assert "Expected 2 elements in the inputs list" in str(info.value)