Example #1
    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        # TODO: support more pretrained embedding layers.
        # glove, fasttext, and word2vec
        pretraining = utils.add_to_hp(self.pretraining, hp)
        embedding_dim = utils.add_to_hp(self.embedding_dim, hp)
        if pretraining != "none":
            # TODO: load from pretrained weights
            layer = layers.Embedding(
                input_dim=self.max_features,
                output_dim=embedding_dim,
                input_length=input_node.shape[1],
            )
            # trainable=False,
            # weights=[embedding_matrix])
        else:
            layer = layers.Embedding(
                input_dim=self.max_features, output_dim=embedding_dim
            )
            # input_length=input_node.shape[1],
            # trainable=True)
        output_node = layer(input_node)
        dropout = utils.add_to_hp(self.dropout, hp)
        if dropout > 0:
            output_node = layers.Dropout(dropout)(output_node)
        return output_node
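
Several of these build methods call utils.add_to_hp, which is not shown on this page. Conceptually it lets a block attribute be either a value the user fixed in the block's constructor or a hyperparameter left open for the tuner to search. The sketch below is a simplified illustration of that idea only (an assumption; the real AutoKeras helper dispatches on KerasTuner hyperparameter objects rather than plain lists):

def add_to_hp(value, hp, name="value"):
    # Hypothetical simplification: a plain value (e.g. 0.25) passes through
    # unchanged, while a list of candidates is registered as a searchable
    # choice on the KerasTuner HyperParameters object.
    if isinstance(value, (list, tuple)):
        return hp.Choice(name, list(value))
    return value
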
Example #2
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node
        output_node = reduction.Flatten().build(hp, output_node)

        use_batchnorm = self.use_batchnorm
        if use_batchnorm is None:
            use_batchnorm = hp.Boolean("use_batchnorm", default=False)
        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        for i in range(utils.add_to_hp(self.num_layers, hp)):
            units = utils.add_to_hp(self.num_units, hp,
                                    "units_{i}".format(i=i))
            output_node = layers.Dense(units)(output_node)
            if use_batchnorm:
                output_node = layers.BatchNormalization()(output_node)
            output_node = layers.ReLU()(output_node)
            if dropout > 0:
                output_node = layers.Dropout(dropout)(output_node)
        return output_node
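
A block like this can also be exercised on its own, outside a full search. The sketch below assumes the AutoKeras 1.x API, where the block is exposed as ak.DenseBlock and build takes a KerasTuner HyperParameters object plus a symbolic Keras tensor; depending on your versions the tuner package may be imported as kerastuner instead of keras_tuner.

import keras_tuner
import tensorflow as tf
import autokeras as ak

hp = keras_tuner.HyperParameters()
input_node = tf.keras.Input(shape=(16,))
# Build once with default hyperparameter values; the block registers
# num_layers, units_{i}, use_batchnorm and dropout on `hp` as it goes.
output_node = ak.DenseBlock().build(hp, input_node)
print(output_node.shape)  # e.g. (None, units_of_last_layer)
print(hp.values)          # the hyperparameters this block declared
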
Example #3
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError(
                "Expect the input tensor of RNNBlock to have dimensions of "
                "[batch_size, time_steps, vec_len], "
                "but got {shape}".format(shape=input_node.shape)
            )

        feature_size = shape[-1]
        output_node = input_node

        bidirectional = utils.add_to_hp(self.bidirectional, hp)
        layer_type = utils.add_to_hp(self.layer_type, hp)
        num_layers = utils.add_to_hp(self.num_layers, hp)
        rnn_layers = {"gru": layers.GRU, "lstm": layers.LSTM}
        in_layer = rnn_layers[layer_type]
        for i in range(num_layers):
            return_sequences = True
            if i == num_layers - 1:
                return_sequences = self.return_sequences
            if bidirectional:
                output_node = layers.Bidirectional(
                    in_layer(feature_size, return_sequences=return_sequences)
                )(output_node)
            else:
                output_node = in_layer(
                    feature_size, return_sequences=return_sequences
                )(output_node)
        return output_node
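
Two details of this block are worth noting: it only accepts rank-3 inputs of shape [batch_size, time_steps, vec_len], and intermediate RNN layers always return sequences so that each stacked layer still receives a rank-3 tensor; only the final layer honors self.return_sequences. A small sketch, again assuming the AutoKeras 1.x block API as in the previous sketch (names and defaults are assumptions):

import keras_tuner
import tensorflow as tf
import autokeras as ak

hp = keras_tuner.HyperParameters()
# 20 time steps, 8 features per step -- a valid rank-3 input.
input_node = tf.keras.Input(shape=(20, 8))
output_node = ak.RNNBlock(return_sequences=False).build(hp, input_node)
# A rank-2 input such as tf.keras.Input(shape=(160,)) would instead raise
# the ValueError shown in the snippet above.
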
Example #4
    def build(self, hp, inputs=None):
        """
        # Arguments
            hp: HyperParameters. The hyperparameters for building the model.
            inputs: Tensor of shape `[batch_size, seq_len]`.

        # Returns
            Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
        """
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        pretraining = utils.add_to_hp(self.pretraining, hp)
        embedding_dim = utils.add_to_hp(self.embedding_dim, hp)
        num_heads = utils.add_to_hp(self.num_heads, hp)

        dense_dim = utils.add_to_hp(self.dense_dim, hp)
        dropout = utils.add_to_hp(self.dropout, hp)

        ffn = tf.keras.Sequential(
            [
                layers.Dense(dense_dim, activation="relu"),
                layers.Dense(embedding_dim),
            ]
        )

        layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        dropout1 = layers.Dropout(dropout)
        dropout2 = layers.Dropout(dropout)
        # Token and Position Embeddings
        input_node = nest.flatten(inputs)[0]
        token_embedding = Embedding(
            max_features=self.max_features,
            pretraining=pretraining,
            embedding_dim=embedding_dim,
            dropout=dropout,
        ).build(hp, input_node)
        maxlen = input_node.shape[-1]
        batch_size = tf.shape(input_node)[0]
        positions = self.pos_array_funct(maxlen, batch_size)
        position_embedding = Embedding(
            max_features=maxlen,
            pretraining=pretraining,
            embedding_dim=embedding_dim,
            dropout=dropout,
        ).build(hp, positions)
        output_node = tf.keras.layers.Add()([token_embedding, position_embedding])
        attn_output = MultiHeadSelfAttention(embedding_dim, num_heads).build(
            hp, output_node
        )
        attn_output = dropout1(attn_output)
        add_inputs_1 = tf.keras.layers.Add()([output_node, attn_output])
        out1 = layernorm1(add_inputs_1)
        ffn_output = ffn(out1)
        ffn_output = dropout2(ffn_output)
        add_inputs_2 = tf.keras.layers.Add()([out1, ffn_output])
        return layernorm2(add_inputs_2)
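
This snippet relies on a self.pos_array_funct helper that is not shown. A plausible minimal version of such a helper (an assumption, not the actual AutoKeras code) produces a [batch_size, maxlen] tensor of position indices so that positions can be fed through the same Embedding block as the tokens:

import tensorflow as tf

def pos_array_funct(maxlen, batch_size):
    # Position indices 0 .. maxlen-1, repeated for every example in the batch.
    positions = tf.range(start=0, limit=maxlen, delta=1)   # (maxlen,)
    positions = tf.expand_dims(positions, axis=0)           # (1, maxlen)
    return tf.tile(positions, [batch_size, 1])              # (batch_size, maxlen)
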
Example #5
    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        output_node = input_node

        # Translate
        translation_factor = utils.add_to_hp(self.translation_factor, hp)
        if translation_factor not in [0, (0, 0)]:
            height_factor, width_factor = self._get_fraction_value(
                translation_factor
            )
            output_node = layers.RandomTranslation(height_factor, width_factor)(
                output_node
            )

        # Flip
        horizontal_flip = self.horizontal_flip
        if horizontal_flip is None:
            horizontal_flip = hp.Boolean("horizontal_flip", default=True)
        vertical_flip = self.vertical_flip
        if self.vertical_flip is None:
            vertical_flip = hp.Boolean("vertical_flip", default=True)
        if not horizontal_flip and not vertical_flip:
            flip_mode = ""
        elif horizontal_flip and vertical_flip:
            flip_mode = "horizontal_and_vertical"
        elif horizontal_flip and not vertical_flip:
            flip_mode = "horizontal"
        elif not horizontal_flip and vertical_flip:
            flip_mode = "vertical"
        if flip_mode != "":
            output_node = layers.RandomFlip(mode=flip_mode)(output_node)

        # Rotate
        rotation_factor = utils.add_to_hp(self.rotation_factor, hp)
        if rotation_factor != 0:
            output_node = layers.RandomRotation(rotation_factor)(output_node)

        # Zoom
        zoom_factor = utils.add_to_hp(self.zoom_factor, hp)
        if zoom_factor not in [0, (0, 0)]:
            height_factor, width_factor = self._get_fraction_value(zoom_factor)
            # TODO: Add back RandomZoom when it is ready.
            # output_node = layers.RandomZoom(
            # height_factor, width_factor)(output_node)

        # Contrast
        contrast_factor = utils.add_to_hp(self.contrast_factor, hp)
        if contrast_factor not in [0, (0, 0)]:
            output_node = layers.RandomContrast(contrast_factor)(output_node)

        return output_node
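
In a full pipeline this augmentation block typically sits directly after the image input and in front of a convolutional block such as the one in Example #6 below. A minimal sketch using the AutoKeras functional API; the block names ak.ImageAugmentation, ak.ConvBlock, ak.ClassificationHead and their arguments follow the AutoKeras 1.x API and should be treated as assumptions:

import autokeras as ak

input_node = ak.ImageInput()
output_node = ak.ImageAugmentation(
    horizontal_flip=True, vertical_flip=False
)(input_node)
output_node = ak.ConvBlock()(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=3)
# clf.fit(x_train, y_train, epochs=10)
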
Example #6
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        kernel_size = utils.add_to_hp(self.kernel_size, hp)

        separable = self.separable
        if separable is None:
            separable = hp.Boolean("separable", default=False)

        if separable:
            conv = layer_utils.get_sep_conv(input_node.shape)
        else:
            conv = layer_utils.get_conv(input_node.shape)

        max_pooling = self.max_pooling
        if max_pooling is None:
            max_pooling = hp.Boolean("max_pooling", default=True)
        pool = layer_utils.get_max_pooling(input_node.shape)

        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        for i in range(utils.add_to_hp(self.num_blocks, hp)):
            for j in range(utils.add_to_hp(self.num_layers, hp)):
                output_node = conv(
                    utils.add_to_hp(
                        self.filters, hp, "filters_{i}_{j}".format(i=i, j=j)
                    ),
                    kernel_size,
                    padding=self._get_padding(kernel_size, output_node),
                    activation="relu",
                )(output_node)
            if max_pooling:
                output_node = pool(
                    kernel_size - 1,
                    padding=self._get_padding(kernel_size - 1, output_node),
                )(output_node)
            if dropout > 0:
                output_node = layers.Dropout(dropout)(output_node)
        return output_node
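
The self._get_padding helper used above is not shown. A plausible sketch of such a helper (an assumption, not necessarily the AutoKeras source) keeps "valid" padding while the feature map is still large relative to the kernel and falls back to "same" otherwise, so that a deep stack of conv and pooling layers cannot shrink the spatial dimensions to nothing:

def _get_padding(kernel_size, output_node):
    # Hypothetical helper: use "valid" padding while every spatial dimension
    # is at least twice the kernel size, otherwise switch to "same".
    if all(kernel_size * 2 <= length for length in output_node.shape[1:-1]):
        return "valid"
    return "same"
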
Example #7
    def build(self, hp, inputs=None):
        input_tensor = nest.flatten(inputs)[0]

        tokenizer_layer = keras_layers.BertTokenizer(
            max_sequence_length=utils.add_to_hp(self.max_sequence_length, hp))
        output_node = tokenizer_layer(input_tensor)

        bert_encoder = keras_layers.BertEncoder()

        output_node = bert_encoder(output_node)
        bert_encoder.load_pretrained_weights()

        return output_node
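
This final snippet wires the BERT tokenizer and encoder from autokeras.keras_layers together, then loads pretrained weights into the encoder. A minimal end-to-end usage sketch, assuming the block is exposed as ak.BertBlock in AutoKeras 1.x (names and arguments are assumptions):

import autokeras as ak

input_node = ak.TextInput()
# Raw strings are tokenized and encoded inside the block itself.
output_node = ak.BertBlock(max_sequence_length=128)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=2)
# clf.fit(x_train, y_train, epochs=2)   # x_train: a NumPy array of strings
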