def test_get_metadata_functional(self):
    """Metadata for a two-input functional model matches the expected dict."""
    first_input = keras.Input(shape=(10, ), name='model_input1')
    second_input = keras.Input(shape=(10, ), name='model_input2')
    hidden = keras.layers.Dense(32, activation='relu')(first_input)
    hidden = keras.layers.Dense(32, activation='relu')(hidden)
    merged = keras.layers.concatenate([hidden, second_input])
    sigmoid_out = keras.layers.Dense(1, activation='sigmoid')(merged)
    fun_model = keras.Model(inputs=[first_input, second_input],
                            outputs=sigmoid_out,
                            name='fun')
    builder = keras_metadata_builder.KerasGraphMetadataBuilder(fun_model)
    generated_md = builder.get_metadata()
    # Both inputs are plain numeric tensors with identity encoding; the
    # expected entries differ only in the tensor name.
    expected_inputs = {
        name: {
            'input_tensor_name': name + ':0',
            'modality': 'numeric',
            'encoding': 'identity',
        }
        for name in ('model_input1', 'model_input2')
    }
    expected_md = {
        'inputs': expected_inputs,
        'outputs': {
            'dense_2/Sigmoid': {
                'output_tensor_name': 'dense_2/Sigmoid:0'
            }
        },
        'framework': 'Tensorflow',
        'tags': ['explainable_ai_sdk'],
    }
    self.assertDictEqual(expected_md, generated_md)
# --- Example no. 2 ---
# 0
    def sample_input(cls, sequence_len: int) -> Dict[str, tf.Tensor]:
        """Build symbolic sample inputs for a transformer-style encoder.

        Args:
            sequence_len (int): length of the token sequence each input
                placeholder covers.

        Returns:
            Dict[str, tf.Tensor]: ``keras.Input`` tensors keyed by the
            standard names ``input_ids``, ``attention_mask`` and
            ``token_type_ids``.
        """
        # All three inputs share shape and dtype; only the name differs,
        # so build them uniformly (dict preserves this insertion order).
        return {
            name: keras.Input(
                shape=(sequence_len, ),
                dtype=tf.int32,
                name=name,
            )
            for name in ("input_ids", "attention_mask", "token_type_ids")
        }
# --- Example no. 3 ---
# 0
def PersonalizedAttentivePooling(dim1, dim2, dim3, seed=0):
    """Build a personalized (query-conditioned) attentive pooling model.

    Args:
        dim1 (int): first dimension of the value shape.
        dim2 (int): second dimension of the value shape.
        dim3 (int): dimension of the query vector.
        seed (int): seed for the glorot-uniform kernel initializer.

    Returns:
        object: a ``keras.Model`` mapping ``[values, query]`` to the
        attention-weighted summary of the values.
    """
    values_in = keras.Input(shape=(dim1, dim2), dtype="float32")
    query_in = keras.Input(shape=(dim3, ), dtype="float32")

    # Regularize the value vectors before scoring them against the query.
    dropped = layers.Dropout(0.2)(values_in)
    projected = layers.Dense(
        dim3,
        activation="tanh",
        kernel_initializer=keras.initializers.glorot_uniform(seed=seed),
        bias_initializer=keras.initializers.Zeros(),
    )(dropped)
    # Attention weights: softmax over query/value-projection similarities.
    scores = layers.Dot(axes=-1)([query_in, projected])
    weights = layers.Activation("softmax")(scores)
    pooled = layers.Dot((1, 1))([dropped, weights])

    return keras.Model([values_in, query_in], pooled)
# --- Example no. 4 ---
# 0
    def _build_userencoder(self, titleencoder, type="ini"):
        """The main function to create user encoder of LSTUR.

        Args:
            titleencoder (object): the news encoder of LSTUR.
            type (str): how the long-term user embedding is combined with
                the GRU over clicked titles. ``"ini"`` uses it as the GRU's
                initial state; ``"con"`` concatenates it with the GRU
                output and projects back to ``gru_unit`` dimensions.

        Return:
            object: the user encoder of LSTUR.

        Raises:
            ValueError: if ``type`` is neither ``"ini"`` nor ``"con"``.
        """
        hparams = self.hparams
        # Clicked-title history: (his_size, title_size) word ids per user.
        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )
        user_indexes = keras.Input(shape=(1,), dtype="int32")

        # One trainable embedding per known user id (long-term interest).
        user_embedding_layer = layers.Embedding(
            len(self.train_iterator.uid2index),
            hparams.gru_unit,
            trainable=True,
            embeddings_initializer="zeros",
        )

        long_u_emb = layers.Reshape((hparams.gru_unit,))(
            user_embedding_layer(user_indexes)
        )
        # Encode each clicked title with the shared news encoder.
        click_title_presents = layers.TimeDistributed(titleencoder)(his_input_title)

        if type == "ini":
            # Seed the GRU with the long-term embedding as initial state.
            user_present = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(
                layers.Masking(mask_value=0.0)(click_title_presents),
                initial_state=[long_u_emb],
            )
        elif type == "con":
            # Concatenate short-term GRU state with the long-term
            # embedding, then project back to gru_unit dimensions.
            short_uemb = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(layers.Masking(mask_value=0.0)(click_title_presents))

            user_present = layers.Concatenate()([short_uemb, long_u_emb])
            user_present = layers.Dense(
                hparams.gru_unit,
                bias_initializer=keras.initializers.Zeros(),
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
            )(user_present)
        else:
            # Fail fast with a clear message instead of hitting an
            # UnboundLocalError on `user_present` below.
            raise ValueError(
                'Unknown user encoder type: {0}; expected "ini" or "con".'.format(type)
            )

        model = keras.Model(
            [his_input_title, user_indexes], user_present, name="user_encoder"
        )
        return model
# --- Example no. 5 ---
# 0
    def _build_lstur(self):
        """The main function to create LSTUR's logic. The core of LSTUR
        is a user encoder and a news encoder.

        Returns:
            object: a model used to train.
            object: a model used to evaluate and inference.
        """
        hparams = self.hparams

        # Clicked-title history fed to the user encoder.
        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )
        # Candidate titles for training — npratio + 1 titles per sample
        # (presumably npratio negatives plus one positive; verify against
        # the training iterator).
        pred_input_title = keras.Input(
            shape=(hparams.npratio + 1, hparams.title_size), dtype="int32"
        )
        # A single candidate title for scoring / inference.
        pred_input_title_one = keras.Input(
            shape=(
                1,
                hparams.title_size,
            ),
            dtype="int32",
        )
        pred_title_reshape = layers.Reshape((hparams.title_size,))(pred_input_title_one)
        user_indexes = keras.Input(shape=(1,), dtype="int32")

        # Word embeddings initialized from the pretrained word2vec matrix
        # and fine-tuned during training.
        embedding_layer = layers.Embedding(
            self.word2vec_embedding.shape[0],
            hparams.word_emb_dim,
            weights=[self.word2vec_embedding],
            trainable=True,
        )

        # The user encoder and the news encoder share the same title
        # encoder instance (shared weights).
        titleencoder = self._build_newsencoder(embedding_layer)
        self.userencoder = self._build_userencoder(titleencoder, type=hparams.type)
        self.newsencoder = titleencoder

        user_present = self.userencoder([his_input_title, user_indexes])
        news_present = layers.TimeDistributed(self.newsencoder)(pred_input_title)
        news_present_one = self.newsencoder(pred_title_reshape)

        # Training head: softmax over the candidate set.
        preds = layers.Dot(axes=-1)([news_present, user_present])
        preds = layers.Activation(activation="softmax")(preds)

        # Scoring head: independent sigmoid on a single candidate.
        pred_one = layers.Dot(axes=-1)([news_present_one, user_present])
        pred_one = layers.Activation(activation="sigmoid")(pred_one)

        model = keras.Model([user_indexes, his_input_title, pred_input_title], preds)
        scorer = keras.Model(
            [user_indexes, his_input_title, pred_input_title_one], pred_one
        )

        return model, scorer
# --- Example no. 6 ---
# 0
    def _build_newsencoder(self, embedding_layer):
        """The main function to create news encoder of LSTUR.

        Args:
            embedding_layer (object): a word embedding layer.

        Return:
            object: the news encoder of LSTUR.
        """
        hparams = self.hparams
        sequences_input_title = keras.Input(shape=(hparams.title_size,), dtype="int32")
        embedded_sequences_title = embedding_layer(sequences_input_title)

        y = layers.Dropout(hparams.dropout)(embedded_sequences_title)
        # Contextualize word embeddings with a 1-D CNN over the title.
        y = layers.Conv1D(
            hparams.filter_num,
            hparams.window_size,
            activation=hparams.cnn_activation,
            padding="same",
            bias_initializer=keras.initializers.Zeros(),
            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
        )(y)
        y = layers.Dropout(hparams.dropout)(y)
        # Recompute the padding mask from the raw token ids and overwrite
        # the CNN output's mask so padded positions are ignored downstream.
        y = layers.Masking()(
            OverwriteMasking()([y, ComputeMasking()(sequences_input_title)])
        )
        # Attention pooling collapses the title to a single vector.
        pred_title = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y)
        model = keras.Model(sequences_input_title, pred_title, name="news_encoder")
        return model
# --- Example no. 7 ---
# 0
def get_keras_model_v1():
    """Build a small MNIST classifier using the TF1-compat Keras API."""
    import tensorflow.compat.v1.keras as keras

    img_in = keras.Input(shape=(784,), name="img")
    hidden = keras.layers.Dense(64, activation="relu")(img_in)
    hidden = keras.layers.Dense(64, activation="relu")(hidden)
    probs = keras.layers.Dense(10, activation="softmax")(hidden)

    return keras.Model(inputs=img_in, outputs=probs, name="mnist_model")
    def test_get_metadata_multiple_outputs_incorrect_output(self):
        """Passing a tensor that is not a model output raises ValueError."""
        model_in = keras.Input(shape=(10, ), name='model_input')
        hidden = keras.layers.Dense(32, activation='relu')(model_in)
        hidden = keras.layers.Dense(32, activation='relu')(hidden)
        sigmoid_out = keras.layers.Dense(1, activation='sigmoid')(hidden)
        relu_out = keras.layers.Dense(1, activation='relu')(hidden)
        fun_model = keras.Model(inputs=[model_in],
                                outputs=[sigmoid_out, relu_out],
                                name='fun')

        # layers[0].output is the input tensor, not one of the model outputs.
        with self.assertRaisesRegex(
                ValueError, 'Provided output is not one of model outputs'):
            keras_metadata_builder.KerasGraphMetadataBuilder(
                fun_model, outputs_to_explain=[fun_model.layers[0].output])
    def test_get_metadata_multiple_outputs(self):
        """Only the explicitly selected model output appears in the metadata."""
        model_in = keras.Input(shape=(10, ), name='model_input')
        hidden = keras.layers.Dense(32, activation='relu')(model_in)
        hidden = keras.layers.Dense(32, activation='relu')(hidden)
        sigmoid_out = keras.layers.Dense(1, activation='sigmoid')(hidden)
        relu_out = keras.layers.Dense(1, activation='relu')(hidden)
        fun_model = keras.Model(inputs=[model_in],
                                outputs=[sigmoid_out, relu_out],
                                name='fun')

        # Restrict explanation to the first (sigmoid) output only.
        builder = keras_metadata_builder.KerasGraphMetadataBuilder(
            fun_model, outputs_to_explain=[fun_model.outputs[0]])
        generated_md = builder.get_metadata()
        expected_outputs = {
            'dense_2/Sigmoid': {
                'output_tensor_name': 'dense_2/Sigmoid:0'
            }
        }
        self.assertDictEqual(expected_outputs, generated_md['outputs'])
def neural_network(input_shape):
    """Build a small CNN binary classifier.

    Args:
        input_shape: shape tuple of one input sample (excluding the batch
            dimension), e.g. (height, width, channels).

    Returns:
        An uncompiled ``Model`` mapping the image input to a single
        sigmoid probability.
    """
    inputs = keras.Input(shape=input_shape)

    # Layer 1: downsample first, then conv + BN + LeakyReLU, then pool again.
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_1')(inputs)
    x = Conv2D(32, kernel_size=(5, 5), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(4, 4))(x)

    # Layer 2
    x = Conv2D(64, kernel_size=(5, 5), padding='same', name='Conv2D_2')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_3')(x)

    x = Flatten(name='Flatten')(x)

    # Dense head: dropout after the first dense block only.
    x = Dense(128, name='Dense_2')(x)
    x = BatchNormalization(name='BatchNormalization_3')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.5, name='Dropout_2')(x)

    x = Dense(128, name='Dense_3')(x)
    x = BatchNormalization(name='BatchNormalization_4')(x)
    x = LeakyReLU(alpha=0.1)(x)

    outputs = Dense(1, activation='sigmoid', name='Dense_4')(x)

    model = Model(inputs, outputs)
    return model
# --- Example no. 11 ---
# 0
def build_model(
    n_classes: int,
    n_packet_features: int,
    n_meta_features: int = 7,
    dilations: bool = True,
    tag: str = "varcnn",
):
    """Build and compile the Var-CNN model.

    The resulting model takes a single input of shape
    (n_samples, n_packet_features + n_meta_features). The meta features
    must be the rightmost (last) features in the matrix.  The model
    handles separating the two types of features and reshaping them
    as necessary.

    Parameters:
    -----------
    n_classes :
        The number of classes to be predicted.

    n_packet_features :
        The number of packet features such as the number of interarrival
        times or the number of packet directions or sizes.

    n_meta_features:
        The number of meta features such as total packet counts, total
        transmission duration, etc.

    dilations :
        Whether the ResNet uses dilated convolution blocks.

    tag :
        Name tag passed through to the ResNet layers.
    """
    has_metadata = n_meta_features > 0

    # Packet branch: crop off the trailing metadata columns (if any),
    # add a channel axis, and run a 1-D ResNet-18 over the sequence.
    input_layer = keras.Input(
        shape=(n_packet_features + n_meta_features, ), name="input")

    packet_slice = input_layer
    if has_metadata:
        packet_slice = Crop(end=n_packet_features)(input_layer)
    resnet_in = layers.Reshape((n_packet_features, 1))(packet_slice)
    block_fn = dilated_basic_1d if dilations else basic_1d
    resnet_out = ResNet18(resnet_in, tag, block=block_fn)

    branches = [resnet_out]
    combined = branches[0]

    # Metadata branch: a small dense embedding of the trailing columns.
    if has_metadata:
        meta = Crop(start=-n_meta_features)(input_layer)
        meta = layers.Dense(32)(meta)
        meta = layers.BatchNormalization()(meta)
        meta = layers.Activation('relu')(meta)

        branches.append(meta)
        combined = layers.Concatenate()(branches)

    # A final fully-connected stage is preferable when multiple branches
    # were combined.
    if len(branches) > 1:
        combined = layers.Dense(1024)(combined)
        combined = layers.BatchNormalization()(combined)
        combined = layers.Activation('relu')(combined)
        combined = layers.Dropout(0.5)(combined)

    model_output = layers.Dense(units=n_classes, activation='softmax',
                                name='model_output')(combined)

    model = keras.Model(inputs=input_layer, outputs=model_output)
    model.compile(
        loss='categorical_crossentropy', metrics=['accuracy'],
        optimizer=keras.optimizers.Adam(0.001))

    return model