Example #1
def PersonalizedAttentivePooling(dim1, dim2, dim3, seed=0):
    """Soft alignment attention implementation.

    Args:
        dim1 (int): first dimension of the value shape.
        dim2 (int): second dimension of the value shape.
        dim3 (int): dimension of the query vector.

    Returns:
        object: a model that computes the attention-weighted summary of the input values.
    """
    vecs_input = keras.Input(shape=(dim1, dim2), dtype="float32")
    query_input = keras.Input(shape=(dim3, ), dtype="float32")

    user_vecs = layers.Dropout(0.2)(vecs_input)
    user_att = layers.Dense(
        dim3,
        activation="tanh",
        kernel_initializer=keras.initializers.glorot_uniform(seed=seed),
        bias_initializer=keras.initializers.Zeros(),
    )(user_vecs)
    user_att2 = layers.Dot(axes=-1)([query_input, user_att])
    user_att2 = layers.Activation("softmax")(user_att2)
    user_vec = layers.Dot((1, 1))([user_vecs, user_att2])

    model = keras.Model([vecs_input, query_input], user_vec)
    return model
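A minimal usage sketch for the model above (added for illustration, not part of the original snippet). It assumes TensorFlow 2.x with `keras` and `layers` imported as in the example; the batch size and the dim1/dim2/dim3 values are arbitrary.

import numpy as np

# dim1=50 value rows, dim2=16-dim value vectors, dim3=8-dim query vector
pooler = PersonalizedAttentivePooling(50, 16, 8)
values = np.random.rand(4, 50, 16).astype("float32")  # (batch, dim1, dim2)
query = np.random.rand(4, 8).astype("float32")         # (batch, dim3)
pooled = pooler([values, query])                        # weighted summary, (batch, dim2)
print(pooled.shape)  # (4, 16)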
Example #2
def AttentionModel(sr=16000, iLen=25000):
    # Note: the input shape is taken from a globally defined `x_train`;
    # the `sr` and `iLen` arguments are not used inside this function.
    inputs = L.Input(x_train.shape[1:], name='Input')

    x = L.Conv2D(10, (5, 1), activation='relu', padding='same', name='Conv1')(inputs)
    x = L.BatchNormalization(name='BN1')(x)
    x = L.Conv2D(1, (5, 1), activation='relu', padding='same', name='Conv2')(x)
    x = L.BatchNormalization(name='BN2')(x)

    x = L.Reshape(x.shape[1:-1], name='Squeeze')(x)

    n_units = 64
    x = L.LSTM(n_units, return_sequences=True, name='LSTM_Sequences')(x)  

    # Calculate Unit Importance
    xLast = L.Lambda(lambda q: q[:, -1], name='FinalSequence')(x)  # [b_s, vec_dim]
    xLast = L.Dense(xLast.shape[-1], name='UnitImportance')(xLast)

    # Calculate attention
    attScores = L.Dot(axes=[1, 2],name='AttentionScores')([xLast, x])
    attScores = L.Softmax(name='AttentionSoftmax')(attScores)  

    x = L.Dot(axes=[1, 1], name='AttentionVector')([attScores, x])  
    x = L.Dense(32, activation='relu', name='FC')(x)
    outputs = L.Dense(5, activation='softmax', name='Output')(x)
    model = Model(inputs=[inputs], outputs=[outputs], name='Attention')

    return model
Example #3
    def build(self):
        query_input = self.new_query_input(size=20)
        url_input = self.new_url_input()
        title_input = self.new_title_input()
        body_input = self.new_body_input()
        inputs = [query_input, url_input, title_input, body_input]

        word_embedding = layers.Embedding(self.total_words, self.embedding_dim)
        query = layers.GlobalMaxPooling1D()(word_embedding(query_input))
        url = layers.GlobalMaxPooling1D()(word_embedding(url_input))
        title = layers.GlobalMaxPooling1D()(word_embedding(title_input))
        body = layers.GlobalMaxPooling1D()(word_embedding(body_input))
        input_features = [query, url, title, body]

        query_url = layers.Dot(axes=1)([query, url])
        query_title = layers.Dot(axes=1)([query, title])
        query_body = layers.Dot(axes=1)([query, body])
        interactions = layers.Add()([query_url, query_title, query_body])

        features = []
        for feature in input_features:
            feature = layers.Dense(1, activation='relu')(feature)
            features.append(feature)
        features = layers.Add()(features)
        features = AddBias0()(features)

        output = layers.Activation('sigmoid', name='label')(features + interactions)
        return tf.keras.Model(inputs=inputs, outputs=output, name=self.name)
Example #4
def AttRNNSpeechModel(nCategories, samplingrate=16000,
                      inputLength=16000, rnn_func=L.LSTM):
    # simple LSTM
    sr = samplingrate
    iLen = inputLength

    inputs = L.Input((inputLength,), name='input')

    x = L.Reshape((1, -1))(inputs)

    m = Melspectrogram(n_dft=1024, n_hop=128, input_shape=(1, iLen),
                       padding='same', sr=sr, n_mels=80,
                       fmin=40.0, fmax=sr / 2, power_melgram=1.0,
                       return_decibel_melgram=True, trainable_fb=False,
                       trainable_kernel=False,
                       name='mel_stft')
    m.trainable = False

    x = m(x)

    x = Normalization2D(int_axis=0, name='mel_stft_norm')(x)

    # note that Melspectrogram puts the sequence in shape (batch_size, melDim, timeSteps, 1)
    # we would rather have it the other way around for LSTMs

    x = L.Permute((2, 1, 3))(x)

    x = L.Conv2D(10, (5, 1), activation='relu', padding='same')(x)
    x = L.BatchNormalization()(x)
    x = L.Conv2D(1, (5, 1), activation='relu', padding='same')(x)
    x = L.BatchNormalization()(x)

    # x = Reshape((125, 80)) (x)
    # keras.backend.squeeze(x, axis)
    x = L.Lambda(lambda q: K.squeeze(q, -1), name='squeeze_last_dim')(x)

    x = L.Bidirectional(rnn_func(64, return_sequences=True)
                        )(x)  # [b_s, seq_len, vec_dim]
    x = L.Bidirectional(rnn_func(64, return_sequences=True)
                        )(x)  # [b_s, seq_len, vec_dim]

    xFirst = L.Lambda(lambda q: q[:, -1])(x)  # last time step, [b_s, vec_dim]
    query = L.Dense(128)(xFirst)  # attention query derived from the last time step

    # dot product attention
    attScores = L.Dot(axes=[1, 2])([query, x])
    attScores = L.Softmax(name='attSoftmax')(attScores)  # [b_s, seq_len]

    # rescale sequence
    attVector = L.Dot(axes=[1, 1])([attScores, x])  # [b_s, vec_dim]

    x = L.Dense(64, activation='relu')(attVector)
    x = L.Dense(32)(x)

    output = L.Dense(nCategories, activation='softmax', name='output')(x)

    model = Model(inputs=[inputs], outputs=[output])

    return model
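As a shape check for the attention block above (a sketch added here, not taken from the original repository), the Dot/Softmax/Dot pattern is plain dot-product attention and can be reproduced with tensor ops. TensorFlow 2.x is assumed; the shapes are illustrative.

import tensorflow as tf
from tensorflow.keras import layers as L

seq = tf.random.normal((2, 10, 128))          # [b_s, seq_len, vec_dim]
query = tf.random.normal((2, 128))            # [b_s, vec_dim]

scores = L.Dot(axes=[1, 2])([query, seq])     # [b_s, seq_len]
weights = L.Softmax()(scores)
att_vec = L.Dot(axes=[1, 1])([weights, seq])  # [b_s, vec_dim]

# The same attention vector written with einsum:
ref = tf.einsum("bs,bsd->bd",
                tf.nn.softmax(tf.einsum("bd,bsd->bs", query, seq)), seq)
print(att_vec.shape, ref.shape)  # (2, 128) (2, 128)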
Example #5
    def _build_lstur(self):
        """The main function to create LSTUR's logic. The core of LSTUR
        is a user encoder and a news encoder.

        Returns:
            object: a model used to train.
            object: a model used to evaluate and inference.
        """
        hparams = self.hparams

        his_input_title = keras.Input(shape=(hparams.his_size,
                                             hparams.title_size),
                                      dtype="int32")
        pred_input_title = keras.Input(shape=(hparams.npratio + 1,
                                              hparams.title_size),
                                       dtype="int32")
        pred_input_title_one = keras.Input(
            shape=(
                1,
                hparams.title_size,
            ),
            dtype="int32",
        )
        pred_title_reshape = layers.Reshape(
            (hparams.title_size, ))(pred_input_title_one)
        user_indexes = keras.Input(shape=(1, ), dtype="int32")

        embedding_layer = layers.Embedding(
            self.word2vec_embedding.shape[0],
            hparams.word_emb_dim,
            weights=[self.word2vec_embedding],
            trainable=True,
        )

        titleencoder = self._build_newsencoder(embedding_layer)
        self.userencoder = self._build_userencoder(titleencoder,
                                                   type=hparams.type)
        self.newsencoder = titleencoder

        user_present = self.userencoder([his_input_title, user_indexes])
        news_present = layers.TimeDistributed(
            self.newsencoder)(pred_input_title)
        news_present_one = self.newsencoder(pred_title_reshape)

        preds = layers.Dot(axes=-1)([news_present, user_present])
        preds = layers.Activation(activation="softmax")(preds)

        pred_one = layers.Dot(axes=-1)([news_present_one, user_present])
        pred_one = layers.Activation(activation="sigmoid")(pred_one)

        model = keras.Model([user_indexes, his_input_title, pred_input_title],
                            preds)
        scorer = keras.Model(
            [user_indexes, his_input_title, pred_input_title_one], pred_one)

        return model, scorer
Example #6
def attention_speech_model(num_category,
                           sampling_rate=16000,
                           input_length=16000):

    inputs = layers.Input((input_length, ), name='input')
    x = layers.Reshape((1, -1))(inputs)

    m = Melspectrogram(input_shape=(1, input_length),
                       n_dft=1024,
                       n_hop=128,
                       padding='same',
                       sr=sampling_rate,
                       n_mels=80,
                       fmin=40.0,
                       fmax=sampling_rate / 2,
                       power_melgram=1.0,
                       return_decibel_melgram=True,
                       trainable_fb=False,
                       trainable_kernel=False,
                       name='mel_tft')
    m.trainable = False
    x = m(x)

    x = Normalization2D(int_axis=0, name='norm')(x)
    x = layers.Permute((2, 1, 3))(x)

    x = layers.Conv2D(10, (5, 1), activation='relu', padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(1, (5, 1), activation='relu', padding='same')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Lambda(lambda t: K.squeeze(t, -1), name='squeeze_last_dim')(x)
    x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
    x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)

    x_first = layers.Lambda(lambda t: t[:, t.shape[1] // 2])(x)
    query = layers.Dense(128)(x_first)

    attention_scores = layers.Dot([1, 2])([query, x])
    attention_scores = layers.Softmax(
        name='attention_softmax')(attention_scores)
    attention_vector = layers.Dot(axes=[1, 1])([attention_scores, x])

    x = layers.Dense(64)(attention_vector)
    x = layers.LeakyReLU()(x)
    x = layers.Dropout(0.5)(x)

    x = layers.Dense(32)(x)
    x = layers.Dropout(0.5)(x)

    out = layers.Dense(num_category, activation='softmax', name="output")(x)
    model = Model(inputs=inputs, outputs=out)
    return model
Example #7
def create_model(vocabulary_size, embedding_dim):

    inputs = {
        "target": layers.Input(name="target", shape=(), dtype="int32"),
        "context": layers.Input(name="context", shape=(), dtype="int32"),
    }
    # Initialize item embeddings.
    embed_item = layers.Embedding(
        input_dim=vocabulary_size,
        output_dim=embedding_dim,
        embeddings_initializer="he_normal",
        embeddings_regularizer=keras.regularizers.l2(1e-6),
        name="item_embeddings",
    )
    # Lookup embeddings for target.
    target_embeddings = embed_item(inputs["target"])
    # Lookup embeddings for context.
    context_embeddings = embed_item(inputs["context"])
    # Add one hidden layer
    target_hidden_1 = layers.Dense(
        4, activation=tf.nn.leaky_relu)(target_embeddings)
    context_hidden_1 = layers.Dense(
        4, activation=tf.nn.leaky_relu)(context_embeddings)
    target_hidden_2 = layers.Dense(
        4, activation=tf.nn.leaky_relu)(target_hidden_1)
    context_hidden_2 = layers.Dense(
        4, activation=tf.nn.leaky_relu)(context_hidden_1)
    # Compute dot similarity between target and context embeddings.
    logits = layers.Dot(axes=1, normalize=False, name="dot_similarity")(
        [target_hidden_2, context_hidden_2])
    # Create the model.
    model = keras.Model(inputs=inputs, outputs=logits)
    return model
Example #8
    def __init__(self, config_parameters):
        super(lsm_network_v1, self).__init__()
        self.input_dim = config_parameters['input_dim']
        self.hidden_num = config_parameters['hidden_num']
        self.hidden_len = config_parameters['hidden_len']
        self.label_num = config_parameters['label_num']
        self.Y_labeled = config_parameters['Y_labeled']
        self.alpha = config_parameters['alpha']
        self.dense1 = layers.Dense(self.hidden_len[0], activation='relu')

        self.layer = {}
        for i in range(self.label_num):
            self.layer['output_{}'.format(i)] = layers.Dense(
                2, activation='sigmoid')
            for j in range(self.hidden_num):
                self.layer['label{}_layer{}'.format(i, j)] = layers.Dense(
                    self.hidden_len[j], activation='relu')

        self.layer['K'] = layers.Dense(self.hidden_len[-1],
                                       activation='tanh',
                                       name='K')
        self.layer['Dot'] = layers.Dense(self.hidden_len[-1], use_bias=False)
        self.layer['ouput_final'] = layers.Dense(self.label_num,
                                                 activation='softmax',
                                                 name='outputs')
        self.flatten = layers.Flatten()
        self.dense_1 = layers.Dense(1)
        self.dot = layers.Dot((1, 1))
Example #9
    def generate_model(self, embed_size = 50, classification = False):
        inp = kl.Input(name = 'input_player', shape=[1])
        target = kl.Input(name = 'target_player', shape=[1])

        # Embedding input player(shape = (None, 1, 50))
        inp_embed = kl.Embedding(name = 'input_embedding',
                                 input_dim = self.player_num,
                                 output_dim = embed_size)(inp)
        # Embedding target player(shape = (None, 1, 50))
        target_embed = kl.Embedding(name = 'target_embedding',
                                    input_dim = self.player_num,
                                    output_dim = embed_size)(target)

        # Merge layers with dot product
        merged = kl.Dot(name = 'dot_product',
                        normalize = True,
                        axes = 2)([inp_embed, target_embed])
        merged = kl.Reshape(target_shape = [1])(merged)

        # If classification
        if classification:
            merged = kl.Dense(1, activation = 'sigmoid')(merged)
            model = Model(inputs = [inp, target], outputs = merged)
            model.compile(optimizer = 'Adam', loss = 'binary_crossentropy',
                          metrics = ['accuracy'])
        else:
            model = Model(inputs = [inp, target], outputs = merged)
            model.compile(optimizer = 'Adam', loss = 'mse')
        return model
Example #10
 def __init__(self, **kwargs):
     super(CapsSimilarity, self).__init__(**kwargs)
     self.layer_normal1 = layers.LayerNormalization()
     # self.dot = layers.Dot((2, 2), normalize=True)
     self.dot = layers.Dot((2, 2))
     self.layer_normal2 = layers.LayerNormalization()
     self.activation = layers.ELU()
Example #11
    def build(self):
        text_inputs = [
            self.new_query_input(size=20),
            self.new_url_input(),
            self.new_title_input(),
            self.new_body_input(),
        ]
        inputs = text_inputs

        word_embedding = layers.Embedding(self.total_words, self.embedding_dim)
        text_features = [word_embedding(text_input) for text_input in text_inputs]
        text_features = [layers.GlobalMaxPooling1D()(feature) for feature in text_features]
        input_features = text_features

        interactions = []
        for feature1, feature2 in itertools.combinations(input_features, 2):
            interactions.append(layers.Dot(axes=1)([feature1, feature2]))
        interactions = layers.Add()(interactions)

        features = []
        for feature in input_features:
            feature = layers.Dense(1, activation='relu')(feature)
            features.append(feature)
        features = layers.Add()(features)
        features = AddBias0()(features)

        output = layers.Activation('sigmoid', name='label')(features + interactions)
        return tf.keras.Model(inputs=inputs, outputs=output, name=self.name)
Example #12
def create_baseline_model():
    # Receive the user as an input.
    user_input = layers.Input(name="user_id", shape=(), dtype=tf.string)
    # Get user embedding.
    user_embedding = embedding_encoder(
        vocabulary=user_vocabulary, embedding_dim=base_embedding_dim, name="user"
    )(user_input)

    # Receive the movie as an input.
    movie_input = layers.Input(name="movie_id", shape=(), dtype=tf.string)
    # Get embedding.
    movie_embedding = embedding_encoder(
        vocabulary=movie_vocabulary, embedding_dim=base_embedding_dim, name="movie"
    )(movie_input)

    # Compute dot product similarity between user and movie embeddings.
    logits = layers.Dot(axes=1, name="dot_similarity")(
        [user_embedding, movie_embedding]
    )
    # Convert to rating scale.
    prediction = keras.activations.sigmoid(logits) * 5
    # Create the model.
    model = keras.Model(
        inputs=[user_input, movie_input], outputs=prediction, name="baseline_model"
    )
    return model
Example #13
def create_memory_efficient_model():
    # Take the user as an input.
    user_input = layers.Input(name="user_id", shape=(), dtype=tf.string)
    # Get user embedding.
    user_embedding = QREmbedding(
        vocabulary=user_vocabulary,
        embedding_dim=base_embedding_dim,
        num_buckets=user_embedding_num_buckets,
        name="user_embedding",
    )(user_input)

    # Take the movie as an input.
    movie_input = layers.Input(name="movie_id", shape=(), dtype=tf.string)
    # Get embedding.
    movie_embedding = MDEmbedding(
        blocks_vocabulary=movie_blocks_vocabulary,
        blocks_embedding_dims=movie_blocks_embedding_dims,
        base_embedding_dim=base_embedding_dim,
        name="movie_embedding",
    )(movie_input)

    # Compute dot product similarity between user and movie embeddings.
    logits = layers.Dot(axes=1, name="dot_similarity")(
        [user_embedding, movie_embedding]
    )
    # Convert to rating scale.
    prediction = keras.activations.sigmoid(logits) * 5
    # Create the model.
    model = keras.Model(
        inputs=[user_input, movie_input], outputs=prediction, name="baseline_model"
    )
    return model
Example #14
def transformation_block(inputs: tf.Tensor, num_features: int,
                         name: str) -> tf.Tensor:
    transformed_features = transformation_net(inputs, num_features, name=name)
    transformed_features = layers.Reshape(
        (num_features, num_features))(transformed_features)
    return layers.Dot(axes=(2, 1),
                      name=f"{name}_mm")([inputs, transformed_features])
Example #15
    def __init__(self, bandit, episodes, time_steps, trials, epsilon, beta, replay_buffer = False, decaying_epsilon = False, big_reward = False):
        super().__init__(bandit)
        self.episodes = episodes
        self.time_steps = time_steps
        self.trials = trials
        self.epsilon = epsilon
        self.beta = beta
        self.buffer_data = []

        self.replay_buffer = replay_buffer
        self.decaying_epsilon = decaying_epsilon
        self.big_reward = big_reward
        #########################################################################################################################################
        
        layer_init = tf.keras.initializers.VarianceScaling()
        act_fn = tf.nn.relu

        #########################################################################################################################################
        bandit_inputs = layers.Input( shape = (bandit.k,2) )
        flatten = layers.Flatten()( bandit_inputs )
        dense1 = layers.Dense( units = 100, activation = act_fn , kernel_initializer = layer_init, bias_initializer = layer_init )( flatten )
        dense2 = layers.Dense( units = 50, activation = act_fn , kernel_initializer = layer_init, bias_initializer = layer_init )( dense1 )
        Q_vals = layers.Dense( units = bandit.k, activation = None , kernel_initializer = layer_init, bias_initializer = layer_init )( dense2 )
        self.Q_value_compute = tf.keras.Model( inputs = bandit_inputs, outputs = Q_vals )
        #########################################################################################################################################


        #########################################################################################################################################
        action_inputs = layers.Input( shape = (bandit.k,) )
        selected_Q_value = layers.Dot( axes = 1 )( [ Q_vals, action_inputs ] )
        self.Q_value_selected = tf.keras.Model( inputs = [ bandit_inputs, action_inputs ], outputs = selected_Q_value )
        self.Q_value_selected.compile( optimizer = 'adam', loss = 'mean_squared_error' )
Example #16
 def build(self):
     embedding = layers.Embedding(self.total_words, self.embedding_dim)
     query_inputs, encoded_query, query_encoder = self.build_query_encoder(embedding)
     recipe_inputs, encoded_recipe, recipe_encoder = self.build_recipe_encoder(embedding)
     inputs = query_inputs + recipe_inputs
     output = layers.Dot(axes=1, normalize=True, name='label')([encoded_query, encoded_recipe])
     model = tf.keras.Model(inputs=inputs, outputs=output, name=self.name)
     return model
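For reference (a sketch with illustrative shapes, not part of the original model), Dot(axes=1, normalize=True) as used for the query/recipe match computes cosine similarity between the two encoded vectors:

import tensorflow as tf
from tensorflow.keras import layers

a = tf.random.normal((4, 32))
b = tf.random.normal((4, 32))

cos_layer = layers.Dot(axes=1, normalize=True)([a, b])   # (4, 1)
cos_manual = tf.reduce_sum(
    tf.nn.l2_normalize(a, axis=1) * tf.nn.l2_normalize(b, axis=1),
    axis=1, keepdims=True)
print(bool(tf.reduce_all(tf.abs(cos_layer - cos_manual) < 1e-5)))  # True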
Example #17
def initializeSnakeIdentifier(num_params=NUM_PARAMS, channels=CHANNELS):
    curr_channels = CHANNEL_MULT
    class_in = layers.Input(shape=(1, ))
    embedding_layer = layers.Embedding(OUTPUT_CLASSES, CHANNELS)(class_in)

    img_in = layers.Input(shape=(FINAL_IMG_SIZE, FINAL_IMG_SIZE, 3))
    disc = CustomLayers.Conv2D(curr_channels, kernel_size=3,
                               padding='same')(img_in)

    # curr_channels *= 2
    # disc = CustomLayers.ResBlockCondDownD(curr_channels, name='DiscBlockDown1', down=True)(disc)
    # disc = CustomLayers.ResBlockCondDownD(curr_channels, name='DiscBlock1')(disc)
    disc = CustomLayers.SoftAttentionMax(curr_channels)(disc)

    curr_channels *= 4
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlockDown2',
                                          down=True)(disc)
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlock2')(disc)

    curr_channels *= 2
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlockDown3',
                                          down=True)(disc)
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlock3')(disc)

    curr_channels *= 2
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlockDown4',
                                          down=True)(disc)
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlock4')(disc)

    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlockDown5',
                                          down=True)(disc)
    disc = CustomLayers.ResBlockCondDownD(curr_channels,
                                          name='DiscBlock5')(disc)

    disc = layers.ReLU()(disc)
    disc = CustomLayers.GlobalSumPooling2D()(disc)

    embedding_layer = layers.Flatten()(embedding_layer)
    embedding_layer = layers.Dot((1, 1))([disc, embedding_layer])

    disc = CustomLayers.Dense(1)(disc)

    judge = layers.Add(name='FinalAdd')([disc, embedding_layer])

    discriminator = Model(inputs=[img_in, class_in],
                          outputs=judge,
                          name='Discriminator')

    discriminator.summary()
    return discriminator
Example #18
def build_receiver_model(n_images,
                         input_image_shape,
                         embedding_size,
                         temperature,
                         vocabulary_size,
                         optimizer,
                         image_embedding_layer=None,
                         verbose=False,
                         **kwargs):
    image_inputs = [
        layers.Input(shape=input_image_shape,
                     name=f"R_image_in_{i}",
                     dtype="float32") for i in range(n_images)
    ]

    if not image_embedding_layer:
        image_embedding_layer = layers.Dense(embedding_size,
                                             name="R_image_embedding")

    softmax = layers.Softmax()

    symbol_input = layers.Input(shape=[1], dtype="int32", name="R_symbol_in")
    symbol_embedding = layers.Embedding(input_dim=vocabulary_size,
                                        output_dim=embedding_size,
                                        name="R_symbol_embedding")
    dot_product = layers.Dot(axes=-1, name="R_dot_product")

    y_images = [image_embedding_layer(x) for x in image_inputs]
    y_symbol = symbol_embedding(symbol_input)
    y = [dot_product([img, y_symbol]) for img in y_images]
    y = layers.concatenate(y, axis=-1)
    y = y / temperature
    y = softmax(y)

    model_predict = models.Model([*image_inputs, symbol_input],
                                 y,
                                 name="R_predict")

    index = layers.Input(shape=[1], dtype="int32", name="R_index_in")
    y_selected = layers.Lambda(
        lambda probs_index: tf.gather(*probs_index, axis=-1),
        name="R_gather")([y, index])

    @tf.function
    def loss(target, prediction):
        return -K.log(prediction) * target

    model_train = models.Model([*image_inputs, symbol_input, index],
                               y_selected,
                               name="R_train")
    model_train.compile(loss=loss, optimizer=optimizer)

    if verbose:
        model_predict.summary()
        model_train.summary()

    return model_predict, model_train
Example #19
def cf_model(n_users, n_movies):
    user_input = layers.Input(shape=(1, ))
    user_x = layers.Embedding(n_users, 20, input_length=1,
                              name='user_embed')(user_input)
    item_input = layers.Input(shape=(1, ))
    item_x = layers.Embedding(n_movies, 20, input_length=1,
                              name='item_embed')(item_input)
    rating = layers.Dot(axes=-1)([user_x, item_x])
    rating = layers.Flatten()(rating)
    return models.Model([user_input, item_input], rating)
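A hypothetical training sketch for cf_model (added for illustration; the data below is random and the hyperparameters are arbitrary, TensorFlow 2.x and NumPy assumed):

import numpy as np

model = cf_model(n_users=100, n_movies=200)
model.compile(optimizer="adam", loss="mse")

user_ids = np.random.randint(0, 100, size=(256, 1))
item_ids = np.random.randint(0, 200, size=(256, 1))
ratings = np.random.uniform(1.0, 5.0, size=(256, 1)).astype("float32")

model.fit([user_ids, item_ids], ratings, batch_size=32, epochs=1, verbose=0)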
Example #20
    def build(self):
        text_inputs = [
            self.new_query_input(),
            self.new_title_input(),
            self.new_ingredients_input(),
            self.new_description_input(),
        ]
        country_input = self.new_country_input()
        doc_id_input = self.new_doc_id_input()
        inputs = text_inputs + [country_input, doc_id_input]

        word_embedding = layers.Embedding(self.total_words,
                                          self.embedding_dim,
                                          name='word_embedding')
        texts = [
            layers.GlobalMaxPooling1D()(word_embedding(text_input))
            for text_input in text_inputs
        ]
        country_embedding = layers.Embedding(self.total_countries,
                                             self.embedding_dim)
        country = country_embedding(country_input)
        country = tf.reshape(country, shape=(
            -1,
            self.embedding_dim,
        ))
        image_embedding = self.load_pretrained_embedding(
            embedding_filepath=
            f'{project_dir}/data/raw/en_2020-03-16T00_04_34_recipe_image_tagspace5000_300.pkl',
            embedding_dim=300,
            name='image_embedding')
        image = image_embedding(doc_id_input)
        image = tf.reshape(image, shape=(
            -1,
            300,
        ))
        image = layers.Dropout(.2)(image)
        image = layers.Dense(self.embedding_dim)(image)
        input_features = texts + [country, image]

        interactions = []
        for feature1, feature2 in itertools.combinations(input_features, 2):
            interactions.append(layers.Dot(axes=1)([feature1, feature2]))
        interactions = layers.Add()(interactions)

        features = []
        for feature in input_features:
            feature = layers.Dense(1, activation='relu')(feature)
            features.append(feature)
        features = layers.Add()(features)
        features = AddBias0()(features)

        output = layers.Activation('sigmoid',
                                   name='label')(features + interactions)
        return tf.keras.Model(inputs=inputs, outputs=output, name=self.name)
Example #21
 def call(self, inputs, **kwargs):
     dim = inputs.shape[1] // self.num_fields
     interactions = []
     query = inputs[:, 0:dim]
     for i in range(self.num_fields - 1):
         field = inputs[:, i * dim:(i + 1) * dim]
         interaction = layers.Dot(axes=1)([query, field])
         interaction = tf.math.scalar_mul(self.field_weights[i],
                                          interaction)
         interactions.append(interaction)
     interactions = layers.Add()(interactions)
     return interactions
Example #22
def AttRNNSpeechModel(nCategories,
                      inputShape,
                      rnn_func=layers.LSTM,
                      name="AttNN"):
    inputs = keras.Input(shape=inputShape)
    #x = layers.Flatten()(inputs)

    x = layers.Conv2D(10, 5, activation='relu', padding='same')(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(1, 5, activation='relu', padding='same')(x)
    x = layers.BatchNormalization()(x)

    # x = Reshape((125, 80)) (x)
    # keras.backend.squeeze(x, axis)
    x = layers.Lambda(lambda q: backend.squeeze(q, -1),
                      name='squeeze_last_dim')(x)

    x = layers.Bidirectional(rnn_func(64, return_sequences=True))(
        x)  # [b_s, seq_len, vec_dim]
    x = layers.Bidirectional(rnn_func(64, return_sequences=True))(
        x)  # [b_s, seq_len, vec_dim]

    xFirst = layers.Lambda(lambda q: q[:, -1])(x)  # [b_s, vec_dim]
    query = layers.Dense(128)(xFirst)

    # dot product attention
    attScores = layers.Dot(axes=[1, 2])([query, x])
    attScores = layers.Softmax(name='attSoftmax')(attScores)  # [b_s, seq_len]

    # rescale sequence
    attVector = layers.Dot(axes=[1, 1])([attScores, x])  # [b_s, vec_dim]

    x = layers.Dense(64, activation='relu')(attVector)
    x = layers.Dense(32)(x)

    output = layers.Dense(nCategories, activation='softmax', name='output')(x)

    model = keras.Model(inputs=inputs, outputs=output, name=name)

    return model
Example #23
 def call(self, inputs, **kwargs):
     dim = inputs.shape[1] // self.num_fields
     interactions = []
     for i, j in itertools.combinations(range(self.num_fields), 2):
         interaction = layers.Dot(axes=1)([
             inputs[:, i * dim:(i + 1) * dim], inputs[:,
                                                      j * dim:(j + 1) * dim]
         ])
         interaction = tf.math.scalar_mul(self.field_weights[i, j],
                                          interaction)
         interactions.append(interaction)
     interactions = layers.Add()(interactions)
     return interactions
Example #24
 def dot_prodCNN(self):
     """
     Siamese CNN with merging by dot product
     """
     _input1, _input2, _pool1, _pool2 = self.siameseBlock()
     _output = layers.Dot(axes=1, normalize=False)([_pool1, _pool2])
     _output = layers.Activation("sigmoid")(_output)
     return self.expCompile([
         _input1,
         _input2,
     ],
                            _output,
                            optimizer=self.optimizer)
Example #25
 def _dotProdClassifier(self, input1, input2):
     """
     Add a dot-product classifier.
     The dot product is not normalized but is passed through a sigmoid to
     obtain probabilities; this helps convergence, though it remains inferior
     to conventional methods of merging two embeddings.
     NOTE: this function is for experiments only.
     Parameters:
     - input1  -- 1st operand of the dot product
     - input2  -- 2nd operand of the dot product
     """
     _output = layers.Dot(axes=1, normalize=False)([input1, input2])
     return tensorflow.keras.layers.Activation("sigmoid")(_output)
Example #26
    def __init__(self, vocab_size, embedding_dim):
        super(SkipGram, self).__init__()

        self.embd_in = layers.Embedding(input_dim=vocab_size,
                                        output_dim=embedding_dim)
        self.embd_out_w = layers.Embedding(input_dim=vocab_size,
                                           output_dim=embedding_dim)
        self.embd_out_bias = layers.Embedding(input_dim=vocab_size,
                                              output_dim=1)
        self.dot = layers.Dot(axes=(1, 2))
        self.activation = layers.Activation('sigmoid')

        self.vocab_size = vocab_size
Example #27
 def cosineCNN(self):
     """
     Siamese CNN with merging by cosine
     """
     _input1, _input2, _pool1, _pool2 = self.siameseBlock()
     _output = layers.Dot(axes=1, normalize=True)([_pool1, _pool2])
     return self.expCompile([
         _input1,
         _input2,
     ],
                            _output,
                            optimizer=self.optimizer,
                            loss=keras.losses.SquaredHinge(),
                            metrics=[keras.metrics.SquaredHinge()])
Example #28
def focus_layer(inputs, l=4 * 4, d_in=64, d_out=64, nv=4):
    x = layers.Reshape((l, d_in))(inputs)

    f = layers.Dense(nv, activation='relu')(inputs)
    v = layers.Dense(d_out, activation='relu')(inputs)

    f = layers.Reshape((l, nv))(f)
    v = layers.Reshape((l, d_out))(v)

    w = layers.Softmax(axis=1)(f)
    out = layers.Dot(axes=(1, 1))([w, v])
    out = layers.Reshape((d_out * nv, ))(out)
    x = layers.BatchNormalization()(x)  # note: `x` is never used; only `out` is returned

    return out
Example #29
def NeuralMF(num_items, num_users, latent_dim):
    item_input = layers.Input(shape=[1], name='item-input')
    user_input = layers.Input(shape=[1], name='user-input')

    # MLP Embeddings
    item_embedding_mlp = layers.Embedding(
        num_items + 1, latent_dim, name='movie-embedding-mlp')(item_input)
    item_vec_mlp = layers.Flatten(name='flatten-movie-mlp')(item_embedding_mlp)

    user_embedding_mlp = layers.Embedding(
        num_users + 1, latent_dim, name='user-embedding-mlp')(user_input)
    user_vec_mlp = layers.Flatten(name='flatten-user-mlp')(user_embedding_mlp)

    # MF Embeddings
    item_embedding_mf = layers.Embedding(num_items + 1,
                                         latent_dim,
                                         name='movie-embedding-mf')(item_input)
    item_vec_mf = layers.Flatten(name='flatten-movie-mf')(item_embedding_mf)

    user_embedding_mf = layers.Embedding(num_users + 1,
                                         latent_dim,
                                         name='user-embedding-mf')(user_input)
    user_vec_mf = layers.Flatten(name='flatten-user-mf')(user_embedding_mf)

    # MLP layers
    concat = layers.Concatenate(name='concat')([item_vec_mlp, user_vec_mlp])
    concat_dropout = layers.Dropout(0.2)(concat)
    fc_1 = layers.Dense(100, name='fc-1', activation='relu')(concat_dropout)
    fc_1_bn = layers.BatchNormalization(name='batch-norm-1')(fc_1)
    fc_1_dropout = layers.Dropout(0.2)(fc_1_bn)
    fc_2 = layers.Dense(50, name='fc-2', activation='relu')(fc_1_dropout)
    fc_2_bn = layers.BatchNormalization(name='batch-norm-2')(fc_2)
    fc_2_dropout = layers.Dropout(0.2)(fc_2_bn)

    # Prediction from both layers then concat
    pred_mlp = layers.Dense(10, name='pred-mlp',
                            activation='relu')(fc_2_dropout)
    pred_mf = layers.Dot(name='pred-mf', axes=1)([item_vec_mf, user_vec_mf])
    combine_mlp_mf = layers.Concatenate(name='combine-mlp-mf')(
        [pred_mf, pred_mlp])

    # Last layer
    result = layers.Dense(1, name='result', activation='relu')(combine_mlp_mf)

    model = Model([user_input, item_input], result)

    return model
Example #30
def tnet(inputs, num_features):
    bias = keras.initializers.Constant(np.eye(num_features).flatten())
    reg = OrthogonalRegularizer(num_features)

    x = conv_bn(inputs, 32)
    x = conv_bn(x, 64)
    x = conv_bn(x, 512)
    x = layers.GlobalMaxPooling1D()(x)
    x = dense_bn(x, 256)
    x = dense_bn(x, 128)
    x = layers.Dense(
        num_features * num_features,
        kernel_initializer="zeros",
        bias_initializer=bias,
        activity_regularizer=reg,
    )(x)
    feat_T = layers.Reshape((num_features, num_features))(x)
    return layers.Dot(axes=(2, 1))([inputs, feat_T])