Example #1
def keras_model(action_dim, z_dim):

    import tensorflow as tf
    from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input, Dot, Reshape, Softmax
    from beta_regularizer import BetaRegularization
    s = Input(shape=(110, 84, 1), name='input_s')
    z = Input(shape=(z_dim,), name='input_z')
    conv1 = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(s)
    conv2 = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv1)
    conv2_flat = Flatten()(conv2)
    h1 = Dense(256, activation='relu')(conv2_flat)

    # Predict the next latent
    h2_z = Dense(z_dim * z_dim, activation=None)(h1)  # (B x Z * Z)
    h2_z_reshaped = Reshape((z_dim, z_dim))(h2_z)  # (B x Z x Z)
    z_tp1_matrix = Softmax(axis=-1, name='latent_matrix')(h2_z_reshaped)
    z_tp1 = Dot(axes=1, name='latent')([z_tp1_matrix, z])

    # Predict the next action
    h2_a = Dense(action_dim * z_dim, activation=None)(h1)  # (B x A * Z)
    h2_a_reshaped = Reshape((z_dim, action_dim))(h2_a)       # (B x Z x A)
    a_matrix = Softmax(axis=-1, name='action_matrix')(h2_a_reshaped)
    a = Dot(axes=1, name='action')([a_matrix, z])

    # Predict termination
    h2_b = Dense(z_dim * 2, activation=None)(h1)  # (B x 2 * Z)
    h2_b_reshaped = Reshape((z_dim, 2))(h2_b)     # (B x Z x 2)
    b_matrix = Softmax(axis=-1, name='termination_matrix')(h2_b_reshaped)
    b_matrix = BetaRegularization(1.0, 99.)(b_matrix)
    b = Dot(axes=1, name='termination')([b_matrix, z])

    return tf.keras.Model(inputs=[s, z], outputs=[a, z_tp1, b])
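A shape note on the three heads above (an annotation, not part of the original): each Dense + Reshape + Softmax builds a batch of row-stochastic matrices, and Dot(axes=1) left-multiplies each matrix by the latent vector z.

# Hypothetical usage, shapes only:
# model = keras_model(action_dim=4, z_dim=8)
# a, z_tp1, b = model([s_batch, z_batch])
# latent head: z_tp1_matrix (B, Z, Z) with z (B, Z) -> Dot(axes=1) -> (B, Z), i.e. z^T M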
Example #2
def build_model(word_index, category_map, subcategory_map):
    print('Build model...')
    # model
    # ------ news encoder -------
    news_encoder = build_news_encoder(word_index, category_map, subcategory_map)

    # ----- user encoder -----
    browsed_input = Input((MAX_BROWSED, MAX_TITLE_LENGTH + MAX_ABSTRACT_LENGTH + 2, ), dtype='int32', name='browsed')
    browsed_news = TimeDistributed(news_encoder)(browsed_input)

    user_input = Input((MAX_BROWSED, 400, ), name='user_input')
    user_r = Attention(200)(user_input)
    user_encoder = Model(user_input, user_r, name='user_encoder')

    train_user_r = user_encoder(browsed_news)
    test_user_r = Input((400, ), name='test_user_r')

    # ----- candidate_news -----
    candidate_input = Input((1+NEG_SAMPLE, MAX_TITLE_LENGTH + MAX_ABSTRACT_LENGTH + 2, ), dtype='int32', name='candidate')
    candidate_r = TimeDistributed(news_encoder)(candidate_input)

    candidate_one_r = Input((400, ), name="candidate_1")

    # ----- click predictor -----
    pred = Dot(axes=-1)([train_user_r, candidate_r])
    pred = Activation(activation='softmax')(pred)
    model = Model([browsed_input, candidate_input], pred)

    pred_one = Dot(axes=-1)([test_user_r, candidate_one_r])
    pred_one = Activation(activation='sigmoid')(pred_one)
    model_test = Model([test_user_r, candidate_one_r], pred_one)
    return news_encoder, user_encoder, model, model_test
Example #3
def build_model(word_index, entity_index):
    # model
    # ------ news encoder -------
    news_encoder = KCNN(word_index, entity_index)

    # ----- user encoder -------
    browsed_input = Input((MAX_BROWSED, MAX_TITLE_LENGTH + MAX_ENTITY_LENGTH, ), dtype='int32', name='browsed')
    browsed_news = TimeDistributed(news_encoder)(browsed_input)
    candidate_input = Input((MAX_TITLE_LENGTH + MAX_ENTITY_LENGTH, ), dtype='int32', name='candidate')
    candidate_news_r = news_encoder(candidate_input)

    user_encoder = build_user_encoder()

    train_user_r = user_encoder([browsed_news, candidate_news_r])

    test_browsed_input = Input((MAX_BROWSED, 300), name='browsed_test')
    candidate_one_r = Input((300, ), name="c_t_1")
    test_user_r = user_encoder([test_browsed_input, candidate_one_r])

    # ----- click predictor -----
    pred = Dot(axes=-1)([train_user_r, candidate_news_r])
    pred = Activation(activation='sigmoid')(pred)

    pred_test = Dot(axes=-1)([test_user_r, candidate_one_r])
    pred_test = Activation(activation='sigmoid')(pred_test)
    
    model = Model([browsed_input, candidate_input], pred)
    model_test = Model([test_browsed_input, candidate_one_r], pred_test)

    return news_encoder, user_encoder, model, model_test
Example #4
def Attention(x, y):
    score = Dot(axes=(2, 2))([y, x])

    dist = Activation('softmax')(score)

    attention = Dot(axes=(2, 1))([dist, x])

    return Concatenate()([y, attention])
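A shape walk-through for this Luong-style helper (annotation, not in the original):

# x: (B, Tx, d) keys/values, y: (B, Ty, d) queries
# score     = Dot((2, 2))([y, x])    -> (B, Ty, Tx)
# dist      = softmax(score)         -> normalized over Tx (the last axis)
# attention = Dot((2, 1))([dist, x]) -> (B, Ty, d)
# output    = concat([y, attention]) -> (B, Ty, 2d)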
Example #5
    def __init__(self):
        super(LuongAttention, self).__init__()

        self.attentionDot = Dot((2, 2), name="attentionDot")

        self.attention_layer = Activation("softmax", name="attentionSoftMax")

        self.context = Dot((2, 1), name="context")
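Only the constructor is shown; a minimal call() wiring these layers in the usual Luong fashion might look like this (a sketch under assumed input shapes, not the original implementation):

    def call(self, decoder_output, encoder_output):
        # decoder_output: (B, Td, H), encoder_output: (B, Te, H)
        score = self.attentionDot([decoder_output, encoder_output])  # (B, Td, Te)
        weights = self.attention_layer(score)  # softmax over the Te axis
        return self.context([weights, encoder_output])  # (B, Td, H)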
Example #6
def pointnet_base(inputs, use_tnet=True):
    """
    Convolutional portion of PointNet, common across different tasks (classification, segmentation, etc.)
    :param inputs: Input tensor with the point cloud shape (BxNxK)
    :param use_tnet: whether to use the transformation subnets or not.
    :return: tensor layer for CONV5 activations
    """

    # Obtain spatial point transform from inputs and convert inputs
    if use_tnet:
        ptransform = transform_net(inputs,
                                   scope='transform_net1',
                                   regularize=False)
        point_cloud_transformed = Dot(axes=(2, 1))([inputs, ptransform])

    # First block of convolutions
    net = conv1d_bn(point_cloud_transformed if use_tnet else inputs,
                    num_filters=64,
                    kernel_size=1,
                    padding='valid',
                    use_bias=True,
                    scope='conv1')
    net = conv1d_bn(net,
                    num_filters=64,
                    kernel_size=1,
                    padding='valid',
                    use_bias=True,
                    scope='conv2')

    # Obtain feature transform and apply it to the network
    if use_tnet:
        ftransform = transform_net(net,
                                   scope='transform_net2',
                                   regularize=True)
        net_transformed = Dot(axes=(2, 1))([net, ftransform])

    # Second block of convolutions
    net = conv1d_bn(net_transformed if use_tnet else net,
                    num_filters=64,
                    kernel_size=1,
                    padding='valid',
                    use_bias=True,
                    scope='conv3')
    net = conv1d_bn(net,
                    num_filters=128,
                    kernel_size=1,
                    padding='valid',
                    use_bias=True,
                    scope='conv4')
    net = conv1d_bn(net,
                    num_filters=1024,
                    kernel_size=1,
                    padding='valid',
                    use_bias=True,
                    scope='conv5')

    return net
Example #7
 def __init__(self, **kwargs):
     super(SelfAttention, self).__init__(**kwargs)
     self.dense_q = None
     self.dense_k = None
     self.dense_v = None
     self.dot_context = Dot(axes=(2, 2))
     self.scale = None
     self.normalize = Softmax()
     self.reweight_v = Dot(axes=(2, 1))
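The q/k/v projections and the scale are presumably created in build(); a hedged sketch of how the pieces could combine (the head size and shapes are assumptions, and math must be imported):

 def build(self, input_shape):
     units = int(input_shape[-1])  # assumption: project to the input width
     self.dense_q = Dense(units)
     self.dense_k = Dense(units)
     self.dense_v = Dense(units)
     self.scale = 1.0 / math.sqrt(units)

 def call(self, inputs):
     # inputs: (B, T, F)
     q = self.dense_q(inputs)
     k = self.dense_k(inputs)
     v = self.dense_v(inputs)
     scores = self.dot_context([q, k]) * self.scale  # (B, T, T)
     weights = self.normalize(scores)  # softmax over the last axis
     return self.reweight_v([weights, v])  # (B, T, F)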
Example #8
def build_NewModel_NC():
    """Build a no-communication model of simulation.
    """
    number_of_LHV = config.number_of_LHV  # Number of hidden variables, i.e. alpha, beta, gamma
    depth = config.party_depth
    width = config.party_width
    outputsize = config.party_outputsize
    activ = config.activation_func
    activ2 = 'softmax'
    # 6 numbers (two 3D measurement vectors) plus two 3D hidden variables as inputs.
    inputTensor = Input((6 + 6, ))

    # Split the input into the alpha and beta measurement settings and the hidden variables.
    group_alpha = Lambda(lambda x: x[:, 0:3],
                         output_shape=((3, )))(inputTensor)
    group_beta = Lambda(lambda x: x[:, 3:6], output_shape=((3, )))(inputTensor)

    group_LHV_1 = Lambda(lambda x: x[:, 6:9],
                         output_shape=((3, )))(inputTensor)
    group_alpha_dot_1 = Dot(axes=1)([group_alpha, group_LHV_1])
    group_beta_dot_1 = Dot(axes=1)([group_beta, group_LHV_1])

    group_LHV_2 = Lambda(lambda x: x[:, 9:12],
                         output_shape=((3, )))(inputTensor)
    group_alpha_dot_2 = Dot(axes=1)([group_alpha, group_LHV_2])
    group_beta_dot_2 = Dot(axes=1)([group_beta, group_LHV_2])

    # Route hidden variables to the visible parties Alice and Bob
    group_a = Concatenate()([
        group_alpha, group_LHV_1, group_LHV_2, group_alpha_dot_1,
        group_alpha_dot_2
    ])
    group_b = Concatenate()([
        group_beta, group_LHV_1, group_LHV_2, group_beta_dot_1,
        group_beta_dot_2
    ])

    # Neural network at the parties Alice, Bob
    # Note: increasing the variance of the initialization seemed to help in some cases, especially when the number of outputs per party is 4 or more.
    kernel_init = tf.keras.initializers.VarianceScaling(
        scale=2, mode='fan_in', distribution='truncated_normal', seed=None)
    for _ in range(depth):
        group_a = Dense(width,
                        activation=activ,
                        kernel_initializer=kernel_init)(group_a)
        group_b = Dense(width,
                        activation=activ,
                        kernel_initializer=kernel_init)(group_b)

    # Apply final softmax layer
    group_a = Dense(outputsize, activation=activ2)(group_a)
    group_b = Dense(outputsize, activation=activ2)(group_b)

    outputTensor = Concatenate()([group_a, group_b])

    model = Model(inputTensor, outputTensor)
    return model
Example #9
def second_order_interaction(num_outputs, cat_outputs):

    second_order_outputs = []

    for cat in cat_outputs:
        second_order_outputs.append(Dot(axes=-1)([num_outputs, cat]))

    # note: this enumerates ordered pairs, so each (i, j) interaction
    # appears twice; Dot is symmetric, so range(i + 1, ...) would keep one copy
    for i in range(len(cat_outputs)):
        for j in range(len(cat_outputs)):
            if i != j:
                second_order_outputs.append(
                    Dot(axes=-1)([cat_outputs[i], cat_outputs[j]]))

    return second_order_outputs
Example #10
    def create(n_users,
               n_movies,
               n_factors,
               min_rating,
               max_rating,
               lr=0.001,
               l2_delta=1e-6,
               loss='mean_squared_error'):
        user_input, user_emb, user_bias = biased_embedding_input(
            n_users, n_factors, 'users', l2_delta=l2_delta)

        movie_input, movie_emb, movie_bias = EmbeddingLayer(n_movies,
                                                            n_factors,
                                                            'movies',
                                                            l2_delta=l2_delta)

        # Sum the dot product and both bias terms
        dot = Dot(axes=1, name='dot_product')([user_emb, movie_emb])
        output = Add(name='Add')([dot, user_bias, movie_bias])

        # Apply sigmoid activation function
        output = Activation('sigmoid', name='sigmoid_activation')(output)

        # Scale the sigmoid output from [0, 1] to [min_rating, max_rating]
        output = Lambda(lambda x: x * (max_rating - min_rating) + min_rating,
                        name='user_rating_prediction')(output)

        model = Model(inputs=[user_input, movie_input],
                      outputs=output,
                      name='Embedding_Dot_Product_Plus_Biases_Model')

        model.compile(loss=loss, optimizer=Adam(lr=lr))

        return model
Example #11
    def train(self, nb_users, nb_items, users, items, ratings):
        with self.graph.as_default():
            user_id_input = Input(shape=[1], name='user')
            item_id_input = Input(shape=[1], name='item')

            embedding_size = 5
            user_embedding = Embedding(output_dim=embedding_size,
                                       input_dim=nb_users + 1,
                                       input_length=1,
                                       name='user_embedding')(user_id_input)

            item_embedding = Embedding(output_dim=embedding_size,
                                       input_dim=nb_items + 1,
                                       input_length=1,
                                       name='item_embedding')(item_id_input)

            user_vecs = Flatten()(user_embedding)
            item_vecs = Flatten()(item_embedding)

            y = Dot(axes=1)([user_vecs, item_vecs])

            self.model = Model(inputs=[user_id_input, item_id_input],
                               outputs=y)
            self.model.compile(optimizer='adam', loss='MSE')
            early_stopping = EarlyStopping(monitor='val_loss', patience=2)
            self.model.fit([users, items],
                           ratings,
                           batch_size=64,
                           epochs=20,
                           validation_split=0.1,
                           shuffle=True,
                           callbacks=[early_stopping])
Example #12
    def __init__(self, n_items, n_users, n_factors, reg_all, mu):
        super(Model, self).__init__()
        self.n_items = n_items
        self.n_users = n_users
        self.n_factors = n_factors
        self.reg_all = reg_all
        self.mu = mu

        self.embed_latent_item = Embedding(n_items,
                                           n_factors,
                                           input_length=1,
                                           embeddings_regularizer=L2(reg_all),
                                           embeddings_initializer='uniform')
        self.embed_latent_user = Embedding(n_users,
                                           n_factors,
                                           input_length=1,
                                           embeddings_regularizer=L2(reg_all),
                                           embeddings_initializer='uniform')
        self.embed_bias_item = Embedding(n_items,
                                         1,
                                         input_length=1,
                                         embeddings_regularizer=L2(reg_all),
                                         embeddings_initializer='zeros')
        self.embed_bias_user = Embedding(n_users,
                                         1,
                                         input_length=1,
                                         embeddings_regularizer=L2(reg_all),
                                         embeddings_initializer='zeros')
        self.dot = Dot(axes=(1, 1))

        self.reshape_uf = Reshape((n_factors, ))
        self.reshape_if = Reshape((n_factors, ))
        self.reshape_ub = Reshape(())
        self.reshape_ib = Reshape(())
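Only the constructor is given; a call() consistent with these layers in the standard biased-MF form (a sketch; it assumes mu is the global rating mean and tf is imported) could be:

    def call(self, inputs):
        user_ids, item_ids = inputs
        p_u = self.reshape_uf(self.embed_latent_user(user_ids))  # (B, n_factors)
        q_i = self.reshape_if(self.embed_latent_item(item_ids))  # (B, n_factors)
        b_u = self.reshape_ub(self.embed_bias_user(user_ids))    # (B,)
        b_i = self.reshape_ib(self.embed_bias_item(item_ids))    # (B,)
        interaction = tf.squeeze(self.dot([p_u, q_i]), axis=1)   # (B,)
        # prediction: global mean + user/item biases + latent interaction
        return self.mu + b_u + b_i + interaction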
Example #13
def get_model(num_users, num_items, latent_dim, num_tasks, regs=[0,0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    task_input = Input(shape=(num_tasks,), dtype='float', name='task_input')  # one-hot task input

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_initializer=init_normal(), embeddings_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_initializer=init_normal(), embeddings_regularizer=l2(regs[1]), input_length=1)
    
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    
    # Element-wise product of user and item embeddings 
    predict_vector = Multiply()([user_latent, item_latent])
    
    # Final prediction layer
    #prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    #predict_vector = Dense(16, activation='sigmoid', kernel_initializer='lecun_uniform', name = 'fully-connected')(predict_vector)
    predictions = Dense(num_tasks, activation='sigmoid', kernel_initializer='lecun_uniform', name='predictions')(predict_vector)
    task_prediction = Dot(1)([predictions, task_input])

    model = Model(inputs=[user_input, item_input, task_input],
                  outputs=[task_prediction])

    return model
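A note on the final Dot (annotation, not in the original):

# predictions has shape (B, num_tasks) and task_input is one-hot, so
# Dot(1) picks out the active task's prediction, yielding a (B, 1) output.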
Example #14
    def __init__(self,
                 vocabulary_size,
                 embedding_size,
                 neg_samples,
                 learning_rate=None,
                 word2id=None,
                 id2word=None):
        super().__init__(vocabulary_size,
                         embedding_size,
                         neg_samples,
                         learning_rate=learning_rate,
                         word2id=word2id,
                         id2word=id2word)

        # self.type = 'hypglove'
        self.loss = GloveLoss()

        self.T_bias = Embedding(vocabulary_size,
                                1,
                                input_length=1,
                                name='target_bias')
        self.T_sigma = Embedding(
            vocabulary_size,
            1,
            input_length=1,
            embeddings_initializer=tf.keras.initializers.Constant(value=0.1),
            name='target_sigma')
        self.C_bias = Embedding(vocabulary_size,
                                1,
                                input_length=1,
                                name='context_bias')

        self.dot_layer = Dot(axes=-1)
        self.add_layer = Add()
        self.reshape_layer = Reshape((1, ))
Example #15
def call_NEW_m3(user_len, movie_len, emb_dim=20, feature_size=50, deeper=True, deeper_k=10):
    input_n = Input(shape=[1])
    input_m = Input(shape=[1])
    input_feature = Input(shape=[feature_size])

    # First arg of Embedding (input_dim) is the size of vocabulary
    emb_n = Embedding(user_len, emb_dim, trainable=True, embeddings_initializer='uniform')(input_n)
    emb_n = Flatten()(emb_n)
    emb_m = Embedding(movie_len, emb_dim, trainable=True, embeddings_initializer='uniform')(input_m)
    emb_m = Flatten()(emb_m)
    x = Dot(axes=1)([emb_n, emb_m])

    # User/Movie bias
    emb_n_bias = Embedding(user_len, 1, trainable=True, embeddings_initializer='zeros')(input_n)
    emb_n_bias = Flatten()(emb_n_bias)
    emb_m_bias = Embedding(movie_len, 1, trainable=True, embeddings_initializer='zeros')(input_m)
    emb_m_bias = Flatten()(emb_m_bias)

    # Features
    if deeper:
        d1 = Dense(deeper_k, use_bias=True, activation='relu')(input_feature)
        d1 = Dropout(0.5)(d1)
        d1 = Dense(1, use_bias=True, activation='relu')(d1)
    else:
        d1 = Dense(1, use_bias=True, activation='relu')(input_feature)

    x = Add()([x, emb_n_bias, emb_m_bias, d1])
    x = BiasLayer(4, weights=np.array([[0.2, 0.4, 0.6, 0.8]]))(x)
    x = Activation('sigmoid')(x)
    x = Dense(5, trainable=False, weights=[w1, b1])(x)  # BiasLayer, w1, b1 are defined elsewhere in the original
    
    model = Model([input_n, input_m, input_feature], x)
    return model
Example #16
 def Attention(self, x):
     tanh_H = Activation('tanh')(x)
     # softmax over a single Dense unit would always output 1, so score
     # first and normalize across the time axis instead
     a = Dense(1)(tanh_H)
     a = Softmax(axis=1)(a)
     r = Dot(axes=1)([x, a])
     r = Activation('tanh')(r)
     r = Flatten()(r)
     return r
Example #17
    def build(self):
        embedding_dim = self.config["params"]["word2vec"]["embedding_dim"]
        vocab_size = self.config["params"]["vocab_size"]
        learning_rate = self.config["hyper_params"]["word2vec"]["lr"]

        target_input = Input(shape=(1, ), name="target")
        context_input = Input(shape=(1, ), name="context")
        embedding = Embedding(vocab_size,
                              embedding_dim,
                              input_length=1,
                              name="embedding")

        target_embedding = embedding(target_input)
        context_embedding = embedding(context_input)

        target = Reshape((embedding_dim, 1))(target_embedding)
        context = Reshape((embedding_dim, 1))(context_embedding)

        similarity = Reshape((1, ))(Dot(1, normalize=False)([target, context]))

        output = Dense(1, activation="sigmoid")(similarity)
        model = Model(inputs=[target_input, context_input], outputs=output)
        model.compile(loss="binary_crossentropy",
                      optimizer=Adam(learning_rate),
                      metrics=['accuracy'])
        self.model = model
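Training pairs for this skip-gram model are typically generated with Keras' built-in sampler; a hypothetical sketch (token ids and sizes invented for illustration):

import numpy as np
from tensorflow.keras.preprocessing.sequence import skipgrams

sequence = [2, 7, 1, 9, 4]  # hypothetical token ids
couples, labels = skipgrams(sequence, vocabulary_size=10000, window_size=2)
targets, contexts = map(np.array, zip(*couples))
# self.model.fit([targets, contexts], np.array(labels), epochs=5)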
Example #18
    def create(n_users,
               n_movies,
               n_factors,
               lr=0.001,
               l2_delta=1e-6,
               loss='mean_squared_error'):
        user_input, user_emb = embedding_input(n_users,
                                               n_factors,
                                               'users',
                                               l2_delta=l2_delta)
        movie_input, movie_emb = embedding_input(n_movies,
                                                 n_factors,
                                                 'movies',
                                                 l2_delta=l2_delta)

        output = Dot(axes=1,
                     name='user_rating_prediction')([user_emb, movie_emb])

        model = Model(inputs=[user_input, movie_input],
                      outputs=output,
                      name='Embedding_Dot_Product_Model')

        model.compile(loss=loss, optimizer=Adam(lr))

        return model
Example #19
def create_physics_model(elastic_stiffness, force_vector, stiffness_low,
                         stiffness_up, batch_input_shape, myDtype):

    inputLayer = Input(shape=(1, ))

    elasticStiffnessLayer = AMatrix(input_shape=inputLayer.shape,
                                    dtype=myDtype,
                                    trainable=False)
    elasticStiffnessLayer.build(input_shape=inputLayer.shape)
    elasticStiffnessLayer.set_weights(
        [np.asarray(elastic_stiffness, dtype=elasticStiffnessLayer.dtype)])
    elasticStiffnessLayer = elasticStiffnessLayer(inputLayer)

    forceMatrixLayer = FMatrix(input_shape=inputLayer.shape,
                               dtype=myDtype,
                               trainable=False)
    forceMatrixLayer.build(input_shape=inputLayer.shape)
    forceMatrixLayer.set_weights(
        [np.asarray(force_vector, dtype=forceMatrixLayer.dtype)])
    forceMatrixLayer = forceMatrixLayer(inputLayer)

    inverseStiffnessLayer = Lambda(lambda x: tf.linalg.inv(x))(
        elasticStiffnessLayer)

    #    deflectionOutputLayer = Lambda(lambda x: tf.linalg.matmul(x[0],x[1]))([inverseStiffnessLayer, forceMatrixLayer])
    deflectionOutputLayer = Dot(axes=1)([inverseStiffnessLayer, forceMatrixLayer])

    functionalModel = Model(inputs=[inputLayer],
                            outputs=[deflectionOutputLayer])

    functionalModel.compile(loss=mae, optimizer=RMSprop(5e-3), metrics=[mse])
    return functionalModel
Example #20
def glove_as_matcher(vocab_size, embedding_matrix, max_seq_length):
    e_a = Embedding(vocab_size,
                    300,
                    weights=[embedding_matrix],
                    input_length=max_seq_length,
                    trainable=False)
    e_b = Embedding(vocab_size,
                    300,
                    weights=[embedding_matrix],
                    input_length=max_seq_length,
                    trainable=False)
    inputA = Input(shape=(max_seq_length, ), dtype='int32')
    inputB = Input(shape=(max_seq_length, ), dtype='int32')
    glove_outputA = e_a(inputA)
    glove_outputB = e_b(inputB)

    pred = Dot(1, normalize=True)([glove_outputA, glove_outputB])  # cosine similarity; unused by the returned model
    # pred = Lambda(cosine_sim)([glove_outputA, glove_outputB])
    # pred = Dense(2, activation='softmax')(pred)

    # model = Model(inputs=[inputA, inputB], outputs=pred)
    model = Model(inputs=[inputA, inputB],
                  outputs=[glove_outputA, glove_outputB])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # print(model.summary())
    return model
Example #21
def bert_as_matcher(max_seq_length):
    in_idA = Input(shape=(max_seq_length, ), name="input_idsA")
    in_maskA = Input(shape=(max_seq_length, ), name="input_masksA")
    in_segmentA = Input(shape=(max_seq_length, ), name="segment_idsA")
    bert_inputsA = [in_idA, in_maskA, in_segmentA]
    bert_outputA = BH.BertLayer(n_fine_tune_layers=0,
                                name='bert_inputA')(bert_inputsA)

    in_idB = Input(shape=(max_seq_length, ), name="input_idsB")
    in_maskB = Input(shape=(max_seq_length, ), name="input_masksB")
    in_segmentB = Input(shape=(max_seq_length, ), name="segment_idsB")
    bert_inputsB = [in_idB, in_maskB, in_segmentB]
    bert_outputB = BH.BertLayer(n_fine_tune_layers=0,
                                name='bert_inputB')(bert_inputsB)

    pred = Dot(1, normalize=True)([bert_outputA, bert_outputB])  # cosine similarity; unused by the returned model

    # bert_output = BH.BertLayer(n_fine_tune_layers=3)(bert_inputs)
    # pred = Dense(2, activation='softmax')(bert_output)
    # pred = Lambda(lambda x: [1 - x, x])(bert_output)
    model = Model(
        inputs=[in_idA, in_maskA, in_segmentA, in_idB, in_maskB, in_segmentB],
        outputs=[bert_outputA, bert_outputB])
    # loss = F1score().loss
    # model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # print(model.summary())
    return model
Example #22
        def _one_hop(emb_q, A):
            # calculate weights between query and stories
            x = Reshape((1, self.args.embedding_dim))(emb_q)
            x = Dot(axes=2)([A, x])
            x = Reshape((self.args.fact_number, ))(x)
            x = Activation('softmax')(x)
            match = Reshape((self.args.fact_number, 1))(x)

            # multiply weights to stories
            emb_story, _ = self.__emb_sent_bow(inp_story)
            x = Dot(axes=1)([match, emb_story])
            x = Reshape((self.args.embedding_dim, ))(x)
            x = Dense(self.args.embedding_dim)(x)
            # update query_embedding
            new_q = Add()([x, emb_q])
            return new_q, emb_story
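Shape walk-through for one memory hop (annotation, not in the original):

# emb_q (B, d) -> x (B, 1, d); stories A: (B, fact_number, d)
# Dot(axes=2)([A, x])             -> (B, F, 1) raw query/story match scores
# softmax over the F facts        -> attention weights, reshaped to (B, F, 1)
# Dot(axes=1)([match, emb_story]) -> (B, 1, d), the weighted story sum
# Dense + Add                     -> updated query embedding, as in end-to-end memory networks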
Example #23
    def __init__(self,
                 vocabulary_size,
                 embedding_size,
                 neg_samples,
                 learning_rate=None,
                 word2id=None,
                 id2word=None):
        super().__init__(vocabulary_size,
                         embedding_size,
                         neg_samples,
                         learning_rate=learning_rate,
                         word2id=word2id,
                         id2word=id2word)

        # model specific part
        self.loss = GloveLoss()

        self.T_bias = Embedding(vocabulary_size,
                                1,
                                input_length=1,
                                name='target_bias')
        self.C_bias = Embedding(vocabulary_size,
                                1,
                                input_length=1,
                                name='context_bias')

        self.dot_layer = Dot(axes=-1)
        self.add_layer = Add()
        self.reshape_layer = Reshape((1, ))
Example #24
def fm_embedding(num_user, num_movie, k):
    input_user = Input(name='user_input', shape=[None, ], dtype='int32')
    embedding_user = Embedding(name='user_embedding',
                               input_dim=num_user,
                               output_dim=k,
                               input_length=1)(input_user)
    embedding_user = Reshape((k, ))(embedding_user)

    input_movie = Input(name='movie_input', shape=[None, ], dtype='int32')
    embedding_movie = Embedding(name='movie_embedding',
                                input_dim=num_movie,
                                output_dim=k,
                                input_length=1)(input_movie)
    embedding_movie = Reshape((k, ))(embedding_movie)

    out = Dot(name='inner_product', axes=1,
              normalize=False)([embedding_user, embedding_movie])

    model = Model(inputs=[input_user, input_movie], outputs=out)
    model.compile(loss='mse', optimizer='Adam')

    model.summary()

    return model
Example #25
def affinity_graph(name,
                   question_features,
                   image_features,
                   embedding_size=512):
    """
        Compute affinity matrix by combining features Q (T x d) and V (N x d) to C (T x N).
        Therefore we introduce the weight matrix W (d x d) to compute Q (T x d) x W (d x d) x V_T (d x N)  
        
        question_features_shape: T x d, T = question max-length   , d = embedding size     = 512
        image_features_shape   : N x d, N = feature-map size = 196, d = feature-map amount = 512
    
        @param name: the name of the context in which this graph is used e.g. word, phrase or question level
        @param question_features: textual features for the co-attention
        @param image_features: visual features for the co-attention
        @param image_features_size: length of the flatten visual feature maps e.g. 196 for 14 x 14 maps
        
        @return: the affinity graph with tensor T x N
    """
    # we combine the right-hand side V (N x d) x W (d x d) = R (N x d)
    agraph = Dense(embedding_size,
                   name=name +
                   "_affinity_image_features_embedding")(image_features)
    # we combine the left-hand side at axis=2 Q (T x d) x R (N x d) = C (T x N)
    agraph = Dot(axes=(2, 2),
                 name=name + "_affinity")([question_features, agraph])
    agraph = Activation(activation="tanh",
                        name=name + "_affinity_activation")(agraph)
    return agraph
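A minimal shape check for affinity_graph (hypothetical sizes T=23, N=196, d=512; not part of the original):

from tensorflow.keras.layers import Input

q = Input(shape=(23, 512))   # question features, T x d
v = Input(shape=(196, 512))  # image features, N x d
c = affinity_graph('word', q, v, embedding_size=512)
# c has shape (None, 23, 196): one tanh-squashed affinity per (word, region) pair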
Example #26
 def __init__(self,sequence_length=30,hidden_state_length=128):
     super(self_attention, self).__init__()
     # create an initializer for the attention weighting variable 
     u_w_init = tf.random_normal_initializer()
     # create the attention context variable of shape (1, 2 * hidden_state_length)
     self.u_w = tf.Variable(
         initial_value=u_w_init(shape=(1,hidden_state_length*2), dtype="float32"),
         trainable=True,
     )
     self.dense_layer   = Dense(units=hidden_state_length*2,activation=activations.tanh)
     self.softmax_layer = Softmax()
     self.dot_layer1    = Dot(axes=(1))
     self.dot_layer2    = Dot(axes=(1))
     self.reshaper      = Reshape(target_shape=(hidden_state_length*2,sequence_length,))
     self.sequence_length=sequence_length
     self.hidden_state_length=hidden_state_length
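Only the constructor is shown; a hedged call() sketch for this context-vector attention, written with plain tensor ops instead of the stored Dot/Reshape layers (an assumption, not the original wiring; assumes tf is imported):

 def call(self, hidden_states):
     # hidden_states: (B, sequence_length, 2 * hidden_state_length)
     u = self.dense_layer(hidden_states)            # (B, T, 2H)
     scores = tf.reduce_sum(u * self.u_w, axis=-1)  # (B, T)
     weights = self.softmax_layer(scores)           # attention over timesteps
     # attention-weighted sum of the hidden states
     return tf.einsum('bt,bth->bh', weights, hidden_states)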
Example #27
def mf(n_person=100, n_item=3000, para_dim=5):
    """
    Input: dimensions of person-item matrix
    Output: neural network with multiple inputs and embedding layers
    """
    p = Input(shape=[1], name='person')
    p_e = Embedding(n_person,
                    para_dim,
                    embeddings_initializer='RandomNormal',
                    name='person_embedding')(p)
    p_e = MaxNorm(max_value=5 * np.sqrt(para_dim))(p_e)  # maxnorm

    i = Input(shape=[1], name='item')
    i_e = Embedding(n_item,
                    para_dim,
                    embeddings_initializer='RandomNormal',
                    name='item_embedding')(i)
    i_e = MaxNorm(max_value=5 * np.sqrt(para_dim))(i_e)  # maxnorm

    d = Input(shape=[1], name='residual')
    # note: the residual embedding below is indexed by the item input i;
    # the 'residual' input d itself is never used in the graph
    d_e = Embedding(n_item,
                    1,
                    embeddings_initializer='RandomNormal',
                    name='res_embed')(i)
    d_e = MaxNorm(max_value=5 * np.sqrt(para_dim))(d_e)  # maxnorm

    output = Dot(axes=-1, name='dotProduct')([p_e, i_e]) + d_e
    #     print(output.shape)
    output = Flatten(name='output')(output)
    main_output = Activation('sigmoid')(output)
    #     print(main_output.shape)
    model = Model([p, i, d], main_output)
    return model
Example #28
    def __init__(self, num_of_users, num_of_items, num_of_factors):
        # input_user_id
        model_Embedding_user_id = Sequential([
            Embedding(num_of_users,
                      num_of_factors,
                      input_length=1,
                      input_shape=(1, )),
            Reshape((num_of_factors, )),
        ])
        # input_item_id
        model_Embedding_item_id = Sequential([
            Embedding(num_of_items,
                      num_of_factors,
                      input_length=1,
                      input_shape=(1, )),
            Reshape((num_of_factors, ))
        ])
        # create base network
        input_user_id = Input((1, ), name='input_user_id')
        input_item_id = Input((1, ), name='input_item_id')
        embedded_user_id = model_Embedding_user_id(input_user_id)
        embedded_item_id = model_Embedding_item_id(input_item_id)

        output_label = Dot(axes=1)([embedded_user_id, embedded_item_id])

        super(CFModel, self).__init__(inputs=[input_user_id, input_item_id],
                                      outputs=output_label)
Example #29
def ac_rl_embedding_model(ac_index, rl_index, embedding_size):
    """Model to embed activities and roles using the functional API"""

    # Both inputs are 1-dimensional
    activity = Input(name='activity', shape=[1])
    role = Input(name='role', shape=[1])

    # Embedding the activity (shape will be (None, 1, embedding_size))
    activity_embedding = Embedding(name='activity_embedding',
                                   input_dim=ac_index,
                                   output_dim=embedding_size)(activity)

    # Embedding the role (shape will be (None, 1, embedding_size))
    role_embedding = Embedding(name='role_embedding',
                               input_dim=rl_index,
                               output_dim=embedding_size)(role)

    # Merge the layers with a dot product along the second axis (shape will be (None, 1, 1))
    merged = Dot(name='dot_product', normalize=True,
                 axes=2)([activity_embedding, role_embedding])

    # Reshape to be a single number (shape will be (None, 1))
    merged = Reshape(target_shape=[1])(merged)

    # Loss function is mean squared error
    model = Model(inputs=[activity, role], outputs=merged)
    model.compile(optimizer='Adam', loss='mse')

    return model
Example #30
def recommender_v_2(n_users, n_art, n_factors, min_rating, max_rating):
    """
    Defines the recommender system
    :param n_users: users in the system
    :param n_art: artwork in the system
    :param n_factors: number of dimensions for the embedding
    :param min_rating: minimum rating
    :param max_rating: maximum rating
    :return: model
    """
    user = Input(shape=(1, ))
    u = EmbeddingLayer(n_users, n_factors)(user)
    ub = EmbeddingLayer(n_users, 1)(user)

    movie = Input(shape=(1, ))
    m = EmbeddingLayer(n_art, n_factors)(movie)
    mb = EmbeddingLayer(n_art, 1)(movie)
    x = Dot(axes=1)([u, m])
    x = Add()([x, ub, mb])
    x = Activation('sigmoid')(x)
    x = Lambda(lambda x: x * (max_rating - min_rating) + min_rating)(x)
    model = Model(inputs=[user, movie], outputs=x)
    opt = Adam(lr=0.001)
    model.compile(loss='mean_squared_error', optimizer=opt)
    return model
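A hypothetical smoke test for recommender_v_2 (random data and assumed sizes; EmbeddingLayer is the external helper used above):

import numpy as np

model = recommender_v_2(n_users=100, n_art=50, n_factors=8, min_rating=1, max_rating=5)
users = np.random.randint(0, 100, size=(256, 1))
arts = np.random.randint(0, 50, size=(256, 1))
ratings = np.random.uniform(1, 5, size=(256, 1))
model.fit([users, arts], ratings, epochs=2, batch_size=32)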