def create_V2A_network(A_dim, V_dim):

    A_input = Input(shape=A_dim)
    AP = AveragePooling1D(pool_size=pool, strides=stride,
                          padding='valid')(A_input)

    V_input = Input(shape=V_dim)
    VP = AveragePooling1D(pool_size=pool, strides=stride,
                          padding='valid')(V_input)

    VL = LSTM(units=1024,
              return_sequences=True,
              stateful=False,
              dropout=0.2,
              recurrent_dropout=0.2,
              kernel_initializer=initializers.lecun_normal(),
              recurrent_initializer=initializers.lecun_uniform())(VP)
    VL = TimeDistributed(
        Dense(units=128,
              kernel_initializer=initializers.lecun_normal(),
              activation='tanh'))(VL)

    VT = TimeDistributed(
        Dense(units=128,
              kernel_initializer=initializers.lecun_normal(),
              activation='tanh'))(VP)
    VL = Average()([VL, VT])

    distance = Lambda(QQ, output_shape=[L, 128])([VL, AP])

    res_model = Model(inputs=[A_input, V_input], outputs=distance)

    # res_model.summary()

    return res_model
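
The function above leans on module-level names (pool, stride, QQ, L) that the snippet elides. A minimal sketch of plausible definitions, hypothetical throughout, so the builder can run standalone; the real QQ distance is unknown, and an element-wise squared difference is assumed here (which requires the pooled audio features to also be 128-dimensional):

import keras.backend as K
from keras import initializers
from keras.layers import (Input, AveragePooling1D, LSTM, TimeDistributed,
                          Dense, Average, Lambda)
from keras.models import Model

pool, stride = 4, 4    # hypothetical pooling window and stride
L = 25                 # hypothetical pooled sequence length

def QQ(tensors):
    # hypothetical distance between the video representation and the pooled audio
    v, a = tensors
    return K.square(v - a)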
Example 2
def network(categorical_columns_item, num_deep_numeric_feature,
            num_wide_numeric_feature, bias):
    input_layers = list()
    embedding_layers = list()

    # net categorical deep feature
    for col, num in categorical_columns_item.items():
        input_deep_cat_layer = Input(shape=(1, ),
                                     name=col + "_categorical_deep_input")
        embedding_layer = Embedding(
            input_dim=num,
            output_dim=min(10, num // 2),
            embeddings_initializer=truncated_normal(mean=0,
                                                    stddev=1 / np.sqrt(num)),
            input_length=1,
            name=col + "_deep_embedding")(input_deep_cat_layer)
        embedding_layer = (Reshape(target_shape=(min(10, num // 2), ),
                                   name=col +
                                   "_deep_reshape")(embedding_layer))
        embedding_layer = Dropout(rate=0.15,
                                  noise_shape=(None, 1),
                                  name=col + "_deep_dropout")(embedding_layer)
        input_layers.append(input_deep_cat_layer)
        embedding_layers.append(embedding_layer)

    # net numeric deep feature
    input_deep_num_layer = Input(shape=(num_deep_numeric_feature, ),
                                 name="numeric_deep_input")
    input_layers.append(input_deep_num_layer)

    # net numeric wide feature
    input_wide_num_layer = Input(shape=(num_wide_numeric_feature, ),
                                 name="numeric_wide_input")
    input_layers.append(input_wide_num_layer)

    hidden_layer = Dense(units=32,
                         kernel_initializer=lecun_normal(),
                         activation="selu")(Concatenate()([
                             Concatenate()(embedding_layers),
                             Dropout(rate=0.15)(input_deep_num_layer)
                         ]))
    hidden_layer = Dense(units=16,
                         kernel_initializer=lecun_normal(),
                         activation="selu")(hidden_layer)
    hidden_layer = Dense(units=8,
                         kernel_initializer=lecun_normal(),
                         activation="selu")(hidden_layer)
    hidden_layer = Concatenate()([hidden_layer, input_wide_num_layer])
    output_layer = Dense(units=1,
                         kernel_initializer=lecun_normal(),
                         bias_initializer=constant(logit(bias)),
                         activation="sigmoid",
                         name="output_layer")(hidden_layer)

    return Model(input_layers, output_layer)
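
A hedged usage sketch for the wide-and-deep builder above. The column cardinalities, feature counts, and base rate are invented, and the snippet's bare names are assumed to resolve as from scipy.special import logit plus the lowercase keras.initializers aliases (truncated_normal, lecun_normal, constant). logit(bias) initializes the output bias so the untrained sigmoid predicts roughly the base positive rate:

categorical_columns = {"city": 120, "device": 8}   # hypothetical column -> cardinality
model = network(categorical_columns,
                num_deep_numeric_feature=5,
                num_wide_numeric_feature=3,
                bias=0.1)                          # hypothetical base positive rate
model.compile(optimizer="adam", loss="binary_crossentropy")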
Example 3
    def build_model(self, priors_corr, prior_test, Pi, input_shape, mode):
        self.prior_test = prior_test
        self.priors_corr = priors_corr
        self.Pi = Pi
        self.mode = mode
        input = Input(shape=input_shape)

        x = Dropout(0.2)(input)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        g = Dense(1,
                  use_bias=True,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  activation='sigmoid',
                  name="base_model")(x)
        base_model = Model(inputs=input, outputs=g)
        g_bar = Lambda(self.adaption_layer, name="last_layer")(g)
        model = Model(inputs=input, outputs=g_bar)

        self.compile_model(
            model=model,
            base_model=base_model,
            Pi=self.Pi,
            priors_corr=priors_corr,
            prior_test=prior_test,
            mode=self.mode,
        )
Example 4
        def double_nn(opt, dropout, a_func, reg_all, preds_reg, embed_dim, dense_units):

            user_embeddings = Embedding(u_cnt,
                    embed_dim,
                    embeddings_initializer=RandomNormal(mean=0.0, stddev=0.1),
                    embeddings_regularizer=l2(reg_all),
                    input_length=1,
                    trainable=True, name='user_embeddings')

            song_embeddings = Embedding(s_cnt,
                    embed_dim,
                    embeddings_initializer=RandomNormal(mean=0.0, stddev=0.1),
                    embeddings_regularizer=l2(reg_all),
                    input_length=1,
                    trainable=True,name='item_embeddings')


            uid_input = Input(shape=(1,), dtype='int32')
            embedded_usr = user_embeddings(uid_input)
            embedded_usr = Reshape((embed_dim,),name='user_embeddings_reshaped')(embedded_usr)
            ub = create_bias(uid_input, u_cnt, reg_all,'user_biases')


            sid_input = Input(shape=(1,), dtype='int32')
            embedded_song = song_embeddings(sid_input)
            embedded_song = Reshape((embed_dim,),name='item_embeddings_reshaped')(embedded_song)
            mb = create_bias(sid_input, s_cnt, reg_all,'item_biases')

            preds = concatenate([embedded_usr, embedded_song],name='concatenated_embeddings_all')

            if (a_func == 'relu') or (a_func == 'elu'):
                preds = Dense(dense_units, use_bias=True, activation=a_func,
                              kernel_initializer=RandomNormal(mean=0.0, stddev=0.1),
                              kernel_regularizer=regularizers.l2(preds_reg),
                              bias_initializer=RandomNormal(mean=0.0, stddev=0.1),
                              bias_regularizer=regularizers.l2(preds_reg))(preds)
                preds = Dropout(dropout)(preds)
                preds = Dense(1, use_bias=True, activation=a_func,
                              kernel_initializer=RandomNormal(mean=0.0, stddev=0.1),
                              kernel_regularizer=regularizers.l2(preds_reg),
                              bias_initializer=RandomNormal(mean=0.0, stddev=0.1),
                              bias_regularizer=regularizers.l2(preds_reg))(preds)
                preds = Dropout(dropout)(preds)
            elif a_func == 'selu':
                preds = Dense(dense_units, activation=a_func,
                              kernel_initializer=lecun_normal(),
                              name='main_hidden')(preds)
                preds = AlphaDropout(dropout)(preds)
                preds = Dense(1, activation=a_func,
                              kernel_initializer=lecun_normal(),
                              name='main_output')(preds)
                preds = AlphaDropout(dropout)(preds)

            preds = add([ub, preds])
            preds = add([mb, preds])

            model = Model(inputs=[uid_input, sid_input], outputs=preds)
            opt = eval(opt)
            model.compile(loss='mse', optimizer=opt, metrics=[mae])

            return model
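
double_nn calls a create_bias helper that the snippet does not show. A common pattern for per-id bias terms in factorization models, offered here only as a guess at its definition:

from keras.layers import Embedding, Reshape
from keras.initializers import Zeros
from keras.regularizers import l2

def create_bias(id_input, cnt, reg, name):
    # one scalar bias per user/item id, regularized like the embeddings
    b = Embedding(cnt, 1, input_length=1,
                  embeddings_initializer=Zeros(),
                  embeddings_regularizer=l2(reg), name=name)(id_input)
    return Reshape((1,), name=name + '_reshaped')(b)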
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    # MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding', init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim,
                                  embeddings_initializer=initializers.random_normal(),
                                  embeddings_regularizer=l2(regs[0]), input_length=1, name='user_embedding')
    # MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding', init = init_normal, W_regularizer = l2(regs[1]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim,
                                  embeddings_initializer=initializers.random_normal(),
                                  embeddings_regularizer=l2(regs[1]),
                                  input_length=1, name='item_embedding')
    
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    
    # Element-wise product of user and item embeddings 
    # predict_vector = merge([user_latent, item_latent], mode = 'mul')
    predict_vector = multiply([user_latent, item_latent])
    
    # Final prediction layer
    # prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(), name='prediction')(predict_vector)
    
    model_ = Model(inputs=[user_input, item_input], outputs=prediction)

    return model_
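
A short usage sketch for the matrix-factorization model above, with made-up data sizes; the 0/1 implicit-feedback labels match the sigmoid output:

import numpy as np

model = get_model(num_users=1000, num_items=2000, latent_dim=8)
model.compile(optimizer='adam', loss='binary_crossentropy')

users = np.random.randint(0, 1000, size=4096)
items = np.random.randint(0, 2000, size=4096)
labels = np.random.randint(0, 2, size=4096)      # hypothetical implicit feedback
model.fit([users, items], labels, batch_size=256, epochs=1)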
Example 6
def get_model(train, num_users, num_items, layers=[20, 10, 5, 2]):
    num_layer = len(layers)  # Number of layers in the MLP
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)

    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    user_rating = Lambda(lambda x: tf.gather(user_matrix, tf.cast(x, tf.int32)))(user_input)
    item_rating = Lambda(lambda x: tf.gather(item_matrix, tf.cast(x, tf.int32)))(item_input)
    user_rating = Reshape((num_items, ))(user_rating)
    item_rating = Reshape((num_users, ))(item_rating)
    MLP_Embedding_User = Dense(layers[0]//2, activation="linear" , name='user_embedding')
    MLP_Embedding_Item  = Dense(layers[0]//2, activation="linear" , name='item_embedding')
    user_latent = MLP_Embedding_User(user_rating)
    item_latent = MLP_Embedding_Item(item_rating)

    # The 0-th layer is the concatenation of embedding layers
    vector = concatenate([user_latent, item_latent])
    
    # MLP layers
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], activation='relu', name='layer%d' % idx)
        vector = layer(vector)
        
    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(vector)
    
    model_ = Model(inputs=[user_input, item_input],
                   outputs=prediction)
    
    return model_
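
Several of the DMF-style snippets here call a getTrainMatrix helper that is never shown; it presumably densifies the training interactions into a (num_users, num_items) matrix. A guessed implementation, assuming train is a dict of user id -> {item id: rating}:

import numpy as np

def getTrainMatrix(train):
    num_users = max(train) + 1
    num_items = max(i for items in train.values() for i in items) + 1
    mat = np.zeros((num_users, num_items), dtype='float32')
    for u, items in train.items():
        for i, r in items.items():
            mat[u, i] = r    # explicit rating, or 1.0 for implicit feedback
    return mat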
Example 7
def get_model(num_users, num_items, layers=[20, 10], reg_layers=[0, 0]):
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # Number of layers in the MLP
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=int(layers[0]/2), name='user_embedding',
                                   embeddings_regularizer=l2(reg_layers[0]), input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=int(layers[0]/2), name='item_embedding',
                                   embeddings_regularizer=l2(reg_layers[0]), input_length=1)
    
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MLP_Embedding_User(user_input))
    item_latent = Flatten()(MLP_Embedding_Item(item_input))
    
    # The 0-th layer is the concatenation of embedding layers
    # vector = merge([user_latent, item_latent], mode = 'concat')
    vector = concatenate([user_latent, item_latent])
    
    # MLP layers
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name='layer%d' % idx)
        vector = layer(vector)
        
    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(vector)
    
    model_ = Model(inputs=[user_input, item_input],
                   outputs=prediction)
    
    return model_
Example 8
def build_initializer(type, kerasDefaults, seed=None, constant=0.):

    if type == 'constant':
        return initializers.Constant(value=constant)

    elif type == 'uniform':
        return initializers.RandomUniform(
            minval=kerasDefaults['minval_uniform'],
            maxval=kerasDefaults['maxval_uniform'],
            seed=seed)

    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)

# Not generally available
#    elif type == 'glorot_normal':
#        return initializers.glorot_normal(seed=seed)

    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)

    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)

    elif type == 'lecun_normal':
        return initializers.lecun_normal(seed=seed)

    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)
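
A usage sketch for the initializer factory above; the kerasDefaults keys are the ones the function body reads:

from keras.layers import Dense

kerasDefaults = {'minval_uniform': -0.05, 'maxval_uniform': 0.05,
                 'mean_normal': 0.0, 'stddev_normal': 0.05}
init = build_initializer('lecun_normal', kerasDefaults, seed=42)
layer = Dense(128, kernel_initializer=init, activation='selu')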
Example 9
def test_lecun_normal(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    std = np.sqrt(1. / fan_in)
    _runner(initializers.lecun_normal(),
            tensor_shape,
            target_mean=0.,
            target_std=std)
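
The test pins LeCun normal's standard deviation to sqrt(1 / fan_in); for a layer with 100 inputs that is sqrt(1/100) = 0.1. A quick empirical check of the same property:

import keras.backend as K
from keras import initializers

w = K.eval(initializers.lecun_normal(seed=0)((100, 50)))  # fan_in = 100
print(w.std())  # close to sqrt(1/100) = 0.1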
Example 10
def res_unit(input_layer, dense_num):
    tp1 = PReLU()(input_layer)
    tp2 = Dropout(0.2)(BatchNormalization()(tp1))
    tp3 = Dense(dense_num, kernel_initializer=initializers.lecun_normal())(tp2)
    union_layer = Add()([input_layer, tp3])

    return union_layer
def reference_lstm_nodense_medium(input_tensor,
                                  k_init=lecun_normal(seed=1337),
                                  k_reg=l1(),
                                  rec_reg=l1(),
                                  sf=False,
                                  imp=2):
    '''reference LSTM with batchnorm and NO dense layers. Output is already batch-normed.
    Expects ORIGINAL input batch size so pad/adjust window size accordingly!'''
    h = LSTM(100,
             kernel_initializer=k_init,
             return_sequences=True,
             recurrent_regularizer=rec_reg,
             kernel_regularizer=k_reg,
             implementation=imp,
             stateful=sf)(input_tensor)
    i = BatchNormalization()(h)
    j = LSTM(100,
             kernel_initializer=k_init,
             return_sequences=True,
             recurrent_regularizer=rec_reg,
             kernel_regularizer=k_reg,
             implementation=imp,
             stateful=sf)(i)
    j = BatchNormalization()(j)
    out = j
    return out
def reference_bilstm_nodense_big(input_tensor,
                                 k_init=lecun_normal(seed=1337),
                                 k_reg=l1(),
                                 rec_reg=l1(),
                                 sf=False,
                                 imp=2,
                                 dense_act='tanh'):
    '''reference BIG BiLSTM with batchnorm and NO dense layers (the dense_act argument is unused).
    Expects ORIGINAL input batch size so pad/adjust window size accordingly!'''
    h = Bidirectional(
        LSTM(200,
             kernel_initializer=k_init,
             return_sequences=True,
             recurrent_regularizer=rec_reg,
             kernel_regularizer=k_reg,
             implementation=imp,
             stateful=sf))(input_tensor)
    i = BatchNormalization()(h)
    j = Bidirectional(
        LSTM(200,
             kernel_initializer=k_init,
             return_sequences=True,
             recurrent_regularizer=rec_reg,
             kernel_regularizer=k_reg,
             implementation=imp,
             stateful=sf))(i)
    j = BatchNormalization()(j)
    out = j
    return out
Example 13
    def build_model(shape, gru_layers, reg_layers, drop_layers):

        # Embedding Layer
        user_input = Input(shape=(1, ), dtype='int32', name='user_input')

        item_input = Input(shape=(1, ), dtype='int32', name='item_input')

        time_input = Input(shape=(1, ), dtype='int32', name='time_input')

        user_embedding = Flatten()(Embedding(
            input_dim=shape[0],
            output_dim=gru_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_user_embedding')(user_input))
        item_embedding = Flatten()(Embedding(
            input_dim=shape[1],
            output_dim=gru_layers[0],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_item_embedding')(item_input))
        time_embedding = Flatten()(Embedding(
            input_dim=shape[2],
            output_dim=gru_layers[1],
            embeddings_initializer=initializers.random_normal(),
            embeddings_regularizer=regularizers.l2(reg_layers[0]),
            input_length=1,
            name='gru_time_embedding')(time_input))

        user_embedding = Dropout(drop_layers[0])(user_embedding)
        item_embedding = Dropout(drop_layers[0])(item_embedding)
        time_embedding = Dropout(drop_layers[0])(time_embedding)

        gru_vector = Concatenate(axis=1)([user_embedding, item_embedding])
        gru_vector = Reshape(target_shape=(int(gru_layers[1]), -1))(gru_vector)

        for index in range(1, len(gru_layers) - 1):
            layers = GRU(units=gru_layers[index],
                         kernel_initializer=initializers.he_normal(),
                         kernel_regularizer=regularizers.l2(reg_layers[index]),
                         activation='tanh',
                         recurrent_activation='hard_sigmoid',
                         dropout=drop_layers[index],
                         return_sequences=(index != (len(gru_layers) - 2)),
                         name='gru_layer_%d' % index)
            gru_vector = layers([gru_vector, time_embedding])

        gru_vector = Dropout(drop_layers[-1])(gru_vector)

        prediction = Dense(units=gru_layers[-1],
                           activation='relu',
                           kernel_initializer=initializers.lecun_normal(),
                           kernel_regularizer=regularizers.l2(reg_layers[-1]),
                           name='gru_prediction')(gru_vector)
        _model = Model(inputs=[user_input, item_input, time_input],
                       outputs=prediction)
        return _model
Example 14
def get_model(num_users, num_items, layers=[20, 10], reg_layers=[0, 0]):
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # Number of layers in the MLP
    # Input variables
    # user_input = Input(shape=(1,), dtype='int32', name='user_input')

    user_input = Input(shape=(1, ), name='user_input')
    item_input = Input(shape=(1, ), dtype='int32', name='item_input')
    user_xz_input = Input(shape=(19, ), name='user_xz_input')

    MLP_Embedding_User = Embedding(input_dim=num_users,
                                   output_dim=int(layers[0] / 2),
                                   name='user_embedding',
                                   embeddings_regularizer=l2(reg_layers[0]),
                                   input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items,
                                   output_dim=int(layers[0] / 2),
                                   name='item_embedding',
                                   embeddings_regularizer=l2(reg_layers[0]),
                                   input_length=1)
    MLP_Embedding_User_xz = Embedding(input_dim=num_users,
                                      output_dim=int(layers[0]),
                                      name='user_xz_embedding',
                                      embeddings_regularizer=l2(reg_layers[0]),
                                      input_length=19)

    user_ = MLP_Embedding_User(user_input)
    item_ = MLP_Embedding_Item(item_input)
    ui_ = MLP_Embedding_User_xz(user_xz_input)

    lstm_1 = LSTM(64, activation='relu')(ui_)

    user_flatten = Flatten()(user_)
    item_flatten = Flatten()(item_)

    u_i_con = concatenate([user_flatten, item_flatten], name='u_i_con')

    dense_1 = Dense(128, activation='relu', name='dense_1')(u_i_con)

    dense_2 = Dense(64, activation='relu', name='dense_2')(dense_1)

    add_1 = add([dense_2, lstm_1], name='add_1')

    dense_3 = Dense(32, activation='relu', name='dense_3')(add_1)

    dense_4 = Dense(16, activation='relu', name='dense_4')(dense_3)

    dense_5 = Dense(8, activation='relu', name='dense_5')(dense_4)

    # Final prediction layer
    prediction = Dense(1,
                       activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(dense_5)

    model_ = Model(inputs=[user_xz_input, user_input, item_input],
                   outputs=prediction)
    model_.summary()
    return model_
Example 15
def get_model(train, num_users, num_items, userlayers, itemlayers, layers):
    dmf_num_layer = len(userlayers)  #Number of layers in the DMF
    mlp_num_layer = len(layers)  #Number of layers in the MLP
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)
    # Input variables
    user_input = Input(shape=(1, ), dtype='int32', name='user_input')
    item_input = Input(shape=(1, ), dtype='int32', name='item_input')

    # Embedding layer
    user_rating = Lambda(
        lambda x: tf.gather(user_matrix, tf.compat.v1.to_int32(x)))(user_input)
    item_rating = Lambda(
        lambda x: tf.gather(item_matrix, tf.compat.v1.to_int32(x)))(item_input)
    user_rating = Reshape((num_items, ))(user_rating)
    item_rating = Reshape((num_users, ))(item_rating)

    # DMF part
    userlayer = Dense(userlayers[0], activation="linear", name='user_layer0')
    itemlayer = Dense(itemlayers[0], activation="linear", name='item_layer0')
    dmf_user_latent = userlayer(user_rating)
    dmf_item_latent = itemlayer(item_rating)
    for idx in range(1, dmf_num_layer):
        userlayer = Dense(userlayers[idx],
                          activation='relu',
                          name='user_layer%d' % idx)
        itemlayer = Dense(itemlayers[idx],
                          activation='relu',
                          name='item_layer%d' % idx)
        dmf_user_latent = userlayer(dmf_user_latent)
        dmf_item_latent = itemlayer(dmf_item_latent)
    dmf_vector = multiply([dmf_user_latent, dmf_item_latent])

    # MLP part
    MLP_Embedding_User = Dense(layers[0] // 2,
                               activation="linear",
                               name='user_embedding')
    MLP_Embedding_Item = Dense(layers[0] // 2,
                               activation="linear",
                               name='item_embedding')
    mlp_user_latent = MLP_Embedding_User(user_rating)
    mlp_item_latent = MLP_Embedding_Item(item_rating)
    mlp_vector = concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, mlp_num_layer):
        layer = Dense(layers[idx], activation='relu', name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)

    # Concatenate DMF and MLP parts
    predict_vector = concatenate([dmf_vector, mlp_vector])

    # Final prediction layer
    prediction = Dense(1,
                       activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(),
                       name="prediction")(predict_vector)

    model_ = Model(inputs=[user_input, item_input], outputs=prediction)

    return model_
Example 16
def build_snn_model(
    x_shape=(100, 1, 10),
    n_layers=1,
    n_units=64,
    kernel_reg=1e-9,
    activity_reg=1e-9,
    bias_reg=1e-9,
    dropout_rate=0.5,
    optimizer='nadam',
    lr_rate=1e-5,
    gauss_noise_std=1e-3,
    n_gpus=0,
):
    """build snn model"""

    opts_map = {
        'adam': opts.Adam,
        'nadam': opts.Nadam,
        'adamax': opts.Adamax,
        'sgd': opts.SGD,
        'rmsprop': opts.RMSprop
    }

    snn_cfg = {
        'units': int(n_units),
        # 'batch_input_shape': (batch_size, x_shape[1], x_shape[2]),
        # 'batch_size': batch_size,
        'input_shape': x_shape,
        'kernel_regularizer': regularizers.l2(kernel_reg),
        'activity_regularizer': regularizers.l2(activity_reg),
        'bias_regularizer': regularizers.l2(bias_reg),
        'kernel_initializer': initializers.lecun_normal(
            seed=cfg.data_cfg['random_seed']),
        'activation': 'selu',
    }

    model = Sequential()
    model.add(Dense(**snn_cfg))
    model.add(GaussianNoise(gauss_noise_std))
    model.add(AlphaDropout(dropout_rate))
    if n_layers > 1:
        for i in range(n_layers - 1):
            snn_cfg.pop('input_shape', None)  # only the first layer declares the input shape
            model.add(Dense(**snn_cfg))
            model.add(GaussianNoise(gauss_noise_std))
            model.add(AlphaDropout(dropout_rate))
    model.add(Flatten())  # collapse the (timesteps, features) output before the regression head
    model.add(Dense(len(cfg.data_cfg['Target_param_names'])))

    opt = opts_map[optimizer](lr=lr_rate)
    model.compile(optimizer=opt, loss='mse')
    return model
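
A hedged usage sketch for the SELU network builder above; cfg is a module-level config object the snippet assumes (it supplies the random seed and target names), so the shapes here are invented:

model = build_snn_model(x_shape=(100, 1, 10),
                        n_layers=2,
                        n_units=64,
                        dropout_rate=0.1,
                        optimizer='nadam',
                        lr_rate=1e-4)
model.summary()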
def hint_model_1(number_of_labels,
                 first_filters=16,
                 input_shape=(224, 224, 3)):
    model = Sequential()

    # first layer
    model.add(
        Conv2D(filters=first_filters,
               input_shape=input_shape,
               kernel_size=2,
               strides=1,
               activation='relu',
               kernel_initializer=lecun_normal()))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # second layer
    model.add(
        Conv2D(filters=first_filters * 2,
               kernel_size=2,
               strides=1,
               activation='relu',
               kernel_initializer=lecun_normal()))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # third layer
    model.add(
        Conv2D(filters=first_filters * 4,
               kernel_size=2,
               strides=1,
               activation='relu',
               kernel_initializer=lecun_normal()))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # global average pooling in place of flattening
    model.add(GlobalAveragePooling2D())

    model.add(
        Dense(units=number_of_labels,
              activation='softmax',
              kernel_initializer=lecun_normal()))

    # set the filepath for saving the model's weights
    save_filepath = "saved_models/weights.best.hint_model_1.hdf5"
    return model, save_filepath
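
The builder returns the model together with a checkpoint path; a typical way to wire the two up (the training arrays here are hypothetical):

from keras.callbacks import ModelCheckpoint

model, save_filepath = hint_model_1(number_of_labels=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
checkpoint = ModelCheckpoint(save_filepath, save_best_only=True, verbose=1)
# model.fit(x_train, y_train, validation_split=0.1, callbacks=[checkpoint])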
Example 18
    def __conv_block(self, x, filters, kernel_size):
        out = Conv2D(filters,
                     kernel_size=kernel_size,
                     padding='same',
                     activation='linear',
                     kernel_initializer=initializers.lecun_normal(),
                     kernel_regularizer=regularizers.l2(self.l2_const))(x)
        out = BatchNormalization(axis=1, momentum=0.9)(out)
        out = LeakyReLU(alpha=0.1)(out)
        return out
def hint_model_2_nin_bn(number_of_labels,
                        first_filters=16,
                        input_shape=(224, 224, 3)):
    ks = 2  # kernel + stride example ks=2 means (2,2) kernel and stride of 2
    model = Sequential()

    model.add(
        Conv2D(filters=first_filters,
               kernel_size=1,
               strides=1,
               kernel_initializer=lecun_normal(),
               input_shape=input_shape))
    model.add(
        Conv2D(filters=first_filters,
               kernel_size=ks,
               strides=ks,
               kernel_initializer=lecun_normal()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(
        Conv2D(filters=first_filters * 2,
               kernel_size=1,
               strides=1,
               kernel_initializer=lecun_normal()))
    model.add(
        Conv2D(filters=first_filters * 2,
               kernel_size=ks,
               strides=ks,
               kernel_initializer=lecun_normal()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(
        Conv2D(filters=first_filters * 4,
               kernel_size=1,
               strides=1,
               kernel_initializer=lecun_normal()))
    model.add(
        Conv2D(filters=first_filters * 4,
               kernel_size=ks,
               strides=ks,
               kernel_initializer=lecun_normal()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(GlobalAveragePooling2D())

    model.add(
        Dense(units=number_of_labels,
              activation='softmax',
              kernel_initializer=lecun_normal()))

    filepath = "saved_models/weights.best.hint_model_2_nin_bn.hdf5"
    return model, filepath
Example 20
def res_model1(input_shape1, input_shape2):

    x_input = Input(shape=(input_shape1, ), name='x_input')
    output_1 = Dropout(0.3)(PReLU()(x_input))
    output0 = Dense(input_shape2,
                    kernel_initializer=initializers.lecun_normal())(output_1)
    output1 = res_unit(output0, input_shape2)

    #	output7=Dense(output_shape,kernel_initializer=initializers.lecun_normal())(Dropout(0.3)(output6))

    model = Model(inputs=x_input, outputs=output1)
    return model
Example 21
    def build_model(self, prior, input_shape):
        input = Input(shape=input_shape)

        x = Dense(300,
                  use_bias=False,
                  input_shape=input_shape,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(input)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(300,
                  use_bias=False,
                  kernel_initializer=initializers.lecun_normal(seed=1),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        output = Dense(1,
                       use_bias=True,
                       kernel_initializer=initializers.lecun_normal(seed=1))(x)

        model = Model(inputs=input, outputs=output)

        self.compile_model(model=model,
                           loss_type=self.loss_type,
                           theta1=self.theta1,
                           theta2=self.theta2,
                           prior=prior,
                           mode=self.mode)
Example 22
    def build_model(self, num_users, num_item, layers, reg_layers):

        assert len(layers) == len(reg_layers)

        # Input Layer
        user_id_input = Input(shape=(1,), dtype='int64', name='user_id_input')
        user_lc_input = Input(shape=(2,), dtype='int64', name='user_lc_input')

        item_id_input = Input(shape=(1,), dtype='int64', name='item_id_input')
        item_lc_input = Input(shape=(2,), dtype='int64', name='item_lc_input')

        user_id_embedding = self.getEmbedding(num_users, int(layers[0] / 4), 1, reg_layers[0], 'user_id_embedding')
        user_lc_embedding = self.getEmbedding(num_users, int(layers[0] / 4), 2, reg_layers[0], 'user_lc_embedding')

        item_id_embedding = self.getEmbedding(num_item, int(layers[0] / 4), 1, reg_layers[0], 'item_id_embedding')
        item_lc_embedding = self.getEmbedding(num_item, int(layers[0] / 4), 2, reg_layers[0], 'item_lc_embedding')

        user_id_latent = Flatten()(user_id_embedding(user_id_input))
        user_lc_latent = Flatten()(user_lc_embedding(user_lc_input))

        item_id_latent = Flatten()(item_id_embedding(item_id_input))
        item_lc_latent = Flatten()(item_lc_embedding(item_lc_input))

        # concatenate
        predict_user_vector = concatenate([user_id_latent, user_lc_latent])
        predict_item_vector = concatenate([item_id_latent, item_lc_latent])

        mlp_vector = concatenate([predict_user_vector, predict_item_vector])

        # AC-COS
        cosine_vector = dot([user_lc_latent, item_lc_latent], axes=1, normalize=True)

        # AC_EUC
        #euclidean_vector = Lambda(self.euclidean_distance, output_shape=self.eucl_dist_output_shape)([user_lc_latent, item_lc_latent])

        # Middle Layer
        for index in range(1, len(layers) - 1):
            layer = Dense(units=layers[index], kernel_initializer=initializers.random_normal(),
                          kernel_regularizer=l2(reg_layers[index]), activation='relu', name='mlpLayer%d' % index)
            mlp_vector = layer(mlp_vector)

        predict_vector = concatenate([mlp_vector, cosine_vector])

        # Output layer
        prediction = Dense(units=layers[-1], activation='linear', kernel_initializer=initializers.lecun_normal(),
                           kernel_regularizer=l2(reg_layers[-1]), name='prediction')(predict_vector)

        _model = Model(inputs=[user_id_input, user_lc_input, item_id_input, item_lc_input], outputs=prediction)
        plot_model(_model, to_file='model.png')
        return _model
Example 23
def make_cnn_lstm(word_index, max_seq):
    embeds, embed_dim = read_embeds(EFILE, word_index)

    embedding_layer = Embedding(len(word_index) + 1,
                                embed_dim,
                                weights=[embeds],
                                input_length=max_seq,
                                trainable=False)

    sequence_input = Input(shape=(max_seq, ), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)

    x = Reshape((-1, max_seq, embed_dim))(embedded_sequences)
    x = TimeDistributed(
        Conv1D(100,
               3,
               activation='relu',
               padding='valid',
               kernel_initializer=lecun_normal(seed=None),
               input_shape=(1, max_seq, embed_dim)))(x)
    x = TimeDistributed(MaxPooling1D())(x)
    x = TimeDistributed(Flatten())(x)
    x = LSTM(300,
             activation="relu",
             kernel_initializer=lecun_normal(seed=None))(x)
    x = Dropout(0.2)(x)
    x = Dense(150,
              activation='relu',
              kernel_initializer=lecun_normal(seed=None))(x)
    x = Dense(75,
              activation='relu',
              kernel_initializer=lecun_normal(seed=None))(x)

    preds = Dense(1, activation='sigmoid')(x)

    return sequence_input, x, preds
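
make_cnn_lstm returns the input tensor, the last hidden tensor, and the prediction tensor rather than a compiled model; a plausible way the caller assembles them (word_index and max_seq come from whatever tokenizer preprocessing the project uses):

from keras.models import Model

sequence_input, hidden, preds = make_cnn_lstm(word_index, max_seq=200)
model = Model(sequence_input, preds)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])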
Example 24
def get_model(train,
              num_users,
              num_items,
              userlayers=[512, 64],
              itemlayers=[1024, 64]):
    num_layer = len(userlayers)  # Number of layers in the MLP
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)

    # Input variables
    user = Input(shape=(1, ), dtype='int32', name='user_input')
    item = Input(shape=(1, ), dtype='int32', name='item_input')

    # Multi-hot User representation and Item representation
    user_input = Lambda(lambda x: tf.gather(user_matrix, tf.cast(x, tf.int32)))(user)
    item_input = Lambda(lambda x: tf.gather(item_matrix, tf.cast(x, tf.int32)))(item)
    user_input = Reshape((num_items, ))(user_input)
    item_input = Reshape((num_users, ))(item_input)
    print(user_input.shape, item_input.shape)

    # DMF part
    userlayer = Dense(userlayers[0], activation="linear", name='user_layer0')
    itemlayer = Dense(itemlayers[0], activation="linear", name='item_layer0')
    user_latent_vector = userlayer(user_input)
    item_latent_vector = itemlayer(item_input)
    print(user_latent_vector.shape, item_latent_vector.shape)
    for idx in range(1, num_layer):
        userlayer = Dense(userlayers[idx],
                          activation='relu',
                          name='user_layer%d' % idx)
        itemlayer = Dense(itemlayers[idx],
                          activation='relu',
                          name='item_layer%d' % idx)
        user_latent_vector = userlayer(user_latent_vector)
        item_latent_vector = itemlayer(item_latent_vector)
        print(user_latent_vector.shape, item_latent_vector.shape)

    predict_vector = multiply([user_latent_vector, item_latent_vector])
    prediction = Dense(1,
                       activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(predict_vector)

    print(prediction.shape)

    model_ = Model(inputs=[user, item], outputs=prediction)

    return model_
Example 25
def model_3(input_shape):
    x_input1 = Input(shape=(input_shape, ), name='x_input1')
    x_input2 = Input(shape=(input_shape, ), name='x_input2')
    check_layer1 = Subtract()([x_input1, x_input2])
    check_layer2 = Lambda(lambda x: K.square(x))(check_layer1)
    check_layer3 = Reshape((input_shape, 1))(check_layer2)
    check_layer4 = Conv1D(
        filters=1,
        kernel_size=input_shape,
        use_bias=False,
        kernel_constraint=Unit_Abs(),
        kernel_initializer=initializers.lecun_normal())(check_layer3)
    #   output7=Dense(output_shape,kernel_initializer=initializers.lecun_normal())(Dropout(0.3)(output6))
    check_layer5 = Flatten()(check_layer4)
    model = Model(inputs=[x_input1, x_input2], outputs=check_layer5)
    return model
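
Unit_Abs is a custom kernel constraint the snippet does not define. Judging by its use (a weighting over squared feature differences), a plausible sketch is a constraint that keeps the weights non-negative with unit L1 norm; this is a guess, not the author's definition:

import keras.backend as K
from keras.constraints import Constraint

class Unit_Abs(Constraint):
    # normalize absolute weights to sum to one
    def __call__(self, w):
        w = K.abs(w)
        return w / (K.sum(w) + K.epsilon())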
Example 26
def get_model(num_users, num_items, mf_dim, layers):
    num_layer = len(layers)  # Number of layers in the MLP
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    # Embedding layer
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim, name='mf_embedding_user',
                                  embeddings_initializer=initializers.random_normal(),
                                  input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim, name='mf_embedding_item',
                                  embeddings_initializer=initializers.random_normal(),
                                  input_length=1)

    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=int(layers[0] / 2), name="mlp_embedding_user",
                                   embeddings_initializer=initializers.random_normal(),
                                   input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=int(layers[0] / 2), name='mlp_embedding_item',
                                   embeddings_initializer=initializers.random_normal(),
                                    input_length=1)

    # MF part
    mf_user_latent = Flatten()(MF_Embedding_User(user_input))
    mf_item_latent = Flatten()(MF_Embedding_Item(item_input))
    # mf_vector = merge([mf_user_latent, mf_item_latent], mode = 'mul') # element-wise multiply
    mf_vector = multiply([mf_user_latent, mf_item_latent])

    # MLP part
    mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
    mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
    # mlp_vector = merge([mlp_user_latent, mlp_item_latent], mode = 'concat')
    mlp_vector = concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], activation='relu', name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)

    predict_vector = concatenate([mf_vector, mlp_vector])

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer=initializers.lecun_normal(),
                       name="prediction")(predict_vector)

    model_ = Model(inputs=[user_input, item_input],
                   outputs=prediction)

    return model_
def get_model(num_users, num_items, layers=[20, 10], reg_layers=[0, 0]):
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # Number of layers in the MLP
    # Input variables
    user_input = Input(shape=(1, ), dtype='int32', name='user_input')
    item_input = Input(shape=(1, ), dtype='int32', name='item_input')

    MLP_Embedding_User = Embedding(input_dim=num_users,
                                   output_dim=int(layers[0] / 2),
                                   name='user_embedding',
                                   embeddings_regularizer=l2(reg_layers[0]),
                                   input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items,
                                   output_dim=int(layers[0] / 2),
                                   name='item_embedding',
                                   embeddings_regularizer=l2(reg_layers[0]),
                                   input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MLP_Embedding_User(user_input))
    item_latent = Flatten()(MLP_Embedding_Item(item_input))

    # The 0-th layer is the concatenation of embedding layers
    # vector = merge([user_latent, item_latent], mode = 'concat')
    vector = concatenate([user_latent, item_latent])
    vector_re = Reshape((1, layers[0]))(vector)  # concatenated width is layers[0]

    conv_1 = Conv1D(filters=32, kernel_size=1, padding='same')(vector_re)
    conv_2 = Conv1D(filters=16, kernel_size=1, padding='same')(conv_1)
    conv_3 = Conv1D(filters=8, kernel_size=1, padding='same')(conv_2)

    flatten_1 = Flatten()(conv_3)

    # Final prediction layer
    prediction = Dense(1,
                       activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(flatten_1)

    model_ = Model(inputs=[user_input, item_input], outputs=prediction)
    model_.summary()
    return model_
Example 28
    def fit(self, X, Y, lambd, keep_prob, learning_rate, xavier=True, iterations=1000, seed=1, gradient_check=False,
            print_cost=False):
        '''
        Trains the model given X and Y and learning parameters.

        Args:
            X (ndarray): Samples as columns, features in rows.
            Y (ndarray): Labels.
            lambd (float): If not None or 0, you will get L2 regularization with L2 penalty.
            keep_prob (float): If less than 1.0, dropout regularization will be implemented.
            learning_rate (float): Learning rate.
            xavier (boolean): True for Xavier initialization, otherwise random initialization.
            iterations (int): Number of iterations.
            seed (int): Random number generator seed.
            gradient_check (boolean): Switches off dropout to allow checking gradients with a numerical check.
            print_cost (boolean): True to print cost as you train.

        '''

        if type(keep_prob) == list:
            assert len(keep_prob) == len(self.activations), 'keep_prob list must match the number of activations'

        if type(keep_prob) != list:
            self.keep_prob = np.ones(len(self.activations)) * (keep_prob)
        else:
            self.keep_prob = [x for x in keep_prob]

        inputs = Input(shape=[self.L[0], ])
        x = inputs
        for i in range(1, len(self.L)):
            name = str(i)
            print(name, self.activations[i - 1], self.keep_prob[i - 1])
            x = Dense(self.L[i], name=name + 'Z', kernel_initializer=lecun_normal(seed=seed),
                      kernel_regularizer=l2(lambd))(x)
            x = Activation(self.activations[i - 1], name=name + 'A')(x)
            x = Dropout(1 - self.keep_prob[i - 1], seed=seed, name=name + 'D')(x)

        output_layer = x

        self.model = Model(inputs=[inputs], outputs=[output_layer])
        self.model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy', metrics=['accuracy'])
        self.model.fit(x=X, y=Y, verbose=0, epochs=iterations, batch_size=X.shape[0], shuffle=False)
Example 29
    def build_model(shape, gtf_layers, reg_layers, drop_layers):

        # Embedding Layer
        user_input = Input(shape=(1,), dtype='int32', name='user_input')

        item_input = Input(shape=(1,), dtype='int32', name='item_input')

        time_input = Input(shape=(1,), dtype='int32', name='time_input')

        user_embedding = Flatten()(Embedding(input_dim=shape[0], output_dim=gtf_layers[0], input_length=1,
                                             embeddings_initializer=initializers.random_normal(),
                                             embeddings_regularizer=regularizers.l2(reg_layers[0]),
                                             name='gtf_user_embedding')(user_input))
        item_embedding = Flatten()(Embedding(input_dim=shape[1], output_dim=gtf_layers[0], input_length=1,
                                             embeddings_initializer=initializers.random_normal(),
                                             embeddings_regularizer=regularizers.l2(reg_layers[0]),
                                             name='gtf_item_embedding')(item_input))
        time_embedding = Flatten()(Embedding(input_dim=shape[2], output_dim=gtf_layers[0], input_length=1,
                                             embeddings_initializer=initializers.random_normal(),
                                             embeddings_regularizer=regularizers.l2(reg_layers[0]),
                                             name='gtf_time_embedding')(time_input))

        user_embedding = Dropout(drop_layers[0])(user_embedding)
        item_embedding = Dropout(drop_layers[0])(item_embedding)
        time_embedding = Dropout(drop_layers[0])(time_embedding)

        us = Dot(axes=-1)([user_embedding, item_embedding])
        ut = Dot(axes=-1)([user_embedding, time_embedding])
        st = Dot(axes=-1)([item_embedding, time_embedding])

        mf_vector = Add()([us, ut, st])

        mf_vector = Dropout(drop_layers[-1])(mf_vector)

        prediction = Dense(units=gtf_layers[-1], activation='relu', use_bias=True,
                           kernel_initializer=initializers.lecun_normal(),
                           kernel_regularizer=regularizers.l2(reg_layers[-1]), name='gtf_prediction')(mf_vector)

        _model = Model(inputs=[user_input, item_input, time_input], outputs=prediction)
        return _model
Example 30
def fit_model(X,
              Y,
              Z=None,
              num_nodes=0,
              a=1,
              b=0.5,
              size=2,
              kernel='power',
              epochs=50,
              batch_size=100,
              optimizer='adamax',
              loss="mse",
              seed=1):
    from keras.layers import Input, Embedding, BatchNormalization
    from keras.models import Model
    from keras.initializers import lecun_normal
    from .layers import get_distance_layer

    walk_len = X.shape[1]

    # layer specification
    inp = Input(shape=(walk_len, ))
    if Z is not None:
        embedding = Embedding(num_nodes, size, weights=[Z])(inp)
    else:
        embedding = Embedding(num_nodes,
                              size,
                              embeddings_initializer=lecun_normal(seed))(inp)
    batchn = BatchNormalization()(embedding)
    distance = get_distance_layer(kernel, batchn, walk_len, a, b, seed)

    # build and compile model
    model = Model(inp, distance)
    model.compile(optimizer, loss)
    model.fit(X, Y, epochs=epochs, batch_size=batch_size)

    Z = model.layers[1].get_weights()[0]
    return Z
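
A hedged call sketch for fit_model: X holds integer node ids along random walks and Y the proximity targets, whose exact shape depends on the unshown get_distance_layer, so these arrays are illustrative only:

import numpy as np

X = np.random.randint(0, 50, size=(1000, 10))    # 1000 walks of length 10 over 50 nodes
Y = np.random.rand(1000, 1).astype('float32')    # hypothetical proximity targets
Z = fit_model(X, Y, num_nodes=50, size=2, epochs=5)
print(Z.shape)                                   # (num_nodes, size) embedding matrix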