Example #1
def test_NFM():
    name = "NFM"

    sample_size = 64
    feature_dim_dict = {'sparse': {'sparse_1': 2, 'sparse_2': 5,
                                   'sparse_3': 10}, 'dense': ['dense_1', 'dense_2', 'dense_3']}
    sparse_input = [np.random.randint(0, dim, sample_size)
                    for dim in feature_dim_dict['sparse'].values()]
    dense_input = [np.random.random(sample_size)
                   for name in feature_dim_dict['dense']]
    y = np.random.randint(0, 2, sample_size)
    x = sparse_input + dense_input

    model = NFM(feature_dim_dict, embedding_size=8,
                hidden_size=[32, 32], keep_prob=0.5, )
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(name+" test train valid pass!")
    model.save_weights(name + '_weights.h5')
    model.load_weights(name + '_weights.h5')
    print(name+" test save load weight pass!")
    save_model(model,  name + '.h5')
    model = load_model(name + '.h5', custom_objects)
    print(name + " test save load model pass!")

    print(name + " test pass!")
Example #2
def test_NFM(hidden_size, sparse_feature_num):
    model_name = "NFM"

    sample_size = SAMPLE_SIZE
    x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
                                          dense_feature_num=sparse_feature_num)

    model = NFM(feature_columns, feature_columns, dnn_hidden_units=[8, 8], dnn_dropout=0.5)
    check_model(model, model_name, x, y)
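Examples #2 through #5 are parameterized variants of the same test taken from different DeepCTR versions (the older API uses embedding_size, hidden_size and keep_prob, the newer one dnn_hidden_units and dnn_dropout). They rely on get_test_data and check_model, small helpers from the library's test utilities. A rough sketch of what check_model does, assuming a compiled-Keras-style model and without claiming to match the exact helper:

def check_model(model, model_name, x, y):
    # Sketch of the test helper: compile, train for one epoch, and round-trip the weights.
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    print(model_name + ' test pass!')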
Example #3
def test_NFM(hidden_size, sparse_feature_num):

    model_name = "NFM"

    sample_size = 64
    x, y, feature_dim_dict = get_test_data(
        sample_size, sparse_feature_num, sparse_feature_num)

    model = NFM(feature_dim_dict, embedding_size=8,
                hidden_size=[32, 32], keep_prob=0.5, )
    check_model(model, model_name, x, y)
Example #4
def test_NFM(hidden_size, sparse_feature_num):

    model_name = "NFM"

    sample_size = SAMPLE_SIZE
    x, y, feature_dim_dict = get_test_data(sample_size, sparse_feature_num,
                                           sparse_feature_num)

    model = NFM(
        feature_dim_dict,
        embedding_size=8,
        dnn_hidden_units=[32, 32],
        dnn_dropout=0.5,
    )
    check_model(model, model_name, x, y)
Example #5
def test_NFM(hidden_size, sparse_feature_num):

    model_name = "NFM"

    sample_size = 64
    feature_dim_dict = {"sparse": {}, 'dense': []}
    for name, num in zip(["sparse", "dense"], [sparse_feature_num, sparse_feature_num]):
        if name == "sparse":
            for i in range(num):
                feature_dim_dict[name][name + '_' +
                                       str(i)] = np.random.randint(1, 10)
        else:
            for i in range(num):
                feature_dim_dict[name].append(name + '_' + str(i))
    sparse_input = [np.random.randint(0, dim, sample_size)
                    for dim in feature_dim_dict['sparse'].values()]
    dense_input = [np.random.random(sample_size)
                   for name in feature_dim_dict['dense']]
    y = np.random.randint(0, 2, sample_size)
    x = sparse_input + dense_input

    model = NFM(feature_dim_dict, embedding_size=8,
                hidden_size=[32, 32], keep_prob=0.5, )
    check_model(model, model_name, x, y)
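The hidden_size and sparse_feature_num arguments in Examples #2 through #5 are supplied by the test runner; in a pytest-based suite they would typically be driven by parametrization along these lines (the parameter values here are illustrative only, not the ones used by DeepCTR):

import pytest

# Illustrative parametrization only; the real test suite picks its own values.
@pytest.mark.parametrize('hidden_size,sparse_feature_num',
                         [((32, 32), 1), ((32, 32), 3)])
def test_NFM(hidden_size, sparse_feature_num):
    ...  # body as in Example #5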
Example #6
# Label-encode each sparse feature (assumes sparse_features and sklearn's LabelEncoder
# from the preprocessing step that precedes this snippet).
for feature in sparse_features:
    lbe = LabelEncoder()
    data[feature] = lbe.fit_transform(data[feature])
# Count the number of distinct values of each sparse feature
fixlen_feature_columns = [
    SparseFeat(feature, data[feature].nunique()) for feature in sparse_features
]
linear_feature_columns = fixlen_feature_columns
dnn_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)

# Split the dataset into training and test sets
train, test = train_test_split(data, test_size=0.2)
train_model_input = {name: train[name].values for name in feature_names}
test_model_input = {name: test[name].values for name in feature_names}

# Train with NFM
model = NFM(linear_feature_columns, dnn_feature_columns, task='regression')
model.compile(
    "adam",
    "mse",
    metrics=['mse'],
)
history = model.fit(
    train_model_input,
    train[target].values,
    batch_size=256,
    epochs=1,
    verbose=True,
    validation_split=0.2,
)
# Predict with NFM
pred_ans = model.predict(test_model_input, batch_size=256)
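Once pred_ans is produced, the regression example is usually closed out by scoring the predictions against the held-out labels. A short evaluation sketch, assuming scikit-learn is available and target is the label column list defined earlier in the preprocessing:

from sklearn.metrics import mean_squared_error

# Score the held-out predictions (assumes `target` is the label column list from the preprocessing step).
print('test MSE:', round(mean_squared_error(test[target].values, pred_ans), 4))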
Example #7
    dnn_feature_columns = fixlen_feature_columns
    linear_feature_columns = fixlen_feature_columns

    feature_names = get_feature_names(linear_feature_columns +
                                      dnn_feature_columns)

    # 3.generate input data for model

    train, test = train_test_split(data, test_size=0.2)
    train_model_input = {name: train[name] for name in feature_names}
    test_model_input = {name: test[name] for name in feature_names}

    # 4.Define Model,train,predict and evaluate
    model = NFM(linear_feature_columns,
                dnn_feature_columns,
                task='binary',
                dnn_hidden_units=(400, 400, 400),
                dnn_dropout=0.5)
    model.compile(
        "adam",
        "binary_crossentropy",
        metrics=['binary_crossentropy'],
    )

    history = model.fit(
        train_model_input,
        train[target].values,
        batch_size=256,
        epochs=10,
        verbose=2,
        validation_split=0.2,