Example no. 1
0
def test_WDL():
    """Smoke-test the WDL (Wide & Deep) model.

    Builds random sparse/dense toy inputs, trains for one epoch with a
    validation split, then round-trips the model through disk twice:
    once via weights only, once via the full saved model.
    """
    name = "WDL"

    sample_size = 64
    # Toy feature config: three sparse fields (value = vocabulary size)
    # and three dense (continuous) fields.
    feature_dim_dict = {
        'sparse': {
            'sparse_1': 2,
            'sparse_2': 5,
            'sparse_3': 10
        },
        'dense': ['dense_1', 'dense_2', 'dense_3']
    }
    # One integer-id array per sparse field, ids drawn within that
    # field's vocabulary size.
    sparse_input = [
        np.random.randint(0, dim, sample_size)
        for dim in feature_dim_dict['sparse'].values()
    ]
    # Fix: use `_` as the loop variable — the original used `name`,
    # shadowing the `name` defined above.
    dense_input = [
        np.random.random(sample_size) for _ in feature_dim_dict['dense']
    ]
    y = np.random.randint(0, 2, sample_size)
    x = sparse_input + dense_input

    # The same feature dict is passed twice — presumably once for the
    # wide (linear) part and once for the deep part; TODO confirm
    # against the WDL signature.
    model = WDL(feature_dim_dict,
                feature_dim_dict,
                hidden_size=[32, 32],
                keep_prob=0.5)
    model.compile('adam',
                  'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    # `x + x`: the full input list is fed twice, matching the doubled
    # feature dicts above — presumably one copy per tower; TODO confirm.
    model.fit(x + x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(name + " test train valid pass!")
    model.save_weights(name + '_weights.h5')
    model.load_weights(name + '_weights.h5')
    # Fix: added the missing leading space so this message matches the
    # "WDL test ..." format of the other status lines.
    print(name + " test save load weight pass!")
    save_model(model, name + '.h5')
    model = load_model(name + '.h5', custom_objects)
    # Fix: same missing-space inconsistency as above.
    print(name + " test save load model pass!")

    print(name + " test pass!")
Example no. 2
0
            model_input,
            data[target].values,
            callbacks=[checkpoint],
            batch_size=batch_size,
            epochs=50,
            verbose=1,
            validation_split=0.2,
        )

    elif mode == 'test':
        model = WDL({
            "sparse": sparse_feature_dim,
            "dense": []
        },
                    final_activation='sigmoid')
        model.load_weights(
            'model_save/wdl_sample-ep001-loss0.184-val_loss0.172.h5')

        # model = load_model('model_save/deep_fm_sample-ep001-loss0.192-val_loss0.176.h5')

        data = pd.read_csv("./data/sample/validation.txt")

        # 1.Label Encoding for sparse features,and do simple Transformation for dense features
        for feat in sparse_features:
            lbe = LabelEncoder()
            data[feat] = lbe.fit_transform(data[feat])
        # 2.count #unique features for each sparse field
        sparse_feature_dim = {
            feat: data[feat].nunique()
            for feat in sparse_features
        }
        # 3.generate input data for model