Example #1
def test_convkb_train_predict():
    """Smoke test: fit a tiny ConvKB model on WN18 and score ten test triples."""
    # Keep the convolutional hyper-parameters in one place for readability.
    conv_params = {
        'num_filters': 16,
        'filter_sizes': [1],
        'dropout': 0.0,
        'is_trainable': True,
    }

    model = ConvKB(batches_count=2, seed=22, epochs=1, k=10, eta=1,
                   embedding_model_params=conv_params,
                   optimizer='adam', optimizer_params={'lr': 0.001},
                   loss='pairwise', loss_params={},
                   verbose=True)

    dataset = load_wn18()
    model.fit(dataset['train'])

    scores = model.predict(dataset['test'][:10])
    print(scores)
Example #2
def test_convkb_save_restore():
    """Check that ConvKB predictions survive a save/restore round trip on WN18.

    Fix: the temp model file is now removed in a ``finally`` block, so a
    failing assertion (or a failed restore/predict) no longer leaks
    ``convkb.tmp`` onto disk and poisons later runs.
    """
    model = ConvKB(batches_count=2,
                   seed=22,
                   epochs=1,
                   k=10,
                   eta=1,
                   embedding_model_params={
                       'num_filters': 16,
                       'filter_sizes': [1],
                       'dropout': 0.0,
                       'is_trainable': True
                   },
                   optimizer='adam',
                   optimizer_params={'lr': 0.001},
                   loss='pairwise',
                   loss_params={},
                   verbose=True)

    X = load_wn18()
    model.fit(X['train'])
    y1 = model.predict(X['test'][:10])

    save_model(model, 'convkb.tmp')
    del model
    try:
        model = restore_model('convkb.tmp')
        y2 = model.predict(X['test'][:10])

        # The restored model must reproduce the original scores exactly.
        assert np.all(y1 == y2)
    finally:
        # Always clean up the temp file, even when the assertion fails.
        os.remove('convkb.tmp')
Example #3
def test_convkb_save_restore():
    """Check that ConvKB predictions survive a save/restore round trip
    on a tiny in-memory toy graph.

    Fix: the temp model file is now removed in a ``finally`` block, so a
    failing assertion (or a failed restore/predict) no longer leaks
    ``convkb.tmp`` onto disk and poisons later runs.
    """
    # Small hand-built knowledge graph: 8 training triples, 2 test triples.
    X = np.array([['a', 'y', 'b'], ['b', 'y', 'a'], ['a', 'y', 'c'],
                  ['c', 'y', 'a'], ['a', 'y', 'd'], ['c', 'y', 'd'],
                  ['b', 'y', 'c'], ['f', 'y', 'e']])

    X_test = np.array([['f', 'y', 'a'], ['f', 'y', 'b']])

    model = ConvKB(batches_count=1,
                   seed=22,
                   epochs=1,
                   k=10,
                   eta=1,
                   embedding_model_params={
                       'num_filters': 16,
                       'filter_sizes': [1],
                       'dropout': 0.0,
                       'is_trainable': True
                   },
                   optimizer='adam',
                   optimizer_params={'lr': 0.001},
                   loss='pairwise',
                   loss_params={},
                   verbose=True)

    model.fit(X)
    y1 = model.predict(X_test)

    save_model(model, 'convkb.tmp')
    del model
    try:
        model = restore_model('convkb.tmp')
        y2 = model.predict(X_test)

        # The restored model must reproduce the original scores exactly.
        assert np.all(y1 == y2)
    finally:
        # Always clean up the temp file, even when the assertion fails.
        os.remove('convkb.tmp')
     
 for j in range(len(mdl)):
     # Fit & Train model via ampliGraph library
     log_key = mdl[j]+": "+graph_data[i]
     log_file = open("eval_log.txt", "a")
     print("\n\n----"+log_key+"----", file=log_file)
     print("------------------------------------------------")
     print("%d) Implementation Model: %s" % (1, mdl[j]))
     print("------------------------------------------------")
     start_time = time.time()  # START: Training Time Tracker    
     K.clear_session()  # Kills current TF comp-graph & creates a new one
     
     if (mdl[j] == "ComplEx"):
         model = ComplEx(verbose=True)
     elif (mdl[j] == "ConvKB"):
         model = ConvKB(verbose=True)
     elif (mdl[j] == "DistMult"):
         model = DistMult(verbose=True)
     elif (mdl[j] == "HolE"):
         model = HolE(verbose=True)
     elif (mdl[j] == "TransE"):
         model = TransE(verbose=True)
     elif (mdl[j] == "RandomBaseline"):
         model = RandomBaseline(verbose=True)
     tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # TensorFlow will tell you all messages that have the label ERROR
     model.fit(train_X)
     
     # Save model at its best-performance point
     save_model(model, 'best_ampliGraph_model.pkl')
     del model  # Delete older model
     # Load recently save best-performance model