Example #1
import tensorflow as tf

from ampligraph.evaluation import train_test_split_no_unseen
from ampligraph.latent_features import ComplEx, save_model


def generate_model(X):
    X_train, X_test = train_test_split_no_unseen(X, test_size=100)

    print('Train set size: ', X_train.shape)
    print('Test set size: ', X_test.shape)

    model = ComplEx(batches_count=100,
                    seed=0,
                    epochs=10,
                    k=150,
                    eta=5,
                    optimizer='adam',
                    optimizer_params={'lr': 1e-3},
                    loss='multiclass_nll',
                    regularizer='LP',
                    regularizer_params={
                        'p': 3,
                        'lambda': 1e-5
                    },
                    verbose=True)

    #positives_filter = X

    tf.logging.set_verbosity(tf.logging.ERROR)  # show only TensorFlow messages at ERROR level or above

    model.fit(X_train, early_stopping=False)

    print("created the model")

    save_model(model, './best_model.pkl')

    return X_test
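
A minimal usage sketch for what could follow the function above, which writes the trained model to ./best_model.pkl and returns only the held-out test triples. It assumes X is the full array of triples passed to generate_model and uses AmpliGraph's restore_model, evaluate_performance, mrr_score and hits_at_n_score:

from ampligraph.latent_features import restore_model
from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score

X_test = generate_model(X)                 # trains the model and writes ./best_model.pkl
model = restore_model('./best_model.pkl')  # reload the persisted model

# rank the held-out triples, filtering known positives from the corruptions
ranks = evaluate_performance(X_test, model=model, filter_triples=X,
                             use_default_protocol=True, verbose=True)
print('MRR: %.3f, Hits@10: %.3f' % (mrr_score(ranks), hits_at_n_score(ranks, n=10)))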
Example #2
import importlib
import shutil

import numpy as np
import numpy.testing as npt

from ampligraph.latent_features import save_model, restore_model


def test_save_and_restore_model():
    models = ('ComplEx', 'TransE', 'DistMult')

    for model_name in models:
        module = importlib.import_module("ampligraph.latent_features.models")

        print('Doing save/restore testing for model class: ', model_name)

        class_ = getattr(module, model_name)

        model = class_(batches_count=2,
                       seed=555,
                       epochs=20,
                       k=10,
                       optimizer='adagrad',
                       optimizer_params={'lr': 0.1})

        X = np.array([['a', 'y', 'b'], ['b', 'y', 'a'], ['a', 'y', 'c'],
                      ['c', 'y', 'a'], ['a', 'y', 'd'], ['c', 'y', 'd'],
                      ['b', 'y', 'c'], ['f', 'y', 'e']])

        model.fit(X)

        EXAMPLE_LOC = 'unittest_save_and_restore_models'
        save_model(model, EXAMPLE_LOC)
        loaded_model = restore_model(EXAMPLE_LOC)

        assert loaded_model is not None
        assert loaded_model.all_params == model.all_params
        assert loaded_model.is_fitted == model.is_fitted
        assert loaded_model.ent_to_idx == model.ent_to_idx
        assert loaded_model.rel_to_idx == model.rel_to_idx

        for i in range(len(loaded_model.trained_model_params)):
            npt.assert_array_equal(loaded_model.trained_model_params[i],
                                   model.trained_model_params[i])

        y_pred_before, _ = model.predict(np.array([['f', 'y', 'e'],
                                                   ['b', 'y', 'd']]),
                                         get_ranks=True)
        y_pred_after, _ = loaded_model.predict(np.array([['f', 'y', 'e'],
                                                         ['b', 'y', 'd']]),
                                               get_ranks=True)
        npt.assert_array_equal(y_pred_after, y_pred_before)

        npt.assert_array_equal(
            loaded_model.get_embeddings(['a', 'b'], type='entity'),
            model.get_embeddings(['a', 'b'], type='entity'))

        shutil.rmtree(EXAMPLE_LOC)
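
The importlib-based class lookup in this test is also a convenient way to build a model from a configuration string. A small sketch of that pattern (make_model and the hyperparameter values below are hypothetical, not part of the original test):

import importlib

def make_model(model_name, **hyperparams):
    # resolve the model class by name from ampligraph's models module, as the test above does
    module = importlib.import_module("ampligraph.latent_features.models")
    return getattr(module, model_name)(**hyperparams)

model = make_model('TransE', batches_count=2, seed=0, epochs=5, k=10)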
Example #3
    def train(self, triples, is_update=False):
        logger.warning("Training may take a long time!")
        training_array = self._prepare_training_data(triples)
        logger.info("Start Training!")

        if not is_update:
            logger.info("Fitting from scratch!")
            trained_model = self._get_model(is_update=False)
        else:
            logger.info("Continuous training!")
            trained_model = self._get_model(is_update=True)

            if self.update_mode == UpdateMode.ADAPT_RESTART:
                trained_model.copy_old_model_params(self.base_model)
            elif self.update_mode == UpdateMode.ADAPT_PROGRESSIVE:
                trained_model.copy_old_model_params(self.curr_model)

        trained_model.fit(training_array, continue_training=is_update)
        save_model(trained_model,
                   model_name_path=self.get_current_model_filepath())
        logger.info("Done Training model!")
        return trained_model
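
Example #3 is a method of a wrapper class that is not shown here; UpdateMode, _get_model, _prepare_training_data and get_current_model_filepath are that class's own helpers. A hypothetical sketch of what the UpdateMode switch might look like and how the method would be called (the names and values below are assumptions, not part of the original code):

from enum import Enum

class UpdateMode(Enum):
    # hypothetical reconstruction of the two update modes referenced above
    ADAPT_RESTART = 'adapt_restart'          # warm-start from the original base model's parameters
    ADAPT_PROGRESSIVE = 'adapt_progressive'  # warm-start from the most recently trained model

# usage, assuming trainer is an instance of the surrounding class:
#   trainer.train(initial_triples, is_update=False)  # fit from scratch
#   trainer.train(new_triples, is_update=True)       # continue training on new triples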
Example #4
                verbose=True)

positives_filter = X

import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)

model.fit(data['train'], early_stopping=False)

"""---
# 4.  Saving and restoring a model
"""

from ampligraph.latent_features import save_model, restore_model

save_model(model, './best_model.pkl')

del model

model = restore_model('./best_model.pkl')

if model.is_fitted:
    print('The model is fit!')
else:
    print('The model is not fit! Did you skip a step?')
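
# As a quick sanity check, the restored model can score triples directly.
# A minimal sketch (the triple below is a placeholder; use entities and a
# relation that appear in the training data):
import numpy as np

triple = np.array([['subject_entity', 'relation', 'object_entity']])  # placeholder triple
print('Score: ', model.predict(triple))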

"""---
# 5. Evaluating a model
"""

from ampligraph.evaluation import evaluate_performance
Example #5
 if (mdl[j] == "ComplEx"):
     model = ComplEx(verbose=True)
 elif (mdl[j] == "ConvKB"):
     model = ConvKB(verbose=True)
 elif (mdl[j] == "DistMult"):
     model = DistMult(verbose=True)
 elif (mdl[j] == "HolE"):
     model = HolE(verbose=True)
 elif (mdl[j] == "TransE"):
     model = TransE(verbose=True)
 elif (mdl[j] == "RandomBaseline"):
     model = RandomBaseline(verbose=True)
 tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # show only TensorFlow messages at ERROR level or above
 model.fit(train_X)
 
 # Save model at its best-performance point
 save_model(model, 'best_ampliGraph_model.pkl')
 del model  # Delete older model
 # Load the recently saved best-performance model
 model = restore_model('./best_ampliGraph_model.pkl')
 if model.is_fitted:
     print('The model is fit!')
 else:
     print('The model is not fit! Did you skip a step?')
 
 # Evaluate the model's performance on the held-out test set
 test_X = filter_unseen_entities(test_X, model, verbose=True, strict=False)
 test_y = test_X[:, 1]
 scores_validtn = evaluate_performance(test_X,
                                       model=model,
                                       filter_triples=positives_filter,  # corruption strategy filter defined above
                                       use_default_protocol=True,  # corrupt subj and obj separately while evaluating