Example #1
    def evaluate_profile(recommender, users_to_evaluate: set):

        evaluator = Evaluator()

        MAP_final, _ = evaluator.evaluateRecommender(recommender,
                                                     users_to_evaluate)

        print("MAP-10 score:", MAP_final)

        return MAP_final
Example #2
    def evaluate_hybrid_weights_validation_kfold(recommender_list,
                                                 weights,
                                                 kfold=4,
                                                 parallel_fit=False,
                                                 user_group="all",
                                                 parallelize_evaluation=False):

        num_cores = multiprocessing.cpu_count()

        users_to_evaluate_list = []
        validation_data_list = []
        kfold_data = Helper().get_kfold_data(kfold)
        for i in range(kfold):
            URM_validation, _, validation_data, _ = kfold_data[i]
            validation_data_list.append(validation_data)

            if user_group == "cold":
                users_to_evaluate_list.append(
                    prepare_cold_users(URM_validation, validation_data))
            elif user_group == "warm":
                raise NotImplementedError
            else:
                users_to_evaluate_list.append(list(validation_data.keys()))

            recommender_list[i].fit(**weights)

        data_list = []

        if parallelize_evaluation and kfold <= num_cores:

            with multiprocessing.Pool(processes=kfold) as p:
                results = p.starmap(
                    Evaluator().evaluate_recommender_kfold,
                    [(recommender_list[i], users_to_evaluate_list[i],
                      validation_data_list[i]) for i in range(kfold)])
                for i in range(len(results)):
                    data_list.append(results[i][0])
        else:
            for i in range(kfold):
                recommender = recommender_list[i]
                # recommender.fit(**weights)

                MAP, _ = Evaluator().evaluate_recommender_kfold(
                    recommender, users_to_evaluate_list[i],
                    validation_data_list[i])

                data_list.append(MAP)

        MAP_final, variance = compute_mean_and_variance(data_list)
        print("Variance over k-fold:", variance)
        print("MAP-10 score:", MAP_final)

        return MAP_final
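
compute_mean_and_variance() is not defined in any of these snippets; a minimal sketch consistent with how it is called above (a list of per-fold MAP values in, mean and variance out) might be:

import numpy as np

def compute_mean_and_variance(data_list):
    # data_list holds one MAP@10 value per fold
    values = np.asarray(data_list, dtype=float)
    return float(values.mean()), float(values.var())
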
Example #3
    def predict(self, spark, tmp, xgb):
        data = PreProcessor.transVector(tmp, 'features')
        predictions = xgb.predict(data, -999).map(
            lambda row: (row['predictions'][1], row['label']))
        predictions = predictions.toDF("score", "label")
        right = predictions.withColumn("idx", monotonically_increasing_id())
        left = tmp.select(['name', 'idcard', 'phone']).withColumn(
            "idx", monotonically_increasing_id())
        res_df = left.join(right, ['idx'], 'inner').drop('idx')
        evaluator_handle = Evaluator(spark)
        auc = evaluator_handle.evaluateAuc(res_df)
        print("AUC: ", auc)
        return res_df, auc
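
evaluateAuc() belongs to the project's own Evaluator and is not shown here. As a hedged point of reference, an equivalent AUC computation with Spark ML's built-in evaluator on a DataFrame like res_df (binary label column, probability-like score column) could look like this:

from pyspark.ml.evaluation import BinaryClassificationEvaluator

def compute_auc(df, score_col="score", label_col="label"):
    # Area under the ROC curve; rawPredictionCol accepts a double-valued score column.
    evaluator = BinaryClassificationEvaluator(rawPredictionCol=score_col,
                                              labelCol=label_col,
                                              metricName="areaUnderROC")
    return evaluator.evaluate(df)
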
Example #4
    def evaluate_hybrid_weights_test(recommender, weights, exclude_users=None):

        recommender.fit(**weights)

        MAP_final, _ = Evaluator(test_mode=True).evaluateRecommender(
            recommender, exclude_users)

        print("MAP-10 score:", MAP_final)

        return MAP_final
Example #5
    def evaluate_hybrid_weights_validation(recommender,
                                           weights,
                                           exclude_users=None):

        recommender.fit(**weights)

        MAP_final, _ = Evaluator().evaluateRecommender(recommender,
                                                       exclude_users)

        print("MAP-10 score:", MAP_final)

        return MAP_final
Example #6
    def crossValidationByXGB(self, paramMap):
        split_num_arr = []
        for i in range(self.cv_num):
            split_num_arr.append(1.0 / self.cv_num)
        split_data_arr = self.train.randomSplit(split_num_arr, seed=666)
        for split_df in split_data_arr:  # randomSplit returns a plain Python list, which has no .map
            split_df.cache()
        kfold_res = self.KFold(split_data_arr)
        train_ks_list = []
        val_ks_list = []
        train_auc_list = []
        val_auc_list = []

        for i in range(len(kfold_res)):
            train_df, validation_df = kfold_res[i]
            xgb_handle = XGBoostClassifier(config['XGBOOST'])
            xgbModel = xgb_handle.train(train_df)
            train_res, train_auc = xgb_handle.predict(self.spark, train_df, xgbModel)
            validation_res, validation_auc = xgb_handle.predict(self.spark, validation_df, xgbModel)

            evaluator_handler = Evaluator(self.spark)
            train_ks = evaluator_handler.evaluateKsCustomized(train_res, "score")
            val_ks = evaluator_handler.evaluateKsCustomized(validation_res, "score")
            print("fold{0} train ks: {1}".format(i, train_ks))
            print("fold{0} val ks: {1}".format(i, val_ks))
            print("fold{0} train auc: {1}".format(i, train_auc))
            print("fold{0} val auc: {1}".format(i, validation_auc))

            train_ks_list.append(train_ks)
            train_auc_list.append(train_auc)
            val_ks_list.append(val_ks)
            val_auc_list.append(validation_auc)

        print('----------------avg----------------')
        print("train average ks is ", np.mean(train_ks_list))
        print("val average ks is ", np.mean(val_ks_list))
        print("train average auc is ", np.mean(train_auc_list))
        print("val average auc is ", np.mean(val_auc_list))
Example #7
    def perform_evaluation_slim(recommender, test_data: dict):
        """Takes an already fitted recommender and evaluates on test data.
         If test_mode is false writes the submission"""

        if not test_data:
            print("Missing test data! Exiting...")
            exit(-1)

        print("Performing evaluation on test set...")

        MAP_final = 0.0
        evaluator = Evaluator()

        for user in tqdm(test_data.keys()):
            recommended_items = recommender.recommend(int(user),
                                                      exclude_seen=True)[:10]
            relevant_item = test_data[int(user)]

            MAP_final += evaluator.MAP(recommended_items, relevant_item)

        MAP_final /= len(test_data.keys())
        MAP_final *= 0.665  # approximate public score scaling (cf. Example #8)
        print("MAP-10 score:", MAP_final)
        return MAP_final
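
evaluator.MAP(recommended_items, relevant_item) is the per-user average precision used above; a standard AP@10 sketch, assuming an ordered list of recommendations and a list/array of held-out item ids, looks roughly like this:

import numpy as np

def average_precision(recommended_items, relevant_items, at=10):
    # Precision at every hit position, averaged over min(at, number of relevant items).
    recommended_items = np.asarray(recommended_items)[:at]
    is_relevant = np.isin(recommended_items, np.asarray(relevant_items))
    if not is_relevant.any():
        return 0.0
    precision_at_k = np.cumsum(is_relevant) / (np.arange(len(is_relevant)) + 1.0)
    return float(np.sum(precision_at_k * is_relevant) /
                 min(at, len(np.atleast_1d(relevant_items))))
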
Example #8
    def perform_evaluation(recommender):
        """Takes an already fitted recommender and evaluates on test data.
         If test_mode is false writes the submission"""

        print("Performing evaluation on test set...")

        MAP_final = 0.0
        evaluator, helper = Evaluator(), Helper()
        URM_train, eval_data = helper.URM_train_validation, helper.validation_data

        recommender.fit(URM_train)
        for user in tqdm(eval_data.keys()):
            recommended_items = recommender.recommend(int(user),
                                                      exclude_seen=True)
            relevant_item = eval_data[int(user)]

            MAP_final += evaluator.MAP(recommended_items, relevant_item)

        MAP_final /= len(eval_data.keys())
        print("MAP-10 score:", MAP_final)
        MAP_final *= 0.665
        print("MAP-10 public approx score:", MAP_final)

        return MAP_final
Example #9
def objective(params):
    print(params)
    total_loss = 0
    kfold_data = Helper().get_kfold_data(4)
    for k in range(4):

        URM_train, URM_test, validation_data, test_data = kfold_data[k]

        booster = XGBooster(URM_train, validation_data, HybridElasticNetICFUCF)

        booster.URM_test = URM_test

        booster.fit(train_parameters=deepcopy(params))
        loss, _ = Evaluator(test_mode=True).evaluate_recommender_kfold(booster, test_data, sequential=True)
        total_loss += loss

    total_loss /= 4

    print("Map@10 k-fold score:", total_loss)
    return -total_loss
Example #10
def train(spark):
    if config['XGBOOST']['checkpointInitialization'] == 'true':
        checkpoint_path = config['XGBOOST']['checkpoint_path']
        op = os.system("hadoop fs -rmr %s/*" % checkpoint_path)
        if not op:
            print("initialize checkpoint successfully.")
    train_df = Hdfs2Df.readHdfsCsv(spark=spark,
                                   data_path=config['TRAIN']['train_path'])
    test_df = Hdfs2Df.readHdfsCsv(spark=spark,
                                  data_path=config['TRAIN']['test_path'])

    missing = config['XGBOOST']['missing']
    train_df = PreProcessor.transColType(train_df, missing)
    test_df = PreProcessor.transColType(test_df, missing)
    train, train_col = PreProcessor.transVector(train_df, 'features')
    test, test_col = PreProcessor.transVector(test_df, 'features')

    SavaTools.saveModelFeature(train_col,
                               config['TRAIN']['local_model_feature_path'])
    xgb_handle = XGBoostClassifier(config['XGBOOST'])
    xgbModel = xgb_handle.trainAndSave(spark, train,
                                       config['TRAIN']['hdfs_model_path'])

    train_res, train_auc = xgb_handle.predict(spark, train, xgbModel)
    test_res, test_auc = xgb_handle.predict(spark, test, xgbModel)
    train_res.cache()
    test_res.cache()

    evaluator_handle = Evaluator(spark)
    train_ks = evaluator_handle.evaluateKs(train_res, 'train_res', 'score')
    train_auc = evaluator_handle.evaluateAuc(train_res, "score")
    test_ks = evaluator_handle.evaluateKs(test_res, 'test_ks', 'score')
    test_auc = evaluator_handle.evaluateAuc(test_res, "score")

    fscore = xgbModel.booster.getFeatureScore()
    xgb_handle.saveFeatureImportance(
        train_col, fscore, config['TRAIN']['local_model_feature_weights_path'],
        train_auc, test_auc, train_ks, test_ks)
    SavaTools.saveHdfsFile(train_res, config['TRAIN']['train_res_path'])
    SavaTools.saveHdfsFile(test_res, config['TRAIN']['test_res_path'])
Example #11
from evaluation.Evaluator import Evaluator
if __name__ == "__main__":
    evaluator = Evaluator()
    evaluator.split_data_randomly()
Example #12
    def evaluate_on_validation_set(recommender_class,
                                   fit_parameters,
                                   init_params=None,
                                   user_group="all",
                                   users_to_evaluate=None,
                                   Kfold=0,
                                   parallel_fit=False,
                                   parallelize_evaluation=False):

        if init_params is None:
            init_params = {}

        evaluator = Evaluator(test_mode=False)

        if Kfold > 0:
            MAP_final = 0
            fitted_recommenders = []

            num_cores = multiprocessing.cpu_count()

            kfold_data = Helper().get_kfold_data(Kfold)
            URMs = [data[0] for data in kfold_data]
            validation_data_list = [data[2] for data in kfold_data]
            users_to_evaluate_list = []
            for data in kfold_data:
                if user_group == "cold":
                    users_to_evaluate_list.append(
                        prepare_cold_users(data[0], data[2]))
                elif user_group == "warm":
                    users_to_evaluate_list.append(
                        prepare_warm_users(data[0], data[2]))
                else:
                    users_to_evaluate_list.append(list(data[2].keys()))

            if parallel_fit and Kfold <= num_cores:

                if init_params:  # non-empty init params are not supported in parallel fit
                    raise NotImplementedError(
                        "Can't handle init params in parallel fit")

                print("Parallelize fitting recommenders...")

                with multiprocessing.Pool(processes=Kfold) as p:
                    fitted_recommenders = p.map(fit_recommender, [
                        (recommender_class, URM, recommender_id,
                         fit_parameters, "validation")
                        for URM, recommender_id in zip(URMs, range(len(URMs)))
                    ])

                print("Done!")

            else:
                for i in range(Kfold):
                    URM_validation, _, validation_data, _ = kfold_data[i]

                    recommender = recommender_class(URM_validation,
                                                    **init_params)
                    recommender.fit(**fit_parameters)

                    fitted_recommenders.append((recommender, i))

            data_list = []

            if parallelize_evaluation and Kfold <= num_cores:

                with multiprocessing.Pool(processes=Kfold) as p:
                    results = p.starmap(
                        Evaluator().evaluate_recommender_kfold,
                        [(recommender, users_to_evaluate_list[recommender_id],
                          validation_data_list[recommender_id])
                         for recommender, recommender_id in fitted_recommenders
                         ])

                    for i in range(len(results)):
                        data_list.append(results[i][0])

            else:
                for recommender, recommender_id in fitted_recommenders:
                    MAP, _ = evaluator.evaluate_recommender_kfold(
                        recommender, users_to_evaluate_list[recommender_id],
                        validation_data_list[recommender_id])
                    data_list.append(MAP)

            MAP_final, variance = compute_mean_and_variance(data_list)
            print("Variance over k-fold:", variance)

        else:
            URM_train = Helper().URM_train_validation

            recommender = recommender_class(URM_train, **init_params)
            recommender.fit(**fit_parameters)

            if user_group == "cold":
                MAP_final, _ = evaluator.evaluate_recommender_on_cold_users(
                    recommender)
            else:
                MAP_final, _ = evaluator.evaluateRecommender(
                    recommender, users_to_evaluate)

        print("MAP-10 score:", MAP_final)

        return MAP_final
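
prepare_cold_users() and prepare_warm_users() are referenced above but never shown. A plausible sketch, under the assumption that cold users are those with at most a few interactions in the training URM and warm users are the rest, restricted to users that actually have validation data:

import numpy as np

def prepare_cold_users(URM_train, validation_data, threshold=3):
    # Hypothetical helper: profile length per user from the CSR row pointer.
    profile_lengths = np.ediff1d(URM_train.tocsr().indptr)
    cold_users = set(np.where(profile_lengths <= threshold)[0].tolist())
    return [user for user in validation_data.keys() if int(user) in cold_users]

def prepare_warm_users(URM_train, validation_data, threshold=3):
    profile_lengths = np.ediff1d(URM_train.tocsr().indptr)
    warm_users = set(np.where(profile_lengths > threshold)[0].tolist())
    return [user for user in validation_data.keys() if int(user) in warm_users]
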
Example #13
import hyperopt as hp
from hyperopt import Trials, fmin, space_eval, STATUS_OK
from SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
from evaluation.Evaluator import Evaluator
from utils.run import RunRecommender
import numpy as np
from utils.helper import Helper

helper = Helper()

N_KFOLD = 10

MAX_EVALS = 100

evaluator = Evaluator()


### Step 1 : defining the objective function
def objective(params):
    params["topK"] = int(params["topK"])
    params["batch_size"] = int(params["batch_size"])
    params["random_seed"] = 1234
    params["epochs"] = 30
    print("############ Current parameters ############")
    print(params)
    loss = -RunRecommender.evaluate_on_validation_set(
        SLIM_BPR_Cython,
        params,
        Kfold=N_KFOLD,
        parallelize_evaluation=False,
        user_group="warm")
Example #14
def LoadMovieLensData():
    ml = MovieLens()
    print("Loading movie ratings...")
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

contentKNN = ContentKNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

evaluator.Evaluate(True)

evaluator.SampleTopNRecs(ml)
Example #15
import torch

if torch.cuda.is_available():
    DEVICE = torch.device('cuda:0')
else:
    DEVICE = torch.device('cpu')

VOCAB_SIZE = 8000
MIN_SEQ_LEN = 5
MAX_SEQ_LEN = 20
BATCH_SIZE = 256
GEN_EMBEDDING_DIM = 256
GEN_HIDDEN_DIM = 256

if __name__ == '__main__':
    evaluator = Evaluator(vocab_size=VOCAB_SIZE, min_seq_len=MIN_SEQ_LEN, max_seq_len=MAX_SEQ_LEN, batch_size=BATCH_SIZE, device=DEVICE)

    result = {}
    for i in range(1, 32):
        gen = Generator(evaluator.sos_id, evaluator.eou_id, VOCAB_SIZE, GEN_HIDDEN_DIM, GEN_EMBEDDING_DIM, MAX_SEQ_LEN, teacher_forcing_ratio=0)

        model_path = 'generator_checkpoint' + str(i) + '.pth.tar'
        data = torch.load(model_path, map_location='cpu')
        gen.load_state_dict(data['state_dict'])
        gen.decoder = TopKDecoder(gen.decoder, 5)
        gen.to(DEVICE)

        print('Evaluating ' + model_path)
        result[i] = evaluator.evaluate_embeddings(gen)
        print(result[i])
Example #16
    )
    rankings = azr.getPopularityRanks()
    #rankings = defaultdict(int)
    return (azr, data, rankings)


# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

#(azr,evaluationData, rankings) = LoadAmazonData()

# Construct an Evaluator to, you know, evaluate them

######### our evaluation code #########

evaluator = Evaluator(evaluationData, rankings, doTopN=True)

SVDAlgorithm = SVDpp()
evaluator.AddAlgorithm(SVDAlgorithm, "SVD++")
#SVDppAlgorithm = SVDpp()
#evaluator.AddAlgorithm(SVDppAlgorithm, "SVD++")
evaluator.Evaluate()

#evaluator.SampleTopNRecs(ml)

### built-in function for evaluation #######

#algo = SVD()

# Run 3-fold cross-validation and print results
#cross_validate(algo, evaluationData, measures=['RMSE', 'MAE'], cv=3, verbose=True)
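
For reference, the Surprise cross-validation hinted at in the commented-out lines above can be run directly, assuming evaluationData is a Surprise Dataset as returned by the loader:

from surprise import SVD
from surprise.model_selection import cross_validate

# 3-fold cross-validation with RMSE and MAE, printed per fold and on average.
algo = SVD()
cross_validate(algo, evaluationData, measures=['RMSE', 'MAE'], cv=3, verbose=True)
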
Example #17
            discriminator = discriminator.Discriminator(DIS_EMBEDDING_DIM,\
                DIS_HIDDEN_DIM, VOCAB_SIZE, MAX_SEQ_LEN, device=DEVICE).to(DEVICE)
        else:
            discriminator = discriminator_LM2.LM(DIS_EMBEDDING_DIM,
                                                 VOCAB_SIZE,
                                                 device=DEVICE).to(DEVICE)

        if DISCRIMINATOR_CHECKPOINT:
            discriminator.load_state_dict(
                torch.load(DISCRIMINATOR_CHECKPOINT, map_location=DEVICE))

        dis_optimizer = optim.Adagrad(discriminator.parameters(),
                                      lr=DISCRIMINATOR_LR)
        evaluator = Evaluator(vocab_size=VOCAB_SIZE,
                              min_seq_len=MIN_SEQ_LEN,
                              max_seq_len=MAX_SEQ_LEN,
                              batch_size=BATCH_SIZE_TESTING,
                              device=DEVICE)

        # Define critic and dual optimizer
        if AC:
            critic = critic.Critic(DIS_EMBEDDING_DIM,
                                   DIS_HIDDEN_DIM,
                                   VOCAB_SIZE,
                                   MAX_SEQ_LEN,
                                   device=DEVICE).to(DEVICE)
            AC_optimizer = optim.Adagrad([{
                'params': actor.parameters(),
                'lr': ACTOR_LR
            }, {
                'params': critic.parameters(),
Example #18
def main():
    """
    Track objects.
    """
    visualize_steps = True
    evaluate = True
    downsampling_factor = 2
    mot_sequence = '02'  #02, 04 or 09
    # ---------------- DATA READING -------------------
    print('Data reading...')
    # Read image sequence into frames list
    video_path = f'test_data/MOT17-{mot_sequence}-raw.webm'
    frame_list = FrameList(video_path)
    frame_list.read_frames()

    # ------------- BACKGROUND MODELING ---------------
    print('Background modeling...')
    # For each frame in frame_list.frames, create likelihood images & suppress shadows.
    filter_type = 'gaussian'
    background_model = BackgroundModel(filter_type)
    background_model.create_binary_images(frame_list.frames,
                                          visualize=visualize_steps)
    background_model.suppress_shadows(frame_list.frames,
                                      visualize=visualize_steps)
    plt.close('all')

    # ------------- FOREGROUND SEGMENTATION -----------
    print('Foreground segmentation...')
    # For each frame in frame_list.frames, remove noise & create objects
    foreground_segmentation = ForegroundSegmentation(frame_list)
    foreground_segmentation.remove_noise()
    foreground_segmentation.label_image(min_box_area=1500,
                                        visualize=visualize_steps)

    # ------------------- TRACKER ---------------------
    print('Tracking...')
    # Create ObjectMatcher and Predictor objects used by Tracker
    object_matcher = ObjectMatcher(match_algorithm='simple')
    predictor = Predictor(Q0=1, R0=1, P0=np.array([1, 1, 0.01, 0.01]), T=1)

    # Create Tracker object
    tracker = Tracker(object_matcher, predictor)

    plt.figure()
    # Match objects between frames 2 by 2
    print('Number of frames =', len(frame_list.frames))
    for i in range(1, len(frame_list.frames) - 1):
        tracker.match_objects(frame_list.frames[i], frame_list.frames[i + 1])
        if (i % 20 == 0):
            print(round(i / len(frame_list.frames) * 100), '%')

    plt.close('all')
    TrackerVisualizer(frame_list)

    # ------------------ EVALUATION -------------------
    print('Evaluating...')
    # This will only work with MOT17 dataset 02, 04 or 09
    if (video_path.startswith('test_data/MOT17') and evaluate):
        evaluator = Evaluator(frame_list,
                              f'test_data/MOT17-{mot_sequence}-gt.txt')
        evaluator.read_gt(downsampling_factor=downsampling_factor)
        #evaluator.generate_eval_csv(mot_sequence, downsampling_factor=downsampling_factor)
        tp_sum, fp_sum, fn_sum, id_switches, precision, recall, avg_tp_overlap = evaluator.generate_metrics(
        )
        print(f'tp_sum: {tp_sum}')
        print(f'fn_sum: {fn_sum}')
        print(f'fp_sum: {fp_sum}')
        print(f'id_switches: {id_switches}')
        print(f'precision: {precision}')
        print(f'recall: {recall}')
        print(f'avg_tp_overlap: {avg_tp_overlap}')

        # Visualize ground truth
        plt.close('all')
Example #19
        # compute the scores using the dot product
        user_profile = self.URM[user_id]
        scores = user_profile.dot(self.similarity_matrix).toarray().ravel()

        if exclude_seen:
            scores = self.filter_seen(user_id, scores)

        # rank items
        ranking = scores.argsort()[::-1]

        return ranking[:at]


if __name__ == '__main__':

    evaluator = Evaluator()
    helper = Helper()

    MAP_final = 0.0
    # TODO URM_data contains the whole dataset and not the train set
    URM_all = helper.convert_URM_to_csr(helper.URM_data)

    URM_train, URM_test, target_users_test, test_data = split_train_test(
        URM_all, 0.8)

    recommender = SLIMRecommender(URM_train)
    recommender.fit(epochs=1000)

    # Load target users
    # target_users_test = helper.load_target_users_test()
    # relevant_items = helper.load_relevant_items()
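
The filter_seen call inside Example #19's recommend method is not shown; a common pattern, assuming self.URM is a CSR user-item matrix, masks already-seen items so they cannot be recommended again:

import numpy as np

def filter_seen(self, user_id, scores):
    # Items already in the user's training profile get a score of -inf.
    start_pos = self.URM.indptr[user_id]
    end_pos = self.URM.indptr[user_id + 1]
    seen_items = self.URM.indices[start_pos:end_pos]
    scores[seen_items] = -np.inf
    return scores
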
Example #20
    "num_factors":
    hp.hp.uniformint('num_factors', 1, 5000),
    "batch_size":
    hp.hp.choice('batch_size', [32, 64, 128, 256, 512]),
    "learning_rate":
    hp.hp.loguniform('learning_rate', 1e-6, 1e-2),
    "epochs":
    hp.hp.uniformint('epochs', 1, 1000),
    "validation_every_n":
    hp.hp.choice("validation_every_n", [10]),
    "validation_metric":
    hp.hp.choice('validation_metric', ['MAP']),
    "lower_validations_allowed":
    hp.hp.choice('lower_validations_allowed', [2]),
    "evaluator_object":
    hp.hp.choice('evaluator_object', [Evaluator(test_mode=False)])
}

if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()
    MAX_EVALS = 20

    # Optimize
    best = fmin(
        fn=objective,
        space=MF_space,
        algo=hp.tpe.suggest,
        max_evals=MAX_EVALS,
        trials=bayes_trials,
        verbose=True,