# Example 1
def hyper_parameter_tuning(num_users, num_items, user_col, item_col,
                           rating_col, keyphrase_vector_col, df_train,
                           df_valid, keyphrase_names, params, save_path):
    """Grid-search hyperparameters for negative-sampling models.

    Every combination drawn from the lists in ``params`` is trained on
    ``df_train``, evaluated on ``df_valid`` and appended to a CSV result
    table so the search can be resumed after an interruption.

    Args:
        num_users: total number of users in the dataset.
        num_items: total number of items in the dataset.
        user_col, item_col, rating_col, keyphrase_vector_col: column names
            in the interaction dataframes.
        df_train: training interactions (must contain the columns above).
        df_valid: validation interactions.
        keyphrase_names: sequence of keyphrase labels; its length defines
            the text dimensionality of the models.
        params: dict of hyperparameter lists keyed by name, with model
            constructors under ``'models'`` and evaluation settings under
            ``'metric'`` / ``'topK'``.
        save_path: CSV file (relative to the configured table path) used
            to checkpoint results after every configuration.
    """
    from itertools import product  # local import: file-level imports stay untouched

    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:  # no checkpoint yet: start with an empty result table
        df = pd.DataFrame(columns=[
            'model', 'rank', 'num_layers', 'train_batch_size',
            'predict_batch_size', 'lambda', 'topK', 'learning_rate', 'epoch',
            'negative_sampling_size'
        ])

    # Flatten the 9-dimensional grid instead of nesting nine for-loops.
    search_space = product(params['models'], params['rank'],
                           params['num_layers'], params['train_batch_size'],
                           params['predict_batch_size'], params['lambda'],
                           params['learning_rate'], params['epoch'],
                           params['negative_sampling_size'])

    for (algorithm, rank, num_layers, train_batch_size, predict_batch_size,
         lamb, learning_rate, epoch, negative_sampling_size) in search_space:

        # Resume support: skip configurations already present in the table.
        already_done = ((df['model'] == algorithm) &
                        (df['rank'] == rank) &
                        (df['num_layers'] == num_layers) &
                        (df['train_batch_size'] == train_batch_size) &
                        (df['predict_batch_size'] == predict_batch_size) &
                        (df['lambda'] == lamb) &
                        (df['learning_rate'] == learning_rate) &
                        (df['epoch'] == epoch) &
                        (df['negative_sampling_size']
                         == negative_sampling_size)).any()
        if already_done:
            continue

        progress.section(
            "model: {0}, rank: {1}, num_layers: {2}, "
            "train_batch_size: {3}, predict_batch_size: {4}, "
            "lambda: {5}, learning_rate: {6}, epoch: {7}, "
            "negative_sampling_size: {8}".format(
                algorithm, rank, num_layers, train_batch_size,
                predict_batch_size, lamb, learning_rate, epoch,
                negative_sampling_size))

        progress.subsection("Initializing Negative Sampler")

        negative_sampler = Negative_Sampler(
            df_train[[user_col, item_col, keyphrase_vector_col]],
            user_col,
            item_col,
            rating_col,
            keyphrase_vector_col,
            num_items=num_items,
            batch_size=train_batch_size,
            num_keyphrases=len(keyphrase_names),
            negative_sampling_size=negative_sampling_size)

        model = params['models'][algorithm](
            num_users=num_users,
            num_items=num_items,
            text_dim=len(keyphrase_names),
            embed_dim=rank,
            num_layers=num_layers,
            negative_sampler=negative_sampler,
            lamb=lamb,
            learning_rate=learning_rate)

        progress.subsection("Training")

        model.train_model(df_train, user_col, item_col, rating_col,
                          epoch=epoch)

        progress.subsection("Prediction")

        prediction, explanation = predict_elementwise(
            model,
            df_train,
            user_col,
            item_col,
            params['topK'][-1],
            batch_size=predict_batch_size,
            enable_explanation=True,
            keyphrase_names=keyphrase_names)

        progress.subsection("Evaluation")

        R_valid = to_sparse_matrix(df_valid, num_users, num_items,
                                   user_col, item_col, rating_col)

        result = evaluate(prediction, R_valid, params['metric'],
                          params['topK'])

        result_dict = {
            'model': algorithm,
            'rank': rank,
            'num_layers': num_layers,
            'train_batch_size': train_batch_size,
            'predict_batch_size': predict_batch_size,
            'lambda': lamb,
            'learning_rate': learning_rate,
            'epoch': epoch,
            'negative_sampling_size': negative_sampling_size
        }

        # Each metric is stored as [mean, confidence-interval], 4 dp.
        for name in result.keys():
            result_dict[name] = [round(result[name][0], 4),
                                 round(result[name][1], 4)]

        # DataFrame.append was removed in pandas 2.0; concat is the
        # supported, backward-compatible replacement.
        df = pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)

        # Release TF session/graph resources before the next configuration.
        model.sess.close()
        tf.reset_default_graph()

        # Checkpoint after every configuration so the search can resume.
        save_dataframe_csv(df, table_path, save_path)
def converge(num_users,
             num_items,
             user_col,
             item_col,
             rating_col,
             keyphrase_vector_col,
             df_train,
             df_test,
             keyphrase_names,
             df,
             table_path,
             file_name,
             epoch=10):
    """Track test-set metrics while training tuned configurations.

    For each of three independent runs, every row of ``df`` (one tuned
    configuration per row) is trained in 10-epoch increments; after each
    increment the model is evaluated on ``df_test`` and a snapshot row is
    added to the returned results table, which is also checkpointed to
    CSV after each configuration finishes.

    Args:
        df: dataframe of tuned configurations (one per row).
        table_path: directory for the results CSV.
        file_name: results CSV file name.
        epoch: total training epochs per configuration (evaluated every
            10 epochs).

    Returns:
        DataFrame with one row per (run, configuration, epoch checkpoint).
    """
    progress = WorkSplitter()

    results = pd.DataFrame(columns=[
        'model', 'rank', 'num_layers', 'train_batch_size',
        'predict_batch_size', 'lambda', 'topK', 'learning_rate', 'epoch',
        'negative_sampling_size', 'optimizer'
    ])

    for run in range(3):

        for idx, row in df.iterrows():
            row = row.to_dict()
            # Only configurations whose model is registered can be retrained.
            if row['model'] not in models:
                continue

            progress.section(json.dumps(row))

            row['metric'] = [
                'R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP'
            ]
            row['topK'] = [10]

            # Older tuning tables predate the optimizer column.
            if 'optimizer' not in row.keys():
                row['optimizer'] = 'Adam'

            negative_sampler = Negative_Sampler(
                df_train[[user_col, item_col, keyphrase_vector_col]],
                user_col,
                item_col,
                rating_col,
                keyphrase_vector_col,
                num_items=num_items,
                batch_size=row['train_batch_size'],
                num_keyphrases=len(keyphrase_names),
                negative_sampling_size=row['negative_sampling_size'])

            model = models[row['model']](num_users=num_users,
                                         num_items=num_items,
                                         text_dim=len(keyphrase_names),
                                         embed_dim=row['rank'],
                                         num_layers=row['num_layers'],
                                         negative_sampler=negative_sampler,
                                         lamb=row['lambda'],
                                         learning_rate=row['learning_rate'])

            # Reuse one batch set across increments so sampling is fixed.
            batches = negative_sampler.get_batches()

            epoch_batch = 10

            for i in range(epoch // epoch_batch):
                # Embeddings are initialized only on the first increment;
                # later increments continue from the current weights.
                model.train_model(df_train,
                                  user_col,
                                  item_col,
                                  rating_col,
                                  epoch=epoch_batch,
                                  batches=batches,
                                  init_embedding=(i == 0))

                prediction, explanation = predict_elementwise(
                    model,
                    df_train,
                    user_col,
                    item_col,
                    row['topK'][0],
                    batch_size=row['predict_batch_size'],
                    enable_explanation=False,
                    keyphrase_names=keyphrase_names)

                R_test = to_sparse_matrix(df_test, num_users, num_items,
                                          user_col, item_col, rating_col)

                result = evaluate(prediction, R_test, row['metric'],
                                  row['topK'])

                # Snapshot of this configuration at the current epoch count.
                result_dict = {
                    'model': row['model'],
                    'rank': row['rank'],
                    'num_layers': row['num_layers'],
                    'train_batch_size': row['train_batch_size'],
                    'predict_batch_size': row['predict_batch_size'],
                    'lambda': row['lambda'],
                    'topK': row['topK'][0],
                    'learning_rate': row['learning_rate'],
                    'epoch': (i + 1) * epoch_batch,
                    'negative_sampling_size': row['negative_sampling_size'],
                    'optimizer': row['optimizer']
                }

                # Only the metric mean is kept here (no confidence interval).
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 4)

                # DataFrame.append was removed in pandas 2.0; use concat.
                results = pd.concat([results, pd.DataFrame([result_dict])],
                                    ignore_index=True)
                print("result is \n {}".format(results))

            # Release TF session/graph resources before the next row.
            model.sess.close()
            tf.reset_default_graph()

            save_dataframe_csv(results, table_path, file_name)

    return results
def main(args):
    """Train one matrix-based model from CLI args and optionally evaluate it.

    Loads sparse rating and keyphrase matrices, trains the model selected
    by ``args.model``, predicts top-k items, and (when
    ``args.enable_evaluation`` is set) prints ranking and keyphrase
    metrics.

    NOTE(review): a second ``def main`` appears later in this file and
    shadows this one at import time — confirm which entry point is
    intended.
    """
    # Progress bar
    progress = WorkSplitter()

    # Show hyperparameter settings
    progress.section("Parameter Setting")

    print("Data Directory: {}".format(args.data_dir))
    print("Algorithm: {}".format(args.model))
    print("Optimizer: {}".format(args.optimizer))
    print("Corruption Rate: {}".format(args.corruption))
    print("Learning Rate: {}".format(args.learning_rate))
    print("Epoch: {}".format(args.epoch))
    print("Lambda L2: {}".format(args.lamb_l2))
    print("Lambda Keyphrase: {}".format(args.lamb_keyphrase))
    print("Lambda Latent: {}".format(args.lamb_latent))
    print("Lambda Rating: {}".format(args.lamb_rating))
    print("Beta: {}".format(args.beta))
    print("Rank: {}".format(args.rank))
    print("Train Batch Size: {}".format(args.train_batch_size))
    print("Predict Batch Size: {}".format(args.predict_batch_size))
    print("Evaluation Ranking Topk: {}".format(args.topk))
    print("Validation Enabled: {}".format(args.enable_validation))

    # Load Data
    progress.section("Load Data")
    start_time = time.time()

    # User-item rating matrix (sparse) for training.
    R_train = load_numpy(path=args.data_dir, name=args.train_set)
    print("Train U-I Dimensions: {}".format(R_train.shape))

    # User-keyphrase matrix; densified here because it is binarized below.
    R_train_keyphrase = load_numpy(path=args.data_dir,
                                   name=args.train_keyphrase_set).toarray()
    print("Train Keyphrase U-S Dimensions: {}".format(R_train_keyphrase.shape))

    # Evaluate against the validation split when validation is enabled,
    # otherwise against the held-out test split.
    if args.enable_validation:
        R_valid = load_numpy(path=args.data_dir, name=args.valid_set)
        R_valid_keyphrase = load_numpy(path=args.data_dir,
                                       name=args.valid_keyphrase_set)
    else:
        R_valid = load_numpy(path=args.data_dir, name=args.test_set)
        R_valid_keyphrase = load_numpy(path=args.data_dir,
                                       name=args.test_keyphrase_set)
    print("Elapsed: {}".format(inhour(time.time() - start_time)))

    progress.section("Preprocess Keyphrase Frequency")
    start_time = time.time()

    # Binarize keyphrase frequencies into presence indicators.
    # NOTE(review): R_valid_keyphrase is still sparse here (no .toarray()),
    # unlike R_train_keyphrase — confirm the in-place assignment is intended
    # on a sparse matrix.
    R_train_keyphrase[R_train_keyphrase != 0] = 1
    R_valid_keyphrase[R_valid_keyphrase != 0] = 1
    print("Elapsed: {}".format(inhour(time.time() - start_time)))

    progress.section("Train")
    start_time = time.time()

    model = models[args.model](matrix_train=R_train,
                               epoch=args.epoch,
                               lamb_l2=args.lamb_l2,
                               lamb_keyphrase=args.lamb_keyphrase,
                               lamb_latent=args.lamb_latent,
                               lamb_rating=args.lamb_rating,
                               beta=args.beta,
                               learning_rate=args.learning_rate,
                               rank=args.rank,
                               corruption=args.corruption,
                               optimizer=args.optimizer,
                               matrix_train_keyphrase=R_train_keyphrase)
    print("Elapsed: {}".format(inhour(time.time() - start_time)))

    progress.section("Predict")
    start_time = time.time()

    rating_score, keyphrase_score = model.predict(R_train.todense())
    prediction = predict(rating_score, args.topk, matrix_Train=R_train)
    print("Elapsed: {}".format(inhour(time.time() - start_time)))

    # NOTE(review): gated on enable_evaluation while data loading is gated
    # on enable_validation — confirm both flags exist and are independent.
    if args.enable_evaluation:
        progress.section("Create Metrics")
        start_time = time.time()

        metric_names = [
            'R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP'
        ]
        result = evaluate(prediction, R_valid, metric_names, [args.topk])

        print("-")
        for metric in result.keys():
            print("{}:{}".format(metric, result[metric]))

        # Evaluate keyphrase ranking only for models that produce one.
        if keyphrase_score is not None:
            keyphrase_prediction = predict_keyphrase(keyphrase_score,
                                                     args.topk)
            keyphrase_result = evaluate(keyphrase_prediction,
                                        sparse.csr_matrix(R_valid_keyphrase),
                                        metric_names, [args.topk])

            print("-")
            for metric in keyphrase_result.keys():
                print("{}:{}".format(metric, keyphrase_result[metric]))

        print("Elapsed: {}".format(inhour(time.time() - start_time)))

    # Release TF session/graph resources.
    model.sess.close()
    tf.reset_default_graph()
def main(args):
    """Train an element-wise model from CLI args and print its metrics.

    Loads interaction dataframes, trains the model selected by
    ``args.model`` with a negative sampler, then reports both general
    ranking performance and keyphrase-explanation performance.

    NOTE(review): this is the second ``def main`` in the file; it shadows
    the earlier one at import time — confirm which entry point is
    intended.
    """
    # Progress bar
    progress = WorkSplitter()

    progress.section("Parameter Setting")
    print("Data Directory: {}".format(args.data_dir))
    print("Algorithm: {}".format(args.model))
    print("Learning Rate: {}".format(args.learning_rate))
    print("Epoch: {}".format(args.epoch))
    print("Number of Top Items Evaluated in Recommendation: {}".format(
        args.topk))
    print("Lambda: {}".format(args.lamb))
    print("Rank: {}".format(args.rank))
    print("Train Batch Size: {}".format(args.train_batch_size))
    print("Predict Batch Size: {}".format(args.predict_batch_size))
    print("Negative Sampling Size: {}".format(args.negative_sampling_size))
    print("Number of Keyphrases Evaluated in Explanation: {}".format(
        args.topk_keyphrase))
    print("Enable Validation: {}".format(args.enable_validation))

    progress.section("Load Data")
    # Dataset dimensions come from dedicated per-column CSV files named
    # after the user/item columns.
    num_users = pd.read_csv(args.data_dir + args.user_col +
                            '.csv')[args.user_col].nunique()
    num_items = pd.read_csv(args.data_dir + args.item_col +
                            '.csv')[args.item_col].nunique()
    print("Dataset U-I Dimensions: ({}, {})".format(num_users, num_items))

    df_train = pd.read_csv(args.data_dir + args.train_set)
    # Keep positive interactions only (rating == 1).
    df_train = df_train[df_train[args.rating_col] == 1]
    # Keyphrase vectors are stored as stringified Python lists in the CSV.
    df_train[args.keyphrase_vector_col] = df_train[
        args.keyphrase_vector_col].apply(ast.literal_eval)

    # Evaluate on validation or test split depending on the flag.
    if args.enable_validation:
        df_valid = pd.read_csv(args.data_dir + args.valid_set)
    else:
        df_valid = pd.read_csv(args.data_dir + args.test_set)

    keyphrase_names = pd.read_csv(args.data_dir + args.keyphrase_set)[
        args.keyphrase_col].values
    num_keyphrases = len(keyphrase_names)

    progress.section("Initialize Negative Sampler")
    negative_sampler = Negative_Sampler(
        df_train[[args.user_col, args.item_col, args.keyphrase_vector_col]],
        args.user_col,
        args.item_col,
        args.rating_col,
        args.keyphrase_vector_col,
        num_items,
        batch_size=args.train_batch_size,
        num_keyphrases=num_keyphrases,
        negative_sampling_size=args.negative_sampling_size)

    progress.section("Train")
    model = models[args.model](num_users=num_users,
                               num_items=num_items,
                               text_dim=num_keyphrases,
                               embed_dim=args.rank,
                               num_layers=1,
                               negative_sampler=negative_sampler,
                               lamb=args.lamb,
                               learning_rate=args.learning_rate)

    model.train_model(df_train,
                      user_col=args.user_col,
                      item_col=args.item_col,
                      rating_col=args.rating_col,
                      epoch=args.epoch)

    progress.section("Predict")
    prediction, explanation = predict_elementwise(
        model,
        df_train,
        args.user_col,
        args.item_col,
        args.topk,
        batch_size=args.predict_batch_size,
        enable_explanation=True,
        keyphrase_names=keyphrase_names,
        topk_keyphrase=args.topk_keyphrase)

    metric_names = [
        'R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP'
    ]

    R_valid = to_sparse_matrix(df_valid, num_users, num_items, args.user_col,
                               args.item_col, args.rating_col)

    result = evaluate(prediction, R_valid, metric_names, [args.topk])

    print("-- General Performance")
    for metric in result.keys():
        print("{}:{}".format(metric, result[metric]))

    # Separate pass for keyphrase explanations on the evaluation split.
    df_valid_explanation = predict_explanation(
        model,
        df_valid,
        args.user_col,
        args.item_col,
        topk_keyphrase=args.topk_keyphrase)

    explanation_result = evaluate_explanation(df_valid_explanation, df_valid,
                                              ['Recall', 'Precision'],
                                              [args.topk_keyphrase],
                                              args.user_col, args.item_col,
                                              args.rating_col,
                                              args.keyphrase_vector_col)

    print("-- Explanation Performance")
    for metric in explanation_result.keys():
        print("{}:{}".format(metric, explanation_result[metric]))
# Example 5
def general(train, test, keyphrase_train, keyphrase_test, params, save_path, final_explanation=False):
    """Retrain the best tuned configurations and report final metrics.

    Reads the best hyperparameters (selected by NDCG) from the tuning
    table referenced by ``params['tuning_result_path']``, retrains each
    configuration on ``train``, and evaluates either rating prediction
    (against ``test``) or, when ``final_explanation`` is set, keyphrase
    explanation (against ``keyphrase_test``). Results are appended to the
    CSV at ``save_path`` after every configuration and returned.

    Returns:
        DataFrame of configurations with their final metric values.
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    df = find_best_hyperparameters(table_path + params['tuning_result_path'], 'NDCG')

    try:
        output_df = load_dataframe_csv(table_path, save_path)
    except Exception:  # no previous results: start with an empty table
        output_df = pd.DataFrame(columns=['model', 'rank', 'beta', 'lambda_l2', 'lambda_keyphrase', 'lambda_latent', 'lambda_rating', 'topK', 'learning_rate', 'epoch', 'corruption', 'optimizer'])

    for index, row in df.iterrows():

        algorithm = row['model']
        rank = row['rank']
        beta = row['beta']
        lamb_l2 = row['lambda_l2']
        lamb_keyphrase = row['lambda_keyphrase']
        lamb_latent = row['lambda_latent']
        lamb_rating = row['lambda_rating']
        learning_rate = row['learning_rate']
        epoch = row['epoch']
        corruption = row['corruption']
        optimizer = row['optimizer']

        # Final evaluation uses a fixed set of cutoffs and metrics,
        # regardless of what was used during tuning.
        row['topK'] = [5, 10, 15, 20, 50]
        row['metric'] = ['R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP']

        # 'settings_format' avoids shadowing the builtin 'format'.
        settings_format = "model: {}, rank: {}, beta: {}, lambda_l2: {}, lambda_keyphrase: {}, " \
                          "lambda_latent: {}, lambda_rating: {}, learning_rate: {}, " \
                          "epoch: {}, corruption: {}, optimizer: {}"

        progress.section(settings_format.format(algorithm, rank, beta, lamb_l2, lamb_keyphrase, lamb_latent, lamb_rating, learning_rate, epoch, corruption, optimizer))

        progress.subsection("Training")

        model = models[algorithm](matrix_train=train,
                                  epoch=epoch,
                                  lamb_l2=lamb_l2,
                                  lamb_keyphrase=lamb_keyphrase,
                                  lamb_latent=lamb_latent,
                                  lamb_rating=lamb_rating,
                                  beta=beta,
                                  learning_rate=learning_rate,
                                  rank=rank,
                                  corruption=corruption,
                                  optimizer=optimizer,
                                  matrix_train_keyphrase=keyphrase_train)

        progress.subsection("Prediction")

        rating_score, keyphrase_score = model.predict(train.todense())

        progress.subsection("Evaluation")

        if final_explanation:
            # Evaluate keyphrase (explanation) ranking quality.
            prediction = predict_keyphrase(keyphrase_score,
                                           topK=row['topK'][-2])

            result = evaluate_explanation(prediction,
                                          keyphrase_test,
                                          row['metric'],
                                          row['topK'])
        else:
            # Evaluate item recommendation quality.
            prediction = predict(rating_score,
                                 topK=row['topK'][-1],
                                 matrix_Train=train)

            result = evaluate(prediction, test, row['metric'], row['topK'])

        result_dict = {'model': algorithm,
                       'rank': rank,
                       'beta': beta,
                       'lambda_l2': lamb_l2,
                       'lambda_keyphrase': lamb_keyphrase,
                       'lambda_latent': lamb_latent,
                       'lambda_rating': lamb_rating,
                       'learning_rate': learning_rate,
                       'epoch': epoch,
                       'corruption': corruption,
                       'optimizer': optimizer}

        # Each metric is stored as [mean, confidence-interval], 4 dp.
        for name in result.keys():
            result_dict[name] = [round(result[name][0], 4),
                                 round(result[name][1], 4)]

        # DataFrame.append was removed in pandas 2.0; use concat instead.
        output_df = pd.concat([output_df, pd.DataFrame([result_dict])],
                              ignore_index=True)

        # Release TF session/graph resources before the next configuration.
        model.sess.close()
        tf.reset_default_graph()

        save_dataframe_csv(output_df, table_path, save_path)

    return output_df
# Example 6
def hyper_parameter_tuning(train, validation, keyphrase_train, keyphrase_validation, params, save_path, tune_explanation=False):
    """Grid-search hyperparameters for matrix-based (autoencoder) models.

    Every combination drawn from the lists in ``params`` is trained on
    ``train`` and evaluated on either ``validation`` (rating prediction)
    or ``keyphrase_validation`` (when ``tune_explanation`` is set).
    Results are appended to a CSV table after each configuration so the
    search can be resumed. Only configurations with
    ``lambda_latent == lambda_keyphrase`` are evaluated, matching the
    original search constraint.
    """
    from itertools import product  # local import: file-level imports stay untouched

    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:  # no checkpoint yet: start with an empty result table
        df = pd.DataFrame(columns=['model', 'rank', 'beta', 'lambda_l2', 'lambda_keyphrase', 'lambda_latent', 'lambda_rating', 'topK', 'learning_rate', 'epoch', 'corruption', 'optimizer'])

    # Flatten the 11-dimensional grid instead of nesting eleven for-loops.
    search_space = product(params['models'], params['rank'], params['beta'],
                           params['lambda_l2'], params['lambda_keyphrase'],
                           params['lambda_latent'], params['lambda_rating'],
                           params['learning_rate'], params['epoch'],
                           params['corruption'], params['optimizer'])

    for (algorithm, rank, beta, lamb_l2, lamb_keyphrase, lamb_latent,
         lamb_rating, learning_rate, epoch, corruption,
         optimizer) in search_space:

        # The search ties the latent and keyphrase penalties together.
        if lamb_latent != lamb_keyphrase:
            continue

        # Resume support: skip configurations already present in the table.
        already_done = ((df['model'] == algorithm) &
                        (df['rank'] == rank) &
                        (df['beta'] == beta) &
                        (df['lambda_l2'] == lamb_l2) &
                        (df['lambda_keyphrase'] == lamb_keyphrase) &
                        (df['lambda_latent'] == lamb_latent) &
                        (df['lambda_rating'] == lamb_rating) &
                        (df['learning_rate'] == learning_rate) &
                        (df['epoch'] == epoch) &
                        (df['corruption'] == corruption) &
                        (df['optimizer'] == optimizer)).any()
        if already_done:
            continue

        progress.section(
            "model: {}, rank: {}, beta: {}, lambda_l2: {}, "
            "lambda_keyphrase: {}, lambda_latent: {}, lambda_rating: {}, "
            "learning_rate: {}, epoch: {}, corruption: {}, optimizer: {}".format(
                algorithm, rank, beta, lamb_l2, lamb_keyphrase, lamb_latent,
                lamb_rating, learning_rate, epoch, corruption, optimizer))

        progress.subsection("Training")

        model = models[algorithm](matrix_train=train,
                                  epoch=epoch,
                                  lamb_l2=lamb_l2,
                                  lamb_keyphrase=lamb_keyphrase,
                                  lamb_latent=lamb_latent,
                                  lamb_rating=lamb_rating,
                                  beta=beta,
                                  learning_rate=learning_rate,
                                  rank=rank,
                                  corruption=corruption,
                                  optimizer=optimizer,
                                  matrix_train_keyphrase=keyphrase_train)

        progress.subsection("Prediction")

        rating_score, keyphrase_score = model.predict(train.todense())

        progress.subsection("Evaluation")

        if tune_explanation:
            # Tune on keyphrase (explanation) ranking quality.
            prediction = predict_keyphrase(keyphrase_score,
                                           topK=params['topK'][-1])

            result = evaluate(prediction,
                              keyphrase_validation,
                              params['metric'],
                              params['topK'])
        else:
            # Tune on item recommendation quality.
            prediction = predict(rating_score,
                                 topK=params['topK'][-1],
                                 matrix_Train=train)

            result = evaluate(prediction,
                              validation,
                              params['metric'],
                              params['topK'])

        result_dict = {'model': algorithm,
                       'rank': rank,
                       'beta': beta,
                       'lambda_l2': lamb_l2,
                       'lambda_keyphrase': lamb_keyphrase,
                       'lambda_latent': lamb_latent,
                       'lambda_rating': lamb_rating,
                       'learning_rate': learning_rate,
                       'epoch': epoch,
                       'corruption': corruption,
                       'optimizer': optimizer}

        # Each metric is stored as [mean, confidence-interval], 4 dp.
        for name in result.keys():
            result_dict[name] = [round(result[name][0], 4),
                                 round(result[name][1], 4)]

        # DataFrame.append was removed in pandas 2.0; use concat instead.
        df = pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)

        # Release TF session/graph resources before the next configuration.
        model.sess.close()
        tf.reset_default_graph()

        # Checkpoint after every configuration so the search can resume.
        save_dataframe_csv(df, table_path, save_path)
# Example 7
def general(num_users, num_items, user_col, item_col, rating_col,
            keyphrase_vector_col, df_train, df_test, keyphrase_names, params,
            save_path):
    """Retrain and evaluate the best tuned configurations on the test set.

    For each row of the best-hyper-parameter table (selected by NDCG from
    the tuning results at ``params['tuning_result_path']``), retrain the
    model on ``df_train``, predict the top-K items per user, evaluate the
    predictions against ``df_test``, and append one result row to the CSV
    at ``save_path``. Results are saved incrementally after every model.

    Args:
        num_users: Total number of users in the dataset.
        num_items: Total number of items in the dataset.
        user_col: Name of the user-id column in the dataframes.
        item_col: Name of the item-id column.
        rating_col: Name of the rating column.
        keyphrase_vector_col: Name of the keyphrase-vector column.
        df_train: Training interactions dataframe.
        df_test: Test interactions dataframe.
        keyphrase_names: Sequence of keyphrase names (sets the text dim).
        params: Dict containing at least 'tuning_result_path'.
        save_path: CSV filename (relative to the tables path) for results.

    Returns:
        pd.DataFrame with one evaluation row per model configuration.
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    df = find_best_hyperparameters(table_path + params['tuning_result_path'],
                                   'NDCG')

    try:
        output_df = load_dataframe_csv(table_path, save_path)
    except Exception:
        # No previous result file (or it is unreadable): start a fresh table.
        output_df = pd.DataFrame(columns=[
            'model', 'rank', 'num_layers', 'train_batch_size',
            'predict_batch_size', 'lambda', 'topK', 'learning_rate', 'epoch',
            'negative_sampling_size'
        ])

    for _, row in df.iterrows():

        algorithm = row['model']
        rank = row['rank']
        num_layers = row['num_layers']
        train_batch_size = row['train_batch_size']
        predict_batch_size = row['predict_batch_size']
        lamb = row['lambda']
        learning_rate = row['learning_rate']
        # Final evaluation retrains with a fixed epoch budget, overriding the
        # tuned value — TODO confirm 300 is the intended final training length.
        epoch = 300
        negative_sampling_size = row['negative_sampling_size']

        # Evaluation cut-offs and metrics for the final report.
        row['topK'] = [5, 10, 15, 20, 50]
        row['metric'] = [
            'R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP'
        ]

        # Named 'fmt' rather than 'format' to avoid shadowing the builtin.
        fmt = "model: {0}, rank: {1}, num_layers: {2}, train_batch_size: {3}, " \
              "predict_batch_size: {4}, lambda: {5}, learning_rate: {6}, epoch: {7}, negative_sampling_size: {8}"
        progress.section(
            fmt.format(algorithm, rank, num_layers, train_batch_size,
                       predict_batch_size, lamb, learning_rate, epoch,
                       negative_sampling_size))

        progress.subsection("Initializing Negative Sampler")

        negative_sampler = Negative_Sampler(
            df_train[[user_col, item_col, keyphrase_vector_col]],
            user_col,
            item_col,
            rating_col,
            keyphrase_vector_col,
            num_items=num_items,
            batch_size=train_batch_size,
            num_keyphrases=len(keyphrase_names),
            negative_sampling_size=negative_sampling_size)

        model = models[algorithm](num_users=num_users,
                                  num_items=num_items,
                                  text_dim=len(keyphrase_names),
                                  embed_dim=rank,
                                  num_layers=num_layers,
                                  negative_sampler=negative_sampler,
                                  lamb=lamb,
                                  learning_rate=learning_rate)

        progress.subsection("Training")

        # Kept for the (currently disabled) pretrained-model load/save below.
        pretrained_path = load_yaml('config/global.yml',
                                    key='path')['pretrained']
        # try:
        #     model.load_model(pretrained_path+params['tuning_result_path'], row['model'])
        # except:
        model.train_model(df_train,
                          user_col,
                          item_col,
                          rating_col,
                          epoch=epoch)
        # model.save_model(pretrained_path+params['tuning_result_path'], row['model'])

        progress.subsection("Prediction")

        # Explanations are disabled here, so the second return value is unused.
        prediction, _ = predict_elementwise(
            model,
            df_train,
            user_col,
            item_col,
            row['topK'][-1],
            batch_size=predict_batch_size,
            enable_explanation=False,
            keyphrase_names=keyphrase_names)

        R_test = to_sparse_matrix(df_test, num_users, num_items, user_col,
                                  item_col, rating_col)

        result = evaluate(prediction, R_test, row['metric'], row['topK'])

        # NOTE: not finished yet (original author's marker).
        result_dict = {
            'model': row['model'],
            'rank': row['rank'],
            'num_layers': row['num_layers'],
            'train_batch_size': row['train_batch_size'],
            'predict_batch_size': row['predict_batch_size'],
            'lambda': row['lambda'],
            'topK': row['topK'][-1],
            'learning_rate': row['learning_rate'],
            'epoch': epoch,
            'negative_sampling_size': row['negative_sampling_size'],
        }

        # Keep only element [0] of each metric, rounded — presumably the mean
        # of a (mean, interval) pair; confirm against evaluate().
        for name in result.keys():
            result_dict[name] = round(result[name][0], 4)

        # DataFrame.append was removed in pandas >= 2.0; concat a one-row
        # frame instead (identical result, future-proof).
        output_df = pd.concat([output_df, pd.DataFrame([result_dict])],
                              ignore_index=True)

        # Release the TF session and graph before the next configuration.
        model.sess.close()
        tf.reset_default_graph()

        # Persist incrementally so partial progress survives a crash.
        save_dataframe_csv(output_df, table_path, save_path)

    return output_df