    def __init__(self, Data):

        self.Data = Data

        # Fit one WMRB ranking model on rental interactions and one on deal interactions
        self.rent_model = tensorrec.TensorRec(n_components=5, loss_graph=tensorrec.loss_graphs.WMRBLossGraph())
        self.rent_model.fit(interactions=self.Data.sparse_rent,
                            user_features=self.Data.user_indicator_features_rent,
                            item_features=self.Data.full_rentitem_features,
                            n_sampled_items=self.Data.n_items_rent)

        self.deal_model = tensorrec.TensorRec(n_components=5, loss_graph=tensorrec.loss_graphs.WMRBLossGraph())
        self.deal_model.fit(interactions=self.Data.sparse_deal,
                            user_features=self.Data.user_indicator_features_deal,
                            item_features=self.Data.full_dealitem_features,
                            n_sampled_items=self.Data.n_items_deal)
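
    # --- Hedged sketch (not part of the original class) ---
    # One way the fitted models could be used: rank every rental item for every user
    # with predict_rank() and keep the top k. The method name and top-k slicing are
    # assumptions, not taken from the original code.
    def recommend_rentals(self, k=10):
        import numpy as np

        ranks = self.rent_model.predict_rank(
            user_features=self.Data.user_indicator_features_rent,
            item_features=self.Data.full_rentitem_features)
        # predict_rank returns a rank per (user, item) pair, with rank 1 being the best,
        # so sorting ascending and slicing gives each user's top-k item indices
        return np.argsort(ranks, axis=1)[:, :k]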
Example 2
    def test_custom_loss_graph(self):
        import tensorflow as tf
        import tensorrec

        # Define a custom loss graph
        class SimpleLossGraph(tensorrec.loss_graphs.AbstractLossGraph):
            def connect_loss_graph(self, tf_prediction_serial, tf_interactions_serial, **kwargs):
                """
                This loss function returns the mean absolute error between the predictions and the interactions.
                :param tf_prediction_serial: tf.Tensor
                The recommendation scores as a Tensor of shape [n_samples, 1]
                :param tf_interactions_serial: tf.Tensor
                The sample interactions corresponding to tf_prediction_serial as a Tensor of shape [n_samples, 1]
                :param kwargs:
                Other TensorFlow nodes.
                :return:
                A tf.Tensor containing the learning loss.
                """
                return tf.reduce_mean(tf.abs(tf_interactions_serial - tf_prediction_serial))

        # Build a model with the custom loss function
        model = tensorrec.TensorRec(loss_graph=SimpleLossGraph())

        # Generate some dummy data
        interactions, user_features, item_features = tensorrec.util.generate_dummy_data(num_users=100,
                                                                                        num_items=150,
                                                                                        interaction_density=.05)

        # Fit the model for 5 epochs
        model.fit(interactions, user_features, item_features, epochs=5, verbose=True)

        self.assertIsNotNone(model)
Example 3
    def test_basic_usage(self):
        import numpy as np
        import tensorrec

        # Build the model with default parameters
        model = tensorrec.TensorRec()

        # Generate some dummy data
        interactions, user_features, item_features = tensorrec.util.generate_dummy_data(num_users=100,
                                                                                        num_items=150,
                                                                                        interaction_density=.05)

        # Fit the model for 5 epochs
        model.fit(interactions, user_features, item_features, epochs=5, verbose=True)

        # Predict scores and ranks for all users and all items
        predictions = model.predict(user_features=user_features,
                                    item_features=item_features)
        predicted_ranks = model.predict_rank(user_features=user_features,
                                             item_features=item_features)

        # Calculate and print the recall at 10
        r_at_k = tensorrec.eval.recall_at_k(predicted_ranks, interactions, k=10)
        print(np.mean(r_at_k))

        self.assertIsNotNone(predictions)
Example 4
    def test_custom_repr_graph(self):
        import tensorflow as tf
        import tensorrec

        # Define a custom representation function graph
        class TanhRepresentationGraph(
                tensorrec.representation_graphs.AbstractRepresentationGraph):
            def connect_representation_graph(self, tf_features, n_components,
                                             n_features, node_name_ending):
                """
                This representation function embeds the user/item features by passing them through a single tanh layer.
                :param tf_features: tf.SparseTensor
                The user/item features as a SparseTensor of dimensions [n_users/items, n_features]
                :param n_components: int
                The dimensionality of the resulting representation.
                :param n_features: int
                The number of features in tf_features
                :param node_name_ending: String
                Either 'user' or 'item'
                :return:
                A tuple of (tf.Tensor, list) where the first value is the resulting representation in n_components
                dimensions and the second value is a list containing all tf.Variables which should be subject to
                regularization.
                """
                tf_tanh_weights = tf.Variable(
                    tf.random_normal([n_features, n_components], stddev=.5),
                    name='tanh_weights_%s' % node_name_ending)

                tf_repr = tf.nn.tanh(
                    tf.sparse_tensor_dense_matmul(tf_features,
                                                  tf_tanh_weights))

                # Return repr layer and variables
                return tf_repr, [tf_tanh_weights]

        # Build a model with the custom representation function
        model = tensorrec.TensorRec(user_repr_graph=TanhRepresentationGraph(),
                                    item_repr_graph=TanhRepresentationGraph())

        # Generate some dummy data
        interactions, user_features, item_features = tensorrec.util.generate_dummy_data(
            num_users=100, num_items=150, interaction_density=.05)

        # Fit the model for 5 epochs
        model.fit(interactions,
                  user_features,
                  item_features,
                  epochs=5,
                  verbose=True)

        self.assertIsNotNone(model)
Example 5
# This method converts a list of (user, item, rating, time) interactions to a sparse matrix
def interactions_list_to_sparse_matrix(interactions):
    users_column, items_column, ratings_column, _ = zip(*interactions)
    return sparse.coo_matrix((ratings_column, (users_column, items_column)),
                             shape=(n_users, n_items))


# Create sparse matrices of interaction data
sparse_train_ratings = interactions_list_to_sparse_matrix(train_ratings)
sparse_test_ratings = interactions_list_to_sparse_matrix(test_ratings)

# Construct indicator features for users and items
user_indicator_features = sparse.identity(n_users)
item_indicator_features = sparse.identity(n_items)

# Build a matrix factorization collaborative filter model
cf_model = tensorrec.TensorRec(n_components=5)

# Fit the collaborative filter model
print("Training collaborative filter")
cf_model.fit(interactions=sparse_train_ratings,
             user_features=user_indicator_features,
             item_features=item_indicator_features)

# Create sets of train/test interactions that are only ratings >= 4.0
sparse_train_ratings_4plus = sparse_train_ratings.multiply(
    sparse_train_ratings >= 4.0)
sparse_test_ratings_4plus = sparse_test_ratings.multiply(
    sparse_test_ratings >= 4.0)


# This method consumes item ranks for each user and prints out recall@10 train/test metrics
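
# --- Hedged sketch (the original snippet is cut off here) ---
# A minimal version of the method described above, assuming it scores the ranks
# against the >= 4.0 train/test matrices built just before:
def check_results(ranks):
    train_recall_at_10 = tensorrec.eval.recall_at_k(
        test_interactions=sparse_train_ratings_4plus,
        predicted_ranks=ranks,
        k=10).mean()
    test_recall_at_10 = tensorrec.eval.recall_at_k(
        test_interactions=sparse_test_ratings_4plus,
        predicted_ranks=ranks,
        k=10).mean()
    print("Recall at 10: Train: {:.4f} Test: {:.4f}".format(train_recall_at_10,
                                                            test_recall_at_10))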
Example 6
        print("item features shape:", item_features.shape)
        pred_path = '/tmp2/bschang/amazon/pred/{}_bow.tf.pred'.format(c)

    start = time()
    print("Start training tensorrec...")
    print("params")
    print(
        "n_components: {}, epochs: {}, n_sample: {}, user_batch_size: {}, shuffle_batch: {}, method: {}"
        .format(K, epoch, n_sample, user_batch_size, shuffle_batch, method))
    if user_batch_size is None:
        user_batch_size = len(user_set)
    if method == 'ranking':
        model = tensorrec.TensorRec(
            n_components=K,
            # user_repr_graph=NormalizedLinearRepresentationGraph(),
            item_repr_graph=NormalizedLinearRepresentationGraph(),
            loss_graph=BalancedWMRBLossGraph(),
            # loss_graph=BPRLossGraph(),
            normalize_users=True,
            normalize_items=True)
    elif method == 'mf':
        model = tensorrec.TensorRec(n_components=K)
    model.fit(ui_matrix,
              user_features,
              item_features,
              epochs=epoch,
              verbose=False,
              n_sampled_items=n_sample,
              user_batch_size=user_batch_size,
              shuffle_batch=shuffle_batch)
    end = time()
    print("Spend {0:.2f} secs for training tensorrec.".format(end - start))
Example 7
import numpy as np
import tensorrec

model = tensorrec.TensorRec()

interactions, user_features, item_features = tensorrec.util.generate_dummy_data(
    num_users=150, num_items=100, interaction_density=.05)

model.fit(interactions, user_features, item_features, epochs=5, verbose=True)

predictions = model.predict(user_features=user_features,
                            item_features=item_features)

predicted_ranks = model.predict_rank(user_features=user_features,
                                     item_features=item_features)

r_at_k = tensorrec.eval.recall_at_k(predicted_ranks, interactions, k=10)

print(np.mean(r_at_k))
Example 8
######################### FUNCTION ############################################
################### Recall at k for the whole dataset  ########################


def rec_at_k(predictions, k, test_ratings):
    result = tensorrec.eval.recall_at_k(test_interactions=test_ratings,
                                        predicted_ranks=predictions,
                                        k=k).mean()

    return result


###############################################################################

####### Define the model
cf_model = tensorrec.TensorRec(
    n_components=100, loss_graph=tensorrec.loss_graphs.WMRBLossGraph())

############################# Generate Predictions and Evaluate  ##############

###### Baseline model
random_recs_5 = tensorrec.eval.eval_random_ranks_on_dataset(sp_dh_test_ratings,
                                                            recall_k=5)
random_recall_at_5, _, _ = random_recs_5
print("Baseline model recall@5:", random_recall_at_5)

random_recs_10 = tensorrec.eval.eval_random_ranks_on_dataset(
    sp_dh_test_ratings, recall_k=10)
random_recall_at_10, _, _ = random_recs_10
print("Baseline model recall@10:", random_recall_at_10)

random_recs_20 = tensorrec.eval.eval_random_ranks_on_dataset(
    sp_dh_test_ratings, recall_k=20)
random_recall_at_20, _, _ = random_recs_20
print("Baseline model recall@20:", random_recall_at_20)
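
# --- Hedged sketch (not part of the original snippet) ---
# The excerpt stops before cf_model is trained and scored. Assuming a training
# matrix named sp_dh_train_ratings and indicator feature matrices named
# user_indicator_features / item_indicator_features, the evaluation could look like:
cf_model.fit(interactions=sp_dh_train_ratings,
             user_features=user_indicator_features,
             item_features=item_indicator_features,
             n_sampled_items=100)  # WMRB fitting needs n_sampled_items; 100 is an arbitrary choice
predicted_ranks = cf_model.predict_rank(user_features=user_indicator_features,
                                        item_features=item_indicator_features)
print("CF model recall@5:", rec_at_k(predicted_ranks, 5, sp_dh_test_ratings))
print("CF model recall@10:", rec_at_k(predicted_ranks, 10, sp_dh_test_ratings))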
Example 9
# This method converts a list of (user, item, rating, time) to a sparse matrix
def interactions_list_to_sparse_matrix(interactions):
    users_column, items_column, ratings_column, _ = zip(*interactions)
    return sparse.coo_matrix((ratings_column, (users_column, items_column)),
                             shape=(n_users, n_items))


# Create sparse matrices of interaction data
sparse_train_ratings = interactions_list_to_sparse_matrix(train_ratings)
sparse_test_ratings = interactions_list_to_sparse_matrix(test_ratings)

# Construct indicator features for users and items
user_indicator_features = sparse.identity(n_users)
item_indicator_features = sparse.identity(n_items)
# Build a matrix factorization collaborative filter model
cf_model = tensorrec.TensorRec(n_components=5)
# Fit the collaborative filter model
print("Training collaborative filter")
cf_model.fit(interactions=sparse_train_ratings,
             user_features=user_indicator_features,
             item_features=item_indicator_features)

# Create sets of train/test interactions that are only ratings >= 4.0
sparse_train_ratings_4plus = sparse_train_ratings.multiply(
    sparse_train_ratings >= 4.0)
sparse_test_ratings_4plus = sparse_test_ratings.multiply(
    sparse_test_ratings >= 4.0)


# This method consumes item ranks for each user and prints out recall@10 train/test metrics
def check_results(ranks):
Example 10
import tensorrec
from flask import Flask, request, jsonify
from scipy.sparse import csr_matrix
from tensorrec.representation_graphs import ReLURepresentationGraph

app = Flask(__name__)

model = tensorrec.TensorRec(item_repr_graph=ReLURepresentationGraph(),
                            user_repr_graph=ReLURepresentationGraph())
try:
    model = model.load_model('./model')
except IOError:
    print("没有保存的模型检查点,初始化模型")


@app.route('/ranks', methods=["POST"])
def fitModelAndUpdateRanks():
    input = request.get_json()
    print("INPUT: \n" + str(input) + "\n")
    interactions = input['interaction']
    user_features = input['user']
    item_features = input['product']
    # Interaction values (matrix of shape n_users x n_items)
    interactions = csr_matrix(
        (interactions['data'], (interactions['row'], interactions['col'])))
    # Gender, balance order of magnitude, points order of magnitude
    user_features = csr_matrix(
        (user_features['data'], (user_features['row'], user_features['col'])))
    # Category ID, price, sales volume, page views, positive-review rate, product score
    item_features = csr_matrix(
        (item_features['data'], (item_features['row'], item_features['col'])))
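
    # --- Hedged sketch (the original route is truncated here) ---
    # A plausible ending: fit the model on the posted matrices, rank every item for
    # every user, persist the model, and return the ranks as JSON. The epoch count
    # is an assumption; the save path mirrors the load_model call above.
    model.fit(interactions, user_features, item_features, epochs=10, verbose=True)
    ranks = model.predict_rank(user_features=user_features,
                               item_features=item_features)
    model.save_model('./model')
    return jsonify({'ranks': ranks.tolist()})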
Example 11
def fittingtheModel(user_Feature, product_Feature, interaction):
    model = tensorrec.TensorRec()
    # model.fit(interaction, user_Feature, product_Feature, epochs=3, verbose=True)
    model.fit(interaction, user_Feature, product_Feature,
              epochs=30, learning_rate=0.01, alpha=0.00001, verbose=True)
    print("fitting is done")
    return model
Example 12
    np.reshape(user_gender_features, (user_gender_features.shape[1], 1)),
    np.reshape(user_age_features, (user_age_features.shape[1], 1))
])

full_user_features = sparse.hstack([
    user_indicator_features, user_mood_features, user_temperament_features,
    np.reshape(user_age_features, (user_age_features.shape[1], 1)),
    np.reshape(user_timestamp_features, (user_timestamp_features.shape[1], 1)),
    np.reshape(user_gender_features, (user_gender_features.shape[1], 1)),
    np.reshape(user_is_active_features, (user_is_active_features.shape[1], 1)),
    np.reshape(user_reaction_features, (user_reaction_features.shape[1], 1))
])

# Collaborative filtering
print("RMSE matrix factorization collaborative filter (cut):")
ranking_cf_model = tensorrec.TensorRec(n_components=5)
ranking_cf_model.fit(interactions=sparse_train_ratings,
                     user_features=cut_user_features,
                     item_features=item_indicator_features)
cut_cf_predicted_ranks = ranking_cf_model.predict_rank(
    user_features=cut_user_features, item_features=item_indicator_features)
check_results(cut_cf_predicted_ranks, sparse_train_ratings,
              sparse_test_ratings)

print("RMSE matrix factorization collaborative filter (full):")
ranking_cf_full_model = tensorrec.TensorRec(n_components=5)
ranking_cf_full_model.fit(interactions=sparse_train_ratings,
                          user_features=full_user_features,
                          item_features=item_indicator_features)
predicted_ranks = ranking_cf_full_model.predict_rank(
    user_features=full_user_features, item_features=item_indicator_features)