Code example #1
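# Imports assumed by this example; `metrics` is the project's own
# ranking-metrics module (rank_matrix, precision_at_k, recall_at_k).
import sys
import numpy as np
import pandas as pd
from scipy import sparse
import metrics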
def main():
    print("\nStarting '%s'" % sys.argv[0])

    np.random.seed(8000)
    normalization_enabled = False
    optimize_enabled = True
    k = 100
    """ Load dataset """
    datafile = "./data/ml-100k/u.data"
    data = pd.read_csv(datafile,
                       sep='\t',
                       names=["userid", "itemid", "rating", "timestamp"])
    """ Convert rating data to user x movie matrix format """
    data = data.sort_values(by=["userid", "itemid"])
    ratings = pd.pivot_table(data,
                             values="rating",
                             index="userid",
                             columns="itemid")
    ratings.fillna(0, inplace=True)
    """ Construct data """
    users = np.unique(ratings.index.values)
    items = np.unique(ratings.columns.values)
    n_users = len(users)
    n_items = len(items)
    print("n_users=%d n_items=%d" % (n_users, n_items))
    """ Compute mean ratingonly from non-zero elements """
    temp = ratings.copy()
    rating_mean = temp.copy().replace(0, np.NaN).mean().mean()
    rating_mean = 3.5 if rating_mean > 3.5 else rating_mean
    print("Rating mean: %.6f" % rating_mean)

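    # Binary mask over the explicitly rated (non-zero) entries.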
    R_mask = np.zeros(np.shape(ratings))
    R_mask[ratings != 0] = 1

    if normalization_enabled:
        # np.subtract with `where=` needs an initialized `out` array;
        # without it the unselected (unrated) entries hold arbitrary
        # memory, and NaNs there would survive the mask multiply.
        ratings_norm = np.zeros(np.shape(ratings))
        np.subtract(ratings.values, rating_mean,
                    where=ratings.values != 0, out=ratings_norm)
        assert (np.count_nonzero(ratings_norm) == np.count_nonzero(ratings))
        R = ratings_norm
    else:
        R = ratings.values.copy()

    # Setup covariance to treat the item columns as input variables
    covar = np.cov(R, rowvar=False)
    evals, evecs = np.linalg.eigh(covar)

    print("cov_mat shape: %s" % str(np.shape(covar)))
    print("evals shape: %s" % str(np.shape(evals)))
    print("evecs shape: %s" % str(np.shape(evecs)))

    n_components = 10  # principal components
    """ Randomly initialize weights table """
    weights = np.random.normal(0, .1, (n_users, n_components))
    # eigh returns eigenvalues in ascending order with eigenvectors as
    # columns, so the top principal directions are the last columns;
    # transpose to get shape (n_components, n_items).
    components = evecs[:, -n_components:].T

    R_hat_mask = np.zeros(np.shape(R), dtype=np.float64)

    if optimize_enabled:
        # optimization parameters
        epochs = 5
        learning_rate = .0001
        lambda_ = .0001
        verbosity = 1
        print("Optimized PCA epochs=%s" % epochs)
        """ We only modify the weight matrix """
        for epoch in range(epochs):
            for u in range(n_users):
                for i in range(n_items):
                    error = R[u, i] - np.dot(weights[u, :], components[:, i])
                    # Use `f`, not `k`, for the factor index: `k` is the
                    # cutoff reused below for precision/recall at k.
                    for f in range(n_components):
                        # Squared-error gradient plus an L2 penalty on the
                        # weight being updated (components are held fixed).
                        weights[u, f] -= learning_rate * (
                            -2 * error * components[f, i] +
                            2 * lambda_ * weights[u, f])

            R_hat = np.zeros(np.shape(R))
            np.matmul(weights, components, out=R_hat)
            # Get errors only from explicitly rated elements
            np.multiply(R_hat, R_mask, out=R_hat_mask)
            # Compute error: MSE = (1/N) * sum((R - R_hat)^2), RMSE = sqrt(MSE)
            diff = np.subtract(R, R_hat_mask)
            diff_square = np.square(diff)
            mse = np.divide(diff_square.sum(), np.count_nonzero(R))
            rmse = np.sqrt(mse)
            if epoch % verbosity == 0 or epoch == (epochs - 1):
                print("Epoch %d: RMSE: %.6f" % (epoch, rmse))
    else:
        R_hat = np.matmul(weights, components)
        print("R_hat shape: %s" % str(np.shape(R_hat)))
        assert (np.shape(R) == np.shape(R_hat))

        print("PCA single run")
        np.multiply(R_hat, R_mask, out=R_hat_mask)
        # Compute error: MSE = (1/N) * sum((R - R_hat)^2), RMSE = sqrt(MSE)
        diff = np.subtract(R, R_hat_mask)
        diff_square = np.square(diff)
        mse = np.divide(diff_square.sum(), np.count_nonzero(R))
        rmse = np.sqrt(mse)
        print("RMSE: %.5f" % rmse)

    assert (R.shape == R_hat.shape)
    sparse_data = sparse.csr_matrix(R)
    predicted_ranks = metrics.rank_matrix(R_hat)
    precision = metrics.precision_at_k(predicted_ranks, sparse_data, k=k)
    recall = metrics.recall_at_k(predicted_ranks, sparse_data, k=k)
    print("Precision:%.3f%% Recall:%.3f%%" % (precision * 100, recall * 100))

    print("\nStoppping '%s" % sys.argv[0])
Code example #2
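    # Assumed context: this method lives in the project's test class,
    # which provides self._ratings (the MovieLens ratings DataFrame),
    # self._k and self._epochs, plus the project's DeepFM model,
    # pandas as pd, numpy as np, matplotlib.pyplot as plt,
    # scipy.sparse as sparse, and the project's `metrics` module.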
    def deepfm_test(self):
        train_x, train_y = DeepFM.df2xy(self._ratings)
        #test_x, test_y = DeepFM.df2xy(self.test_data_)

        params = {
            'n_uid': self._ratings.userid.max(),
            'n_mid': self._ratings.itemid.max(),
            # 'n_genre': self.n_genre_,
            'k': self._k,
            'dnn_dim': [64, 64],
            'dnn_dr': 0.5,
            'filepath': '../data/deepfm_weights.h5'
        }

        """ train """
        model = DeepFM(**params)
        train_history = model.fit(train_x,
                                  train_y,
                                  epochs=self._epochs,
                                  batch_size=2048,
                                  validation_split=0.1)

        history = pd.DataFrame(train_history.history)
        history.plot()
        plt.savefig("../data/history.png")

        """ test """
        results = model.evaluate(train_x, train_y)
        print("Validate result:{0}".format(results))

        """ predict """
        y_hat = model.predict(train_x)

        print(np.shape(y_hat))
        # print(np.shape(test_y))

        """ Run Recall and Precision Metrics """
        n_users = np.max(self._ratings.userid.values) + 1
        n_items = np.max(self._ratings.itemid.values) + 1
        print("n_users={0} n_items={1}".format(n_users, n_items))

        # Convert to sparse matrix to run standard metrics
        sparse_train = sparse.coo_matrix((self._ratings.rating.values,
                                          (self._ratings.userid.values, self._ratings.itemid.values)),
                                         shape=(n_users, n_items))

        # sparse_test = sparse.coo_matrix((self.test_data_.rating.values, \
        #                                  (self.test_data_.uid.values, self.test_data_.mid.values)), \
        #                                 shape=(n_users, n_items))
        # pd.DataFrame(data=sparse_test.tocsr().todense().A).to_csv("./testdata.csv")

        # Predictions rounded to the nearest integer rating
        test_predicted = self._ratings.copy()
        test_predicted.rating = np.round(y_hat)

        sparse_predicted = sparse.coo_matrix(
            (test_predicted.rating.values,
             (test_predicted.userid.values, test_predicted.itemid.values)),
            shape=(n_users, n_items))

        sparse_train_1up = sparse_train.multiply(sparse_train >= 1)
        # sparse_test_1up = sparse_test.multiply(sparse_test >= 1)

        predicted_arr = sparse_predicted.tocsr().todense().A
        predicted_ranks = metrics.rank_matrix(predicted_arr)

        precision_ = metrics.precision_at_k(predicted_ranks, sparse_train, k=self._k)
        recall_ = metrics.recall_at_k(predicted_ranks, sparse_train, k=self._k)

        print("{0}.xdeepfm_test train precision={1:.4f}% recall={2:.4f}% @k={3}".format(
            __class__.__name__, precision_ * 100, recall_ * 100, self._k))
Code example #3
File: tf_mf_optimized.py  Project: akalingking/RecSys
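# Imports assumed by this example; `metrics` is the project's own module.
import numpy as np
import pandas as pd
import tensorflow as tf  # TF 1.x API (tf.Session, tf.train)
from scipy import sparse
import metrics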
def main():
    session = tf.Session()
    normalized_on = False
    k = 100
    """ load dataset """
    datafile = "./data/ml-100k/u.data"
    df = pd.read_csv(datafile,
                     sep='\t',
                     names=["userid", "itemid", "rating", "timestamp"])
    n_users = len(np.unique(df.userid))
    n_items = len(np.unique(df.itemid))
    rating_mean = np.mean(df.rating)
    rating_mean = 3.5 if rating_mean > 3.5 else rating_mean
    print("Raw data:")
    print("Shape: %s" % str(df.shape))
    print("Userid size: %d" % n_users)
    print("Itemid size: %d" % n_items)
    print("Rating mean: %.5f" % rating_mean)
    """ Format ratings to user x item matrix """
    df = df.sort_values(by=["userid", "itemid"])
    ratings = pd.pivot_table(df,
                             values="rating",
                             index="userid",
                             columns="itemid")
    ratings.fillna(0, inplace=True)
    print("Raw ratings size", len(ratings))
    ratings = ratings.astype(np.float64)
    """ Construct training data """
    # train_size = 0.7
    ratings_train_ = ratings  #.loc[:int(n_users*train_size), :int(n_items*train_size)]
    users = ratings_train_.index.values
    items = ratings_train_.columns.values
    n_users = len(users)
    n_items = len(items)
    rating_mean = ratings_train_.replace(0, np.NaN).mean().mean()
    rating_mean = 3.5 if rating_mean > 3.5 else rating_mean

    print("Training data:")
    print("Shape: %s" % str(ratings_train_.shape))
    print("n_users: %d" % n_users)
    print("n_items: %d" % n_items)
    print("rating mean: %.5f" % rating_mean)

    user_indices = [x for x in range(n_users)]
    item_indices = [x for x in range(n_items)]

    print("Max userid train: ", np.max(users))
    print("Max itemid train", np.max(items))
    print("user_indices size ", len(user_indices))
    print("item_indices size ", len(item_indices))

    if normalized_on:
        ratings_norm = np.zeros(ratings_train_.shape)
        temp = ratings_train_.values
        np.subtract(temp, rating_mean, where=temp != 0, out=ratings_norm)
        ratings = ratings_norm
    else:
        ratings = ratings_train_.values

    # Variables
    n_features = 10  # latent factors
    U = tf.Variable(initial_value=tf.truncated_normal([n_users, n_features]))
    P = tf.Variable(initial_value=tf.truncated_normal([n_features, n_items]))

    result = tf.matmul(U, P)

    result_flatten = tf.reshape(result, [-1])
    assert (result_flatten.shape[0] == n_users * n_items)

    # Gather every (user, item) cell in row-major order. Note that
    # `user_indices[:-1] * n_items + item_indices` would be Python list
    # repetition and concatenation, not index arithmetic, so the flat
    # indices are built explicitly.
    flat_indices = [u * n_items + i for u in user_indices for i in item_indices]
    R = tf.gather(result_flatten, flat_indices)
    assert (R.shape[0] == n_users * n_items)

    R_ = tf.reshape(R, [n_users, n_items])
    assert (R_.shape == ratings.shape)
    """ Compute error for values from the original ratings matrix 
        so that means excluding values implicitly computed by UxP """
    var = tf.Variable(ratings.astype(np.float32))
    compare = tf.not_equal(var, tf.constant(0.0))
    compare_op = var.assign(tf.where(compare, tf.ones_like(var), var))
    R_masked = tf.multiply(R_, compare_op)
    assert (ratings.shape == R_masked.shape)
    """ Cost function: sum_ij{ |r_ij- rhat_ij| + lambda*(|u_i|+|p_j|)} """
    diff_op = tf.subtract(ratings.astype(np.float32), R_masked)
    diff_op_abs = tf.abs(diff_op)
    base_cost = tf.reduce_sum(diff_op_abs)

    # Regularizer sum_ij{lambda*(|U_i| + |P_j|)}
    lambda_ = tf.constant(.001)
    norm_sums = tf.add(tf.reduce_sum(tf.abs(U)), tf.reduce_sum(tf.abs(P)))
    regularizer = tf.multiply(norm_sums, lambda_)
    cost = tf.add(base_cost, regularizer)
    """ Optimizer """
    lr = tf.constant(.0001)
    global_step = tf.Variable(0, trainable=False)
    decaying_learning_rate = tf.train.exponential_decay(lr,
                                                        global_step,
                                                        10000,
                                                        .96,
                                                        staircase=True)
    optimizer = tf.train.GradientDescentOptimizer(
        decaying_learning_rate).minimize(cost, global_step=global_step)
    """ Run """
    init = tf.global_variables_initializer()
    session.run(init)

    print("Running stochastic gradient descent..")
    epoch = 500
    for i in range(epoch):
        session.run(optimizer)
        if i % 10 == 0 or i == epoch - 1:
            diff_op_train = tf.subtract(ratings.astype(np.float32), R_masked)
            diff_op_train_squared = tf.square(diff_op_train)
            se = tf.reduce_sum(diff_op_train_squared)
            mse = tf.divide(se, n_users * n_items)
            rmse = tf.sqrt(mse)
            print("Train iter: %d MSE: %.5f loss: %.5f" %
                  (i, session.run(rmse), session.run(cost)))

    R_hat = R_.eval(session=session)
    predicted_ranks = metrics.rank_matrix(R_hat)
    interactions = sparse.csr_matrix(ratings)
    precision = metrics.precision_at_k(predicted_ranks, interactions, k=k)
    recall = metrics.recall_at_k(predicted_ranks, interactions, k=k)

    print("Precision:%.3f%% Recall:%.3f%%" % (precision * 100, recall * 100))
Code example #4
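# Imports assumed by this example; `metrics` is the project's own module.
import sys
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.sparse import linalg  # provides svds
import metrics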
def main():
    print("\nStarting '%s'" % sys.argv[0])

    np.random.seed(8000)

    k = 100

    normalization_enabled = False
    """ Load dataset """
    datafile = "./data/ml-100k/u.data"
    data = pd.read_csv(datafile,
                       sep='\t',
                       names=["userid", "itemid", "rating", "timestamp"])
    """ Convert rating data to user x movie matrix format """
    data = data.sort_values(by=["userid", "itemid"])
    ratings = pd.pivot_table(data,
                             values="rating",
                             index="userid",
                             columns="itemid")
    ratings.fillna(0, inplace=True)

    # train_size = 0.7
    # train_row_size = int(len(ratings.index) * train_size)
    # train_col_size = int(len(ratings.columns) * train_size)
    # ratings = ratings.loc[:train_row_size, :train_col_size]
    users = np.unique(ratings.index.values)
    items = np.unique(ratings.columns.values)
    n_users = len(users)
    n_items = len(items)
    assert (np.max(users) == len(users))
    assert (np.max(items) == len(items))
    print("n_users=%d n_items=%d" % (n_users, n_items))
    """ Take the mean only from non-zero elements """
    temp = ratings.copy()
    rating_mean = temp.copy().replace(0, np.NaN).mean().mean()
    rating_mean = 3.5 if rating_mean > 3.5 else rating_mean
    print("Rating mean: %.2f" % rating_mean)

    if normalization_enabled:
        # Initialize `out` to zeros: np.subtract with `where=` otherwise
        # leaves the unrated entries undefined.
        R = np.zeros(np.shape(ratings))
        np.subtract(ratings.values, rating_mean,
                    where=ratings.values != 0, out=R)
    else:
        R = ratings.values

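    # linalg.svds keeps the k largest singular triplets; note the third
    # return value is actually V^T, with shape (k x n_items).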
    U, S, V = linalg.svds(R, k=k)
    # print ("U: ", np.shape(U))
    # print ("S: ", np.shape(S))
    # print ("V: ", np.shape(V))
    sigma = np.diag(S)
    # print ("Sigma: ", np.shape(sigma))
    """ Generate prediction matrix """
    R_hat = np.dot(np.dot(U, sigma), V)
    assert (np.shape(R) == np.shape(R_hat))

    # Get errors only from explicitly rated elements
    R_mask = np.zeros(np.shape(R))
    R_mask[R != 0] = 1
    R_hat_mask = np.zeros(np.shape(R))
    np.multiply(R_hat, R_mask, out=R_hat_mask)

    # Compute error: MSE = (1/N) * sum((R - R_hat)^2), RMSE = sqrt(MSE)
    assert (np.count_nonzero(R) == np.count_nonzero(R_hat_mask))
    diff = np.subtract(R, R_hat_mask)
    diff_square = np.square(diff)
    #mse = np.divide(diff_square.sum(), n_users*n_items)
    mse = np.divide(diff_square.sum(), np.count_nonzero(R_mask))
    rmse = np.sqrt(mse)
    print("RMSE: %.6f" % (rmse))

    assert (R.shape == R_hat.shape)
    interactions = sparse.csr_matrix(R)
    predicted_ranks = metrics.rank_matrix(R_hat)
    precision = metrics.precision_at_k(predicted_ranks, interactions, k=k)
    recall = metrics.recall_at_k(predicted_ranks, interactions, k=k)
    print("Precision:%.3f%% Recall:%.3f%%" % (precision * 100, recall * 100))

    print("\nStopping '%s'" % sys.argv[0])
Code example #5
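# Imports assumed by this example; `metrics` is the project's own module.
import sys
import numpy as np
import pandas as pd
import tensorflow as tf  # TF 1.x API
from scipy import sparse
import metrics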
def main():
    print("\nStarting '%s'" % sys.argv[0])

    session = tf.Session()

    normalized_on = True

    """ load dataset """
    datafile = "./data/ml-100k/u.data"
    df = pd.read_csv(datafile, sep='\t', names=["userid", "itemid", "rating", "timestamp"])
    n_users = len(np.unique(df.userid))
    n_items = len(np.unique(df.itemid))
    rating_mean = np.mean(df.rating)
    rating_mean = 3.5 if rating_mean > 3.5 else rating_mean

    print ("Raw data:")
    print ("Shape: %s" % str(df.shape))
    print ("Userid size: %d" % n_users)
    print ("Itemid size: %d" % n_items)
    print ("Rating mean: %.5f" % rating_mean)

    """ Format ratings to user x item matrix """
    df = df.sort_values(by=["userid", "itemid"])
    ratings = pd.pivot_table(df, values="rating", index="userid", columns="itemid")
    ratings.fillna(0, inplace=True)
    print("Raw ratings size", len(ratings))
    ratings = ratings.astype(np.float64)

    """ Construct training data """
    # train_factor = 0.7
    # train_size = int(n_users*train_factor)
    # ratings_train_ = ratings.loc[:train_size, :int(n_items*train_factor)]
    users = ratings.index.values
    items = ratings.columns.values
    n_users = len(users)
    n_items = len(items)
    rating_mean = ratings.replace(0, np.NaN).mean().mean()
    rating_mean = 3.5 if rating_mean > 3.5 else rating_mean

    print ("Training data:")
    print ("Shape: %s" % str(ratings.shape))
    print ("n_users: %d" % n_users)
    print ("n_items: %d" % n_items)
    print ("rating mean: %.5f" % rating_mean)

    user_indices = [x for x in range(n_users)]
    item_indices = [x for x in range(n_items)]

    print ("Max userid train: %d" % np.max(users))
    print ("Max itemid train: %d" % np.max(items))
    print ("user_indices size: %d" % len(user_indices))
    print ("item_indices size: %d " % len(item_indices))

    if normalized_on:
        ratings_norm = np.zeros(ratings.shape)
        temp = ratings.values
        np.subtract(temp, rating_mean, where=temp != 0, out=ratings_norm)
        ratings = ratings_norm
    else:
        ratings = ratings.values

    # Variables
    n_features = 10 # latent factors
    U = tf.Variable(initial_value=tf.truncated_normal([n_users, n_features]))
    P = tf.Variable(initial_value=tf.truncated_normal([n_features, n_items]))

    result = tf.matmul(U, P)

    result_flatten = tf.reshape(result, [-1])
    assert (result_flatten.shape[0] == n_users * n_items)

    print ("user indices size: %d item indices size: %d" % (len(user_indices), len(item_indices)))

    # Fill R from result_flatten, gathering every (user, item) cell in
    # row-major order. Note `user_indices[:-1] * n_items + item_indices`
    # would be Python list repetition/concatenation, not index
    # arithmetic, so the flat indices are built explicitly.
    flat_indices = [u * n_items + i for u in user_indices for i in item_indices]
    R = tf.gather(result_flatten, flat_indices)
    assert (R.shape == result_flatten.shape)

    # Format R to user x item sized matrix
    R_ = tf.reshape(R, [n_users, n_items])
    assert (R_.shape == ratings.shape)

    """ Compute error of fields from the original ratings matrix """
    var = tf.Variable(ratings.astype(np.float32))
    compare = tf.not_equal(var, tf.constant(0.0))
    compare_op = var.assign(tf.where(compare, tf.ones_like(var), var))
    R_mask = tf.multiply(R_, compare_op)
    assert (R_mask.shape == np.shape(ratings))

    """ Cost function: sum_ij{ |r_ij- rhat_ij| + lambda*(|u_i|+|p_j|)} """
    # cost |r - r_hat|
    diff_op = tf.subtract(ratings.astype(np.float32), R_mask)
    diff_op_abs = tf.abs(diff_op)
    base_cost = tf.reduce_sum(diff_op_abs)

    lambda_ = tf.constant(.001)
    norm_sums = tf.add(tf.reduce_sum(tf.abs(U)), tf.reduce_sum(tf.abs(P)))
    regularizer = tf.multiply(norm_sums, lambda_)
    cost = tf.add(base_cost, regularizer)

    """ Run """
    init = tf.global_variables_initializer()
    session.run(init)
    session.run(cost)

    """ Mean square error """
    diff_op_train = tf.subtract(ratings.astype(np.float32), R_mask)
    diff_op_train_squared = tf.square(diff_op_train)
    diff_op = tf.sqrt(tf.reduce_sum(diff_op_train_squared))
    cost_train = tf.divide(diff_op, ratings.shape[0])
    cost_train_result =  session.run(cost_train)
    print("Training MSE: %.5f" % cost_train_result)

    k = 100
    R_hat = R_.eval(session=session)
    print(ratings[:5, :5])
    print(R_hat[:5, :5])
    interactions = sparse.csr_matrix(ratings)
    predicted_ranks = metrics.rank_matrix(R_hat)
    precision = metrics.precision_at_k(predicted_ranks, interactions, k=k)
    recall = metrics.recall_at_k(predicted_ranks, interactions, k=k)
    print("Precision:%.3f%% Recall:%.3f%%" % (precision * 100, recall * 100))

    print("\nStopping '%s'" % sys.argv[0])