Example #1
    # Every prediction entry point must raise ModelNotFitException when called on an unfit model.
    def test_predict_fail_unfit(self):
        model = TensorRec()
        with self.assertRaises(ModelNotFitException):
            model.predict(self.user_features, self.item_features)
        with self.assertRaises(ModelNotFitException):
            model.predict_rank(self.user_features, self.item_features)

        with self.assertRaises(ModelNotFitException):
            model.predict_user_representation(self.user_features)
        with self.assertRaises(ModelNotFitException):
            model.predict_item_representation(self.item_features)
        with self.assertRaises(ModelNotFitException):
            model.predict_user_attention_representation(self.user_features)

        with self.assertRaises(ModelNotFitException):
            model.predict_similar_items(self.item_features,
                                        item_ids=[1],
                                        n_similar=5)

        with self.assertRaises(ModelNotFitException):
            model.predict_item_bias(self.item_features)
        with self.assertRaises(ModelNotFitException):
            model.predict_user_bias(self.user_features)
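These assertions cover only the unfit path. As a complement, here is a minimal sketch (not part of the original test suite; the sparse inputs and hyperparameters are arbitrary placeholders) of the fitted path, where the same prediction methods are expected to succeed:

from scipy import sparse
from tensorrec import TensorRec

# Tiny random sparse inputs; shapes and density are arbitrary, for illustration only.
interactions = sparse.random(20, 30, density=0.2, format='csr')
user_features = sparse.identity(20, format='csr')
item_features = sparse.identity(30, format='csr')

model = TensorRec(n_components=4)
model.fit(interactions, user_features, item_features, epochs=5)

# Once the model has been fit, the prediction methods exercised above no longer raise
# ModelNotFitException.
predictions = model.predict(user_features, item_features)
ranks = model.predict_rank(user_features, item_features)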
Example #2
# NOTE: the start of this example (imports, MovieLens data loading, `epochs`, `fit_kwargs`, and the
# beginning of the constructor call) is truncated in the source. The imports and constructor below are
# a reconstruction; n_components=2 is an assumption, matching the 2-D representations plotted below.
import matplotlib.pyplot as plt
import numpy as np

from tensorrec import TensorRec

model = TensorRec(n_components=2,
                  n_tastes=3)

# Make some random selections of movies and users we want to plot
movies_to_plot = np.random.choice(a=item_features.shape[0], size=200, replace=False)
user_to_plot = np.random.choice(a=user_features.shape[0], size=400, replace=False)

# Iterate through 1000 epochs, outputting a JPG plot each epoch
for epoch in range(epochs):
    model.fit_partial(interactions=train_interactions, user_features=user_features, item_features=item_features,
                      **fit_kwargs)

    # The position of a movie or user is that movie's/user's 2-dimensional representation. The size of the movie dot is
    # related to its item bias.
    movie_positions = model.predict_item_representation(item_features)
    user_positions = model.predict_user_representation(user_features)
    movie_sizes = model.predict_item_bias(item_features) * 10 + 1.0

    # Handle multiple tastes, if applicable. If there is more than one taste per user, only the first of each
    # user's tastes will be plotted.
    if model.n_tastes > 1:
        user_positions = user_positions[0]

    _, ax = plt.subplots()
    ax.grid(True, which='both')
    ax.axhline(y=0, color='k')
    ax.axvline(x=0, color='k')
    ax.scatter(*zip(*user_positions[user_to_plot]), color='r', s=1)
    ax.scatter(*zip(*movie_positions[movies_to_plot]), s=movie_sizes)
    ax.set_aspect('equal')

    for i, movie in enumerate(movies_to_plot):
        # The remainder of this example is truncated in the source. A minimal, assumed completion:
        # label each plotted movie with its item index (the original presumably uses movie titles)
        # and write the epoch's plot to a JPG, as described in the comment above the loop.
        ax.annotate(str(movie), movie_positions[movie], fontsize=5)

    plt.savefig('epoch_{}.jpg'.format(epoch))
    plt.close()