def test_predict_recommend(self):
    """Smoke-test NeuralCF pair prediction and top-k recommendation APIs.

    Builds an RDD of 50 random UserItemFeatures and checks that
    predict_user_item_pair / recommend_for_user / recommend_for_item
    all run end to end (results are printed, not asserted).
    """

    def _random_pair_feature(user_count, item_count, label_count):
        # Draw a random (user, item, rating) triple and wrap it as a
        # UserItemFeature whose Sample maps [user_id, item_id] -> rating.
        uid = random.randint(1, user_count)
        iid = random.randint(1, item_count)
        score = random.randint(1, label_count)
        feature = Sample.from_ndarray(np.array([uid, iid]), np.array([score]))
        return UserItemFeature(uid, iid, feature)

    model = NeuralCF(200, 80, 5)
    # 50 random user-item pairs distributed over the test SparkContext.
    pair_rdd = self.sc.parallelize(range(0, 50)) \
        .map(lambda _: _random_pair_feature(200, 80, 5))

    predictions = model.predict_user_item_pair(pair_rdd).collect()
    print(predictions[0])

    recommended_items = model.recommend_for_user(pair_rdd, max_items=3).collect()
    print(recommended_items[0])

    recommended_users = model.recommend_for_item(pair_rdd, max_users=4).collect()
    print(recommended_users[0])
# --- Training ---
optimizer.optimize()

# --- Prediction ---
results = ncf.predict(val_rdd)
results.take(5)

results_class = ncf.predict_class(val_rdd)
results_class.take(5)

# Per-pair predictions for the validation user-item pairs.
userItemPairPrediction = ncf.predict_user_item_pair(valPairFeatureRdds)
for result in userItemPairPrediction.take(5):
    print(result)

# Top-3 item recommendations per user.
userRecs = ncf.recommend_for_user(valPairFeatureRdds, 3)
for result in userRecs.take(5):
    print(result)

# Top-3 user recommendations per item.
itemRecs = ncf.recommend_for_item(valPairFeatureRdds, 3)
for result in itemRecs.take(5):
    print(result)

# --- Evaluation ---
# Read the loss scalars from the train and validation summary objects into
# ndarrays; each event is a (iteration_count, value, timestamp) tuple.
loss = np.array(train_summary.read_scalar("Loss"))
val_loss = np.array(val_summary.read_scalar("Loss"))

# Plot the train and validation loss curves (iteration vs. value).
plt.plot(loss[:, 0], loss[:, 1], label='train loss')
plt.plot(val_loss[:, 0], val_loss[:, 1], label='val loss', color='green')
plt.scatter(val_loss[:, 0], val_loss[:, 1], color='green')
plt.legend()