def predict(self, uid, iid, r_ui=None, clip=True, verbose=False):
    """Predict a rating for ``uid``/``iid`` as a weighted blend of components.

    Each component algorithm produces its own (unclipped) estimate; the
    final estimate is the weighted sum of those estimates. No intercept
    term is added because the weights are normalized.

    :param uid: User id.
    :param iid: Item id.
    :param r_ui: Observed rating, if known.
    :param clip: If True, clamp the estimate to the trainset rating scale.
    :param verbose: Unused here; kept for interface compatibility.
    :return: A Prediction object.
    """
    # The intercept is deliberately left out of the estimate: the weights
    # are normalized, so it must not contribute.
    est = 0
    # Collects each component's own prediction details, keyed "Comp<i>".
    details = {}
    weights = self.get_weights()
    for idx, (component, weight) in enumerate(zip(self._components, weights)):
        component_pred = component.predict(uid, iid, r_ui, clip=False)
        est += component_pred.est * weight
        details["Comp{0}".format(idx)] = component_pred.details
    if clip:
        low_bound, high_bound = self.trainset.rating_scale
        est = max(low_bound, min(high_bound, est))
    return Prediction(uid, iid, r_ui, est, details)
def process_result(self, job_response): """ Transform the return values of combine() into Prediction. This is equal to the post-processing in predict(). Args: job_response: Instance of JobResponse. Returns: List of Prediction. """ # Adaptation of second part of predict() from AlgoBase uid, iid, r_ui, est = job_response details = {} if isinstance(est, PredictionImpossible): error = str(est) est = self.trainset.global_mean details['was_impossible'] = True details['reason'] = error else: if isinstance(est, tuple): est, details = est details['was_impossible'] = False # Remap the rating into its initial rating scale est -= self.trainset.offset # clip estimate lower_bound, higher_bound = self.trainset.rating_scale est = min(higher_bound, est) est = max(lower_bound, est) return Prediction(uid, iid, r_ui, est, details)
def test_dump():
    """Smoke-test that dump() accepts predictions, a trainset and an algo."""
    trainset = Trainset(*(9 * [None]))
    algo = AlgoBase()
    predictions = [Prediction(None, None, None, None, None)]
    with tempfile.NamedTemporaryFile() as tmp:
        dump(tmp.name, predictions, trainset, algo)
def predict(uid, iid, r_ui=None, clip=True, verbose=False, *,
            WholeSet, ABPredictM, trainset):
    """Predict a rating from the Adaboost component prediction matrices.

    Bug fixes versus the original:
    - The signature was a SyntaxError (non-default arguments followed
      defaulted ones); the required collaborators are now keyword-only,
      which keeps the position and defaults of the original parameters.
    - ``abs(r_ui - est)`` raised TypeError whenever ``r_ui`` was left at
      its default of None; the error field is now None in that case.

    Args:
        uid: (Raw) user id.
        iid: (Raw) item id.
        r_ui: The true rating, if known. Optional, default None.
        clip: Whether to clip the estimate into the trainset rating scale.
        verbose: Whether to print the resulting prediction.
        WholeSet: Trainset used to map raw ids to inner ids.
        ABPredictM: Per-model prediction matrices, indexed [model][iuid][iiid].
        trainset: Trainset providing the rating scale used for clipping.

    Returns:
        A Prediction whose last field is |r_ui - est| when r_ui is known,
        else None.
    """
    # Convert raw ids to inner ids; unknown ids get an 'UKN__' marker.
    try:
        iuid = WholeSet.to_inner_uid(uid)
    except ValueError:
        print("545: uid error!")
        iuid = 'UKN__' + str(uid)
    try:
        iiid = WholeSet.to_inner_iid(iid)
    except ValueError:
        print("545: iid error!")
        iiid = 'UKN__' + str(iid)

    details = {}
    try:
        # Estimation from Adaboost Prediction Model: weighted sum of the
        # per-model predictions. NOTE(review): `m` and `recm_w` are
        # module-level globals defined elsewhere in this file — confirm.
        est = 0.0
        for mm in range(m):
            est += ABPredictM[mm][iuid][iiid] * recm_w[mm]
        # If the details dict was also returned
        if isinstance(est, tuple):
            est, details = est
        details['was_impossible'] = False
    except PredictionImpossible as e:
        est = default_prediction()
        details['was_impossible'] = True
        details['reason'] = str(e)

    # clip estimate into [lower_bound, higher_bound]
    if clip:
        lower_bound, higher_bound = trainset.rating_scale
        est = min(higher_bound, est)
        est = max(lower_bound, est)

    # Guard: r_ui may be None (unobserved), so only compute the absolute
    # error when a true rating was supplied.
    err = abs(r_ui - est) if r_ui is not None else None
    pred = Prediction(uid, iid, r_ui, est, details, err)
    if verbose:
        print(pred)
    return pred
def cross_community_search(save_name, dimension=8, user_threshold=1682,
                           n_users=942, close_nb_amount=30):
    """Evaluate cross-community neighbour predictions from an Annoy index.

    Generalization: the MovieLens-specific constants (user id threshold,
    number of users, neighbour count) are now parameters whose defaults
    reproduce the original hard-coded behavior. The unused locals
    ``actual_nb_amount`` and ``each_user_close_nb`` were removed.

    Args:
        save_name: File name of the saved Annoy index under saved_objs_path.
        dimension: Embedding dimensionality of the index. Default 8.
        user_threshold: Node id boundary used to filter neighbours; user
            source nodes start right after it. Default 1682
            (MovieLens-100k item count).
        n_users: Number of source user nodes to scan. Default 942.
        close_nb_amount: Nearest neighbours fetched per source node.
            Default 30.
    """
    predictions = []
    index = AnnoyIndex(dimension, metric='euclidean')
    index.load(saved_objs_path + '/' + save_name)
    ds = get_dataset()
    g = ds.original_graph
    for src_node in range(user_threshold, user_threshold + n_users):
        nn_nodes, _distances = index.get_nns_by_item(
            src_node, close_nb_amount, include_distances=True)
        for idx, dst_node in enumerate(nn_nodes):
            # Skip neighbours past the threshold (original comment:
            # "remove non-user node").
            if dst_node - user_threshold > 0:
                continue
            # Weight of the path between the two nodes, using edge scores.
            paths = nx.shortest_path(g, source=src_node, target=dst_node,
                                     weight='score')
            path_length = nx.shortest_path_length(g, source=src_node,
                                                  target=dst_node,
                                                  weight='score')
            if path_length == 0:
                continue
            print(idx, paths, path_length)
            real_rating = g[src_node][paths[1]]['score']
            pred_rating = path_length / len(paths)
            predictions.append(
                Prediction(src_node, paths[1], real_rating, pred_rating, {}))
    rmse, prec, rec, ils_sim = evaluate_pred(g, predictions)
    ds_name = 'movielens'
    algo_name = 'GraphRec'
    # Append one CSV row of metrics per run.
    with open(f'eval_{ds_name}.csv', 'a') as f:
        f.write(
            f'{ds_name}_{algo_name},rmse,{rmse},precision,{prec},recall,{rec},ils,{ils_sim}\n'
        )
def predict(self, uid, iid, r_ui=None, clip=True, verbose=False):
    """Compute the rating prediction for a given user and item.

    Raw ids are first converted to inner ids, then the subclass-defined
    ``estimate`` method is called. If estimation is impossible (e.g. the
    user and/or item is unknown), the estimate falls back to
    :meth:`default_prediction()
    <surprise.prediction_algorithms.algo_base.AlgoBase.default_prediction>`.

    Args:
        uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`.
        iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
        r_ui(float): The true rating :math:`r_{ui}`. Optional, default
            is ``None``.
        clip(bool): Whether to clip the estimation into the rating
            scale. Default is ``True``.
        verbose(bool): Whether to print details of the prediction.
            Default is False.

    Returns:
        A :obj:`Prediction\
        <surprise.prediction_algorithms.predictions.Prediction>` object
        holding the raw ids, the true rating, the estimate, and a
        details dict for later analysis.
    """
    # Convert raw ids to inner ids; unknown ids are tagged with an
    # 'UKN__' marker so estimate() can recognize them.
    try:
        iuid = self.trainset.to_inner_uid(uid)
    except ValueError:
        iuid = 'UKN__' + str(uid)
    try:
        iiid = self.trainset.to_inner_iid(iid)
    except ValueError:
        iiid = 'UKN__' + str(iid)

    details = {}
    try:
        est = self.estimate(iuid, iiid)
        # estimate() may also return a details dict alongside the value.
        if isinstance(est, tuple):
            est, details = est
        details['was_impossible'] = False
    except PredictionImpossible as e:
        est = self.default_prediction()
        details['was_impossible'] = True
        details['reason'] = str(e)

    # Clamp the estimate into the trainset's rating scale.
    if clip:
        lower_bound, higher_bound = self.trainset.rating_scale
        est = max(lower_bound, min(higher_bound, est))

    pred = Prediction(uid, iid, r_ui, est, details)
    if verbose:
        print(pred)
    return pred
def predict(uid, iid, r_ui=None, clip=True, verbose=False):
    """Compute the rating prediction for given user and item.

    Raw ids are converted to inner ids via the module-level ``WholeSet``
    trainset, and the estimate is the weighted sum of the per-model
    Adaboost predictions (module globals ``ABPredictM``, ``recm_w``,
    ``m`` — NOTE(review): defined elsewhere in this file; confirm).

    Bug fix: ``abs(r_ui - est)`` raised TypeError whenever ``r_ui`` was
    left at its default of None; the error field is now None in that
    case. Docstring typos ("unkown", "estimated ratino ig") fixed.

    Args:
        uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`.
        iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
        r_ui(float): The true rating :math:`r_{ui}`. Optional, default
            is ``None``.
        clip(bool): Whether to clip the estimation into the rating
            scale, that was set during dataset creation. Default is
            ``True``.
        verbose(bool): Whether to print details of the prediction.
            Default is False.

    Returns:
        A :obj:`Prediction\
        <surprise.prediction_algorithms.predictions.Prediction>` object
        containing:

        - The (raw) user id ``uid``.
        - The (raw) item id ``iid``.
        - The true rating ``r_ui``.
        - The estimated rating (:math:`\\hat{r}_{ui}`).
        - Some additional details about the prediction.
        - The absolute error :math:`|r_{ui} - \\hat{r}_{ui}|`, or None
          when ``r_ui`` is unknown.
    """
    # Convert raw ids to inner ids
    try:
        iuid = WholeSet.to_inner_uid(uid)
    except ValueError:
        print("545: uid error!")
        iuid = 'UKN__' + str(uid)
    try:
        iiid = WholeSet.to_inner_iid(iid)
    except ValueError:
        print("545: iid error!")
        iiid = 'UKN__' + str(iid)

    details = {}
    try:
        # Estimation from Adaboost Prediction Model: weighted sum over
        # the m component models.
        est = 0.0
        for mm in range(m):
            est += ABPredictM[mm][iuid][iiid] * recm_w[mm]
        # If the details dict was also returned
        if isinstance(est, tuple):
            est, details = est
        details['was_impossible'] = False
    except PredictionImpossible as e:
        est = default_prediction()
        details['was_impossible'] = True
        details['reason'] = str(e)

    # clip estimate into [lower_bound, higher_bound]
    if clip:
        lower_bound, higher_bound = trainset.rating_scale
        est = min(higher_bound, est)
        est = max(lower_bound, est)

    # Guard: r_ui may be None (unobserved), so only compute the absolute
    # error when a true rating was supplied.
    err = abs(r_ui - est) if r_ui is not None else None
    pred = Prediction(uid, iid, r_ui, est, details, err)
    if verbose:
        print(pred)
    return pred