def predict_model_cold_users(self):
    """Evaluate the model on the held-out cold-start user set.

    Iterates every (user, item, rating) triple in
    ``self.rg.testColdUserSet_u``, predicts a score, denormalizes it back
    to the original rating scale, and clips it to the valid rating range.

    Returns:
        float: RMSE over all cold-start test ratings.
    """
    results = []
    # .items() avoids the repeated dict lookups of iterating .keys()
    for user, item_ratings in self.rg.testColdUserSet_u.items():
        for item, rating in item_ratings.items():
            pred = self.predict(user, item)
            # map the normalized prediction back to the rating scale
            pred = denormalize(pred, self.config.min_val, self.config.max_val)
            pred = self.checkRatingBoundary(pred)
            results.append([user, item, rating, pred])
    return Metric.RMSE(results)
def valid_model(self):
    """Evaluate the model on the validation set.

    For each (user, item, rating) entry of ``self.rg.validSet()`` the
    prediction is denormalized to the original rating scale and clipped
    to the valid rating range. RMSE and MAE are also appended to
    ``self.iter_rmse`` / ``self.iter_mae`` for later plotting.

    Returns:
        tuple[float, float]: (rmse, mae) on the validation set.
    """
    results = []
    # unpack directly; the enumerate index in the original was unused
    for user, item, rating in self.rg.validSet():
        prediction = self.predict(user, item)
        # map the normalized prediction back to the rating scale
        prediction = denormalize(prediction, self.config.min_val, self.config.max_val)
        pred = self.checkRatingBoundary(prediction)
        results.append([user, item, rating, pred])
    rmse = Metric.RMSE(results)
    mae = Metric.MAE(results)
    self.iter_rmse.append(rmse)  # for plot
    self.iter_mae.append(mae)
    return rmse, mae
def predict_model(self):
    """Evaluate the model on the test set, skipping cold-start users.

    Users with at most ``self.config.coldUserRating`` training ratings are
    excluded (they are evaluated separately by
    ``predict_model_cold_users``). Predictions are denormalized back to the
    original rating scale and clipped to the valid range. RMSE and MAE are
    also appended to ``self.iter_rmse`` / ``self.iter_mae`` for plotting.

    Returns:
        tuple[float, float]: (rmse, mae) on the non-cold-start test ratings.
    """
    results = []
    # unpack directly; the enumerate index in the original was unused
    for user, item, rating in self.rg.testSet():
        # guard clause: skip cold-start users for this evaluation
        # NOTE(review): assumes every test user appears in trainSet_u —
        # verify, or a missing key would raise here
        if len(self.rg.trainSet_u[user]) <= self.config.coldUserRating:
            continue
        prediction = self.predict(user, item)
        # map the normalized prediction back to the rating scale
        prediction = denormalize(prediction, self.config.min_val, self.config.max_val)
        pred = self.checkRatingBoundary(prediction)
        results.append([user, item, rating, pred])
    rmse = Metric.RMSE(results)
    mae = Metric.MAE(results)
    self.iter_rmse.append(rmse)  # for plot
    self.iter_mae.append(mae)
    return rmse, mae