from time import strftime, localtime, time
from os.path import abspath
# FileIO, Measure, and denormalize are project-level helpers assumed to be in scope

def evalRatings(self):
    res = []  # used to contain the text of the result
    res.append('userId itemId original prediction\n')
    # predict for every held-out (user, item) pair
    for userId in self.dao.testSet_u:
        for ind, item in enumerate(self.dao.testSet_u[userId]):
            itemId = item[0]
            originRating = item[1]
            # predict
            prediction = self.predict(userId, itemId)
            # denormalize back onto the rating scale
            prediction = denormalize(prediction, self.dao.rScale[-1], self.dao.rScale[0])
            pred = self.checkRatingBoundary(prediction)
            # append the prediction so that it can be measured later
            self.dao.testSet_u[userId][ind].append(pred)
            res.append(userId + ' ' + itemId + ' ' + str(originRating) + ' ' + str(pred) + '\n')
    currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
    # output prediction result
    if self.isOutput:
        outDir = self.output['-dir']
        fileName = self.config['recommender'] + '@' + currentTime + '-rating-predictions' + self.foldInfo + '.txt'
        FileIO.writeFile(outDir, fileName, res)
        print('The result has been output to ', abspath(outDir), '.')
    # output evaluation result
    outDir = self.output['-dir']
    fileName = self.config['recommender'] + '@' + currentTime + '-measure' + self.foldInfo + '.txt'
    measure = Measure.ratingMeasure(self.dao.testSet_u)
    FileIO.writeFile(outDir, fileName, measure)
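# A minimal sketch of the two helpers the method above assumes, based only on
# how they are called: denormalize inverts a min-max normalization to [0, 1]
# (rScale is the sorted rating scale, so rScale[0]/rScale[-1] are its min/max),
# and checkRatingBoundary clamps a prediction to that scale. The project's own
# implementations may differ, e.g. in rounding.

def denormalize(score, maxVal, minVal):
    # map a normalized score in [0, 1] back onto [minVal, maxVal]
    return minVal + (maxVal - minVal) * score

def checkRatingBoundary(self, prediction):
    # clamp out-of-range predictions to the endpoints of the rating scale
    if prediction > self.dao.rScale[-1]:
        return self.dao.rScale[-1]
    if prediction < self.dao.rScale[0]:
        return self.dao.rScale[0]
    return round(prediction, 3)  # rounding precision is an assumption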
def evalRatings(self):
    res = []  # used to contain the text of the result
    res.append('userId itemId original prediction\n')
    for ind, entry in enumerate(self.data.testData):
        user, item, rating = entry
        # predict
        prediction = self.predict(user, item)
        # denormalize
        #prediction = denormalize(prediction, self.data.rScale[-1], self.data.rScale[0])
        pred = self.checkRatingBoundary(prediction)
        # append the prediction so that it can be measured later
        self.data.testData[ind].append(pred)
        res.append(user + ' ' + item + ' ' + str(rating) + ' ' + str(pred) + '\n')
    currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
    # output prediction result
    if self.isOutput:
        outDir = self.output['-dir']
        fileName = self.config['recommender'] + '@' + currentTime + '-rating-predictions' + self.foldInfo + '.txt'
        FileIO.writeFile(outDir, fileName, res)
        print('The result has been output to ', abspath(outDir), '.')
    # output evaluation result
    outDir = self.output['-dir']
    fileName = self.config['recommender'] + '@' + currentTime + '-measure' + self.foldInfo + '.txt'
    self.measure = Measure.ratingMeasure(self.data.testData)
    FileIO.writeFile(outDir, fileName, self.measure)
    print('The result of %s %s:\n%s' % (self.algorName, self.foldInfo, ''.join(self.measure)))
def performance(self):
    res = []
    for ind, entry in enumerate(self.dao.testData):
        user, item, rating = entry
        # predict
        prediction = self.predict(user, item)
        # denormalize
        prediction = denormalize(prediction, self.dao.rScale[-1], self.dao.rScale[0])
        pred = self.checkRatingBoundary(prediction)
        # collect (user, item, rating, prediction) rows for measurement
        res.append([user, item, rating, pred])
    # unlike evalRatings, this version no longer writes prediction/measure
    # files; it only computes the measures and returns them
    self.measure = Measure.ratingMeasure(res)
    return self.measure
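# Hypothetical usage sketch (not from the original code): because performance()
# returns its measures instead of writing files, a training loop can poll it,
# assuming each measure is a 'name:value\n' string as the joins above suggest.
# DummyModel and its fake numbers exist only to make the snippet runnable.

class DummyModel:
    def __init__(self):
        self.epochs = 0

    def performance(self):
        self.epochs += 1
        return ['RMSE:' + str(1.0 / self.epochs) + '\n']  # pretend RMSE shrinks

model = DummyModel()
best_rmse = float('inf')
for epoch in range(3):
    rmse = float(model.performance()[0].split(':')[1])
    best_rmse = min(best_rmse, rmse)  # keep the best epoch, e.g. for early stopping
print('best RMSE:', best_rmse)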
def rating_performance(self):
    res = []
    for ind, entry in enumerate(self.data.testData):
        user, item, rating = entry
        # predict
        prediction = self.predict(user, item)
        pred = self.checkRatingBoundary(prediction)
        res.append([user, item, rating, pred])
    self.measure = Measure.ratingMeasure(res)
    return self.measure
def rating_performance(self):
    res = []
    for ind, entry in enumerate(self.data.testData):
        user, item, rating = entry
        # predict
        prediction = self.predict(user, item)
        # denormalize
        #prediction = denormalize(prediction, self.data.rScale[-1], self.data.rScale[0])
        pred = self.checkRatingBoundary(prediction)
        # add prediction in order to measure
        res.append([user, item, rating, pred])
    self.measure = Measure.ratingMeasure(res)
    return self.measure
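# A minimal sketch of what Measure.ratingMeasure is assumed to compute from the
# [user, item, rating, pred] rows built above: MAE and RMSE over the test set,
# formatted as the 'name:value\n' strings that evalRatings joins and writes.
# Ratings are assumed numeric; the real class may report additional metrics.
import math

def rating_measure(res):
    errors = [abs(row[2] - row[3]) for row in res]  # per-row absolute error
    mae = sum(errors) / len(errors)
    rmse = math.sqrt(sum(e * e for e in errors) / len(errors))
    return ['MAE:' + str(mae) + '\n', 'RMSE:' + str(rmse) + '\n']

# e.g. rating_measure([['u1', 'i1', 4.0, 3.5], ['u2', 'i2', 2.0, 2.5]])
# returns ['MAE:0.5\n', 'RMSE:0.5\n']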