Example #1
0
class Model:
    """Deep Matrix Factorization recommender (TF1 graph mode).

    Two MLP towers embed users (their item-interaction rows) and items
    (their user-interaction columns); the prediction is the cosine
    similarity of the tower outputs, trained with a normalized
    cross-entropy loss and evaluated by HR@K / NDCG@K on 1-vs-99
    leave-one-out negatives.
    """

    def __init__(self, args):
        self.dataName = args.dataName
        self.dataSet = DataSet(self.dataName)
        self.shape = self.dataSet.shape      # presumably (num_users, num_items) — confirm in DataSet
        self.maxRate = self.dataSet.maxRate  # used to scale ratings into [0, 1] in add_loss

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        self.negNum = args.negNum
        # 1 held-out positive + 99 sampled negatives per test user.
        self.testNeg = self.dataSet.getTestNeg(self.test, 99)
        self.add_embedding_matrix()

        self.add_placeholders()

        self.userLayer = args.userLayer
        self.itemLayer = args.itemLayer
        self.add_model()

        self.add_loss()

        self.lr = args.lr
        self.add_train_step()

        self.checkPoint = args.checkPoint
        self.init_sess()

        self.maxEpochs = args.maxEpochs
        self.batchSize = args.batchSize

        self.topK = args.topK
        self.earlyStop = args.earlyStop

    def add_placeholders(self):
        """Create graph inputs; shapes stay dynamic so batch size can vary."""
        self.user = tf.placeholder(tf.int32)
        self.item = tf.placeholder(tf.int32)
        self.rate = tf.placeholder(tf.float32)
        self.drop = tf.placeholder(tf.float32)  # declared but unused by add_model

    def add_embedding_matrix(self):
        """Expose the raw interaction matrix (and its transpose) as fixed lookup tables."""
        self.user_item_embedding = tf.convert_to_tensor(
            self.dataSet.getEmbedding())
        self.item_user_embedding = tf.transpose(self.user_item_embedding)

    def add_model(self):
        """Build the twin-tower network and the cosine-similarity output ``self.y_``."""
        user_input = tf.nn.embedding_lookup(self.user_item_embedding,
                                            self.user)
        item_input = tf.nn.embedding_lookup(self.item_user_embedding,
                                            self.item)

        def init_variable(shape, name):
            # Small-stddev truncated normal initialization for all weights/biases.
            return tf.Variable(tf.truncated_normal(shape=shape,
                                                   dtype=tf.float32,
                                                   stddev=0.01),
                               name=name)

        with tf.name_scope("User_Layer"):
            # First layer projects the item-interaction row of each user.
            user_W1 = init_variable([self.shape[1], self.userLayer[0]],
                                    "user_W1")
            user_out = tf.matmul(user_input, user_W1)
            for i in range(0, len(self.userLayer) - 1):
                W = init_variable([self.userLayer[i], self.userLayer[i + 1]],
                                  "user_W" + str(i + 2))
                b = init_variable([self.userLayer[i + 1]],
                                  "user_b" + str(i + 2))
                user_out = tf.nn.relu(tf.add(tf.matmul(user_out, W), b))

        with tf.name_scope("Item_Layer"):
            # First layer projects the user-interaction column of each item.
            item_W1 = init_variable([self.shape[0], self.itemLayer[0]],
                                    "item_W1")
            item_out = tf.matmul(item_input, item_W1)
            for i in range(0, len(self.itemLayer) - 1):
                W = init_variable([self.itemLayer[i], self.itemLayer[i + 1]],
                                  "item_W" + str(i + 2))
                b = init_variable([self.itemLayer[i + 1]],
                                  "item_b" + str(i + 2))
                item_out = tf.nn.relu(tf.add(tf.matmul(item_out, W), b))

        # Cosine similarity of the tower outputs, clipped below at 1e-6 so
        # tf.log in the loss never sees a non-positive value.
        norm_user_output = tf.sqrt(tf.reduce_sum(tf.square(user_out), axis=1))
        norm_item_output = tf.sqrt(tf.reduce_sum(tf.square(item_out), axis=1))
        self.y_ = tf.reduce_sum(
            tf.multiply(user_out, item_out), axis=1,
            keep_dims=False) / (norm_item_output * norm_user_output)
        self.y_ = tf.maximum(1e-6, self.y_)

    def add_loss(self):
        """Normalized binary cross-entropy: ratings scaled by ``maxRate`` act
        as soft labels (sampled negatives carry rate 0)."""
        regRate = self.rate / self.maxRate
        losses = regRate * tf.log(
            self.y_) + (1 - regRate) * tf.log(1 - self.y_)
        loss = -tf.reduce_sum(losses)
        self.loss = loss

    def add_train_step(self):
        """Plain Adam on the cross-entropy loss (no learning-rate decay)."""
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = optimizer.minimize(self.loss)

    def init_sess(self):
        """Start a GPU-friendly session and (re)create the checkpoint directory."""
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.config.allow_soft_placement = True
        self.sess = tf.Session(config=self.config)
        self.sess.run(tf.global_variables_initializer())

        self.saver = tf.train.Saver()
        if os.path.exists(self.checkPoint):
            # BUG FIX: os.listdir returns bare file names, so the previous
            # side-effect comprehension `[os.remove(f) for f in ...]` removed
            # CWD-relative paths instead of files inside the checkpoint dir.
            for name in os.listdir(self.checkPoint):
                os.remove(os.path.join(self.checkPoint, name))
        else:
            os.mkdir(self.checkPoint)

    def run(self):
        """Train up to ``maxEpochs``, checkpointing whenever HR or NDCG
        improves, and early-stop after ``earlyStop`` stale epochs."""
        best_hr = -1
        best_NDCG = -1
        best_epoch = -1
        print("Start Training!")
        for epoch in range(self.maxEpochs):
            print("=" * 20 + "Epoch ", epoch, "=" * 20)
            self.run_epoch(self.sess)
            print('=' * 50)
            print("Start Evaluation!")
            hr, NDCG = self.evaluate(self.sess, self.topK)
            print("Epoch ", epoch, "HR: {}, NDCG: {}".format(hr, NDCG))
            if hr > best_hr or NDCG > best_NDCG:
                best_hr = hr
                best_NDCG = NDCG
                best_epoch = epoch
                self.saver.save(self.sess, self.checkPoint)
            if epoch - best_epoch > self.earlyStop:
                print("Normal Early stop!")
                break
            print("=" * 20 + "Epoch ", epoch, "End" + "=" * 20)
        print("Best hr: {}, NDCG: {}, At Epoch {}".format(
            best_hr, best_NDCG, best_epoch))
        print("Training complete!")

    def run_epoch(self, sess, verbose=10):
        """One pass over freshly re-sampled training instances.

        Returns the mean batch loss; prints a running loss every
        ``verbose`` batches.
        """
        train_u, train_i, train_r = self.dataSet.getInstances(
            self.train, self.negNum)
        train_len = len(train_u)
        # Fresh shuffle every epoch.
        shuffled_idx = np.random.permutation(np.arange(train_len))
        train_u = train_u[shuffled_idx]
        train_i = train_i[shuffled_idx]
        train_r = train_r[shuffled_idx]

        # FIX: ceiling division; the old `// batchSize + 1` produced a
        # trailing empty batch whenever train_len was an exact multiple.
        num_batches = (train_len + self.batchSize - 1) // self.batchSize

        losses = []
        for i in range(num_batches):
            min_idx = i * self.batchSize
            max_idx = np.min([train_len, (i + 1) * self.batchSize])
            train_u_batch = train_u[min_idx:max_idx]
            train_i_batch = train_i[min_idx:max_idx]
            train_r_batch = train_r[min_idx:max_idx]

            feed_dict = self.create_feed_dict(train_u_batch, train_i_batch,
                                              train_r_batch)
            _, tmp_loss = sess.run([self.train_step, self.loss],
                                   feed_dict=feed_dict)
            losses.append(tmp_loss)
            if verbose and i % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    i, num_batches, np.mean(losses[-verbose:])))
                sys.stdout.flush()
        loss = np.mean(losses)
        print("\nMean loss in this epoch is: {}".format(loss))
        return loss

    def create_feed_dict(self, u, i, r=None, drop=None):
        """Map user/item/rating/dropout batches onto their placeholders."""
        return {self.user: u, self.item: i, self.rate: r, self.drop: drop}

    def evaluate(self, sess, topK):
        """HR@topK and NDCG@topK over the 1-positive-vs-99-negatives lists."""
        def getHitRatio(ranklist, targetItem):
            # 1 if the held-out positive appears anywhere in the top-K list.
            for item in ranklist:
                if item == targetItem:
                    return 1
            return 0

        def getNDCG(ranklist, targetItem):
            # log2 rank discount for the positive; 0 when it missed the list.
            for i in range(len(ranklist)):
                item = ranklist[i]
                if item == targetItem:
                    return math.log(2) / math.log(i + 2)
            return 0

        hr = []
        NDCG = []
        testUser = self.testNeg[0]
        testItem = self.testNeg[1]
        for i in range(len(testUser)):
            target = testItem[i][0]  # the held-out positive comes first
            feed_dict = self.create_feed_dict(testUser[i], testItem[i])
            predict = sess.run(self.y_, feed_dict=feed_dict)

            item_score_dict = {}

            for j in range(len(testItem[i])):
                item = testItem[i][j]
                item_score_dict[item] = predict[j]

            ranklist = heapq.nlargest(topK,
                                      item_score_dict,
                                      key=item_score_dict.get)

            tmp_hr = getHitRatio(ranklist, target)
            tmp_NDCG = getNDCG(ranklist, target)
            hr.append(tmp_hr)
            NDCG.append(tmp_NDCG)
        return np.mean(hr), np.mean(NDCG)
Example #2
0
class Model:
    """Deep Matrix Factorization recommender (TF1), variant evaluated on
    1 positive + 31 sampled negatives per user.

    Twin MLP towers embed users (interaction rows) and items (interaction
    columns); the prediction is the cosine similarity of the tower
    outputs, trained with a normalized cross-entropy loss. ``evaluate``
    additionally dumps per-user raw scores to a JSON file, and
    ``metrics`` computes offline Top-N / NDCG / MAP against a stored
    ground-truth matrix.
    """

    def __init__(self, args):
        self.dataName = args.dataName
        self.dataSet = DataSet(self.dataName)
        self.shape = self.dataSet.shape      # presumably (num_users, num_items) — confirm in DataSet
        self.maxRate = self.dataSet.maxRate  # used to scale ratings into [0, 1] in add_loss

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        self.negNum = args.negNum
        # 1 held-out positive + 31 sampled negatives per test user (was 99).
        self.testNeg = self.dataSet.getTestNeg(self.test, 31)
        self.add_embedding_matrix()

        self.add_placeholders()

        self.userLayer = args.userLayer
        self.itemLayer = args.itemLayer
        self.add_model()

        self.add_loss()

        self.lr = args.lr
        self.add_train_step()

        self.checkPoint = args.checkPoint
        self.init_sess()

        self.maxEpochs = args.maxEpochs
        self.batchSize = args.batchSize

        self.topK = args.topK
        self.earlyStop = args.earlyStop

    def add_placeholders(self):
        """Create graph inputs; shapes stay dynamic so batch size can vary."""
        self.user = tf.placeholder(tf.int32)
        self.item = tf.placeholder(tf.int32)
        self.rate = tf.placeholder(tf.float32)
        self.drop = tf.placeholder(tf.float32)  # declared but unused by add_model

    def add_embedding_matrix(self):
        """Expose the raw interaction matrix (and its transpose) as fixed lookup tables."""
        self.user_item_embedding = tf.convert_to_tensor(self.dataSet.getEmbedding())
        self.item_user_embedding = tf.transpose(self.user_item_embedding)

    def add_model(self):
        """Build the twin-tower network and the cosine-similarity output ``self.y_``."""
        user_input = tf.nn.embedding_lookup(self.user_item_embedding, self.user)
        item_input = tf.nn.embedding_lookup(self.item_user_embedding, self.item)

        def init_variable(shape, name):
            # Small-stddev truncated normal initialization for all weights/biases.
            return tf.Variable(tf.truncated_normal(shape=shape, dtype=tf.float32, stddev=0.01), name=name)

        with tf.name_scope("User_Layer"):
            # First layer projects the item-interaction row of each user.
            user_W1 = init_variable([self.shape[1], self.userLayer[0]], "user_W1")
            user_out = tf.matmul(user_input, user_W1)
            for i in range(0, len(self.userLayer)-1):
                W = init_variable([self.userLayer[i], self.userLayer[i+1]], "user_W"+str(i+2))
                b = init_variable([self.userLayer[i+1]], "user_b"+str(i+2))
                user_out = tf.nn.relu(tf.add(tf.matmul(user_out, W), b))

        with tf.name_scope("Item_Layer"):
            # First layer projects the user-interaction column of each item.
            item_W1 = init_variable([self.shape[0], self.itemLayer[0]], "item_W1")
            item_out = tf.matmul(item_input, item_W1)
            for i in range(0, len(self.itemLayer)-1):
                W = init_variable([self.itemLayer[i], self.itemLayer[i+1]], "item_W"+str(i+2))
                b = init_variable([self.itemLayer[i+1]], "item_b"+str(i+2))
                item_out = tf.nn.relu(tf.add(tf.matmul(item_out, W), b))

        # Cosine similarity of the tower outputs, clipped below at 1e-6 so
        # tf.log in the loss never sees a non-positive value.
        norm_user_output = tf.sqrt(tf.reduce_sum(tf.square(user_out), axis=1))
        norm_item_output = tf.sqrt(tf.reduce_sum(tf.square(item_out), axis=1))
        self.y_ = tf.reduce_sum(tf.multiply(user_out, item_out), axis=1, keep_dims=False) / (norm_item_output * norm_user_output)
        self.y_ = tf.maximum(1e-6, self.y_)

    def add_loss(self):
        """Normalized binary cross-entropy: ratings scaled by ``maxRate`` act
        as soft labels (sampled negatives carry rate 0)."""
        regRate = self.rate / self.maxRate
        losses = regRate * tf.log(self.y_) + (1 - regRate) * tf.log(1 - self.y_)
        loss = -tf.reduce_sum(losses)
        self.loss = loss

    def add_train_step(self):
        """Plain Adam on the cross-entropy loss (no learning-rate decay)."""
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = optimizer.minimize(self.loss)

    def init_sess(self):
        """Start a GPU-friendly session and (re)create the checkpoint directory."""
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.config.allow_soft_placement = True
        self.sess = tf.Session(config=self.config)
        self.sess.run(tf.global_variables_initializer())

        self.saver = tf.train.Saver()
        if os.path.exists(self.checkPoint):
            # BUG FIX: os.listdir returns bare file names, so the previous
            # side-effect comprehension `[os.remove(f) for f in ...]` removed
            # CWD-relative paths instead of files inside the checkpoint dir.
            for name in os.listdir(self.checkPoint):
                os.remove(os.path.join(self.checkPoint, name))
        else:
            os.mkdir(self.checkPoint)

    def run(self):
        """Train up to ``maxEpochs``, checkpointing whenever HR or NDCG
        improves, and early-stop after ``earlyStop`` stale epochs."""
        best_hr = -1
        best_NDCG = -1
        best_epoch = -1
        print("Start Training!")
        for epoch in range(self.maxEpochs):
            print("="*20+"Epoch ", epoch, "="*20)
            self.run_epoch(self.sess)
            print('='*50)
            print("Start Evaluation!")
            hr, NDCG = self.evaluate(self.sess, self.topK)
            print("Epoch ", epoch, "HR: {}, NDCG: {}".format(hr, NDCG))
            if hr > best_hr or NDCG > best_NDCG:
                best_hr = hr
                best_NDCG = NDCG
                best_epoch = epoch
                self.saver.save(self.sess, self.checkPoint)
            if epoch - best_epoch > self.earlyStop:
                print("Normal Early stop!")
                break
            print("="*20+"Epoch ", epoch, "End"+"="*20)
        print("Best hr: {}, NDCG: {}, At Epoch {}".format(best_hr, best_NDCG, best_epoch))
        print("Training complete!")
        # self.metrics(self.sess)

    def run_epoch(self, sess, verbose=10):
        """One pass over freshly re-sampled training instances; returns mean loss."""
        train_u, train_i, train_r = self.dataSet.getInstances(self.train, self.negNum)
        train_len = len(train_u)
        # Fresh shuffle every epoch.
        shuffled_idx = np.random.permutation(np.arange(train_len))
        train_u = train_u[shuffled_idx]
        train_i = train_i[shuffled_idx]
        train_r = train_r[shuffled_idx]

        # FIX: ceiling division; the old `// batchSize + 1` produced a
        # trailing empty batch whenever train_len was an exact multiple.
        num_batches = (train_len + self.batchSize - 1) // self.batchSize

        losses = []
        for i in range(num_batches):
            min_idx = i * self.batchSize
            max_idx = np.min([train_len, (i+1)*self.batchSize])
            train_u_batch = train_u[min_idx: max_idx]
            train_i_batch = train_i[min_idx: max_idx]
            train_r_batch = train_r[min_idx: max_idx]

            feed_dict = self.create_feed_dict(train_u_batch, train_i_batch, train_r_batch)
            _, tmp_loss = sess.run([self.train_step, self.loss], feed_dict=feed_dict)
            losses.append(tmp_loss)
            if verbose and i % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    i, num_batches, np.mean(losses[-verbose:])
                ))
                sys.stdout.flush()
        loss = np.mean(losses)
        print("\nMean loss in this epoch is: {}".format(loss))
        return loss

    def create_feed_dict(self, u, i, r=None, drop=None):
        """Map user/item/rating/dropout batches onto their placeholders."""
        return {self.user: u,
                self.item: i,
                self.rate: r,
                self.drop: drop}

    def evaluate(self, sess, topK):
        """Score each test user's 32 candidates; returns (HR, NDCG).

        Also dumps every user's raw score list to
        ./Data/ours/usr_scores.json. NOTE(review): the rank list keeps all
        32 candidates (``topK`` is deliberately ignored here, per the
        commented-out original call), which makes HR trivially 1; NDCG
        still reflects the target's rank. Confirm this is intended.
        """
        def getHitRatio(ranklist, targetItem):
            # 1 if the held-out positive appears anywhere in the list.
            for item in ranklist:
                if item == targetItem:
                    return 1
            return 0

        def getNDCG(ranklist, targetItem):
            # log2 rank discount for the positive; 0 when it missed the list.
            for i in range(len(ranklist)):
                item = ranklist[i]
                if item == targetItem:
                    return math.log(2) / math.log(i+2)
            return 0

        hr = []
        NDCG = []
        testUser = self.testNeg[0]
        testItem = self.testNeg[1]
        usr_scores = {}
        for i in range(len(testUser)):
            usr_idx = testUser[i][0]
            target = testItem[i][0]  # the held-out positive comes first
            feed_dict = self.create_feed_dict(testUser[i], testItem[i])
            predict = sess.run(self.y_, feed_dict=feed_dict)
            usr_scores[str(usr_idx)] = list(predict)
            item_score_dict = {}
            for j in range(len(testItem[i])):
                item = testItem[i][j]
                item_score_dict[item] = predict[j]

            # ranklist = heapq.nlargest(topK, item_score_dict, key=item_score_dict.get)
            ranklist = heapq.nlargest(32, item_score_dict, key=item_score_dict.get)
            tmp_hr = getHitRatio(ranklist, target)
            tmp_NDCG = getNDCG(ranklist, target)
            hr.append(tmp_hr)
            NDCG.append(tmp_NDCG)
        print('Len usr_scores:', len(usr_scores))
        write_json(str(usr_scores), './Data/ours/usr_scores.json')
        return np.mean(hr), np.mean(NDCG)

    def metrics(self, sess):
        """Offline metrics: Top-N precision/recall/F1, NDCG@10 and MAP
        against the ground-truth matrix at ./Data/ours/target.npy.

        NOTE(review): relies on hard-coded sizes (150 test users; targets
        assumed relevant-first so index < sum(target) counts as a hit) —
        confirm against the dataset before reuse.
        """
        RS = []
        targets = []

        testUser = self.testNeg[0]
        testItem = self.testNeg[1]
        undup = []  # score each distinct user only once
        for i in range(len(testUser)):
            if testUser[i][0] in undup:
                continue

            undup.append(testUser[i][0])

            target = testItem[i]
            targets.append(target)

            feed_dict = self.create_feed_dict(testUser[i], testItem[i])
            predict = sess.run(self.y_, feed_dict=feed_dict)
            RS.append(predict)

        print('undup:', len(undup), undup)
        Rss = np.asarray(RS)
        targets = np.asarray(targets)
        print(Rss.shape, targets.shape)

        usr_test_amount = 150
        sumtarget = len(testUser)
        testRS = Rss
        target = np.load('./Data/ours/target.npy')

        def F1_score(prec, rec):
            # Harmonic mean of precision and recall.
            f1 = 2*((prec*rec)/(prec+rec))
            return f1

        def topN(RSls, n):
            # Indices of the n highest scores, best first.
            maxn = np.argsort(RSls)[::-1][:n]
            return maxn
        all_sort = []

        for i in range(usr_test_amount):
            all_sort.append(topN(list(testRS[i]), len(testRS[i])))

        all_sort = np.asarray(all_sort)
        print(all_sort.shape)

        def DCG(prec_list):
            # DCG over a binary relevance list such as [1, 1, 1, 0, ...].
            dcg = 0
            for i in range(len(prec_list)):
                dcg += (2**prec_list[i]-1)/math.log2(i+2)
            return dcg

        def NDCG(target, testRS, num_ndcg, all_sort):
            # `target` holds the ground-truth preferences per user.
            total_ndcg = 0

            for m in range(usr_test_amount):  # number of testing users
                idcg = DCG(target[m][:num_ndcg])

                pre_list = []
                for s in all_sort[m][:num_ndcg]:
                    # Collect the relevance score for each ranked slot.
                    pre_list.append(target[m][s])

                dcg = DCG(pre_list)
                ndcg = dcg/idcg
                total_ndcg += ndcg

            avg_ndcg = total_ndcg/usr_test_amount
            return avg_ndcg

        from sklearn.metrics import average_precision_score

        def MAP(target, testRS):
            # Mean of per-user average precision.
            total_prec = 0
            for u in range(usr_test_amount):
                y_true = target[u]
                y_scores = testRS[u]
                total_prec += average_precision_score(y_true, y_scores)

            Map_value = total_prec/usr_test_amount

            return Map_value

        print('\n==============================\n')
        # Top N
        N = [1, 5]
        correct = 0

        for n in N:
            print('Top', n)
            correct = 0

            for i in range(len(testRS)):
                topn = topN(testRS[i], n)
                sum_target = int(np.sum(target[i]))
                TP = 0
                # FIX: inner index renamed so it no longer shadows the outer `i`.
                for j in topn:
                    if j < sum_target:
                        TP += 1

                correct += TP

            print('Num of TP:', correct)
            prec = correct/(len(testRS)*n)
            recall = correct/sumtarget

            print('prec:', prec)
            print('recall:', recall)
            try:
                print('F1_score:', F1_score(prec, recall))
            except ZeroDivisionError:
                print('ZeroDivisionError')
            print('*****')

        print('\n==============================\n')

        # NDCG
        num_ndcgs = [10]
        for num_ndcg in num_ndcgs:
            print('NDCG@', num_ndcg)
            print('NDCG score:', NDCG(target, testRS, num_ndcg, all_sort))
            print('*****')

        print('\n==============================\n')

        # MAP
        print('MAP:', MAP(target, testRS))
        print('\n==============================\n')
Example #3
0
class Model:
    """DMF with an auxiliary user-feature tower (TF1 graph mode).

    Besides the standard user/item towers whose cosine score is ``y_``,
    a UserFeature_Layer maps the feature-by-user matrix into the user
    latent space, giving a feature-based score ``yu_``. ``alfha`` trades
    off the two losses, which are minimized by two Adam steps over
    overlapping variable lists (User_Layer participates in both).
    """
    def __init__(self, args, Testinglist):
        self.dataName = args.dataName
        self.dataSet = DataSet(self.dataName, Testinglist)
        self.shape = self.dataSet.shape      # presumably (num_items?, num_users?) — confirm against add_model usage
        self.maxRate = self.dataSet.maxRate  # used to scale ratings into [0, 1] in add_loss

        self.userFlen = self.dataSet.UF_len       # number of user features
        self.allMatrix = self.dataSet.getTrainAll()  # (users, items, rates) for get_testScore

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        self.negNum = args.negNum
        self.reg = args.reg      # L2 regularization weight
        self.alfha = args.alfha  # trade-off between rating loss and feature loss
        # 1 held-out positive + 20 sampled negatives per test user.
        self.testNeg = self.dataSet.getTestNeg(self.test, 20)

        self.cvfold_num = args.cvfold

        self.add_embedding_matrix()
        print("add_embedding_matrix SUCCESS")
        self.add_placeholders()
        print("add_placeholders SUCCESS")

        self.userLayer = args.userLayer
        self.itemLayer = args.itemLayer
        self.userFLayer = args.userFLayer
        self.add_model()
        print("add_model SUCCESS")

        self.add_loss()
        print("add_loss SUCCESS")

        self.lr = args.lr
        self.add_train_step()
        print("add_train_step SUCCESS")

#         self.checkPoint = args.checkPoint
        self.init_sess()
        print("init_sess SUCCESS")

        self.maxEpochs = args.maxEpochs
        self.batchSize = args.batchSize

        self.topK = args.topK
        self.earlyStop = args.earlyStop  # NOTE(review): set but not used by run()

    def add_placeholders(self):
        """Create graph inputs; shapes stay dynamic so batch size can vary."""
        self.user = tf.placeholder(tf.int32)
        self.item = tf.placeholder(tf.int32)
        self.rate = tf.placeholder(tf.float32)

    def add_embedding_matrix(self):
        """Expose the interaction matrix and the user-feature matrix
        (plus their transposes) as fixed lookup tables."""
        self.user_item_embedding = tf.convert_to_tensor(self.dataSet.getEmbedding())
        self.item_user_embedding = tf.transpose(self.user_item_embedding)
        user_feature = self.dataSet.getFeatureEmbedding()
        self.user_feature_embedding = tf.convert_to_tensor(user_feature)
        self.feature_user_embedding = tf.transpose(tf.convert_to_tensor(user_feature))

    def add_model(self):
        """Build the three towers and the outputs ``y_`` (user-item cosine)
        and ``yu_`` (feature-user similarity), plus the feature-residual
        tensor ``yu_temp`` used by the MSE loss."""
        user_input = tf.nn.embedding_lookup(self.user_item_embedding, self.user)
        item_input = tf.nn.embedding_lookup(self.item_user_embedding, self.item)
        # Whole feature-by-user matrix (not a per-batch lookup).
        Fea_user_input = self.feature_user_embedding
        userFea_input = tf.nn.embedding_lookup(self.user_feature_embedding, self.user)

        def init_variable(shape, name):
            # Small-stddev truncated normal initialization for all weights/biases.
            return tf.Variable(tf.truncated_normal(shape=shape, dtype=tf.float32, stddev=0.01), name=name)

        with tf.name_scope("User_Layer"):
            user_W1 = init_variable([self.shape[1], self.userLayer[0]], "user_W1")
            user_out = tf.matmul(user_input, user_W1)
            for i in range(0, len(self.userLayer)-1):
                W = init_variable([self.userLayer[i], self.userLayer[i+1]], "user_W"+str(i+2))
                b = init_variable([self.userLayer[i+1]], "user_b"+str(i+2))
                user_out = tf.nn.relu(tf.add(tf.matmul(user_out, W), b))

        with tf.name_scope("Item_Layer"):
            item_W1 = init_variable([self.shape[0], self.itemLayer[0]], "item_W1")
            item_out = tf.matmul(item_input, item_W1)
            for i in range(0, len(self.itemLayer)-1):
                W = init_variable([self.itemLayer[i], self.itemLayer[i+1]], "item_W"+str(i+2))
                b = init_variable([self.itemLayer[i+1]], "item_b"+str(i+2))
                item_out = tf.nn.relu(tf.add(tf.matmul(item_out, W), b))

        with tf.name_scope("UserFeature_Layer"):
            userF_W1 = init_variable([self.shape[0], self.userFLayer[0]], "userF_W1")
            userF_out = tf.matmul(Fea_user_input, userF_W1)
            for i in range(0, len(self.userFLayer)-1):
                W = init_variable([self.userFLayer[i], self.userFLayer[i+1]], "userF_W"+str(i+2))
                b = init_variable([self.userFLayer[i+1]], "userF_b"+str(i+2))
                userF_out = tf.nn.relu(tf.add(tf.matmul(userF_out, W), b))

        norm_user_output = tf.norm(user_out, axis=1)
        norm_item_output = tf.norm(item_out, axis=1)
        # NOTE(review): no axis here — this is the norm of the whole userF_out
        # matrix, unlike the per-row norms above; confirm this is intended.
        norm_userF_output = tf.norm(userF_out)

        # Cosine similarity of user and item tower outputs.
        self.y_ = tf.reduce_sum(tf.multiply(user_out, item_out), axis=1, keep_dims=False) / (norm_item_output* norm_user_output)

        self.user_out = user_out
        # Feature-tower scores against every user embedding in the batch.
        self.yu_ = tf.matmul(userF_out, tf.transpose(user_out)) / (norm_user_output* norm_userF_output)

        # Clip both outputs into (0, 1) so tf.log in the losses stays finite.
        self.y_ = tf.maximum(1e-6, self.y_)
        self.yu_ = tf.maximum(1e-6, self.yu_)

        self.y_ = tf.minimum(1-1e-6, self.y_)
        self.yu_ = tf.minimum(1-1e-6, self.yu_)

        # Residual between predicted and actual user features (MSE target).
        self.yu_temp = tf.subtract(self.yu_, tf.transpose(userFea_input))

    def add_loss(self):
        """Define the two objectives: ``loss`` (rating cross-entropy,
        weighted by alfha, with L2 on Item_Layer and User_Layer) and
        ``loss_u`` (feature MSE, weighted by 1-alfha, with L2 on
        UserFeature_Layer and User_Layer)."""
        regRate = self.rate / self.maxRate

        losses = regRate * tf.log(self.y_) + (1 - regRate) * tf.log(1 - self.y_)
        loss = -tf.reduce_sum(losses)

        #MSE
        losses_u = tf.reduce_mean(tf.square(self.yu_temp)) / self.dataSet.UF_len
        t_vars = tf.trainable_variables()
        self.loss = loss * self.alfha + self.reg * tf.add_n([tf.nn.l2_loss(v) for v in t_vars if v.name.startswith('Item_Layer')]) + 0.5 * tf.add_n([tf.nn.l2_loss(v) for v in t_vars if v.name.startswith('User_Layer')])
        self.loss_u = losses_u * (1-self.alfha) + self.reg * tf.add_n([tf.nn.l2_loss(v) for v in t_vars if v.name.startswith('UserFeature_Layer')]) + 0.5 * tf.add_n([tf.nn.l2_loss(v) for v in t_vars if v.name.startswith('User_Layer')])
    def add_train_step(self):
        '''
        global_step = tf.Variable(0, name='global_step', trainable=False)
        self.lr = tf.train.exponential_decay(self.lr, global_step,
                                             self.decay_steps, self.decay_rate, staircase=True)
        '''
        # Two Adam steps over overlapping variable lists: User_Layer variables
        # are updated by BOTH steps (once per loss) on every batch.
        t_vars = tf.trainable_variables()
        var_list1 = []
        var_list2 = []
        for v in t_vars:
            if v.name.startswith('Item_Layer') or v.name.startswith('User_Layer'):
                var_list2.append(v)
        for v in t_vars:
            if v.name.startswith('UserFeature_Layer') or v.name.startswith('User_Layer'):
                var_list1.append(v)

        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step1 = optimizer.minimize(self.loss_u, var_list = var_list1)
        self.train_step2 = optimizer.minimize(self.loss, var_list = var_list2)

    def init_sess(self):
        """Start a GPU-friendly session and initialize all variables
        (no checkpointing in this variant)."""
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.config.allow_soft_placement = True
        self.sess = tf.Session(config=self.config)
        self.sess.run(tf.global_variables_initializer())


    def run(self):
        """Train for all maxEpochs (no early stop), then return the
        (item-user score, user-feature score, user-embedding) matrices."""
        loss_epoch = []
        loss_u_epoch = []
        print("Start Training!")
        for epoch in range(self.maxEpochs):
            print("="*20+"Epoch ", epoch, "="*20)
            loss, loss_u = self.run_epoch(self.sess)
            loss_epoch.append(loss)
            loss_u_epoch.append(loss_u)
            print('='*50)
        s_ItemUser, s_UserF, s_UserEmbedding = self.get_testScore(self.sess)
        return s_ItemUser, s_UserF, s_UserEmbedding

    def run_epoch(self, sess, verbose=10):
        """One pass over freshly re-sampled training instances, running both
        optimizer steps per batch; returns (mean loss, mean feature loss).
        Raises ValueError as a NaN guard on the running loss."""
        train_u, train_i, train_r = self.dataSet.getInstances(self.train, self.negNum)
        train_len = len(train_u)
        # Fresh shuffle every epoch.
        shuffled_idx = np.random.permutation(np.arange(train_len))
        train_u = train_u[shuffled_idx]
        train_i = train_i[shuffled_idx]
        train_r = train_r[shuffled_idx]

        num_batches = len(train_u) // self.batchSize + 1

        losses = []
        losses_u = []
        for i in range(num_batches):
            min_idx = i * self.batchSize
            max_idx = np.min([train_len, (i+1)*self.batchSize])
            train_u_batch = train_u[min_idx: max_idx]
            train_i_batch = train_i[min_idx: max_idx]
            train_r_batch = train_r[min_idx: max_idx]

            feed_dict = self.create_feed_dict(train_u_batch, train_i_batch, train_r_batch)
            _, _, tmp_loss, tmp_loss_u = sess.run([self.train_step1, self.train_step2, self.loss, self.loss_u], feed_dict=feed_dict)
#             _, tmp_loss, tmp_loss_u = sess.run([self.train_step2, self.loss, self.loss_u], feed_dict=feed_dict)
            losses.append(tmp_loss)
            losses_u.append(tmp_loss_u)
            if verbose and i % verbose == 0:
                # Abort the run if training has diverged.
                if np.isnan(np.mean(losses[-verbose:])):
                    raise ValueError
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    i, num_batches, np.mean(losses[-verbose:])
                ))
                sys.stdout.flush()
        loss = np.mean(losses)
        loss_u = np.mean(losses_u)
        print("\nMean loss in this epoch is: {}".format(loss))
        return loss, loss_u

    def create_feed_dict(self, u, i, r=None):
        """Map user/item/rating batches onto their placeholders."""
        return {self.user: u,
                self.item: i,
                self.rate: r,}

    def evaluate(self, sess, topK):
        """HR@topK and NDCG@topK over the 1-positive-vs-20-negatives lists."""
        def getHitRatio(ranklist, targetItem):
            # 1 if the held-out positive appears anywhere in the top-K list.
            for item in ranklist:
                if item == targetItem:
                    return 1
            return 0
        def getNDCG(ranklist, targetItem):
            # log2 rank discount for the positive; 0 when it missed the list.
            for i in range(len(ranklist)):
                item = ranklist[i]
                if item == targetItem:
                    return math.log(2) / math.log(i+2)
            return 0

        hr =[]
        NDCG = []
        testUser = self.testNeg[0]
        testItem = self.testNeg[1]
        for i in range(len(testUser)):
            target = testItem[i][0]  # the held-out positive comes first
            feed_dict = self.create_feed_dict(testUser[i], testItem[i])
            predict = sess.run(self.y_, feed_dict=feed_dict)
            item_score_dict = {}

            for j in range(len(testItem[i])):
                item = testItem[i][j]
                item_score_dict[item] = predict[j]

            ranklist = heapq.nlargest(topK, item_score_dict, key=item_score_dict.get)

            tmp_hr = getHitRatio(ranklist, target)
            tmp_NDCG = getNDCG(ranklist, target)
            hr.append(tmp_hr)
            NDCG.append(tmp_NDCG)
        return np.mean(hr), np.mean(NDCG)

    def get_testScore(self, sess):
        """Score the full training matrix and scatter the predictions into
        dense matrices.

        Returns (s_ItemUser, s_UserF, s_UserEmbedding).
        NOTE(review): shapes are hard-coded — 799 items x 268 users and
        541 features; confirm these match the dataset before reuse.
        """
        train_allu = self.allMatrix[0]
        train_alli = self.allMatrix[1]
        train_allr = self.allMatrix[2]
        feed_dict = self.create_feed_dict(train_allu, train_alli, train_allr)
        predict_p, predict_q, predict_user_out = sess.run([self.y_, self.yu_, self.user_out], feed_dict=feed_dict)


        # Scatter per-pair cosine scores into an item-by-user matrix.
        s_ItemUser = np.zeros((799, 268))
        for i in range(len(predict_p)):
            s_ItemUser[train_alli[i],train_allu[i]] = predict_p[i]


        print(predict_user_out.shape)
        s_UserEmbedding = np.zeros((268, predict_user_out.shape[1]))
         #to get the score of userFeature matrix
        s_UserF = np.zeros((541, 268))
        u_set = set()
        # Keep only the first occurrence of each user in the batch.
        for i in range(predict_q.shape[1]):
            if train_allu[i] not in u_set:
                s_UserF[:,train_allu[i]] = predict_q[:, i]
                s_UserEmbedding[train_allu[i],:] = predict_user_out[i, :]
                u_set.add(train_allu[i])

        return s_ItemUser, s_UserF, s_UserEmbedding
Example #4
0
File: Model.py  Project: www6130911/dmf
class Model(nn.Module):
    """PyTorch port of the Deep Matrix Factorization (DMF) model.

    Two MLP towers (Linear + Tanh stacks) map a user's rating row and an
    item's rating column into a shared latent space; the prediction is
    their cosine similarity, floored at ``mu`` so it can safely feed a
    log-based cross-entropy loss.
    """

    def __init__(self, args):
        super(Model, self).__init__()
        self.dataName = args.dataName
        self.splitSign = args.splitSign
        self.dataSet = DataSet(self.dataName, self.splitSign)
        self.shape = self.dataSet.shape      # [num_users, num_items]
        self.maxRate = self.dataSet.maxRate  # for rating normalization
        self.mu = args.mu                    # lower bound on predictions

        # NOTE(review): these attributes shadow nn.Module.train()/Module
        # naming conventions — model.train is now data, so the train-mode
        # switch method is unavailable on this instance. Kept for
        # backward compatibility with existing callers.
        self.train = self.dataSet.train
        self.test = self.dataSet.test

        self.negNum = args.negNum
        # 99 sampled negative items per positive test interaction.
        self.testNeg = self.dataSet.getTestNeg(self.test, 99)
        self.add_embedding_matrix()

        self.userLayer = args.userLayer      # user-tower layer sizes
        self.itemLayer = args.itemLayer      # item-tower layer sizes
        self.lr = args.lr
        self.checkPoint = args.checkPoint
        self.maxEpochs = args.maxEpochs
        self.batchSize = args.batchSize
        self.topK = args.topK
        self.earlyStop = args.earlyStop
        self.init_model()
        self.training = True

    def init_model(self):
        """Build the two MLP towers and initialize all weights ~ N(0, 0.01)."""
        # User tower: rating row (num_items wide) -> latent vector.
        self.User_Layer = torch.nn.Sequential(
            nn.Linear(self.shape[1], self.userLayer[0]),
            nn.Tanh(),
            nn.Linear(self.userLayer[0], self.userLayer[1]),
            nn.Tanh())

        # Item tower: rating column (num_users wide) -> latent vector.
        self.Item_Layer = torch.nn.Sequential(
            nn.Linear(self.shape[0], self.itemLayer[0]),
            nn.Tanh(),
            nn.Linear(self.itemLayer[0], self.itemLayer[1]),
            nn.Tanh())

        # Fix: nn.init.normal was deprecated and later removed; the
        # supported API is the in-place nn.init.normal_.
        for param in self.parameters():
            nn.init.normal_(param, 0, 0.01)

    def add_embedding_matrix(self):
        """Expose the interaction matrix (and its transpose) as tensors."""
        # Rows: users, columns: items.
        self.user_item_embedding = torch.from_numpy(
            self.dataSet.getEmbedding())
        # Rows: items, columns: users.
        self.item_user_embedding = torch.t(self.user_item_embedding)

    def forward(self, userI, itemJ):
        """Predict scores for a batch of user rows and item columns.

        Args:
            userI: batch of user rating rows, width self.shape[1].
            itemJ: batch of item rating columns, width self.shape[0].

        Returns:
            1-D tensor of cosine similarities, floored at self.mu.
        """
        p = self.User_Layer(userI)
        q = self.Item_Layer(itemJ)

        y_pre = F.cosine_similarity(p, q)

        # Floor the predictions at mu. Done on .data (as in the original)
        # so the clamp itself is not differentiated through — the gradient
        # flows as if the similarity were returned unmodified.
        tensor_mu = torch.FloatTensor(y_pre.size()).fill_(self.mu)
        tensor_mu = useGpu(tensor_mu)
        y_pre.data = y_pre.data.max(tensor_mu)

        return y_pre
예제 #5
0
class DMF:
    """Deep Matrix Factorization recommender (TensorFlow 1.x graph mode).

    Two MLP towers project a user's rating row and an item's rating column
    into a shared latent space; the prediction is their cosine similarity,
    trained with a rating-normalized binary cross-entropy loss (per the
    DMF paper). Evaluation reports HR@K and NDCG@K over sampled negatives.
    """

    def __init__(self, args):
        """Wire up data, graph, loss, optimizer and session from CLI args."""
        self.dataName = args.dataName

        # DataSet exposes shape, train/test splits, trainDict and maxRate.
        self.dataSet = DataSet(self.dataName)
        # shape -> [num_users, num_items].
        self.shape = self.dataSet.shape
        # Maximum rating in the dataset; normalizes ratings in the loss.
        self.maxRate = self.dataSet.maxRate

        self.train = self.dataSet.train
        self.test = self.dataSet.test

        # Negative samples per positive training instance (default 7).
        self.negNum = args.negNum
        # 99 sampled negative (zero-rating) items per test positive.
        self.testNeg = self.dataSet.getTestNeg(self.test, 99)

        self.add_embedding_matrix()
        self.add_placeholders()

        self.userLayer = args.userLayer
        self.itemLayer = args.itemLayer
        self.add_model()
        self.add_loss()

        self.lr = args.lr
        self.add_train_step()

        self.checkPoint = args.checkPoint
        self.init_sess()

        self.maxEpochs = args.maxEpochs
        self.batchSize = args.batchSize

        # Metrics are computed over the top-K ranked items (default 10).
        self.topK = args.topK
        # Epochs without improvement before stopping (default 5).
        self.earlyStop = args.earlyStop

    def add_placeholders(self):
        """Graph entry points for user ids, item ids, ratings and dropout."""
        self.user = tf.placeholder(tf.int32)
        self.item = tf.placeholder(tf.int32)
        self.rate = tf.placeholder(tf.float32)
        self.drop = tf.placeholder(tf.float32)

    def add_embedding_matrix(self):
        """Expose the rating matrix (and its transpose) as constant tensors."""
        # Rows: users, columns: items.
        self.user_item_embedding = tf.convert_to_tensor(
            self.dataSet.getEmbedding())
        # Rows: items, columns: users.
        self.item_user_embedding = tf.transpose(self.user_item_embedding)

    def add_model(self):
        """Build the two MLP towers and the cosine-similarity prediction."""
        # Gather each batch user's rating row / item's rating column.
        user_input = tf.nn.embedding_lookup(self.user_item_embedding,
                                            self.user)
        item_input = tf.nn.embedding_lookup(self.item_user_embedding,
                                            self.item)

        def init_variable(shape, name):
            # Trainable weight drawn from a truncated normal (stddev 0.01).
            return tf.Variable(tf.truncated_normal(shape=shape,
                                                   dtype=tf.float32,
                                                   stddev=0.01),
                               name=name)

        with tf.name_scope("User_Layer"):
            # First projection: l(u) = Y(u) * W(u).
            user_W1 = init_variable([self.shape[1], self.userLayer[0]],
                                    "user_W1")
            user_out = tf.matmul(user_input, user_W1)

            # Hidden layers: relu(l * W + b), sizes from userLayer.
            for i in range(0, len(self.userLayer) - 1):
                W = init_variable([self.userLayer[i], self.userLayer[i + 1]],
                                  "user_W" + str(i + 2))
                b = init_variable([self.userLayer[i + 1]],
                                  "user_b" + str(i + 2))
                user_out = tf.nn.relu(tf.add(tf.matmul(user_out, W), b))

        with tf.name_scope("Item_Layer"):
            # First projection: l(i) = Y(i) * W(i).
            item_W1 = init_variable([self.shape[0], self.itemLayer[0]],
                                    "item_W1")
            item_out = tf.matmul(item_input, item_W1)

            # Hidden layers: relu(l * W + b), sizes from itemLayer.
            for i in range(0, len(self.itemLayer) - 1):
                W = init_variable([self.itemLayer[i], self.itemLayer[i + 1]],
                                  "item_W" + str(i + 2))
                b = init_variable([self.itemLayer[i + 1]],
                                  "item_b" + str(i + 2))
                item_out = tf.nn.relu(tf.add(tf.matmul(item_out, W), b))

        # Cosine similarity between user and item latent vectors.
        norm_user_output = tf.sqrt(tf.reduce_sum(tf.square(user_out), axis=1))
        norm_item_output = tf.sqrt(tf.reduce_sum(tf.square(item_out), axis=1))
        self.y_ = tf.reduce_sum(
            tf.multiply(user_out, item_out), axis=1,
            keep_dims=False) / (norm_item_output * norm_user_output)

        # Cosine similarity can be negative, which would break log() in the
        # loss; clamp predictions to a small positive floor.
        self.y_ = tf.maximum(1e-6, self.y_)

    def add_loss(self):
        """Normalized binary cross-entropy over explicit ratings."""
        # Scale ratings into (0, 1] so explicit and implicit feedback can
        # share one cross-entropy objective.
        regRate = self.rate / self.maxRate
        losses = regRate * tf.log(
            self.y_) + (1 - regRate) * tf.log(1 - self.y_)
        loss = -tf.reduce_sum(losses)
        self.loss = loss

    def add_train_step(self):
        """Minimize the loss with Adam at the configured learning rate."""
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = optimizer.minimize(self.loss)

    def init_sess(self):
        """Create the session, initialize variables, reset the checkpoint dir."""
        self.config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        self.config.gpu_options.allow_growth = True
        # Fall back to an available device if the requested one is missing.
        self.config.allow_soft_placement = True
        self.sess = tf.Session(config=self.config)
        # Allocate and initialize all model variables.
        self.sess.run(tf.global_variables_initializer())
        # Saves (and restores) model variables.
        self.saver = tf.train.Saver()

        # Start from an empty checkpoint directory.
        if os.path.exists(self.checkPoint):
            # Fix: delete files from the configured directory; the previous
            # code hard-coded './checkPoint/' and ignored self.checkPoint.
            for f in os.listdir(self.checkPoint):
                os.remove(os.path.join(self.checkPoint, f))
        else:
            os.mkdir(self.checkPoint)

    def run(self):
        """Train up to maxEpochs, checkpointing and early-stopping on HR/NDCG."""
        best_hr = -1
        best_NDCG = -1
        # Epoch at which the best metrics were observed.
        best_epoch = -1

        print("Start Training!")
        for epoch in range(self.maxEpochs):
            print("=" * 20 + "Epoch ", epoch, "=" * 20)
            self.run_epoch(self.sess)
            print('=' * 50)

            print("Start Evaluation!")
            hr, NDCG = self.evaluate(self.sess, self.topK)
            print("Epoch ", epoch, "HR: {}, NDCG: {}".format(hr, NDCG))

            # Save a checkpoint whenever either metric improves.
            if hr > best_hr or NDCG > best_NDCG:
                best_hr = hr
                best_NDCG = NDCG
                best_epoch = epoch
                self.saver.save(self.sess, self.checkPoint)

            # Stop once neither metric has improved for earlyStop epochs.
            if epoch - best_epoch > self.earlyStop:
                print("Normal Early stop!")
                break
            print("=" * 20 + "Epoch ", epoch, "End" + "=" * 20)

        print("Best hr: {}, NDCG: {}, At Epoch {}".format(
            best_hr, best_NDCG, best_epoch))
        print("Training complete!")

    def run_epoch(self, sess, verbose=10):
        """Train one epoch over freshly sampled instances; return mean loss.

        Args:
            sess: the active tf.Session.
            verbose: print running loss every `verbose` batches (0 disables).
        """
        # Positive interactions plus negNum sampled negatives each.
        train_u, train_i, train_r = self.dataSet.getInstances(
            self.train, self.negNum)
        train_len = len(train_u)
        print('\n# Training Instances: {}\n'.format(train_len))

        # Shuffle all three arrays with one shared permutation.
        index_shuffled = np.random.permutation(np.arange(train_len))
        train_u = train_u[index_shuffled]
        train_i = train_i[index_shuffled]
        train_r = train_r[index_shuffled]

        # +1 so the final partial batch is included.
        number_of_batches = (len(train_u) // self.batchSize) + 1

        losses = []
        for i in range(number_of_batches):
            min_index = i * self.batchSize
            # Clamp so the last batch stops at train_len.
            max_index = np.min([train_len, (i + 1) * self.batchSize])

            train_u_batch = train_u[min_index:max_index]
            train_i_batch = train_i[min_index:max_index]
            train_r_batch = train_r[min_index:max_index]

            feed_dict = self.create_feed_dict(train_u_batch, train_i_batch,
                                              train_r_batch)
            # One optimization step; also fetch the batch loss.
            _, tmp_loss = sess.run([self.train_step, self.loss],
                                   feed_dict=feed_dict)
            losses.append(tmp_loss)

            if verbose and i % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    i, number_of_batches, np.mean(losses[-verbose:])))
                sys.stdout.flush()

        # Mean of all batch losses for this epoch.
        loss = np.mean(losses)
        print("\nMean loss in this epoch is: {}".format(loss))
        return loss

    def create_feed_dict(self, u, i, r=None, drop=None):
        """Map user/item/rating/dropout inputs onto their placeholders."""
        return {self.user: u, self.item: i, self.rate: r, self.drop: drop}

    def evaluate(self, sess, topK):
        """Return mean HR@topK and NDCG@topK over the sampled test negatives."""

        def getHitRatio(ranklist, targetItem):
            # 1 if the held-out item appears anywhere in the top-K list.
            for item in ranklist:
                if item == targetItem:
                    return 1
            return 0

        def getNDCG(ranklist, targetItem):
            # log2-discounted gain at the target's rank (0 if absent).
            for i in range(len(ranklist)):
                item = ranklist[i]
                if item == targetItem:
                    return math.log(2) / math.log(i + 2)
            return 0

        hr = []
        NDCG = []
        testUser = self.testNeg[0]
        testItem = self.testNeg[1]

        for i in range(len(testUser)):
            # Convention: the first item in each candidate list is the
            # positive (held-out) interaction; the rest are negatives.
            target = testItem[i][0]

            feed_dict = self.create_feed_dict(testUser[i], testItem[i])
            predict = sess.run(self.y_, feed_dict=feed_dict)

            # Map each candidate item to its predicted score.
            item_score_dict = {}
            for j in range(len(testItem[i])):
                item = testItem[i][j]
                item_score_dict[item] = predict[j]

            # Top-K candidate items by predicted score.
            ranklist = heapq.nlargest(topK,
                                      item_score_dict,
                                      key=item_score_dict.get)

            # Per-user metrics for this candidate list.
            tmp_hr = getHitRatio(ranklist, target)
            tmp_NDCG = getNDCG(ranklist, target)
            hr.append(tmp_hr)
            NDCG.append(tmp_NDCG)

        # Average over all test users.
        return np.mean(hr), np.mean(NDCG)