コード例 #1
0
    def train_model(self):
        """Pairwise training with social items: one optimization pass per
        epoch, mean loss logged, evaluation every `verbose` epochs."""
        self.logger.info(self.evaluator.metrics_info())
        for epoch in range(self.num_epochs):
            # Fresh training instances every epoch.
            users, pos_items, social_items, neg_items, suk = \
                self._get_pairwise_all_data()
            batches = DataIterator(users, pos_items, social_items,
                                   neg_items, suk,
                                   batch_size=self.batch_size,
                                   shuffle=True)
            n_instances = len(users)
            epoch_loss = 0.0
            start = time()
            placeholders = (self.user_input, self.item_input_pos,
                            self.item_input_social, self.item_input_neg,
                            self.suk)
            for batch in batches:
                # Placeholders and batch columns are in matching order.
                feed = dict(zip(placeholders, batch))
                batch_loss, _ = self.sess.run((self.loss, self.optimizer),
                                              feed_dict=feed)
                epoch_loss += batch_loss
            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, epoch_loss / n_instances,
                              time() - start))
            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
コード例 #2
0
    def train_model(self):
        """Denoising-autoencoder style training: corrupt the rating matrix
        with one random binary mask per epoch and fit shuffled user batches."""
        self.logger.info(self.evaluator.metrics_info())
        for epoch in range(self.num_epochs):
            # One corruption mask per epoch, shared by all batches.
            corruption_mask = np.random.binomial(
                1, 1 - self.corruption_level, (self.num_users, self.num_items))

            epoch_loss = 0.0
            user_iter = DataIterator(np.arange(self.num_users),
                                     batch_size=self.batch_size,
                                     shuffle=True,
                                     drop_last=False)
            start = time()
            for user_ids in user_iter:
                # Dense 0/1 interaction rows for this batch of users.
                rating_rows = np.zeros((len(user_ids), self.num_items))
                for row, user in enumerate(user_ids):
                    rating_rows[row, self.train_dict[user]] = 1

                _, batch_loss = self.sess.run(
                    [self.optimizer, self.loss],
                    feed_dict={
                        self.mask_corruption: corruption_mask[user_ids, :],
                        self.input_R: rating_rows
                    })
                epoch_loss += batch_loss
            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, epoch_loss / self.num_users,
                              time() - start))
            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
コード例 #3
0
ファイル: DeepICF.py プロジェクト: zjfng1733/NeuRec-1
    def train_model(self):
        """Pointwise training with negatives resampled every epoch; mean
        loss is logged and the model is evaluated every `verbose` epochs."""
        self.logger.info(self.evaluator.metrics_info())
        for epoch in range(1, self.num_epochs + 1):
            users, num_idx, items, labels = \
                data_generator._get_pointwise_all_likefism_data(self.dataset, self.num_negatives, self.train_dict)
            batches = DataIterator(users, num_idx, items, labels,
                                   batch_size=self.batch_size,
                                   shuffle=True)

            n_instances = len(users)
            epoch_loss = 0.0
            start = time()
            for bat_users, bat_idx, bat_items, bat_labels in batches:
                # Histories are variable-length; pad with the virtual item id.
                padded_users = pad_sequences(bat_users, value=self.num_items)
                feed = {
                    self.user_input: padded_users,
                    self.num_idx: bat_idx,
                    self.item_input: bat_items,
                    self.labels: bat_labels,
                    self.is_train_phase: True
                }
                batch_loss, _ = self.sess.run((self.loss, self.optimizer),
                                              feed_dict=feed)
                epoch_loss += batch_loss
            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, epoch_loss / n_instances,
                              time() - start))
            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
コード例 #4
0
    def train_model(self):
        """Train on users that have at least one interaction.

        For each user batch, positives come from the training CSR matrix and
        per-user negatives are sampled (num_neg per positive interaction).
        Positives and negatives are flattened into one item/label array with
        a row-remapping index, and the augmented batch matrix is fed as a
        sparse (indices, values, shape) triple.
        """
        # Keep only users with at least one training interaction.
        train_users = [
            user for user in range(self.num_users)
            if self.train_csr_mat[user].nnz
        ]
        user_iter = DataIterator(train_users,
                                 batch_size=self.batch_size,
                                 shuffle=True,
                                 drop_last=False)
        self.logger.info(self.evaluator.metrics_info())
        for epoch in range(self.epochs):
            for bat_users in user_iter:
                bat_sp_mat = self.train_csr_mat[bat_users]
                bat_items = []
                bat_labels = []
                bat_idx = []  # maps each item/label entry back to its batch row (decoder side)
                for idx, _ in enumerate(bat_users):
                    # Positives must be read BEFORE negatives are written
                    # into the same sparse row below.
                    pos_items = bat_sp_mat[idx].indices
                    # num_neg negatives per positive, excluding positives;
                    # duplicates are collapsed by np.unique below.
                    neg_items = randint_choice(self.num_items,
                                               size=bat_sp_mat[idx].nnz *
                                               self.num_neg,
                                               replace=True,
                                               exclusion=pos_items)
                    neg_items = np.unique(neg_items)
                    # Mark sampled negatives so they appear in the sparse
                    # batch matrix fed to the model.
                    bat_sp_mat[idx, neg_items] = 1

                    bat_items.append(pos_items)
                    bat_labels.append(np.ones_like(pos_items,
                                                   dtype=np.float32))
                    bat_items.append(neg_items)
                    bat_labels.append(
                        np.zeros_like(neg_items, dtype=np.float32))
                    # Row index repeated once per (pos + neg) entry.
                    bat_idx.append(
                        np.full(len(pos_items) + len(neg_items),
                                idx,
                                dtype=np.int32))

                bat_items = np.concatenate(bat_items)
                bat_labels = np.concatenate(bat_labels)
                bat_idx = np.concatenate(bat_idx)
                bat_users = np.asarray(bat_users)

                # Sparse feed as (indices, values, shape) — presumably a
                # tf.SparseTensor placeholder; confirm against model graph.
                coo = bat_sp_mat.tocoo().astype(np.float32)
                indices = np.asarray([coo.row, coo.col]).transpose()

                feed = {
                    self.users_ph: bat_users,
                    self.remap_idx_ph: bat_idx,
                    self.items_ph: bat_items,
                    self.sp_mat_ph: (indices, coo.data, coo.shape),
                    self.labels_ph: bat_labels,
                    self.dropout_ph: self.dropout,
                    self.noise_shape_ph: bat_sp_mat.nnz
                }
                self.sess.run(self.train_opt, feed_dict=feed)
            result = self.evaluate_model()
            self.logger.info("epoch %d:\t%s" % (epoch, result))
コード例 #5
0
ファイル: IRGAN.py プロジェクト: zjfng1733/NeuRec-1
 def training_discriminator(self):
     """Run one pass of discriminator updates over freshly sampled data."""
     users, items, labels = self.get_train_data()
     batches = DataIterator(users, items, labels,
                            batch_size=self.batch_size, shuffle=True)
     for u, i, label in batches:
         self.sess.run(self.discriminator.d_updates,
                       feed_dict={self.discriminator.u: u,
                                  self.discriminator.i: i,
                                  self.discriminator.label: label})
コード例 #6
0
ファイル: CFGAN.py プロジェクト: zjfng1733/NeuRec-1
    def train_model(self):
        """Adversarial training: each outer epoch alternates `step_D`
        discriminator sweeps with `step_G` generator sweeps over shuffled
        user batches."""
        self.logger.info(self.evaluator.metrics_info())
        g_batches = DataIterator(np.arange(self.num_users),
                                 batch_size=self.batchSize_G,
                                 shuffle=True,
                                 drop_last=False)
        d_batches = DataIterator(np.arange(self.num_users),
                                 batch_size=self.batchSize_D,
                                 shuffle=True,
                                 drop_last=False)

        # Each outer epoch runs step_G generator sweeps, so scale down.
        n_outer_epochs = int(self.epochs / self.step_G)
        for epoch in range(n_outer_epochs):
            train_matrix, zr_matrix, pm_matrix = self.get_train_data()

            # Discriminator phase.
            for _ in range(self.step_D):
                for user_ids in d_batches:
                    real = train_matrix[user_ids].toarray()
                    self.sess.run(self.trainer_d,
                                  feed_dict={
                                      self.real_data: real,
                                      self.mask: pm_matrix[user_ids].toarray(),
                                      self.condition: real
                                  })

            # Generator phase.
            for _ in range(self.step_G):
                for user_ids in g_batches:
                    real = train_matrix[user_ids].toarray()
                    self.sess.run(self.trainer_g,
                                  feed_dict={
                                      self.real_data: real,
                                      self.condition: real,
                                      self.mask: pm_matrix[user_ids].toarray(),
                                      self.g_zr_dims: zr_matrix[user_ids].toarray()
                                  })
            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
コード例 #7
0
    def predict(self, users, items=None):
        """Score all items for `users`; when `items` is given, keep only
        each user's candidate items (items[idx] for the idx-th user)."""
        user_batches = DataIterator(users,
                                    batch_size=512,
                                    shuffle=False,
                                    drop_last=False)
        ratings = []
        for user_ids in user_batches:
            sequences = [self.user_test_seq[u] for u in user_ids]
            batch_ratings = self.sess.run(
                self.bat_ratings,
                feed_dict={self.user_ph: user_ids,
                           self.item_seqs_ph: sequences})
            ratings.extend(batch_ratings)
        ratings = np.array(ratings, dtype=np.float32)

        if items is not None:
            ratings = [
                ratings[idx][item] for idx, item in enumerate(items)
            ]

        return ratings
コード例 #8
0
    def train_model(self):
        """Sequence-model training: fixed (user, seq, pos) triples with
        negatives resampled every epoch; evaluation after each epoch."""
        users_list, item_seq_list, item_pos_list = self._generate_sequences()
        self.logger.info(self.evaluator.metrics_info())
        placeholders = (self.user_ph, self.item_seqs_ph,
                        self.pos_item_ph, self.neg_item_ph)
        for epoch in range(self.epochs):
            item_neg_list = self._sample_negative(users_list)
            batches = DataIterator(users_list,
                                   item_seq_list,
                                   item_pos_list,
                                   item_neg_list,
                                   batch_size=self.batch_size,
                                   shuffle=True)
            for batch in batches:
                # Placeholders and batch columns are in matching order.
                self.sess.run(self.train_opt,
                              feed_dict=dict(zip(placeholders, batch)))
            self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate_model()))
コード例 #9
0
ファイル: TransRec.py プロジェクト: zjfng1733/NeuRec-1
    def predict(self, user_ids, items=None):
        """Predict scores from each user's most recent training item; when
        `items` is given, keep only each user's candidate items."""
        batches = DataIterator(user_ids,
                               batch_size=64,
                               shuffle=False,
                               drop_last=False)
        rating_blocks = []
        for users in batches:
            recent = [self.train_dict[u][-1] for u in users]
            rating_blocks.append(
                self.sess.run(self.prediction,
                              feed_dict={self.user_input: users,
                                         self.item_input_recent: recent}))
        all_ratings = np.vstack(rating_blocks)

        if items is not None:
            all_ratings = [
                all_ratings[idx][item] for idx, item in enumerate(items)
            ]

        return all_ratings
コード例 #10
0
    def _sample_negative(self, users_list):
        """Sample `neg_samples` negatives for every occurrence of each user
        in `users_list`, excluding that user's positive items. Returns one
        length-`neg_samples` array per occurrence, in unique-user order."""
        user_negs = {}
        uniq_users, counts = np.unique(users_list, return_counts=True)
        batches = DataIterator(uniq_users, counts,
                               batch_size=1024, shuffle=False)
        for bat_users, bat_counts in batches:
            # One flat draw per user covers all of that user's occurrences.
            sizes = [c * self.neg_samples for c in bat_counts]
            exclusion = [self.user_pos_train[u] for u in bat_users]
            sampled = batch_randint_choice(self.items_num,
                                           sizes,
                                           replace=True,
                                           exclusion=exclusion)
            user_negs.update(zip(bat_users, sampled))

        neg_items_list = []
        for u, c in zip(uniq_users, counts):
            # Split the flat draw into one row per occurrence.
            neg_items_list.extend(
                np.reshape(user_negs[u], newshape=[c, self.neg_samples]))
        return neg_items_list
コード例 #11
0
ファイル: Fossil.py プロジェクト: zjfng1733/NeuRec-1
    def train_model(self):
        """Train the model, regenerating training instances every epoch.

        ``self.is_pairwise`` selects pairwise (positive/negative pairs) or
        pointwise (binary labels with sampled negatives) training. The mean
        loss is logged each epoch and the model is evaluated every
        ``self.verbose`` epochs.
        """
        self.logger.info(self.evaluator.metrics_info())
        self.evaluate()
        for epoch in range(1, self.num_epochs + 1):
            # PEP 8: test booleans by truthiness, not `is True`.
            if self.is_pairwise:
                user_input_id, user_input, user_input_neg, num_idx_pos,\
                    num_idx_neg, item_input_pos, item_input_neg, item_input_recent = \
                    data_generator._get_pairwise_all_likefossil_data(self.dataset, self.high_order, self.train_dict)

                data_iter = DataIterator(user_input_id,
                                         user_input,
                                         user_input_neg,
                                         num_idx_pos,
                                         num_idx_neg,
                                         item_input_pos,
                                         item_input_neg,
                                         item_input_recent,
                                         batch_size=self.batch_size,
                                         shuffle=True)
            else:
                user_input_id, user_input, num_idx, item_input, item_input_recent, labels = \
                    data_generator._get_pointwise_all_likefossil_data(self.dataset, self.high_order,
                                                                      self.num_negatives, self.train_dict)

                data_iter = DataIterator(user_input_id,
                                         user_input,
                                         num_idx,
                                         item_input,
                                         item_input_recent,
                                         labels,
                                         batch_size=self.batch_size,
                                         shuffle=True)

            num_training_instances = len(user_input)
            total_loss = 0.0
            training_start_time = time()

            if self.is_pairwise:
                for bat_user_input_id, bat_users_pos, bat_users_neg, bat_idx_pos, bat_idx_neg, \
                        bat_items_pos, bat_items_neg, bat_item_input_recent in data_iter:
                    # Variable-length histories are padded with the virtual
                    # item id (num_items) so batches are rectangular.
                    bat_users_pos = pad_sequences(bat_users_pos,
                                                  value=self.num_items)
                    bat_users_neg = pad_sequences(bat_users_neg,
                                                  value=self.num_items)
                    feed_dict = {
                        self.user_input_id: bat_user_input_id,
                        self.user_input: bat_users_pos,
                        self.user_input_neg: bat_users_neg,
                        self.num_idx: bat_idx_pos,
                        self.num_idx_neg: bat_idx_neg,
                        self.item_input: bat_items_pos,
                        self.item_input_neg: bat_items_neg,
                        self.item_input_recent: bat_item_input_recent
                    }

                    loss, _ = self.sess.run((self.loss, self.optimizer),
                                            feed_dict=feed_dict)
                    total_loss += loss
            else:
                for bat_user_input_id, bat_users, bat_idx, bat_items, bat_item_input_recent, bat_labels in data_iter:
                    bat_users = pad_sequences(bat_users, value=self.num_items)
                    feed_dict = {
                        self.user_input_id: bat_user_input_id,
                        self.user_input: bat_users,
                        self.num_idx: bat_idx,
                        self.item_input: bat_items,
                        self.item_input_recent: bat_item_input_recent,
                        self.labels: bat_labels
                    }

                    loss, _ = self.sess.run((self.loss, self.optimizer),
                                            feed_dict=feed_dict)
                    total_loss += loss

            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, total_loss / num_training_instances,
                              time() - training_start_time))

            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
コード例 #12
0
    def train_model(self):
        """Train the model, regenerating training instances every epoch.

        ``self.is_pairwise`` selects pairwise or pointwise training. The
        mean loss is logged each epoch; every ``self.verbose`` epochs the
        model is evaluated on both the validation and the test split.
        """
        self.logger.info(self.evaluator.metrics_info())
        for epoch in range(1, self.num_epochs + 1):
            # PEP 8: test booleans by truthiness, not `is True`.
            if self.is_pairwise:
                user_input, user_input_neg, num_idx_pos, num_idx_neg, item_input_pos, item_input_neg = \
                    data_generator._get_pairwise_all_likefism_data(self.dataset)
                data_iter = DataIterator(user_input,
                                         user_input_neg,
                                         num_idx_pos,
                                         num_idx_neg,
                                         item_input_pos,
                                         item_input_neg,
                                         batch_size=self.batch_size,
                                         shuffle=True)
            else:
                user_input, num_idx, item_input, labels = \
                    data_generator._get_pointwise_all_likefism_data_debug(self.dataset, self.num_negatives, self.train_dict)
                data_iter = DataIterator(user_input,
                                         num_idx,
                                         item_input,
                                         labels,
                                         batch_size=self.batch_size,
                                         shuffle=True)

            total_loss = 0.0
            training_start_time = time()

            if self.is_pairwise:
                for bat_users_pos, bat_users_neg, bat_idx_pos, bat_idx_neg, bat_items_pos, bat_items_neg in data_iter:
                    # Pad variable-length histories with the virtual item id.
                    bat_users_pos = pad_sequences(bat_users_pos,
                                                  value=self.num_items)
                    bat_users_neg = pad_sequences(bat_users_neg,
                                                  value=self.num_items)
                    feed_dict = {
                        self.user_input: bat_users_pos,
                        self.user_input_neg: bat_users_neg,
                        self.num_idx: bat_idx_pos,
                        self.num_idx_neg: bat_idx_neg,
                        self.item_input: bat_items_pos,
                        self.item_input_neg: bat_items_neg
                    }

                    loss, _ = self.sess.run((self.loss, self.optimizer),
                                            feed_dict=feed_dict)
                    total_loss += loss
            else:
                # The batch index was never used, so no enumerate() here.
                for bat_users, bat_idx, bat_items, bat_labels in data_iter:
                    bat_users = pad_sequences(bat_users, value=self.num_items)
                    feed_dict = {
                        self.user_input: bat_users,
                        self.num_idx: bat_idx,
                        self.item_input: bat_items,
                        self.labels: bat_labels
                    }
                    loss, _ = self.sess.run((self.loss, self.optimizer),
                                            feed_dict=feed_dict)
                    total_loss += loss

            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, total_loss / len(user_input),
                              time() - training_start_time))
            if epoch % self.verbose == 0:
                # Dead code removed: model parameters were fetched via
                # sess.run solely for a commented-out pickle dump to a
                # hard-coded path.
                self.logger.info("epoch %d:\t%s" %
                                 (epoch, self.evaluate_val()))
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
コード例 #13
0
    def train_model(self):
        """Train the model, regenerating training instances every epoch.

        ``self.is_pairwise`` selects pairwise or pointwise training. The
        mean loss is logged each epoch; every ``self.verbose`` epochs the
        model is evaluated on both the test and the validation split.
        """
        for epoch in range(1, self.num_epochs + 1):
            # PEP 8: test booleans by truthiness, not `is True`.
            if self.is_pairwise:
                user_input, user_input_neg, num_idx_pos, num_idx_neg, item_input_pos, item_input_neg = \
                    data_generator._get_pairwise_all_likefism_data(self.dataset)
                data_iter = DataIterator(user_input,
                                         user_input_neg,
                                         num_idx_pos,
                                         num_idx_neg,
                                         item_input_pos,
                                         item_input_neg,
                                         batch_size=self.batch_size,
                                         shuffle=True)
            else:
                user_input, num_idx, item_input, labels = \
                    data_generator._get_pointwise_all_likefism_data(self.dataset, self.num_negatives, self.train_dict)
                # NOTE(review): batch_size=1 looks like a debug leftover —
                # the pairwise branch uses self.batch_size. Left unchanged
                # because changing it alters training dynamics; confirm.
                data_iter = DataIterator(user_input,
                                         num_idx,
                                         item_input,
                                         labels,
                                         batch_size=1,
                                         shuffle=True)
            num_training_instances = len(user_input)
            total_loss = 0.0
            training_start_time = time()
            if self.is_pairwise:
                for bat_users_pos, bat_users_neg, bat_idx_pos, bat_idx_neg, bat_items_pos, bat_items_neg in data_iter:
                    # Pad variable-length histories with the virtual item id.
                    bat_users_pos = pad_sequences(bat_users_pos,
                                                  value=self.num_items)
                    bat_users_neg = pad_sequences(bat_users_neg,
                                                  value=self.num_items)
                    feed_dict = {
                        self.user_input: bat_users_pos,
                        self.user_input_neg: bat_users_neg,
                        self.num_idx: bat_idx_pos,
                        self.num_idx_neg: bat_idx_neg,
                        self.item_input: bat_items_pos,
                        self.item_input_neg: bat_items_neg
                    }

                    loss, _ = self.sess.run((self.loss, self.optimizer),
                                            feed_dict=feed_dict)
                    total_loss += loss
            else:
                # Loop variables renamed: the originals shadowed the
                # epoch-level user_input/num_idx/item_input/labels.
                for index, (bat_users, bat_idx, bat_items,
                            bat_labels) in enumerate(data_iter):
                    feed_dict = {
                        self.user_input: bat_users,
                        self.num_idx: bat_idx,
                        self.item_input: bat_items,
                        self.labels: bat_labels
                    }
                    loss, _ = self.sess.run((self.loss, self.optimizer),
                                            feed_dict=feed_dict)
                    if index % 10000 == 0:
                        # Progress heartbeat (batch_size=1 makes epochs
                        # long); routed through the logger, not print().
                        self.logger.info("instance %d" % index)
                    total_loss += loss

            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, total_loss / num_training_instances,
                              time() - training_start_time))
            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
                self.logger.info("epoch %d:\t%s" %
                                 (epoch, self.evaluate_val()))