def train_model(self):
    """Run the training loop over pointwise high-order sequence instances.

    Each epoch regenerates instances via data_gen, feeds full mini-batches
    to the TF graph, and periodically evaluates (every self.verbose epochs).
    """
    for epoch in range(self.num_epochs):
        # Generate this epoch's training instances.
        user_input, item_input, item_input_recents, lables = \
            data_gen._get_pointwise_all_highorder_data(
                self.dataset, self.high_order, self.num_negatives)
        num_training_instances = len(user_input)
        total_loss = 0.0
        training_start_time = time()
        # Full batches only; any trailing partial batch is dropped.
        for batch_id in range(int(num_training_instances / self.batch_size)):
            bat_users, bat_items, bat_items_recents, bat_lables = \
                data_gen._get_pointwise_batch_seqdata(
                    user_input, item_input, item_input_recents, lables,
                    batch_id, self.batch_size)
            feed = {self.user_input: bat_users,
                    self.item_input: bat_items,
                    self.item_input_recents: bat_items_recents,
                    self.lables: bat_lables}
            loss, _ = self.sess.run((self.loss, self.optimizer), feed_dict=feed)
            total_loss += loss
        print("[iter %d : loss : %f, time: %f]" %
              (epoch + 1, total_loss / num_training_instances,
               time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Run the training loop, dispatching to pairwise or pointwise sampling.

    self.ispairwise ("true"/other, case-insensitive) selects the instance
    generator and the feed dict used for every batch of every epoch.
    """
    is_pairwise = self.ispairwise.lower() == "true"
    for epoch in range(self.num_epochs):
        # Generate this epoch's training instances.
        if is_pairwise:
            user_input, item_input_pos, item_input_neg = \
                data_gen._get_pairwise_all_data(self.dataset)
        else:
            user_input, item_input, lables = \
                data_gen._get_pointwise_all_data(self.dataset, self.num_negatives)
        total_loss = 0.0
        training_start_time = time()
        num_training_instances = len(user_input)
        # Full batches only; any trailing partial batch is dropped.
        for batch_id in range(int(num_training_instances / self.batch_size)):
            if is_pairwise:
                bat_users, bat_items_pos, bat_items_neg = \
                    data_gen._get_pairwise_batch_data(
                        user_input, item_input_pos, item_input_neg,
                        batch_id, self.batch_size)
                feed = {self.user_input: bat_users,
                        self.item_input: bat_items_pos,
                        self.item_input_neg: bat_items_neg}
            else:
                bat_users, bat_items, bat_lables = \
                    data_gen._get_pointwise_batch_data(
                        user_input, item_input, lables,
                        batch_id, self.batch_size)
                feed = {self.user_input: bat_users,
                        self.item_input: bat_items,
                        self.lables: bat_lables}
            loss, _ = self.sess.run((self.loss, self.optimizer), feed_dict=feed)
            total_loss += loss
        print("[iter %d : loss : %f, time: %f]" % (
            epoch + 1, total_loss / num_training_instances,
            time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Run the pairwise training loop.

    Feeds (user, positive item, negative item) batches along with fixed
    dropout rates of 0.1 for node and message dropout placeholders.
    """
    for epoch in range(self.num_epochs):
        # Generate this epoch's pairwise training instances.
        user_input, item_input_pos, item_input_neg = \
            data_gen._get_pairwise_all_data(self.dataset)
        total_loss = 0.0
        training_start_time = time()
        num_training_instances = len(user_input)
        # Full batches only; any trailing partial batch is dropped.
        for batch_id in range(int(num_training_instances / self.batch_size)):
            bat_users, bat_items_pos, bat_items_neg = \
                data_gen._get_pairwise_batch_data(
                    user_input, item_input_pos, item_input_neg,
                    batch_id, self.batch_size)
            feed = {self.users: bat_users,
                    self.pos_items: bat_items_pos,
                    self.node_dropout: [0.1],
                    self.mess_dropout: [0.1],
                    self.neg_items: bat_items_neg}
            loss, _ = self.sess.run((self.loss, self.optimizer), feed_dict=feed)
            total_loss += loss
        print("[iter %d : loss : %f, time: %f]" %
              (epoch + 1, total_loss / num_training_instances,
               time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Train from batches produced by self.shuffle() / self.batch_gen().

    Each epoch reshuffles, iterates every batch with the train-phase flag
    set, and reports the per-batch mean loss.
    """
    for epoch in range(self.num_epochs):
        batches = self.shuffle()
        num_batch = len(batches[1])
        training_start_time = time()
        total_loss = 0.0
        for index in range(num_batch):
            user_input, num_idx, item_input, labels = self.batch_gen(batches, index)
            feed = {self.user_input: user_input,
                    self.num_idx: num_idx,
                    self.item_input: item_input,
                    self.labels: labels,
                    self.is_train_phase: True}
            loss, _ = self.sess.run([self.loss, self.optimizer], feed)
            total_loss += loss
        # Note: this model averages loss over batches, not instances.
        print("[iter %d : loss : %f, time: %f]" %
              (epoch + 1, total_loss / num_batch,
               time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Run the pointwise training loop over one-hot (user, item, label) feeds.

    Improvements over the previous revision: the loop-invariant instance
    count is computed once per epoch instead of once per batch, and the
    unreachable end-clamping branch is removed — the loop bound visits
    full batches only, so id_end can never exceed the instance count.
    """
    for epoch in range(self.num_epochs):
        # Generate this epoch's training instances.
        user_input, item_input, lables = self._get_input_all_data()
        total_loss = 0.0
        training_start_time = time()
        num_training_instances = len(user_input)
        # Full batches only; any trailing partial batch is dropped.
        for num_batch in range(int(num_training_instances / self.batch_size)):
            id_start = num_batch * self.batch_size
            id_end = id_start + self.batch_size
            bat_users = user_input[id_start:id_end].tolist()
            bat_items = item_input[id_start:id_end].tolist()
            bat_lables = np.array(lables[id_start:id_end])
            feed_dict = {self.one_hot_u: bat_users,
                         self.one_hot_v: bat_items,
                         self.lables: bat_lables}
            loss, _ = self.sess.run((self.loss, self.optimizer),
                                    feed_dict=feed_dict)
            total_loss += loss
        print("[iter %d : loss : %f, time: %f]" % (
            epoch + 1, total_loss / num_training_instances,
            time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Run the pairwise training loop with social-item and suk inputs.

    Improvements over the previous revision: the loop-invariant instance
    count is computed once per epoch instead of once per batch, and the
    unreachable end-clamping branch is removed — the loop bound visits
    full batches only, so id_end can never exceed the instance count.
    """
    for epoch in range(self.num_epochs):
        # Generate this epoch's (user, pos, social, neg, suk) instances.
        user_input, item_input_pos, item_input_social, item_input_neg, suk_input = \
            self._get_pairwise_all_data()
        total_loss = 0.0
        training_start_time = time()
        num_training_instances = len(user_input)
        # Full batches only; any trailing partial batch is dropped.
        for num_batch in range(int(num_training_instances / self.batch_size)):
            id_start = num_batch * self.batch_size
            id_end = id_start + self.batch_size
            bat_users = user_input[id_start:id_end]
            bat_items_pos = item_input_pos[id_start:id_end]
            bat_items_social = item_input_social[id_start:id_end]
            bat_items_neg = item_input_neg[id_start:id_end]
            bat_suk_input = suk_input[id_start:id_end]
            feed_dict = {self.user_input: bat_users,
                         self.item_input: bat_items_pos,
                         self.item_input_social: bat_items_social,
                         self.item_input_neg: bat_items_neg,
                         self.suk: bat_suk_input}
            loss, _ = self.sess.run((self.loss, self.optimizer),
                                    feed_dict=feed_dict)
            total_loss += loss
        print("[iter %d : loss : %f, time: %f]" %
              (epoch + 1, total_loss / num_training_instances,
               time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Alternate discriminator and generator training.

    Bug fix: the generator loop previously sliced `dis_batch_index`,
    leaving the separately shuffled `gen_batch_index` entirely unused;
    the generator now uses its own permutation as clearly intended.
    Also collapses the redundant two-step `totalEpochs` assignment.
    """
    gen_batch_index = np.arange(self.num_users)
    np.random.shuffle(gen_batch_index)
    dis_batch_index = np.arange(self.num_users)
    np.random.shuffle(dis_batch_index)
    # Each outer epoch runs step_G generator passes, so scale down.
    totalEpochs = int(self.epochs / self.step_G)
    for epoch in range(totalEpochs):
        train_matrix, ZR_matrix, PM_matrix = self.get_train_data()
        # --- training discriminator ---
        for d_epoch in range(self.step_D):
            for start in np.arange(0, self.num_users, step=self.batchSize_D):
                idx = dis_batch_index[start:start + self.batchSize_D]
                train_data = train_matrix[idx].toarray()
                train_mask = PM_matrix[idx].toarray()
                feed = {self.realData: train_data,
                        self.mask: train_mask,
                        self.condition: train_data}
                self.sess.run(self.trainer_D, feed_dict=feed)
        # --- training generator ---
        for g_epoch in range(self.step_G):
            for start in np.arange(0, self.num_users, step=self.batchSize_G):
                # Was dis_batch_index — use the generator's own permutation.
                idx = gen_batch_index[start:start + self.batchSize_G]
                train_data = train_matrix[idx].toarray()
                train_z_mask = ZR_matrix[idx].toarray()
                train_p_mask = PM_matrix[idx].toarray()
                feed = {self.realData: train_data,
                        self.condition: train_data,
                        self.mask: train_p_mask,
                        self.G_ZR_dims: train_z_mask}
                self.sess.run(self.trainer_G, feed_dict=feed)
        self.eval_rating_matrix()
        Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Alternately update user vectors then item vectors once per epoch.

    Each user/item update feeds the corresponding column of the Pui/Cui
    matrices (reshaped to column vectors) to a dedicated update op.
    """
    for epoch in range(self.num_epochs):
        training_start_time = time()
        print('solving for user vectors...')
        for uid in range(self.num_users):
            user_feed = {self.user_id: [uid],
                         self.Pu: self.Pui[uid].T.reshape([-1, 1]),
                         self.Cu: self.Cui[uid].T.reshape([-1, 1])}
            self.sess.run(self.update_user, feed_dict=user_feed)
        print('solving for item vectors...')
        for iid in range(self.num_items):
            item_feed = {self.item_id: [iid],
                         self.Pi: self.Pui[:, iid].reshape([-1, 1]),
                         self.Ci: self.Cui[:, iid].reshape([-1, 1])}
            self.sess.run(self.update_item, feed_dict=item_feed)
        print('iteration %i finished in %f seconds' %
              (epoch + 1, time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Adversarial training loop: d_epoch discriminator steps, then
    g_epoch generator steps, evaluating after each outer epoch."""
    for _ in range(self.epochs):
        for _ in range(self.d_epoch):
            users, items, labels = self.get_train_data()
            self.training_discriminator(users, items, labels)
        for _ in range(self.g_epoch):
            self.training_generator()
        Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Train the model with an annealed coefficient fed via self.anneal_ph.

    Improvements over the previous revision: removes the duplicated
    `feed_dict = feed_dict = {...}` assignment, hoists the loop-invariant
    `self.dataset.trainDict` lookup out of the batch loop, and replaces
    the manual `batch_uid` counter with `enumerate`.
    """
    update_count = 0.0  # total number of gradient updates, drives annealing
    trainDict = self.dataset.trainDict  # invariant across epochs and batches
    for epoch in range(self.num_epochs):
        random_perm_doc_idx = np.random.permutation(self.num_users)
        self.total_batch = self.num_users
        total_loss = 0.0
        training_start_time = time()
        num_training_instances = self.num_users
        for num_batch in range(int(num_training_instances / self.batch_size)):
            # NOTE(review): total_batch is num_users, so with batch_size > 1
            # the first branch never fires and the trailing partial batch is
            # dropped by the loop bound — original behavior preserved.
            if num_batch == self.total_batch - 1:
                batch_set_idx = random_perm_doc_idx[num_batch * self.batch_size:]
            elif num_batch < self.total_batch - 1:
                batch_set_idx = random_perm_doc_idx[
                    num_batch * self.batch_size:(num_batch + 1) * self.batch_size]
            # Binary user-item interaction rows for this batch of users.
            batch_matrix = np.zeros((len(batch_set_idx), self.num_items))
            if self.total_anneal_steps > 0:
                anneal = min(self.anneal_cap,
                             1. * update_count / self.total_anneal_steps)
            else:
                anneal = self.anneal_cap
            for batch_uid, userid in enumerate(batch_set_idx):
                for itemid in trainDict[userid]:
                    batch_matrix[batch_uid, itemid] = 1
            feed_dict = {self.input_ph: batch_matrix,
                         self.keep_prob_ph: 0.5,
                         self.anneal_ph: anneal,
                         self.is_training_ph: 1}
            _, loss = self.sess.run([self.optimizer, self.loss],
                                    feed_dict=feed_dict)
            total_loss += loss
            update_count += 1
        print("[iter %d : loss : %f, time: %f]" %
              (epoch + 1, total_loss / num_training_instances,
               time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Train with per-epoch binomial input corruption masks.

    Improvements over the previous revision: removes the duplicated
    `feed_dict = feed_dict = {...}` assignment, hoists the loop-invariant
    `self.dataset.trainDict` lookup out of the batch loop, and replaces
    the manual `batch_uid` counter with `enumerate`.
    """
    trainDict = self.dataset.trainDict  # invariant across epochs and batches
    for epoch in range(self.num_epochs):
        # Fresh binomial keep-mask (1 = keep) over the full rating matrix.
        mask_corruption_np = np.random.binomial(
            1, 1 - self.corruption_level, (self.num_users, self.num_items))
        random_perm_doc_idx = np.random.permutation(self.num_users)
        self.total_batch = self.num_users
        total_loss = 0.0
        training_start_time = time()
        num_training_instances = self.num_users
        for num_batch in range(int(num_training_instances / self.batch_size)):
            # NOTE(review): total_batch is num_users, so with batch_size > 1
            # the first branch never fires and the trailing partial batch is
            # dropped by the loop bound — original behavior preserved.
            if num_batch == self.total_batch - 1:
                batch_set_idx = random_perm_doc_idx[num_batch * self.batch_size:]
            elif num_batch < self.total_batch - 1:
                batch_set_idx = random_perm_doc_idx[
                    num_batch * self.batch_size:(num_batch + 1) * self.batch_size]
            # Binary user-item interaction rows for this batch of users.
            batch_matrix = np.zeros((len(batch_set_idx), self.num_items))
            for batch_uid, userid in enumerate(batch_set_idx):
                for itemid in trainDict[userid]:
                    batch_matrix[batch_uid, itemid] = 1
            feed_dict = {self.mask_corruption: mask_corruption_np[batch_set_idx, :],
                         self.input_R: batch_matrix}
            _, loss = self.sess.run([self.optimizer, self.loss],
                                    feed_dict=feed_dict)
            total_loss += loss
        print("[iter %d : loss : %f, time: %f]" %
              (epoch + 1, total_loss / num_training_instances,
               time() - training_start_time))
        if epoch % self.verbose == 0:
            Evaluate.test_model(self, self.dataset)
def train_model(self):
    """Train by jointly iterating user-row and item-column mini-batches.

    Improvement over the previous revision: removes the dead `input_tmp`
    sub-matrix (sliced from train_R every inner batch but never fed to
    the graph or otherwise used).
    """
    for epoch in range(self.num_epochs):
        random_row_idx = np.random.permutation(self.num_users)  # randomly permute the rows
        random_col_idx = np.random.permutation(self.num_items)  # randomly permute the cols
        training_start_time = time()
        total_loss = 0.0
        for i in range(self.num_batch_U):  # iterate each user batch
            # Last batch takes the remainder of the permuted rows.
            if i == self.num_batch_U - 1:
                row_idx = random_row_idx[i * self.batch_size:]
            else:
                row_idx = random_row_idx[(i * self.batch_size):((i + 1) * self.batch_size)]
            for j in range(self.num_batch_I):
                # Indices of the current item batch (last takes remainder).
                if j == self.num_batch_I - 1:
                    col_idx = random_col_idx[j * self.batch_size:]
                else:
                    col_idx = random_col_idx[(j * self.batch_size):((j + 1) * self.batch_size)]
                p_input, n_input = self.pairwise_neg_sampling(row_idx, col_idx)
                input_R_U = self.train_R[row_idx, :]
                input_R_I = self.train_R[:, col_idx]
                # Optimize on this (user rows x item cols) minibatch.
                _, loss = self.sess.run(
                    [self.optimizer, self.cost],
                    feed_dict={self.input_R_U: input_R_U,
                               self.input_R_I: input_R_I,
                               self.input_OH_I: self.I_OH_mat[col_idx, :],
                               self.input_P_cor: p_input,
                               self.input_N_cor: n_input,
                               self.row_idx: np.reshape(row_idx, (len(row_idx), 1)),
                               self.col_idx: np.reshape(col_idx, (len(col_idx), 1))})
                total_loss += loss
        print("[iter %d : total_loss : %f, time: %f]" %
              (epoch + 1, total_loss, time() - training_start_time))
        if epoch % self.verbose == 0:
            self.eval_rating_matrix()
            Evaluate.test_model(self, self.dataset)
model = TransRec(sess, dataset) elif recommender.lower() == "cdae": model = CDAE(sess, dataset) elif recommender.lower() == "dae": model = DAE(sess, dataset) elif recommender.lower() == "npe": model = NPE(sess, dataset) elif recommender.lower() == "multidae": model = MultiDAE(sess, dataset) elif recommender.lower() == "multivae": model = MultiVAE(sess, dataset) elif recommender.lower() == "irgan": model = IRGAN(sess, dataset) elif recommender.lower() == "cfgan": model = CFGAN(sess, dataset) elif recommender.lower() == "jca": model = JCA(sess, dataset) model.build_graph() sess.run(tf.global_variables_initializer()) model.train_model() Evaluate.test_model(model, dataset, num_thread)