def validate(self, dataset, epoch):
    self.model.eval()
    total_loss = 0.0
    batch_size = self.config.batch_size
    indices = torch.randperm(len(dataset))
    if self.config.cuda:
        indices = indices.cuda()
    batches = list(get_batches(indices, batch_size))
    # len(batches) is the exact batch count; the previous floor(...) + 1
    # overcounted when the dataset size was an exact multiple of batch_size.
    with torch.no_grad():  # no autograd bookkeeping needed for validation
        for i, batch in tqdm(enumerate(batches),
                             desc='Testing epoch ' + str(epoch + 1),
                             total=len(batches)):
            X, y = dataset.get_batch(batch)
            # Var: assumed project alias for torch.autograd.Variable
            X, y = Var(X, requires_grad=False), Var(y, requires_grad=False)
            loss = self.model.forward_train(X, y)
            total_loss += loss.item()
            logging.debug('Validation batch {}, loss {}'.format(i, loss.item()))
    return total_loss / len(dataset)
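# The `get_batches` helper is referenced throughout but defined elsewhere in
# the repo. Judging from its use with list() and next() above, it is a
# generator over fixed-size slices of the shuffled index tensor; a minimal
# sketch under that assumption (not the repo's actual definition):
#
#   def get_batches(indices, batch_size):
#       # Yield consecutive batch_size-long slices; the final slice is
#       # shorter when len(indices) is not a multiple of batch_size.
#       for start in range(0, len(indices), batch_size):
#           yield indices[start:start + batch_size]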
def train(self, dataset, epoch, st_batch=None):
    self.model.train()
    self.optimizer.zero_grad()
    total_loss = 0.0
    batch_size = self.config.batch_size
    indices = torch.randperm(len(dataset))
    if self.config.cuda:
        indices = indices.cuda()
    batches = list(get_batches(indices, batch_size))
    if st_batch:
        # Resume a partially completed epoch by skipping already-seen batches.
        batches = batches[st_batch:]
    for i, batch in tqdm(enumerate(batches),
                         desc='Training epoch ' + str(epoch + 1),
                         total=len(batches)):
        trees, queries, tgt_node_seq, tgt_par_rule_seq, tgt_par_t_seq, \
            tgt_action_seq, tgt_action_seq_type = dataset.get_batch(batch)
        loss = self.model.forward_train(trees, queries, tgt_node_seq,
                                        tgt_action_seq, tgt_par_rule_seq,
                                        tgt_par_t_seq, tgt_action_seq_type)
        # .item(), not the deprecated loss.data[0] / loss[0] indexing,
        # which fails on 0-dim tensors in current PyTorch.
        loss_val = loss.item()
        assert loss_val >= 0, "NLL cannot be negative"
        total_loss += loss_val
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        logging.debug('Batch {}, loss {}'.format(i + 1, loss_val))
    return total_loss / len(dataset)
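# Hedged usage sketch for the st_batch resume hook: after an interruption,
# reload a checkpoint and skip the batches the unfinished epoch already
# processed. The trainer object, checkpoint keys, and .pt path below are
# illustrative assumptions, not taken from the repo.
#
#   state = torch.load('checkpoint.pt')
#   trainer.model.load_state_dict(state['model'])
#   trainer.optimizer.load_state_dict(state['optimizer'])
#   trainer.train(train_set, epoch=state['epoch'], st_batch=state['batch'])
#
# Caveat: torch.randperm draws a fresh permutation each call, so skipping
# st_batch batches only reproduces the interrupted epoch exactly if the RNG
# state was checkpointed as well.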
def train(self, dataset, epoch):
    self.model.train()
    self.optimizer.zero_grad()
    total_loss = 0.0
    batch_size = self.config.batch_size
    indices = torch.randperm(len(dataset))
    if self.config.cuda:
        indices = indices.cuda()
    batches = list(get_batches(indices, batch_size))
    for i, batch in tqdm(enumerate(batches),
                         desc='Training epoch ' + str(epoch + 1),
                         total=len(batches)):
        X, y = dataset.get_batch(batch)
        # Inputs never need gradients; only the model parameters do.
        X, y = Var(X, requires_grad=False), Var(y, requires_grad=False)
        loss = self.model.forward_train(X, y)
        total_loss += loss.item()
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        logging.debug('Batch {}, loss {}'.format(i + 1, loss.item()))
    return total_loss / len(dataset)
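# A minimal sketch of an outer epoch loop driving train()/validate() with
# best-model saving; the trainer/config/dataset names and the checkpoint
# path are assumptions for illustration only.
#
#   best_loss = float('inf')
#   for epoch in range(config.epochs):
#       train_loss = trainer.train(train_set, epoch)
#       val_loss = trainer.validate(dev_set, epoch)
#       logging.info('Epoch {}: train {:.4f}, val {:.4f}'.format(
#           epoch + 1, train_loss, val_loss))
#       if val_loss < best_loss:
#           best_loss = val_loss
#           torch.save(trainer.model.state_dict(), 'best_model.pt')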
def visualize(self, dataset, writer):
    self.model.train()
    self.optimizer.zero_grad()
    batch_size = 2
    indices = torch.randperm(len(dataset))
    batch = next(get_batches(indices, batch_size))
    trees, queries, tgt_node_seq, tgt_par_rule_seq, tgt_par_t_seq, \
        tgt_action_seq, tgt_action_seq_type = dataset.get_batch(batch)
    loss = self.model.forward_train(trees, queries, tgt_node_seq,
                                    tgt_action_seq, tgt_par_rule_seq,
                                    tgt_par_t_seq, tgt_action_seq_type)
    assert loss.item() >= 0, "NLL cannot be negative"
    loss.backward()
    # Trace the computation graph backward from the loss for visualization.
    writer.add_graph(self.model, loss)
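# Hedged usage note: passing the loss to add_graph matches the early
# tensorboardX API, which traced the graph backward from an output's
# grad_fn; modern torch.utils.tensorboard instead expects
# add_graph(model, input_to_model). The writer setup below is an
# illustrative assumption, not from the repo.
#
#   from tensorboardX import SummaryWriter
#   writer = SummaryWriter('runs/graph')
#   trainer.visualize(train_set, writer)
#   writer.close()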