def evaluate(self, epoch, iteration, summary):
    """
    Runs evaluation on test dataset.

    :param epoch: index of the current epoch
    :param iteration: index of the current iteration
    :param summary: if True prints summary
    """
    batch_time = AverageMeter(False)
    tot_tok_per_sec = AverageMeter(False)
    iterations = AverageMeter(False)
    enc_seq_len = AverageMeter(False)
    dec_seq_len = AverageMeter(False)
    stats = {}

    output = []

    for i, (src, indices) in enumerate(self.loader):
        translate_timer = time.time()
        src, src_length = src

        batch_size = self.loader.batch_size
        global_batch_size = batch_size * get_world_size()
        beam_size = self.beam_size

        # one BOS token per (sentence, beam) pair
        bos = [self.insert_target_start] * (batch_size * beam_size)
        bos = torch.LongTensor(bos)
        if self.batch_first:
            bos = bos.view(-1, 1)
        else:
            bos = bos.view(1, -1)

        src_length = torch.LongTensor(src_length)
        stats['total_enc_len'] = int(src_length.sum())

        if self.cuda:
            src = src.cuda()
            src_length = src_length.cuda()
            bos = bos.cuda()

        with torch.no_grad():
            context = self.model.encode(src, src_length)
            context = [context, src_length, None]

            if beam_size == 1:
                generator = self.generator.greedy_search
            else:
                generator = self.generator.beam_search
            preds, lengths, counter = generator(batch_size, bos, context)

        stats['total_dec_len'] = lengths.sum().item()
        stats['iters'] = counter

        # restore the original sentence order undone by length-sorting
        indices = torch.tensor(indices).to(preds)
        preds = preds.scatter(0, indices.unsqueeze(1).expand_as(preds), preds)
        preds = gather_predictions(preds).cpu()

        for pred in preds:
            pred = pred.tolist()
            detok = self.tokenizer.detokenize(pred)
            output.append(detok + '\n')

        elapsed = time.time() - translate_timer
        batch_time.update(elapsed, batch_size)

        total_tokens = stats['total_dec_len'] + stats['total_enc_len']
        ttps = total_tokens / elapsed
        tot_tok_per_sec.update(ttps, batch_size)

        iterations.update(stats['iters'])
        enc_seq_len.update(stats['total_enc_len'] / batch_size, batch_size)
        dec_seq_len.update(stats['total_dec_len'] / batch_size, batch_size)

        if i % self.print_freq == 0:
            log = []
            log += [f'TEST ']
            if epoch is not None:
                log += [f'[{epoch}]']
            if iteration is not None:
                log += [f'[{iteration}]']
            log += [f'[{i}/{len(self.loader)}]\t']
            log += [f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t']
            log += [f'Decoder iters {iterations.val:.1f} ({iterations.avg:.1f})\t']
            log += [f'Tok/s {tot_tok_per_sec.val:.0f} ({tot_tok_per_sec.avg:.0f})']
            log = ''.join(log)
            logging.info(log)

    # aggregate metrics across distributed workers
    tot_tok_per_sec.reduce('sum')
    enc_seq_len.reduce('mean')
    dec_seq_len.reduce('mean')
    batch_time.reduce('mean')
    iterations.reduce('sum')

    if summary and get_rank() == 0:
        time_per_sentence = (batch_time.avg / global_batch_size)
        log = []
        log += [f'TEST SUMMARY:\n']
        log += [f'Lines translated: {len(self.loader.dataset)}\t']
        log += [f'Avg total tokens/s: {tot_tok_per_sec.avg:.0f}\n']
        log += [f'Avg time per batch: {batch_time.avg:.3f} s\t']
        log += [f'Avg time per sentence: {1000*time_per_sentence:.3f} ms\n']
        log += [f'Avg encoder seq len: {enc_seq_len.avg:.2f}\t']
        log += [f'Avg decoder seq len: {dec_seq_len.avg:.2f}\t']
        log += [f'Total decoder iterations: {int(iterations.sum)}']
        log = ''.join(log)
        logging.info(log)

    return output
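# AverageMeter is not defined in this file. Below is a minimal sketch of the
# interface the code above relies on (update/val/avg/sum plus a distributed
# reduce); the real implementation may differ. The boolean constructor
# argument is assumed to control whether the first measurement (often a
# warmup outlier) is skipped, and the 'sum'/'mean' reduce semantics here are
# an assumption inferred from how the meters are used.
import torch
import torch.distributed as dist

class AverageMeterSketch:
    def __init__(self, skip_first=True):
        self.skip_first = skip_first
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0
        self.skipped = False

    def update(self, val, n=1):
        self.val = val
        if self.skip_first and not self.skipped:
            # drop the first sample (e.g. CUDA warmup) from the running stats
            self.skipped = True
            return
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def reduce(self, op):
        # combine per-worker averages: 'sum' for throughput-style totals,
        # 'mean' for latency-style metrics; no-op outside distributed runs
        if not (dist.is_available() and dist.is_initialized()):
            return
        avg = torch.tensor([self.avg], dtype=torch.float64).cuda()
        dist.all_reduce(avg, op=dist.ReduceOp.SUM)
        if op == 'mean':
            avg /= dist.get_world_size()
        self.avg = avg.item()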
def feed_data(self, data_loader, training=True):
    """
    Runs training or validation on batches from data_loader.

    :param data_loader: data loader
    :param training: if True runs training else runs validation
    """
    if training:
        assert self.optimizer is not None
        # schedule intra-epoch BLEU evaluations at evenly spaced iterations,
        # aligned to optimizer-update boundaries
        eval_fractions = np.linspace(0, 1, self.intra_epoch_eval + 2)[1:-1]
        iters_with_update = len(data_loader) // self.iter_size
        eval_iters = (eval_fractions * iters_with_update).astype(int)
        eval_iters = eval_iters * self.iter_size
        eval_iters = set(eval_iters)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_per_token = AverageMeter(skip_first=False)
    losses_per_sentence = AverageMeter(skip_first=False)

    tot_tok_time = AverageMeter()
    src_tok_time = AverageMeter()
    tgt_tok_time = AverageMeter()

    batch_size = data_loader.batch_size

    end = time.time()
    for i, (src, tgt) in enumerate(data_loader):
        self.save_counter += 1
        # measure data loading time
        data_time.update(time.time() - end)

        # apply the optimizer update only on the last of iter_size batches
        update = False
        if i % self.iter_size == self.iter_size - 1:
            update = True

        # do a train/evaluate iteration
        stats = self.iterate(src, tgt, update, training=training)
        loss_per_token, loss_per_sentence, num_toks = stats

        # measure accuracy and record loss
        losses_per_token.update(loss_per_token, num_toks['tgt'])
        losses_per_sentence.update(loss_per_sentence, batch_size)

        # measure elapsed time
        elapsed = time.time() - end
        batch_time.update(elapsed)
        src_tok_time.update(num_toks['src'] / elapsed)
        tgt_tok_time.update(num_toks['tgt'] / elapsed)
        tot_num_toks = num_toks['tgt'] + num_toks['src']
        tot_tok_time.update(tot_num_toks / elapsed)
        self.loss = losses_per_token.avg

        if training and i in eval_iters:
            test_bleu, _ = self.translator.run(calc_bleu=True,
                                               epoch=self.epoch,
                                               iteration=i)
            log = []
            log += [f'TRAIN [{self.epoch}][{i}/{len(data_loader)}]']
            log += [f'BLEU: {test_bleu:.2f}']
            log = '\t'.join(log)
            logging.info(log)

            self.model.train()
            self.preallocate(data_loader, training=True)

        if i % self.print_freq == 0:
            phase = 'TRAIN' if training else 'VALIDATION'
            log = []
            log += [f'{phase} [{self.epoch}][{i}/{len(data_loader)}]']
            log += [f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})']
            log += [f'Data {data_time.val:.2e} ({data_time.avg:.2e})']
            log += [f'Tok/s {tot_tok_time.val:.0f} ({tot_tok_time.avg:.0f})']
            if self.verbose:
                log += [f'Src tok/s {src_tok_time.val:.0f} ({src_tok_time.avg:.0f})']
                log += [f'Tgt tok/s {tgt_tok_time.val:.0f} ({tgt_tok_time.avg:.0f})']
                log += [f'Loss/sentence {losses_per_sentence.val:.1f} ({losses_per_sentence.avg:.1f})']
            log += [f'Loss/tok {losses_per_token.val:.4f} ({losses_per_token.avg:.4f})']
            if training:
                lr = self.optimizer.param_groups[0]['lr']
                log += [f'LR {lr:.3e}']
            log = '\t'.join(log)
            logging.info(log)

        save_chkpt = (self.save_counter % self.save_freq) == (self.save_freq - 1)
        if training and save_chkpt:
            self.save_counter = 0
            self.save_info['iteration'] = i

            identifier = next(self.checkpoint_counter, -1)
            if identifier != -1:
                # only rank 0 writes the checkpoint
                with sync_workers() as rank:
                    if rank == 0:
                        self.save(identifier=identifier)

        end = time.time()

    tot_tok_time.reduce('sum')
    losses_per_token.reduce('mean')

    return losses_per_token.avg, tot_tok_time.avg
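# The `update` flag passed to self.iterate above drives gradient accumulation:
# gradients from `iter_size` consecutive batches are summed, and the optimizer
# steps only on the last one (note how eval_iters is aligned to multiples of
# iter_size for the same reason). `iterate` is defined elsewhere in this
# class; the sketch below, with hypothetical model/criterion/optimizer
# arguments, shows only the accumulation pattern it implies.
def _iterate_sketch(model, criterion, optimizer, src, tgt, update, iter_size):
    output = model(src)
    loss = criterion(output, tgt)
    # scale so the accumulated gradient matches one large-batch step
    (loss / iter_size).backward()
    if update:
        optimizer.step()
        optimizer.zero_grad()
    return loss.item()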
def feed_data(self, data_loader, training=True):
    """
    Runs training or validation on batches from data_loader,
    profiling per-layer forward/backward times for the first
    NUM_STEPS_TO_PROFILE steps.

    :param data_loader: data loader
    :param training: if True runs training else runs validation
    """
    if training:
        assert self.optimizer is not None
        eval_fractions = np.linspace(0, 1, self.intra_epoch_eval + 2)[1:-1]
        eval_iters = (eval_fractions * len(data_loader)).astype(int)
        eval_iters = set(eval_iters)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_per_token = AverageMeter()
    losses_per_sentence = AverageMeter()

    tot_tok_time = AverageMeter()
    src_tok_time = AverageMeter()
    tgt_tok_time = AverageMeter()

    batch_size = data_loader.batch_size
    layer_timestamps = []
    verbose = True
    module_whitelist = ["EmuBidirLSTM", "RecurrentAttention", "Classifier"]

    # grab a single batch to build the model summary
    for i, (src, tgt) in enumerate(data_loader):
        break
    (src, src_length) = src
    (tgt, tgt_length) = tgt
    src_length = torch.LongTensor(src_length).cuda()
    src = src.cuda()
    tgt = tgt.cuda()
    model_input = (src, src_length, tgt[:-1])
    summary = torchsummary.summary(model=self.model,
                                   module_whitelist=module_whitelist,
                                   model_input=model_input,
                                   verbose=True)

    end = time.time()
    NUM_STEPS_TO_PROFILE = 100  # profile 100 steps
    for i, (src, tgt) in enumerate(data_loader):
        self.save_counter += 1
        # measure data loading time
        data_time.update(time.time() - end)

        with torchprofiler.Profiling(self.model, module_whitelist) as p:
            # do a train/evaluate iteration
            stats = self.iterate(src, tgt, training=training)
            loss_per_token, loss_per_sentence, num_toks = stats
        print(str(p))
        layer_timestamps.append(p.processed_times())

        # measure accuracy and record loss
        losses_per_token.update(loss_per_token, num_toks['tgt'])
        losses_per_sentence.update(loss_per_sentence, batch_size)

        # measure elapsed time
        elapsed = time.time() - end
        batch_time.update(elapsed)
        src_tok_time.update(num_toks['src'] / elapsed)
        tgt_tok_time.update(num_toks['tgt'] / elapsed)
        tot_num_toks = num_toks['tgt'] + num_toks['src']
        tot_tok_time.update(tot_num_toks / elapsed)
        self.loss = losses_per_token.avg

        if training and i in eval_iters:
            test_bleu, _ = self.translator.run(calc_bleu=True,
                                               epoch=self.epoch,
                                               iteration=i)
            log = []
            log += [f'TRAIN [{self.epoch}][{i}/{len(data_loader)}]']
            log += [f'BLEU: {test_bleu:.2f}']
            log = '\t'.join(log)
            logging.info(log)

            self.model.train()
            self.preallocate(data_loader, training=True)

        if i % self.print_freq == 0:
            phase = 'TRAIN' if training else 'VALIDATION'
            log = []
            log += [f'{phase} [{self.epoch}][{i}/{len(data_loader)}]']
            log += [f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})']
            log += [f'Data {data_time.val:.5f} ({data_time.avg:.5f})']
            log += [f'Tok/s {tot_tok_time.val:.0f} ({tot_tok_time.avg:.0f})']
            if self.verbose:
                log += [f'Src tok/s {src_tok_time.val:.0f} ({src_tok_time.avg:.0f})']
                log += [f'Tgt tok/s {tgt_tok_time.val:.0f} ({tgt_tok_time.avg:.0f})']
                log += [f'Loss/sentence {losses_per_sentence.val:.1f} ({losses_per_sentence.avg:.1f})']
            log += [f'Loss/tok {losses_per_token.val:.4f} ({losses_per_token.avg:.4f})']
            lr = [param_group['lr'] for param_group in self.optimizer.param_groups]
            log += [f'Learning Rate {lr}']
            log = '\t'.join(log)
            logging.info(log)

        if i >= NUM_STEPS_TO_PROFILE:
            break

        save_chkpt = (self.save_counter % self.save_freq) == (self.save_freq - 1)
        if training and save_chkpt:
            self.save_counter = 0
            self.save_info['iteration'] = i

            identifier = next(self.checkpoint_counter, -1)
            if identifier != -1:
                with sync_workers() as rank:
                    if rank == 0:
                        self.save(identifier=identifier)

        end = time.time()

    if verbose:
        print("\n==========================================================")
        print("Layer Type    Forward Time (ms)    Backward Time (ms)")
        print("==========================================================")
    # average each layer's forward/backward time over the profiled steps
    tot_accounted_time = 0.0
    per_layer_times = []
    for i in range(len(layer_timestamps[0])):
        layer_type = str(layer_timestamps[0][i][0])
        layer_forward_time_sum = 0.0
        layer_backward_time_sum = 0.0
        for j in range(len(layer_timestamps)):
            layer_forward_time_sum += (layer_timestamps[j][i][2] / 1000)
            layer_backward_time_sum += (layer_timestamps[j][i][5] / 1000)
        per_layer_times.append(
            (layer_type,
             layer_forward_time_sum / len(layer_timestamps),
             layer_backward_time_sum / len(layer_timestamps)))
        if verbose:
            print(per_layer_times[-1][0], per_layer_times[-1][1],
                  per_layer_times[-1][2])
        tot_accounted_time += (per_layer_times[-1][1] +
                               per_layer_times[-1][2])
    print("Total accounted time: %.3f ms" % tot_accounted_time)

    # attach each profiled layer's timings to its entry in the model summary,
    # filling each matching summary entry at most once
    while len(per_layer_times) > 0:
        per_layer_time = per_layer_times.pop(0)
        for summary_i in range(len(summary)):
            summary_elem = summary[summary_i]
            if str(summary_elem['layer_name']) != str(per_layer_time[0]):
                continue
            if 'forward_time' in summary_elem and 'backward_time' in summary_elem:
                continue
            summary_elem['forward_time'] = per_layer_time[1]
            summary_elem['backward_time'] = per_layer_time[2]
            break

    if training:
        create_graph(self.model, module_whitelist, (src, tgt), summary,
                     os.path.join("profiles", self.arch))

    tot_tok_time.reduce('sum')
    losses_per_token.reduce('mean')

    return losses_per_token.avg, tot_tok_time.avg