def end_of_iter(self, loss_dicts, output_list, model):
    """End-of-iteration housekeeping for the training loop.

    On the master process this logs/plots current losses at
    ``print_freq`` intervals, optionally displays output images and a
    GPU-memory report, then (on every call) saves the latest model and
    reports whether the epoch has finished.

    Args:
        loss_dicts: mapping of loss name -> loss value; values are either
            plain ints or tensor-like objects exposing ``.data.item()``
            (presumably torch tensors — TODO confirm against callers).
        output_list: model outputs forwarded to ``save_all_tensors`` for
            visualization.
        model: the model being trained, forwarded to the save helpers.

    Returns:
        bool: True when fewer than one full batch remains in the epoch
        (i.e. the epoch is done), else False.
    """
    opt = self.opt
    epoch, epoch_iter, print_freq, total_steps = self.epoch, self.epoch_iter, self.print_freq, self.total_steps

    ############## Display results and errors ##########
    ### print out errors
    if is_master() and total_steps % print_freq == 0:
        # Average wall-clock seconds per iteration since the last print.
        t = (time.time() - self.iter_start_time) / print_freq
        # Loss values may be plain ints (kept as-is) or tensor-like
        # objects, which are reduced to Python scalars for logging.
        errors = {
            k: v.data.item() if not isinstance(v, int) else v
            for k, v in loss_dicts.items()
        }
        self.visualizer.print_current_errors(epoch, epoch_iter, errors, t)
        self.visualizer.plot_current_errors(errors, total_steps)

    ### display output images
    if is_master() and self.save:
        visuals = save_all_tensors(opt, output_list, model)
        self.visualizer.display_current_results(visuals, epoch, total_steps)
    if is_master() and opt.print_mem:
        # Best-effort GPU memory report via nvidia-smi.
        call([
            "nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"
        ])

    ### save latest model
    # NOTE: runs unconditionally every iteration; save_models is assumed
    # to gate the actual checkpoint frequency internally — TODO confirm.
    save_models(opt, epoch, epoch_iter, total_steps, self.visualizer,
                self.iter_path, model)
    # Epoch is complete when less than one full batch remains.
    if epoch_iter > self.dataset_size - opt.batchSize:
        return True
    return False
def vis_print(opt, message):
    """Echo *message* to stdout; on the master process of a real
    (non-debug) training run, also append it to the run's loss log."""
    print(message)
    should_log = is_master() and opt.isTrain and not opt.debug
    if not should_log:
        return
    log_path = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
    with open(log_path, "a") as fh:
        fh.write('%s\n' % message)
def mkdir(path):
    """Create *path* (including parents) on the master process only;
    silently succeeds if the directory already exists."""
    if not is_master():
        return
    os.makedirs(path, exist_ok=True)