# NOTE(review): this fragment is part of a demo script; `writer`, `sample_rate`,
# `freqs`, `n_iter`, `resnet18` and the pr-curve count arrays are presumably
# defined earlier in the file (not visible here) — confirm against full file.

# Build a 2-second cosine tone and log it as audio.
x = torch.zeros(sample_rate * 2)
for i in range(x.size(0)):
    # sound amplitude should be in [-1, 1]
    x[i] = np.cos(freqs[n_iter // 10] * np.pi * float(i) / float(sample_rate))
writer.add_audio('myAudio', x, n_iter)

# Plain text and a markdown table (rendered by tensorboard's text plugin).
writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)

# Histogram every parameter except the batch-norm ones (filtered by name).
for name, param in resnet18.named_parameters():
    if 'bn' not in name:
        writer.add_histogram(name, param, n_iter)

# PR curve from random labels / predictions.
writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(100), n_iter)  # needs tensorboard 0.4RC or later

# PR curve from precomputed raw counts.
writer.add_pr_curve_raw('prcurve with raw data', true_positive_counts,
                        false_positive_counts,
                        true_negative_counts,
                        false_negative_counts,
                        precision,
                        recall, n_iter)

# export scalar data to JSON for external processing
writer.export_scalars_to_json(
    # when running from the PyTorch_Tutorial/Code directory:
    # os.path.join("..", "..", "Result", "all_scalars.json"))
    # when running from the PyTorch_Tutorial directory:
    os.path.join(".", "Result", "all_scalars.json"))

# when running from the PyTorch_Tutorial/Code directory:
# dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"),
# when running from the PyTorch_Tutorial directory:
dataset = datasets.MNIST(os.path.join(".", "Data", "mnist"),
                         train=False,
                         download=True)
# NOTE(review): `test_data` is the legacy torchvision attribute name (newer
# versions expose `.data`) — verify the pinned torchvision version.
images = dataset.test_data[:100].float()
class Summarizer(object):
    """Gated proxy around a tensorboardX ``SummaryWriter``.

    Every ``add_*`` method is a silent no-op while ``self.report`` is
    False, and substitutes ``self.global_step`` whenever a call omits its
    own ``global_step``.  The underlying writer is created lazily via
    :meth:`initialize_writer`.
    """

    def __init__(self):
        # Reporting is disabled until the owner flips this flag.
        self.report = False
        # Default step used when an add_* call passes global_step=None.
        self.global_step = None
        # Created lazily by initialize_writer().
        self.writer = None

    def initialize_writer(self, log_dir):
        """Open the underlying SummaryWriter logging into *log_dir*."""
        self.writer = SummaryWriter(log_dir)

    def _step(self, global_step):
        """Return *global_step*, or the stored default when it is None."""
        if global_step is None and self.global_step is not None:
            return self.global_step
        return global_step

    def add_scalar(self, tag, scalar_value, global_step=None, walltime=None):
        if not self.report:
            return
        self.writer.add_scalar(tag, scalar_value,
                               global_step=self._step(global_step),
                               walltime=walltime)

    def add_scalars(self, main_tag, tag_scalar_dict, global_step=None,
                    walltime=None):
        if not self.report:
            return
        # BUG FIX: the original passed a stray ``self`` as the first
        # positional argument, shifting every parameter by one.
        self.writer.add_scalars(main_tag, tag_scalar_dict,
                                global_step=self._step(global_step),
                                walltime=walltime)

    def add_histogram(self, tag, values, global_step=None, bins='tensorflow',
                      walltime=None):
        if not self.report:
            return
        # cupy arrays cannot be consumed by tensorboardX; copy to host first.
        if isinstance(values, chainer.cuda.cupy.ndarray):
            values = chainer.cuda.to_cpu(values)
        self.writer.add_histogram(tag, values,
                                  global_step=self._step(global_step),
                                  bins=bins, walltime=walltime)

    def add_image(self, tag, img_tensor, global_step=None, walltime=None):
        if not self.report:
            return
        self.writer.add_image(tag, img_tensor,
                              global_step=self._step(global_step),
                              walltime=walltime)

    def add_image_with_boxes(self, tag, img_tensor, box_tensor,
                             global_step=None, walltime=None, **kwargs):
        if not self.report:
            return
        self.writer.add_image_with_boxes(tag, img_tensor, box_tensor,
                                         global_step=self._step(global_step),
                                         walltime=walltime, **kwargs)

    def add_figure(self, tag, figure, global_step=None, close=True,
                   walltime=None):
        if not self.report:
            return
        self.writer.add_figure(tag, figure,
                               global_step=self._step(global_step),
                               close=close, walltime=walltime)

    def add_video(self, tag, vid_tensor, global_step=None, fps=4,
                  walltime=None):
        if not self.report:
            return
        self.writer.add_video(tag, vid_tensor,
                              global_step=self._step(global_step),
                              fps=fps, walltime=walltime)

    def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100,
                  walltime=None):
        if not self.report:
            return
        self.writer.add_audio(tag, snd_tensor,
                              global_step=self._step(global_step),
                              sample_rate=sample_rate, walltime=walltime)

    def add_text(self, tag, text_string, global_step=None, walltime=None):
        if not self.report:
            return
        self.writer.add_text(tag, text_string,
                             global_step=self._step(global_step),
                             walltime=walltime)

    def add_graph_onnx(self, prototxt):
        if not self.report:
            return
        # BUG FIX: the original passed a stray ``self`` here as well.
        self.writer.add_graph_onnx(prototxt)

    def add_graph(self, model, input_to_model=None, verbose=False, **kwargs):
        if not self.report:
            return
        self.writer.add_graph(model, input_to_model=input_to_model,
                              verbose=verbose, **kwargs)

    def add_embedding(self, mat, metadata=None, label_img=None,
                      global_step=None, tag='default', metadata_header=None):
        if not self.report:
            return
        self.writer.add_embedding(mat, metadata=metadata, label_img=label_img,
                                  global_step=self._step(global_step),
                                  tag=tag, metadata_header=metadata_header)

    def add_pr_curve(self, tag, labels, predictions, global_step=None,
                     num_thresholds=127, weights=None, walltime=None):
        if not self.report:
            return
        self.writer.add_pr_curve(tag, labels, predictions,
                                 global_step=self._step(global_step),
                                 num_thresholds=num_thresholds,
                                 weights=weights, walltime=walltime)

    def add_pr_curve_raw(self, tag, true_positive_counts,
                         false_positive_counts, true_negative_counts,
                         false_negative_counts, precision, recall,
                         global_step=None, num_thresholds=127, weights=None,
                         walltime=None):
        if not self.report:
            return
        self.writer.add_pr_curve_raw(tag, true_positive_counts,
                                     false_positive_counts,
                                     true_negative_counts,
                                     false_negative_counts,
                                     precision, recall,
                                     global_step=self._step(global_step),
                                     num_thresholds=num_thresholds,
                                     weights=weights, walltime=walltime)

    def add_custom_scalars_multilinechart(self, tags, category='default',
                                          title='untitled'):
        if not self.report:
            return
        self.writer.add_custom_scalars_multilinechart(tags, category=category,
                                                      title=title)

    def add_custom_scalars_marginchart(self, tags, category='default',
                                       title='untitled'):
        if not self.report:
            return
        self.writer.add_custom_scalars_marginchart(tags, category=category,
                                                   title=title)

    def add_custom_scalars(self, layout):
        if not self.report:
            return
        self.writer.add_custom_scalars(layout)
def validate(model, loss, dataloader, epoch: int, metrics=None,
             summary_writer: SummaryWriter = None):
    """Run one validation epoch and log batch/epoch statistics.

    Args:
        model: network under evaluation; inputs are moved to CUDA, so the
            model is expected to live on the GPU.
        loss: criterion mapping (outputs, targets) to a scalar tensor.
        dataloader: sized iterable yielding (x, y) batches.
        epoch: current epoch index, used to derive the global step.
        metrics: optional mapping name -> metric(outputs, y) returning a
            scalar tensor.  Defaults to no metrics.  (Replaces the
            original mutable ``dict()`` default argument.)
        summary_writer: optional tensorboard writer; nothing is logged
            when None.

    Returns:
        tuple: (losses, valid_scores) — an AverageMeter of the loss and a
        dict of AverageMeters, one per metric name.
    """
    if metrics is None:
        metrics = {}
    losses = AverageMeter()
    pr_meter = PRCurveMeter()
    valid_scores = {key: AverageMeter() for key in metrics}

    with torch.no_grad():  # idiomatic form of set_grad_enabled(False)
        model.eval()
        n_batches = len(dataloader)
        with tqdm(total=len(dataloader)) as tq:
            tq.set_description('Validation')
            x = None
            y = None
            outputs = None
            batch_loss = None
            for batch_index, (x, y) in enumerate(dataloader):
                x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
                outputs = model(x)
                batch_loss = loss(outputs, y)

                # Log per-batch progress.
                batch_loss_val = batch_loss.cpu().item()
                if summary_writer is not None:
                    summary_writer.add_scalar('val/batch/loss', batch_loss_val,
                                              epoch * n_batches + batch_index)
                losses.update(batch_loss_val)

                for key, metric in metrics.items():
                    score = metric(outputs, y).cpu().item()
                    valid_scores[key].update(score)
                    if summary_writer is not None:
                        summary_writer.add_scalar(
                            'val/batch/' + key, score,
                            epoch * n_batches + batch_index)
                tq.set_postfix(loss='{:.3f}'.format(losses.avg),
                               **valid_scores)
                tq.update()

            # BUG FIX: guard against an empty dataloader — the loop may not
            # run, leaving x/y/outputs as None, which would crash the
            # image / PR-curve logging below.
            if summary_writer is not None and outputs is not None:
                summary_writer.add_image('val/image',
                                         make_grid(x.cpu(), normalize=True),
                                         epoch)
                summary_writer.add_image('val/y_true',
                                         make_grid(y.cpu(), normalize=True),
                                         epoch)
                summary_writer.add_image(
                    'val/y_pred',
                    make_grid(outputs.sigmoid().cpu(), normalize=True),
                    epoch)
                summary_writer.add_scalar('val/epoch/loss', losses.avg, epoch)
                for key, value in valid_scores.items():
                    summary_writer.add_scalar('val/epoch/' + key, value.avg,
                                              epoch)
                # Compute PR curve only for the last batch, because computing
                # it for the entire validation set is costly.
                pr_meter.update(outputs, y)
                summary_writer.add_pr_curve_raw(
                    'val/pr_curve',
                    true_positive_counts=pr_meter.tp,
                    true_negative_counts=pr_meter.tn,
                    false_negative_counts=pr_meter.fn,
                    false_positive_counts=pr_meter.fp,
                    precision=pr_meter.precision(),
                    recall=pr_meter.recall(),
                    global_step=epoch)

    # Release GPU references before returning.
    del x, y, outputs, batch_loss
    return losses, valid_scores
class Visualizer():
    """Visualizer wrapper based on Visdom (currently backed by tensorboardX).

    Handles scalar/figure/image logging plus plain-text loss logs written
    under ``opt.outf/opt.name``.
    """
    # pylint: disable=too-many-instance-attributes
    # Reasonable.

    ##
    def __init__(self, opt):
        """Set up the writer, output directories and the loss log file.

        Args:
            opt: parsed options; must expose name, outf, display, niter,
                nz, w_adv, w_con, w_lat.
        """
        self.name = opt.name
        self.opt = opt
        self.writer = None

        # use tensorboard for now
        if self.opt.display:
            from tensorboardX import SummaryWriter
            self.writer = SummaryWriter(log_dir=os.path.join(
                "../tensorboard/skip_ganomaly/", opt.outf))
        # --
        # Dictionaries for plotting data and results.
        self.plot_data = None
        self.plot_res = None

        # --
        # Path to train and test directories.
        self.img_dir = os.path.join(opt.outf, opt.name, 'train', 'images')
        self.tst_img_dir = os.path.join(opt.outf, opt.name, 'test', 'images')
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)
        if not os.path.exists(self.tst_img_dir):
            os.makedirs(self.tst_img_dir)

        # --
        # Log file: write a session header with the key loss weights.
        self.log_name = os.path.join(opt.outf, opt.name, 'loss_log.txt')
        now = time.strftime("%c")
        title = f'================ {now} ================\n'
        info = f'Anomalies, {opt.nz}, {opt.w_adv}, {opt.w_con}, {opt.w_lat}\n'
        self.write_to_log_file(text=title + info)

    ##
    @staticmethod
    def normalize(inp):
        """Normalize the tensor to [0, 1].

        Args:
            inp ([FloatTensor]): Input tensor

        Returns:
            [FloatTensor]: Normalized tensor.
        """
        # +1e-5 avoids division by zero for constant inputs.
        return (inp - inp.min()) / (inp.max() - inp.min() + 1e-5)

    ##
    def plot_current_errors(self, epoch, total_steps, errors):
        """Plot current errors.

        Args:
            epoch (int): Current epoch
            total_steps (int): Global step used as the x-axis value.
            errors (OrderedDict): Error for the current epoch.
        """
        self.writer.add_scalars("Loss over time", errors,
                                global_step=total_steps)

    ##
    def plot_performance(self, epoch, counter_ratio, performance, tag=None):
        """Plot performance metrics (confusion matrix and timing excluded).

        Args:
            epoch (int): Current epoch
            counter_ratio (float): Ratio to plot the range between two epoch.
            performance (OrderedDict): Performance for the current epoch.
        """
        self.writer.add_scalars(
            tag if tag else "Performance Metrics",
            {
                k: v
                for k, v in performance.items()
                if ("conf_matrix" not in k and k != "Avg Run Time (ms/batch)")
            },
            global_step=epoch)

    def plot_current_conf_matrix(self, epoch, cm, tag=None, save_path=None):
        """Render the confusion matrix as a figure and log it."""
        plot = plot_confusion_matrix(cm, normalize=False, save_path=save_path)
        self.writer.add_figure(tag if tag else "Confusion Matrix", plot,
                               global_step=epoch)

    ##
    def print_current_errors(self, epoch, errors):
        """Print current errors and append them to the loss log.

        Args:
            epoch (int): Current epoch.
            errors (OrderedDict): Error for the current epoch.
        """
        message = '   Loss: [%d/%d] ' % (epoch, self.opt.niter)
        for key, val in errors.items():
            message += '%s: %.3f ' % (key, val)
        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)

    ##
    def write_to_log_file(self, text):
        """Append *text* (plus a newline) to the loss log file."""
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % text)

    ##
    def print_current_performance(self, performance, best):
        """Print current performance results.

        Args:
            performance ([OrderedDict]): Performance of the model
            best ([int]): Best performance.
        """
        message = '   '
        for key, val in performance.items():
            if key == "conf_matrix":
                # Matrices cannot be %.3f-formatted; print their repr.
                message += '%s: %s ' % (key, val)
            else:
                message += '%s: %.3f ' % (key, val)
        message += 'max AUC: %.3f' % best
        print(message)
        self.write_to_log_file(text=message)

    def display_current_images(self, reals, fakes, fixed,
                               train_or_test="train", global_step=0):
        """Display current images.

        Args:
            reals ([FloatTensor]): Real Image
            fakes ([FloatTensor]): Fake Image
            fixed ([FloatTensor]): Fixed Fake Image (currently unused)
        """
        reals = self.normalize(reals.cpu().numpy())
        fakes = self.normalize(fakes.cpu().numpy())

        self.writer.add_images("Reals from {} step: ".format(
            str(train_or_test)), reals, global_step=global_step)
        self.writer.add_images("Fakes from {} step: ".format(
            str(train_or_test)), fakes, global_step=global_step)

    def plot_pr_curve(self, y_trues, y_preds, thresholds, global_step,
                      tag=None):
        """Log a raw precision-recall curve computed from score thresholds."""
        tp_counts, fp_counts, tn_counts, fn_counts, precisions, recalls, \
            n_thresholds = get_values_for_pr_curve(y_trues, y_preds,
                                                   thresholds)
        self.writer.add_pr_curve_raw(
            tag if tag else "Precision_recall_curve",
            true_positive_counts=tp_counts,
            false_positive_counts=fp_counts,
            true_negative_counts=tn_counts,
            false_negative_counts=fn_counts,
            precision=precisions,
            recall=recalls,
            num_thresholds=n_thresholds,
            global_step=global_step)

    def save_current_images(self, epoch, reals, fakes, fixed):
        """Save images for epoch i.

        Args:
            epoch ([int])        : Current epoch
            reals ([FloatTensor]): Real Image
            fakes ([FloatTensor]): Fake Image
            fixed ([FloatTensor]): Fixed Fake Image
        """
        vutils.save_image(reals, '%s/reals.png' % self.img_dir,
                          normalize=True)
        vutils.save_image(fakes, '%s/fakes.png' % self.img_dir,
                          normalize=True)
        vutils.save_image(fixed,
                          '%s/fixed_fakes_%03d.png' % (self.img_dir,
                                                       epoch + 1),
                          normalize=True)

    def plot_histogram(self, y_trues, y_preds, threshold, global_step=1,
                       save_path=None, tag=None):
        """Plot score distributions of normal vs abnormal samples."""
        scores = dict()
        scores["scores"] = y_preds
        scores["labels"] = y_trues
        hist = pd.DataFrame.from_dict(scores)
        plt.ion()

        # Filter normal and abnormal scores.
        abn_scr = hist.loc[hist.labels == 1]['scores']
        nrm_scr = hist.loc[hist.labels == 0]['scores']

        # Create figure and plot the distribution.
        fig = plt.figure(figsize=(4, 4))
        sns.distplot(nrm_scr, label=r'Normal Scores')
        sns.distplot(abn_scr, label=r'Abnormal Scores')
        plt.axvline(threshold, 0, 1, label='threshold', color="red")
        plt.legend()
        plt.yticks([])
        plt.xlabel(r'Anomaly Scores')
        # BUG FIX: only save when a path is supplied; the default is None
        # and plt.savefig(None) raises (plot_roc_curve already guards this).
        if save_path:
            plt.savefig(save_path)
        self.writer.add_figure(tag if tag else "Histogram", fig, global_step)

    def plot_roc_curve(self, y_trues, y_preds, global_step=1, tag=None,
                       save_path=None):
        """Plot the ROC curve with its AUC and log it as a figure."""
        fpr, tpr, roc_auc = get_values_for_roc_curve(y_trues, y_preds)
        fig = plt.figure(figsize=(4, 4))
        lw = 2
        plt.plot(fpr, tpr, color='darkorange', lw=lw,
                 label='(AUC = %0.2f)' % (roc_auc))
        plt.plot([0, 1], [1, 0], color='navy', lw=1, linestyle=':')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic')
        plt.legend(loc="lower right")
        if save_path:
            plt.savefig(save_path)
        self.writer.add_figure(tag if tag else "ROC-Curve", fig, global_step)