def __init__(self, batch_inteval=100):
    """Callback that reports gradient diagnostics every `batch_inteval` batches.

    Args:
        batch_inteval (int): number of batches between gradient reports.
    """
    super(PrintGradientsCallback, self).__init__(epoch_inteval=-1, batch_inteval=batch_inteval)
    self.batch_inteval = batch_inteval
    # Cache environment detection once so later hooks don't re-probe.
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    # Names of the first/last layers observed, plus accumulated report lines.
    self.first_layer = ''
    self.last_layer = ''
    self.lines = []
def on_loss_calculation_end(self, training_context):
    """Mixup auxiliary-loss hook.

    Builds convex combinations of random input pairs (mixup) and adds the
    correspondingly mixed loss, scaled by ``self.loss_weight``, into
    ``training_context['current_loss']``.

    Returns mixed inputs, pairs of targets, and lambda
    """
    train_data = training_context['train_data']
    x = None
    y = None
    x = train_data.value_list[0].copy().detach()  # input batch (detached copy)
    y = train_data.value_list[1].copy().detach()  # label batch (detached copy)
    model = training_context['current_model']
    # Beta-distributed mixing coefficient, clamped into [0.3, 0.7] so the
    # mixture never degenerates to (almost) one of the two images.
    lam = builtins.min(
        builtins.max(np.random.beta(self.alpha, self.alpha), 0.3), 0.7)
    batch_size = int_shape(x)[0]
    # Random permutation of batch positions selects each sample's partner.
    index = arange(batch_size)
    index = cast(shuffle(index), 'long')
    this_loss = None
    mixed_x = None
    if get_backend() == 'pytorch':
        mixed_x = lam * x + (1 - lam) * x[index, :]
        pred = model(to_tensor(mixed_x, requires_grad=True))
        y_a, y_b = y, y[index]
        # Loss is the same convex combination applied to both targets.
        this_loss = lam * self.loss_criterion(pred, y_a.long()) + (
            1 - lam) * self.loss_criterion(pred, y_b.long())
    elif get_backend() == 'tensorflow':
        x1 = tf.gather(x, index, axis=0)
        y1 = tf.gather(y, index, axis=0)
        mixed_x = lam * x + (1 - lam) * x1
        pred = model(to_tensor(mixed_x, requires_grad=True))
        y_a, y_b = y, y1
        this_loss = lam * self.loss_criterion(
            pred, y_a) + (1 - lam) * self.loss_criterion(pred, y_b)
    # Accumulate the weighted auxiliary loss into the training loss.
    training_context['current_loss'] = training_context[
        'current_loss'] + this_loss * self.loss_weight
    if training_context['is_collect_data']:
        training_context['losses'].collect(
            'mixup_loss', training_context['steps'],
            float(to_numpy(this_loss * self.loss_weight)))
    # On the first batch of an epoch, dump the mixed images for inspection.
    # NOTE(review): each item is saved under the same time-suffixed name, so
    # items within the same second overwrite each other — confirm intended.
    # NOTE(review): lowercase `unnormalize` here vs `Unnormalize` in the
    # cutmix sibling — verify which factory the project actually exports.
    if training_context['current_batch'] == 0:
        for item in mixed_x:
            if self.save_path is None and not is_in_colab():
                # assumes inputs were normalized with ImageNet mean/std — TODO confirm
                item = unnormalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])(to_numpy(item))
                item = unnormalize(0, 255)(item)
                array2image(item).save('Results/mixup_{0}.jpg'.format(
                    get_time_suffix()))
            elif self.save_path is not None:
                item = unnormalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])(to_numpy(item))
                item = unnormalize(0, 255)(item)
                array2image(item).save(
                    os.path.join(self.save_path, 'mixup_{0}.jpg'.format(
                        get_time_suffix())))
def __init__(self, epoch_inteval, batch_inteval, save_path: str = None, imshow=False):
    """Base class for visualization callbacks.

    Args:
        epoch_inteval: number of epochs between visualizations (-1 disables).
        batch_inteval: number of batches between visualizations (-1 disables).
        save_path: output folder for generated images; defaults to 'results'.
        imshow (bool): whether to display figures interactively.
    """
    super(VisualizationCallbackBase, self).__init__()
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.epoch_inteval = epoch_inteval
    self.batch_inteval = batch_inteval
    # Fall back to the conventional 'results' folder and ensure it exists.
    self.save_path = make_dir_if_need('results' if save_path is None else save_path)
    self.imshow = imshow
def __init__(self, frequency=-1, unit='batch', save_path: str = None, imshow=False):
    """Base class for visualization callbacks (frequency/unit variant).

    Args:
        frequency (int): how often to trigger, measured in `unit`s (-1 disables).
        unit (str): one of 'batch', 'step', 'epoch'.
        save_path: output folder for generated images; defaults to 'results'.
        imshow (bool): whether to display figures interactively.
    """
    super(VisualizationCallbackBase, self).__init__()
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.frequency = frequency
    if unit in ('batch', 'step', 'epoch'):
        self.unit = unit
    else:
        print(red_color('Only [batch, step, epoch] are valid unit.', True))
        # Bug fix: previously self.unit was left unset on invalid input,
        # which raised AttributeError later; fall back to 'batch'.
        self.unit = 'batch'
    if save_path is None:
        save_path = 'results'
    self.save_path = make_dir_if_need(save_path)
    self.imshow = imshow
def __init__(self, epoch_inteval=-1, batch_inteval=-1, save_path: str = 'results', reverse_image_transform=None, palette=None, background=(120, 120, 120), name_prefix: str = 'segtile_image_{0}.png', imshow=False):
    """Tile-image callback for segmentation results (epoch/batch variant).

    Args:
        epoch_inteval / batch_inteval: trigger cadence (-1 disables).
        save_path: output folder for the tiled images.
        reverse_image_transform: callable undoing the input normalization.
        palette: color palette used to render label maps.
        background: RGB fill color for empty tile regions.
        name_prefix: filename template for saved tiles.
        imshow (bool): whether to display figures interactively.
    """
    super(SegTileImageCallback, self).__init__(epoch_inteval, batch_inteval, save_path, imshow)
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.palette = palette
    self.reverse_image_transform = reverse_image_transform
    self.tile_image_name_prefix = name_prefix
    # Reshape background to (1, 1, C) so it broadcasts over H x W images.
    bg = np.expand_dims(to_numpy(background), 0)
    self.background = np.expand_dims(bg, 0)
def __init__(self, frequency=-1, unit='batch', save_path: str = 'results', reverse_image_transform=None, labels=None, palette=None, background=(120, 120, 120), name_prefix: str = 'detection_plot_image_{0}.png', imshow=False):
    """Callback plotting detection results at a fixed cadence.

    Args:
        frequency (int): how often to trigger, measured in `unit`s (-1 disables).
        unit (str): 'batch', 'step' or 'epoch'.
        save_path: output folder for the plotted images.
        reverse_image_transform: callable undoing the input normalization.
        labels: class labels used for annotation.
        palette: color palette for drawing boxes/classes.
        background: RGB fill color for empty regions.
        name_prefix: filename template for saved plots.
        imshow (bool): whether to display figures interactively.
    """
    super(DetectionPlotImageCallback, self).__init__(frequency, unit, save_path, imshow)
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.labels = labels
    self.palette = palette
    self.reverse_image_transform = reverse_image_transform
    self.tile_image_name_prefix = name_prefix
    # Reshape background to (1, 1, C) so it broadcasts over H x W images.
    bg = np.expand_dims(to_numpy(background), 0)
    self.background = np.expand_dims(bg, 0)
def __init__(self, frequency=-1, unit='batch', save_path: str = 'results', reverse_image_transform=None, is_label_mask=False, palette=None, background=(120, 120, 120), name_prefix: str = 'segtile_image_{0}.png', imshow=False):
    """Tile-image callback for segmentation results (frequency/unit variant).

    Args:
        frequency (int): how often to trigger, measured in `unit`s (-1 disables).
        unit (str): 'batch', 'step' or 'epoch'.
        save_path: output folder for the tiled images.
        reverse_image_transform: callable undoing the input normalization.
        is_label_mask (bool): whether targets are integer label masks.
        palette: color palette used to render label maps.
        background: RGB fill color for empty tile regions.
        name_prefix: filename template for saved tiles.
        imshow (bool): whether to display figures interactively.
    """
    super(SegTileImageCallback, self).__init__(frequency, unit, save_path, imshow)
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.is_label_mask = is_label_mask
    self.palette = palette
    self.reverse_image_transform = reverse_image_transform
    self.tile_image_name_prefix = name_prefix
    self.background = to_numpy(background)
def __init__(self, epoch_inteval=-1, batch_inteval=-1, save_path: str = 'results', name_prefix: str = 'tile_image_{0}.png', row=3, include_mask=None, reverse_image_transform=None, imshow=False):
    """Tile-image callback for GAN outputs.

    Args:
        epoch_inteval / batch_inteval: trigger cadence (-1 disables).
        save_path: output folder for the tiled images.
        name_prefix: filename template for saved tiles.
        row (int): the tile grid is row x row images.
        include_mask: accepted for signature parity.
        reverse_image_transform: callable undoing the input normalization.
        imshow (bool): whether to display figures interactively.
    """
    super(GanTileImageCallback, self).__init__(epoch_inteval, batch_inteval, save_path, imshow)
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.tile_image_name_prefix = name_prefix
    self.reverse_image_transform = reverse_image_transform
    self.row = row
    # NOTE(review): `include_mask` is accepted but never stored — confirm intended.
    dataprovider = enforce_singleton(ctx.get_data_provider())
    self.sample_enough = False
    # Accumulate across batches when one minibatch cannot fill a row*row grid.
    self.accumulate_sample = dataprovider.minibatch_size < row * row
    self.tile_images_list = []
    self.output_arr = []
def __init__(self, epoch_inteval=-1, batch_inteval=-1, save_path: str = 'results', name_prefix: str = 'tile_image_{0}.png', row=3, include_input=True, include_output=True, include_target=True, include_mask=None, reverse_image_transform=None, imshow=False):
    """Generic tile-image callback for input/output/target visualization.

    Args:
        epoch_inteval / batch_inteval: trigger cadence (-1 disables).
        save_path: output folder for the tiled images.
        name_prefix: filename template for saved tiles.
        row (int): the tile grid is row x row images.
        include_input / include_output / include_target / include_mask:
            which tensors get rendered into the tiled figure.
        reverse_image_transform: callable undoing the input normalization.
        imshow (bool): whether to display figures interactively.
    """
    super(TileImageCallback, self).__init__(epoch_inteval, batch_inteval, save_path, imshow)
    self.is_in_ipython = is_in_ipython()
    self.is_in_colab = is_in_colab()
    self.tile_image_name_prefix = name_prefix
    self.reverse_image_transform = reverse_image_transform
    self.row = row
    # Store the content-selection flags uniformly.
    for attr, flag in (('include_input', include_input),
                       ('include_output', include_output),
                       ('include_target', include_target),
                       ('include_mask', include_mask)):
        setattr(self, attr, flag)
def steps_histogram(grads, weights=None, sample_collected=None, bins=None, size=(18, 8), inteval=1, title='', save_path=None, imshow=False):
    """Render 3D "waterfall" histograms of gradients and/or weights over steps.

    Each training step contributes one histogram polygon stacked along the
    y (steps) axis. Gradients (if given) go in the left subplot, weights (if
    given) in the right; a single subplot is used when only one is provided.

    Args:
        grads: sequence of per-step gradient arrays (any shape; flattened).
        weights: optional sequence of per-step weight arrays.
        sample_collected: optional 0/1 flags marking which steps were sampled;
            when its length matches, the flagged step indices label the y axis.
        bins: histogram bin edges; falls back to module-level `default_bins`.
        size: matplotlib figure size in inches.
        inteval: subsampling stride applied to the weights sequence only.
        title: prefix for the gradients subplot title.
        save_path: if given, the figure is saved there.
        imshow: if True, show the figure (inline in IPython/Colab).
    """
    global default_bins
    from mpl_toolkits.mplot3d import Axes3D
    if bins is None:
        bins = default_bins
    # Translate the 0/1 collection flags into the step indices that were kept.
    collected_samples = []
    if sample_collected is not None and len(sample_collected) > 0:
        sample_collected = np.array(sample_collected)
        sample = np.arange(len(sample_collected))
        collected_samples = sample[sample_collected == 1]
    plt.ion()
    fig = plt.figure(figsize=size)
    fig.patch.set_facecolor('white')
    if grads is not None:
        # Left half if weights will also be plotted, otherwise full width.
        ax = fig.add_subplot(
            1, 2, 1, projection='3d'
        ) if grads is not None and weights is not None else fig.add_subplot(
            1, 1, 1, projection='3d')
        # ax = fig.gca(projection='3d')
        # Make verts a list, verts[i] will be a list of (x,y) pairs defining polygon i
        verts = []
        # The ith polygon will appear on the plane y = zs[i]
        zs = np.arange(len(grads))
        if len(collected_samples) == len(grads):
            zs = collected_samples
        new_zs = []
        max_frequency = 0
        for i in range(len(grads)):
            a, b = np.histogram(grads[i].reshape([-1]), bins)
            ys = a
            # Use the left bin edges as x coordinates.
            xs = b[:-1]
            new_zs.append(zs[i])
            max_frequency = max(np.max(a), max_frequency)
            verts.append(polygon_under_graph(xs, ys))
        poly = PolyCollection(verts, facecolors=['r', 'g', 'b', 'y'], alpha=.4)
        ax.add_collection3d(poly, zs=new_zs, zdir='y')
        override = {
            'fontsize': 'small',
            'verticalalignment': 'top',
            'horizontalalignment': 'center'
        }
        ax.set_xlabel('gradients', override)
        ax.set_ylabel('steps', override)
        ax.set_zlabel('frequency', override)
        ax.set_xlim(min(bins), max(bins))
        ax.set_ylim(0, int(max(new_zs)))
        ax.set_zlim(0, int(max_frequency * 1.1))
        plt.title(title + ' Gradients Histogram')
    if weights is not None:
        ax = fig.add_subplot(
            1, 2, 2, projection='3d') if grads is not None else fig.add_subplot(
                1, 1, 1, projection='3d')
        # Weights live on a wider numeric range than gradients, so the shared
        # bin edges are scaled up by 10 for this subplot.
        bins = [b * 10 for b in bins]
        # Make verts a list, verts[i] will be a list of (x,y) pairs defining polygon i
        verts = []
        # The ith polygon will appear on the plane y = zs[i]
        zs = np.arange(len(weights))
        if len(collected_samples) == len(weights):
            zs = collected_samples
        new_zs = []
        max_frequency = 0
        for i in range(len(weights)):
            # Unlike the gradients loop, weights are subsampled by `inteval`.
            if i % inteval == 0:
                a, b = np.histogram(weights[i].reshape([-1]), bins)
                ys = a
                # Small offset keeps polygons from degenerating at bin edges.
                xs = b[:-1] + 0.001
                new_zs.append(zs[i])
                max_frequency = max(np.max(a), max_frequency)
                verts.append(polygon_under_graph(xs, ys))
        poly = PolyCollection(verts, facecolors=['r', 'g', 'b', 'y'], alpha=.4)
        ax.add_collection3d(poly, zs=new_zs, zdir='y')
        override = {
            'fontsize': 'small',
            'verticalalignment': 'top',
            'horizontalalignment': 'center'
        }
        ax.set_xlabel('weights', override)
        ax.set_ylabel('steps', override)
        ax.set_zlabel('frequency', override)
        ax.set_xlim(min(bins), max(bins))
        ax.set_ylim(0, int(max(new_zs)))
        ax.set_zlim(0, int(max_frequency * 1.1))
        plt.title('Weights Histogram')
    if save_path is not None:
        plt.savefig(save_path, bbox_inches='tight')
    if imshow == True:
        if is_in_ipython() or is_in_colab():
            # Inline display: show the current figure then release it.
            display.display(plt.gcf())
            plt.close(fig)
        else:
            plt.ioff()
            plt.show(block=False)
from __future__ import division
from __future__ import print_function

import math
import sys

from trident.misc.ipython_utils import is_in_ipython, is_in_colab

# Pick a matplotlib backend appropriate for the runtime environment before
# pyplot is imported (backend selection must happen first).
if is_in_ipython():
    from IPython import display
    from tkinter import *
    # Bug fix: `is_in_colab` was tested as a bare function object (functions
    # are always truthy, so `not is_in_colab` was always False and this
    # branch never ran); it must be *called*.
    if not is_in_colab():
        import matplotlib
        matplotlib.use(
            'TkAgg' if not is_in_ipython() and not is_in_colab() else 'NbAgg')
else:
    import matplotlib

import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
import matplotlib.patches as patches
import matplotlib.font_manager

# Enumerate the system's TrueType fonts once at import time.
fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
fontnames = [
    matplotlib.font_manager.FontProperties(fname=fname).get_name()
    for fname in fonts
]
default_font = None
from trident.callbacks.callback_base import CallbackBase from trident.data.mask_common import label2color from trident.misc.ipython_utils import is_in_ipython, is_in_colab from trident.misc.visualization_utils import * from trident.data.bbox_common import * if get_backend() == 'pytorch': from trident.backend.pytorch_backend import try_map_args_and_call from trident.backend.pytorch_ops import to_numpy, to_tensor, arange, shuffle, cast, clip, sqrt, int_shape, argmax, softmax, any_abnormal_number, reduce_any elif get_backend() == 'tensorflow': from trident.backend.tensorflow_backend import try_map_args_and_call from trident.backend.tensorflow_ops import to_numpy, to_tensor, arange, shuffle, cast, clip, sqrt, int_shape, concate, zeros_like, ones_like, argmax, softmax, any_abnormal_number, \ not_equal,reduce_any if is_in_ipython() or is_in_colab(): from IPython import display _session = get_session() _backend = get_backend() __all__ = [ 'VisualizationCallbackBase', 'TileImageCallback', 'PrintGradientsCallback', 'SegTileImageCallback', 'PlotLossMetricsCallback', 'DetectionPlotImageCallback' ] class VisualizationCallbackBase(CallbackBase): def __init__(self, epoch_inteval,
def on_loss_calculation_end(self, training_context):
    """CutMix auxiliary-loss hook.

    Cuts a random rectangular patch from each sample's shuffled partner into
    the input batch, then adds the area-weighted combination of the two
    targets' losses (scaled by ``self.loss_weight``) into
    ``training_context['current_loss']``.

    Returns mixed inputs, pairs of targets, and lambda
    """
    model = training_context['current_model']
    train_data = training_context['train_data']
    x = None
    y = None
    x = train_data.value_list[0].copy().detach().to(model.device)  # input
    y = train_data.value_list[1].copy().detach().to(model.device)  # label
    # Beta-distributed patch-area coefficient, clamped into [0.1, 0.4].
    lam = builtins.min(
        builtins.max(np.random.beta(self.alpha, self.alpha), 0.1), 0.4)
    batch_size = int_shape(x)[0]
    # Random permutation of batch positions selects each sample's partner.
    index = cast(arange(batch_size), 'int64')
    index = shuffle(index)
    this_loss = None
    if get_backend() == 'pytorch':
        y_a, y_b = y, y[index]
        # NCHW layout: width is shape[3], height is shape[2].
        bbx1, bby1, bbx2, bby2 = self.rand_bbox(x.shape[3], x.shape[2], lam)
        x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.shape[3] * x.shape[2]))
        pred = model(to_tensor(x, requires_grad=True, device=model.device))
        this_loss = lam * self.loss_criterion(pred, y_a.long()) + (
            1 - lam) * self.loss_criterion(pred, y_b.long())
        training_context['current_loss'] = training_context[
            'current_loss'] + this_loss * self.loss_weight
        if training_context['is_collect_data']:
            training_context['losses'].collect(
                'cutmix_loss', training_context['steps'],
                float(to_numpy(this_loss * self.loss_weight)))
    elif get_backend() == 'tensorflow':
        with tf.device(get_device()):
            y1 = tf.gather(y, index, axis=0)
            x1 = tf.gather(x, index, axis=0)
            y_a, y_b = y, y1
            # NHWC layout: width is shape[2], height is shape[1].
            bbx1, bby1, bbx2, bby2 = self.rand_bbox(
                x.shape[2], x.shape[1], lam)
            # Binary mask marking the patch region (renamed from `filter`,
            # which shadowed the builtin).
            mask = np.zeros(int_shape(x))
            mask[:, bbx1:bbx2, bby1:bby2, :] = 1
            # Bug fix: the original converted `x` here (`to_tensor(x)`)
            # instead of the mask, so the blend below mixed x with itself.
            mask = to_tensor(mask)
            x = x * (1 - mask) + x1 * mask
            # x[:, bbx1:bbx2, bby1:bby2, :] = x1[:, bbx1:bbx2, bby1:bby2,:]
            # adjust lambda to exactly match pixel ratio
            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) /
                       (x.shape[2] * x.shape[1]))
            pred = model(to_tensor(x, requires_grad=True))
            loss1 = self.loss_criterion(pred, y_a)
            loss2 = self.loss_criterion(pred, y_b)
            this_loss = lam * loss1 + (1 - lam) * loss2
            training_context['current_loss'] = training_context[
                'current_loss'] + this_loss * self.loss_weight
            if training_context['is_collect_data']:
                training_context['losses'].collect(
                    'cutmix_loss', training_context['steps'],
                    float(to_numpy(this_loss * self.loss_weight)))
    # On the first batch, dump the cutmixed images for visual inspection.
    if training_context['current_batch'] == 0:
        if self.save_path is None and not is_in_colab():
            for item in x:
                # assumes inputs were normalized with ImageNet mean/std — TODO confirm
                item = Unnormalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])(to_numpy(item))
                item = Unnormalize(0, 255)(item)
                # Bug fix: this branch has save_path None, so joining it with
                # os.path.join raised TypeError; save into the default
                # 'Results' folder instead (matching the mixup callback).
                array2image(item).save(
                    'Results/cutmix_{0}.jpg'.format(get_time_suffix()))
        elif self.save_path is not None:
            for item in x:
                item = Unnormalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])(to_numpy(item))
                item = Unnormalize(0, 255)(item)
                array2image(item).save(
                    os.path.join(
                        self.save_path,
                        'cutmix_{0}.jpg'.format(get_time_suffix())))
    # Release references to the detached copies.
    x = None
    y = None