def output_max_patches(max_tracker, net, layer, idx_begin, idx_end, num_top,
                       datadir, filelist, outdir, do_which, layer_info):
    do_maxes, do_deconv, do_deconv_norm, do_backprop, do_backprop_norm, do_info = do_which
    assert do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm or do_info, 'nothing to do'

    mt = max_tracker
    rc = RegionComputer(layer_info)

    image_filenames, image_labels = load_file_list(filelist)
    print 'Loaded filenames and labels for %d files' % len(image_filenames)
    print '  First file', os.path.join(datadir, image_filenames[0])

    num_top_in_mt = mt.max_locs.shape[1]
    assert num_top <= num_top_in_mt, 'Requested %d top images but MaxTracker contains only %d' % (num_top, num_top_in_mt)
    assert idx_end >= idx_begin, 'Range error'

    size_ii, size_jj = get_max_data_extent(net, layer, rc, mt.is_conv)
    data_size_ii, data_size_jj = net.blobs['data'].data.shape[2:4]
    assert 0 < size_ii <= data_size_ii
    assert 0 < size_jj <= data_size_jj

    n_total_images = (idx_end - idx_begin) * num_top
    for cc, channel_idx in enumerate(range(idx_begin, idx_end)):
        unit_dir = os.path.join(outdir, layer, 'unit_%04d' % channel_idx)
        mkdir_p(unit_dir)

        if do_info:
            info_filename = os.path.join(unit_dir, 'info.txt')
            info_file = open(info_filename, 'w')
            print >> info_file, '# is_conv val image_idx image_class i(if is_conv) j(if is_conv) filename'

        # Iterate through maxes from highest (at end) to lowest
        for max_idx_0 in range(num_top):
            max_idx = num_top_in_mt - 1 - max_idx_0
            if mt.is_conv:
                im_idx, im_class, ii, jj = mt.max_locs[channel_idx, max_idx]
            else:
                im_idx, im_class = mt.max_locs[channel_idx, max_idx]
            recorded_val = mt.max_vals[channel_idx, max_idx]
            filename = image_filenames[im_idx]
            do_print = (max_idx_0 == 0)
            if do_print:
                print '%s Output file/image(s) %d/%d' % (datetime.now().ctime(), cc * num_top, n_total_images)

            if mt.is_conv:
                # Compute the focus area of the data layer
                layer_indices = (ii, ii + 1, jj, jj + 1)
                data_indices = rc.convert_region(layer, 'data', layer_indices, normalize_last=True)
                data_ii_start, data_ii_end, data_jj_start, data_jj_end = data_indices

                # <= 0 rather than == 0: with padding in the first layer
                # (e.g. VGG-16/19), the mapped region can start at a negative
                # data coordinate.
                touching_imin = (data_ii_start <= 0)
                touching_jmin = (data_jj_start <= 0)

                # Compute how much of the data slice falls outside the actual data [0,max] range
                ii_outside = size_ii - (data_ii_end - data_ii_start)  # possibly 0
                jj_outside = size_jj - (data_jj_end - data_jj_start)  # possibly 0
                # This should hold: the central unit (with extent size_ii, size_jj)
                # covers a region at least as large as any other unit's, since it
                # is least affected by padding.
                assert ii_outside >= 0 and jj_outside >= 0

                # If ii_outside (or jj_outside) is > 0, data was chopped off by
                # padding: touching_imin means the chop happened near the left
                # edge, touching_jmin near the top, so pad the output there.
                if touching_imin:
                    out_ii_start = ii_outside
                    out_ii_end = size_ii
                else:
                    out_ii_start = 0
                    out_ii_end = size_ii - ii_outside
                if touching_jmin:
                    out_jj_start = jj_outside
                    out_jj_end = size_jj
                else:
                    out_jj_start = 0
                    out_jj_end = size_jj - jj_outside
            else:
                ii, jj = 0, 0
                data_ii_start, out_ii_start, data_jj_start, out_jj_start = 0, 0, 0, 0
                data_ii_end, out_ii_end, data_jj_end, out_jj_end = size_ii, size_ii, size_jj, size_jj

            if do_info:
                print >> info_file, 1 if mt.is_conv else 0, '%.6f' % mt.max_vals[channel_idx, max_idx],
                if mt.is_conv:
                    print >> info_file, '%d %d %d %d' % tuple(mt.max_locs[channel_idx, max_idx]),
                else:
                    print >> info_file, '%d %d' % tuple(mt.max_locs[channel_idx, max_idx]),
                print >> info_file, filename

            if not (do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm):
                continue

            with WithTimer('Load image', quiet=not do_print):
                im = caffe.io.load_image(os.path.join(datadir, filename))
            with WithTimer('Predict   ', quiet=not do_print):
                net.predict([im], oversample=False)  # Just take center crop, same as in scan_images_for_maxes

            if len(net.blobs[layer].data.shape) == 4:
                reproduced_val = net.blobs[layer].data[0, channel_idx, ii, jj]
            else:
                reproduced_val = net.blobs[layer].data[0, channel_idx]
            if abs(reproduced_val - recorded_val) > .1:
                print 'Warning: recorded value %s is suspiciously different from reproduced value %s. Is the filelist the same?' % (recorded_val, reproduced_val)

            if do_maxes:
                # Grab image from data layer, not from im (to ensure preprocessing /
                # center crop details match between image and deconv/backprop)
                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].data[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                with WithTimer('Save img  ', quiet=not do_print):
                    save_caffe_image(out_arr, os.path.join(unit_dir, 'maxim_%03d.png' % max_idx_0),
                                     autoscale=False, autoscale_center=0)

            if do_deconv or do_deconv_norm:
                diffs = net.blobs[layer].diff * 0
                if len(diffs.shape) == 4:
                    diffs[0, channel_idx, ii, jj] = 1.0
                else:
                    assert len(diffs.shape) == 2
                    diffs[0, channel_idx] = 1.0
                with WithTimer('Deconv    ', quiet=not do_print):
                    net.deconv_from_layer(layer, diffs)

                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].diff[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                if out_arr.max() == 0:
                    print 'Warning: Deconv out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt'
                if do_deconv:
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'deconv_%03d.png' % max_idx_0),
                                         autoscale=False, autoscale_center=0)
                if do_deconv_norm:
                    out_arr = np.linalg.norm(out_arr, axis=0)
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'deconvnorm_%03d.png' % max_idx_0))

            if do_backprop or do_backprop_norm:
                diffs = net.blobs[layer].diff * 0
                if len(diffs.shape) == 4:
                    diffs[0, channel_idx, ii, jj] = 1.0
                else:
                    assert len(diffs.shape) == 2
                    diffs[0, channel_idx] = 1.0
                with WithTimer('Backward  ', quiet=not do_print):
                    net.backward_from_layer(layer, diffs)

                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].diff[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                if out_arr.max() == 0:
                    print 'Warning: Backprop out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt'
                if do_backprop:
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'backprop_%03d.png' % max_idx_0),
                                         autoscale=False, autoscale_center=0)
                if do_backprop_norm:
                    out_arr = np.linalg.norm(out_arr, axis=0)
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'backpropnorm_%03d.png' % max_idx_0))

        if do_info:
            info_file.close()
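# Example call for the variant above (a minimal sketch: 'conv5', the pickle
# name, and all paths are hypothetical stand-ins; layer_info is whatever
# structure this codebase's RegionComputer expects):
#
#   mt = pickle.load(open('maxes.pkl', 'rb'))
#   # order is fixed: maxes, deconv, deconv_norm, backprop, backprop_norm, info
#   do_which = (True, True, True, False, False, True)
#   output_max_patches(mt.max_trackers['conv5'], net, 'conv5', 0, 10, 9,
#                      '/path/to/images', '/path/to/filelist.txt',
#                      '/path/to/out', do_which, layer_info)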
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str,
                        default='/home/ubuntu/new/caffe/data/marketplace/googlenet/deploy.prototxt')
    parser.add_argument('--weights', type=str,
                        default='/home/ubuntu/new/caffe/data/marketplace/googlenet/snapshots/googlenet_train_quick_iter_5000.caffemodel')
    parser.add_argument('--lmdb', type=str,
                        default='/home/ubuntu/new/caffe/data/marketplace/images/marketplace_val_lmdb')
    parser.add_argument('--mean', type=str,
                        default='/home/ubuntu/new/caffe/data/marketplace/images/mean.binaryproto')
    parser.add_argument('--layer', type=str, default='prob')
    parser.add_argument('--channel', type=int, default=0)
    args = parser.parse_args()

    caffe.set_mode_gpu()
    caffe.set_device(0)

    reader = lmdb_reader(args.lmdb)
    mean = read_mean(args.mean)
    net = caffe.Net(args.model, args.weights, caffe.TEST)

    # Reuse cached maxes if available; otherwise scan the whole LMDB.
    try:
        max_tracker = pickle.load(open('maxes.pkl', 'rb'))
    except (IOError, pickle.UnpicklingError):
        layers = map(str, net._blob_names)
        is_conv = [False for ll in layers]
        max_tracker = NetMaxTracker(layers=layers, is_conv=is_conv, n_top=10)
        for idx, (i, image, label) in enumerate(reader):
            print idx, i, label
            caffe_in = preprocess_image(image, net, mean)
            net.blobs['data'].data[...] = caffe_in
            net.forward()
            max_tracker.update(net, idx, label)
        with open('maxes.pkl', 'wb') as f:
            pickle.dump(max_tracker, f, -1)

    # Re-open the reader; the scan above may have consumed it.
    reader = lmdb_reader(args.lmdb)
    # LMDB keys are '<idx>_<filename>'; keep the filename part.
    files = [i.split('_', 1)[1] for (i, _, _) in reader]

    layer = args.layer
    channel = args.channel
    mt = max_tracker.max_trackers[layer]
    rc = RegionComputer()

    # Plot the top nine activating patches in a 3x3 grid.
    fig = plt.figure(figsize=(6, 6))
    gs = gridspec.GridSpec(3, 3, wspace=0, hspace=0)
    for i, im in enumerate(mt.max_locs[channel, -9:, 0]):
        img = mpimg.imread('/home/ubuntu/new/caffe/data/marketplace/images/' + files[im])
        if mt.is_conv:
            # With n_top=10 above, index i+1 addresses the same entry as -9+i.
            ii, jj = mt.max_locs[channel, i + 1, 2:]
            layer_indices = (ii, ii + 1, jj, jj + 1)
            data_indices = rc.convert_region(layer, 'data', layer_indices)
            data_ii_start, data_ii_end, data_jj_start, data_jj_end = data_indices
            img = img[data_ii_start:data_ii_end, data_jj_start:data_jj_end, :]
        ax = plt.subplot(gs[i])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.imshow(img, aspect='auto')
        fig.add_subplot(ax)
    plt.tight_layout(pad=0)
    plt.show()
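# main() relies on a preprocess_image helper that is not shown here. Below is a
# minimal sketch under the assumption that lmdb_reader yields CHW uint8 arrays
# decoded from Caffe Datum records, so only a float cast and mean subtraction
# remain; the name and layout are assumptions, not the actual helper:
import numpy as np

def preprocess_image_sketch(image, net, mean):
    caffe_in = np.asarray(image, dtype=np.float32)  # CHW, BGR order as stored by Caffe
    caffe_in -= mean                                # mean.binaryproto loaded via read_mean()
    return caffe_in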
def output_max_patches(max_tracker, net, layer, idx_begin, idx_end, num_top,
                       datadir, filelist, outdir, do_which):
    do_maxes, do_deconv, do_deconv_norm, do_backprop, do_backprop_norm, do_info = do_which
    assert do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm or do_info, 'nothing to do'

    mt = max_tracker
    rc = RegionComputer()

    image_filenames, image_xmls, image_labels = load_file_list(filelist)
    print('Loaded filenames and labels for %d files' % len(image_filenames))
    print('  First file', os.path.join(datadir, image_filenames[0]))

    num_top_in_mt = mt.max_locs.shape[1]
    assert num_top <= num_top_in_mt, 'Requested %d top images but MaxTracker contains only %d' % (num_top, num_top_in_mt)
    assert idx_end >= idx_begin, 'Range error'

    size_ii, size_jj = get_max_data_extent(net, layer, rc, mt.is_conv)
    data_size_ii, data_size_jj = net.blobs['data'].data.shape[2:4]

    n_total_images = (idx_end - idx_begin) * num_top
    for cc, channel_idx in enumerate(range(idx_begin, idx_end)):
        unit_dir = os.path.join(outdir, layer, 'unit_%04d' % channel_idx)
        mkdir_p(unit_dir)

        if do_info:
            info_filename = os.path.join(unit_dir, 'info.txt')
            info_file = open(info_filename, 'w')
            info_file.write('# is_conv val image_idx image_class i(if is_conv) j(if is_conv) filename\n')

        # Iterate through maxes from highest (at end) to lowest
        for max_idx_0 in range(num_top):
            max_idx = num_top_in_mt - 1 - max_idx_0
            if mt.is_conv:
                im_idx, im_class, ii, jj = mt.max_locs[channel_idx, max_idx]
            else:
                im_idx, im_class = mt.max_locs[channel_idx, max_idx]
            recorded_val = mt.max_vals[channel_idx, max_idx]
            filename = image_filenames[im_idx]
            roiname = image_xmls[im_idx]
            do_print = (max_idx_0 == 0)
            if do_print:
                print('%s Output file/image(s) %d/%d' % (datetime.now().ctime(), cc * num_top, n_total_images))

            if mt.is_conv:
                # Compute the focus area of the data layer
                layer_indices = (ii, ii + 1, jj, jj + 1)
                data_indices = rc.convert_region(layer, 'data', layer_indices)
                data_ii_start, data_ii_end, data_jj_start, data_jj_end = data_indices

                touching_imin = (data_ii_start == 0)
                touching_jmin = (data_jj_start == 0)

                # Compute how much of the data slice falls outside the actual data [0,max] range
                ii_outside = size_ii - (data_ii_end - data_ii_start)  # possibly 0
                jj_outside = size_jj - (data_jj_end - data_jj_start)  # possibly 0

                if touching_imin:
                    out_ii_start = ii_outside
                    out_ii_end = size_ii
                else:
                    out_ii_start = 0
                    out_ii_end = size_ii - ii_outside
                if touching_jmin:
                    out_jj_start = jj_outside
                    out_jj_end = size_jj
                else:
                    out_jj_start = 0
                    out_jj_end = size_jj - jj_outside
            else:
                ii, jj = 0, 0
                data_ii_start, out_ii_start, data_jj_start, out_jj_start = 0, 0, 0, 0
                data_ii_end, out_ii_end, data_jj_end, out_jj_end = size_ii, size_ii, size_jj, size_jj

            if do_info:
                # Write the fields space-separated on one line, matching the
                # header above (bare write() calls would run them together).
                info_file.write('%d ' % (1 if mt.is_conv else 0))
                info_file.write('%.6f ' % mt.max_vals[channel_idx, max_idx])
                if mt.is_conv:
                    info_file.write('%d %d %d %d ' % tuple(mt.max_locs[channel_idx, max_idx]))
                else:
                    info_file.write('%d %d ' % tuple(mt.max_locs[channel_idx, max_idx]))
                info_file.write(filename + '\n')

            if not (do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm):
                continue

            with WithTimer('Load image & ROIs', quiet=not do_print):
                im = load_image(os.path.join(datadir, filename))
                _, _, rois = parse_roi_xml(os.path.join(datadir, roiname))
                rois = np.array(rois)
            with WithTimer('Predict   ', quiet=not do_print):
                net.blobs['data'].data[...] = im
                net.blobs['rois'].reshape(1, *rois.shape)
                net.blobs['rois'].data[...] = rois
                net.forward()

            if len(net.blobs[layer].data.shape) == 4:
                reproduced_val = net.blobs[layer].data[0, channel_idx, ii, jj]
            else:
                reproduced_val = net.blobs[layer].data[0, channel_idx]
            if abs(reproduced_val - recorded_val) > .1:
                print('Warning: recorded value %s is suspiciously different from reproduced value %s. Is the filelist the same?' % (recorded_val, reproduced_val))

            if do_maxes:
                # Grab image from data layer, not from im (to ensure preprocessing /
                # center crop details match between image and deconv/backprop)
                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].data[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                with WithTimer('Save img  ', quiet=not do_print):
                    save_caffe_image(out_arr, os.path.join(unit_dir, 'maxim_%03d.png' % max_idx_0),
                                     autoscale=False, autoscale_center=0)

            if do_deconv or do_deconv_norm:
                diffs = net.blobs[layer].diff * 0
                if len(diffs.shape) == 4:
                    diffs[0, channel_idx, ii, jj] = 1.0
                else:
                    diffs[0, channel_idx] = 1.0
                with WithTimer('Deconv    ', quiet=not do_print):
                    net.deconv_from_layer(layer, diffs)

                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].diff[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                if out_arr.max() == 0:
                    print('Warning: Deconv out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt')
                if do_deconv:
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'deconv_%03d.png' % max_idx_0),
                                         autoscale=False, autoscale_center=0)
                if do_deconv_norm:
                    out_arr = np.linalg.norm(out_arr, axis=0)
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'deconvnorm_%03d.png' % max_idx_0))

            if do_backprop or do_backprop_norm:
                diffs = net.blobs[layer].diff * 0
                # Mirror the deconv branch's shape check so fully-connected
                # layers work here too.
                if len(diffs.shape) == 4:
                    diffs[0, channel_idx, ii, jj] = 1.0
                else:
                    diffs[0, channel_idx] = 1.0
                with WithTimer('Backward  ', quiet=not do_print):
                    net.backward_from_layer(layer, diffs)

                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].diff[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                if out_arr.max() == 0:
                    print('Warning: Backprop out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt')
                if do_backprop:
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'backprop_%03d.png' % max_idx_0),
                                         autoscale=False, autoscale_center=0)
                if do_backprop_norm:
                    out_arr = np.linalg.norm(out_arr, axis=0)
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'backpropnorm_%03d.png' % max_idx_0))

        if do_info:
            info_file.close()
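# The ROI variant above assumes a parse_roi_xml helper. A minimal sketch that
# reads PASCAL VOC-style annotation XML and returns (width, height, rois), each
# ROI as [batch_index, xmin, ymin, xmax, ymax] -- the 5-column layout Caffe ROI
# pooling layers expect. The exact return signature is an assumption:
import xml.etree.ElementTree as ET

def parse_roi_xml(xml_path):
    root = ET.parse(xml_path).getroot()
    width = int(root.find('size/width').text)
    height = int(root.find('size/height').text)
    rois = []
    for obj in root.findall('object'):
        box = obj.find('bndbox')
        rois.append([0,  # batch index; 0 for a single-image batch
                     float(box.find('xmin').text),
                     float(box.find('ymin').text),
                     float(box.find('xmax').text),
                     float(box.find('ymax').text)])
    return width, height, rois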
def output_max_patches(max_tracker, net, layer, idx_begin, idx_end, num_top,
                       datadir, filelist, outdir, do_which):
    do_maxes, do_deconv, do_deconv_norm, do_backprop, do_backprop_norm, do_info = do_which
    assert do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm or do_info, 'nothing to do'

    mt = max_tracker
    rc = RegionComputer()

    image_filenames, image_labels = load_file_list(filelist)
    print 'Loaded filenames and labels for %d files' % len(image_filenames)
    print '  First file', os.path.join(datadir, image_filenames[0])

    num_top_in_mt = mt.max_locs.shape[1]
    assert num_top <= num_top_in_mt, 'Requested %d top images but MaxTracker contains only %d' % (num_top, num_top_in_mt)
    assert idx_end >= idx_begin, 'Range error'

    size_ii, size_jj = get_max_data_extent(net, layer, rc, mt.is_conv)
    data_size_ii, data_size_jj = net.blobs['data'].data.shape[2:4]

    n_total_images = (idx_end - idx_begin) * num_top
    for cc, channel_idx in enumerate(range(idx_begin, idx_end)):
        unit_dir = os.path.join(outdir, layer, 'unit_%04d' % channel_idx)
        mkdir_p(unit_dir)

        if do_info:
            info_filename = os.path.join(unit_dir, 'info.txt')
            info_file = open(info_filename, 'w')
            print >> info_file, '# is_conv val image_idx image_class i(if is_conv) j(if is_conv) filename'

        # Iterate through maxes from highest (at end) to lowest
        for max_idx_0 in range(num_top):
            max_idx = num_top_in_mt - 1 - max_idx_0
            if mt.is_conv:
                im_idx, im_class, ii, jj = mt.max_locs[channel_idx, max_idx]
            else:
                im_idx, im_class = mt.max_locs[channel_idx, max_idx]
            recorded_val = mt.max_vals[channel_idx, max_idx]
            filename = image_filenames[im_idx]
            do_print = (max_idx_0 == 0)
            if do_print:
                print '%s Output file/image(s) %d/%d' % (datetime.now().ctime(), cc * num_top, n_total_images)

            if mt.is_conv:
                # Compute the focus area of the data layer
                layer_indices = (ii, ii + 1, jj, jj + 1)
                data_indices = rc.convert_region(layer, 'data', layer_indices)
                data_ii_start, data_ii_end, data_jj_start, data_jj_end = data_indices

                touching_imin = (data_ii_start == 0)
                touching_jmin = (data_jj_start == 0)

                # Compute how much of the data slice falls outside the actual data [0,max] range
                ii_outside = size_ii - (data_ii_end - data_ii_start)  # possibly 0
                jj_outside = size_jj - (data_jj_end - data_jj_start)  # possibly 0

                if touching_imin:
                    out_ii_start = ii_outside
                    out_ii_end = size_ii
                else:
                    out_ii_start = 0
                    out_ii_end = size_ii - ii_outside
                if touching_jmin:
                    out_jj_start = jj_outside
                    out_jj_end = size_jj
                else:
                    out_jj_start = 0
                    out_jj_end = size_jj - jj_outside
            else:
                ii, jj = 0, 0
                data_ii_start, out_ii_start, data_jj_start, out_jj_start = 0, 0, 0, 0
                data_ii_end, out_ii_end, data_jj_end, out_jj_end = size_ii, size_ii, size_jj, size_jj

            if do_info:
                print >> info_file, 1 if mt.is_conv else 0, '%.6f' % mt.max_vals[channel_idx, max_idx],
                if mt.is_conv:
                    print >> info_file, '%d %d %d %d' % tuple(mt.max_locs[channel_idx, max_idx]),
                else:
                    print >> info_file, '%d %d' % tuple(mt.max_locs[channel_idx, max_idx]),
                print >> info_file, filename

            if not (do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm):
                continue

            with WithTimer('Load image', quiet=not do_print):
                im = caffe.io.load_image(os.path.join(datadir, filename))
            with WithTimer('Predict   ', quiet=not do_print):
                net.predict([im], oversample=False)  # Just take center crop, same as in scan_images_for_maxes

            if len(net.blobs[layer].data.shape) == 4:
                reproduced_val = net.blobs[layer].data[0, channel_idx, ii, jj]
            else:
                reproduced_val = net.blobs[layer].data[0, channel_idx]
            if abs(reproduced_val - recorded_val) > .1:
                print 'Warning: recorded value %s is suspiciously different from reproduced value %s. Is the filelist the same?' % (recorded_val, reproduced_val)

            if do_maxes:
                # Grab image from data layer, not from im (to ensure preprocessing /
                # center crop details match between image and deconv/backprop)
                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].data[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                with WithTimer('Save img  ', quiet=not do_print):
                    save_caffe_image(out_arr, os.path.join(unit_dir, 'maxim_%03d.png' % max_idx_0),
                                     autoscale=False, autoscale_center=0)

            if do_deconv or do_deconv_norm:
                diffs = net.blobs[layer].diff * 0
                if len(diffs.shape) == 4:
                    diffs[0, channel_idx, ii, jj] = 1.0
                else:
                    diffs[0, channel_idx] = 1.0
                with WithTimer('Deconv    ', quiet=not do_print):
                    net.deconv_from_layer(layer, diffs)

                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].diff[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                if out_arr.max() == 0:
                    print 'Warning: Deconv out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt'
                if do_deconv:
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'deconv_%03d.png' % max_idx_0),
                                         autoscale=False, autoscale_center=0)
                if do_deconv_norm:
                    out_arr = np.linalg.norm(out_arr, axis=0)
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'deconvnorm_%03d.png' % max_idx_0))

            if do_backprop or do_backprop_norm:
                diffs = net.blobs[layer].diff * 0
                # Mirror the deconv branch's shape check so fully-connected
                # layers work here too.
                if len(diffs.shape) == 4:
                    diffs[0, channel_idx, ii, jj] = 1.0
                else:
                    diffs[0, channel_idx] = 1.0
                with WithTimer('Backward  ', quiet=not do_print):
                    net.backward_from_layer(layer, diffs)

                out_arr = np.zeros((3, size_ii, size_jj), dtype='float32')
                out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = \
                    net.blobs['data'].diff[0, :, data_ii_start:data_ii_end, data_jj_start:data_jj_end]
                if out_arr.max() == 0:
                    print 'Warning: Backprop out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt'
                if do_backprop:
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'backprop_%03d.png' % max_idx_0),
                                         autoscale=False, autoscale_center=0)
                if do_backprop_norm:
                    out_arr = np.linalg.norm(out_arr, axis=0)
                    with WithTimer('Save img  ', quiet=not do_print):
                        save_caffe_image(out_arr, os.path.join(unit_dir, 'backpropnorm_%03d.png' % max_idx_0))

        if do_info:
            info_file.close()
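# mkdir_p, used by every variant above, is not shown here. The usual sketch
# (equivalent to `mkdir -p`: create intermediate directories, tolerate ones
# that already exist):
import errno
import os

def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise  # re-raise real errors (permissions, bad path, ...)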