def save_samples(data, bits, outfname):
    """Stamp the ground-truth bits into the top rows of each sample, then save a tiled image.

    NOTE(review): an identical redefinition of this function appears directly
    below in this file; at import time the later definition shadows this one.

    Args:
        data: array of samples, mutated in place. The stripe math assumes a
            width of 64 pixels -- TODO confirm shape is (n, H, 64).
        bits: array of shape (n, nb_bits) with values in {0, 1}.
        outfname: path of the image file to write.
    """
    print("bits", bits.shape)
    nb_bits = bits.shape[1]
    # stripe width is loop-invariant: hoisted out of the per-sample loop
    s = 64 // nb_bits
    for i in range(len(data)):
        for j in range(nb_bits):
            # encode bit j as a -1/+1 stripe in the top s rows of sample i
            data[i, :s, s * j:s * (j + 1)] = 2 * bits[i, j] - 1
    image_save(outfname, np.clip(tile(data), -1, 1))
def save_samples(data, bits, outfname):
    """Stamp the ground-truth bits into the top rows of each sample, then save a tiled image.

    NOTE(review): this duplicates the definition directly above -- one of the
    two should be deleted; this later one is the definition that takes effect.

    Args:
        data: array of samples, mutated in place. The stripe math assumes a
            width of 64 pixels -- TODO confirm shape is (n, H, 64).
        bits: array of shape (n, nb_bits) with values in {0, 1}.
        outfname: path of the image file to write.
    """
    print("bits", bits.shape)
    nb_bits = bits.shape[1]
    # stripe width is loop-invariant: hoisted out of the per-sample loop
    s = 64 // nb_bits
    for i in range(len(data)):
        for j in range(nb_bits):
            # encode bit j as a -1/+1 stripe in the top s rows of sample i
            data[i, :s, s * j:s * (j + 1)] = 2 * bits[i, j] - 1
    image_save(outfname, np.clip(tile(data), -1, 1))
def save_real_images(real_hdf5_fname, output_dir, nb_visualise=20**2):
    """Draw `nb_visualise` real samples from the HDF5 file and save them as one tiled PNG."""
    batch = next(train_data_generator(real_hdf5_fname, nb_visualise, 10))
    real = batch['data']
    # debug output: batch shape and value range
    print(real.shape)
    print(real.max())
    print(real.min())
    tiled = tile(real)
    fname = join(output_dir, 'real_{}.png'.format(nb_visualise))
    image_save(fname, tiled[0])
def save_first_frame(video_fname, output_dir, force):
    """Extract the first frame of a video and save it as a PNG.

    If the output file already exists the extraction is skipped,
    unless `force` is truthy.
    """
    camIdx, start, _ = parse_video_fname(video_fname)
    outname = os.path.join(output_dir, get_fname(camIdx, start) + ".png")
    if os.path.exists(outname) and not force:
        return
    frames = raw_frames_generator(video_fname, format='guess_on_ext')
    first = next(frames)
    assert first.dtype == np.uint8
    image_save(outname, first)
def test_image_save(tmpdir):
    """image_save must accept 1-channel, 3-channel and plain 2-d arrays,
    reject other channel counts, and honour the `low`/`high` scaling args."""
    # Each accepted shape gets its own output file.  Previously all three
    # saves wrote "one_channel.png", overwriting each other and making the
    # filenames misleading.
    x = np.random.random((1, 64, 64))
    image_save(str(tmpdir.join("one_channel.png")), x)
    x = np.random.random((3, 64, 64))
    image_save(str(tmpdir.join("three_channel.png")), x)
    x = np.random.random((64, 64))
    image_save(str(tmpdir.join("no_channel.png")), x)
    # an unsupported channel count must raise
    with pytest.raises(Exception):
        x = np.random.random((100, 64, 64))
        image_save(str(tmpdir.join("too_many_channels.png")), x)

    # test scale: data in [0.1, 0.6] saved with low=0, high=1 must
    # round-trip to (approximately) the same value range
    x = 0.5*np.linspace(0, 1, 64*64).reshape((64, 64)) + 0.1
    image_save(str(tmpdir.join("scale.png")), x, low=0, high=1)
    loaded_x = imread(str(tmpdir.join("scale.png")))
    assert 0.1 <= x.min()
    # one quantisation step (1/256) of tolerance for the 8-bit round-trip
    assert np.abs(0.1 - (loaded_x / 255.).min()) <= 1/256
    assert (loaded_x / 255.).max() <= 0.6
def visualize_detection_tiles(dset, name, n=20**2):
    """Render up to `n` randomly chosen tags twice -- raw and with the crown
    visualisation overlayed -- and save each set as a tiled image."""
    crown_vis = ResultCrownVisualizer()
    raw_tags = []
    overlayed_tags = []
    nb_tags = len(dset['tags'])
    n = min(nb_tags, n)
    indices = np.arange(nb_tags)
    np.random.shuffle(indices)
    for idx in indices[:n]:
        # crown is drawn at the image centre
        position = np.array([dset['tags'][0, 0].shape]) / 2
        tag = dset['tags'][idx, 0]
        # map bits from {-1, 1} to {0, 1}
        bits = (dset['bits'][idx:idx + 1] + 1) / 2.
        overlay = crown_vis(tag, position, np.zeros((1, 3)), bits)[0]
        overlayed = crown_vis.add_overlay((tag + 1) / 2, overlay)
        raw_tags.append(tag)
        overlayed_tags.append(2 * overlayed - 1)
    tiled_raw = tile([img.swapaxes(0, -1) for img in raw_tags])
    tiled_overlayed = tile([img.swapaxes(0, -1) for img in overlayed_tags])
    image_save(name + '_raw.png', tiled_raw)
    image_save(name + '_overlayed.png', tiled_overlayed)
def test_stack_augmentations(outdir, datadir):
    """Save tiled images of a real/fake batch and of each augmentation stage."""
    # NOTE(review): hardcoded absolute path to a developer machine -- this
    # test can only run where that file exists; the fixture should ship with
    # the test data instead.
    fname = "/home/leon/uni/bachelor/deepdecoder/test/data/00350.hdf5"
    myconfig = config.load(str(datadir.join('augmentation.yml')))
    dset = DistributionHDF5Dataset(fname)
    batch = next(dset.iter(15**2))
    image_save(str(outdir.join('real.png')), tile(batch['real']))
    image_save(str(outdir.join('fake.png')),
               np.clip(tile(batch['fake']), -1, 1))
    stages = ['tag3d', 'tag3d_lighten', 'fake_without_noise', 'fake']
    for i, name in enumerate(stages):
        augment = stack_augmentations(name, myconfig)
        xs = augment(batch)
        outname = '{}_{}_aug_tiled.png'.format(i, name)
        image_save(str(outdir.join(outname)), tile(xs))