def test_projection():
    launch_args = dict(
        scene=BLENDDIR / 'cam.blend',
        script=BLENDDIR / 'cam.blend.py',
        num_instances=1,
        named_sockets=['DATA'],
        background=True,
    )

    ortho_xy_expected = np.array([
        [480., 80],
        [480., 80],
        [480., 400],
        [480., 400],
        [160., 80],
        [160., 80],
        [160., 400],
        [160., 400],
    ])

    proj_xy_expected = np.array([
        [468.148, 91.851],
        [431.111, 128.888],
        [468.148, 388.148],
        [431.111, 351.111],
        [171.851, 91.851],
        [208.888, 128.888],
        [171.851, 388.148],
        [208.888, 351.111],
    ])

    z_expected = np.array([6., 8, 6, 8, 6, 8, 6, 8])

    with btt.BlenderLauncher(**launch_args) as bl:
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=2)
        item = next(iter(ds))

        assert_allclose(item['ortho_xy'], ortho_xy_expected, atol=1e-2)
        assert_allclose(item['ortho_z'], z_expected, atol=1e-2)
        assert_allclose(item['proj_xy'], proj_xy_expected, atol=1e-2)
        assert_allclose(item['proj_z'], z_expected, atol=1e-2)
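# The expected values above are consistent with simple orthographic and pinhole
# camera models. A minimal sketch that reproduces them; the cube geometry and
# intrinsics below are assumptions inferred from the numbers, not read from
# cam.blend:

import numpy as np
from itertools import product

# Cube corners with half-extent 2 at depths 6 and 8, principal point (320, 240).
corners = np.array(list(product((2., -2.), (2., -2.), (6., 8.))))
X, Y, Z = corners.T

f = 444.444  # assumed perspective focal length in pixels
s = 80.0     # assumed orthographic scale in pixels per world unit

proj_xy = np.stack([320 + f * X / Z, 240 - f * Y / Z], axis=1)  # perspective division by depth
ortho_xy = np.stack([320 + s * X, 240 - s * Y], axis=1)         # no depth dependence

# proj_xy matches proj_xy_expected and ortho_xy matches ortho_xy_expected to ~1e-2.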
def test_dataset_robustness():
    launch_args = dict(
        scene='',
        script=BLENDDIR / 'dataset_robust.blend.py',
        num_instances=2,
        named_sockets=['DATA'],
        background=True,
    )

    with btt.BlenderLauncher(**launch_args) as bl:
        addr = bl.launch_info.addresses['DATA']
        # Note, https://github.com/pytorch/pytorch/issues/44108
        ds = btt.RemoteIterableDataset(addr, max_items=128)
        dl = DataLoader(ds, batch_size=4, num_workers=0, drop_last=False, shuffle=False)

        count = 0
        ids = []
        for item in dl:
            assert item['img'].shape == (4, 64, 64)
            assert item['frameid'].shape == (4,)
            ids.append(item['btid'])
            count += 1
        assert count == 32

        # Instance 1 stops after a single item (see dataset_robust.blend.py);
        # the remaining 127 items all come from instance 0.
        ids = np.concatenate(ids)
        assert (ids == 1).sum() == 1
        assert (ids == 0).sum() == 127
def main():
    # Define how we want to launch Blender
    launch_args = dict(
        scene=Path(__file__).parent / 'compositor_normals_depth.blend',
        script=Path(__file__).parent / 'compositor_normals_depth.blend.py',
        num_instances=1,
        named_sockets=['DATA'],
    )

    # Launch Blender
    with btt.BlenderLauncher(**launch_args) as bl:
        # Create remote dataset and limit max length to 4 elements.
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=4)
        dl = data.DataLoader(ds, batch_size=4, num_workers=0)

        for item in dl:
            normals = item['normals']
            # Note, normals are color-coded (0..1); to convert back to the
            # original range (-1..1) use
            # true_normals = (normals - 0.5) * \
            #     torch.tensor([2., 2., -2.]).view(1, 1, 1, -1)
            depth = item['depth']
            print('Received', normals.shape, depth.shape, depth.dtype, np.ptp(depth))

            fig, axs = plt.subplots(2, 2)
            axs = np.asarray(axs).reshape(-1)
            for i in range(4):
                axs[i].imshow(depth[i, :, :, 0], vmin=1, vmax=2.5)
            fig, axs = plt.subplots(2, 2)
            axs = np.asarray(axs).reshape(-1)
            for i in range(4):
                axs[i].imshow(normals[i, :, :])
            plt.show()
def main():
    parser = argparse.ArgumentParser()
    # nargs='?' is required for a positional argument's default to take effect.
    parser.add_argument('scene', nargs='?', default='cube',
                        help='Blender scene name to run')
    args = parser.parse_args()

    launch_args = dict(scene=EXAMPLES_DIR / f'{args.scene}.blend',
                       script=EXAMPLES_DIR / f'{args.scene}.blend.py',
                       num_instances=INSTANCES,
                       named_sockets=['DATA'])

    with btt.BlenderLauncher(**launch_args) as bl:
        ds = btt.RemoteIterableDataset(bl.launch_info.addresses['DATA'])
        ds.stream_length(NUM_ITEMS)
        dl = data.DataLoader(ds,
                             batch_size=BATCH,
                             num_workers=WORKER_INSTANCES,
                             shuffle=False)

        # Wait to avoid timing startup times of Blender
        time.sleep(5)

        t0 = None
        tlast = None
        imgshape = None
        elapsed = []
        n = 0
        for item in dl:
            n += len(item['image'])
            if t0 is None:  # 1st is warmup
                t0 = time.time()
                tlast = t0
                imgshape = item['image'].shape
            elif n % (50 * BATCH) == 0:
                t = time.time()
                elapsed.append(t - tlast)
                tlast = t
                print('.', end='')
        assert n == NUM_ITEMS

        t1 = time.time()
        N = NUM_ITEMS - BATCH       # exclude the warmup batch from timing
        B = NUM_ITEMS // BATCH - 1
        print(
            f'Time {(t1-t0)/N:.3f}sec/image, {(t1-t0)/B:.3f}sec/batch, shape {imgshape}'
        )

        fig, _ = plt.subplots()
        plt.plot(np.arange(len(elapsed)), elapsed)
        plt.title('Receive times between 50 consecutive batches')
        save_path = EXAMPLES_DIR / 'tmp' / 'batches_elapsed.png'
        fig.savefig(str(save_path))
        plt.close(fig)
        print(f'Figure saved to {save_path}')
def test_launcher_connected_remote(tmp_path):
    # Simulates BlenderLauncher called from a separate process and
    # shows how one can connect to already launched instances through
    # serialization of addresses.
    q = mp.Queue()
    p = mp.Process(target=_launch, args=(q, tmp_path))
    p.start()

    path = q.get()
    launch_info = btt.LaunchInfo.load_json(path)
    ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
    items = [item for item in ds]
    _validate_result(items)
    p.join()
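# A sketch of what the _launch helper might look like; the save_json
# counterpart to LaunchInfo.load_json, its signature, and the wait() call
# are assumptions, not confirmed API:

def _launch(q, tmp_path):
    with btt.BlenderLauncher(**LAUNCH_ARGS) as bl:
        path = str(tmp_path / 'addresses.json')
        btt.LaunchInfo.save_json(path, bl.launch_info)  # assumed serialization API
        q.put(path)                                     # hand the path to the test process
        bl.wait()                                       # keep instances alive until they exit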
def main():
    import logging
    logging.basicConfig(level=logging.INFO)

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('scene', help='Blender scene name to run')
    parser.add_argument(
        '--replay',
        action='store_true',
        help='Replay from disk instead of launching from Blender')
    parser.add_argument('--record',
                        action='store_true',
                        help='Record raw blender data')
    args = parser.parse_args()

    with ExitStack() as es:
        if not args.replay:
            # Launch Blender instance. Upon exit of this script all Blender
            # instances will be closed.
            bl = es.enter_context(
                btt.BlenderLauncher(
                    scene=Path(__file__).parent / f'{args.scene}.blend',
                    script=Path(__file__).parent / f'{args.scene}.blend.py',
                    num_instances=BLENDER_INSTANCES,
                    named_sockets=['DATA'],
                ))

            # Setup a streaming dataset
            ds = btt.RemoteIterableDataset(bl.launch_info.addresses['DATA'],
                                           item_transform=item_transform)
            # Iterable datasets do not support shuffle
            shuffle = False

            # Limit the total number of streamed elements
            ds.stream_length(64)

            # Setup raw recording if desired
            if args.record:
                ds.enable_recording(f'./tmp/record_{args.scene}')
        else:
            # Otherwise we replay from file.
            ds = btt.FileDataset(f'./tmp/record_{args.scene}',
                                 item_transform=item_transform)
            shuffle = True

        # Setup DataLoader and iterate
        dl = DataLoader(ds,
                        batch_size=BATCH,
                        num_workers=WORKER_INSTANCES,
                        shuffle=shuffle)
        iterate(dl)
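# The item_transform referenced above is defined elsewhere in the example
# file. A minimal sketch of what such a per-item transform might look like;
# the 'image' key and HxWxC uint8 layout are assumptions, not taken from the
# example:

import torch

def item_transform(item):
    # Convert the received HxWxC uint8 image into a CxHxW float tensor in [0, 1],
    # the layout PyTorch models typically expect.
    img = torch.from_numpy(item['image'])
    item['image'] = img.permute(2, 0, 1).float() / 255.0
    return item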
def test_launcher_app(tmp_path):
    p = mp.Process(target=_launch_app, args=(tmp_path, LAUNCH_ARGS))
    p.start()

    import time
    path = tmp_path / 'launchinfo.json'
    while not path.exists():
        time.sleep(1)

    launch_info = btt.LaunchInfo.load_json(path)
    ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
    items = [item for item in ds]
    _validate_result(items)
    p.join()
def test_compositor():
    launch_args = dict(
        scene=BLENDDIR / 'compositor.blend',
        script=BLENDDIR / 'compositor.blend.py',
        num_instances=1,
        named_sockets=['DATA'],
        background=True,
    )

    expected_color = np.full((200, 320, 3), (0, 1, 0), dtype=np.float32)
    expected_depth = np.full((200, 320, 1), 2.0, dtype=np.float32)

    with btt.BlenderLauncher(**launch_args) as bl:
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=1)
        item = next(iter(ds))

        assert_allclose(item['color'], expected_color)
        assert_allclose(item['depth'], expected_depth)
def test_launcher_app_primaryip(tmp_path):
    # Same as above, but resolving the bind address via the primary-ip resolver.
    args = copy.deepcopy(LAUNCH_ARGS)
    args['bind_addr'] = 'primaryip'

    p = mp.Process(target=_launch_app, args=(tmp_path, args))
    p.start()

    import time
    path = tmp_path / 'launchinfo.json'
    while not path.exists():
        time.sleep(1)

    launch_info = btt.LaunchInfo.load_json(path)
    print(launch_info.addresses)

    ds = btt.RemoteIterableDataset(launch_info.addresses['DATA'], max_items=2)
    items = [item for item in ds]
    _validate_result(items)
    p.join()
def main():
    # Define how we want to launch Blender
    launch_args = dict(
        scene=Path(__file__).parent / 'cube.blend',
        script=Path(__file__).parent / 'cube.blend.py',
        num_instances=2,
        named_sockets=['DATA'],
    )

    # Launch Blender
    with btt.BlenderLauncher(**launch_args) as bl:
        # Create remote dataset and limit max length to 16 elements.
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=16)
        dl = data.DataLoader(ds, batch_size=4, num_workers=4)

        for item in dl:
            # item is a dict with custom content (see cube.blend.py)
            img, xy = item['image'], item['xy']
            print('Received', img.shape, xy.shape)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--scene',
                        help='Blender scene name to run',
                        default='cube')
    args = parser.parse_args()

    launch_args = dict(scene=EXAMPLES_DIR / f'{args.scene}.blend',
                       script=EXAMPLES_DIR / f'{args.scene}.blend.py',
                       num_instances=INSTANCES,
                       named_sockets=['DATA'])

    with btt.BlenderLauncher(**launch_args) as bl:
        ds = btt.RemoteIterableDataset(bl.launch_info.addresses['DATA'])
        ds.stream_length(NUM_ITEMS)
        dl = data.DataLoader(ds,
                             batch_size=BATCH,
                             num_workers=WORKER_INSTANCES,
                             shuffle=False)

        # Wait to avoid timing startup times of Blender
        time.sleep(5)

        t0 = None
        imgshape = None
        n = 0
        for item in dl:
            if t0 is None:  # 1st is warmup
                t0 = time.time()
                imgshape = item['image'].shape
            n += len(item['image'])
        assert n == NUM_ITEMS

        t1 = time.time()
        N = NUM_ITEMS - BATCH
        B = NUM_ITEMS // BATCH - 1
        print(
            f'Time {(t1-t0)/N:.3f}sec/image, {(t1-t0)/B:.3f}sec/batch, shape {imgshape}'
        )
def test_dataset():
    launch_args = dict(
        scene='',
        script=BLENDDIR / 'dataset.blend.py',
        num_instances=1,
        named_sockets=['DATA'],
        background=True,
    )

    with btt.BlenderLauncher(**launch_args) as bl:
        addr = bl.launch_info.addresses['DATA']
        # Note, https://github.com/pytorch/pytorch/issues/44108
        ds = btt.RemoteIterableDataset(addr, max_items=16)
        dl = DataLoader(ds, batch_size=4, num_workers=4, drop_last=False, shuffle=False)

        count = 0
        for item in dl:
            assert item['img'].shape == (4, 64, 64)
            assert item['frameid'].shape == (4,)
            count += 1
        assert count == 4
def test_launcher():
    with btt.BlenderLauncher(**LAUNCH_ARGS) as bl:
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=2)
        items = [item for item in ds]
        _validate_result(items)
def run(args):
    # Define how we want to launch Blender
    launch_args = dict(
        scene=Path(__file__).parent / 'supershape.blend',
        script=Path(__file__).parent / 'supershape.blend.py',
        num_instances=SIM_INSTANCES,
        named_sockets=['DATA', 'CTRL'],
    )

    # Create an untrained discriminator.
    dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    netD = Discriminator().to(dev)

    # Launch Blender
    with btt.BlenderLauncher(**launch_args) as bl:
        # Create remote dataset
        addr = bl.launch_info.addresses['DATA']
        sim_ds = btt.RemoteIterableDataset(addr, item_transform=item_transform)
        sim_dl = data.DataLoader(sim_ds, batch_size=BATCH, num_workers=0, shuffle=False)

        # Create a control channel to each Blender instance. We use this channel
        # to communicate new shape parameters to be rendered.
        addr = bl.launch_info.addresses['CTRL']
        remotes = [btt.DuplexChannel(a) for a in addr]

        # Fetch images of the target distribution. In the following we assume
        # the target distribution to be unknown.
        if args.random_start:
            mu_m1m2_target = np.random.uniform(0.0, 3, size=2).astype(np.float32)
        else:
            mu_m1m2_target = [DEFAULT_MEAN_TARGET, DEFAULT_MEAN_TARGET]
        std_m1m2_target = [DEFAULT_STD_TARGET, DEFAULT_STD_TARGET]
        print('Target params:', mu_m1m2_target, std_m1m2_target)
        target_ds = get_target_images(sim_dl, remotes, mu_m1m2_target,
                                      std_m1m2_target, n=BATCH)
        target_dl = data.DataLoader(target_ds, batch_size=BATCH, num_workers=0,
                                    shuffle=True)

        # Initial simulation parameters. The parameters in mean and std are off
        # from the target distribution parameters. Note that we especially
        # enlarge the scale of the initial distribution to get explorative
        # behaviour in the beginning.
        if args.random_start:
            mu_m1m2 = np.asarray(mu_m1m2_target) + np.random.randn(2)
        else:
            mu_m1m2 = [1.2, 3.0]
        std_m1m2 = [0.4, 0.4]
        pm = ProbModel(mu_m1m2, std_m1m2)

        # Setup discriminator and simulation optimizer
        optD = optim.Adam(netD.parameters(), lr=5e-5, betas=(0.5, 0.999))
        optS = optim.Adam(pm.parameters(), lr=5e-2, betas=(0.7, 0.999))

        # Get generators for image batches from target and simulation.
        gen_real = infinite_batch_generator(target_dl)
        gen_sim = infinite_batch_generator(sim_dl)
        crit = nn.BCELoss(reduction='none')

        epoch = 0
        b = 0.7  # Baseline to reduce variance of gradient estimator.
        first = True
        param_history = []

        # Send instructions to render supershapes from the starting point.
        samples = pm.sample(BATCH)
        update_simulations(remotes, pm.to_supershape(samples))

        for (real, sim) in zip(gen_real, gen_sim):
            ### Train the discriminator from target and simulation images.
            label = torch.full((BATCH,), TARGET_LABEL, dtype=torch.float32, device=dev)
            netD.zero_grad()
            target_img = real[0].to(dev)
            output = netD(target_img)
            errD_real = crit(output, label)
            errD_real.mean().backward()
            D_real = output.mean().item()

            sim_img, sim_shape_id = sim
            sim_img = sim_img.to(dev)
            label.fill_(SIM_LABEL)
            output = netD(sim_img)
            errD_sim = crit(output, label)
            errD_sim.mean().backward()
            D_sim = output.mean().item()
            if (D_real - D_sim) < 0.7:
                optD.step()
                print('D step: mean real', D_real, 'mean sim', D_sim)

            ### Optimize the simulation parameters.
            # We update the simulation parameters once the discriminator has
            # started to converge. Note that unlike in GANs the generator
            # (simulation) gives meaningful output from the very beginning, so
            # we give the discriminator some time to adjust and avoid spurious
            # signals in gradient estimation of the simulation parameters.
            #
            # Note, the rendering function is considered a black-box and we
            # cannot propagate through it. Therefore we reformulate the
            # optimization as minimization of an expectation with the parameters
            # in the distribution the expectation runs over. Using score-function
            # gradients permits gradient-based optimization _without_ access to
            # gradients of the render function.
            if not first or (D_real - D_sim) >= 0.7:
                optS.zero_grad()
                label.fill_(TARGET_LABEL)
                with torch.no_grad():
                    output = netD(sim_img)
                    errS_sim = crit(output, label)
                    GD_sim = output.mean().item()

                log_probs = pm.log_prob(samples)
                loss = log_probs[sim_shape_id] * (errS_sim.cpu() - b)
                loss.mean().backward()
                optS.step()

                if first:
                    b = errS_sim.mean()
                else:
                    b = BASELINE_ALPHA * errS_sim.mean() + (1 - BASELINE_ALPHA) * b

                print('S step:', pm.m1m2_mean.detach().numpy(),
                      torch.exp(pm.m1m2_log_std).detach().numpy(),
                      'mean sim', GD_sim)
                first = False
                del log_probs, loss

            # Generate shapes/images according to updated parameters.
            samples = pm.sample(BATCH)
            update_simulations(remotes, pm.to_supershape(samples))

            # Bookkeeping
            param_history.append(pm.readable_params())
            epoch += 1
            if epoch % 5 == 0:
                vutils.save_image(target_img, 'tmp/real_%03d.png' % (epoch),
                                  normalize=True)
                vutils.save_image(sim_img, 'tmp/sim_samples_%03d.png' % (epoch),
                                  normalize=True)
            if epoch > args.num_epochs:
                # Append true target
                target = torch.tensor(
                    np.concatenate((mu_m1m2_target, std_m1m2_target))).float()
                print('Abs.Diff to true params', abs(target - param_history[-1]))
                param_history.append(target)
                break

    param_history = torch.stack(param_history)
    return param_history
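# The score-function (REINFORCE) estimator used above can be illustrated in
# isolation. A minimal, self-contained sketch; blackbox_score is a purely
# hypothetical stand-in for the Blender render + discriminator pipeline:

import torch
from torch.distributions import Normal

def blackbox_score(x):
    # Non-differentiable black box: we can evaluate it but not backprop
    # through it (here simply a quadratic with optimum at x = 2).
    return (x.detach() - 2.0) ** 2

mu = torch.tensor([0.0], requires_grad=True)
log_std = torch.tensor([0.0], requires_grad=True)
opt = torch.optim.Adam([mu, log_std], lr=5e-2)

b = 0.0  # running baseline to reduce gradient variance
for step in range(500):
    d = Normal(mu, torch.exp(log_std))
    x = d.sample((64,))   # .sample() detaches, mimicking the black-box boundary
    f = blackbox_score(x)
    # grad E[f(x)] = E[f(x) * grad log p(x)]; subtracting the baseline b
    # leaves the estimator unbiased because E[grad log p(x)] = 0.
    loss = (d.log_prob(x) * (f - b)).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    b = 0.9 * b + 0.1 * f.mean().item()

print(mu.item())  # converges towards 2.0 without gradients of blackbox_score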