def main():
    logger = VisdomLogger("train", env=JOB)
    logger.add_hook(lambda logger, data: logger.step(), feature="energy", freq=16)
    logger.add_hook(
        lambda logger, data: logger.plot(data["energy"], "free_energy"),
        feature="energy", freq=100,
    )

    task_list = [
        tasks.rgb,
        tasks.normal,
        tasks.principal_curvature,
        tasks.sobel_edges,
        tasks.depth_zbuffer,
        tasks.reshading,
        tasks.edge_occlusion,
        tasks.keypoints3d,
        tasks.keypoints2d,
    ]
    reality = RealityTask(
        'ood',
        dataset=ImagePairDataset(data_dir=OOD_DIR, resize=(256, 256)),
        tasks=[tasks.rgb, tasks.rgb],
        batch_size=28,
    )
    # reality = RealityTask(
    #     'almena',
    #     dataset=TaskDataset(buildings=['almena'], tasks=task_list),
    #     tasks=task_list,
    #     batch_size=8,
    # )
    graph = TaskGraph(tasks=[reality, *task_list], batch_size=28)

    # Translate the OOD RGB batch along every outgoing edge, then map each
    # intermediate prediction on to surface normals before logging.
    task = tasks.rgb
    images = [reality.task_data[task]]
    sources = [task.name]
    for _, edge in sorted((edge.dest_task.name, edge) for edge in graph.adj[task]):
        if isinstance(edge.src_task, RealityTask):
            continue
        x = edge(reality.task_data[edge.src_task])
        if edge.dest_task != tasks.normal:
            edge2 = graph.edge_map[(edge.dest_task.name, tasks.normal.name)]
            x = edge2(x)
        images.append(x.clamp(min=0, max=1))
        sources.append(edge.dest_task.name)

    logger.images_grouped(images, ", ".join(sources), resize=256)
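# The add_hook calls above register callbacks that fire every `freq`-th update of a
# named feature. A minimal standalone sketch of that pattern follows; `HookLogger`
# is hypothetical and only illustrates the callback/frequency idea, it is not the
# project's VisdomLogger.
from collections import defaultdict

class HookLogger:
    def __init__(self):
        self.hooks = []
        self.counts = defaultdict(int)
        self.data = defaultdict(list)

    def add_hook(self, fn, feature, freq):
        # fn(logger, data) runs on every `freq`-th update of `feature`
        self.hooks.append((fn, feature, freq))

    def update(self, feature, value):
        self.data[feature].append(value)
        self.counts[feature] += 1
        for fn, feat, freq in self.hooks:
            if feat == feature and self.counts[feature] % freq == 0:
                fn(self, self.data)

    def step(self):
        print("step")

    def plot(self, values, name):
        print(f"plot {name}: last value = {values[-1]}")

if __name__ == "__main__":
    demo = HookLogger()
    demo.add_hook(lambda logger, data: logger.step(), feature="energy", freq=4)
    demo.add_hook(lambda logger, data: logger.plot(data["energy"], "free_energy"), feature="energy", freq=8)
    for i in range(16):
        demo.update("energy", float(i))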
def main():
    logger = VisdomLogger("train", env=JOB)
    logger.add_hook(lambda logger, data: logger.step(), feature="energy", freq=16)
    logger.add_hook(
        lambda logger, data: logger.plot(data["energy"], "free_energy"),
        feature="energy", freq=100,
    )

    task_list = [
        tasks.rgb,
        tasks.normal,
        tasks.principal_curvature,
        tasks.sobel_edges,
        tasks.depth_zbuffer,
        tasks.reshading,
        tasks.edge_occlusion,
        tasks.keypoints3d,
        tasks.keypoints2d,
    ]
    reality = RealityTask(
        'almena',
        dataset=TaskDataset(
            buildings=['almena'],
            tasks=task_list,
        ),
        tasks=task_list,
        batch_size=8,
    )
    graph = TaskGraph(tasks=[reality, *task_list], batch_size=8)

    for task in graph.tasks:
        if isinstance(task, RealityTask):
            continue
        images = [reality.task_data[task].clamp(min=0, max=1)]
        sources = [task.name]
        for _, edge in sorted((edge.src_task.name, edge) for edge in graph.in_adj[task]):
            if isinstance(edge.src_task, RealityTask):
                continue
            x = edge(reality.task_data[edge.src_task])
            images.append(x.clamp(min=0, max=1))
            sources.append(edge.src_task.name)
        logger.images_grouped(images, ", ".join(sources), resize=192)
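# The loop above clamps each per-edge prediction into [0, 1] and logs one row of
# images per source task. A rough sketch of how such a grouped grid could be
# assembled with torchvision's make_grid follows; `save_grouped` is a hypothetical
# helper for illustration, not the repo's logger.images_grouped.
import torch
from torchvision.utils import make_grid, save_image

def save_grouped(images, names, path):
    # images: list of [B, 3, H, W] tensors, one entry (grid row) per source task
    rows = [img.clamp(0, 1) for img in images]
    batch = torch.cat(rows, dim=0)                  # stack all rows into one batch
    grid = make_grid(batch, nrow=rows[0].shape[0])  # one source task per grid row
    save_image(grid, path)
    print("saved", path, "rows:", ", ".join(names))

if __name__ == "__main__":
    imgs = [torch.rand(4, 3, 64, 64) for _ in range(3)]
    save_grouped(imgs, ["normal", "curvature", "edges"], "grid.png")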
# `data`, `dataset`, and `logger` are set up earlier in this script (not shown here).
target = [
    dataset[0][1][:, 0:3],
    dataset[1][1][:, 0:3],
    dataset[2][1][:, 0:3],
    dataset[3][1][:, 0:3],
    dataset[4][1][:, 0:3],
    dataset[5][1][:, 0:3],
]
images = torch.cat(data, dim=0)
targets = torch.cat(target, dim=0)
if targets.shape[1] == 1:
    targets = torch.cat([targets] * 3, dim=1)
print(targets.shape)

images = [images, targets]
images += [
    run_viz_suite(
        "imagepercep", data,
        model_file=f"{MODELS_DIR}/rgb2normal_imagepercep.pth",
        logger=logger, percep_mode=False,
    )
]
# images += [run_viz_suite("unet-b", data, graph_file=f"{SHARED_DIR}/results_SAMPLEFF_full_data_baseline_3/graph.pth", logger=logger)]
# images += [run_viz_suite("unet-pc", data, model_file=f"{MODELS_DIR}/unet_percepstep_0.1.pth", logger=logger, old=True)]
# images += [run_viz_suite("geonet", data, graph_file=f"{SHARED_DIR}/results_geonet_lr1e5_1gpu_2/graph.pth", logger=logger)]
# run_eval_suite("unet-b-1m", model_file=f"{SHARED_DIR}/results_SAMPLEFF_baseline1m_3/n.pth", logger=logger)
# run_eval_suite("unet-pc-1m", graph_file=f"{SHARED_DIR}/results_SAMPLEFF_consistency1m_25/graph.pth", logger=logger)

for image in images:
    print(image.shape)
logger.images_grouped(images, "imagepercep", resize=320)
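# The single-channel case above is broadcast to three channels with
# torch.cat([targets] * 3, dim=1). A short sketch of an equivalent,
# allocation-free alternative using expand() (illustrative only):
import torch

targets = torch.rand(8, 1, 256, 256)
if targets.shape[1] == 1:
    # expand() replicates the size-1 channel dim without copying memory;
    # call .contiguous() afterwards if a writable copy is needed.
    targets = targets.expand(-1, 3, -1, -1)
assert targets.shape == (8, 3, 256, 256)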