# NOTE(review): this chunk is whitespace-mangled — a multi-line section (a three-line
# comment, PyTorch3D renderer setup, matplotlib display calls, and an
# `if __name__ == '__main__':` driver) has been collapsed onto one physical line.
# As written this is not valid Python; restore the original line breaks and
# indentation from version control before attempting any behavioral change.
# Apparent intent, read from the visible tokens only:
#   * build a MeshRenderer from MeshRasterizer(cameras, raster_settings) and
#     SoftPhongShader(device, cameras, lights), render `mesh`, and display
#     img[0] via plt.imshow — the renderer statements may originally have been
#     indented inside a function whose `def` line is outside this view; confirm.
#   * the __main__ block loads a hard-coded .erfh5 result file with
#     DataLoaderMesh (sensor_verts_path also hard-coded to /home/lukas — TODO:
#     parameterize), takes sample = data[150][1], builds mesh components with
#     MeshCreator, and visualizes them with show_vedo_mesh_old; save_p3d_mesh
#     is commented out.
# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will # interpolate the texture uv coordinates for each vertex, sample from a texture image and # apply the Phong lighting model renderer = MeshRenderer(rasterizer=MeshRasterizer( cameras=cameras, raster_settings=raster_settings), shader=SoftPhongShader(device=device, cameras=cameras, lights=lights)) img = renderer(mesh) plt.figure(figsize=(10, 10)) plt.imshow(img[0].cpu().numpy()) plt.show() if __name__ == '__main__': file = Path( "/home/lukas/rtm/rtm_files_3d/2020-08-24_11-20-27_111_RESULT.erfh5") from Pipeline.data_loader_mesh import DataLoaderMesh sensor_verts_path = Path( "/home/lukas/rtm/sensor_verts_3d_272_subsampled.dump") dl = DataLoaderMesh(sensor_verts_path=sensor_verts_path) data = dl.get_sensor_flowfront_mesh(file) sample = data[150][1] mc = MeshCreator(file) verts, faces, _ = mc.get_mesh_components() show_vedo_mesh_old(verts, faces, sample) # save_p3d_mesh(verts, faces, sample) pass
# NOTE(review): also whitespace-mangled — one physical line holding the tail of a
# function (`return x`; its `def` starts before this view, so its body cannot be
# reconstructed from here) plus a second `if __name__ == '__main__':` driver.
# Not valid Python as written; recover the original formatting from version
# control before editing behavior.
# Apparent intent of the __main__ block, read from the visible tokens only:
#   * build a batched mesh (bs=4) from a hard-coded .erfh5 file via
#     DataLoaderMesh, construct SensorMeshToDryspotResnet(mesh, bs).cuda()
#     (two alternative models are commented out), slice the first `bs`
#     (data, label) pairs, stack them into tensors, then run 500 forward
#     passes under tqdm — presumably a throughput benchmark; confirm.
# NOTE(review): `lables = torch.Tensor(labels)` looks like a typo — the
# variable is never read afterwards and, unlike `data`, is not moved to CUDA;
# presumably `labels = torch.Tensor(labels).cuda()` was intended — confirm
# against the original file before fixing.
return x if __name__ == '__main__': from Pipeline.data_loader_mesh import DataLoaderMesh from pathlib import Path dl = DataLoaderMesh( sensor_verts_path=Path("/home/lukas/rtm/sensor_verts.dump")) file = Path( "/home/lukas/rtm/rtm_files/2019-07-24_16-32-40_308_RESULT.erfh5") bs = 4 mesh = dl.get_batched_mesh_torch(bs, file) # model = SensorMeshToFlowFrontModel(mesh) # model = SensorMeshToDryspotModel(mesh, bs).cuda() model = SensorMeshToDryspotResnet(mesh, bs).cuda() instances = dl.get_sensor_flowfront_mesh(file) data, labels = [], [] batch = instances[0:bs] for d, l in batch: data.append(d) labels.append(l) data = torch.Tensor(data).cuda() lables = torch.Tensor(labels) for i in tqdm(range(500)): output = model(data)