def point_cloud_examples(x, y, predictions, splits):
    input_data = x[0].numpy()
    points = input_data[:, :3]

    ## Create y label point cloud
    # Wrap for concat
    y_numpy = y.cpu().numpy()
    y_labels = np.expand_dims(y_numpy, axis=1)
    points_with_y_label = np.concatenate((points, y_labels), axis=1)

    ## Transform loss predictions into a point cloud with labels
    predictions_numpy = predictions.detach().cpu().numpy()
    predictions_highest = predictions_numpy.argmax(axis=1)
    predictions_labels = np.expand_dims(predictions_highest, axis=1)
    points_with_predictions_label = np.concatenate(
        (points, predictions_labels), axis=1)

    # Note: if `splits` sums to the full point count, np.split on the full
    # cumsum yields a trailing empty array; slicing with np.cumsum(splits)[:-1]
    # would avoid that.
    idxs = np.cumsum(splits)
    xs = np.split(input_data, idxs)
    ys = np.split(points_with_y_label, idxs)
    ps = np.split(points_with_predictions_label, idxs)

    return {
        "x": [wandb.Object3D(x) for x in xs],
        "y": [wandb.Object3D(y) for y in ys],
        "predictions": [wandb.Object3D(p) for p in ps]
    }
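# Usage sketch (assumes x, y, predictions, and splits follow the shapes used
# above; "ground_truth" and "predicted" are hypothetical panel names):
clouds = point_cloud_examples(x, y, predictions, splits)
wandb.log({"ground_truth": clouds["y"], "predicted": clouds["predictions"]})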
def test_object3d_seq_to_json(mocked_run):
    objs = [
        wandb.Object3D(utils.fixture_open("Box.gltf")),
        wandb.Object3D(utils.fixture_open("cube.obj")),
        wandb.Object3D(point_cloud_1),
    ]
    for o in objs:
        o.bind_to_run(mocked_run, "pc", 1)
    obj = wandb.Object3D.seq_to_json(objs, mocked_run, "pc", 1)

    box = obj["filenames"][0]
    cube = obj["filenames"][1]
    pts = obj["filenames"][2]
    assert os.path.exists(
        os.path.join(mocked_run.dir, "media", "object3D", box))
    assert os.path.exists(
        os.path.join(mocked_run.dir, "media", "object3D", cube))
    assert os.path.exists(
        os.path.join(mocked_run.dir, "media", "object3D", pts))

    assert obj["_type"] == "object3D"
    assert obj["filenames"] == [
        box,
        cube,
        pts,
    ]
def test_object3d_seq_to_json():
    cwd = os.getcwd()
    with CliRunner().isolated_filesystem():
        run = wandb.wandb_run.Run()
        obj = wandb.Object3D.seq_to_json([
            wandb.Object3D(open(os.path.join(cwd, "tests/fixtures/Box.gltf"))),
            wandb.Object3D(open(os.path.join(cwd, "tests/fixtures/cube.obj"))),
            wandb.Object3D(point_cloud_1)
        ], run, "pc", 1)
        print(obj)
        assert os.path.exists(
            os.path.join(run.dir, "media/object3D/Box_be115756.gltf"))
        assert os.path.exists(
            os.path.join(run.dir, "media/object3D/cube_afff12bc.obj"))
        assert os.path.exists(
            os.path.join(run.dir, "media/object3D/pc_1_2.pts.json"))

        assert obj["_type"] == "object3D"
        assert obj["filenames"] == [
            "Box_be115756.gltf",
            "cube_afff12bc.obj",
            "pc_1_2.pts.json",
        ]
def test_object3d_numpy():
    obj = wandb.Object3D(point_cloud_1)
    np.testing.assert_array_equal(obj.numpyData, point_cloud_1)

    obj = wandb.Object3D(point_cloud_2)
    np.testing.assert_array_equal(obj.numpyData, point_cloud_2)

    obj = wandb.Object3D(point_cloud_3)
    np.testing.assert_array_equal(obj.numpyData, point_cloud_3)
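# For reference: wandb.Object3D accepts numpy point clouds whose second
# dimension is 3 (x, y, z), 4 (x, y, z, category), or 6 (x, y, z, r, g, b).
# A minimal sketch of all three (point counts, colors, and the project name
# are arbitrary):
import numpy as np
import wandb

wandb.init(project="object3d-demo")  # hypothetical project name
xyz = np.random.rand(100, 3)                                      # positions only
xyzc = np.concatenate([xyz, np.ones((100, 1))], axis=1)           # + category
xyzrgb = np.concatenate([xyz, np.full((100, 3), 128.0)], axis=1)  # + RGB color
wandb.log({"clouds": [wandb.Object3D(c) for c in (xyz, xyzc, xyzrgb)]})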
def test_object3d_numpy(mocked_run):
    obj1 = wandb.Object3D(point_cloud_1)
    obj2 = wandb.Object3D(point_cloud_2)
    obj3 = wandb.Object3D(point_cloud_3)
    obj1.bind_to_run(mocked_run, "object3d", 0)
    obj2.bind_to_run(mocked_run, "object3d", 1)
    obj3.bind_to_run(mocked_run, "object3d", 2)
    assert obj1.to_json(mocked_run)["_type"] == "object3D-file"
    assert obj2.to_json(mocked_run)["_type"] == "object3D-file"
    assert obj3.to_json(mocked_run)["_type"] == "object3D-file"
def visualize_wandb(points, pred, target):
    # points [B, N, C] -> [B*N, C]
    # pred, target [B, N, 1] -> [B*N, 1]
    points = points.view(-1, 5).numpy()
    pred = pred.view(-1, 1).numpy()
    target = target.view(-1, 1).numpy()
    points_gt = np.concatenate((points[:, [0, 1, 2]], target), axis=1)
    points_pd = np.concatenate((points[:, [0, 1, 2]], pred), axis=1)
    wandb.log({"Ground_truth": wandb.Object3D(points_gt)})
    wandb.log({"Prediction": wandb.Object3D(points_pd)})
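# Usage sketch (assumes CPU tensors with C = 5 channels, the first three
# being x, y, z, to match the view(-1, 5) above; shapes and the label range
# are illustrative):
import torch

points = torch.rand(2, 1024, 5)                     # [B, N, C]
pred = torch.randint(0, 4, (2, 1024, 1)).float()    # [B, N, 1]
target = torch.randint(0, 4, (2, 1024, 1)).float()  # [B, N, 1]
visualize_wandb(points, pred, target)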
def log_point_cloud(img):
    # Fragment of a larger logger class: relies on `self.point_cloud_threshold`,
    # `self.zyx_scaling`, `conv`, `key`, and `logger` from the enclosing scope.
    c, z, y, x = img.shape
    assert z > 1
    assert y > 1
    assert x > 1
    if c == 1:
        img = numpy.concatenate([img] * 3)
        c = 3

    if c == 2:
        mask = img.max(0) > self.point_cloud_threshold
        if not mask.max():
            logger.debug("no points in cloud")
            return

        idx = numpy.asarray(numpy.where(mask)).T
        idx = idx * numpy.broadcast_to(numpy.array([self.zyx_scaling]),
                                       idx.shape)
        pixels = img[numpy.broadcast_to(mask[None], img.shape)].reshape(-1, 2)
        color = numpy.ones(len(pixels))
        color[pixels[:, 0] > self.point_cloud_threshold] += 1
        color[pixels[:, 1] > self.point_cloud_threshold] += 2
        assert len(idx) == len(color)
        point_cloud = numpy.asarray(
            [list(coord) + [col] for coord, col in zip(idx, color)])
        conv[key] = wandb.Object3D(point_cloud)
    elif c == 3:
        # The raise below makes the rest of this branch unreachable; the RGB
        # path is effectively disabled in the original source.
        raise NotImplementedError("result looks only black and white!")
        mask = img.sum(0) > self.point_cloud_threshold
        if not mask.max():
            logger.debug("no points in cloud")
            return

        idx = numpy.asarray(numpy.where(mask)).T
        idx = idx * numpy.broadcast_to(numpy.array([self.zyx_scaling]),
                                       idx.shape)
        rgb = img[numpy.broadcast_to(mask[None], img.shape)].reshape(-1, 3)
        assert len(idx) == len(rgb)
        point_cloud = numpy.asarray([
            list(coord) + [r, g, b] for coord, (r, g, b) in zip(idx, rgb)
        ])
        conv[key] = wandb.Object3D(point_cloud)
    else:
        raise NotImplementedError(c)
def export(self, file=None, vcolor=None):
    if file is None:
        if self.export_folder and self.export_file == self.filename:
            filename, file_extension = os.path.splitext(self.filename)
            file = '%s/%s_%d%s' % (self.export_folder, filename,
                                   self.pool_count, file_extension)
        else:
            return
    faces = []
    vs = self.vs[self.v_mask]
    gemm = np.array(self.gemm_edges)
    new_indices = np.zeros(self.v_mask.shape[0], dtype=np.int32)
    new_indices[self.v_mask] = np.arange(
        0, np.ma.where(self.v_mask)[0].shape[0])
    for edge_index in range(len(gemm)):
        cycles = self.__get_cycle(gemm, edge_index)
        for cycle in cycles:
            faces.append(self.__cycle_to_face(cycle, new_indices))
    with open(file, 'w+') as f:
        for vi, v in enumerate(vs):
            vcol = ' %f %f %f' % (vcolor[vi, 0], vcolor[vi, 1],
                                  vcolor[vi, 2]) if vcolor is not None else ''
            f.write("v %f %f %f%s\n" % (v[0], v[1], v[2], vcol))
        for face_id in range(len(faces) - 1):
            f.write("f %d %d %d\n" % (faces[face_id][0] + 1,
                                      faces[face_id][1] + 1,
                                      faces[face_id][2] + 1))
        f.write("f %d %d %d" % (faces[-1][0] + 1, faces[-1][1] + 1,
                                faces[-1][2] + 1))
        for edge in self.edges:
            f.write("\ne %d %d" % (new_indices[edge[0]] + 1,
                                   new_indices[edge[1]] + 1))
    wandb.log({str(self.pool_count): wandb.Object3D(open(file))},
              step=self.epoch)
def _make_point_cloud():
    # Generate a symmetric pattern
    POINT_COUNT = 20000

    # Choose a random sample
    theta_chi = pi * np.random.rand(POINT_COUNT, 2)

    def gen_point(theta, chi, i):
        p = sin(theta) * 4.5 * sin(i + 1 / 2 * (i * i + 2)) + \
            cos(chi) * 7 * sin((2 * i - 4) / 2 * (i + 2))

        x = p * sin(chi) * cos(theta)
        y = p * sin(chi) * sin(theta)
        z = p * cos(chi)

        r = sin(theta) * 120 + 120
        g = sin(x) * 120 + 120
        b = cos(y) * 120 + 120

        return [x, y, z, r, g, b]

    def wave_pattern(i):
        return np.array(
            [gen_point(theta, chi, i) for [theta, chi] in theta_chi])

    return wandb.Object3D(wave_pattern(0))
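# Usage sketch ("wave" is a hypothetical panel name); each row of the cloud
# is [x, y, z, r, g, b], so the returned Object3D can be logged as-is:
wandb.log({"wave": _make_point_cloud()})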
def test_object3d_io(mocked_run):
    f = utils.fixture_open("Box.gltf")
    body = f.read()
    ioObj = six.StringIO(six.u(body))
    obj = wandb.Object3D(ioObj, file_type="obj")
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
def test_object3d_transform():
    obj = wandb.Object3D.transform([
        wandb.Object3D(open("tests/fixtures/Box.gltf")),
        wandb.Object3D(open("tests/fixtures/cube.obj")),
        wandb.Object3D(point_cloud_1)
    ], "tests/output", "pc", 1)
    assert os.path.exists("tests/output/media/object3D/pc_1_0.gltf")
    assert os.path.exists("tests/output/media/object3D/pc_1_1.obj")
    assert os.path.exists("tests/output/media/object3D/pc_1_2.pts.json")
    assert obj["_type"] == "object3D"
    assert obj["filenames"] == [
        "pc_1_0.gltf",
        "pc_1_1.obj",
        "pc_1_2.pts.json",
    ]
def test_object3d_io():
    f = open("tests/fixtures/Box.gltf")
    body = f.read()
    ioObj = six.StringIO(six.u(body))
    obj = wandb.Object3D(ioObj, file_type="obj")
    assert obj.extension == "obj"
def visualize_wandb(points, pred, target, index_important):
    # points [B, N, C] -> [B*N, C]
    # pred, target [B, N, 1] -> [B*N, 1]
    points = points.view(-1, 5).numpy()
    pred = pred.view(-1, 1).numpy()
    target = target.view(-1, 1).numpy()
    index_important = index_important.view(-1, )
    temp_arr = np.zeros(len(target))
    temp_arr[index_important] = 1
    temp_arr = temp_arr.reshape(-1, 1)
    points_gt = np.concatenate((points[:, [0, 1, 2]], target), axis=1)
    points_pd = np.concatenate((points[:, [0, 1, 2]], pred), axis=1)
    points_important = np.concatenate((points[:, [0, 1, 2]], temp_arr),
                                      axis=1)
    wandb.log({"Ground_truth": wandb.Object3D(points_gt)})
    wandb.log({"Prediction": wandb.Object3D(points_pd)})
    wandb.log({"important points": wandb.Object3D(points_important)})
def test_object3d_unsupported_numpy():
    # Arrays must be 2-D with 3, 4, or 6 columns; io objects need an explicit
    # file_type.
    with pytest.raises(ValueError):
        wandb.Object3D(np.array([1]))

    with pytest.raises(ValueError):
        wandb.Object3D(np.array([[1, 2], [3, 4], [1, 2]]))

    with pytest.raises(ValueError):
        wandb.Object3D(np.array([1, 3, 4, 5, 6, 7, 8, 8, 3]))

    with pytest.raises(ValueError):
        wandb.Object3D(np.array([[1, 3, 4, 5, 6, 7, 8, 8, 3]]))

    f = utils.fixture_open("Box.gltf")
    body = f.read()
    ioObj = six.StringIO(six.u(body))
    with pytest.raises(ValueError):
        wandb.Object3D(ioObj)
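# The last case fails only because no file_type is given; the same in-memory
# body is accepted once the type is declared (sketch, mirroring the
# test_object3d_io fixtures above):
ioObj = six.StringIO(six.u(body))
obj = wandb.Object3D(ioObj, file_type="gltf")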
def log_structure_and_angs(args,
                           pred_ang,
                           pred_coords,
                           true_coords,
                           src_seq,
                           commit,
                           log_angs=True,
                           struct_name="train"):
    """Logs a 3D structure prediction to wandb."""
    if log_angs:
        log_angle_distributions(args, pred_ang, src_seq)

    src_seq_cpu = src_seq.cpu().detach().numpy()

    # Make dir if needed
    cur_struct_path = os.path.join(args.structure_dir, struct_name)
    os.makedirs(cur_struct_path, exist_ok=True)

    # Remove coordinate-level padding (each residue has about 13 atoms,
    # even if some are missing)
    gold_item_non_batch_pad = (true_coords != VOCAB.pad_id).any(dim=-1)
    true_coords = true_coords[gold_item_non_batch_pad]
    true_coords[torch.isnan(true_coords)] = 0

    creator = PDB_Creator(pred_coords.detach().numpy(),
                          seq=VOCAB.ints2str(src_seq_cpu))
    creator.save_pdb(f"{cur_struct_path}/{wandb.run.step:05}_pred.pdb",
                     title="pred")

    t_creator = PDB_Creator(true_coords.cpu().detach().numpy(),
                            seq=VOCAB.ints2str(src_seq_cpu))
    if not os.path.isfile(
            f"{cur_struct_path}/true.pdb") or struct_name == "train":
        t_creator.save_pdb(f"{cur_struct_path}/true.pdb", title="true")

    gltf_out_path = os.path.join(args.gltf_dir,
                                 f"{wandb.run.step:05}_{struct_name}.gltf")
    t_creator.save_gltfs(
        f"{cur_struct_path}/true.pdb",
        f"{cur_struct_path}/{wandb.run.step:05}_pred.pdb",
        gltf_out_path=gltf_out_path,
        make_pse=True,
        make_png=args.save_pngs,
        pse_out_path=f"{cur_struct_path}/{wandb.run.step:05}_both.pse")

    log_items = {struct_name: wandb.Object3D(gltf_out_path)}
    if args.save_pngs:
        try:
            log_items[struct_name + "_img"] = wandb.Image(
                gltf_out_path.replace("gltf", "png"))
        except FileNotFoundError:
            # Account for the possibility that a PyMOL session may have
            # failed to be created successfully
            pass

    wandb.log(log_items, commit=commit)
def main():
    dataset = SequenceDataset("/home/ai/Data/kitti_formatted")
    loader, _, _ = utils.data_loaders(dataset, 1.0, 0.0, 0.0, 4, 1)
    wandb.init(project="sfmnet")
    for i, batch in enumerate(loader):
        tgt, refs, K, Kinv = batch
        #wandb.log({"batch": wandb.Video((tgt * 255).byte(), fps=1, format="webm")}, step=i)
        pos = np.random.random((tgt.shape[-1] * tgt.shape[-2], 3)) * 100.0
        color = np.random.randint(0, 256, (tgt.shape[-1] * tgt.shape[-2], 3))
        cloud = np.concatenate((pos, color), axis=1)
        print(cloud.shape)
        wandb.log(
            {
                "batch": [wandb.Image(img) for img in tgt],
                "loss": 1 / (0.01 * i + 1),
                "cloud": wandb.Object3D(cloud)
            },
            step=i)
        time.sleep(3.0)
def make_scene(vecs):
    # Note: `points` comes from the enclosing scope; RGB components are
    # conventionally 0-255, so values like 321 are out of range.
    return wandb.Object3D({
        "type": "lidar/beta",
        "vectors": np.array(vecs),
        "points": points,
        "boxes": np.array([
            {
                "corners": [[0, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0],
                            [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]],
                # "label": "Tree",
                "color": [123, 321, 111],
            },
            {
                "corners": [[0, 0, 0], [0, 2, 0], [0, 0, 2], [2, 0, 0],
                            [2, 2, 0], [0, 2, 2], [2, 0, 2], [2, 2, 2]],
                # "label": "Card",
                "color": [111, 321, 0],
            },
        ]),
    })
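# For reference, the lidar/beta dict accepts: "points" as an (N, 3|4|6)
# array, "boxes" with eight [x, y, z] "corners" plus optional "label" and
# 0-255 RGB "color", and "vectors" as {"start", "end"} pairs. A minimal
# self-contained sketch (values are arbitrary):
import numpy as np
import wandb

scene = wandb.Object3D({
    "type": "lidar/beta",
    "points": np.random.rand(50, 3),
    "boxes": np.array([{
        "corners": [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
                    [1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1]],
        "label": "unit box",  # hypothetical label
        "color": [255, 0, 0],
    }]),
    "vectors": np.array([{"start": [0, 0, 0], "end": [1, 1, 1]}]),
})
wandb.log({"scene": scene})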
def test_object3d_gltf(mocked_run):
    obj = wandb.Object3D(utils.fixture_open("Box.gltf"))
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
def test_object3d_dict_invalid_string(mocked_run):
    with pytest.raises(ValueError):
        obj = wandb.Object3D("INVALID")
def test_object3d_dict_invalid(mocked_run):
    with pytest.raises(ValueError):
        obj = wandb.Object3D({
            "type": "INVALID",
        })
def test_object3d_dict(mocked_run):
    obj = wandb.Object3D({
        "type": "lidar/beta",
    })
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
def test_object3d_obj(mocked_run):
    obj = wandb.Object3D(utils.fixture_open("cube.obj"))
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
    wandb.finish()
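# The file-based constructor covers common 3D formats; per the wandb docs
# these include .obj, .gltf, .glb, .babylon, .stl, and .pts.json. Sketch,
# reusing the cube.obj fixture path from the tests above:
with open("tests/fixtures/cube.obj") as fh:
    wandb.log({"mesh": wandb.Object3D(fh)})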
    return [x, y, z, r, g, b]


def wave_pattern(i):
    return np.array([gen_point(theta, chi, i) for [theta, chi] in theta_chi])


run = wandb.init()

# Tests 3d OBJ
#wandb.log({"gltf": wandb.Object3D(open(os.path.join(DIR, "../tests/fixtures/Duck.gltf"))),
#           "obj": wandb.Object3D(open(os.path.join(DIR, "../tests/fixtures/cube.obj")))})

artifact = wandb.Artifact("pointcloud_test_2", "dataset")
table = wandb.Table(["ID", "Model"])

# Tests numpy clouds
for i in range(0, 20, 10):
    table.add_data("Cloud " + str(i), wandb.Object3D(wave_pattern(i)))
    wandb.log({
        "Clouds":
        [wandb.Object3D(point_cloud_1),
         wandb.Object3D(point_cloud_2)],
        "Colored_Cloud": wandb.Object3D(wave_pattern(i))
    })

artifact.add(table, "table")
run.log_artifact(artifact)
def test_object3d_gltf():
    obj = wandb.Object3D(open("tests/fixtures/Box.gltf"))
def main():
    print('Start Training\nInitializing\n')
    print('src:', args.source)

    # Data loading
    data_func = {
        'modelnet': Modelnet40_data,
        'scannet': Scannet_data_h5,
        'shapenet': Shapenet_data
    }

    source_train_dataset = data_func[args.source](pc_input_num=args.num_points,
                                                  status='train',
                                                  aug=False,
                                                  pc_root=dir_root + args.source)
    source_test_dataset = data_func[args.source](pc_input_num=args.num_points,
                                                 status='test',
                                                 aug=False,
                                                 pc_root=dir_root + args.source)

    num_source_train = len(source_train_dataset)
    num_source_test = len(source_test_dataset)

    source_train_dataloader = DataLoader(source_train_dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                         num_workers=2,
                                         drop_last=True)
    source_test_dataloader = DataLoader(source_test_dataset,
                                        batch_size=BATCH_SIZE,
                                        shuffle=True,
                                        num_workers=2,
                                        drop_last=False)

    print('num_source_train: {:d}, num_source_test: {:d}'.format(
        num_source_train, num_source_test))
    print('batch_size:', BATCH_SIZE)

    # Model
    model = PointNet_AutoEncoder()
    model = model.to(device=device)
    criterion = chamfer_distance
    remain_epoch = 50

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=LR,
                           weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=args.epochs + remain_epoch)

    wandb.init(project="pcl2pcl",
               entity="unitn-mhug-csalto",
               name=args.wandb_name)
    wandb.config.update({
        "source": args.source,
        "epochs": max_epoch,
        "batch_size": BATCH_SIZE,
        "lr": LR,
        "decay": weight_decay,
        "momentum": momentum,
        "dir_root": dir_root
    })
    wandb.watch(model)

    title_frame = "%s" % (args.source)
    layout = go.Layout(scene=dict(bgcolor='rgba(1,1,1,1)',
                                  xaxis=dict(title="X",
                                             backgroundcolor="rgb(0, 0, 0)",
                                             gridcolor="black",
                                             showbackground=True,
                                             zerolinecolor="black"),
                                  yaxis=dict(title="Y",
                                             backgroundcolor="rgb(0, 0, 0)",
                                             gridcolor="black",
                                             showbackground=True,
                                             zerolinecolor="black"),
                                  zaxis=dict(title="Z",
                                             backgroundcolor="rgb(0, 0, 0)",
                                             gridcolor="black",
                                             showbackground=True,
                                             zerolinecolor="black")),
                       scene_aspectmode='data',
                       title=title_frame)

    best_target_test_acc = 0

    for epoch in range(max_epoch):
        since_e = time.time()
        scheduler.step(epoch=epoch)
        wandb.log({"lr": scheduler.get_lr()[0]})

        model.train()
        loss_total = 0
        data_total = 0
        data_t_total = 0

        # Training
        for batch_idx, batch_s in enumerate(source_train_dataloader):
            data, label = batch_s
            # fig_dict = {"layout": layout}
            # lidar_plot = show_pcl(data)
            # fig_dict['data'] = lidar_plot
            # fig = go.Figure(lidar_plot)
            # os.makedirs('lidar', exist_ok=True)
            # fig.write_html(file=os.path.join('lidar', str(epoch) + '_' + str(batch_idx) + '.html'))
            data = data.to(device=device)

            pred, _ = model(data)

            # Classification loss
            loss_chamfer, _ = criterion(pred.permute(0, 2, 1),
                                        data.permute(0, 2, 1))
            loss_chamfer.backward()
            optimizer.step()
            optimizer.zero_grad()

            loss_total += loss_chamfer.item() * data.size(0)
            data_total += data.size(0)

            if batch_idx % 50 == 0:
                print('[It {}: chamfer loss {:.4f}]'.format(
                    batch_idx, loss_total / data_total))

            if batch_idx % 1000 == 0:
                pred_log = pred.cpu()
                pred_log = pred_log[0]
                pred_log = pred_log.t().detach().numpy()
                data_log = data.cpu()
                data_log = data_log[0]
                data_log = data_log.t().numpy()
                wandb.log({
                    "train_original_%d" % batch_idx: wandb.Object3D(data_log)
                })
                wandb.log(
                    {"train_recon_%d" % batch_idx: wandb.Object3D(pred_log)})

        print('[Epoch {}: Avg chamfer loss {:.4f}]'.format(
            epoch, loss_total / data_total))
        wandb.log({"train_chamfer": loss_total / data_total})

        # Testing
        with torch.no_grad():
            model.eval()
            loss_total = 0
            data_total = 0

            for batch_idx, (data, ll) in enumerate(source_test_dataset):
                data = data.to(device=device)
                data = data.unsqueeze(0)
                pred, _ = model(data)
                loss, _ = criterion(pred.permute(0, 2, 1),
                                    data.permute(0, 2, 1))

                loss_total += loss.item() * data.size(0)
                data_total += data.size(0)

                # to_log = np.random.choice([x for x in range(BATCH_SIZE)], 10)
                pred_log = pred.cpu().squeeze(0)
                pred_log = pred_log.t().numpy()
                data_log = data.cpu().squeeze(0)
                data_log = data_log.t().numpy()

                if batch_idx % 1000 == 0:
                    wandb.log({
                        "test_original_%d" % batch_idx:
                        wandb.Object3D(data_log)
                    })
                    wandb.log({
                        "test_recon_%d" % batch_idx: wandb.Object3D(pred_log)
                    })
                # fig_dict = {"layout": layout}
                # lidar_plot = show_pcl(data_log)
                # fig_dict['data'] = lidar_plot
                # fig = go.Figure(lidar_plot)
                # os.makedirs('lidar', exist_ok=True)
                # fig.write_html(file=os.path.join('lidar', str(epoch) + '_' + str(batch_idx) + '.html'))

            pred_loss = loss_total / data_total
            print('TEST - [Epoch: {} \t loss: {:.4f}]'.format(
                epoch, pred_loss))
            # writer.add_scalar('accs/target_test_acc', pred_acc, epoch)
            wandb.log({"test_chamfer": pred_loss})

            # to_log = np.random.choice([x for x in range(BATCH_SIZE)], 10)
            # pred_log = pred[to_log].cpu().numpy()
            # data_log = data[to_log].cpu().numpy()
            # for i in range(10):
            #     wandb.log({"original_%d" % i: wandb.Object3D(data[i])})
            #     wandb.log({"recon_%d" % i: wandb.Object3D(pred_log[i])})

        time_pass_e = time.time() - since_e
        print('The {} epoch takes {:.0f}m {:.0f}s'.format(
            epoch, time_pass_e // 60, time_pass_e % 60))
        print(args)
        print(' ')
def test_object3d_numpy():
    obj = wandb.Object3D(point_cloud_1)
    obj = wandb.Object3D(point_cloud_2)
    obj = wandb.Object3D(point_cloud_3)
    y = p * sin(chi) * sin(theta)
    z = p * cos(chi)

    r = sin(theta) * 120 + 120
    g = sin(x) * 120 + 120
    b = cos(y) * 120 + 120

    return [x, y, z, r, g, b]


def wave_pattern(i):
    return np.array([gen_point(theta, chi, i) for [theta, chi] in theta_chi])


wandb.init()

# Tests 3d OBJ
#wandb.log({"gltf": wandb.Object3D(open(os.path.join(DIR, "../tests/fixtures/Duck.gltf"))),
#           "obj": wandb.Object3D(open(os.path.join(DIR, "../tests/fixtures/cube.obj")))})

# Tests numpy clouds
for i in range(0, 20, 10):
    wandb.log({
        "Clouds":
        [wandb.Object3D(point_cloud_1),
         wandb.Object3D(point_cloud_2)],
        "Colored_Cloud": wandb.Object3D(wave_pattern(i))
    })
def _generic_step(self, batch, batch_idx):
    pcl = batch["pointcloud"]
    fv = batch["feature_vector"]
    est_yaw = batch["est_yaw"]
    est_pos = batch["est_pos"]
    target_yaw = batch["target_yaw"]
    target_pos = batch["target_pos"]

    # regress against correction
    target_yaw_residual = target_yaw - est_yaw
    target_pos_residual = target_pos - est_pos

    # size: B x N x 4 (3 for pos, 1 for yaw)
    preds = self(pcl, fv)
    pos = preds[:, :3]
    yaw = preds[:, 3:4]

    # compute losses
    pos_loss = self._get_location_loss(pos, target_pos_residual)
    yaw_loss = self._get_angle_loss(yaw, target_yaw_residual)

    self.log(f"pos_loss_{self._stage}",
             pos_loss,
             on_epoch=self._stage == Stages.TRAIN)
    self.log(f"yaw_loss_{self._stage}",
             yaw_loss,
             on_epoch=self._stage == Stages.TRAIN)

    total_loss = pos_loss  # + yaw_loss
    self.log(
        f"total_loss_{self._stage}",
        total_loss,
        on_epoch=self._stage == Stages.TRAIN,
    )

    if (batch_idx % self._log_pcl_every_n_steps) == 0:
        # log first pointcloud + GT box + Est Box from batch
        # expected vector: x, y, z, theta, height, width, length = vec.reshape(7, 1)
        target_vec = get_oobbox_vec(
            pos=target_pos[0].cpu().detach().numpy(),
            yaw=target_yaw[0].cpu().detach().numpy(),
            dims=config.CAR_DIMS,
        )
        gt_corners = get_corners_from_vector(target_vec)

        init_est_vec = get_oobbox_vec(
            pos=est_pos[0].cpu().detach().numpy(),
            yaw=est_yaw[0].cpu().detach().numpy(),
            dims=config.CAR_DIMS,
        )
        init_est_corners = get_corners_from_vector(init_est_vec)

        est_vec = get_oobbox_vec(
            pos=(est_pos + pos)[0].cpu().detach().numpy(),
            # yaw=(est_yaw + yaw)[0].cpu().detach().numpy(),
            yaw=est_yaw[0].cpu().detach().numpy(),
            dims=config.CAR_DIMS,
        )
        est_corners = get_corners_from_vector(est_vec)

        pcl_cam = pcl + est_pos.reshape(-1, 1, 3)
        # visualize_pointcloud_and_obb(
        #     pcl_cam[0].numpy(),
        #     [gt_corners, init_est_corners, est_corners],
        #     colors=[Colors.WHITE, Colors.RED, Colors.GREEN],
        # )
        wb.log(
            {
                "point_scene":
                wb.Object3D({
                    "type": "lidar/beta",
                    "points": pcl_cam[0].cpu().detach().numpy(),
                    "boxes": np.array([
                        {
                            "corners": gt_corners.T.tolist(),
                            "label": "GT OBBox",
                            "color": Colors.WHITE,
                        },
                        {
                            "corners": est_corners.T.tolist(),
                            "label": "Est. OBBox",
                            "color": Colors.GREEN,
                        },
                        {
                            "corners": init_est_corners.T.tolist(),
                            "label": "Initial est. OBBox",
                            "color": Colors.RED,
                        },
                    ]),
                })
            },
        )

    return total_loss
def test_object3d_obj():
    obj = wandb.Object3D(open("tests/fixtures/cube.obj"))
def trainer(args):
    config_keys = [
        "batch_size",
        "soft_label",
        "adv_weight",
        "d_thresh",
        "z_dim",
        "z_dis",
        "model_save_step",
        "g_lr",
        "d_lr",
        "beta",
        "cube_len",
        "leak_value",
        "bias",
    ]

    # check new run or resume run
    if args.resume_id:
        api = wandb.Api()
        previous_run = api.run(f"bugan/simple-pytorch-3dgan/{args.resume_id}")
        config = previous_run.config
        pprint.pprint(config)
        run = wandb.init(
            project="simple-pytorch-3dgan",
            id=args.resume_id,
            entity="bugan",
            config=config,
            resume=True,
        )
    else:
        config = {
            **args.__dict__,
            **{k: getattr(params, k) for k in config_keys},
        }
        pprint.pprint(config)
        run = wandb.init(
            entity="bugan", project="simple-pytorch-3dgan", config=config, resume=True
        )

    # convert config dict to Namespace
    config = Namespace(**config)

    # added for output dir
    save_file_path = params.output_dir + "/" + config.model_name
    print(save_file_path)  # ../outputs/dcgan
    if not os.path.exists(save_file_path):
        os.makedirs(save_file_path)

    # for using tensorboard
    if config.logs:
        model_uid = datetime.datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
        writer = SummaryWriter(
            params.output_dir
            + "/"
            + config.model_name
            + "/logs_"
            + model_uid
            + "_"
            + config.logs
            + "/"
        )

    # dataset define
    # dsets_path = args.input_dir + args.data_dir + "train/"
    dsets_path = config.data_dir
    # if params.cube_len == 64:
    #     dsets_path = params.data_dir + params.model_dir + "30/train64/"
    print(dsets_path)  # ../volumetric_data/chair/30/train/

    if config.rotate:
        train_dsets = AugmentDataset(dsets_path, config, "train", res=config.res)
    else:
        train_dsets = ShapeNetDataset(dsets_path, config, "train", res=config.res)
    # val_dsets = ShapeNetDataset(dsets_path, args, "val")

    train_dset_loaders = torch.utils.data.DataLoader(
        train_dsets,
        batch_size=params.batch_size,
        shuffle=True,
        num_workers=24,
        pin_memory=True,
    )
    # val_dset_loaders = torch.utils.data.DataLoader(val_dsets, batch_size=args.batch_size, shuffle=True, num_workers=1)

    dset_len = {"train": len(train_dsets)}
    dset_loaders = {"train": train_dset_loaders}
    # print(dset_len["train"])

    # model define
    D = net_D(config)
    # summary(net_D, input_size=(32, 32, 32))
    G = net_G(config)
    # print(G)
    # print(D)

    # load state dict if resume
    if args.resume_id:
        G, D = load_model(run, G, D)
    wandb.watch(G)
    wandb.watch(D)
    # summary(net_G, input_size=(params.z_dim,))

    # print total number of parameters in a model
    # x = sum(p.numel() for p in G.parameters() if p.requires_grad)
    # print(x)
    # x = sum(p.numel() for p in D.parameters() if p.requires_grad)
    # print(x)

    D_solver = optim.Adam(D.parameters(), lr=params.d_lr, betas=params.beta)
    # D_solver = optim.SGD(D.parameters(), lr=params.d_lr * 100, momentum=0.9)
    G_solver = optim.Adam(G.parameters(), lr=params.g_lr, betas=params.beta)

    D.to(params.device)
    G.to(params.device)

    # criterion_D = nn.BCELoss()
    criterion_D = nn.MSELoss()
    criterion_G = nn.L1Loss()

    itr_val = -1
    itr_train = -1

    for epoch in range(config.epochs):
        start = time.time()

        for phase in ["train"]:
            if phase == "train":
                # if args.lrsh:
                #     D_scheduler.step()
                D.train()
                G.train()
            else:
                D.eval()
                G.eval()

            running_loss_G = 0.0
            running_loss_D = 0.0
            running_loss_adv_G = 0.0

            for i, X in enumerate(tqdm(dset_loaders[phase])):
                # if phase == 'val':
                #     itr_val += 1
                if phase == "train":
                    itr_train += 1

                X = X.to(params.device)
                # print(X)
                # print(X.size())

                batch = X.size()[0]
                # print(batch)

                Z = generateZ(config, batch)
                # print(Z.size())

                # ============= Train the discriminator =============#
                d_real = D(X)

                fake = G(Z)
                if i == 0 and epoch % config.generate_every == 0:
                    image_saved_path = Path(params.images_dir) / config.model_name
                    image_saved_path.mkdir(parents=True, exist_ok=True)

                    samples = fake.cpu().data[:5].squeeze().numpy()

                    fnames = []
                    # note: this loop reuses `i`, shadowing the batch index
                    # from the enclosing loop
                    for i, samp in enumerate(samples):
                        # print(i, samp)
                        try:
                            mesh = trimesh.voxel.VoxelGrid(
                                trimesh.voxel.encoding.DenseEncoding(samp >= 0.5)
                            ).marching_cubes
                        except ValueError as exc:
                            print(f"Marching cubes failed: {exc}")
                            continue
                        fname = Path(image_saved_path) / f"{epoch:04}_{i}.obj"
                        mesh.export(fname)
                        fnames.append(fname)

                    wandb.log(
                        {
                            "generated_tree_samples": [
                                wandb.Object3D(open(fname)) for fname in fnames
                            ],
                            "epoch": epoch,
                        },
                        step=itr_train,
                    )

                d_fake = D(fake)

                real_labels = torch.ones_like(d_real).to(params.device)
                fake_labels = torch.zeros_like(d_fake).to(params.device)
                # print(d_fake.size(), fake_labels.size())

                if params.soft_label:
                    real_labels = (
                        torch.Tensor(batch).uniform_(0.7, 1.2).to(params.device)
                    )
                    fake_labels = torch.Tensor(batch).uniform_(0, 0.3).to(params.device)

                # print(d_real.size(), real_labels.size())
                d_real_loss = criterion_D(d_real, real_labels)
                d_fake_loss = criterion_D(d_fake, fake_labels)
                d_loss = d_real_loss + d_fake_loss

                # no deleted
                d_real_acu = torch.ge(d_real.squeeze(), 0.5).float()
                d_fake_acu = torch.le(d_fake.squeeze(), 0.5).float()
                d_total_acu = torch.mean(torch.cat((d_real_acu, d_fake_acu), 0))

                if d_total_acu < params.d_thresh:
                    D.zero_grad()
                    d_loss.backward()
                    D_solver.step()

                # =============== Train the generator ===============#
                Z = generateZ(config, batch)
                # print(X)
                fake = G(Z)  # generated fake: 0-1, X: 0/1
                d_fake = D(fake)

                adv_g_loss = criterion_D(d_fake, real_labels)
                # print(fake.size(), X.size())

                # recon_g_loss = criterion_D(fake, X)
                recon_g_loss = criterion_G(fake, X)
                # g_loss = recon_g_loss + params.adv_weight * adv_g_loss
                g_loss = adv_g_loss

                if config.local_test:
                    # print('Iteration-{} , D(x) : {:.4} , G(x) : {:.4} , D(G(x)) : {:.4}'.format(itr_train, d_loss.item(), recon_g_loss.item(), adv_g_loss.item()))
                    print(
                        "Iteration-{} , D(x) : {:.4}, D(G(x)) : {:.4}".format(
                            itr_train, d_loss.item(), adv_g_loss.item()
                        )
                    )

                D.zero_grad()
                G.zero_grad()
                g_loss.backward()
                G_solver.step()

                # =============== logging each 10 iterations ===============#
                running_loss_G += recon_g_loss.item() * X.size(0)
                running_loss_D += d_loss.item() * X.size(0)
                running_loss_adv_G += adv_g_loss.item() * X.size(0)

                if config.logs:
                    loss_G = {
                        "adv_loss_G": adv_g_loss,
                        "recon_loss_G": recon_g_loss,
                    }
                    loss_D = {
                        "adv_real_loss_D": d_real_loss,
                        "adv_fake_loss_D": d_fake_loss,
                        "d_real_acu": d_real_acu.mean(),
                        "d_fake_acu": d_fake_acu.mean(),
                        "d_total_acu": d_total_acu,
                    }
                    # if itr_val % 10 == 0 and phase == 'val':
                    #     save_val_log(writer, loss_D, loss_G, itr_val)
                    if itr_train % 10 == 0 and phase == "train":
                        save_train_log(writer, loss_D, loss_G, itr_train)
                        wandb.log(
                            {"G": loss_G, "D": loss_D, "epoch": epoch}, step=itr_train
                        )

            # =============== each epoch save model or save image ===============#
            epoch_loss_G = running_loss_G / dset_len[phase]
            epoch_loss_D = running_loss_D / dset_len[phase]
            epoch_loss_adv_G = running_loss_adv_G / dset_len[phase]

            end = time.time()
            epoch_time = end - start

            print(
                "Epochs-{} ({}) , D(x) : {:.4}, D(G(x)) : {:.4}".format(
                    epoch, phase, epoch_loss_D, epoch_loss_adv_G
                )
            )
            print("Elapsed Time: {:.4} min".format(epoch_time / 60.0))

            if (epoch + 1) % params.model_save_step == 0:
                print("model_saved, images_saved...")
                save_model(run, config.model_name, G, D)