def __getitem__(self, index):
    if hyp.dataset_name in ('kitti', 'clevr', 'real', 'bigbird', 'carla',
                            'carla_mix', 'carla_det', 'replica', 'clevr_vqa'):
        # print(index)
        filename = self.records[index]
        d = pickle.load(open(filename, "rb"))
        d = dict(d)
    # elif hyp.dataset_name == "carla":
    #     filename = self.records[index]
    #     d = np.load(filename)
    #     d = dict(d)
    #     d['rgb_camXs_raw'] = d['rgb_camXs']
    #     d['pix_T_cams_raw'] = d['pix_T_cams']
    #     d['tree_seq_filename'] = "dummy_tree_filename"
    #     d['origin_T_camXs_raw'] = d['origin_T_camXs']
    #     d['camR_T_origin_raw'] = utils_geom.safe_inverse(torch.from_numpy(d['origin_T_camRs'])).numpy()
    #     d['xyz_camXs_raw'] = d['xyz_camXs']
    else:
        assert False  # reader not ready yet

    # if hyp.save_gt_occs:
    #     pickle.dump(d, open(filename, "wb"))

    if hyp.use_gt_occs:
        # aggregate a complete occupancy volume in the reference (R) frame
        __p = lambda x: utils_basic.pack_seqdim(x, 1)
        __u = lambda x: utils_basic.unpack_seqdim(x, 1)
        B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
        PH, PW = hyp.PH, hyp.PW
        K = hyp.K
        BOX_SIZE = hyp.BOX_SIZE
        Z, Y, X = hyp.Z, hyp.Y, hyp.X
        Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
        Z4, Y4, X4 = int(Z / 4), int(Y / 4), int(X / 4)
        D = 9
        pix_T_cams = torch.from_numpy(d["pix_T_cams_raw"]).unsqueeze(0).cuda().to(torch.float)
        camRs_T_origin = torch.from_numpy(d["camR_T_origin_raw"]).unsqueeze(0).cuda().to(torch.float)
        origin_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_origin)))
        origin_T_camXs = torch.from_numpy(d["origin_T_camXs_raw"]).unsqueeze(0).cuda().to(torch.float)
        camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
        camRs_T_camXs = __u(torch.matmul(utils_geom.safe_inverse(__p(origin_T_camRs)),
                                         __p(origin_T_camXs)))
        camXs_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_camXs)))
        camX0_T_camRs = camXs_T_camRs[:, 0]
        camX1_T_camRs = camXs_T_camRs[:, 1]
        camR_T_camX0 = utils_geom.safe_inverse(camX0_T_camRs)
        xyz_camXs = torch.from_numpy(d["xyz_camXs_raw"]).unsqueeze(0).cuda().to(torch.float)
        xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))
        depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(
            __p(pix_T_cams), __p(xyz_camXs), H, W)
        dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams))
        occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
        occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2))
        # union over views: a voxel counts as occupied if any view saw it
        occRs_half = torch.max(occRs_half, dim=1).values.squeeze(0)
        occ_complete = occRs_half.cpu().numpy()

    if hyp.do_empty:
        item_names = [
            'pix_T_cams_raw',
            'origin_T_camXs_raw',
            'camR_T_origin_raw',
            'rgb_camXs_raw',
            'xyz_camXs_raw',
            'empty_rgb_camXs_raw',
            'empty_xyz_camXs_raw',
        ]
    else:
        item_names = [
            'pix_T_cams_raw',
            'origin_T_camXs_raw',
            'camR_T_origin_raw',
            'rgb_camXs_raw',
            'xyz_camXs_raw',
        ]

    # if hyp.do_time_flip:
    #     d = random_time_flip_single(d, item_names)

    # if the sequence length > 2, select S frames
    # filename = d['raw_seq_filename']
    original_filename = filename

    if hyp.dataset_name == "carla_mix" or hyp.dataset_name == "carla_det":
        bbox_origin_gt = d['bbox_origin']
        if 'bbox_origin_predicted' in d:
            bbox_origin_predicted = d['bbox_origin_predicted']
        else:
            bbox_origin_predicted = []
        classes = d['obj_name']
        if isinstance(classes, str):
            classes = [classes]
        d['tree_seq_filename'] = "temp"

    if hyp.dataset_name == "replica":
        d['tree_seq_filename'] = "temp"
        object_category = d['object_category_names']
        bbox_origin = d['bbox_origin']

    if hyp.dataset_name == "clevr_vqa":
        d['tree_seq_filename'] = "temp"
        pix_T_cams = d['pix_T_cams_raw']
        num_cams = pix_T_cams.shape[0]
        # padding_1 = torch.zeros([num_cams, 1, 3])
        # padding_2 = torch.zeros([num_cams, 4, 1])
        # padding_2[:, 3] = 1.0
        # pix_T_cams = torch.cat([pix_T_cams, padding_1], dim=1)
        # pix_T_cams = torch.cat([pix_T_cams, padding_2], dim=2)
        shape_name = d['shape_list']
        color_name = d['color_list']
        material_name = d['material_list']
        all_name = []
        all_style = []
        for index in range(len(shape_name)):
            name = shape_name[index] + "/" + color_name[index] + "_" + material_name[index]
            style_name = color_name[index] + "_" + material_name[index]
            all_name.append(name)
            all_style.append(style_name)
        if hyp.do_shape:
            class_name = shape_name
        elif hyp.do_color:
            class_name = color_name
        elif hyp.do_material:
            class_name = material_name
        elif hyp.do_style:
            class_name = all_style
        else:
            class_name = all_name
        object_category = class_name
        bbox_origin = d['bbox_origin']

    if hyp.dataset_name == "carla":
        camR_index = d['camR_index']
        rgb_camtop = d['rgb_camXs_raw'][camR_index:camR_index + 1]
        origin_T_camXs_top = d['origin_T_camXs_raw'][camR_index:camR_index + 1]
        # predicted_box = d['bbox_origin_predicted']
        predicted_box = []

    filename = d['tree_seq_filename']

    if hyp.do_2d_style_munit:
        d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S)
    if hyp.fixed_view:
        d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S)
    elif self.shuffle or hyp.randomly_select_views:
        d, indexes = random_select_single(d, item_names, num_samples=hyp.S)
    else:
        d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S)

    filename_g = "/".join([original_filename, str(indexes[0])])
    filename_e = "/".join([original_filename, str(indexes[1])])

    rgb_camXs = d['rgb_camXs_raw']
    # move channel dim inward, like pytorch wants
    rgb_camXs = np.transpose(rgb_camXs, axes=[0, 3, 1, 2])
    rgb_camXs = rgb_camXs[:, :3]
    rgb_camXs = utils_improc.preprocess_color(rgb_camXs)

    if hyp.dataset_name == "carla":
        rgb_camtop = np.transpose(rgb_camtop, axes=[0, 3, 1, 2])
        rgb_camtop = rgb_camtop[:, :3]
        rgb_camtop = utils_improc.preprocess_color(rgb_camtop)
        d['rgb_camtop'] = rgb_camtop
        d['origin_T_camXs_top'] = origin_T_camXs_top
        if len(predicted_box) == 0:
            predicted_box = np.zeros([hyp.N, 6])
            score = np.zeros([hyp.N]).astype(np.float32)
        else:
            num_boxes = predicted_box.shape[0]
            score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
            predicted_box = np.pad(predicted_box, [[0, hyp.N - num_boxes], [0, 0]])
        d['predicted_box'] = predicted_box.astype(np.float32)
        d['predicted_scores'] = score.astype(np.float32)

    if hyp.dataset_name == "clevr_vqa":
        # pad boxes, scores, and class names out to a fixed hyp.N entries
        num_boxes = bbox_origin.shape[0]
        bbox_origin = np.array(bbox_origin)
        score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
        bbox_origin = np.pad(bbox_origin, [[0, hyp.N - num_boxes], [0, 0], [0, 0]])
        object_category = np.pad(object_category, [[0, hyp.N - num_boxes]],
                                 lambda x, y, z, m: "0")
        d['gt_box'] = bbox_origin.astype(np.float32)
        d['gt_scores'] = score.astype(np.float32)
        d['classes'] = list(object_category)

    if hyp.dataset_name == "replica":
        if len(bbox_origin) == 0:
            score = np.zeros([hyp.N])
            bbox_origin = np.zeros([hyp.N, 6])
            object_category = ["0"] * hyp.N
            object_category = np.array(object_category)
        else:
            num_boxes = len(bbox_origin)
            bbox_origin = torch.stack(bbox_origin).numpy().squeeze(1).squeeze(1).reshape([num_boxes, 6])
            bbox_origin = np.array(bbox_origin)
            score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
            bbox_origin = np.pad(bbox_origin, [[0, hyp.N - num_boxes], [0, 0]])
            object_category = np.pad(object_category, [[0, hyp.N - num_boxes]],
                                     lambda x, y, z, m: "0")
        d['gt_box'] = bbox_origin.astype(np.float32)
        d['gt_scores'] = score.astype(np.float32)
        d['classes'] = list(object_category)

    if hyp.dataset_name == "carla_mix" or hyp.dataset_name == "carla_det":
        bbox_origin_predicted = bbox_origin_predicted[:3]
        if len(bbox_origin_gt.shape) == 1:
            bbox_origin_gt = np.expand_dims(bbox_origin_gt, 0)
        num_boxes = bbox_origin_gt.shape[0]
        score_gt = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
        bbox_origin_gt = np.pad(bbox_origin_gt, [[0, hyp.N - num_boxes], [0, 0]])
        classes = np.pad(classes, [[0, hyp.N - num_boxes]], lambda x, y, z, m: "0")
        if len(bbox_origin_predicted) == 0:
            bbox_origin_predicted = np.zeros([hyp.N, 6])
            score_pred = np.zeros([hyp.N]).astype(np.float32)
        else:
            num_boxes = bbox_origin_predicted.shape[0]
            score_pred = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
            bbox_origin_predicted = np.pad(bbox_origin_predicted,
                                           [[0, hyp.N - num_boxes], [0, 0]])
        d['predicted_box'] = bbox_origin_predicted.astype(np.float32)
        d['predicted_scores'] = score_pred.astype(np.float32)
        d['gt_box'] = bbox_origin_gt.astype(np.float32)
        d['gt_scores'] = score_gt.astype(np.float32)
        d['classes'] = list(classes)

    d['rgb_camXs_raw'] = rgb_camXs

    if hyp.dataset_name != "carla" and hyp.do_empty:
        empty_rgb_camXs = d['empty_rgb_camXs_raw']
        # move channel dim inward, like pytorch wants
        empty_rgb_camXs = np.transpose(empty_rgb_camXs, axes=[0, 3, 1, 2])
        empty_rgb_camXs = empty_rgb_camXs[:, :3]
        empty_rgb_camXs = utils_improc.preprocess_color(empty_rgb_camXs)
        d['empty_rgb_camXs_raw'] = empty_rgb_camXs

    if hyp.use_gt_occs:
        d['occR_complete'] = occ_complete
    d['tree_seq_filename'] = filename
    d['filename_e'] = filename_e
    d['filename_g'] = filename_g
    return d
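# The loader above leans heavily on utils_basic.pack_seqdim / unpack_seqdim to
# fold the sequence dim into the batch dim before calling per-frame ops.
# Below is a minimal self-contained sketch of that contract (an assumption
# about the utils' behavior, not the repo's actual implementation): pack
# reshapes (B, S, ...) -> (B*S, ...), and unpack inverts it.

import torch

def _pack_seqdim_sketch(x, B):
    # (B, S, ...) -> (B*S, ...)
    shapelist = list(x.shape)
    assert shapelist[0] == B
    return x.reshape([B * shapelist[1]] + shapelist[2:])

def _unpack_seqdim_sketch(x, B):
    # (B*S, ...) -> (B, S, ...)
    shapelist = list(x.shape)
    assert shapelist[0] % B == 0
    return x.reshape([B, shapelist[0] // B] + shapelist[1:])

if __name__ == "__main__":
    x = torch.randn(2, 3, 4, 4)           # B=2, S=3
    x_ = _pack_seqdim_sketch(x, 2)        # (6, 4, 4)
    assert torch.equal(_unpack_seqdim_sketch(x_, 2), x)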
def __getitem__(self, index):
    if hyp.dataset_name in ('kitti', 'clevr', 'real', 'bigbird', 'carla',
                            'carla_mix', 'replica', 'clevr_vqa', 'carla_det'):
        # print(index)
        filename = self.records[index]
        d = pickle.load(open(filename, "rb"))
        d = dict(d)
        # also load a matching empty-room scene, to be stacked with the real one
        d_empty = pickle.load(open(self.empty_scene, "rb"))
        d_empty = dict(d_empty)
    # elif hyp.dataset_name == "carla":
    #     filename = self.records[index]
    #     d = np.load(filename)
    #     d = dict(d)
    #     d['rgb_camXs_raw'] = d['rgb_camXs']
    #     d['pix_T_cams_raw'] = d['pix_T_cams']
    #     d['tree_seq_filename'] = "dummy_tree_filename"
    #     d['origin_T_camXs_raw'] = d['origin_T_camXs']
    #     d['camR_T_origin_raw'] = utils_geom.safe_inverse(torch.from_numpy(d['origin_T_camRs'])).numpy()
    #     d['xyz_camXs_raw'] = d['xyz_camXs']
    else:
        assert False  # reader not ready yet

    if hyp.do_empty:
        item_names = [
            'pix_T_cams_raw',
            'origin_T_camXs_raw',
            'camR_T_origin_raw',
            'rgb_camXs_raw',
            'xyz_camXs_raw',
            'empty_rgb_camXs_raw',
            'empty_xyz_camXs_raw',
        ]
    else:
        item_names = [
            'pix_T_cams_raw',
            'origin_T_camXs_raw',
            'camR_T_origin_raw',
            'rgb_camXs_raw',
            'xyz_camXs_raw',
        ]

    if hyp.use_gt_occs:
        # aggregate a complete occupancy volume in the reference (R) frame
        __p = lambda x: utils_basic.pack_seqdim(x, 1)
        __u = lambda x: utils_basic.unpack_seqdim(x, 1)
        B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
        PH, PW = hyp.PH, hyp.PW
        K = hyp.K
        BOX_SIZE = hyp.BOX_SIZE
        Z, Y, X = hyp.Z, hyp.Y, hyp.X
        Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
        Z4, Y4, X4 = int(Z / 4), int(Y / 4), int(X / 4)
        D = 9
        pix_T_cams = torch.from_numpy(d["pix_T_cams_raw"]).unsqueeze(0).cuda().to(torch.float)
        camRs_T_origin = torch.from_numpy(d["camR_T_origin_raw"]).unsqueeze(0).cuda().to(torch.float)
        origin_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_origin)))
        origin_T_camXs = torch.from_numpy(d["origin_T_camXs_raw"]).unsqueeze(0).cuda().to(torch.float)
        camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
        camRs_T_camXs = __u(torch.matmul(utils_geom.safe_inverse(__p(origin_T_camRs)),
                                         __p(origin_T_camXs)))
        camXs_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_camXs)))
        camX0_T_camRs = camXs_T_camRs[:, 0]
        camX1_T_camRs = camXs_T_camRs[:, 1]
        camR_T_camX0 = utils_geom.safe_inverse(camX0_T_camRs)
        xyz_camXs = torch.from_numpy(d["xyz_camXs_raw"]).unsqueeze(0).cuda().to(torch.float)
        xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))
        depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(
            __p(pix_T_cams), __p(xyz_camXs), H, W)
        dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams))
        occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
        occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2))
        occRs_half = torch.max(occRs_half, dim=1).values.squeeze(0)
        occ_complete = occRs_half.cpu().numpy()

    # if hyp.do_time_flip:
    #     d = random_time_flip_single(d, item_names)

    # if the sequence length > 2, select S frames
    # filename = d['raw_seq_filename']
    original_filename = filename
    original_filename_empty = self.empty_scene

    if hyp.dataset_name == "clevr_vqa":
        d['tree_seq_filename'] = "temp"
        pix_T_cams = d['pix_T_cams_raw']
        num_cams = pix_T_cams.shape[0]
        # padding_1 = torch.zeros([num_cams, 1, 3])
        # padding_2 = torch.zeros([num_cams, 4, 1])
        # padding_2[:, 3] = 1.0
        # pix_T_cams = torch.cat([pix_T_cams, padding_1], dim=1)
        # pix_T_cams = torch.cat([pix_T_cams, padding_2], dim=2)
        shape_name = d['shape_list']
        color_name = d['color_list']
        material_name = d['material_list']
        all_name = []
        all_style = []
        for index in range(len(shape_name)):
            name = shape_name[index] + "/" + color_name[index] + "_" + material_name[index]
            style_name = color_name[index] + "_" + material_name[index]
            all_name.append(name)
            all_style.append(style_name)
        if hyp.do_shape:
            class_name = shape_name
        elif hyp.do_color:
            class_name = color_name
        elif hyp.do_material:
            class_name = material_name
        elif hyp.do_style:
            class_name = all_style
        else:
            class_name = all_name
        object_category = class_name
        bbox_origin = d['bbox_origin']
        # bbox_origin = torch.cat([bbox_origin], dim=0)
        # object_category = object_category
        bbox_origin_empty = np.zeros_like(bbox_origin)
        object_category_empty = ['0']

    if hyp.dataset_name != "clevr_vqa":
        filename = d['tree_seq_filename']
        filename_empty = d_empty['tree_seq_filename']

    if hyp.fixed_view:
        d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S)
        # pick the empty-scene views that match the selected camera poses
        d_empty, indexes_empty = specific_select_single_empty(
            d_empty, item_names, d['origin_T_camXs_raw'], num_samples=hyp.S)

    filename_g = "/".join([original_filename, str(indexes[0])])
    filename_e = "/".join([original_filename, str(indexes[1])])
    filename_g_empty = "/".join([original_filename_empty, str(indexes[0])])
    filename_e_empty = "/".join([original_filename_empty, str(indexes[1])])

    rgb_camXs = d['rgb_camXs_raw']
    rgb_camXs_empty = d_empty['rgb_camXs_raw']
    # move channel dim inward, like pytorch wants
    rgb_camXs = np.transpose(rgb_camXs, axes=[0, 3, 1, 2])
    rgb_camXs = rgb_camXs[:, :3]
    rgb_camXs = utils_improc.preprocess_color(rgb_camXs)
    rgb_camXs_empty = np.transpose(rgb_camXs_empty, axes=[0, 3, 1, 2])
    rgb_camXs_empty = rgb_camXs_empty[:, :3]
    rgb_camXs_empty = utils_improc.preprocess_color(rgb_camXs_empty)

    if hyp.dataset_name == "clevr_vqa":
        num_boxes = bbox_origin.shape[0]
        bbox_origin = np.array(bbox_origin)
        score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
        bbox_origin = np.pad(bbox_origin, [[0, hyp.N - num_boxes], [0, 0], [0, 0]])
        object_category = np.pad(object_category, [[0, hyp.N - num_boxes]],
                                 lambda x, y, z, m: "0")
        object_category_empty = np.pad(object_category_empty, [[0, hyp.N - 1]],
                                       lambda x, y, z, m: "0")
        score_empty = np.zeros_like(score)
        bbox_origin_empty = np.zeros_like(bbox_origin)
        d['gt_box'] = np.stack([bbox_origin.astype(np.float32), bbox_origin_empty])
        d['gt_scores'] = np.stack([score.astype(np.float32), score_empty])
        try:
            d['classes'] = np.stack([object_category, object_category_empty]).tolist()
        except Exception as e:
            st()

    # stack the real scene with the empty scene along a new leading dim
    d['rgb_camXs_raw'] = np.stack([rgb_camXs, rgb_camXs_empty])
    d['pix_T_cams_raw'] = np.stack([d["pix_T_cams_raw"], d_empty["pix_T_cams_raw"]])
    d['origin_T_camXs_raw'] = np.stack([d["origin_T_camXs_raw"], d_empty["origin_T_camXs_raw"]])
    d['camR_T_origin_raw'] = np.stack([d["camR_T_origin_raw"], d_empty["camR_T_origin_raw"]])
    d['xyz_camXs_raw'] = np.stack([d["xyz_camXs_raw"], d_empty["xyz_camXs_raw"]])
    # d['rgb_camXs_raw'] = rgb_camXs
    # d['tree_seq_filename'] = filename
    if hyp.dataset_name != "clevr_vqa":
        d['tree_seq_filename'] = [filename, "invalid_tree"]
    else:
        d['tree_seq_filename'] = ["temp"]
    d['filename_e'] = ["temp"]
    d['filename_g'] = ["temp"]
    if hyp.use_gt_occs:
        d['occR_complete'] = np.expand_dims(occ_complete, axis=0)
    return d
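# Both loaders pad per-scene boxes and scores out to a fixed hyp.N so batches
# collate cleanly: real boxes get score 1, padding rows get score 0. A minimal
# numpy sketch of that convention (hypothetical (num_boxes, 6) box layout; the
# real boxes may be corner- or end-point-parameterized):

import numpy as np

def _pad_boxes_sketch(boxes, N):
    # boxes: (num_boxes, 6) -> (N, 6); score: 1.0 per real box, 0.0 per pad row
    num_boxes = boxes.shape[0]
    score = np.pad(np.ones([num_boxes]), [0, N - num_boxes])
    boxes = np.pad(boxes, [[0, N - num_boxes], [0, 0]])
    return boxes.astype(np.float32), score.astype(np.float32)

if __name__ == "__main__":
    boxes, score = _pad_boxes_sketch(np.random.rand(2, 6), N=5)
    assert boxes.shape == (5, 6)
    assert score.tolist() == [1, 1, 0, 0, 0]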
def assemble(bkg_feat0, obj_feat0, origin_T_camRs, camRs_T_zoom):
    # let's first assemble the seq of background tensors
    # this should effectively CREATE egomotion
    # i fully expect we can do this all in one shot
    # note it makes sense to create egomotion here, because
    # we want to predict each view
    B, C, Z, Y, X = list(bkg_feat0.shape)
    B2, C2, Z2, Y2, X2 = list(obj_feat0.shape)
    assert B == B2
    assert C == C2
    B, S, _, _ = list(origin_T_camRs.shape)
    # ok, we have everything we need
    # for each timestep, we want to warp the bkg to this timestep

    # utils for packing/unpacking along seq dim
    __p = lambda x: pack_seqdim(x, B)
    __u = lambda x: unpack_seqdim(x, B)

    # we in fact have utils for this already
    cam0s_T_camRs = utils_geom.get_camM_T_camXs(origin_T_camRs, ind=0)
    camRs_T_cam0s = __u(utils_geom.safe_inverse(__p(cam0s_T_camRs)))

    bkg_feat0s = bkg_feat0.unsqueeze(1).repeat(1, S, 1, 1, 1, 1)
    bkg_featRs = apply_4x4s_to_voxs(camRs_T_cam0s, bkg_feat0s)

    # now for the objects:
    # we want to sample for each location in the bird grid
    xyz_mems_ = utils_basic.gridcloud3D(B * S, Z, Y, X, norm=False)
    # this is B*S x Z*Y*X x 3
    xyz_camRs_ = Mem2Ref(xyz_mems_, Z, Y, X)
    camRs_T_zoom_ = __p(camRs_T_zoom)
    zoom_T_camRs_ = camRs_T_zoom_.inverse()  # note this is not a rigid transform
    xyz_zooms_ = utils_geom.apply_4x4(zoom_T_camRs_, xyz_camRs_)

    # we will do the whole traj at once (per obj)
    # note we just have one feat for the whole traj, so we tile up
    obj_feats = obj_feat0.unsqueeze(1).repeat(1, S, 1, 1, 1, 1)
    obj_feats_ = __p(obj_feats)
    # this is B*S x Z x Y x X x C
    # to sample, we need feats_ in ZYX order
    obj_featRs_ = utils_samp.sample3D(obj_feats_, xyz_zooms_, Z, Y, X)
    obj_featRs = __u(obj_featRs_)

    # overweigh objects, so that we essentially overwrite
    # featRs = 0.05*bkg_featRs + 0.95*obj_featRs
    # overwrite the bkg at the object: mask where the object has support,
    # and keep the background everywhere else
    obj_mask = (obj_featRs > 0).float()
    featRs = obj_featRs + (1.0 - obj_mask) * bkg_featRs
    # note the normalization (next) will restore magnitudes for the bkg
    # featRs = bkg_featRs
    # featRs = obj_featRs

    # l2 normalize on chans
    featRs = l2_normalize(featRs, dim=2)

    validRs = 1.0 - (featRs == 0).all(dim=2, keepdim=True).float().cuda()

    return featRs, validRs, bkg_featRs, obj_featRs
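# A toy check of the overwrite-compositing rule used in assemble(): the object
# feature wins wherever it has support, and the background fills the rest.
# Self-contained sketch with made-up shapes (not the repo's real tensors):

import torch

def _composite_sketch(bkg, obj):
    # bkg, obj: (B, S, C, Z, Y, X); obj is zero away from the object
    obj_mask = (obj > 0).float()
    return obj + (1.0 - obj_mask) * bkg

if __name__ == "__main__":
    bkg = torch.ones(1, 2, 4, 8, 8, 8)
    obj = torch.zeros(1, 2, 4, 8, 8, 8)
    obj[..., :2, :2, :2] = 5.0  # a small object in one corner
    out = _composite_sketch(bkg, obj)
    assert out[..., 0, 0, 0].eq(5.0).all()      # object wins at its voxels
    assert out[..., -1, -1, -1].eq(1.0).all()   # background elsewhere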
def prepare_common_tensors(self, feed, prep_summ=True):
    results = dict()

    if prep_summ:
        self.summ_writer = utils_improc.Summ_writer(
            writer=feed['writer'],
            global_step=feed['global_step'],
            log_freq=feed['set_log_freq'],
            fps=8,
            just_gif=feed['just_gif'],
        )
    else:
        self.summ_writer = None

    self.include_vis = hyp.do_include_vis

    self.B = feed["set_batch_size"]
    self.S = feed["set_seqlen"]

    __p = lambda x: utils_basic.pack_seqdim(x, self.B)
    __u = lambda x: utils_basic.unpack_seqdim(x, self.B)

    self.H, self.W, self.V, self.N = hyp.H, hyp.W, hyp.V, hyp.N
    self.PH, self.PW = hyp.PH, hyp.PW
    self.K = hyp.K

    self.set_name = feed['set_name']
    # print('set_name', self.set_name)
    if self.set_name == 'test':
        self.Z, self.Y, self.X = hyp.Z_test, hyp.Y_test, hyp.X_test
    else:
        self.Z, self.Y, self.X = hyp.Z, hyp.Y, hyp.X
    # print('Z, Y, X = %d, %d, %d' % (self.Z, self.Y, self.X))
    self.Z2, self.Y2, self.X2 = int(self.Z / 2), int(self.Y / 2), int(self.X / 2)
    self.Z4, self.Y4, self.X4 = int(self.Z / 4), int(self.Y / 4), int(self.X / 4)

    self.rgb_camXs = feed["rgb_camXs"]
    self.pix_T_cams = feed["pix_T_cams"]
    self.origin_T_camXs = feed["origin_T_camXs"]
    self.cams_T_velos = feed["cams_T_velos"]

    self.camX0s_T_camXs = utils_geom.get_camM_T_camXs(self.origin_T_camXs, ind=0)
    self.camXs_T_camX0s = __u(utils_geom.safe_inverse(__p(self.camX0s_T_camXs)))

    self.xyz_veloXs = feed["xyz_veloXs"]
    self.xyz_camXs = __u(utils_geom.apply_4x4(__p(self.cams_T_velos), __p(self.xyz_veloXs)))
    self.xyz_camX0s = __u(utils_geom.apply_4x4(__p(self.camX0s_T_camXs), __p(self.xyz_camXs)))

    if self.set_name == 'test':
        self.boxlist_camXs = feed["boxlists"]
        self.scorelist_s = feed["scorelists"]
        self.tidlist_s = feed["tidlists"]

        boxlist_camXs_ = __p(self.boxlist_camXs)
        scorelist_s_ = __p(self.scorelist_s)
        tidlist_s_ = __p(self.tidlist_s)
        boxlist_camXs_, tidlist_s_, scorelist_s_ = utils_misc.shuffle_valid_and_sink_invalid_boxes(
            boxlist_camXs_, tidlist_s_, scorelist_s_)
        self.boxlist_camXs = __u(boxlist_camXs_)
        self.scorelist_s = __u(scorelist_s_)
        self.tidlist_s = __u(tidlist_s_)

        # self.boxlist_camXs[:,0], self.scorelist_s[:,0], self.tidlist_s[:,0] = utils_misc.shuffle_valid_and_sink_invalid_boxes(
        #     self.boxlist_camXs[:,0], self.tidlist_s[:,0], self.scorelist_s[:,0])
        # self.score_s = feed["scorelists"]
        # self.tid_s = torch.ones_like(self.score_s).long()
        # self.lrt_camRs = utils_geom.convert_boxlist_to_lrtlist(self.box_camRs)
        # self.lrt_camXs = utils_geom.apply_4x4s_to_lrts(self.camXs_T_camRs, self.lrt_camRs)
        # self.lrt_camX0s = utils_geom.apply_4x4s_to_lrts(self.camX0s_T_camXs, self.lrt_camXs)
        # self.lrt_camR0s = utils_geom.apply_4x4s_to_lrts(self.camR0s_T_camRs, self.lrt_camRs)

        self.lrtlist_camXs = __u(utils_geom.convert_boxlist_to_lrtlist(__p(self.boxlist_camXs)))
        # print('lrtlist_camXs', self.lrtlist_camXs.shape)  # B x S x N x 19
        # self.summ_writer.summ_lrtlist('2D_inputs/lrtlist_camX0', self.rgb_camXs[:,0], self.lrtlist_camXs[:,0],
        #                               self.scorelist_s[:,0], self.tidlist_s[:,0], self.pix_T_cams[:,0])
        # self.summ_writer.summ_lrtlist('2D_inputs/lrtlist_camX1', self.rgb_camXs[:,1], self.lrtlist_camXs[:,1],
        #                               self.scorelist_s[:,1], self.tidlist_s[:,1], self.pix_T_cams[:,1])

        (self.lrt_camXs,
         self.box_camXs,
         self.score_s,
         ) = utils_misc.collect_object_info(self.lrtlist_camXs,
                                            self.boxlist_camXs,
                                            self.tidlist_s,
                                            self.scorelist_s,
                                            1,
                                            mod='X',
                                            do_vis=False,
                                            summ_writer=None)
        self.lrt_camXs = self.lrt_camXs.squeeze(0)
        self.score_s = self.score_s.squeeze(0)
        self.tid_s = torch.ones_like(self.score_s).long()

        self.lrt_camX0s = utils_geom.apply_4x4s_to_lrts(self.camX0s_T_camXs, self.lrt_camXs)

        if prep_summ and self.include_vis:
            visX_g = []
            for s in list(range(self.S)):
                visX_g.append(
                    self.summ_writer.summ_lrtlist('', self.rgb_camXs[:, s],
                                                  self.lrtlist_camXs[:, s],
                                                  self.scorelist_s[:, s],
                                                  self.tidlist_s[:, s],
                                                  self.pix_T_cams[:, 0],
                                                  only_return=True))
            self.summ_writer.summ_rgbs('2D_inputs/box_camXs', visX_g)
            # visX_g = []
            # for s in list(range(self.S)):
            #     visX_g.append(self.summ_writer.summ_lrtlist(
            #         'track/box_camX%d_g' % s, self.rgb_camXs[:,s], self.lrt_camXs[:,s:s+1],
            #         self.score_s[:,s:s+1], self.tid_s[:,s:s+1], self.pix_T_cams[:,0], only_return=True))
            # self.summ_writer.summ_rgbs('track/box_camXs_g', visX_g)

    if self.set_name == 'test':
        # center on an object, so that it does not fall out of bounds
        self.scene_centroid = utils_geom.get_clist_from_lrtlist(self.lrt_camXs)[:, 0]
        self.vox_util = vox_util.Vox_util(self.Z, self.Y, self.X,
                                          self.set_name,
                                          scene_centroid=self.scene_centroid,
                                          assert_cube=True)
    else:
        # center randomly
        scene_centroid_x = np.random.uniform(-8.0, 8.0)
        scene_centroid_y = np.random.uniform(-1.5, 3.0)
        scene_centroid_z = np.random.uniform(10.0, 26.0)
        scene_centroid = np.array([scene_centroid_x,
                                   scene_centroid_y,
                                   scene_centroid_z]).reshape([1, 3])
        self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()
        # center on a random non-outlier point:
        # rejection-sample centroids until enough points land inbounds
        all_ok = False
        num_tries = 0
        while not all_ok:
            scene_centroid_x = np.random.uniform(-8.0, 8.0)
            scene_centroid_y = np.random.uniform(-1.5, 3.0)
            scene_centroid_z = np.random.uniform(10.0, 26.0)
            scene_centroid = np.array([scene_centroid_x,
                                       scene_centroid_y,
                                       scene_centroid_z]).reshape([1, 3])
            self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()
            num_tries += 1
            # try to vox
            self.vox_util = vox_util.Vox_util(self.Z, self.Y, self.X,
                                              self.set_name,
                                              scene_centroid=self.scene_centroid,
                                              assert_cube=True)
            all_ok = True
            # we want to ensure this gives us a few points inbound for each batch el
            inb = __u(self.vox_util.get_inbounds(__p(self.xyz_camX0s),
                                                 self.Z4, self.Y4, self.X4,
                                                 already_mem=False))
            num_inb = torch.sum(inb.float(), axis=2)
            if torch.min(num_inb) < 100:
                all_ok = False
            if num_tries > 100:
                return False
        self.summ_writer.summ_scalar('zoom_sampling/num_tries', num_tries)
        self.summ_writer.summ_scalar('zoom_sampling/num_inb', torch.mean(num_inb).cpu().item())

    self.occ_memXs = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camXs),
                                                    self.Z, self.Y, self.X))
    self.occ_memX0s = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camX0s),
                                                     self.Z, self.Y, self.X))
    self.occ_memX0s_half = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camX0s),
                                                          self.Z2, self.Y2, self.X2))

    self.unp_memXs = __u(self.vox_util.unproject_rgb_to_mem(
        __p(self.rgb_camXs), self.Z, self.Y, self.X, __p(self.pix_T_cams)))
    self.unp_memX0s = self.vox_util.apply_4x4s_to_voxs(self.camX0s_T_camXs, self.unp_memXs)

    if prep_summ and self.include_vis:
        self.summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(self.rgb_camXs, dim=1))
        self.summ_writer.summ_occs('3D_inputs/occ_memXs', torch.unbind(self.occ_memXs, dim=1))
        self.summ_writer.summ_occs('3D_inputs/occ_memX0s', torch.unbind(self.occ_memX0s, dim=1))
        self.summ_writer.summ_rgb('2D_inputs/rgb_camX0', self.rgb_camXs[:, 0])
        # self.summ_writer.summ_oned('2D_inputs/depth_camX0', self.depth_camXs[:,0], maxval=20.0)
        # self.summ_writer.summ_oned('2D_inputs/valid_camX0', self.valid_camXs[:,0], norm=False)

    return True
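# The random-centroid branch above is rejection sampling: draw a centroid,
# voxelize around it, and keep it only if enough points land in bounds for
# every batch element. A minimal, geometry-free sketch of that control flow
# (the box half-size and threshold below are placeholders, not the repo's
# actual voxel-grid bounds):

import numpy as np

def _sample_centroid_sketch(points, num_needed=100, max_tries=100):
    # points: (num_points, 3); returns a centroid with >= num_needed points
    # inside a fixed-size box around it, or None if sampling keeps failing
    for _ in range(max_tries):
        centroid = np.random.uniform([-8.0, -1.5, 10.0], [8.0, 3.0, 26.0])
        inb = np.all(np.abs(points - centroid) < 8.0, axis=1)
        if inb.sum() >= num_needed:
            return centroid
    return None

if __name__ == "__main__":
    pts = np.random.uniform(-2.0, 2.0, size=(5000, 3)) + [0.0, 0.0, 18.0]
    print(_sample_centroid_sketch(pts))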
def forward(self, feed):
    results = dict()

    summ_writer = utils_improc.Summ_writer(writer=feed['writer'],
                                           global_step=feed['global_step'],
                                           set_name=feed['set_name'],
                                           fps=8)
    writer = feed['writer']
    global_step = feed['global_step']

    total_loss = torch.tensor(0.0)

    __p = lambda x: pack_seqdim(x, B)
    __u = lambda x: unpack_seqdim(x, B)

    B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
    PH, PW = hyp.PH, hyp.PW
    K = hyp.K
    Z, Y, X = hyp.Z, hyp.Y, hyp.X
    Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
    D = 9

    rgb_camRs = feed["rgb_camRs"]
    rgb_camXs = feed["rgb_camXs"]
    pix_T_cams = feed["pix_T_cams"]
    cam_T_velos = feed["cam_T_velos"]

    boxlist_camRs = feed["boxes3D"]
    tidlist_s = feed["tids"]  # coordinate-less and plural
    scorelist_s = feed["scores"]  # coordinate-less and plural

    # # postproc the boxes:
    # scorelist_s = __u(utils_misc.rescore_boxlist_with_inbound(__p(boxlist_camRs), __p(tidlist_s), Z, Y, X))
    boxlist_camRs_, tidlist_s_, scorelist_s_ = __p(boxlist_camRs), __p(tidlist_s), __p(scorelist_s)
    boxlist_camRs_, tidlist_s_, scorelist_s_ = utils_misc.shuffle_valid_and_sink_invalid_boxes(
        boxlist_camRs_, tidlist_s_, scorelist_s_)
    boxlist_camRs = __u(boxlist_camRs_)
    tidlist_s = __u(tidlist_s_)
    scorelist_s = __u(scorelist_s_)

    origin_T_camRs = feed["origin_T_camRs"]
    origin_T_camRs_ = __p(origin_T_camRs)
    origin_T_camXs = feed["origin_T_camXs"]
    origin_T_camXs_ = __p(origin_T_camXs)

    camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
    camX0_T_camXs_ = __p(camX0_T_camXs)
    camRs_T_camXs_ = torch.matmul(origin_T_camRs_.inverse(), origin_T_camXs_)
    camXs_T_camRs_ = camRs_T_camXs_.inverse()
    camRs_T_camXs = __u(camRs_T_camXs_)
    camXs_T_camRs = __u(camXs_T_camRs_)

    xyz_veloXs = feed["xyz_veloXs"]
    xyz_camXs = __u(utils_geom.apply_4x4(__p(cam_T_velos), __p(xyz_veloXs)))
    xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))
    xyz_camX0s = __u(utils_geom.apply_4x4(__p(camX0_T_camXs), __p(xyz_camXs)))

    occRs = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z, Y, X))
    occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
    occX0s = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z, Y, X))
    occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2))
    occXs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z2, Y2, X2))
    occX0s_half = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z2, Y2, X2))

    unpRs = __u(utils_vox.unproject_rgb_to_mem(
        __p(rgb_camXs), Z, Y, X,
        __p(torch.matmul(pix_T_cams, camXs_T_camRs))))
    unpXs = __u(utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X, __p(pix_T_cams)))
    unpX0s = utils_vox.apply_4x4_to_voxs(camX0_T_camXs, unpXs)
    unpRs_half = __u(utils_vox.unproject_rgb_to_mem(
        __p(rgb_camXs), Z2, Y2, X2,
        __p(torch.matmul(pix_T_cams, camXs_T_camRs))))

    #####################
    ## visualize what we got
    #####################
    summ_writer.summ_rgbs('2D_inputs/rgb_camRs', torch.unbind(rgb_camRs, dim=1))
    summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(rgb_camXs, dim=1))
    summ_writer.summ_occs('3D_inputs/occRs', torch.unbind(occRs, dim=1))
    summ_writer.summ_occs('3D_inputs/occXs', torch.unbind(occXs, dim=1))
    summ_writer.summ_unps('3D_inputs/unpRs', torch.unbind(unpRs, dim=1),
                          torch.unbind(occRs, dim=1))
    summ_writer.summ_unps('3D_inputs/unpXs', torch.unbind(unpXs, dim=1),
                          torch.unbind(occXs, dim=1))
    summ_writer.summ_unps('3D_inputs/unpX0s', torch.unbind(unpX0s, dim=1),
                          torch.unbind(occX0s, dim=1))

    lrtlist_camRs = __u(utils_geom.convert_boxlist_to_lrtlist(boxlist_camRs_)).reshape(B, S, N, 19)
    lrtlist_camXs = __u(utils_geom.apply_4x4_to_lrtlist(__p(camXs_T_camRs), __p(lrtlist_camRs)))
    # stabilize boxes for ego/cam motion
    lrtlist_camX0s = __u(utils_geom.apply_4x4_to_lrtlist(__p(camX0_T_camXs), __p(lrtlist_camXs)))
    # these are B x S x N x 19

    summ_writer.summ_lrtlist('lrtlist_camR0', rgb_camRs[:, 0], lrtlist_camRs[:, 0],
                             scorelist_s[:, 0], tidlist_s[:, 0], pix_T_cams[:, 0])
    summ_writer.summ_lrtlist('lrtlist_camR1', rgb_camRs[:, 1], lrtlist_camRs[:, 1],
                             scorelist_s[:, 1], tidlist_s[:, 1], pix_T_cams[:, 1])
    summ_writer.summ_lrtlist('lrtlist_camX0', rgb_camXs[:, 0], lrtlist_camXs[:, 0],
                             scorelist_s[:, 0], tidlist_s[:, 0], pix_T_cams[:, 0])
    summ_writer.summ_lrtlist('lrtlist_camX1', rgb_camXs[:, 1], lrtlist_camXs[:, 1],
                             scorelist_s[:, 1], tidlist_s[:, 1], pix_T_cams[:, 1])

    (obj_lrtlist_camXs,
     obj_scorelist_s,
     ) = utils_misc.collect_object_info(lrtlist_camXs, tidlist_s, scorelist_s,
                                        pix_T_cams, K, mod='X',
                                        do_vis=True, summ_writer=summ_writer)
    (obj_lrtlist_camRs,
     obj_scorelist_s,
     ) = utils_misc.collect_object_info(lrtlist_camRs, tidlist_s, scorelist_s,
                                        pix_T_cams, K, mod='R',
                                        do_vis=True, summ_writer=summ_writer)
    (obj_lrtlist_camX0s,
     obj_scorelist_s,
     ) = utils_misc.collect_object_info(lrtlist_camX0s, tidlist_s, scorelist_s,
                                        pix_T_cams, K, mod='X0',
                                        do_vis=False)

    masklist_memR = utils_vox.assemble_padded_obj_masklist(
        lrtlist_camRs[:, 0], scorelist_s[:, 0], Z, Y, X, coeff=1.0)
    masklist_memX = utils_vox.assemble_padded_obj_masklist(
        lrtlist_camXs[:, 0], scorelist_s[:, 0], Z, Y, X, coeff=1.0)
    # obj_mask_memR is B x N x 1 x Z x Y x X
    summ_writer.summ_occ('obj/masklist_memR', torch.sum(masklist_memR, dim=1))
    summ_writer.summ_occ('obj/masklist_memX', torch.sum(masklist_memX, dim=1))

    # to do tracking or whatever, i need to be able to extract a 3d object crop
    cropX0_obj0 = utils_vox.crop_zoom_from_mem(occXs[:, 0], lrtlist_camXs[:, 0, 0], Z2, Y2, X2)
    cropX0_obj1 = utils_vox.crop_zoom_from_mem(occXs[:, 0], lrtlist_camXs[:, 0, 1], Z2, Y2, X2)
    cropR0_obj0 = utils_vox.crop_zoom_from_mem(occRs[:, 0], lrtlist_camRs[:, 0, 0], Z2, Y2, X2)
    cropR0_obj1 = utils_vox.crop_zoom_from_mem(occRs[:, 0], lrtlist_camRs[:, 0, 1], Z2, Y2, X2)
    # print('got it:')
    # print(cropX00.shape)
    # summ_writer.summ_occ('crops/cropX0_obj0', cropX0_obj0)
    # summ_writer.summ_occ('crops/cropX0_obj1', cropX0_obj1)
    summ_writer.summ_feat('crops/cropX0_obj0', cropX0_obj0, pca=False)
    summ_writer.summ_feat('crops/cropX0_obj1', cropX0_obj1, pca=False)
    summ_writer.summ_feat('crops/cropR0_obj0', cropR0_obj0, pca=False)
    summ_writer.summ_feat('crops/cropR0_obj1', cropR0_obj1, pca=False)

    if hyp.do_feat:
        if hyp.flow_do_synth_rt:
            result = utils_misc.get_synth_flow(unpRs_half,
                                               occRs_half,
                                               obj_lrtlist_camX0s,
                                               obj_scorelist_s,
                                               occXs_half,
                                               feed['set_name'],
                                               K=K,
                                               summ_writer=summ_writer,
                                               sometimes_zero=True,
                                               sometimes_real=False)
            occXs, unpXs, flowX0, camX1_T_camX0, is_synth = result
        else:
            # ego-stabilized flow from X00 to X01
            flowX0 = utils_misc.get_gt_flow(obj_lrtlist_camX0s,
                                            obj_scorelist_s,
                                            utils_geom.eye_4x4s(B, S),
                                            occXs_half[:, 0],
                                            K=K,
                                            occ_only=False,  # get the dense flow
                                            mod='X0',
                                            summ_writer=summ_writer)

        # occXs is B x S x 1 x H x W x D
        # unpXs is B x S x 3 x H x W x D
        # featXs_input = torch.cat([occXs, occXs*unpXs], dim=2)
        featX0s_input = torch.cat([occX0s, occX0s * unpX0s], dim=2)
        featX0s_input_ = __p(featX0s_input)
        featX0s_, validX0s_, feat_loss = self.featnet(featX0s_input_, summ_writer)
        total_loss += feat_loss
        featX0s = __u(featX0s_)
        # _featX00 = featXs[:,0:1]
        # _featX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:,1:], featXs[:,1:])
        # featX0s = torch.cat([_featX00, _featX01], dim=1)
        validX0s = 1.0 - (featX0s == 0).all(dim=2, keepdim=True).float()
        # this shall be B x S x 1 x H x W x D

        summ_writer.summ_feats('3D_feats/featX0s_input',
                               torch.unbind(featX0s_input, dim=1), pca=True)
        # summ_writer.summ_feats('3D_feats/featXs_output', torch.unbind(featXs, dim=1), pca=True)
        summ_writer.summ_feats('3D_feats/featX0s_output',
                               torch.unbind(featX0s, dim=1), pca=True)

    if hyp.do_flow:
        # total flow from X0 to X1
        flowX = utils_misc.get_gt_flow(obj_lrtlist_camXs,
                                       obj_scorelist_s,
                                       camX0_T_camXs,
                                       occXs_half[:, 0],
                                       K=K,
                                       occ_only=False,  # get the dense flow
                                       mod='X',
                                       vis=False,
                                       summ_writer=None)
        # # vis this to confirm it's ok (it is)
        # unpX0_e = utils_samp.backwarp_using_3D_flow(unpXs[:,1], flowX)
        # occX0_e = utils_samp.backwarp_using_3D_flow(occXs[:,1], flowX)
        # summ_writer.summ_unps('flow/backwarpX', [unpX0s[:,0], unpX0_e], [occXs[:,0], occX0_e])
        # unpX0_e = utils_samp.backwarp_using_3D_flow(unpX0s[:,1], flowX0)
        # occX0_e = utils_samp.backwarp_using_3D_flow(occX0s[:,1], flowX0, binary_feat=True)
        # summ_writer.summ_unps('flow/backwarpX0', [unpX0s[:,0], unpX0_e], [occXs[:,0], occX0_e])

        # note: is_synth is only defined on the synth-flow path above
        flow_loss, flowX0_pred = self.flownet(
            featX0s[:, 0],
            featX0s[:, 1],
            flowX0,  # gt flow
            torch.max(validX0s[:, 1:], dim=1)[0],
            is_synth,
            summ_writer)
        total_loss += flow_loss
        # g = flowX.reshape(-1)
        # summ_writer.summ_histogram('flowX_g_nonzero_hist', g[torch.abs(g)>0.01])
        # g = flowX0.reshape(-1)
        # e = flowX0_pred.reshape(-1)
        # summ_writer.summ_histogram('flowX0_g_nonzero_hist', g[torch.abs(g)>0.01])
        # summ_writer.summ_histogram('flowX0_e_nonzero_hist', e[torch.abs(g)>0.01])

    summ_writer.summ_scalar('loss', total_loss.cpu().item())
    return total_loss, results
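# The valid-mask trick above marks a voxel invalid only when *every* channel of
# its feature is exactly zero (i.e., the warp never wrote to it). A tiny
# self-contained check of that reduction, with made-up dims:

import torch

def _valid_from_feat_sketch(feat):
    # feat: (B, S, C, Z, Y, X) -> valid: (B, S, 1, Z, Y, X) in {0, 1}
    return 1.0 - (feat == 0).all(dim=2, keepdim=True).float()

if __name__ == "__main__":
    feat = torch.zeros(1, 2, 4, 2, 2, 2)
    feat[0, 0, 1, 0, 0, 0] = 3.0  # one channel written -> that voxel is valid
    valid = _valid_from_feat_sketch(feat)
    assert valid[0, 0, 0, 0, 0, 0] == 1.0
    assert valid.sum() == 1.0  # every other voxel is all-zero, hence invalid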
def forward(self, feed):
    results = dict()

    if 'log_freq' not in feed.keys():
        feed['log_freq'] = None
    start_time = time.time()

    summ_writer = utils_improc.Summ_writer(writer=feed['writer'],
                                           global_step=feed['global_step'],
                                           set_name=feed['set_name'],
                                           log_freq=feed['log_freq'],
                                           fps=8)
    writer = feed['writer']
    global_step = feed['global_step']

    total_loss = torch.tensor(0.0).cuda()

    # packing/unpacking helpers along the seq and box dims
    __p = lambda x: utils_basic.pack_seqdim(x, B)
    __u = lambda x: utils_basic.unpack_seqdim(x, B)
    __pb = lambda x: utils_basic.pack_boxdim(x, hyp.N)
    __ub = lambda x: utils_basic.unpack_boxdim(x, hyp.N)
    if hyp.aug_object_ent_dis:
        __pb_a = lambda x: utils_basic.pack_boxdim(x, hyp.max_obj_aug + hyp.max_obj_aug_dis)
        __ub_a = lambda x: utils_basic.unpack_boxdim(x, hyp.max_obj_aug + hyp.max_obj_aug_dis)
    else:
        __pb_a = lambda x: utils_basic.pack_boxdim(x, hyp.max_obj_aug)
        __ub_a = lambda x: utils_basic.unpack_boxdim(x, hyp.max_obj_aug)

    B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
    PH, PW = hyp.PH, hyp.PW
    K = hyp.K
    BOX_SIZE = hyp.BOX_SIZE
    Z, Y, X = hyp.Z, hyp.Y, hyp.X
    Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
    Z4, Y4, X4 = int(Z / 4), int(Y / 4), int(X / 4)
    D = 9

    tids = torch.from_numpy(np.reshape(np.arange(B * N), [B, N]))

    rgb_camXs = feed["rgb_camXs_raw"]
    pix_T_cams = feed["pix_T_cams_raw"]
    camRs_T_origin = feed["camR_T_origin_raw"]
    origin_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_origin)))
    origin_T_camXs = feed["origin_T_camXs_raw"]

    camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
    camRs_T_camXs = __u(torch.matmul(utils_geom.safe_inverse(__p(origin_T_camRs)),
                                     __p(origin_T_camXs)))
    camXs_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_camXs)))
    camX0_T_camRs = camXs_T_camRs[:, 0]
    camX1_T_camRs = camXs_T_camRs[:, 1]
    camR_T_camX0 = utils_geom.safe_inverse(camX0_T_camRs)

    xyz_camXs = feed["xyz_camXs_raw"]
    depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(
        __p(pix_T_cams), __p(xyz_camXs), H, W)
    dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams))

    xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))
    xyz_camX0s = __u(utils_geom.apply_4x4(__p(camX0_T_camXs), __p(xyz_camXs)))

    occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
    occXs_to_Rs = utils_vox.apply_4x4s_to_voxs(camRs_T_camXs, occXs)
    occXs_to_Rs_45 = cross_corr.rotate_tensor_along_y_axis(occXs_to_Rs, 45)
    occXs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z2, Y2, X2))
    occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2))
    occX0s_half = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z2, Y2, X2))

    unpXs = __u(utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X, __p(pix_T_cams)))
    unpXs_half = __u(utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z2, Y2, X2, __p(pix_T_cams)))
    unpX0s_half = __u(utils_vox.unproject_rgb_to_mem(
        __p(rgb_camXs), Z2, Y2, X2,
        utils_basic.matmul2(__p(pix_T_cams),
                            utils_geom.safe_inverse(__p(camX0_T_camXs)))))
    unpRs = __u(utils_vox.unproject_rgb_to_mem(
        __p(rgb_camXs), Z, Y, X,
        utils_basic.matmul2(__p(pix_T_cams),
                            utils_geom.safe_inverse(__p(camRs_T_camXs)))))
    unpRs_half = __u(utils_vox.unproject_rgb_to_mem(
        __p(rgb_camXs), Z2, Y2, X2,
        utils_basic.matmul2(__p(pix_T_cams),
                            utils_geom.safe_inverse(__p(camRs_T_camXs)))))

    dense_xyz_camRs_ = utils_geom.apply_4x4(__p(camRs_T_camXs), dense_xyz_camXs_)
    inbound_camXs_ = utils_vox.get_inbounds(dense_xyz_camRs_, Z, Y, X).float()
    inbound_camXs_ = torch.reshape(inbound_camXs_, [B * S, 1, H, W])
    depth_camXs = __u(depth_camXs_)
    valid_camXs = __u(valid_camXs_) * __u(inbound_camXs_)

    summ_writer.summ_oneds('2D_inputs/depth_camXs', torch.unbind(depth_camXs, dim=1),
                           maxdepth=21.0)
    summ_writer.summ_oneds('2D_inputs/valid_camXs', torch.unbind(valid_camXs, dim=1))
    summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(rgb_camXs, dim=1))
    summ_writer.summ_occs('3D_inputs/occXs', torch.unbind(occXs, dim=1))
    summ_writer.summ_unps('3D_inputs/unpXs', torch.unbind(unpXs, dim=1),
                          torch.unbind(occXs, dim=1))
    occRs = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z, Y, X))

    if hyp.do_eval_boxes:
        if hyp.dataset_name == "clevr_vqa":
            gt_boxes_origin_corners = feed['gt_box']
            gt_scores_origin = feed['gt_scores'].detach().cpu().numpy()
            classes = feed['classes']
            scores = gt_scores_origin
            tree_seq_filename = feed['tree_seq_filename']
            gt_boxes_origin = nlu.get_ends_of_corner(gt_boxes_origin_corners)
            gt_boxes_origin_end = torch.reshape(gt_boxes_origin, [hyp.B, hyp.N, 2, 3])
            gt_boxes_origin_theta = nlu.get_alignedboxes2thetaformat(gt_boxes_origin_end)
            gt_boxes_origin_corners = utils_geom.transform_boxes_to_corners(gt_boxes_origin_theta)
            gt_boxesR_corners = __ub(utils_geom.apply_4x4(camRs_T_origin[:, 0],
                                                          __pb(gt_boxes_origin_corners)))
            gt_boxesR_theta = utils_geom.transform_corners_to_boxes(gt_boxesR_corners)
            gt_boxesR_end = nlu.get_ends_of_corner(gt_boxesR_corners)
        else:
            tree_seq_filename = feed['tree_seq_filename']
            tree_filenames = [join(hyp.root_dataset, i) for i in tree_seq_filename
                              if i != "invalid_tree"]
            invalid_tree_filenames = [join(hyp.root_dataset, i) for i in tree_seq_filename
                                      if i == "invalid_tree"]
            num_empty = len(invalid_tree_filenames)
            trees = [pickle.load(open(i, "rb")) for i in tree_filenames]
            len_valid = len(trees)
            if len_valid > 0:
                gt_boxesR, scores, classes = nlu.trees_rearrange(trees)
            if num_empty > 0:
                gt_boxesR = np.concatenate([gt_boxesR, empty_gt_boxesR]) if len_valid > 0 else empty_gt_boxesR
                scores = np.concatenate([scores, empty_scores]) if len_valid > 0 else empty_scores
                classes = np.concatenate([classes, empty_classes]) if len_valid > 0 else empty_classes
            gt_boxesR = torch.from_numpy(gt_boxesR).cuda().float()  # torch.Size([2, 3, 6])
            gt_boxesR_end = torch.reshape(gt_boxesR, [hyp.B, hyp.N, 2, 3])
            gt_boxesR_theta = nlu.get_alignedboxes2thetaformat(gt_boxesR_end)  # torch.Size([2, 3, 9])
            gt_boxesR_corners = utils_geom.transform_boxes_to_corners(gt_boxesR_theta)

        class_names_ex_1 = "_".join(classes[0])
        summ_writer.summ_text('eval_boxes/class_names', class_names_ex_1)

        gt_boxesRMem_corners = __ub(utils_vox.Ref2Mem(__pb(gt_boxesR_corners), Z2, Y2, X2))
        gt_boxesRMem_end = nlu.get_ends_of_corner(gt_boxesRMem_corners)
        gt_boxesRMem_theta = utils_geom.transform_corners_to_boxes(gt_boxesRMem_corners)
        gt_boxesRUnp_corners = __ub(utils_vox.Ref2Mem(__pb(gt_boxesR_corners), Z, Y, X))
        gt_boxesRUnp_end = nlu.get_ends_of_corner(gt_boxesRUnp_corners)

        gt_boxesX0_corners = __ub(utils_geom.apply_4x4(camX0_T_camRs, __pb(gt_boxesR_corners)))
        gt_boxesX0Mem_corners = __ub(utils_vox.Ref2Mem(__pb(gt_boxesX0_corners), Z2, Y2, X2))
        gt_boxesX0Mem_theta = utils_geom.transform_corners_to_boxes(gt_boxesX0Mem_corners)
        gt_boxesX0Mem_end = nlu.get_ends_of_corner(gt_boxesX0Mem_corners)
        gt_boxesX0_end = nlu.get_ends_of_corner(gt_boxesX0_corners)
        gt_cornersX0_pix = __ub(utils_geom.apply_pix_T_cam(pix_T_cams[:, 0],
                                                           __pb(gt_boxesX0_corners)))

        rgb_camX0 = rgb_camXs[:, 0]
        rgb_camX1 = rgb_camXs[:, 1]

        summ_writer.summ_box_by_corners('eval_boxes/gt_boxescamX0', rgb_camX0,
                                        gt_boxesX0_corners, torch.from_numpy(scores),
                                        tids, pix_T_cams[:, 0])
        unps_vis = utils_improc.get_unps_vis(unpX0s_half, occX0s_half)
        unp_vis = torch.mean(unps_vis, dim=1)
        unps_visRs = utils_improc.get_unps_vis(unpRs_half, occRs_half)
        unp_visRs = torch.mean(unps_visRs, dim=1)
        unps_visRs_full = utils_improc.get_unps_vis(unpRs, occRs)
        unp_visRs_full = torch.mean(unps_visRs_full, dim=1)
        summ_writer.summ_box_mem_on_unp('eval_boxes/gt_boxesR_mem', unp_visRs,
                                        gt_boxesRMem_end, scores, tids)

        unpX0s_half = torch.mean(unpX0s_half, dim=1)
        unpX0s_half = nlu.zero_out(unpX0s_half, gt_boxesX0Mem_end, scores)
        occX0s_half = torch.mean(occX0s_half, dim=1)
        occX0s_half = nlu.zero_out(occX0s_half, gt_boxesX0Mem_end, scores)
        summ_writer.summ_unp('3D_inputs/unpX0s', unpX0s_half, occX0s_half)

    if hyp.do_feat:
        featXs_input = torch.cat([occXs, occXs * unpXs], dim=2)
        featXs_input_ = __p(featXs_input)

        freeXs_ = utils_vox.get_freespace(__p(xyz_camXs), __p(occXs_half))
        freeXs = __u(freeXs_)
        visXs = torch.clamp(occXs_half + freeXs, 0.0, 1.0)

        mask_ = None
        if mask_ is not None:
            assert list(mask_.shape)[2:5] == list(featXs_input_.shape)[2:5]
        featXs_, feat_loss = self.featnet(featXs_input_, summ_writer, mask=__p(occXs))  # mask_
        total_loss += feat_loss

        validXs = torch.ones_like(visXs)
        _validX00 = validXs[:, 0:1]
        _validX01 = utils_vox.apply_4x4s_to_voxs(camX0_T_camXs[:, 1:], validXs[:, 1:])
        validX0s = torch.cat([_validX00, _validX01], dim=1)
        validRs = utils_vox.apply_4x4s_to_voxs(camRs_T_camXs, validXs)
        visRs = utils_vox.apply_4x4s_to_voxs(camRs_T_camXs, visXs)

        featXs = __u(featXs_)
        _featX00 = featXs[:, 0:1]
        _featX01 = utils_vox.apply_4x4s_to_voxs(camX0_T_camXs[:, 1:], featXs[:, 1:])
        featX0s = torch.cat([_featX00, _featX01], dim=1)

        emb3D_e = torch.mean(featX0s[:, 1:], dim=1)
        vis3D_e_R = torch.max(visRs[:, 1:], dim=1)[0]
        emb3D_g = featX0s[:, 0]
        vis3D_g_R = visRs[:, 0]
        validR_combo = torch.min(validRs, dim=1).values

        summ_writer.summ_feats('3D_feats/featXs_input',
                               torch.unbind(featXs_input, dim=1), pca=True)
        summ_writer.summ_feats('3D_feats/featXs_output',
                               torch.unbind(featXs, dim=1),
                               valids=torch.unbind(validXs, dim=1), pca=True)
        summ_writer.summ_feats('3D_feats/featX0s_output',
                               torch.unbind(featX0s, dim=1),
                               valids=torch.unbind(torch.ones_like(validRs), dim=1),
                               pca=True)
        summ_writer.summ_feats('3D_feats/validRs',
                               torch.unbind(validRs, dim=1), pca=False)
        summ_writer.summ_feat('3D_feats/vis3D_e_R', vis3D_e_R, pca=False)
        summ_writer.summ_feat('3D_feats/vis3D_g_R', vis3D_g_R, pca=False)

    if hyp.do_munit:
        object_classes, filenames = nlu.create_object_classes(
            classes, [tree_seq_filename, tree_seq_filename], scores)
        if hyp.do_munit_fewshot:
            emb3D_e_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_e)
            emb3D_g_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_g)
            emb3D_R = emb3D_e_R
            emb3D_e_R_object, emb3D_g_R_object, validR_combo_object = nlu.create_object_tensors(
                [emb3D_e_R, emb3D_g_R], [validR_combo], gt_boxesRMem_end, scores,
                [BOX_SIZE, BOX_SIZE, BOX_SIZE])
            emb3D_R_object = (emb3D_e_R_object + emb3D_g_R_object) / 2
            content, style = self.munitnet.net.gen_a.encode(emb3D_R_object)
            objects_taken, _ = self.munitnet.net.gen_a.decode(content, style)
            styles = style
            contents = content
        elif hyp.do_3d_style_munit:
            emb3D_e_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_e)
            emb3D_g_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_g)
            emb3D_R = emb3D_e_R
            emb3D_e_R_object, emb3D_g_R_object, validR_combo_object = nlu.create_object_tensors(
                [emb3D_e_R, emb3D_g_R], [validR_combo], gt_boxesRMem_end, scores,
                [BOX_SIZE, BOX_SIZE, BOX_SIZE])
            emb3D_R_object = (emb3D_e_R_object + emb3D_g_R_object) / 2
            camX1_T_R = camXs_T_camRs[:, 1]
            camX0_T_R = camXs_T_camRs[:, 0]
            assert hyp.B == 2
            assert emb3D_e_R_object.shape[0] == 2
            munit_loss, sudo_input_0, sudo_input_1, recon_input_0, recon_input_1, \
                sudo_input_0_cycle, sudo_input_1_cycle, styles, contents, adin = \
                self.munitnet(emb3D_R_object[0:1], emb3D_R_object[1:2])

        if hyp.store_content_style_range:
            if self.max_content is None:
                self.max_content = torch.zeros_like(contents[0][0]).cuda() - 100000000
            if self.min_content is None:
                self.min_content = torch.zeros_like(contents[0][0]).cuda() + 100000000
            if self.max_style is None:
                self.max_style = torch.zeros_like(styles[0][0]).cuda() - 100000000
            if self.min_style is None:
                self.min_style = torch.zeros_like(styles[0][0]).cuda() + 100000000
            self.max_content = torch.max(torch.max(self.max_content, contents[0][0]), contents[1][0])
            self.min_content = torch.min(torch.min(self.min_content, contents[0][0]), contents[1][0])
            self.max_style = torch.max(torch.max(self.max_style, styles[0][0]), styles[1][0])
            self.min_style = torch.min(torch.min(self.min_style, styles[0][0]), styles[1][0])
            data_to_save = {
                'max_content': self.max_content.cpu().numpy(),
                'min_content': self.min_content.cpu().numpy(),
                'max_style': self.max_style.cpu().numpy(),
                'min_style': self.min_style.cpu().numpy(),
            }
            with open('content_style_range.p', 'wb') as f:
                pickle.dump(data_to_save, f)
        elif hyp.is_contrastive_examples:
            if hyp.normalize_contrast:
                content0 = (contents[0] - self.min_content) / (self.max_content - self.min_content + 1e-5)
                content1 = (contents[1] - self.min_content) / (self.max_content - self.min_content + 1e-5)
                style0 = (styles[0] - self.min_style) / (self.max_style - self.min_style + 1e-5)
                style1 = (styles[1] - self.min_style) / (self.max_style - self.min_style + 1e-5)
            else:
                content0 = contents[0]
                content1 = contents[1]
                style0 = styles[0]
                style1 = styles[1]

            # euclid_dist_content = torch.sum(torch.sqrt((content0 - content1)**2)) / torch.prod(torch.tensor(content0.shape))
            # euclid_dist_style = torch.sum(torch.sqrt((style0 - style1)**2)) / torch.prod(torch.tensor(style0.shape))
            euclid_dist_content = (content0 - content1).norm(2) / content0.numel()
            euclid_dist_style = (style0 - style1).norm(2) / style0.numel()
            content_0_pooled = torch.mean(content0.reshape(list(content0.shape[:2]) + [-1]), dim=-1)
            content_1_pooled = torch.mean(content1.reshape(list(content1.shape[:2]) + [-1]), dim=-1)
            euclid_dist_content_pooled = (content_0_pooled - content_1_pooled).norm(2) / content_0_pooled.numel()

            content_0_normalized = content0 / content0.norm()
            content_1_normalized = content1 / content1.norm()
            style_0_normalized = style0 / style0.norm()
            style_1_normalized = style1 / style1.norm()
            content_0_pooled_normalized = content_0_pooled / content_0_pooled.norm()
            content_1_pooled_normalized = content_1_pooled / content_1_pooled.norm()

            cosine_dist_content = torch.sum(content_0_normalized * content_1_normalized)
            cosine_dist_style = torch.sum(style_0_normalized * style_1_normalized)
            cosine_dist_content_pooled = torch.sum(content_0_pooled_normalized * content_1_pooled_normalized)

            print("euclid dist [content, pooled-content, style]: ",
                  euclid_dist_content, euclid_dist_content_pooled, euclid_dist_style)
            print("cosine sim [content, pooled-content, style]: ",
                  cosine_dist_content, cosine_dist_content_pooled, cosine_dist_style)

        if hyp.run_few_shot_on_munit:
            if (global_step % 300) == 1 or (global_step % 300) == 0:
                # periodically report precision and reset the few-shot pools
                wrong = False
                try:
                    precision_style = float(self.tp_style) / self.all_style
                    precision_content = float(self.tp_content) / self.all_content
                except ZeroDivisionError:
                    wrong = True
                if not wrong:
                    summ_writer.summ_scalar('precision/unsupervised_precision_style',
                                            precision_style)
                    summ_writer.summ_scalar('precision/unsupervised_precision_content',
                                            precision_content)
                self.embed_list_style = defaultdict(lambda: [])
                self.embed_list_content = defaultdict(lambda: [])
                self.tp_style = 0
                self.all_style = 0
                self.tp_content = 0
                self.all_content = 0
                self.check = False
            elif not self.check and not nlu.check_fill_dict(self.embed_list_content,
                                                            self.embed_list_style):
                # fill the per-class pools until each has few_shot_nums embeddings
                print("Filling \n")
                for index, class_val in enumerate(object_classes):
                    if hyp.dataset_name == "clevr_vqa":
                        class_val_content, class_val_style = class_val.split("/")
                    else:
                        class_val_content, class_val_style = [class_val.split("/")[0],
                                                              class_val.split("/")[0]]
                    print(len(self.embed_list_style.keys()), "style class",
                          len(self.embed_list_content), "content class",
                          self.embed_list_content.keys())
                    if len(self.embed_list_style[class_val_style]) < hyp.few_shot_nums:
                        self.embed_list_style[class_val_style].append(styles[index].squeeze())
                    if len(self.embed_list_content[class_val_content]) < hyp.few_shot_nums:
                        if hyp.avg_3d:
                            content_val = contents[index]
                            content_val = torch.mean(content_val.reshape([content_val.shape[1], -1]), dim=-1)
                            self.embed_list_content[class_val_content].append(content_val)
                        else:
                            self.embed_list_content[class_val_content].append(contents[index].reshape([-1]))
            else:
                self.check = True
                try:
                    print(float(self.tp_content) / self.all_content)
                    print(float(self.tp_style) / self.all_style)
                except Exception as e:
                    pass
                average = True
                if average:
                    # collapse each class pool to its mean embedding (a prototype)
                    for key, val in self.embed_list_style.items():
                        if isinstance(val, list):
                            self.embed_list_style[key] = torch.mean(torch.stack(val, dim=0), dim=0)
                    for key, val in self.embed_list_content.items():
                        if isinstance(val, list):
                            self.embed_list_content[key] = torch.mean(torch.stack(val, dim=0), dim=0)
                else:
                    for key, val in self.embed_list_style.items():
                        if isinstance(val, list):
                            self.embed_list_style[key] = torch.stack(val, dim=0)
                    for key, val in self.embed_list_content.items():
                        if isinstance(val, list):
                            self.embed_list_content[key] = torch.stack(val, dim=0)

                for index, class_val in enumerate(object_classes):
                    if hyp.dataset_name == "clevr_vqa":
                        class_val_content, class_val_style = class_val.split("/")
                    else:
                        class_val_content, class_val_style = [class_val.split("/")[0],
                                                              class_val.split("/")[0]]
                    # classify style by cosine similarity against the stored prototypes
                    style_val = styles[index].squeeze().unsqueeze(0)
                    if not average:
                        embed_list_val_style = torch.cat(list(self.embed_list_style.values()), dim=0)
                        embed_list_key_style = list(
                            np.repeat(np.expand_dims(list(self.embed_list_style.keys()), 1),
                                      hyp.few_shot_nums, 1).reshape([-1]))
                    else:
                        embed_list_val_style = torch.stack(list(self.embed_list_style.values()), dim=0)
                        embed_list_key_style = list(self.embed_list_style.keys())
                    embed_list_val_style = utils_basic.l2_normalize(embed_list_val_style, dim=1).permute(1, 0)
                    style_val = utils_basic.l2_normalize(style_val, dim=1)
                    scores_styles = torch.matmul(style_val, embed_list_val_style)
                    index_key = torch.argmax(scores_styles, dim=1).squeeze()
                    selected_class_style = embed_list_key_style[index_key]
                    self.styles_prediction[class_val_style].append(selected_class_style)
                    if class_val_style == selected_class_style:
                        self.tp_style += 1
                    self.all_style += 1

                    # same retrieval for content
                    if hyp.avg_3d:
                        content_val = contents[index]
                        content_val = torch.mean(content_val.reshape([content_val.shape[1], -1]),
                                                 dim=-1).unsqueeze(0)
                    else:
                        content_val = contents[index].reshape([-1]).unsqueeze(0)
                    if not average:
                        embed_list_val_content = torch.cat(list(self.embed_list_content.values()), dim=0)
                        embed_list_key_content = list(
                            np.repeat(np.expand_dims(list(self.embed_list_content.keys()), 1),
                                      hyp.few_shot_nums, 1).reshape([-1]))
                    else:
                        embed_list_val_content = torch.stack(list(self.embed_list_content.values()), dim=0)
                        embed_list_key_content = list(self.embed_list_content.keys())
                    embed_list_val_content = utils_basic.l2_normalize(embed_list_val_content, dim=1).permute(1, 0)
                    content_val = utils_basic.l2_normalize(content_val, dim=1)
                    scores_content = torch.matmul(content_val, embed_list_val_content)
                    index_key = torch.argmax(scores_content, dim=1).squeeze()
                    selected_class_content = embed_list_key_content[index_key]
                    self.content_prediction[class_val_content].append(selected_class_content)
                    if class_val_content == selected_class_content:
                        self.tp_content += 1
                    self.all_content += 1

        munit_loss = hyp.munit_loss_weight * munit_loss

        recon_input_obj = torch.cat([recon_input_0, recon_input_1], dim=0)
        recon_emb3D_R = nlu.update_scene_with_objects(emb3D_R, recon_input_obj,
                                                      gt_boxesRMem_end, scores)
        sudo_input_obj = torch.cat([sudo_input_0, sudo_input_1], dim=0)
        styled_emb3D_R = nlu.update_scene_with_objects(emb3D_R, sudo_input_obj,
                                                       gt_boxesRMem_end, scores)

        styled_emb3D_e_X1 = utils_vox.apply_4x4_to_vox(camX1_T_R, styled_emb3D_R)
        styled_emb3D_e_X0 = utils_vox.apply_4x4_to_vox(camX0_T_R, styled_emb3D_R)
        emb3D_e_X1 = utils_vox.apply_4x4_to_vox(camX1_T_R, recon_emb3D_R)
        emb3D_e_X0 = utils_vox.apply_4x4_to_vox(camX0_T_R, recon_emb3D_R)
        emb3D_e_X1_og = utils_vox.apply_4x4_to_vox(camX1_T_R, emb3D_R)
        emb3D_e_X0_og = utils_vox.apply_4x4_to_vox(camX0_T_R, emb3D_R)

        emb3D_R_aug_diff = torch.abs(emb3D_R - recon_emb3D_R)
        summ_writer.summ_feat('aug_feat/og', emb3D_R)
        summ_writer.summ_feat('aug_feat/og_gen', recon_emb3D_R)
        summ_writer.summ_feat('aug_feat/og_aug_diff', emb3D_R_aug_diff)

        if hyp.cycle_style_view_loss:
            sudo_input_obj_cycle = torch.cat([sudo_input_0_cycle, sudo_input_1_cycle], dim=0)
            styled_emb3D_R_cycle = nlu.update_scene_with_objects(
                emb3D_R, sudo_input_obj_cycle, gt_boxesRMem_end, scores)
            styled_emb3D_e_X0_cycle = utils_vox.apply_4x4_to_vox(camX0_T_R, styled_emb3D_R_cycle)
            styled_emb3D_e_X1_cycle = utils_vox.apply_4x4_to_vox(camX1_T_R, styled_emb3D_R_cycle)

        summ_writer.summ_scalar('munit_loss', munit_loss.cpu().item())
        total_loss += munit_loss

    if hyp.do_occ and hyp.occ_do_cheap:
        occX0_sup, freeX0_sup, _, freeXs = utils_vox.prep_occs_supervision(
            camX0_T_camXs, xyz_camXs, Z2, Y2, X2, agg=True)
        summ_writer.summ_occ('occ_sup/occ_sup', occX0_sup)
        summ_writer.summ_occ('occ_sup/free_sup', freeX0_sup)
        summ_writer.summ_occs('occ_sup/freeXs_sup', torch.unbind(freeXs, dim=1))
        summ_writer.summ_occs('occ_sup/occXs_sup', torch.unbind(occXs_half, dim=1))
        occ_loss, occX0s_pred_ = self.occnet(torch.mean(featX0s[:, 1:], dim=1),
                                             occX0_sup,
                                             freeX0_sup,
                                             torch.max(validX0s[:, 1:], dim=1)[0],
                                             summ_writer)
        occX0s_pred = __u(occX0s_pred_)
        total_loss += occ_loss

    if hyp.do_view:
        assert hyp.do_feat
        PH, PW = hyp.PH, hyp.PW
        sy = float(PH) / float(hyp.H)
        sx = float(PW) / float(hyp.W)
        assert sx == 0.5  # else we need a fancier downsampler
        assert sy == 0.5
        projpix_T_cams = __u(utils_geom.scale_intrinsics(__p(pix_T_cams), sx, sy))

        if hyp.do_munit:
            feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camX0_T_camXs[:, 1],
                emb3D_e_X1,  # use feat1 to predict rgb0
                hyp.view_depth, PH, PW)
            feat_projX00_og = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camX0_T_camXs[:, 1],
                emb3D_e_X1_og,  # use feat1 to predict rgb0
                hyp.view_depth, PH, PW)
            # only for checking the style
            styled_feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camX0_T_camXs[:, 1],
                styled_emb3D_e_X1,  # use feat1 to predict rgb0
                hyp.view_depth, PH, PW)
            if hyp.cycle_style_view_loss:
                styled_feat_projX00_cycle = utils_vox.apply_pixX_T_memR_to_voxR(
                    projpix_T_cams[:, 0], camX0_T_camXs[:, 1],
                    styled_emb3D_e_X1_cycle,  # use feat1 to predict rgb0
                    hyp.view_depth, PH, PW)
        else:
            feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camX0_T_camXs[:, 1],
                featXs[:, 1],  # use feat1 to predict rgb0
                hyp.view_depth, PH, PW)

        rgb_X00 = utils_basic.downsample(rgb_camXs[:, 0], 2)
        rgb_X01 = utils_basic.downsample(rgb_camXs[:, 1], 2)
        valid_X00 = utils_basic.downsample(valid_camXs[:, 0], 2)

        view_loss, rgb_e, emb2D_e = self.viewnet(feat_projX00, rgb_X00, valid_X00,
                                                 summ_writer, "rgb")
        if hyp.do_munit:
            _, rgb_e, emb2D_e = self.viewnet(feat_projX00_og, rgb_X00, valid_X00,
                                             summ_writer, "rgb_og")
            styled_view_loss, styled_rgb_e, styled_emb2D_e = self.viewnet(
                styled_feat_projX00, rgb_X00, valid_X00, summ_writer, "recon_style")
            if hyp.cycle_style_view_loss:
                styled_view_loss_cycle, styled_rgb_e_cycle, styled_emb2D_e_cycle = self.viewnet(
                    styled_feat_projX00_cycle, rgb_X00, valid_X00, summ_writer,
                    "recon_style_cycle")

            rgb_input_1 = torch.cat([rgb_X01[1], rgb_X01[0], styled_rgb_e[0]], dim=2)
            rgb_input_2 = torch.cat([rgb_X01[0], rgb_X01[1], styled_rgb_e[1]], dim=2)
            complete_vis = torch.cat([rgb_input_1, rgb_input_2], dim=1)
            summ_writer.summ_rgb('munit/munit_recons_vis', complete_vis.unsqueeze(0))

        if not hyp.do_munit:
            total_loss += view_loss
        else:
            if hyp.basic_view_loss:
                total_loss += view_loss
            if hyp.style_view_loss:
                total_loss += styled_view_loss
            if hyp.cycle_style_view_loss:
                total_loss += styled_view_loss_cycle

    summ_writer.summ_scalar('loss', total_loss.cpu().item())

    if hyp.save_embed_tsne:
        for index, class_val in enumerate(object_classes):
            class_val_content, class_val_style = class_val.split("/")
            style_val = styles[index].squeeze().unsqueeze(0)
            self.cluster_pool.update(style_val, [class_val_style])
            print(self.cluster_pool.num)
        if self.cluster_pool.is_full():
            embeds, classes = self.cluster_pool.fetch()
            with open("offline_cluster" + '/%st.txt' % 'classes', 'w') as f:
                for index, embed in enumerate(classes):
                    class_val = classes[index]
                    f.write("%s\n" % class_val)
            with open("offline_cluster" + '/%st.txt' % 'embeddings', 'w') as f:
                for index, embed in enumerate(embeds):
                    # embed = utils_basic.l2_normalize(embed, dim=0)
                    print("writing {} embed".format(index))
                    embed_l_s = [str(i) for i in embed.tolist()]
                    embed_str = '\t'.join(embed_l_s)
                    f.write("%s\n" % embed_str)
            st()  # breakpoint: stop once the cluster pool has been dumped

    return total_loss, results
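# The few-shot loop above is nearest-class retrieval: average a few stored
# embeddings per class into a prototype, L2-normalize, and classify a query by
# the argmax of cosine similarity. A minimal sketch with made-up class names
# and dimensions (not the repo's munit embeddings):

import torch

def _retrieve_class_sketch(query, prototypes, class_names):
    # query: (1, D); prototypes: (num_classes, D), one row per class
    q = query / (query.norm(dim=1, keepdim=True) + 1e-8)
    p = prototypes / (prototypes.norm(dim=1, keepdim=True) + 1e-8)
    scores = torch.matmul(q, p.t())  # (1, num_classes) cosine similarities
    return class_names[int(torch.argmax(scores, dim=1))]

if __name__ == "__main__":
    protos = torch.eye(3)  # 3 orthogonal class prototypes
    query = torch.tensor([[0.9, 0.1, 0.0]])
    print(_retrieve_class_sketch(query, protos, ["cube", "sphere", "cylinder"]))
    # -> "cube"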
    def forward(self, feed):
        results = dict()

        summ_writer = utils_improc.Summ_writer(
            writer=feed['writer'],
            global_step=feed['global_step'],
            set_name=feed['set_name'],
            fps=8)

        writer = feed['writer']
        global_step = feed['global_step']

        total_loss = torch.tensor(0.0).cuda()

        __p = lambda x: pack_seqdim(x, B)
        __u = lambda x: unpack_seqdim(x, B)

        B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
        PH, PW = hyp.PH, hyp.PW
        K = hyp.K
        Z, Y, X = hyp.Z, hyp.Y, hyp.X
        Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
        D = 9

        rgb_camRs = feed["rgb_camRs"]
        rgb_camXs = feed["rgb_camXs"]
        pix_T_cams = feed["pix_T_cams"]
        cam_T_velos = feed["cam_T_velos"]

        origin_T_camRs = feed["origin_T_camRs"]
        origin_T_camRs_ = __p(origin_T_camRs)
        origin_T_camXs = feed["origin_T_camXs"]
        origin_T_camXs_ = __p(origin_T_camXs)

        camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
        camX0_T_camXs_ = __p(camX0_T_camXs)
        camRs_T_camXs_ = torch.matmul(
            utils_geom.safe_inverse(origin_T_camRs_), origin_T_camXs_)
        camXs_T_camRs_ = utils_geom.safe_inverse(camRs_T_camXs_)
        camRs_T_camXs = __u(camRs_T_camXs_)
        camXs_T_camRs = __u(camXs_T_camRs_)

        xyz_veloXs = feed["xyz_veloXs"]
        xyz_camXs = __u(utils_geom.apply_4x4(__p(cam_T_velos),
                                             __p(xyz_veloXs)))
        xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs),
                                             __p(xyz_camXs)))
        xyz_camX0s = __u(utils_geom.apply_4x4(__p(camX0_T_camXs),
                                              __p(xyz_camXs)))

        occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
        occXs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z2, Y2, X2))
        occX0s_half = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z2, Y2, X2))

        unpXs = __u(utils_vox.unproject_rgb_to_mem(
            __p(rgb_camXs), Z, Y, X, __p(pix_T_cams)))

        ## projected depth, and inbound mask
        depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(
            __p(pix_T_cams), __p(xyz_camXs), H, W)
        dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_,
                                                       __p(pix_T_cams))
        dense_xyz_camX0s_ = utils_geom.apply_4x4(__p(camX0_T_camXs),
                                                 dense_xyz_camXs_)
        inbound_camXs_ = utils_vox.get_inbounds(dense_xyz_camX0s_,
                                                Z, Y, X).float()
        inbound_camXs_ = torch.reshape(inbound_camXs_, [B * S, 1, H, W])

        depth_camXs = __u(depth_camXs_)
        valid_camXs = __u(valid_camXs_) * __u(inbound_camXs_)

        #####################
        ## visualize what we got
        #####################
        summ_writer.summ_oneds('2D_inputs/depth_camXs',
                               torch.unbind(depth_camXs, dim=1))
        summ_writer.summ_oneds('2D_inputs/valid_camXs',
                               torch.unbind(valid_camXs, dim=1))
        summ_writer.summ_rgbs('2D_inputs/rgb_camRs',
                              torch.unbind(rgb_camRs, dim=1))
        summ_writer.summ_rgbs('2D_inputs/rgb_camXs',
                              torch.unbind(rgb_camXs, dim=1))
        summ_writer.summ_occs('3D_inputs/occXs', torch.unbind(occXs, dim=1))
        summ_writer.summ_unps('3D_inputs/unpXs', torch.unbind(unpXs, dim=1),
                              torch.unbind(occXs, dim=1))

        if summ_writer.save_this:
            unpRs = __u(utils_vox.unproject_rgb_to_mem(
                __p(rgb_camXs), Z, Y, X,
                matmul2(__p(pix_T_cams),
                        utils_geom.safe_inverse(__p(camRs_T_camXs)))))
            occRs = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z, Y, X))
            summ_writer.summ_occs('3D_inputs/occRs',
                                  torch.unbind(occRs, dim=1))
            summ_writer.summ_unps('3D_inputs/unpRs',
                                  torch.unbind(unpRs, dim=1),
                                  torch.unbind(occRs, dim=1))
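        # Shape sketch for the __p/__u helpers used throughout (assuming
        # pack_seqdim/unpack_seqdim match the utils_basic versions):
        #   xyz_camXs           : B x S x N x 3
        #   __p(xyz_camXs)      : (B*S) x N x 3   (seq dim folded into batch)
        #   __u(__p(xyz_camXs)) : B x S x N x 3   (folded back out)
        # This lets per-frame ops (voxelize_xyz, apply_4x4, ...) run over all
        # S frames in one batched call.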
        #####################
        ## run the nets
        #####################
        mask_ = None
        if hyp.do_occ and (not hyp.occ_do_cheap):
            '''
            occRs_sup, freeRs_sup, freeXs = utils_vox.prep_occs_supervision(
                xyz_camXs, occRs_half, occXs_half, camRs_T_camXs, agg=True)
            featRs_input = torch.cat([occRs, occRs*unpRs], dim=2)
            featRs_input_ = __p(featRs_input)
            occRs_sup_ = __p(occRs_sup)
            freeRs_sup_ = __p(freeRs_sup)
            occ_loss, occRs_pred_ = self.occnet(featRs_input_, occRs_sup_,
                                                freeRs_sup_, summ_writer)
            occRs_pred = __u(occRs_pred_)
            total_loss += occ_loss
            mask_ = F.upsample(occRs_pred_, scale_factor=2)
            '''
            occXs_ = __p(occXs)
            mask_ = occXs_

        if hyp.do_feat:
            # occXs is B x S x 1 x H x W x D
            # unpXs is B x S x 3 x H x W x D
            featXs_input = torch.cat([occXs, occXs * unpXs], dim=2)
            featXs_input_ = __p(featXs_input)
            # it is useful to keep track of what was visible from each viewpoint
            freeXs_ = utils_vox.get_freespace(__p(xyz_camXs), __p(occXs_half))
            freeXs = __u(freeXs_)
            visXs = torch.clamp(occXs_half + freeXs, 0.0, 1.0)

            if mask_ is not None:
                assert (list(mask_.shape)[2:5] ==
                        list(featXs_input_.shape)[2:5])
            featXs_, validXs_, feat_loss = self.featnet(
                featXs_input_, summ_writer, mask=__p(occXs))  # or mask=mask_
            total_loss += feat_loss

            validXs = __u(validXs_)
            _validX00 = validXs[:, 0:1]
            _validX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:, 1:],
                                                    validXs[:, 1:])
            validX0s = torch.cat([_validX00, _validX01], dim=1)

            _visX00 = visXs[:, 0:1]
            _visX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:, 1:],
                                                  visXs[:, 1:])
            visX0s = torch.cat([_visX00, _visX01], dim=1)

            featXs = __u(featXs_)
            _featX00 = featXs[:, 0:1]
            _featX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:, 1:],
                                                   featXs[:, 1:])
            featX0s = torch.cat([_featX00, _featX01], dim=1)

            emb3D_e = torch.mean(featX0s[:, 1:], dim=1)  # context
            emb3D_g = featX0s[:, 0]  # obs
            vis3D_e = (torch.max(validX0s[:, 1:], dim=1)[0] *
                       torch.max(visX0s[:, 1:], dim=1)[0])
            vis3D_g = validX0s[:, 0] * visX0s[:, 0]  # obs

            if hyp.do_eval_recall:
                results['emb3D_e'] = emb3D_e
                results['emb3D_g'] = emb3D_g

            summ_writer.summ_feats('3D_feats/featXs_input',
                                   torch.unbind(featXs_input, dim=1),
                                   pca=True)
            summ_writer.summ_feats('3D_feats/featXs_output',
                                   torch.unbind(featXs, dim=1), pca=True)
            summ_writer.summ_feats('3D_feats/featX0s_output',
                                   torch.unbind(featX0s, dim=1), pca=True)
            summ_writer.summ_feats('3D_feats/validX0s',
                                   torch.unbind(validX0s, dim=1), pca=False)
            summ_writer.summ_feat('3D_feats/vis3D_e', vis3D_e, pca=False)
            summ_writer.summ_feat('3D_feats/vis3D_g', vis3D_g, pca=False)

        if hyp.do_occ and hyp.occ_do_cheap:
            occX0_sup, freeX0_sup, freeXs = utils_vox.prep_occs_supervision(
                xyz_camXs, occX0s_half, occXs_half, camX0_T_camXs, agg=True)

            summ_writer.summ_occ('occ_sup/occ_sup', occX0_sup)
            summ_writer.summ_occ('occ_sup/free_sup', freeX0_sup)
            summ_writer.summ_occs('occ_sup/freeXs_sup',
                                  torch.unbind(freeXs, dim=1))
            summ_writer.summ_occs('occ_sup/occXs_sup',
                                  torch.unbind(occXs_half, dim=1))

            occ_loss, occRs_pred_ = self.occnet(
                torch.mean(featX0s[:, 1:], dim=1), occX0_sup, freeX0_sup,
                torch.max(validX0s[:, 1:], dim=1)[0], summ_writer)
            occRs_pred = __u(occRs_pred_)
            total_loss += occ_loss
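        # Supervision sketch for the cheap occ loss above (illustrative):
        # each lidar/depth point marks its voxel occupied; voxels that a
        # camera ray crosses before its hit are marked free; everything else
        # is left unsupervised. prep_occs_supervision aggregates these labels
        # over the S views in camX0 coordinates, at half resolution.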
        if hyp.do_view:
            assert hyp.do_feat
            # we warped the features into the canonical view;
            # now we resample to the target view and decode
            PH, PW = hyp.PH, hyp.PW
            sy = float(PH) / float(hyp.H)
            sx = float(PW) / float(hyp.W)
            assert sx == 0.5  # else we need a fancier downsampler
            assert sy == 0.5
            projpix_T_cams = __u(
                utils_geom.scale_intrinsics(__p(pix_T_cams), sx, sy))

            assert S == 2  # else we should warp each feat in frames 1:
            feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camX0_T_camXs[:, 1], featXs[:, 1],
                hyp.view_depth, PH, PW)
            # feat_projX00 is B x hyp.feat_dim x hyp.view_depth x PH x PW
            rgb_X00 = downsample(rgb_camXs[:, 0], 2)

            if summ_writer.save_this:
                # for vis, let's also project some rgb
                rgb_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                    projpix_T_cams[:, 0], camXs_T_camRs[:, 0], unpRs[:, 0],
                    hyp.view_depth, PH, PW)
                rgb_projX01 = utils_vox.apply_pixX_T_memR_to_voxR(
                    projpix_T_cams[:, 1], camXs_T_camRs[:, 1], unpRs[:, 1],
                    hyp.view_depth, PH, PW)
                occ_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                    projpix_T_cams[:, 0], camXs_T_camRs[:, 0], occRs[:, 0],
                    hyp.view_depth, PH, PW)
                occ_projX01 = utils_vox.apply_pixX_T_memR_to_voxR(
                    projpix_T_cams[:, 1], camXs_T_camRs[:, 1], occRs[:, 1],
                    hyp.view_depth, PH, PW)
                rgb_projX00_vis = reduce_masked_mean(
                    rgb_projX00, occ_projX00.repeat([1, 3, 1, 1, 1]), dim=2)
                rgb_projX01_vis = reduce_masked_mean(
                    rgb_projX01, occ_projX01.repeat([1, 3, 1, 1, 1]), dim=2)
                summ_writer.summ_rgbs('projection/rgb_projX',
                                      [rgb_projX00_vis, rgb_projX01_vis])
                rgb_X01 = downsample(rgb_camXs[:, 1], 2)
                summ_writer.summ_rgbs('projection/rgb_origX',
                                      [rgb_X00, rgb_X01])

            # decode the perspective volume into an image
            view_loss, rgb_e, emb2D_e = self.viewnet(feat_projX00, rgb_X00,
                                                     summ_writer)
            total_loss += view_loss

        if hyp.do_emb2D:
            assert hyp.do_view
            # create an embedding image, representing the bottom-up 2D
            # feature tensor
            emb_loss_2D, emb2D_g = self.embnet2D(rgb_camXs[:, 0], emb2D_e,
                                                 valid_camXs[:, 0],
                                                 summ_writer)
            total_loss += emb_loss_2D

        if hyp.do_emb3D:
            occX0_sup, freeX0_sup, freeXs = utils_vox.prep_occs_supervision(
                xyz_camXs, occX0s_half, occXs_half, camX0_T_camXs, agg=True)
            emb_loss_3D = self.embnet3D(emb3D_e, emb3D_g, vis3D_e, vis3D_g,
                                        summ_writer)
            total_loss += emb_loss_3D

        if hyp.do_eval_recall:
            results['emb2D_e'] = None
            results['emb2D_g'] = None

        summ_writer.summ_scalar('loss', total_loss.cpu().item())
        return total_loss, results
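# For reference, reduce_masked_mean (used in the do_view vis block above)
# computes a mask-weighted mean over `dim`. A minimal sketch, assuming the
# usual EPS-stabilized form of this helper:
#
#   def reduce_masked_mean(x, mask, dim):
#       numer = torch.sum(x * mask, dim=dim)
#       denom = 1e-6 + torch.sum(mask, dim=dim)
#       return numer / denom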