def convert_params_to_lrt(self, obj_len, obj_xyz_sce, obj_rot, cam_T_sce):
    # this borrows from utils_geom.convert_box_to_ref_T_obj
    B = list(obj_xyz_sce.shape)[0]
    obj_xyz_cam = utils_geom.apply_4x4(cam_T_sce, obj_xyz_sce.unsqueeze(1)).squeeze(1)

    rot0 = utils_geom.eye_3x3(B)
    center_T_ref = utils_geom.merge_rt(rot0, -obj_xyz_cam)
    # center_T_ref is B x 4 x 4

    t0 = torch.zeros([B, 3], device=obj_xyz_cam.device)  # keep on the same device as the inputs
    obj_T_center = utils_geom.merge_rt(obj_rot, t0)
    # obj_T_center is B x 4 x 4

    # we want obj_T_ref:
    # first we translate to the object center,
    # and then rotate around the origin
    obj_T_ref = utils_basic.matmul2(obj_T_center, center_T_ref)

    # invert, so that we can transform obj corners into cam coords
    ref_T_obj = utils_geom.safe_inverse(obj_T_ref)

    # compose with lengths
    lrt = utils_geom.merge_lrt(obj_len, ref_T_obj)
    return lrt
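# A minimal, self-contained sketch of the "lrt" convention used above: 19 numbers
# per box = 3 lengths + a flattened 4x4 ref_T_obj. The packing below re-implements
# what merge_rt/merge_lrt/split_lrt appear to do, for illustration only; the repo's
# own helpers may differ in details.
def _sketch_lrt_roundtrip():
    import torch
    B = 2
    lens = torch.rand(B, 3)  # lx, ly, lz
    R = torch.eye(3).repeat(B, 1, 1)
    t = torch.rand(B, 3)
    # merge_rt: stack [R | t] over the homogeneous bottom row
    bottom = torch.tensor([0., 0., 0., 1.]).view(1, 1, 4).repeat(B, 1, 1)
    rt = torch.cat([torch.cat([R, t.unsqueeze(2)], dim=2), bottom], dim=1)  # B x 4 x 4
    # merge_lrt: concatenate lengths with the flattened rt
    lrt = torch.cat([lens, rt.reshape(B, 16)], dim=1)  # B x 19
    # split_lrt: undo the packing
    lens2, rt2 = lrt[:, :3], lrt[:, 3:].reshape(B, 4, 4)
    assert torch.allclose(lens, lens2) and torch.allclose(rt, rt2)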
def apply_4x4_to_vox(B_T_A, feat_A, already_mem=False, binary_feat=False, rigid=True):
    # B_T_A is B x 4 x 4
    # if already_mem=False, it is a transformation between cam systems
    # if already_mem=True, it is a transformation between mem systems
    #
    # feat_A is B x C x Z x Y x X
    # it represents some scene features in reference/canonical coordinates
    # we want to go from these coords to some target coords
    #
    # since this is a backwarp, the question to ask is:
    # "WHERE in the tensor do you want to sample,
    # to replace each voxel's current value?"
    # the inverse of B_T_A represents this "where";
    # it transforms each coordinate in B
    # to the location we want to sample in A

    B, C, Z, Y, X = list(feat_A.shape)
    # we take B_T_A as input to match the signature of utils_geom.apply_4x4,
    # but for the backwarp we really need A_T_B
    if rigid:
        A_T_B = utils_geom.safe_inverse(B_T_A)
    else:
        # this op is slower but more general
        A_T_B = B_T_A.inverse()

    if not already_mem:
        cam_T_mem = get_ref_T_mem(B, Z, Y, X)
        mem_T_cam = get_mem_T_ref(B, Z, Y, X)
        A_T_B = matmul3(mem_T_cam, A_T_B, cam_T_mem)

    # we want to sample for each location in the target grid
    xyz_B = gridcloud3D(B, Z, Y, X)
    # this is B x N x 3

    # transform
    xyz_A = utils_geom.apply_4x4(A_T_B, xyz_B)
    # we want each voxel to take its value
    # from whatever is at these A coordinates,
    # i.e., we are back-warping from the "A" coords

    # feat_B = F.grid_sample(feat_A, normalize_grid(xyz_A, Z, Y, X))
    feat_B = utils_samp.resample3D(feat_A, xyz_A, binary_feat=binary_feat)
    # feat_B, valid = utils_samp.resample3D(feat_A, xyz_A, binary_feat=binary_feat)
    # return feat_B, valid
    return feat_B
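# The commented-out grid_sample call above is the core of the backwarp. Below is a
# standalone sketch (not the repo's resample3D), assuming voxel coordinates are
# normalized to [-1, 1] and using grid_sample's (x, y, z) ordering against a
# B x C x Z x Y x X tensor. An identity warp should reproduce the input exactly.
# Assumes torch >= 1.10 for meshgrid's indexing kwarg.
def _sketch_backwarp_3d():
    import torch
    import torch.nn.functional as F
    B, C, Z, Y, X = 1, 2, 4, 4, 4
    feat_A = torch.rand(B, C, Z, Y, X)
    # identity flow: sample every voxel from its own location
    zz, yy, xx = torch.meshgrid(torch.arange(Z), torch.arange(Y), torch.arange(X),
                                indexing='ij')
    xyz = torch.stack([xx, yy, zz], dim=-1).float()  # Z x Y x X x 3, (x, y, z) order
    # normalize to [-1, 1], matching align_corners=True
    norm = torch.tensor([X - 1, Y - 1, Z - 1]).float()
    grid = ((xyz / norm) * 2.0 - 1.0).unsqueeze(0)   # B x Z x Y x X x 3
    feat_B = F.grid_sample(feat_A, grid, align_corners=True)
    assert torch.allclose(feat_A, feat_B, atol=1e-5)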
def assemble_padded_obj_masklist(lrtlist, scorelist, Z, Y, X, coeff=1.0):
    # compute a binary mask in 3D for each object
    # we use this when computing the center-surround objectness score
    # lrtlist is B x N x 19
    # scorelist is B x N
    # returns masklist shaped B x N x 1 x Z x Y x X
    B, N, D = list(lrtlist.shape)
    assert (D == 19)

    lenlist, ref_T_objlist = utils_geom.split_lrtlist(lrtlist)
    # lenlist is B x N x 3
    # ref_T_objlist is B x N x 4 x 4
    lenlist_ = lenlist.reshape(B * N, 3)
    ref_T_objlist_ = ref_T_objlist.reshape(B * N, 4, 4)
    obj_T_reflist_ = utils_geom.safe_inverse(ref_T_objlist_)

    # we want a value for each location in the mem grid
    xyz_mem_ = gridcloud3D(B * N, Z, Y, X)
    # this is B*N x V x 3, where V = Z*Y*X
    xyz_ref_ = Mem2Ref(xyz_mem_, Z, Y, X)
    # this is B*N x V x 3

    lx, ly, lz = torch.unbind(lenlist_, dim=1)
    # these are B*N

    xyz_obj_ = utils_geom.apply_4x4(obj_T_reflist_, xyz_ref_)
    x, y, z = torch.unbind(xyz_obj_, dim=2)
    # these are B*N x V

    lx = lx.unsqueeze(1) * coeff
    ly = ly.unsqueeze(1) * coeff
    lz = lz.unsqueeze(1) * coeff
    # these are B*N x 1

    # a point is inside the (padded) box if it is within half the length on every axis
    x_valid = (x > -lx / 2.0) & (x < lx / 2.0)
    y_valid = (y > -ly / 2.0) & (y < ly / 2.0)
    z_valid = (z > -lz / 2.0) & (z < lz / 2.0)
    inbounds = x_valid & y_valid & z_valid

    masklist = inbounds.float()
    masklist = masklist.reshape(B, N, 1, Z, Y, X)
    # zero out the masks of objects with zero score
    masklist = masklist * scorelist.view(B, N, 1, 1, 1, 1)
    return masklist
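# A tiny standalone check of the inside-box test used above: a point is inside an
# axis-aligned box (in object coordinates) iff |coord| < half-length on every axis.
# Illustrative values only.
def _sketch_inside_box():
    import torch
    lx, ly, lz = 2.0, 1.0, 4.0
    pts = torch.tensor([[0.0, 0.0, 0.0],
                        [1.5, 0.0, 0.0],
                        [0.9, -0.4, 1.9]])
    x, y, z = pts.unbind(1)
    inside = (x.abs() < lx / 2.0) & (y.abs() < ly / 2.0) & (z.abs() < lz / 2.0)
    assert inside.tolist() == [True, False, True]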
def test_bbox_projection(self, xyz_camXs_origin_agg, origin_T_camXs, pix_T_camXs,
                         rgb_camX, xyz_camXs, f):
    rgb_camX = rgb_camX.astype(np.float32)
    objs_info = f['objects_info']
    for obj_info in objs_info:
        if obj_info['category_name'] == "chair":
            bbox_center = obj_info['bbox_center']
            bbox_size = obj_info['bbox_size']
            print("bbox center and size are: ", bbox_center, bbox_size)
            xmin, xmax = bbox_center[0] - bbox_size[0] / 2., bbox_center[0] + bbox_size[0] / 2.
            ymin, ymax = bbox_center[1] - bbox_size[1] / 2., bbox_center[1] + bbox_size[1] / 2.
            zmin, zmax = bbox_center[2] - bbox_size[2] / 2., bbox_center[2] + bbox_size[2] / 2.
            bbox_origin_ends = np.array([xmin, ymin, zmin, xmax, ymax, zmax])
            bbox_origin_theta = nlu.get_alignedboxes2thetaformat(
                torch.tensor(bbox_origin_ends).reshape(1, 1, 2, 3).float())
            bbox_origin_corners = utils_geom.transform_boxes_to_corners(bbox_origin_theta)
            nlu.only_visualize(nlu.make_pcd(xyz_camXs_origin_agg.numpy()),
                               bbox_origin_ends.reshape(1, -1))
            print("Ends of bbox in origin are: ", bbox_origin_ends)

            camX_T_origin = utils_geom.safe_inverse(
                torch.tensor(origin_T_camXs).unsqueeze(0)).float()
            bbox_corners_camX = utils_geom.apply_4x4(
                camX_T_origin, bbox_origin_corners.squeeze(0).float())
            bbox_ends_camX = nlu.get_ends_of_corner(
                bbox_corners_camX.permute(0, 2, 1)).permute(0, 2, 1)
            ends_camX = bbox_ends_camX.reshape(1, -1).numpy()
            print("ends in camX are: ", ends_camX)
            nlu.only_visualize(nlu.make_pcd(xyz_camXs), ends_camX)

            plt.imshow(rgb_camX)
            plt.show(block=True)
            utils_pointcloud.draw_boxes_on_rgb(rgb_camX, pix_T_camXs, ends_camX,
                                               visualize=True)
def assemble_static_seq(feats, ref0_T_refXs):
    # feats is B x S x C x Z x Y x X, in mem coords
    # ref0_T_refXs is B x S x 4 x 4
    # it tells us how to warp the static scene
    # ref0 represents a reference frame, not necessarily frame0
    # refXs represents the frames where feats were observed
    B, S, C, Z, Y, X = list(feats.shape)
    # each feat is in its own little coord system;
    # we need to get from ref0 coords to those coords, and sample

    # we want to sample for each location in the mem grid
    # xyz_mem = gridcloud3D(B, Z, Y, X)
    grid_y, grid_x, grid_z = meshgrid3D(B, Z, Y, X)
    # each is B x Z x Y x X; these are the mem grid coordinates
    x = torch.reshape(grid_x, [B, -1])
    y = torch.reshape(grid_y, [B, -1])
    z = torch.reshape(grid_z, [B, -1])
    # these are B x N
    xyz_mem = torch.stack([x, y, z], dim=2)
    # this is B x N x 3

    # convert the mem coordinates to ref coordinates
    xyz_ref = Mem2Ref(xyz_mem, Z, Y, X)
    # this is B x N x 3

    xyz_refs = xyz_ref.unsqueeze(1).repeat(1, S, 1, 1)
    # this is B x S x N x 3
    xyz_refs_ = torch.reshape(xyz_refs, (B * S, Z * Y * X, 3))

    feats_ = torch.reshape(feats, (B * S, C, Z, Y, X))

    ref0_T_refXs_ = torch.reshape(ref0_T_refXs, (B * S, 4, 4))
    refXs_T_ref0_ = utils_geom.safe_inverse(ref0_T_refXs_)

    xyz_refXs_ = utils_geom.apply_4x4(refXs_T_ref0_, xyz_refs_)
    xyz_memXs_ = Ref2Mem(xyz_refXs_, Z, Y, X)
    feats_, _ = utils_samp.resample3D(feats_, xyz_memXs_)
    feats = torch.reshape(feats_, (B, S, C, Z, Y, X))
    return feats
def apply_pixX_T_memR_to_voxR(pix_T_camX, camX_T_camR, voxR, D, H, W):
    # mats are B x 4 x 4
    # voxR is B x C x Z x Y x X
    # D, H, W indicate how big to make the output
    # returns B x C x D x H x W
    B, C, Z, Y, X = list(voxR.shape)
    z_near = hyp.ZMIN
    z_far = hyp.ZMAX

    grid_z = torch.linspace(z_near, z_far, steps=D, dtype=torch.float32,
                            device=torch.device('cuda'))
    # to sample in log-depth instead:
    # grid_z = torch.exp(torch.linspace(np.log(z_near), np.log(z_far), steps=D,
    #                                   dtype=torch.float32, device=torch.device('cuda')))
    grid_z = torch.reshape(grid_z, [1, 1, D, 1, 1])
    grid_z = grid_z.repeat([B, 1, 1, H, W])
    grid_z = torch.reshape(grid_z, [B * D, 1, H, W])

    pix_T_camX_ = torch.unsqueeze(pix_T_camX, dim=1).repeat([1, D, 1, 1])
    pix_T_camX = torch.reshape(pix_T_camX_, [B * D, 4, 4])
    xyz_camX = utils_geom.depth2pointcloud(grid_z, pix_T_camX)

    camR_T_camX = utils_geom.safe_inverse(camX_T_camR)
    camR_T_camX_ = torch.unsqueeze(camR_T_camX, dim=1).repeat([1, D, 1, 1])
    camR_T_camX = torch.reshape(camR_T_camX_, [B * D, 4, 4])

    mem_T_cam = get_mem_T_ref(B * D, Z, Y, X)
    memR_T_camX = matmul2(mem_T_cam, camR_T_camX)

    xyz_memR = utils_geom.apply_4x4(memR_T_camX, xyz_camX)
    xyz_memR = torch.reshape(xyz_memR, [B, D * H * W, 3])

    samp = utils_samp.sample3D(voxR, xyz_memR, D, H, W)
    # samp is B x C x D x H x W
    return samp
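# depth2pointcloud above turns each depth plane into camera-frame points via the
# pinhole model. A minimal sketch under the usual intrinsics convention
# (fx, fy, x0, y0); the constant-depth plane is exactly what the depth sweep
# in the function above builds, one plane per sampled depth.
def _sketch_unproject_depth_plane():
    import torch
    H, W = 2, 3
    fx = fy = 100.0
    x0, y0 = W / 2.0, H / 2.0
    depth = torch.full((1, 1, H, W), 5.0)  # one constant-depth plane
    v, u = torch.meshgrid(torch.arange(H).float(), torch.arange(W).float(),
                          indexing='ij')
    z = depth[0, 0]
    x = (u - x0) / fx * z
    y = (v - y0) / fy * z
    xyz = torch.stack([x, y, z], dim=-1).reshape(-1, 3)  # H*W x 3 camera-frame points
    assert xyz.shape == (H * W, 3) and torch.allclose(xyz[:, 2], torch.tensor(5.0))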
def assemble(bkg_feat0, obj_feat0, origin_T_camRs, camRs_T_zoom):
    # first assemble the seq of background tensors;
    # this effectively CREATES egomotion, and we can do it all in one shot.
    # it makes sense to create egomotion here, because we want to predict each view
    B, C, Z, Y, X = list(bkg_feat0.shape)
    B2, C2, Z2, Y2, X2 = list(obj_feat0.shape)
    assert (B == B2)
    assert (C == C2)
    B, S, _, _ = list(origin_T_camRs.shape)
    # ok, we have everything we need;
    # for each timestep, we want to warp the bkg to that timestep

    # utils for packing/unpacking along seq dim
    __p = lambda x: pack_seqdim(x, B)
    __u = lambda x: unpack_seqdim(x, B)

    # we in fact have utils for this already
    cam0s_T_camRs = utils_geom.get_camM_T_camXs(origin_T_camRs, ind=0)
    camRs_T_cam0s = __u(utils_geom.safe_inverse(__p(cam0s_T_camRs)))

    bkg_feat0s = bkg_feat0.unsqueeze(1).repeat(1, S, 1, 1, 1, 1)
    bkg_featRs = apply_4x4s_to_voxs(camRs_T_cam0s, bkg_feat0s)

    # now for the objects:
    # we want to sample for each location in the bird grid
    xyz_mems_ = utils_basic.gridcloud3D(B * S, Z, Y, X, norm=False)
    # this is B*S x Z*Y*X x 3
    xyz_camRs_ = Mem2Ref(xyz_mems_, Z, Y, X)
    camRs_T_zoom_ = __p(camRs_T_zoom)
    zoom_T_camRs_ = camRs_T_zoom_.inverse()  # note this is not a rigid transform
    xyz_zooms_ = utils_geom.apply_4x4(zoom_T_camRs_, xyz_camRs_)

    # we will do the whole traj at once (per obj);
    # we just have one feat for the whole traj, so we tile up
    obj_feats = obj_feat0.unsqueeze(1).repeat(1, S, 1, 1, 1, 1)
    obj_feats_ = __p(obj_feats)
    # this is B*S x C x Z x Y x X

    obj_featRs_ = utils_samp.sample3D(obj_feats_, xyz_zooms_, Z, Y, X)
    obj_featRs = __u(obj_featRs_)

    # overweigh objects, so that we essentially overwrite
    # featRs = 0.05*bkg_featRs + 0.95*obj_featRs

    # overwrite the bkg wherever the object has support
    # (bugfix: the mask must come from the object tensor; using bkg_featRs here
    # would blank the bkg everywhere it is nonzero)
    obj_mask = (obj_featRs > 0).float()
    featRs = obj_featRs + (1.0 - obj_mask) * bkg_featRs
    # note the normalization (next) will restore magnitudes for the bkg

    # l2 normalize on chans
    featRs = l2_normalize(featRs, dim=2)

    validRs = 1.0 - (featRs == 0).all(dim=2, keepdim=True).float().cuda()

    return featRs, validRs, bkg_featRs, obj_featRs
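# A tiny check of the overwrite-compositing rule used above: wherever the object
# tensor has support, it replaces the background; elsewhere the background
# passes through unchanged. Illustrative shapes only.
def _sketch_overwrite_composite():
    import torch
    bkg = torch.rand(1, 2, 4, 4, 4)
    obj = torch.zeros(1, 2, 4, 4, 4)
    obj[:, :, 1, 1, 1] = 1.0
    mask = (obj > 0).float()
    out = obj + (1.0 - mask) * bkg  # obj overwrites bkg where the object lives
    assert torch.equal(out[:, :, 1, 1, 1], obj[:, :, 1, 1, 1])
    assert torch.equal(out[:, :, 0, 0, 0], bkg[:, :, 0, 0, 0])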
def get_zoom_T_ref(lrt, Z, Y, X, additive_pad=0.0):
    # lrt is B x 19
    B, E = list(lrt.shape)
    assert (E == 19)
    lens, ref_T_obj = utils_geom.split_lrt(lrt)
    lx, ly, lz = lens.unbind(1)

    debug = False
    if debug:
        print('lx, ly, lz')
        print(lx)
        print(ly)
        print(lz)

    obj_T_ref = utils_geom.safe_inverse(ref_T_obj)
    # this is B x 4 x 4
    if debug:
        print('ok, got obj_T_ref:')
        print(obj_T_ref)

    # we want a tiny bit of padding:
    # additive padding helps avoid nans with invalid (zero-size) objects;
    # multiplicative padding helps expand big objects
    lx = lx + additive_pad
    ly = ly + additive_pad * 0.5  # pad less in this dim, since it is usually pointless
    lz = lz + additive_pad
    # lx *= 1.1
    # ly *= 1.1
    # lz *= 1.1

    # translation
    center_T_obj_r = utils_geom.eye_3x3(B)
    center_T_obj_t = torch.stack([lx / 2., ly / 2., lz / 2.], dim=1)
    if debug:
        print('merging these:')
        print(center_T_obj_r.shape)
        print(center_T_obj_t.shape)
    center_T_obj = utils_geom.merge_rt(center_T_obj_r, center_T_obj_t)
    if debug:
        print('ok, got center_T_obj:')
        print(center_T_obj)

    # scaling
    Z_VOX_SIZE_X = lx / float(X)
    Z_VOX_SIZE_Y = ly / float(Y)
    Z_VOX_SIZE_Z = lz / float(Z)
    diag = torch.stack([1. / Z_VOX_SIZE_X,
                        1. / Z_VOX_SIZE_Y,
                        1. / Z_VOX_SIZE_Z,
                        torch.ones([B], device=torch.device('cuda'))],
                       dim=1).view(B, 4)
    if debug:
        print('diag:')
        print(diag)
        print(diag.shape)
    zoom_T_center = torch.diag_embed(diag)
    if debug:
        print('ok, got zoom_T_center:')
        print(zoom_T_center)
        print(zoom_T_center.shape)

    # compose these
    zoom_T_obj = utils_basic.matmul2(zoom_T_center, center_T_obj)
    if debug:
        print('ok, got zoom_T_obj:')
        print(zoom_T_obj)
        print(zoom_T_obj.shape)

    zoom_T_ref = utils_basic.matmul2(zoom_T_obj, obj_T_ref)
    if debug:
        print('ok, got zoom_T_ref:')
        print(zoom_T_ref)
    return zoom_T_ref
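# The zoom transform above is "translate the box's min corner to the origin, then
# scale each axis into voxel units". A self-contained sketch with assumed sizes:
# the box's +corner should land exactly at the far corner of the voxel grid.
def _sketch_zoom_matrix():
    import torch
    B = 1
    lx, ly, lz = torch.tensor([2.0]), torch.tensor([1.0]), torch.tensor([4.0])
    Xv, Yv, Zv = 8, 8, 8
    # translate object-frame points so the box's min corner sits at the origin
    center_T_obj = torch.eye(4).repeat(B, 1, 1)
    center_T_obj[:, 0, 3] = lx / 2
    center_T_obj[:, 1, 3] = ly / 2
    center_T_obj[:, 2, 3] = lz / 2
    # scale metric lengths into voxel units (1 / vox_size per axis)
    diag = torch.stack([Xv / lx, Yv / ly, Zv / lz, torch.ones(B)], dim=1)  # B x 4
    zoom_T_center = torch.diag_embed(diag)
    zoom_T_obj = torch.matmul(zoom_T_center, center_T_obj)
    p = torch.tensor([[[1.0], [0.5], [2.0], [1.0]]])  # B x 4 x 1: the +corner, homogeneous
    q = torch.matmul(zoom_T_obj, p).squeeze(-1)[:, :3]
    assert torch.allclose(q, torch.tensor([[8.0, 8.0, 8.0]]))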
def forward(self, feed):
    results = dict()
    summ_writer = utils_improc.Summ_writer(writer=feed['writer'],
                                           global_step=feed['global_step'],
                                           set_name=feed['set_name'],
                                           fps=8)
    writer = feed['writer']
    global_step = feed['global_step']

    total_loss = torch.tensor(0.0).cuda()

    __p = lambda x: pack_seqdim(x, B)
    __u = lambda x: unpack_seqdim(x, B)

    B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
    PH, PW = hyp.PH, hyp.PW
    K = hyp.K
    Z, Y, X = hyp.Z, hyp.Y, hyp.X
    Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
    D = 9

    rgb_camRs = feed["rgb_camRs"]
    rgb_camXs = feed["rgb_camXs"]
    pix_T_cams = feed["pix_T_cams"]
    cam_T_velos = feed["cam_T_velos"]

    origin_T_camRs = feed["origin_T_camRs"]
    origin_T_camRs_ = __p(origin_T_camRs)
    origin_T_camXs = feed["origin_T_camXs"]
    origin_T_camXs_ = __p(origin_T_camXs)

    camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
    camX0_T_camXs_ = __p(camX0_T_camXs)
    camRs_T_camXs_ = torch.matmul(utils_geom.safe_inverse(origin_T_camRs_),
                                  origin_T_camXs_)
    camXs_T_camRs_ = utils_geom.safe_inverse(camRs_T_camXs_)
    camRs_T_camXs = __u(camRs_T_camXs_)
    camXs_T_camRs = __u(camXs_T_camRs_)

    xyz_veloXs = feed["xyz_veloXs"]
    xyz_camXs = __u(utils_geom.apply_4x4(__p(cam_T_velos), __p(xyz_veloXs)))
    xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))
    xyz_camX0s = __u(utils_geom.apply_4x4(__p(camX0_T_camXs), __p(xyz_camXs)))

    occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
    occXs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z2, Y2, X2))
    occX0s_half = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z2, Y2, X2))

    unpXs = __u(utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X,
                                               __p(pix_T_cams)))

    ## projected depth, and inbound mask
    depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(__p(pix_T_cams),
                                                               __p(xyz_camXs), H, W)
    dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams))
    dense_xyz_camX0s_ = utils_geom.apply_4x4(__p(camX0_T_camXs), dense_xyz_camXs_)
    inbound_camXs_ = utils_vox.get_inbounds(dense_xyz_camX0s_, Z, Y, X).float()
    inbound_camXs_ = torch.reshape(inbound_camXs_, [B * S, 1, H, W])
    depth_camXs = __u(depth_camXs_)
    valid_camXs = __u(valid_camXs_) * __u(inbound_camXs_)

    #####################
    ## visualize what we got
    #####################
    summ_writer.summ_oneds('2D_inputs/depth_camXs', torch.unbind(depth_camXs, dim=1))
    summ_writer.summ_oneds('2D_inputs/valid_camXs', torch.unbind(valid_camXs, dim=1))
    summ_writer.summ_rgbs('2D_inputs/rgb_camRs', torch.unbind(rgb_camRs, dim=1))
    summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(rgb_camXs, dim=1))
    summ_writer.summ_occs('3D_inputs/occXs', torch.unbind(occXs, dim=1))
    summ_writer.summ_unps('3D_inputs/unpXs', torch.unbind(unpXs, dim=1),
                          torch.unbind(occXs, dim=1))

    if summ_writer.save_this:
        unpRs = __u(utils_vox.unproject_rgb_to_mem(
            __p(rgb_camXs), Z, Y, X,
            matmul2(__p(pix_T_cams), utils_geom.safe_inverse(__p(camRs_T_camXs)))))
        occRs = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z, Y, X))
        summ_writer.summ_occs('3D_inputs/occRs', torch.unbind(occRs, dim=1))
        summ_writer.summ_unps('3D_inputs/unpRs', torch.unbind(unpRs, dim=1),
                              torch.unbind(occRs, dim=1))

    #####################
    ## run the nets
    #####################
    mask_ = None
    if hyp.do_occ and (not hyp.occ_do_cheap):
        '''
        occRs_sup, freeRs_sup, freeXs = utils_vox.prep_occs_supervision(
            xyz_camXs, occRs_half, occXs_half, camRs_T_camXs, agg=True)
        featRs_input = torch.cat([occRs, occRs*unpRs], dim=2)
        featRs_input_ = __p(featRs_input)
        occRs_sup_ = __p(occRs_sup)
        freeRs_sup_ = __p(freeRs_sup)
        occ_loss, occRs_pred_ = self.occnet(featRs_input_, occRs_sup_,
                                            freeRs_sup_, summ_writer)
        occRs_pred = __u(occRs_pred_)
        total_loss += occ_loss
        mask_ = F.upsample(occRs_pred_, scale_factor=2)
        '''
        occXs_ = __p(occXs)
        mask_ = occXs_

    if hyp.do_feat:
        # occXs is B x S x 1 x Z x Y x X
        # unpXs is B x S x 3 x Z x Y x X
        featXs_input = torch.cat([occXs, occXs * unpXs], dim=2)
        featXs_input_ = __p(featXs_input)

        # it is useful to keep track of what was visible from each viewpoint
        freeXs_ = utils_vox.get_freespace(__p(xyz_camXs), __p(occXs_half))
        freeXs = __u(freeXs_)
        visXs = torch.clamp(occXs_half + freeXs, 0.0, 1.0)

        if mask_ is not None:
            assert (list(mask_.shape)[2:5] == list(featXs_input_.shape)[2:5])
        featXs_, validXs_, feat_loss = self.featnet(featXs_input_, summ_writer,
                                                    mask=__p(occXs))  # mask_)
        total_loss += feat_loss

        validXs = __u(validXs_)
        _validX00 = validXs[:, 0:1]
        _validX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:, 1:], validXs[:, 1:])
        validX0s = torch.cat([_validX00, _validX01], dim=1)

        _visX00 = visXs[:, 0:1]
        _visX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:, 1:], visXs[:, 1:])
        visX0s = torch.cat([_visX00, _visX01], dim=1)

        featXs = __u(featXs_)
        _featX00 = featXs[:, 0:1]
        _featX01 = utils_vox.apply_4x4_to_voxs(camX0_T_camXs[:, 1:], featXs[:, 1:])
        featX0s = torch.cat([_featX00, _featX01], dim=1)

        emb3D_e = torch.mean(featX0s[:, 1:], dim=1)  # context
        emb3D_g = featX0s[:, 0]  # obs
        vis3D_e = torch.max(validX0s[:, 1:], dim=1)[0] * torch.max(visX0s[:, 1:], dim=1)[0]
        vis3D_g = validX0s[:, 0] * visX0s[:, 0]  # obs

        if hyp.do_eval_recall:
            results['emb3D_e'] = emb3D_e
            results['emb3D_g'] = emb3D_g

        summ_writer.summ_feats('3D_feats/featXs_input',
                               torch.unbind(featXs_input, dim=1), pca=True)
        summ_writer.summ_feats('3D_feats/featXs_output',
                               torch.unbind(featXs, dim=1), pca=True)
        summ_writer.summ_feats('3D_feats/featX0s_output',
                               torch.unbind(featX0s, dim=1), pca=True)
        summ_writer.summ_feats('3D_feats/validX0s',
                               torch.unbind(validX0s, dim=1), pca=False)
        summ_writer.summ_feat('3D_feats/vis3D_e', vis3D_e, pca=False)
        summ_writer.summ_feat('3D_feats/vis3D_g', vis3D_g, pca=False)

    if hyp.do_occ and hyp.occ_do_cheap:
        occX0_sup, freeX0_sup, freeXs = utils_vox.prep_occs_supervision(
            xyz_camXs, occX0s_half, occXs_half, camX0_T_camXs, agg=True)

        summ_writer.summ_occ('occ_sup/occ_sup', occX0_sup)
        summ_writer.summ_occ('occ_sup/free_sup', freeX0_sup)
        summ_writer.summ_occs('occ_sup/freeXs_sup', torch.unbind(freeXs, dim=1))
        summ_writer.summ_occs('occ_sup/occXs_sup', torch.unbind(occXs_half, dim=1))

        occ_loss, occRs_pred_ = self.occnet(torch.mean(featX0s[:, 1:], dim=1),
                                            occX0_sup, freeX0_sup,
                                            torch.max(validX0s[:, 1:], dim=1)[0],
                                            summ_writer)
        occRs_pred = __u(occRs_pred_)
        total_loss += occ_loss

    if hyp.do_view:
        assert (hyp.do_feat)
        # we warped the features into the canonical view;
        # now we resample to the target view and decode
        PH, PW = hyp.PH, hyp.PW
        sy = float(PH) / float(hyp.H)
        sx = float(PW) / float(hyp.W)
        assert (sx == 0.5)  # else we need a fancier downsampler
        assert (sy == 0.5)
        projpix_T_cams = __u(utils_geom.scale_intrinsics(__p(pix_T_cams), sx, sy))

        assert (S == 2)  # else we should warp each feat in feats[:, 1:]
        feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
            projpix_T_cams[:, 0], camX0_T_camXs[:, 1], featXs[:, 1],
            hyp.view_depth, PH, PW)
        # feat_projX00 is B x hyp.feat_dim x hyp.view_depth x PH x PW
        rgb_X00 = downsample(rgb_camXs[:, 0], 2)

        if summ_writer.save_this:
            # for vis, let's also project some rgb
            rgb_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camXs_T_camRs[:, 0], unpRs[:, 0],
                hyp.view_depth, PH, PW)
            rgb_projX01 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 1], camXs_T_camRs[:, 1], unpRs[:, 1],
                hyp.view_depth, PH, PW)
            occ_projX00 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 0], camXs_T_camRs[:, 0], occRs[:, 0],
                hyp.view_depth, PH, PW)
            occ_projX01 = utils_vox.apply_pixX_T_memR_to_voxR(
                projpix_T_cams[:, 1], camXs_T_camRs[:, 1], occRs[:, 1],
                hyp.view_depth, PH, PW)
            rgb_projX00_vis = reduce_masked_mean(
                rgb_projX00, occ_projX00.repeat([1, 3, 1, 1, 1]), dim=2)
            rgb_projX01_vis = reduce_masked_mean(
                rgb_projX01, occ_projX01.repeat([1, 3, 1, 1, 1]), dim=2)
            summ_writer.summ_rgbs('projection/rgb_projX',
                                  [rgb_projX00_vis, rgb_projX01_vis])
            rgb_X01 = downsample(rgb_camXs[:, 1], 2)
            summ_writer.summ_rgbs('projection/rgb_origX', [rgb_X00, rgb_X01])

        # decode the perspective volume into an image
        view_loss, rgb_e, emb2D_e = self.viewnet(feat_projX00, rgb_X00, summ_writer)
        total_loss += view_loss

    if hyp.do_emb2D:
        assert (hyp.do_view)
        # create an embedding image, representing the bottom-up 2D feature tensor
        emb_loss_2D, emb2D_g = self.embnet2D(rgb_camXs[:, 0], emb2D_e,
                                             valid_camXs[:, 0], summ_writer)
        total_loss += emb_loss_2D

    if hyp.do_emb3D:
        occX0_sup, freeX0_sup, freeXs = utils_vox.prep_occs_supervision(
            xyz_camXs, occX0s_half, occXs_half, camX0_T_camXs, agg=True)
        emb_loss_3D = self.embnet3D(emb3D_e, emb3D_g, vis3D_e, vis3D_g, summ_writer)
        total_loss += emb_loss_3D

    if hyp.do_eval_recall:
        results['emb2D_e'] = None
        results['emb2D_g'] = None

    summ_writer.summ_scalar('loss', total_loss.cpu().item())
    return total_loss, results
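# The __p/__u lambdas used throughout fold the sequence dim into the batch dim and
# back, so per-frame ops can run in one batched call. A minimal sketch of the
# convention (pack_seqdim/unpack_seqdim re-implemented here for illustration only):
def _sketch_pack_unpack_seqdim():
    import torch
    B, S, C = 2, 3, 5
    x = torch.rand(B, S, C)
    pack = lambda t: t.reshape(B * S, *t.shape[2:])    # B x S x ... -> B*S x ...
    unpack = lambda t: t.reshape(B, S, *t.shape[1:])   # B*S x ... -> B x S x ...
    assert torch.equal(unpack(pack(x)), x)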
def __getitem__(self, index):
    if hyp.dataset_name in ('kitti', 'clevr', 'real', 'bigbird', 'carla',
                            'carla_mix', 'replica', 'clevr_vqa', 'carla_det'):
        filename = self.records[index]
        d = pickle.load(open(filename, "rb"))
        d = dict(d)
        d_empty = pickle.load(open(self.empty_scene, "rb"))
        d_empty = dict(d_empty)
    # elif hyp.dataset_name == "carla":
    #     filename = self.records[index]
    #     d = np.load(filename)
    #     d = dict(d)
    #     d['rgb_camXs_raw'] = d['rgb_camXs']
    #     d['pix_T_cams_raw'] = d['pix_T_cams']
    #     d['tree_seq_filename'] = "dummy_tree_filename"
    #     d['origin_T_camXs_raw'] = d['origin_T_camXs']
    #     d['camR_T_origin_raw'] = utils_geom.safe_inverse(
    #         torch.from_numpy(d['origin_T_camRs'])).numpy()
    #     d['xyz_camXs_raw'] = d['xyz_camXs']
    else:
        assert (False)  # reader not ready yet

    if hyp.do_empty:
        item_names = [
            'pix_T_cams_raw',
            'origin_T_camXs_raw',
            'camR_T_origin_raw',
            'rgb_camXs_raw',
            'xyz_camXs_raw',
            'empty_rgb_camXs_raw',
            'empty_xyz_camXs_raw',
        ]
    else:
        item_names = [
            'pix_T_cams_raw',
            'origin_T_camXs_raw',
            'camR_T_origin_raw',
            'rgb_camXs_raw',
            'xyz_camXs_raw',
        ]

    if hyp.use_gt_occs:
        __p = lambda x: utils_basic.pack_seqdim(x, 1)
        __u = lambda x: utils_basic.unpack_seqdim(x, 1)
        B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N
        PH, PW = hyp.PH, hyp.PW
        K = hyp.K
        BOX_SIZE = hyp.BOX_SIZE
        Z, Y, X = hyp.Z, hyp.Y, hyp.X
        Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2)
        Z4, Y4, X4 = int(Z / 4), int(Y / 4), int(X / 4)
        D = 9
        pix_T_cams = torch.from_numpy(
            d["pix_T_cams_raw"]).unsqueeze(0).cuda().to(torch.float)
        camRs_T_origin = torch.from_numpy(
            d["camR_T_origin_raw"]).unsqueeze(0).cuda().to(torch.float)
        origin_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_origin)))
        origin_T_camXs = torch.from_numpy(
            d["origin_T_camXs_raw"]).unsqueeze(0).cuda().to(torch.float)
        camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0)
        camRs_T_camXs = __u(torch.matmul(
            utils_geom.safe_inverse(__p(origin_T_camRs)), __p(origin_T_camXs)))
        camXs_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_camXs)))
        camX0_T_camRs = camXs_T_camRs[:, 0]
        camX1_T_camRs = camXs_T_camRs[:, 1]
        camR_T_camX0 = utils_geom.safe_inverse(camX0_T_camRs)
        xyz_camXs = torch.from_numpy(
            d["xyz_camXs_raw"]).unsqueeze(0).cuda().to(torch.float)
        xyz_camRs = __u(utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs)))
        depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(
            __p(pix_T_cams), __p(xyz_camXs), H, W)
        dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams))
        occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X))
        occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2))
        occRs_half = torch.max(occRs_half, dim=1).values.squeeze(0)
        occ_complete = occRs_half.cpu().numpy()

    # if hyp.do_time_flip:
    #     d = random_time_flip_single(d, item_names)
    # if the sequence length > 2, select S frames
    # filename = d['raw_seq_filename']
    original_filename = filename
    original_filename_empty = self.empty_scene

    if hyp.dataset_name == "clevr_vqa":
        d['tree_seq_filename'] = "temp"
        pix_T_cams = d['pix_T_cams_raw']
        num_cams = pix_T_cams.shape[0]
        shape_name = d['shape_list']
        color_name = d['color_list']
        material_name = d['material_list']
        all_name = []
        all_style = []
        # note: the loop variable is renamed to avoid shadowing the `index` argument
        for idx in range(len(shape_name)):
            name = shape_name[idx] + "/" + color_name[idx] + "_" + material_name[idx]
            style_name = color_name[idx] + "_" + material_name[idx]
            all_name.append(name)
            all_style.append(style_name)

        if hyp.do_shape:
            class_name = shape_name
        elif hyp.do_color:
            class_name = color_name
        elif hyp.do_material:
            class_name = material_name
        elif hyp.do_style:
            class_name = all_style
        else:
            class_name = all_name

        object_category = class_name
        bbox_origin = d['bbox_origin']
        bbox_origin_empty = np.zeros_like(bbox_origin)
        object_category_empty = ['0']

    if not hyp.dataset_name == "clevr_vqa":
        filename = d['tree_seq_filename']
        filename_empty = d_empty['tree_seq_filename']

    if hyp.fixed_view:
        d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S)
        d_empty, indexes_empty = specific_select_single_empty(
            d_empty, item_names, d['origin_T_camXs_raw'], num_samples=hyp.S)
        filename_g = "/".join([original_filename, str(indexes[0])])
        filename_e = "/".join([original_filename, str(indexes[1])])
        filename_g_empty = "/".join([original_filename_empty, str(indexes[0])])
        filename_e_empty = "/".join([original_filename_empty, str(indexes[1])])

    rgb_camXs = d['rgb_camXs_raw']
    rgb_camXs_empty = d_empty['rgb_camXs_raw']
    # move channel dim inward, like pytorch wants
    rgb_camXs = np.transpose(rgb_camXs, axes=[0, 3, 1, 2])
    rgb_camXs = rgb_camXs[:, :3]
    rgb_camXs = utils_improc.preprocess_color(rgb_camXs)
    rgb_camXs_empty = np.transpose(rgb_camXs_empty, axes=[0, 3, 1, 2])
    rgb_camXs_empty = rgb_camXs_empty[:, :3]
    rgb_camXs_empty = utils_improc.preprocess_color(rgb_camXs_empty)

    if hyp.dataset_name == "clevr_vqa":
        num_boxes = bbox_origin.shape[0]
        bbox_origin = np.array(bbox_origin)
        score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes])
        bbox_origin = np.pad(bbox_origin, [[0, hyp.N - num_boxes], [0, 0], [0, 0]])
        object_category = np.pad(object_category, [[0, hyp.N - num_boxes]],
                                 lambda x, y, z, m: "0")
        object_category_empty = np.pad(object_category_empty, [[0, hyp.N - 1]],
                                       lambda x, y, z, m: "0")
        score_empty = np.zeros_like(score)
        bbox_origin_empty = np.zeros_like(bbox_origin)
        d['gt_box'] = np.stack([bbox_origin.astype(np.float32), bbox_origin_empty])
        d['gt_scores'] = np.stack([score.astype(np.float32), score_empty])
        try:
            d['classes'] = np.stack([object_category, object_category_empty]).tolist()
        except Exception as e:
            st()

    d['rgb_camXs_raw'] = np.stack([rgb_camXs, rgb_camXs_empty])
    d['pix_T_cams_raw'] = np.stack(
        [d["pix_T_cams_raw"], d_empty["pix_T_cams_raw"]])
    d['origin_T_camXs_raw'] = np.stack(
        [d["origin_T_camXs_raw"], d_empty["origin_T_camXs_raw"]])
    d['camR_T_origin_raw'] = np.stack(
        [d["camR_T_origin_raw"], d_empty["camR_T_origin_raw"]])
    d['xyz_camXs_raw'] = np.stack(
        [d["xyz_camXs_raw"], d_empty["xyz_camXs_raw"]])

    if not hyp.dataset_name == "clevr_vqa":
        d['tree_seq_filename'] = [filename, "invalid_tree"]
    else:
        d['tree_seq_filename'] = ["temp"]
    d['filename_e'] = ["temp"]
    d['filename_g'] = ["temp"]
    if hyp.use_gt_occs:
        d['occR_complete'] = np.expand_dims(occ_complete, axis=0)
    return d
def forward(self, feat, summ_writer=None, comp_mask=None):
    total_loss = torch.tensor(0.0).cuda()
    B, C, D, H, W = list(feat.shape)

    if summ_writer is not None:
        summ_writer.summ_feat('feat/feat0_input', feat, pca=False)
    if comp_mask is not None:
        if summ_writer is not None:
            summ_writer.summ_feat('feat/mask_input', comp_mask, pca=False)

    if hyp.feat_do_rt:
        # apply a random rt to the feat
        # Y_T_X = utils_geom.get_random_rt(B, r_amount=5.0, t_amount=8.0).cuda()
        # Y_T_X = utils_geom.get_random_rt(B, r_amount=1.0, t_amount=8.0).cuda()
        Y_T_X = utils_geom.get_random_rt(B, r_amount=1.0, t_amount=4.0).cuda()
        feat = utils_vox.apply_4x4_to_vox(Y_T_X, feat)
        if comp_mask is not None:
            comp_mask = utils_vox.apply_4x4_to_vox(Y_T_X, comp_mask)
        if summ_writer is not None:
            summ_writer.summ_feat('feat/feat1_rt', feat, pca=False)

    if hyp.feat_do_flip:
        # randomly flip the input
        flip0 = torch.rand(1)
        flip1 = torch.rand(1)
        flip2 = torch.rand(1)
        if flip0 > 0.5:
            # transpose width/depth (rotate 90deg)
            feat = feat.permute(0, 1, 4, 3, 2)
            if comp_mask is not None:
                comp_mask = comp_mask.permute(0, 1, 4, 3, 2)
        if flip1 > 0.5:
            # flip depth
            feat = feat.flip(2)
            if comp_mask is not None:
                comp_mask = comp_mask.flip(2)
        if flip2 > 0.5:
            # flip width
            feat = feat.flip(4)
            if comp_mask is not None:
                comp_mask = comp_mask.flip(4)
        if summ_writer is not None:
            summ_writer.summ_feat('feat/feat2_flip', feat, pca=False)

    if hyp.feat_do_sparse_conv:
        feat, comp_mask = self.net(feat, comp_mask)
        if summ_writer is not None:
            summ_writer.summ_feat('feat/mask_output', comp_mask, pca=False)
    elif hyp.feat_do_sparse_invar:
        feat, comp_mask = self.net(feat, comp_mask)
    else:
        feat = self.net(feat)

    # smooth loss
    dz, dy, dx = gradient3D(feat, absolute=True)
    # note: torch uses keepdim, not numpy's keepdims
    smooth_vox = torch.mean(dz + dy + dx, dim=1, keepdim=True)
    if summ_writer is not None:
        summ_writer.summ_oned('feat/smooth_loss', torch.mean(smooth_vox, dim=3))
    smooth_loss = torch.mean(smooth_vox)
    total_loss = utils_misc.add_loss('feat/smooth_loss', total_loss, smooth_loss,
                                     hyp.feat_smooth_coeff, summ_writer)

    # feat = l2_normalize(feat, dim=1)
    if summ_writer is not None:
        summ_writer.summ_feat('feat/feat3_out', feat)

    if hyp.feat_do_flip:
        # undo the random flip, in reverse order
        if flip2 > 0.5:
            feat = feat.flip(4)  # unflip width
        if flip1 > 0.5:
            feat = feat.flip(2)  # unflip depth
        if flip0 > 0.5:
            feat = feat.permute(0, 1, 4, 3, 2)  # untranspose width/depth
        if summ_writer is not None:
            summ_writer.summ_feat('feat/feat4_unflip', feat)

    if hyp.feat_do_rt:
        # undo the random rt
        X_T_Y = utils_geom.safe_inverse(Y_T_X)
        feat = utils_vox.apply_4x4_to_vox(X_T_Y, feat)
        if summ_writer is not None:
            summ_writer.summ_feat('feat/feat5_unrt', feat)

    valid_mask = 1.0 - (feat == 0).all(dim=1, keepdim=True).float()
    if hyp.feat_do_sparse_conv and (comp_mask is not None):
        valid_mask = valid_mask * comp_mask
    if summ_writer is not None:
        summ_writer.summ_feat('feat/valid_mask', valid_mask, pca=False)
    return feat, valid_mask, total_loss
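# The flip/unflip logic above relies on each flip being an involution and on the
# (0, 1, 4, 3, 2) permute being its own inverse, so undoing in reverse order
# recovers the input exactly. A self-contained consistency check:
def _sketch_flip_roundtrip():
    import torch
    feat = torch.rand(1, 2, 4, 5, 6)  # B x C x D x H x W
    aug = feat.permute(0, 1, 4, 3, 2).flip(2).flip(4)  # transpose depth/width, flip both
    rec = aug.flip(4).flip(2).permute(0, 1, 4, 3, 2)   # undo in reverse order
    assert torch.equal(rec, feat)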
def process(self):
    fnames = []
    for scene_cnt, scene_dir in enumerate(self.scene_dirs):
        print("Processing scene {}. Scene number {}".format(scene_dir, scene_cnt))
        rgb_camXs = []
        depth_camXs = []
        pix_T_camXs = []
        origin_T_camXs = []
        xyz_camXs = []
        habitat_pix_T_camXs = []
        scene_bbox_ends = []
        scene_category_ids = []
        scene_category_names = []
        scene_instance_ids = []
        scene_object_dict = {}

        pickle_files = [os.path.join(scene_dir, f) for f in os.listdir(scene_dir)
                        if f.endswith('.p')]
        pickle_files = sorted(pickle_files)

        for cnt, pickle_file in enumerate(pickle_files):
            f = pickle.load(open(pickle_file, "rb"))
            if cnt == 0:
                sample_f = f
            rgb_camXs.append(f['rgb_camX'])
            depth_camXs.append(f['depth_camX'])
            pix_T_camXs.append(self.get_pix_T_camX())
            habitat_pix_T_camXs.append(self.get_habitat_pix_T_camX())
            origin_T_camXs.append(
                self.get_origin_T_camX(f['sensor_pos'], f['sensor_rot']))
            print("count of pickle file is: ", cnt)
            object_dict = self.get_object_info(f, rgb_camXs[-1], pix_T_camXs[-1],
                                               origin_T_camXs[-1])
            for instance_id in object_dict:
                if instance_id not in scene_object_dict:
                    scene_object_dict[instance_id] = []
                scene_object_dict[instance_id].append(object_dict[instance_id])

        scene_category_ids, scene_instance_ids, scene_category_names, scene_bbox_ends = \
            self.select_frequently_occuring_objects(scene_object_dict)

        habitat_pix_T_camXs = np.stack(habitat_pix_T_camXs)
        rgb_camXs_to_save = np.stack(rgb_camXs)[:, :, :, :3]
        rgb_camXs = np.stack(rgb_camXs)[:, :, :, :3].astype(np.float32) / 255.
        origin_T_camXs = np.stack(origin_T_camXs)
        depth_camXs = np.stack(depth_camXs)
        pix_T_camXs = np.stack(pix_T_camXs)

        xyz_habitatCamXs = self.generate_xyz_habitatCamXs(depth_camXs, rgb_camXs,
                                                          habitat_pix_T_camXs)
        if self.visualize:
            print("Showing pointclouds in habitat_camXs coordinate ref frame")
            for xyz_habitatCamX in xyz_habitatCamXs:
                pcd = nlu.make_pcd(xyz_habitatCamX)
                o3d.visualization.draw_geometries([pcd, self.mesh_frame])

        # Get xyz_camXs in pydisco coordinate frame.
        # Since it is a 180 deg rotation, habitatCamX_T_camX and its inverse are
        # the same; therefore we do not take the inverse.
        xyz_camXs = utils_geom.apply_4x4(
            torch.tensor(self.get_habitatCamX_T_camX()).repeat(
                xyz_habitatCamXs.shape[0], 1, 1),
            torch.tensor(xyz_habitatCamXs)).numpy()

        if self.visualize:
            print("Showing pointclouds in pydisco_camXs coordinate ref frame")
            for xyz_camX, rgb_camX in zip(xyz_camXs, rgb_camXs):
                pcd = nlu.make_pcd(xyz_camX)
                o3d.visualization.draw_geometries([pcd, self.mesh_frame])
                pix_T_camX = pix_T_camXs[0]
                depth, _ = utils_geom.create_depth_image(
                    torch.tensor(pix_T_camX).unsqueeze(0).float(),
                    torch.tensor(xyz_camX).unsqueeze(0).float(),
                    self.H, self.W)
                depth[torch.where(depth > 10)] = 0
                utils_pointcloud.visualize_colored_pcd(
                    depth.squeeze(0).squeeze(0).numpy(), rgb_camX, pix_T_camX)

        xyz_camXs_origin = utils_geom.apply_4x4(torch.tensor(origin_T_camXs),
                                                torch.tensor(xyz_camXs))
        xyz_camXs_origin_agg = xyz_camXs_origin.reshape(-1, 3)

        # Visualize aggregated pointcloud
        if self.visualize:
            print("Showing aggregated pointclouds")
            pcd_list = [self.mesh_frame]
            for xyz_camX_origin in xyz_camXs_origin:
                pcd_list.append(nlu.make_pcd(xyz_camX_origin))
            o3d.visualization.draw_geometries(pcd_list)

        if self.visualize:
            self.test_bbox_projection(xyz_camXs_origin_agg, origin_T_camXs[0],
                                      pix_T_camXs[0], rgb_camXs[0], xyz_camXs[0],
                                      sample_f)

        # The first num_camR_candidates views will be our camR candidates
        for num_save in range(self.num_camR_candidates):
            camX1_T_origin = utils_geom.safe_inverse(
                torch.tensor(origin_T_camXs[num_save]).unsqueeze(0)).float().repeat(
                    origin_T_camXs.shape[0], 1, 1).numpy()
            data_to_save = {
                "camR_index": num_save,
                "object_category_ids": scene_category_ids,
                "object_category_names": scene_category_names,
                "object_instance_ids": scene_instance_ids,
                "bbox_origin": scene_bbox_ends,
                "pix_T_cams_raw": pix_T_camXs,
                "camR_T_origin_raw": camX1_T_origin,
                "xyz_camXs_raw": xyz_camXs,
                "origin_T_camXs_raw": origin_T_camXs,
                'rgb_camXs_raw': rgb_camXs_to_save,
            }
            cur_epoch = str(time()).replace(".", "")
            pickle_fname = cur_epoch + ".p"
            fnames.append(pickle_fname)
            with open(os.path.join(dump_dir, pickle_fname), 'wb') as f:
                pickle.dump(data_to_save, f)
    return fnames
def get_object_info(self, f, rgb_camX, pix_T_camX, origin_T_camX):
    objs_info = f['objects_info']
    object_dict = {}
    if self.visualize:
        plt.imshow(rgb_camX[..., :3])
        plt.show(block=True)
    for obj_info in objs_info:
        classname = obj_info['category_name']
        if classname in self.ignore_classes:
            continue
        category = obj_info['category_id']
        instance_id = obj_info['instance_id']
        bbox_center = obj_info['bbox_center']
        bbox_size = obj_info['bbox_size']
        xmin, xmax = bbox_center[0] - bbox_size[0] / 2., bbox_center[0] + bbox_size[0] / 2.
        ymin, ymax = bbox_center[1] - bbox_size[1] / 2., bbox_center[1] + bbox_size[1] / 2.
        zmin, zmax = bbox_center[2] - bbox_size[2] / 2., bbox_center[2] + bbox_size[2] / 2.
        bbox_volume = (xmax - xmin) * (ymax - ymin) * (zmax - zmin)
        bbox_origin_ends = np.array([xmin, ymin, zmin, xmax, ymax, zmax])
        bbox_origin_ends = torch.tensor(bbox_origin_ends).reshape(1, 1, 2, 3).float()
        bbox_origin_theta = nlu.get_alignedboxes2thetaformat(bbox_origin_ends)
        bbox_origin_corners = utils_geom.transform_boxes_to_corners(
            bbox_origin_theta).float()

        camX_T_origin = utils_geom.safe_inverse(
            torch.tensor(origin_T_camX).unsqueeze(0)).float()
        bbox_corners_camX = utils_geom.apply_4x4(
            camX_T_origin.float(), bbox_origin_corners.squeeze(0).float())
        bbox_corners_pixX = utils_geom.apply_pix_T_cam(
            torch.tensor(pix_T_camX).unsqueeze(0).float(), bbox_corners_camX)
        bbox_ends_pixX = nlu.get_ends_of_corner(
            bbox_corners_pixX.permute(0, 2, 1)).permute(0, 2, 1)
        bbox_ends_pixX_np = torch.clamp(bbox_ends_pixX.squeeze(0), 0,
                                        rgb_camX.shape[1]).numpy().astype(int)
        bbox_area = (bbox_ends_pixX_np[1, 1] - bbox_ends_pixX_np[0, 1]) * \
                    (bbox_ends_pixX_np[1, 0] - bbox_ends_pixX_np[0, 0])
        print("Volume and area occupied by class {} is {} and {}".format(
            classname, bbox_volume, bbox_area))

        semantic = f['semantic_camX']
        # count the pixels belonging to this instance
        # (bugfix: the original took .shape, i.e. a tuple; we need the count itself)
        instance_id_pixel_cnt = np.where(semantic == instance_id)[0].shape[0]
        object_to_bbox_ratio = instance_id_pixel_cnt / bbox_area
        print("Num pixels in semantic map {}. Ratio of pixels to bbox area {}. "
              "Ratio of pixels to bbox volume {}.".format(
                  instance_id_pixel_cnt, object_to_bbox_ratio,
                  instance_id_pixel_cnt / bbox_volume))

        if self.visualize:
            # print("bbox ends are: ", bbox_ends_pixX_np)
            cropped_rgb = rgb_camX[bbox_ends_pixX_np[0, 1]:bbox_ends_pixX_np[1, 1],
                                   bbox_ends_pixX_np[0, 0]:bbox_ends_pixX_np[1, 0], :3]
            plt.imshow(cropped_rgb)
            plt.show(block=True)

        if bbox_area < self.bbox_area_thresh:
            continue
        if object_to_bbox_ratio < self.occlusion_thresh:
            continue
        object_dict[instance_id] = (classname, category, instance_id, bbox_origin_ends)
    return object_dict
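# apply_pix_T_cam above is the standard pinhole projection: u = fx*x/z + x0,
# v = fy*y/z + y0. A minimal sketch with assumed intrinsics, applied to one
# camera-frame point:
def _sketch_project_points():
    import torch
    fx, fy, x0, y0 = 100.0, 100.0, 64.0, 64.0
    xyz_cam = torch.tensor([[[1.0, 0.5, 5.0]]])  # B x N x 3
    x, y, z = xyz_cam.unbind(2)
    u = fx * x / z + x0
    v = fy * y / z + y0
    uv = torch.stack([u, v], dim=2)
    assert torch.allclose(uv, torch.tensor([[[84.0, 74.0]]]))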
def forward(self, feed, moc_init_done=False, debug=False): summ_writer = utils_improc.Summ_writer( writer = feed['writer'], global_step = feed['global_step'], set_name= feed['set_name'], fps=8) writer = feed['writer'] global_step = feed['global_step'] total_loss = torch.tensor(0.0).cuda() ### ... All things sensor ... ### sensor_rgbs = feed['sensor_imgs'] sensor_depths = feed['sensor_depths'] center_sensor_H, center_sensor_W = sensor_depths[0][0].shape[-1] // 2, sensor_depths[0][0].shape[-2] // 2 ### ... All things sensor end ... ### # 1. Form the memory tensor using the feat net and visual images. # check what all do you need for this and create only those things ## .... Input images .... ## rgb_camRs = feed['rgb_camRs'] rgb_camXs = feed['rgb_camXs'] ## .... Input images end .... ## ## ... Hyperparams ... ## B, H, W, V, S = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S __p = lambda x: pack_seqdim(x, B) __u = lambda x: unpack_seqdim(x, B) PH, PW = hyp.PH, hyp.PW Z, Y, X = hyp.Z, hyp.Y, hyp.X Z2, Y2, X2 = int(Z/2), int(Y/2), int(X/2) ## ... Hyperparams end ... ## ## .... VISUAL TRANSFORMS BEGIN .... ## pix_T_cams = feed['pix_T_cams'] pix_T_cams_ = __p(pix_T_cams) origin_T_camRs = feed['origin_T_camRs'] origin_T_camRs_ = __p(origin_T_camRs) origin_T_camXs = feed['origin_T_camXs'] origin_T_camXs_ = __p(origin_T_camXs) camRs_T_camXs_ = torch.matmul(utils_geom.safe_inverse( origin_T_camRs_), origin_T_camXs_) camXs_T_camRs_ = utils_geom.safe_inverse(camRs_T_camXs_) camRs_T_camXs = __u(camRs_T_camXs_) camXs_T_camRs = __u(camXs_T_camRs_) pix_T_cams_ = utils_geom.pack_intrinsics(pix_T_cams_[:, 0, 0], pix_T_cams_[:, 1, 1], pix_T_cams_[:, 0, 2], pix_T_cams_[:, 1, 2]) pix_T_camRs_ = torch.matmul(pix_T_cams_, camXs_T_camRs_) pix_T_camRs = __u(pix_T_camRs_) ## ... VISUAL TRANSFORMS END ... ## ## ... SENSOR TRANSFORMS BEGIN ... ## sensor_origin_T_camXs = feed['sensor_extrinsics'] sensor_origin_T_camXs_ = __p(sensor_origin_T_camXs) sensor_origin_T_camRs = feed['sensor_origin_T_camRs'] sensor_origin_T_camRs_ = __p(sensor_origin_T_camRs) sensor_camRs_T_origin_ = utils_geom.safe_inverse(sensor_origin_T_camRs_) sensor_camRs_T_camXs_ = torch.matmul(utils_geom.safe_inverse( sensor_origin_T_camRs_), sensor_origin_T_camXs_) sensor_camXs_T_camRs_ = utils_geom.safe_inverse(sensor_camRs_T_camXs_) sensor_camRs_T_camXs = __u(sensor_camRs_T_camXs_) sensor_camXs_T_camRs = __u(sensor_camXs_T_camRs_) sensor_pix_T_cams = feed['sensor_intrinsics'] sensor_pix_T_cams_ = __p(sensor_pix_T_cams) sensor_pix_T_cams_ = utils_geom.pack_intrinsics(sensor_pix_T_cams_[:, 0, 0], sensor_pix_T_cams_[:, 1, 1], sensor_pix_T_cams_[:, 0, 2], sensor_pix_T_cams_[:, 1, 2]) sensor_pix_T_camRs_ = torch.matmul(sensor_pix_T_cams_, sensor_camXs_T_camRs_) sensor_pix_T_camRs = __u(sensor_pix_T_camRs_) ## .... SENSOR TRANSFORMS END .... ## ## .... Visual Input point clouds .... ## xyz_camXs = feed['xyz_camXs'] xyz_camXs_ = __p(xyz_camXs) xyz_camRs_ = utils_geom.apply_4x4(camRs_T_camXs_, xyz_camXs_) # (40, 4, 4) (B*S, N, 3) xyz_camRs = __u(xyz_camRs_) assert all([torch.allclose(xyz_camR, inp_xyz_camR) for xyz_camR, inp_xyz_camR in zip( xyz_camRs, feed['xyz_camRs'] )]), "computation of xyz_camR here and those computed in input do not match" ## .... Visual Input point clouds end .... ## ## ... Sensor input point clouds ... 
## sensor_xyz_camXs = feed['sensor_xyz_camXs'] sensor_xyz_camXs_ = __p(sensor_xyz_camXs) sensor_xyz_camRs_ = utils_geom.apply_4x4(sensor_camRs_T_camXs_, sensor_xyz_camXs_) sensor_xyz_camRs = __u(sensor_xyz_camRs_) assert all([torch.allclose(sensor_xyz, inp_sensor_xyz) for sensor_xyz, inp_sensor_xyz in zip( sensor_xyz_camRs, feed['sensor_xyz_camRs'] )]), "the sensor_xyz_camRs computed in forward do not match those computed in input" ## ... visual occupancy computation voxelize the pointcloud from above ... ## occRs_ = utils_vox.voxelize_xyz(xyz_camRs_, Z, Y, X) occXs_ = utils_vox.voxelize_xyz(xyz_camXs_, Z, Y, X) occRs_half_ = utils_vox.voxelize_xyz(xyz_camRs_, Z2, Y2, X2) occXs_half_ = utils_vox.voxelize_xyz(xyz_camXs_, Z2, Y2, X2) ## ... visual occupancy computation end ... NOTE: no unpacking ## ## .. visual occupancy computation for sensor inputs .. ## sensor_occRs_ = utils_vox.voxelize_xyz(sensor_xyz_camRs_, Z, Y, X) sensor_occXs_ = utils_vox.voxelize_xyz(sensor_xyz_camXs_, Z, Y, X) sensor_occRs_half_ = utils_vox.voxelize_xyz(sensor_xyz_camRs_, Z2, Y2, X2) sensor_occXs_half_ = utils_vox.voxelize_xyz(sensor_xyz_camXs_, Z2, Y2, X2) ## ... unproject rgb images ... ## unpRs_ = utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X, pix_T_camRs_) unpXs_ = utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X, pix_T_cams_) ## ... unproject rgb finish ... NOTE: no unpacking ## ## ... Make depth images ... ## depth_camXs_, valid_camXs_ = utils_geom.create_depth_image(pix_T_cams_, xyz_camXs_, H, W) dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, pix_T_cams_) dense_xyz_camRs_ = utils_geom.apply_4x4(camRs_T_camXs_, dense_xyz_camXs_) inbound_camXs_ = utils_vox.get_inbounds(dense_xyz_camRs_, Z, Y, X).float() inbound_camXs_ = torch.reshape(inbound_camXs_, [B*S, 1, H, W]) valid_camXs = __u(valid_camXs_) * __u(inbound_camXs_) ## ... Make depth images ... ## ## ... Make sensor depth images ... ## sensor_depth_camXs_, sensor_valid_camXs_ = utils_geom.create_depth_image(sensor_pix_T_cams_, sensor_xyz_camXs_, H, W) sensor_dense_xyz_camXs_ = utils_geom.depth2pointcloud(sensor_depth_camXs_, sensor_pix_T_cams_) sensor_dense_xyz_camRs_ = utils_geom.apply_4x4(sensor_camRs_T_camXs_, sensor_dense_xyz_camXs_) sensor_inbound_camXs_ = utils_vox.get_inbounds(sensor_dense_xyz_camRs_, Z, Y, X).float() sensor_inbound_camXs_ = torch.reshape(sensor_inbound_camXs_, [B*hyp.sensor_S, 1, H, W]) sensor_valid_camXs = __u(sensor_valid_camXs_) * __u(sensor_inbound_camXs_) ### .. Done making sensor depth images .. ## ### ... Sanity check ... Write to tensorboard ... ### summ_writer.summ_oneds('2D_inputs/depth_camXs', torch.unbind(__u(depth_camXs_), dim=1)) summ_writer.summ_oneds('2D_inputs/valid_camXs', torch.unbind(valid_camXs, dim=1)) summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(rgb_camXs, dim=1)) summ_writer.summ_rgbs('2D_inputs/rgb_camRs', torch.unbind(rgb_camRs, dim=1)) summ_writer.summ_occs('3d_inputs/occXs', torch.unbind(__u(occXs_), dim=1), reduce_axes=[2]) summ_writer.summ_unps('3d_inputs/unpXs', torch.unbind(__u(unpXs_), dim=1),\ torch.unbind(__u(occXs_), dim=1)) # A different approach for viewing occRs of sensors sensor_occRs = __u(sensor_occRs_) vis_sensor_occRs = torch.max(sensor_occRs, dim=1, keepdim=True)[0] # summ_writer.summ_occs('3d_inputs/sensor_occXs', torch.unbind(__u(sensor_occXs_), dim=1), # reduce_axes=[2]) summ_writer.summ_occs('3d_inputs/sensor_occRs', torch.unbind(vis_sensor_occRs, dim=1), reduce_axes=[2]) ### ... code for visualizing sensor depths and sensor rgbs ... 
### # summ_writer.summ_oneds('2D_inputs/depths_sensor', torch.unbind(sensor_depths, dim=1)) # summ_writer.summ_rgbs('2D_inputs/rgbs_sensor', torch.unbind(sensor_rgbs, dim=1)) # summ_writer.summ_oneds('2D_inputs/validXs_sensor', torch.unbind(sensor_valid_camXs, dim=1)) if summ_writer.save_this: unpRs_ = utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X, matmul2(pix_T_cams_, camXs_T_camRs_)) unpRs = __u(unpRs_) occRs_ = utils_vox.voxelize_xyz(xyz_camRs_, Z, Y, X) summ_writer.summ_occs('3d_inputs/occRs', torch.unbind(__u(occRs_), dim=1), reduce_axes=[2]) summ_writer.summ_unps('3d_inputs/unpRs', torch.unbind(unpRs, dim=1),\ torch.unbind(__u(occRs_), dim=1)) ### ... Sanity check ... Writing to tensoboard complete ... ### results = list() mask_ = None ### ... Visual featnet part .... ### if hyp.do_feat: featXs_input = torch.cat([__u(occXs_), __u(occXs_)*__u(unpXs_)], dim=2) # B, S, 4, H, W, D featXs_input_ = __p(featXs_input) freeXs_ = utils_vox.get_freespace(__p(xyz_camXs), occXs_half_) freeXs = __u(freeXs_) visXs = torch.clamp(__u(occXs_half_) + freeXs, 0.0, 1.0) if type(mask_) != type(None): assert(list(mask_.shape)[2:5] == list(featXs_input.shape)[2:5]) featXs_, validXs_, _ = self.featnet(featXs_input_, summ_writer, mask=occXs_) # total_loss += feat_loss # Note no need of loss validXs, featXs = __u(validXs_), __u(featXs_) # unpacked into B, S, C, D, H, W # bring everything to ref_frame validRs = utils_vox.apply_4x4_to_voxs(camRs_T_camXs, validXs) visRs = utils_vox.apply_4x4_to_voxs(camRs_T_camXs, visXs) featRs = utils_vox.apply_4x4_to_voxs(camRs_T_camXs, featXs) # This is now in memory coordinates emb3D_e = torch.mean(featRs[:, 1:], dim=1) # context, or the features of the scene emb3D_g = featRs[:, 0] # this is to predict, basically I will pass emb3D_e as input and hope to predict emb3D_g vis3D_e = torch.max(validRs[:, 1:], dim=1)[0] * torch.max(visRs[:, 1:], dim=1)[0] vis3D_g = validRs[:, 0] * visRs[:, 0] #### ... I do not think I need this ... #### results = {} # # if hyp.do_eval_recall: # # results['emb3D_e'] = emb3D_e # # results['emb3D_g'] = emb3D_g # #### ... Check if you need the above summ_writer.summ_feats('3D_feats/featXs_input', torch.unbind(featXs_input, dim=1), pca=True) summ_writer.summ_feats('3D_feats/featXs_output', torch.unbind(featXs, dim=1), pca=True) summ_writer.summ_feats('3D_feats/featRs_output', torch.unbind(featRs, dim=1), pca=True) summ_writer.summ_feats('3D_feats/validRs', torch.unbind(validRs, dim=1), pca=False) summ_writer.summ_feat('3D_feats/vis3D_e', vis3D_e, pca=False) summ_writer.summ_feat('3D_feats/vis3D_g', vis3D_g, pca=False) # I need to aggregate the features and detach to prevent the backward pass on featnet featRs = torch.mean(featRs, dim=1) featRs = featRs.detach() # ... HERE I HAVE THE VISUAL FEATURE TENSOR ... WHICH IS MADE USING 5 EVENLY SPACED VIEWS # # FOR THE TOUCH PART, I HAVE THE OCC and THE AIM IS TO PREDICT FEATURES FROM THEM # if hyp.do_touch_feat: # 1. Pass all the sensor depth images through the backbone network input_sensor_depths = __p(sensor_depths) sensor_features_ = self.backbone_2D(input_sensor_depths) # should normalize these feature tensors sensor_features_ = l2_normalize(sensor_features_, dim=1) sensor_features = __u(sensor_features_) assert torch.allclose(torch.norm(sensor_features_, dim=1), torch.Tensor([1.0]).cuda()),\ "normalization has no effect on you huh." 
if hyp.do_eval_recall: results['sensor_features'] = sensor_features_ results['sensor_depths'] = input_sensor_depths results['object_img'] = rgb_camRs results['sensor_imgs'] = __p(sensor_rgbs) # if moco is used do the same procedure as above but with a different network # if hyp.do_moc or hyp.do_eval_recall: # 1. Pass all the sensor depth images through the key network key_input_sensor_depths = copy.deepcopy(__p(sensor_depths)) # bx1024x1x16x16->(2048x1x16x16) self.key_touch_featnet.eval() with torch.no_grad(): key_sensor_features_ = self.key_touch_featnet(key_input_sensor_depths) key_sensor_features_ = l2_normalize(key_sensor_features_, dim=1) key_sensor_features = __u(key_sensor_features_) assert torch.allclose(torch.norm(key_sensor_features_, dim=1), torch.Tensor([1.0]).cuda()),\ "normalization has no effect on you huh." # doing the same procedure for moco but with a different network end # # do you want to do metric learning voxel point based using visual features and sensor features if hyp.do_touch_embML and not hyp.do_touch_forward: # trial 1: I do not pass the above obtained features through some encoder decoder in 3d # So compute the location is ref_frame which the center of these depth images will occupy # at all of these locations I will sample the from the visual tensor. It forms the positive pairs # negatives are simply everything except the positive sensor_depths_centers_x = center_sensor_W * torch.ones((hyp.B, hyp.sensor_S)) sensor_depths_centers_x = sensor_depths_centers_x.cuda() sensor_depths_centers_y = center_sensor_H * torch.ones((hyp.B, hyp.sensor_S)) sensor_depths_centers_y = sensor_depths_centers_y.cuda() sensor_depths_centers_z = sensor_depths[:, :, 0, center_sensor_H, center_sensor_W] # Next use Pixels2Camera to unproject all of these together. # merge the batch and the sequence dimension sensor_depths_centers_x = sensor_depths_centers_x.reshape(-1, 1, 1) # BxHxW as required by Pixels2Camera sensor_depths_centers_y = sensor_depths_centers_y.reshape(-1, 1, 1) sensor_depths_centers_z = sensor_depths_centers_z.reshape(-1, 1, 1) fx, fy, x0, y0 = utils_geom.split_intrinsics(sensor_pix_T_cams_) sensor_depths_centers_in_camXs_ = utils_geom.Pixels2Camera(sensor_depths_centers_x, sensor_depths_centers_y, sensor_depths_centers_z, fx, fy, x0, y0) # finally use apply4x4 to get the locations in ref_cam sensor_depths_centers_in_ref_cam_ = utils_geom.apply_4x4(sensor_camRs_T_camXs_, sensor_depths_centers_in_camXs_) # NOTE: convert them to memory coordinates, the name is xyz so I presume it returns xyz but talk to ADAM sensor_depths_centers_in_mem_ = utils_vox.Ref2Mem(sensor_depths_centers_in_ref_cam_, Z2, Y2, X2) sensor_depths_centers_in_mem = sensor_depths_centers_in_mem_.reshape(hyp.B, hyp.sensor_S, -1) if debug: print('assert that you are not entering here') from IPython import embed; embed() # form a (0, 1) volume here at these locations and see if it resembles a cup dim1 = X2 * Y2 * Z2 dim2 = X2 * Y2 dim3 = X2 binary_voxel_grid = torch.zeros((hyp.B, X2, Y2, Z2)) # NOTE: Z is the leading dimension rounded_idxs = torch.round(sensor_depths_centers_in_mem) flat_idxs = dim2 * rounded_idxs[0, :, 0] + dim3 * rounded_idxs[0, :, 1] + rounded_idxs[0, :, 2] flat_idxs1 = dim2 * rounded_idxs[1, :, 0] + dim3 * rounded_idxs[1, :, 1] + rounded_idxs[1, :, 2] flat_idxs1 = flat_idxs1 + dim1 flat_idxs1 = flat_idxs1.long() flat_idxs = flat_idxs.long() flattened_grid = binary_voxel_grid.flatten() flattened_grid[flat_idxs] = 1. flattened_grid[flat_idxs1] = 1. 
binary_voxel_grid = flattened_grid.view(B, X2, Y2, Z2) assert binary_voxel_grid[0].sum() == len(torch.unique(flat_idxs)), "some indexes are missed here" assert binary_voxel_grid[1].sum() == len(torch.unique(flat_idxs1)), "some indexes are missed here" # o3d.io.write_voxel_grid("forward_pass_save/grid0.ply", binary_voxel_grid[0]) # o3d.io.write_voxel_grid("forward_pass_save/grid1.ply", binary_voxel_grid[0]) # need to save these voxels save_voxel(binary_voxel_grid[0].cpu().numpy(), "forward_pass_save/grid0.binvox") save_voxel(binary_voxel_grid[1].cpu().numpy(), "forward_pass_save/grid1.binvox") from IPython import embed; embed() # use grid sample to get the visual touch tensor at these locations, NOTE: visual tensor features shape is (B, C, N) visual_tensor_features = utils_samp.bilinear_sample3D(featRs, sensor_depths_centers_in_mem[:, :, 0], sensor_depths_centers_in_mem[:, :, 1], sensor_depths_centers_in_mem[:, :, 2]) visual_feature_tensor = visual_tensor_features.permute(0, 2, 1) # pack it visual_feature_tensor_ = __p(visual_feature_tensor) C = list(visual_feature_tensor.shape)[-1] print('C=', C) # do the metric learning this is the same as before. # the code is basically copied from embnet3d.py but some changes are being made very minor emb_vec = torch.stack((sensor_features_, visual_feature_tensor_), dim=1).view(B*self.num_samples*self.batch_k, C) y = torch.stack([torch.range(0,self.num_samples*B-1), torch.range(0,self.num_samples*B-1)], dim=1).view(self.num_samples*B*self.batch_k) a_indices, anchors, positives, negatives, _ = self.sampler(emb_vec) # I need to write my own version of margin loss since the negatives and anchors may not be same dim d_ap = torch.sqrt(torch.sum((positives - anchors)**2, dim=1) + 1e-8) pos_loss = torch.clamp(d_ap - beta + self._margin, min=0.0) # TODO: expand the dims of anchors and tile them and compute the negative loss # do the pair count where you average by contributors only # this is your total loss # Further idea is to check what volumetric locations do each of the depth images corresponds to # unproject the entire depth image and convert to ref. and then sample. if hyp.do_touch_forward: ## ... Begin code for getting crops from visual memory ... ## sensor_depths_centers_x = center_sensor_W * torch.ones((hyp.B, hyp.sensor_S)) sensor_depths_centers_x = sensor_depths_centers_x.cuda() sensor_depths_centers_y = center_sensor_H * torch.ones((hyp.B, hyp.sensor_S)) sensor_depths_centers_y = sensor_depths_centers_y.cuda() sensor_depths_centers_z = sensor_depths[:, :, 0, center_sensor_H, center_sensor_W] # Next use Pixels2Camera to unproject all of these together. 
# merge the batch and the sequence dimension sensor_depths_centers_x = sensor_depths_centers_x.reshape(-1, 1, 1) sensor_depths_centers_y = sensor_depths_centers_y.reshape(-1, 1, 1) sensor_depths_centers_z = sensor_depths_centers_z.reshape(-1, 1, 1) fx, fy, x0, y0 = utils_geom.split_intrinsics(sensor_pix_T_cams_) sensor_depths_centers_in_camXs_ = utils_geom.Pixels2Camera(sensor_depths_centers_x, sensor_depths_centers_y, sensor_depths_centers_z, fx, fy, x0, y0) sensor_depths_centers_in_world_ = utils_geom.apply_4x4(sensor_origin_T_camXs_, sensor_depths_centers_in_camXs_) # not used by the algorithm ## this will be later used for visualization hence saving it here for now sensor_depths_centers_in_ref_cam_ = utils_geom.apply_4x4(sensor_camRs_T_camXs_, sensor_depths_centers_in_camXs_) # not used by the algorithm sensor_depths_centers_in_camXs = __u(sensor_depths_centers_in_camXs_).squeeze(2) # There has to be a better way to do this, for each of the cameras in the batch I want a box of size (ch, cw, cd) # TODO: rotation is the deviation of the box from the axis aligned do I want this tB, tN, _ = list(sensor_depths_centers_in_camXs.shape) # 2, 512, _ boxlist = torch.zeros(tB, tN, 9) # 2, 512, 9 boxlist[:, :, :3] = sensor_depths_centers_in_camXs # this lies on the object boxlist[:, :, 3:6] = torch.FloatTensor([hyp.contextW, hyp.contextH, hyp.contextD]) # convert the boxlist to lrtlist and to cuda # the rt here transforms the from box coordinates to camera coordinates box_lrtlist = utils_geom.convert_boxlist_to_lrtlist(boxlist) # Now I will use crop_zoom_from_mem functionality to get the features in each of the boxes # I will do it for each of the box separately as required by the api context_grid_list = list() for m in range(box_lrtlist.shape[1]): curr_box = box_lrtlist[:, m, :] context_grid = utils_vox.crop_zoom_from_mem(featRs, curr_box, 8, 8, 8, sensor_camRs_T_camXs[:, m, :, :]) context_grid_list.append(context_grid) context_grid_list = torch.stack(context_grid_list, dim=1) context_grid_list_ = __p(context_grid_list) ## ... till here I believe I have not introduced any randomness, so the points are still in ## ... End code for getting crops around this center of certain height, width and depth ... ## ## ... Begin code for passing the context grid through 3D CNN to obtain a vector ... ## sensor_cam_locs = feed['sensor_locs'] # these are in origin coordinates sensor_cam_quats = feed['sensor_quats'] # this too in in world_coordinates sensor_cam_locs_ = __p(sensor_cam_locs) sensor_cam_quats_ = __p(sensor_cam_quats) sensor_cam_locs_in_R_ = utils_geom.apply_4x4(sensor_camRs_T_origin_, sensor_cam_locs_.unsqueeze(1)).squeeze(1) # TODO TODO TODO confirm that this is right? 
get_r_mat = lambda cam_quat: transformations.quaternion_matrix_py(cam_quat) rot_mat_Xs_ = torch.from_numpy(np.stack(list(map(get_r_mat, sensor_cam_quats_.cpu().numpy())))).to(sensor_cam_locs_.device).float() rot_mat_Rs_ = torch.bmm(sensor_camRs_T_origin_, rot_mat_Xs_) get_quat = lambda r_mat: transformations.quaternion_from_matrix_py(r_mat) sensor_quats_in_R_ = torch.from_numpy(np.stack(list(map(get_quat, rot_mat_Rs_.cpu().numpy())))).to(sensor_cam_locs_.device).float() pred_features_ = self.context_net(context_grid_list_,\ sensor_cam_locs_in_R_, sensor_quats_in_R_) # normalize pred_features_ = l2_normalize(pred_features_, dim=1) pred_features = __u(pred_features_) # if doing moco I have to pass the inputs through the key (slow) network as well # if hyp.do_moc or hyp.do_eval_recall: key_context_grid_list_ = copy.deepcopy(context_grid_list_) key_sensor_cam_locs_in_R_ = copy.deepcopy(sensor_cam_locs_in_R_) key_sensor_quats_in_R_ = copy.deepcopy(sensor_quats_in_R_) self.key_context_net.eval() with torch.no_grad(): key_pred_features_ = self.key_context_net(key_context_grid_list_,\ key_sensor_cam_locs_in_R_, key_sensor_quats_in_R_) # normalize; normalization is very important here (though it is not fully clear why) key_pred_features_ = l2_normalize(key_pred_features_, dim=1) key_pred_features = __u(key_pred_features_) # end passing of the input through the slow network; this is necessary for moco # ## ... End code for passing the context grid through 3D CNN to obtain a vector ... ## ## ... Begin code for doing metric learning between pred_features and sensor features ... ## # 1. Subsample both based on the number of positive samples if hyp.do_touch_embML: assert(hyp.do_touch_forward) assert(hyp.do_touch_feat) perm = torch.randperm(len(pred_features_)) ## 1024 chosen_sensor_feats_ = sensor_features_[perm[:self.num_pos_samples*hyp.B]] chosen_pred_feats_ = pred_features_[perm[:self.num_pos_samples*hyp.B]] # 2. form the emb_vec and get pos and negative samples for the batch emb_vec = torch.stack((chosen_sensor_feats_, chosen_pred_feats_), dim=1).view(hyp.B*self.num_pos_samples*self.batch_k, -1) y = torch.stack([torch.arange(0, self.num_pos_samples*B), torch.arange(0, self.num_pos_samples*B)],\ dim=1).view(B*self.num_pos_samples*self.batch_k) # (0, 0, 1, 1, ..., 255, 255) a_indices, anchors, positives, negatives, _ = self.sampler(emb_vec) # 3. Compute the loss: the ML loss and the l2 distance between the embeddings margin_loss, _ = self.criterion(anchors, positives, negatives, self.beta, y[a_indices]) total_loss = utils_misc.add_loss('embtouch/emb_touch_ml_loss', total_loss, margin_loss, hyp.emb_3D_ml_coeff, summ_writer) # the l2 loss between the embeddings l2_loss = torch.nn.functional.mse_loss(chosen_sensor_feats_, chosen_pred_feats_) total_loss = utils_misc.add_loss('embtouch/emb_l2_loss', total_loss, l2_loss, hyp.emb_3D_l2_coeff, summ_writer) ## ... End code for doing metric learning between pred_features and sensor_features ... ## ## ... Begin code for doing moc inspired ML between pred_features and sensor_features ... ## if hyp.do_moc and moc_init_done: moc_loss = self.moc_ml_net(sensor_features_, key_sensor_features_,\ pred_features_, key_pred_features_, summ_writer) total_loss += moc_loss ## ... End code for doing moc inspired ML between pred_features and sensor_features ... ## ## ... add code for filling up results needed for eval recall ...
## if hyp.do_eval_recall and moc_init_done: results['context_features'] = pred_features_ results['sensor_depth_centers_in_world'] = sensor_depths_centers_in_world_ results['sensor_depths_centers_in_ref_cam'] = sensor_depths_centers_in_ref_cam_ results['object_name'] = feed['object_name'] # I will do precision-recall here at different recall values and summarize it using tensorboard recalls = [1, 5, 10, 50, 100, 200] # these should not carry any gradients, hence the clone().detach() below # fast_sensor_emb_e = sensor_features_ # fast_context_emb_e = pred_features_ # slow_sensor_emb_g = key_sensor_features_ # slow_context_emb_g = key_context_features_ fast_sensor_emb_e = sensor_features_.clone().detach() fast_context_emb_e = pred_features_.clone().detach() # I will do multiple eval recalls here slow_sensor_emb_g = key_sensor_features_.clone().detach() slow_context_emb_g = key_pred_features_.clone().detach() # assuming the above went well, move everything to numpy fast_sensor_emb_e = fast_sensor_emb_e.cpu().numpy() fast_context_emb_e = fast_context_emb_e.cpu().numpy() slow_sensor_emb_g = slow_sensor_emb_g.cpu().numpy() slow_context_emb_g = slow_context_emb_g.cpu().numpy() # now also move the vis to numpy and plot it using matplotlib vis_e = __p(sensor_rgbs) vis_g = __p(sensor_rgbs) np_vis_e = vis_e.cpu().detach().numpy() np_vis_e = np.transpose(np_vis_e, [0, 2, 3, 1]) np_vis_g = vis_g.cpu().detach().numpy() np_vis_g = np.transpose(np_vis_g, [0, 2, 3, 1]) # bring it back to the original color range np_vis_g = ((np_vis_g+0.5) * 255).astype(np.uint8) np_vis_e = ((np_vis_e+0.5) * 255).astype(np.uint8) # now compare fast_sensor_emb_e with slow_context_emb_g # since positives are drawn against this fast_sensor_emb_e_list = [fast_sensor_emb_e, np_vis_e] slow_context_emb_g_list = [slow_context_emb_g, np_vis_g] prec, vis, chosen_inds_and_neighbors_inds = compute_precision( fast_sensor_emb_e_list, slow_context_emb_g_list, recalls=recalls ) # finally plot the nearest-neighbour retrieval and move ahead if feed['global_step'] % 1 == 0: # interval of 1, i.e. plot every step for now plot_nearest_neighbours(vis, step=feed['global_step'], save_dir='/home/gauravp/eval_results', name='fast_sensor_slow_context') # plot the precisions at different recalls for pr, re in enumerate(recalls): summ_writer.summ_scalar(f'evrefast_sensor_slow_context/recall@{re}',\ prec[pr]) # now compare fast_context_emb_e with slow_sensor_emb_g fast_context_emb_e_list = [fast_context_emb_e, np_vis_e] slow_sensor_emb_g_list = [slow_sensor_emb_g, np_vis_g] prec, vis, chosen_inds_and_neighbors_inds = compute_precision( fast_context_emb_e_list, slow_sensor_emb_g_list, recalls=recalls ) if feed['global_step'] % 1 == 0: plot_nearest_neighbours(vis, step=feed['global_step'], save_dir='/home/gauravp/eval_results', name='fast_context_slow_sensor') # plot the precisions at different recalls for pr, re in enumerate(recalls): summ_writer.summ_scalar(f'evrefast_context_slow_sensor/recall@{re}',\ prec[pr]) # now finally compare both fast embeddings; presumably we want these to move closer together too fast_sensor_list = [fast_sensor_emb_e, np_vis_e] fast_context_list = [fast_context_emb_e, np_vis_g] prec, vis, chosen_inds_and_neighbors_inds = compute_precision( fast_sensor_list, fast_context_list, recalls=recalls ) if feed['global_step'] % 1 == 0: plot_nearest_neighbours(vis, step=feed['global_step'], save_dir='/home/gauravp/eval_results', name='fast_sensor_fast_context') for pr, re in enumerate(recalls): summ_writer.summ_scalar(f'evrefast_sensor_fast_context/recall@{re}',\ prec[pr]) ## ... done code for filling up results needed for eval recall ...
## summ_writer.summ_scalar('loss', total_loss.cpu().item()) return total_loss, results, [key_sensor_features_, key_pred_features_]
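## ... sketch: margin loss with the negative term and contributor-only averaging ... ##
# The metric-learning block earlier in this forward pass computes d_ap and pos_loss but
# leaves the negative term as a TODO, and notes that pairs should be averaged by
# contributors only. A minimal, self-contained sketch of such a loss is below; it is an
# assumption-level illustration (margin_loss_sketch and its default beta/margin values
# are hypothetical), not the self.criterion actually used above.
import torch

def margin_loss_sketch(anchors, positives, negatives, beta=1.2, margin=0.2, eps=1e-8):
    # anchors/positives/negatives: (P, C) embedding rows, one negative per anchor
    d_ap = torch.sqrt(torch.sum((positives - anchors) ** 2, dim=1) + eps)
    d_an = torch.sqrt(torch.sum((negatives - anchors) ** 2, dim=1) + eps)
    pos_loss = torch.clamp(d_ap - beta + margin, min=0.0)
    neg_loss = torch.clamp(beta - d_an + margin, min=0.0)
    # average only over pairs that actually contribute a nonzero term
    pair_cnt = ((pos_loss > 0.0) | (neg_loss > 0.0)).float().sum().clamp(min=1.0)
    return (pos_loss + neg_loss).sum() / pair_cnt

# e.g.: margin_loss_sketch(torch.randn(8, 32), torch.randn(8, 32), torch.randn(8, 32))
## ... end sketch ... ##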
def forward(self, feed): results = dict() if 'log_freq' not in feed.keys(): feed['log_freq'] = None start_time = time.time() summ_writer = utils_improc.Summ_writer(writer=feed['writer'], global_step=feed['global_step'], set_name=feed['set_name'], log_freq=feed['log_freq'], fps=8) writer = feed['writer'] global_step = feed['global_step'] total_loss = torch.tensor(0.0).cuda() __p = lambda x: utils_basic.pack_seqdim(x, B) __u = lambda x: utils_basic.unpack_seqdim(x, B) __pb = lambda x: utils_basic.pack_boxdim(x, hyp.N) __ub = lambda x: utils_basic.unpack_boxdim(x, hyp.N) if hyp.aug_object_ent_dis: __pb_a = lambda x: utils_basic.pack_boxdim( x, hyp.max_obj_aug + hyp.max_obj_aug_dis) __ub_a = lambda x: utils_basic.unpack_boxdim( x, hyp.max_obj_aug + hyp.max_obj_aug_dis) else: __pb_a = lambda x: utils_basic.pack_boxdim(x, hyp.max_obj_aug) __ub_a = lambda x: utils_basic.unpack_boxdim(x, hyp.max_obj_aug) B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N PH, PW = hyp.PH, hyp.PW K = hyp.K BOX_SIZE = hyp.BOX_SIZE Z, Y, X = hyp.Z, hyp.Y, hyp.X Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2) Z4, Y4, X4 = int(Z / 4), int(Y / 4), int(X / 4) D = 9 tids = torch.from_numpy(np.reshape(np.arange(B * N), [B, N])) rgb_camXs = feed["rgb_camXs_raw"] pix_T_cams = feed["pix_T_cams_raw"] camRs_T_origin = feed["camR_T_origin_raw"] origin_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_origin))) origin_T_camXs = feed["origin_T_camXs_raw"] camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0) camRs_T_camXs = __u( torch.matmul(utils_geom.safe_inverse(__p(origin_T_camRs)), __p(origin_T_camXs))) camXs_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_camXs))) camX0_T_camRs = camXs_T_camRs[:, 0] camX1_T_camRs = camXs_T_camRs[:, 1] camR_T_camX0 = utils_geom.safe_inverse(camX0_T_camRs) xyz_camXs = feed["xyz_camXs_raw"] depth_camXs_, valid_camXs_ = utils_geom.create_depth_image( __p(pix_T_cams), __p(xyz_camXs), H, W) dense_xyz_camXs_ = utils_geom.depth2pointcloud(depth_camXs_, __p(pix_T_cams)) xyz_camRs = __u( utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs))) xyz_camX0s = __u( utils_geom.apply_4x4(__p(camX0_T_camXs), __p(xyz_camXs))) occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X)) occXs_to_Rs = utils_vox.apply_4x4s_to_voxs(camRs_T_camXs, occXs) occXs_to_Rs_45 = cross_corr.rotate_tensor_along_y_axis(occXs_to_Rs, 45) occXs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z2, Y2, X2)) occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2)) occX0s_half = __u(utils_vox.voxelize_xyz(__p(xyz_camX0s), Z2, Y2, X2)) unpXs = __u( utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z, Y, X, __p(pix_T_cams))) unpXs_half = __u( utils_vox.unproject_rgb_to_mem(__p(rgb_camXs), Z2, Y2, X2, __p(pix_T_cams))) unpX0s_half = __u( utils_vox.unproject_rgb_to_mem( __p(rgb_camXs), Z2, Y2, X2, utils_basic.matmul2( __p(pix_T_cams), utils_geom.safe_inverse(__p(camX0_T_camXs))))) unpRs = __u( utils_vox.unproject_rgb_to_mem( __p(rgb_camXs), Z, Y, X, utils_basic.matmul2( __p(pix_T_cams), utils_geom.safe_inverse(__p(camRs_T_camXs))))) unpRs_half = __u( utils_vox.unproject_rgb_to_mem( __p(rgb_camXs), Z2, Y2, X2, utils_basic.matmul2( __p(pix_T_cams), utils_geom.safe_inverse(__p(camRs_T_camXs))))) dense_xyz_camRs_ = utils_geom.apply_4x4(__p(camRs_T_camXs), dense_xyz_camXs_) inbound_camXs_ = utils_vox.get_inbounds(dense_xyz_camRs_, Z, Y, X).float() inbound_camXs_ = torch.reshape(inbound_camXs_, [B * S, 1, H, W]) depth_camXs = __u(depth_camXs_) valid_camXs = __u(valid_camXs_) * __u(inbound_camXs_) 
summ_writer.summ_oneds('2D_inputs/depth_camXs', torch.unbind(depth_camXs, dim=1), maxdepth=21.0) summ_writer.summ_oneds('2D_inputs/valid_camXs', torch.unbind(valid_camXs, dim=1)) summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(rgb_camXs, dim=1)) summ_writer.summ_occs('3D_inputs/occXs', torch.unbind(occXs, dim=1)) summ_writer.summ_unps('3D_inputs/unpXs', torch.unbind(unpXs, dim=1), torch.unbind(occXs, dim=1)) occRs = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z, Y, X)) if hyp.do_eval_boxes: if hyp.dataset_name == "clevr_vqa": gt_boxes_origin_corners = feed['gt_box'] gt_scores_origin = feed['gt_scores'].detach().cpu().numpy() classes = feed['classes'] scores = gt_scores_origin tree_seq_filename = feed['tree_seq_filename'] gt_boxes_origin = nlu.get_ends_of_corner( gt_boxes_origin_corners) gt_boxes_origin_end = torch.reshape(gt_boxes_origin, [hyp.B, hyp.N, 2, 3]) gt_boxes_origin_theta = nlu.get_alignedboxes2thetaformat( gt_boxes_origin_end) gt_boxes_origin_corners = utils_geom.transform_boxes_to_corners( gt_boxes_origin_theta) gt_boxesR_corners = __ub( utils_geom.apply_4x4(camRs_T_origin[:, 0], __pb(gt_boxes_origin_corners))) gt_boxesR_theta = utils_geom.transform_corners_to_boxes( gt_boxesR_corners) gt_boxesR_end = nlu.get_ends_of_corner(gt_boxesR_corners) else: tree_seq_filename = feed['tree_seq_filename'] tree_filenames = [ join(hyp.root_dataset, i) for i in tree_seq_filename if i != "invalid_tree" ] invalid_tree_filenames = [ join(hyp.root_dataset, i) for i in tree_seq_filename if i == "invalid_tree" ] num_empty = len(invalid_tree_filenames) trees = [pickle.load(open(i, "rb")) for i in tree_filenames] len_valid = len(trees) if len_valid > 0: gt_boxesR, scores, classes = nlu.trees_rearrange(trees) if num_empty > 0: gt_boxesR = np.concatenate([ gt_boxesR, empty_gt_boxesR ]) if len_valid > 0 else empty_gt_boxesR scores = np.concatenate([ scores, empty_scores ]) if len_valid > 0 else empty_scores classes = np.concatenate([ classes, empty_classes ]) if len_valid > 0 else empty_classes gt_boxesR = torch.from_numpy( gt_boxesR).cuda().float() # torch.Size([2, 3, 6]) gt_boxesR_end = torch.reshape(gt_boxesR, [hyp.B, hyp.N, 2, 3]) gt_boxesR_theta = nlu.get_alignedboxes2thetaformat( gt_boxesR_end) #torch.Size([2, 3, 9]) gt_boxesR_corners = utils_geom.transform_boxes_to_corners( gt_boxesR_theta) class_names_ex_1 = "_".join(classes[0]) summ_writer.summ_text('eval_boxes/class_names', class_names_ex_1) gt_boxesRMem_corners = __ub( utils_vox.Ref2Mem(__pb(gt_boxesR_corners), Z2, Y2, X2)) gt_boxesRMem_end = nlu.get_ends_of_corner(gt_boxesRMem_corners) gt_boxesRMem_theta = utils_geom.transform_corners_to_boxes( gt_boxesRMem_corners) gt_boxesRUnp_corners = __ub( utils_vox.Ref2Mem(__pb(gt_boxesR_corners), Z, Y, X)) gt_boxesRUnp_end = nlu.get_ends_of_corner(gt_boxesRUnp_corners) gt_boxesX0_corners = __ub( utils_geom.apply_4x4(camX0_T_camRs, __pb(gt_boxesR_corners))) gt_boxesX0Mem_corners = __ub( utils_vox.Ref2Mem(__pb(gt_boxesX0_corners), Z2, Y2, X2)) gt_boxesX0Mem_theta = utils_geom.transform_corners_to_boxes( gt_boxesX0Mem_corners) gt_boxesX0Mem_end = nlu.get_ends_of_corner(gt_boxesX0Mem_corners) gt_boxesX0_end = nlu.get_ends_of_corner(gt_boxesX0_corners) gt_cornersX0_pix = __ub( utils_geom.apply_pix_T_cam(pix_T_cams[:, 0], __pb(gt_boxesX0_corners))) rgb_camX0 = rgb_camXs[:, 0] rgb_camX1 = rgb_camXs[:, 1] summ_writer.summ_box_by_corners('eval_boxes/gt_boxescamX0', rgb_camX0, gt_boxesX0_corners, torch.from_numpy(scores), tids, pix_T_cams[:, 0]) unps_vis = utils_improc.get_unps_vis(unpX0s_half, 
occX0s_half) unp_vis = torch.mean(unps_vis, dim=1) unps_visRs = utils_improc.get_unps_vis(unpRs_half, occRs_half) unp_visRs = torch.mean(unps_visRs, dim=1) unps_visRs_full = utils_improc.get_unps_vis(unpRs, occRs) unp_visRs_full = torch.mean(unps_visRs_full, dim=1) summ_writer.summ_box_mem_on_unp('eval_boxes/gt_boxesR_mem', unp_visRs, gt_boxesRMem_end, scores, tids) unpX0s_half = torch.mean(unpX0s_half, dim=1) unpX0s_half = nlu.zero_out(unpX0s_half, gt_boxesX0Mem_end, scores) occX0s_half = torch.mean(occX0s_half, dim=1) occX0s_half = nlu.zero_out(occX0s_half, gt_boxesX0Mem_end, scores) summ_writer.summ_unp('3D_inputs/unpX0s', unpX0s_half, occX0s_half) if hyp.do_feat: featXs_input = torch.cat([occXs, occXs * unpXs], dim=2) featXs_input_ = __p(featXs_input) freeXs_ = utils_vox.get_freespace(__p(xyz_camXs), __p(occXs_half)) freeXs = __u(freeXs_) visXs = torch.clamp(occXs_half + freeXs, 0.0, 1.0) mask_ = None if (type(mask_) != type(None)): assert (list(mask_.shape)[2:5] == list( featXs_input_.shape)[2:5]) featXs_, feat_loss = self.featnet(featXs_input_, summ_writer, mask=__p(occXs)) #mask_) total_loss += feat_loss validXs = torch.ones_like(visXs) _validX00 = validXs[:, 0:1] _validX01 = utils_vox.apply_4x4s_to_voxs(camX0_T_camXs[:, 1:], validXs[:, 1:]) validX0s = torch.cat([_validX00, _validX01], dim=1) validRs = utils_vox.apply_4x4s_to_voxs(camRs_T_camXs, validXs) visRs = utils_vox.apply_4x4s_to_voxs(camRs_T_camXs, visXs) featXs = __u(featXs_) _featX00 = featXs[:, 0:1] _featX01 = utils_vox.apply_4x4s_to_voxs(camX0_T_camXs[:, 1:], featXs[:, 1:]) featX0s = torch.cat([_featX00, _featX01], dim=1) emb3D_e = torch.mean(featX0s[:, 1:], dim=1) vis3D_e_R = torch.max(visRs[:, 1:], dim=1)[0] emb3D_g = featX0s[:, 0] vis3D_g_R = visRs[:, 0] validR_combo = torch.min(validRs, dim=1).values summ_writer.summ_feats('3D_feats/featXs_input', torch.unbind(featXs_input, dim=1), pca=True) summ_writer.summ_feats('3D_feats/featXs_output', torch.unbind(featXs, dim=1), valids=torch.unbind(validXs, dim=1), pca=True) summ_writer.summ_feats('3D_feats/featX0s_output', torch.unbind(featX0s, dim=1), valids=torch.unbind( torch.ones_like(validRs), dim=1), pca=True) summ_writer.summ_feats('3D_feats/validRs', torch.unbind(validRs, dim=1), pca=False) summ_writer.summ_feat('3D_feats/vis3D_e_R', vis3D_e_R, pca=False) summ_writer.summ_feat('3D_feats/vis3D_g_R', vis3D_g_R, pca=False) if hyp.do_munit: object_classes, filenames = nlu.create_object_classes( classes, [tree_seq_filename, tree_seq_filename], scores) if hyp.do_munit_fewshot: emb3D_e_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_e) emb3D_g_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_g) emb3D_R = emb3D_e_R emb3D_e_R_object, emb3D_g_R_object, validR_combo_object = nlu.create_object_tensors( [emb3D_e_R, emb3D_g_R], [validR_combo], gt_boxesRMem_end, scores, [BOX_SIZE, BOX_SIZE, BOX_SIZE]) emb3D_R_object = (emb3D_e_R_object + emb3D_g_R_object) / 2 content, style = self.munitnet.net.gen_a.encode(emb3D_R_object) objects_taken, _ = self.munitnet.net.gen_a.decode( content, style) styles = style contents = content elif hyp.do_3d_style_munit: emb3D_e_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_e) emb3D_g_R = utils_vox.apply_4x4_to_vox(camR_T_camX0, emb3D_g) emb3D_R = emb3D_e_R # st() emb3D_e_R_object, emb3D_g_R_object, validR_combo_object = nlu.create_object_tensors( [emb3D_e_R, emb3D_g_R], [validR_combo], gt_boxesRMem_end, scores, [BOX_SIZE, BOX_SIZE, BOX_SIZE]) emb3D_R_object = (emb3D_e_R_object + emb3D_g_R_object) / 2 camX1_T_R = camXs_T_camRs[:, 1] camX0_T_R = 
camXs_T_camRs[:, 0] assert hyp.B == 2 assert emb3D_e_R_object.shape[0] == 2 munit_loss, sudo_input_0, sudo_input_1, recon_input_0, recon_input_1, sudo_input_0_cycle, sudo_input_1_cycle, styles, contents, adin = self.munitnet( emb3D_R_object[0:1], emb3D_R_object[1:2]) if hyp.store_content_style_range: if self.max_content == None: self.max_content = torch.zeros_like( contents[0][0]).cuda() - 100000000 if self.min_content == None: self.min_content = torch.zeros_like( contents[0][0]).cuda() + 100000000 if self.max_style == None: self.max_style = torch.zeros_like( styles[0][0]).cuda() - 100000000 if self.min_style == None: self.min_style = torch.zeros_like( styles[0][0]).cuda() + 100000000 self.max_content = torch.max( torch.max(self.max_content, contents[0][0]), contents[1][0]) self.min_content = torch.min( torch.min(self.min_content, contents[0][0]), contents[1][0]) self.max_style = torch.max( torch.max(self.max_style, styles[0][0]), styles[1][0]) self.min_style = torch.min( torch.min(self.min_style, styles[0][0]), styles[1][0]) data_to_save = { 'max_content': self.max_content.cpu().numpy(), 'min_content': self.min_content.cpu().numpy(), 'max_style': self.max_style.cpu().numpy(), 'min_style': self.min_style.cpu().numpy() } with open('content_style_range.p', 'wb') as f: pickle.dump(data_to_save, f) elif hyp.is_contrastive_examples: if hyp.normalize_contrast: content0 = (contents[0] - self.min_content) / ( self.max_content - self.min_content + 1e-5) content1 = (contents[1] - self.min_content) / ( self.max_content - self.min_content + 1e-5) style0 = (styles[0] - self.min_style) / ( self.max_style - self.min_style + 1e-5) style1 = (styles[1] - self.min_style) / ( self.max_style - self.min_style + 1e-5) else: content0 = contents[0] content1 = contents[1] style0 = styles[0] style1 = styles[1] # euclid_dist_content = torch.sum(torch.sqrt((content0 - content1)**2))/torch.prod(torch.tensor(content0.shape)) # euclid_dist_style = torch.sum(torch.sqrt((style0-style1)**2))/torch.prod(torch.tensor(style0.shape)) euclid_dist_content = (content0 - content1).norm(2) / ( content0.numel()) euclid_dist_style = (style0 - style1).norm(2) / (style0.numel()) content_0_pooled = torch.mean( content0.reshape(list(content0.shape[:2]) + [-1]), dim=-1) content_1_pooled = torch.mean( content1.reshape(list(content1.shape[:2]) + [-1]), dim=-1) euclid_dist_content_pooled = (content_0_pooled - content_1_pooled).norm(2) / ( content_0_pooled.numel()) content_0_normalized = content0 / content0.norm() content_1_normalized = content1 / content1.norm() style_0_normalized = style0 / style0.norm() style_1_normalized = style1 / style1.norm() content_0_pooled_normalized = content_0_pooled / content_0_pooled.norm( ) content_1_pooled_normalized = content_1_pooled / content_1_pooled.norm( ) cosine_dist_content = torch.sum(content_0_normalized * content_1_normalized) cosine_dist_style = torch.sum(style_0_normalized * style_1_normalized) cosine_dist_content_pooled = torch.sum( content_0_pooled_normalized * content_1_pooled_normalized) print("euclid dist [content, pooled-content, style]: ", euclid_dist_content, euclid_dist_content_pooled, euclid_dist_style) print("cosine sim [content, pooled-content, style]: ", cosine_dist_content, cosine_dist_content_pooled, cosine_dist_style) if hyp.run_few_shot_on_munit: if (global_step % 300) == 1 or (global_step % 300) == 0: wrong = False try: precision_style = float(self.tp_style) / self.all_style precision_content = float( self.tp_content) / self.all_content except ZeroDivisionError: wrong = True if 
not wrong: summ_writer.summ_scalar( 'precision/unsupervised_precision_style', precision_style) summ_writer.summ_scalar( 'precision/unsupervised_precision_content', precision_content) # st() self.embed_list_style = defaultdict(lambda: []) self.embed_list_content = defaultdict(lambda: []) self.tp_style = 0 self.all_style = 0 self.tp_content = 0 self.all_content = 0 self.check = False elif not self.check and not nlu.check_fill_dict( self.embed_list_content, self.embed_list_style): print("Filling \n") for index, class_val in enumerate(object_classes): if hyp.dataset_name == "clevr_vqa": class_val_content, class_val_style = class_val.split( "/") else: class_val_content, class_val_style = [ class_val.split("/")[0], class_val.split("/")[0] ] print(len(self.embed_list_style.keys()), "style class", len(self.embed_list_content), "content class", self.embed_list_content.keys()) if len(self.embed_list_style[class_val_style] ) < hyp.few_shot_nums: self.embed_list_style[class_val_style].append( styles[index].squeeze()) if len(self.embed_list_content[class_val_content] ) < hyp.few_shot_nums: if hyp.avg_3d: content_val = contents[index] content_val = torch.mean(content_val.reshape( [content_val.shape[1], -1]), dim=-1) # st() self.embed_list_content[ class_val_content].append(content_val) else: self.embed_list_content[ class_val_content].append( contents[index].reshape([-1])) else: self.check = True try: print(float(self.tp_content) / self.all_content) print(float(self.tp_style) / self.all_style) except Exception as e: pass average = True if average: for key, val in self.embed_list_style.items(): if isinstance(val, type([])): self.embed_list_style[key] = torch.mean( torch.stack(val, dim=0), dim=0) for key, val in self.embed_list_content.items(): if isinstance(val, type([])): self.embed_list_content[key] = torch.mean( torch.stack(val, dim=0), dim=0) else: for key, val in self.embed_list_style.items(): if isinstance(val, type([])): self.embed_list_style[key] = torch.stack(val, dim=0) for key, val in self.embed_list_content.items(): if isinstance(val, type([])): self.embed_list_content[key] = torch.stack( val, dim=0) for index, class_val in enumerate(object_classes): class_val = class_val if hyp.dataset_name == "clevr_vqa": class_val_content, class_val_style = class_val.split( "/") else: class_val_content, class_val_style = [ class_val.split("/")[0], class_val.split("/")[0] ] style_val = styles[index].squeeze().unsqueeze(0) if not average: embed_list_val_style = torch.cat(list( self.embed_list_style.values()), dim=0) embed_list_key_style = list( np.repeat( np.expand_dims( list(self.embed_list_style.keys()), 1), hyp.few_shot_nums, 1).reshape([-1])) else: embed_list_val_style = torch.stack(list( self.embed_list_style.values()), dim=0) embed_list_key_style = list( self.embed_list_style.keys()) embed_list_val_style = utils_basic.l2_normalize( embed_list_val_style, dim=1).permute(1, 0) style_val = utils_basic.l2_normalize(style_val, dim=1) scores_styles = torch.matmul(style_val, embed_list_val_style) index_key = torch.argmax(scores_styles, dim=1).squeeze() selected_class_style = embed_list_key_style[index_key] self.styles_prediction[class_val_style].append( selected_class_style) if class_val_style == selected_class_style: self.tp_style += 1 self.all_style += 1 if hyp.avg_3d: content_val = contents[index] content_val = torch.mean(content_val.reshape( [content_val.shape[1], -1]), dim=-1).unsqueeze(0) else: content_val = contents[index].reshape( [-1]).unsqueeze(0) if not average: embed_list_val_content = 
torch.cat(list( self.embed_list_content.values()), dim=0) embed_list_key_content = list( np.repeat( np.expand_dims( list(self.embed_list_content.keys()), 1), hyp.few_shot_nums, 1).reshape([-1])) else: embed_list_val_content = torch.stack(list( self.embed_list_content.values()), dim=0) embed_list_key_content = list( self.embed_list_content.keys()) embed_list_val_content = utils_basic.l2_normalize( embed_list_val_content, dim=1).permute(1, 0) content_val = utils_basic.l2_normalize(content_val, dim=1) scores_content = torch.matmul(content_val, embed_list_val_content) index_key = torch.argmax(scores_content, dim=1).squeeze() selected_class_content = embed_list_key_content[ index_key] self.content_prediction[class_val_content].append( selected_class_content) if class_val_content == selected_class_content: self.tp_content += 1 self.all_content += 1 # st() munit_loss = hyp.munit_loss_weight * munit_loss recon_input_obj = torch.cat([recon_input_0, recon_input_1], dim=0) recon_emb3D_R = nlu.update_scene_with_objects( emb3D_R, recon_input_obj, gt_boxesRMem_end, scores) sudo_input_obj = torch.cat([sudo_input_0, sudo_input_1], dim=0) styled_emb3D_R = nlu.update_scene_with_objects( emb3D_R, sudo_input_obj, gt_boxesRMem_end, scores) styled_emb3D_e_X1 = utils_vox.apply_4x4_to_vox( camX1_T_R, styled_emb3D_R) styled_emb3D_e_X0 = utils_vox.apply_4x4_to_vox( camX0_T_R, styled_emb3D_R) emb3D_e_X1 = utils_vox.apply_4x4_to_vox(camX1_T_R, recon_emb3D_R) emb3D_e_X0 = utils_vox.apply_4x4_to_vox(camX0_T_R, recon_emb3D_R) emb3D_e_X1_og = utils_vox.apply_4x4_to_vox(camX1_T_R, emb3D_R) emb3D_e_X0_og = utils_vox.apply_4x4_to_vox(camX0_T_R, emb3D_R) emb3D_R_aug_diff = torch.abs(emb3D_R - recon_emb3D_R) summ_writer.summ_feat(f'aug_feat/og', emb3D_R) summ_writer.summ_feat(f'aug_feat/og_gen', recon_emb3D_R) summ_writer.summ_feat(f'aug_feat/og_aug_diff', emb3D_R_aug_diff) if hyp.cycle_style_view_loss: sudo_input_obj_cycle = torch.cat( [sudo_input_0_cycle, sudo_input_1_cycle], dim=0) styled_emb3D_R_cycle = nlu.update_scene_with_objects( emb3D_R, sudo_input_obj_cycle, gt_boxesRMem_end, scores) styled_emb3D_e_X0_cycle = utils_vox.apply_4x4_to_vox( camX0_T_R, styled_emb3D_R_cycle) styled_emb3D_e_X1_cycle = utils_vox.apply_4x4_to_vox( camX1_T_R, styled_emb3D_R_cycle) summ_writer.summ_scalar('munit_loss', munit_loss.cpu().item()) total_loss += munit_loss if hyp.do_occ and hyp.occ_do_cheap: occX0_sup, freeX0_sup, _, freeXs = utils_vox.prep_occs_supervision( camX0_T_camXs, xyz_camXs, Z2, Y2, X2, agg=True) summ_writer.summ_occ('occ_sup/occ_sup', occX0_sup) summ_writer.summ_occ('occ_sup/free_sup', freeX0_sup) summ_writer.summ_occs('occ_sup/freeXs_sup', torch.unbind(freeXs, dim=1)) summ_writer.summ_occs('occ_sup/occXs_sup', torch.unbind(occXs_half, dim=1)) occ_loss, occX0s_pred_ = self.occnet( torch.mean(featX0s[:, 1:], dim=1), occX0_sup, freeX0_sup, torch.max(validX0s[:, 1:], dim=1)[0], summ_writer) occX0s_pred = __u(occX0s_pred_) total_loss += occ_loss if hyp.do_view: assert (hyp.do_feat) PH, PW = hyp.PH, hyp.PW sy = float(PH) / float(hyp.H) sx = float(PW) / float(hyp.W) assert (sx == 0.5) # else we need a fancier downsampler assert (sy == 0.5) projpix_T_cams = __u( utils_geom.scale_intrinsics(__p(pix_T_cams), sx, sy)) # st() if hyp.do_munit: feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR( projpix_T_cams[:, 0], camX0_T_camXs[:, 1], emb3D_e_X1, # use feat1 to predict rgb0 hyp.view_depth, PH, PW) feat_projX00_og = utils_vox.apply_pixX_T_memR_to_voxR( projpix_T_cams[:, 0], camX0_T_camXs[:, 1], emb3D_e_X1_og, # use feat1 to predict 
rgb0 hyp.view_depth, PH, PW) # only for checking the style styled_feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR( projpix_T_cams[:, 0], camX0_T_camXs[:, 1], styled_emb3D_e_X1, # use feat1 to predict rgb0 hyp.view_depth, PH, PW) if hyp.cycle_style_view_loss: styled_feat_projX00_cycle = utils_vox.apply_pixX_T_memR_to_voxR( projpix_T_cams[:, 0], camX0_T_camXs[:, 1], styled_emb3D_e_X1_cycle, # use feat1 to predict rgb0 hyp.view_depth, PH, PW) else: feat_projX00 = utils_vox.apply_pixX_T_memR_to_voxR( projpix_T_cams[:, 0], camX0_T_camXs[:, 1], featXs[:, 1], # use feat1 to predict rgb0 hyp.view_depth, PH, PW) rgb_X00 = utils_basic.downsample(rgb_camXs[:, 0], 2) rgb_X01 = utils_basic.downsample(rgb_camXs[:, 1], 2) valid_X00 = utils_basic.downsample(valid_camXs[:, 0], 2) view_loss, rgb_e, emb2D_e = self.viewnet(feat_projX00, rgb_X00, valid_X00, summ_writer, "rgb") if hyp.do_munit: _, rgb_e, emb2D_e = self.viewnet(feat_projX00_og, rgb_X00, valid_X00, summ_writer, "rgb_og") if hyp.do_munit: styled_view_loss, styled_rgb_e, styled_emb2D_e = self.viewnet( styled_feat_projX00, rgb_X00, valid_X00, summ_writer, "recon_style") if hyp.cycle_style_view_loss: styled_view_loss_cycle, styled_rgb_e_cycle, styled_emb2D_e_cycle = self.viewnet( styled_feat_projX00_cycle, rgb_X00, valid_X00, summ_writer, "recon_style_cycle") rgb_input_1 = torch.cat( [rgb_X01[1], rgb_X01[0], styled_rgb_e[0]], dim=2) rgb_input_2 = torch.cat( [rgb_X01[0], rgb_X01[1], styled_rgb_e[1]], dim=2) complete_vis = torch.cat([rgb_input_1, rgb_input_2], dim=1) summ_writer.summ_rgb('munit/munit_recons_vis', complete_vis.unsqueeze(0)) if not hyp.do_munit: total_loss += view_loss else: if hyp.basic_view_loss: total_loss += view_loss if hyp.style_view_loss: total_loss += styled_view_loss if hyp.cycle_style_view_loss: total_loss += styled_view_loss_cycle summ_writer.summ_scalar('loss', total_loss.cpu().item()) if hyp.save_embed_tsne: for index, class_val in enumerate(object_classes): class_val_content, class_val_style = class_val.split("/") style_val = styles[index].squeeze().unsqueeze(0) self.cluster_pool.update(style_val, [class_val_style]) print(self.cluster_pool.num) if self.cluster_pool.is_full(): embeds, classes = self.cluster_pool.fetch() with open("offline_cluster" + '/%st.txt' % 'classes', 'w') as f: for class_val in classes: f.write("%s\n" % class_val) with open("offline_cluster" + '/%st.txt' % 'embeddings', 'w') as f: for index, embed in enumerate(embeds): # embed = utils_basic.l2_normalize(embed,dim=0) print("writing {} embed".format(index)) embed_l_s = [str(i) for i in embed.tolist()] embed_str = '\t'.join(embed_l_s) f.write("%s\n" % embed_str) return total_loss, results
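## ... sketch: few-shot classification against averaged class prototypes ... ##
# The run_few_shot_on_munit branch above classifies a style/content code by
# L2-normalizing it, taking dot products against a bank of (optionally averaged)
# class embeddings, and picking the argmax key. A minimal standalone version of
# that retrieval step, under the hypothetical helper name classify_by_prototypes
# (the real loop additionally tracks the tp_style/tp_content counters):
import torch
import torch.nn.functional as F

def classify_by_prototypes(query, proto_dict):
    # query: (1, C) embedding; proto_dict: {class_name: (C,) prototype embedding}
    names = list(proto_dict.keys())
    protos = F.normalize(torch.stack([proto_dict[n] for n in names]), dim=1)  # (K, C)
    query = F.normalize(query, dim=1)                                         # (1, C)
    scores = torch.matmul(query, protos.t())                                  # (1, K)
    return names[int(torch.argmax(scores, dim=1))]

# e.g.: classify_by_prototypes(torch.randn(1, 16),
#                              {'cube': torch.randn(16), 'sphere': torch.randn(16)})
## ... end sketch ... ##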
def get_gt_flow(obj_lrtlist_camRs, obj_scorelist, camRs_T_camXs, Z, Y, X, K=2, mod='', vis=True, summ_writer=None): # this constructs the flow field according to the given # box trajectories (obj_lrtlist_camRs) (collected from a moving camR) # and egomotion (encoded in camRs_T_camXs) # (so they do not take into account egomotion) # so, we first generate the flow for all the objects, # then in the background, put the ego flow N, B, S, D = list(obj_lrtlist_camRs.shape) assert (S == 2) # as a flow util, this expects S=2 flows = [] masks = [] for k in list(range(K)): obj_masklistR0 = utils_vox.assemble_padded_obj_masklist( obj_lrtlist_camRs[k, :, 0:1], obj_scorelist[k, :, 0:1], Z, Y, X, coeff=1.0) # this is B x 1(N) x 1(C) x Z x Y x Z # obj_masklistR0 = obj_masklistR0.squeeze(1) # this is B x 1 x Z x Y x X obj_mask0 = obj_masklistR0.squeeze(1) # this is B x 1 x Z x Y x X camR_T_cam0 = camRs_T_camXs[:, 0] camR_T_cam1 = camRs_T_camXs[:, 1] cam0_T_camR = utils_geom.safe_inverse(camR_T_cam0) cam1_T_camR = utils_geom.safe_inverse(camR_T_cam1) # camR0_T_camR1 = camR0_T_camRs[:,1] # camR1_T_camR0 = utils_geom.safe_inverse(camR0_T_camR1) # obj_masklistA1 = utils_vox.apply_4x4_to_vox(camR1_T_camR0, obj_masklistA0) # if vis and (summ_writer is not None): # summ_writer.summ_occ('flow/obj%d_maskA0' % k, obj_masklistA0) # summ_writer.summ_occ('flow/obj%d_maskA1' % k, obj_masklistA1) if vis and (summ_writer is not None): # summ_writer.summ_occ('flow/obj%d_mask0' % k, obj_mask0) summ_writer.summ_oned('flow/obj%d_mask0_%s' % (k, mod), torch.mean(obj_mask0, 3)) _, ref_T_objs_list = utils_geom.split_lrtlist(obj_lrtlist_camRs[k]) # this is B x S x 4 x 4 ref_T_obj0 = ref_T_objs_list[:, 0] ref_T_obj1 = ref_T_objs_list[:, 1] obj0_T_ref = utils_geom.safe_inverse(ref_T_obj0) obj1_T_ref = utils_geom.safe_inverse(ref_T_obj1) # these are B x 4 x 4 mem_T_ref = utils_vox.get_mem_T_ref(B, Z, Y, X) ref_T_mem = utils_vox.get_ref_T_mem(B, Z, Y, X) ref1_T_ref0 = utils_basic.matmul2(ref_T_obj1, obj0_T_ref) cam1_T_cam0 = utils_basic.matmul3(cam1_T_camR, ref1_T_ref0, camR_T_cam0) mem1_T_mem0 = utils_basic.matmul3(mem_T_ref, cam1_T_cam0, ref_T_mem) xyz_mem0 = utils_basic.gridcloud3D(B, Z, Y, X) xyz_mem1 = utils_geom.apply_4x4(mem1_T_mem0, xyz_mem0) xyz_mem0 = xyz_mem0.reshape(B, Z, Y, X, 3) xyz_mem1 = xyz_mem1.reshape(B, Z, Y, X, 3) # only use these displaced points within the obj mask # obj_mask03 = obj_mask0.view(B, Z, Y, X, 1).repeat(1, 1, 1, 1, 3) obj_mask0 = obj_mask0.view(B, Z, Y, X, 1) # # xyz_mem1[(obj_mask03 < 1.0).bool()] = xyz_mem0 # cond = (obj_mask03 < 1.0).float() cond = (obj_mask0 > 0.0).float() xyz_mem1 = cond * xyz_mem1 + (1.0 - cond) * xyz_mem0 flow = xyz_mem1 - xyz_mem0 flow = flow.permute(0, 4, 1, 2, 3) obj_mask0 = obj_mask0.permute(0, 4, 1, 2, 3) # if vis and k==0: if vis: summ_writer.summ_3D_flow('flow/gt_%d_%s' % (k, mod), flow, clip=4.0) masks.append(obj_mask0) flows.append(flow) camR_T_cam0 = camRs_T_camXs[:, 0] camR_T_cam1 = camRs_T_camXs[:, 1] cam0_T_camR = utils_geom.safe_inverse(camR_T_cam0) cam1_T_camR = utils_geom.safe_inverse(camR_T_cam1) mem_T_ref = utils_vox.get_mem_T_ref(B, Z, Y, X) ref_T_mem = utils_vox.get_ref_T_mem(B, Z, Y, X) cam1_T_cam0 = utils_basic.matmul2(cam1_T_camR, camR_T_cam0) mem1_T_mem0 = utils_basic.matmul3(mem_T_ref, cam1_T_cam0, ref_T_mem) xyz_mem0 = utils_basic.gridcloud3D(B, Z, Y, X) xyz_mem1 = utils_geom.apply_4x4(mem1_T_mem0, xyz_mem0) xyz_mem0 = xyz_mem0.reshape(B, Z, Y, X, 3) xyz_mem1 = xyz_mem1.reshape(B, Z, Y, X, 3) flow = xyz_mem1 - xyz_mem0 flow = flow.permute(0, 4, 1, 
2, 3) bkg_flow = flow # allow zero motion in the bkg any_mask = torch.max(torch.stack(masks, axis=0), axis=0)[0] masks.append(1.0 - any_mask) flows.append(bkg_flow) flows = torch.stack(flows, axis=0) masks = torch.stack(masks, axis=0) masks = masks.repeat(1, 1, 3, 1, 1, 1) flow = utils_basic.reduce_masked_mean(flows, masks, dim=0) if vis: summ_writer.summ_3D_flow('flow/gt_complete', flow, clip=4.0) # flow is shaped B x 3 x D x H x W return flow
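## ... sketch: mask-weighted composition of object and background flows ... ##
# get_gt_flow merges the K per-object flow fields with the background (ego) flow
# by a mask-weighted mean, flow = sum_k m_k * f_k / sum_k m_k, which is what
# utils_basic.reduce_masked_mean computes over the stacked K+1 fields. A
# standalone version, assuming nonnegative masks that cover every voxel at least
# once (compose_flows is a hypothetical name used only for this illustration):
import torch

def compose_flows(flows, masks, eps=1e-6):
    # flows: (K+1, B, 3, Z, Y, X); masks: (K+1, B, 3, Z, Y, X), nonnegative
    num = (flows * masks).sum(dim=0)
    den = masks.sum(dim=0).clamp(min=eps)
    return num / den  # (B, 3, Z, Y, X)
## ... end sketch ... ##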
def __getitem__(self, index): if hyp.dataset_name == 'kitti' or hyp.dataset_name == 'clevr' or hyp.dataset_name == 'real' or hyp.dataset_name == "bigbird" or hyp.dataset_name == "carla" or hyp.dataset_name == "carla_mix" or hyp.dataset_name == "carla_det" or hyp.dataset_name == "replica" or hyp.dataset_name == "clevr_vqa": # print(index) filename = self.records[index] d = pickle.load(open(filename, "rb")) d = dict(d) # elif hyp.dataset_name=="carla": # filename = self.records[index] # d = np.load(filename) # d = dict(d) # d['rgb_camXs_raw'] = d['rgb_camXs'] # d['pix_T_cams_raw'] = d['pix_T_cams'] # d['tree_seq_filename'] = "dummy_tree_filename" # d['origin_T_camXs_raw'] = d['origin_T_camXs'] # d['camR_T_origin_raw'] = utils_geom.safe_inverse(torch.from_numpy(d['origin_T_camRs'])).numpy() # d['xyz_camXs_raw'] = d['xyz_camXs'] else: assert (False) # reader not ready yet # st() # if hyp.save_gt_occs: # pickle.dump(d,open(filename, "wb")) # st() # st() if hyp.use_gt_occs: __p = lambda x: utils_basic.pack_seqdim(x, 1) __u = lambda x: utils_basic.unpack_seqdim(x, 1) B, H, W, V, S, N = hyp.B, hyp.H, hyp.W, hyp.V, hyp.S, hyp.N PH, PW = hyp.PH, hyp.PW K = hyp.K BOX_SIZE = hyp.BOX_SIZE Z, Y, X = hyp.Z, hyp.Y, hyp.X Z2, Y2, X2 = int(Z / 2), int(Y / 2), int(X / 2) Z4, Y4, X4 = int(Z / 4), int(Y / 4), int(X / 4) D = 9 pix_T_cams = torch.from_numpy( d["pix_T_cams_raw"]).unsqueeze(0).cuda().to(torch.float) camRs_T_origin = torch.from_numpy( d["camR_T_origin_raw"]).unsqueeze(0).cuda().to(torch.float) origin_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_origin))) origin_T_camXs = torch.from_numpy( d["origin_T_camXs_raw"]).unsqueeze(0).cuda().to(torch.float) camX0_T_camXs = utils_geom.get_camM_T_camXs(origin_T_camXs, ind=0) camRs_T_camXs = __u( torch.matmul(utils_geom.safe_inverse(__p(origin_T_camRs)), __p(origin_T_camXs))) camXs_T_camRs = __u(utils_geom.safe_inverse(__p(camRs_T_camXs))) camX0_T_camRs = camXs_T_camRs[:, 0] camX1_T_camRs = camXs_T_camRs[:, 1] camR_T_camX0 = utils_geom.safe_inverse(camX0_T_camRs) xyz_camXs = torch.from_numpy( d["xyz_camXs_raw"]).unsqueeze(0).cuda().to(torch.float) xyz_camRs = __u( utils_geom.apply_4x4(__p(camRs_T_camXs), __p(xyz_camXs))) depth_camXs_, valid_camXs_ = utils_geom.create_depth_image( __p(pix_T_cams), __p(xyz_camXs), H, W) dense_xyz_camXs_ = utils_geom.depth2pointcloud( depth_camXs_, __p(pix_T_cams)) occXs = __u(utils_vox.voxelize_xyz(__p(xyz_camXs), Z, Y, X)) occRs_half = __u(utils_vox.voxelize_xyz(__p(xyz_camRs), Z2, Y2, X2)) occRs_half = torch.max(occRs_half, dim=1).values.squeeze(0) occ_complete = occRs_half.cpu().numpy() # st() if hyp.do_empty: item_names = [ 'pix_T_cams_raw', 'origin_T_camXs_raw', 'camR_T_origin_raw', 'rgb_camXs_raw', 'xyz_camXs_raw', 'empty_rgb_camXs_raw', 'empty_xyz_camXs_raw', ] else: item_names = [ 'pix_T_cams_raw', 'origin_T_camXs_raw', 'camR_T_origin_raw', 'rgb_camXs_raw', 'xyz_camXs_raw', ] # if hyp.do_time_flip: # d = random_time_flip_single(d,item_names) # if the sequence length > 2, select S frames # filename = d['raw_seq_filename'] original_filename = filename if hyp.dataset_name == "carla_mix" or hyp.dataset_name == "carla_det": bbox_origin_gt = d['bbox_origin'] if 'bbox_origin_predicted' in d: bbox_origin_predicted = d['bbox_origin_predicted'] else: bbox_origin_predicted = [] classes = d['obj_name'] if isinstance(classes, str): classes = [classes] # st() d['tree_seq_filename'] = "temp" if hyp.dataset_name == "replica": d['tree_seq_filename'] = "temp" object_category = d['object_category_names'] bbox_origin = d['bbox_origin'] 
if hyp.dataset_name == "clevr_vqa": d['tree_seq_filename'] = "temp" pix_T_cams = d['pix_T_cams_raw'] num_cams = pix_T_cams.shape[0] # padding_1 = torch.zeros([num_cams,1,3]) # padding_2 = torch.zeros([num_cams,4,1]) # padding_2[:,3] = 1.0 # st() # pix_T_cams = torch.cat([pix_T_cams,padding_1],dim=1) # pix_T_cams = torch.cat([pix_T_cams,padding_2],dim=2) # st() shape_name = d['shape_list'] color_name = d['color_list'] material_name = d['material_list'] all_name = [] all_style = [] for index in range(len(shape_name)): name = shape_name[index] + "/" + color_name[ index] + "_" + material_name[index] style_name = color_name[index] + "_" + material_name[index] all_name.append(name) all_style.append(style_name) # st() if hyp.do_shape: class_name = shape_name elif hyp.do_color: class_name = color_name elif hyp.do_material: class_name = material_name elif hyp.do_style: class_name = all_style else: class_name = all_name object_category = class_name bbox_origin = d['bbox_origin'] # st() if hyp.dataset_name == "carla": camR_index = d['camR_index'] rgb_camtop = d['rgb_camXs_raw'][camR_index:camR_index + 1] origin_T_camXs_top = d['origin_T_camXs_raw'][ camR_index:camR_index + 1] # predicted_box = d['bbox_origin_predicted'] predicted_box = [] filename = d['tree_seq_filename'] if hyp.do_2d_style_munit: d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S) # st() if hyp.fixed_view: d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S) elif self.shuffle or hyp.randomly_select_views: d, indexes = random_select_single(d, item_names, num_samples=hyp.S) else: d, indexes = non_random_select_single(d, item_names, num_samples=hyp.S) filename_g = "/".join([original_filename, str(indexes[0])]) filename_e = "/".join([original_filename, str(indexes[1])]) rgb_camXs = d['rgb_camXs_raw'] # move channel dim inward, like pytorch wants # rgb_camRs = np.transpose(rgb_camRs, axes=[0, 3, 1, 2]) rgb_camXs = np.transpose(rgb_camXs, axes=[0, 3, 1, 2]) rgb_camXs = rgb_camXs[:, :3] rgb_camXs = utils_improc.preprocess_color(rgb_camXs) if hyp.dataset_name == "carla": rgb_camtop = np.transpose(rgb_camtop, axes=[0, 3, 1, 2]) rgb_camtop = rgb_camtop[:, :3] rgb_camtop = utils_improc.preprocess_color(rgb_camtop) d['rgb_camtop'] = rgb_camtop d['origin_T_camXs_top'] = origin_T_camXs_top if len(predicted_box) == 0: predicted_box = np.zeros([hyp.N, 6]) score = np.zeros([hyp.N]).astype(np.float32) else: num_boxes = predicted_box.shape[0] score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes]) predicted_box = np.pad(predicted_box, [[0, hyp.N - num_boxes], [0, 0]]) d['predicted_box'] = predicted_box.astype(np.float32) d['predicted_scores'] = score.astype(np.float32) if hyp.dataset_name == "clevr_vqa": num_boxes = bbox_origin.shape[0] bbox_origin = np.array(bbox_origin) score = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes]) bbox_origin = np.pad(bbox_origin, [[0, hyp.N - num_boxes], [0, 0], [0, 0]]) object_category = np.pad(object_category, [[0, hyp.N - num_boxes]], lambda x, y, z, m: "0") d['gt_box'] = bbox_origin.astype(np.float32) d['gt_scores'] = score.astype(np.float32) d['classes'] = list(object_category) if hyp.dataset_name == "replica": if len(bbox_origin) == 0: score = np.zeros([hyp.N]) bbox_origin = np.zeros([hyp.N, 6]) object_category = ["0"] * hyp.N object_category = np.array(object_category) else: num_boxes = len(bbox_origin) bbox_origin = torch.stack(bbox_origin).numpy().squeeze( 1).squeeze(1).reshape([num_boxes, 6]) bbox_origin = np.array(bbox_origin) score = 
np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes]) bbox_origin = np.pad(bbox_origin, [[0, hyp.N - num_boxes], [0, 0]]) object_category = np.pad(object_category, [[0, hyp.N - num_boxes]], lambda x, y, z, m: "0") d['gt_box'] = bbox_origin.astype(np.float32) d['gt_scores'] = score.astype(np.float32) d['classes'] = list(object_category) # st() if hyp.dataset_name == "carla_mix" or hyp.dataset_name == "carla_det": bbox_origin_predicted = bbox_origin_predicted[:3] if len(bbox_origin_gt.shape) == 1: bbox_origin_gt = np.expand_dims(bbox_origin_gt, 0) num_boxes = bbox_origin_gt.shape[0] # st() score_gt = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes]) bbox_origin_gt = np.pad(bbox_origin_gt, [[0, hyp.N - num_boxes], [0, 0]]) # st() classes = np.pad(classes, [[0, hyp.N - num_boxes]], lambda x, y, z, m: "0") if len(bbox_origin_predicted) == 0: bbox_origin_predicted = np.zeros([hyp.N, 6]) score_pred = np.zeros([hyp.N]).astype(np.float32) else: num_boxes = bbox_origin_predicted.shape[0] score_pred = np.pad(np.ones([num_boxes]), [0, hyp.N - num_boxes]) bbox_origin_predicted = np.pad( bbox_origin_predicted, [[0, hyp.N - num_boxes], [0, 0]]) d['predicted_box'] = bbox_origin_predicted.astype(np.float32) d['predicted_scores'] = score_pred.astype(np.float32) d['gt_box'] = bbox_origin_gt.astype(np.float32) d['gt_scores'] = score_gt.astype(np.float32) d['classes'] = list(classes) d['rgb_camXs_raw'] = rgb_camXs if hyp.dataset_name != "carla" and hyp.do_empty: empty_rgb_camXs = d['empty_rgb_camXs_raw'] # move channel dim inward, like pytorch wants empty_rgb_camXs = np.transpose(empty_rgb_camXs, axes=[0, 3, 1, 2]) empty_rgb_camXs = empty_rgb_camXs[:, :3] empty_rgb_camXs = utils_improc.preprocess_color(empty_rgb_camXs) d['empty_rgb_camXs_raw'] = empty_rgb_camXs # st() if hyp.use_gt_occs: d['occR_complete'] = occ_complete d['tree_seq_filename'] = filename d['filename_e'] = filename_e d['filename_g'] = filename_g return d
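## ... sketch: padding a variable box count up to the fixed N slots ... ##
# __getitem__ repeatedly pads a variable number of ground-truth (or predicted)
# boxes up to the fixed hyp.N slots and builds a score vector marking which slots
# are real. The pattern, factored into a hypothetical standalone helper
# (pad_boxes_to_N is not part of this codebase; it just restates the np.pad calls
# above):
import numpy as np

def pad_boxes_to_N(boxes, N):
    # boxes: (num_boxes, 6) array, possibly empty; returns (N, 6) boxes, (N,) scores
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 6)
    num_boxes = boxes.shape[0]
    assert num_boxes <= N, 'more boxes than available slots'
    scores = np.pad(np.ones(num_boxes, dtype=np.float32), (0, N - num_boxes))
    boxes = np.pad(boxes, ((0, N - num_boxes), (0, 0)))
    return boxes, scores

# e.g.: pad_boxes_to_N(np.zeros((2, 6)), N=3) -> boxes (3, 6), scores [1., 1., 0.]
## ... end sketch ... ##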
def prepare_common_tensors(self, feed, prep_summ=True): results = dict() if prep_summ: self.summ_writer = utils_improc.Summ_writer( writer=feed['writer'], global_step=feed['global_step'], log_freq=feed['set_log_freq'], fps=8, just_gif=feed['just_gif'], ) else: self.summ_writer = None self.include_vis = hyp.do_include_vis self.B = feed["set_batch_size"] self.S = feed["set_seqlen"] __p = lambda x: utils_basic.pack_seqdim(x, self.B) __u = lambda x: utils_basic.unpack_seqdim(x, self.B) self.H, self.W, self.V, self.N = hyp.H, hyp.W, hyp.V, hyp.N self.PH, self.PW = hyp.PH, hyp.PW self.K = hyp.K self.set_name = feed['set_name'] # print('set_name', self.set_name) if self.set_name == 'test': self.Z, self.Y, self.X = hyp.Z_test, hyp.Y_test, hyp.X_test else: self.Z, self.Y, self.X = hyp.Z, hyp.Y, hyp.X # print('Z, Y, X = %d, %d, %d' % (self.Z, self.Y, self.X)) self.Z2, self.Y2, self.X2 = int(self.Z / 2), int(self.Y / 2), int( self.X / 2) self.Z4, self.Y4, self.X4 = int(self.Z / 4), int(self.Y / 4), int( self.X / 4) self.rgb_camXs = feed["rgb_camXs"] self.pix_T_cams = feed["pix_T_cams"] self.origin_T_camXs = feed["origin_T_camXs"] self.cams_T_velos = feed["cams_T_velos"] self.camX0s_T_camXs = utils_geom.get_camM_T_camXs(self.origin_T_camXs, ind=0) self.camXs_T_camX0s = __u( utils_geom.safe_inverse(__p(self.camX0s_T_camXs))) self.xyz_veloXs = feed["xyz_veloXs"] self.xyz_camXs = __u( utils_geom.apply_4x4(__p(self.cams_T_velos), __p(self.xyz_veloXs))) self.xyz_camX0s = __u( utils_geom.apply_4x4(__p(self.camX0s_T_camXs), __p(self.xyz_camXs))) if self.set_name == 'test': self.boxlist_camXs = feed["boxlists"] self.scorelist_s = feed["scorelists"] self.tidlist_s = feed["tidlists"] boxlist_camXs_ = __p(self.boxlist_camXs) scorelist_s_ = __p(self.scorelist_s) tidlist_s_ = __p(self.tidlist_s) boxlist_camXs_, tidlist_s_, scorelist_s_ = utils_misc.shuffle_valid_and_sink_invalid_boxes( boxlist_camXs_, tidlist_s_, scorelist_s_) self.boxlist_camXs = __u(boxlist_camXs_) self.scorelist_s = __u(scorelist_s_) self.tidlist_s = __u(tidlist_s_) # self.boxlist_camXs[:,0], self.scorelist_s[:,0], self.tidlist_s[:,0] = utils_misc.shuffle_valid_and_sink_invalid_boxes( # self.boxlist_camXs[:,0], self.tidlist_s[:,0], self.scorelist_s[:,0]) # self.score_s = feed["scorelists"] # self.tid_s = torch.ones_like(self.score_s).long() # self.lrt_camRs = utils_geom.convert_boxlist_to_lrtlist(self.box_camRs) # self.lrt_camXs = utils_geom.apply_4x4s_to_lrts(self.camXs_T_camRs, self.lrt_camRs) # self.lrt_camX0s = utils_geom.apply_4x4s_to_lrts(self.camX0s_T_camXs, self.lrt_camXs) # self.lrt_camR0s = utils_geom.apply_4x4s_to_lrts(self.camR0s_T_camRs, self.lrt_camRs) # boxlist_camXs_ = __p(self.boxlist_camXs) # boxlist_camXs_ = __p(self.boxlist_camXs) # lrtlist_camXs = __u(utils_geom.convert_boxlist_to_lrtlist(__p(self.boxlist_camXs))).reshape( # self.B, self.S, self.N, 19) self.lrtlist_camXs = __u( utils_geom.convert_boxlist_to_lrtlist(__p(self.boxlist_camXs))) # print('lrtlist_camXs', lrtlist_camXs.shape) # # self.B, self.S, self.N, 19) # # lrtlist_camXs = __u(utils_geom.apply_4x4_to_lrtlist(__p(camXs_T_camRs), __p(lrtlist_camRs))) # self.summ_writer.summ_lrtlist('2D_inputs/lrtlist_camX0', self.rgb_camXs[:,0], lrtlist_camXs[:,0], # self.scorelist_s[:,0], self.tidlist_s[:,0], self.pix_T_cams[:,0]) # self.summ_writer.summ_lrtlist('2D_inputs/lrtlist_camX1', self.rgb_camXs[:,1], lrtlist_camXs[:,1], # self.scorelist_s[:,1], self.tidlist_s[:,1], self.pix_T_cams[:,1]) ( self.lrt_camXs, self.box_camXs, self.score_s, ) = 
utils_misc.collect_object_info(self.lrtlist_camXs, self.boxlist_camXs, self.tidlist_s, self.scorelist_s, 1, mod='X', do_vis=False, summ_writer=None) self.lrt_camXs = self.lrt_camXs.squeeze(0) self.score_s = self.score_s.squeeze(0) self.tid_s = torch.ones_like(self.score_s).long() self.lrt_camX0s = utils_geom.apply_4x4s_to_lrts( self.camX0s_T_camXs, self.lrt_camXs) if prep_summ and self.include_vis: visX_g = [] for s in list(range(self.S)): visX_g.append( self.summ_writer.summ_lrtlist('', self.rgb_camXs[:, s], self.lrtlist_camXs[:, s], self.scorelist_s[:, s], self.tidlist_s[:, s], self.pix_T_cams[:, 0], only_return=True)) self.summ_writer.summ_rgbs('2D_inputs/box_camXs', visX_g) # visX_g = [] # for s in list(range(self.S)): # visX_g.append(self.summ_writer.summ_lrtlist( # 'track/box_camX%d_g' % s, self.rgb_camXs[:,s], self.lrt_camXs[:,s:s+1], # self.score_s[:,s:s+1], self.tid_s[:,s:s+1], self.pix_T_cams[:,0], only_return=True)) # self.summ_writer.summ_rgbs('track/box_camXs_g', visX_g) if self.set_name == 'test': # center on an object, so that it does not fall out of bounds self.scene_centroid = utils_geom.get_clist_from_lrtlist( self.lrt_camXs)[:, 0] self.vox_util = vox_util.Vox_util( self.Z, self.Y, self.X, self.set_name, scene_centroid=self.scene_centroid, assert_cube=True) else: # center randomly scene_centroid_x = np.random.uniform(-8.0, 8.0) scene_centroid_y = np.random.uniform(-1.5, 3.0) scene_centroid_z = np.random.uniform(10.0, 26.0) scene_centroid = np.array( [scene_centroid_x, scene_centroid_y, scene_centroid_z]).reshape([1, 3]) self.scene_centroid = torch.from_numpy( scene_centroid).float().cuda() # center on a random non-outlier point all_ok = False num_tries = 0 while not all_ok: scene_centroid_x = np.random.uniform(-8.0, 8.0) scene_centroid_y = np.random.uniform(-1.5, 3.0) scene_centroid_z = np.random.uniform(10.0, 26.0) scene_centroid = np.array( [scene_centroid_x, scene_centroid_y, scene_centroid_z]).reshape([1, 3]) self.scene_centroid = torch.from_numpy( scene_centroid).float().cuda() num_tries += 1 # try to vox self.vox_util = vox_util.Vox_util( self.Z, self.Y, self.X, self.set_name, scene_centroid=self.scene_centroid, assert_cube=True) all_ok = True # we want to ensure this gives us a few points inbound for each batch el inb = __u( self.vox_util.get_inbounds(__p(self.xyz_camX0s), self.Z4, self.Y4, self.X, already_mem=False)) num_inb = torch.sum(inb.float(), axis=2) if torch.min(num_inb) < 100: all_ok = False if num_tries > 100: return False self.summ_writer.summ_scalar('zoom_sampling/num_tries', num_tries) self.summ_writer.summ_scalar('zoom_sampling/num_inb', torch.mean(num_inb).cpu().item()) self.occ_memXs = __u( self.vox_util.voxelize_xyz(__p(self.xyz_camXs), self.Z, self.Y, self.X)) self.occ_memX0s = __u( self.vox_util.voxelize_xyz(__p(self.xyz_camX0s), self.Z, self.Y, self.X)) self.occ_memX0s_half = __u( self.vox_util.voxelize_xyz(__p(self.xyz_camX0s), self.Z2, self.Y2, self.X2)) self.unp_memXs = __u( self.vox_util.unproject_rgb_to_mem(__p(self.rgb_camXs), self.Z, self.Y, self.X, __p(self.pix_T_cams))) self.unp_memX0s = self.vox_util.apply_4x4s_to_voxs( self.camX0s_T_camXs, self.unp_memXs) if prep_summ and self.include_vis: self.summ_writer.summ_rgbs('2D_inputs/rgb_camXs', torch.unbind(self.rgb_camXs, dim=1)) self.summ_writer.summ_occs('3D_inputs/occ_memXs', torch.unbind(self.occ_memXs, dim=1)) self.summ_writer.summ_occs('3D_inputs/occ_memX0s', torch.unbind(self.occ_memX0s, dim=1)) self.summ_writer.summ_rgb('2D_inputs/rgb_camX0', self.rgb_camXs[:, 0]) # 
self.summ_writer.summ_oned('2D_inputs/depth_camX0', self.depth_camXs[:,0], maxval=20.0) # self.summ_writer.summ_oned('2D_inputs/valid_camX0', self.valid_camXs[:,0], norm=False) return True
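## ... sketch: resampling the scene centroid until enough points are inbound ... ##
# The non-test branch of prepare_common_tensors keeps drawing a random scene
# centroid, re-voxelizing, and checking that every batch element has at least
# 100 inbound points, giving up after 100 tries. The control flow in isolation,
# with a hypothetical count_inbounds callback standing in for the
# vox_util.get_inbounds check (the uniform ranges match the ones used above):
import numpy as np

def sample_scene_centroid(count_inbounds, min_pts=100, max_tries=100):
    for _ in range(max_tries):
        centroid = np.array([np.random.uniform(-8.0, 8.0),    # x
                             np.random.uniform(-1.5, 3.0),    # y
                             np.random.uniform(10.0, 26.0)])  # z
        centroid = centroid.reshape(1, 3)
        if count_inbounds(centroid) >= min_pts:
            return centroid
    return None  # caller should skip this example, as prepare_common_tensors does
## ... end sketch ... ##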
def forward(self, feat, summ_writer, mask=None,prefix=""): total_loss = torch.tensor(0.0).cuda() B, C, D, H, W = list(feat.shape) if not hyp.onlyocc: summ_writer.summ_feat(f'feat/{prefix}feat0_input', feat) if hyp.feat_do_rt: # apply a random rt to the feat # Y_T_X = utils_geom.get_random_rt(B, r_amount=5.0, t_amount=8.0).cuda() # Y_T_X = utils_geom.get_random_rt(B, r_amount=1.0, t_amount=8.0).cuda() Y_T_X = utils_geom.get_random_rt(B, r_amount=1.0, t_amount=4.0).cuda() feat = utils_vox.apply_4x4_to_vox(Y_T_X, feat) summ_writer.summ_feat(f'feat/{prefix}feat1_rt', feat) if hyp.feat_do_flip: # randomly flip the input flip0 = torch.rand(1) flip1 = torch.rand(1) flip2 = torch.rand(1) if flip0 > 0.5: # transpose width/depth (rotate 90deg) feat = feat.permute(0,1,4,3,2) if flip1 > 0.5: # flip depth feat = feat.flip(2) if flip2 > 0.5: # flip width feat = feat.flip(4) summ_writer.summ_feat(f'feat/{prefix}feat2_flip', feat) if hyp.feat_do_sb: feat = self.net(feat, mask) elif hyp.feat_do_sparse_invar: feat, mask = self.net(feat, mask) else: if hyp.feat_quantize: feat,feat_uq,loss,encodings,perplexity = self.net(feat) total_loss = utils_misc.add_loss('feat_loss',total_loss, loss,hyp.feat_coeff,summ_writer) summ_writer.summ_scalar('feat/perplexity',perplexity) summ_writer.summ_histogram('feat/encodings',encodings) ## Visualizing encodings will make training very slow. ## Use this only for debugging. # feat_uq = feat_uq[:1] # encodings = encodings[:1] # B,C,D2,H2,W2 = feat_uq.shape # feat_uq = feat_uq.permute(0,2,3,4,1) # [B,D,H,W,C] # feat_uq = feat_uq.reshape(B*D2*H2*W2,C) # encodings = encodings.flatten() # [B*D2*H2*W2] # summ_writer.summ_embeddings('feat/emb_before_vqvae',feat_uq,encodings) del feat_uq,encodings,perplexity # Cleanup. else: feat = self.net(feat) feat = l2_normalize(feat, dim=1) summ_writer.summ_feat(f'feat/{prefix}feat3_out', feat) if hyp.feat_do_flip: if flip2 > 0.5: # unflip width feat = feat.flip(4) if flip1 > 0.5: # unflip depth feat = feat.flip(2) if flip0 > 0.5: # untranspose width/depth feat = feat.permute(0,1,4,3,2) summ_writer.summ_feat(f'feat/{prefix}feat4_unflip', feat) if hyp.feat_do_rt: # undo the random rt X_T_Y = utils_geom.safe_inverse(Y_T_X) feat = utils_vox.apply_4x4_to_vox(X_T_Y, feat) summ_writer.summ_feat(f'feat/{prefix}feat5_unrt', feat) # valid_mask = 1.0 - (feat==0).all(dim=1, keepdim=True).float() # if hyp.feat_do_sparse_invar: # valid_mask = valid_mask * mask return feat, total_loss
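## ... sketch: the flip/unflip augmentation trick in isolation ... ##
# The feat_do_flip branch above applies random axis flips (plus an optional
# depth/width transpose) before the 3D CNN and undoes them in reverse order on
# the output, so the features land back in the input frame while the net sees
# augmented orientations. A minimal version (flipped_forward is a hypothetical
# name; `net` is assumed to be any shape-preserving 3D module):
import torch

def flipped_forward(net, feat):
    # feat: (B, C, D, H, W)
    flip0, flip1, flip2 = torch.rand(3)
    if flip0 > 0.5: feat = feat.permute(0, 1, 4, 3, 2)  # transpose depth/width
    if flip1 > 0.5: feat = feat.flip(2)                 # flip depth
    if flip2 > 0.5: feat = feat.flip(4)                 # flip width
    feat = net(feat)
    if flip2 > 0.5: feat = feat.flip(4)                 # undo, in reverse order
    if flip1 > 0.5: feat = feat.flip(2)
    if flip0 > 0.5: feat = feat.permute(0, 1, 4, 3, 2)  # the transpose is self-inverse
    return feat
## ... end sketch ... ##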
def run_test(self, feed): results = dict() global_step = feed['global_step'] total_loss = torch.tensor(0.0).cuda() __p = lambda x: utils_basic.pack_seqdim(x, self.B) __u = lambda x: utils_basic.unpack_seqdim(x, self.B) self.obj_clist_camX0 = utils_geom.get_clist_from_lrtlist( self.lrt_camX0s) self.original_centroid = self.scene_centroid.clone() obj_lengths, cams_T_obj0 = utils_geom.split_lrtlist(self.lrt_camX0s) obj_length = obj_lengths[:, 0] for b in list(range(self.B)): if self.score_s[b, 0] < 1.0: # we need the template to exist print('returning early, since score_s[%d,0] = %.1f' % (b, self.score_s[b, 0].cpu().numpy())) return total_loss, results, True # if torch.sum(self.score_s[b]) < (self.S/2): if not (torch.sum(self.score_s[b]) == self.S): # the full traj should be valid print( 'returning early, since sum(score_s) = %d, while S = %d' % (torch.sum(self.score_s).cpu().numpy(), self.S)) return total_loss, results, True if hyp.do_feat3D: feat_memX0_input = torch.cat([ self.occ_memX0s[:, 0], self.unp_memX0s[:, 0] * self.occ_memX0s[:, 0], ], dim=1) _, feat_memX0, valid_memX0 = self.featnet3D(feat_memX0_input) B, C, Z, Y, X = list(feat_memX0.shape) S = self.S obj_mask_memX0s = self.vox_util.assemble_padded_obj_masklist( self.lrt_camX0s, self.score_s, Z, Y, X).squeeze(1) # only take the occupied voxels occ_memX0 = self.vox_util.voxelize_xyz(self.xyz_camX0s[:, 0], Z, Y, X) # obj_mask_memX0 = obj_mask_memX0s[:,0] * occ_memX0 obj_mask_memX0 = obj_mask_memX0s[:, 0] # discard the known freespace _, free_memX0_, _, _ = self.vox_util.prep_occs_supervision( self.camX0s_T_camXs[:, 0:1], self.xyz_camXs[:, 0:1], Z, Y, X, agg=True) free_memX0 = free_memX0_.squeeze(1) obj_mask_memX0 = obj_mask_memX0 * (1.0 - free_memX0) for b in list(range(self.B)): if torch.sum(obj_mask_memX0[b] * occ_memX0[b]) <= 8: print( 'returning early, since there are not enough valid object points' ) return total_loss, results, True # for b in list(range(self.B)): # sum_b = torch.sum(obj_mask_memX0[b]) # print('sum_b', sum_b.detach().cpu().numpy()) # if sum_b > 1000: # obj_mask_memX0[b] *= occ_memX0[b] # sum_b = torch.sum(obj_mask_memX0[b]) # print('reducing this to', sum_b.detach().cpu().numpy()) feat0_vec = feat_memX0.view(B, hyp.feat3D_dim, -1) # this is B x C x huge feat0_vec = feat0_vec.permute(0, 2, 1) # this is B x huge x C obj_mask0_vec = obj_mask_memX0.reshape(B, -1).round() occ_mask0_vec = occ_memX0.reshape(B, -1).round() free_mask0_vec = free_memX0.reshape(B, -1).round() # these are B x huge orig_xyz = utils_basic.gridcloud3D(B, Z, Y, X) # this is B x huge x 3 obj_lengths, cams_T_obj0 = utils_geom.split_lrtlist( self.lrt_camX0s) obj_length = obj_lengths[:, 0] cam0_T_obj = cams_T_obj0[:, 0] # this is B x S x 4 x 4 mem_T_cam = self.vox_util.get_mem_T_ref(B, Z, Y, X) cam_T_mem = self.vox_util.get_ref_T_mem(B, Z, Y, X) lrt_camIs_g = self.lrt_camX0s.clone() lrt_camIs_e = torch.zeros_like(self.lrt_camX0s) # we will fill this up ious = torch.zeros([B, S]).float().cuda() point_counts = np.zeros([B, S]) inb_counts = np.zeros([B, S]) feat_vis = [] occ_vis = [] for s in range(self.S): if not (s == 0): # remake the vox util and all the mem data self.scene_centroid = utils_geom.get_clist_from_lrtlist( lrt_camIs_e[:, s - 1:s])[:, 0] delta = self.scene_centroid - self.original_centroid self.vox_util = vox_util.Vox_util( self.Z, self.Y, self.X, self.set_name, scene_centroid=self.scene_centroid, assert_cube=True) self.occ_memXs = __u( self.vox_util.voxelize_xyz(__p(self.xyz_camXs), self.Z, self.Y, self.X)) self.occ_memX0s = __u( 
        for s in range(self.S):
            if not (s == 0):
                # remake the vox util and all the mem data
                self.scene_centroid = utils_geom.get_clist_from_lrtlist(
                    lrt_camIs_e[:, s - 1:s])[:, 0]
                delta = self.scene_centroid - self.original_centroid
                self.vox_util = vox_util.Vox_util(
                    self.Z, self.Y, self.X,
                    self.set_name,
                    scene_centroid=self.scene_centroid,
                    assert_cube=True)
                self.occ_memXs = __u(self.vox_util.voxelize_xyz(
                    __p(self.xyz_camXs), self.Z, self.Y, self.X))
                self.occ_memX0s = __u(self.vox_util.voxelize_xyz(
                    __p(self.xyz_camX0s), self.Z, self.Y, self.X))
                self.unp_memXs = __u(self.vox_util.unproject_rgb_to_mem(
                    __p(self.rgb_camXs), self.Z, self.Y, self.X,
                    __p(self.pix_T_cams)))
                self.unp_memX0s = self.vox_util.apply_4x4s_to_voxs(
                    self.camX0s_T_camXs, self.unp_memXs)
                self.summ_writer.summ_occ('track/reloc_occ_%d' % s,
                                          self.occ_memX0s[:, s])
            else:
                self.summ_writer.summ_occ('track/init_occ_%d' % s,
                                          self.occ_memX0s[:, s])
                delta = torch.zeros([B, 3]).float().cuda()
            # print('scene centroid:', self.scene_centroid.detach().cpu().numpy())

            occ_vis.append(self.summ_writer.summ_occ(
                '', self.occ_memX0s[:, s], only_return=True))

            # inb = __u(self.vox_util.get_inbounds(__p(self.xyz_camX0s), self.Z4, self.Y4, self.X, already_mem=False))
            inb = self.vox_util.get_inbounds(
                self.xyz_camX0s[:, s], self.Z4, self.Y4, self.X,
                already_mem=False)
            num_inb = torch.sum(inb.float(), axis=1)
            # print('num_inb', num_inb, num_inb.shape)
            inb_counts[:, s] = num_inb.cpu().numpy()

            feat_memI_input = torch.cat([
                self.occ_memX0s[:, s],
                self.unp_memX0s[:, s] * self.occ_memX0s[:, s],
            ], dim=1)
            _, feat_memI, valid_memI = self.featnet3D(feat_memI_input)

            self.summ_writer.summ_feat('3D_feats/feat_%d_input' % s,
                                       feat_memI_input, pca=True)
            self.summ_writer.summ_feat('3D_feats/feat_%d' % s,
                                       feat_memI, pca=True)
            feat_vis.append(self.summ_writer.summ_feat(
                '', feat_memI, pca=True, only_return=True))

            # collect freespace here, to discard bad matches
            _, free_memI_, _, _ = self.vox_util.prep_occs_supervision(
                self.camX0s_T_camXs[:, s:s + 1],
                self.xyz_camXs[:, s:s + 1],
                Z, Y, X,
                agg=True)
            free_memI = free_memI_.squeeze(1)

            feat_vec = feat_memI.view(B, hyp.feat3D_dim, -1)
            # this is B x C x huge
            feat_vec = feat_vec.permute(0, 2, 1)
            # this is B x huge x C

            memI_T_mem0 = utils_geom.eye_4x4(B)
            # we will fill this up

            # # put these on cpu, to save mem
            # feat0_vec = feat0_vec.detach().cpu()
            # feat_vec = feat_vec.detach().cpu()

            # to simplify the impl, we will iterate over the batch dim
            for b in list(range(B)):
                feat_vec_b = feat_vec[b]
                feat0_vec_b = feat0_vec[b]
                obj_mask0_vec_b = obj_mask0_vec[b]
                occ_mask0_vec_b = occ_mask0_vec[b]
                free_mask0_vec_b = free_mask0_vec[b]
                orig_xyz_b = orig_xyz[b]
                # these are huge x C

                careful = False
                if careful:
                    # start with occ points, since these are definitely observed
                    obj_inds_b = torch.where((occ_mask0_vec_b * obj_mask0_vec_b) > 0)
                    obj_vec_b = feat0_vec_b[obj_inds_b]
                    xyz0 = orig_xyz_b[obj_inds_b]
                    # these are med x C

                    # also take random non-free non-occ points in the mask
                    ok_mask = obj_mask0_vec_b * (1.0 - occ_mask0_vec_b) * (1.0 - free_mask0_vec_b)
                    alt_inds_b = torch.where(ok_mask > 0)
                    alt_vec_b = feat0_vec_b[alt_inds_b]
                    alt_xyz0 = orig_xyz_b[alt_inds_b]
                    # these are med x C

                    # issues arise when "med" is too large
                    num = len(alt_xyz0)
                    max_pts = 2000
                    if num > max_pts:
                        # print('have %d pts; taking a random set of %d pts inside' % (num, max_pts))
                        perm = np.random.permutation(num)
                        alt_vec_b = alt_vec_b[perm[:max_pts]]
                        alt_xyz0 = alt_xyz0[perm[:max_pts]]

                    obj_vec_b = torch.cat([obj_vec_b, alt_vec_b], dim=0)
                    xyz0 = torch.cat([xyz0, alt_xyz0], dim=0)
                    if s == 0:
                        print('have %d pts in total' % (len(xyz0)))
                else:
                    # take any points within the mask
                    obj_inds_b = torch.where(obj_mask0_vec_b > 0)
                    obj_vec_b = feat0_vec_b[obj_inds_b]
                    xyz0 = orig_xyz_b[obj_inds_b]
                    # these are med x C

                    # issues arise when "med" is too large
                    # trim down to max_pts
                    num = len(xyz0)
                    max_pts = 2000
                    if num > max_pts:
                        print('have %d pts; taking a random set of %d pts inside' % (num, max_pts))
                        perm = np.random.permutation(num)
                        obj_vec_b = obj_vec_b[perm[:max_pts]]
                        xyz0 = xyz0[perm[:max_pts]]
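                # Cross-correlate the template against the current frame:
                # corr_b[i, j] is the dot product between feature i of this
                # frame's volume and template feature j. Each template point
                # then votes for its new location via a soft argmax over its
                # freespace-suppressed heatmap, and the per-point matches
                # feed a rigid fit.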
                obj_vec_b = obj_vec_b.permute(1, 0)
                # this is C x med

                corr_b = torch.matmul(feat_vec_b, obj_vec_b)
                # this is huge x med

                heat_b = corr_b.permute(1, 0).reshape(-1, 1, Z, Y, X)
                # this is med x 1 x Z x Y x X

                # # for numerical stability, we sub the max, and mult by the resolution
                # heat_b_ = heat_b.reshape(-1, Z*Y*X)
                # heat_b_max = (torch.max(heat_b_, dim=1).values).reshape(-1, 1, 1, 1, 1)
                # heat_b = heat_b - heat_b_max
                # heat_b = heat_b * float(len(heat_b[0].reshape(-1)))

                # heat_b_ = heat_b.reshape(-1, Z*Y*X)
                # # heat_b_min = (torch.min(heat_b_, dim=1).values).reshape(-1, 1, 1, 1, 1)
                # heat_b_min = (torch.min(heat_b_).values)
                # free_b = free_memI[b:b+1]
                # print('free_b', free_b.shape)
                # print('heat_b', heat_b.shape)
                # heat_b[free_b > 0.0] = heat_b_min

                # make the min zero
                heat_b_ = heat_b.reshape(-1, Z * Y * X)
                heat_b_min = (torch.min(heat_b_, dim=1).values).reshape(-1, 1, 1, 1, 1)
                heat_b = heat_b - heat_b_min
                # zero out the freespace
                heat_b = heat_b * (1.0 - free_memI[b:b + 1])
                # make the max zero
                heat_b_ = heat_b.reshape(-1, Z * Y * X)
                heat_b_max = (torch.max(heat_b_, dim=1).values).reshape(-1, 1, 1, 1, 1)
                heat_b = heat_b - heat_b_max
                # scale up, for numerical stability
                heat_b = heat_b * float(len(heat_b[0].reshape(-1)))

                xyzI = utils_basic.argmax3D(heat_b, hard=False, stack=True)
                # xyzI = utils_basic.argmax3D(heat_b*float(Z*10), hard=False, stack=True)
                # this is med x 3

                xyzI_cam = self.vox_util.Mem2Ref(xyzI.unsqueeze(1), Z, Y, X)
                xyzI_cam += delta
                xyzI = self.vox_util.Ref2Mem(xyzI_cam, Z, Y, X).squeeze(1)

                memI_T_mem0[b] = utils_track.rigid_transform_3D(xyz0, xyzI)

                # record #points, since ransac depends on this
                point_counts[b, s] = len(xyz0)
            # done stepping through the batch

            mem0_T_memI = utils_geom.safe_inverse(memI_T_mem0)
            cam0_T_camI = utils_basic.matmul3(cam_T_mem, mem0_T_memI, mem_T_cam)

            # eval
            camI_T_obj = utils_basic.matmul4(
                cam_T_mem, memI_T_mem0, mem_T_cam, cam0_T_obj)
            # this is B x 4 x 4
            lrt_camIs_e[:, s] = utils_geom.merge_lrt(obj_length, camI_T_obj)
            ious[:, s] = utils_geom.get_iou_from_corresponded_lrtlists(
                lrt_camIs_e[:, s:s + 1], lrt_camIs_g[:, s:s + 1]).squeeze(1)
        results['ious'] = ious

        # if ious[0, -1] > 0.5:
        #     print('returning early, since acc is too high')
        #     return total_loss, results, True

        self.summ_writer.summ_rgbs('track/feats', feat_vis)
        self.summ_writer.summ_oneds('track/occs', occ_vis, norm=False)

        for s in range(self.S):
            self.summ_writer.summ_scalar(
                'track/mean_iou_%02d' % s,
                torch.mean(ious[:, s]).cpu().item())
        self.summ_writer.summ_scalar('track/mean_iou',
                                     torch.mean(ious).cpu().item())
        self.summ_writer.summ_scalar('track/point_counts',
                                     np.mean(point_counts))
        # self.summ_writer.summ_scalar('track/inb_counts', torch.mean(inb_counts).cpu().item())
        self.summ_writer.summ_scalar('track/inb_counts', np.mean(inb_counts))

        lrt_camX0s_e = lrt_camIs_e.clone()
        lrt_camXs_e = utils_geom.apply_4x4s_to_lrts(
            self.camXs_T_camX0s, lrt_camX0s_e)

        if self.include_vis:
            visX_e = []
            for s in list(range(self.S)):
                visX_e.append(self.summ_writer.summ_lrtlist(
                    'track/box_camX%d_e' % s,
                    self.rgb_camXs[:, s],
                    lrt_camXs_e[:, s:s + 1],
                    self.score_s[:, s:s + 1],
                    self.tid_s[:, s:s + 1],
                    self.pix_T_cams[:, 0],
                    only_return=True))
            self.summ_writer.summ_rgbs('track/box_camXs_e', visX_e)
            visX_g = []
            for s in list(range(self.S)):
                visX_g.append(self.summ_writer.summ_lrtlist(
                    'track/box_camX%d_g' % s,
                    self.rgb_camXs[:, s],
                    self.lrt_camXs[:, s:s + 1],
                    self.score_s[:, s:s + 1],
                    self.tid_s[:, s:s + 1],
                    self.pix_T_cams[:, 0],
                    only_return=True))
            self.summ_writer.summ_rgbs('track/box_camXs_g', visX_g)

        obj_clist_camX0_e = utils_geom.get_clist_from_lrtlist(lrt_camX0s_e)

        dists = torch.norm(obj_clist_camX0_e - self.obj_clist_camX0, dim=2)
        # this is B x S
        mean_dist = utils_basic.reduce_masked_mean(dists, self.score_s)
        median_dist = utils_basic.reduce_masked_median(dists, self.score_s)
        # this is []
        self.summ_writer.summ_scalar('track/centroid_dist_mean',
                                     mean_dist.cpu().item())
        self.summ_writer.summ_scalar('track/centroid_dist_median',
                                     median_dist.cpu().item())

        # if self.include_vis:
        if (True):
            self.summ_writer.summ_traj_on_occ('track/traj_e',
                                              obj_clist_camX0_e,
                                              self.occ_memX0s[:, 0],
                                              self.vox_util,
                                              already_mem=False,
                                              sigma=2)
            self.summ_writer.summ_traj_on_occ('track/traj_g',
                                              self.obj_clist_camX0,
                                              self.occ_memX0s[:, 0],
                                              self.vox_util,
                                              already_mem=False,
                                              sigma=2)

        total_loss += mean_dist
        # we won't backprop, but it's nice to plot and print this anyway
    else:
        ious = torch.zeros([self.B, self.S]).float().cuda()
        for s in list(range(self.S)):
            ious[:, s] = utils_geom.get_iou_from_corresponded_lrtlists(
                self.lrt_camX0s[:, 0:1],
                self.lrt_camX0s[:, s:s + 1]).squeeze(1)
        results['ious'] = ious
        for s in range(self.S):
            self.summ_writer.summ_scalar(
                'track/mean_iou_%02d' % s,
                torch.mean(ious[:, s]).cpu().item())
        self.summ_writer.summ_scalar('track/mean_iou',
                                     torch.mean(ious).cpu().item())

        lrt_camX0s_e = self.lrt_camX0s[:, 0:1].repeat(1, self.S, 1)
        obj_clist_camX0_e = utils_geom.get_clist_from_lrtlist(lrt_camX0s_e)
        self.summ_writer.summ_traj_on_occ('track/traj_e',
                                          obj_clist_camX0_e,
                                          self.occ_memX0s[:, 0],
                                          self.vox_util,
                                          already_mem=False,
                                          sigma=2)
        self.summ_writer.summ_traj_on_occ('track/traj_g',
                                          self.obj_clist_camX0,
                                          self.occ_memX0s[:, 0],
                                          self.vox_util,
                                          already_mem=False,
                                          sigma=2)

    self.summ_writer.summ_scalar('loss', total_loss.cpu().item())
    return total_loss, results, False
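# The tracker above leans on two helpers: utils_basic.argmax3D(..., hard=False)
# (a soft argmax over the correlation volume) and utils_track.rigid_transform_3D
# (a least-squares rigid fit between corresponded point sets). The repo's own
# implementations are not reproduced here; below are minimal standalone
# sketches of the standard versions of both ops, under the assumption that the
# helpers behave this way (function names and coordinate order are
# illustrative, not the repo's API).

def _soft_argmax3D_sketch(heat):
    # heat is N x 1 x Z x Y x X; returns N x 3 expected (x, y, z) grid coords:
    # softmax over the flattened volume, then expectation of the coord grid
    N, _, Z, Y, X = list(heat.shape)
    prob = torch.softmax(heat.reshape(N, -1), dim=1).reshape(N, Z, Y, X)
    gz = torch.arange(Z, dtype=torch.float32, device=heat.device).reshape(1, Z, 1, 1)
    gy = torch.arange(Y, dtype=torch.float32, device=heat.device).reshape(1, 1, Y, 1)
    gx = torch.arange(X, dtype=torch.float32, device=heat.device).reshape(1, 1, 1, X)
    z = torch.sum(prob * gz, dim=[1, 2, 3])
    y = torch.sum(prob * gy, dim=[1, 2, 3])
    x = torch.sum(prob * gx, dim=[1, 2, 3])
    return torch.stack([x, y, z], dim=1)

def _rigid_transform_3D_sketch(xyz0, xyz1):
    # xyz0, xyz1 are N x 3 corresponded point sets; returns the 4 x 4 rigid
    # transform mapping xyz0 to xyz1, via the SVD (Kabsch) solution:
    # center both sets, solve for the rotation from the 3 x 3 covariance,
    # then recover the translation from the centroids
    c0 = xyz0.mean(dim=0, keepdim=True)
    c1 = xyz1.mean(dim=0, keepdim=True)
    cov = (xyz0 - c0).t().mm(xyz1 - c1)  # 3 x 3
    U, S, V = torch.svd(cov)
    R = V.mm(U.t())
    if torch.det(R) < 0:
        # fix an improper rotation (reflection), if the fit produced one
        V = V.clone()
        V[:, 2] = -V[:, 2]
        R = V.mm(U.t())
    t = c1.t() - R.mm(c0.t())  # 3 x 1
    rt = torch.eye(4, device=xyz0.device)
    rt[:3, :3] = R
    rt[:3, 3] = t.squeeze(1)
    return rt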