def test_compute_mas_weights_with_oldomegas(self):
    """MAS weights must blend a stored old-task omega with the new-task omega.

    Verifies two properties of compute_mas_weights():
    - new_omega == new_clsterm + new_regterm * reg_coef
    - omega == old_weight * expand(old_omega) + new_weight * new_omega
    """
    net = build_network_class4().cuda()
    loader = build_dataloader()
    num_of_datasamples = len(loader)
    num_of_anchorsamples = 15
    anchor_sample_strategy = "all"
    reg_coef = 0.1
    oldtask_omega_paths = ["unit_tests/data/test_model-mas-omega-class3-npy.pkl"]
    oldtask_omega_weights = [0.5]
    newtask_omega_weight = 0.9
    est_mas_dict = net.compute_mas_weights(
        loader, num_of_datasamples, num_of_anchorsamples,
        anchor_sample_strategy, reg_coef, oldtask_omega_paths,
        oldtask_omega_weights, newtask_omega_weight)
    # new-task omega is the classification term plus the scaled regression term
    for name in est_mas_dict["omega"]:
        expected = (est_mas_dict["new_clsterm"][name]
                    + est_mas_dict["new_regterm"][name] * reg_coef)
        self.assertTrue(torch.all(est_mas_dict["new_omega"][name] == expected))
    from incdet3.models.ewc_func import (parse_numclasses_numanchorperloc,
                                         expand_old_weights)
    gt_omega = read_pkl("unit_tests/data/test_model-mas-omega-class3-npy.pkl")
    num_new_classes, num_new_anchor_per_loc = \
        parse_numclasses_numanchorperloc(est_mas_dict["new_omega"])
    num_old_classes, num_old_anchor_per_loc = \
        parse_numclasses_numanchorperloc(gt_omega)
    # merged omega is the weighted sum of the (expanded) old omega and the new omega
    for name, param in est_mas_dict["omega"].items():
        oldparam = expand_old_weights(
            name, torch.from_numpy(gt_omega[name]).cuda(),
            num_new_classes, num_new_anchor_per_loc,
            num_old_classes, num_old_anchor_per_loc)
        blended = (oldtask_omega_weights[0] * oldparam
                   + newtask_omega_weight * est_mas_dict["new_omega"][name])
        self.assertTrue(torch.allclose(param, blended))
def test_compute_mas_weights(self):
    """Without old-task omegas, the merged MAS omega equals the new-task omega.

    Also checks new_omega == new_clsterm + new_regterm * reg_coef, and compares
    against a stored ground-truth fixture.
    """
    net = build_network().cuda()
    loader = build_dataloader()
    num_of_datasamples = len(loader)
    num_of_anchorsamples = 15
    anchor_sample_strategy = "all"
    reg_coef = 0.1
    oldtask_omega_paths = []
    oldtask_omega_weights = []
    newtask_omega_weight = 1.0
    est_mas_dict = net.compute_mas_weights(
        loader, num_of_datasamples, num_of_anchorsamples,
        anchor_sample_strategy, reg_coef, oldtask_omega_paths,
        oldtask_omega_weights, newtask_omega_weight)
    for name, param in est_mas_dict["omega"].items():
        # no old tasks -> merged omega is exactly the new-task omega
        self.assertTrue(torch.all(param == est_mas_dict["new_omega"][name]))
        expected = (est_mas_dict["new_clsterm"][name]
                    + est_mas_dict["new_regterm"][name] * reg_coef)
        self.assertTrue(torch.all(param == expected))
    # Fixture regeneration (kept for reference):
    # write_pkl({name: param.cpu().numpy() for name, param in est_mas_dict["omega"].items()}, "unit_tests/data/test_model-mas-omega-class2-npy.pkl")
    gt_omega = read_pkl("unit_tests/data/test_model-mas-omega.pkl")
    for name, param in est_mas_dict["omega"].items():
        self.assertTrue(torch.allclose(param, gt_omega[name]))
def test_compute_FIM_clsregterm_v1(self):
    """_compute_FIM_clsregterm_v1 is the elementwise grad product, symmetric in its args."""
    from det3.ops import read_pkl
    from incdet3.models.ewc_func import _compute_FIM_clsregterm_v1
    # Read the fixture once (the original read the same pkl twice).
    accum_grad_dict = read_pkl(
        "unit_tests/data/test_model-ewcv1_accum_grad_dict.pkl")
    accum_grad_cls = accum_grad_dict["cls_grad"]
    accum_grad_reg = accum_grad_dict["reg_grad"]
    clsregterm_v1 = _compute_FIM_clsregterm_v1(accum_grad_cls, accum_grad_reg)
    for name, param in clsregterm_v1.items():
        # term must equal the elementwise product of the two accumulated grads
        self.assertTrue(
            torch.all(param == accum_grad_cls[name] * accum_grad_reg[name]))
    # swapping the arguments must give the same result (symmetry)
    regclsterm_v1 = _compute_FIM_clsregterm_v1(accum_grad_reg, accum_grad_cls)
    for name, param in regclsterm_v1.items():
        self.assertTrue(torch.all(param == clsregterm_v1[name]))
def test_pkl_io(self):
    """write_pkl/read_pkl must round-trip float32 and float16 arrays exactly.

    Uses a temporary directory instead of the original hard-coded
    "./unit-test/result/..." path, which (a) fails when the directory does
    not exist and (b) leaked the artifact after the test.
    """
    import os
    import tempfile
    from det3.ops import write_pkl, read_pkl
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "test_pcd_io.pkl")
        for dtype in [np.float32, np.float16]:
            data = np.random.randn(3 * 100).reshape(100, -1).astype(dtype)
            write_pkl(data, path)
            est = read_pkl(path).reshape(100, -1)
            # round-trip must be bit-exact for both dtypes
            self.assertTrue(np.array_equal(data, est))
def test_update_ewc_weights_v1(self):
    """Accumulated EWC weights equal cls2 + reg2*reg2_coef + clsreg*clsreg_coef.

    Runs _update_ewc_weights_v1 three times with identical terms; the running
    average must converge to the weighted sum of the three terms.
    """
    from det3.ops import read_pkl
    from incdet3.models.ewc_func import (_compute_FIM_clsregterm_v1,
                                         _compute_FIM_cls2term_v1,
                                         _compute_FIM_reg2term_v1,
                                         _init_ewc_weights,
                                         _update_ewc_weights_v1)
    reg2_coef = 0.1
    clsreg_coef = 0.2
    # Read the fixture once (the original read the same pkl twice).
    accum_grad_dict = read_pkl(
        "unit_tests/data/test_model-ewcv1_accum_grad_dict.pkl")
    accum_grad_cls = accum_grad_dict["cls_grad"]
    accum_grad_reg = accum_grad_dict["reg_grad"]
    cls2term_v1 = _compute_FIM_cls2term_v1(accum_grad_cls)
    reg2term_v1 = _compute_FIM_reg2term_v1(accum_grad_reg)
    clsregterm_v1 = _compute_FIM_clsregterm_v1(accum_grad_cls, accum_grad_reg)
    network = TestModel()
    ewc_weights = _init_ewc_weights(network)
    # Three accumulation steps (accum_idx 0..2), same terms each time.
    for accum_idx in range(3):
        ewc_weights = _update_ewc_weights_v1(
            ewc_weights, cls2term_v1, reg2term_v1, clsregterm_v1,
            reg2_coef, clsreg_coef, accum_idx=accum_idx)
    for name, param in ewc_weights.items():
        self.assertTrue(
            torch.allclose(
                param,
                cls2term_v1[name]
                + reg2term_v1[name] * reg2_coef
                + clsregterm_v1[name] * clsreg_coef))
def main(log_dir, val_pkl_path, valid_range, valid_classes, dataset):
    """Pick the eval checkpoint with the best class-frequency-weighted mAP.

    Counts, per class, how many validation samples contain that class
    (weighting by data samples rather than by instances), then scores each
    numbered eval directory under log_dir and prints the best one.
    """
    # per-class count of val samples that contain at least one valid object
    acc_dict = {cls: 0 for cls in valid_classes}
    val_pkl = read_pkl(val_pkl_path)
    for itm in val_pkl:
        label = itm['label']
        # calib is only meaningful for kitti-format labels
        calib = itm['calib'] if dataset == "kitti" else None
        label_ = filt_label_by_range(label, valid_range, calib)
        if len(label_) == 0:
            continue
        # each class counts at most once per sample
        for cls in set(obj.type for obj in label_.data):
            if cls in valid_classes:
                acc_dict[cls] += 1
    print(acc_dict)
    global g_config_dict
    # every numbered subdir of log_dir is one evaluation snapshot
    eval_pkl_list = glob(os.path.join(log_dir, '[0-9]*'))
    eval_pkl_list = [
        os.path.join(itm, 'val_eval_res.pkl') for itm in eval_pkl_list
    ]
    # hoisted: total sample count is loop-invariant
    # NOTE(review): this is zero if no sample contains a valid class,
    # which would raise ZeroDivisionError below — TODO confirm inputs.
    total_count = sum(acc_dict.values())
    max_eval_pkl = (None, 0)
    for eval_pkl_path in eval_pkl_list:
        res_pkl = read_pkl(eval_pkl_path)
        res_pkl = (res_pkl['detail'] if dataset == "kitti" else
                   res_pkl['detail']['eval.carla']['carla'])
        calc_map_dict = {cls: None for cls in valid_classes}
        for cls in valid_classes:
            eval_attrib = g_config_dict[cls]
            calc_map_dict[cls] = res_pkl[cls][eval_attrib]
        # weighted mAP: mean AP per class, weighted by class sample frequency
        map_val = 0
        for cls in valid_classes:
            mean_ap = sum(calc_map_dict[cls]) / len(calc_map_dict[cls])
            cls_norm = acc_dict[cls] / total_count
            map_val += mean_ap * cls_norm
        if map_val > max_eval_pkl[1]:
            max_eval_pkl = (eval_pkl_path, map_val)
        print(eval_pkl_path, f"{map_val:.2f}")
    print("Max:", f"{max_eval_pkl}")
def __init__(self, root_path, info_path, class_names,
             prep_func=None, prep_info_func=lambda x: x):
    """Cache dataset configuration and load the info pkl.

    Args:
        root_path: root directory the dataset reads from.
        info_path: path to the pickled info list.
        class_names: class names this dataset instance cares about.
        prep_func: optional per-example preprocessing callable.
        prep_info_func: optional transform applied to the loaded infos
            before they are cached (identity by default).
    """
    self._root_path = root_path
    self._class_names = class_names
    self._prep_func = prep_func
    # read_pkl comes from module scope; infos are transformed once, up front
    self._kitti_infos = prep_info_func(read_pkl(info_path))
def test_compute_ewc_weights_v2_oldandnewtasksFIM(self):
    """FIM must be old_FIM*old_weight + newtask_FIM*new_weight.

    Recomputes the new-task FIM by hand (mean of squared det-loss gradients
    over the dataloader) and checks the merged FIM against the stored
    old-task FIM fixture. Removed unused locals (dataloader iterator,
    batch_size) and a typo'd unused loop variable from the original.
    """
    network = build_network().cuda()
    dataloader = build_dataloader()
    num_of_datasamples = len(dataloader)
    est_FIM_dict = network.compute_ewc_weights_v2(
        dataloader, num_of_datasamples,
        oldtask_FIM_paths=[
            "unit_tests/data/test_model-ewcv2-newtaskFIM.pkl"
        ],
        oldtask_FIM_weights=[0.2],
        newtask_FIM_weight=1.2)
    # With a non-trivial old-task FIM, newtask_FIM must differ from the
    # merged FIM for at least one parameter.
    flag = True
    for name, param in network._model.named_parameters():
        flag = torch.all(
            est_FIM_dict["newtask_FIM"][name] == est_FIM_dict["FIM"][name])
        if not flag:
            break
    self.assertFalse(flag)
    # Compute the ground-truth new-task FIM: per-batch squared gradients of
    # the detection loss, averaged over all batches.
    newtask_FIM_list = []
    for data in dataloader:
        data = example_convert_to_torch(data, dtype=torch.float32,
                                        device=torch.device("cuda:0"))
        loss = network.forward(data)
        loss_det = loss["loss_cls"] + loss["loss_reg"]
        network._model.zero_grad()
        loss_det.backward()
        tmp_FIM = {}
        for name, param in network._model.named_parameters():
            # parameters without gradients contribute zero
            tmp_FIM[name] = (param.grad ** 2 if param.grad is not None
                             else torch.zeros(1).float().cuda())
        newtask_FIM_list.append(tmp_FIM)
    gt_FIM = newtask_FIM_list[0]
    for i in range(1, len(newtask_FIM_list)):
        for name in gt_FIM:
            gt_FIM[name] += newtask_FIM_list[i][name]
    for name in gt_FIM:
        gt_FIM[name] /= len(newtask_FIM_list)
    old_FIM = read_pkl("unit_tests/data/test_model-ewcv2-newtaskFIM.pkl")
    for name in gt_FIM:
        self.assertTrue(
            torch.allclose(
                est_FIM_dict["FIM"][name],
                torch.from_numpy(old_FIM[name] * 0.2).float().cuda()
                + gt_FIM[name] * 1.2,
                atol=1e-8, rtol=1e-4))
def test_compute_ewc_weights_v1_debug(self):
    """Debug-mode EWC weights must match non-debug mode and the CLI tool.

    Compares compute_ewc_weights_v1(debug_mode=True) term-by-term against the
    non-debug result, then round-trips the terms through
    tools/impose_ewc-reg2coef-clsregcoef.py and checks the recombined weights.
    """
    # Test intentionally disabled; delete this return to re-enable.
    return
    state = np.random.get_state()
    # NOTE(review): torch.Generator() snapshots a *fresh* generator's state,
    # not the global torch RNG — the save/restore pair below is almost
    # certainly a no-op; torch.get_rng_state()/torch.set_rng_state() (and the
    # cuda equivalents) were probably intended. Left as-is because the test
    # is disabled. TODO confirm.
    torch_state_cpu = torch.Generator().get_state()
    torch_state_gpu = torch.Generator(device="cuda:0").get_state()
    network = build_network().cuda()
    dataloader = build_dataloader()
    num_of_datasamples = len(dataloader)
    debug_mode = True
    reg2_coef = 0.1
    clsreg_coef = 0.01
    est_ewc_weights_dict = network.compute_ewc_weights_v1(
        dataloader, num_of_datasamples,
        reg2_coef=reg2_coef, clsreg_coef=clsreg_coef, debug_mode=debug_mode)
    gt_ewc_weights = network.compute_ewc_weights_v1(
        dataloader, num_of_datasamples,
        reg2_coef=reg2_coef, clsreg_coef=clsreg_coef, debug_mode=False)
    # debug and non-debug modes must agree on the final weights
    for name, param in gt_ewc_weights.items():
        self.assertTrue(
            torch.allclose(param, est_ewc_weights_dict["ewc_weights"][name]))
    # weights must equal cls2 + reg2*reg2_coef + clsreg*clsreg_coef
    for name, param in gt_ewc_weights.items():
        self.assertTrue(
            torch.allclose(
                param,
                est_ewc_weights_dict["cls2_term"][name]
                + est_ewc_weights_dict["reg2_term"][name] * reg2_coef
                + est_ewc_weights_dict["clsreg_term"][name] * clsreg_coef))
    from det3.ops import write_pkl, read_pkl
    import subprocess
    import os
    # dump each term so the standalone tool can recombine them
    # (dropped the pointless f-prefixes on these constant paths)
    write_pkl(
        {k: v.cpu().numpy()
         for k, v in est_ewc_weights_dict["cls2_term"].items()},
        "ewc_cls2term-tmp.pkl")
    write_pkl(
        {k: v.cpu().numpy()
         for k, v in est_ewc_weights_dict["reg2_term"].items()},
        "ewc_reg2term-tmp.pkl")
    write_pkl(
        {k: v.cpu().numpy()
         for k, v in est_ewc_weights_dict["clsreg_term"].items()},
        "ewc_clsregterm-tmp.pkl")
    cmd = "python tools/impose_ewc-reg2coef-clsregcoef.py "
    cmd += "--cls2term-path ewc_cls2term-tmp.pkl "
    cmd += "--reg2term-path ewc_reg2term-tmp.pkl "
    cmd += "--clsregterm-path ewc_clsregterm-tmp.pkl "
    cmd += f"--reg2coef {reg2_coef} "
    cmd += f"--clsregcoef {clsreg_coef} "
    cmd += "--output-path ewc_weights-tmp.pkl"
    subprocess.check_output(cmd, shell=True)
    est_ewc_weights = read_pkl("ewc_weights-tmp.pkl")
    for name, _ in est_ewc_weights.items():
        self.assertTrue(
            np.allclose(est_ewc_weights[name],
                        gt_ewc_weights[name].cpu().numpy(),
                        atol=1e-08, rtol=1e-05))
    # clean up the temp artifacts and restore RNG state (see NOTE above)
    os.remove("ewc_cls2term-tmp.pkl")
    os.remove("ewc_reg2term-tmp.pkl")
    os.remove("ewc_clsregterm-tmp.pkl")
    os.remove("ewc_weights-tmp.pkl")
    np.random.set_state(state)
    torch.Generator().set_state(torch_state_cpu)
    torch.Generator(device="cuda:0").set_state(torch_state_gpu)
plt.xticks([]), plt.yticks([]) if title is not None: plt.title(title) def main(X, y, output_path): n_samples, n_features = X.shape n_neighbors = 30 print("Computing t-SNE embedding") tsne = manifold.TSNE(n_components=2, init='pca', random_state=0) t0 = time() X_tsne = tsne.fit_transform(X) plot_embedding( X_tsne, "t-SNE embedding of the digits (time %.2fs)" % (time() - t0)) plt.savefig(output_path) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Visulize T-SNE') parser.add_argument('--pkl-path', type=str, metavar='PKL PATH', help='pkl path {"x": [#samples, #feat] (np.ndarray),' + '"y": [#samples, 1](np.ndarray)}') parser.add_argument('--output-path', type=str) args = parser.parse_args() data = read_pkl(args.pkl_path) X = data["x"] y = data["y"] main(X, y, args.output_path)