def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if opt.dataset == 'ycb':
        opt.num_objects = 21   # number of object classes in the dataset
        opt.num_points = 1000  # number of points on the input pointcloud
        opt.outf = 'trained_models/ycb'       # folder to save trained models
        opt.log_dir = 'experiments/logs/ycb'  # folder to save logs
        opt.repeat_epoch = 1   # number of repeat times for one epoch training
    elif opt.dataset == 'linemod':
        opt.num_objects = 13
        opt.num_points = 500
        opt.outf = 'trained_models/linemod'
        opt.log_dir = 'experiments/logs/linemod'
        opt.repeat_epoch = 20
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
    refiner.cuda()

    if opt.resume_posenet != '':
        estimator.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))

    if opt.resume_refinenet != '':
        refiner.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        opt.refine_start = True
        opt.decay_start = True
        opt.lr *= opt.lr_rate
        opt.w *= opt.w_rate
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        opt.refine_start = False
        opt.decay_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

    if opt.dataset == 'ycb':
        dataset = PoseDataset_ycb('train', opt.num_points, True,
                                  opt.dataset_root, opt.noise_trans,
                                  opt.refine_start)
    elif opt.dataset == 'linemod':
        dataset = PoseDataset_linemod('train', opt.num_points, True,
                                      opt.dataset_root, opt.noise_trans,
                                      opt.refine_start)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=True,
                                             num_workers=opt.workers)
    if opt.dataset == 'ycb':
        test_dataset = PoseDataset_ycb('test', opt.num_points, False,
                                       opt.dataset_root, 0.0, opt.refine_start)
    elif opt.dataset == 'linemod':
        test_dataset = PoseDataset_linemod('test', opt.num_points, False,
                                           opt.dataset_root, 0.0,
                                           opt.refine_start)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

    opt.sym_list = dataset.get_sym_list()
    opt.num_points_mesh = dataset.get_num_points_mesh()

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\n'
          'length of the training set: {0}\n'
          'length of the testing set: {1}\n'
          'number of sample points on mesh: {2}\n'
          'symmetry object list: {3}'.format(
              len(dataset), len(test_dataset), opt.num_points_mesh,
              opt.sym_list))

    criterion = Loss(opt.num_points_mesh, opt.sym_list)
    criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)

    best_test = np.Inf

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()

    for epoch in range(opt.start_epoch, opt.nepoch):
        logger = setup_logger(
            'epoch%d' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Training started'))
        train_count = 0
        train_dis_avg = 0.0
        if opt.refine_start:
            estimator.eval()
            refiner.train()
        else:
            estimator.train()
        optimizer.zero_grad()

        for rep in range(opt.repeat_epoch):
            for i, data in enumerate(dataloader, 0):
                points, choose, img, target, model_points, idx = data
                points, choose, img, target, model_points, idx = \
                    Variable(points).cuda(), Variable(choose).cuda(), \
                    Variable(img).cuda(), Variable(target).cuda(), \
                    Variable(model_points).cuda(), Variable(idx).cuda()
                pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
                loss, dis, new_points, new_target = criterion(
                    pred_r, pred_t, pred_c, target, model_points, idx, points,
                    opt.w, opt.refine_start)

                if opt.refine_start:
                    for ite in range(0, opt.iteration):
                        pred_r, pred_t = refiner(new_points, emb, idx)
                        dis, new_points, new_target = criterion_refine(
                            pred_r, pred_t, new_target, model_points, idx,
                            new_points)
                        dis.backward()
                else:
                    loss.backward()

                train_dis_avg += dis.item()
                train_count += 1

                if train_count % opt.batch_size == 0:
                    logger.info(
                        'Train time {0} Epoch {1} Batch {2} Frame {3} Avg_dis:{4}'
                        .format(
                            time.strftime("%Hh %Mm %Ss",
                                          time.gmtime(time.time() - st_time)),
                            epoch, int(train_count / opt.batch_size),
                            train_count, train_dis_avg / opt.batch_size))
                    optimizer.step()
                    optimizer.zero_grad()
                    train_dis_avg = 0

                if train_count != 0 and train_count % 1000 == 0:
                    if opt.refine_start:
                        torch.save(
                            refiner.state_dict(),
                            '{0}/pose_refine_model_current.pth'.format(opt.outf))
                    else:
                        torch.save(
                            estimator.state_dict(),
                            '{0}/pose_model_current.pth'.format(opt.outf))

        print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))

        logger = setup_logger(
            'epoch%d_test' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Testing started'))
        test_dis = 0.0
        test_count = 0
        estimator.eval()
        refiner.eval()

        for j, data in enumerate(testdataloader, 0):
            points, choose, img, target, model_points, idx = data
            points, choose, img, target, model_points, idx = \
                Variable(points).cuda(), Variable(choose).cuda(), \
                Variable(img).cuda(), Variable(target).cuda(), \
                Variable(model_points).cuda(), Variable(idx).cuda()
            pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
            _, dis, new_points, new_target = criterion(
                pred_r, pred_t, pred_c, target, model_points, idx, points,
                opt.w, opt.refine_start)

            if opt.refine_start:
                for ite in range(0, opt.iteration):
                    pred_r, pred_t = refiner(new_points, emb, idx)
                    dis, new_points, new_target = criterion_refine(
                        pred_r, pred_t, new_target, model_points, idx,
                        new_points)

            test_dis += dis.item()
            logger.info('Test time {0} Test Frame No.{1} dis:{2}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                test_count, dis))
            test_count += 1

        test_dis = test_dis / test_count
        logger.info('Test time {0} Epoch {1} TEST FINISH Avg dis: {2}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)),
            epoch, test_dis))

        if test_dis <= best_test:
            best_test = test_dis
            if opt.refine_start:
                torch.save(
                    refiner.state_dict(),
                    '{0}/pose_refine_model_{1}_{2}.pth'.format(
                        opt.outf, epoch, test_dis))
            else:
                torch.save(
                    estimator.state_dict(),
                    '{0}/pose_model_{1}_{2}.pth'.format(
                        opt.outf, epoch, test_dis))
            print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<')

        if best_test < opt.decay_margin and not opt.decay_start:
            opt.decay_start = True
            opt.lr *= opt.lr_rate
            opt.w *= opt.w_rate
            optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

        if best_test < opt.refine_margin and not opt.refine_start:
            opt.refine_start = True
            opt.batch_size = int(opt.batch_size / opt.iteration)
            optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)

            # reload the datasets so refine_start takes effect
            if opt.dataset == 'ycb':
                dataset = PoseDataset_ycb('train', opt.num_points, True,
                                          opt.dataset_root, opt.noise_trans,
                                          opt.refine_start)
            elif opt.dataset == 'linemod':
                dataset = PoseDataset_linemod('train', opt.num_points, True,
                                              opt.dataset_root,
                                              opt.noise_trans,
                                              opt.refine_start)
            dataloader = torch.utils.data.DataLoader(
                dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
            if opt.dataset == 'ycb':
                test_dataset = PoseDataset_ycb('test', opt.num_points, False,
                                               opt.dataset_root, 0.0,
                                               opt.refine_start)
            elif opt.dataset == 'linemod':
                test_dataset = PoseDataset_linemod('test', opt.num_points,
                                                   False, opt.dataset_root,
                                                   0.0, opt.refine_start)
            testdataloader = torch.utils.data.DataLoader(
                test_dataset, batch_size=1, shuffle=False,
                num_workers=opt.workers)

            opt.sym_list = dataset.get_sym_list()
            opt.num_points_mesh = dataset.get_num_points_mesh()

            print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\n'
                  'length of the training set: {0}\n'
                  'length of the testing set: {1}\n'
                  'number of sample points on mesh: {2}\n'
                  'symmetry object list: {3}'.format(
                      len(dataset), len(test_dataset), opt.num_points_mesh,
                      opt.sym_list))

            criterion = Loss(opt.num_points_mesh, opt.sym_list)
            criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
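
# --- Aside: a minimal sketch (not part of the original scripts) of the
# gradient-accumulation pattern used by the training loop above. The
# DataLoader runs with batch_size=1, and optimizer.step() fires only every
# opt.batch_size samples, so mini-batching is emulated by letting per-sample
# gradients add up. Function and argument names here are illustrative.
def accumulate_and_step(model, optimizer, samples, loss_fn, batch_size):
    """Step the optimizer once per `batch_size` accumulated sample gradients."""
    optimizer.zero_grad()
    for count, sample in enumerate(samples, 1):
        loss_fn(model, sample).backward()  # gradients accumulate in .grad
        if count % batch_size == 0:
            optimizer.step()               # one update per virtual batch
            optimizer.zero_grad()

# Note on `opt.batch_size = int(opt.batch_size / opt.iteration)` at refine
# start: each frame then contributes opt.iteration refiner backward passes,
# so shrinking the accumulation count keeps the number of backward passes per
# optimizer update roughly unchanged.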
def main():
    # g13: parameter setting -------------------
    batch_id = 1
    opt.dataset = 'linemod'
    opt.dataset_root = './datasets/linemod/Linemod_preprocessed'
    estimator_path = 'trained_checkpoints/linemod/pose_model_9_0.01310166542980859.pth'
    refiner_path = 'trained_checkpoints/linemod/pose_refine_model_493_0.006761023565178073.pth'
    opt.resume_posenet = estimator_path
    opt.resume_refinenet = refiner_path
    dataset_config_dir = 'datasets/linemod/dataset_config'
    output_result_dir = 'experiments/eval_result/linemod'
    bs = 1  # fixed because of the default setting in torch.utils.data.DataLoader
    opt.iteration = 2  # default is 4 in eval_linemod.py
    t1_idx = 0
    t1_total_eval_num = 3
    axis_range = 0.1  # the length of the X, Y, and Z axes in 3D
    vimg_dir = 'verify_img'
    if not os.path.exists(vimg_dir):
        os.makedirs(vimg_dir)
    # -------------------------------------------

    if opt.dataset == 'ycb':
        opt.num_objects = 21   # number of object classes in the dataset
        opt.num_points = 1000  # number of points on the input pointcloud
        opt.outf = 'trained_models/ycb'       # folder to save trained models
        opt.log_dir = 'experiments/logs/ycb'  # folder to save logs
        opt.repeat_epoch = 1   # number of repeat times for one epoch training
    elif opt.dataset == 'linemod':
        opt.num_objects = 13
        opt.num_points = 500
        opt.outf = 'trained_models/linemod'
        opt.log_dir = 'experiments/logs/linemod'
        opt.repeat_epoch = 20
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
    refiner.cuda()

    if opt.resume_posenet != '':
        estimator.load_state_dict(torch.load(estimator_path))

    if opt.resume_refinenet != '':
        refiner.load_state_dict(torch.load(refiner_path))
        opt.refine_start = True
        opt.decay_start = True
        opt.lr *= opt.lr_rate
        opt.w *= opt.w_rate
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        opt.refine_start = False
        opt.decay_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

    if opt.dataset == 'ycb':
        test_dataset = PoseDataset_ycb('test', opt.num_points, False,
                                       opt.dataset_root, 0.0, opt.refine_start)
    elif opt.dataset == 'linemod':
        test_dataset = PoseDataset_linemod('test', opt.num_points, False,
                                           opt.dataset_root, 0.0,
                                           opt.refine_start)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)
    print('complete loading testing loader\n')

    opt.sym_list = test_dataset.get_sym_list()
    opt.num_points_mesh = test_dataset.get_num_points_mesh()

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\n'
          'length of the testing set: {0}\n'
          'number of sample points on mesh: {1}\n'
          'symmetry object list: {2}'.format(
              len(test_dataset), opt.num_points_mesh, opt.sym_list))

    # load pytorch model
    estimator.eval()
    refiner.eval()
    criterion = Loss(opt.num_points_mesh, opt.sym_list)
    criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)

    fw = open('{0}/t1_eval_result_logs.txt'.format(output_result_dir), 'w')

    # Pose estimation
    for j, data in enumerate(testdataloader, 0):
        # g13: modify this part for evaluation target--------------------
        if j == t1_total_eval_num:
            break
        # ----------------------------------------------------------------
        points, choose, img, target, model_points, idx = data
        if len(points.size()) == 2:
            print('No.{0} NOT Pass! Lost detection!'.format(j))
            fw.write('No.{0} NOT Pass! Lost detection!\n'.format(j))
            continue
        points, choose, img, target, model_points, idx = \
            Variable(points).cuda(), Variable(choose).cuda(), \
            Variable(img).cuda(), Variable(target).cuda(), \
            Variable(model_points).cuda(), Variable(idx).cuda()
        pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
        _, dis, new_points, new_target = criterion(
            pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w,
            opt.refine_start)

        # if opt.refine_start:  # iterative pose refinement
        #     for ite in range(0, opt.iteration):
        #         pred_r, pred_t = refiner(new_points, emb, idx)
        #         dis, new_points, new_target = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)

        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, opt.num_points, 1)
        pred_c = pred_c.view(bs, opt.num_points)
        how_max, which_max = torch.max(pred_c, 1)
        pred_t = pred_t.view(bs * opt.num_points, 1, 3)

        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
        my_t = (points.view(bs * opt.num_points, 1, 3) +
                pred_t)[which_max[0]].view(-1).cpu().data.numpy()
        my_pred = np.append(my_r, my_t)

        for ite in range(0, opt.iteration):
            T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda() \
                .view(1, 3).repeat(opt.num_points, 1) \
                .contiguous().view(1, opt.num_points, 3)
            my_mat = quaternion_matrix(my_r)
            R = Variable(torch.from_numpy(
                my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)
            my_mat[0:3, 3] = my_t

            new_points = torch.bmm((points - T), R).contiguous()
            pred_r, pred_t = refiner(new_points, emb, idx)
            pred_r = pred_r.view(1, 1, -1)
            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
            my_r_2 = pred_r.view(-1).cpu().data.numpy()
            my_t_2 = pred_t.view(-1).cpu().data.numpy()
            my_mat_2 = quaternion_matrix(my_r_2)
            my_mat_2[0:3, 3] = my_t_2

            my_mat_final = np.dot(my_mat, my_mat_2)
            my_r_final = copy.deepcopy(my_mat_final)
            my_r_final[0:3, 3] = 0
            my_r_final = quaternion_from_matrix(my_r_final, True)
            my_t_final = np.array([my_mat_final[0][3], my_mat_final[1][3],
                                   my_mat_final[2][3]])

            my_pred = np.append(my_r_final, my_t_final)
            my_r = my_r_final
            my_t = my_t_final

        # g13: start drawing pose on image------------------------------------
        # pick up image
        print("index {0}: {1}".format(j, test_dataset.list_rgb[j]))
        img = Image.open(test_dataset.list_rgb[j])

        # pick up center position by bbox
        meta_file = open('{0}/data/{1}/gt.yml'.format(
            opt.dataset_root, '%02d' % test_dataset.list_obj[j]), 'r')
        meta = yaml.load(meta_file)
        which_item = test_dataset.list_rank[j]
        bbx = meta[which_item][0]['obj_bb']
        draw = ImageDraw.Draw(img)

        # draw box (ensure this is the right object)
        draw.line((bbx[0], bbx[1], bbx[0], bbx[1] + bbx[3]), fill=(255, 0, 0), width=5)
        draw.line((bbx[0], bbx[1], bbx[0] + bbx[2], bbx[1]), fill=(255, 0, 0), width=5)
        draw.line((bbx[0], bbx[1] + bbx[3], bbx[0] + bbx[2], bbx[1] + bbx[3]), fill=(255, 0, 0), width=5)
        draw.line((bbx[0] + bbx[2], bbx[1], bbx[0] + bbx[2], bbx[1] + bbx[3]), fill=(255, 0, 0), width=5)

        # get center
        c_x = bbx[0] + int(bbx[2] / 2)
        c_y = bbx[1] + int(bbx[3] / 2)
        draw.point((c_x, c_y), fill=(255, 255, 0))

        # get the 3D position of center
        cam_intrinsic = np.zeros((3, 3))
        cam_intrinsic.itemset(0, test_dataset.cam_fx)
        cam_intrinsic.itemset(4, test_dataset.cam_fy)
        cam_intrinsic.itemset(2, test_dataset.cam_cx)
        cam_intrinsic.itemset(5, test_dataset.cam_cy)
        cam_intrinsic.itemset(8, 1)
        cam_extrinsic = my_mat_final[0:3, :]
        cam2d_3d = np.matmul(cam_intrinsic, cam_extrinsic)
        cen_3d = np.matmul(np.linalg.pinv(cam2d_3d), [[c_x], [c_y], [1]])
        # replace img.show() with plt.imshow(img)

        # transpose the three 3D axis points into 2D
        x_3d = cen_3d + [[axis_range], [0], [0], [0]]
        y_3d = cen_3d + [[0], [axis_range], [0], [0]]
        z_3d = cen_3d + [[0], [0], [axis_range], [0]]
        x_2d = np.matmul(cam2d_3d, x_3d)
        y_2d = np.matmul(cam2d_3d, y_3d)
        z_2d = np.matmul(cam2d_3d, z_3d)

        # draw the axes in 2D
        draw.line((c_x, c_y, x_2d[0], x_2d[1]), fill=(255, 255, 0), width=5)
        draw.line((c_x, c_y, y_2d[0], y_2d[1]), fill=(0, 255, 0), width=5)
        draw.line((c_x, c_y, z_2d[0], z_2d[1]), fill=(0, 0, 255), width=5)

        # g13: show image
        # img.show()

        # save file under vimg_dir
        img_file_name = '{0}/pred_obj{1}_pic{2}.png'.format(
            vimg_dir, test_dataset.list_obj[j], which_item)
        img.save(img_file_name, "PNG")
        img.close()
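
# --- Aside: a minimal numpy sketch (not part of the original script) of the
# axis projection the visualization above performs inline: project the object
# origin and the tips of its X/Y/Z axes through P = K [R|t]. Unlike the code
# above, it divides by the homogeneous coordinate before drawing, which is the
# usual pinhole convention. `project_axes` and its arguments are illustrative.
import numpy as np

def project_axes(K, R, t, axis_len=0.1):
    """Return pixel coordinates of the object origin and three axis tips."""
    P = K @ np.hstack([R, t.reshape(3, 1)])               # 3x4 projection matrix
    pts = np.vstack([np.zeros(3), axis_len * np.eye(3)])  # origin + X, Y, Z tips
    homo = np.hstack([pts, np.ones((4, 1))]).T            # 4x4 homogeneous points
    uv = P @ homo                                         # 3x4 homogeneous pixels
    return (uv[:2] / uv[2]).T                             # (4, 2): origin, x, y, z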
def main():
    # opt.manualSeed = random.randint(1, 10000)
    # # opt.manualSeed = 1
    # random.seed(opt.manualSeed)
    # torch.manual_seed(opt.manualSeed)
    torch.set_printoptions(threshold=5000)
    # device_ids = [0,1]
    cudnn.benchmark = True

    if opt.dataset == 'ycb':
        opt.num_objects = 21   # number of object classes in the dataset
        opt.num_points = 1000  # number of points on the input pointcloud
        opt.outf = 'trained_models/ycb'       # folder to save trained models
        opt.log_dir = 'experiments/logs/ycb'  # folder to save logs
        opt.repeat_epoch = 3   # number of repeat times for one epoch training
    elif opt.dataset == 'linemod':
        opt.num_objects = 13
        opt.num_points = 500
        opt.outf = 'trained_models/linemod'
        opt.log_dir = 'experiments/logs/linemod'
        opt.repeat_epoch = 20
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
    # refiner.cuda()
    # estimator = nn.DataParallel(estimator, device_ids=device_ids)

    if opt.resume_posenet != '':
        estimator.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))
        print('LOADED!!')

    if opt.resume_refinenet != '':
        refiner.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        opt.refine_start = True
        opt.decay_start = True
        opt.lr *= opt.lr_rate
        opt.w *= opt.w_rate
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        print('no refinement')
        opt.refine_start = False
        opt.decay_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
        # optimizer = nn.DataParallel(optimizer, device_ids=device_ids)

    if opt.dataset == 'ycb':
        dataset = PoseDataset_ycb('train', opt.num_points, False,
                                  opt.dataset_root, opt.noise_trans,
                                  opt.refine_start)
        # print(dataset.list)
    elif opt.dataset == 'linemod':
        dataset = PoseDataset_linemod('train', opt.num_points, True,
                                      opt.dataset_root, opt.noise_trans,
                                      opt.refine_start)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=True,
                                             num_workers=opt.workers)
    if opt.dataset == 'ycb':
        test_dataset = PoseDataset_ycb('test', opt.num_points, False,
                                       opt.dataset_root, 0.0, opt.refine_start)
    elif opt.dataset == 'linemod':
        test_dataset = PoseDataset_linemod('test', opt.num_points, False,
                                           opt.dataset_root, 0.0,
                                           opt.refine_start)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

    opt.sym_list = dataset.get_sym_list()
    opt.num_points_mesh = dataset.get_num_points_mesh()
    # print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))

    criterion = Loss(opt.num_points_mesh, opt.sym_list)
    # criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)

    best_test = np.Inf
    best_epoch = 0

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()
    count_gen = 0

    mode = 1

    if mode == 1:
        for epoch in range(opt.start_epoch, opt.nepoch):
            logger = setup_logger(
                'epoch%d' % epoch,
                os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
            logger.info('Train time {0}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)) + ', ' +
                'Training started'))
            train_count = 0
            train_dis_avg = 0.0
            if opt.refine_start:
                estimator.eval()
                refiner.train()
            else:
                estimator.train()
            optimizer.zero_grad()

            for rep in range(opt.repeat_epoch):
                for i, data in enumerate(dataloader, 0):
                    points, choose, img, target_sym, target_cen, idx, file_list_idx = data
                    if idx.item() in (9, 16):  # skip these two object classes
                        continue
                    points, choose, img, target_sym, target_cen, idx = \
                        Variable(points).cuda(), Variable(choose).cuda(), \
                        Variable(img).cuda(), Variable(target_sym).cuda(), \
                        Variable(target_cen).cuda(), Variable(idx).cuda()
                    pred_norm, pred_on_plane, emb = estimator(img, points, choose, idx)
                    loss = criterion(pred_norm, pred_on_plane, target_sym,
                                     target_cen, idx, points, opt.w,
                                     opt.refine_start)
                    # scene_idx = dataset.list[file_list_idx]
                    loss.backward()
                    # train_dis_avg += dis.item()
                    train_count += 1

                    if train_count % opt.batch_size == 0:
                        logger.info(
                            'Train time {0} Epoch {1} Batch {2} Frame {3}'.format(
                                time.strftime("%Hh %Mm %Ss",
                                              time.gmtime(time.time() - st_time)),
                                epoch, int(train_count / opt.batch_size),
                                train_count))
                        optimizer.step()
                        # for param_lr in optimizer.module.param_groups:
                        #     param_lr['lr'] /= 2
                        optimizer.zero_grad()
                        train_dis_avg = 0

                    if train_count % 8 == 0:
                        print(pred_on_plane.max())
                        print(pred_on_plane.mean())
                        print(idx)

                    if train_count != 0 and train_count % 1000 == 0:
                        if opt.refine_start:
                            torch.save(
                                refiner.state_dict(),
                                '{0}/pose_refine_model_current.pth'.format(opt.outf))
                        else:
                            torch.save(
                                estimator.state_dict(),
                                '{0}/pose_model_current.pth'.format(opt.outf))

            print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))

            logger = setup_logger(
                'epoch%d_test' % epoch,
                os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
            logger.info('Test time {0}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)) + ', ' +
                'Testing started'))
            test_loss = 0.0
            test_count = 0
            estimator.eval()

            # (no evaluation loop here, so test_loss stays 0.0)
            logger.info('Test time {0} Epoch {1} TEST FINISH Avg dis: {2}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                epoch, test_loss))
            print(pred_on_plane.max())
            print(pred_on_plane.mean())
            bs, num_p, _ = pred_on_plane.size()

            # if epoch % 40 == 0:
            #     import pdb;pdb.set_trace()

            best_test = test_loss
            best_epoch = epoch
            if opt.refine_start:
                torch.save(
                    refiner.state_dict(),
                    '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_loss))
            else:
                torch.save(
                    estimator.state_dict(),
                    '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_loss))
            print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<')

            if best_test < opt.decay_margin and not opt.decay_start:
                opt.decay_start = True
                opt.lr *= opt.lr_rate
                # opt.w *= opt.w_rate
                optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
                estimator.load_state_dict(
                    torch.load('{0}/pose_model_{1}_{2}.pth'.format(
                        opt.outf, best_epoch, best_test)))
    else:
        estimator.load_state_dict(
            torch.load('{0}/pose_model_45_0.0.pth'.format(opt.outf),
                       map_location='cpu'))
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if opt.dataset == 'ycb':
        opt.dataset_root = 'datasets/ycb/YCB_Video_Dataset'
        opt.num_objects = 21
        opt.num_points = 1000
        opt.result_dir = 'results/ycb'
        opt.repeat_epoch = 1
    elif opt.dataset == 'linemod':
        opt.dataset_root = 'datasets/linemod/Linemod_preprocessed'
        opt.num_objects = 13
        opt.num_points = 500
        opt.result_dir = 'results/linemod'
        opt.repeat_epoch = 1
    else:
        print('unknown dataset')
        return

    if opt.dataset == 'ycb':
        dataset = PoseDataset_ycb('train', opt.num_points, True,
                                  opt.dataset_root, opt.noise_trans)
        test_dataset = PoseDataset_ycb('test', opt.num_points, False,
                                       opt.dataset_root, 0.0)
    elif opt.dataset == 'linemod':
        dataset = PoseDataset_linemod('train', opt.num_points, True,
                                      opt.dataset_root, opt.noise_trans)
        test_dataset = PoseDataset_linemod('test', opt.num_points, False,
                                           opt.dataset_root, 0.0)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=True,
                                             num_workers=opt.workers)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

    opt.sym_list = dataset.get_sym_list()
    opt.num_points_mesh = dataset.get_num_points_mesh()
    opt.diameters = dataset.get_diameter()

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<')
    print('length of the training set: {0}'.format(len(dataset)))
    print('length of the testing set: {0}'.format(len(test_dataset)))
    print('number of sample points on mesh: {0}'.format(opt.num_points_mesh))
    print('symmetrical object list: {0}'.format(opt.sym_list))

    if not os.path.exists(opt.result_dir):
        os.makedirs(opt.result_dir)
    tb_writer = tf.summary.FileWriter(opt.result_dir)
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id

    # network
    estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects,
                        num_rot=opt.num_rot)
    estimator.cuda()
    # loss
    criterion = Loss(opt.sym_list, estimator.rot_anchors)
    knn = KNearestNeighbor(1)

    # learning rate decay
    best_test = np.Inf
    opt.first_decay_start = False
    opt.second_decay_start = False

    # if resuming training, recover best_test and start_epoch from the
    # checkpoint file name
    if opt.resume_posenet != '':
        estimator.load_state_dict(torch.load(opt.resume_posenet))
        model_name_parsing = (opt.resume_posenet.split('.')[0]).split('_')
        best_test = float(model_name_parsing[-1])
        opt.start_epoch = int(model_name_parsing[-2]) + 1
        if best_test < 0.016 and not opt.first_decay_start:
            opt.first_decay_start = True
            opt.lr *= 0.6
        if best_test < 0.013 and not opt.second_decay_start:
            opt.second_decay_start = True
            opt.lr *= 0.5

    # optimizer
    optimizer = torch.optim.Adam(estimator.parameters(), lr=opt.lr)
    global_step = (len(dataset) // opt.batch_size) * opt.repeat_epoch * (opt.start_epoch - 1)

    # train
    st_time = time.time()
    for epoch in range(opt.start_epoch, opt.nepoch):
        logger = setup_logger(
            'epoch%02d' % epoch,
            os.path.join(opt.result_dir, 'epoch_%02d_train_log.txt' % epoch))
        logger.info('Train time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Training started'))
        train_count = 0
        train_loss_avg = 0.0
        train_loss_r_avg = 0.0
        train_loss_t_avg = 0.0
        train_loss_reg_avg = 0.0
        estimator.train()
        optimizer.zero_grad()

        for rep in range(opt.repeat_epoch):
            for i, data in enumerate(dataloader, 0):
                points, choose, img, target_t, target_r, model_points, idx, gt_t = data
                obj_diameter = opt.diameters[idx]
                points, choose, img, target_t, target_r, model_points, idx = \
                    Variable(points).cuda(), Variable(choose).cuda(), \
                    Variable(img).cuda(), Variable(target_t).cuda(), \
                    Variable(target_r).cuda(), Variable(model_points).cuda(), \
                    Variable(idx).cuda()
                pred_r, pred_t, pred_c = estimator(img, points, choose, idx)
                loss, loss_r, loss_t, loss_reg = criterion(
                    pred_r, pred_t, pred_c, target_r, target_t, model_points,
                    idx, obj_diameter)
                loss.backward()

                train_loss_avg += loss.item()
                train_loss_r_avg += loss_r.item()
                train_loss_t_avg += loss_t.item()
                train_loss_reg_avg += loss_reg.item()
                train_count += 1

                if train_count % opt.batch_size == 0:
                    global_step += 1
                    lr = opt.lr
                    optimizer.step()
                    optimizer.zero_grad()
                    # write results to tensorboard
                    summary = tf.Summary(value=[
                        tf.Summary.Value(tag='learning_rate', simple_value=lr),
                        tf.Summary.Value(tag='loss',
                                         simple_value=train_loss_avg / opt.batch_size),
                        tf.Summary.Value(tag='loss_r',
                                         simple_value=train_loss_r_avg / opt.batch_size),
                        tf.Summary.Value(tag='loss_t',
                                         simple_value=train_loss_t_avg / opt.batch_size),
                        tf.Summary.Value(tag='loss_reg',
                                         simple_value=train_loss_reg_avg / opt.batch_size)
                    ])
                    tb_writer.add_summary(summary, global_step)
                    logger.info(
                        'Train time {0} Epoch {1} Batch {2} Frame {3} Avg_loss:{4:f}'
                        .format(
                            time.strftime("%Hh %Mm %Ss",
                                          time.gmtime(time.time() - st_time)),
                            epoch, int(train_count / opt.batch_size),
                            train_count, train_loss_avg / opt.batch_size))
                    train_loss_avg = 0.0
                    train_loss_r_avg = 0.0
                    train_loss_t_avg = 0.0
                    train_loss_reg_avg = 0.0

        print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))

        logger = setup_logger(
            'epoch%02d_test' % epoch,
            os.path.join(opt.result_dir, 'epoch_%02d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Testing started'))
        test_dis = 0.0
        test_count = 0
        save_model = False
        estimator.eval()
        success_count = [0 for i in range(opt.num_objects)]
        num_count = [0 for i in range(opt.num_objects)]

        for j, data in enumerate(testdataloader, 0):
            points, choose, img, target_t, target_r, model_points, idx, gt_t = data
            obj_diameter = opt.diameters[idx]
            points, choose, img, target_t, target_r, model_points, idx = \
                Variable(points).cuda(), Variable(choose).cuda(), \
                Variable(img).cuda(), Variable(target_t).cuda(), \
                Variable(target_r).cuda(), Variable(model_points).cuda(), \
                Variable(idx).cuda()
            pred_r, pred_t, pred_c = estimator(img, points, choose, idx)
            loss, _, _, _ = criterion(pred_r, pred_t, pred_c, target_r,
                                      target_t, model_points, idx,
                                      obj_diameter)
            test_count += 1

            # evaluation
            how_min, which_min = torch.min(pred_c, 1)
            pred_r = pred_r[0][which_min[0]].view(-1).cpu().data.numpy()
            pred_r = quaternion_matrix(pred_r)[:3, :3]
            pred_t, pred_mask = ransac_voting_layer(points, pred_t)
            pred_t = pred_t.cpu().data.numpy()
            model_points = model_points[0].cpu().detach().numpy()
            pred = np.dot(model_points, pred_r.T) + pred_t
            target = target_r[0].cpu().detach().numpy() + gt_t[0].cpu().data.numpy()

            if idx[0].item() in opt.sym_list:
                # ADD-S: closest-point distance for symmetric objects
                pred = torch.from_numpy(pred.astype(np.float32)).cuda() \
                    .transpose(1, 0).contiguous()
                target = torch.from_numpy(target.astype(np.float32)).cuda() \
                    .transpose(1, 0).contiguous()
                inds = knn(target.unsqueeze(0), pred.unsqueeze(0))
                target = torch.index_select(target, 1, inds.view(-1) - 1)
                dis = torch.mean(torch.norm(
                    (pred.transpose(1, 0) - target.transpose(1, 0)), dim=1),
                    dim=0).item()
            else:
                dis = np.mean(np.linalg.norm(pred - target, axis=1))

            logger.info(
                'Test time {0} Test Frame No.{1} loss:{2:f} confidence:{3:f} distance:{4:f}'
                .format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - st_time)),
                    test_count, loss, how_min[0].item(), dis))
            if dis < 0.1 * opt.diameters[idx[0].item()]:
                success_count[idx[0].item()] += 1
            num_count[idx[0].item()] += 1
            test_dis += dis

        # compute accuracy
        accuracy = 0.0
        for i in range(opt.num_objects):
            accuracy += float(success_count[i]) / num_count[i]
            logger.info('Object {0} success rate: {1}'.format(
                test_dataset.objlist[i],
                float(success_count[i]) / num_count[i]))
        accuracy = accuracy / opt.num_objects
        test_dis = test_dis / test_count

        # log results
        logger.info(
            'Test time {0} Epoch {1} TEST FINISH Avg dis: {2:f}, Accuracy: {3:f}'
            .format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                epoch, test_dis, accuracy))
        # tensorboard
        summary = tf.Summary(value=[
            tf.Summary.Value(tag='accuracy', simple_value=accuracy),
            tf.Summary.Value(tag='test_dis', simple_value=test_dis)
        ])
        tb_writer.add_summary(summary, global_step)

        # save model
        if test_dis < best_test:
            best_test = test_dis
            torch.save(
                estimator.state_dict(),
                '{0}/pose_model_{1:02d}_{2:06f}.pth'.format(
                    opt.result_dir, epoch, best_test))

        # adjust learning rate if necessary
        if best_test < 0.016 and not opt.first_decay_start:
            opt.first_decay_start = True
            opt.lr *= 0.6
            optimizer = torch.optim.Adam(estimator.parameters(), lr=opt.lr)
        if best_test < 0.013 and not opt.second_decay_start:
            opt.second_decay_start = True
            opt.lr *= 0.5
            optimizer = torch.optim.Adam(estimator.parameters(), lr=opt.lr)

        print('>>>>>>>>----------epoch {0} test finish---------<<<<<<<<'.format(epoch))
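
# --- Aside: a minimal numpy/scipy sketch (not part of the original script) of
# the two distances the test loop above computes: ADD for ordinary objects and
# ADD-S (nearest-neighbor distance) for objects in sym_list. The original uses
# a CUDA KNN module; scipy's cKDTree stands in for it here for illustration.
import numpy as np
from scipy.spatial import cKDTree

def add_metric(pred_pts, gt_pts):
    """ADD: mean distance between corresponding transformed model points."""
    return np.mean(np.linalg.norm(pred_pts - gt_pts, axis=1))

def adds_metric(pred_pts, gt_pts):
    """ADD-S: mean distance from each predicted point to its closest GT point."""
    dists, _ = cKDTree(gt_pts).query(pred_pts, k=1)
    return np.mean(dists)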
def main():
    if opt.dataset == 'ycb':
        opt.num_obj = 21
        opt.sym_list = [12, 15, 18, 19, 20]
        opt.num_points = 1000
        writer = SummaryWriter('experiments/runs/ycb/{0}'.format(opt.experiment_name))
        opt.outf = 'trained_models/ycb/{0}'.format(opt.experiment_name)
        opt.log_dir = 'experiments/logs/ycb/{0}'.format(opt.experiment_name)
        opt.repeat_num = 1
        if not os.path.exists(opt.outf):
            os.mkdir(opt.outf)
        if not os.path.exists(opt.log_dir):
            os.mkdir(opt.log_dir)
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_vote=9, num_obj=opt.num_obj)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_obj)
    refiner.cuda()

    if opt.resume_posenet != '':
        estimator.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))

    if opt.resume_refinenet != '':
        refiner.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        opt.refine_start = True
        opt.lr = opt.lr_refine
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        opt.refine_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

    dataset = PoseDataset_ycb('train', opt.num_points, True, opt.dataset_root)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=True,
                                             num_workers=opt.workers)
    test_dataset = PoseDataset_ycb('test', opt.num_points, False, opt.dataset_root)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\n'
          'length of the training set: {0}\n'
          'length of the testing set: {1}\n'
          'number of sample points on mesh: {2}'.format(
              len(dataset), len(test_dataset), opt.num_points))

    criterion = Loss(opt.num_points, opt.sym_list)
    criterion_refine = Loss_refine(opt.num_points, opt.sym_list)

    best_test = np.Inf

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()
    train_scalar = 0

    for epoch in range(opt.start_epoch, opt.nepoch):
        logger = setup_logger(
            'epoch%d' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Training started'))
        train_count = 0
        train_loss_avg = 0.0
        train_loss = 0.0
        train_dis_avg = 0.0
        train_dis = 0.0
        if opt.refine_start:
            estimator.eval()
            refiner.train()
        else:
            estimator.train()
        optimizer.zero_grad()

        for rep in range(opt.repeat_num):
            for i, data in enumerate(dataloader, 0):
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = \
                    points.cuda(), choose.cuda(), img.cuda(), target.cuda(), \
                    model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), \
                    idx.cuda(), target_r.cuda(), target_t.cuda()
                vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
                vertex_loss, pose_loss, dis, new_points, new_target = criterion(
                    vertex_pred, vertex_gt, c_pred, points, target,
                    model_points, model_kp, idx, target_r, target_t)
                loss = 10 * vertex_loss + pose_loss

                if opt.refine_start:
                    for ite in range(0, opt.iteration):
                        pred_r, pred_t = refiner(new_points, emb, idx)
                        dis, new_points, new_target = criterion_refine(
                            pred_r, pred_t, new_points, new_target,
                            model_points, idx)
                        dis.backward()
                else:
                    loss.backward()

                train_loss_avg += loss.item()
                train_loss += loss.item()
                train_dis_avg += dis.item()
                train_dis += dis.item()
                train_count += 1
                train_scalar += 1

                if train_count % opt.batch_size == 0:
                    logger.info(
                        'Train time {0} Epoch {1} Batch {2} Frame {3} Avg_loss:{4} Avg_diss:{5}'
                        .format(
                            time.strftime("%Hh %Mm %Ss",
                                          time.gmtime(time.time() - st_time)),
                            epoch, int(train_count / opt.batch_size),
                            train_count, train_loss_avg / opt.batch_size,
                            train_dis_avg / opt.batch_size))
                    writer.add_scalar('ycb training loss',
                                      train_loss_avg / opt.batch_size,
                                      train_scalar)
                    writer.add_scalar('ycb training dis',
                                      train_dis_avg / opt.batch_size,
                                      train_scalar)
                    optimizer.step()
                    optimizer.zero_grad()
                    train_loss_avg = 0
                    train_dis_avg = 0

                if train_count != 0 and train_count % 1000 == 0:
                    if opt.refine_start:
                        torch.save(
                            refiner.state_dict(),
                            '{0}/pose_refine_model_current.pth'.format(opt.outf))
                    else:
                        torch.save(
                            estimator.state_dict(),
                            '{0}/pose_model_current.pth'.format(opt.outf))

        print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))
        train_loss = train_loss / train_count
        train_dis = train_dis / train_count
        logger.info(
            'Train time {0} Epoch {1} TRAIN FINISH Avg loss: {2} Avg dis: {3}'
            .format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                epoch, train_loss, train_dis))

        logger = setup_logger(
            'epoch%d_test' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Testing started'))
        test_loss = 0.0
        test_vertex_loss = 0.0
        test_pose_loss = 0.0
        test_dis = 0.0
        test_count = 0
        success_count = 0
        estimator.eval()
        refiner.eval()

        for j, data in enumerate(testdataloader, 0):
            points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
            points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = \
                points.cuda(), choose.cuda(), img.cuda(), target.cuda(), \
                model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), \
                idx.cuda(), target_r.cuda(), target_t.cuda()
            vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
            vertex_loss, pose_loss, dis, new_points, new_target = criterion(
                vertex_pred, vertex_gt, c_pred, points, target, model_points,
                model_kp, idx, target_r, target_t)
            loss = 10 * vertex_loss + pose_loss

            if opt.refine_start:
                for ite in range(0, opt.iteration):
                    pred_r, pred_t = refiner(new_points, emb, idx)
                    dis, new_points, new_target = criterion_refine(
                        pred_r, pred_t, new_points, new_target, model_points, idx)

            test_loss += loss.item()
            test_vertex_loss += vertex_loss.item()
            test_pose_loss += pose_loss.item()
            test_dis += dis.item()
            logger.info('Test time {0} Test Frame No.{1} loss:{2} dis:{3}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                test_count, loss, dis))
            test_count += 1
            if dis.item() < 0.02:
                success_count += 1

        test_loss = test_loss / test_count
        test_vertex_loss = test_vertex_loss / test_count
        test_pose_loss = test_pose_loss / test_count
        test_dis = test_dis / test_count
        logger.info(
            'Test time {0} Epoch {1} TEST FINISH Avg loss: {2} Avg dis: {3}'
            .format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                epoch, test_loss, test_dis))
        logger.info('Success rate: {}'.format(float(success_count) / test_count))
        writer.add_scalar('ycb test loss', test_loss, epoch)
        writer.add_scalar('ycb test vertex loss', test_vertex_loss, epoch)
        writer.add_scalar('ycb test pose loss', test_pose_loss, epoch)
        writer.add_scalar('ycb test dis', test_dis, epoch)
        writer.add_scalar('ycb success rate', float(success_count) / test_count, epoch)
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)

        if test_dis <= best_test:
            best_test = test_dis
            if opt.refine_start:
                torch.save(
                    refiner.state_dict(),
                    '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
            else:
                torch.save(
                    estimator.state_dict(),
                    '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
            print(epoch, '>>>>>>>>----------MODEL SAVED---------<<<<<<<<')

        if best_test < opt.refine_margin and not opt.refine_start:
            opt.refine_start = True
            opt.lr = opt.lr_refine
            opt.batch_size = int(opt.batch_size / opt.iteration)
            optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
            print('>>>>>>>>----------Refine started---------<<<<<<<<')

    writer.close()
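
# --- Aside: a minimal sketch (not part of the original script) of the success
# rate logged above: a test frame counts as a success when its (refined)
# distance falls under 2 cm. The helper name is illustrative.
def success_rate(per_frame_dis, threshold=0.02):
    """Fraction of frames whose average point distance is below `threshold` (meters)."""
    hits = sum(1 for d in per_frame_dis if d < threshold)
    return float(hits) / len(per_frame_dis)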
def main():
    # opt.manualSeed = random.randint(1, 10000)
    # # opt.manualSeed = 1
    # random.seed(opt.manualSeed)
    # torch.manual_seed(opt.manualSeed)
    torch.set_printoptions(threshold=5000)
    # device_ids = [0,1]
    cudnn.benchmark = True

    if opt.dataset == 'ycb':
        opt.num_objects = 21   # number of object classes in the dataset
        opt.num_points = 1000  # number of points on the input pointcloud
        opt.outf = 'trained_models/ycb'       # folder to save trained models
        opt.log_dir = 'experiments/logs/ycb'  # folder to save logs
        opt.repeat_epoch = 3   # number of repeat times for one epoch training
    elif opt.dataset == 'linemod':
        opt.num_objects = 13
        opt.num_points = 500
        opt.outf = 'trained_models/linemod'
        opt.log_dir = 'experiments/logs/linemod'
        opt.repeat_epoch = 20
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
    refiner.cuda()
    # estimator = nn.DataParallel(estimator, device_ids=device_ids)

    if opt.resume_posenet != '':
        estimator.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))

    if opt.resume_refinenet != '':
        refiner.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        opt.refine_start = True
        opt.decay_start = True
        opt.lr *= opt.lr_rate
        opt.w *= opt.w_rate
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        print('no refinement')
        opt.refine_start = False
        opt.decay_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
        # optimizer = nn.DataParallel(optimizer, device_ids=device_ids)

    if opt.dataset == 'ycb':
        dataset = PoseDataset_ycb('train', opt.num_points, False,
                                  opt.dataset_root, opt.noise_trans,
                                  opt.refine_start)
        # print(dataset.list)
    elif opt.dataset == 'linemod':
        dataset = PoseDataset_linemod('train', opt.num_points, True,
                                      opt.dataset_root, opt.noise_trans,
                                      opt.refine_start)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=True,
                                             num_workers=opt.workers)
    if opt.dataset == 'ycb':
        test_dataset = PoseDataset_ycb('test', opt.num_points, False,
                                       opt.dataset_root, 0.0, opt.refine_start)
    elif opt.dataset == 'linemod':
        test_dataset = PoseDataset_linemod('test', opt.num_points, False,
                                           opt.dataset_root, 0.0,
                                           opt.refine_start)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

    opt.sym_list = dataset.get_sym_list()
    opt.num_points_mesh = dataset.get_num_points_mesh()
    # print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))

    criterion = Loss(opt.num_points_mesh, opt.sym_list)
    # criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)

    best_test = np.Inf
    best_epoch = 0

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()
    count_gen = 0

    mode = 1

    if mode == 1:
        for epoch in range(opt.start_epoch, opt.nepoch):
            logger = setup_logger(
                'epoch%d' % epoch,
                os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
            logger.info('Train time {0}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)) + ', ' +
                'Training started'))
            train_count = 0
            train_dis_avg = 0.0
            if opt.refine_start:
                estimator.eval()
                refiner.train()
            else:
                estimator.train()
            optimizer.zero_grad()

            for rep in range(opt.repeat_epoch):
                for i, data in enumerate(dataloader, 0):
                    points, choose, img, target_sym, target_cen, idx, file_list_idx = data
                    if idx.item() in (9, 16):  # skip these two object classes
                        continue
                    # points, choose, img, target_sym, target_cen, target, idx, file_list_idx = data
                    # generate_obj_file(target_sym, target_cen, target, idx.squeeze())
                    # import pdb;pdb.set_trace()
                    points, choose, img, target_sym, target_cen, idx = \
                        Variable(points).cuda(), Variable(choose).cuda(), \
                        Variable(img).cuda(), Variable(target_sym).cuda(), \
                        Variable(target_cen).cuda(), Variable(idx).cuda()
                    # points, choose, img, target_sym, target_cen, idx = Variable(points), \
                    #     Variable(choose), Variable(img), Variable(target_sym), \
                    #     Variable(target_cen), Variable(idx)
                    pred_norm, pred_on_plane, emb = estimator(img, points, choose, idx)
                    # pred_norm_new = torch.cat((pred_norm, torch.zeros(1,pred_norm.size(1),1)),2)
                    # for i in range(pred_norm.size(1)):
                    #     pred_norm_new[0,i,2] = torch.sqrt(1 - pred_norm[0,i,0] * pred_norm[0,i,0] - pred_norm[0,i,1] * pred_norm[0,i,1])
                    # if epoch % 10 == 0:
                    #     generate_obj_file_pred(pred_norm, pred_on_plane, points, count_gen, idx)
                    #     count_gen += 1
                    # print(pred_norm[0,0,:])
                    loss = criterion(pred_norm, pred_on_plane, target_sym,
                                     target_cen, idx, points, opt.w,
                                     opt.refine_start)
                    # scene_idx = dataset.list[file_list_idx]
                    loss.backward()
                    # train_dis_avg += dis.item()
                    train_count += 1

                    if train_count % opt.batch_size == 0:
                        logger.info(
                            'Train time {0} Epoch {1} Batch {2} Frame {3}'.format(
                                time.strftime("%Hh %Mm %Ss",
                                              time.gmtime(time.time() - st_time)),
                                epoch, int(train_count / opt.batch_size),
                                train_count))
                        optimizer.step()
                        # for param_lr in optimizer.module.param_groups:
                        #     param_lr['lr'] /= 2
                        optimizer.zero_grad()
                        train_dis_avg = 0

                    if train_count % 5000 == 0:
                        print(pred_on_plane.max())
                        print(pred_on_plane.mean())

                    if train_count != 0 and train_count % 1000 == 0:
                        if opt.refine_start:
                            torch.save(
                                refiner.state_dict(),
                                '{0}/pose_refine_model_current.pth'.format(opt.outf))
                        else:
                            torch.save(
                                estimator.state_dict(),
                                '{0}/pose_model_current.pth'.format(opt.outf))

            print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))

            logger = setup_logger(
                'epoch%d_test' % epoch,
                os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
            logger.info('Test time {0}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)) + ', ' +
                'Testing started'))
            test_loss = 0.0
            test_count = 0
            estimator.eval()
            # refiner.eval()

            # (the evaluation loop is commented out, so test_loss stays 0.0)
            # for rep in range(opt.repeat_epoch):
            #     for j, data in enumerate(testdataloader, 0):
            #         points, choose, img, target_sym, target_cen, idx, img_idx = data
            #         points, choose, img, target_sym, target_cen, idx = Variable(points), \
            #             Variable(choose), Variable(img), Variable(target_sym), \
            #             Variable(target_cen), Variable(idx)
            #         pred_norm, pred_on_plane, emb = estimator(img, points, choose, idx)
            #         loss = criterion(pred_norm, pred_on_plane, target_sym, target_cen, idx, points, opt.w, opt.refine_start)
            #         test_loss += loss
            #         logger.info('Test time {0} Test Frame No.{1}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), test_count))
            #         test_count += 1
            # test_loss = test_loss / test_count

            logger.info('Test time {0} Epoch {1} TEST FINISH Avg dis: {2}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)),
                epoch, test_loss))
            print(pred_on_plane.max())
            print(pred_on_plane.mean())
            bs, num_p, _ = pred_on_plane.size()

            # if epoch % 40 == 0:
            #     import pdb;pdb.set_trace()

            best_test = test_loss
            best_epoch = epoch
            if opt.refine_start:
                torch.save(
                    refiner.state_dict(),
                    '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_loss))
            else:
                torch.save(
                    estimator.state_dict(),
                    '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_loss))
            print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<')

            if best_test < opt.decay_margin and not opt.decay_start:
                opt.decay_start = True
                opt.lr *= opt.lr_rate
                # opt.w *= opt.w_rate
                optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
                estimator.load_state_dict(
                    torch.load('{0}/pose_model_{1}_{2}.pth'.format(
                        opt.outf, best_epoch, best_test)))
    else:
        estimator.load_state_dict(
            torch.load('{0}/pose_model_11_0.0.pth'.format(opt.outf)))

    product_list = []
    dist_list = []
    true_positives = 0
    false_positives = 0
    false_negatives = 0

    for index in range(len(test_dataset.list)):
        img = Image.open('{0}/data_v1/{1}-color.png'.format(
            test_dataset.root, test_dataset.list[index]))
        depth = np.array(Image.open('{0}/data_v1/{1}-depth.png'.format(
            test_dataset.root, test_dataset.list[index])))
        label = np.array(Image.open('{0}/data_v1/{1}-label.png'.format(
            test_dataset.root, test_dataset.list[index])))
        meta = scio.loadmat('{0}/data_v1/{1}-meta.mat'.format(
            test_dataset.root, test_dataset.list[index]))

        cam_cx = test_dataset.cam_cx_1
        cam_cy = test_dataset.cam_cy_1
        cam_fx = test_dataset.cam_fx_1
        cam_fy = test_dataset.cam_fy_1

        mask_back = ma.getmaskarray(ma.masked_equal(label, 0))
        obj = meta['cls_indexes'].flatten().astype(np.int32)

        for idx in range(0, len(obj)):
            print('object index: ', obj[idx])
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, obj[idx]))
            mask = mask_label * mask_depth
            if not (len(mask.nonzero()[0]) > test_dataset.minimum_num_pt and
                    len(test_dataset.symmetry[obj[idx]]['mirror']) > 0):
                continue

            rmin, rmax, cmin, cmax = get_bbox(mask_label)
            img_temp = np.transpose(np.array(img)[:, :, :3],
                                    (2, 0, 1))[:, rmin:rmax, cmin:cmax]
            img_masked = img_temp
            target_r = meta['poses'][:, :, idx][:, 0:3]
            target_t = np.array(meta['poses'][:, :, idx][:, 3:4].flatten())
            add_t = np.array([
                random.uniform(-test_dataset.noise_trans,
                               test_dataset.noise_trans) for i in range(3)
            ])

            choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
            if len(choose) > test_dataset.num_pt:
                c_mask = np.zeros(len(choose), dtype=int)
                c_mask[:test_dataset.num_pt] = 1
                np.random.shuffle(c_mask)
                choose = choose[c_mask.nonzero()]
            else:
                choose = np.pad(choose, (0, test_dataset.num_pt - len(choose)), 'wrap')

            depth_masked = depth[rmin:rmax, cmin:cmax] \
                .flatten()[choose][:, np.newaxis].astype(np.float32)
            xmap_masked = test_dataset.xmap[rmin:rmax, cmin:cmax] \
                .flatten()[choose][:, np.newaxis].astype(np.float32)
            ymap_masked = test_dataset.ymap[rmin:rmax, cmin:cmax] \
                .flatten()[choose][:, np.newaxis].astype(np.float32)
            choose = np.array([choose])

            cam_scale = meta['factor_depth'][0][0]
            pt2 = depth_masked / cam_scale
            pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
            pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
            cloud = np.concatenate((pt0, pt1, pt2), axis=1)

            dellist = [j for j in range(0, len(test_dataset.cld[obj[idx]]))]
            # dellist = random.sample(dellist, len(test_dataset.cld[obj[idx]]) - test_dataset.num_pt_mesh_small)
            # model_points = np.delete(test_dataset.cld[obj[idx]], dellist, axis=0)
            model_points = test_dataset.cld[obj[idx]]

            target_sym = []
            for sym in test_dataset.symmetry[obj[idx]]['mirror']:
                target_sym.append(np.dot(sym, target_r.T))
            target_sym = np.array(target_sym)
            target_cen = np.add(test_dataset.symmetry[obj[idx]]['center'], target_t)
            target = np.dot(model_points, target_r.T)
            target = np.add(target, target_t)

            print('ground truth norm: ', target_sym)
            print('ground truth center: ', target_cen)

            points_ten, choose_ten, img_ten, target_sym_ten, target_cen_ten, target_ten, idx_ten = \
                torch.from_numpy(cloud.astype(np.float32)).unsqueeze(0), \
                torch.LongTensor(choose.astype(np.int32)).unsqueeze(0), \
                test_dataset.norm(torch.from_numpy(img_masked.astype(np.float32))).unsqueeze(0), \
                torch.from_numpy(target_sym.astype(np.float32)).unsqueeze(0), \
                torch.from_numpy(target_cen.astype(np.float32)).unsqueeze(0), \
                torch.from_numpy(target.astype(np.float32)).unsqueeze(0), \
                torch.LongTensor([obj[idx] - 1]).unsqueeze(0)
            # print(img_ten.size())
            # print(points_ten.size())
            # print(choose_ten.size())
            # print(idx_ten.size())

            points_ten, choose_ten, img_ten, target_sym_ten, target_cen_ten, idx_ten = \
                Variable(points_ten).cuda(), Variable(choose_ten).cuda(), \
                Variable(img_ten).cuda(), Variable(target_sym_ten).cuda(), \
                Variable(target_cen_ten).cuda(), Variable(idx_ten).cuda()

            pred_norm, pred_on_plane, emb = estimator(img_ten, points_ten,
                                                      choose_ten, idx_ten)
            # import pdb;pdb.set_trace()
            bs, num_p, _ = pred_on_plane.size()

            # pred_norm = torch.cat((pred_norm, torch.zeros(1,pred_norm.size(1),1)),2)
            # for i in range(pred_norm.size(1)):
            #     pred_norm[0,i,2] = torch.sqrt(1 - pred_norm[0,i,0] * pred_norm[0,i,0] - pred_norm[0,i,1] * pred_norm[0,i,1])
            # pred_norm = pred_norm / (torch.norm(pred_norm, dim=2).view(bs, num_p, 1))

            generate_obj_file_norm_pred(
                pred_norm / (torch.norm(pred_norm, dim=2).view(bs, num_p, 1)),
                pred_on_plane, points_ten,
                test_dataset.list[index].split('/')[0],
                test_dataset.list[index].split('/')[1], obj[idx])

            loss = criterion(pred_norm, pred_on_plane, target_sym_ten,
                             target_cen_ten, idx, points_ten, opt.w,
                             opt.refine_start)
            # print('test loss: ', loss)

            pred_norm = pred_norm / (torch.norm(pred_norm, dim=2).view(bs, num_p, 1))
            pred_norm = pred_norm.cpu().detach().numpy()
            pred_on_plane = pred_on_plane.cpu().detach().numpy()
            points = points_ten.cpu().detach().numpy()

            clustering_points_idx = np.where(
                pred_on_plane > pred_on_plane.max() * PRED_ON_PLANE_FACTOR +
                pred_on_plane.mean() * (1 - PRED_ON_PLANE_FACTOR))[1]
            clustering_norm = pred_norm[0, clustering_points_idx, :]
            clustering_points = points[0, clustering_points_idx, :]
            num_points = len(clustering_points_idx)
            # import pdb;pdb.set_trace()

            close_thresh = 5e-3
            broad_thresh = 7e-3

            sym_flag = [0 for i in range(target_sym.shape[0])]
            sym_max_product = [0.0 for i in range(target_sym.shape[0])]
            sym_dist = [0.0 for i in range(target_sym.shape[0])]

            count_pred = 0
            while True:
                if num_points == 0:
                    break
                count_pred += 1
                if count_pred > target_sym.shape[0]:
                    break

                best_fit_num = 0
                count_try = 0
                while True:
                    if count_try > 3 or num_points <= 1:
                        break
                    pick_idx = np.random.randint(0, num_points - 1)
                    pick_point = clustering_points[pick_idx]
                    # proposal_norm = np.array(Plane(Point3D(pick_points[0]),Point3D(pick_points[1]),Point3D(pick_points[2])).normal_vector).astype(np.float32)
                    proposal_norm = clustering_norm[pick_idx]
                    proposal_norm = proposal_norm[:, np.newaxis]
                    # import pdb;pdb.set_trace()
                    proposal_point = pick_point
                    # highest_pred_idx = np.argmax(pred_on_plane[0,clustering_points_idx,:])
                    # highest_pred_loc = clustering_points[highest_pred_idx]
                    # proposal_norm = clustering_norm[highest_pred_idx][:,np.newaxis]
                    clustering_diff = clustering_points - proposal_point
                    clustering_dist = np.abs(np.matmul(clustering_diff, proposal_norm))

                    broad_inliers = np.where(clustering_dist < broad_thresh)[0]
                    broad_inlier_num = len(broad_inliers)
                    close_inliers = np.where(clustering_dist < close_thresh)[0]
                    close_inlier_num = len(close_inliers)

                    if broad_inlier_num > num_points / (5 - count_pred):
                        best_fit_num = close_inlier_num
                        best_fit_norm = proposal_norm
                        best_fit_cen = clustering_points[close_inliers].mean(0)
                        best_fit_idx = clustering_points_idx[close_inliers]
                        scrub_idx = clustering_points_idx[broad_inliers]
                        break
                    else:
                        count_try += 1
                    # else:
                    #     np.delete(clustering_points_idx, highest_pred_idx)
                    #     num_points -= 1

                if count_try > 3 or num_points <= 1:
                    break

                for i in range(2):
                    def f(x):
                        dist = 0
                        x = x / LA.norm(x)
                        for point in clustering_points[broad_inliers]:
                            dist += np.abs(point[0] * x[0] + point[1] * x[1] +
                                           point[2] * np.sqrt(1 - x[0] * x[0] - x[1] * x[1]) +
                                           x[2])
                        return dist

                    start_point = np.copy(proposal_norm)
                    start_point[2] = (-proposal_point * proposal_norm[:, 0]).sum()
                    min_point = fmin(f, start_point)
                    new_pred_loc = np.array([
                        0, 0,
                        -min_point[2] / np.sqrt(1 - min_point[0] * min_point[0] -
                                                min_point[1] * min_point[1])
                    ])
                    min_point[2] = np.sqrt(1 - min_point[0] * min_point[0] -
                                           min_point[1] * min_point[1])
                    new_proposal_norm = min_point
                    clustering_diff = clustering_points - new_pred_loc
                    clustering_dist = np.abs(np.matmul(clustering_diff, new_proposal_norm))

                    close_inliers = np.where(clustering_dist < close_thresh)[0]
                    new_close_inlier_num = len(close_inliers)
                    broad_inliers = np.where(clustering_dist < broad_thresh)[0]
                    new_broad_inlier_num = len(broad_inliers)
                    # import pdb;pdb.set_trace()

                    if new_close_inlier_num > close_inlier_num:
                        best_fit_num = new_close_inlier_num
                        # proposal_point = clustering_points_idx[clustering_dist.argmin()]
                        proposal_point = new_pred_loc
                        best_fit_norm = new_proposal_norm[:, np.newaxis]
                        best_fit_idx = clustering_points_idx[close_inliers]
                        scrub_idx = clustering_points_idx[broad_inliers]
                        best_fit_cen = new_pred_loc
                        close_inlier_num = new_close_inlier_num
                        proposal_norm = best_fit_norm

                # other_idx_pick = other_idx[other_idx_pick]
                # if len(other_idx_pick) > num_points//6:
                #     pick_idx = np.concatenate((pick_idx, other_idx_pick), 0)
                # norm_proposal_new = clustering_norm[pick_idx,:].mean(0)
                # norm_proposal_new = norm_proposal_new / LA.norm(norm_proposal_new)
                # inlier_num_new = len(np.where(np.abs(clustering_norm-norm_proposal_new).sum(1) < thresh)[0])
                # if inlier_num_new > inlier_num:
                #     best_fit_num = inlier_num_new
                #     best_fit_idx = np.where(np.abs(clustering_norm-norm_proposal_new).sum(1) < thresh_scrap)
                #     best_fit_norm = norm_proposal_new
                #     best_fit_cen = clustering_points[best_fit_idx].mean(0)

                if best_fit_num == 0:
                    break
                else:
                    print('predicted norm:{}, predicted point:{}'.format(
                        best_fit_norm, best_fit_cen))
                    max_idx = np.argmax(np.matmul(target_sym, best_fit_norm))
                    sym_flag[max_idx] += 1
                    sym_product = np.abs((target_sym[max_idx] *
                                          (best_fit_cen - target_cen)).sum())
                    if sym_max_product[max_idx] < sym_product:
                        sym_max_product[max_idx] = sym_product
                        sym_dist[max_idx] = np.matmul(target_sym, best_fit_norm)[max_idx]
                    # generate_obj_file_sym_pred(best_fit_norm, best_fit_cen, target_ten, test_dataset.list[index].split('/')[0], test_dataset.list[index].split('/')[1], obj[idx], count_pred)
                    # import pdb;pdb.set_trace()
                    clustering_points_idx = np.setdiff1d(clustering_points_idx, scrub_idx)
                    clustering_norm = pred_norm[0, clustering_points_idx, :]
                    clustering_points = points[0, clustering_points_idx, :]
                    num_points = len(clustering_points_idx)

            for i in range(target_sym.shape[0]):
                if sym_flag[i] >= 1:
                    dist_list.append(sym_dist[i])
                    product_list.append(sym_max_product[i])
                    false_positives += sym_flag[i] - 1
                else:
                    false_negatives += 1

    product_list = np.array(product_list)
    dist_list = np.array(dist_list)
    # import pdb;pdb.set_trace()
    total_num = len(product_list)

    prec = []
    recall = []
    for t in range(1000):
        good_ones = np.count_nonzero(
            np.logical_and(dist_list < 0.5 * t / 1000,
                           product_list > math.cos(math.pi * 0.25 * t / 1000)))
        prec.append(good_ones * 1.0 / (false_positives + total_num))
        recall.append(good_ones * 1.0 / (good_ones + false_negatives))

    print(prec)
    print(recall)
    plt.plot(recall, prec, 'r')
    plt.axis([0, 1, 0, 1])
    plt.savefig('prec-recall.png')
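
# --- Aside: a minimal numpy sketch (not part of the original script) of the
# inlier test at the heart of the RANSAC-style loop above: a point p lies on
# the plane through p0 with unit normal n when |(p - p0) . n| is small. The
# helper name and threshold are illustrative (the script uses 5e-3 / 7e-3).
import numpy as np

def plane_inliers(points, p0, n, thresh=5e-3):
    """Indices of `points` within `thresh` of the plane through p0 with normal n."""
    n = n / np.linalg.norm(n)          # ensure the normal is unit length
    dist = np.abs((points - p0) @ n)   # unsigned point-to-plane distances
    return np.where(dist < thresh)[0]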
def main(): opt.manualSeed = random.randint(1, 10000) random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) if opt.dataset == 'linemod': opt.num_objects = 13 opt.num_points = 500 opt.outf = 'trained_models/linemod' opt.log_dir = 'experiments/logs/linemod' output_results = 'check_linemod.txt' opt.repeat_epoch = 20 elif opt.dataset == 'ycb': opt.num_objects = 21 #number of object classes in the dataset opt.num_points = 1000 #number of points on the input pointcloud opt.outf = 'trained_models/ycb' #folder to save trained models opt.log_dir = 'experiments/logs/ycb' #folder to save logs opt.repeat_epoch = 1 #number of repeat times for one epoch training elif opt.dataset == 'ycb-syn': opt.num_objects = 31 # number of object classes in the dataset opt.num_points = 1000 # number of points on the input pointcloud opt.dataset_root = '/data/Akeaveny/Datasets/ycb_syn' opt.outf = 'trained_models/ycb_syn/ycb_syn2' # folder to save trained models opt.log_dir = 'experiments/logs/ycb_syn/ycb_syn2' # folder to save logs output_results = 'check_ycb_syn.txt' opt.w = 0.05 opt.refine_margin = 0.01 elif opt.dataset == 'arl': opt.num_objects = 10 # number of object classes in the dataset opt.num_points = 1000 # number of points on the input pointcloud opt.dataset_root = '/data/Akeaveny/Datasets/arl_dataset' opt.outf = 'trained_models/arl/clutter/arl_finetune_syn_2' # folder to save trained models opt.log_dir = '/home/akeaveny/catkin_ws/src/object-rpe-ak/DenseFusion/experiments/logs/arl/clutter/arl_finetune_syn_2' # folder to save logs output_results = 'check_arl_syn.txt' opt.nepoch = 750 opt.w = 0.05 opt.refine_margin = 0.0045 # TODO opt.repeat_epoch = 20 opt.start_epoch = 0 opt.resume_posenet = 'pose_model_1_0.012397416144377301.pth' opt.resume_refinenet = 'pose_refine_model_153_0.004032851301599294.pth' elif opt.dataset == 'arl1': opt.num_objects = 5 # number of object classes in the dataset opt.num_points = 1000 # number of points on the input pointcloud opt.dataset_root = '/data/Akeaveny/Datasets/arl_dataset' opt.outf = 'trained_models/arl1/clutter/arl_real_2' # folder to save trained models opt.log_dir = '/home/akeaveny/catkin_ws/src/object-rpe-ak/DenseFusion/experiments/logs/arl1/clutter/arl_real_2' # folder to save logs output_results = 'check_arl_syn.txt' opt.nepoch = 750 opt.w = 0.05 opt.refine_margin = 0.015 # opt.start_epoch = 120 # opt.resume_posenet = 'pose_model_current.pth' # opt.resume_refinenet = 'pose_refine_model_115_0.008727498716640046.pth' elif opt.dataset == 'elevator': opt.num_objects = 1 # number of object classes in the dataset opt.num_points = 1000 # number of points on the input pointcloud opt.dataset_root = '/data/Akeaveny/Datasets/elevator_dataset' opt.outf = 'trained_models/elevator/elevator_2' # folder to save trained models opt.log_dir = '/home/akeaveny/catkin_ws/src/object-rpe-ak/DenseFusion/experiments/logs/elevator/elevator_2' # folder to save logs output_results = 'check_arl_syn.txt' opt.nepoch = 750 opt.w = 0.05 opt.refine_margin = 0.015 opt.nepoch = 750 opt.w = 0.05 opt.refine_margin = 0.015 # TODO opt.repeat_epoch = 40 # opt.start_epoch = 47 # opt.resume_posenet = 'pose_model_current.pth' # opt.resume_refinenet = 'pose_refine_model_46_0.007581770288279472.pth' else: print('Unknown dataset') return estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects) estimator.cuda() refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects) refiner.cuda() if opt.resume_posenet != '': estimator.load_state_dict( torch.load('{0}/{1}'.format(opt.outf, 
    estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
    refiner.cuda()

    if opt.resume_posenet != '':
        estimator.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))

    if opt.resume_refinenet != '':
        refiner.load_state_dict(
            torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        # resuming the refiner implies refinement (and the lr/w decay) has
        # already started, so both flags must be True; otherwise the optimizer
        # below would hold the refiner's parameters while the training loop
        # still backpropagates only through the estimator
        opt.refine_start = True
        opt.decay_start = True
        opt.lr *= opt.lr_rate
        opt.w *= opt.w_rate
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        opt.refine_start = False
        opt.decay_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

    if opt.dataset == 'ycb':
        dataset = PoseDataset_ycb('train', opt.num_points, True, opt.dataset_root,
                                  opt.noise_trans, opt.refine_start)
    elif opt.dataset == 'linemod':
        dataset = PoseDataset_linemod('train', opt.num_points, True, opt.dataset_root,
                                      opt.noise_trans, opt.refine_start)
    elif opt.dataset == 'ycb-syn':
        dataset = PoseDataset_ycb_syn('train', opt.num_points, True, opt.dataset_root,
                                      opt.noise_trans, opt.refine_start)
    elif opt.dataset == 'arl':
        dataset = PoseDataset_arl('train', opt.num_points, True, opt.dataset_root,
                                  opt.noise_trans, opt.refine_start)
    elif opt.dataset == 'arl1':
        dataset = PoseDataset_arl1('train', opt.num_points, True, opt.dataset_root,
                                   opt.noise_trans, opt.refine_start)
    elif opt.dataset == 'elevator':
        dataset = PoseDataset_elevator('train', opt.num_points, True, opt.dataset_root,
                                       opt.noise_trans, opt.refine_start)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True,
                                             num_workers=opt.workers)

    if opt.dataset == 'ycb':
        test_dataset = PoseDataset_ycb('test', opt.num_points, False, opt.dataset_root,
                                       0.0, opt.refine_start)
    elif opt.dataset == 'linemod':
        test_dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root,
                                           0.0, opt.refine_start)
    elif opt.dataset == 'ycb-syn':
        test_dataset = PoseDataset_ycb_syn('test', opt.num_points, True, opt.dataset_root,
                                           0.0, opt.refine_start)
    elif opt.dataset == 'arl':
        test_dataset = PoseDataset_arl('test', opt.num_points, True, opt.dataset_root,
                                       0.0, opt.refine_start)
    elif opt.dataset == 'arl1':
        test_dataset = PoseDataset_arl1('test', opt.num_points, True, opt.dataset_root,
                                        0.0, opt.refine_start)
    elif opt.dataset == 'elevator':
        test_dataset = PoseDataset_elevator('test', opt.num_points, True, opt.dataset_root,
                                            0.0, opt.refine_start)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False,
                                                 num_workers=opt.workers)

    opt.sym_list = dataset.get_sym_list()
    opt.num_points_mesh = dataset.get_num_points_mesh()

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\n'
          'length of the training set: {0}\n'
          'length of the testing set: {1}\n'
          'number of sample points on mesh: {2}\n'
          'symmetry object list: {3}'.format(
              len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))

    criterion = Loss(opt.num_points_mesh, opt.sym_list)
    criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)

    best_test = np.Inf

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()

    ######################
    ######################
    # TODO (ak): set up tensor board
    # if not os.path.exists(opt.log_dir):
    #     os.makedirs(opt.log_dir)
    #
    # writer = SummaryWriter(opt.log_dir)
    ######################
    ######################
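    # A minimal sketch of what the TODO above could look like, assuming
    # torch.utils.tensorboard is available in this environment; the
    # 'train/dis' tag is illustrative, not a repo convention:
    #
    # from torch.utils.tensorboard import SummaryWriter
    #
    # if not os.path.exists(opt.log_dir):
    #     os.makedirs(opt.log_dir)
    # writer = SummaryWriter(log_dir=opt.log_dir)
    # # then, inside the training loop:
    # # writer.add_scalar('train/dis', train_dis_avg / opt.batch_size, train_count)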
    for epoch in range(opt.start_epoch, opt.nepoch):
        logger = setup_logger(
            'epoch%d' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Training started'))
        train_count = 0
        train_dis_avg = 0.0
        if opt.refine_start:
            estimator.eval()
            refiner.train()
        else:
            estimator.train()
        optimizer.zero_grad()

        for rep in range(opt.repeat_epoch):
            ##################
            # train
            ##################
            for i, data in enumerate(dataloader, 0):
                points, choose, img, target, model_points, idx = data

                # TODO: txt file
                # fw = open(test_folder + output_results, 'w')
                # fw.write('Points\n{0}\n\nchoose\n{1}\n\nimg\n{2}\n\ntarget\n{3}\n\nmodel_points\n{4}'.format(
                #     points, choose, img, target, model_points))
                # fw.close()

                points, choose, img, target, model_points, idx = Variable(points).cuda(), \
                                                                 Variable(choose).cuda(), \
                                                                 Variable(img).cuda(), \
                                                                 Variable(target).cuda(), \
                                                                 Variable(model_points).cuda(), \
                                                                 Variable(idx).cuda()
                pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
                loss, dis, new_points, new_target = criterion(
                    pred_r, pred_t, pred_c, target, model_points, idx, points,
                    opt.w, opt.refine_start)

                if opt.refine_start:
                    for ite in range(0, opt.iteration):
                        pred_r, pred_t = refiner(new_points, emb, idx)
                        dis, new_points, new_target = criterion_refine(
                            pred_r, pred_t, new_target, model_points, idx, new_points)
                        dis.backward()
                else:
                    loss.backward()

                train_dis_avg += dis.item()
                train_count += 1

                if train_count % opt.batch_size == 0:
                    logger.info(
                        'Train time {} Epoch {} Batch {} Frame {}/{} Avg_dis: {:.2f} [cm]'.format(
                            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)),
                            epoch, int(train_count / opt.batch_size), train_count,
                            len(dataset.list), train_dis_avg / opt.batch_size * 100))
                    optimizer.step()
                    optimizer.zero_grad()

                    # TODO: tensorboard
                    # if train_count != 0 and train_count % 250 == 0:
                    #     scalar_info = {'loss': loss.item(),
                    #                    'dis': train_dis_avg / opt.batch_size}
                    #     for key, val in scalar_info.items():
                    #         writer.add_scalar(key, val, train_count)

                    train_dis_avg = 0

                if train_count != 0 and train_count % 1000 == 0:
                    if opt.refine_start:
                        torch.save(
                            refiner.state_dict(),
                            '{0}/pose_refine_model_current.pth'.format(opt.outf))
                    else:
                        torch.save(
                            estimator.state_dict(),
                            '{0}/pose_model_current.pth'.format(opt.outf))

                    # TODO: tensorboard
                    # scalar_info = {'loss': loss.item(),
                    #                'dis': dis.item()}
                    # for key, val in scalar_info.items():
                    #     writer.add_scalar(key, val, train_count)

        print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))
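        # Note on the training loop above: the DataLoader yields single frames
        # (batch_size=1) and gradients accumulate until optimizer.step() fires
        # every opt.batch_size iterations -- standard gradient accumulation.
        # Generic pattern for reference (names below are illustrative only):
        #
        # for i, sample in enumerate(loader):      # loader batch size is 1
        #     loss = compute_loss(sample)
        #     loss.backward()                      # gradients accumulate
        #     if (i + 1) % effective_batch == 0:
        #         optimizer.step()                 # one update per effective batch
        #         optimizer.zero_grad()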
%Ss", time.gmtime(time.time() - st_time)), epoch, test_dis * 100)) # TODO: tensorboard # scalar_info = {'test dis': test_dis} # for key, val in scalar_info.items(): # writer.add_scalar(key, val, train_count) if test_dis <= best_test: best_test = test_dis if opt.refine_start: torch.save( refiner.state_dict(), '{0}/pose_refine_model_{1}_{2}.pth'.format( opt.outf, epoch, test_dis)) else: torch.save( estimator.state_dict(), '{0}/pose_model_{1}_{2}.pth'.format( opt.outf, epoch, test_dis)) print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<') if best_test < opt.decay_margin and not opt.decay_start: opt.decay_start = True opt.lr *= opt.lr_rate opt.w *= opt.w_rate optimizer = optim.Adam(estimator.parameters(), lr=opt.lr) if best_test < opt.refine_margin and not opt.refine_start: opt.refine_start = True opt.batch_size = int(opt.batch_size / opt.iteration) optimizer = optim.Adam(refiner.parameters(), lr=opt.lr) if opt.dataset == 'ycb': dataset = PoseDataset_ycb('train', opt.num_points, True, opt.dataset_root, opt.noise_trans, opt.refine_start) elif opt.dataset == 'linemod': dataset = PoseDataset_linemod('train', opt.num_points, True, opt.dataset_root, opt.noise_trans, opt.refine_start) elif opt.dataset == 'ycb-syn': dataset = PoseDataset_ycb_syn('train', opt.num_points, True, opt.dataset_root, opt.noise_trans, opt.refine_start) elif opt.dataset == 'arl': dataset = PoseDataset_arl('train', opt.num_points, True, opt.dataset_root, opt.noise_trans, opt.refine_start) elif opt.dataset == 'arl1': dataset = PoseDataset_arl1('train', opt.num_points, True, opt.dataset_root, opt.noise_trans, opt.refine_start) elif opt.dataset == 'elevator': dataset = PoseDataset_elevator('train', opt.num_points, True, opt.dataset_root, opt.noise_trans, opt.refine_start) dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers) if opt.dataset == 'ycb': test_dataset = PoseDataset_ycb('test', opt.num_points, False, opt.dataset_root, 0.0, opt.refine_start) elif opt.dataset == 'linemod': test_dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0, opt.refine_start) elif opt.dataset == 'ycb-syn': test_dataset = PoseDataset_ycb_syn('test', opt.num_points, True, opt.dataset_root, 0.0, opt.refine_start) elif opt.dataset == 'arl': test_dataset = PoseDataset_arl('test', opt.num_points, True, opt.dataset_root, 0.0, opt.refine_start) elif opt.dataset == 'arl1': test_dataset = PoseDataset_arl1('test', opt.num_points, True, opt.dataset_root, 0.0, opt.refine_start) elif opt.dataset == 'elevator': test_dataset = PoseDataset_elevator('test', opt.num_points, True, opt.dataset_root, 0.0, opt.refine_start) testdataloader = torch.utils.data.DataLoader( test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers) opt.sym_list = dataset.get_sym_list() opt.num_points_mesh = dataset.get_num_points_mesh() print( '>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}' .format(len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list)) criterion = Loss(opt.num_points_mesh, opt.sym_list) criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    device_cpu = torch.device('cpu')
    if opt.gpu and torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.backends.cudnn.benchmark = True

    if opt.dataset == 'ycb':
        opt.num_objects = 21   # number of object classes in the dataset
        opt.num_points = 1000  # number of points on the input pointcloud
        opt.outf = 'trained_models/ycb'       # folder to save trained models
        opt.log_dir = 'experiments/logs/ycb'  # folder to save logs
        opt.repeat_epoch = 1   # number of repeat times for one epoch training
    elif opt.dataset == 'linemod':
        opt.num_objects = 10
        opt.num_points = 500
        opt.outf = 'trained_models/linemod'
        opt.log_dir = 'experiments/logs/linemod'
        opt.repeat_epoch = 20
    else:
        print('Unknown dataset')
        return

    # check for the network mode
    if not opt.vertex_reg and opt.vertex_reg_hough:
        raise ValueError('Mode Incorrect')

    if opt.mode == "train":
        if opt.dataset == 'ycb':
            # print("No YCB dataset")
            # return
            dataset = PoseDataset_ycb('train', opt.num_points, True, opt.dataset_root,
                                      opt.noise_trans, opt.refine_start)
        elif opt.dataset == 'linemod':
            dataset = PoseDataset_linemod('train', opt.num_points, True, opt.dataset_root,
                                          opt.noise_trans, opt.refine_start,
                                          False, True, opt.vertex_reg, opt.vertex_reg_hough)
        trainloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size,
                                                  shuffle=True, num_workers=opt.workers)
        # print("dataset cld : " + str(dataset.cld))
        # return

    if opt.mode == "eval":
        if opt.dataset == 'ycb':
            dataset = PoseDataset_ycb('test', opt.num_points, True, opt.dataset_root,
                                      opt.noise_trans, opt.refine_start)
            # print("No YCB dataset")
            # return
            # test_dataset = PoseDataset_ycb('test', opt.num_points, False, opt.dataset_root,
            #                                0.0, opt.refine_start)
        elif opt.dataset == 'linemod':
            dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0,
                                          opt.refine_start, False, True,
                                          opt.vertex_reg, opt.vertex_reg_hough)
        testdataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                                                     num_workers=opt.workers)

    # if opt.dataset == 'ycb':
    #     pass
    # else:
    #     ap_data = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0,
    #                                   opt.refine_start, True, True,
    #                                   opt.vertex_reg, opt.vertex_reg_hough)
    #     ap_loader = torch.utils.data.DataLoader(ap_data, batch_size=1, shuffle=False,
    #                                             num_workers=opt.workers)

    # dataset = test_dataset
    opt.sym_list = dataset.get_sym_list()
    opt.num_points_mesh = dataset.get_num_points_mesh()
    # print(opt.sym_list)
    # print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))

    # Network, optimizer and loss
    net = vgg16_convs(None, opt.num_objects, opt.num_objects, opt.scales,
                      opt.threshold_label, opt.vote_threshold,
                      opt.vertex_reg, opt.vertex_reg, opt.vertex_reg_hough)
    # net = vgg16_convs_comb_seg_center(None, opt.num_objects, opt.num_objects, opt.scales,
    #                                   opt.threshold_label, opt.vote_threshold,
    #                                   opt.vertex_reg, opt.combine_seg_center)
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)

    # weight_class = torch.from_numpy(dataset.weight_clsss).type('torch.FloatTensor').to(device)
    # criterion = nn.CrossEntropyLoss(weight_class)
    criterion = nn.CrossEntropyLoss()
    # criterion = nn.BCELoss()
    # criterion = nn.MSELoss()
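    # For reference: with a segmentation head, nn.CrossEntropyLoss expects raw
    # logits of shape (N, C, H, W) and integer labels of shape (N, H, W) -- no
    # softmax beforehand. Illustrative shapes only (the tensors below are
    # hypothetical, not part of this script):
    #
    # logits = torch.randn(2, opt.num_objects, 480, 640)          # (N, C, H, W)
    # labels = torch.randint(0, opt.num_objects, (2, 480, 640))   # (N, H, W)
    # seg_loss = nn.CrossEntropyLoss()(logits, labels)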
    criterion_center = nn.SmoothL1Loss()

    # Load pretrained model
    start_epoch = 0
    if opt.flag_pretrained and not opt.flag_pretrained_vgg:
        # load our model trained before as initialization to continue
        if os.path.isfile(opt.path_pretrained):
            print("=> Loading Checkpoint '{}'".format(opt.path_pretrained))
            pre_trained = torch.load(opt.path_pretrained)
            net_dic = net.state_dict()
            net_dic_new = net_dic
            pretrained_dic = pre_trained['state_dict']
            pretrained_list = list(pretrained_dic.items())
            # print("pretrained list length : " + str(len(pretrained_list)))
            # net.load_state_dict()
            start_epoch = pre_trained['epoch']
            if opt.num_pretrain_param_load > 0:
                # copy the first num_pretrain_param_load entries by position
                count = 0
                for k, v in net_dic.items():
                    if count >= opt.num_pretrain_param_load:
                        break
                    name_temp, value_pretrained = pretrained_list[count]
                    if opt.gpu:
                        net_dic_new[k] = value_pretrained
                    else:
                        net_dic_new[k] = value_pretrained.cpu()
                    count += 1
                    # print("net dic new k : " + str(net_dic_new[k]))
                print("count : " + str(count))
                net.load_state_dict(net_dic_new)
            """
            optimizer.load_state_dict(pre_trained['optimizer'])
            for state in optimizer.state.values():
                for k, v in state.items():
                    if torch.cuda.is_available():
                        if isinstance(v, torch.Tensor):
                            state[k] = v.cuda()
            """
            print("=> Loaded Checkpoint '{}'".format(opt.path_pretrained))
            print("Start epoch : " + str(start_epoch))
        else:
            raise ValueError("no pretrained_model found at {}".format(opt.path_pretrained))

        # freeze the first num_pretrain_param_freeze parameters
        count = 0
        for param in net.parameters():
            if count >= opt.num_pretrain_param_freeze:
                break
            param.requires_grad = False
            count += 1

    elif not opt.flag_pretrained and opt.flag_pretrained_vgg:
        # load the pretrained weights of the VGG16 net
        # 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth'
        pretrained_dic = torch.load('pretrained_model/vgg16-397923af.pth')
        pretrained_list = list(pretrained_dic.items())
        net_dic = net.state_dict()
        net_dic_new = net_dic
        count = 0
        for k, v in net_dic.items():
            name_temp, value_pretrained = pretrained_list[count]
            net_dic_new[k] = value_pretrained
            count += 1
            if count >= opt.num_pretrain_param_vgg:
                break
        net.load_state_dict(net_dic_new)
        count = 0
        for param in net.parameters():
            param.requires_grad = False
            count += 1
            if count >= opt.num_pretrain_param_vgg:
                break
    elif not opt.flag_pretrained and not opt.flag_pretrained_vgg:
        print('no pretrained model loaded')
    else:
        print('Conflicting flags: flag_pretrained and flag_pretrained_vgg are both set')

    net.to(device)

    if opt.mode == "train":
        loss_his = []
        loss_his = train(trainloader, net, criterion, criterion_center, optimizer,
                         device, device_cpu, start_epoch)
        print('>>>>>>>>----------Training Finished!---------<<<<<<<<')

    if opt.mode == "eval":
        test_loss = 0
        test_loss = test(testdataloader, net, criterion, criterion_center,
                         device, device_cpu)
        print('>>>>>>>>----------AP---------<<<<<<<<')
        # aps = None
        # if opt.train_single_frame:
        #     aps = cal_AP(ap_loader, net, criterion, device, opt.num_objects, opt)
        #     aps = np.array(aps)
        #     print('Final mean AP : {}'.format(np.mean(aps)))

    # print('>>>>>>>>----------Save the model weights!---------<<<<<<<<')
    # if opt.save_model:
    #     # save the trained model
    #     save_checkpoint({
    #         'epoch': opt.nepoch,
    #         'arch': opt.arch,
    #         'state_dict': net.state_dict(),
    #         'test_loss': test_loss,
    #         'aps': "aps",
    #         'optimizer': optimizer.state_dict(),
    #     }, False)

    # print('>>>>>>>>----------Loss History---------<<<<<<<<')
    # np.save('log//loss//loss', np.array(loss_his))
    # plt.figure()
    # plt.plot(loss_his)
    # plt.show()
    # plt.savefig('log//loss//loss.png')
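    # save_checkpoint() is referenced in the commented block above but not
    # defined in this file; a minimal stand-in following the common PyTorch
    # checkpoint idiom (an assumption about the repo's helper, including the
    # file names):
    #
    # import shutil
    #
    # def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    #     torch.save(state, filename)  # full training state, not just weights
    #     if is_best:
    #         shutil.copyfile(filename, 'model_best.pth.tar')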
    print('>>>>>>>>----------The End---------<<<<<<<<')
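
# If the commented loss-history block above is re-enabled, saving the figure
# on a headless machine needs a non-interactive backend; a short sketch (the
# 'Agg' backend choice and the paths are assumptions, not repo conventions):
#
# import matplotlib
# matplotlib.use('Agg')  # render without a display
# import matplotlib.pyplot as plt
# import numpy as np
#
# loss_his = np.load('log/loss/loss.npy')
# plt.figure()
# plt.plot(loss_his)
# plt.xlabel('iteration')
# plt.ylabel('loss')
# plt.savefig('log/loss/loss.png')  # plt.show() would block on a headless box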