# Convert a SUN-RGBD camera-space annotation into the export coordinate
# convention and recover the 3D center from its projected 2D center.
# NOTE(review): relies on names from the enclosing scope (Rtilt, location,
# location_ori_changed, sun_permutation, calib, dim, ct_x, ct_y, image,
# image_id, rotation_y, theta, inv, ...) defined earlier in the file.
print(location_ori_changed)

# Undo the room tilt: Rtilt^T maps the tilted frame back.
location_tilt = np.dot(np.transpose(Rtilt), location)
print("rtilted location")
print(location)

# Reorder axes into the target convention; presumably sun_permutation maps
# SUN-RGBD axes to the export axes -- TODO confirm. Axis 1 is sign-flipped.
location_changed = [location_tilt[i] for i in sun_permutation]
location_changed[1] *= -1

# Back-project the 2D center (ct_x, ct_y) at the known depth
# location_ori_changed[2] into a 3D camera-space point via K^-1.
ct_3d_from_2d = [ct_x * location_ori_changed[2],
                 ct_y * location_ori_changed[2],
                 location_ori_changed[2]]
# Top-left 3x3 (intrinsic) part of the calibration matrix.
# (Removed a dead `calib_3 = np.zeros((3,3))` that was immediately overwritten.)
calib_3 = calib[0:3, 0:3]
inv_K = inv(calib_3)
ct_3d_from_2d2 = np.dot(inv_K, ct_3d_from_2d)

# Same axis permutation for the recovered center, but here axis 2 is negated
# rather than axis 1 -- NOTE(review): verify this asymmetry is intentional.
ct_3d_from_2d2_world = [ct_3d_from_2d2[i] for i in sun_permutation]
ct_3d_from_2d2_world[2] *= -1

# Build the 3D box from the recovered center and draw it in red for debugging.
box_3d_new = compute_box_3d_world(dim, ct_3d_from_2d2_world, rotation_y, theta)
box_2d_new = project_to_image(box_3d_new, calib)
image5 = draw_box_3d_world(image, box_2d_new, image_id, c=(255, 0, 0))

print(ct_3d_from_2d2)
print("내가 location_changed")
print(location_changed)
print("dim")
print(dim)

# SUN-RGBD stores half-extents; double them to get full sizes, reordered with
# the same axis permutation ("because dimension is a size").
dim_changed = [dim[i] * 2 for i in sun_permutation]

# Project the Rtilt-corrected 3D center onto the 2D image.
ct3d_to_ct2d = project_to_image(np.array([location_changed]), calib)
# Project the original (pre-correction) center as well, for comparison.
ct3d_to_ct2d_ori = project_to_image(np.array([location_ori_changed]), calib)
# NOTE(review): fragment -- this is the tail of an `ann = { ... }` dict literal
# whose opening (and the enclosing `if`) lies before this chunk; the `else:`
# below pairs with that unseen `if`. Indentation reconstructed accordingly.
'category_id': cat_ids[cat_id],
# 'dim': dim_xyz,
'dim': dim_changed,            # full box sizes (half-extents doubled upstream)
'bbox': bbox2D,                # (xmin, ymin, w, h)
#'depth': abs(location_changed[1]), ....NotRtilted(2019-12-23)
'depth': location_changed[2],  # depth taken from the converted location
'alpha': alpha,
# 'truncated': truncated,
# 'occluded': occluded,
'location': location_changed,
'rotation_y': rotation_y,
'ct_2d_from_3d': ct3d_proj
}
ret['annotations'].append(ann)
# Draw the projected 3D box onto the debug image.
image = draw_box_3d_world(image, box_2d, image_id)
# cv2.imshow(str(image_id), image)
# cv2.waitKey()
# cv2.imwrite('C:\\obj_detection\\CenterNet-master\\CenterNet-master\\data\\SUNRGBD\\resultImages_mini\\'+str(image_id)+'.jpg',image)
else:
    # Presumably: the projected 3D center fell outside the image, so the
    # annotation is skipped and logged -- confirm against the unseen `if`.
    print("3d_ct out")
    print(image_id)
    print(cat_id)
# depth = np.array([location[2]], dtype=np.float32)
# pt_2d = np.array([(bbox2D[0] + bbox2D[2]) / 2, (bbox2D[1] + bbox2D[3]) / 2],
#                  dtype=np.float32)
# pt_3d = unproject_2d_to_3d(pt_2d, depth, calib)
# pt_3d[1] += dim[0] / 2  # because the position of KITTI is defined as the center of the bottom face
# print('pt_3d', pt_3d)
# print('location', location)
# Draw the right, left, and bottom edges of the projected 2D bounding box in
# yellow (BGR (0, 255, 255), thickness 2); the top edge is presumably drawn
# just before this chunk -- TODO confirm.
cv2.line(image, (int(xmax_projected), int(ymin_projected)), (int(xmax_projected), int(ymax_projected)), (0, 255, 255), 2, lineType=cv2.LINE_AA)
cv2.line(image, (int(xmin_projected), int(ymax_projected)), (int(xmin_projected), int(ymin_projected)), (0, 255, 255), 2, lineType=cv2.LINE_AA)
cv2.line(image, (int(xmax_projected), int(ymax_projected)), (int(xmin_projected), int(ymax_projected)), (0, 255, 255), 2, lineType=cv2.LINE_AA)
# Mark the projected 2D center with a filled circle (radius 5).
cv2.circle(image, (int(ct_x_projected), int(ct_y_projected)), 5, (0, 255, 255), -1)
image = draw_box_3d_world(image, box_2d, image_id)
#print(box_2d.transpose()[0].min(), box_2d.transpose()[0].max(), box_2d.transpose()[1].min(), box_2d.transpose()[1].max())
# bbox2D_projected must be supplied as (xmin, ymin, width, height).
bbox2D_projected = [ float(xmin_projected), float(ymin_projected), float(width_projected), float(height_projected) ]
# Un-project the 2D point pt_2d back to 3D using the depth of the converted
# original location and the calibration matrix.
depth = location_ori_changed[2]
P = calib
loc_2d_depth = unproject_2d_to_3d(pt_2d, depth, P)
# Build the tilt-aware 3D box from the doubled dimensions, recovered
# location, yaw, and the room-tilt rotation.
box_3d = compute_box_3d_Rtilt(dim_changed, loc_2d_depth, rotation_y, Rtilt)