def detection_target(output_maps, corners_list_512, stage_lvl=4):
    """Build per-pixel classification/regression targets for the detection head.

    :param output_maps: net output, size (B, H, W, C); values have already been
        transformed to real coordinates in the 0-512 scale. NOTICE !!!!!
    :param corners_list_512: list of length B of tensors (N, 8); N is the
        object count in one image, corners given in the 512 scale
    :param stage_lvl: pyramid level the corners are projected to
        (gt corners are divided by 2**stage_lvl to reach feature-map scale)
    :return: (sample_area_target, coordinate_target)
        sample_area_target: long tensor (B, H, W) — 1 positive, -1 ignore,
            0 background
        coordinate_target: float tensor (B, H, W, 8) — matching gt corners
            (512 scale) written at each object's positive locations
    """
    device = output_maps.device
    num_imgs = len(corners_list_512)
    # project gt corners from the 512 scale down to the feature-map scale
    corners_list_32 = [
        single_corners / (2 ** stage_lvl) for single_corners in corners_list_512
    ]
    B, H, W, C = output_maps.shape
    # ======================target initial==========================
    # Allocate with the final dtype directly on the target device; avoids the
    # extra CPU allocation + cast + copy of zeros().long().to(device).
    # positive and negative area map
    sample_area_target = torch.zeros((B, H, W), dtype=torch.long, device=device)
    # coordinate target map: the corresponding gt coordinates
    coordinate_target = torch.zeros((B, H, W, 8), dtype=torch.float, device=device)
    # NOTE: the distance / size / discrete-degree targets are computed inside
    # the loss function instead of here.
    for img in range(num_imgs):
        single_corners_32 = corners_list_32[img]   # size (N, 8)
        single_corners_512 = corners_list_512[img]
        # calculate pos & neg areas, one object at a time
        obj_num = single_corners_32.shape[0]
        for obj in range(obj_num):
            # decide which scale branch this object is distributed to
            dist_idx = gtP.scale_distribute(
                single_corners_512[obj], tra_cfg.K_Means_args['split_value'])
            # e_spatial_map (positive) and i_spatial_map (ignore) are bool
            # tensors of shape (H, W)
            e_spatial_map, i_spatial_map = get_spatial_idx(
                single_corners_32[obj], W, H, dist_idx, device)
            coordinate_target[img, e_spatial_map] = single_corners_512[obj]
            # Drop ignore pixels already claimed by a previous object: keep
            # only locations still marked background.  Plain boolean masking
            # replaces the deprecated byte-tensor subtraction; for bool masks
            # i - (nonzero & i) == i & ~nonzero.
            i_spatial_map = i_spatial_map & (sample_area_target[img] == 0)
            sample_area_target[img, e_spatial_map] = 1
            sample_area_target[img, i_spatial_map] = -1
    return sample_area_target, coordinate_target
                                                 gauss_target)
        # ===================== total loss =======================
        coord_loss = smooth_L1_map
        coord_loss = torch.sum(
            coord_loss[sample_area_target == 1]) / torch.sum(
                sample_area_target == 1)
        score_loss = sample_focal_loss
        detection_loss = score_loss + coord_loss
        # print(detection_loss, coord_loss, score_loss)
        return detection_loss, coord_loss, score_loss


if __name__ == '__main__':
    corners = torch.tensor(
        [[100.0, 100.0, 170.0, 120.0, 170.0, 135.0, 100.0, 115.0]])
    distr_idx = gtP.scale_distribute(
        corners, splitValue=tra_cfg.K_Means_args['split_value'])
    distr_idx = distr_idx[0]
    print("idx", distr_idx)
    corners = corners / 16
    print('corner / 16', corners)
    e, i = get_spatial_idx(corners, 32, 32, distr_idx, corners.device)
    with open('1.txt', 'w') as f:
        for y in range(e.shape[0]):
            for x in range(e.shape[1]):
                f.write(str(e[y][x].item()))
                f.write(' ')
            f.write('\n')
        f.write('\n=======================\n')
        for y in range(i.shape[0]):
            for x in range(i.shape[1]):
                f.write(str(i[y][x].item()))