示例#1
0
def pytorch_inference(config_path=None, ckpt_path=None, data_path=None):
    """Run PyTorch inference on one point cloud and save a BEV visualization.

    Args:
        config_path: path to a TrainEvalPipelineConfig text-format protobuf.
        ckpt_path: path to a PyTorch checkpoint (state_dict) for the network.
        data_path: path to the point-cloud file to run inference on.

    Side effects:
        Writes the rendered bird's-eye-view image to "result.png".
    """
    config = pipeline_pb2.TrainEvalPipelineConfig()

    # Parse the text-format protobuf configuration.
    with open(config_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, config)

    model_cfg = config.model.second

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    net = build_network(model_cfg).to(device).eval()
    # map_location keeps CPU-only machines working when the checkpoint was
    # saved from a GPU run; without it torch.load tries to restore CUDA tensors.
    net.load_state_dict(torch.load(ckpt_path, map_location=device))

    points = read_pointcloud(data_path)
    example = generate_example(net, model_cfg, points, device)

    # Inference only: skip autograd bookkeeping (consistent with onnx_inference).
    with torch.no_grad():
        pred = net(example)[0]

    boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()

    # Rasterize the points into a BEV image and draw the predicted boxes in green.
    vis_voxel_size = [0.1, 0.1, 0.1]
    vis_point_range = [-50, -30, -3, 50, 30, 1]
    bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size,
                                         vis_point_range)
    bev_map = simplevis.draw_box_in_bev(bev_map, vis_point_range, boxes_lidar,
                                        [0, 255, 0], 2)

    plt.imsave("result.png", bev_map)
示例#2
0
 def visualize_bev(points, boxes_lidar):
     """Rasterize *points* into a bird's-eye-view image, draw *boxes_lidar*
     on it in green, and display the result with matplotlib."""
     voxel_size = [0.1, 0.1, 0.1]
     point_range = [-50, -30, -3, 50, 30, 1]
     # Background raster from the raw points, then boxes overlaid in place.
     canvas = simplevis.point_to_vis_bev(points, voxel_size, point_range)
     canvas = simplevis.draw_box_in_bev(canvas, point_range,
                                        boxes_lidar, [0, 255, 0], 2)
     plt.imshow(canvas)
     plt.show()
示例#3
0
def onnx_inference(config_path, data_path, pfe_path, rpn_path):
    """Run split ONNX inference (PFE + RPN stages) on one point cloud and
    save a BEV visualization to "result_onnx.png".

    The pipeline is hybrid: the PFE and RPN stages run through ONNX Runtime,
    while the middle feature extractor and the final prediction decoding run
    through the PyTorch network built from the config.

    Args:
        config_path: path to a TrainEvalPipelineConfig text-format protobuf.
        data_path: path to the point-cloud file to run inference on.
        pfe_path: path to the exported PFE (pillar/voxel feature) ONNX model.
        rpn_path: path to the exported RPN (region proposal) ONNX model.
    """
    config = pipeline_pb2.TrainEvalPipelineConfig()

    # Parse the text-format protobuf configuration.
    with open(config_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, config)

    model_cfg = config.model.second
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # The torch network is still needed for the middle feature extractor and
    # for decoding the raw RPN heads into boxes (net.predict below).
    net = build_network(model_cfg).to(device).eval()
    points = read_pointcloud(data_path)
    example = generate_example(net, model_cfg, points, device)

    # ONNX Runtime sessions for the two exported stages.
    ort_session_pfe = onnxruntime.InferenceSession(pfe_path)
    ort_session_rpn = onnxruntime.InferenceSession(rpn_path)

    # Compute ONNX Runtime output prediction. Input order must match the
    # export order of the PFE model: voxels, num_points, coordinates.
    ort_inputs_pfe = {
        ort_session_pfe.get_inputs()[0].name: to_numpy(example["voxels"]),
        ort_session_pfe.get_inputs()[1].name: to_numpy(example["num_points"]),
        ort_session_pfe.get_inputs()[2].name: to_numpy(example["coordinates"])
    }

    ort_outs_pfe = ort_session_pfe.run(None, ort_inputs_pfe)

    # Back to torch for the (sparse) middle feature extractor.
    voxel_features = torch.from_numpy(ort_outs_pfe[0]).to(device)

    # Trailing argument 1 is presumably the batch size — TODO confirm against
    # the middle_feature_extractor signature.
    spatial_features = net.middle_feature_extractor(voxel_features,
                                                    example["coordinates"], 1)

    ort_inputs_rpn = {
        ort_session_rpn.get_inputs()[0].name: to_numpy(spatial_features)
    }

    ort_outs_rpn = ort_session_rpn.run(None, ort_inputs_rpn)

    # Reassemble the raw RPN heads into the dict net.predict expects; output
    # order must match the export order: box, class, direction-class preds.
    preds_dict = {}
    preds_dict["box_preds"] = torch.from_numpy(ort_outs_rpn[0]).to(device)
    preds_dict["cls_preds"] = torch.from_numpy(ort_outs_rpn[1]).to(device)
    preds_dict["dir_cls_preds"] = torch.from_numpy(ort_outs_rpn[2]).to(device)

    # Decode raw heads into final boxes; inference only, no autograd needed.
    with torch.no_grad():
        pred = net.predict(example, preds_dict)[0]

    boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()

    # Rasterize the points into a BEV image and draw predicted boxes in green.
    vis_voxel_size = [0.1, 0.1, 0.1]
    vis_point_range = [-50, -30, -3, 50, 30, 1]
    bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size,
                                         vis_point_range)
    bev_map = simplevis.draw_box_in_bev(bev_map, vis_point_range, boxes_lidar,
                                        [0, 255, 0], 2)

    plt.imsave("result_onnx.png", bev_map)
示例#4
0
def visualize(pred, points, vis_voxel_size=None, vis_point_range=None):
    """Render a prediction's 3D boxes onto a BEV image of *points*.

    Args:
        pred: prediction dict; "box3d_lidar" must expose
            .detach().cpu().numpy() (a torch tensor of boxes).
        points: point cloud to rasterize into the BEV background.
        vis_voxel_size: BEV voxel size [x, y, z];
            defaults to [0.1, 0.1, 0.1].
        vis_point_range: visible range [x_min, y_min, z_min, x_max, y_max, z_max];
            defaults to [-50, -30, -3, 50, 30, 1].

    Returns:
        The BEV image with the predicted boxes drawn in green.
    """
    # None sentinels instead of mutable list defaults: list defaults are
    # created once and shared across calls, so a caller mutating them would
    # silently change later calls' behavior.
    if vis_voxel_size is None:
        vis_voxel_size = [0.1, 0.1, 0.1]
    if vis_point_range is None:
        vis_point_range = [-50, -30, -3, 50, 30, 1]
    boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()
    bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size, vis_point_range)
    bev_map = simplevis.draw_box_in_bev(bev_map, vis_point_range, boxes_lidar, [0, 255, 0], 2)
    return bev_map
示例#5
0
        # print(pred)

        # NOTE(review): fragment of a larger loop body — `pred`, `points` and
        # `num` are bound by an enclosing scope outside this chunk.
        # boxes_lidar is assigned but never used below (box3d duplicates it);
        # kept as-is to preserve the original code byte-for-byte.
        boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()
        box3d = pred["box3d_lidar"].detach().cpu().numpy()
        # box3d = info['gt_boxes']
        scores = pred["scores"].detach().cpu().numpy()
        labels = pred["label_preds"].detach().cpu().numpy()
        # Keep only detections with confidence above 0.3.
        idx = np.where(scores > 0.3)[0]
        # print(idx)
        # filter low score ones
        # print(boxes_lidar)
        box3d = box3d[idx, :]
        # print(labels)
        # label is one-dim
        labels = np.take(labels, idx)
        # print(labels)
        scores = np.take(scores, idx)
        # print(scores)
        # Rasterize points into a BEV image and overlay the kept boxes in green.
        vis_voxel_size = [0.1, 0.1, 0.1]
        vis_point_range = [-50, -50, -3, 50, 50, 1]
        # bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size, vis_point_range)
        bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size,
                                             vis_point_range)
        bev_map = simplevis.draw_box_in_bev(bev_map, vis_point_range, box3d,
                                            [0, 255, 0], 2)

        plt.imshow(bev_map)
        # plt.savefig("/home/lichao/image_rain_label2/%d.png"%(num))
        # plt.savefig("4.3.png")
        plt.savefig("/home/lichao/lidar_image_0.3/%d.png" % (num))