Example #1
def callback(cloud_msg):

    # Convert the incoming ROS PointCloud2 message to an (N, 4) numpy array,
    # shifting the cloud to account for the lidar mounting height.
    cloud = cloud_msg_to_numpy(cloud_msg, cfg, shift_cloud=True)

    # Voxelize the cloud for the network.
    voxels, coors, num_points = points_to_voxel(cloud, cfg.voxel_size, cfg.pc_range,
                                                cfg.max_points_voxel, True, cfg.max_voxels)
    voxels = torch.from_numpy(voxels).float().cuda()
    coors = torch.from_numpy(coors)
    coors = F.pad(coors, (1, 0), 'constant', 0).float().cuda()  # prepend batch index (single sample: 0)
    num_points = torch.from_numpy(num_points).float().cuda()

    with torch.no_grad():
        output = model(voxels, coors, num_points)

    # Label each point as ground/non-ground against the predicted elevation map.
    pred_GndSeg = segment_cloud(cloud.copy(), np.asarray(cfg.grid_range), cfg.voxel_size[0],
                                elevation_map=output.cpu().numpy().T, threshold=0.2)
    gnd_marker_pub(output.cpu().numpy(), marker_pub_2, cfg, color="red")
    np2ros_pub_2(cloud, pcl_pub, None, pred_GndSeg)
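
This callback only runs once it is wired to a subscriber. A minimal node-setup sketch, assuming rospy; the topic names and queue sizes here are assumptions, not taken from the example:

import rospy
from sensor_msgs.msg import PointCloud2
from visualization_msgs.msg import Marker

rospy.init_node('gnd_seg_node')
# Hypothetical topic names; adjust to your sensor and rviz setup.
pcl_pub = rospy.Publisher('/segmented_cloud', PointCloud2, queue_size=1)
marker_pub_2 = rospy.Publisher('/ground_marker', Marker, queue_size=1)
rospy.Subscriber('/velodyne_points', PointCloud2, callback, queue_size=1)
rospy.spin()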
Example #2
def InferGround(cloud):

    # Keep (x, y, z, intensity) and shift the cloud to the lidar height.
    cloud = _shift_cloud(cloud[:, :4], cfg.lidar_height)

    voxels, coors, num_points = points_to_voxel(cloud, cfg.voxel_size, cfg.pc_range,
                                                cfg.max_points_voxel, True, cfg.max_voxels)
    voxels = torch.from_numpy(voxels).float().cuda()
    coors = torch.from_numpy(coors)
    coors = F.pad(coors, (1, 0), 'constant', 0).float().cuda()  # prepend batch index 0
    num_points = torch.from_numpy(num_points).float().cuda()

    with torch.no_grad():
        output = model(voxels, coors, num_points)
    return output
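
A hedged usage sketch: feeding InferGround a raw numpy cloud and turning the predicted elevation map into a per-point ground mask, mirroring the callback in Example #1 (the threshold value is an assumption):

# cloud: (N, >=4) numpy array of (x, y, z, intensity, ...) lidar points.
elevation = InferGround(cloud)  # model output, still on the GPU

# Reuse the segmentation step from Example #1 to label ground points.
pred_GndSeg = segment_cloud(cloud.copy(), np.asarray(cfg.grid_range),
                            cfg.voxel_size[0],
                            elevation_map=elevation.cpu().numpy().T,
                            threshold=0.2)  # assumed threshold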
Example #3
def evaluate():

    batch_time = AverageMeter()
    losses = AverageMeter()
    data_time = AverageMeter()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        start = time.time()

        for batch_idx, (data, labels) in enumerate(valid_loader):

            data_time.update(time.time() - start)  # measure data loading time
            B = data.shape[0]  # batch size

            # Voxelize each sample and tag its coordinates with the batch index
            # so the whole batch can be concatenated into single tensors.
            voxels = []; coors = []; num_points = []
            data = data.numpy()
            for i in range(B):
                v, c, n = points_to_voxel(data[i], cfg.voxel_size, cfg.pc_range,
                                          cfg.max_points_voxel, True, cfg.max_voxels)
                c = torch.from_numpy(c)
                c = F.pad(c, (1, 0), 'constant', i)  # prepend batch index i
                voxels.append(torch.from_numpy(v))
                coors.append(c)
                num_points.append(torch.from_numpy(n))

            voxels = torch.cat(voxels).float().cuda()
            coors = torch.cat(coors).float().cuda()
            num_points = torch.cat(num_points).float().cuda()
            labels = labels.float().cuda()

            output = model(voxels, coors, num_points)
            loss = lossHuber(output, labels)

            losses.update(loss.item(), B)

            # measure elapsed time
            batch_time.update(time.time() - start)
            start = time.time()

            if args.visualize:
                for j in range(B):
                    out = output[j].cpu().numpy()
                    seg = segment_cloud(data[j].copy(), np.asarray(cfg.grid_range),
                                        cfg.voxel_size[0], elevation_map=out.T, threshold=0.3)
                    np2ros_pub_2(data[j], pcl_pub, None, seg)
                    gnd_marker_pub(out, marker_pub_2, cfg, color="red")

            if batch_idx % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                       batch_idx, len(valid_loader), batch_time=batch_time, loss=losses))

    return losses.avg
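
All of these functions batch variable-size voxel sets the same way: F.pad(c, (1, 0), 'constant', i) prepends the sample index i as an extra column, so the per-sample coordinate tensors can be concatenated into one (total_voxels, 4) tensor while each voxel stays attributable to its sample. A standalone sketch of that trick on dummy data (the shapes here are illustrative assumptions):

import torch
import torch.nn.functional as F

# Two samples with different numbers of voxels; coords are (num_voxels, 3).
c0 = torch.tensor([[0, 4, 7], [1, 2, 3]])
c1 = torch.tensor([[0, 0, 1]])

batched = torch.cat([
    F.pad(c0, (1, 0), 'constant', 0),  # prepend batch index 0 -> (num_voxels, 4)
    F.pad(c1, (1, 0), 'constant', 1),  # prepend batch index 1
])
print(batched)
# tensor([[0, 0, 4, 7],
#         [0, 1, 2, 3],
#         [1, 0, 0, 1]])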
Example #4
def train(epoch):

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    model.train()
    start = time.time()

    for batch_idx, (data, labels) in enumerate(train_loader):

        data_time.update(time.time() - start)  # measure data loading time
        B = data.shape[0]  # batch size

        # Voxelize each sample and tag its coordinates with the batch index.
        voxels = []; coors = []; num_points = []
        data = data.numpy()
        for i in range(B):
            v, c, n = points_to_voxel(data[i], cfg.voxel_size, cfg.pc_range,
                                      cfg.max_points_voxel, True, cfg.max_voxels)
            c = torch.from_numpy(c)
            c = F.pad(c, (1, 0), 'constant', i)  # prepend batch index i
            voxels.append(torch.from_numpy(v))
            coors.append(c)
            num_points.append(torch.from_numpy(n))

        voxels = torch.cat(voxels).float().cuda()
        coors = torch.cat(coors).float().cuda()
        num_points = torch.cat(num_points).float().cuda()
        labels = labels.float().cuda()

        optimizer.zero_grad()

        output = model(voxels, coors, num_points)

        # Huber regression loss on the elevation map plus a spatial smoothness term.
        loss = cfg.alpha * lossHuber(output, labels) + cfg.beta * lossSpatial(output)

        loss.backward()

        # `clip_grad_norm_` helps prevent exploding gradients; if used, it must run
        # before optimizer.step(), since the step consumes the (clipped) gradients.
        # torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip)

        optimizer.step()

        losses.update(loss.item(), B)

        # measure elapsed time
        batch_time.update(time.time() - start)
        start = time.time()

        if batch_idx % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                   epoch, batch_idx, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses))

    return losses.avg
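
train(epoch) returns the running-average loss, so a typical driver alternates it with validate() and keeps the best checkpoint. A minimal sketch, assuming a cfg.epochs field and a checkpoint path that are not part of these examples:

best_loss = float('inf')
for epoch in range(cfg.epochs):  # cfg.epochs is an assumed field
    train_loss = train(epoch)
    val_loss = validate()
    if val_loss < best_loss:
        best_loss = val_loss
        # Hypothetical checkpoint path.
        torch.save(model.state_dict(), 'checkpoint_best.pth')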
Example #5
def validate():

    batch_time = AverageMeter()
    losses = AverageMeter()
    data_time = AverageMeter()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        start = time.time()

        for batch_idx, (data, labels) in enumerate(valid_loader):

            data_time.update(time.time() - start)  # measure data loading time
            B = data.shape[0]  # batch size

            # Voxelize each sample and tag its coordinates with the batch index.
            voxels = []; coors = []; num_points = []
            data = data.numpy()
            for i in range(B):
                v, c, n = points_to_voxel(data[i], cfg.voxel_size, cfg.pc_range,
                                          cfg.max_points_voxel, True, cfg.max_voxels)
                c = torch.from_numpy(c)
                c = F.pad(c, (1, 0), 'constant', i)  # prepend batch index i
                voxels.append(torch.from_numpy(v))
                coors.append(c)
                num_points.append(torch.from_numpy(n))

            voxels = torch.cat(voxels).float().cuda()
            coors = torch.cat(coors).float().cuda()
            num_points = torch.cat(num_points).float().cuda()
            labels = labels.float().cuda()

            output = model(voxels, coors, num_points)

            # Same combined objective as in training.
            loss = cfg.alpha * lossHuber(output, labels) + cfg.beta * lossSpatial(output)

            losses.update(loss.item(), B)

            # measure elapsed time
            batch_time.update(time.time() - start)
            start = time.time()

            if batch_idx % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                       batch_idx, len(valid_loader), batch_time=batch_time, loss=losses))

    return losses.avg
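
The combined objective cfg.alpha * lossHuber(output, labels) + cfg.beta * lossSpatial(output) pairs a Huber regression loss on the elevation map with a smoothness regularizer. lossSpatial is not defined in these examples; the following is purely an assumed sketch of such a regularizer, penalizing large elevation differences between neighboring grid cells:

import torch

def spatial_smoothness_loss(elev):
    # elev: (B, H, W) predicted ground-elevation grid (assumed layout).
    # Penalize first-order differences along both grid axes.
    dh = (elev[:, 1:, :] - elev[:, :-1, :]).abs().mean()
    dw = (elev[:, :, 1:] - elev[:, :, :-1]).abs().mean()
    return dh + dw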