def interpolate(sxyzs,sprobs,qxyzs,ratio=1.0/(2*0.15*0.15)):
    # Spread per-point class probabilities from the source cloud onto the
    # query cloud, using each query point's 8 nearest source neighbors and
    # distance weighting controlled by `ratio`.
    neighbor_lists=libPointUtil.findNeighborInAnotherCPU(sxyzs,qxyzs,8)
    lens=np.asarray([len(nl) for nl in neighbor_lists],dtype=np.int32)
    begs=compute_nidxs_bgs(lens)
    flat_idxs=np.concatenate(neighbor_lists,axis=0)
    return libPointUtil.interpolateProbsGPU(sxyzs,qxyzs,sprobs,flat_idxs,lens,begs,ratio)
def interpolate_natural_terrain(sxyzs,qxyzs,qprobs):
    print sxyzs.shape,qxyzs.shape,qprobs.shape
    nidxs=libPointUtil.findNeighborInAnotherCPU(sxyzs,qxyzs,0.22)
    nlens=np.asarray([len(idxs) for idxs in nidxs],dtype=np.int32)
    qpreds=np.argmax(qprobs,axis=1)
    mask=np.logical_and(nlens>0,qpreds==0)
    qprobs[mask,0]=0.0
    qprobs[mask,1]=1.0

    return qprobs
# Exemple #3
def interpolate(sxyzs, sprobs, qxyzs, ratio=1.0 / (2 * 0.125 * 0.125)):
    bg = time.time()
    nidxs = libPointUtil.findNeighborInAnotherCPU(sxyzs, qxyzs, 6)
    print 'search done {} s'.format(time.time() - bg)
    nidxs_lens = np.asarray([len(idxs) for idxs in nidxs], dtype=np.int32)
    nidxs_bgs = compute_nidxs_bgs(nidxs_lens)
    nidxs = np.concatenate(nidxs, axis=0)
    qprobs = libPointUtil.interpolateProbsGPU(sxyzs, qxyzs, sprobs, nidxs,
                                              nidxs_lens, nidxs_bgs, ratio)

    return qprobs
# Exemple #4
def compute_context_idxs(context_xyzs,xyzs):
    # For every block i, find the single nearest context point of each point
    # in xyzs[i] and return the per-block index arrays.
    context_idxs=[]
    for i,block in enumerate(xyzs):
        xyz=np.ascontiguousarray(block[:,:3],np.float32)
        context_xyz=np.ascontiguousarray(context_xyzs[i][:,:3],np.float32)
        # keep the kd-tree leaf size below the context point count
        leaf_size=min(context_xyz.shape[0]-1,15)
        idxs=libPointUtil.findNeighborInAnotherCPU(context_xyz,xyz,1,leaf_size)
        idxs=np.squeeze(np.asarray(idxs,np.int32),axis=1)
        assert idxs.shape[0]==xyz.shape[0]
        context_idxs.append(idxs)
    return context_idxs
# Exemple #5
def global_avg_downsample():
    with open('cached/semantic3d_stems.txt', 'r') as f:
        stems = [line.split(' ')[0] for line in f.readlines()]

    sess, pts_pl, ds_pts_op = build_avg_ds_session(ds_size=2.0, min_coor=3000.0)

    train_list = read_large_block_list()
    for stem in stems:
        for k in xrange(6):
            pts, lbls = [], []
            for tfs in train_list:
                if (not tfs.startswith(stem)) or (not tfs.endswith('{}.pkl'.format(k))): continue
                fs = 'data/Semantic3D.Net/context/large_block/' + tfs
                points, labels = read_room_pkl(fs)  # [n,6],[n,1]
                idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
                pts.append(points[idxs])
                lbls.append(labels[idxs])

            # downsample
            pts = np.concatenate(pts, axis=0)
            ds_pts=sess.run(ds_pts_op,feed_dict={pts_pl:pts})

            # compute covar
            ds_xyzs=np.ascontiguousarray(ds_pts[:,:3],np.float32)
            xyzs=np.ascontiguousarray(pts[:,:3],np.float32)
            nidxs=libPointUtil.findNeighborInAnotherCPU(xyzs,ds_xyzs,4.0)
            nlens=np.ascontiguousarray([len(idxs) for idxs in nidxs],np.int32)
            nbegs=compute_nidxs_bgs(nlens)
            nidxs=np.ascontiguousarray(np.concatenate(nidxs,axis=0),dtype=np.int32)
            covars=libPointUtil.computeCovarsGPU(xyzs,nidxs,nlens,nbegs)
            if np.sum(np.isnan(covars))>0:
                print stem,k
                idxs,_=np.nonzero(np.isnan(covars))
                for idx in idxs:
                    print '{} {}'.format(idx,nlens[idx])

                exit(0)

            ds_pts=np.concatenate([ds_pts,covars],axis=1)

            # output_points('test_result/{}_{}_ds.txt'.format(stem,k),pts)
            save_pkl('data/Semantic3D.Net/context/global_avg/{}_{}.pkl'.format(stem,k),ds_pts)
# Exemple #6
def testset_global_downsample_avg():
    with open('cached/semantic3d_test_stems.txt','r') as f:
        lines=f.readlines()
        fss=[fn.strip('\n').split(' ')[0] for fn in lines]
        fns=[int(fn.strip('\n').split(' ')[1]) for fn in lines]

    sess, pts_pl, ds_pts_op = build_avg_ds_session(ds_size=2.0, min_coor=3000.0)

    for fs,fn in zip(fss,fns):
        pts=[]
        for fni in xrange(fn):
            points, labels = read_room_pkl('data/Semantic3D.Net/context/test_large_block/{}_{}.pkl'.format(fs,fni))  # [n,6],[n,1]
            idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
            pts.append(points[idxs])

        # downsample
        pts = np.concatenate(pts, axis=0)
        ds_pts=sess.run(ds_pts_op,feed_dict={pts_pl:pts})

        # compute covar
        ds_xyzs=np.ascontiguousarray(ds_pts[:,:3],np.float32)
        xyzs=np.ascontiguousarray(pts[:,:3],np.float32)
        nidxs=libPointUtil.findNeighborInAnotherCPU(xyzs,ds_xyzs,4.0)
        nlens=np.ascontiguousarray([len(idxs) for idxs in nidxs],np.int32)
        nbegs=compute_nidxs_bgs(nlens)
        nidxs=np.ascontiguousarray(np.concatenate(nidxs,axis=0),dtype=np.int32)
        covars=libPointUtil.computeCovarsGPU(xyzs,nidxs,nlens,nbegs)
        if np.sum(np.isnan(covars))>0:
            print fs
            idxs,_=np.nonzero(np.isnan(covars))
            for idx in idxs:
                print '{} {}'.format(idx,nlens[idx])

            exit(0)

        ds_pts=np.concatenate([ds_pts,covars],axis=1)

        # output_points('test_result/{}_{}_ds.txt'.format(stem,k),pts)
        save_pkl('data/Semantic3D.Net/context/test_global_avg/{}.pkl'.format(fs),ds_pts)