Example 1
def testset_compute_context_offset_z():
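    # For each test scene: merge its cached context blocks, grid-downsample at 0.1 m,
    # then take the mode of a 0.1 m height histogram as the ground z offset and
    # write it to cached/semantic3d_test_context_offsetz.txt.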
    with open('cached/semantic3d_test_stems.txt','r') as f:
        lines=f.readlines()
        fss=[fn.strip('\n').split(' ')[0] for fn in lines]
        fns=[int(fn.strip('\n').split(' ')[1]) for fn in lines]

    f=open('cached/semantic3d_test_context_offsetz.txt','w')
    for fs,fn in zip(fss,fns):
        pts=[]
        for fni in xrange(fn):
            points, labels =read_room_pkl('data/Semantic3D.Net/context/test_large_block/{}_{}.pkl'.format(fs,fni))
            idxs=libPointUtil.gridDownsampleGPU(points,0.1,False)
            pts.append(points[idxs])

        pts=np.concatenate(pts,axis=0)
        idxs=libPointUtil.gridDownsampleGPU(pts,0.1,False)
        pts=pts[idxs]

        zs=pts[:,2]
        min_z=np.min(zs)
        zs-=np.min(zs)

        hist,_=np.histogram(zs,np.arange(0.0,20.0,0.1),range=(0,20))
        offset_z=np.argmax(hist)*0.1+min_z
        f.write('{} {}\n'.format(fs,offset_z))

    f.close()
Example 2
def test_context_offset_z():
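    # Sanity check of the cached z offsets: merge the '0.pkl' large blocks of each
    # training scene, subtract its offset and save a height histogram plot.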
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    with open('cached/semantic3d_stems.txt','r') as f:
        stems=[line.split(' ')[0] for line in f.readlines()]

    stem_offset_map=get_context_offset_z()

    train_list=read_large_block_list()
    for stem in stems:
        pts,lbls=[],[]
        for tfs in train_list:
            if (not tfs.startswith(stem)) or (not tfs.endswith('0.pkl')): continue
            fs='data/Semantic3D.Net/context/large_block/'+tfs
            points,labels=read_room_pkl(fs) # [n,6],[n,1]
            idxs=libPointUtil.gridDownsampleGPU(points,0.2,False)
            pts.append(points[idxs])
            lbls.append(labels[idxs])

        pts=np.concatenate(pts,axis=0)
        idxs=libPointUtil.gridDownsampleGPU(pts,0.1,False)
        pts=pts[idxs]

        zs=pts[:,2]
        zs-=stem_offset_map[stem]
        plt.figure()
        plt.hist(zs,500,range=(-25,25))
        plt.savefig('test_result/{}_offseted.png'.format(stem))
        plt.close()
Example 3
def testset_test_context_offset_z():
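    # Same offset sanity check as above, but for the test scenes and their cached offsets.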
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    with open('cached/semantic3d_test_stems.txt','r') as f:
        lines=f.readlines()
        fss=[fn.strip('\n').split(' ')[0] for fn in lines]
        fns=[int(fn.strip('\n').split(' ')[1]) for fn in lines]

    stem_offset_map=testset_get_context_offset_z()
    for fs,fn in zip(fss,fns):
        pts=[]
        for fni in xrange(fn):
            points, labels =read_room_pkl('data/Semantic3D.Net/context/test_large_block/{}_{}.pkl'.format(fs,fni))
            idxs=libPointUtil.gridDownsampleGPU(points,0.1,False)
            pts.append(points[idxs])

        pts=np.concatenate(pts,axis=0)
        idxs=libPointUtil.gridDownsampleGPU(pts,0.1,False)
        pts=pts[idxs]

        zs=pts[:,2]
        zs-=stem_offset_map[fs]
        plt.figure()
        plt.hist(zs,500,range=(-25,25))
        plt.savefig('test_result/{}_offseted.png'.format(fs))
        plt.close()
Example 4
def sample_large_block(beg, bi, ri, rm, fs, fn, min_p, bsize, ds_stride):
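    # Crop one square block [beg, beg+bsize) out of a rotated (rm), grid-downsampled
    # scene and save it only when it contains more than 1024 points.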
    # from io_util import get_semantic3d_class_colors
    # colors=get_semantic3d_class_colors()
    lbls, pts = [], []
    pn=0
    for i in xrange(fn):
        points, labels = read_room_pkl('data/Semantic3D.Net/pkl/train/' + fs + '_{}.pkl'.format(i))
        idxs = libPointUtil.gridDownsampleGPU(points, ds_stride, False)

        points = points[idxs]
        labels = labels[idxs]
        points[:, :3] = np.dot(points[:, :3], rm)
        offseted_points = points[:, :3]-np.expand_dims(min_p, axis=0)

        x_cond = (offseted_points[:, 0] >= beg[0]) & (offseted_points[:, 0] < beg[0] + bsize)
        y_cond = (offseted_points[:, 1] >= beg[1]) & (offseted_points[:, 1] < beg[1] + bsize)
        cond = x_cond & y_cond
        pn+=np.sum(cond)
        pts.append(points[cond])
        lbls.append(labels[cond])

    if pn>1024:
        pts = np.concatenate(pts, axis=0)
        lbls = np.concatenate(lbls, axis=0)

        print 'block {} pn {}'.format(bi,pn)
        save_room_pkl('data/Semantic3D.Net/context/large_block/' + fs + '_{}_{}.pkl'.format(bi,ri), pts, lbls)
Example 5
def test_presample():
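    # Dump the presampled test blocks of one scene as text files for visual inspection.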
    for t in xrange(17):
        points,labels=read_pkl('data/Semantic3D.Net/pkl/test_presample/MarketplaceFeldkirch_Station4_rgb_intensity-reduced_{}.pkl'.format(t))
        print points.shape
        idxs=libPointUtil.gridDownsampleGPU(points,0.1,False)
        points=points[idxs]
        output_points('test_result/{}.txt'.format(t), points)
Example 6
def semantic3d_sample_test_set():
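    # Downsample one full test scene (only the third entry here) at 0.1 m and write it out.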
    fns, pns = get_semantic3d_testset()
    for fn, pn in zip(fns[2:3], pns[2:3]):
        points, labels = read_room_pkl('data/Semantic3D.Net/pkl/test/' + fn +
                                       '.pkl')
        idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
        points = points[idxs]
        output_points('test_result/{}_color.txt'.format(fn), points)
Example 7
def test_downsample():
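    # Time gridDownsampleGPU on a synthetic sphere; the second call also returns
    # per-point voxel (group) indices, used to color the raw points by voxel.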
    if not os.path.exists('output'):
        os.mkdir('output')

    pts = uniform_sample_sphere(8192)

    begin = time.time()
    ds_idxs = libPointUtil.gridDownsampleGPU(pts, 0.2, False)

    print('cost {} s'.format(time.time() - begin))
    output_points('output/ds_pts.txt', pts[ds_idxs])

    begin = time.time()
    ds_idxs, ds_gidxs = libPointUtil.gridDownsampleGPU(pts, 0.2, True)
    print('cost {} s'.format(time.time() - begin))

    colors = np.random.randint(0, 256, [len(np.unique(ds_gidxs)), 3])
    output_points('output/raw_pts.txt', pts, colors[ds_gidxs])
Example 8
def test_labels():
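    # Write the downsampled '_0' blocks of one scene, colored by class label, for label checking.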
    import os
    fss,fns=read_semantic3d_pkl_stems()
    from io_util import get_semantic3d_class_colors
    colors=get_semantic3d_class_colors()
    for fn in os.listdir('data/Semantic3D.Net/block/train'):
        if fn.startswith(fss[6]) and fn.endswith('_0.pkl'): # or fn.endswith('_3.pkl')):
            points,labels=read_room_pkl('data/Semantic3D.Net/block/train/'+fn)
            idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
            output_points('test_result/'+fn[:-4]+'.txt',points[idxs],colors[labels[idxs],:])
Example 9
def test_downsample():
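    # Rough benchmark: repeatedly sample rotated blocks from a downsampled S3DIS room.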
    pts,_=read_room_h5('data/S3DIS/room/45_Area_2_auditorium_2.h5')

    ds_idxs,ds_gidxs=libPointUtil.gridDownsampleGPU(pts,0.05,True)

    pts=pts[ds_idxs,:]
    angle=random.random()*math.pi/2.0
    begin=time.time()
    for i in xrange(10000):
        block_idxs=libPointUtil.sampleRotatedBlockGPU(pts,1.5,3.0,angle)
Example 10
def save_results(sxyzs, qxyzs, sprobs, qprobs, prefix, fs):
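    # Write per-point predictions of the dense cloud to a .labels file and save
    # 0.3 m-downsampled sparse/dense clouds colored by predicted class.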
    colors = get_semantic3d_class_colors()
    spreds = np.argmax(sprobs[:, 1:], axis=1) + 1
    qpreds = np.argmax(qprobs[:, 1:], axis=1) + 1

    dir = 'data/Semantic3D.Net/{}'.format(prefix)
    if not os.path.exists(dir): os.mkdir(dir)
    with open('{}/{}.labels'.format(dir, fs), 'w') as f:
        for pred in qpreds:
            f.write('{}\n'.format(pred))

    idxs = libPointUtil.gridDownsampleGPU(sxyzs, 0.3, False)
    sxyzs = sxyzs[idxs]
    spreds = spreds[idxs]
    output_points('{}/{}_sparse.txt'.format(dir, fs), sxyzs, colors[spreds])

    idxs = libPointUtil.gridDownsampleGPU(qxyzs, 0.3, False)
    qxyzs = qxyzs[idxs]
    qpreds = qpreds[idxs]
    output_points('{}/{}_dense.txt'.format(dir, fs), qxyzs, colors[qpreds])
Example 11
def test_voxel_sort():
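    # Two-level voxel sort: group points into 0.15 m voxels, compute per-voxel centers
    # and offsets, repeat at 0.5 m on the centers, and dump colored clouds to verify
    # the grouping after adjusting the first-level point order.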
    pts, _ = read_room_h5('data/S3DIS/room/16_Area_1_office_15.h5')

    ds_idxs, _ = libPointUtil.gridDownsampleGPU(pts, 0.05, True)
    cxyz1 = pts[ds_idxs, :]

    cxyz1 = np.ascontiguousarray(cxyz1)
    sidxs1, vlens1 = libPointUtil.sortVoxelGPU(cxyz1, 0.15)

    cxyz1 = cxyz1[sidxs1, :]
    cxyz1 = np.ascontiguousarray(cxyz1)
    dxyz1, cxyz2 = libPointUtil.computeCenterDiffCPU(cxyz1, vlens1)

    # output
    vidxs = []
    for i, l in enumerate(vlens1):
        vidxs += [i for _ in xrange(l)]
    colors = np.random.randint(0, 256, [vlens1.shape[0], 3])
    vidxs = np.asarray(vidxs, np.int32)

    output_points('test_result/cxyz1t.txt', cxyz1, colors[vidxs, :])
    output_points('test_result/cxyz2t.txt', cxyz2, colors)

    # sort v2
    sidxs2, vlens2 = libPointUtil.sortVoxelGPU(cxyz2, 0.5)
    cxyz2 = cxyz2[sidxs2, :]
    cxyz2 = np.ascontiguousarray(cxyz2)
    dxyz2, cxyz3 = libPointUtil.computeCenterDiffCPU(cxyz2, vlens2)

    print sidxs1.shape
    print cxyz1.shape[0]
    sidxs1, vlens1 = libPointUtil.adjustPointsMemoryCPU(
        vlens1, sidxs2, cxyz1.shape[0])

    print sidxs1.shape
    cxyz1 = cxyz1[sidxs1, :]
    vidxs = []
    for i, l in enumerate(vlens1):
        vidxs += [i for _ in xrange(l)]
    colors = np.random.randint(0, 256, [vlens1.shape[0], 3])
    vidxs = np.asarray(vidxs, np.int32)

    output_points('test_result/cxyz1.txt', cxyz1, colors[vidxs, :])
    output_points('test_result/cxyz2.txt', cxyz2, colors)

    vidxs = []
    for i, l in enumerate(vlens2):
        vidxs += [i for _ in xrange(l)]
    colors = np.random.randint(0, 256, [vlens2.shape[0], 3])
    vidxs = np.asarray(vidxs, np.int32)

    output_points('test_result/cxyz2a.txt', cxyz2, colors[vidxs, :])
    output_points('test_result/cxyz3a.txt', cxyz3, colors)
Example 12
def testset_test_large_block():
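    # Dump every cached test block at 0.5 m resolution for visual inspection.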
    with open('cached/semantic3d_test_stems.txt','r') as f:
        lines=f.readlines()
        fss=[fn.strip('\n').split(' ')[0] for fn in lines]
        fns=[int(fn.strip('\n').split(' ')[1]) for fn in lines]

    for fs,fn in zip(fss,fns):
        for fni in xrange(fn):
            points,labels=read_room_pkl('data/Semantic3D.Net/context/test_large_block/{}_{}.pkl'.format(fs,fni))
            idxs=libPointUtil.gridDownsampleGPU(points,0.5,False)
            points=points[idxs]
            output_points('test_result/{}_{}.txt'.format(fs,fni),points)
Example 13
def global_downsample():
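    # Cache a coarse (2 m) global point cloud per scene and rotation index k,
    # built from the 0.1 m-downsampled large blocks.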
    with open('cached/semantic3d_stems.txt', 'r') as f:
        stems = [line.split(' ')[0] for line in f.readlines()]

    train_list = read_large_block_list()
    for stem in stems:
        for k in xrange(6):
            pts, lbls = [], []
            for tfs in train_list:
                if (not tfs.startswith(stem)) or (not tfs.endswith('{}.pkl'.format(k))): continue
                fs = 'data/Semantic3D.Net/context/large_block/' + tfs
                points, labels = read_room_pkl(fs)  # [n,6],[n,1]
                idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
                pts.append(points[idxs])
                lbls.append(labels[idxs])

            pts = np.concatenate(pts, axis=0)
            idxs = libPointUtil.gridDownsampleGPU(pts, 2.0, False)
            pts = pts[idxs]

            # output_points('test_result/{}_{}_ds.txt'.format(stem,k),pts)
            save_pkl('data/Semantic3D.Net/context/global/{}_{}.pkl'.format(stem,k),pts)
Example 14
def semantic3d_sample_trainset_offset_z():
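    # Training-set version of the z-offset estimation: histogram the heights of each
    # scene's merged '0.pkl' blocks, save the plot and write the offset to a cache file.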
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    with open('cached/semantic3d_stems.txt', 'r') as f:
        stems = [line.split(' ')[0] for line in f.readlines()]

    train_list = semantic3d_read_train_block_list()
    f = open('cached/semantic3d_train_offsetz.txt', 'w')
    for stem in stems:
        pts, lbls = [], []
        for tfs in train_list:
            if (not tfs.startswith(stem)) or (not tfs.endswith('0.pkl')):
                continue
            fs = 'data/Semantic3D.Net/block/train/' + tfs
            points, labels = read_room_pkl(fs)  # [n,6],[n,1]
            idxs = libPointUtil.gridDownsampleGPU(points, 0.2, False)
            pts.append(points[idxs])
            lbls.append(labels[idxs])

        pts = np.concatenate(pts, axis=0)
        idxs = libPointUtil.gridDownsampleGPU(pts, 0.1, False)
        pts = pts[idxs]

        zs = pts[:, 2]
        min_z = np.min(zs)
        zs -= np.min(zs)
        plt.figure()
        plt.hist(zs, 200, range=(0, 20))
        plt.savefig('test_result/{}.png'.format(stem))
        plt.close()

        hist, _ = np.histogram(zs, np.arange(0.0, 20.0, 0.1), range=(0, 20))
        offset_z = np.argmax(hist) * 0.1 + min_z
        f.write('{} {}\n'.format(stem, offset_z))

    f.close()
Example 15
def test_hierachy():
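    # Build a two-level voxel hierarchy (0.15 m / 0.5 m) on a downsampled room and dump
    # colored clouds; the dxyz offsets are added back to the parent centers as a check.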
    pts, lbls = read_room_h5('data/S3DIS/room/12_Area_1_office_11.h5')
    ds_idxs = libPointUtil.gridDownsampleGPU(pts, 0.05, False)
    pts = pts[ds_idxs, :]
    lbls = lbls[ds_idxs]
    xyzs = pts[:, :3]
    rgbs = pts[:, 3:]
    cxyz1, dxyz1, vlens1, cxyz2, dxyz2, vlens2, cxyz3, feats_list = build_hierarchy(
        xyzs, [rgbs, lbls], 0.15, 0.5)
    rgbs, lbls = feats_list

    output_points('test_result/cxyz1_rgb.txt', cxyz1, rgbs)
    colors = get_class_colors()
    output_points('test_result/cxyz1_lbl.txt', cxyz1,
                  colors[lbls.flatten(), :])

    # test cxyz
    vidxs = []
    for i, l in enumerate(vlens1):
        vidxs += [i for _ in xrange(l)]
    colors = np.random.randint(0, 256, [vlens1.shape[0], 3])
    vidxs = np.asarray(vidxs, np.int32)

    output_points('test_result/cxyz1.txt', cxyz1, colors[vidxs, :])
    output_points('test_result/cxyz2.txt', cxyz2, colors)

    vidxs = []
    for i, l in enumerate(vlens2):
        vidxs += [i for _ in xrange(l)]
    colors = np.random.randint(0, 256, [vlens2.shape[0], 3])
    vidxs = np.asarray(vidxs, np.int32)

    output_points('test_result/cxyz2a.txt', cxyz2, colors[vidxs, :])
    output_points('test_result/cxyz3a.txt', cxyz3, colors)

    # test dxyz
    c = 0
    for k, l in enumerate(vlens1):
        for t in xrange(l):
            dxyz1[c + t] += cxyz2[k]
        c += l
    output_points('test_result/dxyz1.txt', dxyz1)

    c = 0
    for k, l in enumerate(vlens2):
        for t in xrange(l):
            dxyz2[c + t] += cxyz3[k]
        c += l
    output_points('test_result/dxyz2.txt', dxyz2)
Example 16
def test_block():
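    # Merge each scene's '_0' blocks at 0.1 m and write a label-colored and a plain version.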
    import os
    fss,fns=read_semantic3d_pkl_stems()
    from draw_util import get_semantic3d_class_colors
    colors=get_semantic3d_class_colors()
    for fs in fss:
        all_points,all_labels=[],[]
        for fn in os.listdir('data/Semantic3D.Net/block/train'):
            if fn.startswith(fs) and fn.endswith('_0.pkl'): # or fn.endswith('_3.pkl')):
                points,labels=read_room_pkl('data/Semantic3D.Net/block/train/'+fn)
                idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
                all_points.append(points[idxs])
                all_labels.append(labels[idxs])

        all_points = np.concatenate(all_points, axis=0)
        all_labels = np.concatenate(all_labels, axis=0)
        output_points('test_result/'+fs+'_labels.txt',all_points,colors[all_labels,:])
        output_points('test_result/'+fs+'_colors.txt',all_points)
Example 17
def get_intensity_distribution():
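    # Gather the last (intensity) channel of all downsampled training blocks,
    # print min/max/mean/std and save a histogram.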
    fss,fns=read_semantic3d_pkl_stems()
    intensities=[]
    for fs,fn in zip(fss,fns):
        for i in xrange(fn):
            points,labels=read_room_pkl('data/Semantic3D.Net/pkl/train/'+fs+'_{}.pkl'.format(i))
            idxs=libPointUtil.gridDownsampleGPU(points,0.1,False)
            intensities.append(points[idxs,-1])

    intensities=np.concatenate(intensities,axis=0)
    print np.min(intensities),np.max(intensities)
    print np.mean(intensities),np.std(intensities)

    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.hist(intensities,100)
    plt.savefig('test_result/intensities.png')
    plt.close()
Example 18
def global_avg_downsample():
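    # Like global_downsample, but averages points into 2 m cells via a small TF session
    # and appends covariance features computed from 4 m neighborhoods; NaN covariances
    # abort with a diagnostic print.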
    with open('cached/semantic3d_stems.txt', 'r') as f:
        stems = [line.split(' ')[0] for line in f.readlines()]

    sess, pts_pl, ds_pts_op = build_avg_ds_session(ds_size=2.0, min_coor=3000.0)

    train_list = read_large_block_list()
    for stem in stems:
        for k in xrange(6):
            pts, lbls = [], []
            for tfs in train_list:
                if (not tfs.startswith(stem)) or (not tfs.endswith('{}.pkl'.format(k))): continue
                fs = 'data/Semantic3D.Net/context/large_block/' + tfs
                points, labels = read_room_pkl(fs)  # [n,6],[n,1]
                idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
                pts.append(points[idxs])
                lbls.append(labels[idxs])

            # downsample
            pts = np.concatenate(pts, axis=0)
            ds_pts=sess.run(ds_pts_op,feed_dict={pts_pl:pts})

            # compute covar
            ds_xyzs=np.ascontiguousarray(ds_pts[:,:3],np.float32)
            xyzs=np.ascontiguousarray(pts[:,:3],np.float32)
            nidxs=libPointUtil.findNeighborInAnotherCPU(xyzs,ds_xyzs,4.0)
            nlens=np.ascontiguousarray([len(idxs) for idxs in nidxs],np.int32)
            nbegs=compute_nidxs_bgs(nlens)
            nidxs=np.ascontiguousarray(np.concatenate(nidxs,axis=0),dtype=np.int32)
            covars=libPointUtil.computeCovarsGPU(xyzs,nidxs,nlens,nbegs)
            if np.sum(np.isnan(covars))>0:
                print stem,k
                idxs,_=np.nonzero(np.isnan(covars))
                for idx in idxs:
                    print '{} {}'.format(idx,nlens[idx])

                exit(0)

            ds_pts=np.concatenate([ds_pts,covars],axis=1)

            # output_points('test_result/{}_{}_ds.txt'.format(stem,k),pts)
            save_pkl('data/Semantic3D.Net/context/global_avg/{}_{}.pkl'.format(stem,k),ds_pts)
Example 19
def testset_global_downsample_avg():
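    # Test-set counterpart of global_avg_downsample: averaged 2 m downsample plus
    # covariance features, cached per scene.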
    with open('cached/semantic3d_test_stems.txt','r') as f:
        lines=f.readlines()
        fss=[fn.strip('\n').split(' ')[0] for fn in lines]
        fns=[int(fn.strip('\n').split(' ')[1]) for fn in lines]

    sess, pts_pl, ds_pts_op = build_avg_ds_session(ds_size=2.0, min_coor=3000.0)

    for fs,fn in zip(fss,fns):
        pts=[]
        for fni in xrange(fn):
            points, labels = read_room_pkl('data/Semantic3D.Net/context/test_large_block/{}_{}.pkl'.format(fs,fni))  # [n,6],[n,1]
            idxs = libPointUtil.gridDownsampleGPU(points, 0.1, False)
            pts.append(points[idxs])

        # downsample
        pts = np.concatenate(pts, axis=0)
        ds_pts=sess.run(ds_pts_op,feed_dict={pts_pl:pts})

        # compute covar
        ds_xyzs=np.ascontiguousarray(ds_pts[:,:3],np.float32)
        xyzs=np.ascontiguousarray(pts[:,:3],np.float32)
        nidxs=libPointUtil.findNeighborInAnotherCPU(xyzs,ds_xyzs,4.0)
        nlens=np.ascontiguousarray([len(idxs) for idxs in nidxs],np.int32)
        nbegs=compute_nidxs_bgs(nlens)
        nidxs=np.ascontiguousarray(np.concatenate(nidxs,axis=0),dtype=np.int32)
        covars=libPointUtil.computeCovarsGPU(xyzs,nidxs,nlens,nbegs)
        if np.sum(np.isnan(covars))>0:
            print fs
            idxs,_=np.nonzero(np.isnan(covars))
            for idx in idxs:
                print '{} {}'.format(idx,nlens[idx])

            exit(0)

        ds_pts=np.concatenate([ds_pts,covars],axis=1)

        # output_points('test_result/{}_{}_ds.txt'.format(stem,k),pts)
        save_pkl('data/Semantic3D.Net/context/test_global_avg/{}.pkl'.format(fs),ds_pts)
Example 20
def voxel_downsample_idxs(xyzs, voxel_len, use_gpu=True):
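    # Grid downsample: use the GPU implementation when available; otherwise bin points
    # into voxels of voxel_len and keep one randomly chosen point per occupied voxel.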
    if use_gpu:
        import libPointUtil
        ds_idxs = libPointUtil.gridDownsampleGPU(xyzs, voxel_len, False)
    else:
        loc2pt = {}
        for pt_index, pt in enumerate(xyzs):
            x_index = int(math.ceil(pt[0] / voxel_len))
            y_index = int(math.ceil(pt[1] / voxel_len))
            z_index = int(math.ceil(pt[2] / voxel_len))
            loc = (x_index, y_index, z_index)
            if loc in loc2pt:
                loc2pt[loc].append(pt_index)
            else:
                loc2pt[loc] = [pt_index]

        ds_idxs = []
        for k, v in loc2pt.items():
            grid_index = int(np.random.randint(0, len(v), 1))
            ds_idxs.append(v[grid_index])

    return ds_idxs
Example 21
def test_covars():
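    # Time computeCovarsGPU on 0.2 m radius neighborhoods, then cluster the covariance
    # features with KMeans and color the points by cluster.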
    pts, _ = read_room_h5('data/S3DIS/room/16_Area_1_office_15.h5')

    ds_idxs, _ = libPointUtil.gridDownsampleGPU(pts, 0.05, True)
    pts = pts[ds_idxs, :]

    spts = np.ascontiguousarray(pts[:, :3])
    nidxs = libPointUtil.findNeighborRadiusGPU(spts, 0.2)
    nidxs_lens = np.asarray([len(idxs) for idxs in nidxs], dtype=np.int32)
    nidxs_bgs = compute_nidxs_bgs(nidxs_lens)
    nidxs = np.concatenate(nidxs, axis=0)

    begin = time.time()
    for i in xrange(10000):
        covars = libPointUtil.computeCovarsGPU(spts, nidxs, nidxs_lens,
                                               nidxs_bgs)
    print 'cost {} s'.format(time.time() - begin)

    from sklearn.cluster import KMeans
    kmeans = KMeans(5)
    preds = kmeans.fit_predict(covars)
    colors = np.random.randint(0, 255, [5, 3])
    output_points('test_result/cluster.txt', pts, colors[preds, :])
Example 22
def test_big_block():
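    # Dump the downsampled training blocks of one scene, grouped by rotation index,
    # with and without class-label colors.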
    fss = semantic3d_read_train_block_list()
    random.shuffle(fss)
    colors = get_semantic3d_class_colors()
    fss = [
        fs for fs in fss
        if fs.startswith('untermaederbrunnen_station1_xyz_intensity_rgb')
    ]
    orientations = [[] for _ in xrange(6)]
    for t in xrange(6):
        for fs in fss:
            if fs.split('_')[-1].startswith(str(t)):
                orientations[t].append(fs)

    for i in xrange(6):
        for t, fs in enumerate(orientations[i]):
            points, labels = read_pkl('data/Semantic3D.Net/block/train/' + fs)
            idxs = libPointUtil.gridDownsampleGPU(points, 0.2, False)
            points = points[idxs]
            labels = labels[idxs]
            print points.shape
            output_points('test_result/{}_{}_colors.txt'.format(i, t), points)
            output_points('test_result/{}_{}_labels.txt'.format(i, t), points,
                          colors[labels, :])
Example 23
def sample_block_scannet(points,
                         labels,
                         ds_stride,
                         block_size,
                         block_stride,
                         min_pn,
                         use_rescale=False,
                         use_flip=False,
                         use_rotate=False,
                         covar_nn_size=0.1):
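    # ScanNet block sampler: optional swap/flip/rescale/rotate augmentation, grid
    # downsample at ds_stride, covariance features from covar_nn_size neighborhoods,
    # then split into blocks of block_size via uniform_sample_block.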
    xyzs = np.ascontiguousarray(points[:, :3])
    min_xyzs = np.min(xyzs, axis=0, keepdims=True)
    max_xyzs = np.max(xyzs, axis=0, keepdims=True)

    # flip
    if use_flip:
        if random.random() < 0.5:
            xyzs = swap_xy(xyzs)
            min_xyzs = swap_xy(min_xyzs)
            max_xyzs = swap_xy(max_xyzs)

        if random.random() < 0.5:
            xyzs = flip(xyzs, axis=0)
            min_xyzs[:, 0], max_xyzs[:, 0] = -max_xyzs[:, 0], -min_xyzs[:, 0]

        if random.random() < 0.5:
            xyzs = flip(xyzs, axis=1)
            min_xyzs[:, 1], max_xyzs[:, 1] = -max_xyzs[:, 1], -min_xyzs[:, 1]

    # rescale
    if use_rescale:
        rescale = np.random.uniform(0.9, 1.1, [1, 3])
        xyzs[:, :3] *= rescale
        min_xyzs *= rescale
        max_xyzs *= rescale

    # rotate
    if use_rotate:
        if random.random() > 0.3:
            angle = random.random() * np.pi / 2.0
            xyzs = rotate(xyzs, angle)
            min_xyzs = np.min(xyzs, axis=0, keepdims=True)
            max_xyzs = np.max(xyzs, axis=0, keepdims=True)

    ds_idxs = libPointUtil.gridDownsampleGPU(xyzs, ds_stride, False)

    covar_nidxs = libPointUtil.findNeighborRadiusGPU(xyzs, ds_idxs,
                                                     covar_nn_size)

    covar_nidxs_lens = np.ascontiguousarray(
        [len(idxs) for idxs in covar_nidxs], np.int32)
    covar_nidxs_bgs = compute_nidxs_bgs(covar_nidxs_lens)
    covar_nidxs = np.ascontiguousarray(np.concatenate(covar_nidxs, axis=0),
                                       dtype=np.int32)

    covars = libPointUtil.computeCovarsGPU(xyzs, covar_nidxs, covar_nidxs_lens,
                                           covar_nidxs_bgs)

    xyzs = xyzs[ds_idxs, :]
    lbls = labels[ds_idxs]

    xyzs -= min_xyzs
    idxs = uniform_sample_block(xyzs,
                                block_size,
                                block_stride,
                                normalized=True,
                                min_pn=min_pn)
    xyzs += min_xyzs
    xyzs, covars, lbls = fetch_subset([xyzs, covars, lbls], idxs)

    return xyzs, covars, lbls
Example 24
def sample_block(points,
                 labels,
                 ds_stride,
                 block_size,
                 block_stride,
                 min_pn,
                 use_rescale=False,
                 swap=False,
                 flip_x=False,
                 flip_y=False,
                 covar_ds_stride=0.03,
                 covar_nn_size=0.1):
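    # S3DIS block sampler: pre-downsample at covar_ds_stride, apply optional swap/flip/
    # rescale augmentation, compute covariance features, then split into blocks.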

    xyzs = np.ascontiguousarray(points[:, :3])
    rgbs = np.ascontiguousarray(points[:, 3:])
    min_xyzs = np.min(xyzs, axis=0, keepdims=True)
    max_xyzs = np.max(xyzs, axis=0, keepdims=True)

    covar_ds_idxs = libPointUtil.gridDownsampleGPU(xyzs, covar_ds_stride,
                                                   False)
    covar_ds_xyzs = np.ascontiguousarray(xyzs[covar_ds_idxs, :])

    # flip
    if swap:
        covar_ds_xyzs = swap_xy(covar_ds_xyzs)
        min_xyzs = swap_xy(min_xyzs)
        max_xyzs = swap_xy(max_xyzs)
    if flip_x:
        covar_ds_xyzs = flip(covar_ds_xyzs, axis=0)
        min_xyzs[:, 0], max_xyzs[:, 0] = -max_xyzs[:, 0], -min_xyzs[:, 0]

    if flip_y:
        covar_ds_xyzs = flip(covar_ds_xyzs, axis=1)
        min_xyzs[:, 1], max_xyzs[:, 1] = -max_xyzs[:, 1], -min_xyzs[:, 1]

    # rescale
    if use_rescale:
        rescale = np.random.uniform(0.9, 1.1, [1, 3])
        covar_ds_xyzs[:, :3] *= rescale
        min_xyzs *= rescale
        max_xyzs *= rescale

    ds_idxs = libPointUtil.gridDownsampleGPU(covar_ds_xyzs, ds_stride, False)

    covar_nidxs = libPointUtil.findNeighborRadiusCPU(covar_ds_xyzs, ds_idxs,
                                                     covar_nn_size)

    covar_nidxs_lens = np.ascontiguousarray(
        [len(idxs) for idxs in covar_nidxs], np.int32)
    covar_nidxs_bgs = compute_nidxs_bgs(covar_nidxs_lens)
    covar_nidxs = np.ascontiguousarray(np.concatenate(covar_nidxs, axis=0),
                                       dtype=np.int32)

    covars = libPointUtil.computeCovarsGPU(covar_ds_xyzs, covar_nidxs,
                                           covar_nidxs_lens, covar_nidxs_bgs)

    xyzs = covar_ds_xyzs[ds_idxs, :]
    rgbs = rgbs[covar_ds_idxs, :][ds_idxs, :]
    lbls = labels[covar_ds_idxs][ds_idxs]

    xyzs -= min_xyzs
    idxs = uniform_sample_block(xyzs,
                                block_size,
                                block_stride,
                                normalized=True,
                                min_pn=min_pn)
    xyzs += min_xyzs

    xyzs, rgbs, covars, lbls = fetch_subset([xyzs, rgbs, covars, lbls], idxs)

    return xyzs, rgbs, covars, lbls
Example 25
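        # Excerpt from a test-set inference loop: interpolate sparse predictions onto the
        # dense query points in chunks, write a .labels file and dump downsampled,
        # class-colored sparse/dense clouds.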
        qprobs = []
        for t in xrange(qrn):
            beg_idxs = t * rn
            end_idxs = min((t + 1) * rn, qn)
            qrprobs = interpolate(sxyzs, sprobs, qxyzs[beg_idxs:end_idxs])
            print 'interpolate {} done'.format(t)
            qprobs.append(qrprobs)

        qprobs = np.concatenate(qprobs, axis=0)
        qpreds = np.argmax(qprobs[:, 1:], axis=1) + 1

        colors = get_semantic3d_class_colors()
        spreds = np.argmax(sprobs[:, 1:], axis=1) + 1

        print 'total cost {} s'.format(time.time() - begin)

        with open('data/Semantic3D.Net/{}.labels'.format(fn), 'w') as f:
            for p in qpreds:
                f.write('{}\n'.format(p))

        idxs = libPointUtil.gridDownsampleGPU(sxyzs, 0.1, False)
        sxyzs = sxyzs[idxs]
        spreds = spreds[idxs]
        output_points('test_result/{}_sparse.txt'.format(fn), sxyzs,
                      colors[spreds, :])
        idxs = libPointUtil.gridDownsampleGPU(qxyzs, 0.1, False)
        qxyzs = qxyzs[idxs]
        qpreds = qpreds[idxs]
        output_points('test_result/{}_dense.txt'.format(fn), qxyzs,
                      colors[qpreds, :])
Example 26
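        # Excerpt: write a Chrome trace timeline from TF run metadata; the __main__ block
        # below exercises rotated-block sampling and radius neighbor search on an S3DIS room.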
        # Create the Timeline object, and write it to a json
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open('timeline3.json', 'w') as f:
            f.write(ctf)

        print 'cost {} s'.format((time.time()-begin)/100)

    return vals


if __name__ == "__main__":
    pts,_=read_room_h5('data/S3DIS/room/16_Area_1_office_15.h5')

    ds_idxs,_=libPointUtil.gridDownsampleGPU(pts,0.05,True)
    pts=pts[ds_idxs,:]
    block_idxs=libPointUtil.sampleRotatedBlockGPU(pts,1.5,3.0,0.0)
    block_idxs=[idxs for idxs in block_idxs if len(idxs)>2048]
    print 'mean block pt_num: {}'.format(np.mean(np.asarray([len(idxs) for idxs in block_idxs])))
    bid=np.random.randint(0,len(block_idxs))
    pts=pts[block_idxs[bid],:]

    spts=np.ascontiguousarray(pts[:,:3])
    nidxs=libPointUtil.findNeighborRadiusGPU(spts,0.08)
    nidxs_lens=np.asarray([len(idxs) for idxs in nidxs],dtype=np.int32)
    nidxs_bgs=compute_nidxs_bgs(nidxs_lens)
    cidxs=compute_cidxs(nidxs_lens)
    nidxs=np.concatenate(nidxs,axis=0)
    print 'pn*n: {}'.format(nidxs.shape)
    print 'pn: {}'.format(nidxs_bgs.shape)
Example 27
def sample_context_block(tfs, points, labels, global_points, ds_stride, block_size, block_stride, min_pn,
                         use_rescale=False, use_flip=False, covar_ds_stride=0.03, covar_nn_size=0.1,
                         context_len=50.0):
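    # Context-aware block sampler: jointly augment the block and its global context points,
    # compute covariance features, split into blocks, then attach per-block context point
    # sets (within context_len) and their indices; an empty context raises RuntimeError.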

    xyzs=np.ascontiguousarray(points[:,:3])
    rgbs=np.ascontiguousarray(points[:,3:])
    min_xyz=np.min(xyzs,axis=0,keepdims=True)
    max_xyz=np.max(xyzs,axis=0,keepdims=True)
    covar_ds_idxs=libPointUtil.gridDownsampleGPU(xyzs, covar_ds_stride, False)
    covar_ds_xyzs=np.ascontiguousarray(xyzs[covar_ds_idxs,:])

    # flip
    if use_flip:
        if random.random()<0.5:
            covar_ds_xyzs=swap_xy(covar_ds_xyzs)
            global_points=swap_xy(global_points)
            min_xyz=swap_xy(min_xyz)
            max_xyz=swap_xy(max_xyz)

        if random.random()<0.5:
            covar_ds_xyzs=flip(covar_ds_xyzs,axis=0)
            global_points=flip(global_points,axis=0)
            min_xyz[:,0],max_xyz[:,0]=-max_xyz[:,0],-min_xyz[:,0]

        if random.random()<0.5:
            covar_ds_xyzs=flip(covar_ds_xyzs,axis=1)
            global_points=flip(global_points,axis=1)
            min_xyz[:,1],max_xyz[:,1]=-max_xyz[:,1],-min_xyz[:,1]

    # rescale
    if use_rescale:
        rescale=np.random.uniform(0.9,1.1,[1,3])
        covar_ds_xyzs[:,:3]*=rescale
        global_points[:,:3]*=rescale
        min_xyz*=rescale
        max_xyz*=rescale

    ds_idxs=libPointUtil.gridDownsampleGPU(covar_ds_xyzs,ds_stride,False)

    # compute covar
    covar_nidxs=libPointUtil.findNeighborRadiusCPU(covar_ds_xyzs,ds_idxs,covar_nn_size)
    covar_nidxs_lens=np.ascontiguousarray([len(idxs) for idxs in covar_nidxs],np.int32)
    covar_nidxs_bgs=compute_nidxs_bgs(covar_nidxs_lens)
    covar_nidxs=np.ascontiguousarray(np.concatenate(covar_nidxs,axis=0),dtype=np.int32)
    covars=libPointUtil.computeCovarsGPU(covar_ds_xyzs,covar_nidxs,covar_nidxs_lens,covar_nidxs_bgs)

    xyzs=covar_ds_xyzs[ds_idxs,:]
    rgbs=rgbs[covar_ds_idxs,:][ds_idxs,:]
    lbls=labels[covar_ds_idxs][ds_idxs]

    xyzs-=min_xyz
    idxs=uniform_sample_block(xyzs,block_size,block_stride,min_pn=min_pn)
    xyzs+=min_xyz

    xyzs, rgbs, covars, lbls=fetch_subset([xyzs,rgbs,covars,lbls],idxs)

    context_xyzs=compute_context_xyzs(global_points,xyzs,context_len=context_len)

    for ci,ctx_xyz in enumerate(context_xyzs):
        if ctx_xyz.shape[0]==0:
            print '!!!! error {}'.format(tfs)
            raise RuntimeError

    context_idxs=compute_context_idxs(context_xyzs,xyzs)

    return xyzs, rgbs, covars, lbls, context_xyzs, context_idxs
Example 28
def downsample(points,labels,stride):
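    # Grid-downsample points and labels with the same voxel indices.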
    idxs=libPointUtil.gridDownsampleGPU(points, stride, False)
    points=points[idxs,:]
    labels=labels[idxs,:]

    return points,labels