Code example #1
def process_revise():
    qxyzs=read_pkl('cached/sg28_qxyzs.pkl')
    qprobs=read_pkl('cached/sg28_qprobs.pkl')
    sxyzs=read_natural_terrain()

    qprobs=interpolate_natural_terrain(sxyzs,qxyzs,qprobs)

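    # the sparse-point probabilities passed to save_results below are random
    # placeholders (8 values per point, presumably one per class)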
    save_results(sxyzs, qxyzs, np.random.uniform(0,1.0,[sxyzs.shape[0],8]),
                 qprobs, FLAGS.prefix, 'sg28_revise')
Code example #2
def prepare_subset_single_file(fn, sstride, bsize, bstride, min_pn, use_scale,
                               use_swap, use_flip_x, use_flip_y, resample,
                               jitter_color):
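    # covar_ds_stride, covar_nn_size and max_pt_num are module-level settings
    # in the original code (not defined in this excerpt)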
    points, labels = read_pkl(fn)
    xyzs, rgbs, covars, lbls = sample_block(points,
                                            labels,
                                            sstride,
                                            bsize,
                                            bstride,
                                            min_pn=min_pn,
                                            use_rescale=use_scale,
                                            swap=use_swap,
                                            flip_x=use_flip_x,
                                            flip_y=use_flip_y,
                                            covar_ds_stride=covar_ds_stride,
                                            covar_nn_size=covar_nn_size)

    xyzs, rgbs, covars, lbls, block_mins = normalize_block(
        xyzs,
        rgbs,
        covars,
        lbls,
        bsize,
        max_pt_num,
        resample=resample,
        resample_low=0.8,
        resample_high=1.0,
        jitter_color=jitter_color,
        jitter_val=2.5)

    return xyzs, rgbs, covars, lbls, block_mins
Code example #3
    def train_fn(model, fn):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(fn)
        for i in xrange(len(xyzs)):
            pt_num = len(xyzs[i])
            if pt_num > 4096:
                idxs = np.random.choice(
                    pt_num, int(np.random.uniform(0.85, 1.0) * pt_num), False)
                xyzs[i] = xyzs[i][idxs]
                rgbs[i] = rgbs[i][idxs]
                covars[i] = covars[i][idxs]
                lbls[i] = lbls[i][idxs]

            pt_num = len(xyzs[i])
            if pt_num > 20480:
                idxs = np.random.choice(pt_num, 20480, False)
                xyzs[i] = xyzs[i][idxs]
                rgbs[i] = rgbs[i][idxs]
                covars[i] = covars[i][idxs]
                lbls[i] = lbls[i][idxs]

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=0)

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=1)

            if random.random() < 0.5:
                xyzs[i] = swap_xy(xyzs[i])

            jitter_color = np.random.uniform(-0.02, 0.02, rgbs[i].shape)
            rgbs[i] += jitter_color
            rgbs[i][rgbs[i] > 1.0] = 1.0
            rgbs[i][rgbs[i] < -1.0] = -1.0

        return xyzs, rgbs, covars, lbls, block_mins
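
flip and swap_xy are not defined in these excerpts; the following is a minimal
sketch of what such augmentation helpers could look like (assumed semantics:
flip mirrors one coordinate axis in place, swap_xy exchanges the x and y
columns):

import numpy as np

def flip(pts, axis=0):
    # mirror the chosen coordinate axis; blocks are re-normalized downstream,
    # so negating the coordinate is assumed to be sufficient here
    pts[:, axis] = -pts[:, axis]
    return pts

def swap_xy(pts):
    # exchange the x and y columns (the right-hand side is evaluated first,
    # so this swaps in place)
    pts[:, [0, 1]] = pts[:, [1, 0]]
    return pts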
Code example #4
def merge_train_by_area():
    from io_util import get_block_train_test_split
    train_list, test_list = get_block_train_test_split()
    random.shuffle(train_list)
    f = open('cached/s3dis_merged_train.txt', 'w')
    for ai in xrange(1, 7):
        cur_data = [[] for _ in xrange(5)]
        cur_idx = 0
        for fn in train_list:
            an = get_area(fn)
            if an != ai: continue
            data = read_pkl('data/S3DIS/sampled_train_new/' + fn)
            for i in xrange(5):
                cur_data[i] += data[i]

            if len(cur_data[0]) > 1000:
                save_pkl(
                    'data/S3DIS/merged_train_new/{}_{}.pkl'.format(
                        ai, cur_idx), cur_data)
                f.write('data/S3DIS/merged_train_new/{}_{}.pkl\n'.format(
                    ai, cur_idx))
                cur_idx += 1
                cur_data = [[] for _ in xrange(5)]

        if len(cur_data[0]) > 0:
            save_pkl(
                'data/S3DIS/merged_train_new/{}_{}.pkl'.format(ai, cur_idx),
                cur_data)
            f.write('data/S3DIS/merged_train_new/{}_{}.pkl\n'.format(
                ai, cur_idx))
            cur_idx += 1

        print 'area {} done'.format(ai)

    f.close()
Code example #5
def compute_weight():
    from io_util import get_block_train_test_split, get_class_names
    import numpy as np
    import matplotlib.pyplot as plt
    train_list, test_list = get_block_train_test_split()

    test_list = ['data/S3DIS/sampled_test/' + fs for fs in test_list]
    train_list = ['data/S3DIS/sampled_train/' + fs for fs in train_list]
    test_list += train_list
    labels = []
    for fs in test_list:
        labels += read_pkl(fs)[4]
    labels = np.concatenate(labels, axis=0)

    labelweights, _ = np.histogram(labels, range(14))
    plt.figure(0, figsize=(10, 8), dpi=80)
    plt.bar(np.arange(len(labelweights)),
            labelweights,
            tick_label=get_class_names())
    plt.savefig('s3dis_dist.png')
    plt.close()

    print labelweights
    labelweights = labelweights.astype(np.float32)
    labelweights = labelweights / np.sum(labelweights)
    labelweights = 1 / np.log(1.2 + labelweights)

    print labelweights
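
The final transform above is the usual inverse-log reweighting,
w_c = 1 / log(1.2 + f_c) with f_c the relative frequency of class c, so
dominant classes are down-weighted smoothly. A quick illustration with
made-up frequencies:

import numpy as np
freqs = np.array([0.5, 0.1, 0.01])   # hypothetical class frequencies
print 1 / np.log(1.2 + freqs)        # -> roughly [1.88, 3.81, 5.25]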
Code example #6
def semantic_read_pkl():
    train_list,test_list=get_semantic3d_block_train_list()
    train_list=['data/Semantic3D.Net/block/sampled/merged/'+fn for fn in train_list]
    test_list=['data/Semantic3D.Net/block/sampled/merged/'+fn for fn in test_list]
    train_list+=test_list
    total_block_num=0
    label_count=np.zeros(9)
    min_label,max_label=0,0
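    # sanity check: every block's min corner should lie inside the expected
    # [-10,-10,0] bound (up to eps) while label occurrences are tallied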
    for fs in train_list:
        xyzs,rgbs,covars,labels=read_pkl(fs)
        total_block_num+=len(xyzs)
        for i in xrange(len(xyzs)):
            # print np.min(xyzs[i],axis=0),np.max(xyzs[i],axis=0)
            # print np.min(rgbs[i],axis=0),np.max(rgbs[i],axis=0)
            eps=1e-3
            min_val=np.array([-10.0,-10.0,0.0])-eps
            val=np.min(xyzs[i],axis=0)-min_val
            if val[0]<0:
                print val
            assert val[0]>=0
            assert val[1]>=0
            assert val[2]>=0
            count,_=np.histogram(labels[i],np.arange(10))
            label_count+=count
            max_label=max(np.max(labels[i]),max_label)
            min_label=min(np.min(labels[i]),min_label)

    print total_block_num
    print label_count
    print max_label,min_label
Code example #7
def test_presample():
    for t in xrange(17):
        points,labels=read_pkl('data/Semantic3D.Net/pkl/test_presample/MarketplaceFeldkirch_Station4_rgb_intensity-reduced_{}.pkl'.format(t))
        print points.shape
        idxs=libPointUtil.gridDownsampleGPU(points,0.1,False)
        points=points[idxs]
        output_points('test_result/{}.txt'.format(t), points)
Code example #8
def merge_train_files():
    with open('cached/semantic3d_stems.txt','r') as f:
        stems=[line.split(' ')[0] for line in f.readlines()]
    with open('cached/semantic3d_train_pkl.txt','r') as f:
        fs=[line.strip('\n') for line in f.readlines()]

    of=open('cached/semantic3d_merged_train.txt','w')
    for s in stems:
        idx=0
        all_data=[[] for _ in xrange(4)]
        for fn in fs:
            if not fn.startswith(s):
                continue
            data=read_pkl('data/Semantic3D.Net/block/sampled/train/'+fn)

            for i in xrange(4):
                all_data[i]+=data[i]

            if len(all_data[0])>300:
                print len(all_data[0])
                save_pkl('data/Semantic3D.Net/block/sampled/merged/'+s+'_{}.pkl'.format(idx),all_data)
                all_data=[[] for _ in xrange(4)]
                idx+=1

        if len(all_data[0])>0:
            save_pkl('data/Semantic3D.Net/block/sampled/merged/'+s+'_{}.pkl'.format(idx),all_data)
            idx+=1

        of.write('{} {}\n'.format(s,idx))
        print '{} done'.format(s)

    of.close()
Code example #9
    def train_fn(model, filename):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(filename)

        num = len(xyzs)
        for i in xrange(num):
            # pt_num=len(xyzs[i])
            # ds_ratio=np.random.uniform(0.8,1.0)
            # idxs=np.random.choice(pt_num,int(ds_ratio*pt_num),False)
            #
            # xyzs[i]=xyzs[i][idxs]
            # rgbs[i]=rgbs[i][idxs]
            # covars[i]=covars[i][idxs]
            # lbls[i]=lbls[i][idxs]

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=0)

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=1)

            if random.random() < 0.5:
                xyzs[i] = swap_xy(xyzs[i])

            jitter_color = np.random.uniform(-0.02, 0.02, rgbs[i].shape)
            rgbs[i] += jitter_color

        return xyzs, rgbs, covars, lbls, block_mins
Code example #10
def test_fn(model, filename):
    xyzs, rgbs, covars, lbls, block_mins = read_pkl(filename)
    for i in xrange(len(xyzs)):
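        # resample every block to exactly 15360 points; replace=True so
        # blocks with fewer points are upsampled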
        idxs = np.random.choice(len(xyzs[i]), 15360, True)
        xyzs[i] = xyzs[i][idxs]
        rgbs[i] = rgbs[i][idxs]
        covars[i] = covars[i][idxs]
        lbls[i] = lbls[i][idxs]

    return xyzs, rgbs, covars, lbls, block_mins
Code example #11
def train():
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_context_train_test(test_set)
    train_list = [
        'data/Semantic3D.Net/context/block_avg/' + fn for fn in train_list
    ]
    test_list = [
        'data/Semantic3D.Net/context/block_avg/' + fn for fn in test_list
    ]
    read_fn = lambda model, filename: read_pkl(filename)
    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 5000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['ctx_pts'],
                        pls['ctx_idxs'], pls['lbls'], pls['is_training'],
                        batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        sess.run(tf.global_variables_initializer())
        if FLAGS.restore:
            # restore only the shared (non-tower) variables; the tower copies
            # keep their freshly initialized values
            all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            all_vars = [
                var for var in all_vars if not var.name.startswith('tower')
            ]
            restore_saver = tf.train.Saver(var_list=all_vars)
            restore_saver.restore(sess, FLAGS.restore_model)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Code example #12
def eval_room_probs(idx,sess,pls,ops,feed_dict):
    all_xyzs, all_lbls, all_probs = [], [], []
    all_feed_in=read_pkl('data/ScanNet/sampled_test/test_{}.pkl'.format(idx))
    for i in xrange(len(all_feed_in[0])):
        sxyzs,lbls,block_mins=fill_feed_dict(all_feed_in,feed_dict,pls,i)
        probs=sess.run(ops['probs'],feed_dict)
        all_xyzs.append(sxyzs+block_mins)
        all_lbls.append(lbls)
        all_probs.append(probs)

    return np.concatenate(all_xyzs,axis=0),np.concatenate(all_lbls,axis=0),np.concatenate(all_probs,axis=0)
Code example #13
    def test_fn(model, fn):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(fn)
        for i in xrange(len(xyzs)):
            pt_num = len(xyzs[i])
            if pt_num > 20480:
                idxs = np.random.choice(pt_num, 20480, False)
                xyzs[i] = xyzs[i][idxs]
                rgbs[i] = rgbs[i][idxs]
                covars[i] = covars[i][idxs]
                lbls[i] = lbls[i][idxs]

        return xyzs, rgbs, covars, lbls, block_mins
Code example #14
def train():
    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]
    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 11000 / FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pls['weights'],
                        pmiu.shape[1], pls['is_training'], batch_num_per_epoch,
                        pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Code example #15
def eval_room_probs(fn, sess, pls, ops, feed_dict):
    all_xyzs, all_lbls, all_probs = [], [], []
    all_feed_in = read_pkl('data/Semantic3D.Net/block/test/' + fn + '.pkl')
    for i in xrange(len(all_feed_in[0])):
        sxyzs, lbls, block_mins = fill_feed_dict(all_feed_in, feed_dict, pls,
                                                 i)
        probs = sess.run(ops['probs'], feed_dict)
        all_xyzs.append(sxyzs + block_mins)
        all_lbls.append(lbls)
        all_probs.append(probs)

    return np.concatenate(all_xyzs, axis=0), np.concatenate(
        all_lbls, axis=0), np.concatenate(all_probs, axis=0)
Code example #16
def visual_room():
    train_list, test_list = get_block_train_test_split()
    train_list += test_list
    file_list = [fn for fn in train_list if fn.split('_')[-2] == 'office']
    from draw_util import get_class_colors, output_points
    colors = get_class_colors()
    for fn in file_list:
        xyzs, rgbs, covars, labels, block_mins = read_pkl(
            'data/S3DIS/office_block/' + fn)
        for k in xrange(len(xyzs)):
            xyzs[k] += block_mins[k]
        xyzs = np.concatenate(xyzs, axis=0)
        labels = np.concatenate(labels, axis=0)

        output_points('test_result/{}.txt'.format(fn), xyzs, colors[labels])
Code example #17
def eval_room_probs(fn, sess, pls, ops, feed_dict):
    all_xyzs, all_lbls, all_probs = [], [], []
    all_feed_in = read_pkl('data/Semantic3D.Net/context/test_block_avg/' + fn +
                           '.pkl')
    for i in xrange(len(all_feed_in[0])):
        cur_feed_in = [[fi[i]] for fi in all_feed_in]
        block_min = all_feed_in[-1][i]
        fill_feed_dict(cur_feed_in, feed_dict, pls, 1)
        probs, sxyzs, lbls = sess.run(
            [ops['probs'], ops['xyzs'], ops['labels']], feed_dict)
        all_xyzs.append(sxyzs + block_min)
        all_lbls.append(lbls)
        all_probs.append(probs)

    return np.concatenate(all_xyzs, axis=0), np.concatenate(
        all_lbls, axis=0), np.concatenate(all_probs, axis=0)
Code example #18
def train():
    train_list, test_list = get_block_train_test_split()
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]
    fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['rgbs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pmiu.shape[1],
                        pls['is_training'], batch_num_per_epoch, pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Code example #19
def process():
    sess, pls, ops, feed_dict = build_session()
    with open('cached/semantic3d_test_stems.txt','r') as f:
        lines=f.readlines()
        fss=[fn.strip('\n').split(' ')[0] for fn in lines]

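    # only the third stem is processed here; its interpolated results are
    # cached below for process_revise (code example #1)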
    for fs in fss[2:3]:
        sxyzs,_,sprobs=eval_room_probs(fs,sess, pls, ops, feed_dict)
        qxyzs,_=read_pkl('data/Semantic3D.Net/pkl/test/{}.pkl'.format(fs))
        qxyzs=np.ascontiguousarray(qxyzs[:,:3],np.float32)
        sxyzs=np.ascontiguousarray(sxyzs[:,:3],np.float32)
        sprobs=np.ascontiguousarray(sprobs,np.float32)
        qprobs=interpolate(sxyzs,sprobs,qxyzs)

        save_results(sxyzs,qxyzs,sprobs,qprobs,FLAGS.prefix,fs)

        save_pkl('cached/sg28_qxyzs.pkl',qxyzs)
        save_pkl('cached/sg28_qprobs.pkl',qprobs)
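
interpolate is likewise not shown in these excerpts; below is a minimal
nearest-neighbor sketch of what it might do, written with scipy's cKDTree
(the original presumably relies on its own libPointUtil routines instead):

import numpy as np
from scipy.spatial import cKDTree

def interpolate(sxyzs, sprobs, qxyzs, k=1):
    # carry class probabilities from the sparse points over to the dense
    # query points; k=1 is plain nearest-neighbor copying, k>1 blends the
    # neighbors with inverse-distance weights
    dists, idxs = cKDTree(sxyzs).query(qxyzs, k=k)
    if k == 1:
        return sprobs[idxs]
    w = 1.0 / np.maximum(dists, 1e-6)
    w /= np.sum(w, axis=1, keepdims=True)
    return np.sum(sprobs[idxs] * w[:, :, None], axis=1)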
Code example #20
def eval():
    train_list, test_list = get_block_train_test_split()
    sess, pls, ops, feed_dict = build_session()
    xyzs, rgbs, _, lbls, block_mins = read_pkl(
        '/home/liuyuan/data/S3DIS/sampled_test_nolimits/' + test_list[0])
    all_preds, all_labels = [], []
    for k in xrange(len(xyzs)):
        logits, labels = sess.run([ops['logits'], ops['labels']],
                                  feed_dict={
                                      pls[0]: xyzs[k],
                                      pls[1]: rgbs[k],
                                      pls[2]: lbls[k],
                                  })
        preds = np.argmax(logits, axis=1)
        all_preds.append(preds)
        all_labels.append(labels)

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)
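    # overall point-level accuracy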
    print np.sum(all_preds == all_labels) / float(len(all_preds))
Code example #21
def eval_room_probs(idx, sess, pls, ops, feed_dict):
    all_xyzs, all_lbls, all_probs = [], [], []
    all_feed_in = read_pkl('data/ScanNet/sampled_test/test_{}.pkl'.format(idx))
    all_feed_in = [
        all_feed_in[0], all_feed_in[2], all_feed_in[3], all_feed_in[11]
    ]
    for i in xrange(len(all_feed_in[0])):
        print i
        feed_in = _fetch_all_feed_in_idx(all_feed_in, i)
        _, _, block_mins = fill_feed_dict(feed_in, feed_dict, pls, 1)
        print block_mins
        probs, lbls, xyzs = sess.run(
            [ops['probs'], ops['labels'], ops['xyzs']], feed_dict)
        print xyzs.shape
        all_xyzs.append(xyzs + block_mins[0])
        all_lbls.append(lbls)
        all_probs.append(probs)

    return np.concatenate(all_xyzs, axis=0), np.concatenate(
        all_lbls, axis=0), np.concatenate(all_probs, axis=0)
Code example #22
def test_block_train():
    train_list, test_list = get_block_train_test_split()

    from draw_util import get_class_colors, output_points
    # colors=get_class_colors()
    # for fn in train_list[:1]:
    #     xyzs, rgbs, covars, lbls, block_mins=read_pkl(fn)
    #
    #     for i in xrange(len(xyzs[:5])):
    #         rgbs[i]+=128
    #         rgbs[i]*=127
    #         output_points('test_result/{}clr.txt'.format(i),xyzs[i],rgbs[i])
    #         output_points('test_result/{}lbl.txt'.format(i),xyzs[i],colors[lbls[i]])
    # count=0
    # pt_nums=[]
    #
    # stem2num={}
    # for fn in train_list:
    #     xyzs, rgbs, covars, lbls, block_mins=read_pkl('data/S3DIS/sampled_train_nolimits/'+fn)
    #     stem='_'.join(fn.split('_')[1:])
    #     if stem in stem2num:
    #         stem2num[stem]+=len(xyzs)
    #     else:
    #         stem2num[stem]=len(xyzs)
    #
    #     print stem,stem2num[stem]
    #     count+=len(xyzs)
    #     pt_nums+=[len(pts) for pts in xyzs]
    #
    # print count
    # print np.max(pt_nums)
    # print np.histogram(pt_nums)

    xyzs, rgbs, covars, lbls, block_mins = read_pkl(
        'data/S3DIS/sampled_train_nolimits/{}'.format(
            '1_Area_1_conferenceRoom_2.pkl'))
    for i in xrange(len(xyzs)):
        output_points('test_result/{}.txt'.format(i), xyzs[i] + block_mins[i],
                      rgbs[i] * 127 + 128)
Code example #23
def eval():
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 11000 / FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pls['weights'],
                        pmiu.shape[1], pls['is_training'], batch_num_per_epoch,
                        pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Code example #24
def test_covar():
    train_list, test_list = get_block_train_test_split()
    points, labels = read_pkl('data/S3DIS/room_block_10_10/' + train_list[0])
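    # sstride, bsize and bstride are module-level settings in the original
    # code (not defined in this excerpt)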
    xyzs, rgbs, covars, lbls = sample_block(points,
                                            labels,
                                            sstride,
                                            bsize,
                                            bstride,
                                            min_pn=512,
                                            use_rescale=False,
                                            swap=False,
                                            flip_x=False,
                                            flip_y=False,
                                            covar_ds_stride=0.075,
                                            covar_nn_size=0.15)

    from sklearn.cluster import KMeans
    from draw_util import output_points
    for i in xrange(len(xyzs[:5])):
        kmeans = KMeans(5)
        colors = np.random.randint(0, 256, [5, 3])
        preds = kmeans.fit_predict(covars[i])
        output_points('test_result/{}.txt'.format(i), xyzs[i], colors[preds])
Code example #25
def process_one_file(fid):
    points, labels = read_pkl(
        'data/ScanNet/split/train_split_{}.pkl'.format(fid))

    room_num = len(points)
    all_data = [[] for _ in xrange(12)]
    idx = 0
    bg = time.time()
    for i in xrange(room_num):
        if i % 10 == 0:
            print 'idx {} cost {} s'.format(i, time.time() - bg)
            bg = time.time()

        for t in xrange(5):
            xyzs, covars, lbls = sample_block_scannet(points[i], labels[i],
                                                      ds_stride, block_size,
                                                      block_stride, min_pn,
                                                      True, True, True,
                                                      covar_nn_size)
            data = normalize_block_scannet(xyzs, covars, lbls, block_size, nr1,
                                           nr2, nr3, vc1, vc2, True, rs_low,
                                           rs_high, pn_limits)

            for s in xrange(len(data)):
                all_data[s] += data[s]

        if len(all_data[0]) > 300:
            save_pkl(
                'data/ScanNet/sampled_train/train_{}_{}.pkl'.format(fid, idx),
                all_data)
            idx += 1
            all_data = [[] for _ in xrange(12)]

    if len(all_data[0]) > 0:
        save_pkl('data/ScanNet/sampled_train/train_{}_{}.pkl'.format(fid, idx),
                 all_data)
        idx += 1
Code example #26
def eval():
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_context_train_test(test_set)
    train_list = [
        'data/Semantic3D.Net/context/block/' + fn for fn in train_list
    ]
    test_list = ['data/Semantic3D.Net/context/block/' + fn for fn in test_list]
    read_fn = lambda model, filename: read_pkl(filename)

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 5000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['ctx_pts'],
                        pls['ctx_idxs'], pls['lbls'], pls['is_training'],
                        batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Code example #27
    idxs = libPointUtil.gridDownsampleGPU(sxyzs, 0.3, False)
    sxyzs = sxyzs[idxs]
    spreds = spreds[idxs]
    output_points('{}/{}_sparse.txt'.format(dir, fs), sxyzs, colors[spreds])

    idxs = libPointUtil.gridDownsampleGPU(qxyzs, 0.3, False)
    qxyzs = qxyzs[idxs]
    qpreds = qpreds[idxs]
    output_points('{}/{}_dense.txt'.format(dir, fs), qxyzs, colors[qpreds])


if __name__ == "__main__":
    sess, pls, ops, feed_dict = build_session()

    with open('cached/semantic3d_test_stems.txt', 'r') as f:
        lines = f.readlines()
        fss = [fn.strip('\n').split(' ')[0] for fn in lines]

    prefix = 'context_avg_result_32'

    for fs in fss:
        sxyzs, _, sprobs = eval_room_probs(fs, sess, pls, ops, feed_dict)
        qxyzs, _ = read_pkl('data/Semantic3D.Net/pkl/test/{}.pkl'.format(fs))
        qxyzs = np.ascontiguousarray(qxyzs[:, :3], np.float32)
        sxyzs = np.ascontiguousarray(sxyzs[:, :3], np.float32)
        sprobs = np.ascontiguousarray(sprobs, np.float32)
        qprobs = interpolate(sxyzs, sprobs, qxyzs)

        save_results(sxyzs, qxyzs, sprobs, qprobs, prefix, fs)
Code example #28
    return qprobs


if __name__ == "__main__":
    import random
    train_list, test_list = get_block_train_test_split()
    sess, pls, ops, feed_dict = build_session()
    all_preds, all_labels = [], []
    fp = np.zeros(13, dtype=np.uint64)
    tp = np.zeros(13, dtype=np.uint64)
    fn = np.zeros(13, dtype=np.uint64)
    random.shuffle(test_list)
    for fi, fs in enumerate(test_list):
        sxyzs, slbls, sprobs = eval_room_probs(fs, sess, pls, ops, feed_dict)
        filename = 'data/S3DIS/room_block_10_10/' + fs
        points, labels = read_pkl(filename)
        qxyzs = np.ascontiguousarray(points[:, :3], np.float32)
        qn = qxyzs.shape[0]
        rn = 1000000
        qrn = qn / rn
        if qn % rn != 0: qrn += 1
        # print 'qrn {} sxyzs num {}'.format(qrn,sxyzs.shape[0])
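        # interpolate rn query points at a time to bound memory use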
        qprobs = []
        for t in xrange(qrn):
            beg_idxs = t * rn
            end_idxs = min((t + 1) * rn, qn)
            qrprobs = interpolate(sxyzs, sprobs, qxyzs[beg_idxs:end_idxs])
            qprobs.append(qrprobs)

        qprobs = np.concatenate(qprobs, axis=0)
        qpreds = np.argmax(qprobs, axis=1)
Code example #29
def read_fn(model, fn):
    # keep only the entries needed here; by position these appear to be
    # xyzs, covars, lbls and block_mins
    data = read_pkl(fn)
    return data[0], data[2], data[3], data[11]
Code example #30
    # classes_count=np.zeros(21)
    # for i in xrange(6):
    #     points,labels=read_pkl('data/ScanNet/split/train_split_{}.pkl'.format(i))
    #     for t in xrange(len(labels)):
    #         cur_count,_=np.histogram(labels[t],np.arange(22))
    #         classes_count+=cur_count
    # from io_util import get_scannet_class_names
    # names=get_scannet_class_names()
    # for name,count in zip(names,classes_count):
    #     print '{}: {}'.format(name,count)

    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]

    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    train_list += test_list

    def read_fn(model, fn):
        data = read_pkl(fn)
        return data[0], data[2], data[3], data[11]

    count = 0
    for fs in train_list:
        count += len(read_pkl(fs)[0])

    print count