def eval():
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]

    def read_fn(model, fn):
        data = read_pkl(fn)
        return data[0], data[2], data[3], data[11]

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 11000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
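
# `read_pkl` is called by every example on this page but defined elsewhere in
# the repository. A minimal sketch, assuming the sampled blocks are plain
# pickle files written by the preprocessing stage:
import cPickle


def read_pkl(filename):
    # hypothetical sketch, not the repository's implementation
    with open(filename, 'rb') as f:
        return cPickle.load(f)
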
Example 2
def eval():
    train_list, test_list = get_block_train_test_split()
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]

    def fn(model, filename):
        data = read_pkl(filename)
        return data[0], data[2], data[3], data[4], data[12]

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
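
# All of these functions read hyper-parameters from a module-level FLAGS
# object whose definition the listing never shows. A sketch of the flags the
# snippets rely on, with placeholder defaults (TF 1.x flag API):
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_integer('num_gpus', 1, 'number of GPU towers')
flags.DEFINE_integer('batch_size', 1, 'blocks per GPU per step')
flags.DEFINE_string('train_dir', 'train/', 'summary/checkpoint directory')
flags.DEFINE_string('eval_model', '', 'checkpoint restored by eval()')
flags.DEFINE_bool('restore', False, 'resume training from restore_model')
flags.DEFINE_string('restore_model', '', 'checkpoint to resume from')
flags.DEFINE_integer('restore_epoch', 0, 'first epoch after resuming')
flags.DEFINE_integer('train_epoch_num', 500, 'total number of epochs')
# (some examples additionally use FLAGS.base_restore for partial restores)
FLAGS = flags.FLAGS
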
Example 3
def test_data_iter():
    from provider import Provider, default_unpack_feats_labels
    from draw_util import output_points, get_class_colors
    import time
    import random

    train_list, test_list = get_block_train_test_split()
    random.shuffle(train_list)
    train_list = ['data/S3DIS/room_block_10_10/' + fn for fn in train_list]
    test_list = ['data/S3DIS/room_block_10_10/' + fn for fn in test_list]

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)
    print len(train_list)
    try:
        begin = time.time()
        i = 0
        for data in test_provider:
            i += 1
            pass
        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            pass
        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        test_provider.close()
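
# `Provider` comes from provider.py, which this listing does not include.
# The tests above rely on three behaviors only: it iterates over batches, it
# applies read_fn(model, filename) to each file in the background, and it is
# closed explicitly. A rough, hypothetical stand-in (the real class may use
# worker processes and caching instead):
import threading
import Queue  # Python 2 name of the queue module


class SimpleProvider(object):
    def __init__(self, file_list, model, batch_size, read_fn):
        self.batch_size = batch_size
        self._queue = Queue.Queue(maxsize=8)
        self._done = object()

        def _worker():
            for filename in file_list:
                self._queue.put(read_fn(model, filename))
            self._queue.put(self._done)

        self._thread = threading.Thread(target=_worker)
        self._thread.daemon = True
        self._thread.start()

    def __iter__(self):
        batch = []
        while True:
            item = self._queue.get()
            if item is self._done:
                break
            batch.append(item)
            if len(batch) == self.batch_size:
                yield batch
                batch = []

    def close(self):
        # the real Provider presumably joins/terminates its workers here
        pass
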
Example 4
def test_read_semantic_dataset():
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_semantic3d_block_train_list()
    # print train_list
    # exit(0)
    train_list = [
        'data/Semantic3D.Net/block/sampled/merged/' + fn for fn in train_list
    ]
    test_list = [
        'data/Semantic3D.Net/block/sampled/merged/' + fn for fn in test_list
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, rgbs, covars, lbls = default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                print len(cxyzs[k])

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        train_provider.close()
        test_provider.close()
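
# `default_unpack_feats_labels` is imported from provider but never shown.
# Judging from its call sites, it regroups a batch of per-block tuples into
# per-field lists, so that e.g. cxyzs[k] is the array of block k. A plausible
# sketch (the second argument is presumably for padding/validation and is
# ignored here):
def default_unpack_feats_labels(batch, num_gpus):
    return [list(field) for field in zip(*batch)]
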
Example 5
def eval():
    train_list, test_list = get_block_train_test_split()
    test_list = ['data/S3DIS/room_block_10_10/' + fn for fn in test_list]

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = {}
        for key in ('xyzs', 'lbls', 'rgbs', 'covars', 'nidxs',
                    'nidxs_lens', 'nidxs_bgs', 'cidxs', 'weights'):
            pls[key] = []
        for i in xrange(FLAGS.num_gpus):
            pls['xyzs'].append(
                tf.placeholder(tf.float32, [None, 3], 'xyz{}'.format(i)))
            pls['rgbs'].append(
                tf.placeholder(tf.float32, [None, 3], 'rgb{}'.format(i)))
            pls['covars'].append(
                tf.placeholder(tf.float32, [None, 9], 'covar{}'.format(i)))
            pls['lbls'].append(
                tf.placeholder(tf.int64, [None], 'lbl{}'.format(i)))
            pls['nidxs'].append(
                tf.placeholder(tf.int32, [None], 'nidxs{}'.format(i)))
            pls['nidxs_lens'].append(
                tf.placeholder(tf.int32, [None], 'nidxs_lens{}'.format(i)))
            pls['nidxs_bgs'].append(
                tf.placeholder(tf.int32, [None], 'nidxs_bgs{}'.format(i)))
            pls['cidxs'].append(
                tf.placeholder(tf.int32, [None], 'cidxs{}'.format(i)))
            pls['weights'].append(
                tf.placeholder(tf.float32, [None], 'weights{}'.format(i)))

        pmiu = neighbor_anchors_v2()
        pls['is_training'] = tf.placeholder(tf.bool, name='is_training')
        pls['pmiu'] = tf.placeholder(tf.float32, name='pmiu')

        batch_num_per_epoch = 2500 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['rgbs'], pls['covars'], pls['lbls'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pmiu.shape[1], pls['is_training'],
                        batch_num_per_epoch, pls['pmiu'], pls['weights'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
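
# Most examples call build_placeholder(FLAGS.num_gpus) instead of spelling
# the placeholders out as eval() does above. A minimal sketch of the simple
# xyzs/feats/lbls variant, mirroring the inline construction; the hierarchy
# examples clearly need a richer key set, and the feature width is unknown
# here, so [None, None] is an assumption:
def build_placeholder(num_gpus):
    pls = {'xyzs': [], 'feats': [], 'lbls': []}
    for i in xrange(num_gpus):
        pls['xyzs'].append(
            tf.placeholder(tf.float32, [None, 3], 'xyzs{}'.format(i)))
        pls['feats'].append(
            tf.placeholder(tf.float32, [None, None], 'feats{}'.format(i)))
        pls['lbls'].append(
            tf.placeholder(tf.int64, [None], 'lbls{}'.format(i)))
    pls['is_training'] = tf.placeholder(tf.bool, name='is_training')
    return pls
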
Example 6
def train():
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_context_train_test(test_set)
    train_list = [
        'data/Semantic3D.Net/context/block_avg/' + fn for fn in train_list
    ]
    test_list = [
        'data/Semantic3D.Net/context/block_avg/' + fn for fn in test_list
    ]
    read_fn = lambda model, filename: read_pkl(filename)
    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 5000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['ctx_pts'],
                        pls['ctx_idxs'], pls['lbls'], pls['is_training'],
                        batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        sess.run(tf.global_variables_initializer())
        if FLAGS.restore:
            # restore everything except the per-tower replicas, which keep
            # the fresh initialization from above
            all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            all_vars = [
                var for var in all_vars if not var.name.startswith('tower')
            ]
            restore_saver = tf.train.Saver(var_list=all_vars)
            restore_saver.restore(sess, FLAGS.restore_model)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
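
# train_one_epoch / test_one_epoch are defined elsewhere. Judging from the
# call sites, an epoch walks the provider, binds one block to each tower's
# placeholders and runs the training op. A sketch under those assumptions
# (the field order and the op keys are guesses; summary writing is omitted):
def train_one_epoch(ops, pls, sess, summary_writer, provider, epoch_num,
                    feed_dict):
    for batch in provider:
        for k, block in enumerate(batch):
            feed_dict[pls['xyzs'][k]] = block[0]  # assumed field order
            feed_dict[pls['feats'][k]] = block[1]
            feed_dict[pls['lbls'][k]] = block[2]
        feed_dict[pls['is_training']] = True
        sess.run(ops['apply_grad'], feed_dict)  # 'apply_grad' is a guess
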
Example 7
def train():
    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]
    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 11000 // FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pls['weights'],
                        pmiu.shape[1], pls['is_training'], batch_num_per_epoch,
                        pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example 8
def train():
    train_list, test_list = get_block_train_test_split()
    # test_list=['data/S3DIS/sampled_train/'+fn for fn in train_list[:2]]
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]

    def fn(model, filename):
        data = read_pkl(filename)
        return data[0], data[2], data[3], data[4], data[12]

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

            base_var = [
                var for var in tf.trainable_variables() if
                var.name.startswith('base') or var.name.startswith('class_mlp')
            ]
            base_saver = tf.train.Saver(base_var)
            base_saver.restore(sess, FLAGS.base_restore)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example 9
def train():
    train_list, test_list = prepare_input_list('data/S3DIS/point/fpfh/',
                                               FLAGS.batch_size)
    fetch_data_with_batch = functools.partial(fetch_data,
                                              batch_size=FLAGS.batch_size)
    train_provider = Provider(train_list,
                              1,
                              fetch_data_with_batch,
                              'train',
                              4,
                              fetch_batch,
                              max_worker_num=1)
    test_provider = Provider(test_list,
                             1,
                             fetch_data_with_batch,
                             'test',
                             4,
                             fetch_batch,
                             max_worker_num=1)
    trainset = ProviderMultiGPUWrapper(FLAGS.num_gpus, train_provider)
    testset = ProviderMultiGPUWrapper(FLAGS.num_gpus, test_provider)

    try:
        pls = {}
        pls['feats'] = tf.placeholder(tf.float32, [None, 39], 'feats')
        pls['labels'] = tf.placeholder(tf.int64, [None], 'labels')
        pls['is_training'] = tf.placeholder(tf.bool, [], 'is_training')
        ops = train_ops(pls['feats'], pls['labels'], pls['is_training'],
                        train_provider.batch_num)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=500)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.train_epoch_num):
            test_one_epoch(ops, pls, sess, saver, testset, epoch_num)
            train_one_epoch(ops, pls, sess, summary_writer, trainset,
                            epoch_num)

    finally:
        train_provider.close()
        test_provider.close()
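
# ProviderMultiGPUWrapper appears only in this example. Presumably it groups
# num_gpus consecutive batches from the wrapped provider so that each tower
# receives one; a hypothetical sketch, not the real class:
class ProviderMultiGPUWrapper(object):
    def __init__(self, num_gpus, provider):
        self.num_gpus = num_gpus
        self.provider = provider

    def __iter__(self):
        group = []
        for batch in self.provider:
            group.append(batch)
            if len(group) == self.num_gpus:
                yield group
                group = []

    def close(self):
        self.provider.close()
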
Example 10
def train():
    train_list, test_list = get_block_train_test_split()
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]
    fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['rgbs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pmiu.shape[1],
                        pls['is_training'], batch_num_per_epoch, pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example 11
def train():
    train_list, test_list = get_block_train_test_split()
    train_list = [
        'data/S3DIS/sampled_train_nolimits/' + fn for fn in train_list
    ]
    # train_list=['data/S3DIS/sampled_train_no_aug/'+fn for fn in train_list]
    # with open('cached/s3dis_merged_train.txt', 'r') as f:
    #     train_list=[line.strip('\n') for line in f.readlines()]
    random.shuffle(train_list)
    test_list = ['data/S3DIS/sampled_test_nolimits/' + fn for fn in test_list]

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, test_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, test_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example 12
def train():
    import random
    train_list, test_list = get_block_train_test_split()
    train_list = [
        'data/S3DIS/sampled_train_nolimits/' + fn for fn in train_list
    ]
    random.shuffle(train_list)
    test_list = ['data/S3DIS/sampled_test_nolimits/' + fn for fn in test_list]

    def test_fn(model, filename):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(filename)
        return xyzs, rgbs, covars, lbls, block_mins

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, test_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, test_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        # fine-tuning: restore everything except the freshly initialized
        # classification head
        var_list = [
            var for var in tf.trainable_variables()
            if not var.name.startswith('class_mlp')
        ]
        saver = tf.train.Saver(max_to_keep=500, var_list=var_list)
        saver.restore(sess, FLAGS.restore_model)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, train_provider, epoch_num,
                            feed_dict)
            test_one_epoch(ops, pls, sess, test_provider, epoch_num, feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example 13
def test_semantic_read_pkl():
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_semantic3d_block_train_list()
    train_list = [
        'data/Semantic3D.Net/block/sampled/train_merge/{}.pkl'.format(i)
        for i in xrange(231)
    ]
    test_list = [
        'data/Semantic3D.Net/block/sampled/test/' + fn for fn in test_list
    ]
    simple_read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, simple_read_fn)
    # test_provider = Provider(test_list,'test',4,simple_read_fn)

    print len(train_list)
    try:
        # begin = time.time()
        # i = 0
        # for data in test_provider:
        #     i += 1
        #     pass
        # print 'batch_num {}'.format(i * 4)
        # print 'test set cost {} s'.format(time.time() - begin)

        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            if i % 2500 == 0:
                print 'cost {} s'.format(time.time() - begin)

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        # test_provider is commented out above, so there is nothing to close
        # test_provider.close()
Example 14
def eval():
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 11000 // FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pls['weights'],
                        pmiu.shape[1], pls['is_training'], batch_num_per_epoch,
                        pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Example 15
def eval():
    from semantic3d_context_util import get_context_train_test
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_context_train_test(test_set)
    test_list = ['data/Semantic3D.Net/context/block/' + fn for fn in test_list]

    def read_fn(model, fn):
        xyzs, rgbs, covars, lbls, ctx_xyzs, ctx_idxs, block_mins = read_pkl(fn)
        return xyzs, rgbs, covars, lbls, block_mins

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Example 16
def test_read_s3dis_dataset():
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_block_train_test_split()
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]
    train_list += test_list

    def fn(model, filename):
        data = read_pkl(filename)
        return data[0], data[2], data[3], data[4], data[12]

    train_provider = Provider(train_list, 'train', 4, fn)
    test_provider = Provider(test_list, 'test', 4, fn)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, rgbs, covars, lbls, block_mins = default_unpack_feats_labels(
                data, 4)
            for k in xrange(4):
                min_xyz = np.min(cxyzs[k][0], axis=0)
                # print min_xyz
                eps = 1e-5
                min_val = np.asarray([-1.5, -1.5, 0.0]) - eps
                val = np.asarray(np.floor(min_xyz - min_val), np.int32)
                print val
                assert val[0] >= 0
                assert val[1] >= 0
                assert val[2] >= 0

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        train_provider.close()
        test_provider.close()
Example 17
def train():
    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]
    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]

    def read_fn(model, fn):
        data = read_pkl(fn)
        return data[0], data[2], data[3], data[11]

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 11000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)
        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example 18
def test_data_iter_hierarchy():
    from provider import Provider, default_unpack_feats_labels
    from draw_util import output_points, get_class_colors
    import time
    import random

    train_list, test_list = get_block_train_test_split_ds()
    # random.shuffle(train_list)
    train_list = [
        'data/S3DIS/room_block_10_10_ds0.03/' + fn for fn in train_list
    ]
    test_list = [
        'data/S3DIS/room_block_10_10_ds0.03/' + fn for fn in test_list
    ]
    train_list = train_list[:251]
    test_list = test_list[:len(test_list) // 5]

    train_provider = Provider(train_list, 'train', 4, read_fn_hierarchy)
    test_provider = Provider(test_list, 'test', 4, read_fn_hierarchy)
    print len(train_list)
    try:
        # begin=time.time()
        # i=0
        # for data in test_provider:
        #     i+=1
        #     pass
        # print 'batch_num {}'.format(i*4)
        # print 'test set cost {} s'.format(time.time()-begin)
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, dxyzs, rgbs, covars, lbls, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens, block_mins = \
                default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                for t in xrange(3):
                    print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(cxyzs[k][t], axis=0),
                        np.max(cxyzs[k][t], axis=0), cxyzs[k][t].shape[0])
                    assert cidxs[k][t].shape[0] == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert nidxs_lens[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert np.sum(nidxs_lens[k][t]) == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t][-1] + nidxs_lens[k][t][-1] == nidxs[
                        k][t].shape[0]
                    assert np.max(cidxs[k][t]) == cxyzs[k][t].shape[0] - 1
                    print 'lvl {} avg nsize {}'.format(
                        t, cidxs[k][t].shape[0] / float(cxyzs[k][t].shape[0]))

                print 'rgb min {} max {}'.format(np.min(rgbs[k], axis=0),
                                                 np.max(rgbs[k], axis=0))
                # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
                # print np.min(covars[k],axis=0)
                # print np.max(covars[k],axis=0)

                for t in xrange(2):
                    print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(dxyzs[k][t], axis=0),
                        np.max(dxyzs[k][t], axis=0), dxyzs[k][t].shape[0])
                    assert vlens[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert vlens_bgs[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert np.sum(vlens[k][t]) == cxyzs[k][t].shape[0]
                    assert vlens_bgs[k][t][-1] + vlens[k][t][-1] == cxyzs[k][
                        t].shape[0]
                    assert np.max(vcidxs[k][t]) == cxyzs[k][t + 1].shape[0] - 1
                print '////////////////////'

            output_hierarchy(cxyzs[0][0], cxyzs[0][1], cxyzs[0][2],
                             rgbs[0] * 127 + 128, lbls[0], vlens[0][0],
                             vlens[0][1], dxyzs[0][0], dxyzs[0][1], 0.2, 0.5)

            if i > 1:
                break

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        test_provider.close()
Example 19
def train():
    from semantic3d_context_util import get_context_train_test
    from aug_util import swap_xy, flip
    import random
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_semantic3d_block_train_test_list(test_set)
    train_list = [
        'data/Semantic3D.Net/block/sampled/' + fn for fn in train_list
    ]
    test_list = ['data/Semantic3D.Net/block/sampled/' + fn for fn in test_list]

    def test_fn(model, fn):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(fn)
        for i in xrange(len(xyzs)):
            pt_num = len(xyzs[i])
            if pt_num > 20480:
                idxs = np.random.choice(pt_num, 20480, False)
                xyzs[i] = xyzs[i][idxs]
                rgbs[i] = rgbs[i][idxs]
                covars[i] = covars[i][idxs]
                lbls[i] = lbls[i][idxs]

        return xyzs, rgbs, covars, lbls, block_mins

    def train_fn(model, fn):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(fn)
        for i in xrange(len(xyzs)):
            pt_num = len(xyzs[i])
            if pt_num > 4096:
                idxs = np.random.choice(
                    pt_num, int(np.random.uniform(0.85, 1.0) * pt_num), False)
                xyzs[i] = xyzs[i][idxs]
                rgbs[i] = rgbs[i][idxs]
                covars[i] = covars[i][idxs]
                lbls[i] = lbls[i][idxs]

            pt_num = len(xyzs[i])
            if pt_num > 20480:
                idxs = np.random.choice(pt_num, 20480, False)
                xyzs[i] = xyzs[i][idxs]
                rgbs[i] = rgbs[i][idxs]
                covars[i] = covars[i][idxs]
                lbls[i] = lbls[i][idxs]

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=0)

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=1)

            if random.random() < 0.5:
                xyzs[i] = swap_xy(xyzs[i])

            jitter_color = np.random.uniform(-0.02, 0.02, rgbs[i].shape)
            rgbs[i] += jitter_color
            rgbs[i][rgbs[i] > 1.0] = 1.0
            rgbs[i][rgbs[i] < -1.0] = -1.0

        return xyzs, rgbs, covars, lbls, block_mins

    # train_fn applies the resampling/flip augmentations defined above, while
    # test_fn only caps the point count
    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, train_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, test_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 5000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
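
# flip and swap_xy come from aug_util. For these horizontal block
# augmentations they are presumably plain coordinate transforms; a sketch of
# plausible implementations (assumptions, not the repository's code):
import numpy as np


def flip(xyzs, axis=0):
    # mirror the block along one horizontal axis
    xyzs = np.copy(xyzs)
    xyzs[:, axis] = -xyzs[:, axis]
    return xyzs


def swap_xy(xyzs):
    # exchange the x and y coordinates
    return xyzs[:, [1, 0, 2]]
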
Example 20
def test_scannet():
    from provider import Provider, default_unpack_feats_labels
    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]
    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)

    try:
        begin = time.time()
        i = 0
        class_count = np.zeros(21)
        for data in train_provider:
            i += 1
            cxyzs, dxyzs, covars, lbls, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens, block_mins = \
                default_unpack_feats_labels(data, 4)
            for t in xrange(4):
                cur_count, _ = np.histogram(lbls[t], np.arange(22))
                class_count += cur_count
            if i % 500 == 0:
                print i
            # for k in xrange(4):
            #     for t in xrange(3):
            #         print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(i,k,t,np.min(cxyzs[k][t],axis=0),
            #                                                                            np.max(cxyzs[k][t],axis=0),
            #                                                                            cxyzs[k][t].shape[0])
            #         assert cidxs[k][t].shape[0]==nidxs[k][t].shape[0]
            #         assert nidxs_bgs[k][t].shape[0]==cxyzs[k][t].shape[0]
            #         assert nidxs_lens[k][t].shape[0]==cxyzs[k][t].shape[0]
            #         assert np.sum(nidxs_lens[k][t])==nidxs[k][t].shape[0]
            #         assert nidxs_bgs[k][t][-1]+nidxs_lens[k][t][-1]==nidxs[k][t].shape[0]
            #         assert np.max(cidxs[k][t])==cxyzs[k][t].shape[0]-1
            #         print 'lvl {} avg nsize {}'.format(t,cidxs[k][t].shape[0]/float(cxyzs[k][t].shape[0]))
            #
            #     # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
            #     # print np.min(covars[k],axis=0)
            #     # print np.max(covars[k],axis=0)
            #
            #     for t in xrange(2):
            #         print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(i,k,t,np.min(dxyzs[k][t],axis=0),
            #                                                                            np.max(dxyzs[k][t],axis=0),
            #                                                                            dxyzs[k][t].shape[0])
            #         assert vlens[k][t].shape[0]==cxyzs[k][t+1].shape[0]
            #         assert vlens_bgs[k][t].shape[0]==cxyzs[k][t+1].shape[0]
            #         assert np.sum(vlens[k][t])==cxyzs[k][t].shape[0]
            #         assert vlens_bgs[k][t][-1]+vlens[k][t][-1]==cxyzs[k][t].shape[0]
            #         assert np.max(vcidxs[k][t])==cxyzs[k][t+1].shape[0]-1
            #     print '////////////////////'
            #
            # colors=np.random.randint(0,256,[21,3])
            # print lbls[0].shape,colors[lbls[0],:].shape,cxyzs[0][0].shape
            # output_hierarchy(cxyzs[0][0],cxyzs[0][1],cxyzs[0][2],
            #                  np.ones([cxyzs[0][0].shape[0],3]),lbls[0],
            #                  vlens[0][0],vlens[0][1],dxyzs[0][0],dxyzs[0][1],0.15,0.5,colors=colors)
            # break

        class_names = get_scannet_class_names()
        for count, name in zip(class_count, class_names):
            print '{}:\t\t{}'.format(name, count)

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)
    finally:
        train_provider.close()
        test_provider.close()
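
# Several training graphs above feed a per-point 'weights' placeholder, and a
# label histogram like the one accumulated here is the natural input for it.
# A hedged sketch using inverse log frequency, a common choice that is not
# necessarily what this repository uses:
import numpy as np


def counts_to_class_weights(class_count):
    freq = class_count / np.sum(class_count)
    return 1.0 / np.log(1.2 + freq)
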
Example 21
def test_model_read():
    from provider import Provider, default_unpack_feats_labels
    train_list = [
        'data/ModelNet40/ply_data_train{}.pkl'.format(i) for i in xrange(5)
    ]
    test_list = [
        'data/ModelNet40/ply_data_test{}.pkl'.format(i) for i in xrange(2)
    ]
    fn = lambda model, filename: read_pkl(filename)
    train_provider = Provider(train_list, 'train', 4, fn, max_cache=1)
    test_provider = Provider(test_list, 'test', 4, fn, max_cache=1)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            print len(data[0])
            i += 1
            print 'cost {}s'.format(time.time() - begin)
            labels, cxyzs, dxyzs, covars, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens = \
                default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                for t in xrange(3):
                    print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(cxyzs[k][t], axis=0),
                        np.max(cxyzs[k][t], axis=0), cxyzs[k][t].shape[0])
                    assert cidxs[k][t].shape[0] == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert nidxs_lens[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert np.sum(nidxs_lens[k][t]) == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t][-1] + nidxs_lens[k][t][-1] == nidxs[
                        k][t].shape[0]
                    assert np.max(cidxs[k][t]) == cxyzs[k][t].shape[0] - 1
                    print 'lvl {} avg nsize {}'.format(
                        t, cidxs[k][t].shape[0] / float(cxyzs[k][t].shape[0]))

                # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
                # print np.min(covars[k],axis=0)
                # print np.max(covars[k],axis=0)

                for t in xrange(2):
                    print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(dxyzs[k][t], axis=0),
                        np.max(dxyzs[k][t], axis=0), dxyzs[k][t].shape[0])
                    assert vlens[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert vlens_bgs[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert np.sum(vlens[k][t]) == cxyzs[k][t].shape[0]
                    assert vlens_bgs[k][t][-1] + vlens[k][t][-1] == cxyzs[k][
                        t].shape[0]
                    assert np.max(vcidxs[k][t]) == cxyzs[k][t + 1].shape[0] - 1
                print '////////////////////'

            # output_hierarchy(cxyzs[0][0],cxyzs[0][1],cxyzs[0][2],
            #                  np.ones([cxyzs[0][0].shape[0],3]),
            #                  np.ones([cxyzs[0][0].shape[0]],dtype=np.int32),
            #                  vlens[0][0],vlens[0][1],dxyzs[0][0],dxyzs[0][1],0.2,0.5)

        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
    finally:
        train_provider.close()
        test_provider.close()
Example 22
def test_model_hierarchy():
    from provider import Provider, default_unpack_feats_labels
    train_list = [
        'data/ModelNet40/ply_data_train{}.h5'.format(i) for i in xrange(5)
    ]
    test_list = [
        'data/ModelNet40/ply_data_test{}.h5'.format(i) for i in xrange(2)
    ]

    # train_provider = Provider(train_list,'train',4,read_model_hierarchy,max_cache=1)
    test_provider = Provider(test_list[1:],
                             'test',
                             4,
                             read_model_hierarchy,
                             max_cache=1)

    print len(train_list)
    try:
        begin = time.time()
        i = 0
        for data in test_provider:
            print data[0][0]
            i += 1
            print 'cost {}s'.format(time.time() - begin)
            labels, cxyzs, dxyzs, covars, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens = \
                default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                for t in xrange(3):
                    print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(cxyzs[k][t], axis=0),
                        np.max(cxyzs[k][t], axis=0), cxyzs[k][t].shape[0])
                    assert cidxs[k][t].shape[0] == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert nidxs_lens[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert np.sum(nidxs_lens[k][t]) == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t][-1] + nidxs_lens[k][t][-1] == nidxs[
                        k][t].shape[0]
                    assert np.max(cidxs[k][t]) == cxyzs[k][t].shape[0] - 1
                    print 'lvl {} avg nsize {}'.format(
                        t, cidxs[k][t].shape[0] / float(cxyzs[k][t].shape[0]))

                # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
                # print np.min(covars[k],axis=0)
                # print np.max(covars[k],axis=0)

                for t in xrange(2):
                    print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(dxyzs[k][t], axis=0),
                        np.max(dxyzs[k][t], axis=0), dxyzs[k][t].shape[0])
                    assert vlens[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert vlens_bgs[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert np.sum(vlens[k][t]) == cxyzs[k][t].shape[0]
                    assert vlens_bgs[k][t][-1] + vlens[k][t][-1] == cxyzs[k][
                        t].shape[0]
                    assert np.max(vcidxs[k][t]) == cxyzs[k][t + 1].shape[0] - 1
                print '////////////////////'

            output_hierarchy(cxyzs[0][0], cxyzs[0][1], cxyzs[0][2],
                             np.ones([cxyzs[0][0].shape[0], 3]),
                             np.ones([cxyzs[0][0].shape[0]],
                                     dtype=np.int32), vlens[0][0], vlens[0][1],
                             dxyzs[0][0], dxyzs[0][1], 0.2, 0.5)

            if i > 1:
                break
        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
        # begin = time.time()
        # i = 0
        # for data in train_provider:
        #     i+=1
        #     print data[0]
        #     if i%2500==0:
        #         print 'cost {} s'.format(time.time()-begin)
        #
        # print 'batch_num {}'.format(i * 4)
        # print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        # train_provider.close()
        test_provider.close()
Example 23
def test_semantic_hierarchy():
    from provider import Provider, default_unpack_feats_labels
    from functools import partial
    train_list, test_list = get_semantic3d_block_train_list()
    train_list = ['data/Semantic3D.Net/block/train/' + fn for fn in train_list]
    test_list = ['data/Semantic3D.Net/block/train/' + fn for fn in test_list]

    random.shuffle(train_list)

    tmp_read_fn = partial(read_fn_hierarchy,
                          use_rotate=False,
                          presample=False,
                          nr1=0.1,
                          nr2=0.4,
                          nr3=1.0,
                          vc1=0.2,
                          vc2=0.5,
                          sstride=0.075,
                          bsize=5.0,
                          bstride=2.5,
                          min_pn=1024,
                          resample_ratio_low=0.8,
                          resample_ratio_high=1.0)

    train_provider = Provider(train_list, 'train', 4, tmp_read_fn)
    test_provider = Provider(test_list, 'test', 4, tmp_read_fn)
    print len(train_list)
    try:
        begin = time.time()
        i = 0
        for data in test_provider:
            i += 1
            pass
        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
        begin = time.time()
        i = 0
        for data in train_provider:
            # i+=1
            # cxyzs, dxyzs, rgbs, covars, lbls, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens, block_mins = \
            #     default_unpack_feats_labels(data, 4)
            # for k in xrange(4):
            #     for t in xrange(3):
            #         print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(i,k,t,np.min(cxyzs[k][t],axis=0),
            #                                                                            np.max(cxyzs[k][t],axis=0),
            #                                                                            cxyzs[k][t].shape[0])
            #         assert cidxs[k][t].shape[0]==nidxs[k][t].shape[0]
            #         assert nidxs_bgs[k][t].shape[0]==cxyzs[k][t].shape[0]
            #         assert nidxs_lens[k][t].shape[0]==cxyzs[k][t].shape[0]
            #         assert np.sum(nidxs_lens[k][t])==nidxs[k][t].shape[0]
            #         assert nidxs_bgs[k][t][-1]+nidxs_lens[k][t][-1]==nidxs[k][t].shape[0]
            #         assert np.max(cidxs[k][t])==cxyzs[k][t].shape[0]-1
            #         print 'lvl {} avg nsize {}'.format(t,cidxs[k][t].shape[0]/float(cxyzs[k][t].shape[0]))
            #
            #     print 'rgb min {} max {}'.format(np.min(rgbs[k],axis=0),np.max(rgbs[k],axis=0))
            #     # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
            #     # print np.min(covars[k],axis=0)
            #     # print np.max(covars[k],axis=0)
            #
            #     for t in xrange(2):
            #         print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(i,k,t,np.min(dxyzs[k][t],axis=0),
            #                                                                            np.max(dxyzs[k][t],axis=0),
            #                                                                            dxyzs[k][t].shape[0])
            #         assert vlens[k][t].shape[0]==cxyzs[k][t+1].shape[0]
            #         assert vlens_bgs[k][t].shape[0]==cxyzs[k][t+1].shape[0]
            #         assert np.sum(vlens[k][t])==cxyzs[k][t].shape[0]
            #         assert vlens_bgs[k][t][-1]+vlens[k][t][-1]==cxyzs[k][t].shape[0]
            #         assert np.max(vcidxs[k][t])==cxyzs[k][t+1].shape[0]-1
            #     print '////////////////////'
            #
            # output_hierarchy(cxyzs[0][0],cxyzs[0][1],cxyzs[0][2],rgbs[0]*127+128,lbls[0],vlens[0][0],vlens[0][1],dxyzs[0][0],dxyzs[0][1],0.2,0.5)
            #
            # if i>1:
            #     break
            pass

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        test_provider.close()
Example 24
def train():
    import random
    from aug_util import flip, swap_xy
    train_list, test_list = get_block_train_test_split()
    train_list = [
        'data/S3DIS/sampled_train_nolimits/' + fn for fn in train_list
    ]
    # train_list=['data/S3DIS/sampled_train_no_aug/'+fn for fn in train_list]
    # with open('cached/s3dis_merged_train.txt', 'r') as f:
    #     train_list=[line.strip('\n') for line in f.readlines()]
    random.shuffle(train_list)
    test_list = ['data/S3DIS/sampled_test_nolimits/' + fn for fn in test_list]

    def train_fn(model, filename):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(filename)

        num = len(xyzs)
        for i in xrange(num):
            # pt_num=len(xyzs[i])
            # ds_ratio=np.random.uniform(0.8,1.0)
            # idxs=np.random.choice(pt_num,int(ds_ratio*pt_num),False)
            #
            # xyzs[i]=xyzs[i][idxs]
            # rgbs[i]=rgbs[i][idxs]
            # covars[i]=covars[i][idxs]
            # lbls[i]=lbls[i][idxs]

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=0)

            if random.random() < 0.5:
                xyzs[i] = flip(xyzs[i], axis=1)

            if random.random() < 0.5:
                xyzs[i] = swap_xy(xyzs[i])

            jitter_color = np.random.uniform(-0.02, 0.02, rgbs[i].shape)
            rgbs[i] += jitter_color

        return xyzs, rgbs, covars, lbls, block_mins

    def test_fn(model, filename):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(filename)
        return xyzs, rgbs, covars, lbls, block_mins

    # train_fn applies the flip/swap/jitter augmentations defined above,
    # while test_fn reads the blocks unchanged
    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, train_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, test_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 // FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
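
# None of the examples show how these entry points are launched. A minimal
# sketch of the usual TF 1.x pattern; the real script may instead pick
# train() or eval() from a command-line flag:
if __name__ == '__main__':
    tf.app.run(main=lambda argv: train())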