Ejemplo n.º 1
0
def train():
    """Build the S3DIS segmentation graph and alternate train/test epochs."""
    pt_num = 4096
    voxel_num = FLAGS.split_num ** 3

    train_list, test_list = get_train_test_split()
    prefix = 'data/S3DIS/folding/block_v2/'
    train_list = [prefix + fn + '.h5' for fn in train_list]
    test_list = [prefix + fn + '.h5' for fn in test_list]

    batch_size = FLAGS.batch_size * FLAGS.num_gpus
    train_provider = ProviderV2(train_list, 'train', batch_size, batch_fn,
                                read_fn, 2)
    test_provider = ProviderV2(test_list, 'test', batch_size, batch_fn,
                               read_fn, 2)

    try:
        # one placeholder per batch component fed each step
        pls = {
            'points': tf.placeholder(tf.float32, [None, pt_num, 6], 'points'),
            'covars': tf.placeholder(tf.float32, [None, pt_num, 9], 'covars'),
            'rpoints': tf.placeholder(tf.float32, [None, pt_num, 3],
                                      'rpoints'),
            'labels': tf.placeholder(tf.int64, [None, pt_num], 'labels'),
            'voxel_state': tf.placeholder(tf.float32, [None, voxel_num],
                                          'voxel_state'),
            'voxel_color': tf.placeholder(tf.float32, [None, voxel_num, 3],
                                          'voxel_color'),
        }
        # last argument presumably is steps per epoch (~26000 samples) —
        # TODO confirm against train_ops
        ops = train_ops(pls['points'], pls['covars'], pls['rpoints'],
                        pls['labels'], pls['voxel_state'], pls['voxel_color'],
                        FLAGS.num_classes, voxel_num, 26000 / batch_size)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=500)
        # saver.restore(sess,'model/voxel_label_sync/unsupervise9.ckpt')
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Ejemplo n.º 2
0
def train():
    """Unsupervised folding training; resumes from the epoch-255 checkpoint."""
    pt_num = 4096

    train_list, test_list = get_train_test_split()
    train_list += test_list  # train on all blocks; keep 5 files for testing
    prefix = 'data/S3DIS/folding/block_v2/'
    train_list = [prefix + fn + '.h5' for fn in train_list]
    test_list = [prefix + fn + '.h5' for fn in test_list[:5]]

    # only the first two fields (points, covars) of each block are needed
    read_fn = lambda model, fn: read_block_v2(fn)[:2]

    batch_size = FLAGS.batch_size * FLAGS.num_gpus
    train_provider = ProviderV2(train_list, 'train', batch_size, batch_fn,
                                read_fn, 2)
    test_provider = ProviderV2(test_list, 'test', batch_size, batch_fn,
                               read_fn, 2)

    try:
        pls = {
            'points': tf.placeholder(tf.float32, [None, pt_num, 3], 'points'),
            'covars': tf.placeholder(tf.float32, [None, pt_num, 9], 'covars'),
            'grids': tf.placeholder(tf.float32, [4096, 3], 'grids'),
        }
        ops = train_ops(pls['points'], pls['covars'], pls['grids'],
                        22000 / batch_size)

        # the folding grid is constant for the whole run, so feed it once
        feed_dict = {pls['grids']: generate_grids()}

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=500)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        saver.restore(sess, 'model/unsupervise255.ckpt')

        # epoch numbering continues from the restored checkpoint
        for epoch_num in xrange(255, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Ejemplo n.º 3
0
def train():
    """Train ModelNet classification with a voxel-state auxiliary input."""
    pt_num = 2048
    voxel_num = FLAGS.split_num ** 3

    # five train shards plus one voxelized test shard for training;
    # the other voxelized test shard is held out for evaluation
    train_list = [
        'data/ModelNetTrain/nidxs/ply_data_train{}.h5'.format(i)
        for i in xrange(5)
    ]
    train_list.append('data/ModelNetTrain/voxel/ply_data_test_voxel0.h5')
    test_list = ['data/ModelNetTrain/voxel/ply_data_test_voxel1.h5']

    batch_size = FLAGS.batch_size * FLAGS.num_gpus
    train_provider = ProviderV2(train_list, 'train', batch_size, batch_fn,
                                read_fn, 2)
    test_provider = ProviderV2(test_list, 'test', batch_size, batch_fn,
                               read_fn, 2)

    try:
        pls = {
            'points': tf.placeholder(tf.float32, [None, pt_num, 3], 'points'),
            'covars': tf.placeholder(tf.float32, [None, pt_num, 9], 'covars'),
            'labels': tf.placeholder(tf.int64, [None], 'labels'),
            'is_training': tf.placeholder(tf.bool, name='is_training'),
            'voxel_state': tf.placeholder(tf.float32, [None, voxel_num],
                                          'voxel_state'),
        }

        ops = train_ops(pls['points'], pls['covars'], pls['labels'],
                        pls['voxel_state'], pls['is_training'],
                        FLAGS.num_classes, voxel_num, 10000 / batch_size)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=500)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Ejemplo n.º 4
0
def test_data_iter():
    train_list, test_list = get_train_test_split()
    train_list += test_list
    train_list = [
        'data/S3DIS/folding/block_v2/{}.h5'.format(fn) for fn in train_list
    ]
    # test_list=['data/S3DIS/folding/block_v2/{}.h5'.format(fn) for fn in test_list]

    train_provider = ProviderV2(train_list, 'train', 32, batch_fn, read_fn, 2)

    begin = time.time()
    for data in train_provider:
        for item in data:
            print item.shape
        print 'cost {} s'.format(time.time() - begin)
        begin = time.time()
        # print data[0].shape,data[1].shape

    begin = time.time()
    for data in train_provider:
        print 'cost {} s'.format(time.time() - begin)
        begin = time.time()
        # print data[0].shape, data[1].shape

    train_provider.close()
Ejemplo n.º 5
0
def test_data_iter():
    train_list, test_list = get_train_test_split()
    train_list = [
        'data/S3DIS/folding/block_v2/' + fn + '.h5' for fn in train_list
    ]
    read_fn = lambda fn: read_block_v2(fn)[:2]

    train_provider = ProviderV2(train_list, 'train', 20, batch_fn, read_fn, 2)
    for data in train_provider:
        print data[0].shape, data[1].shape

    for data in train_provider:
        print data[0].shape, data[1].shape

    train_provider.close()
Ejemplo n.º 6
0
def test_data_iter():
    train_list, test_list = get_train_test_split()
    train_list = [
        'data/S3DIS/folding/block_v2/' + fn + '.h5' for fn in train_list
    ]
    # test_list=['data/S3DIS/folding/block_v2/'+fn+'.h5' for fn in test_list]

    train_provider = ProviderV2(train_list, 'train',
                                FLAGS.batch_size * FLAGS.num_gpus, batch_fn,
                                read_block_v2, 2)
    max_label = 0
    for data in train_provider:
        # print data[0].shape,data[1].shape
        max_label = max(np.max(data[3]), max_label)

    print max_label
    train_provider.close()
Ejemplo n.º 7
0
def test_data_iter():
    train_list = [
        'data/ModelNetTrain/nidxs/ply_data_train{}.h5'.format(i)
        for i in xrange(5)
    ]
    train_list += ['data/ModelNetTrain/voxel/ply_data_test_voxel0.h5']
    test_list = ['data/ModelNetTrain/voxel/ply_data_test_voxel1.h5']

    train_provider = ProviderV2(train_list, 'train',
                                FLAGS.batch_size * FLAGS.num_gpus, batch_fn,
                                read_fn, 2)
    # test_provider = ProviderV2(test_list,'test',FLAGS.batch_size*FLAGS.num_gpus,batch_fn,read_fn,2)

    begin = time.time()
    for data in train_provider:
        print 'cost {} s'.format(time.time() - begin)
        begin = time.time()
Ejemplo n.º 8
0
def test_data_iter():
    train_list=['data/ModelNetTrain/nidxs/ply_data_train{}.h5'.format(i) for i in xrange(5)]
    test_list=['data/ModelNetTrain/nidxs/ply_data_test{}.h5'.format(i) for i in xrange(2)]

    train_provider = ProviderV2(train_list,'test',FLAGS.batch_size*FLAGS.num_gpus,batch_fn,read_fn,2)
    # test_provider = ProviderV2(test_list,'test',FLAGS.batch_size*FLAGS.num_gpus,batch_fn,read_fn,2)

    begin=time.time()
    for data in train_provider:
        for item in data:
            print item.shape
        break
        # true_state_pts = voxel2points(data[3][0])
        # fn = os.path.join(FLAGS.dump_dir, 'state_true.txt')
        # output_points(fn, true_state_pts)
        # print np.min(data[0],axis=(0,1))
        # print 'cost {} s'.format(time.time()-begin)
        # break

    train_provider.close()
Ejemplo n.º 9
0
def eval():
    """Restore a trained S3DIS segmentation model and run one test epoch."""
    pt_num = 4096
    voxel_num = FLAGS.split_num ** 3

    _, test_list = get_train_test_split()
    prefix = 'data/S3DIS/folding/block_v2/'
    test_list = [prefix + fn + '.h5' for fn in test_list]
    test_provider = ProviderV2(test_list, 'test',
                               FLAGS.batch_size * FLAGS.num_gpus, batch_fn,
                               read_block_v2, 2)

    try:
        pls = {
            'points': tf.placeholder(tf.float32, [None, pt_num, 6], 'points'),
            'covars': tf.placeholder(tf.float32, [None, pt_num, 9], 'covars'),
            'rpoints': tf.placeholder(tf.float32, [None, pt_num, 3],
                                      'rpoints'),
            'labels': tf.placeholder(tf.int64, [None, pt_num], 'labels'),
            'voxel_state': tf.placeholder(tf.float32, [None, voxel_num],
                                          'voxel_state'),
            'voxel_color': tf.placeholder(tf.float32, [None, voxel_num, 3],
                                          'voxel_color'),
        }
        ops = train_ops(pls['points'], pls['covars'], pls['rpoints'],
                        pls['labels'], pls['voxel_state'], pls['voxel_color'],
                        FLAGS.num_classes, voxel_num,
                        22000 / (FLAGS.batch_size * FLAGS.num_gpus))

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)  # restore supplies all variables

        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict)

    finally:
        test_provider.close()
Ejemplo n.º 10
0
def eval():
    """Restore a trained ModelNet classifier and run one test epoch.

    Fix: the ProviderV2 call passed read_fn in the batch_fn position and
    omitted the remaining arguments; every other ProviderV2 call in this
    file uses (file_list, mode, batch_size, batch_fn, read_fn, 2), so the
    same argument order is restored here.
    """
    pt_num = 2048

    # train_list,test_list=get_train_test_split()
    # train_list = ['data/ModelNetTrain/nidxs/ply_data_train{}.h5'.format(i) for i in xrange(5)]
    test_list = ['data/ModelNetTrain/nidxs/ply_data_test{}.h5'.format(i) for i in xrange(2)]

    # train_provider = ProviderV2(train_list, 'train', FLAGS.batch_size * FLAGS.num_gpus, batch_fn, read_fn, 2)
    test_provider = ProviderV2(test_list, 'test',
                               FLAGS.batch_size * FLAGS.num_gpus, batch_fn,
                               read_fn, 2)

    try:
        pls = {}
        pls['points'] = tf.placeholder(tf.float32, [None, pt_num, 3], 'points')
        pls['covars'] = tf.placeholder(tf.float32, [None, pt_num, 9], 'covars')
        pls['labels'] = tf.placeholder(tf.int64, [None], 'labels')
        pls['is_training'] = tf.placeholder(tf.bool, name='is_training')

        ops = train_ops(pls['points'], pls['covars'],
                        pls['labels'], pls['is_training'], FLAGS.num_classes,
                        8000 / (FLAGS.batch_size * FLAGS.num_gpus))

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        # harmless before restore; restore overwrites the saved variables
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)

        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict)

    finally:
        # train_provider.close()
        test_provider.close()
Ejemplo n.º 11
0
def test_points2voxel():
    import Points2Voxel
    from provider import ProviderV2, ProviderV3
    from s3dis.block_util import read_block_v2
    from s3dis.data_util import get_train_test_split
    from s3dis.voxel_util import points2voxel_color_gpu
    from s3dis.voxel_util import point2voxel
    from s3dis.draw_util import output_points

    def read_fn(filename):
        points, covars = read_block_v2(filename)[:2]
        voxel_state, voxel_color = points2voxel_color_gpu(points, 30)

        return points, covars, voxel_state, voxel_color

    def batch_fn(file_data, cur_idx, data_indices, require_size):
        points, covars, voxel_state, voxel_color = file_data
        end_idx = min(cur_idx + require_size, points.shape[0])

        return [
            points[data_indices[cur_idx:end_idx], :, :],
            covars[data_indices[cur_idx:end_idx], :, :],
            voxel_state[data_indices[cur_idx:end_idx], :],
            voxel_color[data_indices[cur_idx:end_idx], :, :]
        ], end_idx - cur_idx

    train_list, _ = get_train_test_split()
    train_list = [
        'data/S3DIS/folding/block_v2/{}.h5'.format(fn) for fn in train_list
    ]

    train_provider = ProviderV2(train_list, 'train', 32, batch_fn, read_fn, 2)

    begin = time.time()
    total_begin = time.time()
    for data in train_provider:
        # for pts_i,pts in enumerate(data[0][:2]):
        #     print np.min(pts,axis=0)
        #     pts[:,3:]+=1.0
        #     pts[:,3:]*=128
        #     output_points('original{}.txt'.format(pts_i),pts)
        #
        # for vi,v in enumerate(data[2][:2]):
        #     vpts=voxel2points(v)
        #     vpts[:,:3]/=np.max(vpts[:,:3],axis=0,keepdims=True)
        #     print np.min(vpts,axis=0)
        #     output_points('voxels{}.txt'.format(vi),vpts)
        #
        # for vi,v in enumerate(data[3][:2]):
        #     vpts=voxel2points(v)
        #     vpts[:,:3]/=np.max(vpts[:,:3],axis=0,keepdims=True)
        #     print np.min(vpts,axis=0)
        #     output_points('voxels_color{}.txt'.format(vi),vpts)

        # time.sleep(0.1)
        print 'cost {} s'.format(time.time() - begin)
        begin = time.time()
        # train_provider.close()
        # exit(0)

    print 'total cost {} s'.format(time.time() - total_begin)
    train_provider.close()
    print ' exit '
    exit(0)