Example #1
def Load_Scannet(npoints=8192):
    data_root = os.path.join(DATA_DIR, 'scannet_data')
    # '_small' selects the reduced dataset variant; use '' for the full set.
    small_affix = '_small'
    scannet_data_test = scannet_dataset.ScannetDatasetWholeScene(
        root=data_root, npoints=npoints, split='test', small_affix=small_affix)
    scannet_data_train = scannet_dataset.ScannetDatasetWholeScene(
        root=data_root,
        npoints=npoints,
        split='train',
        small_affix=small_affix)
    return scannet_data_train, scannet_data_test
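For reference, a minimal usage sketch of the loader above (assumes DATA_DIR and the scannet_dataset module from this example are in scope; the (points, labels, weights) tuple layout is an assumption based on PointNet++-style ScanNet datasets):

train_set, test_set = Load_Scannet(npoints=8192)
print('train scenes: %d, test scenes: %d' % (len(train_set), len(test_set)))
points, labels, weights = train_set[0]  # assumed tuple layout; one sampled block from the first scene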
Example #2
    def load_data(self):
        assert os.path.exists(DATA_PATH), 'train_data not found !!!'
        self.train_data = scannet_dataset.ScannetDataset(root=DATA_PATH, npoints=self.point_sz, split='train')
        self.test_data = scannet_dataset.ScannetDatasetWholeScene(root=DATA_PATH, npoints=self.point_sz, split='test')
        self.train_sz = len(self.train_data)
        self.test_sz = len(self.test_data)
        print('train size %d and test size %d' % (self.train_sz, self.test_sz))
Example #3
    TRAIN_DATASET = scannet_dataset.ScannetDataset(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='train',
        dataset=FLAGS.dataset,
        num_classes=FLAGS.num_classes,
        color=FLAGS.color)
    TEST_DATASET = scannet_dataset.ScannetDataset(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='test',
        dataset=FLAGS.dataset,
        num_classes=FLAGS.num_classes,
        color=FLAGS.color)
    TEST_DATASET_WHOLE_SCENE = scannet_dataset.ScannetDatasetWholeScene(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='test',
        dataset=FLAGS.dataset,
        num_classes=FLAGS.num_classes,
        color=FLAGS.color)
else:
    print('Use virtual scan data')
    TRAIN_DATASET = scannet_dataset.ScannetDatasetVirtualScan(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='train',
        dataset=FLAGS.dataset,
        num_classes=FLAGS.num_classes,
        color=FLAGS.color)
    TEST_DATASET = scannet_dataset.ScannetDatasetVirtualScan(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='test',
        dataset=FLAGS.dataset,
        num_classes=FLAGS.num_classes,
        color=FLAGS.color)
Example #4
LOG_FOUT.write(str(FLAGS)+'\n')

BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

NUM_CLASSES = 21

# Scannet official train/test split
DATA_PATH = os.path.join(ROOT_DIR,'data','scannet_data_pointnet2')
TRAIN_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train')
TEST_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test')
TEST_DATASET_WHOLE_SCENE = scannet_dataset.ScannetDatasetWholeScene(root=DATA_PATH, npoints=NUM_POINT, split='test')


def log_string(out_str):
    LOG_FOUT.write(out_str+'\n')
    LOG_FOUT.flush()
    print(out_str)

def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001)  # CLIP THE LEARNING RATE!
    return learning_rate
Example #5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()
feature_channel = 3 if FLAGS.with_rgb else 0
NUM_CLASSES = 21

# Scannet official train/test split
DATA_PATH = FLAGS.data
print("Start loading training data ...")
TRAIN_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, block_points=NUM_POINT, split='train', with_rgb=FLAGS.with_rgb)
print("Start loading validation data ...")
TEST_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, block_points=NUM_POINT, split='val', with_rgb=FLAGS.with_rgb)
print("Start loading whole scene validation data ...")
TEST_DATASET_WHOLE_SCENE = scannet_dataset.ScannetDatasetWholeScene(root=DATA_PATH, block_points=NUM_POINT, split='val', with_rgb=FLAGS.with_rgb)

def log_string(out_str):
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)

def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
        BASE_LEARNING_RATE,  # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        DECAY_STEP,  # Decay step.
        DECAY_RATE,  # Decay rate.
        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001)  # CLIP THE LEARNING RATE!
    return learning_rate
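With staircase=True, tf.train.exponential_decay evaluates to BASE_LEARNING_RATE * DECAY_RATE ** floor(batch * BATCH_SIZE / DECAY_STEP). A plain-Python sketch of the same schedule for sanity-checking the constants (the default values below are illustrative, not taken from this example's FLAGS):

def decayed_lr(batch, base_lr=0.001, batch_size=16, decay_step=200000, decay_rate=0.7):
    # Mirrors tf.train.exponential_decay(staircase=True) plus the clip above.
    lr = base_lr * decay_rate ** ((batch * batch_size) // decay_step)
    return max(lr, 1e-5)

for b in (0, 12500, 25000, 50000):
    print(b, decayed_lr(b))  # drops by a factor of decay_rate every decay_step samples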
Example #6
    help='Directory that stores all training logs and trained models')
parser.add_argument('--point_num',
                    type=int,
                    default=4096,
                    help='num of points')

FLAGS = parser.parse_args()

# DEFAULT SETTINGS
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
PRETRAINED_MODEL_PATH = os.path.join(FLAGS.restore_dir, 'trained_models/')

# dataloader
DATA_ROOT = '/mnt/raid/ji/SGPN/data/scannet_data/annotation'
TEST_DATASET = scannet_dataset.ScannetDatasetWholeScene(
    root=DATA_ROOT,
    npoints=FLAGS.point_num,
    split='/mnt/raid/ji/SGPN/data/scannet_data/meta/scannet_test.txt')  # this loader takes a scene-list file as its split argument

RESTORE_DIR = FLAGS.restore_dir
gpu_to_use = 0
OUTPUT_DIR = os.path.join(FLAGS.restore_dir, 'test_results')
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)

output_verbose = FLAGS.verbose  # If true, output all color-coded segmentation obj files

label_bin = np.loadtxt(os.path.join(RESTORE_DIR, 'pergroup_thres.txt'))
min_num_pts_in_group = np.loadtxt(os.path.join(RESTORE_DIR,
                                               'mingroupsize.txt'))

# MAIN SCRIPT
Example #7
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

NUM_CLASSES = 21

# Scannet official train/test split
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'scannet_data_pointnet2')
TRAIN_DATASET = scannet_dataset_multi.ScannetDataset(root=DATA_PATH,
                                                     npoints=NUM_POINT,
                                                     split='train')
TEST_DATASET_WHOLE_SCENE = []
for i in range(len(NUM_POINT)):
    TEST_DATASET_WHOLE_SCENE.append(
        scannet_dataset.ScannetDatasetWholeScene(root=DATA_PATH,
                                                 npoints=NUM_POINT[i],
                                                 split='test'))
SUNCG_DATASET = suncg_dataset_multi.SuncgDataset(root=DATA_PATH,
                                                 npoints=NUM_POINT,
                                                 split='train',
                                                 batch_size=BATCH_SIZE // 2)

DATA_QUEUE = Queue(maxsize=100)
DATA_QUEUE_SUN = Queue(maxsize=100)


def data_producer_suncg():
    # Queue(maxsize=100) makes put() block while the queue is full,
    # so no busy-wait on qsize() is needed.
    while True:
        data = SUNCG_DATASET.get_next()
        DATA_QUEUE_SUN.put(data)
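A producer like this is normally run on a background thread while the training loop drains the queue. A minimal sketch under that assumption (the thread setup is illustrative, not part of the original example):

import threading

producer = threading.Thread(target=data_producer_suncg, daemon=True)
producer.start()

# Training-loop side: Queue.get() blocks until the producer has put an item.
data = DATA_QUEUE_SUN.get()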
Example #8
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# Scannet official train/test split
# DATA_PATH = os.path.join(ROOT_DIR,'data','scannet_data_pointnet2')
DATA_PATH = '/home/markus/thesis/data'

TRAIN_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, data_base_name=DATA_BASE_NAME, npoints=NUM_POINT, voxel_size=VOX_SIZE, split='train')

VAL_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, npoints=NUM_POINT, data_base_name=DATA_BASE_NAME, voxel_size=VOX_SIZE, split='val')
VAL_DATASET_WHOLE_SCENE = scannet_dataset.ScannetDatasetWholeScene(root=DATA_PATH, npoints=NUM_POINT, data_base_name=DATA_BASE_NAME, voxel_size=VOX_SIZE, split='val')

assert TRAIN_DATASET.nr_classes == VAL_DATASET.nr_classes, 'Number of classes differs between the training and validation sets'
assert TRAIN_DATASET.nr_classes == VAL_DATASET_WHOLE_SCENE.nr_classes, 'Number of classes differs between the training and whole-scene validation sets'

# Fall back to a hardcoded number of classes if the assertions above fail
# NUM_CLASSES = 19
NUM_CLASSES = TRAIN_DATASET.nr_classes

def log_string(out_str):
    LOG_FOUT.write(out_str+'\n')
    LOG_FOUT.flush()
    print(out_str)

def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(