def __init__(self, options, split, random=True):
        """Index ScanNet scenes and all of their images for the given split.

        Args:
            options: configuration object; must provide numTrainingImages and
                numTestingImages, and is forwarded to ScanNetScene.
            split: split name ('train', 'test', ...); selects the scene-id
                list file 'scannetv1_<split>.txt'.
            random: stored flag; presumably controls sampling behavior
                elsewhere in the class -- TODO confirm against __getitem__.
        """
        self.options = options
        self.random = random

        dataFolder = '../../Data/ScanNet/'

        self.scenes = []
        ## Flat list of [sceneIndex, imageIndex] pairs across all loaded scenes.
        self.sceneImageIndices = []
        with open(dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' + split + '.txt') as f:
            for line in f:
                scene_id = line.strip()
                scenePath = dataFolder + '/scans/' + scene_id
                ## Skip scenes missing the metadata file or any segmentation annotation.
                if not os.path.exists(scenePath + '/' + scene_id + '.txt') or len(glob.glob(scenePath + '/annotation/segmentation/*')) == 0:
                    continue
                scene = ScanNetScene(options, scenePath, scene_id)
                self.scenes.append(scene)
                self.sceneImageIndices += [[len(self.scenes) - 1, imageIndex] for imageIndex in range(len(scene.imagePaths))]
                continue
            pass

        ## Fixed 2to3 artifact: print(('a', b)) printed a tuple; match the
        ## 'num images' print style used by the sibling constructors.
        print('num images', len(self.sceneImageIndices))

        np.random.shuffle(self.sceneImageIndices)

        ## Optionally cap how many images this split uses (<= 0 keeps all).
        numImages = options.numTrainingImages if split == 'train' else options.numTestingImages
        if numImages > 0:
            self.sceneImageIndices = self.sceneImageIndices[:numImages]
            pass
        return
 def __init__(self, config, split, random=True, evaluation=False):
     """Load ScanNet scenes that carry plane annotations for the given split.

     Args:
         config: configuration object; must provide dataFolder and is
             forwarded to ScanNetScene.
         split: split name ('train', 'test', ...); selects the scene-id
             list file 'scannetv1_<split>.txt'.
         random: if True, seed numpy's RNG from the clock; otherwise seed 0
             for reproducible runs.
         evaluation: stored as self.eval; presumably toggles evaluation-mode
             behavior elsewhere in the class -- TODO confirm.
     """
     self.random = random
     self.dataFolder = config.dataFolder
     self.split = split
     self.eval = evaluation
     self.scenes = []
     self.loadClassMap()
     ## Scene ids from PlaneNet's validation list; stored as a dict so the
     ## membership test below is O(1).
     planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
     planenet_scene_ids_val = {
         scene_id.decode('utf-8'): True
         for scene_id in planenet_scene_ids_val
     }
     with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' +
               split + '.txt') as f:
         for line in f:
             scene_id = line.strip()
             if split == 'test':
                 ## Remove scenes which are in PlaneNet's training set for fair comparison
                 if scene_id not in planenet_scene_ids_val:
                     continue
                 pass
             scenePath = self.dataFolder + '/scans/' + scene_id
             ## Skip scenes missing the metadata file or the plane annotations.
             if not os.path.exists(scenePath + '/' + scene_id +
                                   '.txt') or not os.path.exists(
                                       scenePath +
                                       '/annotation/planes.npy'):
                 continue
             scene = ScanNetScene(config, scenePath, scene_id,
                                  self.confident_labels, self.layout_labels)
             ## Drop scenes with fewer than 100 usable frames.
             if len(scene) < 100:
                 continue
             self.scenes.append(scene)
             continue
         pass
     ## Standard ImageNet mean/std normalization.
     self.transform = tf.Compose([
         tf.ToTensor(),
         tf.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
     ])
     if random:
         ## Byte-swap the microsecond clock to get a well-mixed 32-bit seed.
         t = int(time.time() * 1000000)
         np.random.seed(((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) +
                        ((t & 0x0000ff00) << 8) + ((t & 0x000000ff) << 24))
     else:
         np.random.seed(0)
         pass
     print(f'num of scenes {len(self.scenes)}')
     ## np.int was removed in NumPy 1.24; the builtin int is what it aliased.
     self.tmp_img_index = np.load('./datasets/img_id_list.npy').astype(int)
## Example #3
    def __init__(self,
                 options,
                 config,
                 split,
                 random=True,
                 loadNeighborImage=False,
                 load_semantics=False,
                 load_boundary=False):
        """Index ScanNet scenes/images with plane annotations and RPN anchors.

        Args:
            options: configuration object; must provide dataFolder and is
                forwarded to ScanNetScene.
            config: model configuration; must provide the RPN_ANCHOR_* and
                BACKBONE_* fields used for anchor generation.
            split: split name ('train', 'test', ...); selects the scene-id
                list file and the invalid-index file.
            random: if True, seed numpy's RNG from the clock; otherwise seed 0
                for reproducible runs.
            loadNeighborImage: stored flag; presumably makes samples include a
                neighboring frame -- TODO confirm against __getitem__.
            load_semantics: forwarded to ScanNetScene.
            load_boundary: forwarded to ScanNetScene.
        """
        self.options = options
        self.config = config
        self.split = split
        self.random = random

        self.dataFolder = options.dataFolder

        self.scenes = []
        ## Flat list of [sceneIndex, imageIndex] pairs across all loaded scenes.
        self.sceneImageIndices = []

        self.loadClassMap()

        ## Scene ids from PlaneNet's validation list; stored as a dict so the
        ## membership test below is O(1).
        planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
        planenet_scene_ids_val = {
            scene_id.decode('utf-8'): True
            for scene_id in planenet_scene_ids_val
        }
        with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' +
                  split + '.txt') as f:
            for line in f:
                scene_id = line.strip()
                if split == 'test':
                    ## Remove scenes which are in PlaneNet's training set for fair comparison
                    if scene_id not in planenet_scene_ids_val:
                        continue
                    pass
                scenePath = self.dataFolder + '/scans/' + scene_id
                ## Skip scenes missing the metadata file or the plane annotations.
                if not os.path.exists(scenePath + '/' + scene_id +
                                      '.txt') or not os.path.exists(
                                          scenePath +
                                          '/annotation/planes.npy'):
                    continue
                scene = ScanNetScene(options,
                                     scenePath,
                                     scene_id,
                                     self.confident_labels,
                                     self.layout_labels,
                                     load_semantics=load_semantics,
                                     load_boundary=load_boundary)
                self.scenes.append(scene)
                self.sceneImageIndices += [[
                    len(self.scenes) - 1, imageIndex
                ] for imageIndex in range(len(scene.imagePaths))]
                continue
            pass

        if random:
            ## Byte-swap the microsecond clock to get a well-mixed 32-bit seed.
            t = int(time.time() * 1000000)
            np.random.seed(((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) +
                           ((t & 0x0000ff00) << 8) + ((t & 0x000000ff) << 24))
        else:
            np.random.seed(0)
            pass
        np.random.shuffle(self.sceneImageIndices)

        ## Blacklisted images, keyed by sceneIndex * 10000 + imageIndex.
        self.invalid_indices = {}

        with open(self.dataFolder + '/invalid_indices_' + split + '.txt',
                  'r') as f:
            for line in f:
                tokens = line.split(' ')
                if len(tokens) == 3:
                    ## Explicit validation instead of assert: asserts are
                    ## stripped under python -O, and the packed key below
                    ## silently collides if the image index is >= 10000.
                    if int(tokens[2]) >= 10000:
                        raise ValueError(
                            'image index must be < 10000, got: ' + line)
                    invalid_index = int(tokens[1]) * 10000 + int(tokens[2])
                    if invalid_index not in self.invalid_indices:
                        self.invalid_indices[invalid_index] = True
                        pass
                    pass
                continue
            pass

        self.sceneImageIndices = [
            [sceneIndex, imageIndex]
            for sceneIndex, imageIndex in self.sceneImageIndices
            if (sceneIndex * 10000 + imageIndex) not in self.invalid_indices
        ]

        print('num images', len(self.sceneImageIndices))

        ## Precompute the anchor pyramid once; it is shared by every sample.
        self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                                      config.RPN_ANCHOR_RATIOS,
                                                      config.BACKBONE_SHAPES,
                                                      config.BACKBONE_STRIDES,
                                                      config.RPN_ANCHOR_STRIDE)

        self.loadNeighborImage = loadNeighborImage

        return
## Example #4
    def __init__(self, config, split, random=True, evaluation=False):
        """Index ScanNet scenes/images with plane annotations for evaluation.

        Args:
            config: configuration object; must provide dataFolder and is
                forwarded to ScanNetScene.
            split: split name ('train', 'test', ...); selects the scene-id
                list file and the invalid-index file.
            random: if True, seed numpy's RNG from the clock; otherwise seed 0
                for reproducible runs.
            evaluation: stored as self.eval; presumably toggles
                evaluation-mode behavior elsewhere in the class -- TODO confirm.
        """
        self.random = random
        self.dataFolder = config.dataFolder
        self.split = split
        self.eval = evaluation
        self.scenes = []
        ## Flat list of [sceneIndex, imageIndex] pairs across all loaded scenes.
        self.sceneImageIndices = []
        self.loadClassMap()
        ## Scene ids from PlaneNet's validation list; stored as a dict so the
        ## membership test below is O(1).
        planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
        planenet_scene_ids_val = {
            scene_id.decode('utf-8'): True
            for scene_id in planenet_scene_ids_val
        }
        with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' +
                  split + '.txt') as f:
            for line in f:
                scene_id = line.strip()
                if split == 'test':
                    ## Remove scenes which are in PlaneNet's training set for fair comparison
                    if scene_id not in planenet_scene_ids_val:
                        continue
                    pass
                scenePath = self.dataFolder + '/scans/' + scene_id
                ## Skip scenes missing the metadata file or the plane annotations.
                if not os.path.exists(scenePath + '/' + scene_id +
                                      '.txt') or not os.path.exists(
                                          scenePath +
                                          '/annotation/planes.npy'):
                    continue
                scene = ScanNetScene(config, scenePath, scene_id,
                                     self.confident_labels, self.layout_labels)
                self.scenes.append(scene)
                self.sceneImageIndices += [[
                    len(self.scenes) - 1, imageIndex
                ] for imageIndex in range(len(scene.imagePaths))]
                continue
            pass
        ## Standard ImageNet mean/std normalization.
        self.transform = tf.Compose([
            tf.ToTensor(),
            tf.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        if random:
            ## Byte-swap the microsecond clock to get a well-mixed 32-bit seed.
            t = int(time.time() * 1000000)
            np.random.seed(((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) +
                           ((t & 0x0000ff00) << 8) + ((t & 0x000000ff) << 24))
        else:
            np.random.seed(0)
            pass
        np.random.shuffle(self.sceneImageIndices)

        ## Blacklisted images, keyed by sceneIndex * 10000 + imageIndex.
        self.invalid_indices = {}

        with open(self.dataFolder + '/invalid_indices_' + split + '.txt',
                  'r') as f:
            for line in f:
                tokens = line.split(' ')
                if len(tokens) == 3:
                    ## Explicit validation instead of assert: asserts are
                    ## stripped under python -O, and the packed key below
                    ## silently collides if the image index is >= 10000.
                    if int(tokens[2]) >= 10000:
                        raise ValueError(
                            'image index must be < 10000, got: ' + line)
                    invalid_index = int(tokens[1]) * 10000 + int(tokens[2])
                    if invalid_index not in self.invalid_indices:
                        self.invalid_indices[invalid_index] = True
                        pass
                    pass
                continue
            pass

        self.sceneImageIndices = [
            [sceneIndex, imageIndex]
            for sceneIndex, imageIndex in self.sceneImageIndices
            if (sceneIndex * 10000 + imageIndex) not in self.invalid_indices
        ]

        print('num images', len(self.sceneImageIndices))
        self.save_png = False
        self.return_prcnn = False
        ## np.int was removed in NumPy 1.24; the builtin int is what it
        ## aliased, so these keep the same dtype on modern NumPy.
        self.tmp_img_index = np.load('./datasets/img_id_list.npy').astype(int)
        self.test_ids = np.loadtxt('./datasets/test_ids.txt', dtype=int)