Example #1
    def __init__(self, mode, test_area_idx):
        self.name = 'S3DIS'
        self.path = 'data/S3DIS'
        self.label_to_names = {
            0: 'ceiling',
            1: 'floor',
            2: 'wall',
            3: 'beam',
            4: 'column',
            5: 'window',
            6: 'door',
            7: 'table',
            8: 'chair',
            9: 'sofa',
            10: 'bookcase',
            11: 'board',
            12: 'clutter'
        }
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.array([])

        self.val_split = 'Area_' + str(test_area_idx)
        self.all_files = glob.glob(join(self.path, 'original_ply', '*.ply'))

        # Initiate containers
        self.val_proj = []
        self.val_labels = []
        self.possibility = {}
        self.min_possibility = {}
        self.input_trees = {'training': [], 'validation': []}
        self.input_colors = {'training': [], 'validation': []}
        self.input_labels = {'training': [], 'validation': []}
        self.input_names = {'training': [], 'validation': []}
        self.load_sub_sampled_clouds(cfg.sub_grid_size)

        # ES: ignored_label_inds, class_weights, and init lines of `get_batch_gen'.
        self.mode = mode

        self.possibility[self.mode] = []
        self.min_possibility[self.mode] = []
        # Random initialize
        for i, tree in enumerate(self.input_colors[self.mode]):
            self.possibility[self.mode] += [
                np.random.rand(tree.data.shape[0]) * 1e-3
            ]
            self.min_possibility[self.mode] += [
                float(np.min(self.possibility[self.mode][-1]))
            ]

        cfg.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        cfg.class_weights = DP.get_class_weights('S3DIS')
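The possibility and min_possibility containers above drive RandLA-Net-style spatially regular sampling: every point starts with a tiny random score, the cloud with the lowest minimum score is queried next, and the scores of visited points are increased so later queries drift toward unvisited regions. A minimal NumPy sketch of that selection step follows; the function name pick_next_center and the distance-based score update are illustrative assumptions, not code from this example.

import numpy as np

def pick_next_center(possibility, min_possibility, points_per_cloud):
    """Pick the least-visited cloud/point and bump the scores around it.

    possibility:      list of 1-D arrays, one score per point per cloud
    min_possibility:  list of floats, minimum score per cloud
    points_per_cloud: list of (N_i, 3) arrays of point coordinates
    """
    cloud_idx = int(np.argmin(min_possibility))          # least-visited cloud
    point_idx = int(np.argmin(possibility[cloud_idx]))   # least-visited point
    center = points_per_cloud[cloud_idx][point_idx]

    # Raise the score of nearby points so the next pick moves elsewhere
    dists = np.linalg.norm(points_per_cloud[cloud_idx] - center, axis=1)
    possibility[cloud_idx] += np.square(1 - dists / (dists.max() + 1e-6))
    min_possibility[cloud_idx] = float(np.min(possibility[cloud_idx]))
    return cloud_idx, point_idx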
Example #2
    def __init__(self, mode, test_id=None):
        self.name = 'SemanticKITTI'
        self.dataset_path = '/data/WQ/DataSet/semantic-kitti/dataset/sequences_0.06'
        self.label_to_names = {0: 'unlabeled',
                               1: 'car',
                               2: 'bicycle',
                               3: 'motorcycle',
                               4: 'truck',
                               5: 'other-vehicle',
                               6: 'person',
                               7: 'bicyclist',
                               8: 'motorcyclist',
                               9: 'road',
                               10: 'parking',
                               11: 'sidewalk',
                               12: 'other-ground',
                               13: 'building',
                               14: 'fence',
                               15: 'vegetation',
                               16: 'trunk',
                               17: 'terrain',
                               18: 'pole',
                               19: 'traffic-sign'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        self.seq_list = np.sort(os.listdir(self.dataset_path))

        if mode == 'test':
            self.test_scan_number = str(test_id)

        self.mode = mode
        train_list, val_list, test_list = DP.get_file_list(self.dataset_path, str(test_id))
        if mode == 'training':
            self.data_list = train_list
        elif mode == 'validation':
            self.data_list = val_list
        elif mode == 'test':
            self.data_list = test_list

        # self.data_list = self.data_list[0:1]
        self.data_list = DP.shuffle_list(self.data_list)

        self.possibility = []
        self.min_possibility = []
        if mode == 'test':
            path_list = self.data_list
            for test_file_name in path_list:
                points = np.load(test_file_name)
                self.possibility += [np.random.rand(points.shape[0]) * 1e-3]
                self.min_possibility += [float(np.min(self.possibility[-1]))]

        cfg.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]
        cfg.class_weights = DP.get_class_weights('SemanticKITTI')
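cfg.class_weights is filled from DP.get_class_weights, which returns precomputed per-class weights for the named dataset. A common scheme for such weights is smoothed inverse-frequency weighting over the label histogram; the sketch below only illustrates that idea and is not necessarily what DP implements.

import numpy as np

def inverse_frequency_weights(label_counts, smoothing=1e-6):
    """Toy per-class weights from raw label counts (illustrative only)."""
    counts = np.asarray(label_counts, dtype=np.float64)
    freq = counts / counts.sum()
    weights = 1.0 / (freq + smoothing)                # rarer classes weigh more
    return weights / weights.sum() * len(counts)      # normalise around 1.0

# Example: three classes with very unbalanced point counts
print(inverse_frequency_weights([1_000_000, 5_000, 500]))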
Example #3
    def __init__(self, config, dataset_name='SemanticKITTI'):
        super().__init__()
        self.config = config
        self.class_weights = DP.get_class_weights(dataset_name)

        self.fc0 = pt_utils.Conv1d(3, 8, kernel_size=1, bn=True)

        self.dilated_res_blocks = nn.ModuleList()
        d_in = 8
        for i in range(self.config.num_layers):
            d_out = self.config.d_out[i]
            self.dilated_res_blocks.append(Dilated_res_block(d_in, d_out))
            d_in = 2 * d_out

        d_out = d_in
        self.decoder_0 = pt_utils.Conv2d(d_in,
                                         d_out,
                                         kernel_size=(1, 1),
                                         bn=True)

        self.decoder_blocks = nn.ModuleList()
        for j in range(self.config.num_layers):
            if j < 3:
                d_in = d_out + 2 * self.config.d_out[-j - 2]
                d_out = 2 * self.config.d_out[-j - 2]
            else:
                d_in = 4 * self.config.d_out[-4]
                d_out = 2 * self.config.d_out[-4]
            self.decoder_blocks.append(
                pt_utils.Conv2d(d_in, d_out, kernel_size=(1, 1), bn=True))

        self.fc1 = pt_utils.Conv2d(d_out, 64, kernel_size=(1, 1), bn=True)
        self.fc2 = pt_utils.Conv2d(64, 32, kernel_size=(1, 1), bn=True)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = pt_utils.Conv2d(32,
                                   self.config.num_classes,
                                   kernel_size=(1, 1),
                                   bn=False,
                                   activation=None)
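In the encoder loop above each Dilated_res_block maps d_in to 2 * d_out, and the decoder mirrors those widths through the skip connections. The stand-alone sketch below just prints the resulting channel sizes; the config.d_out value [16, 64, 128, 256] is an assumed example, not taken from this code.

d_out_cfg = [16, 64, 128, 256]   # assumed per-layer widths
num_layers = len(d_out_cfg)

# Encoder: each dilated residual block maps d_in -> 2 * d_out
d_in = 8
for i, d_out in enumerate(d_out_cfg):
    print('encoder %d: %d -> %d' % (i, d_in, 2 * d_out))
    d_in = 2 * d_out

# Decoder: concatenate the skip connection, mirroring the encoder widths
d_out = d_in
for j in range(num_layers):
    if j < 3:
        d_in = d_out + 2 * d_out_cfg[-j - 2]
        d_out = 2 * d_out_cfg[-j - 2]
    else:
        d_in = 4 * d_out_cfg[-4]
        d_out = 2 * d_out_cfg[-4]
    print('decoder %d: %d -> %d' % (j, d_in, d_out))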
Example #4
    def __init__(self, mode):
        self.name = 'raildata_RandLA'
        self.dataset_path = '/home/hwq/dataset/rail_randla_0.06'
        self.label_to_names = {0: 'unlabeled', 1: 'rail', 2: 'pole'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])  # [0,1,2]
        self.label_to_idx = {l: i
                             for i, l in enumerate(self.label_values)
                             }  # dict {0:0,1:1,2:2}
        self.ignored_labels = np.sort([0])
        self.mode = mode

        fns = sorted(os.listdir(join(self.dataset_path, 'velodyne')))
        train_index = np.load('./utils/rail_index/trainindex.npy')
        test_index = np.load('./utils/rail_index/testindex.npy')

        alldatapath = []
        for fn in fns:
            alldatapath.append(os.path.join(self.dataset_path, fn))
        # print(alldatapath,train_index)

        self.data_list = []
        if mode == 'training':
            for index in train_index:
                self.data_list.append(alldatapath[index])
        elif mode == 'validation':
            for index in test_index:
                self.data_list.append(alldatapath[index])
        elif mode == 'test':
            for index in test_index:
                self.data_list.append(alldatapath[index])
        self.data_list = np.asarray(self.data_list)
        self.data_list = DP.shuffle_list(self.data_list)
        cfg.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        cfg.class_weights = DP.get_class_weights('Rail')
Example #5
def train(args, io):
    train_loader = DataLoader(S3DIS(5, args.num_points, partition='train'),
                              num_workers=2,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(S3DIS(5, args.num_points, partition='val'),
                             num_workers=2,
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)
    # train_data = S3DIS(5, args.num_points, partition='train')
    # test_data = S3DIS(5, args.num_points, partition='val')

    device = torch.device("cuda" if args.cuda else "cpu")

    class_weights = torch.from_numpy(DP.get_class_weights('S3DIS')).to(device)
    class_weights = class_weights.float()
    model = Seg(args).to(device)
    print(str(model))
    model = nn.DataParallel(model)
    print('num_points:%s, batch_size:%s, %s' %
          (args.num_points, args.batch_size, args.test_batch_size))

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)

    criterion = seg_loss
    best_test_acc = 0

    for epoch in range(args.epochs):
        scheduler.step()
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        idx = 0
        total_time = 0.0
        # logits_ = []
        # label_ = []
        # for i in range(train_data.__len__()):
        #     data, label = train_data.__getitem__(i)
        #     data, label = torch.from_numpy(data).to(device), torch.from_numpy(label).to(device).squeeze()
        #     data, label = torch.unsqueeze(data, 0), torch.unsqueeze(label, 0)
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = args.batch_size

            start_time = time.time()
            logits = model(data)
            end_time = time.time()
            total_time += (end_time - start_time)
            # logits_.append(logits)
            # label_.append(label)

            # if len(logits_) < batch_size:
            #     continue

            opt.zero_grad()
            # logits, label = torch.cat(logits_, dim=-1), torch.cat(label_, dim=-1)
            loss = get_loss(logits, label, class_weights)
            loss.backward()
            # logits_.clear()
            # label_.clear()
            opt.step()

            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true += label.cpu().numpy().tolist()[0]
            train_pred += preds.detach().cpu().numpy().tolist()[0]

        print('train total time is', total_time)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        if epoch % 5 == 0:
            test_loss = 0.0
            count = 0.0
            model.eval()
            test_pred = []
            test_true = []
            total_time = 0.0
            idx = 0
            # logits_ = []
            # label_ = []
            # for i in range(test_data.__len__()):
            #     data, label = test_data.__getitem__(i)
            #     data, label = torch.from_numpy(data).to(device), torch.from_numpy(label).to(device).squeeze()
            #     data, label = torch.unsqueeze(data, 0), torch.unsqueeze(label, 0)
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = args.test_batch_size

                start_time = time.time()
                logits = model(data)
                end_time = time.time()
                total_time += (end_time - start_time)
                # logits_.append(logits)
                # label_.append(label)

                # if len(logits_) < batch_size:
                #     continue

                # logits, label = torch.cat(logits_, dim=-1), torch.cat(label_, dim=-1)
                loss = get_loss(logits, label, class_weights)
                # logits_.clear()
                # label_.clear()

                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_true += label.cpu().numpy().tolist()[0]
                test_pred += preds.detach().cpu().numpy().tolist()[0]

            print('test total time is', total_time)
            test_acc = metrics.accuracy_score(test_true, test_pred)
            avg_per_class_acc = metrics.balanced_accuracy_score(
                test_true, test_pred)
            outstr = '*** Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
                epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
            io.cprint(outstr)
            if test_acc >= best_test_acc:
                best_test_acc = test_acc
                print('save new best model acc: %s' % best_test_acc)
                torch.save(model.state_dict(),
                           'checkpoints/%s/models/model.t7' % args.exp_name)
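get_loss in the loop above combines the logits, the labels, and the precomputed class weights. A minimal class-weighted segmentation loss of that shape could look like the sketch below; this is an assumption for illustration, not the repository's actual get_loss.

import torch
import torch.nn.functional as F

def weighted_seg_loss(logits, labels, class_weights):
    """Sketch of a class-weighted segmentation loss.

    logits: (B, C, N) raw scores, labels: (B, N) int64, class_weights: (C,) float tensor
    """
    return F.cross_entropy(logits, labels, weight=class_weights)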
Example #6
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    'results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None

        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs['xyz'] = flat_inputs[:num_layers]
            self.inputs['neigh_idx'] = flat_inputs[num_layers:2 * num_layers]
            self.inputs['sub_idx'] = flat_inputs[2 * num_layers:3 * num_layers]
            self.inputs['interp_idx'] = flat_inputs[3 * num_layers:4 *
                                                    num_layers]
            self.inputs['features'] = flat_inputs[4 * num_layers]
            self.inputs['labels'] = flat_inputs[4 * num_layers + 1]
            self.inputs['input_inds'] = flat_inputs[4 * num_layers + 2]
            self.inputs['cloud_inds'] = flat_inputs[4 * num_layers + 3]

            K_points_numpy = np.array(fibonacci_sphere(self.config.k_n - 1))
            K_points_numpy = np.concatenate((np.array(
                (0, 0, 0))[None, :], K_points_numpy),
                                            axis=0)
            self.inputs['K_points'] = tf.Variable(K_points_numpy.astype(
                np.float32),
                                                  name='kernel_points',
                                                  trainable=False,
                                                  dtype=tf.float32)

            self.labels = self.inputs['labels']
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) + '.txt',
                'a')

        with tf.variable_scope('layers'):
            self.logits = self.inference(self.inputs, self.is_training)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
            for ign_label in self.config.ignored_label_inds:
                ignored_bool = tf.logical_or(ignored_bool,
                                             tf.equal(self.labels, ign_label))

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Reduce label values in the range of logit shape
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights)

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(valid_logits,
                                                     valid_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        c_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
        c_proto.allow_soft_placement = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
Example #7
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    'results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None

        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs['features'] = flat_inputs[0]
            self.inputs['labels'] = flat_inputs[1]
            self.inputs['input_inds'] = flat_inputs[2]
            self.inputs['cloud_inds'] = flat_inputs[3]

            self.labels = self.inputs['labels']
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.time_stamp = time.strftime('_%Y-%m-%d_%H-%M-%S',
                                            time.gmtime())
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) +
                self.time_stamp + '.txt', 'a')

        with tf.variable_scope('layers'):
            self.logits, self.new_xyz, self.xyz = self.inference(
                self.inputs, self.is_training)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
            for ign_label in self.config.ignored_label_inds:
                ignored_bool = tf.logical_or(ignored_bool,
                                             tf.equal(self.labels, ign_label))

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Reduce label values in the range of logit shape
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            aug_loss_weights = tf.constant([0.1, 0.1, 0.3, 0.5, 0.5])
            aug_loss = 0
            for i in range(self.config.num_layers):
                centroids = tf.reduce_mean(self.new_xyz[i], axis=2)
                relative_dis = tf.sqrt(
                    tf.reduce_sum(tf.square(centroids -
                                            self.xyz[i]), axis=-1) + 1e-12)
                aug_loss = aug_loss + aug_loss_weights[i] * tf.reduce_mean(
                    tf.reduce_mean(relative_dis, axis=-1), axis=-1)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights) + aug_loss

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(valid_logits,
                                                     valid_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
Example #8
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    'results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None
        # use inputs(a dict) variable to map the flat_inputs
        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs[
                'xyz'] = flat_inputs[:
                                     num_layers]  # xyz(points) of sub_pc at all the sub_sampling stages, containing num_layers items
            self.inputs['neigh_idx'] = flat_inputs[
                num_layers:2 *
                num_layers]  # neighbour id, containing num_layers items
            self.inputs['sub_idx'] = flat_inputs[
                2 * num_layers:3 *
                num_layers]  # sub_sampled idx, containing num_layers items
            self.inputs['interp_idx'] = flat_inputs[
                3 * num_layers:4 *
                num_layers]  # interpolation idx (nearest idx in the sub_pc for all raw pts), containing num_layers items
            self.inputs['features'] = flat_inputs[
                4 *
                num_layers]  # features containing xyz and feature, (B,N,3+C)
            self.inputs['labels'] = flat_inputs[4 * num_layers + 1]
            self.inputs['input_inds'] = flat_inputs[
                4 * num_layers +
                2]  # input_inds for each batch's points in the sub_pc
            self.inputs['cloud_inds'] = flat_inputs[
                4 * num_layers + 3]  # cloud_inds for each batch

            self.labels = self.inputs['labels']
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) + '.txt',
                'a')

        with tf.variable_scope('layers'):
            self.logits = self.inference(self.inputs, self.is_training)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)  # (B,N)
            for ign_label in self.config.ignored_label_inds:  # e.g., ignore 12, [12]
                ignored_bool = tf.logical_or(
                    ignored_bool, tf.equal(self.labels,
                                           ign_label))  # bool tensor, (B,N)

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(tf.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Reduce label values in the range of logit shape
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights)

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(valid_logits,
                                                     valid_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
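The reducing_list construction above remaps raw label values onto the contiguous [0, num_classes) range expected by the logits once the ignored points have been masked out. The same remapping written in NumPy, assuming for illustration that label value 0 is the only ignored label:

import numpy as np

num_classes = 19                     # classes the logits predict (ignored class excluded)
ignored_label_inds = [0]             # assumed ignored raw label value(s)
labels = np.array([0, 1, 5, 0, 19])  # raw per-point labels, 0 = unlabeled

# 1. Drop the ignored points
valid_mask = ~np.isin(labels, ignored_label_inds)
valid_labels_init = labels[valid_mask]            # [1, 5, 19]

# 2. Insert a dummy entry at each ignored position so that indexing the table
#    with a raw label value yields a contiguous 0..num_classes-1 index
reducing_list = np.arange(num_classes)
for ign in ignored_label_inds:
    reducing_list = np.concatenate([reducing_list[:ign], [0], reducing_list[ign:]])

valid_labels = reducing_list[valid_labels_init]   # [0, 4, 18]
print(valid_labels)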
Example #9
    def __init__(self, dataset, config):

        # obtain the dataset iterator's next element under the hood
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                self.saving_path = time.strftime(
                    '{}/Log_%Y-%m-%d_%H-%M-%S'.format(config.results_dir),
                    time.gmtime())
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None

        # use inputs(a dict) variable to map the flat_inputs
        with tf.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers

            # correspond to the flat_inputs defined in get_tf_mapping2() in main_S3DIS_SQN.py
            # HACK: for encoder, it needs the original points, so add it to the first element of this array.
            self.inputs['original_xyz'] = flat_inputs[
                4 * num_layers]  # original full-resolution xyz, (B,N,3)
            self.inputs['xyz'] = (
                self.inputs['original_xyz'],
            ) + flat_inputs[:
                            num_layers]  # original xyz plus xyz(points) of sub_pc at all the sub_sampling stages, containing num_layers + 1 items
            self.inputs['neigh_idx'] = flat_inputs[
                num_layers:2 *
                num_layers]  # neighbour id, containing num_layers items
            self.inputs['sub_idx'] = flat_inputs[
                2 * num_layers:3 *
                num_layers]  # sub_sampled idx, containing num_layers items
            self.inputs['interp_idx'] = flat_inputs[
                3 * num_layers:4 *
                num_layers]  # interpolation idx (nearest idx in the sub_pc for all raw pts), containing num_layers items
            self.inputs['features'] = flat_inputs[
                4 * num_layers +
                1]  # features containing xyz and feature, (B,N,3+C)
            self.inputs['labels'] = flat_inputs[4 * num_layers + 2]
            self.inputs['weak_label_masks'] = flat_inputs[4 * num_layers + 3]
            self.inputs['input_inds'] = flat_inputs[
                4 * num_layers +
                4]  # input_inds for each batch's points in the sub_pc
            self.inputs['cloud_inds'] = flat_inputs[
                4 * num_layers + 5]  # cloud_inds for each batch

            self.points = self.inputs['original_xyz']  # (B,N,3)
            self.labels = self.inputs['labels']  # (B,N)
            self.weak_label_masks = self.inputs[
                'weak_label_masks']  # weak label masks for weakly supervised semseg, (B,N)
            self.is_training = tf.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) + '.txt',
                'a')

        with tf.variable_scope('layers'):
            self.logits, self.weak_labels = self.inference(
                self.inputs, self.is_training)  # (n, num_classes), (n,)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.variable_scope('loss'):
            self.logits = tf.reshape(
                self.logits, [-1, config.num_classes])  # (n, num_classes)
            self.weak_labels = tf.reshape(self.weak_labels, [-1])  # (n,)
            # TODO: which to use, WCE, CE or smooth label
            self.loss = self.get_loss_Sqn(self.logits, self.weak_labels)

        with tf.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)
            self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        with tf.variable_scope('results'):
            # self.correct_prediction = tf.nn.in_top_k(valid_logits, valid_labels, 1)
            self.correct_prediction = tf.nn.in_top_k(self.logits,
                                                     self.weak_labels, 1)
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)  # (n,C)

            tf.summary.scalar('learning_rate', self.learning_rate)
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.saver = tf.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.Session(config=c_proto)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(config.train_sum_dir,
                                                  self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
Example #10
    def __init__(self, dataset, config):
        flat_inputs = dataset.flat_inputs
        self.config = config
        # Path of the result folder
        if self.config.saving:
            if self.config.saving_path is None:
                # self.saving_path = time.strftime('results/Log_test_20_{}'.format(self.config.test_area))
                self.saving_path = time.strftime('results/Log_SemanticKITTI_3')
            else:
                self.saving_path = self.config.saving_path
            makedirs(
                self.saving_path) if not exists(self.saving_path) else None

        with tf.compat.v1.variable_scope('inputs'):
            self.inputs = dict()
            num_layers = self.config.num_layers
            self.inputs['xyz'] = flat_inputs[:num_layers]
            self.inputs['neigh_idx'] = flat_inputs[num_layers:2 * num_layers]
            self.inputs['sub_idx'] = flat_inputs[2 * num_layers:3 * num_layers]
            self.inputs['interp_idx'] = flat_inputs[3 * num_layers:4 *
                                                    num_layers]
            self.inputs['features'] = flat_inputs[4 * num_layers]
            self.inputs['labels'] = flat_inputs[4 * num_layers + 1]
            self.inputs['input_inds'] = flat_inputs[4 * num_layers + 2]
            self.inputs['cloud_inds'] = flat_inputs[4 * num_layers + 3]

            K_points_numpy = np.array(fibonacci_sphere(self.config.k_n))
            K_padding = np.zeros((self.config.k_n, self.config.k_n))
            K_padding[0, 0] = 1.
            K_padding = K_padding[None, None, :, :]
            self.inputs['K_points'] = tf.Variable(K_points_numpy.astype(
                np.float32),
                                                  name='kernel_points',
                                                  trainable=False,
                                                  dtype=tf.float32)
            self.inputs['K_padding'] = tf.Variable(K_padding.astype(
                np.float32),
                                                   name='K_padding',
                                                   trainable=False,
                                                   dtype=tf.float32)

            self.labels = self.inputs['labels']
            self.is_training = tf.compat.v1.placeholder(tf.bool, shape=())
            self.training_step = 1
            self.training_epoch = 0
            self.correct_prediction = 0
            self.accuracy = 0
            self.mIou_list = [0]
            self.class_weights = DP.get_class_weights(dataset.name)
            self.Log_file = open(
                'log_train_' + dataset.name + str(dataset.val_split) +
                '_3.txt', 'a')

        with tf.compat.v1.variable_scope('layers'):
            self.logits = self.inference(self.inputs, self.is_training)

        #####################################################################
        # Ignore the invalid point (unlabeled) when calculating the loss #
        #####################################################################
        with tf.compat.v1.variable_scope('loss'):
            self.logits = tf.reshape(self.logits, [-1, config.num_classes])
            self.labels = tf.reshape(self.labels, [-1])

            # Boolean mask of points that should be ignored
            ignored_bool = tf.zeros_like(self.labels, dtype=tf.bool)
            for ign_label in self.config.ignored_label_inds:
                ignored_bool = tf.logical_or(ignored_bool,
                                             tf.equal(self.labels, ign_label))

            # Collect logits and labels that are not ignored
            valid_idx = tf.squeeze(
                tf.compat.v1.where(tf.logical_not(ignored_bool)))
            valid_logits = tf.gather(self.logits, valid_idx, axis=0)
            valid_labels_init = tf.gather(self.labels, valid_idx, axis=0)

            # Reduce label values in the range of logit shape
            reducing_list = tf.range(self.config.num_classes, dtype=tf.int32)
            inserted_value = tf.zeros((1, ), dtype=tf.int32)
            for ign_label in self.config.ignored_label_inds:
                reducing_list = tf.concat([
                    reducing_list[:ign_label], inserted_value,
                    reducing_list[ign_label:]
                ], 0)
            valid_labels = tf.gather(reducing_list, valid_labels_init)

            self.loss = self.get_loss(valid_logits, valid_labels,
                                      self.class_weights)

        with tf.compat.v1.variable_scope('optimizer'):
            self.learning_rate = tf.Variable(config.learning_rate,
                                             trainable=False,
                                             name='learning_rate')
            self.train_op = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate).minimize(
                    self.loss
                )  #AdamOptimizer(self.learning_rate).minimize(self.loss)
            # self.train_op2 = tf.compat.v1.train.MomentumOptimizer(self.learning_rate, 0.9).minimize(self.loss)

            self.extra_update_ops = tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.UPDATE_OPS)

        with tf.compat.v1.variable_scope('results'):
            self.correct_prediction = tf.nn.in_top_k(predictions=valid_logits,
                                                     targets=valid_labels,
                                                     k=1)
            self.accuracy = tf.reduce_mean(
                input_tensor=tf.cast(self.correct_prediction, tf.float32))
            self.prob_logits = tf.nn.softmax(self.logits)

            tf.compat.v1.summary.scalar('learning_rate', self.learning_rate)
            tf.compat.v1.summary.scalar('loss', self.loss)
            tf.compat.v1.summary.scalar('accuracy', self.accuracy)

        my_vars = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
        # my_vars = [v for v in tf.compat.v1.global_variables() if "Momentum" not in v.name]
        self.saver = tf.compat.v1.train.Saver(my_vars, max_to_keep=100)
        c_proto = tf.compat.v1.ConfigProto()
        c_proto.gpu_options.allow_growth = True
        self.sess = tf.compat.v1.Session(config=c_proto)
        self.merged = tf.compat.v1.summary.merge_all()
        self.train_writer = tf.compat.v1.summary.FileWriter(
            config.train_sum_dir, self.sess.graph)
        self.sess.run(tf.compat.v1.global_variables_initializer())

        # Load trained model
        self.saving_path = "results/Log_SemanticKITTI_3"
        if exists(join(self.saving_path, "snapshots")):
            chosen_folder = self.saving_path
            snap_path = join(chosen_folder, 'snapshots')
            snap_steps = [
                int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path)
                if f[-5:] == '.meta'
            ]
            chosen_step = np.sort(snap_steps)[-1]
            chosen_snap = join(snap_path, 'snap-{:d}'.format(chosen_step))
            self.saver.restore(self.sess, chosen_snap)
            print("Model restored from " + chosen_snap)
Example #11
    def __init__(self, mode):
        self.name = 'Semantic3D'
        self.path = 'data/semantic3d'
        self.label_to_names = {
            0: 'unlabeled',
            1: 'man-made terrain',
            2: 'natural terrain',
            3: 'high vegetation',
            4: 'low vegetation',
            5: 'buildings',
            6: 'hard scape',
            7: 'scanning artefacts',
            8: 'cars'
        }
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort(
            [k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.sort([0])

        self.original_folder = join(self.path, 'original_data')
        self.full_pc_folder = join(self.path, 'original_ply')
        self.sub_pc_folder = join(self.path,
                                  'input_{:.3f}'.format(cfg.sub_grid_size))

        # Following KPConv to do the train-validation split
        self.all_splits = [0, 1, 4, 5, 3, 4, 3, 0, 1, 2, 3, 4, 2, 0, 5]
        self.val_split = 1

        # Initial training-validation-testing files
        self.train_files = []
        self.val_files = []
        self.test_files = []
        cloud_names = [
            file_name[:-4] for file_name in os.listdir(self.original_folder)
            if file_name[-4:] == '.txt'
        ]
        for pc_name in cloud_names:
            if exists(join(self.original_folder, pc_name + '.labels')):
                self.train_files.append(
                    join(self.sub_pc_folder, pc_name + '.ply'))
            else:
                self.test_files.append(
                    join(self.full_pc_folder, pc_name + '.ply'))

        self.train_files = np.sort(self.train_files)
        self.test_files = np.sort(self.test_files)

        for i, file_path in enumerate(self.train_files):
            if self.all_splits[i] == self.val_split:
                self.val_files.append(file_path)

        self.train_files = np.sort(
            [x for x in self.train_files if x not in self.val_files])

        # Initiate containers
        self.val_proj = []
        self.val_labels = []
        self.test_proj = []
        self.test_labels = []

        self.possibility = {}
        self.min_possibility = {}
        self.class_weight = {}
        self.input_trees = {'training': [], 'validation': [], 'test': []}
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': []}

        # Ascii files dict for testing
        self.ascii_files = {
            'MarketplaceFeldkirch_Station4_rgb_intensity-reduced.ply':
            'marketsquarefeldkirch4-reduced.labels',
            'sg27_station10_rgb_intensity-reduced.ply':
            'sg27_10-reduced.labels',
            'sg28_Station2_rgb_intensity-reduced.ply':
            'sg28_2-reduced.labels',
            'StGallenCathedral_station6_rgb_intensity-reduced.ply':
            'stgallencathedral6-reduced.labels',
            'birdfountain_station1_xyz_intensity_rgb.ply':
            'birdfountain1.labels',
            'castleblatten_station1_intensity_rgb.ply':
            'castleblatten1.labels',
            'castleblatten_station5_xyz_intensity_rgb.ply':
            'castleblatten5.labels',
            'marketplacefeldkirch_station1_intensity_rgb.ply':
            'marketsquarefeldkirch1.labels',
            'marketplacefeldkirch_station4_intensity_rgb.ply':
            'marketsquarefeldkirch4.labels',
            'marketplacefeldkirch_station7_intensity_rgb.ply':
            'marketsquarefeldkirch7.labels',
            'sg27_station10_intensity_rgb.ply':
            'sg27_10.labels',
            'sg27_station3_intensity_rgb.ply':
            'sg27_3.labels',
            'sg27_station6_intensity_rgb.ply':
            'sg27_6.labels',
            'sg27_station8_intensity_rgb.ply':
            'sg27_8.labels',
            'sg28_station2_intensity_rgb.ply':
            'sg28_2.labels',
            'sg28_station5_xyz_intensity_rgb.ply':
            'sg28_5.labels',
            'stgallencathedral_station1_intensity_rgb.ply':
            'stgallencathedral1.labels',
            'stgallencathedral_station3_intensity_rgb.ply':
            'stgallencathedral3.labels',
            'stgallencathedral_station6_intensity_rgb.ply':
            'stgallencathedral6.labels'
        }

        self.load_sub_sampled_clouds(cfg.sub_grid_size)

        # ES: init lines of `get_batch_gen' here.
        # ignore num_per_epoch and use path_list.

        self.mode = mode

        # Reset possibility
        self.possibility[self.mode] = []
        self.min_possibility[self.mode] = []
        self.class_weight[self.mode] = []

        # Random initialize
        for i, tree in enumerate(self.input_trees[self.mode]):
            self.possibility[self.mode] += [
                np.random.rand(tree.data.shape[0]) * 1e-3
            ]
            self.min_possibility[self.mode] += [
                float(np.min(self.possibility[mode][-1]))
            ]

        if self.mode != 'test':
            _, num_class_total = np.unique(np.hstack(
                self.input_labels[self.mode]),
                                           return_counts=True)
            self.class_weight[self.mode] += [
                np.squeeze([num_class_total / np.sum(num_class_total)], axis=0)
            ]

        # ES: ignored_label_inds and class_weights
        cfg.ignored_label_inds = [
            self.label_to_idx[ign_label] for ign_label in self.ignored_labels
        ]
        cfg.class_weights = DP.get_class_weights('Semantic3D')
Example #12
        # obtain batch indices denoting which batch each weakly labelled point belongs to
        batch_inds1 = selected_idx[:, 0]
        row_indices = tf.reshape(tf.range(1, tf.shape(weak_label_masks)[0] + 1),
                                 [tf.shape(weak_label_masks)[0], -1])  # (B,1), values 1..B
        weak_mask_indices = row_indices * weak_label_masks  # element-wise *, (B,1) * (B,N) -> (B,N)
        # gather the (1-based) batch index of every weak point, then shift to 0-based
        batch_inds2 = tf.boolean_mask(weak_mask_indices,
                                      tf.not_equal(weak_mask_indices, 0)) - 1  # (n,) batch index per weak point


        is_training = tf.placeholder(tf.bool, shape=())
        training_step = 1
        training_epoch = 0
        correct_prediction = 0
        accuracy = 0
        mIou_list = [0]
        class_weights = DP.get_class_weights(dataset.name)
        Log_file = open('log_train_' + dataset.name + str(dataset.val_split) + '_Sqn.txt', 'a')


    c_proto = tf.ConfigProto()
    c_proto.gpu_options.allow_growth = True
    with tf.Session(config=c_proto) as sess:
        sess.run(tf.global_variables_initializer())
        # use session to start the dataset iterator
        sess.run(dataset.train_init_op)

        # for each batch of training examples, do something
        while True:
            try:
                # BUG: the outputs should use different names, otherwise sess.run raises an error
                # such as: has invalid type <class 'numpy.ndarray'>, must be a string or Tensor.
                flat_inputs, weak_point1, weak_point2, idx, inds1, inds2 = sess.run(
                    [dataset.flat_inputs, weak_points1, weak_points2, selected_idx, batch_inds1, batch_inds2])