Example 1
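Test-time voting for point-cloud part segmentation in PyTorch: each shape is pushed through the network args.ntree times (one randomly generated tree per pass), the per-class scores are summed across passes, and the argmax of the summed scores labels each point. Net, PartNormalDataset, tree_collate, nearest_correspondance and data_utils are project-specific helpers assumed to be in scope, along with the usual os, math, numpy, torch, tqdm and datetime imports.
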
def test_multiple(args):
    USE_CUDA = True
    N_CLASSES = 50

    args.data_folder = os.path.join(args.rootdir, "test_data")

    # create the output folders
    output_folder = args.savedir + '_predictions_multi_{}'.format(args.ntree)
    category_list = [
        (category, int(label_num))
        for (category,
             label_num) in [line.split() for line in open(args.category, 'r')]
    ]
    offset = 0
    category_range = dict()
    for category, category_label_seg_max in category_list:
        category_range[category] = (offset, offset + category_label_seg_max)
        offset = offset + category_label_seg_max
        folder = os.path.join(output_folder, category)
        if not os.path.exists(folder):
            os.makedirs(folder)

    input_filelist = []
    output_filelist = []
    output_ply_filelist = []
    for category in sorted(os.listdir(args.data_folder)):
        data_category_folder = os.path.join(args.data_folder, category)
        for filename in sorted(os.listdir(data_category_folder)):
            input_filelist.append(
                os.path.join(args.data_folder, category, filename))
            output_filelist.append(
                os.path.join(output_folder, category, filename[0:-3] + 'seg'))
            output_ply_filelist.append(
                os.path.join(output_folder + '_ply', category,
                             filename[0:-3] + 'ply'))

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data, label, data_num, label_test, _ = data_utils.load_seg(
        args.filelist_val)  # no segmentation labels

    net = Net(input_channels=1, output_channels=N_CLASSES)
    net.load_state_dict(
        torch.load(os.path.join(args.savedir, "state_dict.pth")))
    net.cuda()
    net.eval()

    ds = PartNormalDataset(data,
                           data_num,
                           label_test,
                           net.config,
                           npoints=args.npoints)

    with torch.no_grad():
        count = 0

        for shape_id in tqdm(range(len(ds)), ncols=120):

            segmentation_ = None

            # build args.ntree sampled trees for this shape, split into
            # chunks of at most args.batchsize trees each
            batches = []
            for i in range(math.ceil(args.ntree / args.batchsize)):
                bs = min(args.batchsize, args.ntree - i * args.batchsize)
                batches.append([ds[shape_id] for _ in range(bs)])

            for batch in batches:

                pts, features, _, tree = tree_collate(batch)
                if USE_CUDA:
                    features = features.cuda()
                    pts = pts.cuda()
                    for l_id in range(len(tree)):
                        tree[l_id]["points"] = tree[l_id]["points"].cuda()
                        tree[l_id]["indices"] = tree[l_id]["indices"].cuda()

                outputs = net(features, pts, tree)

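                # every row of this batch is a copy of the same shape, so the
                # raw data is indexed with the per-shape counter `count`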
                for i in range(pts.size(0)):
                    pts_src = pts[i].cpu().numpy()

                    # pts_dest
                    point_num = data_num[count]
                    pts_dest = data[count]
                    pts_dest = pts_dest[:point_num]

                    object_label = label[count]
                    category = category_list[object_label][0]
                    label_start, label_end = category_range[category]

                    seg_ = outputs[i][:, label_start:label_end].cpu().numpy()
                    seg_ = nearest_correspondance(pts_src, pts_dest, seg_)

                    if segmentation_ is None:
                        segmentation_ = seg_
                    else:
                        segmentation_ += seg_

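            # final label per point: argmax of the scores summed over all passes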
            segmentation_ = np.argmax(segmentation_, axis=1)

            # save labels
            np.savetxt(output_filelist[count], segmentation_, fmt="%i")

            if args.ply:
                data_utils.save_ply_property(pts_dest, segmentation_, 6,
                                             output_ply_filelist[count])

            count += 1
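
The PyTorch examples (this one and Example 3) predict on a fixed-size sample of each cloud (args.npoints) but must write a label for every original point. nearest_correspondance is project-specific; a minimal sketch of what it presumably does, with the 1-NN strategy being an assumption:

import numpy as np
from scipy.spatial import cKDTree


def nearest_correspondance(pts_src, pts_dest, data_src):
    # every full-resolution point inherits the value (score row or label)
    # of its nearest sampled point
    _, nn_idx = cKDTree(pts_src).query(pts_dest, k=1)
    return data_src[nn_idx]
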
Example 2
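ShellNet-style part-segmentation inference in TensorFlow 1.x: every shape is tiled into batch_size shuffled copies of sample_num points, run through the network once, and each original point keeps the most confident of its sampled predictions. The model and setting modules are loaded dynamically with importlib; data_utils is a project-specific helper assumed to be importable, alongside os, sys, math, argparse, importlib, numpy, tensorflow (1.x) and datetime.
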
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--load_ckpt',
        '-l',
        default='log/seg/shellconv_seg_shapenet_2019-08-06-14-42-34/ckpts/epoch-326',
        help='Path to a checkpoint file to load')
    parser.add_argument('--model',
                        '-m',
                        default='shellconv',
                        help='Model to use')
    parser.add_argument('--setting',
                        '-x',
                        default='seg_shapenet',
                        help='Setting to use')
    parser.add_argument('--repeat_num',
                        '-r',
                        help='Repeat number',
                        type=int,
                        default=1)
    parser.add_argument('--save_ply',
                        '-s',
                        help='Save results as ply',
                        action='store_true')
    args = parser.parse_args()
    print(args)

    model = importlib.import_module(args.model)
    setting_path = os.path.join(os.path.dirname(__file__), args.model)
    sys.path.append(setting_path)
    setting = importlib.import_module(args.setting)

    sample_num = setting.sample_num

    output_folder = setting.data_folder + '_pred_shellnet_' + str(
        args.repeat_num)
    category_list = [(category, int(label_num)) for (
        category,
        label_num) in [line.split() for line in open(setting.category, 'r')]]
    offset = 0
    category_range = dict()
    for category, category_label_seg_max in category_list:
        category_range[category] = (offset, offset + category_label_seg_max)
        offset = offset + category_label_seg_max
        folder = os.path.join(output_folder, category)
        if not os.path.exists(folder):
            os.makedirs(folder)

    input_filelist = []
    output_filelist = []
    output_ply_filelist = []
    for category in sorted(os.listdir(setting.data_folder)):
        data_category_folder = os.path.join(setting.data_folder, category)
        for filename in sorted(os.listdir(data_category_folder)):
            input_filelist.append(
                os.path.join(setting.data_folder, category, filename))
            output_filelist.append(
                os.path.join(output_folder, category, filename[0:-3] + 'seg'))
            output_ply_filelist.append(
                os.path.join(output_folder + '_ply', category,
                             filename[0:-3] + 'ply'))

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data, label, data_num, _, _ = data_utils.load_seg(setting.filelist_val)

    batch_num = data.shape[0]
    max_point_num = data.shape[1]
    # enough shuffled copies per shape to cover every point ~repeat_num times
    batch_size = args.repeat_num * math.ceil(max_point_num / sample_num)

    print('{}-{:d} testing batches.'.format(datetime.now(), batch_num))

    ######################################################################
    # Placeholders
    indices = tf.placeholder(tf.int32,
                             shape=(batch_size, None, 2),
                             name="indices")
    is_training = tf.placeholder(tf.bool, name='is_training')
    pts_fts = tf.placeholder(tf.float32,
                             shape=(None, max_point_num, setting.data_dim),
                             name='pts_fts')
    ######################################################################

    ######################################################################
    pts_fts_sampled = tf.gather_nd(pts_fts,
                                   indices=indices,
                                   name='pts_fts_sampled')
    if setting.data_dim > 3:
        points_sampled, _ = tf.split(pts_fts_sampled,
                                     [3, setting.data_dim - 3],
                                     axis=-1,
                                     name='split_points_features')
    else:
        points_sampled = pts_fts_sampled

    logits_op = model.get_model(points_sampled,
                                is_training,
                                setting.sconv_params,
                                setting.sdconv_params,
                                setting.fc_params,
                                sampling=setting.sampling,
                                weight_decay=setting.weight_decay,
                                bn_decay=None,
                                part_num=setting.num_class)

    probs_op = tf.nn.softmax(logits_op, name='probs')

    saver = tf.train.Saver()

    parameter_num = np.sum(
        [np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
    print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))

    with tf.Session() as sess:
        # Load the model
        saver.restore(sess, args.load_ckpt)
        print('{}-Checkpoint loaded from {}!'.format(datetime.now(),
                                                     args.load_ckpt))

        indices_batch_indices = np.tile(
            np.reshape(np.arange(batch_size), (batch_size, 1, 1)),
            (1, sample_num, 1))
        for batch_idx in range(batch_num):
            points_batch = data[[batch_idx] * batch_size, ...]
            object_label = label[batch_idx]
            point_num = data_num[batch_idx]
            category = category_list[object_label][0]
            label_start, label_end = category_range[category]

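            # tile the point indices so sample_num * batch_size draws cover
            # every original point at least once, then shuffle the order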
            tile_num = math.ceil((sample_num * batch_size) / point_num)
            indices_shuffle = np.tile(np.arange(point_num),
                                      tile_num)[0:sample_num * batch_size]
            np.random.shuffle(indices_shuffle)
            indices_batch_shuffle = np.reshape(indices_shuffle,
                                               (batch_size, sample_num, 1))
            indices_batch = np.concatenate(
                (indices_batch_indices, indices_batch_shuffle), axis=2)

            probs = sess.run(probs_op,
                             feed_dict={
                                 pts_fts: points_batch,
                                 indices: indices_batch,
                                 is_training: False,
                             })
            probs_2d = np.reshape(probs, (sample_num * batch_size, -1))
            # keep, for every original point, the most confident prediction
            # among all of its sampled copies
            predictions = [(-1, 0.0)] * point_num
            for idx in range(sample_num * batch_size):
                point_idx = indices_shuffle[idx]
                point_probs = probs_2d[idx, label_start:label_end]
                confidence = np.amax(point_probs)
                seg_idx = np.argmax(point_probs)
                if confidence > predictions[point_idx][1]:
                    predictions[point_idx] = (seg_idx, confidence)

            labels = []
            with open(output_filelist[batch_idx], 'w') as file_seg:
                for seg_idx, _ in predictions:
                    file_seg.write('%d\n' % (seg_idx))
                    labels.append(seg_idx)

            # read the coordinates from the txt file for verification
            coordinates = [[float(value) for value in xyz.split(' ')]
                           for xyz in open(input_filelist[batch_idx], 'r')
                           if len(xyz.split(' ')) == 3]
            assert (point_num == len(coordinates))
            if args.save_ply:
                data_utils.save_ply_property(np.array(coordinates),
                                             np.array(labels), 6,
                                             output_ply_filelist[batch_idx])

            print('{}-[Testing]-Iter: {:06d} saved to {}'.format(
                datetime.now(), batch_idx, output_filelist[batch_idx]))
            sys.stdout.flush()
            ######################################################################
        print('{}-Done!'.format(datetime.now()))
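
The placeholder block builds (batch_row, point_index) pairs so that tf.gather_nd can draw a different shuffled subset of points for every row of the batch. A toy numpy illustration of the same indexing, with all shapes invented for the illustration:

import numpy as np

batch_size, max_point_num, sample_num, dim = 2, 6, 4, 3
pts = np.arange(batch_size * max_point_num * dim,
                dtype=np.float32).reshape(batch_size, max_point_num, dim)
# one (row, point) pair per sampled point, as in indices_batch above
row_idx = np.tile(np.arange(batch_size).reshape(batch_size, 1, 1),
                  (1, sample_num, 1))
point_idx = np.random.randint(0, max_point_num, (batch_size, sample_num, 1))
indices = np.concatenate((row_idx, point_idx), axis=2)  # (B, S, 2)
sampled = pts[indices[..., 0], indices[..., 1]]          # (B, S, dim)
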
Example 3
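The single-pass counterpart of Example 1: one forward pass per DataLoader batch, argmax before the nearest-neighbor upsampling, and a running confusion matrix that drives the overall-accuracy (OA) and average per-class accuracy (AA) readout in the progress bar. confusion_matrix is sklearn's; metrics is a project-specific module (sketched after this example).
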
def test(args):
    THREADS = 4
    BATCH_SIZE = args.batchsize
    USE_CUDA = True
    N_CLASSES = 50

    args.data_folder = os.path.join(args.rootdir, "test_data")

    # create the output folders
    output_folder = args.savedir + '_predictions'
    category_list = [
        (category, int(label_num))
        for (category,
             label_num) in [line.split() for line in open(args.category, 'r')]
    ]
    offset = 0
    category_range = dict()
    for category, category_label_seg_max in category_list:
        category_range[category] = (offset, offset + category_label_seg_max)
        offset = offset + category_label_seg_max
        folder = os.path.join(output_folder, category)
        if not os.path.exists(folder):
            os.makedirs(folder)

    input_filelist = []
    output_filelist = []
    output_ply_filelist = []
    for category in sorted(os.listdir(args.data_folder)):
        data_category_folder = os.path.join(args.data_folder, category)
        for filename in sorted(os.listdir(data_category_folder)):
            input_filelist.append(
                os.path.join(args.data_folder, category, filename))
            output_filelist.append(
                os.path.join(output_folder, category, filename[0:-3] + 'seg'))
            output_ply_filelist.append(
                os.path.join(output_folder + '_ply', category,
                             filename[0:-3] + 'ply'))

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data, label, data_num, label_test, _ = data_utils.load_seg(
        args.filelist_val)  # no segmentation labels

    net = Net(input_channels=1, output_channels=N_CLASSES)
    net.load_state_dict(
        torch.load(os.path.join(args.savedir, "state_dict.pth")))
    net.cuda()
    net.eval()

    ds = PartNormalDataset(data,
                           data_num,
                           label_test,
                           net.config,
                           npoints=args.npoints)
    test_loader = torch.utils.data.DataLoader(ds,
                                              batch_size=BATCH_SIZE,
                                              shuffle=False,
                                              num_workers=THREADS,
                                              collate_fn=tree_collate)

    cm = np.zeros((N_CLASSES, N_CLASSES))
    t = tqdm(test_loader, ncols=120)
    with torch.no_grad():
        count = 0
        for pts, features, seg, tree in t:

            if USE_CUDA:
                features = features.cuda()
                pts = pts.cuda()
                for l_id in range(len(tree)):
                    tree[l_id]["points"] = tree[l_id]["points"].cuda()
                    tree[l_id]["indices"] = tree[l_id]["indices"].cuda()

            outputs = net(features, pts, tree)

            # save results
            for i in range(pts.size(0)):
                # pts_src
                pts_src = pts[i].cpu().numpy()

                # pts_dest
                point_num = data_num[count + i]
                pts_dest = data[count + i]
                pts_dest = pts_dest[:point_num]

                object_label = label[count + i]
                category = category_list[object_label][0]
                label_start, label_end = category_range[category]

                seg_ = outputs[i][:, label_start:label_end].cpu().numpy()
                seg_ = np.argmax(seg_, axis=1)
                seg_ = nearest_correspondance(pts_src, pts_dest, seg_)

                # save labels
                np.savetxt(output_filelist[count + i], seg_, fmt="%i")

                if args.ply:
                    data_utils.save_ply_property(
                        pts_dest, seg_, 6, output_ply_filelist[count + i])
            count += pts.size(0)

            # accumulate the confusion matrix over the raw all-class argmax
            output_np = np.argmax(outputs.cpu().numpy(), axis=2)
            target_np = seg.cpu().numpy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(N_CLASSES)))
            cm += cm_

            oa = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])

            t.set_postfix(OA=oa, AA=aa)
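
stats_overall_accuracy and stats_accuracy_per_class above come from a project-specific metrics module; assuming the conventional definitions over a confusion matrix, they would look roughly like this (a sketch, not the project's actual code):

import numpy as np


def stats_overall_accuracy(cm):
    # fraction of all points that fall on the diagonal
    return np.trace(cm) / max(cm.sum(), 1)


def stats_accuracy_per_class(cm):
    # per-class recall; returns (mean, per-class vector), matching the
    # [0] indexing used above
    eps = np.finfo(np.float32).eps
    per_class = np.diag(cm) / (cm.sum(axis=1) + eps)
    return per_class.mean(), per_class
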
Example 4
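An evaluation script for predictions like those written above: it computes an IoU per sample (optionally averaged over part labels with --part_avg), a dataset-level IoU weighted by sample count, and a category-averaged mIoU. With --folder_data it also writes per-point correctness PLYs for visual inspection.
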
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder_gt',
                        '-g',
                        help='Path to ground truth folder',
                        required=True)
    parser.add_argument('--folder_pred',
                        '-p',
                        help='Path to prediction folder',
                        required=True)
    parser.add_argument('--folder_data',
                        '-d',
                        help='Path to point cloud data folder')
    parser.add_argument('--part_avg',
                        '-a',
                        action='store_true',
                        help='Use part level average')
    args = parser.parse_args()
    print(args)

    category_id_to_name = {
        2691156: 'Airplane',
        2773838: 'Bag',
        2954340: 'Cap',
        2958343: 'Car',
        3001627: 'Chair',
        3261776: 'Earphone',
        3467517: 'Guitar',
        3624134: 'Knife',
        3636649: 'Lamp',
        3642806: 'Laptop',
        3790512: 'Motorbike',
        3797390: 'Mug',
        3948459: 'Pistol',
        4099429: 'Rocket',
        4225987: 'Skateboard',
        4379243: 'Table'
    }

    categories = sorted(os.listdir(args.folder_gt))

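    # ground-truth part labels may be offset (e.g. 1-based); find the global
    # minimum so they can be shifted to match the 0-based predictions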
    label_min = sys.maxsize
    for category in categories:
        category_folder_gt = os.path.join(args.folder_gt, category)
        filenames = sorted(os.listdir(category_folder_gt))
        for filename in filenames:
            filepath_gt = os.path.join(category_folder_gt, filename)
            label_gt = np.loadtxt(filepath_gt).astype(np.int32)
            label_min = min(label_min, np.amin(label_gt))

    IoU = 0.0
    total_num = 0
    mIoU = 0.0
    for category in categories:
        category_folder_gt = os.path.join(args.folder_gt, category)
        category_folder_pred = os.path.join(args.folder_pred, category)
        if args.folder_data:
            category_folder_data = os.path.join(args.folder_data, category)
            category_folder_err = os.path.join(args.folder_pred + '_err_ply',
                                               category)

        IoU_category = 0.0
        filenames = sorted(os.listdir(category_folder_gt))
        for filename in filenames:
            filepath_gt = os.path.join(category_folder_gt, filename)
            filepath_pred = os.path.join(category_folder_pred, filename)
            label_gt = np.loadtxt(filepath_gt).astype(np.int32) - label_min
            label_pred = np.loadtxt(filepath_pred).astype(np.int32)

            if args.folder_data:
                filepath_data = os.path.join(category_folder_data,
                                             filename[:-3] + 'pts')
                filepath_err = os.path.join(category_folder_err,
                                            filename[:-3] + 'ply')
                coordinates = [[float(value) for value in xyz.split(' ')]
                               for xyz in open(filepath_data, 'r')
                               if len(xyz.split(' ')) == 3]
                assert (label_gt.shape[0] == len(coordinates))
                data_utils.save_ply_property(np.array(coordinates),
                                             (label_gt == label_pred), 6,
                                             filepath_err)

            if args.part_avg:
                label_max = np.amax(label_gt)
                IoU_part = 0.0
                for label_idx in range(label_max + 1):
                    locations_gt = (label_gt == label_idx)
                    locations_pred = (label_pred == label_idx)
                    I_locations = np.logical_and(locations_gt, locations_pred)
                    U_locations = np.logical_or(locations_gt, locations_pred)
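                    # eps keeps I/U finite (and equal to 1) when a part id is
                    # absent from both ground truth and prediction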
                    I = np.sum(I_locations) + np.finfo(np.float32).eps
                    U = np.sum(U_locations) + np.finfo(np.float32).eps
                    IoU_part = IoU_part + I / U
                IoU_sample = IoU_part / (label_max + 1)
            else:
                label_correct_locations = (label_gt == label_pred)
                IoU_sample = np.sum(label_correct_locations) / label_gt.size
            IoU_category = IoU_category + IoU_sample
        IoU = IoU + IoU_category
        IoU_category = IoU_category / len(filenames)
        if category.isdigit():
            print("IoU of %s: " % (category_id_to_name[int(category)]),
                  IoU_category)
        else:
            print("IoU of %s: " % category, IoU_category)
        mIoU = mIoU + IoU_category
        total_num = total_num + len(filenames)
    IoU = IoU / total_num
    print("IoU: ", IoU)
    mIoU = mIoU / len(categories)
    print("mIoU: ", mIoU)