Example #1
0
        elif isinstance(first_dp, dict):
            result = {}
            for key in first_dp.keys():
                data_list = [x[key] for x in data_holder]
                if use_list:
                    result[key] = data_list
                else:
                    result[key] = BatchData2Biggest._batch_numpy(data_list)
        return result


if __name__ == '__main__':

    # Let tensorpack pick a log directory from the script name.
    tensorpack.utils.logger.auto_set_dir()

    # Dataflows over the SUN RGB-D dump.
    train_set = MyDataFlow('/media/neil/DATA/mysunrgbd', 'training')
    test_set = MyDataFlow('/media/neil/DATA/mysunrgbd',
                          'training')  # TODO: prepare test data

    # Evaluate mAP at IoU threshold 0.25 with an offline predictor.
    # NOTE: construction order mirrors the original call expression
    # (dataset first, then predictor) in case the constructors do I/O.
    gt_dataset = sunrgbd_object('/media/neil/DATA/mysunrgbd', 'training')
    predictor = OfflinePredictor(
        PredictConfig(model=Model(),
                      input_names=['points'],
                      output_names=[
                          'bboxes_pred', 'class_scores_pred',
                          'batch_idx'
                      ]))
    print(eval_mAP(gt_dataset, predictor, [0.25]))

    # dataset = BatchData(PrefetchData(train_set, 4, 4), BATCH_SIZE)
Example #2
0
            for key in first_dp.keys():
                data_list = [x[key] for x in data_holder]
                if use_list:
                    result[key] = data_list
                else:
                    result[key] = BatchData2Biggest._batch_numpy(data_list)
        return result


if __name__ == '__main__':

    # Let tensorpack pick a log directory from the script name.
    tensorpack.utils.logger.auto_set_dir()

    # this is the official train/val split
    # (val is indices 5051..10335; presumably the remainder is held out —
    # NOTE(review): confirm index convention against MyDataFlow)
    train_set = MyDataFlow('/media/neil/DATA/mysunrgbd',
                           'training',
                           idx_list=list(range(5051, 10336)))

    # dataset = BatchData(PrefetchData(train_set, 4, 4), BATCH_SIZE)

    # (epoch, learning-rate) milestones — NOTE(review): assumed to be a
    # tensorpack-style piecewise-constant schedule; confirm at the trainer.
    lr_schedule = [(80, 1e-4), (120, 1e-5)]
    # lr_schedule = [(i, 5e-5) for i in range(260)]
    # get the config which contains everything necessary in a training
    config = AutoResumeTrainConfig(
        always_resume=False,
        model=Model(),
        # The input source for training. FeedInput is slow, this is just for demo purpose.
        # In practice it's best to use QueueInput or others. See tutorials for details.
        data=QueueInput(
            BatchData2Biggest(
                PrefetchData(train_set,
Example #3
0
        elif isinstance(first_dp, dict):
            result = {}
            for key in first_dp.keys():
                data_list = [x[key] for x in data_holder]
                if use_list:
                    result[key] = data_list
                else:
                    result[key] = BatchData2Biggest._batch_numpy(data_list)
        return result


if __name__ == '__main__':
    # Let tensorpack pick a log directory from the script name.
    logger.auto_set_dir()

    # this is the official train/val split
    train_set = MyDataFlow('/data/mysunrgbd', 'training', training=True, idx_list=list(range(5051, 10336)), cache_dir='cache_train')
    # TestDataSpeed(train_set).start()

    # Load ground-truth boxes per class, then invert the mapping so each
    # image id maps to its full list of (classname, box) annotations.
    gt_cls = {}
    gt_all = {}
    for classname in type2class:
        gt_cls[classname] = get_gt_cls('/home/neil/frustum-pointnets/sunrgbd/sunrgbd_detection/gt_boxes', classname)
        for img_id in gt_cls[classname]:
            # setdefault replaces the LBYL "if img_id not in gt_all" dance
            # and avoids repeated gt_all[img_id] lookups.
            records = gt_all.setdefault(img_id, [])
            for box in gt_cls[classname][img_id]:
                records.append((classname, box))

    # (epoch, learning-rate) milestones — NOTE(review): assumed to be a
    # tensorpack-style piecewise-constant schedule; confirm at the trainer.
    lr_schedule = [(80, 1e-4), (120, 1e-5)]
    # lr_schedule = [(i, 5e-5) for i in range(260)]