def main():
    """Generate the pedestrian dataset, partition it, and save it to disk."""
    args = parse_cmds()

    if DEBUG:
        # Hard-coded development paths; command-line arguments are ignored.
        dest_dir = '/data6/lekevin/fast_track/Fast-Pedestrian-Tracking/dataset'
        src_dir = ('/data6/lekevin/fast_track/caltech-pedestrian-dataset'
                   '-converter')
    else:
        dest_dir = args.path
        src_dir = args.data_dir

    # Set up file/console logging under the destination directory.
    Logger(os.path.join(dest_dir, 'dataset.log'), logging.INFO)
    logger = logging.getLogger(__name__)
    Logger.section_break(title='Generate Dataset')

    # Build the dataset dataframe, split it into partitions, and write
    # each split to data_{split}.csv in the destination directory.
    datagen = DatasetGenerator(src_dir, logger)
    datagen.generate()
    datagen.train_test_split(datagen.dataset_df)
    datagen.save(os.path.join(dest_dir, 'data_{}.csv'))
    def _get_images_paths(self):
        """Collect image files from the data directory.

        Returns:
            list of (path, bool) tuples, one per image found under
            ``<src_dir>/data/images``; the bool is True when the image is
            readable (or when validation is disabled via ``check_valid``).
        """
        # Both branches scan the same directory, so glob exactly once.
        images = glob.glob(
            os.path.join(self.src_dir, 'data', 'images', '*'))

        if self.check_valid:
            valid_imgs = []
            invalid_count = 0
            Logger.section_break('Invalid Images')
            for img_path in images:
                try:
                    # Context manager closes the file handle; the original
                    # left it open after verify().
                    with Image.open(img_path) as im:
                        im.verify()
                    valid_imgs.append((img_path, True))
                except (IOError, SyntaxError):
                    # Log each unreadable/corrupt image and flag it False.
                    self.logger.info(img_path)
                    valid_imgs.append((img_path, False))
                    invalid_count += 1

            self.logger.info('Total invalid images: {}'.format(invalid_count))
        else:
            # Validation disabled: trust every file found.
            valid_imgs = [(i, True) for i in images]

        self.logger.info('')
        self.logger.info('Total images retrieved: {}'.format(len(images)))
        return valid_imgs
    def _report_distribution(self):
        """Log dataset distribution summaries.

        Reports the image count per video (from ``dataset_df``) and, for
        each set, how many videos/images it contains.
        """
        Logger.section_break('Image per video distribution')
        self.logger.info(
            self.dataset_df[Col.VIDEO].value_counts().sort_index())

        Logger.section_break('Videos per set distribution')
        # Renamed loop variable: the original used `set`, shadowing the
        # builtin type.
        for set_name, videos in sorted(self.sets2videos.items()):
            total_imgs = len(self.sets2frames[set_name])
            self.logger.info('{} [{} videos / {} images]: {}\n'.format(
                set_name, len(videos), total_imgs, sorted(videos)))
# ---- Example #4 ----
def main(**kwargs):
    """Benchmark a Faster R-CNN VGG16 model on the test split.

    Keyword arguments are parsed into the global ``opt`` config. Logs the
    model, loads an optional checkpoint, then measures FPS over up to 1000
    test frames and reports parameter count and average per-frame runtime.
    """
    opt._parse(kwargs)

    # Resolve the log-file path: derive one from the timestamp and the
    # overridden option values when none was supplied.
    if opt.benchmark_path is None:
        timestr = time.strftime('%m%d%H%M')
        benchmark_path = f'logs/fasterrcnn_{timestr}'
        for k_, v_ in kwargs.items():
            benchmark_path += f'_{v_}'
        benchmark_path += '.log'
    else:
        # BUG FIX: `benchmark_path` was previously left unbound on this
        # path, raising NameError whenever opt.benchmark_path was set.
        benchmark_path = opt.benchmark_path

    Logger(benchmark_path, logging.INFO)
    logger = logging.getLogger(__name__)
    Logger.section_break(title='Benchmark Model')
    logger.info(f'User Arguments\n{opt._state_dict()}')

    # Load dataset (batch_size=1: benchmark measures per-frame latency).
    dataset = TestDataset(opt, split='test')
    dataloader = data_.DataLoader(dataset,
                                  batch_size=1,
                                  num_workers=opt.test_num_workers,
                                  shuffle=False,
                                  pin_memory=True)

    logger.info(f"DATASET SIZE: {len(dataloader)}")
    # Plain if/else instead of the expression-statement conditional.
    if opt.mask:
        logger.info("Using Mask VGG")
    else:
        logger.info("Using normal VGG16")

    # Construct model
    faster_rcnn = FasterRCNNVGG16(mask=opt.mask)
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    Logger.section_break(title='Model')
    logger.info(str(faster_rcnn))

    # Resume from a checkpoint
    if opt.load_path:
        assert os.path.isfile(opt.load_path),\
            'Checkpoint {} does not exist.'.format(opt.load_path)

        trainer.load(opt.load_path)
        Logger.section_break('Checkpoint')
        logger.info("Loaded checkpoint '{}' (epoch X)".format(opt.load_path))

    # Benchmark: track frames-per-second over the test split.
    fps = AverageMeter()
    benchmarker = {FPS: fps}
    result = benchmark(benchmarker, dataloader, faster_rcnn, test_num=1000)
    Logger.section_break('Benchmark completed')

    # Report trainable-parameter count and average per-frame runtime.
    model_parameters = filter(lambda p: p.requires_grad,
                              faster_rcnn.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    logger.info('[PARAMETERS] {params}'.format(params=params))
    logger.info('[RUN TIME] {time.avg:.3f} sec/frame'.format(time=result[FPS]))
# ---- Example #5 ----
def benchmark(benchmarker, dataloader, faster_rcnn, test_num=1000):
    """Run inference over `dataloader`, recording per-frame FPS.

    Args:
        benchmarker: dict mapping metric keys (e.g. FPS) to AverageMeter-like
            objects with an ``update(value)`` method.
        dataloader: yields (imgs, sizes, gt_bboxes_, gt_labels_) batches.
        faster_rcnn: model exposing ``predict(imgs, sizes)``.
        test_num: stop after this batch index is reached.

    Returns:
        The same ``benchmarker`` dict with updated metrics.
    """
    logger = logging.getLogger(__name__)
    Logger.section_break(title='Benchmark Begin')

    # FIX: removed the dead `since = time.time()` before the loop — it was
    # unconditionally overwritten on every iteration.
    for idx, \
        (imgs, sizes, gt_bboxes_, gt_labels_) in tqdm(enumerate(dataloader)):
        sizes = [sizes[0][0].item(), sizes[1][0].item()]
        start = time.time()
        pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(
            imgs, [sizes])

        # Wall-clock time of one predict() call -> frames per second.
        benchmarker[FPS].update(1 / (time.time() - start))

        if idx % 10 == 0:
            logger.info('{:5}: FPS {t.val:.3f} ({t.avg:.3f})'.format(
                idx, t=benchmarker[FPS]))

        # NOTE(review): breaking at idx == test_num processes test_num + 1
        # frames (indices 0..test_num) — confirm this is intended.
        if idx == test_num:
            break

    return benchmarker