Example #1
0
def test_auto_config():
    """The auto-detected config must name an existing model file per stage."""
    config = get_auto_config()
    for section in ('Decoder', 'Localizer', 'TagSimilarityEncoder'):
        assert section in config
        assert os.path.exists(config[section]['model_path'])
Example #2
0
def init_pipeline(output, no_localizer):
    """Build a pipeline that produces the requested output objects.

    Args:
        output (list): pipeline objects that the pipeline will
                       generate as the result
        no_localizer (boolean): whether or not the localizer should be
                                skipped to decode a single tag in the
                                center of a 100x100 image

    Returns:
        pipeline object
    """
    # When the localizer is skipped the pipeline starts from precomputed
    # regions/positions; otherwise it starts from the raw image.
    if no_localizer:
        inputs = [objects.Regions, objects.LocalizerPositions]
    else:
        inputs = [objects.Image]
    return Pipeline(inputs, output, **get_auto_config())
def init_pipeline(output):
    """Build an image-fed pipeline producing the requested output objects.

    Args:
        output (list): pipeline objects that the pipeline will
                       generate as the result

    Returns:
        pipeline object
    """
    return Pipeline([objects.Image], output, **get_auto_config())
Example #4
0
    def __init__(self, site_builder,
                 camera_rotations=None,
                 detections_path='detections.pkl'):
        """Set up the visualizer/pipeline state for one site.

        Args:
            site_builder: builder object for the site output
                (opaque here; stored as-is).
            camera_rotations (dict, optional): per-camera rotation sign,
                defaults to {0: 1, 1: -1, 2: 1, 3: -1}.
            detections_path (str): pickle file with previously stored
                detections; loaded if it exists, else an empty list.
        """
        self.builder = site_builder
        # Fix: the previous signature used a mutable dict as a default
        # argument, which would be shared across all instances. Rebuild
        # the default per instance instead.
        if camera_rotations is None:
            camera_rotations = {0: 1, 1: -1, 2: 1, 3: -1}
        self.rotations = camera_rotations
        self.pipeline = Pipeline([Image], [LocalizerInputImage, FinalResultOverlay, CrownOverlay,
                                           IDs, SaliencyOverlay], **get_auto_config())
        self.crown = ResultCrownVisualizer()

        self.detections_path = detections_path
        if os.path.isfile(self.detections_path):
            # Fix: close the pickle file deterministically instead of
            # leaking the handle returned by the bare open() call.
            with open(self.detections_path, 'rb') as f:
                self.detections = pickle.load(f)
        else:
            self.detections = []
def init_pipeline(output, no_localizer):
    """Create a pipeline yielding the given output objects.

    Args:
        output (list): pipeline objects that the pipeline will
                       generate as the result
        no_localizer (boolean): whether or not the localizer should be
                                skipped to decode a single tag in the
                                center of a 100x100 image

    Returns:
        pipeline object
    """
    # Input objects depend on whether the localizer stage is bypassed.
    inputs = ([objects.Regions, objects.LocalizerPositions] if no_localizer
              else [objects.Image])
    return Pipeline(inputs, output, **get_auto_config())
def process_video(args):
    """Run the decoding pipeline(s) over one video and sink results
    into a bb_binary repository.

    Args:
        args: parsed CLI namespace with num_threads, repo_output_path,
              video_path, timestamp_format and text_root_path.
    """
    config = get_auto_config()

    logger.info('Initializing {} pipeline(s)'.format(args.num_threads))
    plines = []
    for _ in range(args.num_threads):
        plines.append(Pipeline([Image, Timestamp], [PipelineResult], **config))

    logger.info('Loading bb_binary repository {}'.format(args.repo_output_path))
    repo = Repository(args.repo_output_path)

    camId, _, _ = parse_video_fname(args.video_path)
    logger.info('Parsed camId = {}'.format(camId))
    sink_factory = lambda: BBBinaryRepoSink(repo, camId=camId)
    gen_processor = GeneratorProcessor(plines, sink_factory)

    logger.info('Processing video frames from {}'.format(args.video_path))
    frames = video_generator(args.video_path, args.timestamp_format, args.text_root_path)
    gen_processor(frames)
def run(image_path_file, out, force):
    """Extract tag ROIs from a list of images into an HDF5 dataset.

    Args:
        image_path_file (file): open file with one image path per line
        out (str): path of the output HDF5 dataset
        force (bool): remove a pre-existing output file first

    ROIs are cached in memory and flushed (shuffled) to the dataset
    every 50 processed images.
    """
    cache = {}

    if force and os.path.exists(out):
        print("Removing {}.".format(out))
        os.remove(out)

    def add_to_cache(**kwargs):
        # Accumulate per-name lists of arrays until the next flush.
        for name, arr in kwargs.items():
            if name not in cache:
                cache[name] = [arr]
            else:
                cache[name].append(arr)

    def flush_cache():
        # Fix: guard against an empty cache. The final flush after the
        # loop (or a flush when every image failed) used to raise
        # StopIteration / ValueError from next(iter(...)) and
        # np.concatenate on an empty mapping.
        if not cache:
            return
        cache_concat = {n: np.concatenate(arrs) for n, arrs in cache.items()}
        nb_samples = len(next(iter(cache_concat.values())))
        # Shuffle all cached arrays with one shared permutation so rows
        # stay aligned across names.
        permutation = np.random.permutation(nb_samples)
        cache_shuffled = {n: arrs[permutation]
                          for n, arrs in cache_concat.items()}
        dset.append(**cache_shuffled)
        cache.clear()

    roi_size = 96
    image_fnames = [n.rstrip('\n') for n in image_path_file.readlines()]
    pipeline = Pipeline([Filename],
                        [Image, LocalizerPositions],
                        **get_auto_config())
    dset = HDF5Dataset(out)
    bar = progressbar.ProgressBar(max_value=len(image_fnames))
    for i, image_fname in enumerate(bar(image_fnames)):
        try:
            results = pipeline([image_fname])
            rois, mask = Localizer.extract_rois(results[LocalizerPositions],
                                                results[Image], roi_size)
        except Exception as e:
            # Best-effort: skip unreadable/undecodable images, keep going.
            print(e)
            continue

        nb_detections = np.sum(mask)
        camIdx, dt = parse_image_fname(image_fname)
        season = np.array([dt.year] * nb_detections, dtype=np.uint16)
        timestamp = np.array([dt.timestamp()] * nb_detections, dtype=np.float64)
        add_to_cache(rois=rois, season=season, timestamp=timestamp)
        if i % 50 == 0 and i != 0:
            flush_cache()
    flush_cache()
def process_video(video_path, repo_output_path, ts_format, text_root_path,
                  rank):
    """Decode one video into a per-rank bb_binary repository.

    Args:
        video_path (str): video file to process
        repo_output_path (str): base output dir; a 'process_<rank>'
            subdirectory is created below it
        ts_format (str): timestamp format for the frame generator
        text_root_path (str): root path for auxiliary text files
        rank (int): worker index, used for log and output names
    """
    info = lambda msg: logger.info('Process {}: {}'.format(rank, msg))

    # Imported inside the function so each worker process sets up theano
    # and the pipeline in its own context.
    import theano
    from pipeline import Pipeline
    from pipeline.cmdline import logger
    from pipeline.pipeline import GeneratorProcessor, get_auto_config
    from pipeline.io import BBBinaryRepoSink, video_generator
    from pipeline.objects import PipelineResult, Image, Timestamp
    from bb_binary import Repository, parse_video_fname

    repo_output_path = os.path.join(repo_output_path,
                                    'process_{}'.format(rank))

    info('Theano compile dir: {}'.format(theano.config.base_compiledir))
    info('Output dir: {}'.format(repo_output_path))

    config = get_auto_config()

    info('Initializing pipeline')
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **config)

    info('Loading bb_binary repository {}'.format(repo_output_path))
    repo = Repository(repo_output_path)

    camId, _, _ = parse_video_fname(video_path)
    info('Parsed camId = {}'.format(camId))
    gen_processor = GeneratorProcessor(
        pipeline, lambda: BBBinaryRepoSink(repo, camId=camId))

    log_callback = lambda frame_idx: info('Processing frame {} from {}'.format(
        frame_idx, video_path))

    info('Processing video frames from {}'.format(video_path))
    # Fix: the stderr log file was opened and never closed; close it
    # deterministically once processing finishes (or fails).
    with open('process_{}_ffmpeg_stderr.log'.format(rank), 'w') as ffmpeg_stderr_fd:
        gen_processor(
            video_generator(video_path, ts_format, text_root_path, log_callback,
                            ffmpeg_stderr_fd))
Example #9
0
def process_video(args):
    """Process one video with num_threads parallel pipelines, writing
    results into a bb_binary repository.

    Args:
        args: parsed CLI namespace with num_threads, repo_output_path,
              video_path, timestamp_format and text_root_path.
    """
    config = get_auto_config()

    logger.info('Initializing {} pipeline(s)'.format(args.num_threads))
    pipelines = [Pipeline([Image, Timestamp], [PipelineResult], **config)
                 for _ in range(args.num_threads)]

    logger.info('Loading bb_binary repository {}'.format(
        args.repo_output_path))
    repo = Repository(args.repo_output_path)

    camId, _, _ = parse_video_fname(args.video_path)
    logger.info('Parsed camId = {}'.format(camId))

    def make_sink():
        # One sink per pipeline run, all writing into the same repository.
        return BBBinaryRepoSink(repo, camId=camId)

    gen_processor = GeneratorProcessor(pipelines, make_sink)

    logger.info('Processing video frames from {}'.format(args.video_path))
    frame_source = video_generator(args.video_path, args.timestamp_format,
                                   args.text_root_path)
    gen_processor(frame_source)
def process_video(video_path, repo_output_path, ts_format, text_root_path,
                  rank):
    """Decode one video into a per-rank bb_binary repository.

    Args:
        video_path (str): video file to process
        repo_output_path (str): base output dir; a 'process_<rank>'
            subdirectory is created below it
        ts_format (str): timestamp format for the frame generator
        text_root_path (str): root path for auxiliary text files
        rank (int): worker index, used for log and output names
    """
    info = lambda msg: logger.info(f"Process {rank}: {msg}")

    # Imported inside the function so each worker process sets up theano
    # and the pipeline in its own context.
    import theano
    from pipeline import Pipeline
    from pipeline.cmdline import logger
    from pipeline.pipeline import GeneratorProcessor, get_auto_config
    from pipeline.io import BBBinaryRepoSink, video_generator
    from pipeline.objects import PipelineResult, Image, Timestamp
    from bb_binary import Repository, parse_video_fname

    repo_output_path = os.path.join(repo_output_path, f"process_{rank}")

    info(f"Theano compile dir: {theano.config.base_compiledir}")
    info(f"Output dir: {repo_output_path}")

    config = get_auto_config()

    info("Initializing pipeline")
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **config)

    info(f"Loading bb_binary repository {repo_output_path}")
    repo = Repository(repo_output_path)

    camId, _, _ = parse_video_fname(video_path)
    info(f"Parsed camId = {camId}")
    gen_processor = GeneratorProcessor(
        pipeline, lambda: BBBinaryRepoSink(repo, camId=camId))

    log_callback = lambda frame_idx: info(
        f"Processing frame {frame_idx} from {video_path}")

    info(f"Processing video frames from {video_path}")
    # Fix: the stderr log file was opened and never closed; close it
    # deterministically once processing finishes (or fails).
    with open(f"process_{rank}_ffmpeg_stderr.log", "w") as ffmpeg_stderr_fd:
        gen_processor(
            video_generator(video_path, ts_format, text_root_path, log_callback,
                            ffmpeg_stderr_fd))
def process_video(video_path, repo_output_path, ts_format, text_root_path, rank):
    """Decode one video into a per-rank bb_binary repository.

    Args:
        video_path (str): video file to process
        repo_output_path (str): base output dir; a 'process_<rank>'
            subdirectory is created below it
        ts_format (str): timestamp format for the frame generator
        text_root_path (str): root path for auxiliary text files
        rank (int): worker index, used for log and output names
    """
    info = lambda msg: logger.info('Process {}: {}'.format(rank, msg))

    # Imported inside the function so each worker process sets up theano
    # and the pipeline in its own context.
    import theano
    from pipeline import Pipeline
    from pipeline.cmdline import logger
    from pipeline.pipeline import GeneratorProcessor, get_auto_config
    from pipeline.io import BBBinaryRepoSink, video_generator
    from pipeline.objects import PipelineResult, Image, Timestamp
    from bb_binary import Repository, parse_video_fname

    repo_output_path = os.path.join(repo_output_path, 'process_{}'.format(rank))

    info('Theano compile dir: {}'.format(theano.config.base_compiledir))
    info('Output dir: {}'.format(repo_output_path))

    config = get_auto_config()

    info('Initializing pipeline')
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **config)

    info('Loading bb_binary repository {}'.format(repo_output_path))
    repo = Repository(repo_output_path)

    camId, _, _ = parse_video_fname(video_path)
    info('Parsed camId = {}'.format(camId))
    gen_processor = GeneratorProcessor(pipeline,
                                       lambda: BBBinaryRepoSink(repo, camId=camId))

    log_callback = lambda frame_idx: info('Processing frame {} from {}'.format(frame_idx,
                                                                               video_path))

    info('Processing video frames from {}'.format(video_path))
    # Fix: the stderr log file was opened and never closed; close it
    # deterministically once processing finishes (or fails).
    with open('process_{}_ffmpeg_stderr.log'.format(rank), 'w') as ffmpeg_stderr_fd:
        gen_processor(video_generator(video_path, ts_format, text_root_path,
                                      log_callback, ffmpeg_stderr_fd))
Example #12
0
    # return previously stored events
    def getEvents(self):
        """Return the buffered processing results.

        Returns:
            tuple: (old_events, old_event_candidates, last_videotime) as
            previously stored on this instance.
        """
        return (self.old_events, self.old_event_candidates,
                self.last_videotime)


# Load server settings (feeders, paths, counters) from the config file.
config = configparser.ConfigParser()
config.read('server.cfg')
# Build one FileLoader per feeder; feeder_ids and feeder_addresses are
# parallel comma-separated lists in the [Feeders] section.
feeders = {}
ids = config['Feeders']['feeder_ids'].split(',')
addresses = config['Feeders']['feeder_addresses'].split(',')
for i in range(len(ids)):
    feeders[ids[i]] = FileLoader(ids[i], addresses[i])

# Detection pipeline: images in, positions/orientations/saliencies/IDs out,
# using the auto-detected model configuration.
pipeline = Pipeline([Image], [Positions, Orientations, Saliencies, IDs],
                    **get_auto_config())
print("Pipeline initialized")
framenum = 0
vis = ResultCrownVisualizer()

# Resume the event counter from the config; results are appended to a CSV.
event_num = int(config['General']['last_event_id'])
previous = None
last_time = time.time()
csvfile = open(config['General']['csvfile'], 'a')
csvwriter = csv.writer(csvfile)
running = True
# Main loop: fetch new videos from every feeder, then iterate the files in
# the video directory (loop body continues beyond this excerpt).
while (running):
    for feeder in feeders.keys():
        print("Downloading videos from feeder with ID: " + feeder)
        feeders[feeder].getFiles()
    for file in os.listdir(config['General']['videodir']):
Example #13
0
def pipeline_config():
    """Return the auto-detected pipeline configuration."""
    config = get_auto_config()
    return config
Example #14
0
def test_auto_config():
    """Decoder and Localizer model paths must exist in the auto config."""
    config = get_auto_config()
    for section in ("Decoder", "Localizer"):
        assert section in config
        assert os.path.exists(config[section]["model_path"])
Example #15
0
def pipeline_config():
    """Provide the auto-detected pipeline configuration mapping."""
    return get_auto_config()