Example #1
def write_video_as_chunked_tiffs(input_reader,
                                 tiffs_to_trace_directory,
                                 chunk_size=200,
                                 chunk_name_pattern='chunk%08d.tif',
                                 stop_after_frame=None,
                                 monitor_video=None,
                                 timestamps_filename=None,
                                 monitor_video_kwargs=None):
    """Write frames to disk as tiff stacks
    
    input_reader : object providing .iter_frames() method and perhaps
        also a .timestamps attribute. For instance, PFReader, or some
        FFmpegReader object.
    tiffs_to_trace_directory : where to store the chunked tiffs
    stop_after_frame : if not None, stop after writing this many frames
    monitor_video : if not None, a filename to write a monitor movie to
    timestamps_filename : if not None, the filename to write timestamps to
    monitor_video_kwargs : kwargs passed to FFmpegWriter for the monitor video
    
    Returns: ChunkedTiffWriter object    
    """
    # Guard against unpacking None into FFmpegWriter below
    if monitor_video_kwargs is None:
        monitor_video_kwargs = {}

    # Tiff writer
    ctw = WhiskiWrap.ChunkedTiffWriter(tiffs_to_trace_directory,
                                       chunk_size=chunk_size,
                                       chunk_name_pattern=chunk_name_pattern)

    # FFmpeg writer is initialized after the first frame
    ffw = None

    # Iterate over frames
    for nframe, frame in enumerate(input_reader.iter_frames()):
        # Stop early?
        if stop_after_frame is not None and nframe >= stop_after_frame:
            break

        # Write to chunked tiff
        ctw.write(frame)

        # Optionally write to monitor video
        if monitor_video is not None:
            # Initialize ffw after first frame so we know the size
            if ffw is None:
                ffw = WhiskiWrap.FFmpegWriter(monitor_video,
                                              frame_width=frame.shape[1],
                                              frame_height=frame.shape[0],
                                              **monitor_video_kwargs)
            ffw.write(frame)

    # Finalize writers
    ctw.close()
    if ffw is not None:
        ff_stdout, ff_stderr = ffw.close()

    # Also write timestamps as numpy file
    if hasattr(input_reader, 'timestamps') and timestamps_filename is not None:
        timestamps = np.concatenate(input_reader.timestamps)
        assert len(timestamps) >= ctw.frames_written
        np.save(timestamps_filename, timestamps[:ctw.frames_written])

    return ctw
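
A minimal usage sketch, assuming WhiskiWrap.FFmpegReader accepts the source
video path as its first argument (paths here are hypothetical). Note that
timestamps are only saved when the reader exposes a .timestamps attribute
(e.g. PFReader):

import WhiskiWrap

input_reader = WhiskiWrap.FFmpegReader('session.mp4')  # hypothetical path
ctw = write_video_as_chunked_tiffs(
    input_reader,
    'tiffs_to_trace/',
    chunk_size=200,
    stop_after_frame=1000,        # stop early for a quick test
    monitor_video='monitor.mp4',
    timestamps_filename='timestamps.npy')
print('frames written:', ctw.frames_written)
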
Example #2
def run_benchmarks(benchmark_params, test_root, force=False):
    """Run the benchmarks
    
    For every row in benchmark params, run a trace on the input video
    using the params specified.
    
    benchmark_params: DataFrame with columns corresponding to keywords
        to pass to pipeline_trace. Should have columns 'name',
        'input_video', 'chunk_sz_frames', 'epoch_sz_frames',
        'frame_start', 'frame_stop', 'n_trace_processes', etc
    
    Returns:
        test_results, durations
        test_results : Dict from test['name'] to results read from hdf5 file
        durations : list of elapsed times, in seconds
    """
    WhiskiWrap.utils.probe_needed_commands()

    test_results = {}
    durations = []
    for idx, test in benchmark_params.iterrows():
        print(test['name'])
        test_dir = os.path.expanduser(os.path.join(test_root, test['name']))
        fn = setup_session_directory(test_dir,
                                     test['input_video'],
                                     force=force)

        # Run
        start_time = time.time()
        WhiskiWrap.pipeline_trace(fn.video('mp4'),
                                  fn.hdf5,
                                  chunk_sz_frames=test['chunk_sz_frames'],
                                  epoch_sz_frames=test['epoch_sz_frames'],
                                  frame_start=test['frame_start'],
                                  frame_stop=test['frame_stop'],
                                  n_trace_processes=test['n_trace_processes'])
        stop_time = time.time()
        durations.append(stop_time - start_time)

        # Get the summary
        with tables.open_file(fn.hdf5) as fi:
            test_results[test['name']] = pandas.DataFrame.from_records(
                fi.root.summary.read())

    return test_results, durations
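
A minimal sketch of how benchmark_params might be constructed (all values
hypothetical):

import pandas

benchmark_params = pandas.DataFrame([
    {'name': 'small_chunks', 'input_video': '~/data/test.mp4',
     'chunk_sz_frames': 100, 'epoch_sz_frames': 1000,
     'frame_start': 0, 'frame_stop': 2000, 'n_trace_processes': 4},
    {'name': 'large_chunks', 'input_video': '~/data/test.mp4',
     'chunk_sz_frames': 500, 'epoch_sz_frames': 1000,
     'frame_start': 0, 'frame_stop': 2000, 'n_trace_processes': 4},
])
test_results, durations = run_benchmarks(benchmark_params, '~/whiski_wrap_test')
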
Example #3
def run_benchmarks(benchmark_params, test_root, force=False):
    """Run the benchmarks
    
    For every row in benchmark params, run a trace on the input video
    using the params specified.
    
    benchmark_params: DataFrame with columns corresponding to keywords
        to pass to pipeline_trace. Should have columns 'name',
        'input_video', 'chunk_sz_frames', 'epoch_sz_frames',
        'frame_start', 'frame_stop', 'n_trace_processes', etc
    
    Returns:
        test_results, durations
        test_results : Dict from test['name'] to results read from hdf5 file
        durations : list of elapsed times, in seconds
    """
    WhiskiWrap.utils.probe_needed_commands()
    
    test_results = {}
    durations = []    
    for idx, test in benchmark_params.iterrows():
        print(test['name'])
        test_dir = os.path.expanduser(os.path.join(test_root, test['name']))
        fn = setup_session_directory(test_dir, test['input_video'], force=force)

        # Run
        start_time = time.time()
        WhiskiWrap.pipeline_trace(
            fn.video('mp4'),
            fn.hdf5,
            chunk_sz_frames=test['chunk_sz_frames'],
            epoch_sz_frames=test['epoch_sz_frames'],
            frame_start=test['frame_start'],
            frame_stop=test['frame_stop'],
            n_trace_processes=test['n_trace_processes'])
        stop_time = time.time()
        durations.append(stop_time - start_time)

        # Get the summary
        with tables.open_file(fn.hdf5) as fi:
            test_results[test['name']] = pandas.DataFrame.from_records(
                fi.root.summary.read()) 
    
    return test_results, durations
Example #4
def run_standard(test_root='~/whiski_wrap_test', force=False):
    """Run a standard trace on a test file to get baseline time"""
    # Check we have commands we need
    WhiskiWrap.utils.probe_needed_commands()

    # Set up test root
    test_root = normalize_path_and_optionally_get_permission(test_root,
                                                             force=force)

    # Find the video to use
    vfile1 = os.path.join(WhiskiWrap.DIRECTORY, 'test_video_165s.mp4')

    # Set up the test directory
    fn = setup_session_directory(os.path.join(test_root, 'standard'), vfile1)

    # Run the test
    start_time = time.time()
    WhiskiWrap.trace_chunk(fn.video('mp4'))
    stop_time = time.time()
    standard_duration = stop_time - start_time

    # Stitch
    WhiskiWrap.setup_hdf5(fn.hdf5, expectedrows=100000)
    WhiskiWrap.append_whiskers_to_hdf5(whisk_filename=fn.whiskers,
                                       h5_filename=fn.hdf5,
                                       chunk_start=0)

    # Get the result
    with tables.open_file(fn.hdf5) as fi:
        test_result = pandas.DataFrame.from_records(fi.root.summary.read())

    return test_result, standard_duration
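
Usage is a single call; force=True skips the interactive confirmation:

test_result, standard_duration = run_standard('~/whiski_wrap_test', force=True)
print('baseline: %d whisker segments in %.1f s' % (
    len(test_result), standard_duration))
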
Example #5
def run_standard(test_root='~/whiski_wrap_test', force=False):
    """Run a standard trace on a test file to get baseline time"""
    # Check we have commands we need
    WhiskiWrap.utils.probe_needed_commands()
    
    # Set up test root
    test_root = normalize_path_and_optionally_get_permission(test_root,
        force=force)
    
    # Find the video to use
    vfile1 = os.path.join(WhiskiWrap.DIRECTORY, 'test_video_165s.mp4')    

    # Set up the test directory
    fn = setup_session_directory(os.path.join(test_root, 'standard'), vfile1)

    # Run the test
    start_time = time.time()
    WhiskiWrap.trace_chunk(fn.video('mp4'))
    stop_time = time.time()
    standard_duration = stop_time - start_time
    
    # Stitch
    WhiskiWrap.setup_hdf5(fn.hdf5, expectedrows=100000)
    WhiskiWrap.append_whiskers_to_hdf5(
        whisk_filename=fn.whiskers,
        h5_filename=fn.hdf5, 
        chunk_start=0)    
    
    # Get the result
    with tables.open_file(fn.hdf5) as fi:
        test_result = pandas.DataFrame.from_records(
            fi.root.summary.read())     

    return test_result, standard_duration
Example #6
def setup_session_directory(directory, input_video, force=False):
    """Create (or overwrite) directory for whisker tracking"""
    # Parse the input video filename
    input_video = os.path.abspath(os.path.expanduser(input_video))
    if not os.path.exists(input_video):
        raise ValueError("%s does not exist" % input_video)
    input_video_directory, input_video_filename = os.path.split(input_video)

    # Erase existing directory and create anew
    whiski_files = [
        '.mp4', '.avi', '.whiskers', '.tif', '.measurements', '.detectorbank',
        '.parameters', '.hdf5'
    ]
    if os.path.exists(directory):
        # Check that it looks like a whiskers directory
        file_list = os.listdir(directory)
        for filename in file_list:
            if (os.path.splitext(filename)[1]) not in whiski_files:
                raise ValueError(directory +
                                 " does not look safe to overwrite, aborting")

        # Get user confirmation
        if not force:
            confirm = WhiskiWrap.raw_input('Ok to erase %s? [y/N]: ' %
                                           directory)
            if confirm.upper() != 'Y':
                raise ValueError("did not receive permission to setup test")

        # Erase
        shutil.rmtree(directory)
    os.mkdir(directory)

    # Copy the input video into the session directory
    new_video_filename = os.path.join(directory, input_video_filename)
    shutil.copyfile(input_video, new_video_filename)

    # Copy the parameter files in
    for filename in [
            WhiskiWrap.PARAMETERS_FILE, WhiskiWrap.HALFSPACE_DB_FILE,
            WhiskiWrap.LINE_DB_FILE
    ]:
        raw_filename = os.path.split(filename)[1]
        shutil.copyfile(filename, os.path.join(directory, raw_filename))

    return WhiskiWrap.utils.FileNamer.from_video(new_video_filename)
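
A short usage sketch (hypothetical paths); the returned FileNamer exposes the
derived filenames used throughout these examples:

import os

session_dir = os.path.expanduser('~/whiski_wrap_test/session0')
fn = setup_session_directory(session_dir, '~/data/test.mp4', force=True)
print(fn.video('mp4'), fn.whiskers, fn.hdf5)
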
Example #7
        assert args.video_path is not None, 'Video path must be specified.'
        video_path = args.video_path

    # Use the current working directory as the base for outputs
    wdir = os.getcwd()
    # Get the video name without its extension
    video_fname = os.path.basename(video_path)
    video_name = os.path.splitext(video_fname)[0]
    # The output path is the video name prefixed with 'whiski_'
    output_path = os.path.join(wdir, 'whiski_' + video_name)
    # Create the output path if it doesn't exist
    if not os.path.exists(output_path):
        warn("output path didn't exist; creating " + output_path)
        os.mkdir(output_path)
    # Copy the video into the output path if it is not already there
    input_video = os.path.join(output_path, video_fname)
    if not os.path.exists(input_video):
        warn("input video not present; copying from " + video_path)
        shutil.copy(video_path, input_video)
    output_file = os.path.join(output_path, video_name + '.hdf5')
    freeze_support()
    input_video = os.path.expanduser(input_video)
    output_file = os.path.expanduser(output_file)
    print('input_video ', input_video)
    print('output_file', output_file)

    WhiskiWrap.pipeline_trace(input_video,
                              output_file,
                              n_trace_processes=4,
                              chunk_sz_frames=100)
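
Because pipeline_trace spawns worker processes, the fragment above should run
under a main guard when used as a script; a minimal sketch with hypothetical
paths:

from multiprocessing import freeze_support

import WhiskiWrap

if __name__ == '__main__':
    freeze_support()
    WhiskiWrap.pipeline_trace('whiski_session/session.mp4',
                              'whiski_session/session.hdf5',
                              n_trace_processes=4,
                              chunk_sz_frames=100)
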
Example #8
def write_video_with_overlays_from_data(
    output_filename,
    input_reader,
    input_width,
    input_height,
    verbose=True,
    frame_triggers=None,
    trigger_dstart=-250,
    trigger_dstop=50,
    plot_trial_numbers=True,
    d_temporal=5,
    d_spatial=1,
    dpi=50,
    output_fps=30,
    input_video_alpha=1,
    whiskers_table=None,
    whiskers_file_handle=None,
    side='left',
    edge_a=None,
    edge_alpha=1,
    typical_edges_hist2d=None,
    contacts_table=None,
    post_contact_linger=50,
    write_stderr_to_screen=True,
    input_frame_offset=0,
    get_extra_text=None,
    contact_colors=None,
    also_plot_traces=False,
    trace_data_x=None,
    trace_data_y=None,
    trace_data_kwargs=None,
    ffmpeg_writer_kwargs=None,
    f=None,
    ax=None,
    func_update_figure=None,
    whisker_lw=2,
    whisker_marker=None,
):
    """Creating a video overlaid with whiskers, contacts, etc.
    
    The overall dataflow is this:
    1. Load chunks of frames from the input
    2. One by one, plot the frame with matplotlib. Overlay whiskers, edges,
        contacts, whatever.
    3. Dump the frame to an ffmpeg writer.
    
    # Input and output
    output_filename : file to create
    input_reader : PFReader or input video
    
    # Timing and spatial parameters
    frame_triggers : Only plot frames within (trigger_dstart, trigger_dstop)
        of a value in this array.
    trigger_dstart, trigger_dstop : number of frames
    d_temporal : Save time by plotting every Nth frame
    d_spatial : Save time by spatially undersampling the image
        The bottleneck is typically plotting the raw image in matplotlib
    
    # Video parameters
    dpi : The output video will always be pixel by pixel the same as the
        input (keeping d_spatial in mind). But this dpi value affects font
        and marker size.
    output_fps : set the frame rate of the output video (ffmpeg -r)
    input_video_alpha : alpha of image
    input_frame_offset : set this if you have already seeked this many
        frames in the input_reader, so that the first frame read is
        actually frame `input_frame_offset` in the source (and thus in
        edge_a, contacts_table, etc.). This is the only parameter that
        needs adjusting in that case, not frame_triggers or anything else.
    ffmpeg_writer_kwargs : other parameters for FFmpegWriter
    
    # Other sources of input
    edge_alpha : alpha of edge
    post_contact_linger : How long to leave the contact displayed    
        This is the total duration, so 0 will display nothing, and 1 is minimal.
    
    # Misc
    get_extra_text : if not None, should be a function that accepts a frame
        number and returns some text to add to the display. This is a 
        "real" frame number after accounting for any offset.
    contact_colors : list of color specs to use
    func_update_figure : optional, function that takes the frame number
        as input and updates the figure
    """
    # We need FFmpegWriter
    # Probably that object should be moved to my.video
    # Or maybe a new repo ffmpeg_tricks
    import WhiskiWrap

    # Parse the arguments
    frame_triggers = np.asarray(frame_triggers).astype(int)
    announced_frame_trigger = 0
    input_width = int(input_width)
    input_height = int(input_height)

    if contact_colors is None:
        contact_colors = my.plot.generate_colorbar(7)

    if ffmpeg_writer_kwargs is None:
        ffmpeg_writer_kwargs = {}

    ## Set up the graphical handles
    if verbose:
        print("setting up handles")

    if ax is None:
        # Create a figure with an image that fills it
        # We want the figsize to be in inches, so divide by dpi
        # And we want one invisible axis containing an image that fills the whole figure
        figsize = input_width / float(dpi), input_height / float(dpi)
        f = plt.figure(frameon=False, dpi=dpi / d_spatial, figsize=figsize)
        ax = f.add_axes([0, 0, 1, 1])
        ax.axis('off')

        # This return results in pixels, so should be the same as input width
        # and height. If not, probably rounding error above
        canvas_width, canvas_height = f.canvas.get_width_height()
        if (input_width / d_spatial != canvas_width or
                input_height / d_spatial != canvas_height):
            raise ValueError("canvas size is not the same as input size")
    else:
        assert f is not None

        # This is used later in creating the writer
        canvas_width, canvas_height = f.canvas.get_width_height()

    # Plot typical edge images as static alpha
    if typical_edges_hist2d is not None:
        im1 = my.plot.imshow(typical_edges_hist2d,
                             ax=ax,
                             axis_call='image',
                             extent=(0, input_width, input_height, 0),
                             cmap=plt.cm.gray)
        im1.set_alpha(edge_alpha)

    # Plot input video frames
    in_image = np.zeros((input_height, input_width))
    im2 = my.plot.imshow(in_image[::d_spatial, ::d_spatial],
                         ax=ax,
                         axis_call='image',
                         cmap=plt.cm.gray,
                         extent=(0, input_width, input_height, 0))
    im2.set_alpha(input_video_alpha)
    im2.set_clim((0, 255))

    # Plot contact positions dynamically
    if contacts_table is not None:
        contact_positions_l = []
        for color in contact_colors:
            contact_positions_l.append(
                ax.plot([np.nan], [np.nan], '.', ms=15, color=color)[0])
        #~ contact_positions, = ax.plot([np.nan], [np.nan], 'r.', ms=15)
    else:
        contact_positions_l = None

    # Dynamic edge
    if edge_a is not None:
        edge_a_obj, = ax.plot([np.nan], [np.nan], '-', color='pink', lw=3)
    else:
        edge_a_obj = None

    # Text of trial
    if plot_trial_numbers:
        txt = ax.text(0,
                      ax.get_ylim()[0],
                      'waiting',
                      size=20,
                      ha='left',
                      va='bottom',
                      color='w')
        trial_number = -1

    # This will hold whisker objects
    whisker_handles = []

    # Create the writer
    writer = WhiskiWrap.FFmpegWriter(
        output_filename=output_filename,
        frame_width=canvas_width,
        frame_height=canvas_height,
        output_fps=output_fps,
        input_pix_fmt='argb',
        write_stderr_to_screen=write_stderr_to_screen,
        **ffmpeg_writer_kwargs)

    ## Loop until input frames exhausted
    for nnframe, frame in enumerate(input_reader.iter_frames()):
        # Account for the fact that we skipped the first input_frame_offset frames
        nframe = nnframe + input_frame_offset

        # Break if we're past the last trigger
        if nframe > np.max(frame_triggers) + trigger_dstop:
            break

        # Skip if we're not on a dframe
        if np.mod(nframe, d_temporal) != 0:
            continue

        # Skip if we're not near a trial
        nearest_choice_idx = np.nanargmin(np.abs(frame_triggers - nframe))
        nearest_choice = frame_triggers[nearest_choice_idx]
        if not (nframe > nearest_choice + trigger_dstart
                and nframe < nearest_choice + trigger_dstop):
            continue

        # Announce
        if ((announced_frame_trigger < len(frame_triggers))
                and (nframe > frame_triggers[announced_frame_trigger] +
                     trigger_dstart)):
            print "Reached trigger for frame", frame_triggers[
                announced_frame_trigger]
            announced_frame_trigger += 1

        # Update the trial text
        if plot_trial_numbers:  # and (nearest_choice_idx > trial_number):
            if get_extra_text is not None:
                extra_text = get_extra_text(nframe)
            else:
                extra_text = ''
            txt.set_text('frame %d trial %d %s' %
                         (nframe, nearest_choice_idx, extra_text))
            trial_number = nearest_choice_idx

        # Update the frame
        whisker_handles = frame_update(ax,
                                       nframe,
                                       frame,
                                       whisker_handles,
                                       contacts_table,
                                       post_contact_linger,
                                       whiskers_table,
                                       whiskers_file_handle,
                                       edge_a,
                                       im2,
                                       edge_a_obj,
                                       contact_positions_l,
                                       d_spatial,
                                       d_temporal,
                                       contact_colors,
                                       whisker_lw=whisker_lw,
                                       whisker_marker=whisker_marker)

        if func_update_figure is not None:
            func_update_figure(nframe)

        # Write to pipe
        f.canvas.draw()
        string_bytes = f.canvas.tostring_argb()
        writer.write_bytes(string_bytes)

    ## Clean up
    if whiskers_file_handle is not None:
        whiskers_file_handle.close()
    if not input_reader.isclosed():
        input_reader.close()
    writer.close()
    plt.close(f)
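
A hedged usage sketch: paths, dimensions, and trigger frames are hypothetical,
and the whiskers table is read from the hdf5 summary as in the benchmark
examples:

import numpy as np
import pandas
import tables
import WhiskiWrap

with tables.open_file('session.hdf5') as fi:
    whiskers_table = pandas.DataFrame.from_records(fi.root.summary.read())

input_reader = WhiskiWrap.FFmpegReader('session.mp4')
write_video_with_overlays_from_data(
    'overlay.mp4',
    input_reader,
    input_width=640, input_height=480,
    frame_triggers=np.array([1000, 2500]),
    whiskers_table=whiskers_table,
    d_temporal=5, d_spatial=1)
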
Example #9
def get_permission_for_test_root(test_root):
    """Ask for permission to run in test_root"""
    response = WhiskiWrap.raw_input('Run tests in %s? [y/N]: ' % test_root)
    if response.upper() != 'Y':
        raise ValueError("did not receive permission to run test")
Example #10
def interleaved_reading_and_tracing(
        input_reader,
        tiffs_to_trace_directory,
        sensitive=False,
        chunk_size=200,
        chunk_name_pattern='chunk%08d.tif',
        stop_after_frame=None,
        delete_tiffs=True,
        timestamps_filename=None,
        monitor_video=None,
        monitor_video_kwargs=None,
        write_monitor_ffmpeg_stderr_to_screen=False,
        h5_filename=None,
        frame_func=None,
        n_trace_processes=4,
        expectedrows=1000000,
        verbose=True):
    """Read, write, and trace each chunk, one at a time.
    
    This is an alternative to first calling:
        write_video_as_chunked_tiffs
    And then calling
        trace_chunked_tiffs
    
    input_reader : Typically a PFReader or FFmpegReader
    tiffs_to_trace_directory : Location to write the tiffs
    sensitive: if False, use default. If True, lower MIN_SIGNAL
    chunk_size : frames per chunk
    chunk_name_pattern : how to name them
    stop_after_frame : break early, for debugging
    delete_tiffs : whether to delete tiffs after done tracing
    timestamps_filename : Where to store the timestamps
        Only valid for PFReader input_reader
    monitor_video : filename for a monitor video
        If None, no monitor video will be written
    monitor_video_kwargs : kwargs to pass to FFmpegWriter for monitor
    write_monitor_ffmpeg_stderr_to_screen : whether to display
        output from ffmpeg writing instance
    h5_filename : hdf5 file to stitch whiskers information into
    frame_func : function to apply to each frame
        If 'invert', will apply 255 - frame
    n_trace_processes : number of simultaneous trace processes
    expectedrows : how to set up hdf5 file
    verbose : whether to print progress messages
    
    Returns: dict
        trace_pool_results : result of each call to trace
        monitor_ff_stderr, monitor_ff_stdout : results from monitor
            video ffmpeg instance
    """
    ## Set up kwargs
    if monitor_video_kwargs is None:
        monitor_video_kwargs = {}

    if frame_func == 'invert':
        frame_func = lambda frame: 255 - frame

    # Check commands
    WhiskiWrap.utils.probe_needed_commands()

    ## Initialize readers and writers
    if verbose:
        print("initializing readers and writers")
    # Tiff writer
    ctw = WhiskiWrap.ChunkedTiffWriter(tiffs_to_trace_directory,
                                       chunk_size=chunk_size,
                                       chunk_name_pattern=chunk_name_pattern)

    # FFmpeg writer is initialized after the first frame
    ffw = None

    # Setup the result file
    setup_hdf5(h5_filename, expectedrows)

    # Copy the parameters files
    copy_parameters_files(tiffs_to_trace_directory, sensitive=sensitive)

    ## Set up the worker pool
    # Pool of trace workers
    trace_pool = multiprocessing.Pool(n_trace_processes)

    # Keep track of results
    trace_pool_results = []
    deleted_tiffs = []

    def log_result(result):
        trace_pool_results.append(result)

    ## Iterate over chunks
    out_of_frames = False
    nframe = 0

    # Init the iterator outside of the loop so that it persists
    iter_obj = input_reader.iter_frames()

    while not out_of_frames:
        # Get a chunk of frames
        if verbose:
            print("loading chunk of frames starting with", nframe)
        chunk_of_frames = []
        for frame in iter_obj:
            if frame_func is not None:
                frame = frame_func(frame)
            chunk_of_frames.append(frame)
            nframe = nframe + 1
            if stop_after_frame is not None and nframe >= stop_after_frame:
                break
            if len(chunk_of_frames) == chunk_size:
                break

        # Check if we ran out
        if len(chunk_of_frames) != chunk_size:
            out_of_frames = True

        ## Write tiffs
        # We do this synchronously to ensure that it happens before
        # the trace starts
        for frame in chunk_of_frames:
            ctw.write(frame)

        # Make sure the chunk was written, in case this is the last one
        # and we didn't reach chunk_size yet
        if len(chunk_of_frames) != chunk_size:
            ctw._write_chunk()
        assert ctw.count_unwritten_frames() == 0

        # Figure out which tiff file was just generated
        tif_filename = ctw.chunknames_written[-1]

        ## Start trace
        trace_pool.apply_async(trace_chunk,
                               args=(tif_filename, delete_tiffs),
                               callback=log_result)

        ## Determine whether we can delete any tiffs
        #~ if delete_tiffs:
        #~ tiffs_to_delete = [
        #~ tpres['video_filename'] for tpres in trace_pool_results
        #~ if tpres['video_filename'] not in deleted_tiffs]
        #~ for filename in tiffs_to_delete:
        #~ if verbose:
        #~ print "deleting", filename
        #~ os.remove(filename)

        ## Start monitor encode
        # This is also synchronous, otherwise the input buffer might fill up
        if monitor_video is not None:
            if ffw is None:
                ffw = WhiskiWrap.FFmpegWriter(
                    monitor_video,
                    frame_width=frame.shape[1],
                    frame_height=frame.shape[0],
                    write_stderr_to_screen=
                    write_monitor_ffmpeg_stderr_to_screen,
                    **monitor_video_kwargs)
            for frame in chunk_of_frames:
                ffw.write(frame)

        ## Determine if we should pause
        while (len(ctw.chunknames_written) >
               len(trace_pool_results) + 2 * n_trace_processes):
            print("waiting for tracing to catch up")
            time.sleep(30)

    ## Wait for trace to complete
    if verbose:
        print("done with reading and writing, just waiting for tracing")
    # Tell it no more jobs, so close when done
    trace_pool.close()

    # Wait for everything to finish
    trace_pool.join()

    ## Error check the tifs that were processed
    # Get the tifs we wrote, and the tifs we trace
    written_chunks = sorted(ctw.chunknames_written)
    traced_filenames = sorted(
        [res['video_filename'] for res in trace_pool_results])

    # Check that they are the same
    if not np.all(np.array(written_chunks) == np.array(traced_filenames)):
        raise ValueError("not all chunks were traced")

    ## Extract the chunk numbers from the filenames
    # The tiffs have been written, figure out which they are
    split_traced_filenames = [os.path.split(fn)[1] for fn in traced_filenames]
    tif_file_number_strings = my.misc.apply_and_filter_by_regex(
        r'^chunk(\d+)\.tif$', split_traced_filenames, sort=False)
    tif_full_filenames = [
        os.path.join(tiffs_to_trace_directory, 'chunk%s.tif' % fns)
        for fns in tif_file_number_strings
    ]
    tif_file_numbers = list(map(int, tif_file_number_strings))
    tif_ordering = np.argsort(tif_file_numbers)
    tif_sorted_filenames = np.array(tif_full_filenames)[tif_ordering]
    tif_sorted_file_numbers = np.array(tif_file_numbers)[tif_ordering]

    # stitch
    print "Stitching"
    for chunk_start, chunk_name in zip(tif_sorted_file_numbers,
                                       tif_sorted_filenames):
        # Append each chunk to the hdf5 file
        fn = WhiskiWrap.utils.FileNamer.from_tiff_stack(chunk_name)
        append_whiskers_to_hdf5(whisk_filename=fn.whiskers,
                                h5_filename=h5_filename,
                                chunk_start=chunk_start)

    # Finalize writers
    ctw.close()
    if ffw is not None:
        ff_stdout, ff_stderr = ffw.close()
    else:
        ff_stdout, ff_stderr = None, None

    # Also write timestamps as numpy file
    if hasattr(input_reader, 'timestamps') and timestamps_filename is not None:
        timestamps = np.concatenate(input_reader.timestamps)
        assert len(timestamps) >= ctw.frames_written
        np.save(timestamps_filename, timestamps[:ctw.frames_written])

    return {
        'trace_pool_results': trace_pool_results,
        'monitor_ff_stdout': ff_stdout,
        'monitor_ff_stderr': ff_stderr,
    }
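
A hedged usage sketch (hypothetical paths; reader construction as assumed in
the earlier examples):

import WhiskiWrap

input_reader = WhiskiWrap.FFmpegReader('session.mp4')  # or a PFReader
results = interleaved_reading_and_tracing(
    input_reader,
    'tiffs_to_trace/',
    h5_filename='session.hdf5',
    frame_func='invert',       # trace on 255 - frame
    n_trace_processes=4,
    chunk_size=200)
print(len(results['trace_pool_results']), 'chunks traced')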