def open_tracking_file(self, tracking_path):
        print("Loading {}".format(tracking_path))

        # load saved tracking data
        (tail_coords_array, spline_coords_array,
         heading_angle_array, body_position_array,
         eye_coords_array, tracking_params) = an.open_saved_data(tracking_path)

        # calculate tail angles
        if tracking_params['type'] == "freeswimming":
            heading_angle_array = an.fix_heading_angles(heading_angle_array)

            if tracking_params['track_tail']:
                tail_angle_array = an.get_freeswimming_tail_angles(tail_coords_array, heading_angle_array, body_position_array)
            else:
                tail_angle_array = None
        elif tracking_params['type'] == "headfixed":
            tail_angle_array = an.get_headfixed_tail_angles(tail_coords_array, tail_angle=tracking_params['tail_angle'], tail_direction=tracking_params['tail_direction'])
        else:
            tail_angle_array = None

        if tail_angle_array is not None:
            self.current_plot_type = "tail"

            # get array of average angle of the last few points of the tail
            # tail_end_angle_array = tail_angle_array[:, :, -1]
            tail_end_angle_array = an.get_tail_end_angles(tail_angle_array, num_to_average=3)
            # print(tail_end_angle_array.shape)
        else:
            self.current_plot_type = "body"
            tail_end_angle_array = None

        self.tail_angle_arrays[self.current_tracking_num]     = tail_angle_array
        self.tail_end_angle_arrays[self.current_tracking_num] = tail_end_angle_array
        self.heading_angle_arrays[self.current_tracking_num]  = heading_angle_array
        self.body_position_arrays[self.current_tracking_num]  = body_position_array
        self.eye_position_arrays[self.current_tracking_num]   = eye_coords_array
        self.tracking_params[self.current_tracking_num]       = tracking_params

        if self.current_plot_type == "tail":
            self.plot_array = self.tail_end_angle_arrays[self.current_tracking_num][self.current_crop]
        elif self.current_plot_type == "body":
            self.plot_array = self.heading_angle_arrays[self.current_tracking_num][self.current_crop]
        elif self.current_plot_type == "eyes":
            self.plot_array = self.eye_position_arrays[self.current_tracking_num][self.current_crop]

        self.analysis_window.update_plot(self.plot_array, self.current_plot_type, keep_xlim=False)
Example #2
import sys

import numpy as np
import matplotlib.pyplot as plt
import pandas
from ggplot import *

import analysis as an  # assumed alias: the script below calls an.open_saved_data() etc.
import tracking

tracking_path = sys.argv[1]
video_path = sys.argv[2]

fps, n_frames = tracking.get_video_info(video_path)

print("FPS: {}, # frames: {}.".format(fps, n_frames))

(tail_coords_array, spline_coords_array, heading_angle_array,
 body_position_array, eye_coords_array,
 tracking_params) = an.open_saved_data(tracking_path)

heading_angle_array = an.fix_heading_angles(heading_angle_array)

tail_angle_array = an.get_freeswimming_tail_angles(tail_coords_array,
                                                   heading_angle_array,
                                                   body_position_array)

tail_end_angle_array = an.get_tail_end_angles(tail_angle_array,
                                              num_to_average=1)[0]

plt.plot(tail_end_angle_array)
plt.plot(heading_angle_array[0])
plt.show()
Example #3
import sys

# analysis and open_media are project-local modules used below
import analysis
import open_media

tracking_path = sys.argv[1]
video_path = sys.argv[2]

if not (tracking_path.endswith('.npz')
        and video_path.endswith(('.avi', '.mov', '.mp4'))):
    raise ValueError(
        'Invalid arguments: the first argument must be the .npz tracking data file '
        'and the second must be the video file (.avi, .mov or .mp4).')

# Get heading & tail angle arrays
(tail_coords_array, spline_coords_array, heading_angle_array,
 body_position_array, eye_coords_array,
 tracking_params) = analysis.open_saved_data(tracking_path)

heading_angle_array = analysis.fix_heading_angles(heading_angle_array)
tail_angle_array = analysis.get_freeswimming_tail_angles(
    tail_coords_array, heading_angle_array, body_position_array)
heading_angle_array = heading_angle_array[0, :, 0]
tail_end_angle_array = analysis.get_tail_end_angles(tail_angle_array,
                                                    num_to_average=1)[0]

# Get info about the video
fps, n_frames_total = open_media.get_video_info(video_path)
print("FPS: {}, # frames: {}.".format(fps, n_frames_total))

# Update the number of frames to load (n_frames is assumed to be set earlier in the
# original script, e.g. from a command-line argument; 0 means "load every frame")
if n_frames == 0:
    n_frames = n_frames_total

# Create a video capture object that we can re-use
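# A minimal sketch of the capture step described above, assuming OpenCV (cv2) is the
# video backend used by the project:
import cv2

capture = cv2.VideoCapture(video_path)
if not capture.isOpened():
    raise IOError("Could not open video: {}".format(video_path))

# read frames with capture.read() as needed, then release the handle when done
capture.release()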
Example #4
import os

import numpy as np
import scipy.stats
import matplotlib.pyplot as plt

import analysis

# savitzky_golay() and find_nearest() are helper functions assumed to be defined
# elsewhere in this project


def process_video(folder, video_name, plot=False):
    # set data paths
    tracking_path = os.path.join(
        folder, "{}_Image-Data_Video-Capture_tracking.npz".format(video_name))
    stim_data_path = os.path.join(folder,
                                  "{}_Stimulus-Data.csv".format(video_name))
    frame_data_path = os.path.join(folder,
                                   "{}_Vimba-Data.csv".format(video_name))

    # load tracking data
    (tail_coords_array, spline_coords_array, heading_angle, body_position,
     eye_coords_array, tracking_params) = analysis.open_saved_data(tracking_path)
    heading_angle = analysis.fix_heading_angles(heading_angle)
    body_position = analysis.fix_body_position(body_position)
    heading_angle = heading_angle[0, :, 0]
    body_position = body_position[0]

    # load frame timestamp data
    frame_data = np.loadtxt(frame_data_path, skiprows=1)

    # get total number of frames
    n_frames = frame_data.shape[0]
    print("Number of frames: {}.".format(n_frames))

    # calculate milliseconds at which each frame occurs
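    # frame_data columns are assumed to be [hours, minutes, seconds, milliseconds, ..., frame number]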
    frame_milliseconds = np.zeros(n_frames)
    for i in range(n_frames):
        frame_milliseconds[i] = 1000 * (
            60 * (60 * frame_data[i, 0] + frame_data[i, 1]) +
            frame_data[i, 2]) + frame_data[i, 3]
    frame_nums = frame_data[:, -1]

    frame_nums = frame_nums[:heading_angle.shape[0]]
    frame_milliseconds = frame_milliseconds[:heading_angle.shape[0]]
    n_frames = len(frame_nums)

    frame_nums[frame_nums >= n_frames] = n_frames - 1

    # load stimulus timestamp data
    stim_data = np.loadtxt(stim_data_path, skiprows=1)

    # get total number of stim switches
    n_stim_switches = stim_data.shape[0]
    print("Number of stimulus switches: {}.".format(n_stim_switches))

    # calculate milliseconds and closest frame numbers at which stim switches occur
    stim_switch_milliseconds = np.zeros(n_stim_switches)
    stim_switch_frame_nums = np.zeros(n_stim_switches)
    for i in range(n_stim_switches):
        stim_switch_milliseconds[i] = 1000 * (
            60 * (60 * stim_data[i, 0] + stim_data[i, 1]) +
            stim_data[i, 2]) + stim_data[i, 3]
        stim_switch_frame_nums[i] = frame_nums[find_nearest(
            frame_milliseconds, stim_switch_milliseconds[i],
            return_index=True)]
    stim_switch_frame_nums = stim_switch_frame_nums.astype(int)

    # stim_switch_frame_nums = stim_switch_frame_nums[:heading_angle.shape[0]]
    # stim_switch_milliseconds = stim_switch_milliseconds[:heading_angle.shape[0]]
    # print(heading_angle.shape, body_position.shape)

    # extract stim ids
    stim_ids = stim_data[:, -1]
    stim_ids = stim_ids.astype(int)

    print(stim_ids)

    # create array containing the stim id for each frame
    stim_id_frames = np.zeros(n_frames).astype(int)
    for i in range(n_stim_switches):
        if i < n_stim_switches - 1:
            stim_id_frames[stim_switch_frame_nums[i]:stim_switch_frame_nums[
                i + 1]] = stim_ids[i]
        else:
            stim_id_frames[stim_switch_frame_nums[i]:] = stim_ids[i]

    # create array containing the stim # for each frame
    stim_num_frames = np.zeros(n_frames).astype(int)
    for i in range(n_stim_switches):
        if i < n_stim_switches - 1:
            stim_num_frames[
                stim_switch_frame_nums[i]:stim_switch_frame_nums[i + 1]] = i
        else:
            stim_num_frames[stim_switch_frame_nums[i]:] = i

    # ---- capture bouts that correspond to turns ---- #

    # smooth the heading angle array using a Savitzky-Golay filter
    smoothing_window_width = 51  # Savitzky-Golay window length (must be odd)
    smoothed_heading_angle = savitzky_golay(heading_angle, smoothing_window_width, 3)

    # calculate the difference between the heading angle at each frame and the heading angle 10 frames later
    n = 10
    running_heading_angle_difference = np.abs(
        smoothed_heading_angle - np.roll(smoothed_heading_angle, -n))
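    # the last n entries compare against wrapped-around values (np.roll), so zero them out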
    running_heading_angle_difference[-n:] = 0
    running_heading_angle_difference = np.nan_to_num(
        running_heading_angle_difference)

    # extract points where the difference is greater than the threshold
    threshold = 0.1
    heading_angle_difference_above_threshold = (
        running_heading_angle_difference >= threshold)

    # smooth this array
    smoothing_window_width = 20
    normpdf = scipy.stats.norm.pdf(
        range(-int(smoothing_window_width / 2),
              int(smoothing_window_width / 2)), 0, 3)
    heading_angle_difference_above_threshold[
        int(smoothing_window_width / 2):-int(smoothing_window_width / 2) +
        1] = np.convolve(heading_angle_difference_above_threshold,
                         normpdf / np.sum(normpdf),
                         mode='valid')
    heading_angle_difference_above_threshold = heading_angle_difference_above_threshold.astype(
        int)

    # ---- capture bouts that correspond to forward motions by looking at the distance from the top-left corner ---- #

    # smooth the body position array using a Savitzky-Golay filter
    smoothed_body_position = np.zeros(body_position.shape)
    smoothed_body_position[:, 0] = savitzky_golay(body_position[:, 0], 51, 3)
    smoothed_body_position[:, 1] = savitzky_golay(body_position[:, 1], 51, 3)

    # compute the distance of the body position from the top-left corner (0, 0)
    body_distance_tl = np.sqrt((body_position[:, 0])**2 +
                               (body_position[:, 1])**2)
    smoothed_body_distance_tl = np.sqrt((smoothed_body_position[:, 0])**2 +
                                        (smoothed_body_position[:, 1])**2)

    # scale so that it's in the same range as the heading angle array
    body_distance_tl -= np.nanmin(body_distance_tl)
    body_distance_tl = (np.nanmax(heading_angle) - np.nanmin(heading_angle)
                        ) * body_distance_tl / np.nanmax(body_distance_tl)
    body_distance_tl += np.nanmin(heading_angle)

    smoothed_body_distance_tl -= np.nanmin(smoothed_body_distance_tl)
    smoothed_body_distance_tl = (
        np.nanmax(smoothed_heading_angle) - np.nanmin(smoothed_heading_angle)
    ) * smoothed_body_distance_tl / np.nanmax(smoothed_body_distance_tl)
    smoothed_body_distance_tl += np.nanmin(smoothed_heading_angle)

    # calculate the difference between the body distance at each frame and the body distance 10 frames later
    n = 10
    running_body_distance_tl_difference = np.abs(
        smoothed_body_distance_tl - np.roll(smoothed_body_distance_tl, -n))
    running_body_distance_tl_difference[-n:] = 0
    running_body_distance_tl_difference = np.nan_to_num(
        running_body_distance_tl_difference)

    # extract points where the difference is greater than the threshold
    threshold = 0.2
    body_distance_tl_difference_above_threshold = (
        running_body_distance_tl_difference >= threshold)

    # smooth this array
    smoothing_window_width = 20
    normpdf = scipy.stats.norm.pdf(
        range(-int(smoothing_window_width / 2),
              int(smoothing_window_width / 2)), 0, 3)
    body_distance_tl_difference_above_threshold[
        int(smoothing_window_width / 2):-int(smoothing_window_width / 2) +
        1] = np.convolve(body_distance_tl_difference_above_threshold,
                         normpdf / np.sum(normpdf),
                         mode='valid')
    body_distance_tl_difference_above_threshold = body_distance_tl_difference_above_threshold.astype(
        int)

    # ---- Do the same for the distance from the bottom-right corner ---- #

    # compute the distance of the body position from the bottom-right corner (1024, 1280)
    body_distance_br = np.sqrt((body_position[:, 0] - 1024)**2 +
                               (body_position[:, 1] - 1280)**2)
    smoothed_body_distance_br = np.sqrt(
        (smoothed_body_position[:, 0] - 1024)**2 +
        (smoothed_body_position[:, 1] - 1280)**2)

    # scale so that it's in the same range as the heading angle array
    body_distance_br -= np.nanmin(body_distance_br)
    body_distance_br = (np.nanmax(heading_angle) - np.nanmin(heading_angle)
                        ) * body_distance_br / np.nanmax(body_distance_br)
    body_distance_br += np.nanmin(heading_angle)

    smoothed_body_distance_br -= np.nanmin(smoothed_body_distance_br)
    smoothed_body_distance_br = (
        np.nanmax(smoothed_heading_angle) - np.nanmin(smoothed_heading_angle)
    ) * smoothed_body_distance_br / np.nanmax(smoothed_body_distance_br)
    smoothed_body_distance_br += np.nanmin(smoothed_heading_angle)

    # calculate the difference between the body distance at each frame and the body distance 10 frames later
    n = 10
    running_body_distance_br_difference = np.abs(
        smoothed_body_distance_br - np.roll(smoothed_body_distance_br, -n))
    running_body_distance_br_difference[-n:] = 0
    running_body_distance_br_difference = np.nan_to_num(
        running_body_distance_br_difference)

    # extract points where the difference is greater than the threshold
    threshold = 0.2
    body_distance_br_difference_above_threshold = (
        running_body_distance_br_difference >= threshold)

    # smooth this array
    smoothing_window_width = 20
    normpdf = scipy.stats.norm.pdf(
        range(-int(smoothing_window_width / 2),
              int(smoothing_window_width / 2)), 0, 3)
    body_distance_br_difference_above_threshold[
        int(smoothing_window_width / 2):-int(smoothing_window_width / 2) +
        1] = np.convolve(body_distance_br_difference_above_threshold,
                         normpdf / np.sum(normpdf),
                         mode='valid')
    body_distance_br_difference_above_threshold = body_distance_br_difference_above_threshold.astype(
        int)

    # ---- Do the same for the distance from the bottom-left corner ---- #

    # compute the distance of the body position from the bottom-left corner (1024, 0)
    body_distance_bl = np.sqrt((body_position[:, 0] - 1024)**2 +
                               (body_position[:, 1])**2)
    smoothed_body_distance_bl = np.sqrt((smoothed_body_position[:, 0] -
                                         1024)**2 +
                                        (smoothed_body_position[:, 1])**2)

    # scale so that it's in the same range as the heading angle array
    body_distance_bl -= np.nanmin(body_distance_bl)
    body_distance_bl = (np.nanmax(heading_angle) - np.nanmin(heading_angle)
                        ) * body_distance_bl / np.nanmax(body_distance_bl)
    body_distance_bl += np.nanmin(heading_angle)

    smoothed_body_distance_bl -= np.nanmin(smoothed_body_distance_bl)
    smoothed_body_distance_bl = (
        np.nanmax(smoothed_heading_angle) - np.nanmin(smoothed_heading_angle)
    ) * smoothed_body_distance_bl / np.nanmax(smoothed_body_distance_bl)
    smoothed_body_distance_bl += np.nanmin(smoothed_heading_angle)

    # calculate the difference between the body distance at each frame and the body distance 10 frames later
    n = 10
    running_body_distance_bl_difference = np.abs(
        smoothed_body_distance_bl - np.roll(smoothed_body_distance_bl, -n))
    running_body_distance_bl_difference[-n:] = 0
    running_body_distance_bl_difference = np.nan_to_num(
        running_body_distance_bl_difference)

    # extract points where the difference is greater than the threshold
    threshold = 0.2
    body_distance_bl_difference_above_threshold = (
        running_body_distance_bl_difference >= threshold)

    # smooth this array
    smoothing_window_width = 20
    normpdf = scipy.stats.norm.pdf(
        range(-int(smoothing_window_width / 2),
              int(smoothing_window_width / 2)), 0, 3)
    body_distance_bl_difference_above_threshold[
        int(smoothing_window_width / 2):-int(smoothing_window_width / 2) +
        1] = np.convolve(body_distance_bl_difference_above_threshold,
                         normpdf / np.sum(normpdf),
                         mode='valid')
    body_distance_bl_difference_above_threshold = body_distance_bl_difference_above_threshold.astype(
        int)

    # ---- Do the same for the distance from the top-right corner ---- #

    # compute the distance of the body position from the top-right corner (0, 1280)
    body_distance_tr = np.sqrt((body_position[:, 0])**2 +
                               (body_position[:, 1] - 1280)**2)
    smoothed_body_distance_tr = np.sqrt((smoothed_body_position[:, 0])**2 +
                                        (smoothed_body_position[:, 1] -
                                         1280)**2)

    # scale so that it's in the same range as the heading angle array
    body_distance_tr -= np.nanmin(body_distance_tr)
    body_distance_tr = (np.nanmax(heading_angle) - np.nanmin(heading_angle)
                        ) * body_distance_tr / np.nanmax(body_distance_tr)
    body_distance_tr += np.nanmin(heading_angle)

    smoothed_body_distance_tr -= np.nanmin(smoothed_body_distance_tr)
    smoothed_body_distance_tr = (
        np.nanmax(smoothed_heading_angle) - np.nanmin(smoothed_heading_angle)
    ) * smoothed_body_distance_tr / np.nanmax(smoothed_body_distance_tr)
    smoothed_body_distance_tr += np.nanmin(smoothed_heading_angle)

    # calculate the difference between the body distance at each frame and the body distance 10 frames later
    n = 10
    running_body_distance_tr_difference = np.abs(
        smoothed_body_distance_tr - np.roll(smoothed_body_distance_tr, -n))
    running_body_distance_tr_difference[-n:] = 0
    running_body_distance_tr_difference = np.nan_to_num(
        running_body_distance_tr_difference)

    # extract points where the difference is greater than the threshold
    threshold = 0.2
    body_distance_tr_difference_above_threshold = (
        running_body_distance_tr_difference >= threshold)

    # smooth this array
    smoothing_window_width = 20
    normpdf = scipy.stats.norm.pdf(
        range(-int(smoothing_window_width / 2),
              int(smoothing_window_width / 2)), 0, 3)
    body_distance_tr_difference_above_threshold[
        int(smoothing_window_width / 2):-int(smoothing_window_width / 2) +
        1] = np.convolve(body_distance_tr_difference_above_threshold,
                         normpdf / np.sum(normpdf),
                         mode='valid')
    body_distance_tr_difference_above_threshold = body_distance_tr_difference_above_threshold.astype(
        int)

    # ---- Do the same for the distance from the center of the video ---- #

    # compute the distance of the body position from the center of the frame (512, 640)
    body_distance_c = np.sqrt((body_position[:, 0] - 512)**2 +
                              (body_position[:, 1] - 640)**2)
    smoothed_body_distance_c = np.sqrt((smoothed_body_position[:, 0] -
                                        512)**2 +
                                       (smoothed_body_position[:, 1] - 640)**2)

    # scale so that it's in the same range as the heading angle array
    body_distance_c -= np.nanmin(body_distance_c)
    body_distance_c = (np.nanmax(heading_angle) - np.nanmin(heading_angle)
                       ) * body_distance_c / np.nanmax(body_distance_c)
    body_distance_c += np.nanmin(heading_angle)

    smoothed_body_distance_c -= np.nanmin(smoothed_body_distance_c)
    smoothed_body_distance_c = (
        np.nanmax(smoothed_heading_angle) - np.nanmin(smoothed_heading_angle)
    ) * smoothed_body_distance_c / np.nanmax(smoothed_body_distance_c)
    smoothed_body_distance_c += np.nanmin(smoothed_heading_angle)

    # calculate the difference between the body distance at each frame and the body distance 10 frames later
    n = 10
    running_body_distance_c_difference = np.abs(
        smoothed_body_distance_c - np.roll(smoothed_body_distance_c, -n))
    running_body_distance_c_difference[-n:] = 0
    running_body_distance_c_difference = np.nan_to_num(
        running_body_distance_c_difference)

    # extract points where the difference is greater than the threshold
    threshold = 0.2
    body_distance_c_difference_above_threshold = (
        running_body_distance_c_difference >= threshold)

    # smooth this array
    smoothing_window_width = 20
    normpdf = scipy.stats.norm.pdf(
        range(-int(smoothing_window_width / 2),
              int(smoothing_window_width / 2)), 0, 3)
    body_distance_c_difference_above_threshold[
        int(smoothing_window_width / 2):-int(smoothing_window_width / 2) +
        1] = np.convolve(body_distance_c_difference_above_threshold,
                         normpdf / np.sum(normpdf),
                         mode='valid')
    body_distance_c_difference_above_threshold = body_distance_c_difference_above_threshold.astype(
        int)

    # -------------------------------------------------- #

    # combine bouts obtained by looking at the body position with those obtained by looking at the heading angle
    combined_difference_above_threshold = np.logical_or(
        heading_angle_difference_above_threshold,
        body_distance_tl_difference_above_threshold).astype(int)
    combined_difference_above_threshold = np.logical_or(
        combined_difference_above_threshold,
        body_distance_br_difference_above_threshold).astype(int)
    combined_difference_above_threshold = np.logical_or(
        combined_difference_above_threshold,
        body_distance_bl_difference_above_threshold).astype(int)
    combined_difference_above_threshold = np.logical_or(
        combined_difference_above_threshold,
        body_distance_tr_difference_above_threshold).astype(int)
    combined_difference_above_threshold = np.logical_or(
        combined_difference_above_threshold,
        body_distance_c_difference_above_threshold).astype(int)

    # get the frame numbers of the start & end of all the bouts
    combined_difference_above_threshold_greater_than_0 = (
        combined_difference_above_threshold > 0).astype(int)
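    # subtracting a copy of this binary mask shifted left by one frame gives -1 at the
    # frame just before a bout starts and +1 at the last frame of each bout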
    above_threshold_difference = combined_difference_above_threshold_greater_than_0 - np.roll(
        combined_difference_above_threshold_greater_than_0, -1)
    above_threshold_difference[-1] = 0
    # print(above_threshold_difference.shape)
    bout_start_frames = np.nonzero(above_threshold_difference == -1)[0] + 1
    bout_end_frames = np.nonzero(above_threshold_difference == 1)[0] - 1
    #
    # print(bout_end_frames)

    # if a bout starts at frame 0, add the start to bout_start_frames
    if combined_difference_above_threshold[0] > 0:
        bout_start_frames = np.concatenate([np.array([1]), bout_start_frames])

    # get total number of bouts
    n_bouts = len(bout_start_frames)
    print("Number of bouts: {}.".format(n_bouts))

    # print(n_frames)

    # create array containing the bout number for each frame
    # we set it to -1 when a frame is not in a bout
    bout_number_frames = np.zeros(n_frames) - 1
    for i in range(n_bouts):
        bout_number_frames[bout_start_frames[i]:bout_end_frames[i]] = i

    # initialize variable used to calculate the mean bout length in milliseconds
    mean_bout_length = 0

    n_non_circular_grating_bouts = 0

    # determine, for each bout, the heading angle and position at the start and end
    # bout_results is a list of 9 lists, one for each type of stimulus
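    # (indexed by stimulus id, in the same order as the `stims` list used for plotting below)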
    bout_results = [[] for i in range(9)]
    for i in range(n_bouts):
        # get the stim id, frame where it starts and frame when it ends
        stim_id = stim_id_frames[bout_start_frames[i]]
        start_frame = bout_start_frames[i]
        end_frame = bout_end_frames[i]

        if stim_id != 0:
            # add to the mean bout length variable
            mean_bout_length += frame_milliseconds[
                end_frame + 1] - frame_milliseconds[start_frame]
            n_non_circular_grating_bouts += 1

        # print("Bout {} starts at frame {} and ends at frame {}.".format(i, start_frame, end_frame))

        # save the heading angle & position at the start & end of the bout, and the video name
        results = {
            'heading_angle_start': heading_angle[start_frame],
            'heading_angle_end': heading_angle[end_frame],
            'position_start': (body_position[start_frame, 0],
                               body_position[start_frame, 1]),
            'position_end': (body_position[end_frame, 0],
                             body_position[end_frame, 1]),
            'video': video_name
        }

        # add to the bout_results list
        bout_results[stim_id].append(results)

    if n_non_circular_grating_bouts > 0:
        # get the mean bout length
        mean_bout_length /= n_non_circular_grating_bouts
    else:
        mean_bout_length = 0
    print("Mean bout length is {} ms.".format(mean_bout_length))

    # print(n_frames)

    # determine, for each type of stimulus, the heading angle and position at the start and end
    # stim_results is a list of 9 lists, one for each type of stimulus
    stim_results = [[] for i in range(9)]
    for i in range(n_stim_switches):
        # get the stim id, frame where it starts and frame when it ends
        stim_id = stim_ids[i]
        start_frame = stim_switch_frame_nums[i]
        if i < n_stim_switches - 1:
            end_frame = stim_switch_frame_nums[i + 1] - 1
        else:
            end_frame = n_frames - 2

        # print(n_frames)

        # print("Stimulus {} starts at frame {} and ends at frame {}.".format(i, start_frame, end_frame))

        # save the heading angle & position at the start & end of the bout, and the video name
        results = {
            'heading_angle_start': heading_angle[start_frame],
            'heading_angle_end': heading_angle[end_frame],
            'position_start': (body_position[start_frame, 0],
                               body_position[start_frame, 1]),
            'position_end': (body_position[end_frame, 0],
                             body_position[end_frame, 1]),
            'video': video_name
        }

        # add to the stim_results list
        stim_results[stim_id].append(results)

    if plot:
        # plot results
        print(frame_milliseconds.shape)
        print(heading_angle.shape)
        fig, ax = plt.subplots()
        # ax.plot((frame_milliseconds[:heading_angle.shape[0]] - frame_milliseconds[0])/1000, heading_angle[:frame_milliseconds.shape[0]]*180/np.pi, 'black', lw=1)

        ax.plot(heading_angle[:-1], 'black', lw=1)
        ax.plot(body_distance_c[:-1], 'purple', lw=1)
        ax.fill_between(np.arange(len(body_distance_tl)),
                        np.amin(np.nan_to_num(body_distance_tl)),
                        np.amax(np.nan_to_num(body_distance_tl)),
                        where=combined_difference_above_threshold.astype(bool),
                        facecolor='black',
                        alpha=0.2)

        colors = [
            'red', 'orange', 'yellow', 'green', 'blue', 'brown', 'black',
            'cyan', 'magenta'
        ]
        stims = [
            'Circular Grating', 'Left Grating', 'Right Grating', 'Left Dot',
            'Right Dot', 'Left Looming', 'Right Looming', 'White', 'Black'
        ]
        for i in range(n_stim_switches):
            stim_active = stim_num_frames == i
            if i < n_stim_switches - 1:
                stim_active[stim_switch_frame_nums[i + 1]] = 1
            else:
                stim_active[-1] = 1
            ax.fill_between(np.arange(len(stim_id_frames)),
                            np.amin(np.nan_to_num(heading_angle)),
                            np.amax(np.nan_to_num(heading_angle)),
                            where=stim_active.astype(bool),
                            facecolor=colors[stim_ids[i]],
                            alpha=0.2)
            ax.text(stim_switch_frame_nums[i] + 10,
                    0,
                    stims[stim_ids[i]],
                    fontsize=8,
                    alpha=0.5)
        plt.show()

    return bout_results, stim_results, mean_bout_length
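

# The five distance-from-a-reference-point blocks above repeat the same
# rescale / lag-difference / threshold / smooth pattern. A sketch of a helper that
# factors the pattern out (the helper name is an assumption, not part of the
# original script; it relies on the numpy and scipy.stats imports above):
def distance_bouts_above_threshold(body_position, heading_angle, reference_point,
                                   lag=10, threshold=0.2, window=20):
    # distance of the body position from the reference point
    distance = np.sqrt((body_position[:, 0] - reference_point[0])**2 +
                       (body_position[:, 1] - reference_point[1])**2)

    # rescale the distance into the same range as the heading angle
    distance -= np.nanmin(distance)
    distance *= (np.nanmax(heading_angle) - np.nanmin(heading_angle)) / np.nanmax(distance)
    distance += np.nanmin(heading_angle)

    # absolute change over `lag` frames; the last `lag` entries wrap around, so zero them
    difference = np.abs(distance - np.roll(distance, -lag))
    difference[-lag:] = 0
    difference = np.nan_to_num(difference)

    # threshold the difference, then smooth the resulting binary mask with a Gaussian kernel
    above_threshold = (difference >= threshold)
    kernel = scipy.stats.norm.pdf(range(-window // 2, window // 2), 0, 3)
    above_threshold[window // 2:-window // 2 + 1] = np.convolve(
        above_threshold, kernel / np.sum(kernel), mode='valid')
    return above_threshold.astype(int)

# for example, distance_bouts_above_threshold(smoothed_body_position, smoothed_heading_angle, (0, 0))
# mirrors the top-left corner block in process_video() above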