Example #1
def estimate_minimal_resolution(start, end):
    """Estimates the minimal time increment needed to fully capture the data.
    
    Args :  start (arr) = the starting timepoints for the behavioral bouts
            end (arr) = the ending timepoints for the behavioral bouts

    Returns : minimal_resolution (float) = the minimal time increment needed to capture the shortest behavioral bout
    """

    minimal_resolution = 1 / min(end - start)

    utils.print_in_color(
        "The smallest behavioral bout is {0}s long".format(
            1 / minimal_resolution), "GREEN")

    return minimal_resolution
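A minimal usage sketch, assuming start and end are numpy arrays of bout boundaries in seconds and the project's utils module is importable (the values below are illustrative):

import numpy as np

start = np.array([2.0, 10.5, 30.0])
end = np.array([4.0, 11.0, 33.0])
res = estimate_minimal_resolution(start, end)  # shortest bout lasts 0.5 s, so res == 2.0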
Example #2
def merge_neighboring_bouts(position_bouts, **kwargs):
    """Algorithm that merges behavioral bouts that are close together.
    
    Args :  position_bouts (arr) = list of start and end of each behavioral bout
            kwargs (dict) = dictionary with additional parameters

    Returns : position_bouts_merged (list) = list of merged start and end of each behavioral bout
              length_bouts_merged (list) = list of the length of each merged behavioral bout
    """

    position_bouts_merged = []
    length_bouts_merged = []

    merged = False
    for i in np.arange(0, len(position_bouts) - 1, 1):
        if not merged:
            cur_bout = position_bouts[i]

        next_bout = position_bouts[i + 1]
        if next_bout[0] - cur_bout[1] <= kwargs["peak_merging_distance"]:
            merged = True
            cur_bout = (cur_bout[0], next_bout[1])
            if i == len(position_bouts) - 2:
                position_bouts_merged.append(cur_bout)
                length_bouts_merged.append(cur_bout[1] - cur_bout[0])
        else:  # the gap to the next bout exceeds the merging distance
            merged = False
            position_bouts_merged.append(cur_bout)
            length_bouts_merged.append(cur_bout[1] - cur_bout[0])

            if i == len(position_bouts) - 2:
                position_bouts_merged.append(next_bout)
                length_bouts_merged.append(next_bout[1] - next_bout[0])

    print("\n")
    utils.print_in_color(
        "Merged neighboring peaks that were closer than {0}s away".format(
            kwargs["peak_merging_distance"]), "GREEN")

    return position_bouts_merged, length_bouts_merged
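A usage sketch, assuming position_bouts is a list of (start, end) tuples in seconds and peak_merging_distance is taken from the parameters dictionary (the values are illustrative):

bouts = [(2.0, 4.0), (5.0, 9.0), (30.0, 33.0)]
merged, lengths = merge_neighboring_bouts(bouts, peak_merging_distance=7.0)
# (2.0, 4.0) and (5.0, 9.0) are only 1 s apart, so they merge into (2.0, 9.0):
# merged == [(2.0, 9.0), (30.0, 33.0)], lengths == [7.0, 3.0]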
Example #3
def extract_manual_bouts(start, end, **kwargs):
    """Extracts the time and length of each behavioral bout.
    
    Args :  start (arr) = the starting timepoints for the behavioral bouts
            end (arr) = the ending timepoints for the behavioral bouts
            kwargs (dict) = dictionary with additional parameters

    Returns : position_bouts (list) = list of start and end of each behavioral bout
              length_bouts (list) = list of the length of each behavioral bout
    """
    position_bouts = []
    length_bouts = []
    for s, e in zip(start, end):
        position_bouts.append((s, e))
        length_bouts.append(e - s)

    print("\n")
    utils.print_in_color(
        "Behavioral data extracted. Behavior = {0}".format(
            kwargs["behavior_to_segment"]), "GREEN")

    return position_bouts, length_bouts
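A usage sketch, assuming start and end hold manually scored bout boundaries in seconds and behavior_to_segment is the label stored in the parameters dictionary (values illustrative):

import numpy as np

start = np.array([2.0, 10.5])
end = np.array([4.0, 11.0])
bouts, lengths = extract_manual_bouts(start, end, behavior_to_segment="grooming")
# bouts == [(2.0, 4.0), (10.5, 11.0)], lengths == [2.0, 0.5]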
Example #4
def create_bool_map(start, end, **kwargs):
    """Creates a boolean map of the time during which the animal performs a given behavior.
    
    Args :  start (arr) = the starting timepoints for the behavioral bouts
            end (arr) = the ending timepoints for the behavioral bouts
            kwargs (dict) = dictionary with additional parameters

    Returns : bool_map (arr) = the boolean map of the time during which the animal performs the behavior
    """

    bool_map = []

    for s, e in zip(start, end):
        for i in np.arange(
                len(bool_map) / kwargs["recording_sampling_rate"], s,
                1 / kwargs["recording_sampling_rate"]):
            bool_map.append(False)  # FIXME: extend not append
        for i in np.arange(s, e, 1 / kwargs["recording_sampling_rate"]):
            bool_map.append(True)

    for i in np.arange(i + (1 / kwargs["recording_sampling_rate"]),
                       round(kwargs["video_end"]),
                       1 / kwargs["recording_sampling_rate"]):
        bool_map.append(False)

    if i != round(kwargs["video_end"]):
        if not bool_map[-1]:
            bool_map.append(False)
        else:
            bool_map.append(True)

    print("\n")
    utils.print_in_color(
        "Behavioral data extracted. Behavior = {0}".format(
            kwargs["behavior_to_segment"]), "GREEN")

    return bool_map
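The FIXME above points at the slow sample-by-sample appends; below is a minimal vectorized sketch of the same idea, assuming a constant recording_sampling_rate and a video_end expressed in seconds (the helper name and the exact sample count are illustrative, not part of the original API):

import numpy as np

def create_bool_map_vectorized(start, end, recording_sampling_rate, video_end):
    timestamps = np.arange(0, round(video_end), 1 / recording_sampling_rate)  # one timestamp per sample
    bool_map = np.zeros(len(timestamps), dtype=bool)
    for s, e in zip(start, end):
        bool_map[(timestamps >= s) & (timestamps < e)] = True  # samples falling inside a bout are True
    return bool_map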
Example #5
def get_recording_duration_and_sampling_rate(file, allow_downsampling=True):
    """Function that takes a csv file as an input, extract the time data from it,
    computes an estimate of the sampling rate and returns it
    
    Args :      file (str) = the path to the csv file
    
    Returns :   sr (float) = The estimate sampling rate of the photometry system
    """
    if not os.path.exists(file):
        raise RuntimeError("{0} file doesn't exist !".format(file))

    photometry_sheet = pd.read_csv(file, header=1,
                                   usecols=[0])  # Load the data
    photometry_data_npy = photometry_sheet.to_numpy(
    )  # Convert to numpy for speed
    photometry_data_transposed = np.transpose(
        photometry_data_npy)  # Transpose data

    x = np.array(photometry_data_transposed[0])  # time data

    sr = round(len(x) / x[-1])

    if sr >= 250:
        print("\n")
        utils.print_in_color(
            "The sampling rate of the recording is pretty high : {0}. "
            "We suggest to downsample the data using the pp.down_sample_signal function (250Hz)"
            .format(sr), "RED")

        if allow_downsampling:
            factor = int(sr / 250)
            sr = sr / factor
            utils.print_in_color(
                "Downsampling was enabled by the user. New sampling rate of the data: {0}Hz"
                .format(sr), "GREEN")
        else:
            factor = None
    else:
        factor = None

    utils.print_in_color(
        "Length of the recording: {0}s, estimated sampling rate of the system: {1}Hz"
        .format(round(x[-1]), sr), "GREEN")
    return round(x[-1]), sr, factor
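A usage sketch, assuming the csv's first column holds the timestamps in seconds (the file name and the printed numbers are illustrative):

duration, sr, factor = get_recording_duration_and_sampling_rate(
    "photometry_recording.csv", allow_downsampling=True)
# e.g. duration == 1200, sr == 250.0, factor == 48 for a 20 min recording sampled at ~12 kHz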
Example #6
def set_parameters(files, allow_downsampling=True):
    """Builds the dictionary of parameters used by the downstream analysis functions.

    Args :      files (tuple) = the photometry csv, video, automatic behavior, manual behavior and saving directory paths
                allow_downsampling (bool) = if True, photometry recordings above 250Hz may be downsampled

    Returns :   args (dict) = dictionary with all the analysis parameters
    """
    photometry_file_csv, video_file, behavior_automatic_file, behavior_manual_file, saving_directory = files  # Assigning new variables for each file

    recording_duration, recording_sampling_rate, downsampling_factor = io.get_recording_duration_and_sampling_rate(
        photometry_file_csv, allow_downsampling=allow_downsampling
    )  #Get metadata from the photometry file

    general_args = {
        "recording_sampling_rate": recording_sampling_rate,  # Sampling rate of the photometry system
        "recording_duration": recording_duration,  # The time of recording according to the photometry dataset (s)
        "smoothing_window": int(recording_sampling_rate),  # The window used to smooth the raw signal
        "moving_average_window": int(recording_sampling_rate) * 60,  # The window used to estimate the baseline of each signal
        "cropping_window": 1,  # The time to crop at the beginning and the end of the video, e.g. int(recording_sampling_rate)*30
        "down_sampling_factor_photometry": downsampling_factor,
        "lambda": 10**11,  # Lambda parameter for the asymmetric least squares smoothing algorithm
        "p": 0.01,  # p parameter for the asymmetric least squares smoothing algorithm
    }

    if video_file is not None:
        general_args["video"] = True  # Bool. True if a video is to be analyzed, otherwise False
        video_duration, video_sampling_rate = io.get_video_duration_and_framerate(video_file)  # Get metadata from the video file
        general_args["video_duration"] = video_duration  # The duration of the video (s)
        general_args["video_start"] = 0  # The start of the video (s). Change this value if you would like to skip some time at the beginning
        general_args["video_end"] = video_duration  # The end of the video (s). Change this value if you would like to skip some time at the end
        general_args["video_sampling_rate"] = video_sampling_rate  # The framerate of the video
    else:
        general_args["video"] = False  # If no video is to be analyzed
        general_args["video_sampling_rate"] = 0  # The framerate of the video

    plot_args = {
        "photometry_pp": {
            "plots_to_display": {
                "raw_data": False,
                "smoothing": False,
                "baseline_determination": False,
                "baseline_correction": False,
                "standardization": False,
                "inter-channel_regression": False,
                "channel_alignement": False,
                "dFF": True,
            },
            "regression": "Lasso",  # The algorithm to use for the inter-channel regression step
            "standardize": True,  # If True = Standardizes the signals, otherwise skips this step
            "multicursor": True,  # If True = Displays a multicursor on the plots, otherwise not (doesn't work properly)
            "purple_laser": "#8200c8",  # Hex color of the 405nm laser trace
            "blue_laser": "#0092ff",  # Hex color of the 465nm laser trace
        },
        "peri_event": {
            "normalize_heatmap": False,  # If True, normalizes the heatmap of the peri-event plot
            "graph_distance_pre": 10,  # Graph distance before the event started (s)
            "graph_distance_post": 10,  # Graph distance after the event started (s)
            "graph_auc_pre": 2,  # Distance used to compute the area under the curve before the event started (s)
            "graph_auc_post": 2,  # Distance used to compute the area under the curve after the event started (s)
            "resample_graph": recording_sampling_rate,
            "resample_heatmap": recording_sampling_rate / 100,
            "style": "individual",  # The style of the peri-event plot. "individual" overlays each individual trace with the average. "average" only displays the average.
            "individual_color": False,  # If True, assigns a different color to each individual trace (if style = "individual"), otherwise every trace is gray
        },
        "video_photometry": {
            "display_threshold": int(5 * general_args["video_sampling_rate"]),
            "plot_acceleration": general_args["recording_sampling_rate"],
            "global_acceleration": 5,
            "resize_video": 1.5,
            "live_plot_fps": 10.,
        },
        "lw": 1.,  # Linewidth of the traces in the plots
        "fsl": 6.,  # Fontsize of the labels in the plots
        "fst": 8.,  # Fontsize of the titles in the plots
        "save": True,
        "save_dir": saving_directory,  # Directory where the plots will be saved
        "extension": "png",  # Extension of the saved plots
    }

    behavioral_segmentation_args = {
        "peak_merging_distance": 7.,  #Peak Merging Distance
        "minimal_bout_length": 0,  #Minimal bout length (s)
    }

    args = {**general_args, **behavioral_segmentation_args, **plot_args}

    print("\n")

    utils.print_in_color("Parameters loaded successfully", "GREEN")

    print("\n")

    utils.print_in_color(
        "If you would like to change some of the parameters, you can directly modify them in "
        "the 'parameters.py' file, or change them by calling: args['arg'] = new_value, with "
        "arg corresponding to the argument you want to change and new_value its new value",
        "GREEN")

    return args
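A usage sketch, assuming the five paths are packed in the order the function unpacks them (the file names are illustrative); individual parameters can then be overridden as the printed message describes:

files = ("photometry.csv", "video.mp4", None, "manual_behavior.xlsx", "./results")
args = set_parameters(files, allow_downsampling=True)
args["behavior_to_segment"] = "grooming"   # add or override parameters after loading
args["peak_merging_distance"] = 5.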
Example #7
def peri_event_bar_plot(data_around_major_bouts, **kwargs):
    """Function that compares the area under the curve (AUC) before and after the intitation of the behavior.
    The results are summarized in a bar plot showing the AUC before and after initiation of the behavior.
    
    Args :  data_around_major_bouts (arr) = list of the pre-processed photometry data
            kwargs (dict) = dictionnary with additional parameters
    """

    sampling_rate = int(kwargs["recording_sampling_rate"])
    auc_pre = kwargs["peri_event"]["graph_auc_pre"]
    auc_post = kwargs["peri_event"]["graph_auc_post"]

    time_before = np.linspace(0, auc_pre, auc_pre * sampling_rate)
    data_before = data_around_major_bouts[:, 0:auc_pre * sampling_rate]

    time_after = np.linspace(0, auc_post, auc_post * sampling_rate)
    data_after = data_around_major_bouts[:, (auc_pre + 1) * sampling_rate:
                                         (auc_pre + 1 + auc_post) * sampling_rate]

    all_AUC1 = [auc(time_before, i) for i in data_before]
    AUC1_mean = auc(time_before, np.mean(data_before, axis=0))
    AUC1_std = auc(time_before, np.std(data_before, axis=0))

    all_AUC2 = [auc(time_after, i) for i in data_after]
    AUC2_mean = auc(time_after, np.mean(data_after, axis=0))
    AUC2_std = auc(time_after, np.std(data_after, axis=0))

    AUC_mean = [AUC1_mean, AUC2_mean]
    AUC_std = [AUC1_std, AUC2_std]

    stat, pval = stats.ttest_ind(all_AUC1, all_AUC2, equal_var=False)

    fig = plt.figure(figsize=(5, 5), dpi=200.)
    ax0 = plt.subplot(1, 1, 1)

    plot = ax0.bar(
        np.arange(len(AUC_mean)),
        AUC_mean,
        yerr=AUC_std,
        error_kw={
            "elinewidth": 1,
            "solid_capstyle": "projecting",
            "capsize": 5,
            "capthick": 1,
        },
        color=("#81dafc", "#ff6961"),
        edgecolor="black",
        linewidth=1.,
        alpha=0.8,
        # zorder=1,
    )

    scat = sns.swarmplot(
        np.concatenate([np.full_like(all_AUC1, 0),
                        np.full_like(all_AUC2, 1)]),
        np.concatenate([all_AUC1, all_AUC2]),
        ax=ax0,
        s=10,
        palette=("#81dafc", "#ff6961"),
        edgecolor="black",
        linewidth=1.,
        alpha=0.8,
        # zorder = 0,
    )

    y_max = max([max(all_AUC1), max(all_AUC2)
                 ]) + max([max(all_AUC1), max(all_AUC2)]) * 0.1
    y_min = min([min(all_AUC1), min(all_AUC2)
                 ]) + min([min(all_AUC1), min(all_AUC2)]) * 0.1
    ax0.plot((0, 0), (y_max + y_max * 0.05, y_max + y_max * 0.1),
             lw=1,
             color="black")
    ax0.plot((1, 1), (y_max + y_max * 0.05, y_max + y_max * 0.1),
             lw=1,
             color="black")
    ax0.plot((0, 1), (y_max + y_max * 0.1, y_max + y_max * 0.1),
             lw=1,
             color="black")

    utils.print_in_color(
        "The p-value comparing the AUC before and after behavioral initiation is: {0}"
        .format(pval), "GREEN")

    if pval > 0.05:
        ax0.text(0.5, y_max + y_max * 0.1, "n.s.")
    elif 0.05 >= pval > 0.01:
        ax0.text(0.5, y_max + y_max * 0.1, "*")
    elif 0.01 >= pval > 0.001:
        ax0.text(0.5, y_max + y_max * 0.1, "**")
    elif 0.001 >= pval > 0.0001:
        ax0.text(0.5, y_max + y_max * 0.1, "***")
    elif pval <= 0.0001:
        ax0.text(0.5, y_max + y_max * 0.1, "****")

    ax0.set_yticks(np.arange(round(y_min), round(y_max + y_max * 0.3) + 2, 1))
    ax0.set_yticklabels(np.arange(round(y_min),
                                  round(y_max + y_max * 0.3) + 2, 1),
                        fontsize=kwargs["fsl"])
    ax0.plot((-0.5, 1.5), (0, 0), lw=1, color="black")
    ax0.set_xticks([0, 1])
    ax0.set_xticklabels([
        "Before initiating {0}".format(kwargs["behavior_to_segment"]),
        "After initiating {0}".format(kwargs["behavior_to_segment"])
    ],
                        fontsize=kwargs["fsl"])

    ax0.set_ylim(y_min + y_min * 0.5, y_max + y_max * 0.5)
    ax0.set_ylabel("Area under the curve (AUC)", fontsize=kwargs["fsl"])
    ax0.set_title("Before vs After initiation of {0} Response Changes".format(
        kwargs["behavior_to_segment"]),
                  fontsize=kwargs["fst"])

    fig.tight_layout()

    if kwargs["save"]:
        plt.savefig(os.path.join(kwargs["save_dir"],
                                 "AUC.{0}".format(kwargs["extension"])),
                    dpi=200.,
                    bbox_inches='tight')
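A usage sketch, assuming args comes from set_parameters above (with behavior_to_segment added), auc is sklearn.metrics.auc and data_around_major_bouts is an (n_bouts, n_samples) array of peri-event traces spanning at least graph_auc_pre + 1 + graph_auc_post seconds (the synthetic data is illustrative):

import numpy as np

sr = int(args["recording_sampling_rate"])
n_samples = (args["peri_event"]["graph_auc_pre"] + 1 + args["peri_event"]["graph_auc_post"]) * sr
data_around_major_bouts = np.random.randn(12, n_samples)  # 12 fake peri-event traces
peri_event_bar_plot(data_around_major_bouts, **args)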
Example #8
def standardization(x, isosbestic, calcium, **kwargs):
    """Function that performs the standardization of the corrected 
    signals and displays it in a plot.
    
    Args :      x (arr) = The time data in X
                isosbestic (arr) = The baseline corrected isosbestic signal
                calcium (arr) = The baseline corrected calcium signal
                kwargs (dict) = Dictionary with the parameters

    Returns :   isosbestic_standardized (arr) = The standardized isosbestic signal
                calcium_standardized (arr) = The standardized calcium signal
    """
    
    if kwargs["photometry_pp"]["standardize"]:
        print("\nStarting standardization for Isosbestic and Calcium signals !")
    
        isosbestic_standardized = (isosbestic - np.median(isosbestic)) / np.std(isosbestic)  # standardization correction for isosbestic
        calcium_standardized = (calcium - np.median(calcium)) / np.std(calcium)  # standardization for calcium
        
        x_max = x[-1]
    
        if kwargs["photometry_pp"]["plots_to_display"]["standardization"]:
            xticks, xticklabels, unit = utils.generate_xticks_and_labels(x_max)
                    
            fig = plt.figure(figsize=(10, 5), dpi=200.)
            ax0 = plt.subplot(211)
            p, = ax0.plot(x, isosbestic_standardized, alpha=0.8, c=kwargs["photometry_pp"]["purple_laser"], lw=kwargs["lw"])
            ax0.plot((0, x[-1]), (0, 0), "--", color="black", lw=kwargs["lw"]) #Creates a horizontal dashed line at y = 0 to signal the baseline
            ax0.set_xticks(xticks)
            ax0.set_xticklabels(xticklabels, fontsize=kwargs["fsl"])
            ax0.set_xlim(0, x_max)
            y_min, y_max, round_factor = utils.generate_yticks(isosbestic_standardized, 0.1)
            ax0.set_yticks(np.arange(y_min, y_max+round_factor, round_factor))
            ax0.set_yticklabels(["{:.0f}".format(i) for i in np.arange(y_min, y_max+round_factor, round_factor)], fontsize=kwargs["fsl"])
            ax0.set_ylim(y_min, y_max)
            ax0.set_ylabel("z-score", fontsize=kwargs["fsl"])
            ax0.legend(handles=[p], labels=["isosbestic"], loc=2, fontsize=kwargs["fsl"])
            ax0.set_title("Standardization of Isosbestic and Calcium signals", fontsize=kwargs["fst"])
            ax0.tick_params(axis='both', which='major', labelsize=kwargs["fsl"])
            
            ax1 = plt.subplot(212, sharex=ax0)
            b, = ax1.plot(x, calcium_standardized, alpha=0.8, c=kwargs["photometry_pp"]["blue_laser"], lw=kwargs["lw"])
            ax1.plot((0, x[-1]), (0, 0), "--", color="black", lw=kwargs["lw"]) #Creates a horizontal dashed line at y = 0 to signal the baseline
            ax1.set_xticks(xticks)
            ax1.set_xticklabels(xticklabels, fontsize=kwargs["fsl"])
            ax1.set_xlim(0, x_max)
            y_min, y_max, round_factor = utils.generate_yticks(calcium_standardized, 0.1)
            ax1.set_yticks(np.arange(y_min, y_max+round_factor, round_factor))
            ax1.set_yticklabels(["{:.0f}".format(i) for i in np.arange(y_min, y_max+round_factor, round_factor)], fontsize=kwargs["fsl"])
            ax1.set_ylim(y_min, y_max)
            ax1.set_ylabel("z-score", fontsize=kwargs["fsl"])
            ax1.legend(handles=[b], labels=["calcium"], loc=2, fontsize=kwargs["fsl"])
            ax1.set_xlabel("Time ({0})".format(unit), fontsize=kwargs["fsl"])
            ax1.tick_params(axis='both', which='major', labelsize=kwargs["fsl"])
            
            plt.tight_layout()
            
            if kwargs["photometry_pp"]["multicursor"]:
                multi = MultiCursor(fig.canvas, [ax0, ax1], color='r', lw=1, vertOn=[ax0, ax1])  # keep a reference so the cursor widget is not garbage collected
            if kwargs["save"]:
                plt.savefig(os.path.join(kwargs["save_dir"], "Standardization.{0}".format(kwargs["extension"])), dpi=200.)
    else:
        utils.print_in_color("\nThe standardization step in skipped."
                             " Parameter plot_args['photometry_pp']['standardize'] == False", "RED")
        isosbestic_standardized = isosbestic  # standardization skipped
        calcium_standardized = calcium  # standardization skipped
        
    return isosbestic_standardized, calcium_standardized
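A minimal usage sketch, assuming args comes from set_parameters above and the two signals are already baseline-corrected (the synthetic traces are illustrative):

import numpy as np

sr = int(args["recording_sampling_rate"])
x = np.arange(0, 60, 1 / sr)                       # 60 s of time data
isosbestic = 1.0 + 0.1 * np.random.randn(len(x))   # fake corrected isosbestic trace
calcium = 1.0 + 0.3 * np.random.randn(len(x))      # fake corrected calcium trace
iso_z, ca_z = standardization(x, isosbestic, calcium, **args)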