def gabor_Uori_decoding_sess123(sessions,
                                analyspar,
                                sesspar,
                                stimpar,
                                logregpar,
                                permpar,
                                figpar,
                                seed=None,
                                parallel=False):
    """
    gabor_Uori_decoding_sess123(sessions, analyspar, sesspar, stimpar, 
                                logregpar, permpar, figpar)

    Runs decoding analyses (U orientations).
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - logregpar (LogRegPar): 
            named tuple containing logistic regression parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    if logregpar.comp != "Uori":
        raise ValueError("logregpar.comp should be Uori.")

    # ctrl doesn't apply to U orientation decoding
    logregpar = sess_ntuple_util.get_modif_ntuple(logregpar, "ctrl", False)

    gab_ori = sess_gen_util.filter_gab_oris("U", stimpar.gab_ori)
    stimpar = sess_ntuple_util.get_modif_ntuple(stimpar, "gab_ori", gab_ori)

    gabor_decoding_sess123(sessions,
                           analyspar=analyspar,
                           sesspar=sesspar,
                           stimpar=stimpar,
                           logregpar=logregpar,
                           permpar=permpar,
                           figpar=figpar,
                           seed=seed,
                           parallel=parallel)
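The calls to sess_ntuple_util.get_modif_ntuple above return updated copies of the parameter named tuples rather than mutating them. A minimal sketch of that pattern using a plain collections.namedtuple (demo names only, not the project's own helper):

from collections import namedtuple

# demo stand-in for the project's StimPar named tuple
StimParDemo = namedtuple("StimParDemo", ["stimtype", "gab_ori"])
stimpar_demo = StimParDemo(stimtype="gabors", gab_ori=[0, 45, 90])

# named tuples are immutable: field updates produce a new tuple
stimpar_demo = stimpar_demo._replace(gab_ori=[90])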
Example 2
def set_multcomp(permpar, sess_df, CIs=True, pairs=True, factor=1):
    """
    set_multcomp(permpar)

    Returns permpar updated with the number of comparisons computed from the 
    sessions.

    Required args:
        - permpar (PermPar or dict): 
            named tuple containing permutation parameters
        - sess_df (pd.DataFrame):
            dataframe containing session information, including the following 
            keys: "sess_ns", "lines", "planes"
    
    Optional args:
        - CIs (bool):
            include comparisons to CIs
            default: True
        - pairs (bool):
            include paired comparisons
            default: True
        - factor (int): 
            additional factor by which to multiply the number of comparisons
            default: 1

    Returns:
        - permpar (PermPar): 
            updated permutation parameter named tuple
    """

    if isinstance(permpar, dict):
        permpar = sess_ntuple_util.init_permpar(**permpar)

    n_comps = 0    
    for _, sess_df_grp in sess_df.groupby(["lines", "planes"]):
        n_sess = len(sess_df_grp)

        # sessions compared to CIs
        if CIs:
            n_comps += n_sess

        # session pair comparisons
        if pairs:
            k = 2
            if n_sess >= k:
                fact = np.math.factorial
                n_comps += fact(n_sess) / (fact(k) * fact(n_sess - k))

    # multiplied by specified factor
    n_comps *= factor

    permpar = sess_ntuple_util.get_modif_ntuple(
            permpar, "multcomp", int(n_comps)
        )

    return permpar
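The pairwise term above is the binomial coefficient C(n_sess, 2), computed here via factorials; an equivalent standalone check using only the standard library (illustrative function name):

import math

def n_pairwise_comps(n_sess, k=2):
    # number of unordered session pairs within a line/plane group: C(n_sess, k)
    return math.comb(n_sess, k) if n_sess >= k else 0

# e.g., 3 sessions in a group -> 3 paired comparisons
assert n_pairwise_comps(3) == 3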
Example 3
def set_multcomp(permpar, sessions, analyspar, consec_only=True, factor=1):
    """
    set_multcomp(permpar, sessions, analyspar)

    Returns permpar updated with the number of comparisons computed from the 
    sessions.

    Required args:
        - permpar (PermPar or dict): 
            named tuple containing permutation parameters
        - sessions (list):
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters

    Optional args:
        - consec_only (bool):
            if True, only consecutive session numbers are correlated
            default: True
        - factor (int):
            additional factor by which to multiply the number of comparisons
            default: 1

    Returns:
        - permpar (PermPar):
            updated permutation parameter named tuple
    """
    
    sess_df = misc_analys.get_check_sess_df(sessions, analyspar=analyspar)

    n_comps = 0
    for _, lp_df in sess_df.groupby(["lines", "planes"]):
        corr_ns = get_corr_pairs(lp_df, consec_only=consec_only)
        n_comps += len(corr_ns)
    
    n_comps = n_comps * factor

    permpar = sess_ntuple_util.get_modif_ntuple(permpar, "multcomp", n_comps)

    return permpar
Example 4
def run_mag_change(sessions,
                   analysis,
                   seed,
                   analyspar,
                   sesspar,
                   stimpar,
                   permpar,
                   quantpar,
                   figpar,
                   datatype="roi"):
    """
    run_mag_change(sessions, analysis, seed, analyspar, sesspar, stimpar, 
                   permpar, quantpar, figpar)

    Calculates and plots the magnitude of change in activity of ROIs between 
    the first and last quantile for expected vs unexpected sequences.
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list)      : list of Session objects
        - analysis (str)       : analysis type (e.g., "m")
        - seed (int)           : seed value to use. (-1 treated as None) 
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - permpar (PermPar)    : named tuple containing permutation parameters
        - quantpar (QuantPar)  : named tuple containing quantile analysis 
                                 parameters
        - figpar (dict)        : dictionary containing figure parameters   

    Optional args:
        - datatype (str): type of data (e.g., "roi", "run")
                          default: "roi"
    """

    sessstr_pr = sess_str_util.sess_par_str(sesspar.sess_n, stimpar.stimtype,
                                            sesspar.plane, stimpar.visflow_dir,
                                            stimpar.visflow_size, stimpar.gabk,
                                            "print")
    dendstr_pr = sess_str_util.dend_par_str(analyspar.dend, sesspar.plane,
                                            datatype, "print")

    datastr = sess_str_util.datatype_par_str(datatype)

    logger.info(
        f"Calculating and plotting the magnitude changes in {datastr} "
        f"activity across quantiles \n({sessstr_pr}{dendstr_pr}).",
        extra={"spacing": "\n"})

    if permpar.multcomp:
        permpar = sess_ntuple_util.get_modif_ntuple(permpar, "multcomp",
                                                    len(sessions))

    # get full data: session x unexp x quants of interest x [ROI x seq]
    integ_info = quant_analys.trace_stats_by_qu_sess(sessions,
                                                     analyspar,
                                                     stimpar,
                                                     quantpar.n_quants,
                                                     quantpar.qu_idx,
                                                     by_exp=True,
                                                     integ=True,
                                                     ret_arr=True,
                                                     datatype=datatype)
    all_counts = integ_info[-2]
    qu_data = integ_info[-1]

    # extract session info
    mouse_ns = [sess.mouse_n for sess in sessions]
    lines = [sess.line for sess in sessions]

    if analyspar.rem_bad:
        nanpol = None
    else:
        nanpol = "omit"

    seed = rand_util.seed_all(seed, "cpu", log_seed=False)

    mags = quant_analys.qu_mags(qu_data,
                                permpar,
                                mouse_ns,
                                lines,
                                analyspar.stats,
                                analyspar.error,
                                nanpol=nanpol,
                                op_qu="diff",
                                op_unexp="diff")

    # convert mags items to list
    mags = copy.deepcopy(mags)
    mags["all_counts"] = all_counts
    for key in ["mag_st", "L2", "mag_rel_th", "L2_rel_th"]:
        mags[key] = mags[key].tolist()

    sess_info = sess_gen_util.get_sess_info(sessions,
                                            analyspar.fluor,
                                            incl_roi=(datatype == "roi"),
                                            rem_bad=analyspar.rem_bad)

    extrapar = {"analysis": analysis, "datatype": datatype, "seed": seed}

    info = {
        "analyspar": analyspar._asdict(),
        "sesspar": sesspar._asdict(),
        "stimpar": stimpar._asdict(),
        "extrapar": extrapar,
        "permpar": permpar._asdict(),
        "quantpar": quantpar._asdict(),
        "mags": mags,
        "sess_info": sess_info
    }

    fulldir, savename = gen_plots.plot_mag_change(figpar=figpar, **info)

    file_util.saveinfo(info, savename, fulldir, "json")
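The info dictionary above is built from named tuples via ._asdict() before being saved to JSON; a minimal sketch of that serialization step with the standard library (demo names and values only):

import json
from collections import namedtuple

# demo stand-in for one of the parameter named tuples
QuantParDemo = namedtuple("QuantParDemo", ["n_quants", "qu_idx"])
quantpar_demo = QuantParDemo(n_quants=4, qu_idx=[0, 3])

info_demo = {"quantpar": quantpar_demo._asdict(), "extrapar": {"seed": 0}}
print(json.dumps(info_demo, indent=2))  # named tuples serialize as dicts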
Example 5
def run_traces_by_qu_lock_sess(sessions,
                               analysis,
                               seed,
                               analyspar,
                               sesspar,
                               stimpar,
                               quantpar,
                               figpar,
                               datatype="roi"):
    """
    run_traces_by_qu_lock_sess(sessions, analysis, seed, analyspar, sesspar, 
                               stimpar, quantpar, figpar)

    Retrieves trace statistics by session x quantile at the transition of
    expected to unexpected sequences (or v.v.) and plots traces across ROIs by 
    quantile with each session in a separate subplot.
    
    Also runs analysis for one quantile (full data) with different unexpected 
    lengths grouped separately.
    
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list)      : list of Session objects
        - analysis (str)       : analysis type (e.g., "l")
        - seed (int)           : seed value to use. (-1 treated as None)
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - quantpar (QuantPar)  : named tuple containing quantile analysis 
                                 parameters
        - figpar (dict)        : dictionary containing figure parameters
    
    Optional args:
        - datatype (str): type of data (e.g., "roi", "run")
                          default: "roi"

    """

    sessstr_pr = sess_str_util.sess_par_str(sesspar.sess_n, stimpar.stimtype,
                                            sesspar.plane, stimpar.visflow_dir,
                                            stimpar.visflow_size, stimpar.gabk,
                                            "print")
    dendstr_pr = sess_str_util.dend_par_str(analyspar.dend, sesspar.plane,
                                            datatype, "print")

    datastr = sess_str_util.datatype_par_str(datatype)

    logger.info(
        f"Analysing and plotting unexpected vs expected {datastr} "
        f"traces locked to unexpected onset by quantile ({quantpar.n_quants}) "
        f"\n({sessstr_pr}{dendstr_pr}).",
        extra={"spacing": "\n"})

    seed = rand_util.seed_all(seed, "cpu", log_seed=False)

    # create quantpar variants: one quantile (full data) and all quantiles
    quantpar_one = sess_ntuple_util.init_quantpar(1, 0)
    n_quants = quantpar.n_quants
    quantpar_mult = sess_ntuple_util.init_quantpar(n_quants, "all")

    if stimpar.stimtype == "visflow":
        pre_post = [2.0, 6.0]
    elif stimpar.stimtype == "gabors":
        pre_post = [2.0, 8.0]
    else:
        gen_util.accepted_values_error("stimpar.stimtype", stimpar.stimtype,
                                       ["visflow", "gabors"])
    logger.warning("Setting pre to {}s and post to {}s.".format(*pre_post))

    stimpar = sess_ntuple_util.get_modif_ntuple(stimpar, ["pre", "post"],
                                                pre_post)

    figpar = copy.deepcopy(figpar)
    if figpar["save"]["use_dt"] is None:
        figpar["save"]["use_dt"] = gen_util.create_time_str()

    for baseline in [None, stimpar.pre]:
        basestr_pr = sess_str_util.base_par_str(baseline, "print")
        for quantpar in [quantpar_one, quantpar_mult]:
            locks = ["unexp", "exp"]
            if quantpar.n_quants == 1:
                locks.append("unexp_split")
            # get the stats (all) separating by session and quantiles
            for lock in locks:
                logger.info(
                    f"{quantpar.n_quants} quant, {lock} lock{basestr_pr}",
                    extra={"spacing": "\n"})
                if lock == "unexp_split":
                    trace_info = quant_analys.trace_stats_by_exp_len_sess(
                        sessions,
                        analyspar,
                        stimpar,
                        quantpar.n_quants,
                        quantpar.qu_idx,
                        byroi=False,
                        nan_empty=True,
                        baseline=baseline,
                        datatype=datatype)
                else:
                    trace_info = quant_analys.trace_stats_by_qu_sess(
                        sessions,
                        analyspar,
                        stimpar,
                        quantpar.n_quants,
                        quantpar.qu_idx,
                        byroi=False,
                        lock=lock,
                        nan_empty=True,
                        baseline=baseline,
                        datatype=datatype)

                # for comparison, locking to middle of expected sample (1 quant)
                exp_samp = quant_analys.trace_stats_by_qu_sess(
                    sessions,
                    analyspar,
                    stimpar,
                    quantpar_one.n_quants,
                    quantpar_one.qu_idx,
                    byroi=False,
                    lock="exp_samp",
                    nan_empty=True,
                    baseline=baseline,
                    datatype=datatype)

                extrapar = {
                    "analysis": analysis,
                    "datatype": datatype,
                    "seed": seed,
                }

                xrans = [xran.tolist() for xran in trace_info[0]]
                all_stats = [sessst.tolist() for sessst in trace_info[1]]
                exp_stats = [expst.tolist() for expst in exp_samp[1]]
                trace_stats = {
                    "xrans": xrans,
                    "all_stats": all_stats,
                    "all_counts": trace_info[2],
                    "lock": lock,
                    "baseline": baseline,
                    "exp_stats": exp_stats,
                    "exp_counts": exp_samp[2]
                }

                if lock == "unexp_split":
                    trace_stats["unexp_lens"] = trace_info[3]

                sess_info = sess_gen_util.get_sess_info(
                    sessions,
                    analyspar.fluor,
                    incl_roi=(datatype == "roi"),
                    rem_bad=analyspar.rem_bad)

                info = {
                    "analyspar": analyspar._asdict(),
                    "sesspar": sesspar._asdict(),
                    "stimpar": stimpar._asdict(),
                    "quantpar": quantpar._asdict(),
                    "extrapar": extrapar,
                    "sess_info": sess_info,
                    "trace_stats": trace_stats
                }

                fulldir, savename = gen_plots.plot_traces_by_qu_lock_sess(
                    figpar=figpar, **info)
                file_util.saveinfo(info, savename, fulldir, "json")
Example 6
def get_sess_ex_traces(sess, analyspar, stimpar, basepar, rolling_win=4):
    """
    get_sess_ex_traces(sess, analyspar, stimpar, basepar)

    Returns example traces selected for the session, based on SNR and Gabor 
    response pattern criteria. 

    Criteria:
    - Above median SNR
    - Sequence response correlation above 75th percentile.
    - Mean sequence standard deviation above 75th percentile.
    - Mean sequence skew above 75th percentile.

    Required args:
        - sess (Session):
            Session object
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - basepar (BasePar): 
            named tuple containing baseline parameters

    Optional args:
        - rolling_win (int):
            window to use in rolling mean over individual trial traces before 
            computing correlation between trials (None for no smoothing)
            default: 4 

    Returns:
        - selected_roi_data (dict):
            ["time_values"] (1D array): values for each frame, in seconds
                (only 0 to stimpar.post, unless split is "by_exp")
            ["roi_ns"] (1D array): selected ROI numbers
            ["roi_traces_sm"] (3D array): selected ROI sequence traces, 
                smoothed, with dims: ROIs x seq x frames
            ["roi_trace_stats"] (2D array): selected ROI trace mean or median, 
                dims: ROIs x frames
    """

    nanpol = None if analyspar.rem_bad else "omit"

    if stimpar.stimtype != "gabors":
        raise NotImplementedError(
            "ROI selection criteria designed for Gabors, and based on their "
            "cyclical responses.")

    snr_analyspar = sess_ntuple_util.get_modif_ntuple(analyspar, "scale",
                                                      False)

    snrs = misc_analys.get_snr(sess, snr_analyspar, "snrs")
    snr_median = np.median(snrs)

    # identify ROIs that meet the SNR threshold
    snr_thr_rois = np.where(snrs > snr_median)[0]

    # collect all data, and compute statistics
    traces, time_values = basic_analys.get_split_data_by_sess(
        sess,
        analyspar=analyspar,
        stimpar=stimpar,
        split="by_exp",
        baseline=basepar.baseline,
    )

    traces_exp = np.asarray(traces[0])  # get expected split
    traces_exp_stat = math_util.mean_med(traces_exp,
                                         stats=analyspar.stats,
                                         axis=1,
                                         nanpol=nanpol)

    # smooth individual traces, then compute correlations
    if rolling_win is not None:
        traces_exp = math_util.rolling_mean(traces_exp, win=rolling_win)

    triu_idx = np.triu_indices(traces_exp[snr_thr_rois].shape[1], k=1)
    corr_medians = [
        np.median(np.corrcoef(roi_trace)[triu_idx])
        for roi_trace in traces_exp[snr_thr_rois]
    ]

    # calculate std and skew over trace statistics
    trace_stat_stds = np.std(traces_exp_stat[snr_thr_rois], axis=1)
    trace_stat_skews = scist.skew(traces_exp_stat[snr_thr_rois], axis=1)

    # identify ROIs that meet thresholds (from those with high enough SNR)
    std_thr = np.percentile(trace_stat_stds, 75)
    skew_thr = np.percentile(trace_stat_skews, 75)
    corr_thr = np.percentile(corr_medians, 75)

    selected_idx = np.where(
        ((trace_stat_stds > std_thr) * (corr_medians > corr_thr) *
         (trace_stat_skews > skew_thr)))[0]

    # re-index into all ROIs
    roi_ns = snr_thr_rois[selected_idx]

    selected_roi_data = {
        "time_values": time_values,
        "roi_ns": roi_ns,
        "roi_traces_sm": traces_exp[roi_ns],
        "roi_trace_stats": traces_exp_stat[roi_ns]
    }

    return selected_roi_data
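The correlation criterion above takes, per ROI, the median of the upper triangle of the trial-by-trial correlation matrix; a small numpy sketch of that step on random data (illustrative shapes):

import numpy as np

rng = np.random.default_rng(0)
roi_trace = rng.normal(size=(20, 60))  # one ROI: seq x frames

corr_mat = np.corrcoef(roi_trace)                # seq x seq correlations
triu_idx = np.triu_indices(len(roi_trace), k=1)  # unique trial pairs
corr_median = np.median(corr_mat[triu_idx])      # per-ROI criterion value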
Example 7
def unexp_resp_stimulus_comp_sess1v3(sessions,
                                     analyspar,
                                     sesspar,
                                     stimpar,
                                     permpar,
                                     figpar,
                                     seed=None,
                                     parallel=False):
    """
    unexp_resp_stimulus_comp_sess1v3(sessions, analyspar, sesspar, stimpar, 
                                     permpar, figpar)

    Retrieves changes in tracked ROI responses to unexpected sequences for 
    Gabors vs visual flow stimuli.
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    logger.info(
        ("Compiling changes in unexpected responses to Gabor vs visual "
         "flow stimuli."),
        extra={"spacing": "\n"})

    if analyspar.scale:
        raise ValueError("analyspar.scale should be set to False.")

    # calculate multiple comparisons
    dummy_df = misc_analys.get_check_sess_df(
        sessions, None, analyspar).drop_duplicates(subset=["lines", "planes"])
    multcomp = len(dummy_df) + 1
    permpar = sess_ntuple_util.get_modif_ntuple(permpar, "multcomp", multcomp)

    comp_sess = [1, 3]
    datatype = "rel_unexp_resp"
    rel_sess = 1
    pop_stats = True
    unexp_comp_df = stim_analys.get_stim_stats_df(
        sessions,
        analyspar=analyspar,
        stimpar=stimpar,
        permpar=permpar,
        comp_sess=comp_sess,
        datatype=datatype,
        rel_sess=rel_sess,
        pop_stats=pop_stats,
        randst=seed,
        parallel=parallel,
    )

    extrapar = {
        "comp_sess": comp_sess,
        "datatype": datatype,
        "rel_sess": rel_sess,
        "pop_stats": pop_stats,
        "seed": seed,
    }

    info = {
        "analyspar": analyspar._asdict(),
        "sesspar": sesspar._asdict(),
        "stimpar": stimpar._asdict(),
        "permpar": permpar._asdict(),
        "extrapar": extrapar,
        "unexp_comp_df": unexp_comp_df.to_dict(),
    }

    helper_fcts.plot_save_all(info, figpar)
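The multiple-comparisons count above is the number of unique line/plane combinations in the session dataframe, plus one; a small pandas sketch of that count (demo dataframe with illustrative values):

import pandas as pd

sess_df_demo = pd.DataFrame({
    "lines":  ["lineA", "lineA", "lineB", "lineB"],
    "planes": ["soma", "dend", "soma", "dend"],
})

# one comparison per unique line/plane combination, plus one
n_unique = len(sess_df_demo.drop_duplicates(subset=["lines", "planes"]))
multcomp_demo = n_unique + 1  # 5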
Example 8
def tracked_roi_usis_stimulus_comp_sess1v3(sessions,
                                           analyspar,
                                           sesspar,
                                           stimpar,
                                           basepar,
                                           idxpar,
                                           permpar,
                                           figpar,
                                           seed=None,
                                           parallel=False):
    """
    tracked_roi_usis_stimulus_comp_sess1v3(sessions, analyspar, sesspar, 
                                           stimpar, basepar, idxpar, permpar,
                                           figpar)

    Retrieves changes in tracked ROI USIs for Gabors vs visual flow stimuli.
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - basepar (BasePar): 
            named tuple containing baseline parameters
        - idxpar (IdxPar): 
            named tuple containing index parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    logger.info(
        ("Compiling changes in ROI USIs for Gabors vs visual flow stimuli."),
        extra={"spacing": "\n"})

    if not analyspar.tracked:
        raise ValueError("analyspar.tracked should be set to True.")

    # remove incomplete session series and warn
    sessions = misc_analys.check_sessions_complete(sessions)

    # calculate multiple comparisons
    dummy_df = misc_analys.get_check_sess_df(
        sessions, None, analyspar).drop_duplicates(subset=["lines", "planes"])
    multcomp = len(dummy_df) + 1
    permpar = sess_ntuple_util.get_modif_ntuple(permpar, "multcomp", multcomp)

    comp_sess = [1, 3]
    datatype = "usis"
    pop_stats = True
    usi_comp_df = stim_analys.get_stim_stats_df(
        sessions,
        analyspar=analyspar,
        stimpar=stimpar,
        basepar=basepar,
        idxpar=idxpar,
        permpar=permpar,
        comp_sess=comp_sess,
        datatype=datatype,
        pop_stats=pop_stats,
        randst=seed,
        parallel=parallel,
    )

    extrapar = {
        "comp_sess": comp_sess,
        "datatype": datatype,
        "pop_stats": pop_stats,
        "seed": seed,
    }

    info = {
        "analyspar": analyspar._asdict(),
        "sesspar": sesspar._asdict(),
        "stimpar": stimpar._asdict(),
        "basepar": basepar._asdict(),
        "idxpar": idxpar._asdict(),
        "permpar": permpar._asdict(),
        "extrapar": extrapar,
        "usi_comp_df": usi_comp_df.to_dict(),
    }

    helper_fcts.plot_save_all(info, figpar)
Example 9
def gabor_corrs_sess123_comps(sessions,
                              analyspar,
                              sesspar,
                              stimpar,
                              basepar,
                              idxpar,
                              permpar,
                              figpar,
                              seed=None,
                              parallel=False):
    """
    gabor_corrs_sess123_comps(sessions, analyspar, sesspar, stimpar, basepar, 
                              idxpar, permpar, figpar)

    Retrieves tracked ROI Gabor USI correlations for session 1 to 3.
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - basepar (BasePar): 
            named tuple containing baseline parameters
        - idxpar (IdxPar): 
            named tuple containing index parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    logger.info(
        "Compiling tracked ROI Gabor USI correlations for sessions 1 to 3.",
        extra={"spacing": "\n"})

    if not analyspar.tracked:
        raise ValueError("analyspar.tracked should be set to True.")

    # remove incomplete session series and warn
    sessions = misc_analys.check_sessions_complete(sessions)

    consec_only = True
    permpar = corr_analys.set_multcomp(permpar,
                                       sessions,
                                       analyspar,
                                       consec_only=consec_only)

    permute = PERMUTE
    corr_type = CORR_TYPE
    sig_only = SIG_ONLY

    if "R_sqr" in corr_type:
        permpar = sess_ntuple_util.get_modif_ntuple(permpar, "tails", "hi")

    idx_corr_df = corr_analys.get_idx_corrs_df(sessions,
                                               analyspar=analyspar,
                                               stimpar=stimpar,
                                               basepar=basepar,
                                               idxpar=idxpar,
                                               permpar=permpar,
                                               consec_only=consec_only,
                                               permute=permute,
                                               corr_type=corr_type,
                                               sig_only=sig_only,
                                               randst=seed,
                                               parallel=parallel)

    extrapar = {
        "consec_only": consec_only,
        "corr_type": corr_type,
        "permute": permute,
        "seed": seed,
        "sig_only": sig_only,
    }

    info = {
        "analyspar": analyspar._asdict(),
        "sesspar": sesspar._asdict(),
        "stimpar": stimpar._asdict(),
        "basepar": basepar._asdict(),
        "idxpar": idxpar._asdict(),
        "permpar": permpar._asdict(),
        "extrapar": extrapar,
        "idx_corr_df": idx_corr_df.to_dict()
    }

    helper_fcts.plot_save_all(info, figpar)
Example 10
def run_mag_permute(all_data_perm, act_mag_me_rel, act_L2_rel, n_exps, permpar, 
                    op_qu="diff", op_grp="diff", stats="mean", nanpol=None):
    """
    run_mag_permute(all_data_perm, act_mag_me_rel, act_L2_rel, n_exps, permpar)

    Returns the results of a permutation analysis of difference or ratio 
    between 2 quantiles of the magnitude change or L2 norm between expected and 
    unexpected activity.

    Required args:
        - all_data_perm (2D array): Data from both groups for permutation, 
                                    structured as:
                                        ROI x seqs
        - act_mag_me_rel (num)    : Real mean/median magnitude difference
                                    between quantiles
        - act_L2_rel (num)        : Real L2 difference between quantiles
        - n_exps (list)           : List of number of expected sequences in
                                    each quantile
        - permpar (PermPar)       : named tuple containing permutation 
                                    parameters
    
    Optional args:
        - op_qu (str) : Operation to use in comparing the last vs first 
                        quantile ("diff" or "ratio")
                        default: "diff"       
        - op_grp (str): Operation to use in comparing groups 
                        (e.g., unexpected vs expected data) ("diff" or "ratio")
                        default: "diff" 
        - stats (str) : Statistic to take across group sequences, and then 
                        across magnitude differences ("mean" or "median")
                        default: "mean"
        - nanpol (str): Policy for NaNs, "omit" or None when taking statistics
                        default: None
    
    Returns:
        - signif (list) : list of significance results ("hi", "lo" or "no") for 
                          magnitude, L2
        - threshs (list): list of thresholds (1 if 1-tailed analysis, 
                          2 if 2-tailed) for magnitude, L2
    """

    if permpar.multcomp:
        permpar = sess_ntuple_util.get_modif_ntuple(
            permpar, ["multcomp", "p_val"], 
            [False, permpar.p_val / permpar.multcomp]
            )

    if len(all_data_perm) != 2 or len(n_exps) != 2:
        raise ValueError("all_data_perm and n_exps must have length of 2.")

    all_rand_vals = [] # qu x grp x ROI x perms
    # for each quantile
    for q, perm_data in enumerate(all_data_perm):
        qu_vals = rand_util.permute_diff_ratio(
            perm_data, n_exps[q], permpar.n_perms, stats, nanpol=nanpol, 
            op="none")
        all_rand_vals.append(qu_vals)

    all_rand_vals = np.asarray(all_rand_vals)
    # get absolute change stats and retain mean/median only
    rand_mag_me = math_util.calc_mag_change(
        all_rand_vals, 0, 2, order="stats", op=op_qu, stats=stats)[0]
    rand_L2 = math_util.calc_mag_change(all_rand_vals, 0, 2, order=2, op=op_qu)

    # take diff/ratio between grps
    rand_mag_rel = math_util.calc_op(rand_mag_me, op_grp, dim=0)
    rand_L2_rel  = math_util.calc_op(rand_L2, op_grp, dim=0)

    # check significance (returns list although only one result tested)
    mag_sign, mag_th = rand_util.id_elem(
        rand_mag_rel, act_mag_me_rel, permpar.tails, permpar.p_val, ret_th=True)
    L2_sign, L2_th   = rand_util.id_elem(
        rand_L2_rel, act_L2_rel, permpar.tails, permpar.p_val, ret_th=True)

    mag_signif, L2_signif = ["no", "no"]
    if str(permpar.tails) == "2":
        if len(mag_sign[0]) == 1:
            mag_signif = "lo"
        elif len(mag_sign[1]) == 1:
            mag_signif = "hi"
        if len(L2_sign[0]) == 1:
            L2_signif = "lo"
        elif len(L2_sign[1]) == 1:
            L2_signif = "hi"
    elif permpar.tails in ["lo", "hi"]:
        if len(mag_sign) == 1:
            mag_signif = permpar.tails
        if len(L2_sign) == 1:
            L2_signif = permpar.tails

    signif  = [mag_signif, L2_signif]
    threshs = [mag_th[0], L2_th[0]]

    return signif, threshs
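When permpar.multcomp is set, the function above applies a Bonferroni-style correction by dividing the p-value threshold by the number of comparisons before testing; a short numeric sketch (illustrative values):

p_val, multcomp = 0.05, 6      # nominal threshold, number of comparisons
p_val_corr = p_val / multcomp  # corrected threshold used for testing (~0.0083)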
Example 11
def trace_stats_by_exp_len_sess(sessions, analyspar, stimpar, n_quants=4, 
                                 qu_idx="all", byroi=True, integ=False, 
                                 ret_arr=False, nan_empty=False, 
                                 baseline=None, datatype="roi"):
    """
    trace_stats_by_exp_len_sess(sessions, analyspar, stimpar)

    Returns trace statistics for the quantiles of interest for each
    session and unexpected length value, for the datatype of interest.

    Required args:
        - sessions (list)      : list of Session objects
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        
    Optional args:
        - n_quants (int)      : number of quantiles to divide sessions into
                                default: 4
        - qu_idx (str or list): indices of quantiles to retain
                                default: "all"
        - byroi (bool)        : If datatype is "roi", if True, returns 
                                statistics for each ROI. If False, returns 
                                statistics across ROIs.
                                default: True
        - integ (bool)        : if True, dF/F is integrated over sequences
                                default: False
        - ret_arr (bool)      : if True, data arrays are returned also
                                default: False
        - nan_empty (bool)    : if a quantile is empty, return NaN arrays 
                                (avoids an error)
                                default: False
        - baseline (num)      : number of seconds to use as baseline. If None,
                                data is not baselined.
                                default: None
        - datatype (str)      : datatype, i.e. ROIs or running
                                default: "roi"

    Returns:
        - xrans (list)            : time values for the 2p frames (None if 
                                     integ), for each session
        - all_stats (list)        : list of 2 to 5D arrays of trace data 
                                    statistics for each session, structured as:
                                        unexp_len x
                                        quantiles x
                                        stats (me, err) x
                                        (ROIs if byroi x)
                                        (frames if not integ)
        - all_counts (nested list) : list of number of sequences, 
                                     structured as:
                                        sess x unexp_len x quantiles
        - all_n_consec (list)      : unique values of number of consecutive 
                                     segments, by session  
        if ret_arr:
        - all_arrays (nested lists): list of data trace arrays, structured as:
                                     session x unexp_len x quantile 
                                     of 1 to 3D arrays: 
                                         (ROI x) sequences 
                                         (x frames if not integ)
    """

    shift_gab_segs = False
    if stimpar.stimtype == "gabors" and stimpar.gabfr not in ["any", "all"]:
        shift_gab_segs = True
        orig_gabfr = stimpar.gabfr
        stimpar = sess_ntuple_util.get_modif_ntuple(stimpar, "gabfr", "any")

    all_counts, all_stats, all_arrays, all_n_consec = [], [], [], []
    xrans = []
    for sess in sessions:
        stim = sess.get_stim(stimpar.stimtype)
        sess_counts, sess_stats, sess_arrays = [], [], []
        qu_segs, _, qu_n_consec = quant_segs(
            stim, stimpar, n_quants, qu_idx, 1, empty_ok=nan_empty, 
            by_exp_len=True)

        if shift_gab_segs: # shift to requested gabor frame
            qu_segs = [[s + orig_gabfr for s in segs] for segs in qu_segs]

        n_consec_flat   = [n for sub_ns in qu_n_consec for n in sub_ns]
        all_n_consec.append(sorted(set(n_consec_flat)))

        for n_consec in all_n_consec[-1]:
            sub_segs, sub_counts = [], []
            # retain segments with correct number of consecutive values
            for segs, ns in zip(qu_segs, qu_n_consec): 
                idx = np.where(np.asarray(ns) == n_consec)[0]
                sub_segs.append([segs[i] for i in idx])
                sub_counts.append(len(idx))
            sess_counts.append(sub_counts)
            trace_info = trace_stats_by_qu(stim, sub_segs, stimpar.pre,
                stimpar.post, analyspar, byroi=byroi, integ=integ, 
                ret_arr=ret_arr, nan_empty=nan_empty, 
                baseline=baseline, datatype=datatype)
            sess_stats.append(trace_info[1])
            if ret_arr:
                sess_arrays.append(trace_info[2])
        xrans.append(trace_info[0])
        all_counts.append(sess_counts)
        all_stats.append(np.asarray(sess_stats))
        if ret_arr:
            all_arrays.append(sess_arrays)

    if ret_arr:
        return xrans, all_stats, all_counts, all_n_consec, all_arrays
    else:
        return xrans, all_stats, all_counts, all_n_consec
Example 12
def trace_stats_by_qu_sess(sessions, analyspar, stimpar, n_quants=4, 
                           qu_idx="all", byroi=True, by_exp=False, integ=False, 
                           ret_arr=False, nan_empty=False, lock="no", 
                           baseline=None, datatype="roi", randst=None):
    """
    trace_stats_by_qu_sess(sessions, analyspar, stimpar)

    Returns trace statistics for the quantiles of interest for each
    session and unexpected value, for the datatype of interest.

    Required args:
        - sessions (list)      : list of Session objects
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        
    Optional args:
        - n_quants (int)      : number of quantiles to divide sessions into
                                default: 4
        - qu_idx (str or list): indices of quantiles to retain
                                default: "all"
        - byroi (bool)        : If datatype is "roi", if True, returns 
                                statistics for each ROI. If False, returns 
                                statistics across ROIs.
                                default: True
        - by_exp (bool)       : if True, quantiles are separated into unexpected 
                                and expected groups.
                                default: False
        - integ (bool)        : if True, dF/F is integrated over sequences
                                default: False
        - ret_arr (bool)      : if True, data arrays are returned also
                                default: False
        - nan_empty (bool)    : if a quantile is empty, return NaN arrays 
                                (avoids an error)
                                default: False
        - lock (str)          : if "unexp", "exp" or "exp_samp", only the first 
                                unexpected or expected segments are retained.
                                If "both", both unexpected and expected 
                                segments are retained (by_exp is ignored).
                                default: "no"
        - baseline (num)      : number of seconds to use as baseline. If None,
                                data is not baselined.
                                default: None
        - datatype (str)      : datatype, i.e. ROIs or running
                                default: "roi"
        - randst (int)        : random state or seed for sampling segments
                                default: None

    Returns:
        - xrans (list)             : time values for the 2p frames (None if 
                                     integ), for each session
        - all_stats (list)         : list of 2 to 5D arrays of trace data 
                                     statistics for each session, structured as:
                                         (unexp if by_exp x)
                                         quantiles x
                                         stats (me, err) x
                                         (ROIs if byroi x)
                                         (frames if not integ)
        - all_counts (nested list) : list of number of sequences, 
                                     structured as:
                                        sess 
                                        x (unexp if by_exp or lock is "both") 
                                        x quantiles
        if ret_arr:
        - all_arrays (nested lists): list of data trace arrays, structured as:
                                        session (x unexp if by_exp) x quantile 
                                        of 1 to 3D arrays: 
                                            (ROI x) sequences 
                                            (x frames if not integ)
    """

    incr_unexp_ori = False
    if stimpar.stimtype == "gabors":
        if by_exp and isinstance(stimpar.gab_ori, int):
            if stimpar.gabfr == 3:
                incr_unexp_ori = True
                warnings.warn(
                    "Incrementing orientation for unexpected segments to "
                    "ensure data is paired for by_exp split.", 
                    category=RuntimeWarning, stacklevel=1
                )

    shift_gab_segs = False
    remconsec, sample = False, False
    unexp_vals = ["any"]
    if lock in ["unexp", "exp", "both"]:
        remconsec = True
        unexp_vals = [1, 0]
        if lock == "exp":
            unexp_vals = [0]
        elif lock == "unexp":
            unexp_vals = [1]
        if stimpar.stimtype == "gabors" and stimpar.gabfr not in ["any", "all"]:
            shift_gab_segs = True
            orig_gabfr = stimpar.gabfr
            stimpar = sess_ntuple_util.get_modif_ntuple(stimpar, "gabfr", "any")
    elif lock == "exp_samp":
        remconsec, sample = False, True
        unexp_vals = [0]
    elif by_exp:
        unexp_vals = [0, 1]    

    all_counts, all_stats, all_arrays = [], [], []
    xrans = []
    for sess in sessions:
        stim = sess.get_stim(stimpar.stimtype)
        sess_counts, sess_stats, sess_arrays = [], [], []
        for unexp in unexp_vals:
            stimpar_use = stimpar
            if incr_unexp_ori and unexp == 1:
                incr_ori = sess_gen_util.get_unexp_gab_ori(stimpar.gab_ori)
                stimpar_use = sess_ntuple_util.get_modif_ntuple(
                    stimpar, "gab_ori", incr_ori
                    )

            qu_segs, qu_counts = quant_segs(
                stim, stimpar_use, n_quants, qu_idx, unexp, empty_ok=nan_empty, 
                remconsec=remconsec)
            if shift_gab_segs: # shift to requested gabor frame
                qu_segs = [[s + orig_gabfr for s in segs] for segs in qu_segs]
            if sample:
                pre_seg = stimpar.pre / stim.seg_len_s
                post_seg = stimpar.post / stim.seg_len_s
                qu_segs, qu_counts = samp_quant_segs(
                    qu_segs, pre_seg, post_seg, randst=randst
                    )
            sess_counts.append(qu_counts)
            trace_info = trace_stats_by_qu(
                stim, qu_segs, stimpar.pre, stimpar.post, analyspar, 
                byroi=byroi, integ=integ, ret_arr=ret_arr, nan_empty=nan_empty, 
                baseline=baseline, datatype=datatype)
            sess_stats.append(trace_info[1])
            if ret_arr:
                sess_arrays.append(trace_info[2])
        xrans.append(trace_info[0])
        if len(unexp_vals) > 1:
            sess_stats = np.asarray(sess_stats)
        else:
            sess_stats = np.asarray(sess_stats[0]) # list of length 1
            sess_counts = sess_counts[0]
            if ret_arr:
                sess_arrays = sess_arrays[0] # list of length 1
        all_counts.append(sess_counts)
        all_stats.append(sess_stats)
        if ret_arr:
            all_arrays.append(sess_arrays)

    if ret_arr:
        return xrans, all_stats, all_counts, all_arrays
    else:
        return xrans, all_stats, all_counts
Example 13
def pupil_run_block_diffs(sessions,
                          analyspar,
                          sesspar,
                          stimpar,
                          permpar,
                          figpar,
                          seed=None,
                          parallel=False):
    """
    pupil_run_block_diffs(sessions, analyspar, sesspar, stimpar, permpar, 
                          figpar)

    Retrieves pupil and running block differences for Gabor sequences for 
    session 1.
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    logger.info("Compiling pupil and running block differences for session 1.",
                extra={"spacing": "\n"})

    permpar = sess_ntuple_util.get_modif_ntuple(permpar, "multcomp", False)

    block_df = behav_analys.get_pupil_run_block_stats_df(sessions,
                                                         analyspar=analyspar,
                                                         stimpar=stimpar,
                                                         permpar=permpar,
                                                         randst=seed,
                                                         parallel=parallel)

    extrapar = {"seed": seed}

    info = {
        "analyspar": analyspar._asdict(),
        "sesspar": sesspar._asdict(),
        "stimpar": stimpar._asdict(),
        "permpar": permpar._asdict(),
        "extrapar": extrapar,
        "block_df": block_df.to_dict()
    }

    helper_fcts.plot_save_all(info, figpar)
Example 14
def gabor_roi_usi_sig(sessions, analyspar, sesspar, stimpar, basepar, 
                      idxpar, permpar, figpar, common_oris=False, seed=None, 
                      parallel=False):
    """
    gabor_roi_usi_sig(sessions, analyspar, sesspar, stimpar, basepar, 
                      idxpar, permpar, figpar)

    Retrieves percentage of significant ROI Gabor USIs.
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - basepar (BasePar): 
            named tuple containing baseline parameters
        - idxpar (IdxPar): 
            named tuple containing index parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - common_oris (bool): 
            if True, data is for common orientations
            default: False
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    common_str = ", with common orientations" if common_oris else ""

    logger.info(
        f"Compiling percentages of significant Gabor USIs{common_str}.", 
        extra={"spacing": "\n"}
        )

    if common_oris:
        gab_ori = sess_gen_util.gab_oris_common_U(stimpar.gab_ori)
        stimpar = sess_ntuple_util.get_modif_ntuple(
            stimpar, "gab_ori", gab_ori
            )

    by_mouse = False
    idx_df = usi_analys.get_idx_df(
        sessions, 
        analyspar=analyspar, 
        stimpar=stimpar, 
        basepar=basepar, 
        idxpar=idxpar, 
        permpar=permpar, 
        common_oris=common_oris,
        by_mouse=by_mouse,
        randst=seed, 
        parallel=parallel,
        )

    permpar = misc_analys.set_multcomp(permpar, sess_df=idx_df, factor=2)
    
    perc_sig_df = usi_analys.get_perc_sig_df(idx_df, analyspar, permpar, seed)

    extrapar = {
        "common_oris": common_oris,
        "by_mouse"   : by_mouse,
        "seed"       : seed,
        }

    info = {"analyspar"  : analyspar._asdict(),
            "sesspar"    : sesspar._asdict(),
            "stimpar"    : stimpar._asdict(),
            "basepar"    : basepar._asdict(),
            "idxpar"     : idxpar._asdict(),
            "permpar"    : permpar._asdict(),
            "extrapar"   : extrapar,
            "perc_sig_df": perc_sig_df.to_dict()
            }

    helper_fcts.plot_save_all(info, figpar)
Example 15
def gabor_example_roi_usis(sessions, analyspar, sesspar, stimpar, basepar, 
                           idxpar, permpar, figpar, seed=None, parallel=False):
    """
    gabor_example_roi_usis(sessions, analyspar, sesspar, stimpar, basepar, 
                           idxpar, permpar, figpar)

    Retrieves example ROI Gabor USI traces.
        
    Saves results and parameters relevant to analysis in a dictionary.

    Required args:
        - sessions (list): 
            Session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - basepar (BasePar): 
            named tuple containing baseline parameters
        - idxpar (IdxPar): 
            named tuple containing index parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing figure parameters
    
    Optional args:
        - seed (int): 
            seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False
    """

    logger.info("Compiling Gabor example ROI USI data.", 
        extra={"spacing": "\n"}
        )

    # check stimpar.pre
    if (not isinstance(stimpar.pre, list)) or (len(stimpar.pre) == 1):
        pre_list = gen_util.list_if_not(stimpar.pre)
        stimpar = sess_ntuple_util.get_modif_ntuple(stimpar, "pre", pre_list)
    
    elif len(stimpar.pre) != 2:
        raise ValueError("Expected 2 values for stimpar.pre: one for "
            "index calculation and one for traces.")

    # use first stimpar.pre for idx calculation
    stimpar_idx = sess_ntuple_util.get_modif_ntuple(
            stimpar, "pre", stimpar.pre[0]
         )

    chosen_rois_df = usi_analys.get_chosen_roi_df(
        sessions, 
        analyspar=analyspar, 
        stimpar=stimpar_idx, 
        basepar=basepar, 
        idxpar=idxpar,
        permpar=permpar,
        target_idx_vals=[0.5, 0, -0.5],
        target_idx_sigs=["sig", "not_sig", "sig"],
        randst=seed,
        parallel=parallel
        )

    # use second stimpar.pre for traces
    stimpar_tr = sess_ntuple_util.get_modif_ntuple(
        stimpar, "pre", stimpar.pre[1]
        )

    chosen_rois_df = usi_analys.add_chosen_roi_traces(
        sessions, 
        chosen_rois_df, 
        analyspar=analyspar, 
        stimpar=stimpar_tr, 
        basepar=basepar, 
        split=idxpar.feature, 
        parallel=parallel
        )
    
    extrapar = {"seed": seed}

    info = {
        "analyspar": analyspar._asdict(),
        "stimpar": stimpar._asdict(),
        "sesspar": sesspar._asdict(),
        "basepar": basepar._asdict(),
        "idxpar": idxpar._asdict(),
        "permpar": permpar._asdict(),
        "extrapar": extrapar,
        "chosen_rois_df": chosen_rois_df.to_dict()
    }

    helper_fcts.plot_save_all(info, figpar)
Example 16
def get_stim_stats_df(sessions,
                      analyspar,
                      stimpar,
                      permpar,
                      comp_sess=[1, 3],
                      datatype="rel_unexp_resp",
                      rel_sess=1,
                      basepar=None,
                      idxpar=None,
                      pop_stats=True,
                      randst=None,
                      parallel=False):
    """
    get_stim_stats_df(sessions, analyspar, stimpar, permpar)

    Returns dataframe with comparison of absolute fractional data changes 
    between sessions for different stimuli.

    Required args:
        - sessions (list): 
            session objects
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - stimpar (StimPar): 
            named tuple containing stimulus parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters

    Optional args:
        - comp_sess (list):
            sessions for which to obtain absolute fractional change 
            [x, y] => |(y - x) / x|
            default: [1, 3]
        - datatype (str):
            type of data to retrieve
            default: "rel_unexp_resp"
        - rel_sess (int):
            number of session relative to which data should be scaled, for each 
            mouse
            default: 1
        - basepar (BasePar): 
            named tuple containing baseline parameters 
            (needed if datatype is "usis")
            default: None
        - idxpar (IdxPar): 
            named tuple containing index parameters 
            (needed if datatype is "usis")
            default: None
        - pop_stats (bool): 
            if True, analyses are run on population statistics, and not 
            individual tracked ROIs
            default: True
        - randst (int or np.random.RandomState): 
            random state or seed value to use. (-1 treated as None)
            default: None
        - parallel (bool): 
            if True, some of the analysis is run in parallel across CPU cores 
            default: False

    Returns:
        - stim_stats_df (pd.DataFrame):
            dataframe with one row per line/plane and one for all line/planes 
            together, and the basic sess_df columns, in addition to, 
            for each stimtype:
            - {stimtype} (list): absolute fractional change statistics (me, err)
            - raw_p_vals (float): uncorrected p-value for data differences 
                between stimulus types 
            - p_vals (float): p-value for data differences between stimulus 
                types, corrected for multiple comparisons and tails
    """

    if not pop_stats:
        if analyspar.tracked:
            misc_analys.check_sessions_complete(sessions, raise_err=True)
        else:
            raise ValueError(
                "If analysis is run for individual ROIs and not population "
                "statistics, analyspar.tracked must be set to True.")

    if set(stimpar.stimtype) != set(["gabors", "visflow"]):
        raise ValueError(
            "Expected stimpar.stimtype to list 'gabors' and 'visflow'.")
    if (not (isinstance(stimpar.pre, list) and isinstance(stimpar.post, list))
            or not (len(stimpar.pre) == 2 and len(stimpar.post) == 2)):
        raise ValueError(
            "stimpar.pre and stimpar.post must be provided as lists of "
            "length 2 (one value per stimpar.stimtype, in order).")

    if datatype == "usis":
        if (not isinstance(idxpar.feature, list)
                or not len(idxpar.feature) == 2):
            raise ValueError(
                "idxpar.feature must be provided as a list of length 2 "
                "(one value per stimpar.stimtype, in order).")

    stim_stats_df = None
    for s, stimtype in enumerate(stimpar.stimtype):
        stim_stimpar = sess_ntuple_util.get_modif_ntuple(
            stimpar, ["stimtype", "pre", "post"],
            [stimtype, stimpar.pre[s], stimpar.post[s]])

        stim_idxpar = idxpar
        if datatype == "usis":
            stim_idxpar = sess_ntuple_util.get_modif_ntuple(
                idxpar, "feature", idxpar.feature[s])

        stim_stats_df = get_stim_data_df(sessions,
                                         analyspar,
                                         stim_stimpar,
                                         stim_data_df=stim_stats_df,
                                         comp_sess=comp_sess,
                                         datatype=datatype,
                                         rel_sess=rel_sess,
                                         basepar=basepar,
                                         idxpar=stim_idxpar,
                                         abs_usi=pop_stats,
                                         parallel=parallel)

    # add statistics and p-values
    add_stim_stats = add_stim_pop_stats if pop_stats else add_stim_roi_stats
    stim_stats_df = add_stim_stats(stim_stats_df,
                                   sessions,
                                   analyspar,
                                   stimpar,
                                   permpar,
                                   comp_sess=comp_sess,
                                   in_place=True,
                                   randst=randst)

    # adjust data columns
    data_cols = []
    for s, stimtype in enumerate(stimpar.stimtype):
        for n in comp_sess:
            data_cols.append(f"{stimtype}_s{n}")

    stim_stats_df = stim_stats_df.drop(data_cols, axis=1)

    stim_stats_df["sess_ns"] = f"comp{comp_sess[0]}v{comp_sess[1]}"

    # corrected p-values
    stim_stats_df = misc_analys.add_corr_p_vals(stim_stats_df, permpar)

    return stim_stats_df
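The comp_sess docstring above defines the absolute fractional change between two sessions as |(y - x) / x|; a minimal numeric sketch (illustrative values):

def abs_frac_change(x, y):
    # absolute fractional change from session x to session y: |(y - x) / x|
    return abs((y - x) / x)

# e.g., 0.8 in session 1 vs 1.2 in session 3 -> 0.5
print(abs_frac_change(0.8, 1.2))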