def run_traces_by_qu_lock_sess(sessions, analysis, seed, analyspar, sesspar,
                               stimpar, quantpar, figpar, datatype="roi"):
    """
    run_traces_by_qu_lock_sess(sessions, analysis, seed, analyspar, sesspar,
                               stimpar, quantpar, figpar)

    Retrieves trace statistics by session x quantile at the transition of
    expected to unexpected sequences (or v.v.) and plots traces across ROIs by
    quantile, with each session in a separate subplot.

    Also runs the analysis for one quantile (full data) with different
    unexpected lengths grouped separately.

    Saves results and parameters relevant to the analysis in a dictionary.

    Required args:
        - sessions (list)      : list of Session objects
        - analysis (str)       : analysis type (e.g., "l")
        - seed (int)           : seed value to use (-1 treated as None)
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - quantpar (QuantPar)  : named tuple containing quantile analysis
                                 parameters
        - figpar (dict)        : dictionary containing figure parameters

    Optional args:
        - datatype (str): type of data (e.g., "roi", "run")
    """

    sessstr_pr = sess_str_util.sess_par_str(
        sesspar.sess_n, stimpar.stimtype, sesspar.plane, stimpar.visflow_dir,
        stimpar.visflow_size, stimpar.gabk, "print")
    dendstr_pr = sess_str_util.dend_par_str(
        analyspar.dend, sesspar.plane, datatype, "print")

    datastr = sess_str_util.datatype_par_str(datatype)

    logger.info(
        f"Analysing and plotting unexpected vs expected {datastr} "
        f"traces locked to unexpected onset by quantile ({quantpar.n_quants}) "
        f"\n({sessstr_pr}{dendstr_pr}).", extra={"spacing": "\n"})

    seed = rand_util.seed_all(seed, "cpu", log_seed=False)

    # modify quantpar to retain all quantiles
    quantpar_one = sess_ntuple_util.init_quantpar(1, 0)
    n_quants = quantpar.n_quants
    quantpar_mult = sess_ntuple_util.init_quantpar(n_quants, "all")

    if stimpar.stimtype == "visflow":
        pre_post = [2.0, 6.0]
    elif stimpar.stimtype == "gabors":
        pre_post = [2.0, 8.0]
    else:
        gen_util.accepted_values_error(
            "stimpar.stimtype", stimpar.stimtype, ["visflow", "gabors"])
    logger.warning("Setting pre to {}s and post to {}s.".format(*pre_post))

    stimpar = sess_ntuple_util.get_modif_ntuple(
        stimpar, ["pre", "post"], pre_post)

    figpar = copy.deepcopy(figpar)
    if figpar["save"]["use_dt"] is None:
        figpar["save"]["use_dt"] = gen_util.create_time_str()

    for baseline in [None, stimpar.pre]:
        basestr_pr = sess_str_util.base_par_str(baseline, "print")
        for quantpar in [quantpar_one, quantpar_mult]:
            locks = ["unexp", "exp"]
            if quantpar.n_quants == 1:
                locks.append("unexp_split")
            # get the stats (all), separating by session and quantiles
            for lock in locks:
                logger.info(
                    f"{quantpar.n_quants} quant, {lock} lock{basestr_pr}",
                    extra={"spacing": "\n"})
                if lock == "unexp_split":
                    trace_info = quant_analys.trace_stats_by_exp_len_sess(
                        sessions, analyspar, stimpar, quantpar.n_quants,
                        quantpar.qu_idx, byroi=False, nan_empty=True,
                        baseline=baseline, datatype=datatype)
                else:
                    trace_info = quant_analys.trace_stats_by_qu_sess(
                        sessions, analyspar, stimpar, quantpar.n_quants,
                        quantpar.qu_idx, byroi=False, lock=lock,
                        nan_empty=True, baseline=baseline, datatype=datatype)

                # for comparison, locking to middle of expected sample
                # (1 quant)
                exp_samp = quant_analys.trace_stats_by_qu_sess(
                    sessions, analyspar, stimpar, quantpar_one.n_quants,
                    quantpar_one.qu_idx, byroi=False, lock="exp_samp",
                    nan_empty=True, baseline=baseline, datatype=datatype)

                extrapar = {
                    "analysis": analysis,
                    "datatype": datatype,
                    "seed": seed,
                    }

                xrans = [xran.tolist() for xran in trace_info[0]]
                all_stats = [sessst.tolist() for sessst in trace_info[1]]
                exp_stats = [expst.tolist() for expst in exp_samp[1]]
                trace_stats = {
                    "xrans": xrans,
                    "all_stats": all_stats,
                    "all_counts": trace_info[2],
                    "lock": lock,
                    "baseline": baseline,
                    "exp_stats": exp_stats,
                    "exp_counts": exp_samp[2]
                    }

                if lock == "unexp_split":
                    trace_stats["unexp_lens"] = trace_info[3]

                sess_info = sess_gen_util.get_sess_info(
                    sessions, analyspar.fluor, incl_roi=(datatype == "roi"),
                    rem_bad=analyspar.rem_bad)

                info = {
                    "analyspar": analyspar._asdict(),
                    "sesspar": sesspar._asdict(),
                    "stimpar": stimpar._asdict(),
                    "quantpar": quantpar._asdict(),
                    "extrapar": extrapar,
                    "sess_info": sess_info,
                    "trace_stats": trace_stats
                    }

                fulldir, savename = gen_plots.plot_traces_by_qu_lock_sess(
                    figpar=figpar, **info)

                file_util.saveinfo(info, savename, fulldir, "json")
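
# Hedged usage sketch for run_traces_by_qu_lock_sess (illustrative only, not an
# entry point of this module): it assumes Session objects and parameter named
# tuples have already been built with the sess_gen_util / sess_ntuple_util /
# sess_plot_util helpers used elsewhere in this repository.
#
#     run_traces_by_qu_lock_sess(
#         sessions, "l", seed=-1, analyspar=analyspar, sesspar=sesspar,
#         stimpar=stimpar, quantpar=quantpar, figpar=figpar, datatype="roi")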
def run_traces_by_qu_unexp_sess(sessions, analysis, analyspar, sesspar,
                                stimpar, quantpar, figpar, datatype="roi"):
    """
    run_traces_by_qu_unexp_sess(sessions, analysis, analyspar, sesspar,
                                stimpar, quantpar, figpar)

    Retrieves trace statistics by session x unexp val x quantile and plots
    traces across ROIs by quantile/unexpected with each session in a separate
    subplot.

    Also runs the analysis for one quantile (full data).

    Saves results and parameters relevant to the analysis in a dictionary.

    Required args:
        - sessions (list)      : list of Session objects
        - analysis (str)       : analysis type (e.g., "t")
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - quantpar (QuantPar)  : named tuple containing quantile analysis
                                 parameters
        - figpar (dict)        : dictionary containing figure parameters

    Optional args:
        - datatype (str): type of data (e.g., "roi", "run")
    """

    sessstr_pr = sess_str_util.sess_par_str(
        sesspar.sess_n, stimpar.stimtype, sesspar.plane, stimpar.visflow_dir,
        stimpar.visflow_size, stimpar.gabk, "print")
    dendstr_pr = sess_str_util.dend_par_str(
        analyspar.dend, sesspar.plane, datatype, "print")

    datastr = sess_str_util.datatype_par_str(datatype)

    logger.info(
        f"Analysing and plotting unexpected vs expected {datastr} "
        f"traces by quantile ({quantpar.n_quants}) \n({sessstr_pr}"
        f"{dendstr_pr}).", extra={"spacing": "\n"})

    # modify quantpar to retain all quantiles
    quantpar_one = sess_ntuple_util.init_quantpar(1, 0)
    n_quants = quantpar.n_quants
    quantpar_mult = sess_ntuple_util.init_quantpar(n_quants, "all")

    figpar = copy.deepcopy(figpar)
    if figpar["save"]["use_dt"] is None:
        figpar["save"]["use_dt"] = gen_util.create_time_str()

    for quantpar in [quantpar_one, quantpar_mult]:
        logger.info(f"{quantpar.n_quants} quant", extra={"spacing": "\n"})

        # get the stats (all), separating by session, unexpected and quantiles
        trace_info = quant_analys.trace_stats_by_qu_sess(
            sessions, analyspar, stimpar, quantpar.n_quants, quantpar.qu_idx,
            byroi=False, by_exp=True, datatype=datatype)

        extrapar = {
            "analysis": analysis,
            "datatype": datatype,
            }

        xrans = [xran.tolist() for xran in trace_info[0]]
        all_stats = [sessst.tolist() for sessst in trace_info[1]]
        trace_stats = {
            "xrans": xrans,
            "all_stats": all_stats,
            "all_counts": trace_info[2]
            }

        sess_info = sess_gen_util.get_sess_info(
            sessions, analyspar.fluor, incl_roi=(datatype == "roi"),
            rem_bad=analyspar.rem_bad)

        info = {
            "analyspar": analyspar._asdict(),
            "sesspar": sesspar._asdict(),
            "stimpar": stimpar._asdict(),
            "quantpar": quantpar._asdict(),
            "extrapar": extrapar,
            "sess_info": sess_info,
            "trace_stats": trace_stats
            }

        fulldir, savename = gen_plots.plot_traces_by_qu_unexp_sess(
            figpar=figpar, **info)

        file_util.saveinfo(info, savename, fulldir, "json")
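
# Hedged usage sketch (illustrative): same inputs as for the locked-trace
# analysis above, minus the seed, since this analysis involves no random
# processes.
#
#     run_traces_by_qu_unexp_sess(
#         sessions, "t", analyspar=analyspar, sesspar=sesspar,
#         stimpar=stimpar, quantpar=quantpar, figpar=figpar, datatype="roi")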
def run_regr(args):
    """
    run_regr(args)

    Runs logistic regressions on the specified comparison and range of
    sessions.

    Required args:
        - args (Argument parser): parser with analysis parameters as
                                  attributes:

            alg (str)             : algorithm to use ("sklearn" or "pytorch")
            bal (bool)            : if True, classes are balanced
            batchsize (int)       : nbr of samples dataloader will load per
                                    batch (for "pytorch" alg)
            visflow_dir (str)     : visual flow direction to analyse
            visflow_pre (float)   : number of seconds to include before visual
                                    flow segments
            visflow_size (int or list): visual flow square sizes to include
            comp (str)            : type of comparison
            datadir (str)         : data directory
            dend (str)            : type of dendrites to use
                                    ("allen" or "dend")
            device (str)          : device name (i.e., "cuda" or "cpu")
            ep_freq (int)         : frequency at which to log loss to console
            error (str)           : error to take, i.e., "std" (for std or
                                    quantiles) or "sem" (for SEM or MAD)
            fluor (str)           : fluorescence trace type
            fontdir (str)         : directory in which additional fonts are
                                    located
            gabfr (int)           : gabor frame of reference if comparison is
                                    "unexp"
            gabk (int or list)    : gabor kappas to include
            gab_ori (list or str) : gabor orientations to include
            incl (str or list)    : sessions to include ("yes", "no", "all")
            lr (num)              : model learning rate (for "pytorch" alg)
            mouse_n (int)         : mouse number
            n_epochs (int)        : number of epochs
            n_reg (int)           : number of regular runs
            n_shuff (int)         : number of shuffled runs
            scale (bool)          : if True, each ROI is scaled
            output (str)          : general directory in which to save output
            parallel (bool)       : if True, runs are done in parallel
            plt_bkend (str)       : pyplot backend to use
            q1v4 (bool)           : if True, analysis is trained on first and
                                    tested on last quartiles
            exp_v_unexp (bool)    : if True, analysis is trained on expected
                                    and tested on unexpected sequences
            runtype (str)         : type of run ("prod" or "pilot")
            seed (int)            : seed to seed random processes with
            sess_n (int)          : session number
            stats (str)           : stats to take, i.e., "mean" or "median"
            stimtype (str)        : stim to analyse ("gabors" or "visflow")
            train_p (list)        : proportion of dataset to allocate to
                                    training
            uniqueid (str or int) : unique ID for analysis
            wd (float)            : weight decay value (for "pytorch" alg)
    """

    args = copy.deepcopy(args)

    if args.datadir is None:
        args.datadir = DEFAULT_DATADIR
    else:
        args.datadir = Path(args.datadir)

    if args.uniqueid == "datetime":
        args.uniqueid = gen_util.create_time_str()
    elif args.uniqueid in ["None", "none"]:
        args.uniqueid = None

    reseed = False
    if args.seed in [None, "None"]:
        reseed = True

    # deal with parameters
    extrapar = {"uniqueid": args.uniqueid,
                "seed": args.seed}

    techpar = {
        "reseed": reseed,
        "device": args.device,
        "alg": args.alg,
        "parallel": args.parallel,
        "plt_bkend": args.plt_bkend,
        "fontdir": args.fontdir,
        "output": args.output,
        "ep_freq": args.ep_freq,
        "n_reg": args.n_reg,
        "n_shuff": args.n_shuff,
        }

    mouse_df = DEFAULT_MOUSE_DF_PATH

    stimpar = logreg.get_stimpar(
        args.comp, args.stimtype, args.visflow_dir, args.visflow_size,
        args.gabfr, args.gabk, gab_ori=args.gab_ori,
        visflow_pre=args.visflow_pre)

    analyspar = sess_ntuple_util.init_analyspar(
        args.fluor, stats=args.stats, error=args.error,
        scale=not (args.no_scale), dend=args.dend)

    if args.q1v4:
        quantpar = sess_ntuple_util.init_quantpar(4, [0, -1])
    else:
        quantpar = sess_ntuple_util.init_quantpar(1, 0)

    logregpar = sess_ntuple_util.init_logregpar(
        args.comp, not (args.not_ctrl), args.q1v4, args.exp_v_unexp,
        args.n_epochs, args.batchsize, args.lr, args.train_p, args.wd,
        args.bal, args.alg)

    omit_sess, omit_mice = sess_gen_util.all_omit(
        stimpar.stimtype, args.runtype, stimpar.visflow_dir,
        stimpar.visflow_size, stimpar.gabk)

    sessids = sorted(sess_gen_util.get_sess_vals(
        mouse_df, "sessid", args.mouse_n, args.sess_n, args.runtype,
        incl=args.incl, omit_sess=omit_sess, omit_mice=omit_mice))

    if len(sessids) == 0:
        logger.warning(
            f"No sessions found (mouse: {args.mouse_n}, sess: {args.sess_n}, "
            f"runtype: {args.runtype})")

    for sessid in sessids:
        sess = sess_gen_util.init_sessions(
            sessid, args.datadir, mouse_df, args.runtype, full_table=False,
            fluor=analyspar.fluor, dend=analyspar.dend,
            temp_log="warning")[0]
        logreg.run_regr(
            sess, analyspar, stimpar, logregpar, quantpar, extrapar, techpar)

        plot_util.cond_close_figs()
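
# Hedged usage sketch (illustrative): run_regr expects an argparse-style
# namespace carrying the attributes documented above. The parser name below is
# a placeholder for this repository's command-line parser, not part of this
# module.
#
#     args = parser.parse_args()  # hypothetical parser defining the attributes above
#     run_regr(args)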
def init_param_cont(args):
    """
    init_param_cont(args)

    Initializes parameter containers.

    Returns args:
        - in the following named tuples: analyspar, sesspar, stimpar,
          autocorrpar, permpar, quantpar
        - in the following dictionary: figpar

    Required args:
        - args (Argument parser): parser with the following attributes:

            visflow_dir (str or list): visual flow direction values to include
                                       ("right", "left", ["right", "left"])
            visflow_size (int or list): visual flow size values to include
                                        (128, 256 or [128, 256])
            closest (bool)         : if False, only the exact session number
                                     is retained, otherwise the closest.
            error (str)            : error statistic parameter
                                     ("std" or "sem")
            fontdir (str)          : path to directory containing additional
                                     fonts
            gabfr (int)            : gabor frame at which sequences start
                                     (0, 1, 2, 3)
            gabk (int or list)     : gabor kappa values to include
                                     (4, 16 or [4, 16])
            gab_ori (int or list)  : gabor orientation values to include
                                     ([0, 45, 90, 135, 180, 225])
            incl (str)             : sessions to include ("yes", "no", "all")
            keepnans (bool)        : if True, the original running array is
                                     used instead of the one where NaNs are
                                     interpolated.
            lag_s (num)            : lag for autocorrelation (in sec)
            line (str)             : line ("L23", "L5", "any")
            min_rois (int)         : min number of ROIs
            n_perms (int)          : nbr of permutations to run
            n_quants (int)         : number of quantiles
            ncols (int)            : number of columns
            no_datetime (bool)     : if True, figures are not saved in a
                                     subfolder named based on the date and
                                     time.
            no_sharey (bool)       : if True, the sharey figure parameter is
                                     set to False.
            not_save_fig (bool)    : if True, figures are not saved
            output (str)           : general directory in which to save output
            overwrite (bool)       : if False, overwriting existing figures is
                                     prevented by adding suffix numbers.
            pass_fail (str or list): pass/fail values of interest ("P", "F")
            plane (str)            : plane ("soma", "dend", "any")
            plt_bkend (str)        : mpl backend to use
            post (num)             : range of frames to include after each
                                     reference frame (in s)
            pre (num)              : range of frames to include before each
                                     reference frame (in s)
            runtype (str or list)  : runtype ("pilot" or "prod")
            scale (bool)           : whether to scale running data
            sess_n (int)           : session number
            stats (str)            : statistic parameter ("mean" or "median")
            stimtype (str)         : stimulus to analyse
                                     ("visflow" or "gabors")
            tails (str or int)     : which tail(s) to test ("hi", "lo", 2)

    Returns:
        - analysis_dict (dict): dictionary of analysis parameters
            ["analyspar"] (AnalysPar)    : named tuple of analysis parameters
            ["sesspar"] (SessPar)        : named tuple of session parameters
            ["stimpar"] (StimPar)        : named tuple of stimulus parameters
            ["autocorrpar"] (AutocorrPar): named tuple of autocorrelation
                                           parameters
            ["permpar"] (PermPar)        : named tuple of permutation
                                           parameters
            ["quantpar"] (QuantPar)      : named tuple of quantile parameters
            ["figpar"] (dict)            : dictionary containing the following
                                           subdictionaries:
                ["init"]: dict with the following inputs as attributes:
                    ["ncols"] (int)      : number of columns in the figures
                    ["sharex"] (bool)    : if True, x axis lims are shared
                                           across subplots
                    ["sharey"] (bool)    : if True, y axis lims are shared
                                           across subplots
                    ["subplot_hei"] (num): height of each subplot (inches)
                    ["subplot_wid"] (num): width of each subplot (inches)
                ["save"]: dict with the following inputs as attributes:
                    ["datetime"] (bool) : if True, figures are saved in a
                                          subfolder named based on the date
                                          and time.
                    ["fig_ext"] (str)   : figure extension
                    ["overwrite"] (bool): if True, existing figures can be
                                          overwritten
                    ["save_fig"] (bool) : if True, figures are saved
                    ["use_dt"] (str)    : datetime folder to use
                ["dirs"]: dict with the following attributes:
                    ["figdir"] (str)   : main folder in which to save figures
                    ["roi"] (str)      : subdirectory name for ROI analyses
                    ["run"] (str)      : subdirectory name for running
                                         analyses
                    ["autocorr"] (str) : subdirectory name for autocorrelation
                                         analyses
                    ["locori"] (str)   : subdirectory name for location and
                                         orientation responses
                    ["oridir"] (str)   : subdirectory name for
                                         orientation/direction analyses
                    ["unexp_qu"] (str) : subdirectory name for unexpected,
                                         quantile analyses
                    ["tune_curv"] (str): subdirectory name for tuning curves
                    ["grped"] (str)    : subdirectory name for ROI grps data
                    ["mags"] (str)     : subdirectory name for magnitude
                                         analyses
                ["mng"]: dict with the following attributes:
                    ["plt_bkend"] (str): mpl backend to use
                    ["linclab"] (bool) : if True, Linclab mpl defaults are
                                         used
                    ["fontdir"] (str)  : path to directory containing
                                         additional fonts
    """

    args = copy.deepcopy(args)

    analysis_dict = dict()

    # analysis parameters
    analysis_dict["analyspar"] = sess_ntuple_util.init_analyspar(
        "n/a", not (args.keepnans), args.stats, args.error, args.scale)

    # session parameters
    analysis_dict["sesspar"] = sess_ntuple_util.init_sesspar(
        args.sess_n, args.closest, args.plane, args.line, args.min_rois,
        args.pass_fail, args.incl, args.runtype)

    # stimulus parameters
    analysis_dict["stimpar"] = sess_ntuple_util.init_stimpar(
        args.stimtype, args.visflow_dir, args.visflow_size, args.gabfr,
        args.gabk, args.gab_ori, args.pre, args.post)

    # SPECIFIC ANALYSES
    # autocorrelation parameters
    analysis_dict["autocorrpar"] = sess_ntuple_util.init_autocorrpar(
        args.lag_s, byitem=False)

    # permutation parameters
    analysis_dict["permpar"] = sess_ntuple_util.init_permpar(
        args.n_perms, 0.05, args.tails, False)

    # quantile parameters
    analysis_dict["quantpar"] = sess_ntuple_util.init_quantpar(
        args.n_quants, [0, -1])

    # figure parameters
    analysis_dict["figpar"] = sess_plot_util.init_figpar(
        ncols=int(args.ncols), datetime=not (args.no_datetime),
        overwrite=args.overwrite, save_fig=not (args.not_save_fig),
        runtype=args.runtype, output=args.output, plt_bkend=args.plt_bkend,
        fontdir=args.fontdir, sharey=not (args.no_sharey))

    return analysis_dict
def init_param_cont(args):
    """
    init_param_cont(args)

    Initializes parameter containers.

    Returns args:
        - in the following named tuples: analyspar, sesspar, stimpar,
          autocorrpar, permpar, quantpar, roigrppar, tcurvpar
        - in the following dictionary: figpar

    Required args:
        - args (Argument parser): parser with the following attributes:

            visflow_dir (str or list): visual flow direction values to include
                                       ("right", "left", ["right", "left"])
            visflow_size (int or list): visual flow size values to include
                                        (128, 256 or [128, 256])
            closest (bool)         : if False, only the exact session number
                                     is retained, otherwise the closest.
            dend (str)             : type of dendrites to use
                                     ("allen" or "dend")
            error (str)            : error statistic parameter
                                     ("std" or "sem")
            fluor (str)            : if "raw", raw ROI traces are used. If
                                     "dff", dF/F ROI traces are used.
            fontdir (str)          : path to directory containing additional
                                     fonts
            gabfr (int)            : gabor frame at which sequences start
                                     (0, 1, 2, 3)
            gabk (int or list)     : gabor kappa values to include
                                     (4, 16 or [4, 16])
            gab_ori (int or list)  : gabor orientation values to include
                                     ([0, 45, 90, 135, 180, 225])
            grps (str or list)     : set or sets of groups to return
                                     ("all", "change", "no_change", "reduc",
                                     "incr")
            incl (str)             : sessions to include ("yes", "no", "all")
            keepnans (bool)        : if True, ROIs with NaN/Inf values are
                                     kept in the analyses.
            lag_s (num)            : lag for autocorrelation (in sec)
            line (str)             : line ("L23", "L5", "any")
            min_rois (int)         : min number of ROIs
            n_perms (int)          : nbr of permutations to run
            n_quants (int)         : number of quantiles
            ncols (int)            : number of columns
            no_add_exp (bool)      : if True, the group of ROIs showing no
                                     significance in either is not added to
                                     the groups returned
            no_datetime (bool)     : if True, figures are not saved in a
                                     subfolder named based on the date and
                                     time.
            not_byitem (bool)      : if True, autocorrelation statistics are
                                     taken across items (e.g., ROIs)
            not_save_fig (bool)    : if True, figures are not saved
            op (str)               : operation to use on values if plot_vals
                                     is "both" ("ratio" or "diff")
            output (str)           : general directory in which to save output
            overwrite (bool)       : if False, overwriting existing figures is
                                     prevented by adding suffix numbers.
            pass_fail (str or list): pass/fail values of interest ("P", "F")
            plot_vals (str)        : values to plot ("unexp", "exp", "both")
            plane (str)            : plane ("soma", "dend", "any")
            plt_bkend (str)        : mpl backend to use
            post (num)             : range of frames to include after each
                                     reference frame (in s)
            pre (num)              : range of frames to include before each
                                     reference frame (in s)
            runtype (str or list)  : runtype ("pilot" or "prod")
            scale (bool)           : whether to scale ROI data
            sess_n (int)           : session number
            stats (str)            : statistic parameter ("mean" or "median")
            stimtype (str)         : stimulus to analyse
                                     ("visflow" or "gabors")
            tails (str or int)     : which tail(s) to test ("hi", "lo", 2)
            tc_gabfr (int or str)  : gabor frame at which sequences start
                                     (0, 1, 2, 3) for tuning curve analysis
                                     (x_x, interpreted as 2 gabfrs)
            tc_grp2 (str)          : second group: either unexp, exp or rand
                                     (random subsample of exp, the size of
                                     unexp)
            tc_post (num)          : range of frames to include after each
                                     reference frame (in s) for tuning curve
                                     analysis
            tc_vm_estim (bool)     : runs analysis using the von Mises
                                     parameter estimation method
            tc_test (bool)         : if True, tuning curve analysis is run on
                                     a small subset of ROIs and gabors

    Returns:
        - analysis_dict (dict): dictionary of analysis parameters
            ["analyspar"] (AnalysPar)    : named tuple of analysis parameters
            ["sesspar"] (SessPar)        : named tuple of session parameters
            ["stimpar"] (StimPar)        : named tuple of stimulus parameters
            ["autocorrpar"] (AutocorrPar): named tuple of autocorrelation
                                           parameters
            ["permpar"] (PermPar)        : named tuple of permutation
                                           parameters
            ["quantpar"] (QuantPar)      : named tuple of quantile parameters
            ["roigrppar"] (RoiGrpPar)    : named tuple of roi grp parameters
            ["tcurvpar"] (TCurvPar)      : named tuple of tuning curve
                                           parameters
            ["figpar"] (dict)            : dictionary containing the following
                                           subdictionaries:
                ["init"]: dict with the following inputs as attributes:
                    ["ncols"] (int)      : number of columns in the figures
                    ["sharex"] (bool)    : if True, x axis lims are shared
                                           across subplots
                    ["sharey"] (bool)    : if True, y axis lims are shared
                                           across subplots
                    ["subplot_hei"] (num): height of each subplot (inches)
                    ["subplot_wid"] (num): width of each subplot (inches)
                ["save"]: dict with the following inputs as attributes:
                    ["datetime"] (bool) : if True, figures are saved in a
                                          subfolder named based on the date
                                          and time.
                    ["fig_ext"] (str)   : figure extension
                    ["overwrite"] (bool): if True, existing figures can be
                                          overwritten
                    ["save_fig"] (bool) : if True, figures are saved
                    ["use_dt"] (str)    : datetime folder to use
                ["dirs"]: dict with the following attributes:
                    ["figdir"] (str)   : main folder in which to save figures
                    ["roi"] (str)      : subdirectory name for ROI analyses
                    ["run"] (str)      : subdirectory name for running
                                         analyses
                    ["autocorr"] (str) : subdirectory name for autocorrelation
                                         analyses
                    ["locori"] (str)   : subdirectory name for location and
                                         orientation responses
                    ["oridir"] (str)   : subdirectory name for
                                         orientation/direction analyses
                    ["unexp_qu"] (str) : subdirectory name for unexpected,
                                         quantile analyses
                    ["tune_curv"] (str): subdirectory name for tuning curves
                    ["grped"] (str)    : subdirectory name for ROI grps data
                    ["mags"] (str)     : subdirectory name for magnitude
                                         analyses
                ["mng"]: dict with the following attributes:
                    ["plt_bkend"] (str): mpl backend to use
                    ["linclab"] (bool) : if True, Linclab mpl defaults are
                                         used
                    ["fontdir"] (str)  : path to directory containing
                                         additional fonts
    """

    args = copy.deepcopy(args)

    analysis_dict = dict()

    # analysis parameters
    analysis_dict["analyspar"] = sess_ntuple_util.init_analyspar(
        args.fluor, not (args.keepnans), args.stats, args.error, args.scale,
        dend=args.dend)

    # session parameters
    analysis_dict["sesspar"] = sess_ntuple_util.init_sesspar(
        args.sess_n, args.closest, args.plane, args.line, args.min_rois,
        args.pass_fail, args.incl, args.runtype)

    # stimulus parameters
    analysis_dict["stimpar"] = sess_ntuple_util.init_stimpar(
        args.stimtype, args.visflow_dir, args.visflow_size, args.gabfr,
        args.gabk, args.gab_ori, args.pre, args.post)

    # SPECIFIC ANALYSES
    # autocorrelation parameters
    analysis_dict["autocorrpar"] = sess_ntuple_util.init_autocorrpar(
        args.lag_s, not (args.not_byitem))

    # permutation parameters
    analysis_dict["permpar"] = sess_ntuple_util.init_permpar(
        args.n_perms, 0.05, args.tails)

    # quantile parameters
    analysis_dict["quantpar"] = sess_ntuple_util.init_quantpar(
        args.n_quants, [0, -1])

    # roi grp parameters
    analysis_dict["roigrppar"] = sess_ntuple_util.init_roigrppar(
        args.grps, not (args.no_add_exp), args.op, args.plot_vals)

    # tuning curve parameters
    analysis_dict["tcurvpar"] = sess_ntuple_util.init_tcurvpar(
        args.tc_gabfr, 0, args.tc_post, args.tc_grp2, args.tc_test,
        args.tc_vm_estim)

    # figure parameters
    analysis_dict["figpar"] = sess_plot_util.init_figpar(
        ncols=int(args.ncols), datetime=not (args.no_datetime),
        overwrite=args.overwrite, save_fig=not (args.not_save_fig),
        runtype=args.runtype, output=args.output, plt_bkend=args.plt_bkend,
        fontdir=args.fontdir)

    return analysis_dict