Code example #1
    def _accum_df_by_row(self,
                         ipath: str,
                         opath: str,
                         index: int,
                         inc_exps: tp.Optional[str]) -> tp.Optional[pd.DataFrame]:
        if utils.path_exists(opath):
            cum_df = storage.DataFrameReader('storage.csv')(opath)
        else:
            cum_df = None

        if utils.path_exists(ipath):
            t = storage.DataFrameReader('storage.csv')(ipath)

            if inc_exps is not None:
                cols = utils.exp_include_filter(
                    inc_exps, list(t.columns), self.n_exp)
            else:
                cols = t.columns

            if cum_df is None:
                cum_df = pd.DataFrame(columns=cols)

            cum_df = cum_df.append(t.loc[index, cols])

            return cum_df

        return None
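Note on the ``cum_df.append(...)`` calls in this example (and in code examples #9 and #13 below): ``DataFrame.append`` was deprecated in pandas 1.4 and removed in 2.0. A minimal sketch of the same row accumulation using ``pd.concat``, assuming the ``cum_df``, ``t``, ``index``, and ``cols`` variables from above:

import pandas as pd

# Selecting with a list ([index]) keeps the result a one-row DataFrame
# instead of a Series, so concat stacks it as a new row.
cum_df = pd.concat([cum_df, t.loc[[index], cols]], ignore_index=True)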
Code example #2
    def _gen_csv(self,
                 batch_leaf: str,
                 criteria: bc.IConcreteBatchCriteria,
                 cmdopts: types.Cmdopts,
                 controller: str,
                 src_stem: str,
                 dest_stem: str,
                 inc_exps: tp.Optional[str]) -> None:
        """Helper function for generating a set of .csv files for use in intra-scenario
        graph generation (1 per controller).

        """
        self.logger.debug("Gathering data for '%s' from %s -> %s",
                          controller, src_stem, dest_stem)
        ipath = os.path.join(
            cmdopts['batch_stat_collate_root'], src_stem + '.csv')

        # Some experiments might not generate the necessary performance measure .csvs for graph
        # generation, which is OK.
        if not utils.path_exists(ipath):
            self.logger.warning(
                "%s missing for controller %s", ipath, controller)
            return

        preparer = StatsPreparer(ipath_stem=cmdopts['batch_stat_collate_root'],
                                 ipath_leaf=src_stem,
                                 opath_stem=self.cc_csv_root,
                                 n_exp=criteria.n_exp())
        opath_leaf = LeafGenerator.from_batch_leaf(batch_leaf, dest_stem, None)
        preparer.across_rows(opath_leaf=opath_leaf, index=0, inc_exps=inc_exps)
Code example #3
File: xml.py Project: swarm-robotics/sierra
    def pickle(self, fpath: str, delete: bool = False) -> None:
        from sierra.core import utils

        if delete and utils.path_exists(fpath):
            os.remove(fpath)

        with open(fpath, 'ab') as f:
            pickle.dump(self.adds, f)
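Because the file is opened in append mode (``'ab'``), successive ``pickle()`` calls stack multiple pickled objects in the same file. A sketch of a reader for such a file (a hypothetical helper, not part of the source):

import pickle
import typing as tp

def unpickle_all(fpath: str) -> tp.List:
    """Load every object appended to a pickle file, in write order."""
    objs = []
    with open(fpath, 'rb') as f:
        while True:
            try:
                objs.append(pickle.load(f))
            except EOFError:  # raised once the last record is consumed
                return objs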
Code example #4
File: heatmap.py Project: swarm-robotics/sierra
    def generate(self) -> None:
        if not utils.path_exists(self.input_fpath):
            self.logger.debug("Not generating heatmap: %s does not exist",
                              self.input_fpath)
            return

        # Read .csv and create raw heatmap from default configuration
        data_df = storage.DataFrameReader('storage.csv')(self.input_fpath)
        self._plot_df(data_df, self.output_fpath)
Code example #5
    def _read_models(self) -> tp.Tuple[tp.Optional[pd.DataFrame], tp.List[str]]:
        if self.model_root is not None:
            model_fpath = os.path.join(
                self.model_root, self.input_stem + '.model')
            model_legend_fpath = os.path.join(
                self.model_root, self.input_stem + '.legend')
            if utils.path_exists(model_fpath):
                model = storage.DataFrameReader('storage.csv')(model_fpath)
                if utils.path_exists(model_legend_fpath):
                    with open(model_legend_fpath, 'r') as f:
                        model_legend = f.read().splitlines()
                else:
                    self.logger.warning(
                        "No legend file for model '%s' found", model_fpath)
                    model_legend = ['Model Prediction']

                return (model, model_legend)

        return (None, [])
Code example #6
    def from_def(self, exp_def: xml.XMLLuigi):
        """
        Given a :class:`~sierra.core.xml.XMLLuigi` object containing all changes
        that should be made to all runs in the experiment, generate additional
        changes to create a set of unique runs from which distributions of
        swarm behavior can be meaningfully computed post-hoc.

        Writes out all experiment input files to the filesystem.

        """
        # Clear out commands file if it exists
        configurer = platform.ExpConfigurer(self.cmdopts)
        commands_fpath = self.commands_fpath + \
            config.kGNUParallel['cmdfile_ext']
        if configurer.cmdfile_paradigm() == 'per-exp' and utils.path_exists(
                commands_fpath):
            os.remove(commands_fpath)

        n_robots = utils.get_n_robots(self.criteria.main_config, self.cmdopts,
                                      self.exp_input_root, exp_def)

        generator = platform.ExpRunShellCmdsGenerator(self.cmdopts,
                                                      self.criteria, n_robots,
                                                      self.exp_num)

        # Create all experimental runs
        for run_num in range(self.cmdopts['n_runs']):
            per_run = copy.deepcopy(exp_def)
            self._create_exp_run(per_run, generator, run_num,
                                 self.random_seeds)

        # Perform experiment level configuration AFTER all runs have been
        # generated in the experiment, in case the configuration depends on the
        # generated launch files.
        platform.ExpConfigurer(self.cmdopts).for_exp(self.exp_input_root)

        # Save seeds
        if not utils.path_exists(self.seeds_fpath) or not self.preserve_seeds:
            if utils.path_exists(self.seeds_fpath):
                os.remove(self.seeds_fpath)
            with open(self.seeds_fpath, 'ab') as f:
                pickle.dump(self.random_seeds, f)
Code example #7
File: plugin.py Project: swarm-robotics/sierra
    def __call__(self, args: argparse.Namespace) -> None:
        if args.nodefile is None:
            assert 'SIERRA_NODEFILE' in os.environ,\
                "Non-Adhoc environment detected: 'SIERRA_NODEFILE' not found"
            args.nodefile = os.environ['SIERRA_NODEFILE']

        assert utils.path_exists(args.nodefile), \
            f"SIERRA_NODEFILE '{args.nodefile}' does not exist"

        assert not args.platform_vc,\
            "Platform visual capture not supported on Adhoc"
Code example #8
    def _read_stats(self) -> tp.Dict[str, pd.DataFrame]:
        dfs = {}
        if self.stats in ['conf95', 'all']:
            stddev_ipath = os.path.join(self.stats_root,
                                        self.input_stem + config.kStatsExtensions['stddev'])
            if utils.path_exists(stddev_ipath):
                dfs['stddev'] = storage.DataFrameReader(
                    'storage.csv')(stddev_ipath)
            else:
                self.logger.warning(
                    "Stddev file not found for '%s'", self.input_stem)

        return dfs
Code example #9
    def _accum_df(self, ipath: str, opath: str,
                  src_stem: str) -> tp.Optional[pd.DataFrame]:
        if utils.path_exists(opath):
            cum_df = storage.DataFrameReader('storage.csv')(opath)
        else:
            cum_df = None

        if utils.path_exists(ipath):
            t = storage.DataFrameReader('storage.csv')(ipath)
            if cum_df is None:
                cum_df = pd.DataFrame(columns=t.columns)

            if len(t.index) != 1:
                self.logger.warning(
                    "'%s.csv' is a collated inter-experiment csv, not a summary inter-experiment csv:  # rows %s != 1",
                    src_stem, len(t.index))
                self.logger.warning("Truncating '%s.csv' to last row",
                                    src_stem)

            cum_df = cum_df.append(t.loc[t.index[-1], t.columns.to_list()])
            return cum_df

        return None
Code example #10
    def __call__(self, args: argparse.Namespace) -> None:
        if args.nodefile is None:
            assert 'SIERRA_NODEFILE' in os.environ,\
                ("Non-robots.turtlebot3 environment detected: --nodefile not "
                 "passed and 'SIERRA_NODEFILE' not found")
            args.nodefile = os.environ['SIERRA_NODEFILE']

        assert utils.path_exists(args.nodefile), \
            f"SIERRA_NODEFILE '{args.nodefile}' does not exist"
        self.logger.info("Using '%s' as robot hostnames file", args.nodefile)

        assert not args.platform_vc,\
            "Platform visual capture not supported on robots.turtlebot3"
Code example #11
    def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria,
                 template_input_file: str, exp_input_root: str,
                 exp_output_root: str, exp_num: int) -> None:

        # Get the main name and extension of the config file (without the
        # full absolute path)
        self.main_input_name, self.main_input_extension = os.path.splitext(
            os.path.basename(os.path.abspath(template_input_file)))

        # where the generated config and command files should be stored
        self.exp_input_root = os.path.abspath(exp_input_root)

        self.exp_output_root = os.path.abspath(exp_output_root)
        self.cmdopts = cmdopts
        self.criteria = criteria
        self.exp_num = exp_num
        self.logger = logging.getLogger(__name__)

        # If random seeds were previously generated, use them if configured
        self.seeds_fpath = os.path.join(self.exp_input_root,
                                        config.kRandomSeedsLeaf)
        self.preserve_seeds = not self.cmdopts['no_preserve_seeds']
        self.random_seeds = None

        if self.preserve_seeds:
            if utils.path_exists(self.seeds_fpath):
                with open(self.seeds_fpath, 'rb') as f:
                    self.random_seeds = pickle.load(f)

        if self.random_seeds is not None:
            if len(self.random_seeds) == self.cmdopts['n_runs']:
                self.logger.debug("Using existing random seeds for experiment")
            else:
                # OK to overwrite the saved random seeds--they changed the
                # experiment definition.
                self.logger.warning(("Experiment definition changed: # random "
                                     "seeds (%s) != --n-runs (%s): create new "
                                     "seeds"), len(self.random_seeds),
                                    self.cmdopts['n_runs'])
                self.preserve_seeds = False

        if not self.preserve_seeds or self.random_seeds is None:
            self.logger.debug("Generating new random seeds for experiment")
            self.random_seeds = random.sample(range(0, int(time.time())),
                                              self.cmdopts["n_runs"])

        # where the commands file will be stored
        self.commands_fpath = os.path.join(self.exp_input_root,
                                           config.kGNUParallel['cmdfile_stem'])
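The seed policy in this constructor reduces to: reuse the pickled seeds only when preservation is enabled and their count still matches ``--n-runs``; otherwise draw fresh ones. The same decision as a standalone sketch (hypothetical helper, same logic):

import random
import time
import typing as tp

def resolve_seeds(saved: tp.Optional[tp.List[int]],
                  n_runs: int,
                  preserve: bool) -> tp.List[int]:
    """Reuse saved seeds when still valid; otherwise sample new ones."""
    if preserve and saved is not None and len(saved) == n_runs:
        return saved
    # As in the example: n_runs unique seeds drawn from [0, now).
    return random.sample(range(0, int(time.time())), n_runs)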
Code example #12
    def initialize(self, project: str, search_path: tp.List[str]) -> None:
        self.logger.debug("Initializing with plugin search path %s",
                          search_path)
        for d in search_path:
            project_path = os.path.join(d, project)

            if utils.path_exists(project_path):
                project_plugin = ProjectPluginManager(d, project)
                self.components.append(project_plugin)
            else:
                pipeline_plugin = DirectoryPluginManager(d)
                self.components.append(pipeline_plugin)

        for c in self.components:
            c.initialize(project)
Code example #13
    def _accum_df_by_col(self,
                         ipath: str,
                         opath: str,
                         all_cols: tp.List[str],
                         col_index: int,
                         inc_exps: tp.Optional[str]) -> tp.Optional[pd.DataFrame]:
        if utils.path_exists(opath):
            cum_df = storage.DataFrameReader('storage.csv')(opath)
        else:
            cum_df = None

        if utils.path_exists(ipath):
            t = storage.DataFrameReader('storage.csv')(ipath)

            if inc_exps is not None:
                cols_from_index = utils.exp_include_filter(
                    inc_exps, list(t.index), self.n_exp)
            else:
                cols_from_index = slice(None, None, None)

            if cum_df is None:
                cum_df = pd.DataFrame(columns=all_cols)

            # We need to turn each column of the .csv on the filesystem into a
            # row in the .csv which we want to write out, so we transpose, fix
            # the index, and then set the columns of the new transposed
            # dataframe.
            tp_df = t.transpose()
            tp_df = tp_df.reset_index(drop=True)
            tp_df = tp_df[cols_from_index]
            tp_df.columns = all_cols

            cum_df = cum_df.append(tp_df.loc[col_index, :])
            return cum_df

        return None
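The transpose step is the core of this function: each column of the on-disk ``.csv`` becomes one row of the collated output. A tiny self-contained demonstration with made-up data:

import pandas as pd

t = pd.DataFrame({'exp0': [1, 2], 'exp1': [3, 4]})
tp_df = t.transpose()             # columns become rows
tp_df = tp_df.reset_index(drop=True)
tp_df.columns = ['c0', 'c1']      # relabel the transposed columns
print(tp_df)
#    c0  c1
# 0   1   2   <- was column 'exp0'
# 1   3   4   <- was column 'exp1'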
Code example #14
    def generate(self) -> None:
        input_fpath = os.path.join(self.stats_root, self.input_stem +
                                   config.kStatsExtensions['mean'])
        if not utils.path_exists(input_fpath):
            self.logger.debug("Not generating %s: %s does not exist",
                              self.output_fpath,
                              input_fpath)
            return

        data_df = storage.DataFrameReader('storage.csv')(input_fpath)

        model = self._read_models()
        stat_dfs = self._read_stats()

        # Plot specified columns from dataframe.
        if self.cols is None:
            ncols = max(1, int(len(data_df.columns) / 2.0))
            ax = self._plot_selected_cols(
                data_df, stat_dfs, data_df.columns, model)
        else:
            ncols = max(1, int(len(self.cols) / 2.0))
            ax = self._plot_selected_cols(data_df, stat_dfs, self.cols, model)

        self._plot_ticks(ax)

        self._plot_legend(ax, model[1], ncols)

        # Add title
        ax.set_title(self.title, fontsize=self.text_size['title'])

        # Add X,Y labels
        if self.xlabel is not None:
            ax.set_xlabel(self.xlabel, fontsize=self.text_size['xyz_label'])

        if self.ylabel is not None:
            ax.set_ylabel(self.ylabel, fontsize=self.text_size['xyz_label'])

        # Output figure
        fig = ax.get_figure()
        fig.set_size_inches(config.kGraphBaseSize,
                            config.kGraphBaseSize)
        fig.savefig(self.output_fpath, bbox_inches='tight',
                    dpi=config.kGraphDPI)
        # Prevent memory accumulation (fig.clf() does not close everything)
        plt.close(fig)
Code example #15
    def _gen_csvs_for_2D_or_3D(self,
                               cmdopts: types.Cmdopts,
                               batch_leaf: str,
                               controller: str,
                               src_stem: str,
                               dest_stem: str) -> None:
        """Helper function for generating a set of .csv files for use in intra-scenario
        graph generation (1 per controller) for 2D/3D comparison types. Because
        each ``.csv`` file corresponding to performance measures are 2D arrays,
        we actually just copy and rename the performance measure ``.csv`` files
        for each controllers into :attr:`cc_csv_root`.

        :class:`~sierra.core.graphs.stacked_surface_graph.StackedSurfaceGraph`
        expects an ``_[0-9]+.csv`` pattern for each 2D surfaces to graph in
        order to disambiguate which files belong to which controller without
        having the controller name in the filepath (contains dots), so we do
        that here. :class:`~sierra.core.graphs.heatmap.Heatmap` does not require
        that, but for the heatmap set we generate it IS helpful to have an easy
        way to differentiate primary vs. other controllers, so we do it
        unconditionally here to handle both cases.

        """
        self.logger.debug("Gathering data for '%s' from %s -> %s",
                          controller, src_stem, dest_stem)

        csv_ipath = os.path.join(
            cmdopts['batch_stat_collate_root'], src_stem + ".csv")

        # Some experiments might not generate the necessary performance measure .csvs for
        # graph generation, which is OK.
        if not utils.path_exists(csv_ipath):
            self.logger.warning(
                "%s missing for controller '%s'", csv_ipath, controller)
            return

        df = storage.DataFrameReader('storage.csv')(csv_ipath)

        opath_leaf = LeafGenerator.from_batch_leaf(batch_leaf,
                                                   dest_stem,
                                                   [self.controllers.index(controller)])

        csv_opath_stem = os.path.join(self.cc_csv_root, opath_leaf)
        storage.DataFrameWriter('storage.csv')(
            df, csv_opath_stem + '.csv', index=False)
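A hypothetical sketch of the ``_[0-9]+`` leaf naming the docstring describes (``LeafGenerator``'s real implementation is not shown here; this helper and its output format are assumptions):

import typing as tp

def numbered_leaf(batch_leaf: str, dest_stem: str,
                  indices: tp.Optional[tp.List[int]]) -> str:
    """Hypothetical: encode controller indices as _<i> suffixes."""
    suffix = ''.join('_{0}'.format(i) for i in indices or [])
    return dest_stem + '-' + batch_leaf + suffix

print(numbered_leaf('c1', 'pm', [0]))  # -> pm-c1_0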
Code example #16
    def generate(self) -> None:
        input_fpath = os.path.join(self.stats_root, self.input_stem +
                                   config.kStatsExtensions['mean'])
        if not utils.path_exists(input_fpath):
            self.logger.debug("Not generating %s: %s does not exist",
                              self.output_fpath,
                              input_fpath)
            return

        data_dfy = storage.DataFrameReader('storage.csv')(input_fpath)
        model = self._read_models()

        fig, ax = plt.subplots()

        # Plot lines
        self._plot_lines(data_dfy, model)

        # Add legend
        self._plot_legend(model)

        # Add statistics according to configuration
        stat_dfs = self._read_stats()
        self._plot_stats(ax, self.xticks, data_dfy, stat_dfs)

        # Add X,Y labels
        plt.ylabel(self.ylabel, fontsize=self.text_size['xyz_label'])
        plt.xlabel(self.xlabel, fontsize=self.text_size['xyz_label'])

        # Add ticks
        self._plot_ticks(ax)

        # Add title
        plt.title(self.title, fontsize=self.text_size['title'])

        # Output figure
        fig = ax.get_figure()
        fig.set_size_inches(config.kGraphBaseSize,
                            config.kGraphBaseSize)
        fig.savefig(self.output_fpath, bbox_inches='tight',
                    dpi=config.kGraphDPI)
        # Prevent memory accumulation (fig.clf() does not close everything)
        plt.close(fig)
Code example #17
    def _collate_exp(self, target: dict, exp_dir: str,
                     stats: tp.List[BivarGraphCollationInfo]) -> None:
        exp_stat_root = os.path.join(self.cmdopts['batch_stat_root'], exp_dir)

        for stat in stats:
            csv_ipath = os.path.join(exp_stat_root,
                                     target['src_stem'] + stat.df_ext)
            if not utils.path_exists(csv_ipath):
                stat.all_srcs_exist = False
                continue

            stat.some_srcs_exist = True

            data_df = storage.DataFrameReader('storage.csv')(csv_ipath)

            assert target['col'] in data_df.columns.values,\
                "{0} not in columns of {1}, which has {2}".format(target['col'],
                                                                  csv_ipath,
                                                                  data_df.columns)
            xlabel, ylabel = exp_dir.split('+')
            stat.df.loc[xlabel, ylabel] = data_df[target['col']].to_numpy()
Code example #18
    def _collate_exp(self, target: dict, exp_dir: str,
                     stats: tp.List[UnivarGraphCollationInfo]) -> None:
        exp_stat_root = os.path.join(self.cmdopts['batch_stat_root'], exp_dir)

        for stat in stats:
            csv_ipath = os.path.join(exp_stat_root,
                                     target['src_stem'] + stat.df_ext)
            if not utils.path_exists(csv_ipath):
                stat.all_srcs_exist = False
                continue

            stat.some_srcs_exist = True

            data_df = storage.DataFrameReader('storage.csv')(csv_ipath)

            assert target['col'] in data_df.columns.values,\
                "{0} not in columns of {1}".format(target['col'],
                                                   target['src_stem'] + stat.df_ext)

            if target.get('summary', False):
                stat.df.loc[0, exp_dir] = data_df.loc[data_df.index[-1],
                                                      target['col']]
            else:
                stat.df[exp_dir] = data_df[target['col']]
Code example #19
    def _gen_csvs_for_1D(self,
                         cmdopts: types.Cmdopts,
                         criteria: bc.IConcreteBatchCriteria,
                         batch_leaf: str,
                         controller: str,
                         src_stem: str,
                         dest_stem: str,
                         primary_axis: int,
                         inc_exps: tp.Optional[str]) -> None:
        """Helper function for generating a set of .csv files for use in intra-scenario
        graph generation. Because we are targeting linegraphs, we draw the the
        i-th row/col (as configured) from the performance results of each
        controller .csv, and concatenate them into a new .csv file which can be
        given to
        :class:`~sierra.core.graphs.summary_line_graph.SummaryLineGraph`.

        """
        self.logger.debug("Gathering data for '%s' from %s -> %s",
                          controller, src_stem, dest_stem)

        csv_ipath = os.path.join(
            cmdopts['batch_stat_collate_root'], src_stem + ".csv")

        # Some experiments might not generate the necessary performance measure .csvs for
        # graph generation, which is OK.
        if not utils.path_exists(csv_ipath):
            self.logger.warning(
                "%s missing for controller '%s'", csv_ipath, controller)
            return

        if primary_axis == 0:
            preparer = StatsPreparer(ipath_stem=cmdopts['batch_stat_collate_root'],
                                     ipath_leaf=src_stem,
                                     opath_stem=self.cc_csv_root,
                                     n_exp=criteria.criteria2.n_exp())

            n_rows = len(storage.DataFrameReader(
                'storage.csv')(csv_ipath).index)
            for i in range(0, n_rows):
                opath_leaf = LeafGenerator.from_batch_leaf(
                    batch_leaf, dest_stem, [i])
                preparer.across_rows(opath_leaf=opath_leaf,
                                     index=i, inc_exps=inc_exps)
        else:
            preparer = StatsPreparer(ipath_stem=cmdopts['batch_stat_collate_root'],
                                     ipath_leaf=src_stem,
                                     opath_stem=self.cc_csv_root,
                                     n_exp=criteria.criteria1.n_exp())

            exp_dirs = criteria.gen_exp_dirnames(cmdopts)
            xlabels, ylabels = utils.bivar_exp_labels_calc(exp_dirs)
            xlabels = utils.exp_include_filter(
                inc_exps, xlabels, criteria.criteria1.n_exp())

            for col_index in range(len(ylabels)):
                opath_leaf = LeafGenerator.from_batch_leaf(
                    batch_leaf, dest_stem, [col_index])
                preparer.across_cols(opath_leaf=opath_leaf,
                                     col_index=col_index,
                                     all_cols=xlabels,
                                     inc_exps=inc_exps)
Code example #20
    def _gen_csv(self, cmdopts: types.Cmdopts, batch_leaf: str, src_stem: str,
                 dest_stem: str) -> None:
        """
        Helper function for generating a set of .csv files for use in
        inter-scenario graph generation.

        Generates:

        - ``.csv`` file containing results for each scenario the controller is
          being compared across, 1 per line.

        - ``.stddev`` file containing stddev for the generated ``.csv`` file, 1
          per line.

        - ``.model`` file containing model predictions for controller behavior
          during each scenario, 1 per line (not generated if models were not
          run for the performance measures we are generating graphs for).

        - ``.legend`` file containing legend values for models to plot (not
          generated if models were not run for the performance measures we are
          generating graphs for).

        """

        csv_ipath = os.path.join(cmdopts['batch_output_root'],
                                 cmdopts['batch_stat_collate_root'],
                                 src_stem + ".csv")
        stddev_ipath = os.path.join(cmdopts['batch_output_root'],
                                    cmdopts['batch_stat_collate_root'],
                                    src_stem + ".stddev")

        model_ipath_stem = os.path.join(cmdopts['batch_model_root'], src_stem)
        model_opath_stem = os.path.join(self.sc_model_root,
                                        dest_stem + "-" + self.controller)

        opath_stem = os.path.join(self.sc_csv_root,
                                  dest_stem + "-" + self.controller)

        # Some experiments might not generate the necessary performance measure
        # .csvs for graph generation, which is OK.
        if not utils.path_exists(csv_ipath):
            self.logger.warning("%s missing for controller %s", csv_ipath,
                                self.controller)
            return

        # Collect performance measure results. Append to existing dataframe if
        # it exists, otherwise start a new one.
        data_df = self._accum_df(csv_ipath, opath_stem + '.csv', src_stem)
        storage.DataFrameWriter('storage.csv')(data_df,
                                               opath_stem + '.csv',
                                               index=False)

        # Collect performance results stddev. Append to existing dataframe if it
        # exists, otherwise start a new one.
        stddev_df = self._accum_df(stddev_ipath, opath_stem + '.stddev',
                                   src_stem)
        if stddev_df is not None:
            storage.DataFrameWriter('storage.csv')(stddev_df,
                                                   opath_stem + '.stddev',
                                                   index=False)

        # Collect performance results models and legends. Append to existing
        # dataframes if they exist, otherwise start new ones.
        model_df = self._accum_df(model_ipath_stem + '.model',
                                  model_opath_stem + '.model', src_stem)
        if model_df is not None:
            storage.DataFrameWriter('storage.csv')(model_df,
                                                   model_opath_stem + '.model',
                                                   index=False)
            with open(model_opath_stem + '.legend', 'a') as f:
                _, scenario, _ = rdg.parse_batch_leaf(batch_leaf)
                sgp = pm.module_load_tiered(
                    project=cmdopts['project'],
                    path='generators.scenario_generator_parser')
                kw = sgp.ScenarioGeneratorParser().to_dict(scenario)
                f.write("{0} Prediction\n".format(kw['scenario_tag']))
Code example #21
def model_exists(exp_model_root: str, target_stem: str) -> bool:
    return utils.path_exists(os.path.join(exp_model_root,
                                          target_stem + '.model'))
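A one-line usage sketch (hypothetical root and stem):

if model_exists('/path/to/exp/models', 'blocks-transported'):
    ...  # blocks-transported.model is safe to read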
Code example #22
    def _read_stats(self) -> tp.Dict[str, pd.DataFrame]:
        dfs = {}

        if self.stats == 'conf95' or self.stats == 'all':
            stddev_ipath = os.path.join(self.stats_root,
                                        self.input_stem + config.kStatsExtensions['stddev'])

            if utils.path_exists(stddev_ipath):
                dfs['stddev'] = storage.DataFrameReader(
                    'storage.csv')(stddev_ipath)
            else:
                self.logger.warning(
                    "stddev file not found for '%s'", self.input_stem)

        if self.stats == 'bw' or self.stats == 'all':
            whislo_ipath = os.path.join(self.stats_root,
                                        self.input_stem + config.kStatsExtensions['whislo'])
            whishi_ipath = os.path.join(self.stats_root,
                                        self.input_stem + config.kStatsExtensions['whishi'])
            median_ipath = os.path.join(self.stats_root,
                                        self.input_stem + config.kStatsExtensions['median'])
            q1_ipath = os.path.join(self.stats_root,
                                    self.input_stem + config.kStatsExtensions['q1'])
            q3_ipath = os.path.join(self.stats_root,
                                    self.input_stem + config.kStatsExtensions['q3'])

            cihi_ipath = os.path.join(self.stats_root,
                                      self.input_stem + config.kStatsExtensions['cihi'])
            cilo_ipath = os.path.join(self.stats_root,
                                      self.input_stem + config.kStatsExtensions['cilo'])

            if utils.path_exists(whislo_ipath):
                dfs['whislo'] = storage.DataFrameReader(
                    'storage.csv')(whislo_ipath)
            else:
                self.logger.warning(
                    "whislo file not found for '%s'", self.input_stem)

            if utils.path_exists(whishi_ipath):
                dfs['whishi'] = storage.DataFrameReader(
                    'storage.csv')(whishi_ipath)
            else:
                self.logger.warning(
                    "whishi file not found for '%s'", self.input_stem)

            if utils.path_exists(cilo_ipath):
                dfs['cilo'] = storage.DataFrameReader('storage.csv')(cilo_ipath)
            else:
                self.logger.warning(
                    "cilo file not found for '%s'", self.input_stem)

            if utils.path_exists(cihi_ipath):
                dfs['cihi'] = storage.DataFrameReader('storage.csv')(cihi_ipath)
            else:
                self.logger.warning(
                    "cihi file not found for '%s'", self.input_stem)

            if utils.path_exists(median_ipath):
                dfs['median'] = storage.DataFrameReader(
                    'storage.csv')(median_ipath)
            else:
                self.logger.warning(
                    "median file not found for '%s'", self.input_stem)

            if utils.path_exists(q1_ipath):
                dfs['q1'] = storage.DataFrameReader('storage.csv')(q1_ipath)
            else:
                self.logger.warning(
                    "q1 file not found for '%s'", self.input_stem)

            if utils.path_exists(q3_ipath):
                dfs['q3'] = storage.DataFrameReader('storage.csv')(q3_ipath)
            else:
                self.logger.warning(
                    "q3 file not found for '%s'", self.input_stem)

        return dfs
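The box-and-whiskers branch repeats the same load-or-warn step seven times. A hedged refactoring sketch that drives the loads from a list of stat keys (assuming the same ``config.kStatsExtensions`` mapping and the surrounding class attributes):

    def _read_stats_compact(self) -> tp.Dict[str, pd.DataFrame]:
        dfs = {}

        wanted = []
        if self.stats in ('conf95', 'all'):
            wanted += ['stddev']
        if self.stats in ('bw', 'all'):
            wanted += ['whislo', 'whishi', 'cilo', 'cihi',
                       'median', 'q1', 'q3']

        for key in wanted:
            ipath = os.path.join(
                self.stats_root,
                self.input_stem + config.kStatsExtensions[key])
            if utils.path_exists(ipath):
                dfs[key] = storage.DataFrameReader('storage.csv')(ipath)
            else:
                self.logger.warning("%s file not found for '%s'",
                                    key, self.input_stem)

        return dfs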