Example #1
    def __init__(self, filename_or_stream, query=""):
        """Read from Caliper files (`cali` or split JSON).

        Args:
            filename_or_stream (str or file-like): name of a `cali` or
                `cali-query` split JSON file, OR an open file object
            query (str): cali-query arguments (for cali file)
        """
        self.filename_or_stream = filename_or_stream
        self.filename_ext = ""
        self.query = query

        self.json_data = {}
        self.json_cols = {}
        self.json_cols_mdata = {}
        self.json_nodes = {}

        self.idx_to_label = {}
        self.idx_to_node = {}

        self.timer = Timer()
        self.nid_col_name = "nid"

        if isinstance(self.filename_or_stream, str):
            _, self.filename_ext = os.path.splitext(filename_or_stream)
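
A minimal usage sketch for the constructor above (it belongs to Hatchet's CaliperReader class, shown in full in Example #4); the input file name is hypothetical:

# Hypothetical split-JSON input produced by cali-query.
reader = CaliperReader("lulesh-sample.json")
print(reader.filename_ext)    # ".json"
print(reader.nid_col_name)    # "nid"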
Example #2
    def __init__(self, dir_name):
        # this is the name of the HPCToolkit database directory. The directory
        # contains an experiment.xml and some metric-db files
        self.dir_name = dir_name

        root = ET.parse(self.dir_name + "/experiment.xml").getroot()
        self.loadmodule_table = next(root.iter("LoadModuleTable"))
        self.file_table = next(root.iter("FileTable"))
        self.procedure_table = next(root.iter("ProcedureTable"))
        self.metricdb_table = next(root.iter("MetricDBTable"))
        self.callpath_profile = next(root.iter("SecCallPathProfileData"))

        # For a parallel run, there should be one metric-db file per MPI
        # process
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        self.num_metricdb_files = len(metricdb_files)

        # We need to know how many threads there are per rank. Count the
        # thread 0 metric-db files (i.e., the number of ranks), then divide
        # the total number of metric-db files by that count.
        metricdb_numranks_files = glob.glob(self.dir_name +
                                            "/*-000-*.metric-db")
        self.num_ranks = len(metricdb_numranks_files)
        self.num_threads_per_rank = int(self.num_metricdb_files /
                                        len(metricdb_numranks_files))

        self.num_cpu_threads_per_rank = self.count_cpu_threads_per_rank()

        # Read one metric-db file to extract the number of nodes in the CCT
        # and the number of metrics
        with open(metricdb_files[0], "rb") as metricdb:
            metricdb.read(18)  # skip tag
            metricdb.read(5)  # skip version TODO: should we?
            endian = metricdb.read(1)

            if endian == b"b":
                self.num_nodes = struct.unpack(">i", metricdb.read(4))[0]
                self.num_metrics = struct.unpack(">i", metricdb.read(4))[0]
            else:
                raise ValueError(
                    "HPCToolkitReader doesn't support endian '%s'" % endian)

        self.load_modules = {}
        self.src_files = {}
        self.procedure_names = {}
        self.metric_names = {}

        # this list of dicts will hold all the node information such as
        # procedure name, load module, filename, etc. for all the nodes
        self.node_dicts = []

        self.timer = Timer()
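
The constructor above derives rank and thread counts purely from file names: thread-0 metric-db files give the number of ranks, and the total file count divided by that gives threads per rank. A standalone sketch of that arithmetic, with a hypothetical database directory:

import glob
import os

# Hypothetical HPCToolkit database directory containing *.metric-db files.
dir_name = "hpctoolkit-app-database"
all_files = glob.glob(os.path.join(dir_name, "*.metric-db"))
thread0_files = glob.glob(os.path.join(dir_name, "*-000-*.metric-db"))

num_ranks = len(thread0_files)                       # one thread-0 file per rank
threads_per_rank = len(all_files) // num_ranks if num_ranks else 0
print(num_ranks, threads_per_rank)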
Example #3
    def __init__(self, filename_or_caliperreader):
        """Read in a native cali with Caliper's python reader.

        Args:
            filename_or_caliperreader (str or CaliperReader): name of a `cali` file OR
                a CaliperReader object
        """
        self.filename_or_caliperreader = filename_or_caliperreader
        self.filename_ext = ""

        self.metric_columns = set()
        self.node_dicts = []

        self.timer = Timer()

        if isinstance(self.filename_or_caliperreader, str):
            _, self.filename_ext = os.path.splitext(filename_or_caliperreader)
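
The two accepted argument types for the constructor above (this is CaliperNativeReader's __init__, shown in full in Example #7); the file name is hypothetical and `cr` refers to the caliper-reader package as imported in Example #7:

# Option 1: hand the reader a path to a native .cali file.
native_reader = CaliperNativeReader("lulesh-sample.cali")

# Option 2: hand it an already-populated CaliperReader from caliper-reader.
# cali = cr.CaliperReader()
# cali.read("lulesh-sample.cali")
# native_reader = CaliperNativeReader(cali)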
Example #4
class CaliperReader:
    """Read in a Caliper file (`cali` or split JSON) or file-like object."""
    def __init__(self, filename_or_stream, query=""):
        """Read from Caliper files (`cali` or split JSON).

        Args:
            filename_or_stream (str or file-like): name of a `cali` or
                `cali-query` split JSON file, OR an open file object
            query (str): cali-query arguments (for cali file)
        """
        self.filename_or_stream = filename_or_stream
        self.filename_ext = ""
        self.query = query

        self.json_data = {}
        self.json_cols = {}
        self.json_cols_mdata = {}
        self.json_nodes = {}

        self.idx_to_label = {}
        self.idx_to_node = {}

        self.timer = Timer()
        self.nid_col_name = "nid"

        if isinstance(self.filename_or_stream, str):
            _, self.filename_ext = os.path.splitext(filename_or_stream)

    def read_json_sections(self):
        # if cali-query exists, extract data from .cali to a file-like object
        if self.filename_ext == ".cali":
            cali_query = which("cali-query")
            if not cali_query:
                raise ValueError(
                    "from_caliper() needs cali-query to query .cali file")
            cali_json = subprocess.Popen(
                [cali_query, "-q", self.query, self.filename_or_stream],
                stdout=subprocess.PIPE,
            )
            self.filename_or_stream = cali_json.stdout

        # if filename_or_stream is a str, then open the file, otherwise
        # directly load the file-like object
        if isinstance(self.filename_or_stream, str):
            with open(self.filename_or_stream) as cali_json:
                json_obj = json.load(cali_json)
        else:
            json_obj = json.loads(
                self.filename_or_stream.read().decode("utf-8"))

        # read various sections of the Caliper JSON file
        self.json_data = json_obj["data"]
        self.json_cols = json_obj["columns"]
        self.json_cols_mdata = json_obj["column_metadata"]
        self.json_nodes = json_obj["nodes"]

        # decide which column to use as the primary path hierarchy
        # first preference to callpath if available
        if "source.function#callpath.address" in self.json_cols:
            self.path_col_name = "source.function#callpath.address"
            self.node_type = "function"
        elif "path" in self.json_cols:
            self.path_col_name = "path"
            self.node_type = "region"
        else:
            sys.exit("No hierarchy column in input file")

        # remove data entries whose `path` column is None (null in the JSON
        # file): first find the index of the `path` column, then scan the
        # json_data list of lists for rows with None in that column
        path_col = self.json_cols.index(self.path_col_name)
        entries_to_remove = []
        for sublist in self.json_data:
            if sublist[path_col] is None:
                entries_to_remove.append(sublist)
        # then, remove them from the json_data list
        for i in entries_to_remove:
            self.json_data.remove(i)

        # change column names
        for idx, item in enumerate(self.json_cols):
            if item == self.path_col_name:
                # this column is just a pointer into the nodes section
                self.json_cols[idx] = self.nid_col_name
            # make other columns consistent with other readers
            if item == "mpi.rank":
                self.json_cols[idx] = "rank"
            if item == "module#cali.sampler.pc":
                self.json_cols[idx] = "module"
            if item == "sum#time.duration" or item == "sum#avg#sum#time.duration":
                self.json_cols[idx] = "time"
            if (item == "inclusive#sum#time.duration"
                    or item == "sum#avg#inclusive#sum#time.duration"):
                self.json_cols[idx] = "time (inc)"

        # make list of metric columns
        self.metric_columns = []
        for idx, item in enumerate(self.json_cols_mdata):
            if self.json_cols[idx] != "rank" and item["is_value"] is True:
                self.metric_columns.append(self.json_cols[idx])
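
    # Illustrative (hypothetical) shape of the split JSON consumed above: the
    # top-level object has "data" (rows), "columns" (column names),
    # "column_metadata" (one {"is_value": ...} dict per column), and "nodes"
    # (the path hierarchy, referenced by index from the path column), e.g.
    #   {"data": [[0, 0, 0.25], ...],
    #    "columns": ["path", "mpi.rank", "sum#time.duration"],
    #    "column_metadata": [{"is_value": false}, {"is_value": true},
    #                        {"is_value": true}],
    #    "nodes": [{"label": "main", "column": "path"}, ...]}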

    def create_graph(self):
        list_roots = []

        # find nodes in the nodes section that represent the path hierarchy
        for idx, node in enumerate(self.json_nodes):
            node_label = node["label"]
            self.idx_to_label[idx] = node_label

            if node["column"] == self.path_col_name:
                if "parent" not in node:
                    # since this node does not have a parent, this is a root
                    graph_root = Node(
                        Frame({
                            "type": self.node_type,
                            "name": node_label
                        }), None)
                    list_roots.append(graph_root)

                    node_dict = {
                        self.nid_col_name: idx,
                        "name": node_label,
                        "node": graph_root,
                    }
                    self.idx_to_node[idx] = node_dict
                else:
                    parent_hnode = (self.idx_to_node[node["parent"]])["node"]
                    hnode = Node(
                        Frame({
                            "type": self.node_type,
                            "name": node_label
                        }),
                        parent_hnode,
                    )
                    parent_hnode.add_child(hnode)

                    node_dict = {
                        self.nid_col_name: idx,
                        "name": node_label,
                        "node": hnode,
                    }
                    self.idx_to_node[idx] = node_dict

        return list_roots
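
    # The loop above assumes each entry in the "nodes" section looks roughly
    # like (hypothetical values):
    #   {"label": "solve", "column": "path", "parent": 0}
    # Entries whose "column" matches the chosen path column and that carry no
    # "parent" key become graph roots; every other entry is attached to the
    # Node already created for its "parent" index.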

    def read(self):
        """Read the caliper JSON file to extract the calling context tree."""
        with self.timer.phase("read json"):
            self.read_json_sections()

        with self.timer.phase("graph construction"):
            list_roots = self.create_graph()

        # create a dataframe of metrics from the data section
        self.df_json_data = pd.DataFrame(self.json_data,
                                         columns=self.json_cols)

        # map non-numeric columns to their mappings in the nodes section
        for idx, item in enumerate(self.json_cols_mdata):
            if item["is_value"] is False and self.json_cols[
                    idx] != self.nid_col_name:
                if self.json_cols[idx] == "sourceloc#cali.sampler.pc":
                    # split source file and line number into two columns
                    self.df_json_data["file"] = self.df_json_data[
                        self.json_cols[idx]].apply(
                            lambda x: re.match(r"(.*):(\d+)", self.json_nodes[
                                x]["label"]).group(1))
                    self.df_json_data["line"] = self.df_json_data[
                        self.json_cols[idx]].apply(
                            lambda x: re.match(r"(.*):(\d+)", self.json_nodes[
                                x]["label"]).group(2))
                    self.df_json_data.drop(self.json_cols[idx],
                                           axis=1,
                                           inplace=True)
                    sourceloc_idx = idx
                else:
                    self.df_json_data[self.json_cols[idx]] = self.df_json_data[
                        self.json_cols[idx]].apply(
                            lambda x: self.json_nodes[x]["label"])

        # since we split sourceloc, we should update json_cols and
        # json_cols_mdata
        if "sourceloc#cali.sampler.pc" in self.json_cols:
            self.json_cols.pop(sourceloc_idx)
            self.json_cols_mdata.pop(sourceloc_idx)
            self.json_cols.append("file")
            self.json_cols.append("line")
            self.json_cols_mdata.append({"is_value": False})
            self.json_cols_mdata.append({"is_value": False})

        max_nid = self.df_json_data[self.nid_col_name].max()

        if "line" in self.df_json_data.columns:
            # split nodes that have multiple file:line numbers to have a child
            # each with a unique file:line number
            unique_nodes = self.df_json_data.groupby(self.nid_col_name)
            df_concat = [self.df_json_data]

            for nid, super_node in unique_nodes:
                line_groups = super_node.groupby("line")
                # only need to do something if there is more than one
                # file:line entry for the node
                if len(line_groups.size()) > 1:
                    sn_hnode = self.idx_to_node[nid]["node"]

                    for line, line_group in line_groups:
                        # create the node label
                        file_path = (line_group.head(1))["file"].item()
                        file_name = os.path.basename(file_path)
                        node_label = file_name + ":" + line

                        # create a new hatchet node
                        max_nid += 1
                        idx = max_nid
                        hnode = Node(
                            Frame({
                                "type": "statement",
                                "file": file_path,
                                "line": line
                            }),
                            sn_hnode,
                        )
                        sn_hnode.add_child(hnode)

                        node_dict = {
                            self.nid_col_name: idx,
                            "name": node_label,
                            "node": hnode,
                        }
                        self.idx_to_node[idx] = node_dict

                        # change nid of the original node to new node in place
                        for index, row in line_group.iterrows():
                            self.df_json_data.loc[index, "nid"] = max_nid

                    # add new row for original node
                    node_copy = super_node.head(1).copy()
                    for cols in self.metric_columns:
                        node_copy[cols] = 0
                    df_concat.append(node_copy)

            # concatenate all the newly created dataframes with
            # self.df_json_data
            self.df_fixed_data = pd.concat(df_concat)
        else:
            self.df_fixed_data = self.df_json_data

        # create a dataframe with all nodes in the call graph
        self.df_nodes = pd.DataFrame.from_dict(
            data=list(self.idx_to_node.values()))

        # add missing intermediate nodes to the df_fixed_data dataframe
        if "rank" in self.json_cols:
            self.num_ranks = self.df_fixed_data["rank"].max() + 1
            rank_list = range(0, self.num_ranks)

        # create a standard dict to be used for filling all missing rows
        default_metric_dict = {}
        for idx, item in enumerate(self.json_cols_mdata):
            if self.json_cols[idx] != self.nid_col_name:
                if item["is_value"] is True:
                    default_metric_dict[self.json_cols[idx]] = 0
                else:
                    default_metric_dict[self.json_cols[idx]] = None

        # create a list of dicts, one dict for each missing row
        missing_nodes = []
        for iteridx, row in self.df_nodes.iterrows():
            # check if df_nodes row exists in df_fixed_data
            metric_rows = self.df_fixed_data.loc[self.df_fixed_data[
                self.nid_col_name] == row[self.nid_col_name]]
            if "rank" not in self.json_cols:
                if metric_rows.empty:
                    # add a single row
                    node_dict = dict(default_metric_dict)
                    node_dict[self.nid_col_name] = row[self.nid_col_name]
                    missing_nodes.append(node_dict)
            else:
                if metric_rows.empty:
                    # add a row per MPI rank
                    for rank in rank_list:
                        node_dict = dict(default_metric_dict)
                        node_dict[self.nid_col_name] = row[self.nid_col_name]
                        node_dict["rank"] = rank
                        missing_nodes.append(node_dict)
                elif len(metric_rows) < self.num_ranks:
                    # add a row for each missing MPI rank
                    present_ranks = metric_rows["rank"].values
                    missing_ranks = [
                        x for x in rank_list if x not in present_ranks
                    ]
                    for rank in missing_ranks:
                        node_dict = dict(default_metric_dict)
                        node_dict[self.nid_col_name] = row[self.nid_col_name]
                        node_dict["rank"] = rank
                        missing_nodes.append(node_dict)

        self.df_missing = pd.DataFrame.from_dict(data=missing_nodes)
        self.df_metrics = pd.concat([self.df_fixed_data, self.df_missing])

        # create a graph object once all the nodes have been added
        graph = Graph(list_roots)
        graph.enumerate_traverse()

        # merge the metrics and node dataframes on the nid column
        with self.timer.phase("data frame"):
            dataframe = pd.merge(self.df_metrics,
                                 self.df_nodes,
                                 on=self.nid_col_name)
            # set the index to be a MultiIndex
            indices = ["node"]
            if "rank" in self.json_cols:
                indices.append("rank")
            dataframe.set_index(indices, inplace=True)
            dataframe.sort_index(inplace=True)

        # create list of exclusive and inclusive metric columns
        exc_metrics = []
        inc_metrics = []
        for column in self.metric_columns:
            if "(inc)" in column:
                inc_metrics.append(column)
            else:
                exc_metrics.append(column)

        return hatchet.graphframe.GraphFrame(graph, dataframe, exc_metrics,
                                             inc_metrics)
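
A hedged end-to-end sketch of driving the reader above directly (normally this happens behind GraphFrame.from_caliper); the input file name is hypothetical:

# Hypothetical split-JSON file produced by cali-query.
reader = CaliperReader("lulesh-sample.json")
gf = reader.read()                # hatchet GraphFrame

# The dataframe is indexed by node (and rank, if present in the input).
print(gf.dataframe.head())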
Example #5
class HPCToolkitReader:
    """Read in the various sections of an HPCToolkit experiment.xml file and
    metric-db files.
    """
    def __init__(self, dir_name):
        # this is the name of the HPCToolkit database directory. The directory
        # contains an experiment.xml and some metric-db files
        self.dir_name = dir_name

        root = ET.parse(self.dir_name + "/experiment.xml").getroot()
        self.loadmodule_table = next(root.iter("LoadModuleTable"))
        self.file_table = next(root.iter("FileTable"))
        self.procedure_table = next(root.iter("ProcedureTable"))
        self.metricdb_table = next(root.iter("MetricDBTable"))
        self.callpath_profile = next(root.iter("SecCallPathProfileData"))

        # For a parallel run, there should be one metric-db file per MPI
        # process
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        self.num_metricdb_files = len(metricdb_files)

        # We need to know how many threads there are per rank. Count the
        # thread 0 metric-db files (i.e., the number of ranks), then divide
        # the total number of metric-db files by that count.
        metricdb_numranks_files = glob.glob(self.dir_name +
                                            "/*-000-*.metric-db")
        self.num_ranks = len(metricdb_numranks_files)
        self.num_threads_per_rank = int(self.num_metricdb_files /
                                        len(metricdb_numranks_files))

        self.num_cpu_threads_per_rank = self.count_cpu_threads_per_rank()

        # Read one metric-db file to extract the number of nodes in the CCT
        # and the number of metrics
        with open(metricdb_files[0], "rb") as metricdb:
            metricdb.read(18)  # skip tag
            metricdb.read(5)  # skip version TODO: should we?
            endian = metricdb.read(1)

            if endian == b"b":
                self.num_nodes = struct.unpack(">i", metricdb.read(4))[0]
                self.num_metrics = struct.unpack(">i", metricdb.read(4))[0]
            else:
                raise ValueError(
                    "HPCToolkitReader doesn't support endian '%s'" % endian)

        self.load_modules = {}
        self.src_files = {}
        self.procedure_names = {}
        self.metric_names = {}

        # this list of dicts will hold all the node information such as
        # procedure name, load module, filename, etc. for all the nodes
        self.node_dicts = []

        self.timer = Timer()

    def fill_tables(self):
        """Read certain sections of the experiment.xml file to create dicts of load
        modules, src_files, procedure_names, and metric_names.
        """
        for loadm in (self.loadmodule_table).iter("LoadModule"):
            self.load_modules[loadm.get("i")] = loadm.get("n")

        for filename in (self.file_table).iter("File"):
            self.src_files[filename.get("i")] = filename.get("n")

        for procedure in (self.procedure_table).iter("Procedure"):
            self.procedure_names[procedure.get("i")] = procedure.get("n")

        # store the keys as ints because we sort on keys later
        for metric in (self.metricdb_table).iter("MetricDB"):
            self.metric_names[int(metric.get("i"))] = metric.get("n")

        return (
            self.load_modules,
            self.src_files,
            self.procedure_names,
            self.metric_names,
        )
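
    # Illustrative (hypothetical) experiment.xml entries behind the loops
    # above; only the "i" (index) and "n" (name) attributes are read:
    #   <LoadModule i="2" n="/usr/bin/app"/>
    #   <File i="8" n="./src/solver.c"/>
    #   <Procedure i="12" n="main"/>
    #   <MetricDB i="0" n="CPUTIME (usec) (E)"/>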

    def read_all_metricdb_files(self):
        """Read all the metric-db files and create a dataframe with num_nodes X
        num_metricdb_files rows and num_metrics columns. Three additional columns
        store the node id, MPI process rank, and thread id (if applicable).
        """
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        metricdb_files.sort()

        # All the metric data per node and per process is read into the metrics
        # array below. The three additional columns are for storing the implicit
        # node id (nid), MPI process rank, and thread id (if applicable).
        shape = [
            self.num_nodes * self.num_metricdb_files, self.num_metrics + 3
        ]
        size = int(np.prod(shape))

        # shared memory buffer for multiprocessing
        shared_buffer = mp.sharedctypes.RawArray("d", size)

        pool = mp.Pool(initializer=init_shared_array,
                       initargs=(shared_buffer, ))
        self.metrics = np.frombuffer(shared_buffer).reshape(shape)
        args = [(
            filename,
            self.num_nodes,
            self.num_threads_per_rank,
            self.num_cpu_threads_per_rank,
            self.num_metrics,
            shape,
        ) for filename in metricdb_files]
        try:
            pool.map(read_metricdb_file, args)
        finally:
            pool.close()

        # once all files have been read, create a dataframe of metrics
        metric_names = [
            self.metric_names[key] for key in sorted(self.metric_names.keys())
        ]
        for idx, name in enumerate(metric_names):
            if name == "CPUTIME (usec) (E)" or name == "CPUTIME (sec) (E)":
                metric_names[idx] = "time"
            if name == "CPUTIME (usec) (I)" or name == "CPUTIME (sec) (I)":
                metric_names[idx] = "time (inc)"

        self.metric_columns = metric_names
        df_columns = self.metric_columns + ["nid", "rank", "thread"]
        self.df_metrics = pd.DataFrame(self.metrics, columns=df_columns)
        self.df_metrics["nid"] = self.df_metrics["nid"].astype(int, copy=False)
        self.df_metrics["rank"] = self.df_metrics["rank"].astype(int,
                                                                 copy=False)
        self.df_metrics["thread"] = self.df_metrics["thread"].astype(
            int, copy=False)

        # if number of threads per rank is 1, we do not need to keep the thread ID column
        if self.num_threads_per_rank == 1:
            del self.df_metrics["thread"]

        # used to speed up parse_xml_node
        self.np_metrics = self.df_metrics[self.metric_columns].values

        # compute the total number of execution threads; it is the stride used
        # in subtract_exclusive_metric_vals (num_nodes is already known)
        self.total_execution_threads = self.num_threads_per_rank * self.num_ranks

    def read(self):
        """Read the experiment.xml file to extract the calling context tree and create
        a dataframe out of it. Then merge the two dataframes to create the final
        dataframe.

        Return:
            (GraphFrame): new GraphFrame with HPCToolkit data.
        """
        with self.timer.phase("fill tables"):
            self.fill_tables()

        with self.timer.phase("read metric db"):
            self.read_all_metricdb_files()

        list_roots = []

        # parse the ElementTree to generate a calling context tree
        for root in self.callpath_profile.findall("PF"):
            global src_file

            nid = int(root.get("i"))
            src_file = root.get("f")

            # start with the root and create the callpath and node for the root
            # also a corresponding node_dict to be inserted into the dataframe
            graph_root = Node(
                Frame({
                    "type": "function",
                    "name": self.procedure_names[root.get("n")]
                }),
                None,
            )
            node_dict = self.create_node_dict(
                nid,
                graph_root,
                self.procedure_names[root.get("n")],
                "PF",
                self.src_files[src_file],
                int(root.get("l")),
                self.load_modules[root.get("lm")],
            )

            self.node_dicts.append(node_dict)
            list_roots.append(graph_root)

            # start graph construction at the root
            with self.timer.phase("graph construction"):
                self.parse_xml_children(root, graph_root)

            # put updated metrics back in dataframe
            for i, column in enumerate(self.metric_columns):
                if "(inc)" not in column and "(I)" not in column:
                    self.df_metrics[column] = self.np_metrics.T[i]

        with self.timer.phase("graph construction"):
            graph = Graph(list_roots)
            graph.enumerate_traverse()

        # create a dataframe for all the nodes in the graph
        self.df_nodes = pd.DataFrame.from_dict(data=self.node_dicts)

        # merge the metrics and node dataframes
        with self.timer.phase("data frame"):
            dataframe = pd.merge(self.df_metrics, self.df_nodes, on="nid")

            # set the index to be a MultiIndex
            if self.num_threads_per_rank > 1:
                indices = ["node", "rank", "thread"]
            # if number of threads per rank is 1, do not make thread an index
            elif self.num_threads_per_rank == 1:
                indices = ["node", "rank"]
            dataframe.set_index(indices, inplace=True)
            dataframe.sort_index(inplace=True)

        # create list of exclusive and inclusive metric columns
        exc_metrics = []
        inc_metrics = []
        for column in self.metric_columns:
            if "(inc)" in column or "(I)" in column:
                inc_metrics.append(column)
            else:
                exc_metrics.append(column)

        return hatchet.graphframe.GraphFrame(graph, dataframe, exc_metrics,
                                             inc_metrics)

    def parse_xml_children(self, xml_node, hnode):
        """Parses all children of an XML node."""
        for xml_child in xml_node:
            if xml_child.tag != "M":
                nid = int(xml_node.get("i"))
                line = int(xml_node.get("l"))
                self.parse_xml_node(xml_child, nid, line, hnode)

    def parse_xml_node(self, xml_node, parent_nid, parent_line, hparent):
        """Parses an XML node and its children recursively."""
        nid = int(xml_node.get("i"))

        global src_file
        xml_tag = xml_node.tag

        if xml_tag == "PF" or xml_tag == "Pr":
            # procedure
            name = self.procedure_names[xml_node.get("n")]
            if parent_line != 0:
                name = str(parent_line) + ":" + name
            src_file = xml_node.get("f")
            line = int(xml_node.get("l"))

            hnode = Node(Frame({"type": "function", "name": name}), hparent)
            node_dict = self.create_node_dict(
                nid,
                hnode,
                name,
                xml_tag,
                self.src_files[src_file],
                line,
                self.load_modules[xml_node.get("lm")],
            )

        elif xml_tag == "L":
            # loop
            src_file = xml_node.get("f")
            line = int(xml_node.get("l"))
            name = ("Loop@" + os.path.basename(self.src_files[src_file]) +
                    ":" + str(line))

            hnode = Node(
                Frame({
                    "type": "loop",
                    "file": self.src_files[src_file],
                    "line": line
                }),
                hparent,
            )
            node_dict = self.create_node_dict(nid, hnode, name, xml_tag,
                                              self.src_files[src_file], line,
                                              None)

        elif xml_tag == "S":
            # statement
            line = int(xml_node.get("l"))
            # this might not be required for resolving conflicts
            name = os.path.basename(self.src_files[src_file]) + ":" + str(line)

            hnode = Node(
                Frame({
                    "type": "statement",
                    "file": self.src_files[src_file],
                    "line": line,
                }),
                hparent,
            )
            node_dict = self.create_node_dict(nid, hnode, name, xml_tag,
                                              self.src_files[src_file], line,
                                              None)

            # when we reach statement nodes, we subtract their exclusive
            # metric values from the parent's values
            for i, column in enumerate(self.metric_columns):
                if "(inc)" not in column and "(I)" not in column:
                    _crm.subtract_exclusive_metric_vals(
                        nid,
                        parent_nid,
                        self.np_metrics.T[i],
                        self.total_execution_threads,
                        self.num_nodes,
                    )

        if xml_tag == "C" or (xml_tag == "Pr" and
                              self.procedure_names[xml_node.get("n")] == ""):
            # do not add a node to the graph if the xml_tag is a callsite
            # or if it's a procedure with no name
            # for Prs, the preceding Pr has the calling line number, and for
            # PFs, the preceding C has the line number
            line = int(xml_node.get("l"))
            self.parse_xml_children(xml_node, hparent)
        else:
            self.node_dicts.append(node_dict)
            hparent.add_child(hnode)
            self.parse_xml_children(xml_node, hnode)

    def create_node_dict(self, nid, hnode, name, node_type, src_file, line,
                         module):
        """Create a dict with all the node attributes."""
        node_dict = {
            "nid": nid,
            "name": name,
            "type": node_type,
            "file": src_file,
            "line": line,
            "module": module,
            "node": hnode,
        }

        return node_dict

    def count_cpu_threads_per_rank(self):
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        cpu_thread_ids = set()

        for filename in metricdb_files:
            thread = int(
                re.search(r"\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$",
                          filename).group(2))
            if thread < 500:
                cpu_thread_ids.add(thread)

        return len(cpu_thread_ids)
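
A minimal usage sketch for the reader above, assuming a hypothetical HPCToolkit database directory that contains experiment.xml and matching metric-db files:

# Hypothetical HPCToolkit database directory.
reader = HPCToolkitReader("hpctoolkit-app-database")
gf = reader.read()   # GraphFrame with one row per (node, rank[, thread])
print(gf.dataframe.head())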
Example #6
class HPCToolkitReader:
    """Read in the various sections of an HPCToolkit experiment.xml file and
    metric-db files.
    """
    def __init__(self, dir_name):
        # this is the name of the HPCToolkit database directory. The directory
        # contains an experiment.xml and some metric-db files
        self.dir_name = dir_name

        root = ET.parse(self.dir_name + "/experiment.xml").getroot()
        self.loadmodule_table = next(root.iter("LoadModuleTable"))
        self.file_table = next(root.iter("FileTable"))
        self.procedure_table = next(root.iter("ProcedureTable"))
        self.metricdb_table = next(root.iter("MetricDBTable"))
        self.callpath_profile = next(root.iter("SecCallPathProfileData"))

        # For a parallel run, there should be one metric-db file per MPI
        # process
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        self.num_pes = len(metricdb_files)

        # Read one metric-db file to extract the number of nodes in the CCT
        # and the number of metrics
        with open(metricdb_files[0], "rb") as metricdb:
            metricdb.read(18)  # skip tag
            metricdb.read(5)  # skip version TODO: should we?
            endian = metricdb.read(1)

            if endian == b"b":
                self.num_nodes = struct.unpack(">i", metricdb.read(4))[0]
                self.num_metrics = struct.unpack(">i", metricdb.read(4))[0]
            else:
                raise ValueError(
                    "HPCToolkitReader doesn't support endian '%s'" % endian)

        self.load_modules = {}
        self.src_files = {}
        self.procedure_names = {}
        self.metric_names = {}

        # this list of dicts will hold all the node information such as
        # procedure name, load module, filename, etc. for all the nodes
        self.node_dicts = []

        self.timer = Timer()

    def fill_tables(self):
        """Read certain sections of the experiment.xml file to create dicts of load
        modules, src_files, procedure_names, and metric_names.
        """
        for loadm in (self.loadmodule_table).iter("LoadModule"):
            self.load_modules[loadm.get("i")] = loadm.get("n")

        for filename in (self.file_table).iter("File"):
            self.src_files[filename.get("i")] = filename.get("n")

        for procedure in (self.procedure_table).iter("Procedure"):
            self.procedure_names[procedure.get("i")] = procedure.get("n")

        for metric in (self.metricdb_table).iter("MetricDB"):
            self.metric_names[metric.get("i")] = metric.get("n")

        return (
            self.load_modules,
            self.src_files,
            self.procedure_names,
            self.metric_names,
        )

    def read_all_metricdb_files(self):
        """Read all the metric-db files and create a dataframe with num_nodes X
        num_pes rows and num_metrics columns. Two additional columns store the node
        id and MPI process rank.
        """
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        metricdb_files.sort()

        # All the metric data per node and per process is read into the metrics
        # array below. The two additional columns are for storing the implicit
        # node id (nid) and MPI process rank.
        shape = [self.num_nodes * self.num_pes, self.num_metrics + 2]
        size = int(np.prod(shape))

        # shared memory buffer for multiprocessing
        shared_buffer = mp.sharedctypes.RawArray("d", size)

        pool = mp.Pool(initializer=init_shared_array,
                       initargs=(shared_buffer, ))
        self.metrics = np.frombuffer(shared_buffer).reshape(shape)
        args = [(filename, self.num_nodes, self.num_metrics, shape)
                for filename in metricdb_files]
        try:
            pool.map(read_metricdb_file, args)
        finally:
            pool.close()

        # once all files have been read, create a dataframe of metrics
        metric_names = [
            self.metric_names[key] for key in sorted(self.metric_names.keys())
        ]
        for idx, name in enumerate(metric_names):
            if name == "CPUTIME (usec) (E)":
                metric_names[idx] = "time"
            if name == "CPUTIME (usec) (I)":
                metric_names[idx] = "time (inc)"

        self.metric_columns = metric_names
        df_columns = self.metric_columns + ["nid", "rank"]
        self.df_metrics = pd.DataFrame(self.metrics, columns=df_columns)
        self.df_metrics["nid"] = self.df_metrics["nid"].astype(int, copy=False)
        self.df_metrics["rank"] = self.df_metrics["rank"].astype(int,
                                                                 copy=False)

    def read(self):
        """Read the experiment.xml file to extract the calling context tree and create
        a dataframe out of it. Then merge the two dataframes to create the final
        dataframe.

        Return:
            (GraphFrame): new GraphFrame with HPCToolkit data.
        """
        with self.timer.phase("fill tables"):
            self.fill_tables()

        with self.timer.phase("read metric db"):
            self.read_all_metricdb_files()

        list_roots = []

        # parse the ElementTree to generate a calling context tree
        for root in self.callpath_profile.findall("PF"):
            nid = int(root.get("i"))

            # start with the root and create the callpath and node for the root
            # also a corresponding node_dict to be inserted into the dataframe
            node_callpath = []
            node_callpath.append(self.procedure_names[root.get("n")])
            graph_root = Node(
                Frame({
                    "type": "function",
                    "name": self.procedure_names[root.get("n")]
                }),
                None,
            )
            node_dict = self.create_node_dict(
                nid,
                graph_root,
                self.procedure_names[root.get("n")],
                "PF",
                self.src_files[root.get("f")],
                root.get("l"),
                self.load_modules[root.get("lm")],
            )

            self.node_dicts.append(node_dict)
            list_roots.append(graph_root)

            # start graph construction at the root
            with self.timer.phase("graph construction"):
                self.parse_xml_children(root, graph_root, list(node_callpath))

        # create a dataframe for all the nodes in the graph
        self.df_nodes = pd.DataFrame.from_dict(data=self.node_dicts)

        # merge the metrics and node dataframes
        with self.timer.phase("data frame"):
            dataframe = pd.merge(self.df_metrics, self.df_nodes, on="nid")
            # set the index to be a MultiIndex
            indices = ["node", "rank"]
            dataframe.set_index(indices, drop=False, inplace=True)

        # create list of exclusive and inclusive metric columns
        exc_metrics = []
        inc_metrics = []
        for column in self.metric_columns:
            if "(inc)" in column:
                inc_metrics.append(column)
            else:
                exc_metrics.append(column)

        return hatchet.graphframe.GraphFrame(Graph(list_roots), dataframe,
                                             exc_metrics, inc_metrics)

    def parse_xml_children(self, xml_node, hnode, callpath):
        """Parses all children of an XML node."""
        for xml_child in xml_node:
            if xml_child.tag != "M":
                nid = int(xml_node.get("i"))
                line = xml_node.get("l")
                self.parse_xml_node(xml_child, nid, line, hnode, callpath)

    def parse_xml_node(self, xml_node, parent_nid, parent_line, hparent,
                       parent_callpath):
        """Parses an XML node and its children recursively."""
        nid = int(xml_node.get("i"))

        global src_file
        xml_tag = xml_node.tag

        if xml_tag == "PF" or xml_tag == "Pr":
            # procedure
            name = self.procedure_names[xml_node.get("n")]
            if parent_line != "0":
                name = parent_line + ":" + name
            src_file = xml_node.get("f")
            line = xml_node.get("l")

            node_callpath = parent_callpath
            node_callpath.append(name)
            hnode = Node(Frame({"type": "function", "name": name}), hparent)
            node_dict = self.create_node_dict(
                nid,
                hnode,
                name,
                xml_tag,
                self.src_files[src_file],
                line,
                self.load_modules[xml_node.get("lm")],
            )

        elif xml_tag == "L":
            # loop
            src_file = xml_node.get("f")
            line = xml_node.get("l")
            name = "Loop@" + (
                self.src_files[src_file]).rpartition("/")[2] + ":" + line

            node_callpath = parent_callpath
            node_callpath.append(name)
            hnode = Node(
                Frame({
                    "type": "loop",
                    "file": self.src_files[src_file],
                    "line": line
                }),
                hparent,
            )
            node_dict = self.create_node_dict(nid, hnode, name, xml_tag,
                                              self.src_files[src_file], line,
                                              None)

        elif xml_tag == "S":
            # statement
            line = xml_node.get("l")
            # this might not be required for resolving conflicts
            name = (self.src_files[src_file]).rpartition("/")[2] + ":" + line

            node_callpath = parent_callpath
            node_callpath.append(name)
            hnode = Node(
                Frame({
                    "type": "statement",
                    "file": self.src_files[src_file],
                    "line": line,
                }),
                hparent,
            )
            node_dict = self.create_node_dict(nid, hnode, name, xml_tag,
                                              self.src_files[src_file], line,
                                              None)

            # when we reach statement nodes, we subtract their exclusive
            # metric values from the parent's values
            for column in self.metric_columns:
                if "(inc)" not in column:
                    self.df_metrics.loc[
                        self.df_metrics["nid"] == parent_nid, column] = (
                            self.df_metrics.loc[self.df_metrics["nid"] ==
                                                parent_nid, column] -
                            self.df_metrics.loc[self.df_metrics["nid"] == nid,
                                                column].values)

        if xml_tag == "C" or (xml_tag == "Pr" and
                              self.procedure_names[xml_node.get("n")] == ""):
            # do not add a node to the graph if the xml_tag is a callsite
            # or if it's a procedure with no name
            # for Prs, the preceding Pr has the calling line number, and for
            # PFs, the preceding C has the line number
            line = xml_node.get("l")
            self.parse_xml_children(xml_node, hparent, list(parent_callpath))
        else:
            self.node_dicts.append(node_dict)
            hparent.add_child(hnode)
            self.parse_xml_children(xml_node, hnode, list(node_callpath))

    def create_node_dict(self, nid, hnode, name, node_type, src_file, line,
                         module):
        """Create a dict with all the node attributes."""
        node_dict = {
            "nid": nid,
            "name": name,
            "type": node_type,
            "file": src_file,
            "line": line,
            "module": module,
            "node": hnode,
        }

        return node_dict
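
Both HPCToolkit variants return a GraphFrame indexed by node and rank (plus thread when there is more than one thread per rank); a short pandas sketch of slicing it by rank, with a hypothetical directory name and "time" being the renamed CPUTIME (E) metric from read_all_metricdb_files:

# Hypothetical use of the (node, rank) MultiIndex produced above.
gf = HPCToolkitReader("hpctoolkit-app-database").read()

rank0 = gf.dataframe.xs(0, level="rank")   # all rows for MPI rank 0
print(rank0["time"].sum())                 # total exclusive time on rank 0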
Example #7
class CaliperNativeReader:
    """Read in a native `.cali` file using Caliper's python reader."""
    def __init__(self, filename_or_caliperreader):
        """Read in a native cali with Caliper's python reader.

        Args:
            filename_or_caliperreader (str or CaliperReader): name of a `cali` file OR
                a CaliperReader object
        """
        self.filename_or_caliperreader = filename_or_caliperreader
        self.filename_ext = ""

        self.metric_columns = set()
        self.node_dicts = []

        self.timer = Timer()

        if isinstance(self.filename_or_caliperreader, str):
            _, self.filename_ext = os.path.splitext(filename_or_caliperreader)

    def create_graph(self, ctx="path"):
        list_roots = []
        visited = {}  # map frame to node
        parent_hnode = None

        # find records that represent the path hierarchy (the context tree)
        for node in self.filename_or_caliperreader.records:
            metrics = {}
            node_label = ""
            if ctx in node:
                # if it's a list, then it's a callpath
                if isinstance(node[ctx], list):
                    node_label = node[ctx][-1]
                    for i in node.keys():
                        if node[i] == node_label:
                            self.node_type = i
                        elif i != ctx:
                            self.metric_columns.add(i)
                            if (self.filename_or_caliperreader.attribute(
                                    i).attribute_type() == "double"):
                                metrics[i] = float(node[i])
                            elif (self.filename_or_caliperreader.attribute(
                                    i).attribute_type() == "int"):
                                metrics[i] = int(node[i])
                            elif i == "function":
                                if isinstance(node[i], list):
                                    metrics[i] = node[i][-1]
                                else:
                                    metrics[i] = node[i]
                            else:
                                metrics[i] = node[i]

                    frame = Frame({"type": self.node_type, "name": node_label})
                    parent_frame = None
                    for i in visited.keys():
                        parent_label = node[ctx][-2]
                        if i["name"] == parent_label:
                            parent_frame = i
                            break
                    parent_hnode = visited[parent_frame]

                    hnode = Node(frame, parent_hnode)

                    visited[frame] = hnode

                    node_dict = dict({
                        "name": node_label,
                        "node": hnode
                    }, **metrics)
                    parent_hnode.add_child(hnode)
                    self.node_dicts.append(node_dict)
                # if it's a string, then it's a root
                else:
                    node_label = node[ctx]
                    for i in node.keys():
                        if node[i] == node_label:
                            self.node_type = i
                        else:
                            self.metric_columns.add(i)
                            if (self.filename_or_caliperreader.attribute(
                                    i).attribute_type() == "double"):
                                metrics[i] = float(node[i])
                            elif (self.filename_or_caliperreader.attribute(
                                    i).attribute_type() == "int"):
                                metrics[i] = int(node[i])
                            elif i == "function":
                                metrics[i] = node[i][-1]
                            else:
                                metrics[i] = node[i]

                    frame = Frame({"type": self.node_type, "name": node_label})

                    # since this node does not have a parent, this is a root
                    graph_root = Node(frame, None)
                    visited[frame] = graph_root
                    list_roots.append(graph_root)

                    node_dict = dict({
                        "name": node_label,
                        "node": graph_root
                    }, **metrics)
                    self.node_dicts.append(node_dict)
                    parent_hnode = graph_root

        return list_roots

    def read(self):
        """Read the caliper records to extract the calling context tree."""
        if isinstance(self.filename_or_caliperreader, str):
            if self.filename_ext != ".cali":
                raise ValueError("from_caliperreader() needs a .cali file")
            else:
                cali_file = self.filename_or_caliperreader
                self.filename_or_caliperreader = cr.CaliperReader()
                self.filename_or_caliperreader.read(cali_file)

        with self.timer.phase("graph construction"):
            list_roots = self.create_graph()

        # create a graph object once all the nodes have been added
        graph = Graph(list_roots)
        graph.enumerate_traverse()

        dataframe = pd.DataFrame(data=self.node_dicts)

        indices = ["node"]
        if "rank" in dataframe.columns:
            indices.append("rank")
        dataframe.set_index(indices, inplace=True)
        dataframe.sort_index(inplace=True)

        # change column names
        for idx, item in enumerate(dataframe.columns):
            # make other columns consistent with other readers
            if item == "mpi.rank":
                dataframe.columns.values[idx] = "rank"
            if item == "module#cali.sampler.pc":
                dataframe.columns.values[idx] = "module"

        # create list of exclusive and inclusive metric columns
        exc_metrics = []
        inc_metrics = []
        for column in self.metric_columns:
            if "(inc)" in column:
                inc_metrics.append(column)
            else:
                exc_metrics.append(column)

        metadata = self.filename_or_caliperreader.globals

        return hatchet.graphframe.GraphFrame(graph,
                                             dataframe,
                                             exc_metrics,
                                             inc_metrics,
                                             metadata=metadata)
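
A closing usage sketch for the native reader above, assuming a hypothetical .cali recording and the caliper-reader package (imported as `cr` above) is installed:

# Hypothetical native Caliper recording.
reader = CaliperNativeReader("lulesh-sample.cali")
gf = reader.read()

print(gf.dataframe.head())
print(reader.filename_or_caliperreader.globals)   # run metadata from Caliper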