Example #1
class XFSBufBioDecoder(Decoder):
    """
    Decodes a bio with an xfsbuf ->bi_end_io

    Args:
        bio: The struct bio to decode.  The value must be of type
            ``struct bio``.

    Attributes:
        xfsbuf (gdb.Value): The xfsbuf structure.  It is of type
            ``struct xfs_buf *``.
        devname (str): The string representation of the device name
    """
    _description = "{:x} bio: xfs buffer on {}"
    __endio__ = 'xfs_buf_bio_end_io'
    _types = Types(['struct xfs_buf *'])

    def __init__(self, bio: gdb.Value) -> None:
        super(XFSBufBioDecoder, self).__init__()
        self.bio = bio

    def interpret(self) -> None:
        """Interpret the xfsbuf bio to populate its attributes"""
        # pylint: disable=attribute-defined-outside-init
        self.xfsbuf = self.bio['bi_private'].cast(self._types.xfs_buf_p_type)
        self.devname = block_device_name(self.bio['bi_bdev'])

    def __next__(self) -> Any:
        return XFSBufDecoder(self.xfsbuf)

    def __str__(self) -> str:
        return self._description.format(int(self.bio), self.devname)
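

# A minimal usage sketch, not part of the original module: it assumes a live
# crash-python session in which `bio` is a gdb.Value of type ``struct bio``
# whose bi_end_io is xfs_buf_bio_end_io.  interpret() populates the decoder's
# attributes, str() renders the description and __next__() hands back the next
# decoder in the chain (an XFSBufDecoder wrapping the underlying struct
# xfs_buf).
def _example_decode_xfsbuf_bio(bio: gdb.Value) -> Any:
    decoder = XFSBufBioDecoder(bio)
    decoder.interpret()
    print(decoder)           # e.g. "ffff... bio: xfs buffer on sda1"
    return next(decoder)     # an XFSBufDecoder for decoder.xfsbuf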
Example #2
class DecodeBioBH(Decoder):
    """
    Decodes a bio used to perform i/o for buffer_heads

    This decoder handles a bio generated by buffer head submission.

    Args:
        bio: The struct bio to be decoded, generated by buffer head
            submission.  The value must be of type ``struct bio``.

    Attributes:
        bio (:obj:`gdb.Value`): The bio.  The value is of type
            ``struct bio``.
        bh (:obj:`gdb.Value`): The struct buffer_head associated with this
            bio.  The value is of type ``struct buffer_head``.

    """
    _types = Types(['struct buffer_head *'])
    __endio__ = 'end_bio_bh_io_sync'
    _description = "{:x} bio: Bio representation of buffer head"

    def __init__(self, bio: gdb.Value) -> None:
        super().__init__()
        self.bio = bio

    def interpret(self) -> None:
        """Interpret the buffer_head bio to populate its attributes"""
        # pylint: disable=attribute-defined-outside-init
        self.bh = self.bio['bi_private'].cast(self._types.buffer_head_p_type)

    def __str__(self) -> str:
        return self._description.format(int(self.bio))

    def __next__(self) -> Any:
        return decode_bh(self.bh)
Example #3
class ClonedBioDecoder(Decoder):
    """
    Decodes a bio-based device mapper cloned bio

    This decoder handles a cloned bio generated by bio-based device
    mapper targets.

    Args:
        bio: A ``struct bio`` generated by a bio-based device mapper target.
            The value must be of type ``struct bio``.

    Attributes:
        bio (:obj:`gdb.Value`): A ``struct bio`` generated by a bio-based
            device mapper target.  The value is of type ``struct bio``.

        next_bio (:obj:`gdb.Value`): The struct bio that generated this one.
            The value is of type ``struct bio``.

        tio (:obj:`gdb.Value`): The dm target i/o operation for this bio.  The
            value is of type ``struct dm_target_io``.
    """
    _types = Types(['struct dm_target_io *'])
    _get_clone_bio_tio: Callable[[Any, gdb.Value], gdb.Value]
    __endio__ = 'clone_endio'
    _description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]"

    def __init__(self, bio: gdb.Value) -> None:
        super().__init__()
        self.bio = bio

        if self._get_clone_bio_tio is None:
            if 'clone' in self._types.dm_target_io_p_type.target():
                getter = self._get_clone_bio_tio_3_15
            else:
                getter = self._get_clone_bio_tio_old
            self._get_clone_bio_tio = getter

    def interpret(self) -> None:
        """Interprets the cloned device mapper bio to populate its
        attributes"""
        # pylint: disable=attribute-defined-outside-init
        self.tio = self._get_clone_bio_tio(self.bio)
        self.next_bio = self.tio['io']['bio']

    def __str__(self) -> str:
        return self._description.format(
            int(self.bio), block_device_name(self.bio['bi_bdev']),
            int(self.bio['bi_sector']),
            block_device_name(self.next_bio['bi_bdev']),
            int(self.next_bio['bi_sector']))

    def __next__(self) -> Decoder:
        return decode_bio(self.next_bio)

    def _get_clone_bio_tio_old(self, bio: gdb.Value) -> gdb.Value:
        return bio['bi_private'].cast(self._types.dm_target_io_p_type)

    def _get_clone_bio_tio_3_15(self, bio: gdb.Value) -> gdb.Value:
        return container_of(bio, self._types.dm_target_io_p_type, 'clone')
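

# An illustrative sketch, not from the original source: device-mapper clones
# can stack, so decoding usually walks the chain until a decoder's __next__()
# returns None.  It assumes `bio` is a gdb.Value of type ``struct bio``
# completed by clone_endio and that every decoder reached along the way
# follows the interpret()/__str__()/__next__() protocol shown here.
def _example_walk_clone_chain(bio: gdb.Value) -> None:
    decoder: Any = ClonedBioDecoder(bio)
    while decoder is not None:
        decoder.interpret()
        print(decoder)
        decoder = next(decoder)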
Example #4
class Zone:

    types = Types(['struct page'])

    def __init__(self, obj: gdb.Value, zid: int) -> None:
        self.gdb_obj = obj
        self.zid = zid
        self.nid = int(obj["node"])

    def is_populated(self) -> bool:
        return self.gdb_obj["present_pages"] != 0

    def get_vmstat(self) -> List[int]:
        stats = [0] * VmStat.nr_stat_items
        vm_stat = self.gdb_obj["vm_stat"]

        for item in range(0, VmStat.nr_stat_items):
            # TODO abstract atomic?
            stats[item] = int(vm_stat[item]["counter"])
        return stats

    def add_vmstat_diffs(self, diffs: List[int]) -> None:
        for cpu in for_each_online_cpu():
            pageset = get_percpu_var(self.gdb_obj["pageset"], cpu)
            vmdiff = pageset["vm_stat_diff"]
            for item in range(0, VmStat.nr_stat_items):
                diffs[item] += int(vmdiff[item])

    def get_vmstat_diffs(self) -> List[int]:
        diffs = [0] * VmStat.nr_stat_items
        self.add_vmstat_diffs(diffs)
        return diffs

    def _check_free_area(self, area: gdb.Value, is_pcp: bool) -> None:
        nr_free = 0
        list_array_name = "lists" if is_pcp else "free_list"
        for free_list in array_for_each(area[list_array_name]):
            for page_obj in list_for_each_entry(free_list,
                                                self.types.page_type, "lru"):
                page = crash.types.page.Page.from_obj(page_obj)
                nr_free += 1
                if page.get_nid() != self.nid or page.get_zid() != self.zid:
                    print(
                        "page {:#x} misplaced on {} of zone {}:{}, has flags for zone {}:{}"
                        .format(int(page_obj.address),
                                "pcplist" if is_pcp else "freelist", self.nid,
                                self.zid, page.get_nid(), page.get_zid()))
        nr_expected = area["count"] if is_pcp else area["nr_free"]
        if nr_free != nr_expected:
            print("nr_free mismatch in {} {}: expected {}, counted {}".format(
                "pcplist" if is_pcp else "area", area.address, nr_expected,
                nr_free))

    def check_free_pages(self) -> None:
        for area in array_for_each(self.gdb_obj["free_area"]):
            self._check_free_area(area, False)
        for cpu in for_each_online_cpu():
            pageset = get_percpu_var(self.gdb_obj["pageset"], cpu)
            self._check_free_area(pageset["pcp"], True)
Example #5
            class nested(object):
                types = Types(['unsigned long'])

                ulong_valid = False

                @classmethod
                def check_ulong(cls, gdbtype):
                    cls.ulong_valid = True
Example #6
class VmStat:
    types = Types(['enum zone_stat_item', 'enum vm_event_item'])
    symbols = Symbols(['vm_event_states'])

    nr_stat_items = -1
    nr_event_items = -1

    vm_stat_names: List[str] = list()
    vm_event_names: List[str] = list()

    @classmethod
    def check_enum_type(cls, gdbtype: gdb.Type) -> None:
        if gdbtype == cls.types.enum_zone_stat_item_type:
            (items, names) = cls.__populate_names(gdbtype,
                                                  'NR_VM_ZONE_STAT_ITEMS')
            cls.nr_stat_items = items
            cls.vm_stat_names = names
        elif gdbtype == cls.types.enum_vm_event_item_type:
            (items, names) = cls.__populate_names(gdbtype, 'NR_VM_EVENT_ITEMS')
            cls.nr_event_items = items
            cls.vm_event_names = names
        else:
            raise TypeError("Unexpected type {}".format(gdbtype.name))

    @classmethod
    def __populate_names(cls, enum_type: gdb.Type,
                         items_name: str) -> Tuple[int, List[str]]:
        nr_items = enum_type[items_name].enumval

        names = ["__UNKNOWN__"] * nr_items

        for field in enum_type.fields():
            if field.enumval < nr_items:
                names[field.enumval] = field.name

        return (nr_items, names)

    @classmethod
    def get_stat_names(cls) -> List[str]:
        return cls.vm_stat_names

    @classmethod
    def get_event_names(cls) -> List[str]:
        return cls.vm_event_names

    @classmethod
    def get_events(cls) -> List[int]:
        nr = cls.nr_event_items
        events = [0] * nr

        for cpu in for_each_online_cpu():
            states = get_percpu_var(cls.symbols.vm_event_states, cpu)
            for item in range(0, nr):
                events[item] += int(states["event"][item])

        return events
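

# An illustrative sketch, not part of the original module: once the enum
# callback above has resolved the item counts and names, the system-wide VM
# event counters can be reported by pairing get_event_names() with
# get_events(), which already sums the per-cpu vm_event_states.
def _example_print_vm_events() -> None:
    for name, count in zip(VmStat.get_event_names(), VmStat.get_events()):
        print("{}: {}".format(name, count))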
Example #7
class ClonedBioReqDecoder(Decoder):
    """
    Decodes a request-based device mapper cloned bio

    This decodes a cloned bio generated by request-based device mapper targets.

    Args:
        bio: A ``struct bio`` generated by a request-based device mapper
            target.  The value must be of type ``struct bio``.

    """
    _types = Types(['struct dm_rq_clone_bio_info *'])
    __endio__ = 'end_clone_bio'
    _description = '{:x} bio: Request-based Device Mapper on {}'

    _get_clone_bio_rq_info: Callable[[Any, gdb.Value], gdb.Value]

    def __init__(self, bio: gdb.Value) -> None:
        super().__init__()
        self.bio = bio
        if self._get_clone_bio_rq_info is None:
            if 'clone' in self._types.dm_rq_clone_bio_info_p_type.target():
                getter = self._get_clone_bio_rq_info_3_7
            else:
                getter = self._get_clone_bio_rq_info_old
            self._get_clone_bio_rq_info = getter

    def interpret(self) -> None:
        """Interprets the request-based device mapper bio to populate its
        attributes"""
        # pylint: disable=attribute-defined-outside-init
        self.info = self._get_clone_bio_rq_info(self.bio)
        self.tio = self.info['tio']

    def __str__(self) -> str:
        return self._description.format(int(self.bio),
                                        block_device_name(self.bio['bi_bdev']))

    def __next__(self) -> Decoder:
        return decode_bio(self.info['orig'])

    def _get_clone_bio_rq_info_old(self, bio: gdb.Value) -> gdb.Value:
        return bio['bi_private'].cast(self._types.dm_rq_clone_bio_info_p_type)

    def _get_clone_bio_rq_info_3_7(self, bio: gdb.Value) -> gdb.Value:
        return container_of(bio, self._types.dm_rq_clone_bio_info_p_type,
                            'clone')
Example #8
class DIOBioDecoder(Decoder):
    """
    Decodes a bio used for direct i/o.

    This decoder handles a bio generated by the direct-io component of
    the file system subsystem.  The bio can have been submitted either
    synchronously or asynchronously.

    Args:
        bio: The struct bio to be decoded, generated by the direct i/o
            component.  The value must be of type ``struct bio``.

    Attributes:
        bio (:obj:`gdb.Value`): The bio.  The value is of type
            ``struct bio``.
        dio (:obj:`gdb.Value`): ``struct dio *`` that represents the
            direct i/o operation
        fstype (str): the name of the file system type
        dev (str): the name of the underlying device
        offset (str): the starting offset on disk
    """

    _types = Types(['struct dio *'])
    __endio__ = ['dio_bio_end_io', 'dio_bio_end_aio']
    _description = "{:x} bio: Direct I/O for {} inode {}, sector {} on {}"

    def __init__(self, bio: gdb.Value) -> None:
        super().__init__()
        self.bio = bio

    def interpret(self) -> None:
        """Interprets a direct i/o bio to populate its attributes"""
        # pylint: disable=attribute-defined-outside-init
        self.dio = self.bio['bi_private'].cast(self._types.dio_p_type)
        self.fstype = super_fstype(self.dio['inode']['i_sb'])
        self.dev = block_device_name(self.dio['inode']['i_sb']['s_bdev'])
        self.offset = self.dio['block_in_file'] << self.dio['blkbits']

    def __str__(self) -> str:
        return self._description.format(int(self.bio), self.fstype,
                                        self.dio['inode']['i_ino'],
                                        self.bio['bi_sector'], self.dev)

    def __next__(self) -> Any:
        return None
Example #9
from math import log, ceil
from typing import Dict, TypeVar

import crash
from crash.util import find_member_variant
from crash.util.symbols import Types, Symvals, TypeCallbacks
from crash.util.symbols import SymbolCallbacks, MinimalSymbolCallbacks
from crash.cache.syscache import config
from crash.exceptions import DelayedAttributeError

import gdb

#TODO debuginfo won't tell us, depends on version?
PAGE_MAPPING_ANON = 1

types = Types([
    'unsigned long', 'struct page', 'enum pageflags', 'enum zone_type',
    'struct mem_section'
])
symvals = Symvals(['mem_section', 'max_pfn'])

PageType = TypeVar('PageType', bound='Page')


class Page:
    slab_cache_name = None
    slab_page_name = None
    compound_head_name = None
    vmemmap_base = 0xffffea0000000000
    vmemmap: gdb.Value
    directmap_base = 0xffff880000000000
    pageflags: Dict[str, int] = dict()
Example #10
"""

import re
import fnmatch
import argparse

import gdb

from crash.commands import Command, ArgumentParser
from crash.types.module import for_each_module
from crash.util import struct_has_member
from crash.util.symbols import Types
from crash.types.list import list_for_each_entry
from crash.types.percpu import get_percpu_var

types = Types(['struct module_use'])


class ModuleCommand(Command):
    """display module information"""
    def __init__(self) -> None:
        parser = ArgumentParser(prog="lsmod")

        parser.add_argument('-p', nargs='?', const=-1, default=None, type=int)
        parser.add_argument('args', nargs=argparse.REMAINDER)

        Command.__init__(self, "lsmod", parser)

    def print_module_percpu(self, mod: gdb.Value, cpu: int = -1) -> None:
        cpu = int(cpu)
        addr = int(mod['percpu'])
Example #11
        msg = self.formatter.format(name, spec)
        super().__init__(msg)
        self.name = name
        self.spec = spec

class _InvalidComponentNameError(_InvalidComponentBaseError):
    """The requested member component does not exist in the provided type."""

    formatter = "no such member `{}' in `{}'"
    def __init__(self, member: str, gdbtype: gdb.Type) -> None:
        msg = self.formatter.format(member, str(gdbtype))
        super().__init__(msg)
        self.member = member
        self.type = gdbtype

types = Types(['char *', 'uuid_t'])

def container_of(val: gdb.Value, gdbtype: gdb.Type, member: str) -> gdb.Value:
    """
    Returns an object that contains the specified object at the given
    offset.

    Args:
        val (gdb.Value): The value to be converted.  It can refer to an
            allocated structure or a pointer.
        gdbtype (gdb.Type): The type of the object that will be generated
        member (str):
            The name of the member in the target struct that contains `val`.

    Returns:
        gdb.Value<gdbtype>: The converted object, of the type specified by
Example #12
    MNT_NOEXEC      : "MNT_NOEXEC",
    MNT_NOATIME     : "MNT_NOATIME",
    MNT_NODIRATIME  : "MNT_NODIRATIME",
    MNT_RELATIME    : "MNT_RELATIME",
    MNT_READONLY    : "MNT_READONLY",
}

MNT_FLAGS_HIDDEN = {
    MNT_SHRINKABLE : "[MNT_SHRINKABLE]",
    MNT_WRITE_HOLD : "[MNT_WRITE_HOLD]",
    MNT_SHARED : "[MNT_SHARED]",
    MNT_UNBINDABLE : "[MNT_UNBINDABLE]",
}
MNT_FLAGS_HIDDEN.update(MNT_FLAGS)

types = Types(['struct mount', 'struct vfsmount'])
symvals = Symvals(['init_task'])

class Mount:
    _for_each_mount: Callable[[Any, gdb.Value], Iterator[gdb.Value]]
    _init_fs_root: gdb.Value

    def _for_each_mount_nsproxy(self, task: gdb.Value) -> Iterator[gdb.Value]:
        """
        An implementation of for_each_mount that uses the task's
        nsproxy to locate the mount namespace.  See :ref:`for_each_mount`
        for more details.
        """
        return list_for_each_entry(task['nsproxy']['mnt_ns']['list'],
                                   types.mount_type, 'mnt_list')
Example #13
class CrashKernel:
    """
    Initialize a basic kernel semantic debugging session.

    This means that we load the following:

    - Kernel image symbol table (and debuginfo, if not integrated)
      relocated to the base offset used by kASLR
    - Kernel modules that were loaded on the crashed system (again,
      with debuginfo if not integrated)
    - Percpu ranges used by kernel modules
    - Architecture-specific details
    - Linux tasks populated into the GDB thread table

    If kernel module files and debuginfo cannot be located, backtraces
    that cross addresses belonging to those modules may be incomplete.
    Percpu ranges will be loaded properly regardless.

    For arguments that accept paths to specify a base directory to be
    used, the entire directory structure will be read and cached to
    speed up subsequent searches.  Still, reading large directory trees
    is a time consuming operation and being exact as possible will
    improve startup time.

    Args:
        roots (None for defaults): The roots of trees
            to search for debuginfo files.  When specified, all roots
            will be searched using the following arguments (including
            the absolute paths in the defaults if unspecified).

            Defaults to: /

        vmlinux_debuginfo (None for defaults): The
            location of the separate debuginfo file corresponding
            to the kernel being debugged.

            Defaults to:

            - <loaded kernel path>.debug
            - ./vmlinux-<kernel version>.debug
            - /usr/lib/debug/.build-id/xx/<build-id>.debug
            - /usr/lib/debug/<loaded kernel path>.debug
            - /usr/lib/debug/boot/<loaded kernel name>.debug
            - /usr/lib/debug/boot/vmlinux-<kernel version>


        module_path (None for defaults): The base directory to
            be used to search for kernel modules (e.g. module.ko) to be
            used to load symbols for the kernel being debugged.

            Defaults to:

            - ./modules
            - /lib/modules/<kernel-version>


        module_debuginfo_path (None for defaults): The base
            directory to search for debuginfo matching the kernel
            modules already loaded.

            Defaults to:

            - ./modules.debug
            - /usr/lib/debug/.build-id/xx/<build-id>.debug
            - /usr/lib/debug/lib/modules/<kernel-version>


    Raises:
        CrashKernelError: If the kernel debuginfo cannot be loaded.
        InvalidArgumentError: If any of the arguments are not None, str,
                   or list of str

    """
    types = Types(['char *'])
    symvals = Symvals(['init_task'])
    symbols = Symbols(['runqueues'])

    # pylint: disable=unused-argument
    def __init__(self,
                 roots: PathSpecifier = None,
                 vmlinux_debuginfo: PathSpecifier = None,
                 module_path: PathSpecifier = None,
                 module_debuginfo_path: PathSpecifier = None,
                 verbose: bool = False,
                 debug: bool = False) -> None:
        self.findmap: Dict[str, Dict[Any, Any]] = dict()
        self.modules_order: Dict[str, Dict[str, str]] = dict()
        obj = gdb.objfiles()[0]
        if not obj.filename:
            raise RuntimeError("loaded objfile has no filename???")
        kernel = os.path.basename(obj.filename)

        self.kernel = kernel
        self.version = self.extract_version()

        self._setup_roots(roots, verbose)
        self._setup_vmlinux_debuginfo(vmlinux_debuginfo, verbose)
        self._setup_module_path(module_path, verbose)
        self._setup_module_debuginfo_path(module_debuginfo_path, verbose)

        # We need separate debuginfo.  Let's go find it.
        path_list = []
        build_id_path = self.build_id_path(obj)
        if build_id_path:
            path_list.append(build_id_path)
        path_list += self.vmlinux_debuginfo
        if not obj.has_symbols():
            print("Loading debug symbols for vmlinux")
            for path in path_list:
                try:
                    obj.add_separate_debug_file(path)
                    if obj.has_symbols():
                        break
                except gdb.error:
                    pass

        if not obj.has_symbols():
            raise CrashKernelError(
                "Couldn't locate debuginfo for {}".format(kernel))

        self.vermagic = self.extract_vermagic()

        archname = obj.architecture.name()
        try:
            archclass = crash.arch.get_architecture(archname)
        except RuntimeError as e:
            raise CrashKernelError(str(e))

        self.arch = archclass()

        self.target = crash.current_target()
        self.vmcore = self.target.kdump

        self.crashing_thread: Optional[gdb.InferiorThread] = None

    def _setup_roots(self,
                     roots: PathSpecifier = None,
                     verbose: bool = False) -> None:
        if roots is None:
            self.roots = ["/"]
        elif isinstance(roots, list) and roots and isinstance(roots[0], str):
            x = None
            for root in roots:
                if os.path.exists(root):
                    if x is None:
                        x = [root]
                    else:
                        x.append(root)
                else:
                    print("root {} does not exist".format(root))

            if x is None:
                x = ["/"]
            self.roots = x
        elif isinstance(roots, str):
            x = None
            if os.path.exists(roots):
                if x is None:
                    x = [roots]
                else:
                    x.append(roots)
            if x is None:
                x = ["/"]
            self.roots = x
        else:
            raise InvalidArgumentError(
                "roots must be None, str, or list of str")
        if verbose:
            print("roots={}".format(self.roots))

    def _find_debuginfo_paths(self, variants: List[str]) -> List[str]:
        x: List[str] = list()

        for root in self.roots:
            for debug_path in ["", "usr/lib/debug"]:
                for variant in variants:
                    path = os.path.join(root, debug_path, variant)
                    if os.path.exists(path):
                        x.append(path)

        return x

    def _setup_vmlinux_debuginfo(self,
                                 vmlinux_debuginfo: PathSpecifier = None,
                                 verbose: bool = False) -> None:
        if vmlinux_debuginfo is None:
            defaults = [
                "{}.debug".format(self.kernel),
                "vmlinux-{}.debug".format(self.version),
                "boot/{}.debug".format(os.path.basename(self.kernel)),
                "boot/vmlinux-{}.debug".format(self.version),
            ]

            self.vmlinux_debuginfo = self._find_debuginfo_paths(defaults)

        elif (isinstance(vmlinux_debuginfo, list) and vmlinux_debuginfo
              and isinstance(vmlinux_debuginfo[0], str)):
            self.vmlinux_debuginfo = vmlinux_debuginfo
        elif isinstance(vmlinux_debuginfo, str):
            self.vmlinux_debuginfo = [vmlinux_debuginfo]
        else:
            raise InvalidArgumentError(
                "vmlinux_debuginfo must be None, str, or list of str")

        if verbose:
            print("vmlinux_debuginfo={}".format(self.vmlinux_debuginfo))

    def _setup_module_path(self,
                           module_path: PathSpecifier = None,
                           verbose: bool = False) -> None:
        x: List[str] = []
        if module_path is None:

            path = "modules"
            if os.path.exists(path):
                x.append(path)

            for root in self.roots:
                path = "{}/lib/modules/{}".format(root, self.version)
                if os.path.exists(path):
                    x.append(path)

            self.module_path = x
        elif (isinstance(module_path, list) and module_path
              and isinstance(module_path[0], str)):
            for root in self.roots:
                for mpath in module_path:
                    path = "{}/{}".format(root, mpath)
                    if os.path.exists(path):
                        x.append(path)

            self.module_path = x
        elif isinstance(module_path, str):
            if os.path.exists(module_path):
                x.append(module_path)

            self.module_path = x
        else:
            raise InvalidArgumentError(
                "module_path must be None, str, or list of str")

        if verbose:
            print("module_path={}".format(self.module_path))

    def _setup_module_debuginfo_path(
            self,
            module_debuginfo_path: PathSpecifier = None,
            verbose: bool = False) -> None:
        x: List[str] = []
        if module_debuginfo_path is None:
            defaults = [
                "modules.debug",
                "lib/modules/{}".format(self.version),
            ]

            self.module_debuginfo_path = self._find_debuginfo_paths(defaults)
        elif (isinstance(module_debuginfo_path, list)
              and module_debuginfo_path
              and isinstance(module_debuginfo_path[0], str)):

            for root in self.roots:
                for mpath in module_debuginfo_path:
                    path = "{}/{}".format(root, mpath)
                    if os.path.exists(path):
                        x.append(path)

            self.module_debuginfo_path = x
        elif isinstance(module_debuginfo_path, str):

            for root in self.roots:
                path = "{}/{}".format(root, module_debuginfo_path)
                if os.path.exists(path):
                    x.append(path)

            self.module_debuginfo_path = x
        else:
            raise InvalidArgumentError(
                "module_debuginfo_path must be None, str, or list of str")

        if verbose:
            print("module_debuginfo_path={}".format(
                self.module_debuginfo_path))

    # When working without a symbol table, we still need to be able
    # to resolve version information.
    def _get_minsymbol_as_string(self, name: str) -> str:
        sym = gdb.lookup_minimal_symbol(name)
        if sym is None:
            raise MissingSymbolError(name)

        val = sym.value()

        return val.address.cast(self.types.char_p_type).string()

    def extract_version(self) -> str:
        """
        Returns the version from the loaded vmlinux

        If debuginfo is available, ``init_uts_ns`` will be used.
        Otherwise, it will be extracted from the version banner.

        Returns:
            str: The version text.
        """
        try:
            uts = get_symbol_value('init_uts_ns')
            return uts['name']['release'].string()
        except (AttributeError, NameError, MissingSymbolError):
            pass

        banner = self._get_minsymbol_as_string('linux_banner')

        return banner.split(' ')[2]

    def extract_vermagic(self) -> str:
        """
        Returns the vermagic from the loaded vmlinux

        Returns:
            str: The version text.
        """
        try:
            magic = get_symbol_value('vermagic')
            return magic.string()
        except (AttributeError, NameError):
            pass

        return self._get_minsymbol_as_string('vermagic')

    def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]:
        """
        Returns the modinfo from a module file

        Args:
            modpath: The path to the module file.

        Returns:
            dict: A dictionary containing the names and values of the modinfo
            variables.
        """
        f = open(modpath, 'rb')

        elf = ELFFile(f)
        modinfo = elf.get_section_by_name('.modinfo')

        d = {}
        for line in modinfo.data().split(b'\x00'):
            val = line.decode('utf-8')
            if val:
                eq = val.index('=')
                d[val[0:eq]] = val[eq + 1:]

        del elf
        f.close()
        return d

    def _get_module_sections(self, module: gdb.Value) -> str:
        out = []
        for (name, addr) in for_each_module_section(module):
            out.append("-s {} {:#x}".format(name, addr))
        return " ".join(out)

    def _check_module_version(self, modpath: str, module: gdb.Value) -> None:
        modinfo = self.extract_modinfo_from_module(modpath)

        vermagic = modinfo.get('vermagic', None)

        if vermagic != self.vermagic:
            raise _ModVersionMismatchError(modpath, vermagic, self.vermagic)

        mi_srcversion = modinfo.get('srcversion', None)

        mod_srcversion = None
        if 'srcversion' in module.type:
            mod_srcversion = module['srcversion'].string()

        if mi_srcversion != mod_srcversion:
            raise _ModSourceVersionMismatchError(modpath, mi_srcversion,
                                                 mod_srcversion)

    def load_modules(self, verbose: bool = False, debug: bool = False) -> None:
        """
        Load modules (including debuginfo) into the crash session.

        This routine will attempt to locate modules and the corresponding
        debuginfo files, if separate, using the parameters defined
        when the CrashKernel object was initialized.

        Args:
            verbose (default=False): enable verbose output
            debug (default=False): enable even more verbose debugging output

        Raises:
            CrashKernelError: An error was encountered while loading a module.
                This does not include a failure to locate a module or
                its debuginfo.
        """
        import crash.cache.syscache  # pylint: disable=redefined-outer-name
        version = crash.cache.syscache.utsname.release
        print("Loading modules for {}".format(version), end='')
        if verbose:
            print(":", flush=True)
        failed = 0
        loaded = 0
        for module in for_each_module():
            modname = "{}".format(module['name'].string())
            modfname = "{}.ko".format(modname)
            found = False
            for path in self.module_path:

                try:
                    modpath = self._find_module_file(modfname, path)
                except _NoMatchingFileError:
                    continue

                try:
                    self._check_module_version(modpath, module)
                except _ModinfoMismatchError as e:
                    if verbose:
                        print(str(e))
                    continue

                found = True

                if 'module_core' in module.type:
                    addr = int(module['module_core'])
                else:
                    addr = int(module['core_layout']['base'])

                if debug:
                    print("Loading {} at {:#x}".format(modpath, addr))
                elif verbose:
                    print("Loading {} at {:#x}".format(modname, addr))
                else:
                    print(".", end='')
                    sys.stdout.flush()

                sections = self._get_module_sections(module)

                percpu = int(module['percpu'])
                if percpu > 0:
                    sections += " -s .data..percpu {:#x}".format(percpu)

                try:
                    result = gdb.execute("add-symbol-file {} {:#x} {}".format(
                        modpath, addr, sections),
                                         to_string=True)
                except gdb.error as e:
                    raise CrashKernelError(
                        "Error while loading module `{}': {}".format(
                            modname, str(e)))
                if debug:
                    print(result)

                objfile = gdb.lookup_objfile(modpath)
                if not objfile.has_symbols():
                    self._load_module_debuginfo(objfile, modpath, verbose)
                elif debug:
                    print(" + has debug symbols")

                break

            if not found:
                if failed == 0:
                    print()
                print("Couldn't find module file for {}".format(modname))
                failed += 1
            else:
                if not objfile.has_symbols():
                    print("Couldn't find debuginfo for {}".format(modname))
                loaded += 1
            if (loaded + failed) % 10 == 0:
                print(".", end='')
                sys.stdout.flush()
        print(" done. ({} loaded".format(loaded), end='')
        if failed:
            print(", {} failed)".format(failed))
        else:
            print(")")

        # We shouldn't need this again, so why keep it around?
        del self.findmap
        self.findmap = {}

    def _normalize_modname(self, mod: str) -> str:
        return mod.replace('-', '_')

    def _cache_modules_order(self, path: str) -> None:
        self.modules_order[path] = dict()
        order = os.path.join(path, "modules.order")
        try:
            f = open(order)
            for line in f.readlines():
                modpath = line.rstrip()
                # Strip the leading "kernel/" component from the
                # modules.order entry before joining it with the base path.
                if modpath[:7] == "kernel/":
                    modpath = modpath[7:]
                modname = self._normalize_modname(os.path.basename(modpath))
                modpath = os.path.join(path, modpath)
                if os.path.exists(modpath):
                    self.modules_order[path][modname] = modpath
            f.close()
        except OSError:
            pass

    def _get_module_path_from_modules_order(self, path: str, name: str) -> str:
        if not path in self.modules_order:
            self._cache_modules_order(path)

        try:
            return self.modules_order[path][name]
        except KeyError:
            raise _NoMatchingFileError(name)

    def _cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None:
        if not path in self.findmap:
            self.findmap[path] = {
                'filters': [],
                'files': {},
            }

        # If we've walked this path with no filters, we have everything
        # already.
        if self.findmap[path]['filters'] is None:
            return

        if regex is None:
            self.findmap[path]['filters'] = None
        else:
            pattern = regex.pattern
            if pattern in self.findmap[path]['filters']:
                return
            self.findmap[path]['filters'].append(pattern)

        # pylint: disable=unused-variable
        for root, dirs, files in os.walk(path):
            for filename in files:
                modname = self._normalize_modname(filename)

                if regex and regex.match(modname) is None:
                    continue

                modpath = os.path.join(root, filename)
                self.findmap[path]['files'][modname] = modpath

    def _get_file_path_from_tree_search(self,
                                        path: str,
                                        name: str,
                                        regex: Pattern[str] = None) -> str:
        self._cache_file_tree(path, regex)

        try:
            modname = self._normalize_modname(name)
            return self.findmap[path]['files'][modname]
        except KeyError:
            raise _NoMatchingFileError(name)

    def _find_module_file(self, name: str, path: str) -> str:
        try:
            return self._get_module_path_from_modules_order(path, name)
        except _NoMatchingFileError:
            pass

        regex = re.compile(fnmatch.translate("*.ko"))
        return self._get_file_path_from_tree_search(path, name, regex)

    def _find_module_debuginfo_file(self, name: str, path: str) -> str:
        regex = re.compile(fnmatch.translate("*.ko.debug"))
        return self._get_file_path_from_tree_search(path, name, regex)

    @staticmethod
    def build_id_path(objfile: gdb.Objfile) -> Optional[str]:
        """
        Returns the relative path for debuginfo using the objfile's build-id.

        Args:
            objfile: The objfile for which to return the path
        """
        build_id = objfile.build_id
        if build_id is None:
            return None
        return ".build_id/{}/{}.debug".format(build_id[0:2], build_id[2:])

    def _try_load_debuginfo(self,
                            objfile: gdb.Objfile,
                            path: str,
                            verbose: bool = False) -> bool:
        if not os.path.exists(path):
            return False

        try:
            if verbose:
                print(" + Loading debuginfo: {}".format(path))
            objfile.add_separate_debug_file(path)
            if objfile.has_symbols():
                return True
        except gdb.error as e:
            print(e)

        return False

    def _load_module_debuginfo(self,
                               objfile: gdb.Objfile,
                               modpath: str = None,
                               verbose: bool = False) -> None:
        if modpath is None:
            modpath = objfile.filename
        if modpath is None:
            raise RuntimeError("loaded objfile has no filename???")
        if ".gz" in modpath:
            modpath = modpath.replace(".gz", "")
        filename = "{}.debug".format(os.path.basename(modpath))

        build_id_path = self.build_id_path(objfile)

        for path in self.module_debuginfo_path:
            if build_id_path:
                filepath = "{}/{}".format(path, build_id_path)
                if self._try_load_debuginfo(objfile, filepath, verbose):
                    break

            try:
                filepath = self._find_module_debuginfo_file(filename, path)
            except _NoMatchingFileError:
                continue

            if self._try_load_debuginfo(objfile, filepath, verbose):
                break

    def setup_tasks(self) -> None:
        """
        Populate GDB's thread list using the kernel's task lists

        This method will iterate over the kernel's task lists, create a
        LinuxTask object, and create a gdb thread for each one.  The
        threads will be built so that the registers are ready to be
        populated, which allows symbolic stack traces to be made available.
        """
        from crash.types.percpu import get_percpu_vars
        from crash.types.task import LinuxTask, for_each_all_tasks
        import crash.cache.tasks  # pylint: disable=redefined-outer-name
        gdb.execute('set print thread-events 0')

        rqs = get_percpu_vars(self.symbols.runqueues)
        rqscurrs = {int(x["curr"]): k for (k, x) in rqs.items()}

        print("Loading tasks...", end='')
        sys.stdout.flush()

        task_count = 0
        try:
            crashing_cpu = int(get_symbol_value('crashing_cpu'))
        except MissingSymbolError:
            crashing_cpu = -1

        for task in for_each_all_tasks():
            ltask = LinuxTask(task)

            active = int(task.address) in rqscurrs
            if active:
                cpu = rqscurrs[int(task.address)]
                regs = self.vmcore.attr.cpu[cpu].reg
                ltask.set_active(cpu, regs)

            ptid = (LINUX_KERNEL_PID, task['pid'], 0)

            try:
                thread = gdb.selected_inferior().new_thread(ptid, ltask)
            except gdb.error:
                print("Failed to setup task @{:#x}".format(int(task.address)))
                continue
            thread.name = task['comm'].string()
            if active and cpu == crashing_cpu:
                self.crashing_thread = thread

            self.arch.setup_thread_info(thread)
            ltask.attach_thread(thread)
            ltask.set_get_stack_pointer(self.arch.get_stack_pointer)

            crash.cache.tasks.cache_task(ltask)

            task_count += 1
            if task_count % 100 == 0:
                print(".", end='')
                sys.stdout.flush()
        print(" done. ({} tasks total)".format(task_count))

        gdb.selected_inferior().executing = False
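

# An illustrative bootstrap sketch, not part of the original module: a typical
# session constructs CrashKernel (the search paths here are hypothetical),
# loads module symbols and debuginfo, and then populates GDB's thread table
# from the kernel task lists.
def _example_start_session() -> CrashKernel:
    kernel = CrashKernel(roots=["/"],
                         module_path=["./modules"],
                         module_debuginfo_path=["./modules.debug"],
                         verbose=True)
    kernel.load_modules(verbose=True)
    kernel.setup_tasks()
    return kernel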
Example #14
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

import addrxlat
import crash
from crash.cache.syscache import utsname
from crash.util import offsetof
from crash.util.symbols import Types

import gdb

types = Types(['uint32_t *', 'uint64_t *'])


class TranslationContext(addrxlat.Context):
    def __init__(self, *args: int, **kwargs: int) -> None:
        super().__init__(*args, **kwargs)
        self.read_caps = addrxlat.CAPS(addrxlat.KVADDR)

    def cb_sym(self, symtype: int, *args: str) -> int:
        if symtype == addrxlat.SYM_VALUE:
            ms = gdb.lookup_minimal_symbol(args[0])
            if ms is not None:
                return int(ms.value().address)
        elif symtype == addrxlat.SYM_SIZEOF:
            sym = gdb.lookup_symbol(args[0], None)[0]
            if sym is not None:
                return sym.type.sizeof
        elif symtype == addrxlat.SYM_OFFSETOF:
            sym = gdb.lookup_symbol(args[0], None, gdb.SYMBOL_STRUCT_DOMAIN)[0]
            if sym is None:
Example #15
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

from typing import Iterable, Tuple

from crash.util.symbols import Types
from crash.types.list import list_for_each_entry
from crash.cache.syscache import kernel, jiffies_to_msec

import gdb


class NoQueueError(RuntimeError):
    pass


types = Types(['struct request'])


def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]:
    """
    Iterates over each ``struct request`` in request_queue

    This method iterates over the ``request_queue``'s queuelist and
    returns a request for each member.

    Args:
        queue: The ``struct request_queue`` used to iterate.  The value
            must be of type ``struct request_queue``.

    Yields:
        :obj:`gdb.Value`: Each ``struct request`` contained within the
Example #16
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
"""
The crash.types.sbitmap module provides helpers for iterating and scanning
scalable bitmaps

"""

from typing import Iterable

import gdb

from crash.exceptions import InvalidArgumentError
from crash.util.symbols import Types
from crash.util import struct_has_member

types = Types(['struct sbitmap', 'struct sbitmap_word'])


def sbitmap_for_each_set(sbitmap: gdb.Value) -> Iterable[int]:
    """
    Yield each set bit in a scalable bitmap

    Args:
        sbitmap: The bitmap to iterate.

    Yields:
        :obj:`int`: The position of a bit that is set

    """

    length = int(sbitmap['depth'])
Example #17
slab_list_name = {0: "partial", 1: "full", 2: "free"}
slab_list_fullname = {0: "slabs_partial", 1: "slabs_full", 2: "slabs_free"}

BUFCTL_END = ~0 & 0xffffffff


def col_error(msg: str) -> str:
    return "\033[1;31;40m {}\033[0;37;40m ".format(msg)


def col_bold(msg: str) -> str:
    return "\033[1;37;40m {}\033[0;37;40m ".format(msg)


types = Types(['kmem_cache', 'struct kmem_cache'])

SlabType = TypeVar('SlabType', bound='Slab')
KmemCacheType = TypeVar('KmemCacheType', bound='KmemCache')


class Slab:

    slab_list_head: str = 'list'
    page_slab: bool = False
    real_slab_type: gdb.Type
    bufctl_type: gdb.Type

    @classmethod
    def check_page_type(cls, gdbtype: gdb.Type) -> None:
        if cls.page_slab is False:
Example #18
class Zone:

    types = Types(['struct page'])

    def __init__(self, obj: gdb.Value, zid: int) -> None:
        self.gdb_obj = obj
        self.zid = zid
        self.nid = int(obj["node"])

    def is_populated(self) -> bool:
        return self.gdb_obj["present_pages"] != 0

    def get_vmstat(self) -> List[int]:
        stats = [0] * VmStat.nr_stat_items
        vm_stat = self.gdb_obj["vm_stat"]

        for item in range(0, VmStat.nr_stat_items):
            # TODO abstract atomic?
            stats[item] = int(vm_stat[item]["counter"])
        return stats

    def add_vmstat_diffs(self, diffs: List[int]) -> None:
        for cpu in for_each_online_cpu():
            pageset = get_percpu_var(self.gdb_obj["pageset"], cpu)
            vmdiff = pageset["vm_stat_diff"]
            for item in range(0, VmStat.nr_stat_items):
                diffs[item] += int(vmdiff[item])

    def get_vmstat_diffs(self) -> List[int]:
        diffs = [0] * VmStat.nr_stat_items
        self.add_vmstat_diffs(diffs)
        return diffs

    def _check_free_area(self, area: gdb.Value, is_pcp: bool) -> None:
        nr_free = 0
        if is_pcp:
            list_array_name = "lists"
            error_desc = "pcplist"
        else:
            list_array_name = "free_list"
            error_desc = "free area"
        for free_list in array_for_each(area[list_array_name]):
            try:
                for page_obj in list_for_each_entry(free_list,
                                                    self.types.page_type,
                                                    "lru"):
                    page = crash.types.page.Page.from_obj(page_obj)
                    nr_free += 1
                    if (page.get_nid() != self.nid
                            or page.get_zid() != self.zid):
                        print(
                            f"page 0x{int(page_obj.address):x} misplaced on "
                            f"{error_desc} of node {self.nid} zone {self.zid}, "
                            f"has flags for node {page.get_nid()} zone {page.get_zid()}"
                        )
            except BufferError as e:
                print(f"Error traversing free area: {e}")
        nr_expected = area["count"] if is_pcp else area["nr_free"]
        if nr_free != nr_expected:
            print(f"nr_free mismatch in {error_desc} 0x{int(area.address):x}: "
                  f"expected {nr_expected}, counted {nr_free}")

    def check_free_pages(self) -> None:
        for area in array_for_each(self.gdb_obj["free_area"]):
            self._check_free_area(area, False)
        for cpu in for_each_online_cpu():
            pageset = get_percpu_var(self.gdb_obj["pageset"], cpu)
            self._check_free_area(pageset["pcp"], True)
Example #19
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

from typing import Iterable, Union

from crash.util import container_of, get_typed_pointer, decode_flags
from crash.util.symbols import Types, Symvals
from crash.infra.lookup import DelayedSymval, DelayedType
from crash.types.list import list_for_each_entry
from crash.subsystem.storage import block_device_name

import gdb

types = Types('struct super_block')
symvals = Symvals('super_blocks')

AddressSpecifier = Union[int, str, gdb.Value]

MS_RDONLY = 1
MS_NOSUID = 2
MS_NODEV = 4
MS_NOEXEC = 8
MS_SYNCHRONOUS = 16
MS_REMOUNT = 32
MS_MANDLOCK = 64
MS_DIRSYNC = 128
MS_NOATIME = 1024
MS_NODIRATIME = 2048
MS_BIND = 4096
MS_MOVE = 8192
MS_REC = 16384
Example #20
import gdb

from crash.util import container_of
from crash.util.symbols import Types
from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError


class TreeError(Exception):
    pass


class CorruptTreeError(TreeError):
    pass


types = Types(['struct rb_root', 'struct rb_node'])


def _rb_left_deepest_node(node: gdb.Value) -> Optional[gdb.Value]:
    while int(node) != 0:
        if int(node['rb_left']) != 0:
            node = node['rb_left']
        elif int(node['rb_right']) != 0:
            node = node['rb_right']
        else:
            return node

    return None


def _rb_parent(node: gdb.Value) -> Optional[gdb.Value]:
Example #21
from crash.types.cpu import highest_possible_cpu_nr

import gdb

SymbolOrValue = Union[gdb.Value, gdb.Symbol]


class PerCPUError(TypeError):
    """The passed object does not respond to a percpu pointer."""
    _fmt = "{} does not correspond to a percpu pointer."

    def __init__(self, var: SymbolOrValue) -> None:
        super().__init__(self._fmt.format(var))


types = Types(
    ['void *', 'char *', 'struct pcpu_chunk', 'struct percpu_counter'])
symvals = Symvals([
    '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', 'pcpu_nr_slots',
    'pcpu_group_offsets'
])
msymvals = MinimalSymvals(['__per_cpu_start', '__per_cpu_end'])


class PerCPUState:
    """
    Per-cpus come in a few forms:
    - "Array" of objects
    - "Array" of pointers to objects
    - Pointers to either of those

    If we want to get the typing right, we need to recognize each one
Example #22
#!/usr/bin/python3
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

import gdb

from crash.util.symbols import Types, Symvals
from crash.types.kallsyms import kallsyms_lookup

types = Types(['union handle_parts', 'struct stack_record'])

symvals = Symvals(['stack_slabs'])

# TODO not sure how to determine this from the dump
STACK_ALLOC_ALIGN = 4


class StackTrace:
    def __init__(self, nr_entries: int, entries: gdb.Value) -> None:
        self.nr_entries = nr_entries
        self.entries = entries

    def dump(self, prefix: str = "") -> None:
        for i in range(self.nr_entries):
            addr = int(self.entries[i])
            sym = kallsyms_lookup(addr)
            print(f"{prefix}0x{addr:x} {sym}")

    @classmethod
    def from_handle(cls, handle: gdb.Value) -> 'StackTrace':

        parts = handle.address.cast(types.union_handle_parts_type.pointer())
Example #23
.. _bitmap_note:

A bitmap is represented as either an array of ``unsigned long`` or as
``unsigned long *``.  Each routine below that accepts a gdb.Value
requires that it be of either type.
"""

from typing import Iterable, Tuple

from crash.exceptions import InvalidArgumentError
from crash.util.symbols import Types

import gdb

types = Types('unsigned long')


def _check_bitmap_type(bitmap: gdb.Value) -> None:
    if ((bitmap.type.code != gdb.TYPE_CODE_ARRAY
         or bitmap[0].type.code != types.unsigned_long_type.code
         or bitmap[0].type.sizeof != types.unsigned_long_type.sizeof) and
        (bitmap.type.code != gdb.TYPE_CODE_PTR
         or bitmap.type.target().code != types.unsigned_long_type.code
         or bitmap.type.target().sizeof != types.unsigned_long_type.sizeof)):
        raise InvalidArgumentError(
            "bitmaps are expected to be arrays of unsigned long not `{}'".
            format(bitmap.type))
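

# An illustrative sketch, not the module's own helper below: a bit index in an
# unsigned-long bitmap splits into a word index and an offset within that
# word, with the word width taken from the debuginfo type rather than being
# hard-coded.
def _example_bit_location(bit: int) -> Tuple[int, int]:
    bits_per_word = types.unsigned_long_type.sizeof * 8
    return (bit // bits_per_word, bit % bits_per_word)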


def _get_bit_location(bit: int) -> Tuple[int, int]:
Example #24
from crash.subsystem.filesystem.xfs import xfs_mount
from crash.subsystem.filesystem.xfs import xfs_for_each_ail_log_item
from crash.subsystem.filesystem.xfs import xfs_log_item_typed
from crash.subsystem.filesystem.xfs import xfs_format_xfsbuf
from crash.subsystem.filesystem.xfs import XFS_LI_TYPES
from crash.subsystem.filesystem.xfs import XFS_LI_EFI
from crash.subsystem.filesystem.xfs import XFS_LI_INODE
from crash.subsystem.filesystem.xfs import XFS_LI_BUF, XFS_LI_DQUOT
from crash.subsystem.filesystem.xfs import XFS_LI_QUOTAOFF, XFS_BLI_FLAGS
from crash.subsystem.filesystem.xfs import XFS_DQ_FLAGS
from crash.subsystem.filesystem.xfs import xfs_mount_flags, xfs_mount_uuid
from crash.subsystem.filesystem.xfs import xfs_mount_version
from crash.util import decode_flags, struct_has_member
from crash.util.symbols import Types

types = Types(['struct xfs_buf *'])


class XFSCommand(Command):
    """display XFS internal data structures"""
    def __init__(self, name: str) -> None:
        parser = ArgumentParser(prog=name)
        subparsers = parser.add_subparsers(help="sub-command help")
        show_parser = subparsers.add_parser('show', help='show help')
        show_parser.set_defaults(subcommand=self.show_xfs)
        show_parser.add_argument('addr')
        list_parser = subparsers.add_parser('list', help='list help')
        list_parser.set_defaults(subcommand=self.list_xfs)
        ail_parser = subparsers.add_parser('dump-ail', help='ail help')
        ail_parser.set_defaults(subcommand=self.dump_ail)
        ail_parser.add_argument('addr')
Example #25
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

import uuid

from crash.exceptions import InvalidArgumentError
from crash.util import decode_uuid, struct_has_member, container_of
from crash.util.symbols import Types
from crash.subsystem.filesystem import is_fstype_super

import gdb

types = Types(
    ['struct btrfs_inode', 'struct btrfs_fs_info *', 'struct btrfs_fs_info'])


def is_btrfs_super(super_block: gdb.Value) -> bool:
    """
    Tests whether a ``struct super_block`` belongs to btrfs.

    Args:
        super_block: The ``struct super_block`` to test.
            The value must be of type ``struct super_block``.

    Returns:
        :obj:`bool`: Whether the super_block belongs to btrfs

    Raises:
        :obj:`gdb.NotAvailableError`: The target value was not available.
    """
    return is_fstype_super(super_block, "btrfs")
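

# An illustrative usage sketch, not part of the original module: assuming `sb`
# is a gdb.Value of type ``struct super_block``, report whether it belongs to
# btrfs before attempting any btrfs-specific decoding.
def _example_report_btrfs(sb: gdb.Value) -> None:
    if is_btrfs_super(sb):
        print("btrfs super_block at {:#x}".format(int(sb.address)))
    else:
        print("not a btrfs super_block")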
Example #26
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch
from crash.arch import FetchRegistersCallback
from crash.util.symbols import Types, MinimalSymvals
from crash.util.symbols import TypeCallbacks, MinimalSymbolCallbacks

import gdb

types = Types([
    'struct inactive_task_frame *', 'struct thread_info *', 'unsigned long *'
])
msymvals = MinimalSymvals(['thread_return'])


# pylint: disable=abstract-method
class _FetchRegistersBase(FetchRegistersCallback):
    def fetch_active(self, thread: gdb.InferiorThread, register: int) -> None:
        task = thread.info
        for reg in task.regs:
            if reg == "rip" and register not in (16, -1):
                continue
            try:
                thread.registers[reg].value = task.regs[reg]
            except KeyError:
                pass


# pylint: disable=abstract-method
class _FRC_inactive_task_frame(_FetchRegistersBase):
Example #27
        self.xfsbuf = self.bio['bi_private'].cast(self._types.xfs_buf_p_type)
        self.devname = block_device_name(self.bio['bi_bdev'])

    def __next__(self) -> Any:
        return XFSBufDecoder(self.xfsbuf)

    def __str__(self) -> str:
        return self._description.format(int(self.bio), self.devname)


XFSBufBioDecoder.register()

types = Types([
    'struct xfs_log_item', 'struct xfs_buf_log_item',
    'struct xfs_inode_log_item', 'struct xfs_efi_log_item',
    'struct xfs_efd_log_item', 'struct xfs_dq_logitem',
    'struct xfs_qoff_logitem', 'struct xfs_inode', 'struct xfs_mount *',
    'struct xfs_buf *'
])


class XFS:
    """
    XFS File system state class.  Not meant to be instantiated directly.
    """
    _ail_head_name = None

    @classmethod
    def detect_ail_version(cls, gdbtype: gdb.Type) -> None:
        """
        Detect what version of the ail structure is in use
Example #28
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:

from typing import Iterator, Callable, Dict, List

from crash.exceptions import InvalidArgumentError, ArgumentTypeError
from crash.exceptions import UnexpectedGDBTypeError
from crash.util import array_size, struct_has_member
from crash.util.symbols import Types, Symvals, SymbolCallbacks
from crash.types.list import list_for_each_entry

import gdb

PF_EXITING = 0x4

types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t'])
symvals = Symvals(['init_task', 'init_mm'])

# This is pretty painful.  These are all #defines so none of them end
# up with symbols in the kernel.  The best approximation we have is
# task_state_array which doesn't include all of them.  All we can do
# is make some assumptions based on the changes upstream.  This will
# be fragile.
class TaskStateFlags:
    """
    A class to contain state related to discovering task flag values.
    Not meant to be instantiated.


    The initial values below are overridden once symbols are available to
    resolve them properly.
Example #29
class CrashConfigCache(CrashCache):
    types = Types(['char *'])
    symvals = Symvals(['kernel_config_data'])
    msymvals = MinimalSymvals(['kernel_config_data', 'kernel_config_data_end'])

    def __init__(self) -> None:
        self._config_buffer = ""
        self._ikconfig_cache: Dict[str, str] = dict()

    @property
    def config_buffer(self) -> str:
        if not self._config_buffer:
            self._config_buffer = self._decompress_config_buffer()
        return self._config_buffer

    @property
    def ikconfig_cache(self) -> Dict[str, str]:
        if not self._ikconfig_cache:
            self._parse_config()
        return self._ikconfig_cache

    def __getitem__(self, name: str) -> Any:
        try:
            return self.ikconfig_cache[name]
        except KeyError:
            return None

    @staticmethod
    def _read_buf_bytes(address: int, size: int) -> bytes:
        return gdb.selected_inferior().read_memory(address, size).tobytes()

    def _locate_config_buffer_section(self) -> ImageLocation:
        data_start = int(self.msymvals.kernel_config_data)
        data_end = int(self.msymvals.kernel_config_data_end)

        return {
            'data': {
                'start': data_start,
                'size': data_end - data_start,
            },
            'magic': {
                'start': data_start - 8,
                'end': data_end,
            },
        }

    def _locate_config_buffer_typed(self) -> ImageLocation:
        start = int(self.symvals.kernel_config_data.address)
        end = start + self.symvals.kernel_config_data.type.sizeof

        return {
            'data': {
                'start': start + 8,
                'size': end - start - 2 * 8 - 1,
            },
            'magic': {
                'start': start,
                'end': end - 8 - 1,
            },
        }

    def _verify_image(self, location: ImageLocation) -> None:
        magic_start = b'IKCFG_ST'
        magic_end = b'IKCFG_ED'

        buf_len = len(magic_start)
        buf = self._read_buf_bytes(location['magic']['start'], buf_len)
        if buf != magic_start:
            raise IOError(
                f"Missing magic_start in kernel_config_data. Got `{buf!r}'")

        buf_len = len(magic_end)
        buf = self._read_buf_bytes(location['magic']['end'], buf_len)
        if buf != magic_end:
            raise IOError(
                f"Missing magic_end in kernel_config_data. Got `{buf!r}'")

    def _decompress_config_buffer(self) -> str:
        try:
            location = self._locate_config_buffer_section()
        except DelayedAttributeError:
            location = self._locate_config_buffer_typed()

        self._verify_image(location)

        # Read the compressed data
        buf = self._read_buf_bytes(location['data']['start'],
                                   location['data']['size'])

        return zlib.decompress(buf, 16 + zlib.MAX_WBITS).decode('utf-8')

    def __str__(self) -> str:
        return self.config_buffer

    def _parse_config(self) -> None:
        for line in self.config_buffer.splitlines():
            # bin comments
            line = re.sub("#.*$", "", line).strip()

            if not line:
                continue

            m = re.match("CONFIG_([^=]*)=(.*)", line)
            if m:
                self._ikconfig_cache[m.group(1)] = m.group(2)
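

# An illustrative usage sketch, not part of the original module: _parse_config
# strips the CONFIG_ prefix, so options are looked up by their bare names (the
# option names here are only examples), and __getitem__ returns None for
# options that are not set in the decompressed configuration.
def _example_query_config(config_cache: CrashConfigCache) -> None:
    hz = config_cache['HZ']
    if hz is not None:
        print("CONFIG_HZ={}".format(hz))
    if config_cache['NO_SUCH_OPTION'] is None:
        print("CONFIG_NO_SUCH_OPTION is not set")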
Example #30
from typing import Iterable, Tuple

import gdb

from crash.util.symbols import Types
from crash.subsystem.storage import queue_is_mq, rq_is_sync, rq_in_flight
from crash.types.sbitmap import sbitmap_for_each_set
from crash.exceptions import InvalidArgumentError


class NoQueueError(RuntimeError):
    pass


types = Types([
    'struct request', 'struct request_queue', 'struct sbitmap_queue',
    'struct blk_mq_hw_ctx'
])


def _check_queue_type(queue: gdb.Value) -> None:
    if not queue_is_mq(queue):
        raise InvalidArgumentError(
            "Passed request queue is not a multiqueue queue")


def mq_queue_for_each_hw_ctx(queue: gdb.Value) -> Iterable[gdb.Value]:
    """
    Iterates over each ``struct blk_mq_hw_ctx`` in request_queue

    This method iterates over each blk-mq hardware context in request_queue
    and yields each blk_mq_hw_ctx.