Example #1
def load_project(path):
    """ Load a DesignFormat project from a JSON blob file. """
    if not os.path.isfile(path):
        raise SpliceError(f"Could not read file at path: {path}")
    with open(path, 'r') as fh:
        return DFProject().loadObject(json.load(fh))
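
A minimal usage sketch, assuming this module's imports are in place and a DesignFormat blob exists on disk (the filename below is purely illustrative):

# Hypothetical usage: load a blob and list its principal nodes
project = load_project("design.df_blob")  # path is illustrative
for node in project.getAllPrincipalNodes():
    print(node.id)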
Example #2
def elaborate(top_docs, scope, max_depth=None):
    """ Elaborate described design from a top-level tag downwards.

    Run elaboration, starting from a Phhidle YAML schema object. Depending on the
    type of the schema object, different elaboration pathways are followed. All
    results are returned as a DesignFormat DFProject.

    Args:
        top_docs : The top level document to elaborate from
        scope    : An instance of ElaboratorScope that includes all documents
                   parsed into the tool, allowing references to be evaluated.
        max_depth: The maximum depth to elaborate to (optional, by default
                   performs a full depth elaboration - max_depth=None)

    Returns:
        DFProject: A DesignFormat project describing the elaborated design
    """
    # Separate !Config, !Group and !Reg tags as these need special handling
    other_tags = [x for x in top_docs if type(x) not in [Group, Config, Reg]]
    reg_tags = [x for x in top_docs if type(x) in [Group, Config, Reg]]

    # Create a single project to return
    project = DFProject()

    # Work through all of the non-register tags
    for doc in other_tags:
        if type(doc) in ignored:
            report.debug(
                f"Ignoring top-level tag of type {type(doc).__name__}",
                item=doc)
            continue
        elif type(doc).__name__ not in elaborators:
            raise ElaborationError(
                report.error(
                    f"Unsupported top level type {type(doc).__name__}",
                    item=doc))
        df_obj = elaborators[type(doc).__name__](doc,
                                                 scope,
                                                 max_depth=max_depth)
        if isinstance(df_obj, DFProject):
            project.mergeProject(df_obj)
        else:
            project.addPrincipalNode(df_obj)

    # Special handling for !Config, !Group and !Reg tags
    if len(reg_tags) > 0:
        configs = [x for x in reg_tags if isinstance(x, Config)]
        if len(configs) == 0:
            config = Config(
                [Register(x.name) for x in reg_tags if isinstance(x, Group)])
            configs.append(config)
        for reg_group in elaborate_registers(configs[0], scope):
            project.addPrincipalNode(reg_group)

    # Return the project
    return project
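
The elaborate function above dispatches through the elaborators lookup, keyed by the tag's type name. A minimal sketch of how such a registry could be wired up, assuming every handler shares the (doc, scope, max_depth=None) call signature; the key names are assumptions based on the !Mod and !His tags used in the other examples:

# Sketch (assumption): registry mapping tag type names to their handlers
elaborators = {
    "Mod": elaborate_module,        # see Example #5
    "His": elaborate_interconnect,  # see Example #3
}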
Example #3
def elaborate_interconnect(his, scope, max_depth=None, project=None):
    """
    Evaluate a top-level !His and every interconnect type it references, returning
    a DFProject. The top-level !His will be a principal node, whilst referenced
    interconnects will be reference nodes.

    Args:
        his      : The top-level !His to evaluate
        scope    : The ElaboratorScope object containing all referenced documents
        max_depth: Ignored at present, provided for compatibility (optional)
        project  : Project to append to, else a new one is created (optional)

    Returns:
        DFProject: Project containing the elaborated interconnect
    """
    # Build the top level interconnect
    df_intc = build_interconnect(his, scope)

    # If no project, create one and add the interconnect as principal
    if not project:
        project = DFProject(his.name, his.source.path)
        project.addPrincipalNode(df_intc)
    else:
        project.addReferenceNode(df_intc)

    # For any referenced interconnect types, build those as reference nodes
    for component in df_intc.components:
        if component.type == DFConstants.COMPONENT.COMPLEX:
            ref_his = scope.get_document(component.ref, expected=His)
            if not ref_his:
                raise ElaborationError(
                    report.error(f"Failed to resolve His {component.ref}"))
            elaborate_interconnect(ref_his, scope, project=project)

    # Return the project
    return project
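
A usage sketch: resolve a named !His document from the scope and elaborate it into a fresh project (the document name is purely illustrative):

# Hypothetical usage: elaborate a named interconnect and inspect the result
top_his = scope.get_document("axi4_lite", expected=His)  # name is illustrative
project = elaborate_interconnect(top_his, scope)
print(project.getAllPrincipalNodes()[0].id)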
Example #4
        help="Preserve the entire connectivity tree from a specific port")
    # Options for the blob
    parser.add_argument("input", help="Path to the blob file to load as input")
    parser.add_argument("output", help="Output path for the cleaned blob file")
    # Parse arguments
    return parser.parse_args()


if __name__ == "__main__":
    # Get the arguments passed to the script
    args = get_args()
    # Load the blob
    print(f"Loading blob {args.input}")
    df_root = None
    with open(args.input, 'r') as fh:
        df_root = DFProject().loadObject(json.load(fh))
    if not df_root:
        print(f"Failed to open blob from path: {args.input}")
        sys.exit(1)
    print(f"Blob loaded: {df_root.id}")

    # Keep track of all of the nodes we want to preserve
    to_preserve = []

    # For every preserved tree, attempt to resolve it
    print("Preserving tree")
    for path in args.preserve_tree:
        # Resolve this path to a block
        node = df_root.getAllPrincipalNodes()[0].resolvePath(path)
        assert isinstance(node, DFBlock)
        # Add every unique block into the preserve list
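
The excerpt cuts off before the preserve step itself. A sketch of what collecting every unique block beneath the resolved node might look like, assuming the same children attribute used in Examples #5 and #8 (collect_blocks is a hypothetical helper):

# Hypothetical completion: walk the resolved block and its descendants,
# appending each block to the preserve list exactly once
def collect_blocks(block):
    if block not in to_preserve:
        to_preserve.append(block)
    for child in block.children:
        collect_blocks(child)

collect_blocks(node)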
Example #5
def elaborate_module(top, scope, max_depth=None):
    """
    Elaborate a !Mod tag instance, expanding hierarchy and resolving connections
    up to the maximum requested depth.

    Args:
        top      : The top-level !Mod to elaborate from
        scope    : An ElaboratorScope object containing all documents included
                   directly or indirectly by the top module.
        max_depth: The maximum depth to elaborate to (optional, by default
                   performs a full depth elaboration - max_depth=None)

    Returns:
        DFProject: Contains the elaborated block and all interconnects used
    """
    # Build a new project
    project = DFProject(top.name, top.source.path)

    # Build the tree for the root block
    block = build_tree(
        top,                    # The top-level !Mod to evaluate
        top.name,               # Name to use for the top-level !Mod instance
        None,                   # No parent exists
        scope,                  # Scope to use for elaboration
        max_depth = max_depth   # Maximum depth to elaborate to
    )

    # Attach the block as a principal node to the project
    project.addPrincipalNode(block)

    # Get a list of all of the interconnection types directly used by the design
    def list_interconnects(block):
        types = [x.type for x in block.getAllPorts()]
        for child in block.children:
            types += list_interconnects(child)
        return types

    used_types = []

    for block in (x for x in project.nodes.values() if isinstance(x, DFBlock)):
        used_types += list_interconnects(block)

    # Expand the directly used types to include all referenced types
    def chase_his(his_ref):
        his = scope.get_document(his_ref, His)
        if not his:
            raise ElaborationError(report.error(f"Could not locate His {his_ref}"))
        sub_his  = [x for x in his.ports if isinstance(x, HisRef)]
        required = [his] + [scope.get_document(x.ref, His) for x in sub_his]
        for item in sub_his:
            required += chase_his(item.ref)
        return required

    all_required = []
    for his_type in used_types:
        all_required += chase_his(his_type)

    # Ensure the list of His types is unique
    all_required = list(set(all_required))

    # Build and attach descriptions of each interconnect type
    for his in all_required:
        project.addReferenceNode(build_interconnect(his, scope))

    # Log all of the interconnect types that were detected
    report.info(
        f"Identified {len(all_required)} interconnect types in the design",
        body="\n".join((x.name for x in all_required))
    )

    # Log the design hierarchy
    def chase_hierarchy(block, depth=0):
        intro = ""
        if depth > 0: intro += (" | " * (depth - 1)) + " |-"
        intro += block.id
        lines = [intro]
        for child in block.children: lines += chase_hierarchy(child, depth+1)
        return lines

    txt_hier = chase_hierarchy(block)
    report.info(
        f"Design hierarchy contains {len(txt_hier)} nodes", body="\n".join(txt_hier)
    )

    # Return the project
    return project
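
A usage sketch, assuming a parsed top-level !Mod and a populated ElaboratorScope are already in hand (top_mod is illustrative):

# Hypothetical usage: elaborate the full hierarchy and count the blocks
project = elaborate_module(top_mod, scope)
blocks = [x for x in project.nodes.values() if isinstance(x, DFBlock)]
print(f"Elaborated {len(blocks)} blocks under {top_mod.name}")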
Example #6
import json
import sys

# Import DesignFormat
from designformat import DFProject

# Check that enough arguments have been passed
if len(sys.argv) != 2 or sys.argv[1] == '-h':
    print("usage: repl_cli.py [-h] blob_path")
    print(
        "repl_cli.py: error: the following arguments are required: blob_path")
    sys.exit(0)

# Get hold of the root node
df_root = None
with open(sys.argv[1], 'r') as fh:
    df_root = DFProject().loadObject(json.load(fh))
print("Got df_root object with ID '" + df_root.id + "' of type " +
      type(df_root).__name__)
print("Access the object properties using df_root.id etc.")

# Expose all principal nodes from the root node
print("Exposing principal nodes:")
for node in df_root.getAllPrincipalNodes():
    node_var = node.id.replace(" ", "_")
    print(" - " + node_var + ": " + node.id + " of type " +
          type(node).__name__)
    globals()[node_var] = node

# Start up the REPL
import pdb
pdb.set_trace()
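
Here pdb serves purely as a quick interactive shell; a sketch of the same idea using the standard library's code module, which avoids debugger semantics:

# Alternative REPL using the standard library's code module instead of pdb
import code
code.interact(banner="DesignFormat REPL", local=globals())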
Example #7
def main():
    # Get arguments
    args = get_args()

    # Read in the DFBlob file
    if not os.path.isfile(args.blob):
        print(f"ERROR: Could not read file at path: {args.blob}")
        sys.exit(255)

    df_root = None
    with open(args.blob, 'r') as fh:
        df_root = DFProject().loadObject(json.load(fh))

    # Identify the first principal node
    try:
        principal = [
            x for x in df_root.getAllPrincipalNodes()
            if isinstance(x, DFBlock)
        ][0]
    except IndexError:
        print("ERROR: Failed to locate a principal node")
        sys.exit(1)

    # Dump a list of DFInterconnect types
    if args.top_interconnects or args.interconnects:
        intcs = []
        # Dump interconnects only used in the top-level
        if args.top_interconnects:
            block = df_root.getAllPrincipalNodes()[0]
            if not isinstance(block, DFBlock):
                print("ERROR: Failed to locate a principal node")
                sys.exit(1)
            intcs += block.getInterconnectTypes(depth=0)
        # Dump all interconnects included in the DFProject (not just top-level)
        elif args.interconnects:
            intcs = [
                x for x in df_root.nodes.values()
                if isinstance(x, DFInterconnect)
            ]
        # If test mode is enabled, filter the interconnects
        if args.test:
            intcs = [
                x for x in intcs if run_attribute_test(x,
                                                       args.test,
                                                       args.false,
                                                       args.value,
                                                       None,
                                                       None,
                                                       use_exitcode=False,
                                                       use_print=False)
            ]
        # Chase each interconnect so that we also have all of its components
        def chase_intc(intc):
            for comp in (x for x in intc.components if x.isComplex()):
                comp_intc = comp.getReference()
                if comp_intc not in intcs:
                    intcs.append(comp_intc)
                chase_intc(comp_intc)

        for intc in intcs[:]:
            chase_intc(intc)
        # Print out all of the interconnects using spaces or newlines
        print((" " if args.spaced else "\n").join([x.id for x in intcs]))

    # Dump the list of root DFBlocks
    elif args.blocks:
        blocks = [x for x in df_root.nodes.values() if isinstance(x, DFBlock)]
        print((" " if args.spaced else "\n").join([x.id for x in blocks]))

    # Dump an address map from a named entry-point
    elif args.address_map:
        entrypoint = df_root.getAllPrincipalNodes()[0].resolvePath(
            args.address_map)
        if not entrypoint:
            print("ERROR: Failed to identify entrypoint: " + entrypoint)
            sys.exit(1)
        # Declare a function to recursively find all address maps
        def find_maps(port, index=0, maps=None, depth=0):
            maps = [] if not maps else maps
            prefix = " | ".join(["" for x in range(depth - 1)])
            # If this is a newly encountered address map, look through all the targets
            if port.block.address_map and port.block.address_map not in maps:
                maps.append(port.block.address_map)
                # Check this port is actually accessible
                rel_addr = entrypoint.getRelativeAddress(port,
                                                         remote_index=index)
                if rel_addr is None: return
                # Print out this address
                print(
                    f"{prefix}{' |- ' if (depth > 0) else ''}{port.block.hierarchicalPath()}: {hex(rel_addr)}"
                )
                for target in port.block.address_map.targets:
                    find_maps(target.port,
                              index=target.port_index,
                              maps=maps,
                              depth=(depth + 1))
            # Else, if we have an output, chase it
            elif len(port.getOutboundConnections()) > 0:
                pathways = port.chaseConnection(index=index)
                for path in pathways:
                    # Look at the last entry in the path (which is the endpoint)
                    find_maps(path[-1][0],
                              index=path[-1][1],
                              maps=maps,
                              depth=depth)
            # Else this is a termination
            else:
                # Check this port is actually accessible
                rel_addr = entrypoint.getRelativeAddress(port,
                                                         remote_index=index)
                if rel_addr is None: return
                # Print out this address
                print(
                    f"{prefix}{' |- ' if (depth > 0) else ''}{port.hierarchicalPath()}[{index}]: {hex(rel_addr)}"
                )

        find_maps(entrypoint)

    # If 'present' or 'absent' lists were provided
    elif len(args.present) > 0 or len(args.absent) > 0:
        missing = [
            x for x in args.present if principal.getAttribute(x) is None
        ]
        extra = [
            x for x in args.absent if principal.getAttribute(x) is not None
        ]
        # Allow either AND or OR operations for a list of 'present' tags
        present_result = ((len(missing) == 0)
                          or (len(missing) < len(args.present)
                              and args.present_or))
        # Allow either AND or OR operations for a list of 'absent' tags
        absent_result = ((len(extra) == 0)
                         or (len(extra) < len(args.absent) and args.absent_or))
        if (present_result and absent_result) ^ args.false:
            result_pass(args.if_true, 0 if args.exitcode else None, True)
        else:
            result_fail(args.if_false, 1 if args.exitcode else None, True)
    # If attribute test mode enabled, then run an attribute comparison
    elif args.test:
        run_attribute_test(principal, args.test, args.false, args.value,
                           args.if_true, args.if_false, args.exitcode)
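
The present/absent branch above applies AND semantics to each tag list by default, relaxed to OR when the corresponding *_or flag is set. The same predicate in isolation, as a sketch (tags_ok is a hypothetical name):

# Hypothetical standalone form of the present/absent predicate: pass when no
# tags failed, or when at least one passed and OR semantics were requested
def tags_ok(failed, requested, use_or):
    if not requested:
        return True
    return len(failed) == 0 or (use_or and len(failed) < len(requested))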
Example #8
def check_apertures(project: DFProject):
    """ Check that register maps of blocks are visible through the aperture

    Args:
        project: Project to check through

    Returns:
        list: List of any RuleViolations that have been detected
    """

    # Create storage for any detected violations
    violations = []

    # Check if the principal node is a DFBlock
    if len(project.getAllPrincipalNodes()) == 0:
        report.debug("Project contains no principal nodes - skipping check")
        return violations

    roots = [x for x in project.getAllPrincipalNodes() if type(x) == DFBlock]
    if len(roots) == 0:
        report.debug(
            "Project contains no DFBlock principal nodes - skipping check")
        return violations

    # For each root node, search out every DFBlock with attached registers
    def find_reg_blocks(block):
        found = []
        # First iterate through any child blocks
        for child in block.children:
            found += find_reg_blocks(child)
        # Then check whether this block itself has registers
        if len(block.registers) != 0:
            found.append(block)
        return found

    reg_blocks = []
    for block in roots:
        reg_blocks += find_reg_blocks(block)

    report.info(f"Found the following {len(reg_blocks)} register blocks",
                body="\n".join(
                    [" - " + x.hierarchicalPath() for x in reg_blocks]))

    # If we didn't find any register blocks, skip the check
    if len(reg_blocks) == 0:
        report.debug("Project contains no register blocks - skipping check")
        return violations

    # Iterate through every located block with registers
    curr_violations = 0
    for block in reg_blocks:
        report.debug(f"Examining block: {block.hierarchicalPath()}")

        # Keep track of how many violations have already been recorded
        curr_violations = len(violations)

        # First, try to locate which port is attached to an address map
        access_port = None
        for port in block.ports.input:
            for port_idx in range(port.count):
                # Find out who drives this port
                # NOTE: This is a tuple of a DFPort and the port index (integer)
                driver = chase_driver(port, port_idx)
                # Does the driver have an address map that we can chase through?
                if not driver[0].block.address_map: continue
                # Is there a target on the address map for the driver port?
                if not driver[0].block.address_map.getTarget(
                        driver[0], driver[1]):
                    continue
                # We found our access point!
                access_port = (port, port_idx)
                break

        # If we didn't find an access port, this is a violation
        if access_port is None:
            violations.append(
                RuleViolation(
                    f"Could not establish access port for block {block.hierarchicalPath()}",
                    block))
            continue

        # Now we want to find all address maps in a direct chain
        address_maps = []

        def find_maps(port, port_idx):
            # Get the first address map in the chain
            driver = chase_driver(port, port_idx)
            addr = driver[0].block.address_map
            if not addr: return
            address_maps.append({
                "map": addr,
                "port": driver[0],
                "index": driver[1]
            })
            # Identify the target port for the driver
            tgt = addr.getTarget(driver[0], driver[1])
            if not tgt: return
            # How many initiators can access this target?
            inits = addr.getInitiatorsForTarget(tgt)
            if len(inits) == 0:
                violations.append(
                    RuleViolation(
                        f"No initiators can access port '{driver[0].name}' in address "
                        f"map of '{driver[0].block.hierarchicalPath()}'",
                        node=driver))
                return
            # If we have more than one initiator, the path has diverged - so stop
            if len(inits) > 1: return
            # So if we have exactly one initiator, chase it
            return find_maps(inits[0].port, inits[0].port_index)

        find_maps(*access_port)

        # Check no new violations have been raised
        if len(violations) > curr_violations: continue

        report.debug(
            f"Identified {len(address_maps)} address maps in driving chain for "
            f"port '{access_port[0].hierarchicalPath()}' index {access_port[1]}"
        )

        # Identify the highest address in the register map
        max_reg = None
        for reg in block.registers:
            if type(reg) == DFRegisterGroup:
                for grp_reg in reg.registers:
                    if type(grp_reg) != DFRegister:
                        raise CriticalRuleViolation(
                            f"Invalid node '{reg.id}' of type {type(reg).__name__} "
                            f"in register group", reg)
                    if not max_reg or grp_reg.getOffset() > max_reg.getOffset():
                        max_reg = grp_reg
            elif type(reg) == DFRegister:
                if not max_reg or reg.getOffset() > max_reg.getOffset():
                    max_reg = reg
            else:
                raise CriticalRuleViolation(
                    f"Invalid node '{reg.id}' of type {type(reg).__name__} in "
                    f"block's register set", block)
        if max_reg is None:
            report.info(f"No registers found in {block.hierarchicalPath()}")
            continue
        report.debug(
            f"Maximum register offset of {block.hierarchicalPath()} is "
            f"{hex(max_reg.getOffset())} with size {ceil(max_reg.width / 8)}")
        max_address = max_reg.getOffset() + int(ceil(max_reg.width / 8))

        # Walk each address map checking that register block is accessible
        for addr in address_maps:
            tgt = addr['map'].getTarget(addr['port'], addr['index'])
            # Check maximum address is within the aperture size
            if max_address > tgt.aperture:
                violations.append(
                    RuleViolation(
                        f"Register {block.hierarchicalPath()}.{max_reg.id} at offset "
                        f"{hex(max_reg.getOffset())} does not fit in the address map "
                        f"aperture of {tgt.aperture} bytes.\n"
                        f"Block        : {block.hierarchicalPath()}\n"
                        f"Register     : {max_reg.id} @ {hex(max_reg.getOffset())}\n"
                        f"Map          : {addr['map'].block.hierarchicalPath()}\n"
                        f"Target Offset: {hex(tgt.offset)}\n"
                        f"Aperture Size: {tgt.aperture}\n", block))
                break
            # Examine every initiator in this address map
            for init in addr['map'].initiators:
                tgt_inits = addr['map'].getInitiatorsForTarget(tgt)
                # If this initiator can't access the target, warn about it
                if init not in tgt_inits:
                    report.warning(
                        f"Restricted Access To {block.hierarchicalPath()}",
                        body=f"Register block {block.hierarchicalPath()} cannot "
                        f"be accessed from {init.port.hierarchicalPath()} "
                        f"index {init.port_index}")
                    continue
                # Check that the initiator's offset and mask are sufficient
                init_min = init.offset
                init_max = init.offset + init.mask + 1
                if tgt.offset < init_min or (tgt.offset +
                                             max_address) > init_max:
                    violations.append(
                        RuleViolation(
                            f"Not all registers of {block.id} can be accessed by {init.port.id} "
                            f"index {init.port_index}:\n"
                            f"Block         : {block.hierarchicalPath()}\n"
                            f"Address Map   : {addr['map'].block.hierarchicalPath()}\n"
                            f"Target Port   : {tgt.port.id}\n"
                            f"Target Min    : {hex(tgt.offset)}\n"
                            f"Target Max    : {hex(tgt.offset + max_address)}\n"
                            f"Initiator Port: {init.port.id}\n"
                            f"Initiator Min : {hex(init_min)}\n"
                            f"Initiator Max : {hex(init_max)}\n", block))
                    continue

        # Check we have no new violations
        if len(violations) > curr_violations: continue

    # Return the list of detected violations
    return violations
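
A usage sketch showing how the check might be driven after loading a blob, reusing the load_project helper from Example #1 (the path is illustrative):

# Hypothetical usage: lint a loaded project for aperture violations
project = load_project("design.df_blob")  # path is illustrative
violations = check_apertures(project)
if violations:
    print(f"Detected {len(violations)} aperture violations")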