Code example #1
0
def gen_verilog():
    """Compile the merged PE architecture to Verilog.

    Reads the PE architecture description from outputs/PE.json, renders a
    visualization of it, closes the Peak generator over the magma family,
    and emits CoreIR-generated Verilog as outputs/verilog/PE.
    """
    arch = read_arch("outputs/PE.json")
    graph_arch(arch)
    PE_fc = pe_arch_closure(arch)
    PE = PE_fc(family.MagmaFamily())

    # exist_ok=True avoids the check-then-create race of the former
    # os.path.exists() + os.makedirs() pair.
    os.makedirs('outputs/verilog', exist_ok=True)
    m.compile(f"outputs/verilog/PE", PE, output="coreir-verilog")
Code example #2
0
    def write_peak_arch(self, filename: str):
        """Serialize the generated PE architecture to *filename* as JSON.

        Also re-reads the arch from ./outputs/PE.json and renders a
        visualization of it.

        Args:
            filename: destination path for the JSON dump of ``self.arch``.

        Raises:
            ValueError: if the peak arch has not been generated yet
                (``self.arch`` does not exist).
        """
        if not hasattr(self, "arch"):
            raise ValueError("Generate peak arch first")

        # exist_ok=True avoids the check-then-create race of the former
        # os.path.exists() + os.makedirs() pair.
        os.makedirs('outputs/', exist_ok=True)

        with open(filename, "w") as write_file:
            # json.dump streams straight to the file handle instead of
            # building the whole string with json.dumps first.
            json.dump(self.arch, write_file, indent=4, sort_keys=True)

        # NOTE(review): the read-back path is hard-coded and only matches
        # the file just written when filename == "./outputs/PE.json" —
        # confirm whether it should read *filename* instead.
        arch = read_arch("./outputs/PE.json")
        graph_arch(arch)
Code example #3
0
    def analyze_pe(self):
        """Print summary statistics for the generated PE architecture.

        Reports op, I/O, and register counts from ./outputs/PE.json plus
        the number of simple input-to-output paths in self.subgraph.

        Raises:
            ValueError: if the peak arch has not been generated yet.
        """
        if not hasattr(self, "arch"):
            raise ValueError("Generate peak arch first")

        arch = read_arch("./outputs/PE.json")
        arch_stats = {
            'num_inputs': arch.num_inputs,
            'num_bit_inputs': arch.num_bit_inputs,
            'num_outputs': arch.num_outputs,
            'num_bit_outputs': arch.num_bit_outputs,
            'num_modules': len(arch.modules),
            'num_reg': arch.num_reg,
            'num_bit_reg': arch.num_bit_reg,
        }
        arch_stats['num_IO'] = arch.num_inputs + arch.num_outputs

        # Partition subgraph nodes into input/output sets by their op tag.
        node_data = self.subgraph.nodes.data(True)
        inputs = {node for node, attrs in node_data if "input" in attrs['op']}
        outputs = {node for node, attrs in node_data if "output" in attrs['op']}

        # Count every simple path from each input node to each output node.
        total_paths = sum(
            len(list(nx.all_simple_paths(self.subgraph,
                                         source=src,
                                         target=dst)))
            for src in inputs
            for dst in outputs)

        print("PE stats")
        print("Num ops:", arch_stats['num_modules'])
        print("Num I/O:", arch_stats['num_IO'])
        print("Num paths:", total_paths)
Code example #4
0
    def generate_rewrite_rule(self, subgraph_ind, mul_op):
        """Solve for a rewrite rule mapping one subgraph's peak equation
        onto the merged PE architecture.

        Args:
            subgraph_ind: index of the subgraph; its peak equation is
                imported from outputs/peak_eqs/peak_eq_<subgraph_ind>.py.
                A negative index skips the constrained solve.
            mul_op: when falsy, every "mul" module in the arch is
                data-gated via a path constraint before solving.

        Raises:
            ValueError: if ./outputs/PE.json or the peak_eq file for
                this subgraph has not been written yet.

        Side effects: stores the found solution in ``self.rewrite_rule``
        and prints progress; calls ``exit()`` if no solution exists even
        after dropping the input constraints.
        """

        if not os.path.exists("./outputs/PE.json"):
            raise ValueError("Generate and write merged graph peak arch first")

        if not os.path.exists("./outputs/peak_eqs/peak_eq_" +
                              str(subgraph_ind) + ".py"):
            raise ValueError("Generate and write peak_eq first")
        print("\nGenerating rewrite rule for:")
        print(self.short_eq)

        arch = read_arch("./outputs/PE.json")
        graph_arch(arch)
        PE_fc = wrapped_peak_class(arch)

        arch_inputs = arch.inputs

        # Pin each subgraph input node to the corresponding PE input port
        # (bit inputs and word inputs are indexed separately).
        input_constraints = {}

        for n, d in self.subgraph.nodes.data(True):
            if utils.is_node_input(d):
                if utils.is_node_bit_input(d):
                    input_constraints[(f"bitinputs{arch.bit_inputs.index(n)}",
                                       )] = (f"data{n}", )
                else:
                    input_constraints[(f"inputs{arch.inputs.index(n)}", )] = (
                        f"data{n}", )

        path_constraints = {}

        if not mul_op:
            # Equation uses no multiply: force every mul module into its
            # gated mode so the solver cannot route through it.
            print("Datagating multipliers")
            idx = 0
            for module in arch.modules:
                if module.type_ == "mul":
                    path_constraints[(
                        'inst', 'mul',
                        idx)] = hwtypes.smt_bit_vector.SMTBitVector[2](2)
                    print(path_constraints)
                    idx += 1

        arch_mapper = ArchMapper(PE_fc,
                                 path_constraints=path_constraints,
                                 input_constraints=input_constraints)

        # Dynamically import the generated mapping function for this subgraph.
        peak_eq = importlib.import_module("outputs.peak_eqs.peak_eq_" +
                                          str(subgraph_ind))

        if subgraph_ind > -1:
            print("Solving...")
            ir_mapper = arch_mapper.process_ir_instruction(
                peak_eq.mapping_function_fc)
            start = time.time()
            solution = ir_mapper.solve('btor', external_loop=True, logic=QF_BV)
            end = time.time()
            print("Rewrite rule solving time:", end - start)
        else:
            print("Skipping...")
            solution = None

        if solution is None:
            # Retry with a fresh, unconstrained mapper before giving up.
            utils.print_red(
                "No rewrite rule found, trying without input constraints")
            arch_mapper = ArchMapper(PE_fc)
            ir_mapper = arch_mapper.process_ir_instruction(
                peak_eq.mapping_function_fc)
            solution = ir_mapper.solve('btor', external_loop=True, logic=QF_BV)
            if solution is None:
                print("Still couldn't find solution")
                exit()
            else:
                utils.print_green("Found rewrite rule")
                self.rewrite_rule = solution
        else:
            utils.print_green("Found rewrite rule")
            self.rewrite_rule = solution
            for i in solution.ibinding:
                print(i)
0
File: garnet.py  Project: StanfordAHA/garnet
def main():
    """CLI entry point for the Garnet CGRA generator.

    Parses command-line options, constructs the Garnet top level, and
    then optionally: compiles it to Verilog (plus global-buffer /
    controller headers), runs place-and-route and bitstream generation
    for an input application, compiles a virtualized app into per-group
    bitstreams, and/or dumps the configuration registers to config.json.
    """
    parser = argparse.ArgumentParser(description='Garnet CGRA')
    parser.add_argument('--width', type=int, default=4)
    parser.add_argument('--height', type=int, default=2)
    parser.add_argument('--pipeline_config_interval', type=int, default=8)
    parser.add_argument('--glb_tile_mem_size', type=int, default=256)
    parser.add_argument("--input-app", type=str, default="", dest="app")
    parser.add_argument("--input-file", type=str, default="", dest="input")
    parser.add_argument("--output-file", type=str, default="", dest="output")
    parser.add_argument("--gold-file", type=str, default="",
                        dest="gold")
    parser.add_argument("-v", "--verilog", action="store_true")
    parser.add_argument("--no-pd", "--no-power-domain", action="store_true")
    parser.add_argument("--no-pond", action="store_true")
    parser.add_argument("--interconnect-only", action="store_true")
    parser.add_argument("--compact", action="store_true")
    parser.add_argument("--no-sram-stub", action="store_true")
    parser.add_argument("--standalone", action="store_true")
    parser.add_argument("--unconstrained-io", action="store_true")
    parser.add_argument("--dump-config-reg", action="store_true")
    parser.add_argument("--virtualize-group-size", type=int, default=4)
    parser.add_argument("--virtualize", action="store_true")
    parser.add_argument("--use-io-valid", action="store_true")
    parser.add_argument("--pipeline-pnr", action="store_true")
    parser.add_argument("--generate-bitstream-only", action="store_true")
    parser.add_argument('--pe', type=str, default="")
    args = parser.parse_args()

    # Full-chip builds need an even width of at least 4 (two columns per
    # global-buffer tile); --standalone only makes sense without a chip.
    if not args.interconnect_only:
        assert args.width % 2 == 0 and args.width >= 4
    if args.standalone and not args.interconnect_only:
        raise Exception("--standalone must be specified with "
                        "--interconnect-only as well")

    # Default PE is lassen; --pe swaps in a custom arch read from JSON.
    pe_fc = lassen_fc
    if args.pe:
        arch = read_arch(args.pe)
        pe_fc = wrapped_peak_class(arch, debug=True)
    glb_params = gen_global_buffer_params(num_glb_tiles=args.width // 2,
                                          num_cgra_cols=args.width,
                                          # NOTE: We assume num_prr is same as num_glb_tiles
                                          num_prr=args.width // 2,
                                          glb_tile_mem_size=args.glb_tile_mem_size,
                                          bank_data_width=64,
                                          cfg_addr_width=32,
                                          cfg_data_width=32,
                                          axi_addr_width=12,
                                          axi_data_width=32)

    garnet = Garnet(width=args.width, height=args.height,
                    glb_params=glb_params,
                    add_pd=not args.no_pd,
                    pipeline_config_interval=args.pipeline_config_interval,
                    add_pond=not args.no_pond,
                    use_io_valid=args.use_io_valid,
                    interconnect_only=args.interconnect_only,
                    use_sram_stub=not args.no_sram_stub,
                    standalone=args.standalone,
                    pe_fc=pe_fc)

    if args.verilog:
        # Emit Verilog for the whole chip, plus generated headers for the
        # global buffer and global controller (full-chip builds only).
        garnet_circ = garnet.circuit()
        magma.compile("garnet", garnet_circ, output="coreir-verilog",
                      coreir_libs={"float_CW"},
                      passes=["rungenerators", "inline_single_instances", "clock_gate"],
                      disable_ndarray=True,
                      inline=False)
        garnet.create_stub()
        if not args.interconnect_only:
            # Fall back to this file's directory when GARNET_HOME is unset.
            garnet_home = os.getenv('GARNET_HOME')
            if not garnet_home:
                garnet_home = os.path.dirname(os.path.abspath(__file__))
            gen_param_header(top_name="global_buffer_param",
                             params=glb_params,
                             output_folder=os.path.join(garnet_home, "global_buffer/header"))
            gen_rdl_header(top_name="glb",
                           rdl_file=os.path.join(garnet_home, "global_buffer/systemRDL/glb.rdl"),
                           output_folder=os.path.join(garnet_home, "global_buffer/header"))
            gen_rdl_header(top_name="glc",
                           rdl_file=os.path.join(garnet_home, "global_controller/systemRDL/rdl_models/glc.rdl.final"),
                           output_folder=os.path.join(garnet_home, "global_controller/header"))

    # App flow: requires app, input, gold, and output files all given.
    if len(args.app) > 0 and len(args.input) > 0 and len(args.gold) > 0 \
            and len(args.output) > 0 and not args.virtualize:
        
        placement, routing, id_to_name, instance_to_instr, \
           netlist, bus = garnet.place_and_route(args.app, args.unconstrained_io or args.generate_bitstream_only, compact=args.compact, load_only=args.generate_bitstream_only)
        
        # --pipeline-pnr stops after place-and-route.
        if args.pipeline_pnr:
            return

        bitstream, (inputs, outputs, reset, valid, \
            en, delay) = garnet.generate_bitstream(args.app, placement, routing, id_to_name, instance_to_instr, netlist, bus, compact=args.compact)

        # write out the config file
        # Strip control ports (reset, enables, valid) from the data port
        # lists before writing the test config.
        if len(inputs) > 1:
            if reset in inputs:
                inputs.remove(reset)
            for en_port in en:
                if en_port in inputs:
                    inputs.remove(en_port)
        total_cycle = get_total_cycle_from_app(args.app)
        if len(outputs) > 1:
            outputs.remove(valid)
        config = {
            "input_filename": args.input,
            "bitstream": args.output,
            "gold_filename": args.gold,
            "output_port_name": outputs,
            "input_port_name": inputs,
            "valid_port_name": valid,
            "reset_port_name": reset,
            "en_port_name": en,
            "delay": delay,
            "total_cycle": total_cycle
        }
        with open(f"{args.output}.json", "w+") as f:
            json.dump(config, f)
        write_out_bitstream(args.output, bitstream)
    elif args.virtualize and len(args.app) > 0:
        # Virtualized flow: one bitstream per virtualization group,
        # written to temp/<group_id>.bs.
        group_size = args.virtualize_group_size
        result = garnet.compile_virtualize(args.app, group_size)
        for c_id, bitstream in result.items():
            filename = os.path.join("temp", f"{c_id}.bs")
            write_out_bitstream(filename, bitstream)
    if args.dump_config_reg:
        # Dump interconnect + core config registers for inspection.
        ic = garnet.interconnect
        ic_reg = get_interconnect_regs(ic)
        core_reg = get_core_registers(ic)
        with open("config.json", "w+") as f:
            json.dump(ic_reg + core_reg, f)