def __init__(self, dw, nrxslots=2, ntxslots=2, endianness="big"):
        self.sink = stream.Endpoint(eth_phy_description(dw))
        self.source = stream.Endpoint(eth_phy_description(dw))
        self.bus = wishbone.Interface()

        # # #

        # storage in SRAM
        sram_depth = eth_mtu//(dw//8)
        self.submodules.sram = sram.LiteEthMACSRAM(dw, sram_depth, nrxslots, ntxslots, endianness)
        self.comb += [
            self.sink.connect(self.sram.sink),
            self.sram.source.connect(self.source)
        ]

        # Wishbone interface
        wb_rx_sram_ifs = [wishbone.SRAM(self.sram.writer.mems[n], read_only=True)
            for n in range(nrxslots)]
        # TODO: FullMemoryWE should move to Mibuild
        wb_tx_sram_ifs = [FullMemoryWE()(wishbone.SRAM(self.sram.reader.mems[n], read_only=False))
            for n in range(ntxslots)]
        wb_sram_ifs = wb_rx_sram_ifs + wb_tx_sram_ifs

        wb_slaves = []
        decoderoffset = log2_int(sram_depth, need_pow2=False)
        decoderbits = log2_int(len(wb_sram_ifs))
        for n, wb_sram_if in enumerate(wb_sram_ifs):
            def slave_filter(a, v=n):
                return a[decoderoffset:decoderoffset+decoderbits] == v
            wb_slaves.append((slave_filter, wb_sram_if.bus))
            self.submodules += wb_sram_if
        wb_con = wishbone.Decoder(self.bus, wb_slaves, register=True)
        self.submodules += wb_con
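
A note on the decoder loop above: the v=n default argument freezes each slot's index inside slave_filter, so every filter matches only its own slot. A standalone integer sketch of the same decode (the offset/width values are illustrative assumptions, not taken from the example):

# Hedged sketch: plain-integer model of the per-slot address decode above.
decoderoffset = 9   # hypothetical log2_int(sram_depth)
decoderbits   = 2   # hypothetical log2_int(number of slots)

def make_slave_filter(v):
    # v is captured per slot, mirroring the v=n default-argument trick
    def slave_filter(a):
        return (a >> decoderoffset) & ((1 << decoderbits) - 1) == v
    return slave_filter

filters = [make_slave_filter(n) for n in range(4)]
assert filters[2]((2 << decoderoffset) | 0x5)  # an address in slot 2 matches
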
Example #2
    def register_sdram(self,
                       phy,
                       geom_settings,
                       timing_settings,
                       use_axi=False,
                       use_full_memory_we=True,
                       **kwargs):
        assert not self._sdram_phy
        self._sdram_phy.append(
            phy)  # encapsulate in list to prevent CSR scanning

        self.submodules.sdram = ControllerInjector(phy, geom_settings,
                                                   timing_settings, **kwargs)

        dfi_databits_divisor = 1 if phy.settings.memtype == "SDR" else 2
        sdram_width = phy.settings.dfi_databits // dfi_databits_divisor
        main_ram_size = 2**(geom_settings.bankbits + geom_settings.rowbits +
                            geom_settings.colbits) * sdram_width // 8

        # TODO: modify mem_map to allow larger memories.
        main_ram_size = min(main_ram_size, 256 * 1024 * 1024)
        self.add_constant("L2_SIZE", self.l2_size)

        # add a Wishbone interface to the DRAM
        wb_sdram = wishbone.Interface()
        self.add_wb_sdram_if(wb_sdram)
        self.register_mem("main_ram", self.mem_map["main_ram"], wb_sdram,
                          main_ram_size)

        if self.l2_size:
            port = self.sdram.crossbar.get_port()
            port.data_width = 2**int(log2(port.data_width))  # Round down to a power of 2
            l2_size = 2**int(log2(self.l2_size))  # Round down to a power of 2
            l2_cache = wishbone.Cache(l2_size // 4, self._wb_sdram,
                                      wishbone.Interface(port.data_width))
            # XXX Workaround for Vivado up to 2018.2: Vivado is not able to map our L2 cache correctly.
            # The issue has been reported to Xilinx; remove this workaround if it is ever fixed.
            from litex.build.xilinx.vivado import XilinxVivadoToolchain
            if isinstance(self.platform.toolchain,
                          XilinxVivadoToolchain) and use_full_memory_we:
                from migen.fhdl.simplify import FullMemoryWE
                self.submodules.l2_cache = FullMemoryWE()(l2_cache)
            else:
                self.submodules.l2_cache = l2_cache
            if use_axi:
                axi_port = LiteDRAMAXIPort(
                    port.data_width,
                    port.address_width + log2_int(port.data_width // 8))
                axi2native = LiteDRAMAXI2Native(axi_port, port)
                self.submodules += axi2native
                self.submodules.wishbone_bridge = LiteDRAMWishbone2AXI(
                    self.l2_cache.slave, axi_port)
            else:
                self.submodules.wishbone_bridge = LiteDRAMWishbone2Native(
                    self.l2_cache.slave, port)
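
For reference, the main_ram_size arithmetic above expands as follows; a worked sketch with a hypothetical DDR geometry (all numbers are illustrative, not taken from the example):

# Hedged sizing sketch; the geometry values are hypothetical.
bankbits, rowbits, colbits = 3, 14, 10    # 8 banks x 16K rows x 1K columns
dfi_databits, memtype = 32, "DDR3"
sdram_width = dfi_databits // (1 if memtype == "SDR" else 2)      # 16
main_ram_size = 2**(bankbits + rowbits + colbits) * sdram_width // 8
assert main_ram_size == 256 * 1024 * 1024  # exactly at the 256MB cap
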
Example #3
    def register_sdram(self, phy, geom_settings, timing_settings, use_axi=False, use_full_memory_we=True, **kwargs):
        assert not self._sdram_phy
        self._sdram_phy.append(phy) # encapsulate in list to prevent CSR scanning

        # LiteDRAM core ----------------------------------------------------------------------------
        self.submodules.sdram = ControllerInjector(
            phy, geom_settings, timing_settings, self.clk_freq, **kwargs)

        # SoC <--> L2 Cache <--> LiteDRAM ----------------------------------------------------------
        if self.with_wishbone:
            # LiteDRAM port ------------------------------------------------------------------------
            port = self.sdram.crossbar.get_port()
            port.data_width = 2**int(log2(port.data_width)) # Round down to a power of 2

            # Parameters ---------------------------------------------------------------------------
            main_ram_size = 2**(geom_settings.bankbits +
                                geom_settings.rowbits +
                                geom_settings.colbits)*phy.settings.databits//8
            main_ram_size = min(main_ram_size, 0x20000000) # FIXME: limit to 512MB for now

            l2_size = max(self.l2_size, int(2*port.data_width/8)) # L2 has a minimal size; use it if the requested size is lower
            l2_size = 2**int(log2(l2_size))                       # Round down to a power of 2

            # SoC <--> L2 Cache Wishbone interface -------------------------------------------------
            wb_sdram = wishbone.Interface()
            self.add_wb_sdram_if(wb_sdram)
            self.register_mem("main_ram", self.mem_map["main_ram"], wb_sdram, main_ram_size)

            # L2 Cache -----------------------------------------------------------------------------
            l2_cache = wishbone.Cache(l2_size//4, self._wb_sdram, wishbone.Interface(port.data_width))
            # XXX Workaround for Vivado up to 2018.2: Vivado is not able to map our L2 cache correctly.
            # The issue has been reported to Xilinx; remove this workaround if it is ever fixed.
            from litex.build.xilinx.vivado import XilinxVivadoToolchain
            if isinstance(self.platform.toolchain, XilinxVivadoToolchain) and use_full_memory_we:
                from migen.fhdl.simplify import FullMemoryWE
                self.submodules.l2_cache = FullMemoryWE()(l2_cache)
            else:
                self.submodules.l2_cache = l2_cache
            self.config["L2_SIZE"] = l2_size

            # L2 Cache <--> LiteDRAM bridge --------------------------------------------------------
            if use_axi:
                axi_port = LiteDRAMAXIPort(
                    port.data_width,
                    port.address_width + log2_int(port.data_width//8))
                axi2native = LiteDRAMAXI2Native(axi_port, port)
                self.submodules += axi2native
                self.submodules.wishbone_bridge = LiteDRAMWishbone2AXI(self.l2_cache.slave, axi_port)
            else:
                self.submodules.wishbone_bridge = LiteDRAMWishbone2Native(self.l2_cache.slave, port)
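
The l2_size clamp above guarantees at least two bus words of cache and a power-of-two size; a quick standalone check (the widths are illustrative):

from math import log2

port_data_width = 128                                  # hypothetical LiteDRAM port width
requested = 0                                          # e.g. l2_size disabled by the user
l2_size = max(requested, int(2*port_data_width/8))     # minimal size: 32 bytes here
l2_size = 2**int(log2(l2_size))                        # round down to a power of two
assert l2_size == 32
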
Example #4
    def register_sdram(self, phy, geom_settings, timing_settings, **kwargs):
        assert not self._sdram_phy
        self._sdram_phy.append(
            phy)  # encapsulate in list to prevent CSR scanning

        self.submodules.sdram = ControllerInjector(phy, geom_settings,
                                                   timing_settings, **kwargs)

        dfi_databits_divisor = 1 if phy.settings.memtype == "SDR" else 2
        sdram_width = phy.settings.dfi_databits // dfi_databits_divisor
        main_ram_size = 2**(geom_settings.bankbits + geom_settings.rowbits +
                            geom_settings.colbits) * sdram_width // 8
        # TODO: modify mem_map to allow larger memories.
        main_ram_size = min(main_ram_size, 256 * 1024 * 1024)
        self.add_constant("L2_SIZE", self.l2_size)

        # add a Wishbone interface to the DRAM
        wb_sdram = wishbone.Interface()
        self.add_wb_sdram_if(wb_sdram)
        self.register_mem("main_ram", self.mem_map["main_ram"], wb_sdram,
                          main_ram_size)

        if self.l2_size:
            port = self.sdram.crossbar.get_port()
            l2_cache = wishbone.Cache(self.l2_size // 4, self._wb_sdram,
                                      wishbone.Interface(port.dw))
            # XXX Workaround for Vivado up to 2015.1: Vivado is not able to map our L2 cache correctly.
            # The issue has been reported to Xilinx and should be fixed in an upcoming release (2015.2?).
            # Remove this workaround once fixed by Xilinx.
            from litex.build.xilinx.vivado import XilinxVivadoToolchain
            if isinstance(self.platform.toolchain, XilinxVivadoToolchain):
                from migen.fhdl.simplify import FullMemoryWE
                self.submodules.l2_cache = FullMemoryWE()(l2_cache)
            else:
                self.submodules.l2_cache = l2_cache
            self.submodules.wishbone_bridge = LiteDRAMWishboneBridge(
                self.l2_cache.slave, port)
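
One detail shared by the wishbone.Cache calls in these examples: the first argument looks like a size in 32-bit words, which would explain the division of the byte-oriented l2_size by 4. A tiny check under that assumption:

# Hedged sketch; assumes wishbone.Cache takes its size in 32-bit words.
l2_size_bytes = 8192                       # hypothetical L2 size in bytes
cachesize_words = l2_size_bytes // 4       # 2048 words of 32 bits
assert cachesize_words * 4 == l2_size_bytes
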
Example #5
    def build(self, platform, fragment, **kwargs):

        # Apply FullMemoryWE on Design (Efinity does not infer memories correctly otherwise).
        FullMemoryWE()(fragment)

        return GenericToolchain.build(self, platform, fragment, **kwargs)
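
FullMemoryWE is a Migen ModuleTransformer, so an instance is simply called on a design to obtain the transformed version. A minimal hedged usage sketch (the Buf module is hypothetical):

from migen import Module, Memory
from migen.fhdl.simplify import FullMemoryWE

class Buf(Module):
    def __init__(self):
        # a memory whose partial write-enables some tools map badly
        self.specials.mem = Memory(32, 128)  # 32-bit x 128-word memory

buf = FullMemoryWE()(Buf())  # design with memories split per byte lane
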
Example #6
    def register_sdram(self, phy, sdram_controller_type, geom_settings,
                       timing_settings):
        # register PHY
        assert not self._sdram_phy
        self._sdram_phy.append(
            phy)  # encapsulate in list to prevent CSR scanning

        # connect CPU to SDRAM, needs to be done here so that we know the size
        dfi_databits_divisor = 1 if phy.settings.memtype == "SDR" else 2
        sdram_width = phy.settings.dfi_databits // dfi_databits_divisor
        main_ram_size = 2**(geom_settings.bankbits + geom_settings.rowbits +
                            geom_settings.colbits) * sdram_width // 8
        # TODO: modify mem_map to allow larger memories.
        main_ram_size = min(main_ram_size, 256 * 1024 * 1024)
        wb_sdram = wishbone.Interface()
        self.add_cpulevel_sdram_if(wb_sdram)
        self.register_mem("main_ram", self.mem_map["main_ram"], main_ram_size,
                          wb_sdram)

        # create DFI injector
        self.submodules.dfii = dfii.DFIInjector(geom_settings.addressbits,
                                                geom_settings.bankbits,
                                                phy.settings.dfi_databits,
                                                phy.settings.nphases)
        self.comb += self.dfii.master.connect(phy.dfi)

        # create controller
        if sdram_controller_type == "minicon":
            self.submodules.sdram_controller = minicon.Minicon(
                phy.settings, geom_settings, timing_settings)
            self._native_sdram_ifs = []

            bridge_if = self.get_native_sdram_if()
            if self.l2_size:
                l2_cache = wishbone.Cache(self.l2_size // 4,
                                          self._cpulevel_sdram_if_arbitrated,
                                          bridge_if)
                # XXX Workaround for Vivado up to 2015.1: Vivado is not able to map our L2 cache correctly.
                # The issue has been reported to Xilinx and should be fixed in an upcoming release (2015.2?).
                # Remove this workaround once fixed by Xilinx.
                from migen.build.xilinx.vivado import XilinxVivadoToolchain
                if isinstance(self.platform.toolchain, XilinxVivadoToolchain):
                    from migen.fhdl.simplify import FullMemoryWE
                    self.submodules.l2_cache = FullMemoryWE()(l2_cache)
                else:
                    self.submodules.l2_cache = l2_cache
            else:
                self.submodules.converter = wishbone.Converter(
                    self._cpulevel_sdram_if_arbitrated, bridge_if)
        elif sdram_controller_type == "lasmicon":
            self.submodules.sdram_controller = lasmicon.LASMIcon(
                phy.settings, geom_settings, timing_settings)
            self.submodules.lasmi_crossbar = lasmi_bus.LASMIxbar(
                [self.sdram_controller.lasmic], self.sdram_controller.nrowbits)

            bridge_if = self.get_native_sdram_if()
            if self.l2_size:
                l2_cache = wishbone.Cache(self.l2_size // 4,
                                          self._cpulevel_sdram_if_arbitrated,
                                          wishbone.Interface(bridge_if.dw))
                # XXX Workaround for Vivado up to 2015.1: Vivado is not able to map our L2 cache correctly.
                # The issue has been reported to Xilinx and should be fixed in an upcoming release (2015.2?).
                # Remove this workaround once fixed by Xilinx.
                from migen.build.xilinx.vivado import XilinxVivadoToolchain
                if isinstance(self.platform.toolchain, XilinxVivadoToolchain):
                    from migen.fhdl.simplify import FullMemoryWE
                    self.submodules.l2_cache = FullMemoryWE()(l2_cache)
                else:
                    self.submodules.l2_cache = l2_cache
                self.submodules.wishbone2lasmi = wishbone2lasmi.WB2LASMI(
                    self.l2_cache.slave, bridge_if)
            else:
                raise NotImplementedError
        else:
            raise ValueError("Incorrect SDRAM controller type specified")
        self.comb += self.sdram_controller.dfi.connect(self.dfii.slave)
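
The controller selection above is a plain string dispatch ending in ValueError; a table-driven equivalent, sketched with placeholder classes standing in for minicon.Minicon and lasmicon.LASMIcon:

# Hedged sketch: table-driven version of the string dispatch above.
def select_controller(name, controllers):
    try:
        return controllers[name]
    except KeyError:
        raise ValueError("Incorrect SDRAM controller type specified")

controllers = {"minicon": object, "lasmicon": object}  # placeholder classes
assert select_controller("minicon", controllers) is object

Either way, the final comb statement closes the DFI chain: controller.dfi feeds dfii.slave, and the injector's master port drives phy.dfi, which is what lets software take over the bus during initialization.
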
Example #7
    def register_sdram(self,
                       phy,
                       geom_settings,
                       timing_settings,
                       main_ram_size_limit=None,
                       **kwargs):
        assert not self._sdram_phy
        self._sdram_phy.append(
            phy)  # encapsulate in list to prevent CSR scanning

        # LiteDRAM core ----------------------------------------------------------------------------
        self.submodules.sdram = LiteDRAMCore(phy=phy,
                                             geom_settings=geom_settings,
                                             timing_settings=timing_settings,
                                             clk_freq=self.clk_freq,
                                             **kwargs)

        # LiteDRAM port ------------------------------------------------------------------------
        port = self.sdram.crossbar.get_port()
        port.data_width = 2**int(log2(port.data_width))  # Round down to a power of 2

        # Main RAM size ------------------------------------------------------------------------
        main_ram_size = 2**(geom_settings.bankbits + geom_settings.rowbits +
                            geom_settings.colbits) * phy.settings.databits // 8
        if main_ram_size_limit is not None:
            main_ram_size = min(main_ram_size, main_ram_size_limit)

        # SoC [<--> L2 Cache] <--> LiteDRAM ----------------------------------------------------
        if self.cpu.name == "rocket":
            # Rocket has its own L1 I/D caches: connect directly to LiteDRAM, bypassing the MMIO/CSR Wishbone bus:
            if port.data_width == self.cpu.mem_axi.data_width:
                # straightforward AXI link, no data_width conversion needed:
                self.submodules += LiteDRAMAXI2Native(
                    self.cpu.mem_axi,
                    port,
                    base_address=self.mem_map["main_ram"])
            else:
                # FIXME: replace WB data-width converter with native AXI converter!!!
                mem_wb = wishbone.Interface(
                    data_width=self.cpu.mem_axi.data_width,
                    adr_width=32 - log2_int(self.cpu.mem_axi.data_width // 8))
                # NOTE: AXI2Wishbone FSMs must be reset with the CPU!
                mem_a2w = ResetInserter()(AXI2Wishbone(self.cpu.mem_axi,
                                                       mem_wb,
                                                       base_address=0))
                self.comb += mem_a2w.reset.eq(ResetSignal() | self.cpu.reset)
                self.submodules += mem_a2w
                litedram_wb = wishbone.Interface(port.data_width)
                self.submodules += LiteDRAMWishbone2Native(
                    litedram_wb, port, base_address=self.mem_map["main_ram"])
                self.submodules += wishbone.Converter(mem_wb, litedram_wb)
            # Register main_ram region (so it will be added to generated/mem.h):
            self.add_memory_region("main_ram", self.mem_map["main_ram"],
                                   main_ram_size)
        elif self.with_wishbone:
            # Insert the L2 cache between the Wishbone bus and LiteDRAM
            l2_size = max(self.l2_size, int(2 * port.data_width / 8))  # L2 has a minimal size; use it if the requested size is lower
            l2_size = 2**int(log2(l2_size))  # Round down to a power of 2

            # SoC <--> L2 Cache Wishbone interface -------------------------------------------------
            wb_sdram = wishbone.Interface()
            self.add_wb_sdram_if(wb_sdram)
            self.register_mem("main_ram", self.mem_map["main_ram"], wb_sdram,
                              main_ram_size)

            # L2 Cache -----------------------------------------------------------------------------
            l2_cache = wishbone.Cache(l2_size // 4, self._wb_sdram,
                                      wishbone.Interface(port.data_width))
            # XXX Workaround for Vivado up to 2018.2: Vivado is not able to map our L2 cache correctly.
            # The issue has been reported to Xilinx; remove this workaround if it is ever fixed.
            from litex.build.xilinx.vivado import XilinxVivadoToolchain
            if isinstance(self.platform.toolchain, XilinxVivadoToolchain):
                from migen.fhdl.simplify import FullMemoryWE
                self.submodules.l2_cache = FullMemoryWE()(l2_cache)
            else:
                self.submodules.l2_cache = l2_cache
            self.config["L2_SIZE"] = l2_size

            # L2 Cache <--> LiteDRAM bridge --------------------------------------------------------
            self.submodules.wishbone_bridge = LiteDRAMWishbone2Native(
                self.l2_cache.slave, port)
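
In the Rocket width-mismatch branch above, the adr_width arithmetic keeps the Wishbone side word-addressed within a 32-bit byte address space; a worked sketch (the AXI width is an illustrative assumption):

from math import log2

axi_data_width = 64                              # hypothetical Rocket AXI width
adr_width = 32 - int(log2(axi_data_width // 8))  # 29 word-address bits
assert adr_width == 29
# Conversion chain in that branch:
#   cpu.mem_axi -> AXI2Wishbone -> mem_wb -> wishbone.Converter
#     -> litedram_wb -> LiteDRAMWishbone2Native -> port
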
Example #8
    def register_sdram_phy(self, phy):
        if self._sdram_phy_registered:
            raise FinalizeError
        self._sdram_phy_registered = True

        # Core
        self.submodules.sdram = SDRAMCore(phy, phy.module.geom_settings,
                                          phy.module.timing_settings,
                                          self.sdram_controller_settings)

        dfi_databits_divisor = 1 if phy.settings.memtype == "SDR" else 2
        sdram_width = phy.settings.dfi_databits // dfi_databits_divisor
        main_ram_size = 2**(
            phy.module.geom_settings.bankbits +
            phy.module.geom_settings.rowbits +
            phy.module.geom_settings.colbits) * sdram_width // 8
        # XXX: Limit main_ram_size to 256MB, we should modify mem_map to allow larger memories.
        main_ram_size = min(main_ram_size, 256 * 1024 * 1024)
        l2_size = self.sdram_controller_settings.l2_size
        if l2_size:
            self.add_constant("L2_SIZE", l2_size)

        # add a Wishbone interface to the DRAM
        wb_sdram = wishbone.Interface()
        self.add_wb_sdram_if(wb_sdram)
        self.register_mem("main_ram", self.mem_map["main_ram"], wb_sdram,
                          main_ram_size)

        # LASMICON frontend
        if isinstance(self.sdram_controller_settings, LASMIconSettings):
            if self.sdram_controller_settings.with_bandwidth:
                self.sdram.controller.multiplexer.add_bandwidth()

            if self.sdram_controller_settings.with_memtest:
                self.submodules.memtest_w = memtest.MemtestWriter(
                    self.sdram.crossbar.get_master())
                self.submodules.memtest_r = memtest.MemtestReader(
                    self.sdram.crossbar.get_master())

            if l2_size:
                lasmim = self.sdram.crossbar.get_master()
                l2_cache = wishbone.Cache(l2_size // 4, self._wb_sdram,
                                          wishbone.Interface(lasmim.dw))
                # XXX Workaround for Vivado up to 2015.1: Vivado is not able to map our L2 cache correctly.
                # The issue has been reported to Xilinx and should be fixed in an upcoming release (2015.2?).
                # Remove this workaround once fixed by Xilinx.
                from mibuild.xilinx.vivado import XilinxVivadoToolchain
                if isinstance(self.platform.toolchain, XilinxVivadoToolchain):
                    from migen.fhdl.simplify import FullMemoryWE
                    self.submodules.l2_cache = FullMemoryWE()(l2_cache)
                else:
                    self.submodules.l2_cache = l2_cache
                self.submodules.wishbone2lasmi = wishbone2lasmi.WB2LASMI(
                    self.l2_cache.slave, lasmim)

        # MINICON frontend
        elif isinstance(self.sdram_controller_settings, MiniconSettings):
            if l2_size:
                l2_cache = wishbone.Cache(l2_size // 4, self._wb_sdram,
                                          self.sdram.controller.bus)
                # XXX Workaround for Vivado up to 2015.1: Vivado is not able to map our L2 cache correctly.
                # The issue has been reported to Xilinx and should be fixed in an upcoming release (2015.2?).
                # Remove this workaround once fixed by Xilinx.
                from mibuild.xilinx.vivado import XilinxVivadoToolchain
                if isinstance(self.platform.toolchain, XilinxVivadoToolchain):
                    from migen.fhdl.simplify import FullMemoryWE
                    self.submodules.l2_cache = FullMemoryWE()(l2_cache)
                else:
                    self.submodules.l2_cache = l2_cache
            else:
                self.submodules.converter = wishbone.Converter(
                    self._wb_sdram, self.sdram.controller.bus)
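
Both frontends above share one pattern: with an L2, the CPU-side bus goes through wishbone.Cache; without one, through wishbone.Converter. A condensed hedged sketch of that choice (the SDRAMFrontend wrapper is hypothetical; the wishbone classes are the LiteX ones used throughout these examples):

from migen import Module
from litex.soc.interconnect import wishbone

class SDRAMFrontend(Module):
    def __init__(self, cpu_bus, dram_bus, l2_size=0):
        if l2_size:
            # cache size argument assumed to be in 32-bit words, hence //4
            self.submodules.l2_cache = wishbone.Cache(l2_size//4, cpu_bus, dram_bus)
        else:
            self.submodules.converter = wishbone.Converter(cpu_bus, dram_bus)
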
Example #9
    def build(self,
              platform,
              fragment,
              build_dir="build",
              build_name="top",
              run=True,
              **kwargs):

        self.ifacewriter.set_build_params(platform, build_name)

        # Create Build Directory.
        cwd = os.getcwd()
        os.makedirs(build_dir, exist_ok=True)
        os.chdir(build_dir)

        # Apply FullMemoryWE on Design (Efinity does not infer memories correctly otherwise).
        FullMemoryWE()(fragment)

        # Finalize Design.
        if not isinstance(fragment, _Fragment):
            fragment = fragment.get_fragment()
        platform.finalize(fragment)

        # Generate Design.
        v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
        v_output.write(f"{build_name}.v")
        platform.add_source(f"{build_name}.v")

        # Add Include Paths.
        if platform.verilog_include_paths:
            self.options["includ_path"] = "{" + ";".join(
                platform.verilog_include_paths) + "}"

        os.environ["EFXPT_HOME"] = self.efinity_path + "/pt"

        # Generate Design Timing Constraints file (.sdc)
        named_sc, named_pc = platform.resolve_signals(v_output.ns)
        _build_sdc(
            clocks=self.clocks,
            false_paths=self.false_paths,
            vns=v_output.ns,
            named_sc=named_sc,
            build_name=build_name,
            additional_sdc_commands=self.additional_sdc_commands,
        )

        # Generate project file (.xml)
        _build_xml(family=platform.family,
                   device=platform.device,
                   timing_model=platform.timing_model,
                   build_name=build_name,
                   sources=platform.sources)

        # Generate peripheral file (.peri.xml)
        _build_peri(efinity_path=self.efinity_path,
                    build_name=build_name,
                    device=platform.device,
                    named_sc=named_sc,
                    named_pc=named_pc,
                    fragment=fragment,
                    platform=platform,
                    additional_iface_commands=self.additional_iface_commands,
                    excluded_ios=self.excluded_ios)

        # Some I/O blocks don't have a Python API, so we need to configure
        # them directly in the peri.xml file.
        # We also need to configure the bank voltage here.
        if self.ifacewriter.xml_blocks or platform.iobank_info:
            self.ifacewriter.generate_xml_blocks()

        # Run
        if run:
            # Synthesis/Mapping.
            r = tools.subprocess_call_filtered([
                self.efinity_path + "/bin/efx_map", "--project",
                f"{build_name}", "--root", f"{build_name}",
                "--write-efx-verilog", f"outflow/{build_name}.map.v",
                "--write-premap-module", f"outflow/{build_name}.elab.vdb",
                "--binary-db", f"{build_name}.vdb", "--family",
                platform.family, "--device", platform.device, "--mode",
                "speed", "--max_ram", "-1", "--max_mult", "-1",
                "--infer-clk-enable", "3", "--infer-sync-set-reset", "1",
                "--fanout-limit", "0", "--bram_output_regs_packing", "1",
                "--retiming", "1", "--seq_opt", "1",
                "--blast_const_operand_adders", "1",
                "--mult_input_regs_packing", "1", "--mult_output_regs_packing",
                "1", "--veri_option",
                "verilog_mode=verilog_2k,vhdl_mode=vhdl_2008", "--work-dir",
                "work_syn", "--output-dir", "outflow", "--project-xml",
                f"{build_name}.xml", "--I", "./"
            ], common.colors)
            if r != 0:
                raise OSError("Error occurred during efx_map execution.")

            # Place and Route.
            r = tools.subprocess_call_filtered([
                self.efinity_path + "/bin/python3",
                self.efinity_path + "/scripts/efx_run_pt.py", f"{build_name}",
                platform.family, platform.device
            ], common.colors)
            if r != 0:
                raise OSError("Error occurred during efx_run_pt execution.")

            r = tools.subprocess_call_filtered([
                self.efinity_path + "/bin/efx_pnr", "--circuit",
                f"{build_name}", "--family", platform.family, "--device",
                platform.device, "--operating_conditions",
                platform.timing_model, "--pack", "--place", "--route",
                "--vdb_file", f"work_syn/{build_name}.vdb", "--use_vdb_file",
                "on", "--place_file", f"outflow/{build_name}.place",
                "--route_file", f"outflow/{build_name}.route", "--sdc_file",
                f"{build_name}.sdc", "--sync_file",
                f"outflow/{build_name}.interface.csv", "--seed", "1",
                "--work_dir", "work_pnr", "--output_dir", "outflow",
                "--timing_analysis", "on", "--load_delay_matrix"
            ], common.colors)
            if r != 0:
                raise OSError("Error occurred during efx_pnr execution.")

            # Bitstream.
            r = tools.subprocess_call_filtered([
                self.efinity_path + "/bin/efx_pgm", "--source",
                f"work_pnr/{build_name}.lbf", "--dest", f"{build_name}.hex",
                "--device", platform.device, "--family", platform.family,
                "--periph", f"outflow/{build_name}.lpf",
                "--oscillator_clock_divider", "DIV8", "--spi_low_power_mode",
                "off", "--io_weak_pullup", "on", "--enable_roms", "on",
                "--mode", "active", "--width", "1", "--enable_crc_check", "on"
            ], common.colors)
            if r != 0:
                raise OSError("Error occurred during efx_pgm execution.")

        os.chdir(cwd)

        return v_output.ns
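
Every external tool call in build() follows the same run-and-check pattern via tools.subprocess_call_filtered; a minimal hedged re-creation with the standard library (subprocess_call_filtered itself is LiteX-specific and not reproduced here):

import subprocess

def run_step(cmd, step_name):
    # mirror the return-code handling used for efx_map/efx_pnr/efx_pgm above
    r = subprocess.call(cmd)
    if r != 0:
        raise OSError(f"Error occurred during {step_name} execution.")

run_step(["echo", "efx_map (placeholder command)"], "efx_map")
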