Example #1
0
class L1SharedCache(L1Cache):
    """Simple L1 shared cache with default values.

    Registers --l1s_size / --l1s_assoc command-line options at class
    definition time and applies them in __init__ when present.
    """

    # Default parameters
    size = "256kB"
    assoc = 1
    tag_latency = 1
    data_latency = 1
    response_latency = 20
    mshrs = 20
    tgts_per_mshr = 12

    SimpleOpts.add_option('--l1s_size',
                          help="L1 shared cache size. Default: %s" % size)
    SimpleOpts.add_option('--l1s_assoc',
                          help="L1s shared cache assoc. Default: %s" % assoc)

    def __init__(self, opts=None):
        super(L1SharedCache, self).__init__(opts)
        if not opts:
            return
        # BUG FIX: this previously read opts.l1i_size / opts.l1i_assoc (the
        # instruction-cache options), so the --l1s_size / --l1s_assoc options
        # registered above silently had no effect.
        if opts.l1s_size:
            self.size = opts.l1s_size
        if opts.l1s_assoc:
            self.assoc = opts.l1s_assoc

    def connectCPUSideBus(self, bus):
        """Connect this cache's CPU side to the bus's master port."""
        self.cpu_side = bus.master

    def connectMemSideBus(self, bus):
        """Connect this cache's memory side to the bus's slave port."""
        self.mem_side = bus.slave
Example #2
0
class L1DCache(L1Cache):
    """Simple L1 data cache with default values"""

    # Default cache geometry and timing.
    size = "256kB"
    assoc = 1
    tag_latency = 1
    data_latency = 1
    response_latency = 20
    mshrs = 20
    tgts_per_mshr = 12

    # Command-line overrides, registered once at class-definition time.
    SimpleOpts.add_option('--l1d_size',
                          help="L1 data cache size. Default: %s" % size)
    SimpleOpts.add_option('--l1d_assoc',
                          help="L1 data cache assoc. Default: %s" % assoc)

    def __init__(self, opts=None):
        """Build the cache, honouring any parsed command-line overrides."""
        super(L1DCache, self).__init__(opts)
        if opts:
            if opts.l1d_size:
                self.size = opts.l1d_size
            if opts.l1d_assoc:
                self.assoc = opts.l1d_assoc

    def connectCPU(self, cpu):
        """Connect this cache's port to a CPU dcache port"""
        self.cpu_side = cpu.dcache_port
Example #3
0
class L2Cache(Cache):
    """Simple L2 Cache with default values"""

    # Default cache geometry and timing.
    size = '128kB'
    assoc = 8
    hit_latency = 12
    response_latency = 12
    mshrs = 20
    tgts_per_mshr = 12

    # Allow the capacity to be overridden on the command line.
    SimpleOpts.add_option('--l2_size',
                          help="L2 cache size. Default: %s" % size)

    def __init__(self, opts=None):
        """Build the cache; apply --l2_size when it was supplied."""
        super(L2Cache, self).__init__()
        if opts and opts.l2_size:
            self.size = opts.l2_size

    def connectCPUSideBus(self, bus):
        """Attach the cache's CPU side to the bus's master port."""
        self.cpu_side = bus.master

    def connectMemSideBus(self, bus):
        """Attach the cache's memory side to the bus's slave port."""
        self.mem_side = bus.slave
Example #4
0
class L2Cache(Cache):
    """L2 cache with fixed latencies; capacity tunable via --l2_size."""

    # Default cache geometry and timing.
    size = '256kB'
    assoc = 8
    tag_latency = 20
    data_latency = 20
    response_latency = 80
    mshrs = 20
    tgts_per_mshr = 12
    write_buffers = 8

    SimpleOpts.add_option('--l2_size',
                          help="L2 cache size. Default: %s" % size)

    def __init__(self, options=None):
        """Build the cache; apply --l2_size when it was supplied."""
        super(L2Cache, self).__init__()
        if options and options.l2_size:
            self.size = options.l2_size

    def connectBus(self, bus):
        """Attach the cache's memory side to the bus's slave port."""
        self.mem_side = bus.slave

    def connectCPU(self, cpu):
        """Attach the cache's CPU side to the given master port."""
        self.cpu_side = cpu.master
Example #5
0
class L1_ICache(L1Cache):
    """L1 instruction cache: read-only, writes back clean lines."""

    is_read_only = True
    writeback_clean = True
    size = '16kB'

    SimpleOpts.add_option('--l1i_size',
                          help="L1 instruction cache size. Default: %s" % size)
    SimpleOpts.add_option('--l1i_assoc', help="L1 associativity value. ")

    def __init__(self, opts=None):
        super(L1_ICache, self).__init__(opts)
        # BUG FIX: opts defaults to None but was dereferenced unconditionally,
        # so constructing L1_ICache() with no arguments raised AttributeError.
        # Guard like the sibling cache classes do.
        if not opts:
            return
        if opts.l1i_size:
            self.size = opts.l1i_size
        if opts.l1i_assoc:
            self.assoc = opts.l1i_assoc

    def connectCPU(self, cpu):
        """Attach this cache to the CPU's icache port."""
        self.cpu_side = cpu.icache_port
Example #6
0
class L1_DCache(L1Cache):
    """L1 data cache; capacity is configurable on the command line."""

    size = '64kB'

    SimpleOpts.add_option('--l1d_size',
                          help="L1 data cache size. Default: %s" % size)
    SimpleOpts.add_option('--l1d_assoc', help="L1 associativity value. ")

    def __init__(self, opts=None):
        """Build the cache; apply --l1d_size when it was supplied."""
        super(L1_DCache, self).__init__(opts)
        if opts and opts.l1d_size:
            self.size = opts.l1d_size

    def connectCPU(self, cpu):
        """Hook this cache up to the CPU's data-cache port."""
        self.cpu_side = cpu.dcache_port
Example #7
0
class L1DCache(L1Cache):
    """Simple L1 data cache with default values"""

    size = '32kB'  # default capacity

    SimpleOpts.add_option('--l1d_size',
                          help="L1 data cache size. Default: %s" % size)

    def __init__(self, opts=None):
        """Build the cache; apply --l1d_size when it was supplied."""
        super(L1DCache, self).__init__(opts)
        if opts and opts.l1d_size:
            self.size = opts.l1d_size

    def connectCPU(self, cpu):
        """Attach this cache to the CPU's dcache port."""
        self.cpu_side = cpu.dcache_port
Example #8
0
class L1ICache(L1Cache):
    """Simple L1 instruction cache with default values"""

    # Set the default size
    size = '16kB'

    SimpleOpts.add_option('--l1i_size',
                          help="L1 instruction cache size. Default: %s" % size)

    def __init__(self, opts=None):
        super(L1ICache, self).__init__(opts)
        # FIX: the size override was commented out, so the --l1i_size option
        # registered above was silently ignored. Restore it, matching the
        # pattern used by the sibling cache classes.
        if not opts or not opts.l1i_size:
            return
        self.size = opts.l1i_size

    def connectCPU(self, cpu):
        """Connect this cache's port to a CPU icache port"""
        self.cpu_side = cpu.icache_port
Example #9
0
import m5
from m5.objects import *
import os
from Caches import *
from common import SimpleOpts

# Root of the gem5 checkout; requires the GEM5 environment variable to be
# set (raises KeyError otherwise).
gem5_path = os.environ["GEM5"]

SimpleOpts.add_option('--clock', help="clock frequency")
(opts, args) = SimpleOpts.parse_args()
"""create system obj"""
system = System()
"""create clock domain"""
system.clk_domain = SrcClockDomain()
# Use the user-supplied clock when given, otherwise default to 1 GHz.
# NOTE: Python 2 print statement below — this script is Python 2 only.
if opts.clock:
    print "setting clock frequency as ", opts.clock
    system.clk_domain.clock = opts.clock
else:
    system.clk_domain.clock = '1GHz'
system.clk_domain.voltage_domain = VoltageDomain()
"""memory simulation mode"""
system.mem_mode = 'timing'
system.mem_ranges = [AddrRange('512MB')]
"""create CPU"""
system.cpu = TimingSimpleCPU()
"""create memory bus"""
system.membus = SystemXBar()
system.l2bus = L2XBar()
"""create cache"""
system.cpu.icache = L1_ICache(opts)
system.cpu.dcache = L1_DCache(opts)
# NOTE(review): only the icache is connected here; the dcache connection
# presumably follows in the part of the script not shown — confirm.
system.cpu.icache.connectCPU(system.cpu)
Example #10
0
class MySystem(LinuxX86System):
    """Full-system X86 Linux machine with an optional dGPU/APU.

    Command-line options are registered at class-definition time via
    SimpleOpts; the parsed options object passed to __init__ drives the
    construction of clock domains, memory ranges, CPUs, the optional GPU,
    the Ruby memory hierarchy, and the x86 platform (PC) devices.

    NOTE(review): uses Python 2 constructs (xrange, eager map) — this
    class is Python 2 only as written.
    """

    SimpleOpts.add_option(
        "--no_host_parallel",
        default=False,
        action="store_true",
        help="Do NOT run gem5 on multiple host threads (kvm only)")

    SimpleOpts.add_option("--disk-image",
                          default='/proj/radl_tools/fs/ubuntu-18.4.img',
                          help="The boot disk image to mount (/dev/hda)")

    # SimpleOpts.add_option("--second-disk",
    #     default='/proj/radl_tools/fs/linux-bigswap2.img',
    #     help="The second disk image to mount (/dev/hdb)")

    SimpleOpts.add_option("--kernel",
                          default='/proj/radl_tools/fs/vmlinux',
                          help="Linux kernel to boot")

    def __init__(self, opts):
        """Build the whole simulated machine from the parsed options."""
        super(MySystem, self).__init__()
        self._opts = opts

        # Override defaults from common options / check for problems
        self._opts.network = "garnet2.0"
        self._opts.cpu_clock = '4GHz'
        self._opts.ruby_clock = '2GHz'

        # Set up the clock domain and the voltage domain
        self.clk_domain = SrcClockDomain()
        self.clk_domain.clock = self._opts.cpu_clock
        self.clk_domain.voltage_domain = VoltageDomain()

        # Setup a single memory range for X86
        self.setupMemoryRange()

        # Setup all the system devices
        self.initFS()

        # Set the boot disk image and add a swap disk for large input size
        # benchmarks.
        #self.setDiskImages(opts.disk_image, opts.second_disk)
        self.setDiskImage(opts.disk_image)

        # Use our self built kernel with no ACPI and 9p support.
        self.kernel = opts.kernel

        # Options specified on the kernel command line
        boot_options = [
            'earlyprintk=ttyS0', 'console=ttyS0,9600', 'lpj=7999923',
            'root=/dev/sda1', 'drm_kms_helper.fbdev_emulation=0'
        ]
        self.boot_osflags = ' '.join(boot_options)

        # Create the CPUs for our system.
        self.createCPU()

        # Create the GPU
        if self._opts.dgpu or self._opts.apu:
            self.createGPU()

        # Create the memory heirarchy for the system.
        self.createMemoryHierarchy()

        # Set up the interrupt controllers for the system (x86 specific)
        self.setupInterrupts()

    def setupMemoryRange(self):
        """Define self.mem_ranges, splitting around the hole above 3GB.

        Memory beyond 3GB is remapped past the 4GB boundary to leave the
        [3GB, 4GB) region free for 32-bit I/O devices.
        """
        mem_size = self._opts.mem_size
        excess_mem_size = \
                convert.toMemorySize(mem_size) - convert.toMemorySize('3GB')
        if excess_mem_size <= 0:
            self.mem_ranges = [AddrRange(0, size=mem_size)]
        else:
            print("Physical memory size specified is %s which is greater than"\
                  " 3GB.  Twice the number of memory controllers would be "\
                  "created."  % (mem_size))

            self.mem_ranges = [
                AddrRange(0, size=Addr('3GB')),
                AddrRange(Addr('4GB'), size=excess_mem_size)
            ]

        if self._opts.dgpu or self._opts.apu:
            # Shadow ROM range used when a GPU is present.
            self.shadow_rom_ranges = [AddrRange(0xc0000, size=Addr('128kB'))]

    def createGPU(self):
        """Create the shader (GPU), its compute units and register files."""
        # shader is the GPU
        self.shader = Shader(
            n_wf=self._opts.wfs_per_simd,
            clk_domain=SrcClockDomain(
                clock=self._opts.gpu_clock,
                voltage_domain=VoltageDomain(voltage=self._opts.gpu_voltage)))

        # VIPER GPU protocol implements release consistency at GPU side. So,
        # we make their writes visible to the global memory and should read
        # from global memory during kernal boundary. The pipeline initiates
        # (or do not initiate) the acquire/release operation depending on
        # these impl_kern_launch_rel and impl_kern_end_rel flags. The flag=true
        # means pipeline initiates a acquire/release operation at kernel
        # launch/end. VIPER protocol is write-through based, and thus only
        # impl_kern_launch_acq needs to set.
        if (buildEnv['PROTOCOL'] == 'GPU_VIPER'):
            self.shader.impl_kern_launch_acq = True
            self.shader.impl_kern_end_rel = False
        else:
            self.shader.impl_kern_launch_acq = True
            self.shader.impl_kern_end_rel = True

        # List of compute units; one GPU can have multiple compute units
        compute_units = []

        for i in xrange(self._opts.num_compute_units):
            compute_units.append(
                     ComputeUnit(cu_id = i, perLaneTLB = False,
                                 num_SIMDs = self._opts.simds_per_cu,
                                 wf_size = self._opts.wf_size,
                                 spbypass_pipe_length = \
                                 self._opts.sp_bypass_path_length,
                                 dpbypass_pipe_length = \
                                 self._opts.dp_bypass_path_length,
                                 issue_period = self._opts.issue_period,
                                 coalescer_to_vrf_bus_width = \
                                 self._opts.glbmem_rd_bus_width,
                                 vrf_to_coalescer_bus_width = \
                                 self._opts.glbmem_wr_bus_width,
                                 num_global_mem_pipes = \
                                 self._opts.glb_mem_pipes_per_cu,
                                 num_shared_mem_pipes = \
                                 self._opts.shr_mem_pipes_per_cu,
                                 n_wf = self._opts.wfs_per_simd,
                                 execPolicy = self._opts.CUExecPolicy,
                                 debugSegFault = self._opts.SegFaultDebug,
                                 functionalTLB = self._opts.FunctionalTLB,
                                 localMemBarrier = self._opts.LocalMemBarrier,
                                 countPages = self._opts.countPages,
                                 localDataStore = \
                                 LdsState(banks = self._opts.numLdsBanks,
                                          bankConflictPenalty = \
                                          self._opts.ldsBankConflictPenalty)))

            # Per-CU wavefronts and vector/scalar register files with their
            # pool managers.
            wavefronts = []
            vrfs = []
            vrf_pool_mgrs = []
            srfs = []
            srf_pool_mgrs = []
            for j in xrange(self._opts.simds_per_cu):
                for k in xrange(self.shader.n_wf):
                    wavefronts.append(
                        Wavefront(simdId=j,
                                  wf_slot_id=k,
                                  wf_size=self._opts.wf_size))
                vrf_pool_mgrs.append(
                                 SimplePoolManager(pool_size = \
                                                   self._opts.vreg_file_size,
                                                   min_alloc = \
                                                   self._opts.vreg_min_alloc))

                vrfs.append(
                    VectorRegisterFile(simd_id=j,
                                       wf_size=self._opts.wf_size,
                                       num_regs=self._opts.vreg_file_size))

                # NOTE(review): min_alloc below reuses vreg_min_alloc for the
                # scalar pool — confirm this is intended (no sreg_min_alloc).
                srf_pool_mgrs.append(
                                 SimplePoolManager(pool_size = \
                                                   self._opts.sreg_file_size,
                                                   min_alloc = \
                                                   self._opts.vreg_min_alloc))
                srfs.append(
                    ScalarRegisterFile(simd_id=j,
                                       wf_size=self._opts.wf_size,
                                       num_regs=self._opts.sreg_file_size))

            compute_units[-1].wavefronts = wavefronts
            compute_units[-1].vector_register_file = vrfs
            compute_units[-1].scalar_register_file = srfs
            compute_units[-1].register_manager = \
                RegisterManager(policy=self._opts.registerManagerPolicy,
                                vrf_pool_managers=vrf_pool_mgrs,
                                srf_pool_managers=srf_pool_mgrs)
            if self._opts.TLB_prefetch:
                compute_units[-1].prefetch_depth = self._opts.TLB_prefetch
                compute_units[-1].prefetch_prev_type = self._opts.pf_type

            # attach the LDS and the CU to the bus (actually a Bridge)
            compute_units[-1].ldsPort = compute_units[-1].ldsBus.slave
            compute_units[-1].ldsBus.master = \
                compute_units[-1].localDataStore.cuPort

        self.shader.CUs = compute_units

        self.shader.cpu_pointer = self.cpu[0]

    # Creates TimingSimpleCPU by default
    def createCPU(self):
        """Create warm-up CPUs plus the main CPUs selected by --cpu_type.

        NOTE(review): the map(...) calls rely on Python 2's eager map to
        invoke createThreads(); under Python 3 they would be no-ops.
        """
        self.warmupCpu = [
            TimingSimpleCPU(cpu_id=i, switched_out=True)
            for i in range(self._opts.num_cpus)
        ]
        map(lambda c: c.createThreads(), self.warmupCpu)
        if self._opts.cpu_type == "TimingSimpleCPU":
            print("Running with Timing Simple CPU")
            self.mem_mode = 'timing'
            self.cpu = [
                TimingSimpleCPU(cpu_id=i, switched_out=False)
                for i in range(self._opts.num_cpus)
            ]
            map(lambda c: c.createThreads(), self.cpu)
        elif self._opts.cpu_type == "AtomicSimpleCPU":
            print("Running with Atomic Simple CPU")
            if self._opts.ruby:
                self.mem_mode = 'atomic_noncaching'
            else:
                self.mem_mode = 'atomic'
            self.cpu = [
                AtomicSimpleCPU(cpu_id=i, switched_out=False)
                for i in range(self._opts.num_cpus)
            ]
            map(lambda c: c.createThreads(), self.cpu)
        elif self._opts.cpu_type == "DerivO3CPU":
            print("Running with O3 CPU")
            self.mem_mode = 'timing'
            self.cpu = [
                DerivO3CPU(cpu_id=i, switched_out=False)
                for i in range(self._opts.num_cpus)
            ]
            map(lambda c: c.createThreads(), self.cpu)
        elif self._opts.cpu_type == "X86KvmCPU":
            print("Running with KVM to start")
            # Note KVM needs a VM and atomic_noncaching
            self.mem_mode = 'atomic_noncaching'
            self.cpu = [
                X86KvmCPU(cpu_id=i, hostFreq="3.6GHz")
                for i in range(self._opts.num_cpus)
            ]
            self.kvm_vm = KvmVM()
            map(lambda c: c.createThreads(), self.cpu)
        else:
            panic("Bad CPU type!")

    def switchCpus(self, old, new):
        """Swap the running CPU list for a (switched-out) replacement set."""
        assert (new[0].switchedOut())
        m5.switchCpus(self, zip(old, new))

    def setDiskImages(self, img_path_1, img_path_2):
        """Mount two copy-on-write disk images on the IDE controller."""
        disk0 = CowDisk(img_path_1)
        disk2 = CowDisk(img_path_2)
        self.pc.south_bridge.ide.disks = [disk0, disk2]

    def setDiskImage(self, img_path_1):
        """Mount a single copy-on-write boot disk on the IDE controller."""
        disk0 = CowDisk(img_path_1)
        self.pc.south_bridge.ide.disks = [disk0]

    def createDMADevices(self):
        """Add the GPU's HSA packet processor / command processor as DMA
        devices (only when a GPU is configured)."""
        if self._opts.dgpu or self._opts.apu:
            # Set up the HSA packet processor
            hsapp_gpu_map_paddr = int(Addr(self._opts.mem_size))
            gpu_hsapp = HSAPacketProcessor(
                pioAddr=hsapp_gpu_map_paddr,
                numHWQueues=self._opts.num_hw_queues)

            dispatcher = GPUDispatcher()
            gpu_cmd_proc = GPUCommandProcessor(hsapp=gpu_hsapp,
                                               dispatcher=dispatcher)

            self.shader.dispatcher = dispatcher
            self.shader.gpu_cmd_proc = gpu_cmd_proc
            self._dma_ports.append(gpu_hsapp)
            self._dma_ports.append(gpu_cmd_proc)

    def createMemoryHierarchy(self):
        """Build the Ruby memory system and wire CPU/GPU ports into it."""
        self.createDMADevices()

        # VIPER requires the number of instruction and scalar caches
        if (buildEnv['PROTOCOL'] == 'GPU_VIPER'):
            # Currently, the sqc (I-Cache of GPU) is shared by
            # multiple compute units(CUs). The protocol works just fine
            # even if sqc is not shared. Overriding this option here
            # so that the user need not explicitly set this (assuming
            # sharing sqc is the common usage)
            self._opts.num_sqc = \
                int(math.ceil(float(self._opts.num_compute_units)\
                                    / self._opts.cu_per_sqc))
            self._opts.num_scalar_cache = \
                int(math.ceil(float(self._opts.num_compute_units)\
                                        / self._opts.cu_per_scalar_cache))

        Ruby.create_system(self._opts, True, self, self.iobus, self._dma_ports,
                           None)

        # don't connect ide as it gets connected in attachIO call
        for dma_port in self._dma_ports[1:]:
            dma_port.pio = self.iobus.master

        self.ruby.clk_domain = SrcClockDomain()
        self.ruby.clk_domain.clock = self._opts.ruby_clock
        self.ruby.clk_domain.voltage_domain = VoltageDomain()

        # Each CPU (and its TLB walkers) talks to its own Ruby port.
        for i, cpu in enumerate(self.cpu):
            cpu.icache_port = self.ruby._cpu_ports[i].slave
            cpu.dcache_port = self.ruby._cpu_ports[i].slave

            cpu.itb.walker.port = self.ruby._cpu_ports[i].slave
            cpu.dtb.walker.port = self.ruby._cpu_ports[i].slave

        if self._opts.dgpu or self._opts.apu:
            # GPU ports follow the CPU ports in Ruby's port list:
            # per-CU memory ports, then shared SQCs, then scalar caches.
            gpu_port_idx = len(self.ruby._cpu_ports) \
                           - self._opts.num_compute_units \
                           - self._opts.num_sqc \
                           - self._opts.num_scalar_cache

            for i in xrange(self._opts.num_compute_units):
                for j in xrange(self._opts.wf_size):
                    self.shader.CUs[i].memory_port[j] = \
                        self.ruby._cpu_ports[gpu_port_idx].slave[j]
                gpu_port_idx += 1

            for i in xrange(self._opts.num_compute_units):
                if i > 0 and not i % self._opts.cu_per_sqc:
                    gpu_port_idx += 1
                self.shader.CUs[i].sqc_port = \
                    self.ruby._cpu_ports[gpu_port_idx].slave

            gpu_port_idx += 1

            for i in xrange(self._opts.num_compute_units):
                if i > 0 and not i % self._opts.cu_per_scalar_cache:
                    gpu_port_idx += 1
                self.shader.CUs[i].scalar_port = \
                    self.ruby._cpu_ports[gpu_port_idx].slave

    def setupInterrupts(self):
        """Create per-CPU interrupt controllers and wire them to Ruby."""
        for i, cpu in enumerate(self.cpu):
            # create the interrupt controller CPU and connect to RubyPort
            cpu.createInterruptController()

            # For x86 only, connect interrupts to the memory
            # Note: these are directly connected to RubyPort and
            #       not cached
            cpu.interrupts[0].pio = self.ruby._cpu_ports[i].master
            cpu.interrupts[0].int_master = self.ruby._cpu_ports[i].slave
            cpu.interrupts[0].int_slave = self.ruby._cpu_ports[i].master

    def initFS(self):
        """Set up the PC platform, I/O bus, SMBIOS/MP/E820 firmware tables."""
        self.pc = Pc()

        # North Bridge
        self.iobus = IOXBar()

        # add the ide to the list of dma devices that later need to attach to
        # dma controllers
        if (buildEnv['PROTOCOL'] == 'GPU_VIPER'):
            # VIPER expects the port itself while others use the dma object
            self._dma_ports = [self.pc.south_bridge.ide]
            self.pc.attachIO(self.iobus, [p.dma for p in self._dma_ports])
        else:
            self._dma_ports = [self.pc.south_bridge.ide.dma]
            self.pc.attachIO(self.iobus, [port for port in self._dma_ports])

        if self._opts.dgpu or self._opts.apu:
            # add GPU to southbridge
            self.pc.south_bridge.attachGPU(
                self.iobus, [port.dma for port in self._dma_ports])

        self.intrctrl = IntrControl()

        ###############################################

        # Add in a Bios information structure.
        self.smbios_table.structures = [X86SMBiosBiosInformation()]

        # Set up the Intel MP table
        base_entries = []
        ext_entries = []

        for i in range(self._opts.num_cpus):
            bp = X86IntelMPProcessor(local_apic_id=i,
                                     local_apic_version=0x14,
                                     enable=True,
                                     bootstrap=(i == 0))
            base_entries.append(bp)

        io_apic = X86IntelMPIOAPIC(id=self._opts.num_cpus,
                                   version=0x11,
                                   enable=True,
                                   address=0xfec00000)
        self.pc.south_bridge.io_apic.apic_id = io_apic.id
        base_entries.append(io_apic)

        pci_bus = X86IntelMPBus(bus_id=0, bus_type='PCI   ')
        base_entries.append(pci_bus)
        isa_bus = X86IntelMPBus(bus_id=1, bus_type='ISA   ')
        base_entries.append(isa_bus)
        connect_busses = X86IntelMPBusHierarchy(bus_id=1,
                                                subtractive_decode=True,
                                                parent_bus=0)
        ext_entries.append(connect_busses)
        pci_dev4_inta = X86IntelMPIOIntAssignment(interrupt_type='INT',
                                                  polarity='ConformPolarity',
                                                  trigger='ConformTrigger',
                                                  source_bus_id=0,
                                                  source_bus_irq=0 + (4 << 2),
                                                  dest_io_apic_id=io_apic.id,
                                                  dest_io_apic_intin=16)
        base_entries.append(pci_dev4_inta)

        def assignISAInt(irq, apicPin):
            # Route a legacy ISA IRQ both to the 8259 (ExtInt) and to the
            # given I/O APIC pin.
            assign_8259_to_apic = X86IntelMPIOIntAssignment(
                interrupt_type='ExtInt',
                polarity='ConformPolarity',
                trigger='ConformTrigger',
                source_bus_id=1,
                source_bus_irq=irq,
                dest_io_apic_id=io_apic.id,
                dest_io_apic_intin=0)
            base_entries.append(assign_8259_to_apic)
            assign_to_apic = X86IntelMPIOIntAssignment(
                interrupt_type='INT',
                polarity='ConformPolarity',
                trigger='ConformTrigger',
                source_bus_id=1,
                source_bus_irq=irq,
                dest_io_apic_id=io_apic.id,
                dest_io_apic_intin=apicPin)
            base_entries.append(assign_to_apic)

        assignISAInt(0, 2)
        assignISAInt(1, 1)
        for i in range(3, 15):
            assignISAInt(i, i)
        self.intel_mp_table.base_entries = base_entries
        self.intel_mp_table.ext_entries = ext_entries

        entries = \
           [
            # Mark the first megabyte of memory as reserved
            X86E820Entry(addr = 0, size = '639kB', range_type = 1),
            X86E820Entry(addr = 0x9fc00, size = '385kB', range_type = 2),
            # Mark the rest of physical memory as available
            X86E820Entry(addr = 0x100000,
                    size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
                    range_type = 1),
            ]
        # Mark [mem_size, 3GB) as reserved if memory less than 3GB, which
        # force IO devices to be mapped to [0xC0000000, 0xFFFF0000). Requests
        # to this specific range can pass though bridge to iobus.
        entries.append(
            X86E820Entry(addr=self.mem_ranges[0].size(),
                         size='%dB' % (0xC0000000 - self.mem_ranges[0].size()),
                         range_type=2))

        # Reserve the last 16kB of the 32-bit address space for m5ops
        entries.append(X86E820Entry(addr=0xFFFF0000, size='64kB',
                                    range_type=2))

        # Add the rest of memory. This is where all the actual data is
        entries.append(
            X86E820Entry(addr=self.mem_ranges[-1].start,
                         size='%dB' % (self.mem_ranges[-1].size()),
                         range_type=1))

        self.e820_table.entries = entries
Example #11
0
# import the SimpleOpts module
from common import SimpleOpts

# get ISA for the default binary to run. This is mostly for simple testing
# NOTE(review): relies on `m5` and `os` being imported earlier in the file
# (not visible in this fragment).
isa = str(m5.defines.buildEnv['TARGET_ISA']).lower()

# Default to running 'hello', use the compiled ISA to find the binary
# grab the specific path to the binary
thispath = os.path.dirname(os.path.realpath(__file__))
default_binary = os.path.join(thispath, '../../../',
                              'tests/test-progs/hello/bin/', isa,
                              'linux/hello')

# Binary to execute
SimpleOpts.add_option("binary", nargs='?', default=default_binary)

# Finalize the arguments and grab the args so we can pass it on to our objects
args = SimpleOpts.parse_args()

# create the system we are going to simulate
system = System()

# Set the clock fequency of the system (and all of its children)
system.clk_domain = SrcClockDomain()
system.clk_domain.clock = '1GHz'
system.clk_domain.voltage_domain = VoltageDomain()

# Set up the system
system.mem_mode = 'timing'  # Use timing accesses
system.mem_ranges = [AddrRange('512MB')]  # Create an address range
Example #12
0
#
# Authors: Jason Lowe-Power

import sys

import m5
from m5.objects import *
from m5.util import addToPath

addToPath('../')  # For the next line...
from common import SimpleOpts

from simple_full_system import MySystem

SimpleOpts.add_option("--script",
                      default='',
                      help="Script to execute in the simulated system")

# gem5 executes config scripts with __name__ set to "__m5_main__".
if __name__ == "__m5_main__":
    (opts, args) = SimpleOpts.parse_args()

    # create the system we are going to simulate
    system = MySystem(opts)

    # Read in the script file passed in via an option.
    # This file gets read and executed by the simulated system after boot.
    # Note: The disk image needs to be configured to do this.
    system.readfile = opts.script

    # set up the root SimObject and start the simulation
    root = Root(full_system=True, system=system)
Example #13
0
# Add the common scripts to our path
m5.util.addToPath('../../')

# import the caches which we made
from caches import *

# import the SimpleOpts module
from common import SimpleOpts

# set default args
cpu_2006_base_dir = '/speccpu2006-clean/benchspec/CPU2006/'
default_max_insts = 100000000 # 100 million

# Set the usage message to display
SimpleOpts.add_option('--maxinsts',
        help='Max instructions to run. Default: %s' % default_max_insts)
SimpleOpts.add_option('--rl_prefetcher',
                      help='Which RL prefetcher to use')
SimpleOpts.add_option('--reward_type',
                      help='Type of rewards to use with the RL prefetcher')

SimpleOpts.set_usage('usage: %prog [--maxinsts number] [--rl_prefetcher string] [--reward_type string] spec_program')

# Finalize the arguments and grab the opts so we can pass it on to our objects
(opts, args) = SimpleOpts.parse_args()

# Check if there was a binary passed in via the command line and error if
# there are too many arguments
# (the single positional argument names the SPEC program to run)
if len(args) == 1:
    spec_program = args[0]
else:
Example #14
0
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power

#from system import MySystem

import m5
from m5.util import addToPath

addToPath('../')
from common import SimpleOpts

SimpleOpts.add_option("--script",
                      default='',
                      help="Script to execute in the simulated system")

SimpleOpts.add_option(
    "--no_host_parallel",
    default=False,
    action="store_true",
    help="Do NOT run gem5 on multiple host threads (kvm only)")

# Parsed as an int by optparse via type="int".
SimpleOpts.add_option("--num_cpus",
                      default=2,
                      type="int",
                      help="Number of CPUs in the system")

SimpleOpts.add_option("--second_disk",
                      default='',
Example #15
0
# import all of the SimObjects
from m5.objects import *

# Add the common scripts to our path
m5.util.addToPath('../../')

# import the caches which we made
from caches import *
from bp import *

# import the SimpleOpts module
from common import SimpleOpts

# Set the usage message to display
SimpleOpts.set_usage("usage: %prog [options] <binary to execute>")
# NOTE(review): these two options carry no help text upstream; --mult is an
# int, --pred presumably selects a branch predictor — confirm with bp.py.
SimpleOpts.add_option('--mult', type='int')
SimpleOpts.add_option('--pred')

# Finalize the arguments and grab the opts so we can pass it on to our objects
(opts, args) = SimpleOpts.parse_args()

# get ISA for the default binary to run. This is mostly for simple testing
isa = str(m5.defines.buildEnv['TARGET_ISA']).lower()

# Check if there was a binary passed in via the command line and error if
# there are too many arguments
if len(args) == 1:
    binary = args[0]
else:
    SimpleOpts.print_help()
    m5.fatal("Expected a binary to execute as positional argument")
Example #16
0
# import all of the SimObjects
from m5.objects import *

# Add the common scripts to our path
m5.util.addToPath('../../')

# import the caches which we made
from caches import *

# import the SimpleOpts module
from common import SimpleOpts

# Set the usage message to display
SimpleOpts.set_usage("usage: %prog [options] <binary to execute>")
SecBufSize = 16  # default number of security-buffer entries
SimpleOpts.add_option('--numSecBufEntries',
                          help="Number of entries in security buffer. Default: %s" % SecBufSize)

# Finalize the arguments and grab the opts so we can pass it on to our objects
(opts, args) = SimpleOpts.parse_args()

# get ISA for the default binary to run. This is mostly for simple testing
isa = str(m5.defines.buildEnv['TARGET_ISA']).lower()

# Default to running 'hello', use the compiled ISA to find the binary
# grab the specific path to the binary
# NOTE(review): relies on `os` being imported earlier in the file (not
# visible in this fragment).
thispath = os.path.dirname(os.path.realpath(__file__))
binary = os.path.join(thispath, '../../../',
                      'tests/test-progs/hello/bin/', isa, 'linux/hello')

# Check if there was a binary passed in via the command line and error if
# there are too many arguments