def __init__(self, args, **kwargs):
    super(SimpleSeSystem, self).__init__(**kwargs)

    # Setup book keeping to be able to use CpuClusters from the
    # devices module.
    self._clusters = []
    self._num_cpus = 0

    # Create a voltage and clock domain for system components
    self.voltage_domain = VoltageDomain(voltage="3.3V")
    self.clk_domain = SrcClockDomain(clock="1GHz",
                                     voltage_domain=self.voltage_domain)

    # Create the off-chip memory bus.
    self.membus = SystemXBar()

    # Wire up the system port that gem5 uses to load the kernel
    # and to perform debug accesses.
    self.system_port = self.membus.slave

    # Add CPUs to the system. A cluster of CPUs typically has
    # private L1 caches and a shared L2 cache.
    self.cpu_cluster = devices.CpuCluster(self,
                                          args.num_cores,
                                          "4GHz", "1.2V",
                                          *cpu_types[args.cpu])

    # Create a cache hierarchy (unless we are simulating a
    # functional CPU in atomic memory mode) for the CPU cluster
    # and connect it to the shared memory bus.
    if self.cpu_cluster.memoryMode() == "timing":
        self.cpu_cluster.addL1()
        self.cpu_cluster.addL2(self.cpu_cluster.clk_domain)
        self.cpu_cluster.connectMemSide(self.membus)

    # Tell gem5 about the memory mode used by the CPUs we are
    # simulating.
    self.mem_mode = self.cpu_cluster.memoryMode()
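# The constructor above indexes a cpu_types dict mapping a CPU model name
# to a tuple of (CPU class, L1I, L1D, walk cache, L2) that CpuCluster
# unpacks. A minimal sketch of such a table, modeled on gem5's starter
# scripts; the exact entries ("atomic", "minor", "hpi") and cache classes
# are assumptions that depend on what your gem5 checkout provides.
cpu_types = {
    "atomic": (AtomicSimpleCPU, None, None, None, None),
    "minor": (MinorCPU,
              devices.L1I, devices.L1D,
              devices.WalkCache,
              devices.L2),
    "hpi": (HPI.HPI,
            HPI.HPI_ICache, HPI.HPI_DCache,
            HPI.HPI_WalkCache,
            HPI.HPI_L2),
}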
def create(args):
    ''' Create and configure the system object. '''

    if not args.dtb:
        dtb_file = SysPaths.binary("armv8_gem5_v1_%icpu.%s.dtb" %
                                   (args.num_cores, default_dist_version))
    else:
        dtb_file = args.dtb

    if args.script and not os.path.isfile(args.script):
        print("Error: Bootscript %s does not exist" % args.script)
        sys.exit(1)

    cpu_class = cpu_types[args.cpu][0]
    mem_mode = cpu_class.memory_mode()
    # Only simulate caches when using a timing CPU (e.g., the HPI model)
    want_caches = True if mem_mode == "timing" else False

    system = devices.SimpleSystem(want_caches,
                                  args.mem_size,
                                  mem_mode=mem_mode,
                                  dtb_filename=dtb_file,
                                  kernel=SysPaths.binary(args.kernel),
                                  readfile=args.script,
                                  machine_type="DTOnly")

    MemConfig.config_mem(args, system)

    # Add the PCI devices we need for this system. The base system
    # doesn't have any PCI devices by default since they are assumed
    # to be added by the configuration scripts needing them.
    system.pci_devices = [
        # Create a VirtIO block device for the system's boot
        # disk. Attach the disk image using gem5's Copy-on-Write
        # functionality to avoid writing changes to the stored copy of
        # the disk image.
        PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))),
    ]

    # Attach the PCI devices to the system. The helper method in the
    # system assigns a unique PCI bus ID to each of the devices and
    # connects them to the IO bus.
    for dev in system.pci_devices:
        system.attach_pci(dev)

    # Wire up the system's memory system
    system.connect()

    # Add CPU clusters to the system
    system.cpu_cluster = [
        devices.CpuCluster(system,
                           args.num_cores,
                           args.cpu_freq,
                           "1.0V",
                           *cpu_types[args.cpu]),
    ]

    # Create a cache hierarchy for the cluster. We are assuming that
    # clusters have core-private L1 caches and an L2 that's shared
    # within the cluster.
    for cluster in system.cpu_cluster:
        system.addCaches(want_caches, last_cache_level=2)

    # Setup gem5's minimal Linux boot loader.
    system.realview.setupBootLoader(system.membus, system, SysPaths.binary)

    # Linux boot command flags
    kernel_cmd = [
        # Tell Linux to use the simulated serial port as a console
        "console=ttyAMA0",
        # Hard-code the timing calibration (loops per jiffy)
        "lpj=19988480",
        # Disable address space randomisation to get a consistent
        # memory layout.
        "norandmaps",
        # Tell Linux where to find the root disk image.
        "root=/dev/vda1",
        # Mount the root disk read-write by default.
        "rw",
        # Tell Linux about the amount of physical memory present.
        "mem=%s" % args.mem_size,
    ]
    system.boot_osflags = " ".join(kernel_cmd)

    return system
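# The create() functions in this file hand their disk image to a
# create_cow_image() helper that wraps the raw image in a copy-on-write
# layer, so guest writes never touch the stored image. A minimal sketch,
# modeled on gem5's starter scripts; it assumes the image name is
# resolved through SysPaths.disk().
def create_cow_image(name):
    """Helper function to create a Copy-on-Write disk image."""
    image = CowDiskImage()
    image.child.image_file = SysPaths.disk(name)
    return image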
def create(args):
    ''' Create and configure the system object. '''

    if args.readfile and not os.path.isfile(args.readfile):
        print("Error: Bootscript %s does not exist" % args.readfile)
        sys.exit(1)

    object_file = args.kernel if args.kernel else ""

    cpu_class = cpu_types[args.cpu][0]
    mem_mode = cpu_class.memory_mode()
    # Only simulate caches when using a timing CPU (e.g., the HPI model)
    want_caches = True if mem_mode == "timing" else False

    platform = ObjectList.platform_list.get(args.machine_type)

    system = devices.simpleSystem(ArmSystem,
                                  want_caches,
                                  args.mem_size,
                                  platform=platform(),
                                  mem_mode=mem_mode,
                                  readfile=args.readfile)

    MemConfig.config_mem(args, system)

    if args.semi_enable:
        system.semihosting = ArmSemihosting(
            stdin=args.semi_stdin,
            stdout=args.semi_stdout,
            stderr=args.semi_stderr,
            files_root_dir=args.semi_path,
            cmd_line=" ".join([object_file] + args.args))

    # Add the PCI devices we need for this system. The base system
    # doesn't have any PCI devices by default since they are assumed
    # to be added by the configuration scripts needing them.
    pci_devices = []
    if args.disk_image:
        # Create a VirtIO block device for the system's boot
        # disk. Attach the disk image using gem5's Copy-on-Write
        # functionality to avoid writing changes to the stored copy of
        # the disk image.
        system.disk = PciVirtIO(vio=VirtIOBlock(
            image=create_cow_image(args.disk_image)))
        pci_devices.append(system.disk)

    # Attach the PCI devices to the system. The helper method in the
    # system assigns a unique PCI bus ID to each of the devices and
    # connects them to the IO bus.
    for dev in pci_devices:
        system.attach_pci(dev)

    # Wire up the system's memory system
    system.connect()

    # Add CPU clusters to the system
    system.cpu_cluster = [
        devices.CpuCluster(system,
                           args.num_cores,
                           args.cpu_freq,
                           "1.0V",
                           *cpu_types[args.cpu]),
    ]

    # Create a cache hierarchy for the cluster. We are assuming that
    # clusters have core-private L1 caches and an L2 that's shared
    # within the cluster.
    for cluster in system.cpu_cluster:
        system.addCaches(want_caches, last_cache_level=2)

    # No boot loader: derive the reset address from the workload's
    # entry point.
    system.auto_reset_addr = True

    # Using GICv3
    system.realview.gic.gicv4 = False

    system.highest_el_is_64 = True
    system.have_virtualization = True
    system.have_security = True

    workload_class = workloads.workload_list.get(args.workload)
    system.workload = workload_class(object_file, system)

    return system
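# create() above resolves its workload class through
# workloads.workload_list. A minimal sketch of what a registry entry
# could look like; the ArmBaremetal class body and the plain-dict
# registry are assumptions for illustration, not the exact contents of
# gem5's workloads module.
class ArmBaremetal(ArmFsWorkload):
    """Baremetal workload: run the object file directly, no ATAGS."""
    atags_addr = 0

    def __init__(self, obj, system, **kwargs):
        super(ArmBaremetal, self).__init__(**kwargs)
        self.object_file = obj

# Hypothetical registry mapping --workload names to workload classes.
workload_list = {
    "ArmBaremetal": ArmBaremetal,
}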
def __init__(self, args, **kwargs):
    super(SimpleSeSystem, self).__init__(**kwargs)

    # Apply the user-specified cache geometry to the custom cache
    # classes before the cluster instantiates them.
    self.cache_line_size = args.cache_line_size
    customICache.size = args.Icache_size
    customDCache.size = args.Dcache_size
    customICache.assoc = args.Icache_assoc
    customDCache.assoc = args.Dcache_assoc
    print("cache line size: %d" % (self.cache_line_size))
    print("I Cache size: %s" % (customICache.size))
    print("D Cache size: %s" % (customDCache.size))

    # Setup book keeping to be able to use CpuClusters from the
    # devices module.
    self._clusters = []
    self._num_cpus = 0

    # Configure the branch predictor on the HPI CPU class, defaulting
    # to bi-mode prediction if no known predictor was requested.
    if args.branch_predictor == "TournamentBP":
        HPICPU.branchPred = TournamentBP()
        HPICPU.branchPred.BTBEntries = args.BTBEntries
        HPICPU.branchPred.localPredictorSize = args.local_predictor_size
        HPICPU.branchPred.globalPredictorSize = args.global_predictor_size
        HPICPU.branchPred.choicePredictorSize = args.choice_predictor_size
    elif args.branch_predictor == "LocalBP":
        HPICPU.branchPred = LocalBP()
        HPICPU.branchPred.BTBEntries = args.BTBEntries
        HPICPU.branchPred.localPredictorSize = args.local_predictor_size
    else:
        HPICPU.branchPred = BiModeBP()
        HPICPU.branchPred.BTBEntries = args.BTBEntries
        HPICPU.branchPred.globalPredictorSize = args.global_predictor_size
        HPICPU.branchPred.choicePredictorSize = args.choice_predictor_size

    # Create a voltage and clock domain for system components
    self.voltage_domain = VoltageDomain(voltage="3.3V")
    self.clk_domain = SrcClockDomain(clock="1GHz",
                                     voltage_domain=self.voltage_domain)

    # Create the off-chip memory bus.
    self.membus = SystemXBar()

    # Wire up the system port that gem5 uses to load the kernel
    # and to perform debug accesses.
    self.system_port = self.membus.slave

    # Add CPUs to the system. A cluster of CPUs typically has
    # private L1 caches and a shared L2 cache.
    self.cpu_cluster = devices.CpuCluster(self,
                                          args.num_cores,
                                          args.cpu_freq,
                                          "1.2V",
                                          *cpu_types[args.cpu])

    # Create a cache hierarchy (unless we are simulating a
    # functional CPU in atomic memory mode) for the CPU cluster
    # and connect it to the shared memory bus.
    if self.cpu_cluster.memoryMode() == "timing":
        self.cpu_cluster.addL1()
        self.cpu_cluster.addL2(self.cpu_cluster.clk_domain)
        self.cpu_cluster.connectMemSide(self.membus)

    # Tell gem5 about the memory mode used by the CPUs we are
    # simulating.
    self.mem_mode = self.cpu_cluster.memoryMode()
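# A minimal top-level driver for the SE-mode system above: parse options,
# build the system, and simulate. This is a sketch modeled on gem5's
# starter scripts; the option names shown here are assumptions, and the
# per-core workload/process setup is elided for brevity.
def main():
    parser = argparse.ArgumentParser(epilog=__doc__)
    parser.add_argument("--cpu", choices=list(cpu_types.keys()),
                        default="atomic", help="CPU model to use")
    parser.add_argument("--num-cores", type=int, default=1,
                        help="Number of CPU cores")
    args = parser.parse_args()

    # Create a single root node for gem5's object hierarchy and attach
    # the SE-mode system to it.
    root = Root(full_system=False)
    root.system = SimpleSeSystem(args)

    # Instantiate the C++ object hierarchy and run until the workload
    # exits.
    m5.instantiate()
    event = m5.simulate()
    print("Exiting @ tick %d because %s" % (m5.curTick(), event.getCause()))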
def create(args):
    ''' Create and configure the system object. '''

    if args.readfile and not os.path.isfile(args.readfile):
        print("Error: Bootscript %s does not exist" % args.readfile)
        sys.exit(1)

    object_file = args.kernel if args.kernel else ""

    cpu_class = cpu_types[args.cpu][0]
    mem_mode = cpu_class.memory_mode()
    # Only simulate caches when using a timing CPU (e.g., the HPI model)
    want_caches = True if mem_mode == "timing" else False

    platform = ObjectList.platform_list.get(args.machine_type)

    system = devices.SimpleSystem(want_caches,
                                  args.mem_size,
                                  platform=platform(),
                                  mem_mode=mem_mode,
                                  readfile=args.readfile)

    MemConfig.config_mem(args, system)

    if args.semi_enable:
        system.semihosting = ArmSemihosting(
            stdin=args.semi_stdin,
            stdout=args.semi_stdout,
            stderr=args.semi_stderr,
            files_root_dir=args.semi_path,
            cmd_line=" ".join([object_file] + args.args))

    if args.disk_image:
        # Create a VirtIO block device for the system's boot
        # disk. Attach the disk image using gem5's Copy-on-Write
        # functionality to avoid writing changes to the stored copy of
        # the disk image.
        system.realview.vio[0].vio = VirtIOBlock(
            image=create_cow_image(args.disk_image))

    # Wire up the system's memory system
    system.connect()

    # Add CPU clusters to the system
    system.cpu_cluster = [
        devices.CpuCluster(system,
                           args.num_cores,
                           args.cpu_freq,
                           "1.0V",
                           *cpu_types[args.cpu]),
    ]

    # Create a cache hierarchy for the cluster. We are assuming that
    # clusters have core-private L1 caches and an L2 that's shared
    # within the cluster.
    system.addCaches(want_caches, last_cache_level=2)

    # No boot loader: derive the reset address from the workload's
    # entry point.
    system.auto_reset_addr = True

    # Using GICv3
    system.realview.gic.gicv4 = False

    system.highest_el_is_64 = True
    system.release.add(ArmExtension('SECURITY'))
    system.release.add(ArmExtension('VIRTUALIZATION'))

    workload_class = workloads.workload_list.get(args.workload)
    system.workload = workload_class(object_file, system)

    return system
def create(args):
    ''' Create and configure the system object. '''

    if args.script and not os.path.isfile(args.script):
        print("Error: Bootscript %s does not exist" % args.script)
        sys.exit(1)

    cpu_class = cpu_types[args.cpu][0]
    mem_mode = cpu_class.memory_mode()

    system = devices.ArmRubySystem(args.mem_size,
                                   mem_mode=mem_mode,
                                   workload=ArmFsLinux(
                                       object_file=SysPaths.binary(args.kernel)),
                                   readfile=args.script)

    # Add CPU clusters to the system
    system.cpu_cluster = [
        devices.CpuCluster(system,
                           args.num_cpus,
                           args.cpu_freq,
                           "1.0V",
                           *cpu_types[args.cpu]),
    ]

    # Add the PCI devices we need for this system. The base system
    # doesn't have any PCI devices by default since they are assumed
    # to be added by the configuration scripts needing them.
    system.pci_devices = [
        # Create a VirtIO block device for the system's boot
        # disk. Attach the disk image using gem5's Copy-on-Write
        # functionality to avoid writing changes to the stored copy of
        # the disk image.
        PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))),
    ]

    # Attach the PCI devices to the system. The helper method in the
    # system assigns a unique PCI bus ID to each of the devices and
    # connects them to the IO bus.
    for dev in system.pci_devices:
        system.attach_pci(dev)

    config_ruby(system, args)

    # Wire up the system's memory system
    system.connect()

    # Setup gem5's minimal Linux boot loader.
    system.realview.setupBootLoader(system, SysPaths.binary)

    if args.dtb:
        system.workload.dtb_filename = args.dtb
    else:
        # No DTB specified: autogenerate DTB
        system.workload.dtb_filename = \
            os.path.join(m5.options.outdir, 'system.dtb')
        system.generateDtb(system.workload.dtb_filename)

    # Linux boot command flags
    kernel_cmd = [
        # Tell Linux to use the simulated serial port as a console
        "console=ttyAMA0",
        # Hard-code the timing calibration (loops per jiffy)
        "lpj=19988480",
        # Disable address space randomisation to get a consistent
        # memory layout.
        "norandmaps",
        # Tell Linux where to find the root disk image.
        "root=%s" % args.root_device,
        # Mount the root disk read-write by default.
        "rw",
        # Tell Linux about the amount of physical memory present.
        "mem=%s" % args.mem_size,
    ]
    system.workload.command_line = " ".join(kernel_cmd)

    return system
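# A minimal simulation loop to drive the full-system configurations
# built above, modeled on gem5's starter scripts: simulate until the
# workload exits, dropping a checkpoint whenever the guest requests
# one. The args.checkpoint flag is an assumption for illustration.
def run(args):
    cptdir = m5.options.outdir
    if args.checkpoint:
        print("Checkpoint directory: %s" % cptdir)

    while True:
        event = m5.simulate()
        exit_msg = event.getCause()
        if exit_msg == "checkpoint":
            # The guest (e.g., via the m5 utility) asked for a
            # checkpoint; write it to the output directory and resume.
            print("Dropping checkpoint at tick %d" % m5.curTick())
            cpt_dir = os.path.join(cptdir, "cpt.%d" % m5.curTick())
            m5.checkpoint(cpt_dir)
            print("Checkpoint done.")
        else:
            print(exit_msg, " @ ", m5.curTick())
            break

    sys.exit(event.getCode())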