def test_resource_container(self):
    """ Check that building several ResourceContainers one after another
    does not cause any interference between them.
    """
    # Same checks repeated for three different base sizes
    for base in (2**20, 2**19, 2**21):
        amount = 128 * base
        sdram = ConstantSDRAM(amount)
        dtcm = DTCMResource(amount + 1)
        cpu = CPUCyclesPerTickResource(amount + 2)
        container = ResourceContainer(dtcm, sdram, cpu)
        self.assertEqual(container.sdram.get_total_sdram(None), amount)
        self.assertEqual(container.dtcm.get_value(), amount + 1)
        self.assertEqual(container.cpu_cycles.get_value(), amount + 2)
def test_sdram(self):
    """ Test arithmetic (+ and -) over constant and variable SDRAM\
        resources.
    """
    const1 = ConstantSDRAM(128)
    self.assertEqual(const1.get_total_sdram(None), 128)
    const2 = ConstantSDRAM(256)

    # Constant with constant, both orders
    self.assertEqual((const1 + const2).get_total_sdram(None), 128 + 256)
    self.assertEqual((const1 - const2).get_total_sdram(None), 128 - 256)
    self.assertEqual((const2 + const1).get_total_sdram(None), 256 + 128)
    self.assertEqual((const2 - const1).get_total_sdram(None), 256 - 128)

    # Variable with constant, both orders
    var1 = VariableSDRAM(124, 8)
    self.assertEqual(var1.get_total_sdram(100), 124 + 8 * 100)
    self.assertEqual((var1 + const1).get_total_sdram(100),
                     124 + 8 * 100 + 128)
    self.assertEqual((var1 - const1).get_total_sdram(100),
                     124 + 8 * 100 - 128)
    self.assertEqual((const1 + var1).get_total_sdram(100),
                     128 + 124 + 8 * 100)
    self.assertEqual((const1 - var1).get_total_sdram(100),
                     128 - (124 + 8 * 100))

    # Variable with variable: fixed and per-timestep parts combine
    var2 = VariableSDRAM(234, 6)
    self.assertEqual((var2 + var1).get_total_sdram(150),
                     234 + 124 + (6 + 8) * 150)
    self.assertEqual((var2 - var1).get_total_sdram(150),
                     234 - 124 + (6 - 8) * 150)
def test_sdram(self):
    """ Test SDRAM resource arithmetic plus multi-region SDRAM\
        combination, nesting, merging and reporting.
    """
    const1 = ConstantSDRAM(128)
    self.assertEqual(const1.get_total_sdram(None), 128)
    const2 = ConstantSDRAM(256)

    # Constant with constant, both orders
    self.assertEqual((const1 + const2).get_total_sdram(None), 128 + 256)
    self.assertEqual((const1 - const2).get_total_sdram(None), 128 - 256)
    self.assertEqual((const2 + const1).get_total_sdram(None), 256 + 128)
    self.assertEqual((const2 - const1).get_total_sdram(None), 256 - 128)

    # Variable with constant, both orders
    var1 = VariableSDRAM(124, 8)
    self.assertEqual(var1.get_total_sdram(100), 124 + 8 * 100)
    self.assertEqual((var1 + const1).get_total_sdram(100),
                     124 + 8 * 100 + 128)
    self.assertEqual((var1 - const1).get_total_sdram(100),
                     124 + 8 * 100 - 128)
    self.assertEqual((const1 + var1).get_total_sdram(100),
                     128 + 124 + 8 * 100)
    self.assertEqual((const1 - var1).get_total_sdram(100),
                     128 - (124 + 8 * 100))

    # Variable with variable
    var2 = VariableSDRAM(234, 6)
    self.assertEqual((var2 + var1).get_total_sdram(150),
                     234 + 124 + (6 + 8) * 150)
    self.assertEqual((var2 - var1).get_total_sdram(150),
                     234 - 124 + (6 - 8) * 150)

    # Multi-region SDRAM: the total covers every region's cost
    multi1 = MultiRegionSDRAM()
    multi1.add_cost(1, 100, 4)
    multi1.add_cost(2, 50, 3)
    multi1.add_cost("overheads", 20)
    multi2 = MultiRegionSDRAM()
    multi2.add_cost(MockEnum.ZERO, 88)
    multi2.add_cost(MockEnum.ONE, 72)
    multi2.add_cost("overheads", 22)
    combo = multi1 + multi2
    self.assertEqual(
        combo.get_total_sdram(150),
        100 + 50 + 20 + 88 + 72 + 22 + (4 + 3) * 150)

    # Nesting wraps whole containers as single named regions
    multi3 = MultiRegionSDRAM()
    multi3.nest("foo", multi1)
    multi3.nest("bar", multi2)

    # Merging folds multi2's regions into multi1, summing the cost of
    # any region key both containers share ("overheads")
    multi1.merge(multi2)
    self.assertEqual(len(multi1.regions), 5)
    self.assertEqual(multi1.regions["overheads"], ConstantSDRAM(20 + 22))
    self.assertEqual(
        multi1.get_total_sdram(150),
        100 + 50 + 20 + 88 + 72 + 22 + (4 + 3) * 150)
    self.assertEqual(multi1, combo)
    self.assertEqual(multi1, multi3)

    # Reporting into a throwaway file must not raise
    with tempfile.TemporaryFile(mode="w") as target:
        multi3.report(1000, target=target)
def _add_chip_lpg_reqs(lpg_parameters, chip, lpg_sdram, sdrams, cores, iptags):
    """ Accumulate the SDRAM, core and IP-tag requirements that the given\
        LPG parameter sets place on one chip; results are appended to the\
        sdrams, cores and iptags lists in place.
    """
    # pylint: disable=too-many-arguments
    needed_sdram = 0
    needed_cores = 0
    for params in lpg_parameters:
        # A parameter set applies to this chip if it targets every board
        # (board_address None) or this chip's board specifically
        if (params.board_address is not None
                and params.board_address != chip.ip_address):
            continue
        needed_sdram += lpg_sdram
        needed_cores += 1
        iptags.append(SpecificBoardTagResource(
            board=chip.ip_address, ip_address=params.hostname,
            port=params.port, strip_sdp=params.strip_sdp, tag=params.tag,
            traffic_identifier=LPG.TRAFFIC_IDENTIFIER))
    if needed_sdram:
        sdrams.append(
            SpecificChipSDRAMResource(chip, ConstantSDRAM(needed_sdram)))
    if needed_cores:
        cores.append(CoreResource(chip, needed_cores))
def resources_required(self):
    """ The resources needed by this vertex: a fixed SDRAM allocation\
        covering the system region, the recording header and the string\
        data.
    """
    fixed_sdram = (
        SYSTEM_BYTES_REQUIREMENT +
        get_recording_header_size(len(Channels)) +
        self._string_data_size)
    return ResourceContainer(sdram=ConstantSDRAM(fixed_sdram))
def test_1_chip_pre_allocated_too_much_sdram(self):
    """ Partitioning must fail when vertices fixed to one chip need more
    SDRAM than remains after pre-allocation.
    """
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()
    eight_meg = 8 * 1024 * 1024

    # add graph vertices which reside on 0,0
    for _ in range(13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)],
            n_atoms=1, fixed_sdram_value=eight_meg)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # add pre-allocated resources for cores on 0,0
    twenty_meg = ConstantSDRAM(20 * 1024 * 1024)
    core_pre = SpecificChipSDRAMResource(
        chip=machine.get_chip_at(0, 0), sdram_usage=twenty_meg)
    pre_allocated_res = PreAllocatedResourceContainer(
        specific_sdram_usage=[core_pre])

    # Partitioning should go boom; assertRaises replaces the previous
    # try/except/six.reraise pattern, which both obscured the intent and
    # relied on the deprecated six library.
    with self.assertRaises(PacmanPartitionException):
        partitioner(graph, machine, plan_n_time_steps=None,
                    pre_allocated_resources=pre_allocated_res)
def create_requirement_collections(vertices, machine_graph):
    """ Get a collection of requirements that includes SDRAM edge resources
    """
    # Emit each vertex's [resources, constraints] pair one step late so
    # the final vertex's pair is held back; SDRAM edge partitions starting
    # at any of the vertices are accumulated along the way.
    required_resources = []
    sdram_partitions = set()
    pending_resources = None
    pending_constraints = None
    for vertex in vertices:
        if pending_resources is not None:
            required_resources.append(
                [pending_resources, pending_constraints])
        pending_resources = vertex.resources_required
        pending_constraints = vertex.constraints
        sdram_partitions.update(
            machine_graph.get_sdram_edge_partitions_starting_at_vertex(
                vertex))

    # Total SDRAM needed by all the SDRAM edge partitions found
    total_sdram = sum(
        partition.total_sdram_requirements()
        for partition in sdram_partitions)

    # Fold the edge SDRAM into the held-back (final) vertex's requirements
    resources = ResourceContainer(sdram=ConstantSDRAM(total_sdram))
    resources.extend(pending_resources)
    required_resources.append([resources, pending_constraints])

    return required_resources
def test_deallocation_of_resources(self):
    """ Check that unallocate_resources returns the allocated core and
    SDRAM to the tracker's free pools.
    """
    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    chip_sdram = machine.get_chip_at(1, 1).sdram.size
    res_sdram = 12345
    tracker = ResourceTracker(machine, plan_n_timesteps=None,
                              preallocated_resources=None)
    sdram_res = ConstantSDRAM(res_sdram)
    resources = ResourceContainer(sdram=sdram_res)
    chip_0 = machine.get_chip_at(0, 0)

    # verify core tracker is empty (trackers are created lazily per chip)
    if (0, 0) in tracker._core_tracker:
        raise Exception("shouldnt exist")
    tracker._get_core_tracker(1, 1)

    # verify core tracker not empty after the lazy-creation call above
    if (1, 1) not in tracker._core_tracker:
        raise Exception("should exist")

    # verify sdram tracker
    # 0, 0 in _sdram_tracker due to the get_core_tracker(0, 0) call
    if tracker._sdram_tracker[1, 1] != chip_sdram:
        raise Exception("incorrect sdram of {}".format(
            tracker._sdram_tracker[1, 1]))

    # allocate some res on chip (0, 0)
    chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
        tracker.allocate_resources(resources, [(0, 0)])

    # verify chips used is updated: one core now taken
    cores = list(tracker._core_tracker[(0, 0)]._cores)
    self.assertEqual(len(cores), chip_0.n_user_processors - 1)

    # verify sdram used is updated: allocation subtracted from the chip
    sdram = tracker._sdram_tracker[(0, 0)]
    self.assertEqual(sdram, chip_sdram - res_sdram)

    if (0, 0) not in tracker._chips_used:
        raise Exception("should exist")

    # deallocate res
    tracker.unallocate_resources(
        chip_x, chip_y, processor_id, resources, ip_tags, reverse_ip_tags)

    # verify chips used is updated: all user cores free again
    if tracker._core_tracker[(0, 0)].n_cores_available != \
            chip_0.n_user_processors:
        raise Exception("shouldn't exist or should be right size")
    # if (0, 0) in tracker._chips_used:
    #     raise Exception("shouldnt exist")

    # verify sdram tracker restored to the chip's full capacity
    if tracker._sdram_tracker[0, 0] != chip_sdram:
        raise Exception("incorrect sdram of {}".format(
            tracker._sdram_tracker[0, 0]))
def __init__(self, lpg_params, constraints=None, app_vertex=None, label=None):
    """
    :param LivePacketGatherParams lpg_params:
    :param LivePacketGather app_vertex:
    :param str label:
    :param constraints:
    :type constraints:
        iterable(~pacman.model.constraints.AbstractConstraint)
    """
    # Fall back on the parameters' label when none is supplied
    super(LivePacketGatherMachineVertex, self).__init__(
        label or lpg_params.label, constraints=constraints,
        app_vertex=app_vertex)

    # Static resource needs plus the IP tag used to stream packets out
    self._resources_required = ResourceContainer(
        cpu_cycles=CPUCyclesPerTickResource(self.get_cpu_usage()),
        dtcm=DTCMResource(self.get_dtcm_usage()),
        sdram=ConstantSDRAM(self.get_sdram_usage()),
        iptags=[lpg_params.get_iptag_resource()])

    # app specific data items
    self._lpg_params = lpg_params
def resources_required(self):
    """ The resources needed by this vertex: a fixed SDRAM allocation\
        covering the system region, one recording channel's header and\
        the string data.
    """
    fixed_sdram = (
        SYSTEM_BYTES_REQUIREMENT +
        recording_utilities.get_recording_header_size(1) +
        self._string_data_size)
    return ResourceContainer(sdram=ConstantSDRAM(fixed_sdram))
def __init__(self, vertex_slice, resources_required, constraints, label,
             app_vertex, truth_table, input_sequence, rate_on, rate_off,
             score_delay, stochastic, incoming_spike_buffer_size,
             simulation_duration_ms, rand_seed):
    """ Machine vertex for a logic-gate style environment.

    :param vertex_slice: the slice of atoms handled by this vertex
    :param resources_required: the fixed SDRAM requirement, in bytes
    :param constraints: placement constraints, if any
    :param label: human-readable label for this vertex
    :param app_vertex: the owning application vertex
    :param truth_table: truth table defining the target logic function
    :param input_sequence: the sequence of input values; its length fixes
        the number of inputs and hence the number of neurons
    :param rate_on: firing rate representing a logic-high input
    :param rate_off: firing rate representing a logic-low input
    :param score_delay: delay before scoring — units not shown here,
        presumably ms; TODO confirm
    :param stochastic: whether input spike generation is stochastic
    :param incoming_spike_buffer_size: NOTE(review): accepted but never
        stored by this constructor
    :param simulation_duration_ms: total simulation time in ms; used only
        to size the recording region
    :param rand_seed: random number generator seed
    """
    # resources required
    self._resources_required = ResourceContainer(
        sdram=ConstantSDRAM(resources_required))
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label

    # Pass in variables
    self._truth_table = truth_table
    self._rate_on = rate_on
    self._rate_off = rate_off
    self._stochastic = stochastic
    self._input_sequence = input_sequence
    self._no_inputs = len(input_sequence)
    # one neuron per input
    self._n_neurons = self._no_inputs
    self._rand_seed = rand_seed
    self._score_delay = score_delay

    # used to define size of recording region:
    # 4 bytes per simulated second
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # Superclasses
    MachineVertex.__init__(
        self, label, constraints, app_vertex, vertex_slice)
def get_resources_used_by_atoms(self, vertex_slice, graph):
    """ Work out the SDRAM, DTCM and CPU-cycle resources needed by a\
        slice of this vertex's atoms.
    """
    edges_out = graph.get_edges_starting_at_vertex(self)
    sdram = ConstantSDRAM(self.get_sdram_usage_for_atoms(edges_out))
    dtcm = DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice))
    cpu = CPUCyclesPerTickResource(
        self.get_cpu_usage_for_atoms(vertex_slice))
    return ResourceContainer(sdram=sdram, dtcm=dtcm, cpu_cycles=cpu)
def get_resources_used_by_atoms(self, vertex_slice, machine_time_step):
    """ Work out the resources needed by a slice of Poisson sources.

    :param ~pacman.model.graphs.common.Slice vertex_slice:
    :param int machine_time_step:
    """
    # pylint: disable=arguments-differ
    params_bytes = self.get_rates_bytes(vertex_slice)

    # Fixed SDRAM: system, provenance, rate parameters, TDMA, recording
    # metadata and profiling regions
    fixed = ConstantSDRAM(
        SYSTEM_BYTES_REQUIREMENT +
        SpikeSourcePoissonMachineVertex.get_provenance_data_size(0) +
        params_bytes +
        self.tdma_sdram_size_in_bytes +
        recording_utilities.get_recording_header_size(1) +
        recording_utilities.get_recording_data_constant_size(1) +
        profile_utils.get_profile_region_size(self.__n_profile_samples))

    # Recording space scales with the slice and the timestep
    recording_sdram = self.get_recording_sdram_usage(
        vertex_slice, machine_time_step)

    return ResourceContainer(
        sdram=recording_sdram + fixed,
        dtcm=DTCMResource(self.get_dtcm_usage_for_atoms()),
        cpu_cycles=CPUCyclesPerTickResource(
            self.get_cpu_usage_for_atoms()))
def resources_required(self):
    """ SDRAM needed by this vertex: variable when the group produces\
        output, otherwise just the fixed amount.
    """
    if self.group.output_grp:
        sdram = VariableSDRAM(self._sdram_fixed, self._sdram_variable)
    else:
        sdram = ConstantSDRAM(self._sdram_fixed)
    return ResourceContainer(sdram=sdram)
def get_resources_used_by_atoms(self, vertex_slice):  # @UnusedVariable
    """ Resources for any slice: the LPG machine vertex's static costs\
        plus the configured IP tag.
    """
    machine_vertex = LivePacketGatherMachineVertex
    return ResourceContainer(
        sdram=ConstantSDRAM(machine_vertex.get_sdram_usage()),
        dtcm=DTCMResource(machine_vertex.get_dtcm_usage()),
        cpu_cycles=CPUCyclesPerTickResource(
            machine_vertex.get_cpu_usage()),
        iptags=[self._lpg_params.get_iptag_resource()])
def resources_required(self, app_graph):
    """ Work out the SDRAM needed, based on the app vertex's edges.

    :param app_graph: the application graph holding this vertex's edges
    """
    out_edges = app_graph.get_edges_starting_at_vertex(self.app_vertex)
    # Bug fix: in_edges previously also called
    # get_edges_starting_at_vertex, so outgoing edges were counted twice
    # and incoming edges were ignored.
    in_edges = app_graph.get_edges_ending_at_vertex(self.app_vertex)
    return ResourceContainer(sdram=ConstantSDRAM(
        SIMULATION_N_BYTES +
        (len(out_edges) * self.SDRAM_PARTITION_BASE_DSG_SIZE) +
        (len(in_edges) * self.SDRAM_PARTITION_BASE_DSG_SIZE) +
        (self.SDRAM_PARTITION_COUNTERS * 2) +
        SARK_PER_MALLOC_SDRAM_USAGE))
def resources_required(self):
    """ Fixed SDRAM for the timed and start/pause command buffers plus\
        system and provenance overheads. Only SDRAM is claimed; one core\
        is implied.
    """
    total = sum((
        self.get_timed_commands_bytes(),
        self.get_n_command_bytes(self._commands_at_start_resume),
        self.get_n_command_bytes(self._commands_at_pause_stop),
        SYSTEM_BYTES_REQUIREMENT,
        self.get_provenance_data_size(0)))
    return ResourceContainer(sdram=ConstantSDRAM(total))
def resources_required(self):
    """ Fixed SDRAM: the system region plus every parameter and weight\
        region this layer needs.
    """
    fixed_sdram = sum((
        SYSTEM_BYTES_REQUIREMENT,
        self.BASE_PARAMS_DATA_SIZE,
        self.weight_container_size,
        self.softmax_params_data_size,
        self.trainable_params_data_size,
        self.next_layer_weights_container_size))
    return ResourceContainer(sdram=ConstantSDRAM(fixed_sdram))
def get_recording_sdram_usage(self, vertex_slice):
    """ SDRAM needed to record spikes for a slice, including headroom\
        for a few timesteps of overflow.

    :param ~pacman.model.graphs.common.Slice vertex_slice:
    """
    variable = self.__spike_recorder.get_sdram_usage_in_bytes(
        vertex_slice.n_atoms, self.max_spikes_per_ts())
    # Extra constant headroom sized from the per-timestep cost
    overflow = ConstantSDRAM(
        variable.per_timestep * OVERFLOW_TIMESTEPS_FOR_SDRAM)
    return variable + overflow
def get_resources_used_by_atoms(lo_atom, hi_atom, vertex_in_edges):
    """ Build a ResourceContainer for the atom range [lo_atom, hi_atom]\
        using a throwaway probe vertex to compute the per-resource costs.
    """
    probe = Vertex(1, None)
    cycles = probe.get_cpu_usage_for_atoms(lo_atom, hi_atom)
    dtcm_needed = probe.get_dtcm_usage_for_atoms(lo_atom, hi_atom)
    sdram_needed = probe.get_sdram_usage_for_atoms(
        Slice(lo_atom, hi_atom))
    return ResourceContainer(
        cpu_cycles=CPUCyclesPerTickResource(cycles),
        dtcm=DTCMResource(dtcm_needed),
        sdram=ConstantSDRAM(sdram_needed))
def test_resource_container(self):
    """ Round-trip ResourceContainers through serialisation, both without
    and with IP tags.
    """
    sdram1 = ConstantSDRAM(128 * (2**20))
    dtcm = DTCMResource(128 * (2**20) + 1)
    cpu = CPUCyclesPerTickResource(128 * (2**20) + 2)
    r1 = ResourceContainer(dtcm, sdram1, cpu)
    self.resource_there_and_back(r1)
    t1 = IPtagResource("1", 2, True)  # Minimal args
    t2 = IPtagResource("1.2.3.4", 2, False, 4, 5)
    # Bug fix: this was "r2 = r1 = ResourceContainer(...)", a copy-paste
    # typo that rebound r1 as well; only r2 should be assigned here.
    r2 = ResourceContainer(dtcm, sdram1, cpu, iptags=[t1, t2])
    self.resource_there_and_back(r2)
def get_resourced_machine_vertex(lo_atom, hi_atom, label=None):
    """ Make a SimpleMachineVertex whose resource needs scale with the\
        size of the atom range [lo_atom, hi_atom].
    """
    range_size = hi_atom - lo_atom
    resources = ResourceContainer(
        cpu_cycles=CPUCyclesPerTickResource(10 * range_size),
        dtcm=DTCMResource(200 * range_size),
        sdram=ConstantSDRAM(4000 + 50 * range_size))
    return SimpleMachineVertex(
        resources, label=label, vertex_slice=Slice(lo_atom, hi_atom))
def get_resources_used_by_atoms(self, vertex_slice, graph):
    """ Work out the resources needed by a slice of this vertex's atoms.

    :param ~pacman.model.graphs.application.ApplicationGraph graph:
    """
    # pylint: disable=arguments-differ
    outgoing = graph.get_edges_starting_at_vertex(self)
    sdram = ConstantSDRAM(self.get_sdram_usage_for_atoms(outgoing))
    dtcm = DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice))
    cpu = CPUCyclesPerTickResource(
        self.get_cpu_usage_for_atoms(vertex_slice))
    return ResourceContainer(sdram=sdram, dtcm=dtcm, cpu_cycles=cpu)
def test_place_vertex_too_big_with_vertex(self):
    """ A vertex needing far more SDRAM than any chip provides must make\
        the connective-based placer raise.
    """
    sdram_too_big = self.machine.get_chip_at(0, 0).sdram.size * 20
    rc = ResourceContainer(
        cpu_cycles=CPUCyclesPerTickResource(1000),
        dtcm=DTCMResource(1000),
        sdram=ConstantSDRAM(sdram_too_big))
    oversized = T_MachineVertex(0, 499, rc, "Second vertex")
    self.mach_graph.add_vertex(oversized)
    with self.assertRaises(PacmanValueError):
        ConnectiveBasedPlacer()(self.mach_graph, self.machine, 100)
def static_resources_required():
    """ The fixed SDRAM cost of this vertex: all config regions plus\
        data-spec and data-in overheads.
    """
    fixed_sdram = sum((
        _CONFIG_REGION_REINJECTOR_SIZE_IN_BYTES,
        _CONFIG_DATA_SPEED_UP_SIZE_IN_BYTES,
        _CONFIG_MAX_EXTRA_SEQ_NUM_SIZE_IN_BYTES,
        # Data spec size
        DATA_SPECABLE_BASIC_SETUP_INFO_N_BYTES,
        # One malloc for extra sequence numbers
        SARK_PER_MALLOC_SDRAM_USAGE,
        _MAX_DATA_SIZE_FOR_DATA_IN_MULTICAST_ROUTING,
        _SDRAM_FOR_ROUTER_TABLE_ENTRIES,
        _CONFIG_DATA_IN_KEYS_SDRAM_IN_BYTES))
    return ResourceContainer(sdram=ConstantSDRAM(fixed_sdram))
def get_resources_used_by_atoms(self, vertex_slice):
    """ standard method call to get the sdram, cpu and dtcm usage of a
    collection of atoms

    :param vertex_slice: the collection of atoms
    """
    sdram = ConstantSDRAM(
        self.get_sdram_usage_for_atoms(vertex_slice))
    cpu = CPUCyclesPerTickResource(
        self.get_cpu_usage_for_atoms(vertex_slice))
    dtcm = DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice))
    return ResourceContainer(sdram=sdram, cpu_cycles=cpu, dtcm=dtcm)
def test_place_vertex_too_big_with_vertex(self):
    """ A vertex needing far more SDRAM than any chip provides must make\
        the radial placer raise.
    """
    sdram_too_big = self.machine.get_chip_at(0, 0).sdram.size * 20
    rc = ResourceContainer(
        cpu_cycles=CPUCyclesPerTickResource(1000),
        dtcm=DTCMResource(1000),
        sdram=ConstantSDRAM(sdram_too_big))
    oversized = SimpleMachineVertex(
        rc, vertex_slice=Slice(0, 499), label="Second vertex")
    self.mach_graph.add_vertex(oversized)
    with self.assertRaises(PacmanValueError):
        radial_placer(self.mach_graph, self.machine, 100)
def constant_sdram(self, graph, vertex_slice):
    """ returns the sdram used by the delay extension

    :param ApplicationGraph graph: app graph
    :param Slice vertex_slice: The slice to get the size of
    :rtype: ConstantSDRAM
    """
    out_edges = graph.get_edges_starting_at_vertex(self)
    # System region, delay parameters, TDMA, provenance and generator
    # information all contribute fixed costs
    fixed_costs = (
        SYSTEM_BYTES_REQUIREMENT,
        self._governed_app_vertex.delay_params_size(vertex_slice),
        self._governed_app_vertex.tdma_sdram_size_in_bytes,
        DelayExtensionMachineVertex.get_provenance_data_size(
            DelayExtensionMachineVertex.N_EXTRA_PROVENANCE_DATA_ENTRIES),
        self._get_size_of_generator_information(out_edges))
    return ConstantSDRAM(sum(fixed_costs))
def __init__(self, label, use_prefix=False, key_prefix=None,
             prefix_type=None, message_type=EIEIOType.KEY_32_BIT,
             right_shift=0, payload_as_time_stamps=True,
             use_payload_prefix=True, payload_prefix=None,
             payload_right_shift=0, number_of_packets_sent_per_time_step=0,
             hostname=None, port=None, strip_sdp=None, board_address=None,
             tag=None, constraints=None):
    """ Set up a live packet gatherer machine vertex with its static\
        resources and EIEIO packet-formatting configuration.
    """
    # pylint: disable=too-many-arguments, too-many-locals
    super(LivePacketGatherMachineVertex, self).__init__(
        label, constraints=constraints)

    # Static resource needs plus the IP tag used to stream packets out
    stream_tag = IPtagResource(
        ip_address=hostname, port=port, strip_sdp=strip_sdp, tag=tag,
        traffic_identifier=self.TRAFFIC_IDENTIFIER)
    self._resources_required = ResourceContainer(
        cpu_cycles=CPUCyclesPerTickResource(self.get_cpu_usage()),
        dtcm=DTCMResource(self.get_dtcm_usage()),
        sdram=ConstantSDRAM(self.get_sdram_usage()),
        iptags=[stream_tag])

    # app specific data items
    self._use_prefix = use_prefix
    self._key_prefix = key_prefix
    self._prefix_type = prefix_type
    self._message_type = message_type
    self._right_shift = right_shift
    self._payload_as_time_stamps = payload_as_time_stamps
    self._use_payload_prefix = use_payload_prefix
    self._payload_prefix = payload_prefix
    self._payload_right_shift = payload_right_shift
    self._number_of_packets_sent_per_time_step = \
        number_of_packets_sent_per_time_step
def get_resources_used_by_atoms(self, vertex_slice):  # @UnusedVariable
    """ Resources for any slice: the LPG machine vertex's static costs\
        plus an IP tag built from this vertex's connection settings.
    """
    machine_vertex = LivePacketGatherMachineVertex
    stream_tag = IPtagResource(
        ip_address=self._ip_address, port=self._port,
        strip_sdp=self._strip_sdp, tag=self._tag,
        traffic_identifier=machine_vertex.TRAFFIC_IDENTIFIER)
    return ResourceContainer(
        sdram=ConstantSDRAM(machine_vertex.get_sdram_usage()),
        dtcm=DTCMResource(machine_vertex.get_dtcm_usage()),
        cpu_cycles=CPUCyclesPerTickResource(
            machine_vertex.get_cpu_usage()),
        iptags=[stream_tag])
def __init__(self, vertex_slice, resources_required, constraints, label,
             app_vertex, encoding, time_increment, pole_length, pole_angle,
             reward_based, force_increments, max_firing_rate,
             number_of_bins, central, bin_overlap, tau_force,
             incoming_spike_buffer_size, simulation_duration_ms, rand_seed):
    """ Machine vertex for a pole-balancing style environment.

    :param vertex_slice: the slice of atoms handled by this vertex
    :param resources_required: the fixed SDRAM requirement, in bytes
    :param constraints: placement constraints, if any
    :param label: human-readable label for this vertex
    :param app_vertex: the owning application vertex
    :param encoding: input encoding selector; 0 means rate-based (one
        neuron per metric), otherwise binned encoding
    :param time_increment: simulation time increment
    :param pole_length: length of the pole
    :param pole_angle: starting angle of the pole
    :param reward_based: whether scoring is reward based
    :param force_increments: number of discrete force levels
    :param max_firing_rate: maximum input firing rate
    :param number_of_bins: bins per metric when using binned encoding
    :param central: central-bin configuration value
    :param bin_overlap: overlap between adjacent bins
    :param tau_force: decay constant applied to the force
    :param incoming_spike_buffer_size: NOTE(review): accepted but never
        stored by this constructor
    :param simulation_duration_ms: total simulation time in ms; used only
        to size the recording region
    :param rand_seed: random number generator seed
    """
    # Resources required
    self._resource_required = ResourceContainer(
        sdram=ConstantSDRAM(resources_required))
    # **NOTE** n_neurons currently ignored - width and height will be
    # specified as additional parameters, forcing their product to be
    # duplicated in n_neurons seems pointless
    self._label = label
    self._encoding = encoding

    # Pass in variables
    self._pole_length = pole_length
    self._pole_angle = pole_angle
    self._force_increments = force_increments
    # for rate based it's only 1 neuron per metric
    # (position, angle, velocity of both)
    if self._encoding == 0:
        self._n_neurons = 4
    else:
        self._n_neurons = 4 * number_of_bins
    self._time_increment = time_increment
    self._reward_based = reward_based
    self._max_firing_rate = max_firing_rate
    self._number_of_bins = number_of_bins
    self._central = central
    self._rand_seed = rand_seed
    self._bin_overlap = bin_overlap
    self._tau_force = tau_force

    # used to define size of recording region:
    # 4 bytes per simulated second
    self._recording_size = int((simulation_duration_ms / 1000.) * 4)

    # Superclasses
    MachineVertex.__init__(
        self, label, constraints, app_vertex, vertex_slice)