def __init__(self, topology: Topology, cluster_context: ClusterContext = None,
             initial_time=0, scheduler_params=None):
    """Wire up the FaaS simulation environment.

    Args:
        topology: cluster topology; an index is built on it immediately.
        cluster_context: optional pre-built context — allows injecting a
            pre-calculated (cached) bandwidth graph instead of deriving one
            from the topology.
        initial_time: start time handed to the environment base class.
        scheduler_params: optional kwargs forwarded to the Scheduler.
    """
    super().__init__(initial_time)
    # NOTE(review): placeholder — the builtin `object` class is assigned here;
    # presumably replaced with a real generator before the run. TODO confirm.
    self.request_generator = object
    # The stores are created on `self`: this class acts as the simpy
    # environment itself (via the super().__init__ call above).
    self.request_queue = simpy.Store(self)
    self.scheduler_queue = simpy.Store(self)
    self.topology: Topology = topology
    # build the topology index before a cluster context may be derived from it
    topology.create_index()
    # allows us to inject a pre-calculated bandwidth graph that was cached
    if cluster_context is None:
        self.cluster: ClusterContext = topology.create_cluster_context()
    else:
        self.cluster: ClusterContext = cluster_context
    if scheduler_params:
        self.scheduler = Scheduler(self.cluster, **scheduler_params)
    else:
        self.scheduler = Scheduler(self.cluster)
    self.faas_gateway = FaasGateway(self)
    self.execution_simulator = ExecutionSimulator(self)
    # simulated wall clock anchored at the Unix epoch
    self.clock = SimulatedClock(self, start=datetime(1970, 1, 1, 0, 0, 0))
    self.metrics = Metrics(self, RuntimeLogger(self.clock))
    # fitted oracles estimating function execution and startup times
    self.execution_time_oracle = oracles.FittedExecutionTimeOracle()
    self.startup_time_oracle = oracles.HackedFittedStartupTimeOracle()
def __init__(self, env, latency=0, jitter=0, loss=0, qlimit=None):
    """Link model with latency, jitter and loss, served by three relay processes.

    Args:
        env: simpy environment.
        latency: propagation delay in seconds.
        jitter: delay variation.
        loss: loss probability in percent.
        qlimit: queue limit in packets (None = unbounded).
    """
    self.env = env

    # link parameters
    self.latency = latency
    self.jitter = jitter
    self.loss = loss
    self.qlimit = qlimit

    # internal store resources
    self.S_in = simpy.Store(env)
    self.S_1 = simpy.Store(env)
    self.S_2 = simpy.Store(env)

    # upstream components feed S_in; output is wired externally and
    # must start as None
    self.input = self.S_in
    self.output = None

    # spawn the three concurrent relay stages
    for stage in (self.r_1, self.r_2, self.r_3):
        self.action = env.process(stage())
def __init__(self, env: Environment, scale_by_requests: bool = False,
             scale_by_average_requests: bool = False,
             scale_by_queue_requests_per_replica: bool = False) -> None:
    """FaaS system state: replicas, queues, load balancer and autoscalers.

    Args:
        env: simulation environment.
        scale_by_requests: enable per-request scaling.
        scale_by_average_requests: enable scaling on average requests per replica.
        scale_by_queue_requests_per_replica: enable scaling on queued requests
            per replica.
    """
    self.env = env
    self.function_containers = {}
    # all FunctionReplicas, keyed by the name of their FunctionDeployment
    self.replicas = defaultdict(list)

    self.request_queue = simpy.Store(env)
    self.scheduler_queue = simpy.Store(env)

    # TODO let users inject LoadBalancer
    self.load_balancer = RoundRobinLoadBalancer(env, self.replicas)

    self.functions_deployments: Dict[str, FunctionDeployment] = {}
    self.replica_count: Dict[str, int] = {}
    self.functions_definitions = Counter()

    # autoscaling strategy flags
    self.scale_by_requests = scale_by_requests
    self.scale_by_average_requests_per_replica = scale_by_average_requests
    self.scale_by_queue_requests_per_replica = scale_by_queue_requests_per_replica

    # per-deployment scaler registries, one per strategy
    self.faas_scalers: Dict[str, FaasRequestScaler] = {}
    self.avg_faas_scalers: Dict[str, AverageFaasRequestScaler] = {}
    self.queue_faas_scalers: Dict[str, AverageQueueFaasRequestScaler] = {}
def __init__(self, env, aId, distribution, cpriFrameGenerationTime,
             transmissionTime, localTransmissionTime, graph, cpriMode):
    """RRH (remote radio head) node: generates traffic and relays CPRI frames.

    Args:
        env: simpy environment.
        aId: numeric id; stored as the string "RRH:<aId>".
        distribution: distribution for the built-in traffic generator.
        cpriFrameGenerationTime: time to generate a CPRI frame.
        transmissionTime: uplink transmission time.
        localTransmissionTime: local (fog) transmission time.
        graph: network graph this RRH belongs to.
        cpriMode: CPRI operating mode.
    """
    self.env = env
    self.nextNode = None
    self.aType = "RRH"
    self.aId = "RRH"+":"+str(aId)
    self.frames = []
    self.users = []  # list of active UEs served by this RRH
    # binary array that keeps the connection from this RRH to fog nodes and cloud node(s)
    self.nodes_connection = []
    self.distribution = distribution  # distribution for the traffic generator
    # initiate the built-in traffic generator
    self.trafficGen = self.env.process(self.run())
    #self.genFrame = self.env.process(self.takeFrameUE())
    # NOTE(review): the next two assignments rebind the method names to their
    # simpy Process handles. The bound methods are invoked once here, before
    # rebinding takes effect, so each generator starts exactly once — but the
    # methods are unreachable by name afterwards.
    self.uplinkTransmitCPRI = self.env.process(self.uplinkTransmitCPRI())  # send eCPRI frames to a processing node
    self.downlinkTransmitUE = self.env.process(self.downlinkTransmitUE())  # send frames to the UEs
    # this store receives frames back from the users
    self.received_users_frames = simpy.Store(self.env)
    # current traffic load of this RRH
    self.currentLoad = 0
    # this store receives frames back from the processing nodes
    #self.received_eCPRI_frames = simpy.Store(self.env)
    self.processingQueue = simpy.Store(self.env)
    # this store keeps the locally processed baseband signals
    self.local_processing_queue = simpy.Store(self.env)
    #self.frameProcTime = frameProcTime
    self.cpriFrameGenerationTime = cpriFrameGenerationTime
    self.transmissionTime = transmissionTime
    self.localTransmissionTime = localTransmissionTime
    self.graph = graph
    self.cpriMode = cpriMode
    # limiting coordinates of the base station area
    self.x1 = 0
    self.x2 = 0
    self.y1 = 0
    self.y2 = 0
def __init__(self, env, name, num_stages, delay_per_stage):
    """Conveyor belt operator with `num_stages` stages of equal delay.

    Args:
        env: simpy environment.
        name: operator name, forwarded to BaseOperator.
        num_stages: number of belt stages (integer, at least 2).
        delay_per_stage: integer delay each stage imposes.
    """
    BaseOperator.__init__(self, env, name)
    self.num_stages = num_stages
    self.delay_per_stage = delay_per_stage
    # default start offset
    self.start_time = 0

    # a conveyor belt has at least two stages; timing values must be integers
    assert (isinstance(self.num_stages, int) and (num_stages >= 2))
    assert (isinstance(self.delay_per_stage, int))
    assert (isinstance(self.start_time, int))

    # single-slot buffers at the first and last stages
    self.input_buf = simpy.Store(env, capacity=1)
    self.output_buf = simpy.Store(env, capacity=1)

    # one slot per stage, all initially empty
    self.stages = [None] * num_stages

    # state machine and behavior process
    self.define_states(["empty", "moving", "stalled"], start_state="empty")
    self.process = env.process(self.behavior())
def __init__(self, env, id=1, dest_id=2, flow_id=1, priority=0,
             adist=None, sdist=None, start_time=0, stop_time=float('inf'),
             rate=10000, debug=False, type="None", data="None"):
    """Host block bundling a traffic generator and a traffic sink.

    The generator writes into an internal store; r_out/r_in relay packets
    between the stores and the externally wired output.
    """
    self.env = env
    self.rate = rate

    # internal store resources
    self.S_in = simpy.Store(env)
    self.S_out = simpy.Store(env)

    # upstream entry point; output is wired externally and starts as None
    self.input = self.S_in
    self.output = None

    # sub-blocks: the generator's output feeds S_out
    self.tg = traffic_generator(env, id=id, dest_id=dest_id, flow_id=flow_id,
                                priority=priority, adist=adist, sdist=sdist,
                                start_time=start_time, stop_time=stop_time,
                                type=type, data=data)
    self.tg.output = self.S_out
    self.ts = traffic_sink(env, id=id, debug=debug)

    # concurrent relay processes
    for relay in (self.r_out, self.r_in):
        self.action = env.process(relay())
def __init__(self, env, qlimit=None):
    """Priority router with three ingress classes (ST-2, ST-1, BE) and one egress.

    Args:
        env: simpy environment.
        qlimit: accepted but currently unused by this constructor.
    """
    self.env = env

    # one store per ingress class plus the egress store
    self.S_st2q_in = simpy.Store(env)
    self.S_st1q_in = simpy.Store(env)
    self.S_beq_in = simpy.Store(env)
    self.S_out = simpy.Store(env)

    # ingress connection points
    self.in_st_2q = self.S_st2q_in
    self.in_st_1q = self.S_st1q_in
    self.in_be_q = self.S_beq_in

    # egress; wired externally and must start as None
    self.output = None

    # spawn the per-class readers plus the priority arbiter
    for runner in (self.r_st_2, self.r_st_1, self.r_be, self.r_priority_router):
        self.action = env.process(runner())
def __init__(self, env, out_stream, param, SiPM_Matrix, COMP):
    """L1 data-acquisition block: buffers SiPM frames and drives the output link.

    Args:
        env: simpy environment.
        out_stream: downstream sink for produced frames.
        param: configuration object; settings read from param.P.
        SiPM_Matrix: the SiPM matrix slice this L1 instance serves.
        COMP: compression configuration forwarded to the encoder tools.
    """
    self.env = env
    self.SiPM_Matrix = SiPM_Matrix
    self.first_sipm = param.P['TOPOLOGY']['first_sipm']
    self.param = param
    self.out_stream = out_stream
    # output period derived from the configured L1 output rate (1E9 / rate)
    self.latency = int(1E9 / param.P['L1']['L1_outrate'])
    # two bounded FIFOs with configured depths
    self.fifoA = simpy.Store(self.env, capacity=param.P['L1']['FIFO_L1a_depth'])
    self.fifoB = simpy.Store(self.env, capacity=param.P['L1']['FIFO_L1b_depth'])
    # working buffers: rows of 6 columns each
    self.buffer_A = np.array([]).reshape(0, 6)
    self.buffer_B = np.array([]).reshape(0, 6)
    # self.flag = False
    self.frame_count = 0
    self.lostB = 0  # frames lost at buffer B
    # concurrent processes: pre-buffer loader, output link, frame processor
    self.action1 = env.process(self.PreBUFFER_load())
    self.action2 = env.process(self.L1_outlink())
    self.process_frames = env.process(self.process_frames_2BUF())
    self.act_buffer_proc = env.event()
    # self.act_ENCODER_proc = env.event()
    # capacity-1 resource used as a mutual-exclusion flag
    self.flag = simpy.Resource(self.env, capacity=1)
    # self.flag_ENC = simpy.Resource(self.env,capacity=1)
    # logs: rows of (value, timestamp)-style pairs
    self.logA = np.array([]).reshape(0, 2)
    self.logB = np.array([]).reshape(0, 2)
    self.logC = np.array([]).reshape(0, 2)
    self.n_rows = self.param.P['TOPOLOGY']['n_rows']
    # sentinel "empty" frame used to pad the output array
    self.empty = [{ 'data': [1, -1, -1, 0, 0],
                    'in_time': -1,
                    'out_time': 0 }]
    self.out_array = [self.empty for i in range(2)]
    self.i_out_array = 0
    # total sensors = (internal + external sipms per row) * number of rows
    n_sipms_int = param.P['TOPOLOGY']['sipm_int_row'] * param.P[
        'TOPOLOGY']['n_rows']
    n_sipms_ext = param.P['TOPOLOGY']['sipm_ext_row'] * param.P[
        'TOPOLOGY']['n_rows']
    self.n_sipms = n_sipms_int + n_sipms_ext
    # Let's find index of L1_Slice
    style = param.P['L1']['map_style']
    L1_Slice, SiPM_Matrix_I, SiPM_Matrix_O, topology = MAP.SiPM_Mapping(
        param.P, style)
    self.L1_id = L1_Slice.index(SiPM_Matrix)
    print("L1_id is %d" % self.L1_id)
    self.COMP = COMP
    kwargs = { 'n_rows': param.P['TOPOLOGY']['n_rows'],
               'COMP': COMP,
               'TE2': param.P['L1']['TE'],
               'n_sensors': 0 }
    self.compress = ET.encoder_tools(**kwargs)
def __init__(self, env, name, capacity):
    """Conveyor belt operator holding up to `capacity` items.

    Args:
        env: simpy environment.
        name: operator name, forwarded to BaseOperator.
        capacity: number of belt slots (integer, at least 2).
    """
    BaseOperator.__init__(self, env, name)
    self.capacity = capacity

    # default parameter values
    self.delay = 1
    self.start_time = 0.5

    # a conveyor belt needs room for at least two items
    assert (isinstance(capacity, int) and (capacity >= 2))

    # single-slot buffers at the first and last stages
    self.input_buf = simpy.Store(env, capacity=1)
    self.output_buf = simpy.Store(env, capacity=1)

    # one slot per belt position, all initially empty
    self.stages = [None] * capacity

    # state machine and behavior process
    self.define_states(["stalled", "moving"])
    self.process = env.process(self.behavior())
def __init__(self, env, qlimit=None):
    """Full-duplex endpoint wrapping an rx and a tx sub-block.

    Args:
        env: simpy environment.
        qlimit: queue limit forwarded to the tx sub-block.
    """
    self.env = env
    self.qlimit = qlimit

    # internal store resources
    self.s1 = simpy.Store(env)
    self.s2 = simpy.Store(env)
    self.s3 = simpy.Store(env)
    self.s4 = simpy.Store(env)

    # external connection points; the out_* sides are wired externally
    self.in_rx = self.s1
    self.in_tx = self.s4
    self.out_rx = None
    self.out_tx = None

    # sub-blocks with their outputs attached to internal stores
    self.obj_rx = rx(env)
    self.obj_rx.output = self.s2
    self.obj_tx = tx(env, self.qlimit)
    self.obj_tx.output = self.s3

    # packet currently being handled
    self.pkt = None

    # concurrent relay processes
    for runner in (self.p_1, self.p_2, self.p_3, self.p_4):
        self.action = env.process(runner())
def __init__(self, env, gid=None, qlimit=None):
    """Queueing element with gate-control and transmission-selection inputs.

    Args:
        env: simpy environment.
        gid: component id.
        qlimit: queue limit in packets.
    """
    self.env = env
    self.gid = gid
    self.qlimit = qlimit

    # working state
    self.priority = None
    self.var_gce = None

    # store resources
    self.S_in = simpy.Store(env)
    self.S_q = simpy.Store(env)
    self.S_gce = simpy.Store(env)
    self.S_tcs = simpy.Store(env)
    self.S_gcs = simpy.Store(env)

    # input connection points
    self.input = self.S_in
    self.gce = self.S_gce
    self.tcs = self.S_tcs

    # outputs; wired externally and must start as None
    self.output = None
    self.pas = None

    # concurrent processes: queue reader, transfer, gate-control handler
    for runner in (self.r_q, self.r_tr, self.r_gce):
        self.action = env.process(runner())
def __init__(self, sim_env, bus_id, data_rate, avg_ecu_dist=2):
    """CAN bus model with arbitration/transmission synchronization stores.

    Args:
        sim_env: simpy environment this bus acts in.
        bus_id: id of this bus object.
        data_rate: data rate of this bus.
        avg_ecu_dist: average distance between two connected ECUs.
    """
    AbstractCANBus.__init__(self, sim_env, bus_id, data_rate, avg_ecu_dist)

    # bus state
    self.current_message = None  # [sender_ecu, message] currently on the wire
    self.set_settings()
    self.monitor_list = RefList()
    self._used_prop_times = {}
    self.gateways = []
    self.first = True

    # synchronization objects
    self.pot_messages = []  # all messages contending to send at this instant
    # arbitration finished -> transmission may start
    self.sync_1 = simpy.Store(self.sim_env, capacity=1)
    # empty while the channel is busy
    self.sync_2 = simpy.Store(self.sim_env, capacity=1)
    # full while sending, free otherwise
    self.sync_send = simpy.Store(self.sim_env, capacity=1)
    self.subscribers = 0  # ECUs waiting for the channel to be freed

    # project timing parameters
    self.SCB_GATHER_MSGS = time.SCB_GATHER_MSGS
    self.SCB_GRAB_PRIO_MSG = time.SCB_GRAB_PRIO_MSG
    self.SCB_PROPAGATION_DELAY = time.SCB_PROPAGATION_DELAY
    self.SCB_SENDING_TIME = time.SCB_SENDING_TIME
    self.SCB_WRITE_TO_TRANSCEIVER_BUFFER = time.SCB_WRITE_TO_TRANSCEIVER_BUFFER
def __init__(self, env, gid=None, qlimit=None):
    """Gated queue element driven by a gate-control input.

    Args:
        env: simpy environment.
        gid: component id.
        qlimit: queue limit in packets.
    """
    self.env = env
    self.gid = gid
    self.qlimit = qlimit

    # last gate-control signal observed
    self.gate_signal = None

    # store resources
    self.S_in = simpy.Store(env)
    self.S_gc = simpy.Store(env)
    self.S_ge = simpy.Store(env)
    self.S_q = simpy.Store(env)

    # input connection points
    self.input = self.S_in
    self.in_gcl = self.S_gc

    # output; wired externally and must start as None
    self.output = None

    # concurrent processes: ingress, egress, gate-control handler
    for runner in (self.r_in, self.r_out, self.r_gc):
        self.action = env.process(runner())
def __init__(self, env, gid=None, qlimit='inf'):
    """Gated transfer element controlled by a gate-control list (GCL).

    Args:
        env: simpy environment.
        gid: component id.
        qlimit: per-packet queue limit (default 'inf' = unbounded).
    """
    self.env = env
    self.gid = gid
    self.qlimit = qlimit

    # current gate-enable state
    self.gate_en = None

    # store resources; the gate-enable store holds at most one token
    self.S_in = simpy.Store(env)
    self.S_gcl = simpy.Store(env)
    self.S_event_gate_enable = simpy.Store(env, capacity=1)

    # input connection points
    self.input = self.S_in
    self.in_gcl = self.S_gcl

    # output; wired externally and must start as None
    self.output = None

    # concurrent processes: GCL reader and packet transfer
    for runner in (self.r_in_gcl, self.r_transfer):
        self.action = env.process(runner())
def make_tree(self, shape, parent, max_node_size):
    """Recursively build the scheduling tree described by `shape`.

    `shape` is either an int (leaf node id) or a single-entry dict mapping an
    int node id to a list of child shapes. Every constructed node is recorded
    in self.nodes by id and returned.

    Fixes applied:
    - Python 2 `print >> sys.stderr` (a syntax error under Python 3) replaced
      with `print(..., file=sys.stderr)`.
    - `shape.keys()` / `shape.values()` are views in Python 3 and cannot be
      indexed; they are materialized into lists first.
    - The recursive call previously omitted the required `max_node_size`
      argument, which raised TypeError for any non-leaf shape.

    Args:
        shape: int leaf id, or {id: [child shapes]} for an internal node.
        parent: parent node (None for the root).
        max_node_size: maximum node size, propagated to every node.

    Returns:
        The constructed Scheduling_tree_node.
    """
    r_in_pipe = simpy.Store(self.env)
    r_out_pipe = simpy.Store(self.env)
    w_in_pipe = simpy.Store(self.env)
    w_out_pipe = simpy.Store(self.env)
    if type(shape) == int:
        # base case: a leaf node with no children
        children = []
        ID = shape
        node = Scheduling_tree_node(self.env, self.period, ID, r_in_pipe,
                                    r_out_pipe, w_in_pipe, w_out_pipe,
                                    children, parent, max_node_size)
    elif type(shape) == dict:
        keys = list(shape.keys())
        vals = list(shape.values())
        if len(keys) != 1 or type(keys[0]) != int or type(vals[0]) != list:
            # must be exactly one integer key with a list value
            print("ERROR: incorrect format of shape: {}".format(shape),
                  file=sys.stderr)
            sys.exit(1)
        ID = keys[0]
        node = Scheduling_tree_node(self.env, self.period, ID, r_in_pipe,
                                    r_out_pipe, w_in_pipe, w_out_pipe, [],
                                    parent, max_node_size)
        children = []
        for child in vals[0]:
            # propagate max_node_size down the tree (previously dropped)
            child_node = self.make_tree(child, node, max_node_size)
            children.append(child_node)
        node.children = children
    else:
        print("ERROR: incorrect format of shape: {}".format(shape),
              file=sys.stderr)
        sys.exit(1)
    self.nodes[ID] = node
    return node
def __init__(self, env, fwd_tbl=None):
    """Four-port forwarding element with an optional forwarding table.

    Args:
        env: simpy environment.
        fwd_tbl: forwarding table used by the routing processes.
    """
    self.env = env
    self.fwd_tbl = fwd_tbl

    # one store per ingress port plus an internal relay store
    self.s1 = simpy.Store(env)
    self.s2 = simpy.Store(env)
    self.s3 = simpy.Store(env)
    self.s4 = simpy.Store(env)
    self.s5 = simpy.Store(env)

    # ingress connection points
    self.in_1 = self.s1
    self.in_2 = self.s2
    self.in_3 = self.s3
    self.in_4 = self.s4

    # egress ports; wired externally and must start as None
    self.out_1 = None
    self.out_2 = None
    self.out_3 = None
    self.out_4 = None

    # spawn the per-port handlers plus the forwarding process
    for runner in (self.p_1, self.p_2, self.p_3, self.p_4, self.p_5):
        self.action = env.process(runner())
def __init__(self, env):
    """Splitter: one shared upstream channel, one downstream store per ONU."""
    self.env = env
    # shared upstream channel
    self.upstream = simpy.Store(env)
    # dedicated downstream channel for each ONU
    self.downstream = [simpy.Store(env) for _ in range(NUMBER_OF_ONUs)]
def __init__(self, env, name, ip, rate, flows, gateway, packet_size=1024,
             debug=False):
    """Network node with per-flow output ports and send/receive loops.

    Args:
        env: simpy environment.
        name: node name.
        ip: node address.
        rate: link rate.
        flows: number of flows (one output port slot per flow).
        gateway: default gateway for this node.
        packet_size: packet size in bytes.
        debug: enable debug output.
    """
    self.env = env
    self.name = name
    self.ip = ip
    self.rate = rate
    self.gateway = gateway
    self.n_flows = flows
    self.packet_size = packet_size
    self.debug = debug

    # I/O stores
    self.out_store = simpy.Store(env)
    self.in_store = simpy.Store(env)

    # port wiring; filled in externally
    self.out_ports = [None] * flows
    self.out_port = None
    self.in_port = None

    # counters
    self.packets_rec = 0
    self.packets_drop = 0
    self.busy = 0  # tracks whether a packet is currently being sent

    # start the concurrent send and receive loops
    self.send_action = env.process(self.send())
    self.recv_action = env.process(self.receive())
def __init__(self, id, env, network):
    """Network participant: identified, runnable and loggable.

    Args:
        id: unique id, forwarded to BaseObject.
        env: simpy environment, forwarded to Runnable.
        network: the network this object belongs to.
    """
    BaseObject.__init__(self, id)
    Runnable.__init__(self, env)
    Loggable.__init__(self)
    self._network = network
    self._id = id
    self._neighbors = {}
    # inbound stores: payload data and control messages
    self._data = simpy.Store(env)
    self._msg = simpy.Store(env)
def __init__(self, env, type, graph, vpon_scheduling, vpon_remove):
    """Request handler: queues arrivals/departures and runs both loops.

    Args:
        env: simpy environment.
        type: handler type.
        graph: network graph operated on.
        vpon_scheduling: VPON scheduling policy/function.
        vpon_remove: VPON removal policy/function.
    """
    self.env = env
    # arrival and departure queues
    self.requests = simpy.Store(self.env)
    self.departs = simpy.Store(self.env)
    # main allocation loop and the deallocation loop
    self.action = self.env.process(self.run())
    self.deallocation = self.env.process(self.depart_request())
    self.type = type
    self.graph = graph
    self.vpon_scheduling = vpon_scheduling
    self.vpon_remove = vpon_remove
def __init__(self, env, period):
    """Testbench state: pipes to/from the switch under test plus bookkeeping.

    Args:
        env: simpy environment.
        period: clock period of the simulated hardware.
    """
    self.env = env
    self.period = period

    # pipes between the testbench and the switch
    self.sw_ready_out_pipe = simpy.Store(env)
    self.sw_pkt_in_pipe = simpy.Store(env)
    self.sw_pkt_out_pipe = simpy.Store(env)
    self.start_dequeue_pipe = simpy.Store(env)

    # bookkeeping for injected packets
    self.input_done = False
    self.input_pkts = []
def __init__(self, env, name):
    """Node with a random id running concurrent run() and listen() loops.

    Args:
        env: simpy environment.
        name: node name.
    """
    self.name = name
    self.env = env
    # random identifier for this antenna/node
    self.antId = random.randrange(9999999)
    self.interface = None
    # queues of packets handled by the internal processes
    self.store = simpy.Store(env)
    self.names = simpy.Store(env)
    # start the concurrent main and listener loops
    self.action = env.process(self.run())
    self.action2 = env.process(self.listen())
    self.receivedPackets = list()
def __init__(self, env, period):
    """Testbench for Pkt_storage: wires the pointer/packet pipes and starts it.

    Args:
        env: simpy environment.
        period: clock period, forwarded to the base testbench.
    """
    super(Packet_storage_tb, self).__init__(env, period)

    # pointer and packet pipes into/out of the storage under test
    self.ptr_in_pipe = simpy.Store(env)
    self.ptr_out_pipe = simpy.Store(env)
    self.pkt_in_pipe = simpy.Store(env)
    self.pkt_out_pipe = simpy.Store(env)

    # device under test
    self.ps = Pkt_storage(env, period,
                          self.pkt_in_pipe, self.pkt_out_pipe,
                          self.ptr_in_pipe, self.ptr_out_pipe)
    self.run()
def __init__(self, env, name, delay=0, size=0):
    """Buffered port with an optional capacity limit.

    Args:
        env: simpy environment.
        name: port name.
        delay: forwarding delay used by the receive process.
        size: store capacity in packets; 0 means unbounded.
    """
    self.name = name
    self.env = env
    self.delay = delay
    # BUG FIX: this previously read `self.size = 0`, silently discarding the
    # `size` argument and making the bounded-capacity branch unreachable.
    self.size = size
    if self.size != 0:
        self.store = simpy.Store(self.env, capacity=self.size)
    else:
        # unbounded store when no capacity was requested
        self.store = simpy.Store(self.env)
    # NOTE: rebinds the method name to its Process handle; the bound method
    # is invoked once here, before the rebinding takes effect.
    self.receive = self.env.process(self.receive())
    # wired externally
    self.port_in = None
    self.port_out = None
def __init__(self, env, period, ready_out_pipe, pkt_in_pipe, pkt_out_pipe,
             start_dequeue_pipe, sched_tree_shape, sched_alg, istate=None,
             sched_node_size=None):
    """Switch pipeline: ingress -> scheduling tree (traffic manager) -> egress.

    Args:
        env: simpy environment.
        period: clock period forwarded to the base class and all stages.
        ready_out_pipe / pkt_in_pipe / pkt_out_pipe / start_dequeue_pipe:
            external pipes connecting the switch to the testbench.
        sched_tree_shape: shape descriptor for the scheduling tree.
        sched_alg: scheduling algorithm name; selects the shared state object.
        istate: initial state forwarded to the ingress pipe.
        sched_node_size: maximum node size for the scheduling tree.
    """
    super(Switch, self).__init__(env, period)
    self.ready_out_pipe = ready_out_pipe
    self.pkt_in_pipe = pkt_in_pipe
    self.pkt_out_pipe = pkt_out_pipe
    self.start_dequeue_pipe = start_dequeue_pipe
    # internal pipes between the pipeline stages
    ingress_tm_ready_pipe = simpy.Store(env)
    ingress_tm_pkt_pipe = simpy.Store(env)
    tm_egress_ready_pipe = simpy.Store(env)
    tm_egress_pkt_pipe = simpy.Store(env)
    ingress_egress_pipe = simpy.Store(env)
    # per-algorithm shared scheduler state (None where the algorithm is
    # stateless at the switch level).
    # NOTE(review): an unrecognized sched_alg leaves self.global_state unset,
    # which raises AttributeError below — confirm callers validate sched_alg.
    if sched_alg == "Invert_pkts":
        self.global_state = None
    elif sched_alg == "STFQ":
        self.global_state = STFQ_global_state()
    elif sched_alg == "HSTFQ":
        self.global_state = HSTFQ_global_state()
    elif sched_alg == "MinRate":
        self.global_state = None
    elif sched_alg == "RR":
        self.global_state = None
    elif sched_alg == "WRR":
        self.global_state = None
    elif sched_alg == "Strict":
        self.global_state = None
    # the three pipeline stages, chained through the internal pipes above
    self.ingress = IngressPipe(env, period, ingress_tm_ready_pipe,
                               self.pkt_in_pipe, ingress_tm_pkt_pipe,
                               self.global_state, sched_alg, istate)
    self.tm = Scheduling_tree(env, period, ingress_tm_ready_pipe,
                              tm_egress_ready_pipe, ingress_tm_pkt_pipe,
                              tm_egress_pkt_pipe, sched_tree_shape,
                              max_node_size=sched_node_size)
    self.egress = EgressPipe(env, period, tm_egress_ready_pipe,
                             self.ready_out_pipe, tm_egress_pkt_pipe,
                             self.pkt_out_pipe, self.start_dequeue_pipe,
                             self.global_state, sched_alg)
def network_setup(realtime=False):
    """Construct a Network backed by fresh simpy stores.

    Args:
        realtime: if True, use a RealtimeEnvironment (non-strict, factor 2)
            instead of a plain Environment.

    Returns:
        A fully wired Network instance.
    """
    import simpy
    if realtime:
        env = simpy.RealtimeEnvironment(strict=False, factor=2)
    else:
        env = simpy.Environment()
    # filterable stores for links/messages, plain stores for output and log
    available_links = simpy.FilterStore(env)
    pending_messages = simpy.FilterStore(env)
    send_messages = simpy.Store(env)
    log = simpy.Store(env)
    return Network(env, available_links, pending_messages, send_messages, log)
def setUp(self):
    """Build a 10-host cluster with randomized resources plus a mock scheduler."""
    env = simpy.Environment()
    dispatch_q = simpy.Store(env)
    notify_q = simpy.Store(env)
    rnd = np.random
    # ten hosts with random cpu/memory/disk/net figures
    hosts = []
    for i in range(10):
        hosts.append(Host(env, str(i),
                          rnd.randint(1, 10),
                          rnd.randint(4096, 10240),
                          rnd.randint(10240, 102400),
                          rnd.randint(1, 10)))
    self.env = env
    self.cluster = Cluster(env, dispatch_q, notify_q, hosts)
    self.scheduler = MockScheduler(env, dispatch_q, notify_q)
def __init__(self, period, delta, jitter1, jitter2, verbose=False):
    """Ring-oscillator beat experiment: two ROs, beat flip-flop, counter, output.

    Args:
        period: base oscillator period.
        delta: period offset of the second oscillator.
        jitter1: jitter of the first oscillator.
        jitter2: jitter of the second oscillator.
        verbose: enable verbose output in all components.
    """
    self.env = simpy.Environment()
    self.period = period
    self.delta = delta
    self.jitter1 = jitter1
    self.jitter2 = jitter2

    # broadcast and point-to-point pipes
    self.ro2_bc_pipe = BroadcastPipe(self.env)
    self.beat_bc_pipe = BroadcastPipe(self.env)
    self.counter_pipe = simpy.Store(self.env)
    self.q_pipe = simpy.Store(self.env)
    self.out_pipe = simpy.Store(self.env)

    # two ring oscillators; the second is detuned by `delta`
    self.ro1 = RO(self.env, self.period, self.jitter1, None, verbose=verbose)
    self.ro2 = RO(self.env, self.period + self.delta, self.jitter2,
                  self.ro2_bc_pipe, verbose=verbose)

    # beat detector, counter and randomness extractor
    self.beat_ff = FF(self.env, self.ro2_bc_pipe.get_output_conn(), self.ro1,
                      self.beat_bc_pipe, verbose=verbose)
    self.counter = Counter(self.env, self.ro2_bc_pipe.get_output_conn(),
                           self.beat_ff, self.q_pipe, verbose=verbose)
    self.rand_out = RandOut(self.env, self.beat_bc_pipe.get_output_conn(),
                            self.counter, verbose=verbose)

    # register every component as a simpy process
    self.ro1_proc = self.env.process(self.ro1.run())
    self.ro2_proc = self.env.process(self.ro2.run())
    self.beat_ff_proc = self.env.process(self.beat_ff.run())
    self.counter_proc = self.env.process(self.counter.run())
    self.rand_out_proc = self.env.process(self.rand_out.run())

    # collected measurement data
    self.data = []
def __init__(self, network, processor):
    """Endpoint attached to a network, dispatching events to a processor.

    Args:
        network: network this endpoint lives in (provides the environment).
        processor: handler object processing incoming work.
    """
    self.network = network
    self.env = network.env
    self.processor = processor
    self.address = None  # assigned when the endpoint connects

    # inbound work queue and pending asynchronous call results
    self.processing_queue = simpy.Store(network.env)
    self.async_calls = simpy.Store(network.env)

    # registered event handlers, keyed by hook name
    self.methods = {
        'on_message': [],
        'on_connect': [],
        'on_advertise': [],
        'on_disconnect': [],
    }
    self.keep_alive_interval = 20
def __init__(self, env, name='Operator', ships=None, n_margin=0):
    """Fleet operator: owns a pool of ships and a shared task list.

    Args:
        env: simpy environment, forwarded to the base class.
        name: operator name.
        ships: non-empty list of ships forming the fleet.
        n_margin: number of extra tasks to send as a safety margin.

    Raises:
        ValueError: if `ships` is empty or None.
    """
    super().__init__(env=env)
    self.name = name
    if not ships:
        raise ValueError("Non empty list of ships is required")

    # the fleet store holds idle ships; seed it with every ship
    self.fleet = simpy.Store(env, capacity=len(ships))
    for vessel in ships:
        self.fleet.put(vessel)

    # shared task list; any ship can pick tasks from it
    self.tasks = simpy.Store(env, capacity=len(ships))

    # number of extra tasks to send as a safety margin
    # TODO: implement ship -> operator communication for replanning
    self.n_margin = n_margin