def start_simulation(self,
                         order_list,
                         num_robots,
                         render_=True,
                         log_=True,
                         print_=True):
        """Build and run one complete AGV-fleet simulation.

        Sets up the SimPy environment, knowledge base and communication
        channels, spawns the MES, the fleet manager and ``num_robots``
        AGVs, runs until the MES main process finishes, writes a small
        summary file and returns aggregate cost figures.

        :param order_list: orders handed to the MES for dispatching
        :param num_robots: number of AGVs to spawn (IDs 1..num_robots)
        :param render_: when True, attach the online renderer
        :param log_: when True, attach the logger
        :param print_: verbosity flag forwarded to every component
        :return: tuple ``(simulation_time, travel_cost, charging_cost,
                 congestions)``
        """

        # Define simulation environment
        env = simpy.Environment()

        # Define knowledge base (SQL database)
        kb = self.define_knowledge_base(env)

        # Communication channels FleetManager -> AGV (virtual IP addresses):
        # one FilterStore per AGV, keyed by the 1-based AGV ID.
        # (Was a list comprehension used only for its side effects.)
        fm_to_agv_comm = {
            agv_id + 1: simpy.FilterStore(env)
            for agv_id in range(num_robots)
        }

        # Shared communication channel AGV -> FleetManager (virtual IP address)
        agv_to_fm_comm = simpy.FilterStore(env)

        # Define MES
        mes = MES(env, kb, order_list, print_)

        # Define Fleet Manager / Central Auctioneer
        # (presumably registers its own processes on env; no handle kept)
        FleetManager(env, kb, fm_to_agv_comm, agv_to_fm_comm, print_)

        # Define AGVs
        for ID in range(num_robots):
            agv_params = {
                'ID': ID + 1,
                'robot_speed': self.robot_speed,
                'task_execution_time': self.task_execution_time,
                # Start locations cycle over the first three entries.
                'start_location': self.start_locations[ID % 3],
                'depot_locations': self.depot_locations,
                'battery_threshold': self.battery_threshold,
                'collision_threshold': self.collision_threshold,
                'max_charging_time': self.max_charging_time,
                'max_tasks_in_task_list': self.max_tasks_in_task_list,
                'epsilon': self.epsilon,
                'initial_resources': self.initial_resources
            }
            AGV(env, agv_params, kb, fm_to_agv_comm[ID + 1], agv_to_fm_comm,
                print_)

        # Define logger
        if log_:
            Logger(env, kb, print_)

        # Define online renderer
        if render_:
            RendererOnline(env, kb, self.depot_locations,
                           self.charge_locations, print_)

        # Run environment until all tasks are executed
        env.run(until=mes.main)
        simulation_time = env.now - 1  # Correction for last time step

        # Persist a small run summary for post-processing
        with open("../logfiles/simulation_information.txt", "w") as file:
            file.write(str(num_robots) + '\n')
            file.write(str(simulation_time) + '\n')

        # Get simulation summary
        travel_cost = sum(get_travel_cost_per_robot())
        charging_cost = sum(get_charging_cost_per_robot())
        congestions = sum(get_congestions_per_robot()) / 2

        return simulation_time, travel_cost, charging_cost, congestions
Пример #2
0
        while True:
            print('Parked at %d' % self.env.now)
            parking_time = 5

            # process() returns to wait for parking_time
            yield self.env.timeout(parking_time)

            # after the timeout the code return here
            print('Start driving at %d' % self.env.now)

            trip_duration = 2
            yield self.env.timeout(trip_duration)
            # after driving the code will return here


# Driver: start the simulation clock at t=2 and run the Car process until t=15.
env = simpy.Environment(initial_time=2)
Car(env)  # Car (defined above, outside this excerpt) registers its process on env
env.run(until=15)

#-------------------------------------------------------------------------------

exit(0)
# Behind the scenes
coroutine


class Environment(BaseEnvironment):
    """Stripped-down sketch of simpy's Environment internals (illustrative)."""
    def __init__(self, initial_time=0):
        # Current simulation time.
        self._now = initial_time
        self._queue = []  # The list of all currently scheduled events.
        self._eid = count()  # Counter for event IDs (presumably itertools.count)
Пример #3
0
        self.action = env.process(self.run())

    def run(self):
        """Endless charge-then-drive cycle of the car process."""
        CHARGE_TIME = 5
        TRIP_TIME = 2
        while True:
            print('Start parking and charging at %d' % self.env.now)
            # Yield the sub-process returned by charge() and wait for it
            # to complete before resuming here.
            yield self.env.process(self.charge(CHARGE_TIME))

            # Charging is done -- drive for a fixed trip duration.
            print('Start driving at %d' % self.env.now)
            yield self.env.timeout(TRIP_TIME)

    def charge(self, duration):
        """Model charging as a plain timeout of *duration* time units."""
        charging = self.env.timeout(duration)
        yield charging


# Run the process
env = simpy.Environment()

# https://simpy.readthedocs.io/en/latest/api_reference/simpy.core.html#simpy.core.Environment
# Create a new Process instance for generator
# Note that the generator is created and set during class initialization
car = Car(env)  # Car is defined above, outside this excerpt

# https://simpy.readthedocs.io/en/latest/api_reference/simpy.core.html#simpy.core.Environment
# Executes step() until the given criterion until is met.
env.run(until=15)
Пример #4
0
def run_model(input_abstraction):
    """Simulate an agile delivery pipeline and return its loss value.

    Epics flow through BA -> Data BA -> Data Engineering -> QA -> Data BA
    teams; the loss combines cost-per-epic with a queue-length penalty.

    :param input_abstraction: 4-tuple ``(size_ba, size_data_ba,
        size_data_eng, size_qa)`` giving each team's worker count
    :return: ``loss_function = cost_per_epic + queue_penality``
    """

    import simpy
    import numpy.random as random
    import numpy as np
    import json
    import pickle
    import os

    from agile_team_model import plots
    from agile_team_model import globals
    # Initialise global variables
    globals.initialise_variables()

    size_ba, size_data_ba, size_data_eng, size_qa = input_abstraction

    # Hard codes as in main.py
    path_queue = "./team_queue/"
    path_epic = "./epic_progress/"

    # If not made, create
    if not os.path.exists(path_queue):
        os.makedirs(path_queue)

    if not os.path.exists(path_epic):
        os.makedirs(path_epic)

    class Team(simpy.Resource):
        """A team of ``num_workers`` workers modelled as one SimPy resource.

        Each worker has a work rate ``work_rate`` (epics per week); the
        team's combined rate is ``num_workers * work_rate``, so extra
        workers shorten an epic's processing time rather than adding
        parallel slots.

        NOTE(review): ``super().__init__(env)`` leaves the resource
        capacity at its default of 1, i.e. a team handles one epic at a
        time -- confirm this is intended rather than capacity=num_workers.
        """
        def __init__(self, env, num_workers, work_rate, name):
            self.env = env
            # We consider the team a resource,
            # each worker is not a resource otherwise:
            #self.workers = simpy.Resource(env, num_workers)
            super().__init__(env)
            self.queue_length = []  # (time, queue-length) samples for plotting
            self.team_capacity = []  # (time, capacity) samples for plotting
            self.num_workers = num_workers
            self.work_rate = work_rate
            self.name = name

        def process(self, ticket, epic_size):
            """Process one ``ticket``: a timeout scaled by team throughput."""
            process_time = epic_size / (self.num_workers * self.work_rate)
            yield self.env.timeout(process_time)

        def request(self, *args, **kwargs):
            # Sample capacity and queue length at every request, then defer
            # to the normal simpy.Resource request.
            self.team_capacity.append((self._env.now, self.capacity))
            self.queue_length.append((self._env.now, len(self.queue)))
            return super().request(*args, **kwargs)

        def release(self, *args, **kwargs):
            # Sample capacity and queue length at every release as well.
            self.team_capacity.append((self._env.now, self.capacity))
            self.queue_length.append((self._env.now, len(self.queue)))
            return super().release(*args, **kwargs)

    def epic(env, name, epic_size, team_list, data_dict):
        """Route one epic ``name`` of size ``epic_size`` through every
        team in ``team_list`` in order, recording arrive/enter/leave
        timestamps into ``data_dict`` keyed by epic name.
        """

        # Settup a dictionary to aggregate data
        if name not in data_dict:
            data_dict[name] = {}
            data_dict[name]['arrives'] = []
            data_dict[name]['enters'] = []
            data_dict[name]['leaves'] = []

        for team in team_list:
            data_dict[name]['arrives'].append((team.name, env.now))
            #print('%s arrives at the %s at time %.2f' % (name, team.name, env.now))
            with team.request() as req:

                yield req
                data_dict[name]['enters'].append((team.name, env.now))
                #print('%s enters the %s at time %.2f' % (name, team.name, env.now))

                yield env.process(team.process(name, epic_size))
                data_dict[name]['leaves'].append((team.name, env.now))
                #print('%s leaves the %s at %.2f' % (name, team.name, env.now))


    # NOTE(review): data_headers is currently unused in this function.
    data_headers = ['name', 'arrives_ba', 'arrives_data_ba', \
                    'arrives_data_eng', 'arrives_qa', 'arrives_handover']

    # Unsure how else to access these variables (use global for now)

    data_dict = {}

    # Setup and start the simulation
    print('Process Flow')

    # Initialise environmnet
    env = simpy.Environment()
    """
    Need to setup our flow 
    1) define teams
    team --> env, num_workers, work_rate (in epics per week), name 
    Start with a more or less balanced team (1 epic per 2 weeks / sprint) 
    """

    ba = Team(env, size_ba, 0.5, 'BA team')  # 1 epic per 2 weeks
    data_ba = Team(env, size_data_ba, 0.2,
                   'Data BA team')  # 1 epic per 2 weeks
    data_engineering = Team(env, size_data_eng, 0.33,
                            'Data Engineering team')  # 1 epic per 1 week
    qa = Team(env, size_qa, 0.5, 'Data QA team')  # 1 epic per 1 week

    # 2) Define the order of the teams
    # (data_ba appears twice: epics revisit the Data BA team at the end)
    team_list = [ba, data_ba, data_engineering, qa, data_ba]

    # 3) Create backlog
    epic_list = []

    for i in range(globals.amount_of_epics):
        # Could make epics random lengths (or not!)
        # epic_size = random.uniform(1,6)
        epic_size = 2
        epic_list.append(epic_size)
        env.process(epic(env, 'Epic %d' % i, epic_size, team_list, data_dict))

    # Define length of simulation
    # Length is not important -- simulation will stop when backlog is empty
    years = 3

    # Execute!
    env.run(until=years * 52)

    # Save data out?

    # Calculate cost
    # Find time when team is finished
    # NOTE(review): keys sort lexicographically, so e.g. 'Epic 9' sorts
    # after 'Epic 10' -- last_epic is not necessarily the highest-numbered
    # epic; confirm intended.
    last_epic = sorted(list(data_dict.keys()))[-1]
    team, last_time = map(list, zip(*data_dict[last_epic]['leaves']))
    last_time = max(last_time)

    team_size = np.sum([
        ba.num_workers, data_ba.num_workers, data_engineering.num_workers,
        qa.num_workers
    ])

    # Cost model: workers * weeks * 5 days * 1000 (currency units per day)
    cost = team_size * last_time * 5 * 1000

    # minimise queue lengths -- maximise value?
    total_queue = []
    for team in team_list:
        team, queue = map(list, zip(*team.queue_length))
        total_queue.append(np.sum(queue))
    total_queue = np.sum(total_queue)

    cost_per_epic = cost / len(data_dict.keys())
    queue_penality = total_queue * 100

    loss_function = cost_per_epic + queue_penality

    print('%s epics were processed' %
          (int(''.join(filter(str.isdigit, last_epic))) + 1))
    print('The final time is: %.2d weeks' % last_time)
    print('The cost per epic is: £%.2d K' % (cost_per_epic / 1000))
    print('The queue penality is: £%.2d K' % (queue_penality / 1000))
    print('The loss function is: £%.2d K' % (loss_function / 1000))
    print('\n')
    print('The team size is: %s' % team_size)
    print('There are %s BAs' % ba.num_workers)
    print('There are %s Data BAs' % data_ba.num_workers)
    print('There are %s Data Engineers' % data_engineering.num_workers)
    print('There are %s Data QAs' % qa.num_workers)

    # Append this run's headline numbers to the project-global result lists.
    globals.g_cost_per_epic.append(cost_per_epic / 1000)
    globals.g_final_time.append(last_time)
    globals.g_team_size.append(team_size)
    globals.g_amount_ba.append(ba.num_workers)
    globals.g_amount_dba.append(data_ba.num_workers)
    globals.g_amount_data_eng.append(data_engineering.num_workers)
    globals.g_amount_qa.append(qa.num_workers)

    # Plots:
    plots.plot_queue_len(data_dict, team_list, ba, data_ba, data_engineering,
                         qa, path_queue)
    plots.plot_epic_progress(data_dict, ba, data_ba, data_engineering, qa,
                             path_epic)

    return loss_function
Пример #5
0
def simulate_scenario(nrNodes):
    """Simulate ``nrNodes`` LoRa nodes transmitting to a base station.

    Relies on module-level globals defined outside this excerpt
    (``distance``, ``Prx``, ``sensi``, ``frequency``, ``nodes``,
    ``packetsAtBS``, ``maxBSReceives``, ``beacon_time``, ``back_off``,
    ``bsId``, ``avgSendTime``, ``packetlen``, ``total_data``, ``Ptx``,
    ``Lpl``, ``c`` and the nrLost/nrCollisions/nrReceived/nrProcessed
    counters).

    :param nrNodes: number of nodes to create
    :return: ``[sent, nrCollisions, nrLost, nrProcessed, nrReceived]``
    """
    env = simpy.Environment()

    def checkcollision(packet):
        """Mark ``packet`` processed/collided against packets at the BS."""
        col = 0  # flag needed since there might be several collisions for packet
        processing = 0
        #print ("MAX RECEIVE IS: ", maxBSReceives)
        for i in range(0, len(packetsAtBS)):
            if packetsAtBS[i].packet.processed == 1:
                processing = processing + 1
        if (processing > maxBSReceives):
            # NOTE(review): the template and argument are passed as two
            # separate print arguments (no .format), so the braces print
            # literally -- probably meant to use .format(env.now) style.
            print(
                "{:3.5f} || Too much packets on Base Sattion.. Packet will be lost!",
                len(packetsAtBS))
            packet.processed = 0
        else:
            packet.processed = 1

        if packetsAtBS:
            print(
                "{:3.5f} || >> FOUND overlap... node {} (sf:{} bw:{} freq:{}) others: {}"
                .format(env.now, packet.nodeid, packet.sf, packet.bw,
                        packet.freq, len(packetsAtBS)))
            for other in packetsAtBS:
                if other.nodeid != packet.nodeid:
                    print(
                        "{:3.5f} || >> node {} overlapped with node {} (sf:{} bw:{} freq:{}). Let's check Freq..."
                        .format(env.now, packet.nodeid, other.nodeid,
                                other.packet.sf, other.packet.bw,
                                other.packet.freq))
                    # simple collision
                    #if frequencyCollision(packet, other.packet) and sfCollision(packet, other.packet):
                    if frequencyCollision(packet,
                                          other.packet) and sfCollision(
                                              packet, other.packet):
                        # =============================================================================
                        #                     if timingCollision(packet, other.packet):
                        #                        # check who collides in the power domain
                        #                        c = powerCollision(packet, other.packet)
                        #                        # mark all the collided packets
                        #                        # either this one, the other one, or both
                        #                        for p in c:
                        #                            p.collided = 1
                        #                            if p == packet:
                        #                                col = 1
                        # =============================================================================

                        packet.collided = 1
                        other.packet.collided = 1  # other also got lost, if it wasn't lost already
                        col = 1

            return col
        return 0

    ###frequencyCollision, CONDITIONS###

    ##|f1-f2| <= 120 kHz if f1 or f2 has bw 500
    ##|f1-f2| <= 60 kHz if f1 or f2 has bw 250
    ##|f1-f2| <= 30 kHz if f1 or f2 has bw 125
    # NOTE(review): the 'p2.freq == 500' / 'p2.freq == 250' comparisons
    # below look like typos for 'p2.bw == ...' (bug inherited from the
    # original LoRaSim code) -- confirm before relying on these branches.
    def frequencyCollision(p1, p2):
        if (abs(p1.freq - p2.freq) <= 120
                and (p1.bw == 500 or p2.freq == 500)):
            print(
                "{:3.5f} || >> freq coll on node {} and node {}.. Let's check SF..."
                .format(env.now, p1.nodeid, p2.nodeid))
            return True
        elif (abs(p1.freq - p2.freq) <= 60
              and (p1.bw == 250 or p2.freq == 250)):
            print(
                "{:3.5f} || >> freq coll on node {} and node {}.. Let's check SF..."
                .format(env.now, p1.nodeid, p2.nodeid))
            return True
        else:
            if (abs(p1.freq - p2.freq) <= 30):
                print(
                    "{:3.5f} || >> Freq coll on node {} and node {}.. Let's check SF..."
                    .format(env.now, p1.nodeid, p2.nodeid))
                return True
            #else:
        print("{:3.5f} || >> No frequency collision..".format(env.now))
        return False

    #FOLLOWING FUNCTION NOT USED
    def channelCollision(p1, p2):
        """True when both packets occupy the same channel."""
        if (p1.ch == p2.ch):
            print(
                "{:3.5f} || >> channel coll for ch {} on node {} and ch {} on node {}.. Let's check SF..."
                .format(env.now, p1.ch, p1.nodeid, p2.ch, p2.nodeid))
            return True
        else:
            print("{:3.5f} || >> No channel collision..".format(env.now))
            return False

    def sfCollision(p1, p2):
        """True when both packets use the same spreading factor."""
        if p1.sf == p2.sf:
            print(
                "{:3.5f} || >> COLLISION! SF coll on node {} and node {} (ie same SF)..."
                .format(env.now, p1.nodeid, p2.nodeid))
            # p2 may have been lost too, will be marked by other checks
            return True
        print("{:3.5f} || >> No SF Collision!".format(env.now))
        return False

    def timingCollision(p1, p2):
        # assuming p1 is the freshly arrived packet and this is the last check
        # we've already determined that p1 is a weak packet, so the only
        # way we can win is by being late enough (only the first n - 5 preamble symbols overlap)

        # assuming 8 preamble symbols
        Npream = 8

        # we can lose at most (Npream - 5) * Tsym of our preamble
        Tpreamb = 2**p1.sf / (1.0 * p1.bw) * (Npream - 5)

        # check whether p2 ends in p1's critical section
        p2_end = p2.addTime + p2.rectime
        p1_cs = env.now + (Tpreamb / 1000.0)  # to sec
        ##print ("{} || >> collision timing node {} ({},{},{}) node {} ({},{})".format(env.now,p1.nodeid, env.now - env.now, p1_cs - env.now, p1.rectime,p2.nodeid, p2.addTime - env.now, p2_end - env.now))
        if p1_cs < p2_end:
            # p1 collided with p2 and lost
            print("{:3.5f} || not late enough.. Timing collision...".format(
                env.now))
            return True
        print("{:3.5f} || Saved by the preamble.. No timing collision!".format(
            env.now))
        return False

    def powerCollision(p1, p2):
        """Return the tuple of packets lost in the power domain."""
        powerThreshold = 6  # dB
        print(
            "{:3.5f} || power: node {} {:3.2f} dBm, node {} {:3.2f}; diff is {}dBm"
            .format(
                env.now, p1.nodeid, p1.rssi[math.ceil(env.now)], p2.nodeid,
                p2.rssi[math.ceil(env.now)],
                round(
                    p1.rssi[math.ceil(env.now)] - p2.rssi[math.ceil(env.now)],
                    2)))
        #print ("pwr: node {0.nodeid} {0.rssi:3.2f} dBm node {1.nodeid} {1.rssi:3.2f} dBm; diff {2:3.2f} dBm".format(p1, p2, round(p1.rssi[math.ceil(env.now)] - p2.rssi[math.ceil(env.now)],2)))
        if abs(p1.rssi[math.ceil(env.now)] -
               p2.rssi[math.ceil(env.now)]) < powerThreshold:
            print("{:3.5f} || Collision power both node {} and node {}".format(
                env.now, p1.nodeid, p2.nodeid))
            # packets are too close to each other, both collide
            # return both packets as casualties
            return (p1, p2)
        elif p1.rssi[math.ceil(env.now)] - p2.rssi[math.ceil(
                env.now)] < powerThreshold:
            # p2 overpowered p1, return p1 as casualty
            print("{:3.5f} || Collision pwr node {} has overpowered node {}".
                  format(env.now, p2.nodeid, p1.nodeid))
            return (p1, )
        print("{:3.5f} || p1 wins, p2 lost".format(env.now))
        # p2 was the weaker packet, return it as a casualty
        return (p2, )

    class myNode():
        """One LoRa end node with its current packet and statistics."""
        def __init__(self, nodeid, bs, avgSendTime, packetlen, total_data):
            global channel
            self.nodeid = nodeid
            self.avgSendTime = avgSendTime
            self.bs = bs
            # Per-time-step distances to the (moving) base station.
            self.dist = distance[nodeid, :]
            self.mindist = np.amin(distance[nodeid, :])
            self.mindist_pos = int(
                np.where(
                    distance[nodeid, :] == np.amin(distance[nodeid, :]))[0])
            #print('node %d' %nodeid, "dist: ", self.dist[0])
            self.buffer = total_data
            self.packetlen = packetlen
            #self.ch = int(random.choice(channel))
            self.packet = myPacket(self.nodeid, packetlen, self.dist)
            self.sent = 0  #INITIAL SENT PACKETS
            self.totalLost = 0  #INITIAL TOTAL LOST FOR PARTICULAR NODE
            self.totalColl = 0
            self.totalRec = 0
            self.totalProc = 0

    class myPacket():
        """A packet with randomly chosen SF/frequency and derived timings."""
        def __init__(self, nodeid, packetlen, dist):
            #global experiment
            global Ptx
            global Prx
            #global gamma
            #global d0
            #global var
            global Lpl
            #global freq
            #global GL
            global c
            global distance
            global channel
            global frequency
            SF = [7, 8, 9, 10, 11, 12]

            self.nodeid = nodeid
            self.txpow = Ptx
            self.sf = random.choice(SF)
            self.cr = 1  ##CODING RATE
            self.bw = 125
            # for experiment 3 find the best setting
            # OBS, some hardcoded values
            #Prx = self.txpow  ## zero path loss by default
            #Prx = self.txpow + G_device + G_sat - Lpl_node

            # transmission range, needs update XXX
            self.transRange = 150
            self.pl = packetlen
            self.symTime = (2.0**self.sf) / self.bw
            self.arriveTime = 0
            self.rssi = Prx[nodeid, :]
            self.freq = int(random.choice(frequency))

            #self.ch = int(random.choice(channel))
            # frequencies: lower bound + number of 61 Hz steps
            #self.freq = 860000000 + random.randint(0,2622950)

            # for certain experiments override these and
            # choose some random frequences
            #print ("frequency" ,self.freq, "symTime ", self.symTime)
            #print ("bw", self.bw, "sf", self.sf, "cr", self.cr, "rssi", self.rssi)

            self.rectime = airtime(
                self.sf, self.cr, self.pl,
                self.bw)  ##RECTIME IS THE RECEPTION TIME (ie AIRTIME)
            self.proptime = distance[nodeid, :] * (1 / c)
            #print ("rectime node ", self.nodeid, "  ", self.rectime)
            #print ("Airtime for node {} is {} [seconds]".format(self.nodeid,self.rectime)) #from https://www.loratools.nl/#/airtime
            # denote if packet is collided
            self.collided = 0
            self.processed = 0
            # NOTE(review): this assigns the built-in type `bool` itself
            # (a truthy value), not False -- presumably a typo; transmit()
            # resets it to True/False before most uses, but confirm.
            self.lost = bool

    def airtime(sf, cr, pl, bw):
        """LoRa airtime (seconds) for the given SF/CR/payload/bandwidth."""
        H = 0  # implicit header disabled (H=0) or not (H=1)
        DE = 0  # low data rate optimization enabled (=1) or not (=0)
        Npream = 8  # number of preamble symbol (12.25  from Utz paper)

        if bw == 125 and sf in [11, 12]:
            # low data rate optimization mandated for BW125 with SF11 and SF12
            DE = 1
        if sf == 6:
            # can only have implicit header with SF6
            H = 1

        Tsym = (2.0**sf) / bw
        Tpream = (Npream + 4.25) * Tsym
        #print ("PARAMS FOR TRANSMISION: sf", sf, " cr", cr, "pl", pl, "bw", bw)
        payloadSymbNB = 8 + max(
            math.ceil((8.0 * pl - 4.0 * sf + 28 + 16 - 20 * H) /
                      (4.0 * (sf - 2 * DE))) * (cr + 4), 0)
        Tpayload = payloadSymbNB * Tsym
        return ((Tpream + Tpayload) / 1000)  ##IN SECS

    def transmit(env, node):
        """Beacon-synchronised transmit loop for one node (SimPy process)."""
        #while nodes[node.nodeid].buffer > 0.0:
        global wait_min
        global wait_max
        global back_off
        global beacon_time
        # NOTE(review): if the first beacon finds the node already in
        # packetsAtBS, 'wait'/'trySend' below would be unbound when used
        # at the end of the loop body -- confirm this path cannot occur.
        while node.buffer > 0.0:
            #node.packet.sf = 12
            yield env.timeout(node.packet.rectime +
                              float(node.packet.proptime[math.ceil(
                                  env.now)]))  ##GIVE TIME TO RECEIVE BEACON

            if node in packetsAtBS:
                print("{:3.5f} || ERROR: packet is already in...".format(
                    env.now))
            else:
                # Check beacon reachability at SF12 sensitivity first.
                sensibility = sensi[12 - 7,
                                    [125, 250, 500].index(node.packet.bw) + 1]
                if node.packet.rssi[math.ceil(
                        env.now
                )] < sensibility:  #HERE WE ARE CONSIDERING RSSI AT TIME ENV.NOW
                    print("{:3.5f} || Node {}: Can not reach beacon due Lpl".
                          format(env.now, node.nodeid))
                    wait = 0  ##LETS WAIT FOR NEXT BEACON
                    node.packet.lost = False
                    trySend = False

                else:
                    wait = random.uniform(
                        0, back_off - node.packet.rectime -
                        float(node.packet.proptime[math.ceil(
                            env.now)]))  ##TRIGGER BACK-OFF TIME
                    yield env.timeout(wait)
                    print("{:3.5f} || Node {} begins to transmit a packet".
                          format(env.now, node.nodeid))
                    trySend = True
                    node.sent = node.sent + 1
                    node.buffer = node.buffer - node.packetlen
                    if node in packetsAtBS:
                        print("{} || ERROR: packet is already in...".format(
                            env.now))
                    else:
                        # Now check reachability at the packet's own SF.
                        sensibility = sensi[
                            node.packet.sf - 7,
                            [125, 250, 500].index(node.packet.bw) + 1]
                        if node.packet.rssi[math.ceil(
                                env.now
                        )] < sensibility:  #HERE WE ARE CONSIDERING RSSI AT TIME ENV.NOW
                            print(
                                "{:3.5f} || Node {}: The Packet will be Lost due Lpl"
                                .format(env.now, node.nodeid))
                            node.packet.lost = True  ## LOST ONLY CONSIDERING Lpl
                        else:
                            node.packet.lost = False  ## LOST ONLY CONSIDERING Lpl
                            print("{:3.5f} || Prx for node {} is {:3.2f} dB".
                                  format(env.now, node.nodeid,
                                         node.packet.rssi[math.ceil(env.now)]))
                            #print ("Prx for node",node.nodeid, "is: ",node.packet.rssi[math.ceil(env.now)],"at time",env.now)
                            print(
                                "{:3.5f} || Let's try if there are collisions..."
                                .format(env.now))
                            if (checkcollision(node.packet) == 1):
                                node.packet.collided = 1
                            else:
                                node.packet.collided = 0
                                print("{:3.5f} || ...No Collision by now!".
                                      format(env.now))
                            packetsAtBS.append(node)
                            node.packet.addTime = env.now
                            yield env.timeout(node.packet.rectime)

            # Update global and per-node statistics for this beacon cycle.
            if node.packet.lost:
                global nrLost
                nrLost += 1
                node.totalLost += 1  #ONLY DUE Lpl
            if node.packet.collided == 1:
                global nrCollisions
                nrCollisions = nrCollisions + 1
                node.totalColl += 1

            if node.packet.collided == 0 and node.packet.processed == 1 and not node.packet.lost and trySend:
                global nrReceived
                nrReceived = nrReceived + 1
                node.totalRec += 1
            if node.packet.processed == 1:
                global nrProcessed
                nrProcessed = nrProcessed + 1
                node.totalProc += 1
            # complete packet has been received by base station
            # Let's remove from Base Station
            if (node in packetsAtBS):
                packetsAtBS.remove(node)
                # reset the packet
            node.packet.collided = 0
            node.packet.processed = 0
            node.packet.lost = False
            #node.packet.sf = 12

            #yield env.timeout(beacon_time-wait-node.packet.rectime)
            if trySend:
                yield env.timeout(beacon_time - wait - 2 * node.packet.rectime)
            else:
                yield env.timeout(beacon_time - wait - node.packet.rectime)

    def beacon(env):
        """Emit a beacon immediately, then every ``beacon_time`` units."""
        global beacon_time
        i = 0
        while True:
            if i == 0:
                yield env.timeout(0)
            else:
                yield env.timeout(beacon_time)
            i = i + 1
            print(
                "{:3.5f} || ***A new beacon has been sended from Satellite***".
                format(env.now))

    env.process(beacon(env))  ##BEACON SENDER

    ### THIS IS GOING TO CREATE NODES AND DO TRAMSMISIONS. IS THE MAIN PROGRAM ###
    for i in range(nrNodes):
        node = myNode(i, bsId, avgSendTime, packetlen, total_data)
        nodes.append(node)
        env.process(transmit(env, node))

    env.run(until=600)

    sent = sum(n.sent for n in nodes)

    return ([sent, nrCollisions, nrLost, nrProcessed, nrReceived])
Пример #6
0
                            yield env.timeout(1)

        # Returns the amount of RAM allocated
        with RAM.put(RAMQuantity) as returnRAM:
            yield env.timeout(1)
            yield returnRAM

    # Records the process time
    totalTime = env.now - timeReceived
    print('%s TERMINATED in %f' % (name, totalTime))
    timeValues.append(totalTime)
    print()


# Conditions for the simulating environment
# (RAMCapacity, CPUCores etc. are defined earlier in the full script,
# outside this excerpt.)
env = simpy.Environment()  # Simulation environment
RAM = simpy.Container(env, init=RAMCapacity, capacity=RAMCapacity)
CPU = simpy.Resource(env, capacity=CPUCores)
WAITING = simpy.Resource(env, capacity=1)  # Queue of I/O


# Runs the processes
def main(environment, ram, cpu, waiting):
    """Process generator: spawn the simulated OS processes.

    Creates ``numberOfProcesses`` `proceso` processes on *environment*,
    spacing arrivals by an exponential inter-arrival time.

    Fix: the original scheduled on the module-level global ``env`` instead
    of the ``environment`` parameter; all scheduling now goes through the
    parameter (identical behaviour for the actual call ``main(env, ...)``).

    :param environment: the simpy.Environment to schedule on
    :param ram: RAM container handed to each spawned process
    :param cpu: CPU resource handed to each spawned process
    :param waiting: I/O-wait resource handed to each spawned process
    """
    for i in range(numberOfProcesses):
        environment.process(
            proceso("PROCESS %d" % i, environment, cpu, waiting, ram))
        # Exponential inter-arrival gap between process creations.
        yield environment.timeout(
            random.expovariate(1.0 / instructionInterval))


# Runs the environment
env.process(main(env, RAM, CPU, WAITING))
env.run()  # no 'until': runs until the event queue is exhausted
def api_invoker(has_ventil,
                number_sfcs,
                vari_choice,
                oper_choice=None,
                bw_sfc_choice=None,
                nc_cases=None):
    """Drive the SFC-flow simulator for a batch of SFC runs.

    Loads the flow definition CSV, publishes its columns into module
    globals consumed by ``startSimulator``, then runs one SimPy
    environment per SFC.

    :param has_ventil: "no"/"undefined" selects the no-ventilation flow
        CSV, anything else the ventilated flow CSV
    :param number_sfcs: number of SFCs (simulation runs) requested
    :param vari_choice: time variation in percent at the chosen operation
    :param oper_choice: operation id at which to simulate a bottleneck
    :param bw_sfc_choice: run index of the breakdown; random when omitted
    :param nc_cases: number of extra non-conformance runs to append
    :return: timestamp string of the last simulated run (``last_date_ts``)
    """
    global ventil_type
    global iter_sfcs
    global oper_bk_count
    global op_choice
    global last_date_ts
    ventil_type = has_ventil
    print(os.getcwd())
    os.chdir(simpy_path)  # module-level path to the model's CSV files
    # "no" and "undefined" ventilation both use the no-ventilation file.
    if has_ventil in ("no", "undefined"):
        df1 = pd.read_csv('monty2_sfcflow_noventil.csv', sep=';')
    else:
        df1 = pd.read_csv('monty2_sfcflow.csv', sep=';')
    # Total runs = requested SFCs plus any non-conformance cases.
    iter_sfcs = int(number_sfcs) if nc_cases is None else int(
        number_sfcs) + int(nc_cases)
    # Breakdown position: caller-chosen, otherwise random; shifted past
    # the non-conformance runs when present.
    oper_bk_count = randint(0, iter_sfcs) if bw_sfc_choice is None else int(
        bw_sfc_choice)
    if nc_cases is not None:
        oper_bk_count = oper_bk_count + int(nc_cases)
    print("Breakdown will occur at")
    print(oper_bk_count)
    global cols_list
    global resource_list
    global wc_list
    global processing_time
    global waiting_time
    global ops_list
    global start_time
    global var_choice
    global per_choice
    global nc_count
    nc_count = 0 if nc_cases is None else int(nc_cases)
    # Publish the flow definition columns for startSimulator to consume.
    cols_list = df1.columns
    resource_list = df1['RESRCE']
    wc_list = df1['WORK_CENTER']
    processing_time = df1['PROCESSING_TIME_SECS']
    waiting_time = df1['WAITING_TIME_SECS']
    ops_list = df1['OPERATION']
    print('Total number of Simulation runs including NonConformance is')
    print(iter_sfcs)
    op_choice = oper_choice
    var_choice = int(vari_choice)
    per_choice = 100 - random.randint(10, 99)
    start_time = time.time()
    for xt in range(iter_sfcs):
        if xt != 0:
            # Subsequent runs start 2-4 simulated minutes before the
            # previous run's last timestamp.
            ts_comp = time.mktime(
                datetime.strptime(last_date_ts,
                                  "%d-%m-%Y %H:%M:%S").timetuple())
            start_time = ts_comp - (random.randint(2, 4) * 60)
        env = simpy.Environment(initial_time=start_time)
        env.process(startSimulator(env, df1, xt))
        env.run()

    return last_date_ts
Пример #8
0
def run_vaccination_simulation(NUM_REPS, RANDOM_SEED, NUM_CHECKIN, CHECKIN_TIME, PATIENT_INTER, SIM_TIME, NUM_VACCINATORS, VACCINATION_TIME, NUM_ADVERSEWAIT, ADVERSEWAIT_TIME):
    """Run NUM_REPS replications of a three-stage vaccination-clinic queueing
    simulation (check-in -> vaccination -> adverse-event monitoring) and
    return statistics averaged across the replications.

    Patients arrive with exponential inter-arrival times (mean PATIENT_INTER)
    and pass through three capacity-limited stages; check-in and vaccination
    service times are triangular around CHECKIN_TIME / VACCINATION_TIME, and
    adverse monitoring lasts exactly ADVERSEWAIT_TIME. Each replication runs
    for SIM_TIME simulated time units.

    Returns a 10-element list:
    [mean check-in wait, mean # queued for check-in,
     mean vaccination wait, mean # queued for vaccination,
     mean adverse-monitoring wait, mean # queued for monitoring,
     conf_interval(total facility times), mean total facility time,
     conf_interval(# vaccinated), mean # vaccinated].

    NOTE(review): `st` is presumably Streamlit (progress bar) and
    `conf_interval` a sibling helper — confirm both are in scope.
    NOTE(review): random.seed(RANDOM_SEED) is planted each replication, but
    all sampling uses np.random, which is never seeded here, so RANDOM_SEED
    does not actually make the draws reproducible — confirm intent.
    """
    # Per-replication aggregates; averaged across replications at the end.
    output_checkin_waittime = []
    output_checkin_waitnum = []
    output_vaccination_waittime = []
    output_vaccination_waitnum = []
    output_adverse_waittime = []
    output_adverse_waitnum = []
    output_total_facility_time = []
    output_num_vaccinated = []

    my_bar = st.progress(0.0)
    reps = math.ceil(NUM_REPS)
    for replication in range(0,reps):
        # Fraction of replications already finished (bar updated after rep).
        percent_complete = float(replication/reps)
        # Event-timestamp logs appended by the patient processes below
        # (closure state, re-created fresh for every replication).
        facility_arrival_times = []
        checkin_begin_times = []
        checkin_end_times = []
        vaccination_begin_times = []
        vaccination_end_times = []
        adverse_begin_times = []
        adverse_end_times = []
        facility_departure_times = []


        class Vaccination_Clinic(object):
            # Bundles the three capacity-limited stages as SimPy resources.
            # NOTE(review): checkintime / vaccinationtime / adversewaittime
            # are stored but never read — the service methods below use the
            # enclosing function's arguments directly.
            def __init__(self, env, num_checkin, checkin_time, num_vaccinators, vaccination_time, num_adversewait, adversewait_time):
                self.env = env
                self.checkin_personnel = simpy.Resource(env, num_checkin)
                self.checkintime = checkin_time
                self.vaccination_booth = simpy.Resource(env, num_vaccinators)
                self.vaccinationtime = vaccination_time
                self.adverse_event_spot = simpy.Resource(env, num_adversewait)
                self.adversewaittime = adversewait_time

            def checkin(self, patient):
                # Triangular service time centred on CHECKIN_TIME (floor 0.2).
                yield self.env.timeout(np.random.triangular(max(0.2, CHECKIN_TIME - 1), CHECKIN_TIME, CHECKIN_TIME + 1))


            def vaccinate(self, patient):
                # Triangular service time centred on VACCINATION_TIME (floor 0.2).
                yield self.env.timeout(np.random.triangular(max(VACCINATION_TIME - 1, 0.2), VACCINATION_TIME, VACCINATION_TIME + 1))

            def monitor_adverse(self, patient):
                # Fixed-length post-vaccination observation period.
                yield self.env.timeout(ADVERSEWAIT_TIME)

        def patient(env, name, vac):
            # One patient's path through all three stages; each stage is
            # "queue for the resource, then receive service" while the
            # timestamps of each milestone are recorded.
            facility_arrival_times.append(env.now)
            with vac.checkin_personnel.request() as request:
                yield request

                checkin_begin_times.append(env.now)
                yield env.process(vac.checkin(name))
                checkin_end_times.append(env.now)

            with vac.vaccination_booth.request() as request:
                yield request
                vaccination_begin_times.append(env.now)
                yield env.process(vac.vaccinate(name))
                vaccination_end_times.append(env.now)

            with vac.adverse_event_spot.request() as request:
                yield request
                adverse_begin_times.append(env.now)
                yield env.process(vac.monitor_adverse(name))
                adverse_end_times.append(env.now)
                facility_departure_times.append(env.now)

        def setup(env, num_checkin, checkin_time, num_vaccinators, vaccination_time, num_adversewait, adversewait_time, patient_inter):
            # Source process: spawn patients with exponential inter-arrivals.
            vaccinationclinic = Vaccination_Clinic(env, num_checkin, checkin_time, num_vaccinators, vaccination_time, num_adversewait, adversewait_time)
            i = 0
            while True:
                yield env.timeout(np.random.exponential(scale=patient_inter))
                i += 1
                env.process(patient(env, 'Patient %d' % i, vaccinationclinic))


        random.seed(RANDOM_SEED)

        # Create an environment and start the setup process
        env = simpy.Environment()
        env.process(setup(env, NUM_CHECKIN, CHECKIN_TIME, NUM_VACCINATORS, VACCINATION_TIME, NUM_ADVERSEWAIT, ADVERSEWAIT_TIME, PATIENT_INTER))
        # Execute!
        env.run(until=SIM_TIME)
        # The pairwise differences below rely on patients passing each
        # milestone in the same order (SimPy resource queues are FIFO),
        # so index i refers to the same patient in both lists.
        average_facility_total_time = np.mean([facility_departure_times[i] - facility_arrival_times[i] for i in range(len(facility_departure_times))])
        average_checkin_wait_time = np.mean([checkin_begin_times[i] - facility_arrival_times[i] for i in range(len(checkin_begin_times))])
        average_vaccination_wait_time = np.mean([vaccination_begin_times[i] - checkin_end_times[i] for i in range(len(vaccination_begin_times))])
        average_adverse_wait_time = np.mean([adverse_begin_times[i] - vaccination_end_times[i] for i in range(len(adverse_begin_times))])

        # Sample each queue length on a 0.1-unit time grid: the number who
        # reached the previous milestone minus those who reached the next.
        avg_waiting_checkin = []
        for i in [x * 0.1 for x in range(0, SIM_TIME*10)]:
            num_arrived_facility = sum(1 for j in facility_arrival_times if j <= i)
            num_started_checin = sum(1 for j in checkin_begin_times if j <= i)
            avg_waiting_checkin.append(num_arrived_facility - num_started_checin)

        avg_waiting_vaccine = []
        for i in [x * 0.1 for x in range(0, SIM_TIME*10)]:
            num_finished_checin = sum(1 for j in checkin_end_times if j <= i)
            num_started_vaccine = sum(1 for j in vaccination_begin_times if j <= i)
            avg_waiting_vaccine.append(num_finished_checin - num_started_vaccine)

        avg_waiting_adverse = []
        for i in [x * 0.1 for x in range(0, SIM_TIME*10)]:
            num_finished_vaccine = sum(1 for j in vaccination_end_times if j <= i)
            num_started_adverse = sum(1 for j in adverse_begin_times if j <= i)
            avg_waiting_adverse.append(num_finished_vaccine - num_started_adverse)

        output_checkin_waittime.append(average_checkin_wait_time)
        output_checkin_waitnum.append(np.mean(avg_waiting_checkin))
        output_vaccination_waittime.append(average_vaccination_wait_time)
        output_vaccination_waitnum.append(np.mean(avg_waiting_vaccine))
        output_adverse_waittime.append(average_adverse_wait_time)
        output_adverse_waitnum.append(np.mean(avg_waiting_adverse))
        output_total_facility_time.append(average_facility_total_time)
        output_num_vaccinated.append(len(facility_departure_times))
        
        
        my_bar.progress(float(percent_complete))
    my_bar.progress(1.0)   
    # Average each statistic across replications; conf_interval is the
    # sibling helper (not visible in this file chunk).
    return [np.mean(output_checkin_waittime), np.mean(output_checkin_waitnum), np.mean(output_vaccination_waittime), 
            np.mean(output_vaccination_waitnum), np.mean(output_adverse_waittime), np.mean(output_adverse_waitnum), conf_interval(output_total_facility_time),
            np.mean(output_total_facility_time), conf_interval(output_num_vaccinated), np.mean(output_num_vaccinated)]
Пример #9
0
 def main(self):
     """Run every simulation trial, collect results, then summarize them.

     Each trial gets a fresh SimPy environment; after all trials complete,
     an empty ``sim_results`` skeleton is built and filled in by
     ``simulation_results`` / ``round_conf_int``.
     """
     for trial in range(self.num_trials):
         # A brand-new SimPy environment is required for every trial.
         self.env = simpy.Environment()
         # Reset the simpy constructs and recording structures for this run.
         self.trial_reset()
         self.env.process(self.create_loadout_rate())
         # Drives the harvest schedule, JIT_schedule and preprocessing
         # every period.
         self.env.process(self.harvest())
         self.env.process(self.farm_transport(trial))
         # Checks the ssl transport schedule for refinery deliveries.
         self.env.process(self.moniter_ssl())
         # Records all trial-specific data, appending it to the
         # cross-trial aggregates.
         self.env.process(self.record_data())
         self.env.run(until=self.SIM_TIME)  # planning horizon in hours
         self.all_demand.append(self.refinery.level)
     self.schedules()

     def _blank_rate_stats():
         # Fresh dict (and fresh 'range' list) per machine so the entries
         # never alias one another.
         return {
             "average": 0,
             "stdev": 0,
             "sem": 0,
             "conf int": 0,
             'range': [0, 0]
         }

     self.sim_results = {
         "demand": {
             "percent": 0,
             "average": 0,
             "stdev": 0,
             "sem": 0,
             "conf int": "N/a",
             'range': [0, 0],
             "conf": {
                 '90': 0,
                 '95': 0,
                 '99': 0
             }
         }
     }
     for machine in ("telehandler rate", "press rate", "chopper rate",
                     "bagger rate", "module former rate",
                     "module hauler rate"):
         self.sim_results[machine] = _blank_rate_stats()
     self.simulation_results()
     self.round_conf_int()
    def init(self, seed):
        """Reset simulator state and run the first simulation step.

        Resets network capacities and metrics, builds a fresh SimPy
        environment, simulator parameters and FlowSimulator, plants `seed`
        into both `random` and `numpy.random`, optionally attaches a trace
        processor, advances the environment one step, and returns the
        initial SimulatorState.

        Args:
            seed: RNG seed planted into `random` and `numpy.random`.

        Returns:
            SimulatorState built from the parsed network dict, SF placement,
            SFC/SF lists, traffic and network stats after the first step.
        """

        # reset network caps and available SFs:
        reader.reset_cap(self.network)
        # Initialize metrics, record start time
        metrics.reset_metrics()
        self.run_times = int(1)
        self.start_time = time.time()

        # Parse network and SFC + SF file

        # Generate SimPy simulation environment
        self.env = simpy.Environment()

        # Instantiate the parameter object for the simulator.

        self.params = SimulatorParams(self.network,
                                      self.ing_nodes,
                                      self.sfc_list,
                                      self.sf_list,
                                      self.config,
                                      adapter=Adapter())

        # Two-state model and traces are mutually incompatible; warn but
        # proceed anyway.
        if self.params.use_states and 'trace_path' in self.config:
            logger.warning(
                'Two state model and traces are both activated, thi will cause unexpected behaviour!'
            )

        # Leave the init state on the first call; advance the two-state
        # model on every subsequent call.
        if self.params.use_states:
            if self.params.in_init_state:
                self.params.in_init_state = False
            else:
                self.params.update_state()

        self.duration = self.params.run_duration
        # Get and plant random seed
        self.seed = seed
        random.seed(self.seed)
        numpy.random.seed(self.seed)

        # Instantiate a simulator object, pass the environment and params
        self.simulator = FlowSimulator(self.env, self.params)

        # Start the simulator
        self.simulator.start()
        # Trace handling: the TraceProcessor instance is intentionally
        # discarded — presumably it registers itself with the environment
        # on construction (TODO confirm).
        if 'trace_path' in self.config:
            trace_path = os.path.join(os.getcwd(), self.config['trace_path'])
            trace = reader.get_trace(trace_path)
            TraceProcessor(self.params, self.env, trace, self.simulator)

        # Run the environment for one step to get initial stats.
        self.env.step()

        # Parse the NetworkX object into a dict format specified in SimulatorState. This is done to account
        # for changing node remaining capacities.
        # Also, parse the network stats and prepare it in SimulatorState format.
        self.parse_network()
        self.network_metrics()

        # Record end time and running time metrics
        self.end_time = time.time()
        metrics.running_time(self.start_time, self.end_time)
        simulator_state = SimulatorState(self.network_dict,
                                         self.simulator.params.sf_placement,
                                         self.sfc_list, self.sf_list,
                                         self.traffic, self.network_stats)
        logger.debug(f"t={self.env.now}: {simulator_state}")

        return simulator_state
def simulate_one_day(config: dict, G: nx.Graph, path_generator_function,
                     path_generator_args: list):
    """Simulate one day of customer traffic through the store graph G.

    Parameters
    ----------
    config : dict
        Simulation parameters. Required: 'num_hours_open'. Optional:
        'logging_enabled', 'raise_test_error' (debugging aid),
        'with_node_capacity', 'node_capacity', 'max_customers_in_store',
        'max_customers_in_store_per_sqm' (requires 'floorarea'), 'floorarea'.
    G : nx.Graph
        Store layout graph whose nodes customers move along.
    path_generator_function, path_generator_args
        Factory (plus its positional args) producing the customer-path
        generator consumed by _customer_arrivals.

    Returns
    -------
    dict
        Customer counts, exposure/contact statistics, shopping and waiting
        times, per-node encounter/exposure DataFrames and the store log.

    Raises
    ------
    ValueError
        If 'max_customers_in_store_per_sqm' is given without 'floorarea'.
    """
    # --- Parameters -------------------------------------------------------
    num_hours_open = config['num_hours_open']
    logging_enabled = config.get('logging_enabled', False)
    raise_test_error = config.get('raise_test_error',
                                  False)  # for debugging purposes
    with_node_capacity = config.get('with_node_capacity', False)
    max_customers_in_store_per_sqm = config.get(
        'max_customers_in_store_per_sqm', None)
    floorarea = config.get('floorarea', None)
    if max_customers_in_store_per_sqm is None:
        max_customers_in_store = config.get('max_customers_in_store', None)
    elif floorarea is not None:
        # Convert the per-sqm density limit into an absolute headcount.
        max_customers_in_store = int(max_customers_in_store_per_sqm *
                                     floorarea)
    else:
        raise ValueError(
            'If you set the parameter "max_customers_in_store_per_sqm", '
            'you need to specify the floor area via the "floorarea" parameter in the config.'
        )

    # --- Set up environment and run ---------------------------------------
    env = simpy.Environment()
    store = Store(env,
                  G,
                  max_customers_in_store=max_customers_in_store,
                  logging_enabled=logging_enabled)
    if with_node_capacity:
        store.enable_node_capacity(config.get('node_capacity', 2))
    path_generator = path_generator_function(*path_generator_args)
    env.process(_customer_arrivals(env, store, path_generator, config))
    env.process(_stats_recorder(store))
    # One opening hour is 60 * 10 time units — presumably 0.1-minute
    # resolution; TODO confirm against _customer_arrivals.
    env.run(until=num_hours_open * 60 * 10)

    # --- Record stats -----------------------------------------------------
    _sanity_checks(store, raise_test_error=raise_test_error)
    num_cust = len(store.customers)
    num_S = len(store.number_encounters_with_infected)
    shopping_times = list(store.shopping_times.values())
    waiting_times = np.array(list(store.waiting_times.values()))
    # waiting_times is already an ndarray; no second np.array copy needed.
    waited = waiting_times > 0
    num_waiting_people = int(waited.sum())
    if num_waiting_people > 0:
        mean_waiting_time = np.mean(waiting_times[waited])
    else:
        mean_waiting_time = 0

    num_contacts_per_cust = [
        contacts
        for contacts in store.number_encounters_with_infected.values()
        if contacts != 0
    ]
    # Per-node frames reindexed to node order 0..len(G)-1.
    df_num_encounters_per_node = pd.DataFrame(store.number_encounters_per_node,
                                              index=[0])
    df_num_encounters_per_node = df_num_encounters_per_node[range(len(G))]
    df_exposure_time_per_node = pd.DataFrame(store.time_with_infected_per_node,
                                             index=[0])
    df_exposure_time_per_node = df_exposure_time_per_node[range(len(G))]
    exposure_times = [
        val for val in store.time_with_infected_per_customer.values()
        if val > 0
    ]
    results = {
        'num_cust': num_cust,
        'num_S': num_S,
        'num_I': num_cust - num_S,
        'total_exposure_time':
            sum(store.time_with_infected_per_customer.values()),
        'num_contacts_per_cust': num_contacts_per_cust,
        'num_cust_w_contact': len(num_contacts_per_cust),
        'mean_num_cust_in_store':
            np.mean(list(store.stats['num_customers_in_store'].values())),
        'max_num_cust_in_store':
            max(store.stats['num_customers_in_store'].values()),
        'num_contacts': sum(num_contacts_per_cust),
        'shopping_times': shopping_times,
        'mean_shopping_time': np.mean(shopping_times),
        'num_waiting_people': num_waiting_people,
        'mean_waiting_time': mean_waiting_time,
        'store_open_length':
            max(store.stats['num_customers_in_store'].keys()),
        'df_num_encounters_per_node': df_num_encounters_per_node,
        'df_exposure_time_per_node': df_exposure_time_per_node,
        'total_time_crowded': store.total_time_crowded,
        'exposure_times': exposure_times,
    }

    if floorarea is not None:
        # Normalize occupancy stats by floor area when it is known.
        results['mean_num_cust_in_store_per_sqm'] = (
            results['mean_num_cust_in_store'] / floorarea)
        results['max_num_cust_in_store_per_sqm'] = (
            results['max_num_cust_in_store'] / floorarea)
    results['logs'] = store.logs
    return results
Пример #12
0
#universidad del valle de Guatemala
#carlos Alberto Raxtum ramos
#carne 19721
#basado de los ejemplos del libro y los subido a canvas
import simpy
import random

# Variable definitions
InstrucT = 3  # instruction-time constant handed to proceso; exact semantics not visible here
memoria_ram = 100  # total RAM units available to all processes
cant_procesos = int(input("Ingrese la cantidad de procesos"))  # process count, read interactively
t_total = 0  # running total of process times (accumulated by proceso)
tiempos = []  # per-process timing records (filled by proceso)

# Shared simulation entities: environment, 2-slot CPU, RAM container
# (starts full at memoria_ram) and a 2-slot waiting resource.
principal = simpy.Environment()
cpu = simpy.Resource(principal, capacity=2)
ram = simpy.Container(principal, init=memoria_ram, capacity=memoria_ram)
espera = simpy.Resource(principal, capacity=2)


def proceso(principal, tiempo, nombre, ram, memoria, cant_ins, InstrucT):
    """SimPy process modelling one OS process: wait `tiempo` until arrival,
    announce itself, then block until `memoria` units of RAM are available.

    NOTE(review): this example appears truncated by the scrape — `cant_ins`
    and `InstrucT` are accepted but never used in the visible body, and the
    globals are declared but not yet written here.
    """

    global t_total
    global tiempos

    # Arrival delay before the process enters the system.
    yield principal.timeout(tiempo)
    print('tiempo: %f - %s (new) son %d de memoria ram' %
          (principal.now, nombre, memoria))
    tiempo_al_llegar = principal.now  # arrival timestamp

    # Block until the requested amount of RAM can be drawn from the container.
    yield ram.get(memoria)
Пример #13
0
def main():
    """Simulate the bounded queue at several arrival rates and print one
    statistics row per rate, for buffer sizes 10 and 50.

    The two sweeps were duplicated in the original; they are now driven by a
    shared helper. Heading and column-format strings differ slightly between
    the two tables, so they are passed in verbatim.
    """
    print(
        "Make sure the WIDTH of the console is big enough to display table in one line."
    )
    _run_buffer_experiment(
        "Simple queue system model:mu = {0} B = 10".format(MU),
        "{0:<4} {1:<9} {2:<10} {3:<10} {4:<10} {5:<10} {6:<10} {7:<10} {8:<8} {9:<10}",
        10)
    _run_buffer_experiment(
        "Simple queue system model:mu = {0}, B = 50".format(MU),
        "{0:<4} {1:<9} {2:<10} {3:<10} {4:<10} {5:<10} {6:<10} {7:<10} {8:<10} {9:<10}",
        50)


def _run_buffer_experiment(title, header_fmt, buffer_size):
    """Run one sweep over the arrival rates with the given buffer size.

    title: section heading printed above the table.
    header_fmt: format string for the column-header row.
    buffer_size: queue capacity B passed to server_queue.
    """
    print(title)
    print(header_fmt.format("Lambda", "Count", "Min", "Max", "Mean", "Median",
                            "Sd", "Utilization", "Pd", "Calculated Pd"))
    # Re-seed before each sweep so both experiments see the same randomness,
    # matching the original behaviour.
    random.seed(RANDOM_SEED)
    for arrival_rate in [0.2, 0.4, 0.6, 0.8, 0.9, 0.99]:
        env = simpy.Environment()
        packet_delay = StatObject()
        idle_periods = StatObject()
        router = server_queue(env, arrival_rate, packet_delay, idle_periods,
                              buffer_size)
        env.process(router.packets_arrival(env))
        env.run(until=SIM_TIME)
        # Columns: lambda, delivered count, min/max/mean/median/sd of delay,
        # server utilization, observed drop fraction, analytic drop
        # probability for the finite-buffer queue.
        print(
            "{0:<0.3f} {1:<10} {2:<10.3f} {3:<10.3f} {4:<10.3f} {5:<10.3f} {6:<10.3f} {7:<10.3f} {8:<10.3f} {9:<10.3f}"
            .format(
                round(arrival_rate, 3), int(packet_delay.count()),
                round(packet_delay.minimum(), 3),
                round(packet_delay.maximum(), 3),
                round(packet_delay.mean(), 3), round(packet_delay.median(), 3),
                round(packet_delay.standarddeviation(), 3),
                round(1 - idle_periods.sum() / SIM_TIME, 3),
                round((router.packet_number - packet_delay.count()) /
                      router.packet_number, 3),
                round(((1 - arrival_rate / MU) /
                       (1 - (arrival_rate / MU)**(router.buffer_size + 2))) *
                      ((arrival_rate / MU)**router.buffer_size), 3)))
Пример #14
0
def env():
    """Generator-style fixture yielding a fresh SimPy environment."""
    simulation_environment = simpy.Environment()
    yield simulation_environment
Пример #15
0
def main():
    """Simulate a two-group queueing network (server 0 dispatching to two
    groups of servers), print one summary row per server and show a
    queue-length bar chart and a utilization pie chart for each.

    Behaviour-preserving refactor of a heavily duplicated original: the six
    servers, five print rows and five chart pairs are now driven by loops.
    All printed/plotted strings are produced verbatim.
    """
    # Header for server group 1 (servers 0-2).
    print("1 Simple queue system model: mu = {0}; Lambda = {1}".format(
        tiempoDeServicio1, tiempoEntreArribos1))
    print("{0:<9} {1:<9} {2:<9} {3:<9}".format("Count", "Demora promedio",
                                               "Utilización del Servidor",
                                               "Promedio clientes en Cola"))
    # Header for server group 2 (servers 3-5).
    print("2 Simple queue system model: mu = {0}; Lambda = {1}".format(
        tiempoDeServicio2, tiempoEntreArribos2))
    print("{0:<9} {1:<9} {2:<9} {3:<9}".format("Count", "Demora promedio",
                                               "Utilización del Servidor",
                                               "Promedio clientes en Cola"))
    random.seed(SEMILLA_RANDOM)
    env = simpy.Environment()

    num_servers = 6
    # One statistics collector of each kind per server (indices 0-5),
    # created group by group in the same order as the original.
    demoras = [ObjetoEstadístico() for _ in range(num_servers)]
    desocupados = [ObjetoEstadístico() for _ in range(num_servers)]
    longitudes_cola = [ObjetoEstadístico() for _ in range(num_servers)]
    tiempos_cola = [ObjetoEstadístico() for _ in range(num_servers)]
    ocupados = [ObjetoEstadístico() for _ in range(num_servers)]

    # Servers 0-2 use group-1 parameters; servers 3-5 use group-2 parameters.
    servidores = []
    for idx in range(num_servers):
        if idx < 3:
            arribos, servicio = tiempoEntreArribos1, tiempoDeServicio1
        else:
            arribos, servicio = tiempoEntreArribos2, tiempoDeServicio2
        servidores.append(
            serverQueue(env, arribos, servicio, demoras[idx],
                        desocupados[idx], longitudes_cola[idx],
                        tiempos_cola[idx], ocupados[idx], idx))

    # Server 0 dispatches arrivals to all the other servers.
    env.process(
        servidores[0].arribos0(env, servidores[1], servidores[2],
                               servidores[3], servidores[4], servidores[5]))
    # env.process(servidor1.arribos12(env, servidor3, servidor4, servidor5))
    # env.process(servidor2.arribos12(env, servidor3, servidor4, servidor5))
    env.run(until=TIEMPO_SIMULACION)

    # One summary row per active server (1-5): count, mean delay,
    # utilization, mean queue length.
    for idx in range(1, num_servers):
        print("{0:<9} {1:<9.3f}       {2:<9.3f}                {3:<9.3f}".format(
            int(demoras[idx].count()), round(demoras[idx].mean(), 3),
            round(1 - desocupados[idx].sum() / TIEMPO_SIMULACION, 3),
            round(servidores[idx].colaPromedio(), 3)))

    # Chart-title labels: '1-1','1-2' for group 1 and '2-1'..'2-3' for
    # group 2, matching the original titles for servers 1-5.
    etiquetas = ['1-1', '1-2', '2-1', '2-2', '2-3']

    # Queue-length bar chart per server.
    for idx in range(1, num_servers):
        plt.bar(tiempos_cola[idx].dataset, longitudes_cola[idx].dataset)
        plt.xlabel('Tiempo en Cola', fontsize=10)
        plt.ylabel('Longitud de la Cola', fontsize=10)
        plt.title('Promedio de Clientes En Cola Servidor ' + etiquetas[idx - 1])
        plt.show()

    # Utilization pie chart per server (busy vs idle fraction).
    for idx in range(1, num_servers):
        fraccion_ocupado = 1 - desocupados[idx].sum() / TIEMPO_SIMULACION
        fraccion_desocupado = desocupados[idx].sum() / TIEMPO_SIMULACION
        plt.pie([fraccion_ocupado, fraccion_desocupado],
                labels=['Servidor Ocupado', 'Servidor Desocupado'],
                autopct='%1.2f%%')
        plt.title('Utilizacion del servidor ' + etiquetas[idx - 1])
        plt.show()
Пример #16
0

if __name__ == "__main__":
    # Invoke the external API, then load the SFC-flow export and expose its
    # columns as module-level globals consumed by startSimulator.
    # cols_list = []
    mty2.api_invoker("no", "5")
    # Commented out currently
    df1 = pd.read_csv('sapme_sfcflow.csv', sep=';')
    #print(df1.head())
    # NOTE(review): `global` statements at module level are no-ops in
    # Python; these lines only document which globals the simulator reads.
    global cols_list
    global resource_list
    global wc_list
    global processing_time
    global waiting_time
    global ops_list
    cols_list = df1.columns
    resource_list = df1['RESRCE']
    wc_list = df1['WORK_CENTER']
    processing_time = df1['PROCESSING_TIME_SECS']
    waiting_time = df1['WAITING_TIME_SECS']
    ops_list = df1['OPERATION']
    print(cols_list)
    print(len(cols_list))
    #for key,val in df1.iterrows():
    # Anchor the simulation clock to the current wall-clock time.
    env = simpy.Environment(initial_time=time.time())
    total_no_of_steps = 70
    env.process(startSimulator(env, df1, total_no_of_steps))
    #startSimulator(df1,env)
    env.run()
    #print(resource_list)
    #print(wc_list)
Пример #17
0
    def run(self):
        """Execute one production-line simulation.

        Validates the configuration, runs the SimPy environment in
        hourly / 10%-progress steps, and emits progress, utilization and
        summary reports through the signal objects (``self.output``,
        ``self.signal``, ``self.util``).
        """
        profiling_mode = self.params['profiling_mode']
        time_limit = self.params['time_limit']

        # Profiling samples production rates hourly, so it needs more
        # than one simulated hour to produce any data.
        if profiling_mode and (time_limit < 3601):
            self.output.sig.emit(
                "Profiling mode requires longer simulation duration.")
            self.signal.sig.emit('Simulation aborted')
            return

        if self.sanity_check():
            self.output.sig.emit(
                "Production line needs to end with printlines and/or waferbins."
            )
            self.signal.sig.emit('Simulation aborted')
            return

        if len(self.cassette_loops) == 0:
            self.output.sig.emit(
                "Production line requires at least one cassette loop.")
            self.signal.sig.emit('Simulation aborted')
            return

        # A fresh environment is required for every simulation run, so
        # it must not be created in __init__.
        self.env = simpy.Environment()

        self.add_cassette_loops()
        self.replace_for_real_instances()

        # Fixed: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement.
        start_time = time.perf_counter()

        if profiling_mode:  # prepare production data storage entities
            columns = [
                "[" + str(loc.__class__.__name__) + "][" +
                str(loc.params['name']) + "]"
                for loc in self.batchlocations
            ]
            self.prod_rates_df = pd.DataFrame(columns=columns)
            prev_prod_volumes = [0] * len(self.batchlocations)

        ### Calculate time steps needed ###
        no_hourly_updates = time_limit // (60 * 60)
        hourly_updates = [(i + 1) * 60 * 60 for i in range(no_hourly_updates)]
        percentage_updates = [
            round((i + 1) * time_limit / 10) for i in range(10)
        ]

        updates_list = sorted(set(hourly_updates + percentage_updates))
        hourly_updates = set(hourly_updates)
        percentage_updates = set(percentage_updates)

        ### Run simulation ###

        string = "Simulation started "
        if profiling_mode:
            string += "in profiling mode "
        string += "with " + str(time_limit // (60 * 60)) + " hour duration"
        self.output.sig.emit(string)

        prev_production_volume_update = 0
        prev_percentage_time = self.env.now

        for update_time in updates_list:

            if self.stop_simulation:
                string = "Stopped at " + str(int(
                    self.env.now // 3600)) + " hours"
                self.output.sig.emit(string)
                break

            self.env.run(until=update_time)

            if update_time == time_limit:
                string = "Finished at " + str(int(
                    self.env.now // 3600)) + " hours"
                self.output.sig.emit(string)
                break

            if update_time in percentage_updates:

                # Production is counted at the second-to-last
                # locationgroup (the line output).
                l_loc = len(self.locationgroups) - 2

                percentage_production_volume_update = 0
                for j in range(len(self.locationgroups[l_loc])):
                    percentage_production_volume_update += self.locationgroups[
                        l_loc][j].output.container.level

                percentage_wph_update = (percentage_production_volume_update -
                                         prev_production_volume_update)
                percentage_wph_update = 3600 * percentage_wph_update / (
                    self.env.now - prev_percentage_time)

                # float needed for very large integer division
                string = str(
                    round(100 * float(update_time) /
                          time_limit)) + "% progress - " + str(
                              round(update_time / 3600, 1)) + " hours / "
                string += str(
                    percentage_production_volume_update) + " produced (" + str(
                        int(percentage_wph_update)) + " wph)"
                self.output.sig.emit(string)

                prev_percentage_time = self.env.now
                prev_production_volume_update = percentage_production_volume_update

            if profiling_mode and (update_time in hourly_updates):

                # Fixed: the original reused the outer loop variable ``i``
                # for these inner loops, shadowing it.
                prod_volumes = [
                    loc.prod_volume() for loc in self.batchlocations
                ]

                prod_rates = []
                for j in range(len(self.batchlocations)):
                    if not isinstance(self.batchlocations[j], Buffer):
                        prod_rates.append(prod_volumes[j] -
                                          prev_prod_volumes[j])
                    else:
                        # not really a production rate; just current buffer volume
                        prod_rates.append(prod_volumes[j])

                self.prod_rates_df.loc[len(self.prod_rates_df)] = prod_rates

                prev_prod_volumes = prod_volumes

        end_time = time.perf_counter()

        if profiling_mode:
            self.prod_rates_df.index += 1
            # set index name to time in hours; has to be after changing index values
            self.prod_rates_df.index.name = "Time [hours]"

        ### Generate summary output in log tab ###
        for batchlocation in self.batchlocations:
            batchlocation.report()

        for operator in self.operators:
            operator.report()

        for technician in self.technicians:
            technician.report()

        ### Generate utilization output for special tab ###
        utilization_list = []
        for batchlocation in self.batchlocations:
            if len(batchlocation.utilization):
                utilization_list.append(batchlocation.utilization)

        for operator in self.operators:
            if len(operator.utilization):
                utilization_list.append(operator.utilization)

        for technician in self.technicians:
            if len(technician.utilization):
                utilization_list.append(technician.utilization)

        self.util.sig.emit(utilization_list)

        ### Calculate sum of all produced cells ###
        prod_vol = 0
        l_loc = len(self.locationgroups) - 2  # second to last locationgroup
        for group_member in self.locationgroups[l_loc]:
            prod_vol += group_member.output.container.level

        self.output.sig.emit("Production volume: " + str(prod_vol))
        self.output.sig.emit("Average throughput (WPH): " +
                             str(int(3600 * prod_vol / self.env.now)))

        for j in range(len(self.locationgroups[-1])):  # last locationgroup
            buffer_content = len(self.locationgroups[-1][j].input.input.items)
            self.output.sig.emit("Cassette source buffer content for loop " +
                                 str(j) + ": " + str(buffer_content))

        # Wall-clock duration of the run, formatted coarsest-unit-first.
        sim_time = end_time - start_time
        if sim_time < 60:
            self.output.sig.emit("Simulation time: " +
                                 str(round(sim_time, 1)) + " seconds")
        elif sim_time < 3600:
            self.output.sig.emit("Simulation time: " +
                                 str(int(sim_time // 60)) + " minutes " +
                                 str(int(sim_time % 60)) + " seconds")
        else:
            self.output.sig.emit("Simulation time: " +
                                 str(int(sim_time // 3600)) + " hours " +
                                 str(int(sim_time % 3600 // 60)) +
                                 " minutes " + str(int(sim_time % 3600 % 60)) +
                                 " seconds")

        self.signal.sig.emit('Simulation finished')
def main(argc, argv):
    """Set up and start the checkpoint-restart simulation.

    Parses command-line options, creates the SimPy environment, batch
    queue and simulated processes, runs the simulation to completion and
    prints per-process statistics. ``argc``/``argv`` are accepted for
    signature compatibility; argparse reads ``sys.argv`` directly.
    """
    global NUM_PROCESSES, enableProcLogs, enableBqLogs, HELP, useWeibull

    print('Process checkpoint-restart simulator')
    random.seed(RANDOM_SEED)  # constant seed for reproducibility

    # Create an environment and start the setup process
    env = simpy.Environment()
    parser = ap.ArgumentParser(description=HELP,
                               formatter_class=ap.RawTextHelpFormatter)
    parser.add_argument("-p",
                        "--proc_logs",
                        action="store_true",
                        help="Show run time logs from processes")
    parser.add_argument("-b",
                        "--batchqueue_logs",
                        action="store_true",
                        help="Show run time logs from the batch-queue manager")
    parser.add_argument(
        "-n",
        "--procs",
        type=int,
        default=NUM_PROCESSES,
        help="Max. number of processes to simulate (default: 7)")
    parser.add_argument(
        "-w",
        "--use-weibull",
        action="store_true",
        help=
        "Use Weibull distribution for failure injection. Default is to use exponential distribution"
    )
    parser.add_argument(
        "-f",
        "--file-name",
        type=str,
        help="Store lost work/throughput results in the given file.")
    parser.add_argument("-s",
                        "--show-throughput-results",
                        action="store_true",
                        help="Show throughput results using matplotlib.")
    parser.add_argument("-l",
                        "--show-lostwork-results",
                        action="store_true",
                        help="Show lost work results using matplotlib.")
    parser.add_argument("-c",
                        "--show-ckpt-results",
                        action="store_true",
                        help="Show checkpoint results using matplotlib.")
    parser.add_argument("-r",
                        "--show-restart-results",
                        action="store_true",
                        help="Show restart results using matplotlib.")
    parser.add_argument("--sorted",
                        action="store_true",
                        help="Submit jobs in increasing order of ckpt ovhd.")
    args = parser.parse_args()
    NUM_PROCESSES = args.procs
    MAX_CIRC_Q_LEN = NUM_PROCESSES + 1
    enableProcLogs = args.proc_logs
    enableBqLogs = args.batchqueue_logs
    useWeibull = args.use_weibull

    # Create a batch queue
    mymachine = simpy.Resource(env, MAX_PARALLEL_PROCESSES)
    batchQ = BatchQueue(env, MAX_CIRC_Q_LEN, mymachine, False)
    # Fixed: use boolean `or` instead of bitwise `|` on argparse flags.
    showPlot = (args.show_throughput_results or args.show_lostwork_results
                or args.show_ckpt_results or args.show_restart_results)

    testProcesses = [
        Process(env, 'Process %d' % i,
                random.randint(0, 100) * 2, mymachine)
        for i in range(NUM_PROCESSES)
    ]
    if args.sorted:
        testProcesses.sort(key=lambda p: p.ckptTime)

    simulateArrivalOfJobs(env, testProcesses, batchQ)
    env.process(batchQ.runBq(False))
    # Execute
    env.run()

    # Analysis/results
    print("******************************************************")
    print("******************FINAL DATA**************************")
    print("******************************************************")

    computeResults(args, batchQ)
    saveResults(args, batchQ)
    showResults(args, batchQ)

    print("Process #, # Ckpts, # Total Failures, # Restarts, # Failed Restarts, # Failed Ckpts, # Preempts,"\
          " Compute Time, Ckpt Time, Lost Work, Lost Restart Time, Lost Ckpt Time, Submission Time, Start Time,"\
          " End Time, Actual Run Time")
    for p in testProcesses:
        # Sanity check: accounted time components should add up to the
        # actual measured run time.
        t1 = int(p.numCkpts * p.ckptTime +
                 p.numRestarts * int(p.ckptTime / 2.0) + p.lostWork +
                 p.totalComputeTime + p.lostRestartTime)
        t2 = int(p.actualRunTime)
        if not p.restartFailures * p.ckptTime >= p.lostRestartTime:
            # Fixed: was a Python 2 print statement (SyntaxError on Py3).
            print("Warning")
        if t1 != t2:
            print("Warning: %d != %d" % (t1, t2))
        print(p)
    print("End Time: %d" %
          (max(testProcesses, key=lambda p: p.endTime).endTime))
    if showPlot:
        plt.show()
    # NOTE(review): the code below appears spliced from a different script:
    # ``df1`` and ``iter_sfcs`` are not defined anywhere in this function
    # or visible module scope -- confirm against the original source.
    global wc_list
    global processing_time
    global waiting_time
    global ops_list
    global start_time
    global per_choice
    # Extract the relevant columns from the (presumed) MES dataframe.
    cols_list = df1.columns
    resource_list = df1['RESRCE']
    wc_list = df1['WORK_CENTER']
    processing_time = df1['PROCESSING_TIME_SECS']
    waiting_time = df1['WAITING_TIME_SECS']
    ops_list = df1['OPERATION']
    #print(cols_list)
    print(ops_list)
    # Interactive prompts: pick the operation to stress and the timing
    # variation (stored as the remaining percentage in per_choice).
    op_choice = input(
        'Enter the operation of choice to simulate bottleneck \n')
    print(op_choice)
    var_choice = int(input('Enter the variation of time in % \n'))
    print(var_choice)
    per_choice = 100 - var_choice
    #for key,val in df1.iterrows():
    total_no_of_steps = 70
    start_time = time.time()
    # Run one simulation per SFC iteration, offsetting the simulated
    # start time by 60s per iteration (cumulatively, since start_time is
    # updated in place).
    for xt in range(iter_sfcs):
        start_time = start_time + (xt * 60)
        env = simpy.Environment(initial_time=start_time)
        env.process(startSimulator(env, df1, xt))
        env.run()

    #startSimulator(df1,env)
Пример #20
0
    def __init__(self, sim_time, m_dict, recipes, lead_dict, wafers_per_box, wip_levels, break_mean=None, repair_mean=None):
        """Build the factory-simulation state: SimPy environment, machines,
        and the bookkeeping structures for WIP, due dates and rewards."""
        self.break_mean = break_mean
        self.repair_mean = repair_mean
        self.order_completed = False
        self.allowed_actions = None
        self.env = simpy.Environment()
        self.Sim_time = sim_time
        self.next_machine = None
        self.lead_dict = lead_dict
        self.num_wafers = wafers_per_box
        self.wip_levels = wip_levels

        # How many weeks ahead due dates are tracked.
        self.FUTURE_WEEKS = 1000

        # Running index used to give each wafer box a unique name.
        self.wafer_index = 0

        # machine name -> [station, proc_t] (assumed; confirm against caller).
        self.machine_dict = m_dict

        self.machines_list = [
            Machine(self, name, spec, self.break_mean, self.repair_mean)
            for name, spec in self.machine_dict.items()
        ]

        # Unique station names across all machines.
        self.stations = list(set(list(self.machine_dict.values())))

        # recipes[head_type] gives the station sequence a wafer of that
        # head type must visit to be completed.
        self.recipes = recipes

        # Completed-wafer counter per head type.
        self.complete_wafer_dict = {ht: 0 for ht in self.recipes}

        self.number_of_machines = len(self.machine_dict)

        # Per-station queues of wafer_box objects waiting to be processed.
        self.queue_lists = {station: [] for station in self.stations}

        self.order_complete_time = 0
        self.cycle_time = []
        self.step_reward = 0

        # Wafers due per future week, per head type (fresh list per type).
        self.due_wafers = {
            ht: [0] * self.FUTURE_WEEKS for ht in self.recipes
        }

        # station -> [(head_type, sequence_step), ...] combinations that
        # may be processed at that station.
        self.station_HT_seq = {station: [] for station in self.stations}
        for head_type, route in self.recipes.items():
            for seq, step in enumerate(route):
                self.station_HT_seq[step[0]].append((head_type, seq))
Пример #21
0
def env(FG):
    """Create a SimPy environment with ``FG`` attached as an attribute."""
    environment = simpy.Environment()
    environment.FG = FG
    return environment
Пример #22
0
def env():
    """Return a SimPy environment whose clock starts at 2019-01-01
    (local time), with the same epoch stored on the environment."""
    epoch = time.mktime(datetime.datetime(2019, 1, 1).timetuple())
    environment = simpy.Environment(initial_time=epoch)
    environment.epoch = epoch
    return environment
Пример #23
0
# Open one CSV log per replication when logging is enabled.
logs = []
if LOGGED:
    for rep in range(REPLICATIONS):
        handle = open('log' + str(SEED + rep) + '.csv', 'w')
        handle.write(
            'Time,Queue Length,Mean Response Time,Mean Waiting Time,Utilization,Reliability\n'
        )
        logs.append(handle)

# Build an independent environment / server / job generator per replication.

envs = []
servers = []
job_generators = []
for rep in range(REPLICATIONS):
    environment = simpy.Environment()
    envs.append(environment)
    server = Server(rep, environment)
    servers.append(server)
    job_generators.append(
        JobGenerator(rep, environment, server, BUFFER, LAMBDA, MU))

# Run every replication for the same simulated duration.
for environment in envs:
    environment.run(until=SIMULATION_TIME)

# Close the log files.
if LOGGED:
    for handle in logs:
        handle.close()

# Read log data file
dfs = []
Пример #24
0
def test_avq(nf, ar, t, r, k, serv="Exp", servdist_m=None,
             w_sys=True, mixed_traff=False, sching="rep-to-all", p_i_l=None):
  """Simulate the AVQ system ``nf`` times and return the mean sojourn time.

  Each run builds a packet generator and an AVQ (mixed-traffic or not),
  runs it for c*50000 time units, and accumulates the empirical E[T]
  from the job sink. Returns None when the average exceeds 100 (treated
  as an unstable operating point).

  Fixed: the original signature used the mutable default ``p_i_l=[]``,
  which is shared across calls; replaced with the None-sentinel idiom.
  """
  if p_i_l is None:
    p_i_l = []
  E_T_f_sum = 0
  for f in range(nf):
    log(WARNING, "ar= {}, t= {}, r= {}, k= {}, servdist_m= {}, w_sys= {}, mixed_traff= {}, sching= {}". \
        format(ar, t, r, k, servdist_m, w_sys, mixed_traff, sching) )

    env = simpy.Environment()
    if mixed_traff:
      sym_l, sym__rgroup_l_m = simplex_sym_l__sym__rgroup_l_m(t)
      log(WARNING, "sym__rgroup_l_m=\n {}".format(pprint.pformat(sym__rgroup_l_m) ) )
      pg = MT_PG(env, "pg", ar, sym_l)
      avq = MT_AVQ("mt_avq", env, t, sym__rgroup_l_m, serv, servdist_m)
    else:
      psize = None
      if serv == "Bern*Pareto":
        # "Bern*Pareto" means Bernoulli service with Pareto packet sizes.
        psize = "Pareto"
        serv = "Bern"
      pg = PG(env, "pg", ar, psize=psize, psize_dist_m=servdist_m)
      avq = AVQ("avq", env, t, r, k, servdist_m, sching, w_sys=w_sys)
    pg.out = avq
    pg.init()
    # Heavy-tailed service distributions need a longer run for stable estimates.
    c = 3 if serv == "Pareto" or serv == "Bern" else 1
    env.run(until=c*50000)

    if mixed_traff:
      print("pg.sym__n_sent= {}".format(pprint.pformat(pg.sym__n_sent) ) )
    st_l = avq.jsink.st_l
    if len(st_l) > 0:
      E_T_f_sum += float(sum(st_l) )/len(st_l)
    # Report which queue "won" each job, as frequencies.
    total_n_wins = sum([n for i, n in avq.jsink.qid__num_win_map.items() ] )
    print("pg.n_sent= {}, total_n_wins= {}".format(pg.n_sent, total_n_wins) )
    qid_winfreq_map = {i:float(n)/total_n_wins for i, n in avq.jsink.qid__num_win_map.items() }
    print("qid_winfreq_map= {}".format(pprint.pformat(qid_winfreq_map) ) )
  # NOTE: raises ZeroDivisionError for nf == 0, as the original did.
  E_T = E_T_f_sum/nf
  print(">> E_T= {}".format(E_T) )
  if E_T > 100: return None
  return E_T
Пример #25
0
# -*- coding: utf8 -*-


class Oficina:
    """Workshop demo: a car waits on an event that a mechanic triggers."""

    def __init__(self, ambiente):
        self.ambiente = ambiente
        # One-shot event signalling that the mechanic has arrived.
        self.mecanico_disponivel = ambiente.event()
        for processo in (self.chegou_carro, self.chegou_mecanico):
            ambiente.process(processo())

    def chegou_carro(self):
        # Block until the mechanic-available event succeeds.
        yield self.mecanico_disponivel
        print('Reparos iniciados em', self.ambiente.now)

    def chegou_mecanico(self):
        # Mechanic shows up at t=13, fires the event, reports at t=14.
        yield self.ambiente.timeout(13)
        self.mecanico_disponivel.succeed()
        yield self.ambiente.timeout(1)
        print('Mecânico disponível em', self.ambiente.now)


import simpy  # NOTE(review): import placed mid-file by the original snippet
ambiente = simpy.Environment()
Oficina(ambiente)  # registers the car/mechanic processes on the environment
ambiente.run()  # run until no scheduled events remain
Пример #26
0
def Test():
    """Feed a geometric source into a capacity-5 buffer for 20 time units."""
    environment = simpy.Environment()
    buffer_ = Buffer()
    buffer_.capacity = 5
    Source_Geo(environment, "Source", 0.8, buffer_)
    environment.run(until=20)
Пример #27
0
def test_bs():
    """Run the simpy_bs process for 20 simulated time units."""
    environment = simpy.Environment()
    environment.process(simpy_bs(environment))
    environment.run(until=20)
Пример #28
0
    def __init__(self):
        """Create an instance of the SimPy model environment."""

        self.env = simpy.Environment()
Пример #29
0
import simpy
from random import randint


def atencion(env):
    """Serve 50 clients in sequence, each taking a random 2-7 time units."""
    for cliente in range(1, 51):
        print("Atendiendo al cliente %d en el tiempo %d" % (cliente, env.now))
        yield env.timeout(randint(2, 7))


# NOTE(review): "enviroment" is a typo for "environment"; left unchanged
# because it is a runtime name.
enviroment = simpy.Environment()

enviroment.process(atencion(enviroment))
enviroment.run()  # runs until the 50-client generator is exhausted
Пример #30
0
 def __init__(self):
     self.env = simpy.Environment()
     self.gen = RandomState(2)