Beispiel #1
0
 def __init__(self, nname, env):
     """Initialise the comm channel: message queue plus timing/error config.

     :param nname: component name passed to the superclass.
     :param env: SimPy environment used to build the message Store.

     NOTE(review): *env* is only used to create the Store and is not kept
     on ``self`` here — confirm a superclass stores it if other methods
     (e.g. a ``put`` using ``self.env.timeout``) need it.
     """
     super().__init__(nname)
     conf = Configuration()
     self.queue = Store(env)  # inbox for delivered messages
     self.tcomm = conf.get('[comm]t_comm')    # per-message transmission delay
     self.perr = conf.get('[comm]p_err')      # probability of a transmission error
     self.unavail = conf.get('[comm]unavail') # probability the channel is unavailable
def drop_object(obj_name, me):
    """Drop *obj_name* held by entity *me* right below its current position.

    SimPy process: asks the ObjectManager to recreate the object in the
    world, waits for the reply, then publishes a ClawDone event with the
    outcome. Returns ``(success, msg)``.
    """
    pos = _WORLD.component_for_entity(me, Position)
    inventory = _WORLD.component_for_entity(me, Inventory)
    success: bool = False
    msg: str = "Something went wrong"
    skeleton = inventory.objects.get(obj_name, None)
    if skeleton is None:
        msg = f'Not holding object {obj_name}'
    else:
        # Re-create the object just below the entity's bounding box and
        # wait for the manager to confirm.
        reply_box = Store(_ENV)
        payload = ObjectManager.DropPayload(
            obj_name, ObjectManager.ObjectManagerOps.RECREATE, skeleton,
            (pos.center[0], pos.center[1] + pos.h), reply_box)
        _EVENT_STORE.put(EVENT(ObjectManager.ManagerTag, payload))
        reply = yield reply_box.get()
        if reply.get('success', False):
            success = True
            msg = ''
    # Always report the outcome of the DROP operation.
    done_payload = RESPONSE_ClawPayload(op=ClawOps.DROP,
                                        ent=me,
                                        success=success,
                                        msg=msg)
    _EVENT_STORE.put(EVENT(ClawDoneTag, done_payload))
    return success, msg
Beispiel #3
0
 def __init__(self, env, capacity, lift, position, orientation):
     """Dual-role store/resource bound to *lift* at a fixed pose.

     :param env: SimPy environment shared by both base classes.
     :param capacity: Store capacity.
     :param lift: Lift instance serving this object.
     :param position: Physical position.
     :param orientation: Physical orientation.
     """
     Store.__init__(self, env, capacity)
     Resource.__init__(self, env)
     self.position = position
     self.lift = lift
     self.orientation = orientation
     # NOTE(review): validation happens after assignment and ``assert`` is
     # stripped under ``python -O`` — consider raising TypeError instead.
     assert isinstance(lift, Lift)
def truck_controllers(env: simpy.Environment, truck_q: simpy.Store,
                      n_trucks: int = 100, interval: float = 5):
    """Feed newly created trucks into the waiting queue.

    SimPy process: creates ``n_trucks`` Truck objects, marks each as
    waiting, puts it on ``truck_q``, then idles ``interval`` sim time
    units before creating the next one.

    :param env: SimPy environment driving the simulation.
    :param truck_q: queue the consumer takes trucks from.
    :param n_trucks: number of trucks to generate (default keeps the
        previously hard-coded 100).
    :param interval: delay between consecutive trucks (default keeps the
        previously hard-coded 5).
    """
    for i in range(n_trucks):
        truck = Truck(truck_id=f"truck_{i}", env=env)
        truck.start_wait()
        truck_q.put(truck)
        yield env.timeout(interval)
def generate_item(env, last_q: simpy.Store, item_num: int = 100):
    """Simulate item arrivals: put *item_num* items on *last_q* with
    exponentially distributed inter-arrival times (mean MEAN_TIME)."""
    for idx in range(item_num):
        print(f'{round(env.now, 2)} - item: item_{idx} - created创建')
        last_q.put(f'item_{idx}')
        delay = random.expovariate(1 / MEAN_TIME)
        yield env.timeout(round(delay, 1))
Beispiel #6
0
    def run(self, env, simulator, netmodel):
        """Main worker process: start the download helper, then loop forever
        executing prepared assignments as free CPUs allow.

        NOTE(review): this variant deletes from ``self.assignments`` by key
        (``del self.assignments[assignment.task]``), i.e. it expects a
        mapping — confirm against the enclosing class; a sibling variant in
        this file keeps a list and calls ``.remove()``.
        """
        self.env = env
        self.simulator = simulator
        self.netmodel = netmodel
        self.ready_store = Store(env)
        self.download_wakeup = Event(self.simulator.env)

        self.free_cpus = self.cpus
        env.process(self._download_process())

        prepared_assignments = []
        # events[0] is always the pending get() on ready_store; the rest are
        # timeouts representing currently running tasks.
        events = [self.ready_store.get()]

        while True:
            finished = yield env.any_of(events)
            for event in finished.keys():
                if event == events[0]:
                    # A new assignment became ready: queue it by priority.
                    events[0] = self.ready_store.get()
                    assignment = event.value
                    if assignment.cancelled:
                        continue
                    prepared_assignments.append(assignment)
                    prepared_assignments.sort(key=lambda a: a.priority,
                                              reverse=True)
                    continue

                # Otherwise a task-duration timeout fired: the task is done.
                assignment = event.value
                task = assignment.task
                self.free_cpus += task.cpus
                assert not assignment.cancelled
                del self.assignments[assignment.task]
                events.remove(event)
                del self.running_tasks[task]
                simulator.add_trace_event(
                    TaskEndTraceEvent(self.env.now, self, task))
                for output in task.outputs:
                    self._add_data(output)
                simulator.on_task_finished(self, task)

            # Launch prepared assignments while CPUs allow; once a task does
            # not fit, anything with priority below its ``block`` value is
            # skipped this round.
            block = float("-inf")
            for assignment in prepared_assignments[:]:
                if assignment.priority < block:
                    continue
                task = assignment.task
                if task.cpus <= self.free_cpus:
                    prepared_assignments.remove(assignment)
                    if assignment.cancelled:
                        continue
                    self.free_cpus -= task.cpus
                    self.running_tasks[task] = RunningTask(task, self.env.now)
                    simulator.add_trace_event(
                        TaskStartTraceEvent(self.env.now, self, task))
                    events.append(env.timeout(task.duration, assignment))
                    self.simulator.on_task_start(self, assignment.task)
                else:
                    block = max(block, assignment.block)
Beispiel #7
0
    def __init__(self, sim, peer_id: int, peer_type: str, location: str,
                 bandwidth_ul: float, bandwidth_dl: float, **kwargs):
        """Physical representation of a peer.

        :param sim: simulation wrapper exposing ``env`` and ``sim_dir``
        :param peer_id: numeric id of the peer
        :param peer_type: info about the peer's role/type
        :param location: physical location of the peer
        :param bandwidth_ul: uplink bandwidth
        :param bandwidth_dl: downlink bandwidth
        :param kwargs: extra attributes copied verbatim onto the instance
        """
        self.sim = sim
        self.env = sim.env
        self.peer_type = peer_type
        self.peer_id = peer_id
        self.name = str(peer_id) + ":" + str(peer_type)
        self.location = location
        self.bandwidth_ul = bandwidth_ul
        self.bandwidth_dl = bandwidth_dl
        self.__dict__.update(kwargs)

        peer_repr = repr(self)
        # Define the log file for simulation results, if a simulation
        # directory was configured; otherwise run without a logger.
        if sim.sim_dir:
            self.log_name = os.path.join(sim.sim_dir, peer_repr + ".log")
            self.logger = setup_logger(peer_repr, self.log_name)
        else:
            self.log_name = None
            self.logger = None

        # Message queue for the received messages
        self.msg_queue = Store(self.env)

        # Network connections that are online
        self.online = True
        self.connections = dict()

        # Known peers
        self.disconnect_callbacks = []
        self.last_seen = dict()

        # Peer services
        self.handlers = {
        }  # Service.handle_message(self, msg) called on message
        self.mh_map = {}  # Message -> Handler map
        self.runners = {}  # Peer service runners
        self.mprt_map = {}  # Message -> Pre-Receive Trigger

        # Storage
        self.storage = {}

        # Monitoring services
        self.bytes_load = {}  # Overhead on bytes per sec
        self.msg_count_load = {}  # Msg per sec

        # Start the peer's main process as soon as it is created.
        self.env.process(self.run())
 def makeStation(self, index, envy):
     """Build one Station with its own vision system and channels,
     register it on the shared Blackboard, and return ``(name, station)``."""
     name = 'STAT_' + str(index)
     inbound = Store(envy)
     outbound = Store(envy)
     vision = Vision('VIS_' + str(index), self.accuracyParameter)
     station = Station(name, inbound, outbound)
     station.setVision(vision)
     # Register the new station in the shared package registry.
     Blackboard().get('[Shared]packages')[name] = None
     Blackboard().get('[Shared]packages.dirtybit')[name] = True
     return name, station
def pick_object(obj_name: str, me: int):
    """Try to grab *obj_name* with entity *me*'s claw.

    SimPy process: scans all pickable entities; for a name match it checks
    claw range and weight limit, asks the ObjectManager to remove the
    object from the world, stores its skeleton in the entity's inventory,
    and finally publishes a ClawDone event with the outcome. Returns
    ``(success, msg)``.

    NOTE(review): ``success``/``msg`` are overwritten for every shape of a
    matching pickable (and for every matching pickable), so the reported
    result is that of the last check performed, not necessarily the first
    success — confirm this is intended.
    """
    pos = _WORLD.component_for_entity(me, Position)
    claw = _WORLD.component_for_entity(me, Claw)
    success: bool = False
    msg: str = f'Object {obj_name} not found.'
    # Lazily create the claw's reach boundary (an ellipse of max_range).
    if claw.boundaries is None:
        span = Ellipse(pos.center, claw.max_range, claw.max_range)
        col = Collidable(shape=helpers.collision_from_points(span, pos.center))
        claw.boundaries = col
    # For every pickable component, see if it's within range
    for _, (pick, col) in _WORLD.get_components(Pickable, Collidable):
        if pick.name == obj_name:
            # This is the object we want. Let's see if it's in range and under limit weight
            for s1 in col.shapes:
                if collide(claw.boundaries.shapes[0], s1):
                    if pick.weight <= claw.max_weight:
                        # Take the object: ask the manager to remove it
                        # from the world and wait for its reply.
                        reply_channel = Store(_ENV)
                        payload = ObjectManager.GrabPayload(
                            obj_name, ObjectManager.ObjectManagerOps.REMOVE,
                            reply_channel)
                        event = EVENT(ObjectManager.ManagerTag, payload)
                        _EVENT_STORE.put(event)
                        # Wait for reply
                        response = yield reply_channel.get()
                        if response.get('success', False):
                            success = True
                            # Add removed component to my inventory
                            if not _WORLD.has_component(me, Inventory):
                                _WORLD.add_component(me, Inventory())
                            inventory = _WORLD.component_for_entity(
                                me, Inventory)
                            inventory.objects[obj_name] = pick.skeleton
                            msg = f'Picked {obj_name}. My inventory: {inventory.objects}'
                        else:
                            success = False
                            msg = response.get('msg', '')
                    else:
                        msg = f'Pickable {obj_name} too heavy. Max weight:{claw.max_weight}. Object weight: {pick.weight}'
                        success = False
                else:
                    msg = f'Pickable {obj_name} not within claw range!'
                    success = False
    # Always report the outcome of the GRAB operation.
    response = RESPONSE_ClawPayload(op=ClawOps.GRAB,
                                    ent=me,
                                    success=success,
                                    msg=msg)
    event_to_put = EVENT(ClawDoneTag, response)
    _EVENT_STORE.put(event_to_put)
    return success, msg
Beispiel #10
0
def _attach_store_items(store: simpy.Store, callbacks: ProbeCallbacks) -> None:
    """Monkey-patch *store* so every callback fires with the new item count
    whenever a get/put actually changes the number of stored items."""

    def make_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            before = len(store.items)
            result = func(*args, **kwargs)
            after = len(store.items)
            if after != before:
                for probe in callbacks:
                    probe(after)
            return result

        return wrapper

    store._do_get = make_wrapper(store._do_get)  # type: ignore
    store._do_put = make_wrapper(store._do_put)  # type: ignore
Beispiel #11
0
def run_scheduler_worker(env: simpy.Environment, queue: simpy.Store, context: ClusterContext, scheduler: Scheduler,
                         oracles: List[Oracle], log: List[LoggingRow]):
    """Consume pods from *queue*, run the scheduler on each, and log results."""
    while True:
        logging.debug('Scheduler waiting for pod...')
        pod = yield queue.get()

        # TODO fix time not changing (env.now)
        logging.debug('Pod received by scheduler at %.2f', env.now)
        log.append(LoggingRow(env.now, EventType.POD_RECEIVED, pod.name))

        # Time the real scheduling algorithm and charge that wall-clock
        # duration to the simulation clock.
        started = time.time()
        result = scheduler.schedule(pod)
        elapsed = time.time() - started

        yield env.timeout(elapsed)

        logging.debug('Pod scheduling took %.2f ms, and yielded %s', elapsed * 1000, result)

        # Collect each oracle's startup estimate into the metadata dict.
        metadata = {key: value for key, value in (o.estimate(context, pod, result) for o in oracles)}

        # Record the container image and the host the scheduler picked.
        metadata['image'] = pod.spec.containers[0].image
        metadata['suggested_host'] = None if result.suggested_host is None else result.suggested_host.name

        log.append(LoggingRow(env.now, EventType.POD_SCHEDULED, pod.name, metadata))
Beispiel #12
0
    def run(self, env, simulator, connector):
        """Worker main loop: execute ready tasks one at a time, forever.

        :param env: SimPy environment.
        :param simulator: owning simulator; receives trace events and the
            on_task_finished callback.
        :param connector: stored on the instance; not used in this loop.
        """
        self.env = env
        self.simulator = simulator
        self.connector = connector
        self.ready_tasks = Store(env)

        while True:
            task = yield self.ready_tasks.get()
            simulator.add_trace_event(
                TaskStartTraceEvent(self.env.now, self, task))
            # Busy for the whole task duration — no preemption.
            yield env.timeout(task.duration)
            simulator.add_trace_event(
                TaskEndTraceEvent(self.env.now, self, task))
            self.assigned_tasks.remove(task)
            # The completed task is now locally available data.
            self.data.add(task)
            simulator.on_task_finished(self, task)
Beispiel #13
0
 def __init__(self, env, cp_params):
     """Compute node: job pipe, metric collectors and a sized core pool.

     :param env: SimPy environment.
     :param cp_params: dict with at least 'num_cores', 'core_speed'
         and 'name'.
     """
     self.env = env
     self.in_pipe = Store(self.env) # TODO: define capacity in cp_params
     self.pending_jobs = {}
     self.response_times = []
     self.interarrivals = []
     self.queue_times = []
     self.num_cores = cp_params['num_cores']
     self.used_cores = 0
     self.core_speed = random_number(cp_params['core_speed'])
     # NOTE(review): simpy.Resource takes the env as its first argument, so
     # Resource here is presumably a project-local class — confirm.
     self.cores = Resource(self.num_cores)
     self.name = cp_params['name']
     self.jobs_completed = 0
     self.sim_components = None
     self.data_res = []
     self.idle_time = 0
     self.time_last_arrival = None
Beispiel #14
0
def run_load_generator(env: simpy.Environment, queue: simpy.Store, pod_synth: PodSynthesizer,
                       ia_sampler: Generator[float, float, None], log: List[LoggingRow]):
    """Feed synthetic pods into the work queue at sampled inter-arrival times.

    :param env: simpy environment
    :param queue: the work queue
    :param pod_synth: fake Pod generator
    :param ia_sampler: arrival profile
    :param log: simple array to append log messages
    """
    while True:
        # Sample the next inter-arrival gap, rounded to millisecond accuracy.
        delay = round(next(ia_sampler), 3)
        yield env.timeout(delay)

        pod = next(pod_synth)
        queue.put(pod)

        logging.debug('pod arrived at %.2f seconds' % env.now)
        log.append(LoggingRow(env.now, EventType.POD_QUEUED, pod.name, {'queue_length': len(queue.items)}))
class Config(object):
    """Static simulation configuration shared across the model."""

    env = Environment()

    # Random seed configuration
    RANDOM_SEED = 42
    # Package-generator configuration
    NUM_PACKAGES = 10   # number of packages to generate
    INTERVAL_TIME = 10  # interval between generated packages / s
    # Conveyor-belt type: pipeline class / simpy.Store / simpy.PriorityStore
    TYPE_PIP_LINE = Store(env)  # configures the conveyor-belt type
Beispiel #16
0
 def __init__(self, orderchannel):
     """AGV fleet controller: builds the vehicles and keeps the order channel.

     :param orderchannel: Store on which transport orders arrive.
     """
     super().__init__('AGV', 0, 0)
     b = Blackboard()
     transportationTime = b.get('[agv]time')
     num = b.get('[agv]number')
     self.env = b.get('enviro')
     self.vehicles = list()
     # One (vehicle, command-channel) pair per configured vehicle.
     for i in range(0, num):
         vchannel = Store(self.env)
         v = Vehicle('Vehicle_' + str(i), transportationTime, vchannel)
         self.vehicles.append((v, vchannel))
     self.orderChannel = orderchannel
class Connection:
    """Models message propagation over a link between two nodes."""

    def __init__(self, env, origin_node, destination_node):
        self.env = env
        self.store = Store(env)
        self.origin_node = origin_node
        self.destination_node = destination_node

    def latency(self, envelope):
        """Process: hold *envelope* for the link latency, then deliver it."""
        delay = get_latency_delay(self.env, self.origin_node.location,
                                  self.destination_node.location)
        yield self.env.timeout(delay)
        self.store.put(envelope)

    def put(self, envelope):
        """Send *envelope* asynchronously through the connection."""
        print(
            f'{envelope.origin.address} at {envelope.timestamp}: Message (ID: {envelope.msg["id"]}) sent with {envelope.msg["size"]} MB with a destination: {envelope.destination.address}'
        )
        self.env.process(self.latency(envelope))

    def get(self):
        """Return the event that yields the next delivered envelope."""
        return self.store.get()
Beispiel #18
0
    def __init__(self, sim, buffer_params):
        """Create a buffer object for a MOCA process flow.

        :param sim: Reference to DES simulation object (must expose
            ``simpy_env``)
        :param buffer_params: Dictionary of parameters; requires 'name'
            and 'size'
        """
        # Name and size of the buffer object
        self.name = buffer_params['name']
        self.size = buffer_params['size']

        # The parent simulation object
        self.procflow_sim = sim

        # References to upstream and downstream processes (wired up later)
        self.upstream_process = None
        self.downstream_process = None

        # Internal buffer, capacity-limited to the configured size
        self.buffer = Store(self.procflow_sim.simpy_env, capacity=self.size)

        # Buffer state variables
        self.buffer_metrics = {'usage': 0}
Beispiel #19
0
class Storage:
    """Capacity-limited buffer between two processes in a MOCA process flow."""

    def __init__(self, sim, buffer_params):
        """Create a buffer object for a MOCA process flow.

        :param sim: Reference to DES simulation object (must expose
            ``simpy_env``)
        :param buffer_params: Dictionary of parameters; requires 'name'
            and 'size'
        """
        # Name and size of the buffer object
        self.name = buffer_params['name']
        self.size = buffer_params['size']

        # The parent simulation object
        self.procflow_sim = sim

        # References to upstream and downstream processes (wired up later)
        self.upstream_process = None
        self.downstream_process = None

        # Internal buffer, capacity-limited to the configured size
        self.buffer = Store(self.procflow_sim.simpy_env, capacity=self.size)

        # Buffer state variables
        self.buffer_metrics = {'usage': 0}

    def store(self, part):
        """Store the given part in the buffer (waits while the buffer is full).

        :param part: Part object to be stored
        """
        yield self.buffer.put(part)

    def retrieve(self):
        """Retrieve the first part from the buffer (waits until one arrives).

        :return: The part taken from the buffer
        """
        # BUGFIX: the value of the yielded get() event was previously
        # discarded, so the process always ended with None even though the
        # docstring promises the part. Capture and return it.
        part = yield self.buffer.get()
        return part
 def __init__(self, nname, agvcommchannel):
     """Smart mixing cell: rule system, robot, vision stations and AGV link.

     :param nname: component name passed to the superclass.
     :param agvcommchannel: Store used to talk to the AGV fleet.
     """
     super().__init__(nname)
     b = Blackboard()
     self.rbs = RuleBasedSystem(b.get('[RBS]maxattempts'))
     envy = b.get('enviro')
     self.accuracyParameter = b.get('[vision]accuracy')
     self.toagv = agvcommchannel
     # robot and its command channel
     self.robotcomm = Store(envy)
     self.robot = Robot('robot', self.robotcomm)
     # stations: inputs first, then outputs, all registered in the shared
     # package maps created on the Blackboard below
     ninputs = b.get('[stations]inumber')
     noutputs = b.get('[stations]onumber')
     nspas = b.get('[stations]spa')  # NOTE(review): read but unused here — confirm
     self.stations = dict()
     b.put('[Shared]packages', dict())
     b.put('[Shared]packages.dirtybit', dict())
     for sindex in range(0, ninputs):
         sname, stat = self.makeStation(sindex, envy)
         self.stations[sname] = (stat, StationDir.INPUT)
     for sindex in range(0, noutputs):
         sname, stat = self.makeStation(sindex + ninputs, envy)
         self.stations[sname] = (stat, StationDir.OUTPUT)
Beispiel #21
0
class Comm(Component):
    """Lossy, delayed communication channel between components.

    A message suffers a transmission delay ``tcomm``; while random errors
    occur (probability ``perr``) it is retransmitted at a full ``tcomm``
    each time; when the channel is unavailable (probability ``unavail``)
    the message is dropped entirely.
    """

    def __init__(self, nname, env):
        super().__init__(nname)
        conf = Configuration()
        # BUGFIX: keep a reference to the environment — put() below uses
        # self.env.timeout(), which raised AttributeError unless a
        # superclass happened to set self.env.
        self.env = env
        self.queue = Store(env)
        self.tcomm = conf.get('[comm]t_comm')
        self.perr = conf.get('[comm]p_err')
        self.unavail = conf.get('[comm]unavail')

    def put(self, msg):
        """Process: deliver *msg* after delay(s), or drop it if unavailable."""
        guess = uniform(0, 1)
        if (guess > self.unavail):
            yield self.env.timeout(self.tcomm)
            # Each corrupted transmission costs another full t_comm.
            while (random() < self.perr):
                self.debug('errorInMessage;;')
                yield self.env.timeout(self.tcomm)
            self.queue.put(msg)
        else:
            self.debug('unavailabilityOfCommunication;;')

    def get(self):
        """Process: wait for and return the next delivered message."""
        msg = yield self.queue.get()
        return msg
Beispiel #22
0
def main(stop):
    """Run the smart-mixing-cell simulation until *stop* and return the
    generated record."""
    recorder = Recorder()
    recorder.reset()
    enviro = Environment()
    logqueue = makeLogging()
    board = Blackboard()
    board.put('logqueue', logqueue)
    board.put('enviro', enviro)
    # Channel connecting the cell to the AGV fleet.
    toavg = Store(enviro)
    AGV(toavg)
    SmartMixingCell("SMC1", toavg)
    enviro.run(until=stop)
    logqueue.put('HALT')
    return recorder.generateRecord()
def machine(env: simpy.Environment, last_q: simpy.Store, next_q: simpy.Store,
            machine_id: str):
    """Simulate one machine; how many items it handles concurrently is
    bounded by its worker Resource (WORKER_NUM)."""

    workers = simpy.Resource(env, capacity=WORKER_NUM)

    def handle(item):
        """Simulate a single worker processing one item, then forwarding
        it onto the conveyor belt towards the next queue."""
        with workers.request() as req:
            yield req
            yield env.timeout(PROCESS_TIME)
            env.process(con_belt_process(env, CON_BELT_TIME, item, next_q))
            print(
                f'{round(env.now, 2)} - item: {item} - machine工人: {machine_id} - processed正在处理'
            )

    while True:
        item = yield last_q.get()
        env.process(handle(item))
Beispiel #24
0
 def loadScenario(self, enviro):
     """Wire up the ERTMS scenario: RBCs, EVCs (each behind a Comm) and ETCS.

     EVC logic behaviours are attached to the RBCs round-robin via
     ``rbcCounter``.

     NOTE(review): ``etcs`` is created but neither stored nor returned —
     presumably it registers itself elsewhere; confirm.
     """
     numberOfEvcs = Configuration().get('[ertms]evcs')
     numberOfRbcs = Configuration().get('[ertms]rbcs')
     # Shared channel from all components back to the ETCS.
     toETCS = Store(enviro)
     rbcs = list()
     comms = list()
     evcs = list()
     for i in range(0, numberOfRbcs):
         rname = 'RBC_' + str(i)
         rbc = RBC(rname, toETCS)
         rbcs.append(rbc)
     rbcCounter = 0
     for i in range(0, numberOfEvcs):
         cname = 'COMM_' + str(i)
         comm = Comm(cname, enviro)
         comms.append(comm)
         ename = 'EVC_' + str(i)
         evc = EVC(ename, comm, toETCS)
         evcs.append(evc)
         logicname = 'LOGIC_' + str(rbcCounter) + '_to_' + str(i)
         rbcs[rbcCounter].addBehaviour(logicname, comm)
         rbcCounter = (rbcCounter + 1) % numberOfRbcs
     etcs = ETCS('ETCS', toETCS, numberOfRbcs, numberOfEvcs)
Beispiel #25
0
class Worker:
    """Simulated cluster worker.

    Executes task assignments produced by a scheduler: first downloads
    missing task inputs from other workers (bounded by ``max_downloads``
    overall and ``max_downloads_per_worker`` per source worker), then runs
    tasks whenever enough free CPUs are available.
    """

    # Priority bump so inputs of already-ready tasks download first.
    DOWNLOAD_PRIORITY_BOOST_FOR_READY_TASK = 100000

    def __init__(self, cpus=1, max_downloads=4, max_downloads_per_worker=2):
        self.cpus = cpus
        self.assignments = []
        self.ready_store = None

        self.data = set()
        self.running_tasks = {}
        self.scheduled_downloads = {}
        self.running_downloads = []

        self.free_cpus = cpus
        self.max_downloads = max_downloads
        self.max_downloads_per_worker = max_downloads_per_worker
        self.id = None

    def copy(self):
        """Return a fresh Worker with the same static configuration;
        runtime state (assignments, data, downloads) is not copied."""
        return Worker(cpus=self.cpus,
                      max_downloads=self.max_downloads,
                      max_downloads_per_worker=self.max_downloads_per_worker)

    def _download(self, output, priority):
        """Schedule a download of *output* and return its Download handle."""
        logger.info("Worker %s: scheduled downloading %s, priority=%s", self,
                    output, priority)
        assert output not in self.data
        download = Download(Event(self.env), output, priority)
        self.scheduled_downloads[output] = download
        return download

    def assign_tasks(self, assignments):
        """Accept new *assignments* and kick off their input downloads."""
        for assignment in assignments:
            assert assignment.worker == self
            # assert assignment not in self.assignments

            if assignment.task.cpus > self.cpus:
                raise Exception(
                    "Task {} allocated on worker with {} cpus".format(
                        assignment.task, self.cpus))

            self.assignments.append(assignment)
            self._init_downloads(assignment)

        # Wake the download process so it sees the new work.
        if not self.download_wakeup.triggered:
            self.download_wakeup.succeed()

    def update_tasks(self, tasks):
        """Re-evaluate input downloads for already-assigned *tasks*."""
        for task in tasks:
            for assignment in self.assignments:
                if task == assignment.task:
                    self._init_downloads(assignment)
                    break
            else:
                raise Exception(
                    "Updating non assigned task {}, worker={}".format(
                        task, self))
        if not self.download_wakeup.triggered:
            self.download_wakeup.succeed()

    @property
    def assigned_tasks(self):
        # Tasks of all current assignments.
        return [a.task for a in self.assignments]

    def _init_downloads(self, assignment):
        """Schedule downloads for *assignment*'s missing inputs; once every
        input is present (or being fetched) push the assignment to
        ready_store — immediately, or via an all_of callback."""
        deps = []
        not_complete = False
        runtime_state = self.simulator.runtime_state
        for input in assignment.task.inputs:
            if input in self.data:
                continue
            priority = assignment.priority
            if runtime_state.task_info(assignment.task).is_ready:
                priority += self.DOWNLOAD_PRIORITY_BOOST_FOR_READY_TASK
            d = self.scheduled_downloads.get(input)
            if d is None:
                info = runtime_state.output_info(input)
                if info.placing:
                    if input.size == 0:
                        # Zero-sized outputs need no transfer.
                        self.data.add(input)
                        continue

                    d = self._download(input, priority)
                    deps.append(d.event)
                else:
                    # The input has not been produced anywhere yet.
                    not_complete = True
            else:
                # Already scheduled: just refresh its priority.
                d.update_priority(priority)
                deps.append(d.event)

        if not_complete:
            return
        if not deps:
            self.ready_store.put(assignment)
        else:
            e = self.env.all_of(deps)
            e.callbacks.append(lambda _: self.ready_store.put(assignment))

    def _download_process(self):
        """Long-running process promoting scheduled downloads to running
        ones, respecting the global and per-source download limits."""
        events = [self.download_wakeup]
        env = self.env
        runtime_state = self.simulator.runtime_state

        while True:
            finished = yield env.any_of(events)
            for event in finished.keys():
                if event == events[0]:
                    # Wakeup fired: re-arm it and invalidate the cached,
                    # sorted candidate list.
                    self.download_wakeup = Event(self.simulator.env)
                    events[0] = self.download_wakeup
                    downloads = None
                    continue
                # A network transfer finished: record the data locally.
                events.remove(event)
                download = event.value
                assert download.output not in self.data
                self.data.add(download.output)
                self.running_downloads.remove(download)
                del self.scheduled_downloads[download.output]
                download.event.succeed(download)
                self.simulator.add_trace_event(
                    FetchEndTraceEvent(self.env.now, self, download.source,
                                       download.output))

            if len(self.running_downloads) < self.max_downloads:
                # Re-sort every pass: priorities may have changed in the
                # background.
                # NOTE(review): ``downloads`` is only (re)assigned in the
                # wakeup branch above; on a pass without a wakeup the value
                # from the previous iteration is reused — confirm intended.

                if downloads is None:
                    downloads = list(
                        o for o in self.scheduled_downloads.values()
                        if o not in self.running_downloads)
                    downloads.sort(key=lambda d: d.priority, reverse=True)

                for d in downloads[:]:
                    # Enforce the per-source-worker download cap.
                    count = 0
                    worker = runtime_state.output_info(d.output).placing[0]
                    for rd in self.running_downloads:
                        if worker == rd.source:
                            count += 1
                    if count >= self.max_downloads_per_worker:
                        continue
                    downloads.remove(d)
                    d.start_time = self.env.now
                    d.source = worker
                    self.running_downloads.append(d)
                    event = self.netmodel.download(worker, self, d.output.size,
                                                   d)
                    events.append(event)
                    self.simulator.add_trace_event(
                        FetchStartTraceEvent(self.env.now, self, worker,
                                             d.output))
                    if len(self.running_downloads) >= self.max_downloads:
                        break

    def run(self, env, simulator, netmodel):
        """Main worker process: start the downloader, then loop forever
        executing ready assignments while tracking CPU usage."""
        self.env = env
        self.simulator = simulator
        self.netmodel = netmodel
        self.ready_store = Store(env)
        self.download_wakeup = Event(self.simulator.env)

        self.free_cpus = self.cpus
        env.process(self._download_process())

        prepared_assignments = []
        # events[0] is always the pending get() on ready_store; the rest
        # are timeouts representing currently running tasks.
        events = [self.ready_store.get()]

        while True:
            finished = yield env.any_of(events)
            for event in finished.keys():
                if event == events[0]:
                    # New ready assignment: queue it, highest priority first.
                    prepared_assignments.append(event.value)
                    prepared_assignments.sort(key=lambda a: a.priority,
                                              reverse=True)
                    events[0] = self.ready_store.get()
                    continue

                # A task-duration timeout fired: the task completed.
                assignment = event.value
                task = assignment.task
                self.free_cpus += task.cpus
                self.assignments.remove(assignment)
                events.remove(event)
                del self.running_tasks[task]
                simulator.add_trace_event(
                    TaskEndTraceEvent(self.env.now, self, task))
                for output in task.outputs:
                    self.data.add(output)
                simulator.on_task_finished(self, task)

            # Launch prepared assignments while CPUs allow; once a task does
            # not fit, anything with priority below its ``block`` value is
            # skipped this round.
            block = float("-inf")
            for assignment in prepared_assignments[:]:
                if assignment.priority < block:
                    continue
                task = assignment.task
                if task.cpus <= self.free_cpus:
                    self.free_cpus -= task.cpus
                    self.running_tasks[task] = RunningTask(task, self.env.now)
                    simulator.add_trace_event(
                        TaskStartTraceEvent(self.env.now, self, task))
                    events.append(env.timeout(task.duration, assignment))
                    prepared_assignments.remove(assignment)
                else:
                    block = max(block, assignment.block)

    def __repr__(self):
        return "<Worker {}>".format(self.id)
Beispiel #26
0
class Peer:
    """Physical representation of a peer in the simulation.

    A peer owns a simpy message queue, a dict of online connections and a
    registry of services (message handlers and runners) that process the
    messages it receives. The peer's main process (see run) starts as soon
    as the instance is created.
    """

    def __init__(self, sim, peer_id: int, peer_type: str,
                 location: str, bandwidth_ul: float, bandwidth_dl: float, **kwargs):
        """
        sim: Simulation wrapper (provides .env and .sim_dir)
        peer_id: Numeric identifier of the peer
        peer_type: Role of the peer (e.g. 'bootstrap')
        location: Physical location of peer
        bandwidth_ul: Uplink bandwidth
        bandwidth_dl: Downlink bandwidth
        kwargs: Extra attributes attached verbatim to the instance
        """
        self.sim = sim
        self.env = sim.env
        self.peer_type = peer_type
        self.peer_id = peer_id
        self.name = str(peer_id) + ":" + str(peer_type)
        self.location = location
        self.bandwidth_ul = bandwidth_ul
        self.bandwidth_dl = bandwidth_dl
        self.__dict__.update(kwargs)

        peer_repr = repr(self)
        # Define log file path for results of the simulation
        if sim.sim_dir:
            self.log_name = os.path.join(sim.sim_dir, peer_repr + ".log")
            self.logger = setup_logger(peer_repr, self.log_name)
        else:
            self.log_name = None
            self.logger = None

        # Message queue for the received messages
        self.msg_queue = Store(self.env)

        # Network connections that are online
        self.online = True
        self.connections = dict()

        # Known peers
        self.disconnect_callbacks = []
        self.last_seen = dict()

        # Peer services
        self.handlers = {}  # service name -> handler; handle_message(msg) on receive
        self.mh_map = {}  # message type -> set of handler service names
        self.runners = {}  # peer service runners
        self.mprt_map = {}  # message -> pre-receive trigger

        # Storage
        self.storage = {}

        # Monitoring counters, keyed by simulation second
        self.bytes_load = {}  # received bytes per second
        self.msg_count_load = {}  # received messages per second

        # Start peer as it is created
        self.env.process(self.run())

    def __repr__(self):
        return '%s_%s' % (self.__class__.__name__, self.name)

    def __lt__(self, other):
        return self.name < other.name

    def run(self):
        """Main simpy process: drain the message queue and dispatch messages."""
        while True:
            # Receive message and model the downlink transfer delay
            msg = yield self.msg_queue.get()
            delay = msg.size / self.bandwidth_dl
            yield self.env.timeout(delay)

            # A pre-receive trigger may veto delivery by returning a falsy value
            deliver = True
            if msg.pre_task:
                deliver = yield self.env.process(msg.pre_task(msg, self))
            if deliver:
                self.receive(msg)
            # Trigger post-receive tasks if any

    def is_connected(self, other):
        """Check for connection with any particular peer object."""
        return other in self.connections

    def bootstrap_connect(self, other):
        """
        Create ad-hoc connection and send Hello
        other: peer object
        """
        cnx = Connection(self, other)
        cnx.send(Hello(self), connect=True)

    def connect(self, other):
        """
        Add peer to the connections and repeat the same with other peer
        other: peer object
        """
        if not self.is_connected(other):
            if self.logger:
                self.logger.info("%s: Connecting to %s", self.env.now, repr(other))
            self.connections[other] = Connection(self, other)
            # We create bilateral connection
            if not other.is_connected(self):
                other.connect(self)

    def disconnect(self, other):
        """
        Disconnect with previously connected peer
        other: peer object
        """
        if self.is_connected(other):
            if self.logger:
                self.logger.warning("%s: Breaking connection with %s", self.env.now, repr(other))
            del self.connections[other]
            if other.is_connected(self):
                other.disconnect(self)
            for cb in self.disconnect_callbacks:
                cb(self, other)

    def receive(self, msg):
        """
        Receive message, will trigger handlers on the message
        msg: message object
        """
        if self.online:
            msg_sender = msg.sender

            # Monitor the overhead of the message size
            now_sec = int(self.env.now / 1000)
            self.bytes_load[now_sec] = self.bytes_load.get(now_sec, 0) + msg.size
            self.msg_count_load[now_sec] = self.msg_count_load.get(now_sec, 0) + 1

            # Update peer connection data
            self.last_seen[msg_sender] = self.env.now

            if self.logger:
                self.logger.info("%s: Received message <%s> from %s", self.env.now, repr(msg), msg_sender)

            # Find the services that should be triggered
            services = set()
            for msg_type in self.mh_map:
                if isinstance(msg, msg_type):
                    services.update(self.mh_map[msg_type])

            if not services:
                if self.logger:
                    self.logger.error("No handler for the message %s", type(msg))
                raise Exception("No handler for the message ", type(msg), repr(self))
            else:
                for service_id in services:
                    self.handlers[service_id].handle_message(msg)

    def send(self, receiver, msg):
        """
        Send to a receiver peer in a fire-and-forget fashion.
        If receiver is not connected will raise and exception
        """
        # fire and forget
        if self.online:
            if receiver not in self.connections:
                if self.logger:
                    self.logger.error("%s: Sending message to a not connected peer %s",
                                      self.env.now, repr(receiver))
                raise Exception("Not connected")
            if self.logger:
                self.logger.info("%s: Sending message <%s> to %s", self.env.now, repr(msg), receiver)
            self.connections[receiver].send(msg)

    def _get_connections(self, exclude_bootstrap=True, except_set: set = None, except_type: set = None):
        """Yield connected peers, minus excluded peers/types (bootstrap excluded by default)."""
        if except_set is None:
            except_set = set()
        # Work on a copy: never mutate a set owned by the caller
        # (the original implementation leaked 'bootstrap' into the caller's set).
        except_type = set() if except_type is None else set(except_type)
        if exclude_bootstrap:
            except_type.add('bootstrap')
        conn_set = set(self.connections.keys()) - except_set
        return (p for p in conn_set if p.peer_type not in except_type)

    def gossip(self, msg, f, exclude_bootstrap=True, except_peers: set = None, except_type: set = None):
        """
        Send to f neighbours selected randomly
        msg: Message object
        f: the fanout parameter (number of peers to gossip to)
        exclude_bootstrap: Exclude bootstrap from gossip
        except_peers: connected peers to exclude from gossip
        except_type: exclude from gossip type of peers
        """
        if not self.online:
            return None
        gossip_set = list(self._get_connections(exclude_bootstrap, except_peers, except_type))
        selected = random.sample(gossip_set, min(f, len(gossip_set)))

        for other in selected:
            self.send(other, msg)

        return selected

    def broadcast(self, msg, exclude_bootstrap=True, except_set: set = None, except_type: set = None):
        """Send to all connected peers except given """
        for other in self._get_connections(exclude_bootstrap, except_set, except_type):
            self.send(other, msg)

    def add_service(self, service):
        """
        Add a service to the peer
        """
        serv_name = type(service).__name__
        if isinstance(service, BaseHandler):
            self.handlers[serv_name] = service
            for m in service.messages:
                self.mh_map.setdefault(m, set()).add(serv_name)
        if isinstance(service, BaseRunner):
            self.runners[serv_name] = service

    def start_all_runners(self):
        """Start all peer service runners."""
        for runner in self.runners.values():
            runner.start()

    def get_storage(self, storage_name):
        """Get storage used by the peer (None if unknown)."""
        return self.storage.get(storage_name)

    def store(self, storage_name, msg_id, msg):
        """Store message in peer storage; raises if the storage is unknown."""
        if storage_name not in self.storage:
            if self.logger:
                self.logger.error("No storage %s found", storage_name)
            # Bug fix: original mixed a '{}' placeholder with the '%' operator,
            # which raised TypeError instead of the intended message.
            raise Exception("No storage {} found".format(storage_name))
        self.storage[storage_name].add(msg_id, msg)

    def add_storage(self, storage_name, storage):
        """Add new storage for the peer."""
        self.storage[storage_name] = storage
class MainController(Behaviour):
    """Coordinates the stations, the pick-and-place robot and the AGV channel.

    Reads all configuration from the shared Blackboard and drives the
    rule-based system (RBS) that decides the next robot action.
    """

    def __init__(self, nname, agvcommchannel):
        """
        nname: behaviour name (forwarded to Behaviour)
        agvcommchannel: Store used to send movement orders to the AGV
        """
        super().__init__(nname)
        b = Blackboard()
        self.rbs = RuleBasedSystem(b.get('[RBS]maxattempts'))
        envy = b.get('enviro')
        self.accuracyParameter = b.get('[vision]accuracy')
        self.toagv = agvcommchannel
        # Robot and its private command/reply channel
        self.robotcomm = Store(envy)
        self.robot = Robot('robot', self.robotcomm)
        # Stations: input stations first, then output stations
        ninputs = b.get('[stations]inumber')
        noutputs = b.get('[stations]onumber')
        nspas = b.get('[stations]spa')  # read but currently unused — TODO confirm
        self.stations = dict()
        b.put('[Shared]packages', dict())
        b.put('[Shared]packages.dirtybit', dict())
        for sindex in range(ninputs):
            sname, stat = self.makeStation(sindex, envy)
            self.stations[sname] = (stat, StationDir.INPUT)
        for sindex in range(noutputs):
            sname, stat = self.makeStation(sindex + ninputs, envy)
            self.stations[sname] = (stat, StationDir.OUTPUT)

    def makeStation(self, index, envy):
        """Create station #index with its vision system; register shared state.

        Returns (name, station).
        """
        name = 'STAT_' + str(index)
        cin = Store(envy)
        cout = Store(envy)
        v = Vision('VIS_' + str(index), self.accuracyParameter)
        s = Station(name, cin, cout)
        s.setVision(v)
        Blackboard().get('[Shared]packages')[name] = None
        Blackboard().get('[Shared]packages.dirtybit')[name] = True
        return name, s

    def boot(self):
        """Issue an initial pallet-load order for every station."""
        for name in self.stations:
            self.load(name)

    def load(self, sname):
        """Ask the AGV to bring a pallet to station `sname`."""
        (stat, direction) = self.stations[sname]
        order = MovementOrder.INBD if (direction == StationDir.INPUT) else (
            MovementOrder.OUTD)
        msg = (order, stat.channelIN, stat.channelOUT)
        self.toagv.put(msg)

    def reload(self, sname):
        """Ask the AGV to replace the pallet at station `sname`."""
        (stat, direction) = self.stations[sname]
        order = MovementOrder.DRPI if (direction == StationDir.INPUT) else (
            MovementOrder.DRPO)
        msg = (order, stat.channelIN, stat.channelOUT)
        self.toagv.put(msg)

    def checkFullEmptyPallets(self):
        """Reload emptied input pallets and completed output pallets.

        A completed output pallet is also logged and recorded as a 'hit'.
        """
        for name in self.stations:
            stat, direction = self.stations[name]
            if stat.operative():
                p = stat.getPallet()
                flag = (direction == StationDir.INPUT) and (p.isEmpty())
                flagout = ((direction == StationDir.OUTPUT) and (p.isSatisfied()))
                flag = flag or flagout
                if flag == True:
                    stat.dirt()
                    if (flagout):
                        # NOTE: 'lenght' is the Pallet API's (misspelled) method name
                        self.log('vault;' + str(p.lenght()) + ';', 2)
                        Recorder().add('hit', p.lenght())
                    self.reload(name)

    def do(self):
        """One control cycle (simpy generator).

        Polls the RBS until it yields an action, handling reset requests
        (recorded as misses) along the way; then dispatches the action to the
        robot, waits for its reply and sweeps the stations for pallets to swap.
        """
        if (self.onrun == True):
            action = None
            while action is None:
                action, resetcondition, stationtoreset = self.rbs.computeNextStep(
                    self.stations)
                if action is None:
                    if resetcondition == False:
                        # Nothing feasible and nothing to reset: back off longer
                        yield self.env.timeout(10)
                    else:
                        # A station is stuck: mark it dirty, record the miss,
                        # and request a pallet replacement
                        stationtoreset.dirt()
                        p = stationtoreset.getPallet()
                        pname = stationtoreset.getName()
                        l = p.lenght()
                        self.log(
                            'MISS;' + stationtoreset.getName() + ',' + str(l) +
                            ';', 2)
                        Recorder().add('miss', l)
                        self.reload(pname)
                yield self.env.timeout(1)
            self.robotcomm.put(action)
            # Wait for the robot to acknowledge completion
            yield self.robotcomm.get()
            self.checkFullEmptyPallets()
 def __init__(self, env, origin_node, destination_node):
     """Link between two nodes; items in transit sit in an internal Store."""
     self.origin_node = origin_node
     self.destination_node = destination_node
     self.env = env
     # Buffer holding items travelling from origin to destination
     self.store = Store(env)
Beispiel #29
0
class Worker:
    """Simulated worker node.

    Tracks task assignments, which data objects are locally present, and the
    downloads needed to obtain missing task inputs. Two simpy processes drive
    it once run() is started: the main loop in run() executes ready tasks,
    and _download_process() fetches missing inputs from other workers.
    Runtime attributes (env, simulator, netmodel, ready_store, download_wakeup)
    are only set inside run().
    """

    # Priority bonus added to a download whose consuming task is already ready,
    # so inputs of runnable tasks are fetched before speculative ones.
    DOWNLOAD_PRIORITY_BOOST_FOR_READY_TASK = 100000

    def __init__(self, cpus=1, max_downloads=4, max_downloads_per_worker=2):
        self.cpus = cpus
        self.assignments = {}  # task -> assignment object
        self.ready_store = None  # Store of assignments with all inputs local (set in run)

        self.data = set()  # data objects present on this worker
        self.running_tasks = {}  # task -> RunningTask
        self.scheduled_downloads = {}  # object -> Download (pending or running)
        self.running_downloads = []

        self.free_cpus = cpus
        self.max_downloads = max_downloads  # global concurrent-download cap
        self.max_downloads_per_worker = max_downloads_per_worker  # per-source cap
        self.id = None

    def to_dict(self):
        """Serializable summary of this worker's static configuration."""
        return {"id": self.id, "cpus": self.cpus}

    def copy(self):
        """Fresh, unassigned worker with the same static configuration."""
        return Worker(cpus=self.cpus,
                      max_downloads=self.max_downloads,
                      max_downloads_per_worker=self.max_downloads_per_worker)

    def try_retract_task(self, task):
        """Try to take back a not-yet-running assignment of `task`.

        Returns False when the task is already running. Otherwise marks the
        assignment cancelled, releases scheduled downloads no other assignment
        needs, removes the assignment and returns True.
        """
        if task in self.running_tasks:
            logging.debug(
                "Retracting task %s from worker %s cancelled because task is running",
                task, self)
            return False

        logging.debug("Retracting task %s from worker %s", task, self)
        a = self.assignments[task]
        a.cancelled = True

        for inp in task.inputs:
            d = self.scheduled_downloads.get(inp)
            if d is None:
                continue
            # One fewer assignment needs this input
            d.consumer_count -= 1
            if d.consumer_count <= 0:
                logging.debug("Cancelling download of %s", inp)
                assert d.consumer_count == 0
                if d.source is None:  # is not running
                    assert d not in self.running_downloads
                    del self.scheduled_downloads[inp]

                # Wake the download process so it rebuilds its cached
                # candidate list (see `downloads` in _download_process)
                if not self.download_wakeup.triggered:
                    self.download_wakeup.succeed()

        del self.assignments[a.task]
        return True

    def assign_tasks(self, assignments):
        """Accept assignments for this worker and schedule missing-input downloads.

        Assignments whose inputs are all local are pushed to ready_store
        immediately. Raises when a task needs more cpus than this worker has.
        """
        runtime_state = self.simulator.runtime_state
        for assignment in assignments:
            assert assignment.worker == self
            # assert assignment not in self.assignments

            if assignment.task.cpus > self.cpus:
                raise Exception(
                    "Task {} allocated on worker with {} cpus".format(
                        assignment.task, self.cpus))

            self.assignments[assignment.task] = assignment
            need_inputs = 0
            for inp in assignment.task.inputs:
                if inp in self.data:
                    continue
                # Only inputs already placed somewhere can be downloaded now
                if runtime_state.object_info(inp).placing:
                    self._schedule_download(
                        assignment, inp,
                        runtime_state.task_info(assignment.task).is_ready)
                need_inputs += 1
            assignment.remaining_inputs_count = need_inputs
            if need_inputs == 0:
                self.ready_store.put(assignment)
            logger.info("Task %s scheduled on %s (%s ri)", assignment.task,
                        self, need_inputs)

    def update_tasks(self, updates):
        """Process (task, newly available object) pairs for assigned tasks.

        Zero-size objects are added directly; others get a download scheduled.
        """
        runtime_state = self.simulator.runtime_state
        for task, obj in updates:
            a = self.assignments[task]
            if obj not in self.data:
                if obj.size == 0:
                    # Nothing to transfer; mark as present right away
                    self._add_data(obj)
                else:
                    self._schedule_download(
                        a, obj,
                        runtime_state.task_info(task).is_ready)

    @property
    def assigned_tasks(self):
        """Iterator over the tasks currently assigned to this worker."""
        return iter(self.assignments)

    def _add_data(self, obj):
        """Record `obj` as locally present and mark consumers ready when complete.

        Raises when the object is already present.
        """
        if obj in self.data:
            raise Exception("Object {} is already on worker {}".format(
                obj, self))
        self.data.add(obj)
        for t in obj.consumers:
            a = self.assignments.get(t)
            if a is None:
                continue
            a.remaining_inputs_count -= 1
            if a.remaining_inputs_count <= 0:
                assert a.remaining_inputs_count == 0
                self.ready_store.put(a)

    def _schedule_download(self, assignment, obj, ready):
        """Schedule (or re-prioritize) a download of `obj` for `assignment`.

        `ready` boosts the priority so inputs of runnable tasks come first.
        Wakes the download process when its wakeup event is still pending.
        """
        priority = assignment.priority
        if ready:
            priority += self.DOWNLOAD_PRIORITY_BOOST_FOR_READY_TASK
        d = self.scheduled_downloads.get(obj)
        if d is None:
            logger.info("Worker %s: scheduled downloading %s, priority=%s",
                        self, obj, priority)
            assert obj not in self.data
            d = Download(obj, priority)
            self.scheduled_downloads[obj] = d
        else:
            # Already scheduled for another consumer; just raise its priority
            d.update_priority(priority)
        d.consumer_count += 1
        if not self.download_wakeup.triggered:
            self.download_wakeup.succeed()

    def _download_process(self):
        """simpy process that starts and completes input downloads.

        events[0] is always the wakeup event (new/changed downloads); the
        remaining events are in-flight transfers. `downloads` caches the
        priority-sorted list of startable downloads; it is invalidated
        (set to None) whenever the wakeup fires.
        """
        events = [self.download_wakeup]
        env = self.env
        runtime_state = self.simulator.runtime_state

        while True:
            finished = yield env.any_of(events)
            for event in finished.keys():
                if event == events[0]:
                    # Wakeup fired: arm a fresh event and drop the cached list
                    self.download_wakeup = Event(self.simulator.env)
                    events[0] = self.download_wakeup
                    downloads = None
                    continue
                # A transfer finished: record the data and notify the simulator
                events.remove(event)
                download = event.value
                self._add_data(download.output)
                self.running_downloads.remove(download)
                del self.scheduled_downloads[download.output]

                self.simulator.fetch_finished(self, download.source,
                                              download.output)

            if len(self.running_downloads) < self.max_downloads:
                # Re-sort every pass: priorities may have changed in the background

                if downloads is None:
                    downloads = list(
                        o for o in self.scheduled_downloads.values()
                        if o not in self.running_downloads)
                    downloads.sort(key=lambda d: d.priority, reverse=True)

                for d in downloads[:]:
                    # Enforce the per-source-worker concurrent-download cap
                    count = 0
                    worker = runtime_state.object_info(d.output).placing[0]
                    for rd in self.running_downloads:
                        if worker == rd.source:
                            count += 1
                    if count >= self.max_downloads_per_worker:
                        continue
                    downloads.remove(d)
                    assert d.start_time is None
                    d.start_time = self.env.now
                    d.source = worker
                    self.running_downloads.append(d)
                    event = self.netmodel.download(worker, self, d.output.size,
                                                   d)
                    events.append(event)
                    self.simulator.add_trace_event(
                        FetchStartTraceEvent(self.env.now, self, worker,
                                             d.output))
                    if len(self.running_downloads) >= self.max_downloads:
                        break

    def run(self, env, simulator, netmodel):
        """Main simpy process: start ready tasks and handle their completion.

        events[0] is always the next ready_store.get(); other events are
        timeouts representing running tasks (event.value is the assignment).
        """
        self.env = env
        self.simulator = simulator
        self.netmodel = netmodel
        self.ready_store = Store(env)
        self.download_wakeup = Event(self.simulator.env)

        self.free_cpus = self.cpus
        env.process(self._download_process())

        prepared_assignments = []
        events = [self.ready_store.get()]

        while True:
            finished = yield env.any_of(events)
            for event in finished.keys():
                if event == events[0]:
                    # A new assignment became ready; keep the queue sorted by priority
                    events[0] = self.ready_store.get()
                    assignment = event.value
                    if assignment.cancelled:
                        continue
                    prepared_assignments.append(assignment)
                    prepared_assignments.sort(key=lambda a: a.priority,
                                              reverse=True)
                    continue

                # A running task finished: release cpus and publish outputs
                assignment = event.value
                task = assignment.task
                self.free_cpus += task.cpus
                assert not assignment.cancelled
                del self.assignments[assignment.task]
                events.remove(event)
                del self.running_tasks[task]
                simulator.add_trace_event(
                    TaskEndTraceEvent(self.env.now, self, task))
                for output in task.outputs:
                    self._add_data(output)
                simulator.on_task_finished(self, task)

            # Start as many prepared assignments as fit; `block` prevents
            # lower-priority tasks from starving a blocked higher-priority one
            block = float("-inf")
            for assignment in prepared_assignments[:]:
                if assignment.priority < block:
                    continue
                task = assignment.task
                if task.cpus <= self.free_cpus:
                    prepared_assignments.remove(assignment)
                    if assignment.cancelled:
                        continue
                    self.free_cpus -= task.cpus
                    self.running_tasks[task] = RunningTask(task, self.env.now)
                    simulator.add_trace_event(
                        TaskStartTraceEvent(self.env.now, self, task))
                    events.append(env.timeout(task.duration, assignment))
                    self.simulator.on_task_start(self, assignment.task)
                else:
                    block = max(block, assignment.block)

    def __repr__(self):
        return "<Worker {}>".format(self.id)
Beispiel #30
0
class Worker:
    """Minimal worker model: fetches missing task inputs, then runs tasks serially."""

    def __init__(self):
        self.assigned_tasks = []
        self.ready_tasks = None  # Store, created in run()
        self.data = set()        # task outputs present on this worker
        self.downloads = {}      # task -> in-flight transfer process

    def _download(self, worker, task):
        """Fetch `task`'s output from the remote `worker`; return the transfer process."""
        assert worker != self

        def _transfer():
            yield self.connector.download(worker, self, task.size)
            self.simulator.add_trace_event(
                FetchEndTraceEvent(self.env.now, self, worker, task))
            del self.downloads[task]
            self.data.add(task)

        self.simulator.add_trace_event(
            FetchStartTraceEvent(self.env.now, self, worker, task))
        process = self.env.process(_transfer())
        self.downloads[task] = process
        return process

    def assign_task(self, task):
        """Accept a new task and begin fetching its missing inputs."""
        self.assigned_tasks.append(task)
        self._init_downloads(task)

    def update_task(self, task):
        """Re-check a known task, e.g. after one of its inputs was produced."""
        assert task in self.assigned_tasks
        self._init_downloads(task)

    def _init_downloads(self, task):
        """Start transfers for missing inputs; mark the task ready when all arrive."""
        pending = []
        missing_producer = False
        for obj in task.inputs:
            if obj in self.data:
                continue
            transfer = self.downloads.get(obj)
            if transfer is None:
                if obj.info.is_finished:
                    source = obj.info.assigned_workers[0]
                    transfer = self._download(source, obj)
                else:
                    # Producer hasn't finished yet; the task cannot become ready now
                    missing_producer = True
            pending.append(transfer)

        if missing_producer:
            return

        if not pending:
            # Everything already local: ready immediately
            self.ready_tasks.put(task)
            return

        def _when_done():
            yield self.env.all_of(pending)
            self.ready_tasks.put(task)

        self.env.process(_when_done())

    def run(self, env, simulator, connector):
        """Main simpy process: execute ready tasks one at a time."""
        self.env = env
        self.simulator = simulator
        self.connector = connector
        self.ready_tasks = Store(env)

        while True:
            task = yield self.ready_tasks.get()
            simulator.add_trace_event(
                TaskStartTraceEvent(self.env.now, self, task))
            yield env.timeout(task.duration)
            simulator.add_trace_event(
                TaskEndTraceEvent(self.env.now, self, task))
            self.assigned_tasks.remove(task)
            self.data.add(task)
            simulator.on_task_finished(self, task)

    def __repr__(self):
        return f"<Worker {id(self)}>"