Example #1
 def remove_task(self, task_id):
     logger.debug('Task %s removed', task_id)
     data_size = self.tasks[task_id].get_data_size()
     self.length -= data_size
     if self.app_type:
         # application queue: use this queue's own application type
         self.cpu_needs -= data_size * applications.get_info(self.app_type)
     else:
         # not an application queue: use the task's own application type
         self.cpu_needs -= data_size * applications.get_info(
             self.tasks[task_id].app_type)
     del self.tasks[task_id]
Example #2
 def abort_task(self, task_id):
     logger.info('Task %s aborted', task_id)
     # remove_task already subtracts the task's data size from self.length
     # and self.cpu_needs and deletes the entry, so the bookkeeping is done only once
     self.remove_task(task_id)
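Both remove_task and abort_task keep three pieces of bookkeeping in sync: the tasks dict, the queued size in length, and the required CPU cycles in cpu_needs. Below is a minimal, self-contained sketch of that pattern; the SimpleTask class, the plain-dict queue, and the 300 cycles-per-bit workload are illustrative assumptions, not the repository's actual types.

    # Illustrative sketch of the remove_task bookkeeping; names and numbers
    # below are assumptions for demonstration only.
    class SimpleTask:
        def __init__(self, data_size, app_type):
            self.data_size = data_size
            self.app_type = app_type

    def remove_from_queue(queue, task_id, workload_of):
        task = queue['tasks'].pop(task_id)         # drop the task object
        queue['length'] -= task.data_size          # fewer queued bits
        queue['cpu_needs'] -= task.data_size * workload_of(task.app_type)  # fewer required cycles

    queue = {'tasks': {1: SimpleTask(4096, app_type=2)},
             'length': 4096,
             'cpu_needs': 4096 * 300}
    remove_from_queue(queue, 1, workload_of=lambda app: 300)  # assume 300 cycles per bit
    assert queue['length'] == 0 and queue['cpu_needs'] == 0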
Example #3
 def _get_obs(self, time, estimate_interval=100, involve_capability=False, scale=1):
     queue_estimated_arrivals = np.zeros(3)
     queue_arrivals = np.zeros(3)
     queue_lengths = np.zeros(3)
     app_info = np.zeros(3)
     cpu_used = np.zeros(3)
     for app_type, queue in self.queue_list.items():
         # app_type values are 1-based, so shift to 0-based array indices
         idx = app_type - 1
         queue_estimated_arrivals[idx] = queue.mean_arrival(
             time, estimate_interval, scale=scale)
         queue_arrivals[idx] = queue.last_arrival(time, scale=scale)
         queue_lengths[idx] = queue.get_length(scale=scale)
         app_info[idx] = applications.get_info(app_type, "workload") / KB
         cpu_used[idx] = self.cpu_used[app_type] / self.computational_capability
     # TODO: the channel state should also be part of the observation;
     # for now it is obtained in the main loop instead.
     if involve_capability:
         return list(queue_lengths) + [self.computational_capability / GHZ]
     return (list(queue_estimated_arrivals) + list(queue_arrivals) +
             list(queue_lengths) + list(cpu_used) + list(app_info))
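The default return value of _get_obs is a flat list that concatenates five 3-element blocks, one entry per application type (types 1..3 map to indices 0..2). A small illustration of that layout with made-up values:

    import numpy as np

    # Made-up values, one entry per application type (index 0..2):
    est_arrivals = np.array([0.2, 0.0, 0.5])  # mean arrivals over the estimate interval
    arrivals     = np.array([0.1, 0.0, 0.3])  # arrivals in the last step
    lengths      = np.array([0.4, 0.0, 0.7])  # current queue lengths (scaled)
    cpu_used     = np.array([0.3, 0.0, 0.2])  # used cycles / computational capability
    app_info     = np.array([1.5, 2.0, 0.5])  # per-bit workload, divided by KB

    obs = (list(est_arrivals) + list(arrivals) + list(lengths) +
           list(cpu_used) + list(app_info))
    assert len(obs) == 15
    # With involve_capability=True the method instead returns only the three
    # queue lengths plus computational_capability / GHZ (4 values).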
Example #4
 def arrived(self, task, arrival_timestamp):
     task_id = task.get_uuid()
     task_length = task.data_size
     self.arrival_size_buffer.add((arrival_timestamp, task_length))
     # admit the task only if it fits within the queue's maximum length
     if self.get_length() + task_length <= self.max_length:
         self.tasks[task_id] = task
         self.length += task_length
         if self.app_type:
             # application queue: use this queue's own application type
             self.cpu_needs += task_length * applications.get_info(
                 self.app_type)
         else:
             # not an application queue: use the task's own application type
             self.cpu_needs += task_length * applications.get_info(
                 self.tasks[task_id].app_type)
         # an accepted arrival relaxes the overflow counter (floor at 0)
         self.exploded = max(0, self.exploded - 1)
         return True
     else:
         # queue overflow: drop the task and raise the overflow counter (cap at 10)
         del task
         self.exploded = min(10, self.exploded + 1)
         return False
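arrived is an admission check: the task is queued only if the total length stays under max_length, and the exploded counter, clamped to the range 0..10, rises on drops and decays on accepted arrivals. A toy, standalone sketch of that counter behaviour:

    # Illustrative sketch of the clamped overflow counter used in arrived().
    def update_exploded(exploded, accepted):
        if accepted:                        # task fit into the queue
            return max(0, exploded - 1)     # pressure decays, floor at 0
        return min(10, exploded + 1)        # task dropped, pressure grows, cap at 10

    exploded = 0
    for accepted in [False, False, True, False]:
        exploded = update_exploded(exploded, accepted)
    assert exploded == 2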
Example #5
    def served(self, resource, type=1, silence=True):
        if not silence:
            print(
                "########### compute or offload : inside of task_queue.served ##########"
            )
        if resource == 0:
            logger.info('No data to be served')
            # keep the (resource, offloaded_tasks) return shape of the normal path
            return 0, {}
        else:
            task_to_remove = []
            offloaded_tasks = {}
            served = 0
            # when this is an application queue and type=1 (served by itself)
            if (self.app_type and type):
                # resource unit : cycles --> bits
                to_be_served = int(
                    resource /
                    applications.get_info(self.app_type, 'workload'))
            # when this is not an application queue, or type=0
            else:
                # to_be_served unit: bits
                # if not app_type: resource(to_be_served) unit: cycles
                # if not type: resource(to_be_served) unit: bits
                to_be_served = resource
            if not silence:
                print("data size to be offloaded : {}".format(to_be_served))
            for task_id, task_ob in self.tasks.items():
                task_size = task_ob.data_size
                workload = applications.get_info(task_ob.get_app_type(),
                                                 "workload")
                # if not app_type: task_size unit: bits --> cycles
                if (type and not self.app_type):
                    task_size *= workload
                if not silence: print("task_size : {}".format(task_size))
                if to_be_served >= task_size:
                    if not silence:
                        print("data size can be served >= task_size case")
                    if not type:
                        offloaded_tasks[task_id] = task_ob
                    task_to_remove.append(task_id)
                    to_be_served -= task_size
                    served += task_size
                    # if not silence: print("remained queue_length of type{} : {}".format(self.app_type, self.length))
                elif to_be_served > 0:
                    if not silence:
                        print("data size to be offloaded < task_size case")
                    # if this is an application queue
                    if self.app_type:
                        task_size -= to_be_served
                        # offloading
                        if not type:
                            # task_ob data size is adjusted in make_child_task function
                            new_task = task_ob.make_child_task(to_be_served)
                            # print("old_task uuid\t", task_id)
                            # print("new_task uuid\t", new_task.get_uuid())
                            offloaded_tasks[new_task.get_uuid()] = new_task
                        # computation by itself
                        else:
                            self.tasks[task_id].data_size = task_size
                        self.length -= to_be_served
                        self.cpu_needs -= to_be_served * applications.get_info(
                            self.app_type)
                    # if this is not an application queue
                    # if not app_type: task_size unit: bits --> cycles
                    else:
                        # if not app_type: task_size unit: cycles --> bits
                        task_size /= workload
                        # if not app_type: to_be_served unit: cycles --> bits
                        to_be_served = int(to_be_served / workload)
                        task_size -= to_be_served
                        self.tasks[task_id].data_size = task_size
                        self.length -= to_be_served
                        self.cpu_needs -= to_be_served * workload
                        # if not app_type: to_be_served unit: bit --> cycles
                        to_be_served *= workload

                    served += to_be_served
                    # if not silence: print("remained queue_length of type{} : {}".format(self.app_type, self.length))
                    to_be_served = 0
                else:
                    if not silence and not type:
                        print(
                            'All tasks are done in task_queue.served(type=0) - offloaded'
                        )
                    if not silence and type:
                        print(
                            'All tasks are done in task_queue.served(type=1) - computed'
                        )
                    break
            if type and self.app_type:
                resource = served * applications.get_info(
                    self.app_type, 'workload')
            else:
                resource = served
            self.remove_multiple_tasks(task_to_remove)
            self.get_length()
            if not silence:
                print("########### task_queue.served ends ###########")
            return resource, offloaded_tasks
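served drains tasks in insertion order, possibly splitting the last one, and is used both for local computation (type=1, resource in CPU cycles) and for offloading (type=0, resource in bits). For an application queue computing locally, the unit bookkeeping amounts to converting the cycle budget into bits, serving greedily, and converting the served amount back into cycles. A standalone sketch of that conversion, where the 300 cycles-per-bit workload and the queue contents are assumed values:

    # Illustrative unit bookkeeping behind served(type=1) on an application queue.
    workload = 300                      # cycles per bit (assumed value)
    resource_cycles = 2_000_000         # CPU budget handed to served()

    to_be_served = int(resource_cycles / workload)  # cycles --> bits that can be processed
    queued_bits = [4096, 2048, 1024]                # task data sizes in arrival order
    served_bits = 0
    for size in queued_bits:
        take = min(size, to_be_served)  # whole task if it fits, otherwise a partial slice
        served_bits += take
        to_be_served -= take
        if to_be_served == 0:
            break
    used_cycles = served_bits * workload            # bits --> cycles reported to the caller
    assert used_cycles <= resource_cycles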