def remove_task_of_slice(self, k, task):
    """Try to remove a task from the buffer.

    Returns the task if it was indeed on this buffer, and gives back the
    memory and cpu units the task might have held (the task only holds them
    while it is processing).

    Parameters:
        k: int - the slice index
        task: Task - task to be removed from buffer in slice k
    """
    if k < 0 or k >= self.max_k or not isinstance(task, Task):
        raise InvalidValueError(
            "Invalid arguments for remove_task_of_slice")
    # removes and returns a task if it is in the buffer
    try:
        self.buffers[k].remove(task)
        if task.is_processing():
            self._being_processed[k] -= 1
        # adds to the number of tasks leaving the buffer
        self._dealt_tasks[k] += 1
        # unit counts are zero if the task is not processing
        self._avail_ram_units += task._memory_units
        self._avail_cpu_units += task._cpu_units
    except ValueError:
        # deque.remove raises ValueError when the task is not in this buffer
        return None
    return task
def start_transmitting(self, bw):
    """Uses part of the available bandwidth for a transmission"""
    if self._bandwidth < bw or bw < 0:
        raise InvalidValueError(
            "Cannot transmit with more bandwidth than is available")
    self._bandwidth -= bw
def being_processed_on_slice(self, k):
    """Retrieves the number of tasks being processed in the buffer

    Parameters:
        k: int - the slice index
    """
    if k < 0 or k >= self.max_k:
        raise InvalidValueError("Invalid slice number",
                                "[0," + str(self.max_k) + "[")
    return self._being_processed[k]
def slice_buffer_len(self, k):
    """Retrieves the length of a buffer

    Parameters:
        k: int - the slice index
    """
    if k < 0 or k >= self.max_k:
        raise InvalidValueError("Invalid slice number",
                                "[0," + str(self.max_k) + "[")
    return len(self.buffers[k])
def __init__(self, time, classtype=None):
    """
    Parameters:
        time: float - time of the event execution
        classtype: str = None - the class type of the subclass
    """
    if time < 0:
        raise InvalidValueError(
            "an event execution time cannot be negative")
    self.time = time
    self.classtype = classtype
def __init__(self, time, node, k, task):
    """
    Parameters:
        time: float - the current simulation time
        node: Fog_node - an instance of the class that represents a fog node hardware structure
        k: int - the slice in which the task is arriving
        task: Task - the task that is arriving at the said slice and node
    """
    super(Task_arrival, self).__init__(time, "Task_arrival")
    self.node = node
    self.k = k
    self.task = task
    # cannot receive a task on an invalid slice
    if (not isinstance(node, Fog_node) or k >= node.max_k or k < 0
            or not isinstance(task, Task)):
        raise InvalidValueError(
            "Verify arguments of Task_arrival creation")
    # cannot receive future tasks
    if time < task.task_time():
        raise InvalidValueError("Cannot receive a task from the future")
def point_to_point_transmission_rate(d, bw=cfg.NODE_BANDWIDTH):
    """ Calculates the transmission rate over a noisy channel between two nodes

    Parameters:
        d: float - distance between the two nodes
        bw: float - bandwidth available for this transmission (Hz)
    """
    if bw <= 0:
        raise InvalidValueError("Available bandwidth has to be positive",
                                "]0,+inf[")
    g = channel_gain(d, cfg.PATH_LOSS_CONSTANT, cfg.PATH_LOSS_EXPONENT)
    p_mw = db_to_linear(cfg.TRANSMISSION_POWER)
    n0_mw = db_to_linear(cfg.THERMAL_NOISE_DENSITY)
    return shannon_hartley(g, p_mw, bw, n0_mw)
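# Usage sketch (illustrative, not part of the original module): the helper
# chains channel_gain, db_to_linear and shannon_hartley, so a single call is
# enough to obtain a bit rate for a given distance. The 100 m distance is an
# arbitrary example value; bandwidth and power come from the cfg defaults.
def _example_point_to_point_rate():
    rate = point_to_point_transmission_rate(d=100.0)  # bit/s, cfg.NODE_BANDWIDTH
    return rate / 1e6  # convert to Mbit/s for readability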
def __init__(self, time, node, bw):
    """
    Parameters:
        (super) time: float - the time in which the event will run
        node: Fog_node - the fog node which is transmitting
        bw: int - the bandwidth used for this transmission
    """
    super(Stop_transmitting, self).__init__(time, "Stop_transmitting")
    self.node = node
    self.bw = bw
    if not isinstance(node, Fog_node):
        raise InvalidValueError(
            "Verify arguments of Stop_transmitting creation")
def add_task_on_slice(self, k, task):
    """Tries to add a task to the buffer of slice k; if the buffer is
    already full, the task is not queued and is returned instead.

    Parameters:
        k: int - the slice index
        task: Task - task to be added to buffer in slice k
    """
    if k < 0 or k >= self.max_k or not isinstance(task, Task):
        raise InvalidValueError("Invalid arguments for add_task_on_slice")
    # returns the task if the slice buffer is full
    if len(self.buffers[k]) == self.buffers[k].maxlen:
        return task
    self.buffers[k].append(task)
    return None
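# Usage sketch (hypothetical node and task instances): when the slice buffer
# is already at cfg.MAX_QUEUE the offered task is returned so the caller can
# count it as discarded; otherwise None is returned and the task is queued.
def _example_add_task(node, task):
    overflow = node.add_task_on_slice(0, task)
    return overflow  # None on success, the rejected task on overflow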
def channel_gain(distance, linear_coefficient, exponential_coefficient):
    """Returns a medium-distance (>1m) channel gain in [0,1], given the
    distance between two nodes and two coefficients, both greater than zero.

    channel gain = linear_coefficient * distance ^ (-exponential_coefficient)

    Parameters:
        distance: float - channel transmission distance
        linear_coefficient: float - linear coefficient greater than zero
        exponential_coefficient: float - exponential coefficient greater than zero
    """
    if distance <= 0 or linear_coefficient <= 0 or exponential_coefficient <= 0:
        raise InvalidValueError(
            "channel gain function arguments must be positive")
    return linear_coefficient * distance**(-exponential_coefficient)
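# Worked example (illustrative coefficients, not taken from cfg): with a
# linear coefficient of 0.001 and an exponential coefficient of 4, a 100 m
# link has gain 0.001 * 100**-4 = 1e-11, i.e. the received power is eleven
# orders of magnitude below the transmitted power.
def _example_channel_gain():
    return channel_gain(100.0, 0.001, 4.0)  # == 1e-11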
def task_communication_time(packet_size_, bit_rate):
    """ Calculates the transmission/communication time of a task given its
    packet size and a bit rate

    Parameters:
        packet_size_: int - a task packet size in bits
        bit_rate: float - the number of bits per second transmitted

    Exceptions:
        InvalidValueError - raised when the argument values are incorrect
    """
    if bit_rate <= 0 or packet_size_ <= 0:
        raise InvalidValueError(
            "task_communication_time requires a positive packet size and bit rate"
        )
    # communication time = packet size / bit rate
    return float(packet_size_) / float(bit_rate)
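# Worked example (illustrative values): a 5000-bit packet sent over a
# 1 Mbit/s link takes 5000 / 1e6 = 0.005 s, i.e. 5 ms of communication time.
def _example_task_communication_time():
    return task_communication_time(5000, 1e6)  # == 0.005 seconds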
def stop_processing_in_slice(self, k, task, time):
    """Try to stop processing a task in slice k at a given time.

    Parameters:
        k: int - the slice index
        task: Task - the task to stop processing
        time: float - current simulation time
    """
    if k < 0 or k >= self.max_k or not isinstance(task, Task):
        raise InvalidValueError(
            "Invalid arguments for stop_processing_in_slice")
    # only if the task is there and actually processing
    if task in self.buffers[k] and task.is_processing():
        self._being_processed[k] -= 1
        self._avail_ram_units += task._memory_units
        self._avail_cpu_units += task._cpu_units
        task.stop_processing(time)
def start_processing(self, cpu_units, memory_units, start_time):
    """Sets up the variables indicating that the task has started processing

    Parameters:
        cpu_units: int - the number of cpu units from the fog node attributed to this task
        memory_units: int - the number of memory units from the fog node attributed to this task
        start_time: float - the starting time of the processing
    """
    if cpu_units < 0 or memory_units < 0 or start_time < self._timestamp:
        raise InvalidValueError(
            "Task start_processing arguments do not meet requirements")
    self._processing = True
    self._cpu_units = cpu_units
    self._memory_units = memory_units
    self._started_processing = start_time
    if self._expected_delay == -1:
        # only set the expected delay the first time
        self._expected_delay = task_processing_time(self)
def __init__(self, time, node, k, w):
    """
    Parameters:
        (super) time: float - the time in which the processing will start
        node: Fog_node - the fog node in which the tasks will process
        k: int - the slice in which the tasks are processing
        w: int - the number of tasks that will be attempted to start processing
    """
    super(Start_processing, self).__init__(time, "Start_processing")
    self.node = node
    self.k = k
    self.w = w
    if not isinstance(node, Fog_node) or k >= node.max_k or k < 0 or w <= 0:
        raise InvalidValueError(
            "Verify arguments of Start_processing creation")
def pop_task_to_send(self, k, time):
    """Tries to pop the last received task in slice k to send it. Only valid
    if the task was received within this timestep.

    Parameters:
        k: int - the slice index
        time: float - current simulation time
    """
    if k < 0 or k >= self.max_k:
        raise InvalidValueError("Invalid slice number",
                                "[0," + str(self.max_k) + "[")
    # only works if there is a task in the buffer
    if len(self.buffers[k]) == 0:
        return None
    # shouldn't pop a task that is processing
    if self.buffers[k][-1].is_processing():
        return None
    # and should only offload a task arriving in this timestep
    if self.buffers[k][-1]._timestamp != time:
        return None
    return self.buffers[k].pop()
def __init__(self,
             timestamp,
             packet_size=cfg.PACKET_SIZE,
             delay_constraint=10,
             cpu_demand=400,
             ram_demand=400,
             task_type=None):
    """
    Parameters:
        timestamp - the timestamp of task creation
        packet_size = PACKET_SIZE - packet size in bits of a task
        delay_constraint: int = 10 - the delay constraint of a task in milliseconds
        cpu_demand: int = 400 - the computing demand in cycles/bit
        ram_demand: int = 400 - the ram demand of a task in MB
        task_type = None - the three aforementioned attributes, given as a list instead

    Exceptions:
        InvalidValueError - raised when any of the arguments is negative
    """
    super(Task, self).__init__()
    # check arguments
    if (delay_constraint < 0 or cpu_demand < 0 or ram_demand < 0
            or packet_size < 0 or timestamp < 0):
        raise InvalidValueError(
            "No arguments on Task object can be negative")
    self._timestamp = timestamp
    self.packet_size = packet_size
    # must either have a task_type list or the individual attributes
    if task_type is not None and len(task_type) == 3:  # BUG: task_type can be "hacked"
        self.delay_constraint = task_type[0]
        self.cpu_demand = task_type[1]
        self.ram_demand = task_type[2]
    else:
        self.delay_constraint = delay_constraint
        self.cpu_demand = cpu_demand
        self.ram_demand = ram_demand
    self._processing = False
    self._memory_units = 0
    self._cpu_units = 0
    # keep track of delay
    self._total_delay = -1
    self._started_processing = -1
    self._expected_delay = -1
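# Construction sketch (illustrative values): a task can be created either
# with explicit demands or with a task_type list holding
# [delay_constraint, cpu_demand, ram_demand] in that order.
def _example_task_creation():
    explicit = Task(timestamp=0.0, delay_constraint=10, cpu_demand=400,
                    ram_demand=400)
    from_type = Task(timestamp=0.0, task_type=[10, 400, 400])
    return explicit, from_type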
def shannon_hartley(gain, power, bandwidth, noise_density):
    """Returns a channel bitrate calculated by the Shannon-Hartley theorem

    bitrate = bandwidth * log2(1 + gain * power / (bandwidth * noise_density))

    Parameters:
        gain: float - channel gain, value in [0,1]
        power: float - transmission power in linear units (mW)
        bandwidth: float - transmission bandwidth of this noisy channel (Hz)
        noise_density: float - noise density in linear units (mW/Hz)
    """
    if bandwidth <= 0 or power <= 0 or gain <= 0 or gain > 1 or noise_density <= 0:
        raise InvalidValueError(
            "shannon_hartley function arguments must be positive and channel gain in [0,1]"
        )
    return float(bandwidth) * np.log2(1 + (
        (float(gain) * float(power)) /
        (float(bandwidth) * float(noise_density) + eps)))
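# Worked example (illustrative values, not taken from cfg): a 1 MHz channel
# with gain 1e-11, 1000 mW (30 dBm) transmit power and a noise density of
# 4e-18 mW/Hz gives an SNR of 1e-11 * 1000 / (1e6 * 4e-18) = 2500, so the
# bit rate is roughly 1e6 * log2(2501), about 11.3 Mbit/s.
def _example_shannon_hartley():
    return shannon_hartley(gain=1e-11, power=1000.0, bandwidth=1e6,
                           noise_density=4e-18)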
def __init__(self, time, node, k, task):
    """
    Parameters:
        (super) time: float - the time of the event execution
        node: Fog_node - the fog node in which the task is in the buffer
        k: int - the slice in which the task is currently placed
        task: Task - the task that had its delay constraint unmet
    """
    super(Discard_task, self).__init__(time, "Discard_task")
    self.node = node
    self.k = k
    self.task = task
    if (not isinstance(node, Fog_node) or k >= node.max_k or k < 0
            or not isinstance(task, Task)):
        raise InvalidValueError(
            "Verify arguments of Discard_task creation")
def __init__(self, time, node, k, task):
    """
    Parameters:
        (super) time: float - the time in which the processing will stop
        node: Fog_node - the fog node in which the task is processing
        k: int - the slice in which the task is processing
        task: Task - the task that will be stopped
    """
    super(Stop_processing, self).__init__(time, "Stop_processing")
    self.node = node
    self.k = k
    self.task = task
    if (not isinstance(node, Fog_node) or k >= node.max_k or k < 0
            or not isinstance(task, Task)):
        raise InvalidValueError(
            "Verify arguments of Stop_processing creation")
def start_processing_in_slice(self, k, w, time):
    """Try to start processing w tasks; any task that has already exceeded
    its time limit is discarded instead.

    On slice k, starting at time, try to queue w tasks for processing. The
    number that actually starts processing depends on the bottleneck (cpu or
    ram). If a task that would start processing has already exceeded its
    delay constraint, it is discarded instead.

    Parameters:
        k: int - the slice index
        w: int - number of tasks to attempt to process
        time: float - current simulation time
    """
    if k < 0 or k >= self.max_k or w <= 0 or time < 0:
        raise InvalidValueError(
            "Invalid arguments for start_processing_in_slice")
    under_processing = []
    discarded = []
    # only process if there is a task on the buffer
    for task in self.buffers[k]:
        # only process if there are cores, memory and an action request w left
        if (not task.is_processing() and w > 0 and self._avail_cpu_units > 0
                and self._avail_ram_units >= np.ceil(task.ram_demand / cfg.RAM_UNIT)):
            # if the task has already exceeded its constraint, discard it and move on
            if task.exceeded_constraint(time):
                discarded.append(task)
                continue
            # one cpu unit per task and the ram demand it requires
            n_cpu_units = 1
            n_memory_units = np.ceil(task.ram_demand / cfg.RAM_UNIT)
            # take them from the available pool
            self._avail_cpu_units -= n_cpu_units
            self._avail_ram_units -= n_memory_units
            # then start the processing
            task.start_processing(n_cpu_units, n_memory_units, time)
            under_processing.append(task)
            # reduce the number of tasks we will still allocate
            w -= 1
            self._being_processed[k] += 1
    return under_processing, discarded
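# Usage sketch (hypothetical node instance): attempt to start two tasks on
# slice 0 at simulation time t; the method returns the tasks that actually
# started processing and the ones discarded for exceeding their constraint.
def _example_start_processing(node, t):
    under_processing, discarded = node.start_processing_in_slice(k=0, w=2,
                                                                 time=t)
    return under_processing, discarded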
def __init__(self, time, node, k, destination, concurr):
    """
    Parameters:
        (super) time: float - the time of the event execution
        node: Fog_node - the fog node in which the task is arriving
        k: int - the slice in which the task is arriving
        destination: Fog_node - the node to which the task is going to be offloaded
        concurr: int - number of concurrent offloads happening in this node
    """
    super(Offload_task, self).__init__(time, "Offload_task")
    self.node = node
    self.k = k
    self.destination = destination
    self.concurr = concurr
    if (not isinstance(node, Fog_node) or k >= node.max_k or k < 0
            or not isinstance(destination, Fog_node) or concurr < 1):
        raise InvalidValueError(
            "Verify arguments of Offload_task creation")
def stop_processing(self, finish_time):
    """Stops task processing and verifies if the task has completed its
    processing. Keeps track of the processing time left in case it was a
    premature interruption.

    Parameters:
        finish_time: float - the precise time when it finished processing in the fog node
    """
    if finish_time < self._timestamp:
        raise InvalidValueError("Task cannot stop before creation")
    if self._processing:
        self._processing = False
        self._cpu_units = 0
        self._memory_units = 0
        # if it finished the whole processing
        if round(finish_time - self._started_processing, 5) == round(
                self._expected_delay, 5):
            self._total_delay = finish_time - self._timestamp
            self._expected_delay = 0
        # else just keep track of the new expected delay
        else:
            self._expected_delay -= finish_time - self._started_processing
def __init__(self, index, x, y, cpu_frequency, ram_size, number_of_slices):
    """
    Parameters:
        index: int - identifier of the fog node in a list
        x: float - placement alongside the X-axis
        y: float - placement alongside the Y-axis
        cpu_frequency: int - cpu frequency in GHz
        ram_size: int - the size of the RAM in MB
        number_of_slices: int - number of software defined slices in the fog

    Exceptions:
        InvalidValueError - raised when any of the arguments is negative
    """
    super(Fog_node, self).__init__()
    # check arguments
    if (index < 0 or x < 0 or y < 0 or cpu_frequency < 0 or ram_size < 0
            or number_of_slices < 0):
        raise InvalidValueError(
            "No arguments on Fog_node object can be negative")
    self.index = index
    self.name = "node_" + str(index)
    self.x = x
    self.y = y
    self._distances = {}
    self._bandwidth = cfg.NODE_BANDWIDTH
    self.cpu_frequency = cpu_frequency
    self.ram_size = ram_size
    self._avail_cpu_units = cpu_frequency / cfg.CPU_UNIT
    self._avail_ram_units = int(ram_size / cfg.RAM_UNIT)
    self.max_k = number_of_slices
    self.buffers = [
        deque(maxlen=cfg.MAX_QUEUE) for _ in range(number_of_slices)
    ]
    self._dealt_tasks = np.zeros(number_of_slices, dtype=np.uint64)
    self._total_time_intervals = 0
    self._service_rate = np.zeros(number_of_slices, dtype=np.float32)
    self._being_processed = np.zeros(number_of_slices, dtype=np.uint8)
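# Construction sketch (illustrative values): the available cpu and ram units
# are derived from the hardware figures and the cfg unit sizes. Assuming, for
# example, cfg.CPU_UNIT = 1 GHz and cfg.RAM_UNIT = 400 MB, a 10 GHz / 4000 MB
# node would expose 10 cpu units and 10 ram units.
def _example_fog_node_creation():
    return Fog_node(index=0, x=0.0, y=0.0, cpu_frequency=10, ram_size=4000,
                    number_of_slices=3)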
def linear_to_db(value):
    """Returns a dB value given its linear value"""
    if value <= 0:
        raise InvalidValueError("linear values must be positive",
                                "]0, +inf[")
    return 10 * np.log10(float(value))
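# Worked example: a linear value of 1000 corresponds to
# 10 * log10(1000) = 30 dB (e.g. 1000 mW is 30 dBm).
def _example_linear_to_db():
    return linear_to_db(1000.0)  # == 30.0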
def finished_transmitting(self, bw):
    """Returns the bandwidth that was used for a transmission to the pool"""
    if bw < 0:
        raise InvalidValueError("Bw cannot be negative")
    self._bandwidth += bw