Example #1
    def action_map(self, _range=1):
        """Build the symmetric set of scaling actions, spaced by 100 / ACTIONS."""
        self.actions = set()
        for i in range(_range + 1):
            a = i * 100 / settings.ACTIONS
            Mlog.INFO("ACTION: ", a)
            self.actions.add(a)
            self.actions.add(-a)  # mirror each step as a scale-down action
        # Sort before converting, so action indices are stable across runs.
        self.actions = tuple(sorted(self.actions))
        Mlog.INFO('THE ACTIONS: ', self.actions)
        return self.actions
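For reference, a minimal standalone sketch of what action_map() produces, assuming a hypothetical settings.ACTIONS value of 4 and a _range of 2 (both are configuration-dependent in the original):

    # Hypothetical stand-in for settings.ACTIONS; the real value comes from
    # the project's settings module.
    ACTIONS = 4

    def build_actions(_range=2):
        actions = set()
        for i in range(_range + 1):
            a = i * 100 / ACTIONS   # 0.0, 25.0, 50.0 for i = 0, 1, 2
            actions.add(a)
            actions.add(-a)         # mirror each step as a scale-down action
        return tuple(sorted(actions))

    print(build_actions())          # (-50.0, -25.0, 0.0, 25.0, 50.0)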
Example #2
    def get_reward(self, clients, old_action):
        """Retrieve the reward for a state change."""
        self.client.update()
        # Step strategies store the raw action value; the others store an
        # index into the actions tuple built by action_map().
        if settings.STRATEGY in ('fql_step', 'fsl_step'):
            old_action = self.old_action
        else:
            old_action = self.actions[self.old_action]

        if self.dc.load >= self.dc.cap:
            # Data center is saturated: punish scale-up, reward scale-down.
            Mlog.INFO("SERVER OVER CAPACITY: ", self.dc.load, self.dc.cap)
            if old_action >= 0:
                reward = -3  #*(self.dc.load/self.dc.cap)
                return reward
            else:
                reward = 1  #* (self.dc.load / self.dc.cap)
                return reward

        if self.client.nbw < 0.5 * self.client.bw:
            # Client is far below its SLA bandwidth.
            if old_action >= 0:
                reward = 0.7  #*self.client.nbw/self.client.bw
                return reward
            else:
                reward = -4  #*self.client.nbw/self.client.bw
                return reward

        if self.client.nbw < self.client.bw:
            # Client is below its SLA bandwidth, but by less than half.
            if old_action >= 0:
                reward = 0.5  #* self.client.nbw / self.client.bw
                return reward
            else:
                reward = -2  #* self.client.nbw / self.client.bw
                return reward

        # Default: the client meets its SLA; penalize in proportion to its
        # bandwidth ratio.
        return -0.7 * self.client.nbw / self.client.bw
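To summarize the branch structure, here is a hypothetical standalone helper (not part of the original class) that mirrors the reward tiers, assuming load/cap describe the data center and nbw/bw are the client's current and SLA bandwidth:

    def reward_tier(load, cap, nbw, bw, old_action):
        """Hypothetical mirror of get_reward()'s tier structure."""
        if load >= cap:           # data center saturated
            return -3 if old_action >= 0 else 1
        if nbw < 0.5 * bw:        # client far below its SLA bandwidth
            return 0.7 if old_action >= 0 else -4
        if nbw < bw:              # client mildly below its SLA bandwidth
            return 0.5 if old_action >= 0 else -2
        return -0.7 * nbw / bw    # default: proportional penalty

    print(reward_tier(load=80, cap=100, nbw=40, bw=100, old_action=1))    # 0.7
    print(reward_tier(load=120, cap=100, nbw=90, bw=100, old_action=-1))  # 1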
Example #3
    def get_current_state(self):
        if settings.STATE_TYPE == 'val':
            # Absolute load, bucketed into OBSERVATION_SPACE discrete states.
            state = int(self.dc.load / (self.dc.cap / self.OBSERVATION_SPACE))
        elif settings.STATE_TYPE == 'diff':
            # Load relative to the previous observation.
            state = int(
                self.dc.load /
                (self.old_dc_load * self.dc.cap / self.OBSERVATION_SPACE))
            self.old_dc_load = self.dc.load
        elif settings.STATE_TYPE == 'fuzzy':
            # Sample CPU once so the logged value matches the one passed in.
            cpu = psutil.cpu_percent()
            load = self.dc.load
            state = int(self.fls.get_state(cpu=cpu, bw=load))
            Mlog.INFO('FUZZY INPUT: ', cpu, load, state)
        else:
            raise ValueError('Unknown STATE_TYPE: %s' % settings.STATE_TYPE)

        # Clamp to the top of the observation space.
        if state > self.OBSERVATION_SPACE:
            return self.dump_state(self.OBSERVATION_SPACE)

        return self.dump_state(state)
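As a worked example of the 'val' bucketing above, assuming a hypothetical capacity of 100 and an observation space of 10 buckets:

    OBSERVATION_SPACE = 10   # hypothetical number of discrete states
    cap = 100.0              # hypothetical data-center capacity
    load = 37.0              # hypothetical current load

    state = int(load / (cap / OBSERVATION_SPACE))   # 37.0 / 10.0 -> bucket 3
    print(state)                                    # 3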