Example #1
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size

        self.hirs_ratio = 0.01

        process_kwargs(self, kwargs, acceptable_kws=['hirs_ratio'])

        self.hirs_limit = max(1,
                              int((self.cache_size * self.hirs_ratio) + 0.5))
        self.lirs_limit = self.cache_size - self.hirs_limit

        self.hirs_count = 0
        self.lirs_count = 0
        self.demoted = 0
        self.nonresident = 0

        # s stack, semi-split to find nonresident HIRs quickly
        self.lirs = DequeDict()
        self.hirs = DequeDict()
        # q, the resident HIR stack
        self.q = DequeDict()

        self.time = 0
        self.visual = Visualizinator(labels=['Q size'])
        self.pollution = Pollutionator(cache_size)
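
These examples all rely on a helper container called DequeDict (and, from Example #3 on, a companion HeapDict), neither of which appears in this listing. Below is a minimal sketch of the DequeDict interface the code above appears to assume, built on collections.OrderedDict; it is illustrative only, not the project's actual implementation.

# Minimal sketch of the assumed DequeDict interface (not the real one).
# Assignment inserts or refreshes a key at the MRU end; first()/popFirst()
# expose the LRU end, which is how the caches above pick eviction victims.
from collections import OrderedDict


class DequeDict:
    def __init__(self):
        self._od = OrderedDict()

    def __contains__(self, key):
        return key in self._od

    def __len__(self):
        return len(self._od)

    def __iter__(self):
        # Iteration yields the stored entries (Example #14 iterates this way)
        return iter(self._od.values())

    def __getitem__(self, key):
        return self._od[key]

    def __setitem__(self, key, value):
        # (Re)assigning a key moves it to the MRU end
        self._od[key] = value
        self._od.move_to_end(key)

    def __delitem__(self, key):
        del self._od[key]

    def first(self):
        # Peek at the LRU entry without removing it
        return next(iter(self._od.values()))

    def popFirst(self):
        # Remove and return the LRU entry
        _, value = self._od.popitem(last=False)
        return value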
Example #2
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size
        self.mru = DequeDict()

        self.time = 0

        self.pollution = Pollutionator(cache_size)
Example #3
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size
        self.lfu = HeapDict()
        self.time = 0
        np.random.seed(123)

        self.pollution = Pollutionator(cache_size)
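
Example #3 (and several later ones) also uses a HeapDict: a keyed priority structure whose min()/popMin() return the smallest entry according to the entries' own __lt__ (lowest frequency first in the LFU examples). A minimal sketch of the assumed interface, again illustrative rather than the project's real implementation:

# Minimal HeapDict sketch (not the real implementation). Entries are assumed
# to carry their own key as .oblock, as they do throughout these examples.
class HeapDict:
    def __init__(self):
        self._data = {}

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        # Re-assigning after changing value.freq simply refreshes the stored entry
        self._data[key] = value

    def __delitem__(self, key):
        del self._data[key]

    def min(self):
        # Smallest entry by the entries' __lt__ (e.g. lowest frequency), not removed
        return min(self._data.values())

    def popMin(self):
        entry = self.min()
        del self._data[entry.oblock]
        return entry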
Example #4
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size
        self.p = 0

        self.T1 = DequeDict()
        self.T2 = DequeDict()
        self.B1 = DequeDict()
        self.B2 = DequeDict()

        self.time = 0

        self.visual = Visualizinator(labels=['p_value'])
        self.pollution = Pollutionator(self.cache_size)
Example #5
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size

        # Two stacks for the cache (s and q), to mark demoted items faster
        self.s = DequeDict()
        self.q = DequeDict()

        # LFU heap
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Learning Rate
        self.learning_rate = self.Cacheus_Learning_Rate(cache_size, **kwargs)

        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)

        # Variables
        hirsRatio = 0.01
        self.q_limit = max(1, int((hirsRatio * self.cache_size) + 0.5))
        self.s_limit = self.cache_size - self.q_limit
        self.q_size = 0
        self.s_size = 0
        self.dem_count = 0
        self.nor_count = 0
        self.q_sizes = []

        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-Cacheus', 'W_lfu-Cacheus', 'q_size'])

        # Pollution
        self.pollution = Pollutionator(cache_size)
Example #6
class LRU:
    class LRU_Entry:
        def __init__(self, oblock):
            self.oblock = oblock

        def __repr__(self):
            return "(o={})".format(self.oblock)

    def __init__(self, cache_size, **kwargs):

        self.cache_size = cache_size
        self.lru = DequeDict()

        self.time = 0

        self.pollution = Pollutionator(cache_size)

        self.cache = []

    def __contains__(self, oblock):
        return oblock in self.lru

    def addToCache(self, oblock):
        x = self.LRU_Entry(oblock)
        self.lru[oblock] = x

    def hit(self, oblock):
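        # Reassigning the existing entry moves it to the MRU end of the DequeDict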
        x = self.lru[oblock]
        self.lru[oblock] = x

    def evict(self):
        lru = self.lru.popFirst()
        self.pollution.remove(lru.oblock)
        return lru

    def miss(self, oblock):
        if len(self.lru) == self.cache_size:
            self.evict()
        self.addToCache(oblock)

    def request(self, oblock):
        miss = True
        self.time += 1

        if oblock in self.lru:
            miss = False
            self.hit(oblock)
        else:
            self.miss(oblock)

        # Record the full access sequence (grows without bound; unrelated to cache_size)
        self.cache.append(oblock)

        # Pollutionator
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return miss
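
A hypothetical driver for classes like the LRU above, whose request() returns a boolean miss flag (most of the examples in this listing follow that convention; Example #14 is the exception and returns a (CacheOp, evicted) pair). This is only a usage sketch under that assumption:

# Hypothetical driver: replay a trace of block ids and report the hit rate.
def run_trace(cache_cls, cache_size, trace):
    cache = cache_cls(cache_size)
    hits = 0
    for oblock in trace:
        miss = cache.request(oblock)
        if not miss:
            hits += 1
    return hits / len(trace)


# e.g. a looping trace of 8 distinct blocks against a 4-entry LRU cache
# hit_rate = run_trace(LRU, 4, [i % 8 for i in range(1000)])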
Example #7
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size
        self.lru = DequeDict()
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Fixed Learning Rate
        self.learning_rate = 0.45

        # Fixed Discount Rate
        self.discount_rate = 0.005**(1 / self.cache_size)

        # Apply values from kwargs before any acceptable_kws members are
        # used as prerequisites elsewhere
        process_kwargs(
            self,
            kwargs,
            acceptable_kws=['learning_rate', 'initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)
        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-LeCaR', 'W_lfu-LeCaR', 'hit-rate'],
            windowed_labels=['hit-rate'],
            window_size=cache_size)

        # Pollution
        self.pollution = Pollutionator(cache_size)
Example #8
    def __init__(self, cache_size, **kwargs):
        np.random.seed(123)
        self.time = 0

        self.cache_size = cache_size

        kwargs_arc = {}
        kwargs_lfu = {}
        if 'arc' in kwargs:
            kwargs_arc = kwargs['arc']
        if 'lfu' in kwargs:
            kwargs_lfu = kwargs['lfu']

        self.arc = self.ARC(cache_size, **kwargs_arc)
        self.lfu = self.LFU(cache_size, **kwargs_lfu)

        self.history_size = cache_size
        self.history = DequeDict()

        self.initial_weight = 0.5

        self.learning_rate = self.ARCALeCaR_Learning_Rate(cache_size, **kwargs)

        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)

        self.visual = Visualizinator(labels=['W_arc', 'W_lfu', 'hit-rate'],
                                     windowed_labels=['hit-rate'],
                                     window_size=cache_size,
                                     **kwargs)

        self.pollution = Pollutionator(cache_size, **kwargs)
Example #9
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size

        self.hirs_ratio = 0.01

        process_kwargs(self, kwargs, acceptable_kws=['hirs_ratio'])

        self.hirs_limit = max(2, int((self.cache_size * self.hirs_ratio)))
        self.lirs_limit = self.cache_size - self.hirs_limit

        self.hirs_count = 0
        self.lirs_count = 0
        self.nonresident = 0

        # s stack, semi-split to find nonresident HIRs quickly
        self.s = DequeDict()
        self.nr_hirs = DequeDict()
        # q, the resident HIR stack
        self.q = DequeDict()

        self.time = 0
        self.last_oblock = None

        self.pollution = Pollutionator(cache_size)
Example #10
class ARC:
    class ARC_Entry:
        def __init__(self, oblock):
            self.oblock = oblock

        def __repr__(self):
            return "({})".format(self.oblock)

    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size
        self.p = 0

        self.T1 = DequeDict()
        self.T2 = DequeDict()
        self.B1 = DequeDict()
        self.B2 = DequeDict()

        self.time = 0

        self.visual = Visualizinator(labels=['p_value'])
        self.pollution = Pollutionator(self.cache_size)

    def __contains__(self, oblock):
        return oblock in self.T1 or oblock in self.T2

    def cacheFull(self):
        return len(self.T1) + len(self.T2) == self.cache_size

    def addToCache(self, oblock):
        x = self.ARC_Entry(oblock)
        self.T1[oblock] = x

    def moveToList(self, entry, arc_list):
        arc_list[entry.oblock] = entry

    def hit(self, oblock, arc_list):
        x = arc_list[oblock]
        del arc_list[oblock]
        self.moveToList(x, self.T2)

    def evictFromList(self, arc_list):
        assert (len(arc_list) > 0)
        return arc_list.popFirst()

    def evict(self):
        len_L1 = len(self.T1) + len(self.B1)
        len_L2 = len(self.T2) + len(self.B2)

        if len_L1 == self.cache_size:
            if len(self.T1) < self.cache_size:
                hist_evict = self.evictFromList(self.B1)
                evicted = self.replace()
            else:
                evicted = self.evictFromList(self.T1)
        elif len_L1 < self.cache_size and len_L1 + len_L2 >= self.cache_size:
            if len_L1 + len_L2 == 2 * self.cache_size:
                self.evictFromList(self.B2)
            evicted = self.replace()
        self.pollution.remove(evicted.oblock)
        return evicted

    def replace(self, x_in_B2=False):
        if len(self.T1) > 0 and ((x_in_B2 and len(self.T1) == self.p)
                                 or len(self.T1) > self.p):
            evicted = self.evictFromList(self.T1)
            self.moveToList(evicted, self.B1)
        else:
            evicted = self.evictFromList(self.T2)
            self.moveToList(evicted, self.B2)
        return evicted

    def missInHistory(self, oblock, history):
        x = history[oblock]
        x_in_B2 = oblock in self.B2
        del history[oblock]

        evicted = self.replace(x_in_B2)
        self.pollution.remove(evicted.oblock)

        self.moveToList(x, self.T2)

    def miss(self, oblock):
        if oblock in self.B1:
            self.p = min(self.p + max(1,
                                      len(self.B2) // len(self.B1)),
                         self.cache_size)
            self.missInHistory(oblock, self.B1)
        elif oblock in self.B2:
            self.p = max(self.p - max(1, len(self.B1) // len(self.B2)), 0)
            self.missInHistory(oblock, self.B2)
        else:
            if self.cacheFull():
                self.evict()
            self.addToCache(oblock)

    def request(self, oblock):
        miss = True
        self.time += 1

        if oblock in self:
            miss = False
            if oblock in self.T1:
                self.hit(oblock, self.T1)
            else:
                self.hit(oblock, self.T2)
        else:
            self.miss(oblock)

        # Visualizinator
        self.visual.add({'p_value': (self.time, self.p)})

        # Pollutionator
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return miss

    def get_p(self):
        return float(self.p / self.cache_size)
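
The p update in miss() above follows the usual ARC adaptation rule: a ghost hit in B1 grows the T1 target by at least 1 (scaled by |B2|/|B1|) and is capped at cache_size, while a ghost hit in B2 shrinks it symmetrically toward 0. A small numeric walk-through of just that arithmetic, with made-up list sizes:

# Hypothetical numbers, only to illustrate the update rule used in miss() above
p, cache_size = 0, 8
len_B1, len_B2 = 2, 6

# Ghost hit in B1: p grows by max(1, len_B2 // len_B1) = max(1, 3) = 3
p = min(p + max(1, len_B2 // len_B1), cache_size)  # p == 3

# Ghost hit in B2: p shrinks by max(1, len_B1 // len_B2) = max(1, 0) = 1
p = max(p - max(1, len_B1 // len_B2), 0)           # p == 2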
Example #11
class ALeCaR6:
    ######################
    ## INTERNAL CLASSES ##
    ######################

    # Entry to track the page information
    class ALeCaR6_Entry:
        def __init__(self, oblock, freq=1, time=0):
            self.oblock = oblock
            self.freq = freq
            self.time = time
            self.evicted_time = None

        # Minimal comparators needed for HeapDict
        def __lt__(self, other):
            if self.freq == other.freq:
                return self.time > other.time
            return self.freq < other.freq

        # Useful for debugging
        def __repr__(self):
            return "(o={}, f={}, t={})".format(self.oblock, self.freq,
                                               self.time)

    # Adaptive learning rate of ALeCaR6
    # TODO consider an internal time instead of taking time as a parameter
    class ALeCaR6_Learning_Rate:
        # kwargs: keyword arguments are passed down so each component can pick
        #         out the keys it cares about and ignore the rest. The passed
        #         values are applied after the default initializations, but
        #         before they are used in a way that cannot be undone, such as
        #         the learning rate reset point, which depends on the starting
        #         learning_rate
        def __init__(self, period_length, **kwargs):
            self.learning_rate = np.sqrt((2.0 * np.log(2)) / period_length)

            process_kwargs(self, kwargs, acceptable_kws=['learning_rate'])

            self.learning_rate_reset = min(max(self.learning_rate, 0.001), 1)
            self.learning_rate_curr = self.learning_rate
            self.learning_rate_prev = 0.0
            self.learning_rates = []

            self.period_len = period_length

            self.hitrate = 0
            self.hitrate_prev = 0.0
            self.hitrate_diff_prev = 0.0

            self.hitrate_nega_count = 0
            self.hitrate_zero_count = 0

        # Lets the learning_rate object be used directly in multiplications,
        # avoiding self.learning_rate.learning_rate, which hurts readability
        def __mul__(self, other):
            return self.learning_rate * other

        # Update the adaptive learning rate when we've reached the end of a period
        def update(self, time):
            if time % self.period_len == 0:
                # TODO: remove float() when using Python3
                hitrate_curr = round(self.hitrate / float(self.period_len), 3)
                hitrate_diff = round(hitrate_curr - self.hitrate_prev, 3)

                delta_LR = round(self.learning_rate_curr, 3) - round(
                    self.learning_rate_prev, 3)
                delta, delta_HR = self.updateInDeltaDirection(
                    delta_LR, hitrate_diff)

                if delta > 0:
                    self.learning_rate = min(
                        self.learning_rate +
                        abs(self.learning_rate * delta_LR), 1)
                    self.hitrate_nega_count = 0
                    self.hitrate_zero_count = 0
                elif delta < 0:
                    self.learning_rate = max(
                        self.learning_rate -
                        abs(self.learning_rate * delta_LR), 0.001)
                    self.hitrate_nega_count = 0
                    self.hitrate_zero_count = 0
                elif delta == 0 and hitrate_diff <= 0:
                    if (hitrate_curr <= 0 and hitrate_diff == 0):
                        self.hitrate_zero_count += 1
                    if hitrate_diff < 0:
                        self.hitrate_nega_count += 1
                        self.hitrate_zero_count += 1
                    if self.hitrate_zero_count >= 10:
                        self.learning_rate = self.learning_rate_reset
                        self.hitrate_zero_count = 0
                    elif hitrate_diff < 0:
                        if self.hitrate_nega_count >= 10:
                            self.learning_rate = self.learning_rate_reset
                            self.hitrate_nega_count = 0
                        else:
                            self.updateInRandomDirection()
                self.learning_rate_prev = self.learning_rate_curr
                self.learning_rate_curr = self.learning_rate
                self.hitrate_prev = hitrate_curr
                self.hitrate_diff_prev = hitrate_diff
                self.hitrate = 0

            # TODO check that this is necessary and shouldn't be moved to
            #      the Visualizinator
            self.learning_rates.append(self.learning_rate)

        # Update the learning rate according to the change in learning_rate and hitrate
        def updateInDeltaDirection(self, learning_rate_diff, hitrate_diff):
            delta = learning_rate_diff * hitrate_diff
            # delta = 1 if learning_rate_diff and hitrate_diff are both positive or both negative
            # delta = -1 if learning_rate_diff and hitrate_diff have opposite signs
            # delta = 0 if either learning_rate_diff or hitrate_diff is 0
            delta = int(delta / abs(delta)) if delta != 0 else 0
            delta_HR = 0 if delta == 0 and learning_rate_diff != 0 else 1
            return delta, delta_HR

        # Update the learning rate in a random direction or correct it from extremes
        def updateInRandomDirection(self):
            if self.learning_rate >= 1:
                self.learning_rate = 0.9
            elif self.learning_rate <= 0.001:
                self.learning_rate = 0.005
            elif np.random.choice(['Increase', 'Decrease']) == 'Increase':
                self.learning_rate = min(self.learning_rate * 1.25, 1)
            else:
                self.learning_rate = max(self.learning_rate * 0.75, 0.001)

    # kwargs: keyword arguments are passed down so each component can pick out
    #         the keys it cares about and ignore the rest. The passed values
    #         are applied after the default initializations, but before they
    #         are used in a way that cannot be undone, such as setting the
    #         weights (W).
    #         Note that cache_size is a required argument, unlike the
    #         optional kwargs.
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size
        self.lru = DequeDict()
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Learning Rate
        self.learning_rate = self.ALeCaR6_Learning_Rate(cache_size, **kwargs)

        # Apply values from kwargs before any acceptable_kws members are
        # used as prerequisites elsewhere
        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)
        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-ALeCaR6', 'W_lfu-ALeCaR6', 'hit-rate'],
            windowed_labels=['hit-rate'],
            window_size=cache_size)

        # Pollution
        self.pollution = Pollutionator(cache_size)

    # True if oblock is in the cache (the lru DequeDict tracks every resident block)
    def __contains__(self, oblock):
        return oblock in self.lru

    # Add Entry to cache with given frequency
    def addToCache(self, oblock, freq):
        x = self.ALeCaR6_Entry(oblock, freq, self.time)
        self.lru[oblock] = x
        self.lfu[oblock] = x

    # Add Entry to history dictated by policy
    # policy: 0, Add Entry to LRU History
    #         1, Add Entry to LFU History
    #        -1, Do not add Entry to any History
    def addToHistory(self, x, policy):
        # Use reference to policy_history to reduce redundant code
        policy_history = None
        if policy == 0:
            policy_history = self.lru_hist
        elif policy == 1:
            policy_history = self.lfu_hist
        elif policy == -1:
            return

        # Evict from history if it is full
        if len(policy_history) == self.history_size:
            evicted = self.getLRU(policy_history)
            del policy_history[evicted.oblock]

        policy_history[x.oblock] = x

    # Get the LRU item in the given DequeDict
    # NOTE: DequeDict can be: lru, lru_hist, or lfu_hist
    # NOTE: does *NOT* remove the LRU Entry from given DequeDict
    def getLRU(self, dequeDict):
        return dequeDict.first()

    # Get the LFU min item in the LFU (HeapDict)
    # NOTE: does *NOT* remove the LFU Entry from LFU
    def getHeapMin(self):
        return self.lfu.min()

    # Get the random eviction choice based on current weights
    def getChoice(self):
        return 0 if np.random.rand() < self.W[0] else 1

    # Evict an entry
    def evict(self):
        lru = self.getLRU(self.lru)
        lfu = self.getHeapMin()

        evicted = lru
        policy = self.getChoice()

        # Since we're using Entry references, we use is to check
        # that the LRU and LFU Entries are the same Entry
        if lru is lfu:
            evicted, policy = lru, -1
        elif policy == 0:
            evicted = lru
        else:
            evicted = lfu

        del self.lru[evicted.oblock]
        del self.lfu[evicted.oblock]

        evicted.evicted_time = self.time

        self.pollution.remove(evicted.oblock)

        self.addToHistory(evicted, policy)

        return evicted, policy

    # Cache Hit
    def hit(self, oblock):
        x = self.lru[oblock]
        x.time = self.time

        self.lru[oblock] = x

        x.freq += 1
        self.lfu[oblock] = x

    # Adjust the weights based on the given rewards for LRU and LFU
    def adjustWeights(self, rewardLRU, rewardLFU):
        reward = np.array([rewardLRU, rewardLFU], dtype=np.float32)
        self.W = self.W * np.exp(self.learning_rate * reward)
        self.W = self.W / np.sum(self.W)

        if self.W[0] >= 0.99:
            self.W = np.array([0.99, 0.01], dtype=np.float32)
        elif self.W[1] >= 0.99:
            self.W = np.array([0.01, 0.99], dtype=np.float32)

    # Cache Miss
    def miss(self, oblock):
        freq = 1
        if oblock in self.lru_hist:
            entry = self.lru_hist[oblock]
            freq = entry.freq + 1
            del self.lru_hist[oblock]
            self.adjustWeights(-1, 0)
        elif oblock in self.lfu_hist:
            entry = self.lfu_hist[oblock]
            freq = entry.freq + 1
            del self.lfu_hist[oblock]
            self.adjustWeights(0, -1)

        # If the cache is full, evict
        if len(self.lru) == self.cache_size:
            evicted, policy = self.evict()

        self.addToCache(oblock, freq)

    # Process an access request for the given oblock
    def request(self, oblock):
        miss = True

        self.time += 1

        self.visual.add({
            'W_lru-ALeCaR6': (self.time, self.W[0]),
            'W_lfu-ALeCaR6': (self.time, self.W[1])
        })

        self.learning_rate.update(self.time)

        if oblock in self:
            miss = False
            self.hit(oblock)
        else:
            self.miss(oblock)

        # Windowed
        self.visual.addWindow({'hit-rate': 0 if miss else 1}, self.time)

        # Learning Rate
        if not miss:
            self.learning_rate.hitrate += 1

        # Pollution
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return miss
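
adjustWeights above is a multiplicative-weights update: each policy's weight is scaled by exp(learning_rate * reward), the pair is renormalized, and then clipped to the [0.01, 0.99] range. A small numeric illustration, using the fixed LeCaR-style rate of 0.45 from Example #7 (ALeCaR6 itself uses the adaptive rate object, which multiplies the same way):

import numpy as np

# Hypothetical illustration of the update in adjustWeights (not a real trace)
W = np.array([0.5, 0.5], dtype=np.float32)
learning_rate = 0.45

# A miss found in the LRU history penalizes LRU: reward = (-1, 0)
reward = np.array([-1, 0], dtype=np.float32)
W = W * np.exp(learning_rate * reward)
W = W / np.sum(W)   # roughly [0.39, 0.61]: the eviction choice now favors LFU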
Example #12
class Cacheus:
    ######################
    ## INTERNAL CLASSES ##
    ######################

    # Entry to track the page information
    class Cacheus_Entry:
        # TODO: replace the is_new logic with an 'is not demoted' check
        def __init__(self, oblock, freq=1, time=0, is_new=True):
            self.oblock = oblock
            self.freq = freq
            self.time = time
            self.evicted_time = None
            self.is_demoted = False
            self.is_new = is_new

        # Minimal comparators needed for HeapDict
        def __lt__(self, other):
            if self.freq == other.freq:
                return self.time > other.time
            return self.freq < other.freq

        # Useful for debugging
        def __repr__(self):
            return "(o={}, f={}, t={})".format(self.oblock, self.freq,
                                               self.time)

    # Adaptive learning rate of Cacheus
    # TODO consider an internal time instead of taking time as a parameter
    class Cacheus_Learning_Rate:
        # kwargs: keyword arguments are passed down so each component can pick
        #         out the keys it cares about and ignore the rest. The passed
        #         values are applied after the default initializations, but
        #         before they are used in a way that cannot be undone, such as
        #         the learning rate reset point, which depends on the starting
        #         learning_rate
        def __init__(self, period_length, **kwargs):
            self.learning_rate = np.sqrt((2.0 * np.log(2)) / period_length)

            process_kwargs(self, kwargs, acceptable_kws=['learning_rate'])

            self.learning_rate_reset = min(max(self.learning_rate, 0.001), 1)
            self.learning_rate_curr = self.learning_rate
            self.learning_rate_prev = 0.0
            self.learning_rates = []

            self.period_len = period_length

            self.hitrate = 0
            self.hitrate_prev = 0.0
            self.hitrate_diff_prev = 0.0
            self.hitrate_zero_count = 0
            self.hitrate_nega_count = 0

        # Lets the learning_rate object be used directly in multiplications,
        # avoiding self.learning_rate.learning_rate, which hurts readability
        def __mul__(self, other):
            return self.learning_rate * other

        # Update the adaptive learning rate when we've reached the end of a period
        def update(self, time):
            if time % self.period_len == 0:
                hitrate_curr = round(self.hitrate / self.period_len, 3)
                hitrate_diff = round(hitrate_curr - self.hitrate_prev, 3)

                delta_LR = round(self.learning_rate_curr, 3) - round(
                    self.learning_rate_prev, 3)
                delta, delta_HR = self.updateInDeltaDirection(
                    delta_LR, hitrate_diff)

                if delta > 0:
                    self.learning_rate = min(
                        self.learning_rate +
                        abs(self.learning_rate * delta_LR), 1)
                    self.hitrate_nega_count = 0
                    self.hitrate_zero_count = 0
                elif delta < 0:
                    self.learning_rate = max(
                        self.learning_rate -
                        abs(self.learning_rate * delta_LR), 0.001)
                    self.hitrate_nega_count = 0
                    self.hitrate_zero_count = 0
                elif delta == 0 and hitrate_diff <= 0:
                    if (hitrate_curr <= 0 and hitrate_diff == 0):
                        self.hitrate_zero_count += 1
                    if hitrate_diff < 0:
                        self.hitrate_nega_count += 1
                        self.hitrate_zero_count += 1
                    if self.hitrate_zero_count >= 10:
                        self.learning_rate = self.learning_rate_reset
                        self.hitrate_zero_count = 0
                    elif hitrate_diff < 0:
                        if self.hitrate_nega_count >= 10:
                            self.learning_rate = self.learning_rate_reset
                            self.hitrate_nega_count = 0
                        else:
                            self.updateInRandomDirection()
                self.learning_rate_prev = self.learning_rate_curr
                self.learning_rate_curr = self.learning_rate
                self.hitrate_prev = hitrate_curr
                self.hitrate_diff_prev = hitrate_diff
                self.hitrate = 0

            # TODO check that this is necessary and shouldn't be moved to
            #      the Visualizinator
            self.learning_rates.append(self.learning_rate)

        # Update the learning rate according to the change in learning_rate and hitrate
        def updateInDeltaDirection(self, learning_rate_diff, hitrate_diff):
            delta = learning_rate_diff * hitrate_diff
            # delta = 1 if learning_rate_diff and hitrate_diff are both positive or both negative
            # delta = -1 if learning_rate_diff and hitrate_diff have opposite signs
            # delta = 0 if either learning_rate_diff or hitrate_diff is 0
            delta = int(delta / abs(delta)) if delta != 0 else 0
            delta_HR = 0 if delta == 0 and learning_rate_diff != 0 else 1
            return delta, delta_HR

        # Update the learning rate in a random direction or correct it from extremes
        def updateInRandomDirection(self):
            if self.learning_rate >= 1:
                self.learning_rate = 0.9
            elif self.learning_rate <= 0.001:
                self.learning_rate = 0.005
            elif np.random.choice(['Increase', 'Decrease']) == 'Increase':
                self.learning_rate = min(self.learning_rate * 1.25, 1)
            else:
                self.learning_rate = max(self.learning_rate * 0.75, 0.001)

    # kwargs: keyword arguments are passed down so each component can pick out
    #         the keys it cares about and ignore the rest. The passed values
    #         are applied after the default initializations, but before they
    #         are used in a way that cannot be undone, such as setting the
    #         weights (W).
    #         Note that cache_size is a required argument, unlike the
    #         optional kwargs.
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size

        # Two stacks for the cache (s and q), to mark demoted items faster
        self.s = DequeDict()
        self.q = DequeDict()

        # LFU heap
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Learning Rate
        self.learning_rate = self.Cacheus_Learning_Rate(cache_size, **kwargs)

        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)

        # Variables
        hirsRatio = 0.01
        self.q_limit = max(1, int((hirsRatio * self.cache_size) + 0.5))
        self.s_limit = self.cache_size - self.q_limit
        self.q_size = 0
        self.s_size = 0
        self.dem_count = 0
        self.nor_count = 0
        self.q_sizes = []

        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-Cacheus', 'W_lfu-Cacheus', 'q_size'])

        # Pollution
        self.pollution = Pollutionator(cache_size)

    # True if oblock is resident in the cache (in either s or q)
    def __contains__(self, oblock):
        return (oblock in self.s or oblock in self.q)

    # Hit in the MRU portion of the cache (the s stack)
    def hitinS(self, oblock):
        x = self.s[oblock]
        x.time = self.time
        self.s[oblock] = x

        x.freq += 1
        self.lfu[oblock] = x

    # Hit in the LRU portion of the cache (the q stack)
    def hitinQ(self, oblock):
        x = self.q[oblock]
        x.time = self.time

        x.freq += 1
        self.lfu[oblock] = x

        if x.is_demoted:
            self.adjustSize(True)
            x.is_demoted = False
            self.dem_count -= 1
        del self.q[x.oblock]
        self.q_size -= 1

        if self.s_size >= self.s_limit:
            y = self.s.popFirst()
            y.is_demoted = True
            self.dem_count += 1
            self.s_size -= 1
            self.q[y.oblock] = y
            self.q_size += 1

        self.s[x.oblock] = x
        self.s_size += 1

    # Add Entry to S with given frequency
    def addToS(self, oblock, freq, isNew=True):
        x = self.Cacheus_Entry(oblock, freq, self.time, isNew)
        self.s[oblock] = x
        self.lfu[oblock] = x
        self.s_size += 1

    def addToQ(self, oblock, freq, isNew=True):
        x = self.Cacheus_Entry(oblock, freq, self.time, isNew)
        self.q[oblock] = x
        self.lfu[oblock] = x
        self.q_size += 1

    # Add Entry to history dictated by policy
    # policy: 0, Add Entry to LRU History
    #         1, Add Entry to LFU History
    #        -1, Do not add Entry to any History
    def addToHistory(self, x, policy):
        # Use reference to policy_history to reduce redundant code
        policy_history = None
        if policy == 0:
            policy_history = self.lru_hist
            if x.is_new:
                self.nor_count += 1
        elif policy == 1:
            policy_history = self.lfu_hist
        elif policy == -1:
            return

        # Evict from history if it is full
        if len(policy_history) == self.history_size:
            evicted = self.getLRU(policy_history)
            del policy_history[evicted.oblock]
        policy_history[x.oblock] = x

    # Get the LRU item in the given DequeDict
    # NOTE: DequeDict can be: lru, lru_hist, or lfu_hist
    # NOTE: does *NOT* remove the LRU Entry from given DequeDict
    def getLRU(self, dequeDict):
        return dequeDict.first()

    # Get the LFU min item in the LFU (HeapDict)
    # NOTE: does *NOT* remove the LFU Entry from LFU
    def getHeapMin(self):
        return self.lfu.min()

    # Get the random eviction choice based on current weights
    def getChoice(self):
        return 0 if np.random.rand() < self.W[0] else 1

    # Evict an entry
    def evict(self):
        lru = self.getLRU(self.q)
        lfu = self.getHeapMin()

        evicted = lru
        policy = self.getChoice()

        # Since we're using Entry references, we use is to check
        # that the LRU and LFU Entries are the same Entry
        if lru is lfu:
            evicted, policy = lru, -1
        elif policy == 0:
            evicted = lru
            del self.q[evicted.oblock]
            self.q_size -= 1
        elif policy == 1:
            evicted = lfu
            if evicted.oblock in self.s:
                del self.s[evicted.oblock]
                self.s_size -= 1
                #self.q_size += 1
            elif evicted.oblock in self.q:
                del self.q[evicted.oblock]
                self.q_size -= 1

        if policy == -1:
            del self.q[evicted.oblock]
            self.q_size -= 1

        del self.lfu[evicted.oblock]
        evicted.evicted_time = self.time
        self.pollution.remove(evicted.oblock)

        self.addToHistory(evicted, policy)

        return evicted, policy

    # Adjust the weights based on the given rewards for LRU and LFU
    def adjustWeights(self, rewardLRU, rewardLFU):
        reward = np.array([rewardLRU, rewardLFU], dtype=np.float32)
        self.W = self.W * np.exp(self.learning_rate * reward)
        self.W = self.W / np.sum(self.W)

        if self.W[0] >= 0.99:
            self.W = np.array([0.99, 0.01], dtype=np.float32)
        elif self.W[1] >= 0.99:
            self.W = np.array([0.01, 0.99], dtype=np.float32)

    def adjustSize(self, hit_in_Q):
        #        self.dem_count = 1 if self.dem_count == 0
        #        self.nor_count = 1 if self.nor_count == 0

        if hit_in_Q:
            self.s_limit = min(
                self.cache_size - 1, self.s_limit +
                max(1, int((self.nor_count / (self.dem_count + 2)) + 0.5)))
            self.q_limit = self.cache_size - self.s_limit
        else:
            self.q_limit = min(
                self.cache_size - 1, self.q_limit +
                max(1, int((self.dem_count / (self.nor_count + 2)) + 0.5)))
            self.s_limit = self.cache_size - self.q_limit

    def hitinLRUHist(self, oblock):
        entry = self.lru_hist[oblock]
        freq = entry.freq + 1
        del self.lru_hist[oblock]
        if entry.is_new:
            self.nor_count -= 1
            entry.is_new = False
            self.adjustSize(False)
        self.adjustWeights(-1, 0)

        if (self.s_size + self.q_size) >= self.cache_size:
            evicted, policy = self.evict()

        self.addToS(entry.oblock, entry.freq, isNew=False)
        self.limitStack()

    def hitinLFUHist(self, oblock):
        entry = self.lfu_hist[oblock]
        freq = entry.freq + 1
        del self.lfu_hist[oblock]
        self.adjustWeights(0, -1)

        if (self.s_size + self.q_size) >= self.cache_size:
            evicted, policy = self.evict()

        self.addToS(entry.oblock, entry.freq, isNew=False)
        self.limitStack()

    def limitStack(self):
        while self.s_size >= self.s_limit:
            #print("entering to mark demoted in limited stack")
            demoted = self.s.popFirst()
            self.s_size -= 1

            demoted.is_demoted = True
            self.dem_count += 1

            self.q[demoted.oblock] = demoted
            self.q_size += 1

    # Cache Miss
    def miss(self, oblock):
        freq = 1

        if self.s_size < self.s_limit and self.q_size == 0:
            self.addToS(oblock, freq, isNew=False)
        elif self.s_size + self.q_size < self.cache_size and self.q_size < self.q_limit:
            self.addToQ(oblock, freq, isNew=False)
        else:
            # NOTE: does this belong before the previous if-else block?
            # TODO Eviction of multiple blocks in Q ?
            # If the cache is full, evict
            if (self.s_size + self.q_size) >= self.cache_size:
                evicted, policy = self.evict()

            # NOTE: it's possible that these should be in an else instead
            #Filling up the cache
            self.addToQ(oblock, freq, isNew=True)
            self.limitStack()

    # Process an access request for the given oblock
    def request(self, oblock):
        miss = False
        self.time += 1

        self.visual.add({
            'W_lru-Cacheus': (self.time, self.W[0]),
            'W_lfu-Cacheus': (self.time, self.W[1]),
            'q_size': (self.time, self.q_size)
        })
        self.learning_rate.update(self.time)

        if oblock in self.s:
            self.hitinS(oblock)
        elif oblock in self.q:
            self.hitinQ(oblock)
        elif oblock in self.lru_hist:
            miss = True
            self.hitinLRUHist(oblock)
        elif oblock in self.lfu_hist:
            miss = True
            self.hitinLFUHist(oblock)
        else:
            miss = True
            self.miss(oblock)

        # Learning Rate
        if not miss:
            self.learning_rate.hitrate += 1

        # Pollution
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        if self.time % self.cache_size == 0:
            self.pollution.update(self.time)

        return miss

    def getQsize(self):
        x, y = zip(*self.visual.get('q_size'))
        return y

    def get_w_lru(self):
        return self.W[0]

    def get_learning_rate(self):
        return self.learning_rate.learning_rate_curr

    def get_q(self):
        return self.q_size
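
adjustSize above rebalances the S/Q split adaptively: a hit on a demoted entry in Q argues for a larger S (s_limit grows, scaled by nor_count relative to dem_count), while an LRU-history hit on an entry evicted while still new argues for a larger Q. A small numeric walk-through of that arithmetic with made-up counters:

# Hypothetical numbers, only to illustrate the adjustSize arithmetic above
cache_size = 100
s_limit, q_limit = 90, 10
dem_count, nor_count = 4, 10

# Hit on a demoted entry in Q (hit_in_Q=True): grow S by
# max(1, round(nor_count / (dem_count + 2))) = max(1, 2) = 2, capped at cache_size - 1
s_limit = min(cache_size - 1,
              s_limit + max(1, int((nor_count / (dem_count + 2)) + 0.5)))
q_limit = cache_size - s_limit   # s_limit == 92, q_limit == 8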
Example #13
class LFU:
    class LFU_Entry:
        def __init__(self, oblock, freq=1, time=0):
            self.oblock = oblock
            self.freq = freq
            self.time = time

        def __lt__(self, other):
            if self.freq == other.freq:
                return np.random.choice([True, False])
            return self.freq < other.freq

        def __repr__(self):
            return "(o={}, f={}, t={})".format(self.oblock, self.freq,
                                               self.time)

    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size
        self.lfu = HeapDict()
        self.time = 0
        np.random.seed(123)

        self.pollution = Pollutionator(cache_size)

    def __contains__(self, oblock):
        return oblock in self.lfu

    def addToCache(self, oblock):
        x = self.LFU_Entry(oblock, freq=1, time=self.time)
        self.lfu[oblock] = x

    def hit(self, oblock):
        x = self.lfu[oblock]
        x.freq += 1
        x.time = self.time
        self.lfu[oblock] = x

    def evict(self):
        lfu_min = self.lfu.popMin()
        self.pollution.remove(lfu_min.oblock)
        return lfu_min

    def miss(self, oblock):
        if len(self.lfu) == self.cache_size:
            self.evict()
        self.addToCache(oblock)

    def request(self, oblock):
        miss = True

        self.time += 1

        if oblock in self:
            miss = False
            self.hit(oblock)
        else:
            self.miss(oblock)

        # Pollutionator
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return miss
Example #14
class ARCALeCaR(ALeCaR6):
    class ARC(ARC):
        def get(self, oblock):
            if oblock in self.T1:
                return self.T1[oblock]
            if oblock in self.T2:
                return self.T2[oblock]
            if oblock in self.B1:
                return self.B1[oblock]
            if oblock in self.B2:
                return self.B2[oblock]
            return None

        def replaceSafe(self):
            # x_in_B2 is never a concern here, so the related logic from
            # replace() is dropped
            if len(self.T1) > self.p:
                return self.T1.first()
            else:
                return self.T2.first()

        def nextVictim(self):
            len_L1 = len(self.T1) + len(self.B1)
            len_L2 = len(self.T2) + len(self.B2)

            if len_L1 >= self.cache_size:
                if len(self.T1) < self.cache_size:
                    return self.replaceSafe()
                else:
                    return self.T1.first()
            elif len_L1 < self.cache_size and len_L1 + len_L2 >= self.cache_size:
                return self.replaceSafe()

        def evictThis(self, oblock, put_in_history=True):
            if put_in_history:
                next_victim = self.nextVictim().oblock
                if oblock == next_victim:
                    evicted = self.evict()
                    assert (evicted.oblock == next_victim)
                else:
                    victim = self.get(oblock)
                    if oblock in self.T1:
                        if len(self.B1) + len(self.B2) == self.cache_size:
                            if len(self.B1) == 0:
                                self.B2.popFirst()
                            else:
                                self.B1.popFirst()
                        del self.T1[oblock]
                        self.B1[oblock] = victim
                    else:
                        if len(self.B1) + len(self.B2) == self.cache_size:
                            if len(self.B2) == 0:
                                self.B1.popFirst()
                            else:
                                self.B2.popFirst()
                        del self.T2[oblock]
                        self.B2[oblock] = victim
            else:
                if oblock in self.T1:
                    del self.T1[oblock]
                else:
                    del self.T2[oblock]

        def missInHistory(self, oblock, history):
            x = history[oblock]
            x_in_B2 = oblock in self.B2
            del history[oblock]

            if len(self.T1) + len(self.T2) == self.cache_size:
                evicted = self.replace(x_in_B2)
                self.pollution.remove(evicted.oblock)

            self.moveToList(x, self.T2)

    class LFU(LFU):
        def get(self, oblock):
            return self.lfu[oblock]

        def nextVictim(self):
            return self.lfu.min()

        def evictThis(self, oblock):
            del self.lfu[oblock]

        def request(self, oblock, freq=None):
            miss = super().request(oblock)

            if freq is not None:
                assert (isinstance(freq, int))
                x = self.lfu[oblock]
                del self.lfu[oblock]
                x.freq = freq
                self.lfu[oblock] = x

            return miss

    class ARCALeCaR_Entry(ALeCaR6.ALeCaR6_Entry):
        pass

    class ARCALeCaR_Learning_Rate(ALeCaR6.ALeCaR6_Learning_Rate):
        pass

    def __init__(self, cache_size, **kwargs):
        np.random.seed(123)
        self.time = 0

        self.cache_size = cache_size

        kwargs_arc = {}
        kwargs_lfu = {}
        if 'arc' in kwargs:
            kwargs_arc = kwargs['arc']
        if 'lfu' in kwargs:
            kwargs_lfu = kwargs['lfu']

        self.arc = self.ARC(cache_size, **kwargs_arc)
        self.lfu = self.LFU(cache_size, **kwargs_lfu)

        self.history_size = cache_size
        self.history = DequeDict()

        self.initial_weight = 0.5

        self.learning_rate = self.ARCALeCaR_Learning_Rate(cache_size, **kwargs)

        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)

        self.visual = Visualizinator(labels=['W_arc', 'W_lfu', 'hit-rate'],
                                     windowed_labels=['hit-rate'],
                                     window_size=cache_size,
                                     **kwargs)

        self.pollution = Pollutionator(cache_size, **kwargs)

    def __contains__(self, oblock):
        return oblock in self.lfu

    def cacheFull(self):
        return len(self.lfu.lfu) == self.cache_size

    def addToCache(self, oblock, freq):
        self.arc.request(oblock)
        self.lfu.request(oblock, freq=freq)

    def addToHistory(self, x, policy):
        policy_history = None
        if policy == 0:
            policy_history = "ARC"
        elif policy == 1:
            policy_history = "LFU"
        elif policy == -1:
            return False

        # prune history for lazy removal to match ARC's history
        if len(self.history) == 2 * self.history_size:
            history_oblocks = [meta.oblock for meta in self.history]
            for oblock in history_oblocks:
                if not (oblock in self.arc.B1 or oblock in self.arc.B2):
                    del self.history[oblock]

        x.evicted_me = policy_history

        self.history[x.oblock] = x

        return True

    def evict(self):
        arc = self.arc.nextVictim()
        lfu = self.lfu.nextVictim()

        assert (arc is not None)
        assert (lfu is not None)

        evicted = arc
        policy = self.getChoice()

        if arc.oblock == lfu.oblock:
            evicted, policy = arc, -1
        elif policy == 0:
            evicted = arc
        else:
            evicted = lfu

        # save info to meta ARCLeCaREntry for history
        meta = self.ARCALeCaR_Entry(evicted.oblock, time=self.time)

        # arc data for meta:
        # nothing needed here; everything relevant already lives in ARC itself

        # lfu data for meta
        meta.freq = self.lfu.get(evicted.oblock).freq

        put_in_history = self.addToHistory(meta, policy)

        # evict from both
        self.arc.evictThis(evicted.oblock, put_in_history=put_in_history)
        self.lfu.evictThis(evicted.oblock)

        self.pollution.remove(evicted.oblock)

        return meta.oblock, policy

    def hit(self, oblock):
        self.arc.request(oblock)
        self.lfu.request(oblock)

    # NOTE: adjustWeights (inherited from ALeCaR6) names its parameters
    # rewardLRU and rewardLFU; the naming is incidental, so it is reused here
    # for the ARC and LFU rewards as-is

    def miss(self, oblock):
        freq = None
        evicted = None

        if oblock in self.history:
            # History retention mirrors ARC's ghost lists; since this is a
            # miss, get() can only find oblock in B1 or B2 at this point
            if self.arc.get(oblock) != None:
                meta = self.history[oblock]
                freq = meta.freq + 1

                if meta.evicted_me == "ARC":
                    self.adjustWeights(-1, 0)
                else:
                    self.adjustWeights(0, -1)

            del self.history[oblock]

        if len(self.lfu.lfu) == self.cache_size:
            evicted, policy = self.evict()

        self.addToCache(oblock, freq)

        return evicted

    def request(self, oblock):
        miss = True
        evicted = None
        op = CacheOp.INSERT

        self.time += 1

        self.visual.add({
            'W_arc': (self.time, self.W[0]),
            'W_lfu': (self.time, self.W[1])
        })

        self.learning_rate.update(self.time)

        if oblock in self:
            miss = False
            op = CacheOp.HIT
            self.hit(oblock)
        else:
            evicted = self.miss(oblock)

        self.visual.addWindow({'hit-rate': 0 if miss else 1}, self.time)

        if not miss:
            self.learning_rate.hitrate += 1

        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return op, evicted
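
Unlike the other classes in this listing, ARCALeCaR.request returns a (CacheOp, evicted) pair rather than a boolean miss flag, so a driver has to unpack it. A hypothetical usage sketch, assuming the CacheOp enum referenced in request() above:

# Hypothetical driver for ARCALeCaR; CacheOp.HIT / CacheOp.INSERT as used above
def run_arcalecar(cache_size, trace):
    cache = ARCALeCaR(cache_size)
    hits = 0
    for oblock in trace:
        op, evicted = cache.request(oblock)
        if op == CacheOp.HIT:
            hits += 1
    return hits / len(trace)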
Example #15
class LIRS:
    class LIRS_Entry:
        def __init__(self, oblock, is_LIR=False, in_cache=True):
            self.oblock = oblock
            self.is_LIR = is_LIR
            self.in_cache = in_cache

        def __repr__(self):
            return "(o={}, is_LIR={}, in_cache={})".format(
                self.oblock, self.is_LIR, self.in_cache)

    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size

        self.hirs_ratio = 0.01

        process_kwargs(self, kwargs, acceptable_kws=['hirs_ratio'])

        self.hirs_limit = max(2, int((self.cache_size * self.hirs_ratio)))
        self.lirs_limit = self.cache_size - self.hirs_limit

        self.hirs_count = 0
        self.lirs_count = 0
        self.nonresident = 0

        # s stack, semi-split to find nonresident HIRs quickly
        self.s = DequeDict()
        self.nr_hirs = DequeDict()
        # q, the resident HIR stack
        self.q = DequeDict()

        self.time = 0
        self.last_oblock = None

        self.pollution = Pollutionator(cache_size)

    def __contains__(self, oblock):
        if oblock in self.s:
            return self.s[oblock].in_cache
        return oblock in self.q

    def _cacheFull(self):
        return self.lirs_count + self.hirs_count == self.cache_size

    def hitLIR(self, oblock):
        lru_lir = self.s.first()
        x = self.s[oblock]
        self.s[oblock] = x
        if lru_lir is x:
            self.prune()

    def prune(self):
        while self.s:
            x = self.s.first()
            if x.is_LIR:
                break

            del self.s[x.oblock]
            if not x.in_cache:
                del self.nr_hirs[x.oblock]
                self.nonresident -= 1

    def hitHIRinLIRS(self, oblock):
        x = self.s[oblock]

        if x.in_cache:
            del self.s[oblock]
            del self.q[oblock]
            self.hirs_count -= 1
        else:
            del self.s[oblock]
            del self.nr_hirs[oblock]
            self.nonresident -= 1

            if self._cacheFull():
                self.ejectHIR()

        if self.lirs_count >= self.lirs_limit:
            self.ejectLIR()

        self.s[oblock] = x
        x.in_cache = True
        x.is_LIR = True
        self.lirs_count += 1

    def ejectLIR(self):
        assert (self.s.first().is_LIR)

        lru = self.s.popFirst()
        self.lirs_count -= 1
        lru.is_LIR = False

        self.q[lru.oblock] = lru
        self.hirs_count += 1

        self.prune()

    def ejectHIR(self):
        lru = self.q.popFirst()
        self.hirs_count -= 1

        if lru.oblock in self.s:
            self.nr_hirs[lru.oblock] = lru
            lru.in_cache = False
            self.nonresident += 1

        self.pollution.remove(lru.oblock)

    def hitHIRinQ(self, oblock):
        x = self.q[oblock]
        self.q[oblock] = x
        self.s[oblock] = x

    def limitStack(self):
        while len(self.s) > (2 * self.cache_size):
            lru = self.nr_hirs.popFirst()
            del self.s[lru.oblock]
            self.nonresident -= 1

    def miss(self, oblock):
        if self._cacheFull():
            self.ejectHIR()

        if self.lirs_count < self.lirs_limit and self.hirs_count == 0:
            x = self.LIRS_Entry(oblock, is_LIR=True)
            self.s[oblock] = x
            self.lirs_count += 1
        else:
            x = self.LIRS_Entry(oblock, is_LIR=False)
            self.s[oblock] = x
            self.q[oblock] = x
            self.hirs_count += 1

    def request(self, oblock):
        miss = oblock not in self

        self.time += 1

        if oblock != self.last_oblock:
            self.last_oblock = oblock

            if oblock in self.s:
                x = self.s[oblock]
                if x.is_LIR:
                    self.hitLIR(oblock)
                else:
                    self.hitHIRinLIRS(oblock)
            elif oblock in self.q:
                self.hitHIRinQ(oblock)
            else:
                self.miss(oblock)

        self.limitStack()

        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return miss
Example #16
class DLIRS:
    class DLIRS_Entry:
        def __init__(self, oblock, is_LIR=False, in_cache=True):
            self.oblock = oblock
            self.is_LIR = is_LIR
            self.is_demoted = False
            self.in_cache = in_cache

        def __repr__(self):
            return "(o={}, is_LIR={}, is_demoted={}, in_cache={})".format(
                self.oblock, self.is_LIR, self.is_demoted, self.in_cache)

    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size

        self.hirs_ratio = 0.01

        process_kwargs(self, kwargs, acceptable_kws=['hirs_ratio'])

        self.hirs_limit = max(1,
                              int((self.cache_size * self.hirs_ratio) + 0.5))
        self.lirs_limit = self.cache_size - self.hirs_limit

        self.hirs_count = 0
        self.lirs_count = 0
        self.demoted = 0
        self.nonresident = 0

        # s stack, semi-split to find nonresident HIRs quickly
        self.lirs = DequeDict()
        self.hirs = DequeDict()
        # q, the resident HIR stack
        self.q = DequeDict()

        self.time = 0
        self.visual = Visualizinator(labels=['Q size'])
        self.pollution = Pollutionator(cache_size)

    def __contains__(self, oblock):
        if oblock in self.lirs:
            return self.lirs[oblock].in_cache
        return oblock in self.q

    def hitLIR(self, oblock):
        lru_lir = self.lirs.first()
        x = self.lirs[oblock]
        self.lirs[oblock] = x
        if lru_lir is x:
            self.prune()

    def prune(self):
        while self.lirs:
            x = self.lirs.first()
            if x.is_LIR:
                break

            del self.lirs[x.oblock]
            del self.hirs[x.oblock]

            if not x.in_cache:
                self.nonresident -= 1

    def hitHIRinLIRS(self, oblock):
        x = self.lirs[oblock]
        in_cache = x.in_cache

        x.is_LIR = True

        del self.lirs[oblock]
        del self.hirs[oblock]

        if in_cache:
            del self.q[oblock]
            self.hirs_count -= 1
        else:
            self.adjustSize(True)
            x.in_cache = True
            self.nonresident -= 1

        while self.lirs_count >= self.lirs_limit:
            self.ejectLIR()
        while self.hirs_count + self.lirs_count >= self.cache_size:
            self.ejectHIR()

        self.lirs[oblock] = x
        self.lirs_count += 1

        return not in_cache

    def ejectLIR(self):
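        # (Explanatory note, not from the source) The LRU LIR block is
        # demoted to a resident HIR: it moves to the MRU end of Q and is
        # marked demoted so adjustSize can react if it is referenced again,
        # and the stack is pruned so its bottom remains a LIR block.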
        lru = self.lirs.popFirst()
        self.lirs_count -= 1
        lru.is_LIR = False

        lru.is_demoted = True
        self.demoted += 1

        self.q[lru.oblock] = lru
        self.hirs_count += 1

        self.prune()

    def ejectHIR(self):
        lru = self.q.popFirst()
        if lru.oblock in self.lirs:
            lru.in_cache = False
            self.nonresident += 1
        if lru.is_demoted:
            self.demoted -= 1
        self.hirs_count -= 1
        self.pollution.remove(lru.oblock)

    def hitHIRinQ(self, oblock):
        x = self.q[oblock]
        if x.is_demoted:
            self.adjustSize(False)
            x.is_demoted = False
            self.demoted -= 1

        self.q[oblock] = x
        self.lirs[oblock] = x
        self.hirs[oblock] = x
        self.limitStack()

    def limitStack(self):
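        # (Explanatory note, not from the source) LIRS-style stack bound:
        # once resident entries plus nonresident HIR ghosts exceed twice the
        # cache size, the oldest HIR entry is dropped from the stack.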
        while self.hirs_count + self.lirs_count + self.nonresident > 2 * self.cache_size:
            lru = self.hirs.popFirst()
            del self.lirs[lru.oblock]
            if not lru.in_cache:
                self.nonresident -= 1

    def miss(self, oblock):
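        # (Explanatory note, not from the source) During warm-up, new blocks
        # are admitted directly as LIR until the LIR partition fills;
        # afterwards they enter as resident HIRs, tracked in lirs, hirs and
        # q, evicting LIR/HIR blocks as needed to stay within cache_size.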
        if self.lirs_count < self.lirs_limit and self.hirs_count == 0:
            x = self.DLIRS_Entry(oblock, is_LIR=True)
            self.lirs[oblock] = x
            self.lirs_count += 1
            return

        while self.hirs_count + self.lirs_count >= self.cache_size:
            while self.lirs_count > self.lirs_limit:
                self.ejectLIR()
            self.ejectHIR()

        x = self.DLIRS_Entry(oblock, is_LIR=False)
        self.lirs[oblock] = x
        self.hirs[oblock] = x
        self.q[oblock] = x

        self.hirs_count += 1
        self.limitStack()

    def adjustSize(self, hit_nonresident_hir):
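        # (Explanatory note, not from the source) A hit on a nonresident HIR
        # grows the HIR partition by roughly demoted/nonresident blocks; a
        # hit on a demoted resident HIR grows the LIR partition by roughly
        # nonresident/demoted blocks. The other limit is always cache_size
        # minus the adjusted one, so each partition keeps at least one block.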
        if hit_nonresident_hir:
            self.hirs_limit = min(
                self.cache_size - 1, self.hirs_limit +
                max(1, int((self.demoted / self.nonresident) + 0.5)))
            self.lirs_limit = self.cache_size - self.hirs_limit
        else:
            self.lirs_limit = min(
                self.cache_size - 1, self.lirs_limit +
                max(1, int((self.nonresident / self.demoted) + 0.5)))
            self.hirs_limit = self.cache_size - self.lirs_limit

    def request(self, oblock):
        miss = False
        self.time += 1

        if oblock in self.lirs:
            x = self.lirs[oblock]
            if x.is_LIR:
                self.hitLIR(oblock)
            else:
                miss = self.hitHIRinLIRS(oblock)
        elif oblock in self.q:
            self.hitHIRinQ(oblock)
        else:
            miss = True
            self.miss(oblock)

        # Visualizinator
        self.visual.add({'Q size': (self.time, self.hirs_limit)})

        # Pollutionator
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        self.pollution.update(self.time)

        return miss

    def get_hir_size(self):
        # Number of resident HIR blocks currently held in Q
        return len(self.q)
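

# A minimal driver sketch, not part of the source: it assumes that DequeDict,
# Visualizinator, Pollutionator and process_kwargs from this codebase are
# importable, and replays a made-up trace through DLIRS; request() returns
# True on a miss.
if __name__ == "__main__":
    dlirs = DLIRS(4)
    trace = [1, 2, 3, 1, 4, 5, 1, 2, 6, 1]
    misses = sum(1 for oblock in trace if dlirs.request(oblock))
    print("miss ratio:", misses / len(trace),
          "resident HIRs:", dlirs.get_hir_size())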
Example #17
0
class LeCaR:
    ######################
    ## INTERNAL CLASSES ##
    ######################

    # Entry to track the page information
    class LeCaR_Entry:
        def __init__(self, oblock, freq=1, time=0):
            self.oblock = oblock
            self.freq = freq
            self.time = time
            self.evicted_time = None

        # Minimal comparators needed for HeapDict
        def __lt__(self, other):
            if self.freq == other.freq:
                return self.oblock < other.oblock
            return self.freq < other.freq

        # Useful for debugging
        def __repr__(self):
            return "(o={}, f={}, t={})".format(self.oblock, self.freq,
                                               self.time)

    # kwargs: We use keyword arguments so that options can be passed down as
    #         needed. We filter the keywords for the ones we accept, ignoring
    #         those we don't use, and update the instance with the passed
    #         values after the default initializations but before those
    #         values are used in a way that cannot be taken back, such as
    #         setting the weights (W).
    #         Note that cache_size is a required argument, unlike the
    #         optional kwargs.
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size
        self.lru = DequeDict()
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Fixed Learning Rate
        self.learning_rate = 0.45

        # Fixed Discount Rate
        self.discount_rate = 0.005**(1 / self.cache_size)

        # Apply values passed in kwargs now, since the acceptable_kws members
        # are prerequisites for what follows (e.g. initial_weight feeds W)
        process_kwargs(
            self,
            kwargs,
            acceptable_kws=['learning_rate', 'initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)
        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-LeCaR', 'W_lfu-LeCaR', 'hit-rate'],
            windowed_labels=['hit-rate'],
            window_size=cache_size)

        # Pollution
        self.pollution = Pollutionator(cache_size)

    # True if oblock is in cache (which LRU can represent)
    def __contains__(self, oblock):
        return oblock in self.lru

    # Add Entry to cache with given frequency
    def addToCache(self, oblock, freq):
        x = self.LeCaR_Entry(oblock, freq, self.time)
        self.lru[oblock] = x
        self.lfu[oblock] = x

    # Add Entry to history dictated by policy
    # policy: 0, Add Entry to LRU History
    #         1, Add Entry to LFU History
    #        -1, Do not add Entry to any History
    def addToHistory(self, x, policy):
        # Use reference to policy_history to reduce redundant code
        policy_history = None
        if policy == 0:
            policy_history = self.lru_hist
        elif policy == 1:
            policy_history = self.lfu_hist
        elif policy == -1:
            return

        # Evict from history if it is full
        if len(policy_history) == self.history_size:
            evicted = self.getLRU(policy_history)
            del policy_history[evicted.oblock]
        policy_history[x.oblock] = x

    # Get the LRU item in the given DequeDict
    # NOTE: DequeDict can be: lru, lru_hist, or lfu_hist
    # NOTE: does *NOT* remove the LRU Entry from given DequeDict
    def getLRU(self, dequeDict):
        return dequeDict.first()

    # Get the LFU min item in the LFU (HeapDict)
    # NOTE: does *NOT* remove the LFU Entry from LFU
    def getHeapMin(self):
        return self.lfu.min()

    # Get the random eviction choice based on current weights
    def getChoice(self):
        return 0 if np.random.rand() < self.W[0] else 1

    # Evict an entry
    def evict(self):
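        # (Explanatory note, not from the source) Sample a policy from the
        # weights W, evict that policy's candidate, and record the victim in
        # that policy's history; when LRU and LFU agree on the victim, no
        # history entry is made, since neither policy can be blamed later.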
        lru = self.getLRU(self.lru)
        lfu = self.getHeapMin()

        evicted = lru
        policy = self.getChoice()

        # Since we're using Entry references, we use is to check
        # that the LRU and LFU Entries are the same Entry
        if lru is lfu:
            evicted, policy = lru, -1
        elif policy == 0:
            evicted = lru
        else:
            evicted = lfu

        del self.lru[evicted.oblock]
        del self.lfu[evicted.oblock]

        evicted.evicted_time = self.time
        self.pollution.remove(evicted.oblock)

        self.addToHistory(evicted, policy)

        return evicted, policy

    # Cache Hit
    def hit(self, oblock):
        x = self.lru[oblock]
        x.time = self.time

        self.lru[oblock] = x

        x.freq += 1
        self.lfu[oblock] = x

    # Adjust the weights based on the given rewards for LRU and LFU
    def adjustWeights(self, rewardLRU, rewardLFU):
        reward = np.array([rewardLRU, rewardLFU], dtype=np.float32)
        self.W = self.W * np.exp(self.learning_rate * reward)
        self.W = self.W / np.sum(self.W)
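        # (Explanatory note, not from the source) This is a multiplicative-
        # weights update: W_i <- W_i * exp(learning_rate * reward_i),
        # followed by renormalization so the weights sum to 1. The rewards
        # passed in are <= 0, so the policy blamed for a regretted eviction
        # loses weight relative to the other.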

        if self.W[0] >= 0.99:
            self.W = np.array([0.99, 0.01], dtype=np.float32)
        elif self.W[1] >= 0.99:
            self.W = np.array([0.01, 0.99], dtype=np.float32)

    # Cache Miss
    def miss(self, oblock):
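        # (Explanatory note, not from the source) If the missed block is
        # found in a policy's history, evicting it under that policy was a
        # mistake: that policy is penalized with a reward of
        # -discount_rate**(time since eviction), so older mistakes weigh
        # less.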
        freq = 1
        if oblock in self.lru_hist:
            entry = self.lru_hist[oblock]
            freq = entry.freq + 1
            del self.lru_hist[oblock]
            reward_lru = -(self.discount_rate
                           **(self.time - entry.evicted_time))
            self.adjustWeights(reward_lru, 0)
        elif oblock in self.lfu_hist:
            entry = self.lfu_hist[oblock]
            freq = entry.freq + 1
            del self.lfu_hist[oblock]
            reward_lfu = -(self.discount_rate
                           **(self.time - entry.evicted_time))
            self.adjustWeights(0, reward_lfu)

        # If the cache is full, evict
        if len(self.lru) == self.cache_size:
            evicted, policy = self.evict()

        self.addToCache(oblock, freq)

    # Process an access request for the given oblock
    def request(self, oblock):
        miss = True

        self.time += 1

        self.visual.add({
            'W_lru-LeCaR': (self.time, self.W[0]),
            'W_lfu-LeCaR': (self.time, self.W[1])
        })

        if oblock in self:
            miss = False
            self.hit(oblock)
        else:
            self.miss(oblock)

        # Windowed
        self.visual.addWindow({'hit-rate': 0 if miss else 1}, self.time)

        # Pollution
        if miss:
            self.pollution.incrementUniqueCount()
        self.pollution.setUnique(oblock)
        if self.time % self.cache_size == 0:
            self.pollution.update(self.time)

        return miss

    def get_w_lru(self):
        # Current LRU decision weight
        return self.W[0]
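

# A minimal driver sketch, not part of the source: it assumes that DequeDict,
# HeapDict, Visualizinator, Pollutionator and process_kwargs from this
# codebase are importable. The kwargs override the learning_rate and
# history_size defaults set in __init__; the trace values are made up.
if __name__ == "__main__":
    lecar = LeCaR(4, learning_rate=0.3, history_size=8)
    trace = [1, 2, 3, 4, 1, 5, 2, 6, 1, 2]
    misses = sum(1 for oblock in trace if lecar.request(oblock))
    print("miss ratio:", misses / len(trace), "W_lru:", lecar.get_w_lru())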