Example 1
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size

        self.hirs_ratio = 0.01

        process_kwargs(self, kwargs, acceptable_kws=['hirs_ratio'])

        self.hirs_limit = max(1,
                              int((self.cache_size * self.hirs_ratio) + 0.5))
        self.lirs_limit = self.cache_size - self.hirs_limit

        self.hirs_count = 0
        self.lirs_count = 0
        self.demoted = 0
        self.nonresident = 0

        # The LIRS S stack, kept semi-split across two DequeDicts so that
        # nonresident HIR entries can be found quickly
        self.lirs = DequeDict()
        self.hirs = DequeDict()
        # Q, the resident HIR stack
        self.q = DequeDict()

        self.time = 0
        self.visual = Visualizinator(labels=['Q size'])
        self.pollution = Pollutionator(cache_size)
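Each of these constructors delegates keyword handling to a process_kwargs helper that is not shown in the examples. As a rough sketch, assuming it does nothing more than copy whitelisted keyword arguments onto the instance and ignore the rest, it might look like this (the real helper may validate or reject unknown keys):

def process_kwargs(obj, kwargs, acceptable_kws):
    # Hypothetical sketch of the helper used above: copy only whitelisted
    # keyword arguments onto obj, leaving everything else untouched.
    for key, value in kwargs.items():
        if key in acceptable_kws:
            setattr(obj, key, value)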
Example 2
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size

        # Two stacks for the cache (s and q), used to mark demoted items faster
        self.s = DequeDict()
        self.q = DequeDict()

        # LFU heap
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Learning Rate
        self.learning_rate = self.Cacheus_Learning_Rate(cache_size, **kwargs)

        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)

        # Variables
        hirs_ratio = 0.01
        self.q_limit = max(1, int((hirs_ratio * self.cache_size) + 0.5))
        self.s_limit = self.cache_size - self.q_limit
        self.q_size = 0
        self.s_size = 0
        self.dem_count = 0
        self.nor_count = 0
        self.q_sizes = []

        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-Cacheus', 'W_lfu-Cacheus', 'q_size'])

        # Pollution
        self.pollution = Pollutionator(cache_size)
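The q_limit/s_limit arithmetic above reserves roughly one percent of the cache for resident HIR blocks, rounding to the nearest integer with a floor of one slot, and gives the remainder to the S stack. A small numeric check, assuming a cache of 100 entries:

cache_size = 100  # assumed example value
hirs_ratio = 0.01
q_limit = max(1, int((hirs_ratio * cache_size) + 0.5))  # 1
s_limit = cache_size - q_limit                          # 99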
Example 3
        def __init__(self, period_length, **kwargs):
            self.learning_rate = np.sqrt((2.0 * np.log(2)) / period_length)

            process_kwargs(self, kwargs, acceptable_kws=['learning_rate'])

            self.learning_rate_reset = min(max(self.learning_rate, 0.001), 1)
            self.learning_rate_curr = self.learning_rate
            self.learning_rate_prev = 0.0
            self.learning_rates = []

            self.period_len = period_length

            self.hitrate = 0
            self.hitrate_prev = 0.0
            self.hitrate_diff_prev = 0.0
            self.hitrate_zero_count = 0
            self.hitrate_nega_count = 0
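The initial learning rate follows sqrt(2 * ln(2) / period_length), so longer periods yield smaller, more conservative rates, and the reset value is clamped to the range [0.001, 1]. A quick check of the formula, assuming a period length of 1000 requests:

import numpy as np

period_length = 1000  # assumed example value
lr = np.sqrt((2.0 * np.log(2)) / period_length)
lr_reset = min(max(lr, 0.001), 1)
print(round(float(lr), 4), round(float(lr_reset), 4))  # ~0.0372 0.0372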
Example 4
    def __init__(self, cache_size, **kwargs):
        # Randomness and Time
        np.random.seed(123)
        self.time = 0

        # Cache
        self.cache_size = cache_size
        self.lru = DequeDict()
        self.lfu = HeapDict()

        # Histories
        self.history_size = cache_size // 2
        self.lru_hist = DequeDict()
        self.lfu_hist = DequeDict()

        # Decision Weights Initialized
        self.initial_weight = 0.5

        # Fixed Learning Rate
        self.learning_rate = 0.45

        # Fixed Discount Rate
        self.discount_rate = 0.005**(1 / self.cache_size)

        # Apply overrides from kwargs; the default values assigned above are
        # prerequisites for any of the acceptable_kws entries
        process_kwargs(
            self,
            kwargs,
            acceptable_kws=['learning_rate', 'initial_weight', 'history_size'])

        # Decision Weights
        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)
        # Visualize
        self.visual = Visualizinator(
            labels=['W_lru-LeCaR', 'W_lfu-LeCaR', 'hit-rate'],
            windowed_labels=['hit-rate'],
            window_size=cache_size)

        # Pollution
        self.pollution = Pollutionator(cache_size)
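The fixed discount rate 0.005 ** (1 / cache_size) is chosen so that repeated discounting over one full cache size worth of requests shrinks a contribution by a factor of 0.005. A quick numeric check, assuming a cache of 100 entries:

cache_size = 100  # assumed example value
discount_rate = 0.005 ** (1 / cache_size)
print(round(discount_rate, 4))                # ~0.9484
print(round(discount_rate ** cache_size, 3))  # 0.005 after cache_size steps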
Example 5
    def __init__(self, cache_size, **kwargs):
        np.random.seed(123)
        self.time = 0

        self.cache_size = cache_size

        kwargs_arc = {}
        kwargs_lfu = {}
        if 'arc' in kwargs:
            kwargs_arc = kwargs['arc']
        if 'lfu' in kwargs:
            kwargs_lfu = kwargs['lfu']

        self.arc = self.ARC(cache_size, **kwargs_arc)
        self.lfu = self.LFU(cache_size, **kwargs_lfu)

        self.history_size = cache_size
        self.history = DequeDict()

        self.initial_weight = 0.5

        self.learning_rate = self.ARCALeCaR_Learning_Rate(cache_size, **kwargs)

        process_kwargs(self,
                       kwargs,
                       acceptable_kws=['initial_weight', 'history_size'])

        self.W = np.array([self.initial_weight, 1 - self.initial_weight],
                          dtype=np.float32)

        self.visual = Visualizinator(labels=['W_arc', 'W_lfu', 'hit-rate'],
                                     windowed_labels=['hit-rate'],
                                     window_size=cache_size,
                                     **kwargs)

        self.pollution = Pollutionator(cache_size, **kwargs)
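The 'arc' and 'lfu' entries in kwargs let callers configure the two internal policies independently of the hybrid's own options. The extraction pattern above can be written more compactly with dict.get; the helper name below is purely illustrative and not part of the code shown:

def split_subpolicy_kwargs(kwargs):
    # Illustrative sketch of the pattern used above: pull out per-policy
    # option dictionaries, defaulting to empty ones when absent.
    kwargs_arc = kwargs.get('arc', {})
    kwargs_lfu = kwargs.get('lfu', {})
    return kwargs_arc, kwargs_lfu

kwargs_arc, kwargs_lfu = split_subpolicy_kwargs({'arc': {}, 'history_size': 500})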
Example 6
    def __init__(self, cache_size, **kwargs):
        self.cache_size = cache_size

        self.hirs_ratio = 0.01

        process_kwargs(self, kwargs, acceptable_kws=['hirs_ratio'])

        self.hirs_limit = max(2, int(self.cache_size * self.hirs_ratio))
        self.lirs_limit = self.cache_size - self.hirs_limit

        self.hirs_count = 0
        self.lirs_count = 0
        self.nonresident = 0

        # s stack, semi-split to find nonresident HIRs quickly
        self.s = DequeDict()
        self.nr_hirs = DequeDict()
        # q, the resident HIR stack
        self.q = DequeDict()

        self.time = 0
        self.last_oblock = None

        self.pollution = Pollutionator(cache_size)
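All of these constructors build their stacks on DequeDict (and Examples 2 and 4 additionally on HeapDict), neither of which is shown. Below is a minimal stand-in for the ordered-mapping behaviour the code appears to rely on, built on collections.OrderedDict purely as an assumption; the real DequeDict very likely exposes a richer interface:

from collections import OrderedDict

class DequeDict:
    # Hypothetical stand-in: an ordered mapping with O(1) membership tests,
    # insertion at the MRU end, and eviction from the LRU end.
    def __init__(self):
        self._od = OrderedDict()

    def __contains__(self, key):
        return key in self._od

    def __len__(self):
        return len(self._od)

    def push(self, key, value):
        # Insert or refresh key at the most-recently-used end.
        self._od[key] = value
        self._od.move_to_end(key)

    def pop_front(self):
        # Remove and return the least-recently-used (oldest) entry.
        return self._od.popitem(last=False)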