示例#1
0
 def __init__(self):
     """Set up the simulation and 16 LFU-memoized views of getNextState.

     Each cache is bounded at len(c.ACTION_LIST)**2 entries and keys
     calls via self._custom_keys.
     """
     self.sim = simulation.Simulation()
     # Capacity is the same for every cache, so compute it once.
     capacity = len(c.ACTION_LIST) ** 2
     self.caches = [
         mem.cached(algorithm=mem.LFU,
                    max_size=capacity,
                    custom_key_maker=self._custom_keys)(self.getNextState)
         for _ in range(16)
     ]
示例#2
0
def cached(user_function=None,
           max_size=500000,
           ttl=None,
           algorithm=CachingAlgorithmFlag.LRU,
           thread_safe=True,
           order_independent=False):
    """
    Entry point for the configuration of the memoization cache (max_size=500000)
    """
    # Thin pass-through to memoization.cached; only the max_size default
    # (500000) differs from the library's own defaults.
    return memoization.cached(user_function=user_function,
                              max_size=max_size,
                              ttl=ttl,
                              algorithm=algorithm,
                              thread_safe=thread_safe,
                              order_independent=order_independent)
示例#3
0
    def _cache_in_memory(self):
        """Return self.func wrapped in an in-memory cache with TTL self.expired_time.

        Cache keys come from self._get_data_key, which only sees keyword
        arguments (positional args are ignored by the key maker).
        """
        # Key maker re-signed to mirror self.func's parameters; note it
        # forwards only **kwargs to _get_data_key.
        @self._update_signature(signature(self.func).parameters.values())
        def make_custom_key(*args, **kwargs):
            return self._get_data_key(**kwargs)

        cached_func = cached(ttl=self.expired_time,
                             custom_key_maker=make_custom_key)(self.func)

        # Delegate through a wraps()-decorated shim so callers see the
        # cached function's metadata.
        @wraps(cached_func)
        def wrapper(*args, **kwargs):
            return cached_func(*args, **kwargs)

        return wrapper
示例#4
0
def cached_wrappers(user_function=None,
                    max_size=0,
                    ttl=None,
                    algorithm=CachingAlgorithmFlag.LRU,
                    thread_safe=True,
                    order_independent=False):
    """
    Entry point for the configuration of the memoization cache

    To find the optimal max-size value:

    from sharadar.util.cache import wrappers
    for wrapper in wrappers:
        print(wrapper, wrapper.cache_info())
    """
    # Build the memoized wrapper, then record it in the module-level
    # `wrappers` list so cache statistics can be inspected later.
    memoized = memoization.cached(user_function=user_function,
                                  max_size=max_size,
                                  ttl=ttl,
                                  algorithm=algorithm,
                                  thread_safe=thread_safe,
                                  order_independent=order_independent)
    wrappers.append(memoized)
    return memoized
示例#5
0
def fit_mice_execute(X: DataFrame, iterations: int = 1) -> Any:
    """Fit a multiply-imputed kernel on X and run `iterations` MICE passes.

    Returns the fitted kernel (one dataset, all iterations retained).
    """
    mice_kernel = MultipleImputedKernel(X, save_all_iterations=True, datasets=1)
    mice_kernel.mice(iterations, verbose=True)
    return mice_kernel


# Disk-backed cached variant of fit_mice_execute.
# NOTE(review): `memory` is defined elsewhere — presumably a joblib.Memory
# instance; confirm before relying on its eviction semantics.
fit_mice_execute_cached = memory.cache(fit_mice_execute)


def fit_mice_memory_cached_(X: DataFrame, iterations: int = 1) -> Any:
    """Announce a cache miss, then fit the MICE kernel for X.

    Intended to be wrapped by an in-memory cache, so the print only
    fires on actual misses.
    """
    print('MICE cache miss')
    kernel = fit_mice_execute(X, iterations)
    return kernel


# In-memory memoized entry point; keys calls via fit_mice_hash
# (defined elsewhere — presumably a content hash of the DataFrame; verify).
fit_mice_execute_memory_cached = cached(fit_mice_memory_cached_, custom_key_maker=fit_mice_hash)


class MiceForest(BaseEstimator, TransformerMixin):

    def __init__(self, iterations: int = 1):
        """Store the iteration count; the kernel is populated later by fit()."""
        self.iterations = iterations
        self.kernel = None

    def fit(self, X, y=None, **fit_params):
        """Fit (or fetch from the in-memory cache) a MICE kernel for X.

        Returns self, per the scikit-learn estimator convention.
        """
        fitted_kernel = fit_mice_execute_memory_cached(X)
        self.kernel = fitted_kernel
        return self

    def transform(self, X, y=None):
        print('MICE transforming')
        data = self.kernel.impute_new_data(X).complete_data(self.iterations)