Example #1
0
def get_machine_info():
    """
    Collect basic host statistics via psutil.

    :returns: tuple of strings — CPU usage percentage, used memory (MB),
        total memory (MB) and the number of running processes; a tuple of
        four ``None`` values when the OS is not supported.
    """
    if not SUPPORTED_OS:
        return (None, None, None, None)

    megabyte = pow(2, 20)
    cpu_pct = str(psutil.cpu_percent())
    mem_used_mb = str(psutil.virtual_memory().used / megabyte)
    mem_total_mb = str(psutil.virtual_memory().total / megabyte)
    process_count = str(len(psutil.pids()))

    return (cpu_pct, mem_used_mb, mem_total_mb, process_count)
Example #2
0
def calculate_thread_pool():
    """
    Compute the default value for the CherryPy ``thread_pool`` setting.

    Derived from the best values observed on several partner installations:
    servers with more memory can handle more threads.  Between 2 GB and
    6 GB of total RAM the pool size grows linearly from 50 to 150; outside
    that range the nearer bound is returned.  When psutil is unavailable,
    macOS is assumed to have at least 4 GB of RAM and gets the maximum;
    every other platform gets the minimum.
    """
    pool_floor = 50
    pool_ceiling = 150

    if not psutil:
        # No way to inspect memory; fall back to a per-platform guess.
        if sys.platform.startswith("darwin"):
            return pool_ceiling
        return pool_floor

    mem_floor = 2
    mem_ceiling = 6
    total_gb = psutil.virtual_memory().total / pow(2, 30)  # in Gb

    if not (mem_floor < total_gb < mem_ceiling):
        # Below (or at) the floor -> minimum; at or above the ceiling -> maximum.
        return pool_ceiling if total_gb >= mem_ceiling else pool_floor

    # In range: interpolate the pool size linearly with total memory.
    return pool_floor + int(
        (pool_ceiling - pool_floor)
        * float(total_gb - mem_floor)
        / (mem_ceiling - mem_floor)
    )
Example #3
0
def calculate_cache_size():
    """
    Compute the default value for the CherryPy memory cache, in bytes.

    The result lies between 50 MB and 250 MB.  Between 1 GB and 4 GB of
    total RAM the cache size grows linearly between those bounds; outside
    that range the nearer bound is returned.  When psutil is unavailable,
    macOS is assumed to have at least 4 GB of RAM and gets the maximum;
    every other platform gets the minimum.
    """
    cache_floor = 50000000
    cache_ceiling = 250000000

    if not psutil:
        # No way to inspect memory; fall back to a per-platform guess.
        if sys.platform.startswith("darwin"):
            return cache_ceiling
        return cache_floor

    mem_floor = 1
    mem_ceiling = 4
    total_gb = psutil.virtual_memory().total / pow(2, 30)  # in Gb

    if not (mem_floor < total_gb < mem_ceiling):
        # Below (or at) the floor -> minimum; at or above the ceiling -> maximum.
        return cache_ceiling if total_gb >= mem_ceiling else cache_floor

    # In range: interpolate the cache size linearly with total memory.
    return cache_floor + int(
        (cache_ceiling - cache_floor)
        * float(total_gb - mem_floor)
        / (mem_ceiling - mem_floor)
    )
def enable_redis_cache():
    """
    Set redis as the cache backend.

    When multiple processes run the server we need to use redis to ensure
    the cache is shared among them. It also limits redis memory usage to
    avoid server problems if the cache grows too much.
    """
    # Switch the backend and make redis evict least-recently-used keys of
    # any kind once the memory cap is reached (allkeys-lru policy).
    update_options_file("Cache", "CACHE_BACKEND", "redis")
    update_options_file("Cache", "CACHE_REDIS_MAXMEMORY_POLICY", "allkeys-lru")

    # Drop stale cached entries before measuring current redis memory usage.
    delete_redis_cache()
    server_memory = psutil.virtual_memory().total
    # Cap redis at roughly 10% of the machine's total RAM.
    max_memory = round(server_memory / 10)
    if hasattr(process_cache, "get_master_client"):
        helper = RedisSettingsHelper(process_cache.get_master_client())
        redis_memory = helper.get_used_memory()
        # If redis already uses more than the computed cap, raise the cap just
        # above current usage so the limit does not immediately trigger
        # evictions.  NOTE(review): the +2000 margin is presumably bytes, to
        # match psutil.virtual_memory().total — confirm the helper's units.
        if max_memory < redis_memory:
            max_memory = redis_memory + 2000

    update_options_file("Cache", "CACHE_REDIS_MAXMEMORY", max_memory)