from expiring_dict import ExpiringDict


def test_dict_ops():
    ed = ExpiringDict()
    ed["one"] = 1
    ed["two"] = 2
    ed["three"] = 3
    d = dict()
    d["one"] = 1
    d["two"] = 2
    d["three"] = 3
    assert [x for x in d] == [x for x in ed]
    assert [k for k in d.keys()] == [k for k in ed.keys()]
    assert [v for v in d.values()] == [v for v in ed.values()]
    assert [i for i in d.items()] == [i for i in ed.items()]
    assert "one" in ed
Example 3
import os
import pprint
import boto3
import uuid
import requests
import logging

from expiring_dict import ExpiringDict

NUM_DISCOUNTS = 2

EXPERIMENTATION_LOGGING = True
DEBUG_LOGGING = True

# Since the DescribeCampaign API is easily throttled and we only need
# the recipe from the campaign, which rarely (if ever) changes,
# use a cache to smooth out periods where we get throttled.
personalize_meta_cache = ExpiringDict(2 * 60 * 60)

servicediscovery = boto3.client('servicediscovery')
personalize = boto3.client('personalize')
ssm = boto3.client('ssm')
codepipeline = boto3.client('codepipeline')
sts = boto3.client('sts')
cw_events = boto3.client('events')

# SSM parameter name for the Personalize filter for purchased items
filter_purchased_param_name = 'retaildemostore-personalize-filter-purchased-arn'

training_config_param_name = 'retaildemostore-training-config'  # ParameterPersonalizeTrainConfig
dataset_group_name_root = 'retaildemostore-'

# -- Shared Functions
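
# A minimal sketch (not one of the original shared functions, which are elided
# in this snippet) of how personalize_meta_cache is meant to be used: look the
# campaign up in the expiring cache first and only call DescribeCampaign on a
# miss, so bursts of lookups do not get throttled.
def describe_campaign_cached(campaign_arn):
    campaign = personalize_meta_cache.get(campaign_arn)
    if campaign is None:
        # Cache miss, or the entry expired after the 2 hour TTL.
        campaign = personalize.describe_campaign(campaignArn=campaign_arn)['campaign']
        personalize_meta_cache[campaign_arn] = campaign
    return campaign
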
Example 4
import requests
import threading
import signal
import logging
import time
import json

from flask import Flask, request
from expiring_dict import ExpiringDict

app = Flask(__name__)

access_token = ""
logger = logging.getLogger(__name__)

channels = ExpiringDict(300)
broadcasters = ExpiringDict(300)

CLIENT_ID = "ao93ev500wqpaczsn55vca74kli26z"
CLIENT_SECRETE = "wdcac9rjzje6lwxczkffwz1woj5iw9"


@app.before_first_request
def activate_job():
    def run_job():
        while True:
            get_access_token()
            time.sleep(60)

    thread = threading.Thread(target=run_job)
    thread.start()

from time import sleep
from expiring_dict import ExpiringDict


def test_no_ttl():
    d = ExpiringDict()
    d["key"] = "value"
    assert len(d._ExpiringDict__expirations) == 0
def test_init():
    ExpiringDict()
def test_set_ttl():
    d = ExpiringDict()
    d.ttl("key", "expire", 1)
    assert len(d) == 1
    sleep(1.1)
    assert len(d) == 0
Example 8
update. We use etags and a cache to avoid refetching things that have not
changed.
"""

import logging
import tracemalloc

from expiring_dict import ExpiringDict

tracemalloc.start(25)

TRACE = False
HARVEST_TTL = 60
HARVEST_TTL_INTERVAL = 5

logger = logging.getLogger(__name__)

# A dictionary to hold our data.
# To avoid memory leaks, items in this cache expire after HARVEST_TTL
# seconds, and expired entries are swept every HARVEST_TTL_INTERVAL seconds.
harvest_dict = ExpiringDict(ttl=HARVEST_TTL, interval=HARVEST_TTL_INTERVAL)
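
# A minimal sketch (hypothetical helper, not part of the original module) of the
# etag pattern described in the docstring above: keep the last ETag and body for
# each URL in harvest_dict and send If-None-Match so resources that have not
# changed are not refetched. The requests usage and URL handling are assumptions.
def fetch_with_etag(url):
    import requests  # local import to keep this sketch self-contained

    cached = harvest_dict.get(url)
    headers = {'If-None-Match': cached['etag']} if cached else {}
    response = requests.get(url, headers=headers)
    if response.status_code == 304 and cached:
        # Not modified upstream; reuse the body cached within the last HARVEST_TTL seconds.
        return cached['body']
    harvest_dict[url] = {'etag': response.headers.get('ETag'), 'body': response.json()}
    return harvest_dict[url]['body']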

# TODO: update when this is updated upstream
# https://github.com/clearlydefined/service/blob/master/schemas/definition-1.0.json#L17
known_types = (
    # None gets _all definitions_ from the definition endpoint.
    # None,
    'npm',
    # 'git',
    # 'pypi',
    # 'composer',
    # 'maven',
    # 'gem',
    # 'nuget',
    # 'sourcearchive',
    # 'deb',
Example 9
from fastapi_playground.logger import NoHealth, get_basic_logger
from os import environ
import random
from time import perf_counter

import uvicorn
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from starlette.config import Config
from expiring_dict import ExpiringDict

known_cookies = ExpiringDict(600)

logger = get_basic_logger(__name__, 'DEBUG')
logger.addFilter(NoHealth())
"""
one of our dependencies probably locked the seed somewhere.
However, I actually need the global random.
If unlocking the random seed breaks your code,
you should create a new random instance and lock only the one you created
"""
random.seed()

# For deployment. SCRIPT_NAME is the root path of uvicorn.
# https://fastapi.tiangolo.com/advanced/behind-a-proxy/
SCRIPT_NAME = Config()('SCRIPT_NAME', default='')

TITLE = 'Sticky Session Demo'

app = FastAPI(
    title=TITLE,
Example 10
from time import sleep
from expiring_dict import ExpiringDict


def test_set_ttl():
    d = ExpiringDict(interval=0.005)
    d.ttl("key", "expire", 0.01)
    assert len(d) == 1
    sleep(0.02)
    assert len(d) == 0
Example 11
from time import sleep
from expiring_dict import ExpiringDict


def test_class_ttl():
    d = ExpiringDict(ttl=0.01, interval=0.005)
    d["key"] = "should be gone"
    assert len(d) == 1
    sleep(0.02)
    assert len(d) == 0
# SPDX-License-Identifier: MIT-0

import boto3
import os
import json
import logging
from typing import Dict, List
from expiring_dict import ExpiringDict
from experimentation.features import FEATURE_NAMES
from experimentation.experiment_evidently import EvidentlyExperiment

log = logging.getLogger(__name__)

evidently = boto3.client('evidently')
# Cache feature evals for 30 seconds to balance latency and timeliness of picking up experiments
eval_features_by_user_cache = ExpiringDict(30)

project_name = os.environ['EVIDENTLY_PROJECT_NAME']
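
# A minimal sketch (hypothetical, not part of the original class below) of the
# get-or-compute pattern eval_features_by_user_cache is meant for: reuse a
# user's feature evaluations for 30 seconds before asking Evidently again.
# The evaluate_all_features callable is an illustrative assumption.
def cached_feature_evaluations(user_id: str, evaluate_all_features) -> Dict:
    evaluations = eval_features_by_user_cache.get(user_id)
    if evaluations is None:
        # Cache miss or the 30 second TTL expired; re-evaluate and cache the result.
        evaluations = evaluate_all_features(user_id)
        eval_features_by_user_cache[user_id] = evaluations
    return evaluations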

class EvidentlyFeatureResolver:
    """
    This class is used by ExperimentManager to determine if an Evidently experiment is active
    for a feature as well as for mapping a correlation ID to an EvidentlyExperiment instance for logging outcomes.
    """

    def evaluate_feature(self, user_id: str, feature: str) -> EvidentlyExperiment:
        """ Evaluates a storefront feature for a user
        An EvidentlyExperiment will be returned if there is an active Evidently experiment for the feature or
        None if an experiment is not active.
        """
        cache_key = user_id

import os
import re
import logging

from gevent.pool import Pool
from gevent.server import StreamServer

from expiring_dict import ExpiringDict

# DataHandler, Disconnect, CommandError, and Error come from the protocol layer
# of the original project; they are used below but not defined in this snippet.

logger = logging.getLogger(__name__)


class Server(object):
    def __init__(self, host=None, port=None, max_clients=None):
        if not host:
            host = os.getenv('HOST', '0.0.0.0')
        if not port:
            port = os.getenv('PORT', 31337)
        if not max_clients:
            max_clients = os.getenv('MAX_CLIENTS', 15)
        self._pool = Pool(int(max_clients))
        self._server = StreamServer((host, int(port)),
                                    self.connection_handler,
                                    spawn=self._pool)

        self._protocol = DataHandler()
        self._kv = ExpiringDict()

        self._commands = self.get_commands()

    def get_commands(self):
        return {
            'GET': self.get,
            'SET': self.set,
            'DEL': self.delete,
            'KEYS': self.keys,
            'FLUSHDB': self.flush,
            'EXPIRE': self.expire,
            'HGET': self.hget,
            'HSET': self.hset,
            'LSET': self.lset,
            'RPUSH': self.rpush,
            'LPUSH': self.lpush,
            'LRANGE': self.lrange,
            'LINDEX': self.lindex
        }

    def connection_handler(self, conn, address):
        logger.info('Connection received: %s:%s' % address)
        socket_file = conn.makefile('rwb')

        while True:
            try:
                data = self._protocol.handle_request(socket_file)
            except Disconnect:
                logger.info('Client went away: %s:%s' % address)
                break
            try:
                resp = self.get_response(data)
            except CommandError as exc:
                logger.exception('Command error')
                resp = Error(exc.args[0])

            self._protocol.write_response(socket_file, resp)

    def run(self):
        self._server.serve_forever()

    def get_response(self, data):
        if not isinstance(data, list):
            try:
                data = data.split()
            except AttributeError:
                raise CommandError('Request must be list or simple string.')

        if not data:
            raise CommandError('Missing command')

        command = data[0].upper()
        if command not in self._commands:
            raise CommandError('Unrecognized command: %s' % command)
        else:
            logger.debug('Received %s', command)
        try:
            response = self._commands[command](*data[1:])
        except TypeError:
            raise CommandError(
                f'ERR wrong number of arguments for {command.lower()} command')
        return response

    def get(self, key):
        return self._kv.get(key)

    def set(self, key, value):
        self._kv[key] = value
        return '1'

    def delete(self, key):
        if key in self._kv:
            del self._kv[key]
            return 1
        return 0

    def keys(self, pattern):
        r = []
        if pattern == '*':
            pattern = r'\w*'
        for k in self._kv.keys():
            if re.match(pattern, k):
                r.append(k)
        return r

    def flush(self):
        kvlen = len(self._kv)
        self._kv.clear()
        return str(kvlen)

    def expire(self, key, ttl):
        value = self._kv.get(key)
        if value is not None:
            del self._kv[key]
            self._kv.ttl(key, value, float(ttl))
            return 1
        return 0

    def hset(self, key, field, value):
        self._kv.setdefault(key, {})
        self._kv[key][field] = value
        return 1

    def hget(self, k, field):
        if self._kv.get(k):
            return self._kv[k].get(field)

    def lset(self, key, index, value):
        self._kv.setdefault(key, [])
        self._kv[key][int(index)] = value
        return 'OK'

    def rpush(self, key, *value):
        self._kv.setdefault(key, [])
        for v in value:
            self._kv[key].append(v)
        return len(self._kv[key])

    def lpush(self, key, *value):
        self._kv.setdefault(key, [])
        for v in value:
            self._kv[key].insert(0, v)
        return len(self._kv[key])

    def lrange(self, key, start, end):
        if isinstance(self._kv.get(key), list):
            if end == '-1':
                return self._kv[key][int(start):]
            return self._kv[key][int(start):int(end) + 1]
        return None

    def lindex(self, key, index):
        if isinstance(self._kv.get(key), list):
            try:
                return self._kv[key][int(index)]
            except IndexError:
                return None
        return None
Example 14
import logging
import os

import vk_api
from vk_api.bot_longpoll import VkBotLongPoll
from vk_api.upload import VkUpload
from vk_api.bot_longpoll import VkBotEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor

# Note: this snippet uses the expiringdict package (max_len / max_age_seconds
# constructor) rather than expiring_dict as in the other examples.
from expiringdict import ExpiringDict
import COVID19Py

keyboard = VkKeyboard(one_time=False)
keyboard.add_button('Коронавирус в мире', color=VkKeyboardColor.PRIMARY)
keyboard.add_button('Коронавирус в россии', color=VkKeyboardColor.PRIMARY)

corona_dict = ExpiringDict(max_len=2, max_age_seconds=3600)
covid19 = COVID19Py.COVID19()

vk_session = vk_api.VkApi(token=os.getenv('TOKEN'), api_version='5.95')
vk = vk_session.get_api()
longpoll = VkBotLongPoll(vk_session, os.getenv('GROUP_ID'))
upload = VkUpload(vk_session)

logging.basicConfig(format='%(levelname)s %(name)s [%(asctime)s]: %(message)s',
                    datefmt='%d:%m:%Y:%H:%M:%S')

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.info('start')

Example 15
from time import sleep
from expiring_dict import ExpiringDict

cache = ExpiringDict()  # No TTL set, keys set via [] will not expire

cache["abc"] = "persistent"
cache.ttl("123", "expires", 1)  # This will expire after 1 second
print("abc" in cache)
print("123" in cache)
sleep(1.1)
print("abc" in cache)
print("123" not in cache)

cache2 = ExpiringDict(1)

cache2["abc"] = "expires"
cache2["123"] = "also expires"
print("abc" in cache2)
print("123" in cache2)
sleep(1.1)
print("abc" not in cache2)
print("123" not in cache2)

def test_class_ttl():
    d = ExpiringDict(ttl=1)
    d["key"] = "should be gone"
    assert len(d) == 1
    sleep(1.1)
    assert len(d) == 0
Example 17
    history_data_columns: List[str] = []
    admin_users_data_columns: List[str] = []
    ui: UIConfig = None
    track_history_endpoints: List[str] = [
        "model_delete",
        "model_delete_all",
        "model_edit_post",
        "model_add",
        "presets_use",
        "init_db_run",
        "file_upload",
        "sql_query_run",
        "login",
        "logout_post",
    ]
    hide_columns: List[Any] = []

    @validator("displayable_setting")
    def displayable_setting_cannot_be_changed(cls, value):
        return ["presets_folder", "composite_csv_settings", "name"]

    class Config(BaseConfig):
        arbitrary_types_allowed = True


# instance of config on current run
cfg = Config()
cfg.sessions = ExpiringDict(ttl=3600)
cfg.jinja = jinja
cfg.ui = UIConfig(colors=ColorSchema())
Example 18
import boto3
import os
import random

from botocore.exceptions import ClientError
from aws_lambda_powertools import Logger
from expiring_dict import ExpiringDict

logger = Logger(child=True)

_clients_by_region = {}
# Since the DescribeCampaign API is easily throttled and we only need
# the minProvisionedTPS from the campaign, use a cache to help smooth
# out periods where we get throttled.
_campaign_cache = ExpiringDict(22 * 60)

PROJECT_NAME = 'PersonalizeMonitor'
ALARM_NAME_PREFIX = PROJECT_NAME + '-'
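
# A minimal sketch (hypothetical helper, not from the original module) of how
# _campaign_cache smooths out DescribeCampaign throttling: cache each campaign
# description for 22 minutes and read fields such as minProvisionedTPS from the
# cached copy. boto3 is used directly here; the original resolves clients per region.
def get_campaign_cached(campaign_arn):
    campaign = _campaign_cache.get(campaign_arn)
    if campaign is None:
        personalize = boto3.client('personalize')
        campaign = personalize.describe_campaign(campaignArn=campaign_arn)['campaign']
        _campaign_cache[campaign_arn] = campaign
    return campaign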


def put_event(detail_type, detail, resources=None):
    # Avoid a mutable default argument for the resources list.
    resources = resources if resources is not None else []
    event_bridge = get_client('events')

    logger.info({
        'detail_type': detail_type,
        'detail': detail,
        'resources': resources
    })

    event_bridge.put_events(Entries=[{