Example #1
    def setUp(self):
        try:
            self.redlock = Redlock([{"host": "localhost"}])
            self.dstlock = Redlock([{"host": "localhost", "port": 6379, "socket_timeout": 0.5},
                                    {"host": "localhost", "port": 6380, "socket_timeout": 0.5},
                                    {"host": "localhost", "port": 6381, "socket_timeout": 0.5}])
        except Exception:
            pass
Example #2
    def redlock(self):
        return Redlock([{
            'host': 'localhost',
            'port': 6379,
            'db': 0
        }], retry_count=5)
Example #3
def dlock(key, ttl, **kwargs):
    """
    分布式锁
    :param key: 分布式锁ID
    :param ttl: 分布式锁生存时间
    :param kwargs: 可选参数字典
    :return: None
    """
    resource_servers = [{
        'host': REDIS_HOST,
        'port': REDIS_PORT,
        'db': REDIS_DB,
        'password': REDIS_PASSWORD
    }]
    dl = Redlock(resource_servers)
    # Acquire the lock
    lock = dl.lock(key, ttl)
    # if lock is False:
    #     detail = u'acquire lock[%s] error' % key
    #     raise AcquireLockError(detail)

    yield lock

    # Release the lock
    if isinstance(lock, Lock):
        dl.unlock(lock)
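Since dlock yields the lock object, it is evidently meant to be used as a generator-based context manager; the contextlib decorator is not shown in the original snippet. A minimal usage sketch under that assumption (the key name and TTL below are hypothetical):

from contextlib import contextmanager
from redlock import Lock

locked = contextmanager(dlock)  # hypothetical wrapper around the generator above

with locked('order:42', ttl=10000) as lock:
    # dl.lock() returns False when the lock was not acquired
    if isinstance(lock, Lock):
        pass  # critical section goes here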
Example #4
    def __init__(self, verbose: bool = False):
        self.lock_manager = Redlock([{
            "host": "localhost",
            "port": 6379,
            "db": 0
        }])
        self.verbose = verbose
Example #5
def lightning_order_with_redlock() -> None:
    """
    Lightning order with Redlock algorithm.
    :return: None
    """
    r = redis.Redis()

    dlm = Redlock([{
        'host': 'localhost',
        'port': 6379,
        'db': 0
    }, ])  # Stands for "distributed lock manager"

    lock = None
    try:
        # Try to acquire the lock; lock() returns a Lock on success or
        # False once its retries are exhausted (it does not block forever)
        lock = dlm.lock(LOCK_KEY, 30000)
        # Business code
        remaining = int(r.get('stock'))
        if remaining > 0:
            r.set('stock', str(remaining - 1))
            print(f'Deducted stock, {remaining - 1} remaining')
        else:
            print('Failed to deduct stock')
    except MultipleRedlockException as e:
        print(e)
    finally:
        # Release the lock only if it was actually acquired
        if lock:
            dlm.unlock(lock)
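The snippet assumes LOCK_KEY and a 'stock' counter already exist; neither is defined in the original. A minimal, hypothetical setup for exercising it:

import redis

LOCK_KEY = 'lock:stock'        # hypothetical key name
redis.Redis().set('stock', 5)  # seed the shared counter
lightning_order_with_redlock()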
Example #6
    def setUp(self):
        super().setUp()

        self.lock = RedisLocker(current_app.config['REDIS_URL'])
        another_lock = Redlock([
            current_app.config['REDIS_URL'],
        ],
                               retry_count=1)
        self.another_locker = another_lock
Example #7
    def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
        self.__redis_connection = redis_connection
        if self.__redis_connection is None:
            self.__redis_connection = StrictRedis.from_url(
                current_app.conf.CELERY_REDIS_SCHEDULER_URL)

        self._schedule = EntryProxy(self.__redis_connection)
        self._locker = locker
        if self._locker is None:
            self._locker = Redlock(
                [current_app.conf.CELERY_REDIS_SCHEDULER_URL])
        super(ProbitScheduler, self).__init__(*args, **kwargs)
Example #8
def select_gpu(redis_conf=None, timeout=10000, shuffle=True):
    """Sets the CUDA_VISIBLE_DEVICES environment variable

    :param redis_conf: Redis configuration passed to redlock-py
    :param timeout: Timeout of the lock in milliseconds, default 10000
    :param shuffle: Shuffles the available GPU list, default True
    """
    if len(os.environ.get('CUDA_VISIBLE_DEVICES', 'unset')) == 0:
        # Environment variable empty
        return "", None

    gpu_status = subprocess.check_output(['nvidia-smi', 'pmon', '-c',
                                          '1']).decode('utf-8')
    # Example of expected result from nvidia-smi:
    #   # gpu        pid  type    sm   mem   enc   dec   command
    #   # Idx          #   C/G     %     %     %     %   name
    #       0      25729     C    94    57     0     0   python
    #       1          -     -     -     -     -     -   -
    gpu_status = list(map(lambda x: x.split(), gpu_status.split("\n")[2:-1]))

    # Check whether a GPU is already in use by the current process
    pid = os.getpid()
    gpu_pids = map(lambda x: x[:2], gpu_status)
    for gpu, p in gpu_pids:
        if p == '-':
            continue
        if pid == int(p):
            return int(gpu), None

    gpu_status = list(filter(lambda x: x[7] == '-', gpu_status))
    if shuffle:
        # Shuffle the GPU list
        random.shuffle(gpu_status)

    if redis_conf is None:
        redis_conf = {'unix_socket_path': '/var/run/redis/redis-server.sock'}
    dlm = Redlock([redis_conf])

    if len(gpu_status) > 0:
        for gpu_ in gpu_status:
            gpu = gpu_[0]
            gpu_lock = dlm.lock("{}:gpu{}".format(platform.node(), gpu),
                                timeout)
            if gpu_lock is not False:
                os.environ['CUDA_VISIBLE_DEVICES'] = gpu

                def unlock():
                    return dlm.unlock(gpu_lock)

                return int(gpu), unlock

    raise Exception("No GPU available!")
Example #9
    def __init__(self, key, host='127.0.0.1', port=6379, db=0):
        self.key = key  # key of the redis bitmap
        self.redis_cli = redis.StrictRedis(host=host,
                                           port=port,
                                           db=db,
                                           charset='utf-8')
        self.red_lock = Redlock([
            {
                "host": host,
                "port": port,
                "db": db
            },
        ])
Example #10
    def __init__(self):
        global eval_pool
        config = get_config()
        self.redis_client = redis.StrictRedis(host=config.host(),
                                              port=config.port(),
                                              db=0)
        self.dlm = Redlock([
            {
                "host": "localhost",
                "port": 6379,
                "db": 0
            },
        ], retry_count=10)
Example #11
    def __init__(self, nl_rest_url, data_key, user="******"):
        self.user = user
        self._dlm = Redlock([
            {
                "host": "redis_trade",
                "port": 6379,
                "db": 0
            },
        ])
        self.error = None
        self.nl_rest_url = nl_rest_url
        self.data_key = data_key
        self.token = self._get_token
        self.data = self._get_data
Example #12
    def __init__(self, conf, prefix=None, ttl_sec=60, backlog_path=None):
        self.backlog_lock = Lock()
        self.conf = conf
        self.redlock = Redlock([{"host": conf['backend']['host'],
                                 "port": int(conf['backend']['port']),
                                 "db": 13,
                                 "password": conf['backend']['password']}],
                               retry_count=100000, retry_delay=0.01)
        # Fall back to an empty prefix so the pattern below is always well-defined
        self.prefix = prefix + "-" if prefix is not None else ""
        self.ttl = ttl_sec * 1000
        self.backlog_filename_pat = re.compile(
            self.prefix + r'([\w\d.\-\\/]+?)-backlog\.([\w\d\-]+)')
        self.backlog = {}
        self.backlog_path = backlog_path  # Should be an absolute path to the dir holding backlog pickles
        self.is_dumping = False
        if not os.path.exists(self.backlog_path):
            os.makedirs(self.backlog_path)
        self.reload()
Example #13
    def __init__(self):
        config = get_config()
        startup_nodes = list(map(
            lambda x: {
                "host": x,
                "port": "%s" % (config.port())
            }, config.cluster_nodes()))
        self.redis_client = rediscluster.StrictRedisCluster(
            startup_nodes=startup_nodes, decode_responses=True)
        self.dlm = Redlock([
            {
                "host": "localhost",
                "port": 6379,
                "db": 0
            },
        ], retry_count=10)
Example #14
    def provide_default_basics(cls):
        """
            provide default basics
        """
        config = Configuration.load(os.path.join(root_path, cls.CONF_PATH),
                                    Ini).get(cls.CONF_SECTION)
        redis_host = config.get('host', cls.DEFAULT_DB_HOST)
        redis_port = int(config.get('port', cls.DEFAULT_DB_PORT))
        redis_db_num = int(config.get('dbnum', cls.DEFAULT_DB_NUM))
        redlock = Redlock([{
            'host': redis_host,
            'port': redis_port,
            'db': redis_db_num
        }])

        logger = Logger.get('lock', cls.LOG_PATH)

        return (logger, redlock)
Example #15
    def __init__(self, name, **redis_kwargs):
        """The default connection parameters are: host='localhost', port=6379, db=0

       The work queue is identified by "name".  The library may create other
       keys with "name" as a prefix.
       """
        self._db = redis.StrictRedis(**redis_kwargs)
        self.lock_manager = Redlock([redis_kwargs])
        # The session ID will uniquely identify this "worker".
        self._session = str(uuid.uuid4())
        # Work queue is implemented as two queues: main, and processing.
        # Work is initially in main, and moved to processing when a client picks it up.
        self._main_q_key = name
        self._processing_q_key = name + ":processing"
        self._lease_key_prefix = name + ":leased_by_session:"
        self._gc_lock_key = name + ":gc-lock"
        self._gc_lock = RedisDistributedLock(self.lock_manager,
                                             self._gc_lock_key)
        self._leases = {}
Example #16
    def __init__(self, *args, **kwargs):
        if hasattr(current_app.conf, 'CELERY_REDIS_SCHEDULER_URL'):
            logger.info('backend scheduler using %s',
                        current_app.conf.CELERY_REDIS_SCHEDULER_URL)
        else:
            logger.info('backend scheduler using %s', DEFAULT_REDIS_URI)

        self.update_interval = current_app.conf.get(
            'UPDATE_INTERVAL') or datetime.timedelta(seconds=10)

        # how long we should hold on to the redis lock in seconds
        if 'CELERY_REDIS_SCHEDULER_LOCK_TTL' in current_app.conf:
            lock_ttl = current_app.conf.CELERY_REDIS_SCHEDULER_LOCK_TTL
        else:
            lock_ttl = 30

        if lock_ttl < self.update_interval.seconds:
            lock_ttl = self.update_interval.seconds * 2
        self.lock_ttl = lock_ttl

        self._dirty = set()  # keeps modified entries by name for sync later on
        self._schedule = {}  # keeping dynamic schedule from redis DB here
        # self.data is used for statically configured schedule
        try:
            self.schedule_url = current_app.conf.CELERY_REDIS_SCHEDULER_URL
        except AttributeError:
            self.schedule_url = DEFAULT_REDIS_URI

        self.rdb = StrictRedis.from_url(self.schedule_url)
        logger.info('Setting RedLock provider to {}'.format(self.schedule_url))
        # Redlock expects connection info (a URL string or dict), not a client instance
        self.dlm = Redlock([self.schedule_url])
        self._secure_cronlock = \
            lock_factory(self.dlm, 'celery:beat:task_lock',  self.lock_ttl)
        self._last_updated = None

        self.Entry.scheduler = self
        self.Entry.rdb = self.rdb

        # This will launch setup_schedule if not lazy
        super(RedisScheduler, self).__init__(*args, **kwargs)
        logger.info('Scheduler ready')
Example #17
    def id(self):
        deviceIP = self.client.host
        self.session_id = self.get_redis_session(deviceIP)
        if not self.check_session_invalid(self.session_id, deviceIP):
            local_ip = self.get_local_ip()
            redisobj = RedisConfig()
            redis_server = redisobj.get_redis_servers(True)
            redis_conf_ls = [{
                "host": s["host"],
                "port": s["port"],
                "db": s["dbno"]
            } for s in redis_server]
            lock_mgmt = Redlock(redis_conf_ls)
            device_lock = lock_mgmt.lock(
                local_ip + "_" + deviceIP + '_' + self.username +
                '_device_lock_calabash', 30 * 1000)
            tmp_count = 0
            while isinstance(device_lock,
                             bool) and not device_lock and tmp_count < 1000:
                tmp_count += 1
                time.sleep(0.5)
                self.session_id = self.get_redis_session(deviceIP)
                if self.check_session_invalid(self.session_id, deviceIP):
                    return self.session_id

                device_lock = lock_mgmt.lock(
                    local_ip + "_" + deviceIP + '_' + self.username +
                    '_device_lock_calabash', 30 * 1000)

            self.session_id = self.get_redis_session(deviceIP)
            if self.check_session_invalid(self.session_id, deviceIP):
                return self.session_id

            self.authenticate(self.username, self.password)
            # Store the session ID in redis
            self.set_session_to_redis(self.session_id, deviceIP)
            if not isinstance(device_lock, bool):
                lock_mgmt.unlock(device_lock)
        return self.session_id
Example #18
    def __init__(self, url: str):
        logger.info('Connecting to Redis on {}..'.format(url))
        self._redis = redis.StrictRedis.from_url(url)
        self._redlock = Redlock([url])
Example #19
    def __init__(self, redis_config, quency_config):
        self.quency_config = quency_config
        self.redlock = Redlock([redis_config])
        self.rds = redis.Redis(**redis_config)
Example #20
def __commit_image_file(
        data: bytes,
        mode: str,
        auto_remove: bool,
        image_format: str,
        # WebP options
        method: int,
        lossless: bool,
        quality: int,
        attach_info: str):
    """
    Commit single file to weed FS filer

    :param data:
    :param mode:
    :param auto_remove:
    :param image_format:
    :param method:
    :param lossless:
    :param quality:
    :param attach_info:
    :return:
    """
    request_count.labels("__commit_image_file").inc()
    if mode not in {"keep", "block", "largest"}:
        raise ParameterError("mode must be one of keep/block/largest.")

    try:
        attach_obj = json.loads(attach_info)
    except Exception as ex:
        log.warning(
            "Error while parsing attach info as JSON caused by: {}".format(
                str(ex)))
        attach_obj = {}
    with BytesIO(data) as fp:
        im = Image.open(fp)
        width = im.width
        height = im.height
        im_format = im.format
        attach_obj["mode"] = im.mode
        attach_obj["tick"] = int(time.time() * 1000)
        attach_obj["w"] = width
        attach_obj["h"] = height
        image_format = image_format.upper()
        # No conversion is needed when the target format is "original"
        # or already matches the source format
        image_format_matched = (image_format.lower() == "original"
                                or im.format == image_format)
        image_hash = get_hash(im)
        is_animated = getattr(im, "is_animated", False)
        attach_obj["is_animated"] = is_animated
        if not image_format_matched:
            # Won't convert animated image
            if is_animated:
                log.warning(
                    "Can't convert animated image format from {} -> {}".format(
                        im.format, image_format))

            else:
                with BytesIO() as wio:
                    im_format = image_format.upper()
                    if image_format == "WEBP":
                        im.save(wio,
                                format="WEBP",
                                lossless=lossless,
                                method=method,
                                quality=quality)
                    else:
                        im.save(wio, format=image_format)
                    wio.seek(0)
                    data = wio.read()
    attach_obj["format"] = im_format
    lazy_existed_file_info = LazyResource(
        lambda: metadb.list_images(image_hash))
    # Lock the hash in redis
    with RedisDistributedLock(Redlock(redis_connection_pool), image_hash) as _:
        if mode == "keep":
            need_to_write = True
        else:
            if len(lazy_existed_file_info.resource) <= 0:
                need_to_write = True
            else:
                if mode == "block":
                    need_to_write = False
                elif mode == "largest":
                    max_image_size = max(
                        int(info["w"]) * int(info["h"])
                        for info in lazy_existed_file_info.resource
                        if "w" in info and "h" in info)
                    need_to_write = (width * height) > max_image_size
                else:
                    raise Exception("Unknown mode: {}".format(mode))
        if need_to_write:
            # File name parts:
            # <image hash>/<hostname>_<micro sec tick(%x)>_<random str>.img
            removed = []
            if auto_remove:
                __delete_hash(image_hash=image_hash)
            fid = filesystem.write(data)
            attach_obj["content_size"] = len(data)
            metadb.add_image(image_hash=image_hash, fid=fid, **attach_obj)
            return {
                "status": "success",
                "wrote": True,
                "fid": fid,
                "removed": removed,
                "hash": image_hash,
                "attach": attach_obj
            }
        else:
            return {
                "status": "success",
                "wrote": False,
                "hash": image_hash,
            }
Example #21
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    auth_tkt_policy = AuthTktAuthenticationPolicy(
        settings["authtkt.secret"],
        hashalg="sha512",
        callback=groupfinder,
        max_age=2592000,
        secure=asbool(settings.get("authtkt.secure", "false")),
    )
    auth_token_policy = AuthTokenAuthenticationPolicy(callback=groupfinder)
    authorization_policy = ACLAuthorizationPolicy()
    authentication_policy = AuthenticationStackPolicy()
    authentication_policy.add_policy("auth_tkt", auth_tkt_policy)
    authentication_policy.add_policy("auth_token", auth_token_policy)
    # set crypto key
    encryption.ENCRYPTION_SECRET = settings.get("encryption_secret")
    # import this later so the encryption key can be monkeypatched
    from appenlight.models import DBSession, register_datastores

    # registration
    settings["appenlight.disable_registration"] = asbool(
        settings.get("appenlight.disable_registration")
    )

    # update config with cometd info
    settings["cometd_servers"] = {
        "server": settings["cometd.server"],
        "secret": settings["cometd.secret"],
    }

    # Create the Pyramid Configurator.
    settings["_mail_url"] = settings["mailing.app_url"]
    config = CythonCompatConfigurator(
        settings=settings,
        authentication_policy=authentication_policy,
        authorization_policy=authorization_policy,
        root_factory="appenlight.security.RootFactory",
        default_permission="view",
    )
    # custom registry variables

    # resource type information
    config.registry.resource_types = ["resource", "application"]
    # plugin information
    config.registry.appenlight_plugins = {}

    config.set_default_csrf_options(require_csrf=True, header="X-XSRF-TOKEN")
    config.add_view_deriver("appenlight.predicates.csrf_view", name="csrf_view")

    # later, when config is available
    dogpile_config = {
        "url": settings["redis.url"],
        "redis_expiration_time": 86400,
        "redis_distributed_lock": True,
    }
    cache_regions.regions = cache_regions.CacheRegions(dogpile_config)
    config.registry.cache_regions = cache_regions.regions
    engine = engine_from_config(settings, "sqlalchemy.", json_serializer=json.dumps)
    DBSession.configure(bind=engine)

    # JSON renderer that serializes datetime
    config.add_renderer("json", json_renderer)
    config.add_request_method(
        "appenlight.lib.request.es_conn", "es_conn", property=True
    )
    config.add_request_method(
        "appenlight.lib.request.get_user", "user", reify=True, property=True
    )
    config.add_request_method(
        "appenlight.lib.request.get_csrf_token", "csrf_token", reify=True, property=True
    )
    config.add_request_method(
        "appenlight.lib.request.safe_json_body",
        "safe_json_body",
        reify=True,
        property=True,
    )
    config.add_request_method(
        "appenlight.lib.request.unsafe_json_body",
        "unsafe_json_body",
        reify=True,
        property=True,
    )
    config.add_request_method(
        "appenlight.lib.request.add_flash_to_headers", "add_flash_to_headers"
    )
    config.add_request_method(
        "appenlight.lib.request.get_authomatic", "authomatic", reify=True
    )

    config.include("pyramid_redis_sessions")
    config.include("pyramid_tm")
    config.include("pyramid_jinja2")
    config.include("pyramid_mailer")
    config.include("appenlight_client.ext.pyramid_tween")
    config.include("ziggurat_foundations.ext.pyramid.sign_in")
    es_server_list = aslist(settings["elasticsearch.nodes"])
    redis_url = settings["redis.url"]
    log.warning("Elasticsearch server list: {}".format(es_server_list))
    log.warning("Redis server: {}".format(redis_url))
    config.registry.es_conn = Elasticsearch(es_server_list)
    config.registry.redis_conn = redis.StrictRedis.from_url(redis_url)

    config.registry.redis_lockmgr = Redlock(
        [settings["redis.redlock.url"]], retry_count=0, retry_delay=0
    )
    # mailer bw compat
    config.registry.mailer = config.registry.getUtility(IMailer)

    # Configure sessions
    session_factory = session_factory_from_settings(settings)
    config.set_session_factory(session_factory)

    # Configure renderers and event subscribers
    config.add_jinja2_extension("jinja2.ext.loopcontrols")
    config.add_jinja2_search_path("appenlight:templates")
    # event subscribers
    config.add_subscriber(
        "appenlight.subscribers.application_created",
        "pyramid.events.ApplicationCreated",
    )
    config.add_subscriber(
        "appenlight.subscribers.add_renderer_globals", "pyramid.events.BeforeRender"
    )
    config.add_subscriber(
        "appenlight.subscribers.new_request", "pyramid.events.NewRequest"
    )
    config.add_view_predicate(
        "context_type_class", "appenlight.predicates.contextTypeClass"
    )

    register_datastores(
        es_conn=config.registry.es_conn,
        redis_conn=config.registry.redis_conn,
        redis_lockmgr=config.registry.redis_lockmgr,
    )

    # base stuff and scan

    # need to ensure webassets exists otherwise config.override_asset()
    # throws exception
    if not os.path.exists(settings["webassets.dir"]):
        os.mkdir(settings["webassets.dir"])
    config.add_static_view(
        path="appenlight:webassets", name="static", cache_max_age=3600
    )
    config.override_asset(
        to_override="appenlight:webassets/", override_with=settings["webassets.dir"]
    )

    config.include("appenlight.views")
    config.include("appenlight.views.admin")
    config.scan(
        ignore=["appenlight.migrations", "appenlight.scripts", "appenlight.tests"]
    )

    config.add_directive("register_appenlight_plugin", register_appenlight_plugin)

    for entry_point in iter_entry_points(group="appenlight.plugins"):
        plugin = entry_point.load()
        plugin.includeme(config)

    # include other appenlight plugins explicitly if needed
    includes = aslist(settings.get("appenlight.includes", []))
    for inc in includes:
        config.include(inc)

    # run this after everything registers in configurator

    def pre_commit():
        jinja_env = config.get_jinja2_environment()
        jinja_env.filters["tojson"] = json.dumps
        jinja_env.filters["toJSONUnsafe"] = jinja2_filters.toJSONUnsafe

    config.action(None, pre_commit, order=PHASE3_CONFIG + 999)

    def wrap_config_celery():
        configure_celery(config.registry)

    config.action(None, wrap_config_celery, order=PHASE3_CONFIG + 999)

    app = config.make_wsgi_app()
    return app
Example #22
def get_redlock_client():
    return Redlock([settings.REDIS_URL], retry_count=1)
Example #23
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    auth_tkt_policy = AuthTktAuthenticationPolicy(
        settings['authtkt.secret'],
        hashalg='sha512',
        callback=groupfinder,
        max_age=2592000,
        secure=asbool(settings.get('authtkt.secure', 'false')))
    auth_token_policy = AuthTokenAuthenticationPolicy(
        callback=groupfinder
    )
    authorization_policy = ACLAuthorizationPolicy()
    authentication_policy = AuthenticationStackPolicy()
    authentication_policy.add_policy('auth_tkt', auth_tkt_policy)
    authentication_policy.add_policy('auth_token', auth_token_policy)
    # set crypto key
    encryption.ENCRYPTION_SECRET = settings.get('encryption_secret')
    # import this later so the encryption key can be monkeypatched
    from appenlight.models import DBSession, register_datastores
    # update config with cometd info
    settings['cometd_servers'] = {'server': settings['cometd.server'],
                                  'secret': settings['cometd.secret']}

    # Create the Pyramid Configurator.
    settings['_mail_url'] = settings['mailing.app_url']
    config = CythonCompatConfigurator(
        settings=settings,
        authentication_policy=authentication_policy,
        authorization_policy=authorization_policy,
        root_factory='appenlight.security.RootFactory',
        default_permission='view')
    # custom registry variables

    # resource type information
    config.registry.resource_types = ['resource', 'application']
    # plugin information
    config.registry.appenlight_plugins = {}

    config.set_default_csrf_options(require_csrf=True, header='X-XSRF-TOKEN')
    config.add_view_deriver('appenlight.predicates.csrf_view',
                            name='csrf_view')

    # later, when config is available
    dogpile_config = {'url': settings['redis.url'],
                      "redis_expiration_time": 86400,
                      "redis_distributed_lock": True}
    cache_regions.regions = cache_regions.CacheRegions(dogpile_config)
    config.registry.cache_regions = cache_regions.regions
    engine = engine_from_config(settings, 'sqlalchemy.',
                                json_serializer=json.dumps)
    DBSession.configure(bind=engine)

    # JSON renderer that serializes datetime
    config.add_renderer('json', json_renderer)
    config.set_request_property('appenlight.lib.request.es_conn', 'es_conn')
    config.set_request_property('appenlight.lib.request.get_user', 'user',
                                reify=True)
    config.set_request_property('appenlight.lib.request.get_csrf_token',
                                'csrf_token', reify=True)
    config.set_request_property('appenlight.lib.request.safe_json_body',
                                'safe_json_body', reify=True)
    config.set_request_property('appenlight.lib.request.unsafe_json_body',
                                'unsafe_json_body', reify=True)
    config.add_request_method('appenlight.lib.request.add_flash_to_headers',
                              'add_flash_to_headers')
    config.add_request_method('appenlight.lib.request.get_authomatic',
                              'authomatic', reify=True)

    config.include('pyramid_redis_sessions')
    config.include('pyramid_tm')
    config.include('pyramid_jinja2')
    config.include('appenlight_client.ext.pyramid_tween')
    config.include('ziggurat_foundations.ext.pyramid.sign_in')
    es_server_list = aslist(settings['elasticsearch.nodes'])
    redis_url = settings['redis.url']
    log.warning('Elasticsearch server list: {}'.format(es_server_list))
    log.warning('Redis server: {}'.format(redis_url))
    config.registry.es_conn = pyelasticsearch.ElasticSearch(es_server_list)
    config.registry.redis_conn = redis.StrictRedis.from_url(redis_url)

    config.registry.redis_lockmgr = Redlock([settings['redis.redlock.url'], ],
                                            retry_count=0, retry_delay=0)
    # mailer
    config.registry.mailer = Mailer.from_settings(settings)

    # Configure sessions
    session_factory = session_factory_from_settings(settings)
    config.set_session_factory(session_factory)

    # Configure renderers and event subscribers
    config.add_jinja2_extension('jinja2.ext.loopcontrols')
    config.add_jinja2_search_path('appenlight:templates')
    # event subscribers
    config.add_subscriber("appenlight.subscribers.application_created",
                          "pyramid.events.ApplicationCreated")
    config.add_subscriber("appenlight.subscribers.add_renderer_globals",
                          "pyramid.events.BeforeRender")
    config.add_subscriber('appenlight.subscribers.new_request',
                          'pyramid.events.NewRequest')
    config.add_view_predicate('context_type_class',
                              'appenlight.predicates.contextTypeClass')

    register_datastores(es_conn=config.registry.es_conn,
                        redis_conn=config.registry.redis_conn,
                        redis_lockmgr=config.registry.redis_lockmgr)

    # base stuff and scan

    # need to ensure webassets exists otherwise config.override_asset()
    # throws exception
    if not os.path.exists(settings['webassets.dir']):
        os.mkdir(settings['webassets.dir'])
    config.add_static_view(path='appenlight:webassets',
                           name='static', cache_max_age=3600)
    config.override_asset(to_override='appenlight:webassets/',
                          override_with=settings['webassets.dir'])

    config.include('appenlight.views')
    config.include('appenlight.views.admin')
    config.scan(ignore=['appenlight.migrations', 'appenlight.scripts',
                        'appenlight.tests'])

    config.add_directive('register_appenlight_plugin',
                         register_appenlight_plugin)

    for entry_point in iter_entry_points(group='appenlight.plugins'):
        plugin = entry_point.load()
        plugin.includeme(config)

    # include other appenlight plugins explicitly if needed
    includes = aslist(settings.get('appenlight.includes', []))
    for inc in includes:
        config.include(inc)

    # run this after everything registers in configurator

    def pre_commit():
        jinja_env = config.get_jinja2_environment()
        jinja_env.filters['tojson'] = json.dumps
        jinja_env.filters['toJSONUnsafe'] = jinja2_filters.toJSONUnsafe

    config.action(None, pre_commit, order=PHASE3_CONFIG + 999)

    def wrap_config_celery():
        configure_celery(config.registry)

    config.action(None, wrap_config_celery, order=PHASE3_CONFIG + 999)

    app = config.make_wsgi_app()
    return app
Example #24
    return celery


logger = logging.getLogger('python')
logger.setLevel(logging.WARNING)
app = Flask(__name__)
mail = Mail(app)
app.config.from_object('config.{}Config'.format(
    os.environ.get('SERVER_ENV', 'Development')))
app.config['SQLALCHEMY_DATABASE_URI'] = app.config[
    'SQLALCHEMY_DATABASE_URI'].format(os.environ.get('DB_USER'),
                                      os.environ.get('DB_PASS'),
                                      os.environ.get('DB_HOST'),
                                      os.environ.get('DB_NAME'))
app.config['APP_NAME'] = app.config['APP_NAME'].format("Consensus")
app.config['SECRET_KEY'] = app.config['SECRET_KEY'].format(
    os.environ.get('SECRET_KEY', 'SECRET'))
app.config['SECURITY_PASSWORD_SALT'] = app.config[
    'SECURITY_PASSWORD_SALT'].format(os.environ.get('SECURITY_PASSWORD_SALT'))
app.config['CELERY_BROKER_URL'] = 'redis://redis:6379/0'
app.config['CELERY_RESULT_BACKEND'] = "db+{}".format(
    app.config['SQLALCHEMY_DATABASE_URI'])
celery = make_celery(app)
recaptcha = ReCaptcha(app=app)
limiter = Limiter(
    app,
    key_func=get_remote_address,
    global_limits=["20 per minute", "1 per second"],
)
redlock = Redlock([{"host": "redis"}])
Example #25
    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])
Example #26
from redlock import Redlock
from redlock import MultipleRedlockException
import time
dlm = Redlock([
    {
        "host": "42.96.132.158",
        "port": 6379,
        "db": 0,
        'password': "******"
    },
])

try:
    dlm.servers[0].flushall()
    # lock() returns False while the lock is held elsewhere, so retry
    my_lock = dlm.lock('LOCK', 1000)
    while isinstance(my_lock, bool):
        print('wait')
        time.sleep(0.5)
        my_lock = dlm.lock('LOCK', 1000)
    print('dosomething')
    dlm.servers[0].lpush('urls', time.time())
    dlm.unlock(my_lock)
except MultipleRedlockException as e:
    print(e)
Example #27
    def test_bad_connection_info(self):
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])
Example #28
    def __init__(self, host, port=6379, db=0):
        self.redis = redis.StrictRedis(host=host, port=port, db=db)
        # Redlock takes connection info (dicts or URLs) rather than client instances
        self.redlock = Redlock([{"host": host, "port": port, "db": db}])
Example #29
kafka_connection = sys.argv[1]
topic = sys.argv[2]
traces_path = sys.argv[3]

if not traces_path.endswith('/'):
    traces_path += "/"

# To consume messages
consumer = KafkaConsumer(topic,
                         group_id='traces_cache',
                         bootstrap_servers=[kafka_connection])
consumer.commit()
dlm = Redlock([
    {
        "host": "localhost",
        "port": 6379,
        "db": 0
    },
])
for message in consumer:
    key = message.key.decode('utf-8')
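    # Busy-wait: dlm.lock() returns False while another consumer holds the key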
    lock = False
    while not lock:
        lock = dlm.lock(key, 5000)

    with open(traces_path + key, 'ab') as f:
        f.write(message.value)

    dlm.unlock(lock)

    consumer.task_done(message)
Example #30
    def __init__(self):
        self.dlm = Redlock([{"host": "42.96.132.158", "port": 6379, "db": 0, 'password': "******"}])
        self.list = []
        self.driver = PhantomJS()